aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/CodingStyle126
-rw-r--r--Documentation/SubmitChecklist6
-rw-r--r--Documentation/accounting/getdelays.c64
-rw-r--r--Documentation/cpu-freq/core.txt2
-rw-r--r--Documentation/dvb/cards.txt4
-rw-r--r--Documentation/feature-removal-schedule.txt33
-rw-r--r--Documentation/filesystems/bfs.txt2
-rw-r--r--Documentation/filesystems/ocfs2.txt3
-rw-r--r--Documentation/i2c/busses/i2c-amd81112
-rw-r--r--Documentation/i2c/busses/i2c-i8015
-rw-r--r--Documentation/i2c/busses/i2c-nforce26
-rw-r--r--Documentation/ioctl/ioctl-decoding.txt24
-rw-r--r--Documentation/kbuild/kconfig-language.txt8
-rw-r--r--Documentation/kernel-parameters.txt6
-rw-r--r--Documentation/networking/dccp.txt6
-rw-r--r--Documentation/spi/pxa2xx16
-rw-r--r--Documentation/video4linux/CARDLIST.cx882
-rw-r--r--Documentation/video4linux/CARDLIST.saa71347
-rw-r--r--Documentation/video4linux/cafe_ccic54
-rw-r--r--Documentation/video4linux/zr36120.txt162
-rw-r--r--MAINTAINERS17
-rw-r--r--Makefile35
-rw-r--r--arch/alpha/kernel/osf_sys.c6
-rw-r--r--arch/arm/kernel/apm.c2
-rw-r--r--arch/arm/kernel/ecard.c2
-rw-r--r--arch/arm/mach-omap1/Kconfig1
-rw-r--r--arch/arm/mach-omap1/board-osk.c1
-rw-r--r--arch/arm/mach-pnx4008/Makefile2
-rw-r--r--arch/arm/mach-pnx4008/i2c.c167
-rw-r--r--arch/arm/mach-pxa/Kconfig16
-rw-r--r--arch/arm/mach-realview/core.c13
-rw-r--r--arch/arm/mach-realview/core.h1
-rw-r--r--arch/arm/mach-realview/realview_eb.c1
-rw-r--r--arch/arm/mach-versatile/core.c14
-rw-r--r--arch/arm/plat-omap/Kconfig2
-rw-r--r--arch/arm26/kernel/ecard.c2
-rw-r--r--arch/arm26/kernel/irq.c2
-rw-r--r--arch/avr32/boards/atstk1000/atstk1002.c76
-rw-r--r--arch/avr32/kernel/avr32_ksyms.c2
-rw-r--r--arch/avr32/kernel/process.c7
-rw-r--r--arch/avr32/kernel/setup.c24
-rw-r--r--arch/avr32/lib/delay.c2
-rw-r--r--arch/avr32/mach-at32ap/at32ap7000.c182
-rw-r--r--arch/avr32/mach-at32ap/extint.c22
-rw-r--r--arch/avr32/mach-at32ap/intc.c4
-rw-r--r--arch/avr32/mach-at32ap/pio.c85
-rw-r--r--arch/avr32/mach-at32ap/sm.c289
-rw-r--r--arch/cris/arch-v10/drivers/axisflashmap.c2
-rw-r--r--arch/cris/arch-v10/drivers/gpio.c2
-rw-r--r--arch/cris/arch-v32/drivers/axisflashmap.c2
-rw-r--r--arch/cris/arch-v32/drivers/gpio.c2
-rw-r--r--arch/cris/arch-v32/kernel/signal.c2
-rw-r--r--arch/cris/kernel/profile.c2
-rw-r--r--arch/frv/kernel/pm.c6
-rw-r--r--arch/h8300/kernel/ints.c2
-rw-r--r--arch/h8300/platform/h8s/ints.c2
-rw-r--r--arch/i386/kernel/apm.c2
-rw-r--r--arch/i386/kernel/cpu/cpufreq/Kconfig6
-rw-r--r--arch/i386/kernel/cpu/cpufreq/Makefile2
-rw-r--r--arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c813
-rw-r--r--arch/i386/kernel/cpu/cpufreq/gx-suspmod.c4
-rw-r--r--arch/i386/kernel/cpu/cpufreq/longhaul.c8
-rw-r--r--arch/i386/kernel/cpu/cpufreq/p4-clockmod.c38
-rw-r--r--arch/i386/kernel/cpu/cpufreq/sc520_freq.c7
-rw-r--r--arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c15
-rw-r--r--arch/i386/kernel/cpu/cpufreq/speedstep-lib.c32
-rw-r--r--arch/i386/kernel/cpu/cpufreq/speedstep-lib.h1
-rw-r--r--arch/i386/kernel/cpu/cpufreq/speedstep-smi.c3
-rw-r--r--arch/i386/kernel/microcode.c6
-rw-r--r--arch/i386/kernel/smpboot.c2
-rw-r--r--arch/ia64/hp/sim/simserial.c2
-rw-r--r--arch/ia64/kernel/Makefile1
-rw-r--r--arch/ia64/kernel/crash.c22
-rw-r--r--arch/ia64/kernel/crash_dump.c48
-rw-r--r--arch/ia64/kernel/jprobes.S3
-rw-r--r--arch/ia64/kernel/kprobes.c226
-rw-r--r--arch/ia64/kernel/machine_kexec.c7
-rw-r--r--arch/ia64/kernel/mca.c2
-rw-r--r--arch/ia64/kernel/setup.c33
-rw-r--r--arch/ia64/kernel/smp.c4
-rw-r--r--arch/ia64/kernel/traps.c50
-rw-r--r--arch/ia64/mm/contig.c9
-rw-r--r--arch/ia64/mm/init.c9
-rw-r--r--arch/ia64/sn/kernel/setup.c12
-rw-r--r--arch/ia64/sn/kernel/sn2/sn_hwperf.c2
-rw-r--r--arch/ia64/sn/kernel/xpc_channel.c15
-rw-r--r--arch/ia64/sn/kernel/xpc_main.c64
-rw-r--r--arch/m68k/mm/kmap.c2
-rw-r--r--arch/mips/configs/malta_defconfig80
-rw-r--r--arch/mips/kernel/apm.c2
-rw-r--r--arch/mips/kernel/kspd.c2
-rw-r--r--arch/mips/kernel/reset.c2
-rw-r--r--arch/mips/kernel/vmlinux.lds.S2
-rw-r--r--arch/mips/lasat/sysctl.c17
-rw-r--r--arch/mips/lib/csum_partial_copy.c3
-rw-r--r--arch/mips/mips-boards/malta/Makefile2
-rw-r--r--arch/mips/mips-boards/malta/malta_setup.c39
-rw-r--r--arch/mips/mm/cache.c1
-rw-r--r--arch/mips/mm/init.c17
-rw-r--r--arch/parisc/hpux/sys_hpux.c2
-rw-r--r--arch/parisc/kernel/unwind.c2
-rw-r--r--arch/powerpc/Kconfig8
-rw-r--r--arch/powerpc/configs/ps3_defconfig1
-rw-r--r--arch/powerpc/kernel/Makefile1
-rw-r--r--arch/powerpc/kernel/cputable.c5
-rw-r--r--arch/powerpc/kernel/head_32.S7
-rw-r--r--arch/powerpc/kernel/module_32.c23
-rw-r--r--arch/powerpc/kernel/module_64.c23
-rw-r--r--arch/powerpc/kernel/nvram_64.c4
-rw-r--r--arch/powerpc/kernel/of_device.c4
-rw-r--r--arch/powerpc/kernel/of_platform.c2
-rw-r--r--arch/powerpc/kernel/pci_32.c145
-rw-r--r--arch/powerpc/kernel/pci_64.c42
-rw-r--r--arch/powerpc/kernel/ppc_ksyms.c2
-rw-r--r--arch/powerpc/kernel/prom.c55
-rw-r--r--arch/powerpc/kernel/prom_init.c2
-rw-r--r--arch/powerpc/kernel/rtas.c35
-rw-r--r--arch/powerpc/kernel/sysfs.c16
-rw-r--r--arch/powerpc/kernel/traps.c56
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S6
-rw-r--r--arch/powerpc/mm/imalloc.c6
-rw-r--r--arch/powerpc/mm/numa.c65
-rw-r--r--arch/powerpc/platforms/4xx/Kconfig2
-rw-r--r--arch/powerpc/platforms/52xx/lite5200.c2
-rw-r--r--arch/powerpc/platforms/cell/cbe_thermal.c2
-rw-r--r--arch/powerpc/platforms/cell/pmu.c5
-rw-r--r--arch/powerpc/platforms/cell/spufs/coredump.c2
-rw-r--r--arch/powerpc/platforms/embedded6xx/Kconfig2
-rw-r--r--arch/powerpc/platforms/maple/pci.c2
-rw-r--r--arch/powerpc/platforms/maple/setup.c12
-rw-r--r--arch/powerpc/platforms/ps3/Kconfig11
-rw-r--r--arch/powerpc/platforms/pseries/Makefile2
-rw-r--r--arch/powerpc/platforms/pseries/eeh.c1
-rw-r--r--arch/powerpc/platforms/pseries/eeh_cache.c2
-rw-r--r--arch/powerpc/platforms/pseries/eeh_driver.c13
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-cpu.c275
-rw-r--r--arch/powerpc/platforms/pseries/setup.c30
-rw-r--r--arch/powerpc/platforms/pseries/smp.c200
-rw-r--r--arch/powerpc/sysdev/Makefile3
-rw-r--r--arch/powerpc/sysdev/dcr.S39
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe_ic.c40
-rw-r--r--arch/powerpc/sysdev/rom.c1
-rw-r--r--arch/powerpc/xmon/xmon.c10
-rw-r--r--arch/ppc/8260_io/fcc_enet.c4
-rw-r--r--arch/ppc/8xx_io/cs4218_tdm.c2
-rw-r--r--arch/ppc/Kconfig4
-rw-r--r--arch/ppc/kernel/pci.c41
-rw-r--r--arch/ppc/platforms/4xx/Kconfig2
-rw-r--r--arch/s390/Kconfig2
-rw-r--r--arch/s390/kernel/debug.c8
-rw-r--r--arch/s390/kernel/s390_ext.c2
-rw-r--r--arch/sh/Kconfig14
-rw-r--r--arch/sh/Kconfig.debug3
-rw-r--r--arch/sh/Makefile7
-rw-r--r--arch/sh/boards/landisk/irq.c4
-rw-r--r--arch/sh/boards/se/7206/irq.c16
-rw-r--r--arch/sh/boards/se/7619/Makefile2
-rw-r--r--arch/sh/boards/se/7619/io.c102
-rw-r--r--arch/sh/boards/se/7619/setup.c21
-rw-r--r--arch/sh/boot/Makefile40
-rw-r--r--arch/sh/boot/compressed/Makefile6
-rw-r--r--arch/sh/boot/compressed/head.S3
-rw-r--r--arch/sh/boot/compressed/misc.c3
-rw-r--r--arch/sh/configs/landisk_defconfig85
-rw-r--r--arch/sh/configs/se7206_defconfig142
-rw-r--r--arch/sh/configs/se7619_defconfig744
-rw-r--r--arch/sh/drivers/push-switch.c13
-rw-r--r--arch/sh/kernel/cpu/Makefile1
-rw-r--r--arch/sh/kernel/cpu/sh2/entry.S32
-rw-r--r--arch/sh/kernel/cpu/sh2/setup-sh7619.c41
-rw-r--r--arch/sh/kernel/cpu/sh2a/setup-sh7206.c62
-rw-r--r--arch/sh/kernel/cpu/sh4/Makefile9
-rw-r--r--arch/sh/kernel/cpu/sh4/probe.c9
-rw-r--r--arch/sh/kernel/cpu/sh4/setup-sh7750.c31
-rw-r--r--arch/sh/kernel/cpu/sh4/sq.c7
-rw-r--r--arch/sh/kernel/cpu/sh4a/Makefile19
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh73180.c (renamed from arch/sh/kernel/cpu/sh4/clock-sh73180.c)0
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7343.c99
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7770.c (renamed from arch/sh/kernel/cpu/sh4/clock-sh7770.c)0
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7780.c (renamed from arch/sh/kernel/cpu/sh4/clock-sh7780.c)0
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh73180.c (renamed from arch/sh/kernel/cpu/sh4/setup-sh73180.c)0
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7343.c (renamed from arch/sh/kernel/cpu/sh4/setup-sh7343.c)0
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7722.c80
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7770.c (renamed from arch/sh/kernel/cpu/sh4/setup-sh7770.c)0
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7780.c (renamed from arch/sh/kernel/cpu/sh4/setup-sh7780.c)0
-rw-r--r--arch/sh/kernel/early_printk.c20
-rw-r--r--arch/sh/kernel/entry-common.S15
-rw-r--r--arch/sh/kernel/head.S3
-rw-r--r--arch/sh/kernel/process.c15
-rw-r--r--arch/sh/kernel/setup.c41
-rw-r--r--arch/sh/kernel/sh_ksyms.c15
-rw-r--r--arch/sh/kernel/signal.c2
-rw-r--r--arch/sh/kernel/sys_sh.c8
-rw-r--r--arch/sh/kernel/traps.c35
-rw-r--r--arch/sh/kernel/vmlinux.lds.S2
-rw-r--r--arch/sh/mm/Kconfig12
-rw-r--r--arch/sh/mm/cache-sh4.c2
-rw-r--r--arch/sh/mm/init.c2
-rw-r--r--arch/sparc/kernel/ioport.c6
-rw-r--r--arch/sparc/kernel/irq.c4
-rw-r--r--arch/sparc/kernel/of_device.c3
-rw-r--r--arch/sparc/kernel/ptrace.c5
-rw-r--r--arch/sparc/kernel/sun4d_irq.c9
-rw-r--r--arch/sparc/mm/io-unit.c8
-rw-r--r--arch/sparc64/Kconfig8
-rw-r--r--arch/sparc64/Kconfig.debug4
-rw-r--r--arch/sparc64/defconfig54
-rw-r--r--arch/sparc64/kernel/Makefile1
-rw-r--r--arch/sparc64/kernel/chmc.c3
-rw-r--r--arch/sparc64/kernel/entry.S27
-rw-r--r--arch/sparc64/kernel/head.S8
-rw-r--r--arch/sparc64/kernel/isa.c12
-rw-r--r--arch/sparc64/kernel/kprobes.c91
-rw-r--r--arch/sparc64/kernel/of_device.c3
-rw-r--r--arch/sparc64/kernel/pci_sun4v.c16
-rw-r--r--arch/sparc64/kernel/ptrace.c5
-rw-r--r--arch/sparc64/kernel/rtrap.S23
-rw-r--r--arch/sparc64/kernel/stacktrace.c41
-rw-r--r--arch/sparc64/kernel/sun4v_ivec.S20
-rw-r--r--arch/sparc64/kernel/sys_sunos32.c4
-rw-r--r--arch/sparc64/kernel/traps.c30
-rw-r--r--arch/sparc64/kernel/unaligned.c44
-rw-r--r--arch/sparc64/kernel/visemul.c6
-rw-r--r--arch/sparc64/mm/ultra.S8
-rw-r--r--arch/um/drivers/net_kern.c18
-rw-r--r--arch/um/include/net_kern.h2
-rw-r--r--arch/um/sys-i386/ldt.c4
-rw-r--r--arch/v850/Kconfig28
-rw-r--r--arch/x86_64/Kconfig2
-rw-r--r--arch/x86_64/kernel/cpufreq/Kconfig6
-rw-r--r--arch/x86_64/kernel/cpufreq/Makefile2
-rw-r--r--arch/x86_64/kernel/vsyscall.c3
-rw-r--r--arch/xtensa/Kconfig21
-rw-r--r--arch/xtensa/Makefile25
-rw-r--r--arch/xtensa/boot/boot-elf/bootstrap.S3
-rw-r--r--arch/xtensa/boot/boot-redboot/bootstrap.S37
-rw-r--r--arch/xtensa/configs/iss_defconfig6
-rw-r--r--arch/xtensa/kernel/Makefile2
-rw-r--r--arch/xtensa/kernel/align.S42
-rw-r--r--arch/xtensa/kernel/asm-offsets.c5
-rw-r--r--arch/xtensa/kernel/coprocessor.S2
-rw-r--r--arch/xtensa/kernel/entry.S256
-rw-r--r--arch/xtensa/kernel/head.S53
-rw-r--r--arch/xtensa/kernel/irq.c107
-rw-r--r--arch/xtensa/kernel/pci-dma.c44
-rw-r--r--arch/xtensa/kernel/process.c108
-rw-r--r--arch/xtensa/kernel/ptrace.c28
-rw-r--r--arch/xtensa/kernel/setup.c41
-rw-r--r--arch/xtensa/kernel/signal.c28
-rw-r--r--arch/xtensa/kernel/syscall.c95
-rw-r--r--arch/xtensa/kernel/syscalls.c288
-rw-r--r--arch/xtensa/kernel/syscalls.h247
-rw-r--r--arch/xtensa/kernel/time.c8
-rw-r--r--arch/xtensa/kernel/traps.c56
-rw-r--r--arch/xtensa/kernel/vectors.S12
-rw-r--r--arch/xtensa/kernel/vmlinux.lds.S26
-rw-r--r--arch/xtensa/lib/checksum.S3
-rw-r--r--arch/xtensa/lib/memcopy.S2
-rw-r--r--arch/xtensa/lib/memset.S2
-rw-r--r--arch/xtensa/lib/strncpy_user.S2
-rw-r--r--arch/xtensa/lib/strnlen_user.S2
-rw-r--r--arch/xtensa/lib/usercopy.S2
-rw-r--r--arch/xtensa/mm/fault.c10
-rw-r--r--arch/xtensa/mm/init.c6
-rw-r--r--arch/xtensa/mm/misc.S265
-rw-r--r--arch/xtensa/mm/tlb.c445
-rw-r--r--arch/xtensa/platform-iss/console.c8
-rw-r--r--arch/xtensa/platform-iss/network.c2
-rw-r--r--block/ll_rw_blk.c19
-rw-r--r--block/scsi_ioctl.c10
-rw-r--r--crypto/blkcipher.c1
-rw-r--r--crypto/sha512.c2
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acorn/block/fd1772.c4
-rw-r--r--drivers/acorn/char/i2c.c2
-rw-r--r--drivers/ata/pdc_adma.c4
-rw-r--r--drivers/atm/.gitignore2
-rw-r--r--drivers/atm/eni.c4
-rw-r--r--drivers/atm/he.c2
-rw-r--r--drivers/atm/lanai.c2
-rw-r--r--drivers/atm/nicstar.c4
-rw-r--r--drivers/atm/zatm.c4
-rw-r--r--drivers/base/dmapool.c2
-rw-r--r--drivers/block/Kconfig7
-rw-r--r--drivers/block/Makefile1
-rw-r--r--drivers/block/cciss.c23
-rw-r--r--drivers/block/cpqarray.c10
-rw-r--r--drivers/block/swim_iop.c578
-rw-r--r--drivers/cdrom/cdrom.c8
-rw-r--r--drivers/cdrom/cm206.c2
-rw-r--r--drivers/char/Kconfig10
-rw-r--r--drivers/char/agp/Kconfig4
-rw-r--r--drivers/char/agp/generic.c25
-rw-r--r--drivers/char/consolemap.c2
-rw-r--r--drivers/char/ipmi/ipmi_bt_sm.c4
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c2
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c12
-rw-r--r--drivers/char/ipmi/ipmi_poweroff.c6
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c132
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c18
-rw-r--r--drivers/char/lcd.c2
-rw-r--r--drivers/char/lp.c2
-rw-r--r--drivers/char/mem.c12
-rw-r--r--drivers/char/mxser_new.c8
-rw-r--r--drivers/char/n_r3964.c37
-rw-r--r--drivers/char/n_tty.c1
-rw-r--r--drivers/char/pcmcia/synclink_cs.c2
-rw-r--r--drivers/char/random.c2
-rw-r--r--drivers/char/rio/riocmd.c2
-rw-r--r--drivers/char/rtc.c38
-rw-r--r--drivers/char/sx.c8
-rw-r--r--drivers/char/synclink.c2
-rw-r--r--drivers/char/synclinkmp.c4
-rw-r--r--drivers/char/sysrq.c37
-rw-r--r--drivers/char/tty_io.c8
-rw-r--r--drivers/char/viocons.c10
-rw-r--r--drivers/char/vt.c2
-rw-r--r--drivers/char/vt_ioctl.c10
-rw-r--r--drivers/char/watchdog/at91rm9200_wdt.c6
-rw-r--r--drivers/char/watchdog/mpcore_wdt.c2
-rw-r--r--drivers/char/watchdog/omap_wdt.c2
-rw-r--r--drivers/char/watchdog/pcwd_usb.c5
-rw-r--r--drivers/char/watchdog/rm9k_wdt.c44
-rw-r--r--drivers/clocksource/acpi_pm.c6
-rw-r--r--drivers/cpufreq/cpufreq.c153
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c33
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c33
-rw-r--r--drivers/cpufreq/cpufreq_performance.c9
-rw-r--r--drivers/cpufreq/cpufreq_powersave.c9
-rw-r--r--drivers/cpufreq/cpufreq_stats.c11
-rw-r--r--drivers/cpufreq/cpufreq_userspace.c11
-rw-r--r--drivers/cpufreq/freq_table.c28
-rw-r--r--drivers/crypto/Kconfig2
-rw-r--r--drivers/fc4/fc.c10
-rw-r--r--drivers/i2c/algos/Kconfig11
-rw-r--r--drivers/i2c/algos/Makefile1
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c8
-rw-r--r--drivers/i2c/algos/i2c-algo-ite.c806
-rw-r--r--drivers/i2c/algos/i2c-algo-ite.h117
-rw-r--r--drivers/i2c/algos/i2c-algo-pca.c7
-rw-r--r--drivers/i2c/algos/i2c-algo-pcf.c8
-rw-r--r--drivers/i2c/algos/i2c-algo-sgi.c8
-rw-r--r--drivers/i2c/busses/Kconfig49
-rw-r--r--drivers/i2c/busses/Makefile4
-rw-r--r--drivers/i2c/busses/i2c-at91.c325
-rw-r--r--drivers/i2c/busses/i2c-elektor.c2
-rw-r--r--drivers/i2c/busses/i2c-hydra.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c16
-rw-r--r--drivers/i2c/busses/i2c-i810.c6
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c9
-rw-r--r--drivers/i2c/busses/i2c-ite.c278
-rw-r--r--drivers/i2c/busses/i2c-ixp2000.c2
-rw-r--r--drivers/i2c/busses/i2c-ixp4xx.c2
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c89
-rw-r--r--drivers/i2c/busses/i2c-omap.c4
-rw-r--r--drivers/i2c/busses/i2c-parport-light.c2
-rw-r--r--drivers/i2c/busses/i2c-parport.c2
-rw-r--r--drivers/i2c/busses/i2c-pca-isa.c2
-rw-r--r--drivers/i2c/busses/i2c-pnx.c708
-rw-r--r--drivers/i2c/busses/i2c-prosavage.c2
-rw-r--r--drivers/i2c/busses/i2c-savage4.c2
-rw-r--r--drivers/i2c/busses/i2c-versatile.c153
-rw-r--r--drivers/i2c/busses/i2c-via.c2
-rw-r--r--drivers/i2c/busses/i2c-voodoo3.c6
-rw-r--r--drivers/i2c/busses/scx200_i2c.c2
-rw-r--r--drivers/i2c/chips/ds1337.c8
-rw-r--r--drivers/i2c/chips/tps65010.c21
-rw-r--r--drivers/i2c/i2c-core.c67
-rw-r--r--drivers/i2c/i2c-dev.c44
-rw-r--r--drivers/ide/Kconfig2
-rw-r--r--drivers/ide/ide-cd.c7
-rw-r--r--drivers/ide/ide-floppy.c2
-rw-r--r--drivers/ide/ide-probe.c4
-rw-r--r--drivers/ide/ide-tape.c8
-rw-r--r--drivers/ide/pci/hpt366.c886
-rw-r--r--drivers/ide/pci/pdc202xx_new.c483
-rw-r--r--drivers/ide/pci/piix.c4
-rw-r--r--drivers/ide/setup-pci.c4
-rw-r--r--drivers/ieee1394/pcilynx.c2
-rw-r--r--drivers/infiniband/core/Makefile6
-rw-r--r--drivers/infiniband/core/cm.c4
-rw-r--r--drivers/infiniband/core/cma.c416
-rw-r--r--drivers/infiniband/core/fmr_pool.c12
-rw-r--r--drivers/infiniband/core/mad.c90
-rw-r--r--drivers/infiniband/core/mad_priv.h6
-rw-r--r--drivers/infiniband/core/ucma.c874
-rw-r--r--drivers/infiniband/core/uverbs_marshall.c5
-rw-r--r--drivers/infiniband/core/uverbs_mem.c12
-rw-r--r--drivers/infiniband/hw/amso1100/c2_qp.c13
-rw-r--r--drivers/infiniband/hw/ipath/Makefile1
-rw-r--r--drivers/infiniband/hw/ipath/ipath_dma.c189
-rw-r--r--drivers/infiniband/hw/ipath/ipath_driver.c4
-rw-r--r--drivers/infiniband/hw/ipath/ipath_file_ops.c5
-rw-r--r--drivers/infiniband/hw/ipath/ipath_iba6110.c3
-rw-r--r--drivers/infiniband/hw/ipath/ipath_iba6120.c8
-rw-r--r--drivers/infiniband/hw/ipath/ipath_init_chip.c3
-rw-r--r--drivers/infiniband/hw/ipath/ipath_intr.c3
-rw-r--r--drivers/infiniband/hw/ipath/ipath_keys.c8
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mr.c7
-rw-r--r--drivers/infiniband/hw/ipath/ipath_sysfs.c3
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs.c1
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs.h2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c75
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c3
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h2
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c4
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c125
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c81
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h2
-rw-r--r--drivers/input/keyboard/hilkbd.c5
-rw-r--r--drivers/input/touchscreen/Kconfig2
-rw-r--r--drivers/isdn/act2000/act2000_isa.c2
-rw-r--r--drivers/isdn/capi/capidrv.c2
-rw-r--r--drivers/isdn/divert/divert_procfs.c2
-rw-r--r--drivers/isdn/divert/isdn_divert.c6
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c118
-rw-r--r--drivers/isdn/hisax/Kconfig10
-rw-r--r--drivers/isdn/hysdn/hysdn_procconf.c2
-rw-r--r--drivers/isdn/hysdn/hysdn_proclog.c2
-rw-r--r--drivers/isdn/i4l/isdn_audio.c6
-rw-r--r--drivers/isdn/i4l/isdn_net.c2
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c2
-rw-r--r--drivers/isdn/pcbit/layer2.c2
-rw-r--r--drivers/kvm/Kconfig37
-rw-r--r--drivers/kvm/Makefile10
-rw-r--r--drivers/kvm/kvm.h551
-rw-r--r--drivers/kvm/kvm_main.c1917
-rw-r--r--drivers/kvm/kvm_svm.h44
-rw-r--r--drivers/kvm/kvm_vmx.h14
-rw-r--r--drivers/kvm/mmu.c686
-rw-r--r--drivers/kvm/paging_tmpl.h391
-rw-r--r--drivers/kvm/segment_descriptor.h17
-rw-r--r--drivers/kvm/svm.c1641
-rw-r--r--drivers/kvm/svm.h315
-rw-r--r--drivers/kvm/vmx.c2014
-rw-r--r--drivers/kvm/vmx.h296
-rw-r--r--drivers/kvm/x86_emulate.c1409
-rw-r--r--drivers/kvm/x86_emulate.h185
-rw-r--r--drivers/leds/Kconfig22
-rw-r--r--drivers/macintosh/adb.c2
-rw-r--r--drivers/macintosh/apm_emu.c2
-rw-r--r--drivers/macintosh/smu.c2
-rw-r--r--drivers/macintosh/via-pmu68k.c2
-rw-r--r--drivers/md/Kconfig1
-rw-r--r--drivers/md/faulty.c2
-rw-r--r--drivers/md/md.c17
-rw-r--r--drivers/md/raid1.c3
-rw-r--r--drivers/md/raid10.c6
-rw-r--r--drivers/md/raid5.c367
-rw-r--r--drivers/media/Kconfig2
-rw-r--r--drivers/media/common/ir-keymaps.c55
-rw-r--r--drivers/media/common/saa7146_i2c.c16
-rw-r--r--drivers/media/dvb/b2c2/Kconfig1
-rw-r--r--drivers/media/dvb/b2c2/flexcop-fe-tuner.c10
-rw-r--r--drivers/media/dvb/bt8xx/Kconfig2
-rw-r--r--drivers/media/dvb/bt8xx/dst_ca.c2
-rw-r--r--drivers/media/dvb/bt8xx/dvb-bt8xx.c11
-rw-r--r--drivers/media/dvb/bt8xx/dvb-bt8xx.h2
-rw-r--r--drivers/media/dvb/cinergyT2/cinergyT2.c13
-rw-r--r--drivers/media/dvb/dvb-usb/Kconfig14
-rw-r--r--drivers/media/dvb/dvb-usb/Makefile3
-rw-r--r--drivers/media/dvb/dvb-usb/a800.c36
-rw-r--r--drivers/media/dvb/dvb-usb/cxusb.c271
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700.h5
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_core.c40
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_devices.c200
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb-mb.c113
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb-mc.c26
-rw-r--r--drivers/media/dvb/dvb-usb/digitv.c22
-rw-r--r--drivers/media/dvb/dvb-usb/dtt200u.c24
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-ids.h14
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-remote.c37
-rw-r--r--drivers/media/dvb/dvb-usb/gp8psk.c22
-rw-r--r--drivers/media/dvb/dvb-usb/nova-t-usb2.c34
-rw-r--r--drivers/media/dvb/dvb-usb/ttusb2.c270
-rw-r--r--drivers/media/dvb/dvb-usb/ttusb2.h70
-rw-r--r--drivers/media/dvb/dvb-usb/umt-010.c24
-rw-r--r--drivers/media/dvb/dvb-usb/vp702x.c20
-rw-r--r--drivers/media/dvb/dvb-usb/vp7045.c40
-rw-r--r--drivers/media/dvb/frontends/Kconfig24
-rw-r--r--drivers/media/dvb/frontends/Makefile3
-rw-r--r--drivers/media/dvb/frontends/dib3000mc.c7
-rw-r--r--drivers/media/dvb/frontends/dib7000m.c1191
-rw-r--r--drivers/media/dvb/frontends/dib7000m.h51
-rw-r--r--drivers/media/dvb/frontends/dib7000p.c1019
-rw-r--r--drivers/media/dvb/frontends/dib7000p.h46
-rw-r--r--drivers/media/dvb/frontends/dibx000_common.h13
-rw-r--r--drivers/media/dvb/frontends/dvb-pll.c67
-rw-r--r--drivers/media/dvb/frontends/dvb-pll.h7
-rw-r--r--drivers/media/dvb/frontends/lg_h06xf.h64
-rw-r--r--drivers/media/dvb/frontends/lgdt330x.c257
-rw-r--r--drivers/media/dvb/frontends/lgdt330x_priv.h15
-rw-r--r--drivers/media/dvb/frontends/lgh06xf.c134
-rw-r--r--drivers/media/dvb/frontends/lgh06xf.h35
-rw-r--r--drivers/media/dvb/frontends/or51132.c176
-rw-r--r--drivers/media/dvb/frontends/or51211.c124
-rw-r--r--drivers/media/dvb/frontends/tda1004x.c10
-rw-r--r--drivers/media/dvb/frontends/tda1004x.h5
-rw-r--r--drivers/media/dvb/frontends/tda8083.c30
-rw-r--r--drivers/media/dvb/frontends/tda826x.c12
-rw-r--r--drivers/media/dvb/frontends/tua6100.c3
-rw-r--r--drivers/media/dvb/pluto2/pluto2.c8
-rw-r--r--drivers/media/dvb/ttpci/Kconfig1
-rw-r--r--drivers/media/dvb/ttpci/av7110.c2
-rw-r--r--drivers/media/dvb/ttpci/av7110_ir.c25
-rw-r--r--drivers/media/dvb/ttpci/budget-av.c26
-rw-r--r--drivers/media/dvb/ttpci/budget-ci.c334
-rw-r--r--drivers/media/dvb/ttpci/budget.c2
-rw-r--r--drivers/media/dvb/ttusb-dec/ttusb_dec.c11
-rw-r--r--drivers/media/dvb/ttusb-dec/ttusbdecfe.c4
-rw-r--r--drivers/media/video/Kconfig31
-rw-r--r--drivers/media/video/Makefile6
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c2
-rw-r--r--drivers/media/video/bt8xx/bttv-i2c.c6
-rw-r--r--drivers/media/video/bt8xx/bttv-input.c101
-rw-r--r--drivers/media/video/cafe_ccic-regs.h160
-rw-r--r--drivers/media/video/cafe_ccic.c2228
-rw-r--r--drivers/media/video/cx88/Kconfig1
-rw-r--r--drivers/media/video/cx88/cx88-blackbird.c179
-rw-r--r--drivers/media/video/cx88/cx88-cards.c86
-rw-r--r--drivers/media/video/cx88/cx88-core.c2
-rw-r--r--drivers/media/video/cx88/cx88-dvb.c333
-rw-r--r--drivers/media/video/cx88/cx88-input.c77
-rw-r--r--drivers/media/video/cx88/cx88-mpeg.c348
-rw-r--r--drivers/media/video/cx88/cx88-tvaudio.c13
-rw-r--r--drivers/media/video/cx88/cx88-video.c32
-rw-r--r--drivers/media/video/cx88/cx88-vp3054-i2c.c2
-rw-r--r--drivers/media/video/cx88/cx88.h47
-rw-r--r--drivers/media/video/dabusb.c4
-rw-r--r--drivers/media/video/ir-kbd-i2c.c46
-rw-r--r--drivers/media/video/mxb.c8
-rw-r--r--drivers/media/video/ov7670.c1333
-rw-r--r--drivers/media/video/planb.c2
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c16
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.c26
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-i2c-core.c81
-rw-r--r--drivers/media/video/saa7115.c18
-rw-r--r--drivers/media/video/saa7134/saa7134-alsa.c63
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c222
-rw-r--r--drivers/media/video/saa7134/saa7134-core.c11
-rw-r--r--drivers/media/video/saa7134/saa7134-dvb.c222
-rw-r--r--drivers/media/video/saa7134/saa7134-i2c.c1
-rw-r--r--drivers/media/video/saa7134/saa7134-input.c76
-rw-r--r--drivers/media/video/saa7134/saa7134.h8
-rw-r--r--drivers/media/video/stv680.c21
-rw-r--r--drivers/media/video/tda9887.c6
-rw-r--r--drivers/media/video/tuner-core.c4
-rw-r--r--drivers/media/video/tuner-simple.c4
-rw-r--r--drivers/media/video/tuner-types.c15
-rw-r--r--drivers/media/video/tveeprom.c9
-rw-r--r--drivers/media/video/usbvideo/quickcam_messenger.c9
-rw-r--r--drivers/media/video/usbvideo/usbvideo.c2
-rw-r--r--drivers/media/video/usbvision/Kconfig12
-rw-r--r--drivers/media/video/usbvision/Makefile5
-rw-r--r--drivers/media/video/usbvision/usbvision-cards.c157
-rw-r--r--drivers/media/video/usbvision/usbvision-core.c2554
-rw-r--r--drivers/media/video/usbvision/usbvision-i2c.c571
-rw-r--r--drivers/media/video/usbvision/usbvision-video.c2051
-rw-r--r--drivers/media/video/usbvision/usbvision.h558
-rw-r--r--drivers/media/video/v4l1-compat.c18
-rw-r--r--drivers/media/video/v4l2-common.c85
-rw-r--r--drivers/media/video/videocodec.c2
-rw-r--r--drivers/media/video/videodev.c173
-rw-r--r--drivers/media/video/vino.c2
-rw-r--r--drivers/media/video/vivi.c16
-rw-r--r--drivers/media/video/zoran_card.c2
-rw-r--r--drivers/media/video/zr36120.c2079
-rw-r--r--drivers/media/video/zr36120.h279
-rw-r--r--drivers/media/video/zr36120_i2c.c132
-rw-r--r--drivers/media/video/zr36120_mem.c78
-rw-r--r--drivers/media/video/zr36120_mem.h3
-rw-r--r--drivers/message/i2o/core.h4
-rw-r--r--drivers/message/i2o/driver.c2
-rw-r--r--drivers/message/i2o/exec-osm.c2
-rw-r--r--drivers/message/i2o/i2o_config.c4
-rw-r--r--drivers/mmc/at91_mci.c346
-rw-r--r--drivers/mmc/mmc_queue.c4
-rw-r--r--drivers/mmc/sdhci.c4
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c2
-rw-r--r--drivers/mtd/rfd_ftl.c2
-rw-r--r--drivers/net/8139too.c5
-rw-r--r--drivers/net/Kconfig8
-rw-r--r--drivers/net/appletalk/ipddp.c2
-rw-r--r--drivers/net/bsd_comp.c2
-rw-r--r--drivers/net/chelsio/cxgb2.c23
-rw-r--r--drivers/net/chelsio/sge.c115
-rw-r--r--drivers/net/chelsio/sge.h4
-rw-r--r--drivers/net/e100.c3
-rw-r--r--drivers/net/hamradio/baycom_epp.c2
-rw-r--r--drivers/net/irda/donauboe.c2
-rw-r--r--drivers/net/irda/irda-usb.c2
-rw-r--r--drivers/net/irda/irport.c2
-rw-r--r--drivers/net/lp486e.c4
-rw-r--r--drivers/net/macb.c8
-rw-r--r--drivers/net/macb.h6
-rw-r--r--drivers/net/myri10ge/myri10ge.c498
-rw-r--r--drivers/net/phy/phy_device.c2
-rw-r--r--drivers/net/ppp_deflate.c4
-rw-r--r--drivers/net/ppp_mppe.c2
-rw-r--r--drivers/net/skge.c2
-rw-r--r--drivers/net/slip.c6
-rw-r--r--drivers/net/smc91x.h90
-rw-r--r--drivers/net/ucc_geth.c12
-rw-r--r--drivers/net/wan/Kconfig5
-rw-r--r--drivers/net/wan/hostess_sv11.c2
-rw-r--r--drivers/net/wan/pc300_drv.c2
-rw-r--r--drivers/net/wan/pc300_tty.c2
-rw-r--r--drivers/net/wan/x25_asy.c8
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_download.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_ioctl.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_main.c2
-rw-r--r--drivers/net/wireless/ipw2100.c2
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c2
-rw-r--r--drivers/net/wireless/wavelan_cs.c2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c2
-rw-r--r--drivers/parisc/iosapic.c2
-rw-r--r--drivers/parport/Kconfig6
-rw-r--r--drivers/pci/hotplug/Kconfig3
-rw-r--r--drivers/pci/hotplug/cpqphp_nvram.c8
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c2
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c2
-rw-r--r--drivers/pcmcia/at91_cf.c2
-rw-r--r--drivers/pcmcia/omap_cf.c2
-rw-r--r--drivers/pnp/isapnp/core.c22
-rw-r--r--drivers/pnp/pnpacpi/core.c6
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c22
-rw-r--r--drivers/pnp/pnpbios/core.c16
-rw-r--r--drivers/pnp/pnpbios/proc.c8
-rw-r--r--drivers/pnp/pnpbios/rsparser.c16
-rw-r--r--drivers/ps3/Makefile1
-rw-r--r--drivers/ps3/vuart.c965
-rw-r--r--drivers/ps3/vuart.h94
-rw-r--r--drivers/rtc/Kconfig2
-rw-r--r--drivers/rtc/Makefile2
-rw-r--r--drivers/rtc/rtc-at91rm9200.c (renamed from drivers/rtc/rtc-at91.c)5
-rw-r--r--drivers/rtc/rtc-dev.c2
-rw-r--r--drivers/rtc/rtc-ds1672.c2
-rw-r--r--drivers/rtc/rtc-lib.c81
-rw-r--r--drivers/rtc/rtc-omap.c3
-rw-r--r--drivers/rtc/rtc-pcf8563.c6
-rw-r--r--drivers/rtc/rtc-proc.c4
-rw-r--r--drivers/rtc/rtc-rs5c372.c2
-rw-r--r--drivers/rtc/rtc-s3c.c6
-rw-r--r--drivers/rtc/rtc-sa1100.c4
-rw-r--r--drivers/rtc/rtc-sh.c245
-rw-r--r--drivers/rtc/rtc-sysfs.c2
-rw-r--r--drivers/rtc/rtc-x1205.c10
-rw-r--r--drivers/s390/char/con3215.c2
-rw-r--r--drivers/s390/char/keyboard.c2
-rw-r--r--drivers/s390/char/sclp_cpi.c2
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c4
-rw-r--r--drivers/s390/crypto/zcrypt_pcica.c4
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.c2
-rw-r--r--drivers/s390/net/ctcmain.c6
-rw-r--r--drivers/s390/net/iucv.c2
-rw-r--r--drivers/s390/scsi/zfcp_aux.c2
-rw-r--r--drivers/sbus/char/vfc_dev.c2
-rw-r--r--drivers/scsi/aacraid/aachba.c2
-rw-r--r--drivers/scsi/aacraid/comminit.c2
-rw-r--r--drivers/scsi/aha1542.c2
-rw-r--r--drivers/scsi/aic7xxx_old.c2
-rw-r--r--drivers/scsi/dc395x.c2
-rw-r--r--drivers/scsi/dpt_i2o.c10
-rw-r--r--drivers/scsi/initio.c2
-rw-r--r--drivers/scsi/osst.c2
-rw-r--r--drivers/scsi/pluto.c2
-rw-r--r--drivers/scsi/sr_ioctl.c2
-rw-r--r--drivers/scsi/sr_vendor.c4
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c2
-rw-r--r--drivers/serial/Kconfig57
-rw-r--r--drivers/serial/icom.c2
-rw-r--r--drivers/serial/sh-sci.c22
-rw-r--r--drivers/serial/sh-sci.h19
-rw-r--r--drivers/spi/pxa2xx_spi.c733
-rw-r--r--drivers/usb/gadget/at91_udc.c2
-rw-r--r--drivers/usb/gadget/serial.c2
-rw-r--r--drivers/usb/host/hc_crisv10.c2
-rw-r--r--drivers/usb/misc/auerswald.c4
-rw-r--r--drivers/usb/misc/uss720.c2
-rw-r--r--drivers/usb/net/rndis_host.c2
-rw-r--r--drivers/usb/serial/cypress_m8.c2
-rw-r--r--drivers/usb/serial/digi_acceleport.c4
-rw-r--r--drivers/usb/serial/io_ti.c2
-rw-r--r--drivers/usb/serial/ipaq.c2
-rw-r--r--drivers/usb/serial/kobil_sct.c4
-rw-r--r--drivers/usb/serial/pl2303.c2
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c2
-rw-r--r--drivers/usb/serial/whiteheat.c4
-rw-r--r--drivers/usb/storage/sddr09.c2
-rw-r--r--drivers/video/Kconfig10
-rw-r--r--drivers/video/Makefile1
-rw-r--r--drivers/video/amba-clcd.c2
-rw-r--r--drivers/video/amifb.c8
-rw-r--r--drivers/video/aty/atyfb_base.c4
-rw-r--r--drivers/video/aty/radeon_i2c.c8
-rw-r--r--drivers/video/gxt4500.c741
-rw-r--r--drivers/video/i810/i810-i2c.c6
-rw-r--r--drivers/video/intelfb/intelfb_i2c.c4
-rw-r--r--drivers/video/matrox/i2c-matroxfb.c4
-rw-r--r--drivers/video/matrox/matroxfb_base.c2
-rw-r--r--drivers/video/matrox/matroxfb_crtc2.c2
-rw-r--r--drivers/video/nvidia/nv_i2c.c6
-rw-r--r--drivers/video/riva/rivafb-i2c.c6
-rw-r--r--drivers/video/savage/savagefb-i2c.c2
-rw-r--r--drivers/video/sstfb.c335
-rw-r--r--drivers/w1/slaves/Kconfig4
-rw-r--r--fs/Kconfig14
-rw-r--r--fs/aio.c2
-rw-r--r--fs/autofs4/inode.c2
-rw-r--r--fs/befs/btree.c2
-rw-r--r--fs/befs/debug.c6
-rw-r--r--fs/bfs/inode.c4
-rw-r--r--fs/binfmt_elf_fdpic.c3
-rw-r--r--fs/binfmt_misc.c2
-rw-r--r--fs/bio.c2
-rw-r--r--fs/block_dev.c202
-rw-r--r--fs/buffer.c33
-rw-r--r--fs/cifs/cifssmb.c2
-rw-r--r--fs/cifs/file.c2
-rw-r--r--fs/compat.c10
-rw-r--r--fs/direct-io.c323
-rw-r--r--fs/exec.c2
-rw-r--r--fs/fcntl.c5
-rw-r--r--fs/file.c255
-rw-r--r--fs/inode.c43
-rw-r--r--fs/jbd/transaction.c2
-rw-r--r--fs/jffs/inode-v23.c4
-rw-r--r--fs/jffs/intrep.c14
-rw-r--r--fs/jfs/jfs_dtree.c4
-rw-r--r--fs/jfs/jfs_filsys.h42
-rw-r--r--fs/jfs/jfs_imap.c2
-rw-r--r--fs/lockd/clntlock.c10
-rw-r--r--fs/lockd/clntproc.c39
-rw-r--r--fs/lockd/svclock.c4
-rw-r--r--fs/lockd/svcshare.c2
-rw-r--r--fs/lockd/xdr.c8
-rw-r--r--fs/lockd/xdr4.c8
-rw-r--r--fs/namespace.c5
-rw-r--r--fs/ncpfs/inode.c34
-rw-r--r--fs/nfs/nfs4proc.c2
-rw-r--r--fs/nfsd/export.c33
-rw-r--r--fs/nfsd/lockd.c2
-rw-r--r--fs/nfsd/nfs4proc.c625
-rw-r--r--fs/nfsd/nfs4state.c91
-rw-r--r--fs/nfsd/nfs4xdr.c14
-rw-r--r--fs/nfsd/nfsfh.c6
-rw-r--r--fs/nfsd/vfs.c2
-rw-r--r--fs/ocfs2/alloc.c2
-rw-r--r--fs/ocfs2/cluster/heartbeat.c4
-rw-r--r--fs/ocfs2/cluster/nodemanager.c198
-rw-r--r--fs/ocfs2/cluster/nodemanager.h17
-rw-r--r--fs/ocfs2/cluster/tcp.c162
-rw-r--r--fs/ocfs2/cluster/tcp.h8
-rw-r--r--fs/ocfs2/cluster/tcp_internal.h15
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c4
-rw-r--r--fs/ocfs2/dlm/dlmlock.c4
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c2
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c6
-rw-r--r--fs/ocfs2/dlmglue.c79
-rw-r--r--fs/ocfs2/file.c8
-rw-r--r--fs/ocfs2/heartbeat.c9
-rw-r--r--fs/ocfs2/inode.c3
-rw-r--r--fs/ocfs2/journal.c46
-rw-r--r--fs/ocfs2/journal.h5
-rw-r--r--fs/ocfs2/localalloc.c2
-rw-r--r--fs/ocfs2/mmap.c6
-rw-r--r--fs/ocfs2/namei.c8
-rw-r--r--fs/ocfs2/ocfs2.h5
-rw-r--r--fs/ocfs2/ocfs2_fs.h14
-rw-r--r--fs/ocfs2/slot_map.c2
-rw-r--r--fs/ocfs2/suballoc.c6
-rw-r--r--fs/ocfs2/super.c96
-rw-r--r--fs/ocfs2/vote.c7
-rw-r--r--fs/open.c3
-rw-r--r--fs/pipe.c12
-rw-r--r--fs/proc/base.c24
-rw-r--r--fs/proc/proc_misc.c12
-rw-r--r--fs/read_write.c2
-rw-r--r--fs/reiserfs/xattr_acl.c2
-rw-r--r--fs/select.c10
-rw-r--r--fs/smbfs/inode.c5
-rw-r--r--fs/smbfs/proc.c6
-rw-r--r--fs/smbfs/smbiod.c5
-rw-r--r--fs/splice.c8
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.c2
-rw-r--r--include/asm-arm/arch-pnx4008/i2c.h67
-rw-r--r--include/asm-arm/arch-pxa/pxa2xx_spi.h5
-rw-r--r--include/asm-arm/thread_info.h2
-rw-r--r--include/asm-avr32/arch-at32ap/at32ap7000.h33
-rw-r--r--include/asm-avr32/arch-at32ap/board.h3
-rw-r--r--include/asm-avr32/arch-at32ap/portmux.h20
-rw-r--r--include/asm-avr32/dma-mapping.h12
-rw-r--r--include/asm-avr32/pgalloc.h2
-rw-r--r--include/asm-frv/thread_info.h2
-rw-r--r--include/asm-generic/vmlinux.lds.h1
-rw-r--r--include/asm-i386/msr.h5
-rw-r--r--include/asm-i386/thread_info.h2
-rw-r--r--include/asm-i386/topology.h1
-rw-r--r--include/asm-ia64/break.h4
-rw-r--r--include/asm-ia64/kprobes.h1
-rw-r--r--include/asm-ia64/sn/xpc.h2
-rw-r--r--include/asm-ia64/thread_info.h2
-rw-r--r--include/asm-ia64/topology.h1
-rw-r--r--include/asm-m68k/swim_iop.h221
-rw-r--r--include/asm-mips/compat.h1
-rw-r--r--include/asm-mips/mach-ip27/irq.h2
-rw-r--r--include/asm-mips/mach-ip27/topology.h1
-rw-r--r--include/asm-mips/pci.h6
-rw-r--r--include/asm-mips/ptrace.h8
-rw-r--r--include/asm-mips/sn/arch.h1
-rw-r--r--include/asm-mips/sn/klconfig.h2
-rw-r--r--include/asm-mips/system.h9
-rw-r--r--include/asm-powerpc/Kbuild2
-rw-r--r--include/asm-powerpc/bitops.h2
-rw-r--r--include/asm-powerpc/bug.h80
-rw-r--r--include/asm-powerpc/cputable.h16
-rw-r--r--include/asm-powerpc/dcr-native.h37
-rw-r--r--include/asm-powerpc/dcr.h2
-rw-r--r--include/asm-powerpc/hw_irq.h19
-rw-r--r--include/asm-powerpc/module.h2
-rw-r--r--include/asm-powerpc/pci-bridge.h4
-rw-r--r--include/asm-powerpc/pci.h33
-rw-r--r--include/asm-powerpc/reg.h2
-rw-r--r--include/asm-powerpc/rtas.h3
-rw-r--r--include/asm-powerpc/thread_info.h2
-rw-r--r--include/asm-powerpc/topology.h1
-rw-r--r--include/asm-ppc/pci-bridge.h8
-rw-r--r--include/asm-ppc/pci.h23
-rw-r--r--include/asm-ppc/reg_booke.h36
-rw-r--r--include/asm-sh/atomic-irq.h71
-rw-r--r--include/asm-sh/atomic-llsc.h107
-rw-r--r--include/asm-sh/atomic.h153
-rw-r--r--include/asm-sh/bug.h53
-rw-r--r--include/asm-sh/bugs.h12
-rw-r--r--include/asm-sh/checksum.h69
-rw-r--r--include/asm-sh/cpu-sh4/cache.h2
-rw-r--r--include/asm-sh/cpu-sh4/freq.h2
-rw-r--r--include/asm-sh/dma-mapping.h10
-rw-r--r--include/asm-sh/irq.h5
-rw-r--r--include/asm-sh/pgtable.h47
-rw-r--r--include/asm-sh/processor.h8
-rw-r--r--include/asm-sh/push-switch.h3
-rw-r--r--include/asm-sh/thread_info.h2
-rw-r--r--include/asm-sh64/pgalloc.h2
-rw-r--r--include/asm-sparc64/dma.h6
-rw-r--r--include/asm-sparc64/irqflags.h89
-rw-r--r--include/asm-sparc64/kprobes.h11
-rw-r--r--include/asm-sparc64/rwsem.h32
-rw-r--r--include/asm-sparc64/system.h49
-rw-r--r--include/asm-sparc64/ttable.h45
-rw-r--r--include/asm-x86_64/msr.h4
-rw-r--r--include/asm-x86_64/thread_info.h2
-rw-r--r--include/asm-x86_64/topology.h1
-rw-r--r--include/asm-xtensa/asmmacro.h153
-rw-r--r--include/asm-xtensa/bug.h25
-rw-r--r--include/asm-xtensa/byteorder.h5
-rw-r--r--include/asm-xtensa/cache.h20
-rw-r--r--include/asm-xtensa/cacheasm.h177
-rw-r--r--include/asm-xtensa/cacheflush.h2
-rw-r--r--include/asm-xtensa/checksum.h2
-rw-r--r--include/asm-xtensa/coprocessor.h13
-rw-r--r--include/asm-xtensa/dma.h5
-rw-r--r--include/asm-xtensa/elf.h9
-rw-r--r--include/asm-xtensa/fcntl.h98
-rw-r--r--include/asm-xtensa/fixmap.h252
-rw-r--r--include/asm-xtensa/io.h64
-rw-r--r--include/asm-xtensa/irq.h8
-rw-r--r--include/asm-xtensa/irq_regs.h1
-rw-r--r--include/asm-xtensa/mmu_context.h269
-rw-r--r--include/asm-xtensa/page.h10
-rw-r--r--include/asm-xtensa/param.h2
-rw-r--r--include/asm-xtensa/pgtable.h41
-rw-r--r--include/asm-xtensa/platform-iss/hardware.h10
-rw-r--r--include/asm-xtensa/platform-iss/simcall.h62
-rw-r--r--include/asm-xtensa/posix_types.h2
-rw-r--r--include/asm-xtensa/processor.h24
-rw-r--r--include/asm-xtensa/ptrace.h2
-rw-r--r--include/asm-xtensa/regs.h138
-rw-r--r--include/asm-xtensa/sembuf.h2
-rw-r--r--include/asm-xtensa/shmbuf.h21
-rw-r--r--include/asm-xtensa/stat.h112
-rw-r--r--include/asm-xtensa/syscall.h20
-rw-r--r--include/asm-xtensa/system.h2
-rw-r--r--include/asm-xtensa/termbits.h11
-rw-r--r--include/asm-xtensa/timex.h17
-rw-r--r--include/asm-xtensa/tlbflush.h42
-rw-r--r--include/asm-xtensa/uaccess.h1
-rw-r--r--include/asm-xtensa/unistd.h798
-rw-r--r--include/asm-xtensa/variant-fsf/core.h359
-rw-r--r--include/asm-xtensa/variant-fsf/tie.h22
-rw-r--r--include/asm-xtensa/xtensa/cacheasm.h708
-rw-r--r--include/asm-xtensa/xtensa/cacheattrasm.h432
-rw-r--r--include/asm-xtensa/xtensa/config-linux_be/core.h1270
-rw-r--r--include/asm-xtensa/xtensa/config-linux_be/defs.h270
-rw-r--r--include/asm-xtensa/xtensa/config-linux_be/specreg.h99
-rw-r--r--include/asm-xtensa/xtensa/config-linux_be/system.h198
-rw-r--r--include/asm-xtensa/xtensa/config-linux_be/tie.h275
-rw-r--r--include/asm-xtensa/xtensa/coreasm.h526
-rw-r--r--include/asm-xtensa/xtensa/corebits.h77
-rw-r--r--include/asm-xtensa/xtensa/hal.h822
-rw-r--r--include/asm-xtensa/xtensa/simcall.h130
-rw-r--r--include/asm-xtensa/xtensa/xt2000-uart.h155
-rw-r--r--include/asm-xtensa/xtensa/xt2000.h408
-rw-r--r--include/asm-xtensa/xtensa/xtboard.h120
-rw-r--r--include/linux/Kbuild2
-rw-r--r--include/linux/aio.h1
-rw-r--r--include/linux/bio.h1
-rw-r--r--include/linux/blkdev.h5
-rw-r--r--include/linux/clocksource.h2
-rw-r--r--include/linux/coda_linux.h2
-rw-r--r--include/linux/compiler.h2
-rw-r--r--include/linux/configfs.h25
-rw-r--r--include/linux/cpufreq.h3
-rw-r--r--include/linux/cpuset.h22
-rw-r--r--include/linux/dccp.h26
-rw-r--r--include/linux/fb.h2
-rw-r--r--include/linux/file.h15
-rw-r--r--include/linux/freezer.h13
-rw-r--r--include/linux/fs.h1
-rw-r--r--include/linux/fsl_devices.h1
-rw-r--r--include/linux/futex.h2
-rw-r--r--include/linux/gameport.h2
-rw-r--r--include/linux/i2c-algo-bit.h5
-rw-r--r--include/linux/i2c-algo-ite.h72
-rw-r--r--include/linux/i2c-algo-pca.h1
-rw-r--r--include/linux/i2c-algo-pcf.h3
-rw-r--r--include/linux/i2c-algo-sgi.h1
-rw-r--r--include/linux/i2c-id.h19
-rw-r--r--include/linux/i2c-pnx.h43
-rw-r--r--include/linux/i2c.h75
-rw-r--r--include/linux/ide.h3
-rw-r--r--include/linux/init.h1
-rw-r--r--include/linux/init_task.h3
-rw-r--r--include/linux/interrupt.h3
-rw-r--r--include/linux/kernel.h2
-rw-r--r--include/linux/kvm.h227
-rw-r--r--include/linux/lockd/bind.h2
-rw-r--r--include/linux/lockd/lockd.h2
-rw-r--r--include/linux/lockd/sm_inter.h2
-rw-r--r--include/linux/lockd/xdr.h8
-rw-r--r--include/linux/lockdep.h16
-rw-r--r--include/linux/mount.h1
-rw-r--r--include/linux/n_r3964.h2
-rw-r--r--include/linux/ncp_mount.h2
-rw-r--r--include/linux/nfsd/nfsd.h4
-rw-r--r--include/linux/nfsd/state.h1
-rw-r--r--include/linux/nfsd/xdr4.h40
-rw-r--r--include/linux/nsproxy.h1
-rw-r--r--include/linux/pipe_fs_i.h5
-rw-r--r--include/linux/raid/raid5.h3
-rw-r--r--include/linux/reciprocal_div.h32
-rw-r--r--include/linux/rtc.h1
-rw-r--r--include/linux/sched.h4
-rw-r--r--include/linux/seqlock.h7
-rw-r--r--include/linux/slab.h306
-rw-r--r--include/linux/slab_def.h100
-rw-r--r--include/linux/smb_fs_sb.h2
-rw-r--r--include/linux/sysctl.h7
-rw-r--r--include/linux/sysrq.h22
-rw-r--r--include/linux/task_io_accounting.h37
-rw-r--r--include/linux/task_io_accounting_ops.h47
-rw-r--r--include/linux/taskstats.h28
-rw-r--r--include/linux/tfrc.h8
-rw-r--r--include/linux/timer.h6
-rw-r--r--include/linux/topology.h5
-rw-r--r--include/linux/videodev2.h1
-rw-r--r--include/media/ir-common.h1
-rw-r--r--include/media/saa7146.h20
-rw-r--r--include/media/tuner-types.h4
-rw-r--r--include/media/tuner.h1
-rw-r--r--include/media/tveeprom.h2
-rw-r--r--include/media/v4l2-common.h7
-rw-r--r--include/media/v4l2-dev.h14
-rw-r--r--include/net/ax25.h2
-rw-r--r--include/net/bluetooth/hci.h4
-rw-r--r--include/net/ip.h3
-rw-r--r--include/net/sctp/sctp.h2
-rw-r--r--include/rdma/ib_marshall.h5
-rw-r--r--include/rdma/ib_verbs.h253
-rw-r--r--include/rdma/rdma_cm.h62
-rw-r--r--include/rdma/rdma_cm_ib.h3
-rw-r--r--include/rdma/rdma_user_cm.h206
-rw-r--r--include/video/sstfb.h13
-rw-r--r--init/Kconfig11
-rw-r--r--init/initramfs.c6
-rw-r--r--init/main.c15
-rw-r--r--init/version.c5
-rw-r--r--ipc/msgutil.c4
-rw-r--r--kernel/cpuset.c82
-rw-r--r--kernel/exit.c8
-rw-r--r--kernel/fork.c38
-rw-r--r--kernel/lockdep.c203
-rw-r--r--kernel/nsproxy.c4
-rw-r--r--kernel/power/process.c21
-rw-r--r--kernel/relay.c4
-rw-r--r--kernel/sched.c515
-rw-r--r--kernel/signal.c4
-rw-r--r--kernel/sysctl.c71
-rw-r--r--kernel/time/clocksource.c8
-rw-r--r--kernel/timer.c162
-rw-r--r--kernel/tsacct.c9
-rw-r--r--lib/Kconfig5
-rw-r--r--lib/Kconfig.debug51
-rw-r--r--lib/Makefile5
-rw-r--r--lib/bitrev.c4
-rw-r--r--lib/ioremap.c1
-rw-r--r--lib/reciprocal_div.c9
-rw-r--r--mm/filemap.c9
-rw-r--r--mm/hugetlb.c2
-rw-r--r--mm/memory.c32
-rw-r--r--mm/oom_kill.c2
-rw-r--r--mm/page-writeback.c89
-rw-r--r--mm/page_alloc.c2
-rw-r--r--mm/readahead.c2
-rw-r--r--mm/slab.c36
-rw-r--r--mm/slob.c16
-rw-r--r--mm/truncate.c4
-rw-r--r--mm/vmscan.c8
-rw-r--r--net/ax25/ax25_addr.c2
-rw-r--r--net/bluetooth/hci_sock.c4
-rw-r--r--net/core/netpoll.c39
-rw-r--r--net/dccp/ackvec.c4
-rw-r--r--net/dccp/ccid.h10
-rw-r--r--net/dccp/ccids/ccid2.c12
-rw-r--r--net/dccp/ccids/ccid3.c517
-rw-r--r--net/dccp/ccids/ccid3.h46
-rw-r--r--net/dccp/ccids/lib/packet_history.c219
-rw-r--r--net/dccp/ccids/lib/packet_history.h128
-rw-r--r--net/dccp/ccids/lib/tfrc.h23
-rw-r--r--net/dccp/ccids/lib/tfrc_equation.c28
-rw-r--r--net/dccp/dccp.h3
-rw-r--r--net/dccp/feat.c6
-rw-r--r--net/dccp/input.c47
-rw-r--r--net/dccp/ipv4.c26
-rw-r--r--net/dccp/ipv6.c24
-rw-r--r--net/dccp/minisocks.c2
-rw-r--r--net/dccp/options.c5
-rw-r--r--net/dccp/output.c39
-rw-r--r--net/dccp/proto.c6
-rw-r--r--net/dccp/timer.c14
-rw-r--r--net/decnet/dn_dev.c6
-rw-r--r--net/decnet/sysctl_net_decnet.c6
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_assoc.c2
-rw-r--r--net/ipv4/devinet.c3
-rw-r--r--net/ipv4/ip_fragment.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_sync.c17
-rw-r--r--net/ipv4/route.c3
-rw-r--r--net/ipv4/sysctl_net_ipv4.c16
-rw-r--r--net/ipv6/addrconf.c3
-rw-r--r--net/ipv6/ndisc.c9
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c11
-rw-r--r--net/sunrpc/cache.c31
-rw-r--r--net/sunrpc/svc.c2
-rw-r--r--net/sunrpc/svcauth_unix.c5
-rw-r--r--net/tipc/config.c2
-rw-r--r--scripts/Kbuild.include19
-rw-r--r--scripts/kconfig/conf.c2
-rw-r--r--scripts/kconfig/confdata.c37
-rw-r--r--scripts/kconfig/gconf.c35
-rw-r--r--scripts/kconfig/gconf.glade4
-rw-r--r--scripts/kconfig/lkc.h2
-rw-r--r--scripts/kconfig/lkc_proto.h3
-rw-r--r--scripts/kconfig/mconf.c21
-rw-r--r--scripts/kconfig/qconf.cc15
-rw-r--r--scripts/kconfig/qconf.h3
-rw-r--r--scripts/kconfig/symbol.c3
-rw-r--r--scripts/kconfig/zconf.tab.c_shipped2
-rw-r--r--scripts/kconfig/zconf.y2
-rw-r--r--scripts/mod/modpost.c1
-rw-r--r--security/selinux/hooks.c2
-rw-r--r--sound/Kconfig8
-rw-r--r--sound/Makefile3
-rw-r--r--sound/ac97_bus.c (renamed from sound/pci/ac97/ac97_bus.c)0
-rw-r--r--sound/aoa/fabrics/Kconfig2
-rw-r--r--sound/core/oss/mixer_oss.c2
-rw-r--r--sound/drivers/Kconfig6
-rw-r--r--sound/oss/ad1848.c2
-rw-r--r--sound/oss/cs4232.c2
-rw-r--r--sound/oss/emu10k1/audio.c6
-rw-r--r--sound/oss/emu10k1/cardmi.c2
-rw-r--r--sound/oss/emu10k1/cardmo.c2
-rw-r--r--sound/oss/emu10k1/midi.c10
-rw-r--r--sound/oss/emu10k1/mixer.c2
-rw-r--r--sound/oss/hal2.c2
-rw-r--r--sound/oss/mpu401.c2
-rw-r--r--sound/oss/opl3.c2
-rw-r--r--sound/oss/sb_common.c2
-rw-r--r--sound/oss/sb_midi.c4
-rw-r--r--sound/oss/sb_mixer.c2
-rw-r--r--sound/oss/v_midi.c2
-rw-r--r--sound/oss/waveartist.c2
-rw-r--r--sound/pci/ac97/Makefile2
1096 files changed, 45733 insertions, 22843 deletions
diff --git a/Documentation/CodingStyle b/Documentation/CodingStyle
index 29c18966b050..0ad6dcb5d45f 100644
--- a/Documentation/CodingStyle
+++ b/Documentation/CodingStyle
@@ -35,12 +35,37 @@ In short, 8-char indents make things easier to read, and have the added
35benefit of warning you when you're nesting your functions too deep. 35benefit of warning you when you're nesting your functions too deep.
36Heed that warning. 36Heed that warning.
37 37
38The preferred way to ease multiple indentation levels in a switch statement is
39to align the "switch" and its subordinate "case" labels in the same column
40instead of "double-indenting" the "case" labels. E.g.:
41
42 switch (suffix) {
43 case 'G':
44 case 'g':
45 mem <<= 30;
46 break;
47 case 'M':
48 case 'm':
49 mem <<= 20;
50 break;
51 case 'K':
52 case 'k':
53 mem <<= 10;
54 /* fall through */
55 default:
56 break;
57 }
58
59
38Don't put multiple statements on a single line unless you have 60Don't put multiple statements on a single line unless you have
39something to hide: 61something to hide:
40 62
41 if (condition) do_this; 63 if (condition) do_this;
42 do_something_everytime; 64 do_something_everytime;
43 65
66Don't put multiple assignments on a single line either. Kernel coding style
67is super simple. Avoid tricky expressions.
68
44Outside of comments, documentation and except in Kconfig, spaces are never 69Outside of comments, documentation and except in Kconfig, spaces are never
45used for indentation, and the above example is deliberately broken. 70used for indentation, and the above example is deliberately broken.
46 71
@@ -69,7 +94,7 @@ void fun(int a, int b, int c)
69 next_statement; 94 next_statement;
70} 95}
71 96
72 Chapter 3: Placing Braces 97 Chapter 3: Placing Braces and Spaces
73 98
74The other issue that always comes up in C styling is the placement of 99The other issue that always comes up in C styling is the placement of
75braces. Unlike the indent size, there are few technical reasons to 100braces. Unlike the indent size, there are few technical reasons to
@@ -81,6 +106,20 @@ brace last on the line, and put the closing brace first, thusly:
81 we do y 106 we do y
82 } 107 }
83 108
109This applies to all non-function statement blocks (if, switch, for,
110while, do). E.g.:
111
112 switch (action) {
113 case KOBJ_ADD:
114 return "add";
115 case KOBJ_REMOVE:
116 return "remove";
117 case KOBJ_CHANGE:
118 return "change";
119 default:
120 return NULL;
121 }
122
84However, there is one special case, namely functions: they have the 123However, there is one special case, namely functions: they have the
85opening brace at the beginning of the next line, thus: 124opening brace at the beginning of the next line, thus:
86 125
@@ -121,6 +160,49 @@ supply of new-lines on your screen is not a renewable resource (think
12125-line terminal screens here), you have more empty lines to put 16025-line terminal screens here), you have more empty lines to put
122comments on. 161comments on.
123 162
163 3.1: Spaces
164
165Linux kernel style for use of spaces depends (mostly) on
166function-versus-keyword usage. Use a space after (most) keywords. The
167notable exceptions are sizeof, typeof, alignof, and __attribute__, which look
168somewhat like functions (and are usually used with parentheses in Linux,
169although they are not required in the language, as in: "sizeof info" after
170"struct fileinfo info;" is declared).
171
172So use a space after these keywords:
173 if, switch, case, for, do, while
174but not with sizeof, typeof, alignof, or __attribute__. E.g.,
175 s = sizeof(struct file);
176
177Do not add spaces around (inside) parenthesized expressions. This example is
178*bad*:
179
180 s = sizeof( struct file );
181
182When declaring pointer data or a function that returns a pointer type, the
183preferred use of '*' is adjacent to the data name or function name and not
184adjacent to the type name. Examples:
185
186 char *linux_banner;
187 unsigned long long memparse(char *ptr, char **retptr);
188 char *match_strdup(substring_t *s);
189
190Use one space around (on each side of) most binary and ternary operators,
191such as any of these:
192
193 = + - < > * / % | & ^ <= >= == != ? :
194
195but no space after unary operators:
196 & * + - ~ ! sizeof typeof alignof __attribute__ defined
197
198no space before the postfix increment & decrement unary operators:
199 ++ --
200
201no space after the prefix increment & decrement unary operators:
202 ++ --
203
204and no space around the '.' and "->" structure member operators.
205
124 206
125 Chapter 4: Naming 207 Chapter 4: Naming
126 208
@@ -152,7 +234,7 @@ variable that is used to hold a temporary value.
152 234
153If you are afraid to mix up your local variable names, you have another 235If you are afraid to mix up your local variable names, you have another
154problem, which is called the function-growth-hormone-imbalance syndrome. 236problem, which is called the function-growth-hormone-imbalance syndrome.
155See next chapter. 237See chapter 6 (Functions).
156 238
157 239
158 Chapter 5: Typedefs 240 Chapter 5: Typedefs
@@ -258,6 +340,20 @@ generally easily keep track of about 7 different things, anything more
258and it gets confused. You know you're brilliant, but maybe you'd like 340and it gets confused. You know you're brilliant, but maybe you'd like
259to understand what you did 2 weeks from now. 341to understand what you did 2 weeks from now.
260 342
343In source files, separate functions with one blank line. If the function is
344exported, the EXPORT* macro for it should follow immediately after the closing
345function brace line. E.g.:
346
347int system_is_up(void)
348{
349 return system_state == SYSTEM_RUNNING;
350}
351EXPORT_SYMBOL(system_is_up);
352
353In function prototypes, include parameter names with their data types.
354Although this is not required by the C language, it is preferred in Linux
355because it is a simple way to add valuable information for the reader.
356
261 357
262 Chapter 7: Centralized exiting of functions 358 Chapter 7: Centralized exiting of functions
263 359
@@ -306,16 +402,36 @@ time to explain badly written code.
306Generally, you want your comments to tell WHAT your code does, not HOW. 402Generally, you want your comments to tell WHAT your code does, not HOW.
307Also, try to avoid putting comments inside a function body: if the 403Also, try to avoid putting comments inside a function body: if the
308function is so complex that you need to separately comment parts of it, 404function is so complex that you need to separately comment parts of it,
309you should probably go back to chapter 5 for a while. You can make 405you should probably go back to chapter 6 for a while. You can make
310small comments to note or warn about something particularly clever (or 406small comments to note or warn about something particularly clever (or
311ugly), but try to avoid excess. Instead, put the comments at the head 407ugly), but try to avoid excess. Instead, put the comments at the head
312of the function, telling people what it does, and possibly WHY it does 408of the function, telling people what it does, and possibly WHY it does
313it. 409it.
314 410
315When commenting the kernel API functions, please use the kerneldoc format. 411When commenting the kernel API functions, please use the kernel-doc format.
316See the files Documentation/kernel-doc-nano-HOWTO.txt and scripts/kernel-doc 412See the files Documentation/kernel-doc-nano-HOWTO.txt and scripts/kernel-doc
317for details. 413for details.
318 414
415Linux style for comments is the C89 "/* ... */" style.
416Don't use C99-style "// ..." comments.
417
418The preferred style for long (multi-line) comments is:
419
420 /*
421 * This is the preferred style for multi-line
422 * comments in the Linux kernel source code.
423 * Please use it consistently.
424 *
425 * Description: A column of asterisks on the left side,
426 * with beginning and ending almost-blank lines.
427 */
428
429It's also important to comment data, whether they are basic types or derived
430types. To this end, use just one data declaration per line (no commas for
431multiple data declarations). This leaves you room for a small comment on each
432item, explaining its use.
433
434
319 Chapter 9: You've made a mess of it 435 Chapter 9: You've made a mess of it
320 436
321That's OK, we all do. You've probably been told by your long-time Unix 437That's OK, we all do. You've probably been told by your long-time Unix
@@ -591,4 +707,4 @@ Kernel CodingStyle, by greg@kroah.com at OLS 2002:
591http://www.kroah.com/linux/talks/ols_2002_kernel_codingstyle_talk/html/ 707http://www.kroah.com/linux/talks/ols_2002_kernel_codingstyle_talk/html/
592 708
593-- 709--
594Last updated on 30 April 2006. 710Last updated on 2006-December-06.
diff --git a/Documentation/SubmitChecklist b/Documentation/SubmitChecklist
index 7ac61f60037a..2270efa10153 100644
--- a/Documentation/SubmitChecklist
+++ b/Documentation/SubmitChecklist
@@ -66,3 +66,9 @@ kernel patches.
66 See Documentation/ABI/README for more information. 66 See Documentation/ABI/README for more information.
67 67
6820: Check that it all passes `make headers_check'. 6820: Check that it all passes `make headers_check'.
69
7021: Has been checked with injection of at least slab and page-allocation
71 failures. See Documentation/fault-injection/.
72
73 If the new code is substantial, addition of subsystem-specific fault
74 injection might be appropriate.
diff --git a/Documentation/accounting/getdelays.c b/Documentation/accounting/getdelays.c
index bf2b0e2f87e1..e9126e794ed7 100644
--- a/Documentation/accounting/getdelays.c
+++ b/Documentation/accounting/getdelays.c
@@ -7,6 +7,8 @@
7 * Copyright (C) Balbir Singh, IBM Corp. 2006 7 * Copyright (C) Balbir Singh, IBM Corp. 2006
8 * Copyright (c) Jay Lan, SGI. 2006 8 * Copyright (c) Jay Lan, SGI. 2006
9 * 9 *
10 * Compile with
11 * gcc -I/usr/src/linux/include getdelays.c -o getdelays
10 */ 12 */
11 13
12#include <stdio.h> 14#include <stdio.h>
@@ -35,13 +37,20 @@
35#define NLA_DATA(na) ((void *)((char*)(na) + NLA_HDRLEN)) 37#define NLA_DATA(na) ((void *)((char*)(na) + NLA_HDRLEN))
36#define NLA_PAYLOAD(len) (len - NLA_HDRLEN) 38#define NLA_PAYLOAD(len) (len - NLA_HDRLEN)
37 39
38#define err(code, fmt, arg...) do { printf(fmt, ##arg); exit(code); } while (0) 40#define err(code, fmt, arg...) \
39int done = 0; 41 do { \
40int rcvbufsz=0; 42 fprintf(stderr, fmt, ##arg); \
41 43 exit(code); \
42 char name[100]; 44 } while (0)
43int dbg=0, print_delays=0; 45
46int done;
47int rcvbufsz;
48char name[100];
49int dbg;
50int print_delays;
51int print_io_accounting;
44__u64 stime, utime; 52__u64 stime, utime;
53
45#define PRINTF(fmt, arg...) { \ 54#define PRINTF(fmt, arg...) { \
46 if (dbg) { \ 55 if (dbg) { \
47 printf(fmt, ##arg); \ 56 printf(fmt, ##arg); \
@@ -78,8 +87,9 @@ static int create_nl_socket(int protocol)
78 if (rcvbufsz) 87 if (rcvbufsz)
79 if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, 88 if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF,
80 &rcvbufsz, sizeof(rcvbufsz)) < 0) { 89 &rcvbufsz, sizeof(rcvbufsz)) < 0) {
81 printf("Unable to set socket rcv buf size to %d\n", 90 fprintf(stderr, "Unable to set socket rcv buf size "
82 rcvbufsz); 91 "to %d\n",
92 rcvbufsz);
83 return -1; 93 return -1;
84 } 94 }
85 95
@@ -186,6 +196,15 @@ void print_delayacct(struct taskstats *t)
186 "count", "delay total", t->swapin_count, t->swapin_delay_total); 196 "count", "delay total", t->swapin_count, t->swapin_delay_total);
187} 197}
188 198
199void print_ioacct(struct taskstats *t)
200{
201 printf("%s: read=%llu, write=%llu, cancelled_write=%llu\n",
202 t->ac_comm,
203 (unsigned long long)t->read_bytes,
204 (unsigned long long)t->write_bytes,
205 (unsigned long long)t->cancelled_write_bytes);
206}
207
189int main(int argc, char *argv[]) 208int main(int argc, char *argv[])
190{ 209{
191 int c, rc, rep_len, aggr_len, len2, cmd_type; 210 int c, rc, rep_len, aggr_len, len2, cmd_type;
@@ -208,7 +227,7 @@ int main(int argc, char *argv[])
208 struct msgtemplate msg; 227 struct msgtemplate msg;
209 228
210 while (1) { 229 while (1) {
211 c = getopt(argc, argv, "dw:r:m:t:p:v:l"); 230 c = getopt(argc, argv, "diw:r:m:t:p:v:l");
212 if (c < 0) 231 if (c < 0)
213 break; 232 break;
214 233
@@ -217,6 +236,10 @@ int main(int argc, char *argv[])
217 printf("print delayacct stats ON\n"); 236 printf("print delayacct stats ON\n");
218 print_delays = 1; 237 print_delays = 1;
219 break; 238 break;
239 case 'i':
240 printf("printing IO accounting\n");
241 print_io_accounting = 1;
242 break;
220 case 'w': 243 case 'w':
221 strncpy(logfile, optarg, MAX_FILENAME); 244 strncpy(logfile, optarg, MAX_FILENAME);
222 printf("write to file %s\n", logfile); 245 printf("write to file %s\n", logfile);
@@ -238,14 +261,12 @@ int main(int argc, char *argv[])
238 if (!tid) 261 if (!tid)
239 err(1, "Invalid tgid\n"); 262 err(1, "Invalid tgid\n");
240 cmd_type = TASKSTATS_CMD_ATTR_TGID; 263 cmd_type = TASKSTATS_CMD_ATTR_TGID;
241 print_delays = 1;
242 break; 264 break;
243 case 'p': 265 case 'p':
244 tid = atoi(optarg); 266 tid = atoi(optarg);
245 if (!tid) 267 if (!tid)
246 err(1, "Invalid pid\n"); 268 err(1, "Invalid pid\n");
247 cmd_type = TASKSTATS_CMD_ATTR_PID; 269 cmd_type = TASKSTATS_CMD_ATTR_PID;
248 print_delays = 1;
249 break; 270 break;
250 case 'v': 271 case 'v':
251 printf("debug on\n"); 272 printf("debug on\n");
@@ -277,7 +298,7 @@ int main(int argc, char *argv[])
277 mypid = getpid(); 298 mypid = getpid();
278 id = get_family_id(nl_sd); 299 id = get_family_id(nl_sd);
279 if (!id) { 300 if (!id) {
280 printf("Error getting family id, errno %d", errno); 301 fprintf(stderr, "Error getting family id, errno %d\n", errno);
281 goto err; 302 goto err;
282 } 303 }
283 PRINTF("family id %d\n", id); 304 PRINTF("family id %d\n", id);
@@ -288,7 +309,7 @@ int main(int argc, char *argv[])
288 &cpumask, strlen(cpumask) + 1); 309 &cpumask, strlen(cpumask) + 1);
289 PRINTF("Sent register cpumask, retval %d\n", rc); 310 PRINTF("Sent register cpumask, retval %d\n", rc);
290 if (rc < 0) { 311 if (rc < 0) {
291 printf("error sending register cpumask\n"); 312 fprintf(stderr, "error sending register cpumask\n");
292 goto err; 313 goto err;
293 } 314 }
294 } 315 }
@@ -298,7 +319,7 @@ int main(int argc, char *argv[])
298 cmd_type, &tid, sizeof(__u32)); 319 cmd_type, &tid, sizeof(__u32));
299 PRINTF("Sent pid/tgid, retval %d\n", rc); 320 PRINTF("Sent pid/tgid, retval %d\n", rc);
300 if (rc < 0) { 321 if (rc < 0) {
301 printf("error sending tid/tgid cmd\n"); 322 fprintf(stderr, "error sending tid/tgid cmd\n");
302 goto done; 323 goto done;
303 } 324 }
304 } 325 }
@@ -310,13 +331,15 @@ int main(int argc, char *argv[])
310 PRINTF("received %d bytes\n", rep_len); 331 PRINTF("received %d bytes\n", rep_len);
311 332
312 if (rep_len < 0) { 333 if (rep_len < 0) {
313 printf("nonfatal reply error: errno %d\n", errno); 334 fprintf(stderr, "nonfatal reply error: errno %d\n",
335 errno);
314 continue; 336 continue;
315 } 337 }
316 if (msg.n.nlmsg_type == NLMSG_ERROR || 338 if (msg.n.nlmsg_type == NLMSG_ERROR ||
317 !NLMSG_OK((&msg.n), rep_len)) { 339 !NLMSG_OK((&msg.n), rep_len)) {
318 struct nlmsgerr *err = NLMSG_DATA(&msg); 340 struct nlmsgerr *err = NLMSG_DATA(&msg);
319 printf("fatal reply error, errno %d\n", err->error); 341 fprintf(stderr, "fatal reply error, errno %d\n",
342 err->error);
320 goto done; 343 goto done;
321 } 344 }
322 345
@@ -356,6 +379,8 @@ int main(int argc, char *argv[])
356 count++; 379 count++;
357 if (print_delays) 380 if (print_delays)
358 print_delayacct((struct taskstats *) NLA_DATA(na)); 381 print_delayacct((struct taskstats *) NLA_DATA(na));
382 if (print_io_accounting)
383 print_ioacct((struct taskstats *) NLA_DATA(na));
359 if (fd) { 384 if (fd) {
360 if (write(fd, NLA_DATA(na), na->nla_len) < 0) { 385 if (write(fd, NLA_DATA(na), na->nla_len) < 0) {
361 err(1,"write error\n"); 386 err(1,"write error\n");
@@ -365,7 +390,9 @@ int main(int argc, char *argv[])
365 goto done; 390 goto done;
366 break; 391 break;
367 default: 392 default:
368 printf("Unknown nested nla_type %d\n", na->nla_type); 393 fprintf(stderr, "Unknown nested"
394 " nla_type %d\n",
395 na->nla_type);
369 break; 396 break;
370 } 397 }
371 len2 += NLA_ALIGN(na->nla_len); 398 len2 += NLA_ALIGN(na->nla_len);
@@ -374,7 +401,8 @@ int main(int argc, char *argv[])
374 break; 401 break;
375 402
376 default: 403 default:
377 printf("Unknown nla_type %d\n", na->nla_type); 404 fprintf(stderr, "Unknown nla_type %d\n",
405 na->nla_type);
378 break; 406 break;
379 } 407 }
380 na = (struct nlattr *) (GENLMSG_DATA(&msg) + len); 408 na = (struct nlattr *) (GENLMSG_DATA(&msg) + len);
diff --git a/Documentation/cpu-freq/core.txt b/Documentation/cpu-freq/core.txt
index 29b3f9ffc66c..ce0666e51036 100644
--- a/Documentation/cpu-freq/core.txt
+++ b/Documentation/cpu-freq/core.txt
@@ -24,7 +24,7 @@ Contents:
241. General Information 241. General Information
25======================= 25=======================
26 26
27The CPUFreq core code is located in linux/kernel/cpufreq.c. This 27The CPUFreq core code is located in drivers/cpufreq/cpufreq.c. This
28cpufreq code offers a standardized interface for the CPUFreq 28cpufreq code offers a standardized interface for the CPUFreq
29architecture drivers (those pieces of code that do actual 29architecture drivers (those pieces of code that do actual
30frequency transitions), as well as to "notifiers". These are device 30frequency transitions), as well as to "notifiers". These are device
diff --git a/Documentation/dvb/cards.txt b/Documentation/dvb/cards.txt
index ca58e339d85f..cc09187a5db7 100644
--- a/Documentation/dvb/cards.txt
+++ b/Documentation/dvb/cards.txt
@@ -22,10 +22,10 @@ o Frontends drivers:
22 - ves1x93 : Alps BSRV2 (ves1893 demodulator) and dbox2 (ves1993) 22 - ves1x93 : Alps BSRV2 (ves1893 demodulator) and dbox2 (ves1993)
23 - cx24110 : Conexant HM1221/HM1811 (cx24110 or cx24106 demod, cx24108 PLL) 23 - cx24110 : Conexant HM1221/HM1811 (cx24110 or cx24106 demod, cx24108 PLL)
24 - grundig_29504-491 : Grundig 29504-491 (Philips TDA8083 demodulator), tsa5522 PLL 24 - grundig_29504-491 : Grundig 29504-491 (Philips TDA8083 demodulator), tsa5522 PLL
25 - mt312 : Zarlink mt312 or Mitel vp310 demodulator, sl1935 or tsa5059 PLL 25 - mt312 : Zarlink mt312 or Mitel vp310 demodulator, sl1935 or tsa5059 PLLi, Technisat Sky2Pc with bios Rev. 2.3
26 - stv0299 : Alps BSRU6 (tsa5059 PLL), LG TDQB-S00x (tsa5059 PLL), 26 - stv0299 : Alps BSRU6 (tsa5059 PLL), LG TDQB-S00x (tsa5059 PLL),
27 LG TDQF-S001F (sl1935 PLL), Philips SU1278 (tua6100 PLL), 27 LG TDQF-S001F (sl1935 PLL), Philips SU1278 (tua6100 PLL),
28 Philips SU1278SH (tsa5059 PLL), Samsung TBMU24112IMB 28 Philips SU1278SH (tsa5059 PLL), Samsung TBMU24112IMB, Technisat Sky2Pc with bios Rev. 2.6
29 DVB-C: 29 DVB-C:
30 - ves1820 : various (ves1820 demodulator, sp5659c or spXXXX PLL) 30 - ves1820 : various (ves1820 demodulator, sp5659c or spXXXX PLL)
31 - at76c651 : Atmel AT76c651(B) with DAT7021 PLL 31 - at76c651 : Atmel AT76c651(B) with DAT7021 PLL
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index e82c15de7d27..040f437c421b 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -207,17 +207,6 @@ Who: Thomas Gleixner <tglx@linutronix.de>
207 207
208--------------------------- 208---------------------------
209 209
210What: i2c-ite and i2c-algo-ite drivers
211When: September 2006
212Why: These drivers never compiled since they were added to the kernel
213 tree 5 years ago. This feature removal can be reevaluated if
214 someone shows interest in the drivers, fixes them and takes over
215 maintenance.
216 http://marc.theaimsgroup.com/?l=linux-mips&m=115040510817448
217Who: Jean Delvare <khali@linux-fr.org>
218
219---------------------------
220
221What: Bridge netfilter deferred IPv4/IPv6 output hook calling 210What: Bridge netfilter deferred IPv4/IPv6 output hook calling
222When: January 2007 211When: January 2007
223Why: The deferred output hooks are a layering violation causing unusual 212Why: The deferred output hooks are a layering violation causing unusual
@@ -261,3 +250,25 @@ Why: The new layer 3 independant connection tracking replaces the old
261Who: Patrick McHardy <kaber@trash.net> 250Who: Patrick McHardy <kaber@trash.net>
262 251
263--------------------------- 252---------------------------
253
254What: ACPI hooks (X86_SPEEDSTEP_CENTRINO_ACPI) in speedstep-centrino driver
255When: December 2006
256Why: Speedstep-centrino driver with ACPI hooks and acpi-cpufreq driver are
257 functionally very much similar. They talk to ACPI in same way. Only
258 difference between them is the way they do frequency transitions.
259 One uses MSRs and the other one uses IO ports. Functionality of
260 speedstep_centrino with ACPI hooks is now merged into acpi-cpufreq.
261 That means one common driver will support all Intel Enhanced Speedstep
262 capable CPUs. That means less confusion over the name of
263 speedstep-centrino driver (with that driver supposed to be used on
264 non-centrino platforms). That means less duplication of code and
265 less maintenance effort and no possibility of these two drivers
266 going out of sync.
267 Current users of speedstep_centrino with ACPI hooks are requested to
268 switch over to acpi-cpufreq driver. speedstep-centrino will continue
269 to work using older non-ACPI static table based scheme even after this
270 date.
271
272Who: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
273
274---------------------------
diff --git a/Documentation/filesystems/bfs.txt b/Documentation/filesystems/bfs.txt
index d2841e0bcf02..ea825e178e79 100644
--- a/Documentation/filesystems/bfs.txt
+++ b/Documentation/filesystems/bfs.txt
@@ -54,4 +54,4 @@ The first 4 bytes should be 0x1badface.
54If you have any patches, questions or suggestions regarding this BFS 54If you have any patches, questions or suggestions regarding this BFS
55implementation please contact the author: 55implementation please contact the author:
56 56
57Tigran A. Aivazian <tigran@veritas.com> 57Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
diff --git a/Documentation/filesystems/ocfs2.txt b/Documentation/filesystems/ocfs2.txt
index af6defd10cb6..8ccf0c1b58ed 100644
--- a/Documentation/filesystems/ocfs2.txt
+++ b/Documentation/filesystems/ocfs2.txt
@@ -54,3 +54,6 @@ errors=panic Panic and halt the machine if an error occurs.
54intr (*) Allow signals to interrupt cluster operations. 54intr (*) Allow signals to interrupt cluster operations.
55nointr Do not allow signals to interrupt cluster 55nointr Do not allow signals to interrupt cluster
56 operations. 56 operations.
57atime_quantum=60(*) OCFS2 will not update atime unless this number
58 of seconds has passed since the last update.
59 Set to zero to always update atime.
diff --git a/Documentation/i2c/busses/i2c-amd8111 b/Documentation/i2c/busses/i2c-amd8111
index db294ee7455a..460dd6635fd2 100644
--- a/Documentation/i2c/busses/i2c-amd8111
+++ b/Documentation/i2c/busses/i2c-amd8111
@@ -5,7 +5,7 @@ Supported adapters:
5 5
6Datasheets: 6Datasheets:
7 AMD datasheet not yet available, but almost everything can be found 7 AMD datasheet not yet available, but almost everything can be found
8 in publically available ACPI 2.0 specification, which the adapter 8 in the publicly available ACPI 2.0 specification, which the adapter
9 follows. 9 follows.
10 10
11Author: Vojtech Pavlik <vojtech@suse.cz> 11Author: Vojtech Pavlik <vojtech@suse.cz>
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index e46c23458242..3db69a086c41 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -9,7 +9,10 @@ Supported adapters:
9 * Intel 82801EB/ER (ICH5) (HW PEC supported, 32 byte buffer not supported) 9 * Intel 82801EB/ER (ICH5) (HW PEC supported, 32 byte buffer not supported)
10 * Intel 6300ESB 10 * Intel 6300ESB
11 * Intel 82801FB/FR/FW/FRW (ICH6) 11 * Intel 82801FB/FR/FW/FRW (ICH6)
12 * Intel ICH7 12 * Intel 82801G (ICH7)
13 * Intel 631xESB/632xESB (ESB2)
14 * Intel 82801H (ICH8)
15 * Intel ICH9
13 Datasheets: Publicly available at the Intel website 16 Datasheets: Publicly available at the Intel website
14 17
15Authors: 18Authors:
diff --git a/Documentation/i2c/busses/i2c-nforce2 b/Documentation/i2c/busses/i2c-nforce2
index cd49c428a3ab..7f61fbc03f7f 100644
--- a/Documentation/i2c/busses/i2c-nforce2
+++ b/Documentation/i2c/busses/i2c-nforce2
@@ -10,11 +10,11 @@ Supported adapters:
10 * nForce4 MCP51 10de:0264 10 * nForce4 MCP51 10de:0264
11 * nForce4 MCP55 10de:0368 11 * nForce4 MCP55 10de:0368
12 12
13Datasheet: not publically available, but seems to be similar to the 13Datasheet: not publicly available, but seems to be similar to the
14 AMD-8111 SMBus 2.0 adapter. 14 AMD-8111 SMBus 2.0 adapter.
15 15
16Authors: 16Authors:
17 Hans-Frieder Vogt <hfvogt@arcor.de>, 17 Hans-Frieder Vogt <hfvogt@gmx.net>,
18 Thomas Leibold <thomas@plx.com>, 18 Thomas Leibold <thomas@plx.com>,
19 Patrick Dreker <patrick@dreker.de> 19 Patrick Dreker <patrick@dreker.de>
20 20
@@ -38,7 +38,7 @@ Notes
38----- 38-----
39 39
40The SMBus adapter in the nForce2 chipset seems to be very similar to the 40The SMBus adapter in the nForce2 chipset seems to be very similar to the
41SMBus 2.0 adapter in the AMD-8111 southbridge. However, I could only get 41SMBus 2.0 adapter in the AMD-8111 south bridge. However, I could only get
42the driver to work with direct I/O access, which is different to the EC 42the driver to work with direct I/O access, which is different to the EC
43interface of the AMD-8111. Tested on Asus A7N8X. The ACPI DSDT table of the 43interface of the AMD-8111. Tested on Asus A7N8X. The ACPI DSDT table of the
44Asus A7N8X lists two SMBuses, both of which are supported by this driver. 44Asus A7N8X lists two SMBuses, both of which are supported by this driver.
diff --git a/Documentation/ioctl/ioctl-decoding.txt b/Documentation/ioctl/ioctl-decoding.txt
new file mode 100644
index 000000000000..bfdf7f3ee4f0
--- /dev/null
+++ b/Documentation/ioctl/ioctl-decoding.txt
@@ -0,0 +1,24 @@
1To decode a hex IOCTL code:
2
3Most architectures use this generic format, but check
4include/ARCH/ioctl.h for specifics, e.g. powerpc
5uses 3 bits to encode read/write and 13 bits for size.
6
7 bits meaning
8 31-30 00 - no parameters: uses _IO macro
9 10 - read: _IOR
10 01 - write: _IOW
11 11 - read/write: _IOWR
12
13 29-16 size of arguments
14
15 15-8 ascii character supposedly
16 unique to each driver
17
18 7-0 function #
19
20
21 So for example 0x82187201 is a read with arg length of 0x218,
22character 'r' function 1. Grepping the source reveals this is:
23
24#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct dirent [2])
diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt
index 125093c3ef76..536d5bfbdb8d 100644
--- a/Documentation/kbuild/kconfig-language.txt
+++ b/Documentation/kbuild/kconfig-language.txt
@@ -29,7 +29,7 @@ them. A single configuration option is defined like this:
29 29
30config MODVERSIONS 30config MODVERSIONS
31 bool "Set version information on all module symbols" 31 bool "Set version information on all module symbols"
32 depends MODULES 32 depends on MODULES
33 help 33 help
34 Usually, modules have to be recompiled whenever you switch to a new 34 Usually, modules have to be recompiled whenever you switch to a new
35 kernel. ... 35 kernel. ...
@@ -163,7 +163,7 @@ The position of a menu entry in the tree is determined in two ways. First
163it can be specified explicitly: 163it can be specified explicitly:
164 164
165menu "Network device support" 165menu "Network device support"
166 depends NET 166 depends on NET
167 167
168config NETDEVICES 168config NETDEVICES
169 ... 169 ...
@@ -188,10 +188,10 @@ config MODULES
188 188
189config MODVERSIONS 189config MODVERSIONS
190 bool "Set version information on all module symbols" 190 bool "Set version information on all module symbols"
191 depends MODULES 191 depends on MODULES
192 192
193comment "module support disabled" 193comment "module support disabled"
194 depends !MODULES 194 depends on !MODULES
195 195
196MODVERSIONS directly depends on MODULES, this means it's only visible if 196MODVERSIONS directly depends on MODULES, this means it's only visible if
197MODULES is different from 'n'. The comment on the other hand is always 197MODULES is different from 'n'. The comment on the other hand is always
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index d8323b8893c3..ef69c75780bf 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1656,6 +1656,12 @@ and is between 256 and 4096 characters. It is defined in the file
1656 sym53c416= [HW,SCSI] 1656 sym53c416= [HW,SCSI]
1657 See header of drivers/scsi/sym53c416.c. 1657 See header of drivers/scsi/sym53c416.c.
1658 1658
1659 sysrq_always_enabled
1660 [KNL]
1661 Ignore sysrq setting - this boot parameter will
1662 neutralize any effect of /proc/sys/kernel/sysrq.
1663 Useful for debugging.
1664
1659 t128= [HW,SCSI] 1665 t128= [HW,SCSI]
1660 See header of drivers/scsi/t128.c. 1666 See header of drivers/scsi/t128.c.
1661 1667
diff --git a/Documentation/networking/dccp.txt b/Documentation/networking/dccp.txt
index dda15886bcb5..387482e46c47 100644
--- a/Documentation/networking/dccp.txt
+++ b/Documentation/networking/dccp.txt
@@ -19,7 +19,8 @@ for real time and multimedia traffic.
19 19
20It has a base protocol and pluggable congestion control IDs (CCIDs). 20It has a base protocol and pluggable congestion control IDs (CCIDs).
21 21
22It is at experimental RFC status and the homepage for DCCP as a protocol is at: 22It is at proposed standard RFC status and the homepage for DCCP as a protocol
23is at:
23 http://www.read.cs.ucla.edu/dccp/ 24 http://www.read.cs.ucla.edu/dccp/
24 25
25Missing features 26Missing features
@@ -34,9 +35,6 @@ The known bugs are at:
34Socket options 35Socket options
35============== 36==============
36 37
37DCCP_SOCKOPT_PACKET_SIZE is used for CCID3 to set default packet size for
38calculations.
39
40DCCP_SOCKOPT_SERVICE sets the service. The specification mandates use of 38DCCP_SOCKOPT_SERVICE sets the service. The specification mandates use of
41service codes (RFC 4340, sec. 8.1.2); if this socket option is not set, 39service codes (RFC 4340, sec. 8.1.2); if this socket option is not set,
42the socket will fall back to 0 (which means that no meaningful service code 40the socket will fall back to 0 (which means that no meaningful service code
diff --git a/Documentation/spi/pxa2xx b/Documentation/spi/pxa2xx
index a1e0ee20f595..f9717fe9bd85 100644
--- a/Documentation/spi/pxa2xx
+++ b/Documentation/spi/pxa2xx
@@ -102,7 +102,7 @@ struct pxa2xx_spi_chip {
102 u8 tx_threshold; 102 u8 tx_threshold;
103 u8 rx_threshold; 103 u8 rx_threshold;
104 u8 dma_burst_size; 104 u8 dma_burst_size;
105 u32 timeout_microsecs; 105 u32 timeout;
106 u8 enable_loopback; 106 u8 enable_loopback;
107 void (*cs_control)(u32 command); 107 void (*cs_control)(u32 command);
108}; 108};
@@ -121,7 +121,7 @@ the PXA2xx "Developer Manual" sections on the DMA controller and SSP Controllers
121to determine the correct value. An SSP configured for byte-wide transfers would 121to determine the correct value. An SSP configured for byte-wide transfers would
122use a value of 8. 122use a value of 8.
123 123
124The "pxa2xx_spi_chip.timeout_microsecs" fields is used to efficiently handle 124The "pxa2xx_spi_chip.timeout" fields is used to efficiently handle
125trailing bytes in the SSP receiver fifo. The correct value for this field is 125trailing bytes in the SSP receiver fifo. The correct value for this field is
126dependent on the SPI bus speed ("spi_board_info.max_speed_hz") and the specific 126dependent on the SPI bus speed ("spi_board_info.max_speed_hz") and the specific
127slave device. Please note that the PXA2xx SSP 1 does not support trailing byte 127slave device. Please note that the PXA2xx SSP 1 does not support trailing byte
@@ -162,18 +162,18 @@ static void cs8405a_cs_control(u32 command)
162} 162}
163 163
164static struct pxa2xx_spi_chip cs8415a_chip_info = { 164static struct pxa2xx_spi_chip cs8415a_chip_info = {
165 .tx_threshold = 12, /* SSP hardware FIFO threshold */ 165 .tx_threshold = 8, /* SSP hardware FIFO threshold */
166 .rx_threshold = 4, /* SSP hardware FIFO threshold */ 166 .rx_threshold = 8, /* SSP hardware FIFO threshold */
167 .dma_burst_size = 8, /* Byte wide transfers used so 8 byte bursts */ 167 .dma_burst_size = 8, /* Byte wide transfers used so 8 byte bursts */
168 .timeout_microsecs = 64, /* Wait at least 64usec to handle trailing */ 168 .timeout = 235, /* See Intel documentation */
169 .cs_control = cs8415a_cs_control, /* Use external chip select */ 169 .cs_control = cs8415a_cs_control, /* Use external chip select */
170}; 170};
171 171
172static struct pxa2xx_spi_chip cs8405a_chip_info = { 172static struct pxa2xx_spi_chip cs8405a_chip_info = {
173 .tx_threshold = 12, /* SSP hardware FIFO threshold */ 173 .tx_threshold = 8, /* SSP hardware FIFO threshold */
174 .rx_threshold = 4, /* SSP hardware FIFO threshold */ 174 .rx_threshold = 8, /* SSP hardware FIFO threshold */
175 .dma_burst_size = 8, /* Byte wide transfers used so 8 byte bursts */ 175 .dma_burst_size = 8, /* Byte wide transfers used so 8 byte bursts */
176 .timeout_microsecs = 64, /* Wait at least 64usec to handle trailing */ 176 .timeout = 235, /* See Intel documentation */
177 .cs_control = cs8405a_cs_control, /* Use external chip select */ 177 .cs_control = cs8405a_cs_control, /* Use external chip select */
178}; 178};
179 179
diff --git a/Documentation/video4linux/CARDLIST.cx88 b/Documentation/video4linux/CARDLIST.cx88
index 8755b3e7b09e..62e32b49cec9 100644
--- a/Documentation/video4linux/CARDLIST.cx88
+++ b/Documentation/video4linux/CARDLIST.cx88
@@ -43,7 +43,7 @@
43 42 -> digitalnow DNTV Live! DVB-T Pro [1822:0025,1822:0019] 43 42 -> digitalnow DNTV Live! DVB-T Pro [1822:0025,1822:0019]
44 43 -> KWorld/VStream XPert DVB-T with cx22702 [17de:08a1,12ab:2300] 44 43 -> KWorld/VStream XPert DVB-T with cx22702 [17de:08a1,12ab:2300]
45 44 -> DViCO FusionHDTV DVB-T Dual Digital [18ac:db50,18ac:db54] 45 44 -> DViCO FusionHDTV DVB-T Dual Digital [18ac:db50,18ac:db54]
46 45 -> KWorld HardwareMpegTV XPert [17de:0840] 46 45 -> KWorld HardwareMpegTV XPert [17de:0840,1421:0305]
47 46 -> DViCO FusionHDTV DVB-T Hybrid [18ac:db40,18ac:db44] 47 46 -> DViCO FusionHDTV DVB-T Hybrid [18ac:db40,18ac:db44]
48 47 -> pcHDTV HD5500 HDTV [7063:5500] 48 47 -> pcHDTV HD5500 HDTV [7063:5500]
49 48 -> Kworld MCE 200 Deluxe [17de:0841] 49 48 -> Kworld MCE 200 Deluxe [17de:0841]
diff --git a/Documentation/video4linux/CARDLIST.saa7134 b/Documentation/video4linux/CARDLIST.saa7134
index 53ce6a39083c..f6201cc37ec5 100644
--- a/Documentation/video4linux/CARDLIST.saa7134
+++ b/Documentation/video4linux/CARDLIST.saa7134
@@ -76,7 +76,7 @@
76 75 -> AVerMedia AVerTVHD MCE A180 [1461:1044] 76 75 -> AVerMedia AVerTVHD MCE A180 [1461:1044]
77 76 -> SKNet MonsterTV Mobile [1131:4ee9] 77 76 -> SKNet MonsterTV Mobile [1131:4ee9]
78 77 -> Pinnacle PCTV 40i/50i/110i (saa7133) [11bd:002e] 78 77 -> Pinnacle PCTV 40i/50i/110i (saa7133) [11bd:002e]
79 78 -> ASUSTeK P7131 Dual [1043:4862] 79 78 -> ASUSTeK P7131 Dual [1043:4862,1043:4876]
80 79 -> Sedna/MuchTV PC TV Cardbus TV/Radio (ITO25 Rev:2B) 80 79 -> Sedna/MuchTV PC TV Cardbus TV/Radio (ITO25 Rev:2B)
81 80 -> ASUS Digimatrix TV [1043:0210] 81 80 -> ASUS Digimatrix TV [1043:0210]
82 81 -> Philips Tiger reference design [1131:2018] 82 81 -> Philips Tiger reference design [1131:2018]
@@ -99,3 +99,8 @@
99 98 -> Proteus Pro 2309 [0919:2003] 99 98 -> Proteus Pro 2309 [0919:2003]
100 99 -> AVerMedia TV Hybrid A16AR [1461:2c00] 100 99 -> AVerMedia TV Hybrid A16AR [1461:2c00]
101100 -> Asus Europa2 OEM [1043:4860] 101100 -> Asus Europa2 OEM [1043:4860]
102101 -> Pinnacle PCTV 310i [11bd:002f]
103102 -> Avermedia AVerTV Studio 507 [1461:9715]
104103 -> Compro Videomate DVB-T200A
105104 -> Hauppauge WinTV-HVR1110 DVB-T/Hybrid [0070:6701]
106105 -> Terratec Cinergy HT PCMCIA [153b:1172]
diff --git a/Documentation/video4linux/cafe_ccic b/Documentation/video4linux/cafe_ccic
new file mode 100644
index 000000000000..88821022a5de
--- /dev/null
+++ b/Documentation/video4linux/cafe_ccic
@@ -0,0 +1,54 @@
1"cafe_ccic" is a driver for the Marvell 88ALP01 "cafe" CMOS camera
2controller. This is the controller found in first-generation OLPC systems,
3and this driver was written with support from the OLPC project.
4
5Current status: the core driver works. It can generate data in YUV422,
6RGB565, and RGB444 formats. (Anybody looking at the code will see RGB32 as
7well, but that is a debugging aid which will be removed shortly). VGA and
8QVGA modes work; CIF is there but the colors remain funky. Only the OV7670
9sensor is known to work with this controller at this time.
10
11To try it out: either of these commands will work:
12
13 mplayer tv:// -tv driver=v4l2:width=640:height=480 -nosound
14 mplayer tv:// -tv driver=v4l2:width=640:height=480:outfmt=bgr16 -nosound
15
16The "xawtv" utility also works; gqcam does not, for unknown reasons.
17
18There are a few load-time options, most of which can be changed after
19loading via sysfs as well:
20
21 - alloc_bufs_at_load: Normally, the driver will not allocate any DMA
22 buffers until the time comes to transfer data. If this option is set,
23 then worst-case-sized buffers will be allocated at module load time.
24 This option nails down the memory for the life of the module, but
25 perhaps decreases the chances of an allocation failure later on.
26
27 - dma_buf_size: The size of DMA buffers to allocate. Note that this
28 option is only consulted for load-time allocation; when buffers are
29 allocated at run time, they will be sized appropriately for the current
30 camera settings.
31
32 - n_dma_bufs: The controller can cycle through either two or three DMA
33 buffers. Normally, the driver tries to use three buffers; on faster
34 systems, however, it will work well with only two.
35
36 - min_buffers: The minimum number of streaming I/O buffers that the driver
37 will consent to work with. Default is one, but, on slower systems,
38 better behavior with mplayer can be achieved by setting to a higher
39 value (like six).
40
41 - max_buffers: The maximum number of streaming I/O buffers; default is
42 ten. That number was carefully picked out of a hat and should not be
43 assumed to actually mean much of anything.
44
45 - flip: If this boolean parameter is set, the sensor will be instructed to
46 invert the video image. Whether it makes sense is determined by how
47 your particular camera is mounted.
48
49Work is ongoing with this driver, stay tuned.
50
51jon
52
53Jonathan Corbet
54corbet@lwn.net
diff --git a/Documentation/video4linux/zr36120.txt b/Documentation/video4linux/zr36120.txt
deleted file mode 100644
index 1a1c2d03a5c8..000000000000
--- a/Documentation/video4linux/zr36120.txt
+++ /dev/null
@@ -1,162 +0,0 @@
1Driver for Trust Computer Products Framegrabber, version 0.6.1
2------ --- ----- -------- -------- ------------ ------- - - -
3
4- ZORAN ------------------------------------------------------
5 Author: Pauline Middelink <middelin@polyware.nl>
6 Date: 18 September 1999
7Version: 0.6.1
8
9- Description ------------------------------------------------
10
11Video4Linux compatible driver for an unknown brand framegrabber
12(Sold in the Netherlands by TRUST Computer Products) and various
13other zoran zr36120 based framegrabbers.
14
15The card contains a ZR36120 Multimedia PCI Interface and a Philips
16SAA7110 Onechip Frontend videodecoder. There is also an DSP of
17which I have forgotten the number, since i will never get that thing
18to work without specs from the vendor itself.
19
20The SAA711x are capable of processing 6 different video inputs,
21CVBS1..6 and Y1+C1, Y2+C2, Y3+C3. All in 50/60Hz, NTSC, PAL or
22SECAM and delivering a YUV datastream. On my card the input
23'CVBS-0' corresponds to channel CVBS2 and 'S-Video' to Y2+C2.
24
25I have some reports of other cards working with the mentioned
26chip sets. For a list of other working cards please have a look
27at the cards named in the tvcards struct in the beginning of
28zr36120.c
29
30After some testing, I discovered that the carddesigner messed up
31on the I2C interface. The Zoran chip includes 2 lines SDA and SCL
32which (s)he connected reversely. So we have to clock on the SDA
33and r/w data on the SCL pin. Life is fun... Each cardtype now has
34a bit which signifies if you have a card with the same deficiency.
35
36Oh, for the completeness of this story I must mention that my
37card delivers the VSYNC pulse of the SAA chip to GIRQ1, not
38GIRQ0 as some other cards have. This is also incorporated in
39the driver be clearing/setting the 'useirq1' bit in the tvcard
40description.
41
42Another problems of continuous capturing data with a Zoran chip
43is something nasty inside the chip. It effectively halves the
44fps we ought to get... Here is the scenario: capturing frames
45to memory is done in the so-called snapshot mode. In this mode
46the Zoran stops after capturing a frame worth of data and wait
47till the application set GRAB bit to indicate readiness for the
48next frame. After detecting a set bit, the chip neatly waits
49till the start of a frame, captures it and it goes back to off.
50Smart ppl will notice the problem here. Its the waiting on the
51_next_ frame each time we set the GRAB bit... Oh well, 12,5 fps
52is still plenty fast for me.
53-- update 28/7/1999 --
54Don't believe a word I just said... Proof is the output
55of `streamer -t 300 -r 25 -f avi15 -o /dev/null`
56 ++--+-+-+-+-+-+-+-+-+-+-+-+-s+-+-+-+-+-+-+-+-+-+-+- 25/25
57 +-s+-+-+-+-+-+-+-+-+-+-+-+-+-s+-+-+-+-+-+-+-+-+-+-+- 25/25
58 +-s+-+-+-+-+-+-+-+-+-+-+-+-+-s+-+-+-+-+-+-+-+-+-+-+- 25/25
59 +-s+-+-+-+-+-+-+-+-+-+-+-+-+-s+-+-+-+-+-+-+-+-+-+-+- 25/25
60 +-s+-+-+-+-+-+-+-+-+-+-+-+-+-s+-+-+-+-+-+-+-+-+-+-+- 25/25
61 +-s+-+-+-+-+-+-+-+-+-+-+-+-+-s+-+-+-+-+-+-+-+-+-+-+- 25/25
62 +-s+-+-+-+-+-+-+-+-+-+-+-+-+-s+-+-+-+-+-+-+-+-+-+-+- 25/25
63 +-s+-+-+-+-+-+-+-+-+-+-+-+-+-s+-+-+-+-+-+-+-+-+-+-+- 25/25
64 +-s+-+-+-+-+-+-+-+-+-+-+-+-+-s+-+-+-+-+-+-+-+-+-+-+- 25/25
65 +-s+-+-+-+-+-+-+-+-+-+-+-+-+-s+-+-+-+-+-+-+-+-+-+-+- 25/25
66 +-s+-+-+-+-+-+-+-+-+-+-+-+-+-s+-+-+-+-+-+-+-+-+-+-+- 25/25
67 +-s+-+-+-+-+-+-+-+-+-+-+-+-+-s+-+-+-+-+-+-+-+-+-+-+-
68 syncer: done
69 writer: done
70(note the /dev/null is prudent here, my system is not able to
71 grab /and/ write 25 fps to a file... gifts welcome :) )
72The technical reasoning follows: The zoran completed the last
73frame, the VSYNC goes low, and GRAB is cleared. The interrupt
74routine starts to work since its VSYNC driven, and again
75activates the GRAB bit. A few ms later the VSYNC (re-)rises and
76the zoran starts to work on a new and freshly broadcasted frame....
77
78For pointers I used the specs of both chips. Below are the URLs:
79 http://www.zoran.com/ftp/download/devices/pci/ZR36120/36120data.pdf
80 http://www-us.semiconductor.philips.com/acrobat/datasheets/SAA_7110_A_1.pdf
81Some alternatives for the Philips SAA 7110 datasheet are:
82 http://www.datasheetcatalog.com/datasheets_pdf/S/A/A/7/SAA7110.shtml
83 http://www.datasheetarchive.com/search.php?search=SAA7110&sType=part
84
85The documentation has very little on absolute numbers or timings
86needed for the various modes/resolutions, but there are other
87programs you can borrow those from.
88
89------ Install --------------------------------------------
90Read the file called TODO. Note its long list of limitations.
91
92Build a kernel with VIDEO4LINUX enabled. Activate the
93BT848 driver; we need this because we have need for the
94other modules (i2c and videodev) it enables.
95
96To install this software, extract it into a suitable directory.
97Examine the makefile and change anything you don't like. Type "make".
98
99After making the modules check if you have the much needed
100/dev/video devices. If not, execute the following 4 lines:
101 mknod /dev/video c 81 0
102 mknod /dev/video1 c 81 1
103 mknod /dev/video2 c 81 2
104 mknod /dev/video3 c 81 3
105 mknod /dev/video4 c 81 4
106
107After making/checking the devices do:
108 modprobe i2c
109 modprobe videodev
110 modprobe saa7110 (optional)
111 modprobe saa7111 (optional)
112 modprobe tuner (optional)
113 insmod zoran cardtype=<n>
114
115<n> is the cardtype of the card you have. The cardnumber can
116be found in the source of zr36120. Look for tvcards. If your
117card is not there, please try if any other card gives some
118response, and mail me if you got a working tvcard addition.
119
120PS. <TVCard editors behold!)
121 Don't forget to set video_input to the number of inputs
122 you defined in the video_mux part of the tvcard definition.
123 It's a common error to add a channel but not incrementing
124 video_input and getting angry with me/v4l/linux/linus :(
125
126You are now ready to test the framegrabber with your favorite
127video4linux compatible tool
128
129------ Application ----------------------------------------
130
131This device works with all Video4Linux compatible applications,
132given the limitations in the TODO file.
133
134------ API ------------------------------------------------
135
136This uses the V4L interface as of kernel release 2.1.116, and in
137fact has not been tested on any lower version. There are a couple
138of minor differences due to the fact that the amount of data returned
139with each frame varies, and no doubt there are discrepancies due to my
140misunderstanding of the API. I intend to convert this driver to the
141new V4L2 API when it has stabilized more.
142
143------ Current state --------------------------------------
144
145The driver is capable of overlaying a video image in screen, and
146even capable of grabbing frames. It uses the BIGPHYSAREA patch
147to allocate lots of large memory blocks when tis patch is
148found in the kernel, but it doesn't need it.
149The consequence is that, when loading the driver as a module,
150the module may tell you it's out of memory, but 'free' says
151otherwise. The reason is simple; the modules wants its memory
152contiguous, not fragmented, and after a long uptime there
153probably isn't a fragment of memory large enough...
154
155The driver uses a double buffering scheme, which should really
156be an n-way buffer, depending on the size of allocated framebuffer
157and the requested grab-size/format.
158This current version also fixes a dead-lock situation during irq
159time, which really, really froze my system... :)
160
161Good luck.
162 Pauline
diff --git a/MAINTAINERS b/MAINTAINERS
index 9c6aac33eb3b..dea5b2a6de0a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -747,7 +747,7 @@ P: Dave Jones
747M: davej@codemonkey.org.uk 747M: davej@codemonkey.org.uk
748L: cpufreq@lists.linux.org.uk 748L: cpufreq@lists.linux.org.uk
749W: http://www.codemonkey.org.uk/projects/cpufreq/ 749W: http://www.codemonkey.org.uk/projects/cpufreq/
750T: git kernel.org/pub/scm/linux/kernel/davej/cpufreq.git 750T: git kernel.org/pub/scm/linux/kernel/git/davej/cpufreq.git
751S: Maintained 751S: Maintained
752 752
753CPUID/MSR DRIVER 753CPUID/MSR DRIVER
@@ -1511,8 +1511,10 @@ T: git kernel.org:/pub/scm/linux/kernel/git/dtor/input.git
1511S: Maintained 1511S: Maintained
1512 1512
1513INOTIFY 1513INOTIFY
1514P: John McCutchan and Robert Love 1514P: John McCutchan
1515M: ttb@tentacle.dhs.org and rml@novell.com 1515M: ttb@tentacle.dhs.org
1516P: Robert Love
1517M: rml@novell.com
1516L: linux-kernel@vger.kernel.org 1518L: linux-kernel@vger.kernel.org
1517S: Maintained 1519S: Maintained
1518 1520
@@ -1752,6 +1754,13 @@ W: http://nfs.sourceforge.net/
1752W: http://www.cse.unsw.edu.au/~neilb/patches/linux-devel/ 1754W: http://www.cse.unsw.edu.au/~neilb/patches/linux-devel/
1753S: Maintained 1755S: Maintained
1754 1756
1757KERNEL VIRTUAL MACHINE (KVM)
1758P: Avi Kivity
1759M: avi@qumranet.com
1760L: kvm-devel@lists.sourceforge.net
1761W: kvm.sourceforge.net
1762S: Supported
1763
1755KEXEC 1764KEXEC
1756P: Eric Biederman 1765P: Eric Biederman
1757M: ebiederm@xmission.com 1766M: ebiederm@xmission.com
@@ -2561,7 +2570,7 @@ S: Maintained
2561REAL TIME CLOCK (RTC) SUBSYSTEM 2570REAL TIME CLOCK (RTC) SUBSYSTEM
2562P: Alessandro Zummo 2571P: Alessandro Zummo
2563M: a.zummo@towertech.it 2572M: a.zummo@towertech.it
2564L: linux-kernel@vger.kernel.org 2573L: rtc-linux@googlegroups.com
2565S: Maintained 2574S: Maintained
2566 2575
2567REISERFS FILE SYSTEM 2576REISERFS FILE SYSTEM
diff --git a/Makefile b/Makefile
index aef96259051f..f732e75be43d 100644
--- a/Makefile
+++ b/Makefile
@@ -10,8 +10,11 @@ NAME=Avast! A bilge rat!
10# Comments in this file are targeted only to the developer, do not 10# Comments in this file are targeted only to the developer, do not
11# expect to learn how to build the kernel reading this file. 11# expect to learn how to build the kernel reading this file.
12 12
13# Do not print "Entering directory ..." 13# Do not:
14MAKEFLAGS += --no-print-directory 14# o use make's built-in rules and variables
15# (this increases performance and avoid hard-to-debug behavour);
16# o print "Entering directory ...";
17MAKEFLAGS += -rR --no-print-directory
15 18
16# We are using a recursive build, so we need to do a little thinking 19# We are using a recursive build, so we need to do a little thinking
17# to get the ordering right. 20# to get the ordering right.
@@ -271,12 +274,8 @@ export quiet Q KBUILD_VERBOSE
271# Look for make include files relative to root of kernel src 274# Look for make include files relative to root of kernel src
272MAKEFLAGS += --include-dir=$(srctree) 275MAKEFLAGS += --include-dir=$(srctree)
273 276
274# We need some generic definitions 277# We need some generic definitions.
275include $(srctree)/scripts/Kbuild.include 278include $(srctree)/scripts/Kbuild.include
276
277# Do not use make's built-in rules and variables
278# This increases performance and avoid hard-to-debug behavour
279MAKEFLAGS += -rR
280 279
281# Make variables (CC, etc...) 280# Make variables (CC, etc...)
282 281
@@ -1101,9 +1100,9 @@ boards := $(notdir $(boards))
1101 1100
1102help: 1101help:
1103 @echo 'Cleaning targets:' 1102 @echo 'Cleaning targets:'
1104 @echo ' clean - remove most generated files but keep the config and' 1103 @echo ' clean - Remove most generated files but keep the config and'
1105 @echo ' enough build support to build external modules' 1104 @echo ' enough build support to build external modules'
1106 @echo ' mrproper - remove all generated files + config + various backup files' 1105 @echo ' mrproper - Remove all generated files + config + various backup files'
1107 @echo ' distclean - mrproper + remove editor backup and patch files' 1106 @echo ' distclean - mrproper + remove editor backup and patch files'
1108 @echo '' 1107 @echo ''
1109 @echo 'Configuration targets:' 1108 @echo 'Configuration targets:'
@@ -1391,12 +1390,18 @@ endif #ifeq ($(mixed-targets),1)
1391 1390
1392PHONY += checkstack kernelrelease kernelversion 1391PHONY += checkstack kernelrelease kernelversion
1393 1392
1394# Use $(SUBARCH) here instead of $(ARCH) so that this works for UML. 1393# UML needs a little special treatment here. It wants to use the host
1395# In the UML case, $(SUBARCH) is the name of the underlying 1394# toolchain, so needs $(SUBARCH) passed to checkstack.pl. Everyone
1396# architecture, while for all other arches, it is the same as $(ARCH). 1395# else wants $(ARCH), including people doing cross-builds, which means
1396# that $(SUBARCH) doesn't work here.
1397ifeq ($(ARCH), um)
1398CHECKSTACK_ARCH := $(SUBARCH)
1399else
1400CHECKSTACK_ARCH := $(ARCH)
1401endif
1397checkstack: 1402checkstack:
1398 $(OBJDUMP) -d vmlinux $$(find . -name '*.ko') | \ 1403 $(OBJDUMP) -d vmlinux $$(find . -name '*.ko') | \
1399 $(PERL) $(src)/scripts/checkstack.pl $(SUBARCH) 1404 $(PERL) $(src)/scripts/checkstack.pl $(CHECKSTACK_ARCH)
1400 1405
1401kernelrelease: 1406kernelrelease:
1402 $(if $(wildcard include/config/kernel.release), $(Q)echo $(KERNELRELEASE), \ 1407 $(if $(wildcard include/config/kernel.release), $(Q)echo $(KERNELRELEASE), \
@@ -1484,6 +1489,8 @@ endif # skip-makefile
1484PHONY += FORCE 1489PHONY += FORCE
1485FORCE: 1490FORCE:
1486 1491
1492# Cancel implicit rules on top Makefile, `-rR' will apply to sub-makes.
1493Makefile: ;
1487 1494
1488# Declare the contents of the .PHONY variable as phony. We keep that 1495# Declare the contents of the .PHONY variable as phony. We keep that
1489# information in a variable se we can use it in if_changed and friends. 1496# information in a variable se we can use it in if_changed and friends.
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index fb804043b320..be133f1f75a4 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -979,7 +979,7 @@ osf_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp,
979 long timeout; 979 long timeout;
980 int ret = -EINVAL; 980 int ret = -EINVAL;
981 struct fdtable *fdt; 981 struct fdtable *fdt;
982 int max_fdset; 982 int max_fds;
983 983
984 timeout = MAX_SCHEDULE_TIMEOUT; 984 timeout = MAX_SCHEDULE_TIMEOUT;
985 if (tvp) { 985 if (tvp) {
@@ -1003,9 +1003,9 @@ osf_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp,
1003 1003
1004 rcu_read_lock(); 1004 rcu_read_lock();
1005 fdt = files_fdtable(current->files); 1005 fdt = files_fdtable(current->files);
1006 max_fdset = fdt->max_fdset; 1006 max_fds = fdt->max_fds;
1007 rcu_read_unlock(); 1007 rcu_read_unlock();
1008 if (n < 0 || n > max_fdset) 1008 if (n < 0 || n > max_fds)
1009 goto out_nofds; 1009 goto out_nofds;
1010 1010
1011 /* 1011 /*
diff --git a/arch/arm/kernel/apm.c b/arch/arm/kernel/apm.c
index a11fb9a40c04..2c37b70b17ab 100644
--- a/arch/arm/kernel/apm.c
+++ b/arch/arm/kernel/apm.c
@@ -423,7 +423,7 @@ static int apm_open(struct inode * inode, struct file * filp)
423{ 423{
424 struct apm_user *as; 424 struct apm_user *as;
425 425
426 as = (struct apm_user *)kzalloc(sizeof(*as), GFP_KERNEL); 426 as = kzalloc(sizeof(*as), GFP_KERNEL);
427 if (as) { 427 if (as) {
428 /* 428 /*
429 * XXX - this is a tiny bit broken, when we consider BSD 429 * XXX - this is a tiny bit broken, when we consider BSD
diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c
index a786f769035d..71257e3d513f 100644
--- a/arch/arm/kernel/ecard.c
+++ b/arch/arm/kernel/ecard.c
@@ -353,7 +353,7 @@ int ecard_readchunk(struct in_chunk_dir *cd, ecard_t *ec, int id, int num)
353 } 353 }
354 if (c_id(&excd) == 0x80) { /* loader */ 354 if (c_id(&excd) == 0x80) { /* loader */
355 if (!ec->loader) { 355 if (!ec->loader) {
356 ec->loader = (loader_t)kmalloc(c_len(&excd), 356 ec->loader = kmalloc(c_len(&excd),
357 GFP_KERNEL); 357 GFP_KERNEL);
358 if (ec->loader) 358 if (ec->loader)
359 ecard_readbytes(ec->loader, ec, 359 ecard_readbytes(ec->loader, ec,
diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig
index d135568dc9e7..8781aaeb576b 100644
--- a/arch/arm/mach-omap1/Kconfig
+++ b/arch/arm/mach-omap1/Kconfig
@@ -43,6 +43,7 @@ config MACH_OMAP_H3
43config MACH_OMAP_OSK 43config MACH_OMAP_OSK
44 bool "TI OSK Support" 44 bool "TI OSK Support"
45 depends on ARCH_OMAP1 && ARCH_OMAP16XX 45 depends on ARCH_OMAP1 && ARCH_OMAP16XX
46 select TPS65010
46 help 47 help
47 TI OMAP 5912 OSK (OMAP Starter Kit) board support. Say Y here 48 TI OMAP 5912 OSK (OMAP Starter Kit) board support. Say Y here
48 if you have such a board. 49 if you have such a board.
diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
index 3a622801d7b0..7d0cf7af88ce 100644
--- a/arch/arm/mach-omap1/board-osk.c
+++ b/arch/arm/mach-omap1/board-osk.c
@@ -30,6 +30,7 @@
30#include <linux/init.h> 30#include <linux/init.h>
31#include <linux/platform_device.h> 31#include <linux/platform_device.h>
32#include <linux/irq.h> 32#include <linux/irq.h>
33#include <linux/interrupt.h>
33 34
34#include <linux/mtd/mtd.h> 35#include <linux/mtd/mtd.h>
35#include <linux/mtd/partitions.h> 36#include <linux/mtd/partitions.h>
diff --git a/arch/arm/mach-pnx4008/Makefile b/arch/arm/mach-pnx4008/Makefile
index b457ca0a431a..777564c90a12 100644
--- a/arch/arm/mach-pnx4008/Makefile
+++ b/arch/arm/mach-pnx4008/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the linux kernel. 2# Makefile for the linux kernel.
3# 3#
4 4
5obj-y := core.o irq.o time.o clock.o gpio.o serial.o dma.o 5obj-y := core.o irq.o time.o clock.o gpio.o serial.o dma.o i2c.o
6obj-m := 6obj-m :=
7obj-n := 7obj-n :=
8obj- := 8obj- :=
diff --git a/arch/arm/mach-pnx4008/i2c.c b/arch/arm/mach-pnx4008/i2c.c
new file mode 100644
index 000000000000..6f308827c4fe
--- /dev/null
+++ b/arch/arm/mach-pnx4008/i2c.c
@@ -0,0 +1,167 @@
1/*
2 * I2C initialization for PNX4008.
3 *
4 * Author: Vitaly Wool <vitalywool@gmail.com>
5 *
6 * 2005-2006 (c) MontaVista Software, Inc. This file is licensed under
7 * the terms of the GNU General Public License version 2. This program
8 * is licensed "as is" without any warranty of any kind, whether express
9 * or implied.
10 */
11
12#include <linux/clk.h>
13#include <linux/i2c.h>
14#include <linux/i2c-pnx.h>
15#include <linux/platform_device.h>
16#include <linux/err.h>
17#include <asm/arch/platform.h>
18#include <asm/arch/i2c.h>
19
20static int set_clock_run(struct platform_device *pdev)
21{
22 struct clk *clk;
23 char name[10];
24 int retval = 0;
25
26 snprintf(name, 10, "i2c%d_ck", pdev->id);
27 clk = clk_get(&pdev->dev, name);
28 if (!IS_ERR(clk)) {
29 clk_set_rate(clk, 1);
30 clk_put(clk);
31 } else
32 retval = -ENOENT;
33
34 return retval;
35}
36
37static int set_clock_stop(struct platform_device *pdev)
38{
39 struct clk *clk;
40 char name[10];
41 int retval = 0;
42
43 snprintf(name, 10, "i2c%d_ck", pdev->id);
44 clk = clk_get(&pdev->dev, name);
45 if (!IS_ERR(clk)) {
46 clk_set_rate(clk, 0);
47 clk_put(clk);
48 } else
49 retval = -ENOENT;
50
51 return retval;
52}
53
54static int i2c_pnx_suspend(struct platform_device *pdev, pm_message_t state)
55{
56 int retval = 0;
57#ifdef CONFIG_PM
58 retval = set_clock_run(pdev);
59#endif
60 return retval;
61}
62
63static int i2c_pnx_resume(struct platform_device *pdev)
64{
65 int retval = 0;
66#ifdef CONFIG_PM
67 retval = set_clock_run(pdev);
68#endif
69 return retval;
70}
71
72static u32 calculate_input_freq(struct platform_device *pdev)
73{
74 return HCLK_MHZ;
75}
76
77
78static struct i2c_pnx_algo_data pnx_algo_data0 = {
79 .base = PNX4008_I2C1_BASE,
80 .irq = I2C_1_INT,
81};
82
83static struct i2c_pnx_algo_data pnx_algo_data1 = {
84 .base = PNX4008_I2C2_BASE,
85 .irq = I2C_2_INT,
86};
87
88static struct i2c_pnx_algo_data pnx_algo_data2 = {
89 .base = (PNX4008_USB_CONFIG_BASE + 0x300),
90 .irq = USB_I2C_INT,
91};
92
93static struct i2c_adapter pnx_adapter0 = {
94 .name = I2C_CHIP_NAME "0",
95 .algo_data = &pnx_algo_data0,
96};
97static struct i2c_adapter pnx_adapter1 = {
98 .name = I2C_CHIP_NAME "1",
99 .algo_data = &pnx_algo_data1,
100};
101
102static struct i2c_adapter pnx_adapter2 = {
103 .name = "USB-I2C",
104 .algo_data = &pnx_algo_data2,
105};
106
107static struct i2c_pnx_data i2c0_data = {
108 .suspend = i2c_pnx_suspend,
109 .resume = i2c_pnx_resume,
110 .calculate_input_freq = calculate_input_freq,
111 .set_clock_run = set_clock_run,
112 .set_clock_stop = set_clock_stop,
113 .adapter = &pnx_adapter0,
114};
115
116static struct i2c_pnx_data i2c1_data = {
117 .suspend = i2c_pnx_suspend,
118 .resume = i2c_pnx_resume,
119 .calculate_input_freq = calculate_input_freq,
120 .set_clock_run = set_clock_run,
121 .set_clock_stop = set_clock_stop,
122 .adapter = &pnx_adapter1,
123};
124
125static struct i2c_pnx_data i2c2_data = {
126 .suspend = i2c_pnx_suspend,
127 .resume = i2c_pnx_resume,
128 .calculate_input_freq = calculate_input_freq,
129 .set_clock_run = set_clock_run,
130 .set_clock_stop = set_clock_stop,
131 .adapter = &pnx_adapter2,
132};
133
134static struct platform_device i2c0_device = {
135 .name = "pnx-i2c",
136 .id = 0,
137 .dev = {
138 .platform_data = &i2c0_data,
139 },
140};
141
142static struct platform_device i2c1_device = {
143 .name = "pnx-i2c",
144 .id = 1,
145 .dev = {
146 .platform_data = &i2c1_data,
147 },
148};
149
150static struct platform_device i2c2_device = {
151 .name = "pnx-i2c",
152 .id = 2,
153 .dev = {
154 .platform_data = &i2c2_data,
155 },
156};
157
158static struct platform_device *devices[] __initdata = {
159 &i2c0_device,
160 &i2c1_device,
161 &i2c2_device,
162};
163
164void __init pnx4008_register_i2c_devices(void)
165{
166 platform_add_devices(devices, ARRAY_SIZE(devices));
167}
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index 9e3d0bdcba07..5c0a10041cd1 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -75,28 +75,28 @@ endmenu
75 75
76config MACH_POODLE 76config MACH_POODLE
77 bool "Enable Sharp SL-5600 (Poodle) Support" 77 bool "Enable Sharp SL-5600 (Poodle) Support"
78 depends PXA_SHARPSL_25x 78 depends on PXA_SHARPSL_25x
79 select SHARP_LOCOMO 79 select SHARP_LOCOMO
80 select PXA_SSP 80 select PXA_SSP
81 81
82config MACH_CORGI 82config MACH_CORGI
83 bool "Enable Sharp SL-C700 (Corgi) Support" 83 bool "Enable Sharp SL-C700 (Corgi) Support"
84 depends PXA_SHARPSL_25x 84 depends on PXA_SHARPSL_25x
85 select PXA_SHARP_C7xx 85 select PXA_SHARP_C7xx
86 86
87config MACH_SHEPHERD 87config MACH_SHEPHERD
88 bool "Enable Sharp SL-C750 (Shepherd) Support" 88 bool "Enable Sharp SL-C750 (Shepherd) Support"
89 depends PXA_SHARPSL_25x 89 depends on PXA_SHARPSL_25x
90 select PXA_SHARP_C7xx 90 select PXA_SHARP_C7xx
91 91
92config MACH_HUSKY 92config MACH_HUSKY
93 bool "Enable Sharp SL-C760 (Husky) Support" 93 bool "Enable Sharp SL-C760 (Husky) Support"
94 depends PXA_SHARPSL_25x 94 depends on PXA_SHARPSL_25x
95 select PXA_SHARP_C7xx 95 select PXA_SHARP_C7xx
96 96
97config MACH_AKITA 97config MACH_AKITA
98 bool "Enable Sharp SL-1000 (Akita) Support" 98 bool "Enable Sharp SL-1000 (Akita) Support"
99 depends PXA_SHARPSL_27x 99 depends on PXA_SHARPSL_27x
100 select PXA_SHARP_Cxx00 100 select PXA_SHARP_Cxx00
101 select MACH_SPITZ 101 select MACH_SPITZ
102 select I2C 102 select I2C
@@ -104,17 +104,17 @@ config MACH_AKITA
104 104
105config MACH_SPITZ 105config MACH_SPITZ
106 bool "Enable Sharp Zaurus SL-3000 (Spitz) Support" 106 bool "Enable Sharp Zaurus SL-3000 (Spitz) Support"
107 depends PXA_SHARPSL_27x 107 depends on PXA_SHARPSL_27x
108 select PXA_SHARP_Cxx00 108 select PXA_SHARP_Cxx00
109 109
110config MACH_BORZOI 110config MACH_BORZOI
111 bool "Enable Sharp Zaurus SL-3100 (Borzoi) Support" 111 bool "Enable Sharp Zaurus SL-3100 (Borzoi) Support"
112 depends PXA_SHARPSL_27x 112 depends on PXA_SHARPSL_27x
113 select PXA_SHARP_Cxx00 113 select PXA_SHARP_Cxx00
114 114
115config MACH_TOSA 115config MACH_TOSA
116 bool "Enable Sharp SL-6000x (Tosa) Support" 116 bool "Enable Sharp SL-6000x (Tosa) Support"
117 depends PXA_SHARPSL_25x 117 depends on PXA_SHARPSL_25x
118 118
119config PXA25x 119config PXA25x
120 bool 120 bool
diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
index 68c67053f479..84d3fe76e94e 100644
--- a/arch/arm/mach-realview/core.c
+++ b/arch/arm/mach-realview/core.c
@@ -141,6 +141,19 @@ struct platform_device realview_smc91x_device = {
141 .resource = realview_smc91x_resources, 141 .resource = realview_smc91x_resources,
142}; 142};
143 143
144static struct resource realview_i2c_resource = {
145 .start = REALVIEW_I2C_BASE,
146 .end = REALVIEW_I2C_BASE + SZ_4K - 1,
147 .flags = IORESOURCE_MEM,
148};
149
150struct platform_device realview_i2c_device = {
151 .name = "versatile-i2c",
152 .id = -1,
153 .num_resources = 1,
154 .resource = &realview_i2c_resource,
155};
156
144#define REALVIEW_SYSMCI (__io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_MCI_OFFSET) 157#define REALVIEW_SYSMCI (__io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_MCI_OFFSET)
145 158
146static unsigned int realview_mmc_status(struct device *dev) 159static unsigned int realview_mmc_status(struct device *dev)
diff --git a/arch/arm/mach-realview/core.h b/arch/arm/mach-realview/core.h
index 93e86d9f439c..2b53420f9c1b 100644
--- a/arch/arm/mach-realview/core.h
+++ b/arch/arm/mach-realview/core.h
@@ -108,6 +108,7 @@ static struct amba_device name##_device = { \
108 108
109extern struct platform_device realview_flash_device; 109extern struct platform_device realview_flash_device;
110extern struct platform_device realview_smc91x_device; 110extern struct platform_device realview_smc91x_device;
111extern struct platform_device realview_i2c_device;
111extern struct mmc_platform_data realview_mmc0_plat_data; 112extern struct mmc_platform_data realview_mmc0_plat_data;
112extern struct mmc_platform_data realview_mmc1_plat_data; 113extern struct mmc_platform_data realview_mmc1_plat_data;
113extern struct clk realview_clcd_clk; 114extern struct clk realview_clcd_clk;
diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c
index 84a959530fb6..9741b4d3c9cf 100644
--- a/arch/arm/mach-realview/realview_eb.c
+++ b/arch/arm/mach-realview/realview_eb.c
@@ -155,6 +155,7 @@ static void __init realview_eb_init(void)
155 155
156 platform_device_register(&realview_flash_device); 156 platform_device_register(&realview_flash_device);
157 platform_device_register(&realview_smc91x_device); 157 platform_device_register(&realview_smc91x_device);
158 platform_device_register(&realview_i2c_device);
158 159
159 for (i = 0; i < ARRAY_SIZE(amba_devs); i++) { 160 for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
160 struct amba_device *d = amba_devs[i]; 161 struct amba_device *d = amba_devs[i];
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index 57196947559f..bf71507c76fd 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -325,6 +325,19 @@ static struct platform_device smc91x_device = {
325 .resource = smc91x_resources, 325 .resource = smc91x_resources,
326}; 326};
327 327
328static struct resource versatile_i2c_resource = {
329 .start = VERSATILE_I2C_BASE,
330 .end = VERSATILE_I2C_BASE + SZ_4K - 1,
331 .flags = IORESOURCE_MEM,
332};
333
334static struct platform_device versatile_i2c_device = {
335 .name = "versatile-i2c",
336 .id = -1,
337 .num_resources = 1,
338 .resource = &versatile_i2c_resource,
339};
340
328#define VERSATILE_SYSMCI (__io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_MCI_OFFSET) 341#define VERSATILE_SYSMCI (__io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_MCI_OFFSET)
329 342
330unsigned int mmc_status(struct device *dev) 343unsigned int mmc_status(struct device *dev)
@@ -775,6 +788,7 @@ void __init versatile_init(void)
775 clk_register(&versatile_clcd_clk); 788 clk_register(&versatile_clcd_clk);
776 789
777 platform_device_register(&versatile_flash_device); 790 platform_device_register(&versatile_flash_device);
791 platform_device_register(&versatile_i2c_device);
778 platform_device_register(&smc91x_device); 792 platform_device_register(&smc91x_device);
779 793
780 for (i = 0; i < ARRAY_SIZE(amba_devs); i++) { 794 for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index ec752e16d618..f2dc363de66b 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -113,7 +113,7 @@ endchoice
113 113
114config OMAP_SERIAL_WAKE 114config OMAP_SERIAL_WAKE
115 bool "Enable wake-up events for serial ports" 115 bool "Enable wake-up events for serial ports"
116 depends OMAP_MUX 116 depends on OMAP_MUX
117 default y 117 default y
118 help 118 help
119 Select this option if you want to have your system wake up 119 Select this option if you want to have your system wake up
diff --git a/arch/arm26/kernel/ecard.c b/arch/arm26/kernel/ecard.c
index 43dd41be71fb..9dbc17247c6f 100644
--- a/arch/arm26/kernel/ecard.c
+++ b/arch/arm26/kernel/ecard.c
@@ -215,7 +215,7 @@ int ecard_readchunk(struct in_chunk_dir *cd, ecard_t *ec, int id, int num)
215 } 215 }
216 if (c_id(&excd) == 0x80) { /* loader */ 216 if (c_id(&excd) == 0x80) { /* loader */
217 if (!ec->loader) { 217 if (!ec->loader) {
218 ec->loader = (loader_t)kmalloc(c_len(&excd), 218 ec->loader = kmalloc(c_len(&excd),
219 GFP_KERNEL); 219 GFP_KERNEL);
220 if (ec->loader) 220 if (ec->loader)
221 ecard_readbytes(ec->loader, ec, 221 ecard_readbytes(ec->loader, ec,
diff --git a/arch/arm26/kernel/irq.c b/arch/arm26/kernel/irq.c
index d87d68b77d66..d53382c83bf9 100644
--- a/arch/arm26/kernel/irq.c
+++ b/arch/arm26/kernel/irq.c
@@ -545,7 +545,7 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
545 (irq_flags & IRQF_SHARED && !dev_id)) 545 (irq_flags & IRQF_SHARED && !dev_id))
546 return -EINVAL; 546 return -EINVAL;
547 547
548 action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL); 548 action = kmalloc(sizeof(struct irqaction), GFP_KERNEL);
549 if (!action) 549 if (!action)
550 return -ENOMEM; 550 return -ENOMEM;
551 551
diff --git a/arch/avr32/boards/atstk1000/atstk1002.c b/arch/avr32/boards/atstk1000/atstk1002.c
index cced73c58115..32b361f31c2c 100644
--- a/arch/avr32/boards/atstk1000/atstk1002.c
+++ b/arch/avr32/boards/atstk1000/atstk1002.c
@@ -7,20 +7,83 @@
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10#include <linux/clk.h>
11#include <linux/etherdevice.h>
10#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/kernel.h>
14#include <linux/platform_device.h>
15#include <linux/string.h>
16#include <linux/types.h>
11 17
18#include <asm/io.h>
19#include <asm/setup.h>
12#include <asm/arch/board.h> 20#include <asm/arch/board.h>
13#include <asm/arch/init.h> 21#include <asm/arch/init.h>
14 22
15struct eth_platform_data __initdata eth0_data = { 23struct eth_addr {
16 .valid = 1, 24 u8 addr[6];
17 .mii_phy_addr = 0x10,
18 .is_rmii = 0,
19 .hw_addr = { 0x6a, 0x87, 0x71, 0x14, 0xcd, 0xcb },
20}; 25};
21 26
27static struct eth_addr __initdata hw_addr[2];
28
29static struct eth_platform_data __initdata eth_data[2];
22extern struct lcdc_platform_data atstk1000_fb0_data; 30extern struct lcdc_platform_data atstk1000_fb0_data;
23 31
32/*
33 * The next two functions should go away as the boot loader is
34 * supposed to initialize the macb address registers with a valid
35 * ethernet address. But we need to keep it around for a while until
36 * we can be reasonably sure the boot loader does this.
37 *
38 * The phy_id is ignored as the driver will probe for it.
39 */
40static int __init parse_tag_ethernet(struct tag *tag)
41{
42 int i;
43
44 i = tag->u.ethernet.mac_index;
45 if (i < ARRAY_SIZE(hw_addr))
46 memcpy(hw_addr[i].addr, tag->u.ethernet.hw_address,
47 sizeof(hw_addr[i].addr));
48
49 return 0;
50}
51__tagtable(ATAG_ETHERNET, parse_tag_ethernet);
52
53static void __init set_hw_addr(struct platform_device *pdev)
54{
55 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
56 const u8 *addr;
57 void __iomem *regs;
58 struct clk *pclk;
59
60 if (!res)
61 return;
62 if (pdev->id >= ARRAY_SIZE(hw_addr))
63 return;
64
65 addr = hw_addr[pdev->id].addr;
66 if (!is_valid_ether_addr(addr))
67 return;
68
69 /*
70 * Since this is board-specific code, we'll cheat and use the
71 * physical address directly as we happen to know that it's
72 * the same as the virtual address.
73 */
74 regs = (void __iomem __force *)res->start;
75 pclk = clk_get(&pdev->dev, "pclk");
76 if (!pclk)
77 return;
78
79 clk_enable(pclk);
80 __raw_writel((addr[3] << 24) | (addr[2] << 16)
81 | (addr[1] << 8) | addr[0], regs + 0x98);
82 __raw_writel((addr[5] << 8) | addr[4], regs + 0x9c);
83 clk_disable(pclk);
84 clk_put(pclk);
85}
86
24void __init setup_board(void) 87void __init setup_board(void)
25{ 88{
26 at32_map_usart(1, 0); /* /dev/ttyS0 */ 89 at32_map_usart(1, 0); /* /dev/ttyS0 */
@@ -38,7 +101,8 @@ static int __init atstk1002_init(void)
38 at32_add_device_usart(1); 101 at32_add_device_usart(1);
39 at32_add_device_usart(2); 102 at32_add_device_usart(2);
40 103
41 at32_add_device_eth(0, &eth0_data); 104 set_hw_addr(at32_add_device_eth(0, &eth_data[0]));
105
42 at32_add_device_spi(0); 106 at32_add_device_spi(0);
43 at32_add_device_lcdc(0, &atstk1000_fb0_data); 107 at32_add_device_lcdc(0, &atstk1000_fb0_data);
44 108
diff --git a/arch/avr32/kernel/avr32_ksyms.c b/arch/avr32/kernel/avr32_ksyms.c
index 372e3f8b2417..7c4c76114bba 100644
--- a/arch/avr32/kernel/avr32_ksyms.c
+++ b/arch/avr32/kernel/avr32_ksyms.c
@@ -7,12 +7,12 @@
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10#include <linux/delay.h>
10#include <linux/io.h> 11#include <linux/io.h>
11#include <linux/module.h> 12#include <linux/module.h>
12 13
13#include <asm/checksum.h> 14#include <asm/checksum.h>
14#include <asm/uaccess.h> 15#include <asm/uaccess.h>
15#include <asm/delay.h>
16 16
17/* 17/*
18 * GCC functions 18 * GCC functions
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
index 317dc50945f2..0b4325946a41 100644
--- a/arch/avr32/kernel/process.c
+++ b/arch/avr32/kernel/process.c
@@ -38,6 +38,13 @@ void cpu_idle(void)
38 38
39void machine_halt(void) 39void machine_halt(void)
40{ 40{
41 /*
42 * Enter Stop mode. The 32 kHz oscillator will keep running so
43 * the RTC will keep the time properly and the system will
44 * boot quickly.
45 */
46 asm volatile("sleep 3\n\t"
47 "sub pc, -2");
41} 48}
42 49
43void machine_power_off(void) 50void machine_power_off(void)
diff --git a/arch/avr32/kernel/setup.c b/arch/avr32/kernel/setup.c
index ea2d1ffee478..a34211601008 100644
--- a/arch/avr32/kernel/setup.c
+++ b/arch/avr32/kernel/setup.c
@@ -229,30 +229,6 @@ static int __init parse_tag_rsvd_mem(struct tag *tag)
229} 229}
230__tagtable(ATAG_RSVD_MEM, parse_tag_rsvd_mem); 230__tagtable(ATAG_RSVD_MEM, parse_tag_rsvd_mem);
231 231
232static int __init parse_tag_ethernet(struct tag *tag)
233{
234#if 0
235 const struct platform_device *pdev;
236
237 /*
238 * We really need a bus type that supports "classes"...this
239 * will do for now (until we must handle other kinds of
240 * ethernet controllers)
241 */
242 pdev = platform_get_device("macb", tag->u.ethernet.mac_index);
243 if (pdev && pdev->dev.platform_data) {
244 struct eth_platform_data *data = pdev->dev.platform_data;
245
246 data->valid = 1;
247 data->mii_phy_addr = tag->u.ethernet.mii_phy_addr;
248 memcpy(data->hw_addr, tag->u.ethernet.hw_address,
249 sizeof(data->hw_addr));
250 }
251#endif
252 return 0;
253}
254__tagtable(ATAG_ETHERNET, parse_tag_ethernet);
255
256/* 232/*
257 * Scan the tag table for this tag, and call its parse function. The 233 * Scan the tag table for this tag, and call its parse function. The
258 * tag table is built by the linker from all the __tagtable 234 * tag table is built by the linker from all the __tagtable
diff --git a/arch/avr32/lib/delay.c b/arch/avr32/lib/delay.c
index 462c8307b680..b3bc0b56e2c6 100644
--- a/arch/avr32/lib/delay.c
+++ b/arch/avr32/lib/delay.c
@@ -12,9 +12,9 @@
12 12
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/param.h>
15#include <linux/types.h> 16#include <linux/types.h>
16 17
17#include <asm/delay.h>
18#include <asm/processor.h> 18#include <asm/processor.h>
19#include <asm/sysreg.h> 19#include <asm/sysreg.h>
20 20
diff --git a/arch/avr32/mach-at32ap/at32ap7000.c b/arch/avr32/mach-at32ap/at32ap7000.c
index 7ff6ad8bab5f..48f4ef38c70e 100644
--- a/arch/avr32/mach-at32ap/at32ap7000.c
+++ b/arch/avr32/mach-at32ap/at32ap7000.c
@@ -11,6 +11,7 @@
11 11
12#include <asm/io.h> 12#include <asm/io.h>
13 13
14#include <asm/arch/at32ap7000.h>
14#include <asm/arch/board.h> 15#include <asm/arch/board.h>
15#include <asm/arch/portmux.h> 16#include <asm/arch/portmux.h>
16#include <asm/arch/sm.h> 17#include <asm/arch/sm.h>
@@ -57,6 +58,9 @@ static struct platform_device _name##_id##_device = { \
57 .num_resources = ARRAY_SIZE(_name##_id##_resource), \ 58 .num_resources = ARRAY_SIZE(_name##_id##_resource), \
58} 59}
59 60
61#define select_peripheral(pin, periph, flags) \
62 at32_select_periph(GPIO_PIN_##pin, GPIO_##periph, flags)
63
60#define DEV_CLK(_name, devname, bus, _index) \ 64#define DEV_CLK(_name, devname, bus, _index) \
61static struct clk devname##_##_name = { \ 65static struct clk devname##_##_name = { \
62 .name = #_name, \ 66 .name = #_name, \
@@ -67,18 +71,6 @@ static struct clk devname##_##_name = { \
67 .index = _index, \ 71 .index = _index, \
68} 72}
69 73
70enum {
71 PIOA,
72 PIOB,
73 PIOC,
74 PIOD,
75};
76
77enum {
78 FUNC_A,
79 FUNC_B,
80};
81
82unsigned long at32ap7000_osc_rates[3] = { 74unsigned long at32ap7000_osc_rates[3] = {
83 [0] = 32768, 75 [0] = 32768,
84 /* FIXME: these are ATSTK1002-specific */ 76 /* FIXME: these are ATSTK1002-specific */
@@ -569,26 +561,26 @@ DEV_CLK(usart, atmel_usart3, pba, 6);
569 561
570static inline void configure_usart0_pins(void) 562static inline void configure_usart0_pins(void)
571{ 563{
572 portmux_set_func(PIOA, 8, FUNC_B); /* RXD */ 564 select_peripheral(PA(8), PERIPH_B, 0); /* RXD */
573 portmux_set_func(PIOA, 9, FUNC_B); /* TXD */ 565 select_peripheral(PA(9), PERIPH_B, 0); /* TXD */
574} 566}
575 567
576static inline void configure_usart1_pins(void) 568static inline void configure_usart1_pins(void)
577{ 569{
578 portmux_set_func(PIOA, 17, FUNC_A); /* RXD */ 570 select_peripheral(PA(17), PERIPH_A, 0); /* RXD */
579 portmux_set_func(PIOA, 18, FUNC_A); /* TXD */ 571 select_peripheral(PA(18), PERIPH_A, 0); /* TXD */
580} 572}
581 573
582static inline void configure_usart2_pins(void) 574static inline void configure_usart2_pins(void)
583{ 575{
584 portmux_set_func(PIOB, 26, FUNC_B); /* RXD */ 576 select_peripheral(PB(26), PERIPH_B, 0); /* RXD */
585 portmux_set_func(PIOB, 27, FUNC_B); /* TXD */ 577 select_peripheral(PB(27), PERIPH_B, 0); /* TXD */
586} 578}
587 579
588static inline void configure_usart3_pins(void) 580static inline void configure_usart3_pins(void)
589{ 581{
590 portmux_set_func(PIOB, 18, FUNC_B); /* RXD */ 582 select_peripheral(PB(18), PERIPH_B, 0); /* RXD */
591 portmux_set_func(PIOB, 17, FUNC_B); /* TXD */ 583 select_peripheral(PB(17), PERIPH_B, 0); /* TXD */
592} 584}
593 585
594static struct platform_device *at32_usarts[4]; 586static struct platform_device *at32_usarts[4];
@@ -654,6 +646,15 @@ DEFINE_DEV_DATA(macb, 0);
654DEV_CLK(hclk, macb0, hsb, 8); 646DEV_CLK(hclk, macb0, hsb, 8);
655DEV_CLK(pclk, macb0, pbb, 6); 647DEV_CLK(pclk, macb0, pbb, 6);
656 648
649static struct eth_platform_data macb1_data;
650static struct resource macb1_resource[] = {
651 PBMEM(0xfff01c00),
652 IRQ(26),
653};
654DEFINE_DEV_DATA(macb, 1);
655DEV_CLK(hclk, macb1, hsb, 9);
656DEV_CLK(pclk, macb1, pbb, 7);
657
657struct platform_device *__init 658struct platform_device *__init
658at32_add_device_eth(unsigned int id, struct eth_platform_data *data) 659at32_add_device_eth(unsigned int id, struct eth_platform_data *data)
659{ 660{
@@ -663,27 +664,54 @@ at32_add_device_eth(unsigned int id, struct eth_platform_data *data)
663 case 0: 664 case 0:
664 pdev = &macb0_device; 665 pdev = &macb0_device;
665 666
666 portmux_set_func(PIOC, 3, FUNC_A); /* TXD0 */ 667 select_peripheral(PC(3), PERIPH_A, 0); /* TXD0 */
667 portmux_set_func(PIOC, 4, FUNC_A); /* TXD1 */ 668 select_peripheral(PC(4), PERIPH_A, 0); /* TXD1 */
668 portmux_set_func(PIOC, 7, FUNC_A); /* TXEN */ 669 select_peripheral(PC(7), PERIPH_A, 0); /* TXEN */
669 portmux_set_func(PIOC, 8, FUNC_A); /* TXCK */ 670 select_peripheral(PC(8), PERIPH_A, 0); /* TXCK */
670 portmux_set_func(PIOC, 9, FUNC_A); /* RXD0 */ 671 select_peripheral(PC(9), PERIPH_A, 0); /* RXD0 */
671 portmux_set_func(PIOC, 10, FUNC_A); /* RXD1 */ 672 select_peripheral(PC(10), PERIPH_A, 0); /* RXD1 */
672 portmux_set_func(PIOC, 13, FUNC_A); /* RXER */ 673 select_peripheral(PC(13), PERIPH_A, 0); /* RXER */
673 portmux_set_func(PIOC, 15, FUNC_A); /* RXDV */ 674 select_peripheral(PC(15), PERIPH_A, 0); /* RXDV */
674 portmux_set_func(PIOC, 16, FUNC_A); /* MDC */ 675 select_peripheral(PC(16), PERIPH_A, 0); /* MDC */
675 portmux_set_func(PIOC, 17, FUNC_A); /* MDIO */ 676 select_peripheral(PC(17), PERIPH_A, 0); /* MDIO */
677
678 if (!data->is_rmii) {
679 select_peripheral(PC(0), PERIPH_A, 0); /* COL */
680 select_peripheral(PC(1), PERIPH_A, 0); /* CRS */
681 select_peripheral(PC(2), PERIPH_A, 0); /* TXER */
682 select_peripheral(PC(5), PERIPH_A, 0); /* TXD2 */
683 select_peripheral(PC(6), PERIPH_A, 0); /* TXD3 */
684 select_peripheral(PC(11), PERIPH_A, 0); /* RXD2 */
685 select_peripheral(PC(12), PERIPH_A, 0); /* RXD3 */
686 select_peripheral(PC(14), PERIPH_A, 0); /* RXCK */
687 select_peripheral(PC(18), PERIPH_A, 0); /* SPD */
688 }
689 break;
690
691 case 1:
692 pdev = &macb1_device;
693
694 select_peripheral(PD(13), PERIPH_B, 0); /* TXD0 */
695 select_peripheral(PD(14), PERIPH_B, 0); /* TXD1 */
696 select_peripheral(PD(11), PERIPH_B, 0); /* TXEN */
697 select_peripheral(PD(12), PERIPH_B, 0); /* TXCK */
698 select_peripheral(PD(10), PERIPH_B, 0); /* RXD0 */
699 select_peripheral(PD(6), PERIPH_B, 0); /* RXD1 */
700 select_peripheral(PD(5), PERIPH_B, 0); /* RXER */
701 select_peripheral(PD(4), PERIPH_B, 0); /* RXDV */
702 select_peripheral(PD(3), PERIPH_B, 0); /* MDC */
703 select_peripheral(PD(2), PERIPH_B, 0); /* MDIO */
676 704
677 if (!data->is_rmii) { 705 if (!data->is_rmii) {
678 portmux_set_func(PIOC, 0, FUNC_A); /* COL */ 706 select_peripheral(PC(19), PERIPH_B, 0); /* COL */
679 portmux_set_func(PIOC, 1, FUNC_A); /* CRS */ 707 select_peripheral(PC(23), PERIPH_B, 0); /* CRS */
680 portmux_set_func(PIOC, 2, FUNC_A); /* TXER */ 708 select_peripheral(PC(26), PERIPH_B, 0); /* TXER */
681 portmux_set_func(PIOC, 5, FUNC_A); /* TXD2 */ 709 select_peripheral(PC(27), PERIPH_B, 0); /* TXD2 */
682 portmux_set_func(PIOC, 6, FUNC_A); /* TXD3 */ 710 select_peripheral(PC(28), PERIPH_B, 0); /* TXD3 */
683 portmux_set_func(PIOC, 11, FUNC_A); /* RXD2 */ 711 select_peripheral(PC(29), PERIPH_B, 0); /* RXD2 */
684 portmux_set_func(PIOC, 12, FUNC_A); /* RXD3 */ 712 select_peripheral(PC(30), PERIPH_B, 0); /* RXD3 */
685 portmux_set_func(PIOC, 14, FUNC_A); /* RXCK */ 713 select_peripheral(PC(24), PERIPH_B, 0); /* RXCK */
686 portmux_set_func(PIOC, 18, FUNC_A); /* SPD */ 714 select_peripheral(PD(15), PERIPH_B, 0); /* SPD */
687 } 715 }
688 break; 716 break;
689 717
@@ -714,12 +742,12 @@ struct platform_device *__init at32_add_device_spi(unsigned int id)
714 switch (id) { 742 switch (id) {
715 case 0: 743 case 0:
716 pdev = &spi0_device; 744 pdev = &spi0_device;
717 portmux_set_func(PIOA, 0, FUNC_A); /* MISO */ 745 select_peripheral(PA(0), PERIPH_A, 0); /* MISO */
718 portmux_set_func(PIOA, 1, FUNC_A); /* MOSI */ 746 select_peripheral(PA(1), PERIPH_A, 0); /* MOSI */
719 portmux_set_func(PIOA, 2, FUNC_A); /* SCK */ 747 select_peripheral(PA(2), PERIPH_A, 0); /* SCK */
720 portmux_set_func(PIOA, 3, FUNC_A); /* NPCS0 */ 748 select_peripheral(PA(3), PERIPH_A, 0); /* NPCS0 */
721 portmux_set_func(PIOA, 4, FUNC_A); /* NPCS1 */ 749 select_peripheral(PA(4), PERIPH_A, 0); /* NPCS1 */
722 portmux_set_func(PIOA, 5, FUNC_A); /* NPCS2 */ 750 select_peripheral(PA(5), PERIPH_A, 0); /* NPCS2 */
723 break; 751 break;
724 752
725 default: 753 default:
@@ -762,37 +790,37 @@ at32_add_device_lcdc(unsigned int id, struct lcdc_platform_data *data)
762 switch (id) { 790 switch (id) {
763 case 0: 791 case 0:
764 pdev = &lcdc0_device; 792 pdev = &lcdc0_device;
765 portmux_set_func(PIOC, 19, FUNC_A); /* CC */ 793 select_peripheral(PC(19), PERIPH_A, 0); /* CC */
766 portmux_set_func(PIOC, 20, FUNC_A); /* HSYNC */ 794 select_peripheral(PC(20), PERIPH_A, 0); /* HSYNC */
767 portmux_set_func(PIOC, 21, FUNC_A); /* PCLK */ 795 select_peripheral(PC(21), PERIPH_A, 0); /* PCLK */
768 portmux_set_func(PIOC, 22, FUNC_A); /* VSYNC */ 796 select_peripheral(PC(22), PERIPH_A, 0); /* VSYNC */
769 portmux_set_func(PIOC, 23, FUNC_A); /* DVAL */ 797 select_peripheral(PC(23), PERIPH_A, 0); /* DVAL */
770 portmux_set_func(PIOC, 24, FUNC_A); /* MODE */ 798 select_peripheral(PC(24), PERIPH_A, 0); /* MODE */
771 portmux_set_func(PIOC, 25, FUNC_A); /* PWR */ 799 select_peripheral(PC(25), PERIPH_A, 0); /* PWR */
772 portmux_set_func(PIOC, 26, FUNC_A); /* DATA0 */ 800 select_peripheral(PC(26), PERIPH_A, 0); /* DATA0 */
773 portmux_set_func(PIOC, 27, FUNC_A); /* DATA1 */ 801 select_peripheral(PC(27), PERIPH_A, 0); /* DATA1 */
774 portmux_set_func(PIOC, 28, FUNC_A); /* DATA2 */ 802 select_peripheral(PC(28), PERIPH_A, 0); /* DATA2 */
775 portmux_set_func(PIOC, 29, FUNC_A); /* DATA3 */ 803 select_peripheral(PC(29), PERIPH_A, 0); /* DATA3 */
776 portmux_set_func(PIOC, 30, FUNC_A); /* DATA4 */ 804 select_peripheral(PC(30), PERIPH_A, 0); /* DATA4 */
777 portmux_set_func(PIOC, 31, FUNC_A); /* DATA5 */ 805 select_peripheral(PC(31), PERIPH_A, 0); /* DATA5 */
778 portmux_set_func(PIOD, 0, FUNC_A); /* DATA6 */ 806 select_peripheral(PD(0), PERIPH_A, 0); /* DATA6 */
779 portmux_set_func(PIOD, 1, FUNC_A); /* DATA7 */ 807 select_peripheral(PD(1), PERIPH_A, 0); /* DATA7 */
780 portmux_set_func(PIOD, 2, FUNC_A); /* DATA8 */ 808 select_peripheral(PD(2), PERIPH_A, 0); /* DATA8 */
781 portmux_set_func(PIOD, 3, FUNC_A); /* DATA9 */ 809 select_peripheral(PD(3), PERIPH_A, 0); /* DATA9 */
782 portmux_set_func(PIOD, 4, FUNC_A); /* DATA10 */ 810 select_peripheral(PD(4), PERIPH_A, 0); /* DATA10 */
783 portmux_set_func(PIOD, 5, FUNC_A); /* DATA11 */ 811 select_peripheral(PD(5), PERIPH_A, 0); /* DATA11 */
784 portmux_set_func(PIOD, 6, FUNC_A); /* DATA12 */ 812 select_peripheral(PD(6), PERIPH_A, 0); /* DATA12 */
785 portmux_set_func(PIOD, 7, FUNC_A); /* DATA13 */ 813 select_peripheral(PD(7), PERIPH_A, 0); /* DATA13 */
786 portmux_set_func(PIOD, 8, FUNC_A); /* DATA14 */ 814 select_peripheral(PD(8), PERIPH_A, 0); /* DATA14 */
787 portmux_set_func(PIOD, 9, FUNC_A); /* DATA15 */ 815 select_peripheral(PD(9), PERIPH_A, 0); /* DATA15 */
788 portmux_set_func(PIOD, 10, FUNC_A); /* DATA16 */ 816 select_peripheral(PD(10), PERIPH_A, 0); /* DATA16 */
789 portmux_set_func(PIOD, 11, FUNC_A); /* DATA17 */ 817 select_peripheral(PD(11), PERIPH_A, 0); /* DATA17 */
790 portmux_set_func(PIOD, 12, FUNC_A); /* DATA18 */ 818 select_peripheral(PD(12), PERIPH_A, 0); /* DATA18 */
791 portmux_set_func(PIOD, 13, FUNC_A); /* DATA19 */ 819 select_peripheral(PD(13), PERIPH_A, 0); /* DATA19 */
792 portmux_set_func(PIOD, 14, FUNC_A); /* DATA20 */ 820 select_peripheral(PD(14), PERIPH_A, 0); /* DATA20 */
793 portmux_set_func(PIOD, 15, FUNC_A); /* DATA21 */ 821 select_peripheral(PD(15), PERIPH_A, 0); /* DATA21 */
794 portmux_set_func(PIOD, 16, FUNC_A); /* DATA22 */ 822 select_peripheral(PD(16), PERIPH_A, 0); /* DATA22 */
795 portmux_set_func(PIOD, 17, FUNC_A); /* DATA23 */ 823 select_peripheral(PD(17), PERIPH_A, 0); /* DATA23 */
796 824
797 clk_set_parent(&lcdc0_pixclk, &pll0); 825 clk_set_parent(&lcdc0_pixclk, &pll0);
798 clk_set_rate(&lcdc0_pixclk, clk_get_rate(&pll0)); 826 clk_set_rate(&lcdc0_pixclk, clk_get_rate(&pll0));
@@ -838,6 +866,8 @@ struct clk *at32_clock_list[] = {
838 &atmel_usart3_usart, 866 &atmel_usart3_usart,
839 &macb0_hclk, 867 &macb0_hclk,
840 &macb0_pclk, 868 &macb0_pclk,
869 &macb1_hclk,
870 &macb1_pclk,
841 &spi0_mck, 871 &spi0_mck,
842 &lcdc0_hclk, 872 &lcdc0_hclk,
843 &lcdc0_pixclk, 873 &lcdc0_pixclk,
diff --git a/arch/avr32/mach-at32ap/extint.c b/arch/avr32/mach-at32ap/extint.c
index 4dff1f988900..b59272e81b9a 100644
--- a/arch/avr32/mach-at32ap/extint.c
+++ b/arch/avr32/mach-at32ap/extint.c
@@ -49,12 +49,25 @@ static void eim_unmask_irq(unsigned int irq)
49static int eim_set_irq_type(unsigned int irq, unsigned int flow_type) 49static int eim_set_irq_type(unsigned int irq, unsigned int flow_type)
50{ 50{
51 struct at32_sm *sm = get_irq_chip_data(irq); 51 struct at32_sm *sm = get_irq_chip_data(irq);
52 struct irq_desc *desc;
52 unsigned int i = irq - sm->eim_first_irq; 53 unsigned int i = irq - sm->eim_first_irq;
53 u32 mode, edge, level; 54 u32 mode, edge, level;
54 unsigned long flags; 55 unsigned long flags;
55 int ret = 0; 56 int ret = 0;
56 57
57 flow_type &= IRQ_TYPE_SENSE_MASK; 58 if (flow_type == IRQ_TYPE_NONE)
59 flow_type = IRQ_TYPE_LEVEL_LOW;
60
61 desc = &irq_desc[irq];
62 desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
63 desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
64
65 if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) {
66 desc->status |= IRQ_LEVEL;
67 set_irq_handler(irq, handle_level_irq);
68 } else {
69 set_irq_handler(irq, handle_edge_irq);
70 }
58 71
59 spin_lock_irqsave(&sm->lock, flags); 72 spin_lock_irqsave(&sm->lock, flags);
60 73
@@ -148,10 +161,15 @@ static int __init eim_init(void)
148 pattern = sm_readl(sm, EIM_MODE); 161 pattern = sm_readl(sm, EIM_MODE);
149 nr_irqs = fls(pattern); 162 nr_irqs = fls(pattern);
150 163
164 /* Trigger on falling edge unless overridden by driver */
165 sm_writel(sm, EIM_MODE, 0UL);
166 sm_writel(sm, EIM_EDGE, 0UL);
167
151 sm->eim_chip = &eim_chip; 168 sm->eim_chip = &eim_chip;
152 169
153 for (i = 0; i < nr_irqs; i++) { 170 for (i = 0; i < nr_irqs; i++) {
154 set_irq_chip(sm->eim_first_irq + i, &eim_chip); 171 set_irq_chip_and_handler(sm->eim_first_irq + i, &eim_chip,
172 handle_edge_irq);
155 set_irq_chip_data(sm->eim_first_irq + i, sm); 173 set_irq_chip_data(sm->eim_first_irq + i, sm);
156 } 174 }
157 175
diff --git a/arch/avr32/mach-at32ap/intc.c b/arch/avr32/mach-at32ap/intc.c
index eb87a18ad7b2..dd5c009cf224 100644
--- a/arch/avr32/mach-at32ap/intc.c
+++ b/arch/avr32/mach-at32ap/intc.c
@@ -136,3 +136,7 @@ fail:
136 panic("Interrupt controller initialization failed!\n"); 136 panic("Interrupt controller initialization failed!\n");
137} 137}
138 138
139unsigned long intc_get_pending(int group)
140{
141 return intc_readl(&intc0, INTREQ0 + 4 * group);
142}
diff --git a/arch/avr32/mach-at32ap/pio.c b/arch/avr32/mach-at32ap/pio.c
index d3aabfca8598..f1280ed8ed6d 100644
--- a/arch/avr32/mach-at32ap/pio.c
+++ b/arch/avr32/mach-at32ap/pio.c
@@ -25,27 +25,98 @@ struct pio_device {
25 void __iomem *regs; 25 void __iomem *regs;
26 const struct platform_device *pdev; 26 const struct platform_device *pdev;
27 struct clk *clk; 27 struct clk *clk;
28 u32 alloc_mask; 28 u32 pinmux_mask;
29 char name[32]; 29 char name[32];
30}; 30};
31 31
32static struct pio_device pio_dev[MAX_NR_PIO_DEVICES]; 32static struct pio_device pio_dev[MAX_NR_PIO_DEVICES];
33 33
34void portmux_set_func(unsigned int portmux_id, unsigned int pin_id, 34static struct pio_device *gpio_to_pio(unsigned int gpio)
35 unsigned int function_id)
36{ 35{
37 struct pio_device *pio; 36 struct pio_device *pio;
38 u32 mask = 1 << pin_id; 37 unsigned int index;
39 38
40 BUG_ON(portmux_id >= MAX_NR_PIO_DEVICES); 39 index = gpio >> 5;
40 if (index >= MAX_NR_PIO_DEVICES)
41 return NULL;
42 pio = &pio_dev[index];
43 if (!pio->regs)
44 return NULL;
41 45
42 pio = &pio_dev[portmux_id]; 46 return pio;
47}
48
49/* Pin multiplexing API */
50
51void __init at32_select_periph(unsigned int pin, unsigned int periph,
52 unsigned long flags)
53{
54 struct pio_device *pio;
55 unsigned int pin_index = pin & 0x1f;
56 u32 mask = 1 << pin_index;
57
58 pio = gpio_to_pio(pin);
59 if (unlikely(!pio)) {
60 printk("pio: invalid pin %u\n", pin);
61 goto fail;
62 }
43 63
44 if (function_id) 64 if (unlikely(test_and_set_bit(pin_index, &pio->pinmux_mask))) {
65 printk("%s: pin %u is busy\n", pio->name, pin_index);
66 goto fail;
67 }
68
69 pio_writel(pio, PUER, mask);
70 if (periph)
45 pio_writel(pio, BSR, mask); 71 pio_writel(pio, BSR, mask);
46 else 72 else
47 pio_writel(pio, ASR, mask); 73 pio_writel(pio, ASR, mask);
74
48 pio_writel(pio, PDR, mask); 75 pio_writel(pio, PDR, mask);
76 if (!(flags & AT32_GPIOF_PULLUP))
77 pio_writel(pio, PUDR, mask);
78
79 return;
80
81fail:
82 dump_stack();
83}
84
85void __init at32_select_gpio(unsigned int pin, unsigned long flags)
86{
87 struct pio_device *pio;
88 unsigned int pin_index = pin & 0x1f;
89 u32 mask = 1 << pin_index;
90
91 pio = gpio_to_pio(pin);
92 if (unlikely(!pio)) {
93 printk("pio: invalid pin %u\n", pin);
94 goto fail;
95 }
96
97 if (unlikely(test_and_set_bit(pin_index, &pio->pinmux_mask))) {
98 printk("%s: pin %u is busy\n", pio->name, pin_index);
99 goto fail;
100 }
101
102 pio_writel(pio, PUER, mask);
103 if (flags & AT32_GPIOF_HIGH)
104 pio_writel(pio, SODR, mask);
105 else
106 pio_writel(pio, CODR, mask);
107 if (flags & AT32_GPIOF_OUTPUT)
108 pio_writel(pio, OER, mask);
109 else
110 pio_writel(pio, ODR, mask);
111
112 pio_writel(pio, PER, mask);
113 if (!(flags & AT32_GPIOF_PULLUP))
114 pio_writel(pio, PUDR, mask);
115
116 return;
117
118fail:
119 dump_stack();
49} 120}
50 121
51static int __init pio_probe(struct platform_device *pdev) 122static int __init pio_probe(struct platform_device *pdev)
diff --git a/arch/avr32/mach-at32ap/sm.c b/arch/avr32/mach-at32ap/sm.c
deleted file mode 100644
index 03306eb0345e..000000000000
--- a/arch/avr32/mach-at32ap/sm.c
+++ /dev/null
@@ -1,289 +0,0 @@
1/*
2 * System Manager driver for AT32AP CPUs
3 *
4 * Copyright (C) 2006 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/errno.h>
12#include <linux/init.h>
13#include <linux/interrupt.h>
14#include <linux/kernel.h>
15#include <linux/platform_device.h>
16#include <linux/random.h>
17#include <linux/spinlock.h>
18
19#include <asm/intc.h>
20#include <asm/io.h>
21#include <asm/irq.h>
22
23#include <asm/arch/sm.h>
24
25#include "sm.h"
26
27#define SM_EIM_IRQ_RESOURCE 1
28#define SM_PM_IRQ_RESOURCE 2
29#define SM_RTC_IRQ_RESOURCE 3
30
31#define to_eim(irqc) container_of(irqc, struct at32_sm, irqc)
32
33struct at32_sm system_manager;
34
35int __init at32_sm_init(void)
36{
37 struct resource *regs;
38 struct at32_sm *sm = &system_manager;
39 int ret = -ENXIO;
40
41 regs = platform_get_resource(&at32_sm_device, IORESOURCE_MEM, 0);
42 if (!regs)
43 goto fail;
44
45 spin_lock_init(&sm->lock);
46 sm->pdev = &at32_sm_device;
47
48 ret = -ENOMEM;
49 sm->regs = ioremap(regs->start, regs->end - regs->start + 1);
50 if (!sm->regs)
51 goto fail;
52
53 return 0;
54
55fail:
56 printk(KERN_ERR "Failed to initialize System Manager: %d\n", ret);
57 return ret;
58}
59
60/*
61 * External Interrupt Module (EIM).
62 *
63 * EIM gets level- or edge-triggered interrupts of either polarity
64 * from the outside and converts it to active-high level-triggered
65 * interrupts that the internal interrupt controller can handle. EIM
66 * also provides masking/unmasking of interrupts, as well as
67 * acknowledging of edge-triggered interrupts.
68 */
69
70static irqreturn_t spurious_eim_interrupt(int irq, void *dev_id,
71 struct pt_regs *regs)
72{
73 printk(KERN_WARNING "Spurious EIM interrupt %d\n", irq);
74 disable_irq(irq);
75 return IRQ_NONE;
76}
77
78static struct irqaction eim_spurious_action = {
79 .handler = spurious_eim_interrupt,
80};
81
82static irqreturn_t eim_handle_irq(int irq, void *dev_id, struct pt_regs *regs)
83{
84 struct irq_controller * irqc = dev_id;
85 struct at32_sm *sm = to_eim(irqc);
86 unsigned long pending;
87
88 /*
89 * No need to disable interrupts globally. The interrupt
90 * level relevant to this group must be masked all the time,
91 * so we know that this particular EIM instance will not be
92 * re-entered.
93 */
94 spin_lock(&sm->lock);
95
96 pending = intc_get_pending(sm->irqc.irq_group);
97 if (unlikely(!pending)) {
98 printk(KERN_ERR "EIM (group %u): No interrupts pending!\n",
99 sm->irqc.irq_group);
100 goto unlock;
101 }
102
103 do {
104 struct irqaction *action;
105 unsigned int i;
106
107 i = fls(pending) - 1;
108 pending &= ~(1 << i);
109 action = sm->action[i];
110
111 /* Acknowledge the interrupt */
112 sm_writel(sm, EIM_ICR, 1 << i);
113
114 spin_unlock(&sm->lock);
115
116 if (action->flags & SA_INTERRUPT)
117 local_irq_disable();
118 action->handler(sm->irqc.first_irq + i, action->dev_id, regs);
119 local_irq_enable();
120 spin_lock(&sm->lock);
121 if (action->flags & SA_SAMPLE_RANDOM)
122 add_interrupt_randomness(sm->irqc.first_irq + i);
123 } while (pending);
124
125unlock:
126 spin_unlock(&sm->lock);
127 return IRQ_HANDLED;
128}
129
130static void eim_mask(struct irq_controller *irqc, unsigned int irq)
131{
132 struct at32_sm *sm = to_eim(irqc);
133 unsigned int i;
134
135 i = irq - sm->irqc.first_irq;
136 sm_writel(sm, EIM_IDR, 1 << i);
137}
138
139static void eim_unmask(struct irq_controller *irqc, unsigned int irq)
140{
141 struct at32_sm *sm = to_eim(irqc);
142 unsigned int i;
143
144 i = irq - sm->irqc.first_irq;
145 sm_writel(sm, EIM_IER, 1 << i);
146}
147
148static int eim_setup(struct irq_controller *irqc, unsigned int irq,
149 struct irqaction *action)
150{
151 struct at32_sm *sm = to_eim(irqc);
152 sm->action[irq - sm->irqc.first_irq] = action;
153 /* Acknowledge earlier interrupts */
154 sm_writel(sm, EIM_ICR, (1<<(irq - sm->irqc.first_irq)));
155 eim_unmask(irqc, irq);
156 return 0;
157}
158
159static void eim_free(struct irq_controller *irqc, unsigned int irq,
160 void *dev)
161{
162 struct at32_sm *sm = to_eim(irqc);
163 eim_mask(irqc, irq);
164 sm->action[irq - sm->irqc.first_irq] = &eim_spurious_action;
165}
166
167static int eim_set_type(struct irq_controller *irqc, unsigned int irq,
168 unsigned int type)
169{
170 struct at32_sm *sm = to_eim(irqc);
171 unsigned long flags;
172 u32 value, pattern;
173
174 spin_lock_irqsave(&sm->lock, flags);
175
176 pattern = 1 << (irq - sm->irqc.first_irq);
177
178 value = sm_readl(sm, EIM_MODE);
179 if (type & IRQ_TYPE_LEVEL)
180 value |= pattern;
181 else
182 value &= ~pattern;
183 sm_writel(sm, EIM_MODE, value);
184 value = sm_readl(sm, EIM_EDGE);
185 if (type & IRQ_EDGE_RISING)
186 value |= pattern;
187 else
188 value &= ~pattern;
189 sm_writel(sm, EIM_EDGE, value);
190 value = sm_readl(sm, EIM_LEVEL);
191 if (type & IRQ_LEVEL_HIGH)
192 value |= pattern;
193 else
194 value &= ~pattern;
195 sm_writel(sm, EIM_LEVEL, value);
196
197 spin_unlock_irqrestore(&sm->lock, flags);
198
199 return 0;
200}
201
202static unsigned int eim_get_type(struct irq_controller *irqc,
203 unsigned int irq)
204{
205 struct at32_sm *sm = to_eim(irqc);
206 unsigned long flags;
207 unsigned int type = 0;
208 u32 mode, edge, level, pattern;
209
210 pattern = 1 << (irq - sm->irqc.first_irq);
211
212 spin_lock_irqsave(&sm->lock, flags);
213 mode = sm_readl(sm, EIM_MODE);
214 edge = sm_readl(sm, EIM_EDGE);
215 level = sm_readl(sm, EIM_LEVEL);
216 spin_unlock_irqrestore(&sm->lock, flags);
217
218 if (mode & pattern)
219 type |= IRQ_TYPE_LEVEL;
220 if (edge & pattern)
221 type |= IRQ_EDGE_RISING;
222 if (level & pattern)
223 type |= IRQ_LEVEL_HIGH;
224
225 return type;
226}
227
228static struct irq_controller_class eim_irq_class = {
229 .typename = "EIM",
230 .handle = eim_handle_irq,
231 .setup = eim_setup,
232 .free = eim_free,
233 .mask = eim_mask,
234 .unmask = eim_unmask,
235 .set_type = eim_set_type,
236 .get_type = eim_get_type,
237};
238
239static int __init eim_init(void)
240{
241 struct at32_sm *sm = &system_manager;
242 unsigned int i;
243 u32 pattern;
244 int ret;
245
246 /*
247 * The EIM is really the same module as SM, so register
248 * mapping, etc. has been taken care of already.
249 */
250
251 /*
252 * Find out how many interrupt lines that are actually
253 * implemented in hardware.
254 */
255 sm_writel(sm, EIM_IDR, ~0UL);
256 sm_writel(sm, EIM_MODE, ~0UL);
257 pattern = sm_readl(sm, EIM_MODE);
258 sm->irqc.nr_irqs = fls(pattern);
259
260 ret = -ENOMEM;
261 sm->action = kmalloc(sizeof(*sm->action) * sm->irqc.nr_irqs,
262 GFP_KERNEL);
263 if (!sm->action)
264 goto out;
265
266 for (i = 0; i < sm->irqc.nr_irqs; i++)
267 sm->action[i] = &eim_spurious_action;
268
269 spin_lock_init(&sm->lock);
270 sm->irqc.irq_group = sm->pdev->resource[SM_EIM_IRQ_RESOURCE].start;
271 sm->irqc.class = &eim_irq_class;
272
273 ret = intc_register_controller(&sm->irqc);
274 if (ret < 0)
275 goto out_free_actions;
276
277 printk("EIM: External Interrupt Module at 0x%p, IRQ group %u\n",
278 sm->regs, sm->irqc.irq_group);
279 printk("EIM: Handling %u external IRQs, starting with IRQ%u\n",
280 sm->irqc.nr_irqs, sm->irqc.first_irq);
281
282 return 0;
283
284out_free_actions:
285 kfree(sm->action);
286out:
287 return ret;
288}
289arch_initcall(eim_init);
diff --git a/arch/cris/arch-v10/drivers/axisflashmap.c b/arch/cris/arch-v10/drivers/axisflashmap.c
index 4fa81abab0c7..ffade19a14e6 100644
--- a/arch/cris/arch-v10/drivers/axisflashmap.c
+++ b/arch/cris/arch-v10/drivers/axisflashmap.c
@@ -516,7 +516,7 @@ static int __init init_axis_flash(void)
516#else 516#else
517 struct mtd_info *mtd_ram; 517 struct mtd_info *mtd_ram;
518 518
519 mtd_ram = (struct mtd_info *)kmalloc(sizeof(struct mtd_info), 519 mtd_ram = kmalloc(sizeof(struct mtd_info),
520 GFP_KERNEL); 520 GFP_KERNEL);
521 if (!mtd_ram) { 521 if (!mtd_ram) {
522 panic("axisflashmap couldn't allocate memory for " 522 panic("axisflashmap couldn't allocate memory for "
diff --git a/arch/cris/arch-v10/drivers/gpio.c b/arch/cris/arch-v10/drivers/gpio.c
index fcba6632ed7b..9aba18b931dd 100644
--- a/arch/cris/arch-v10/drivers/gpio.c
+++ b/arch/cris/arch-v10/drivers/gpio.c
@@ -440,7 +440,7 @@ gpio_open(struct inode *inode, struct file *filp)
440 if (p > GPIO_MINOR_LAST) 440 if (p > GPIO_MINOR_LAST)
441 return -EINVAL; 441 return -EINVAL;
442 442
443 priv = (struct gpio_private *)kmalloc(sizeof(struct gpio_private), 443 priv = kmalloc(sizeof(struct gpio_private),
444 GFP_KERNEL); 444 GFP_KERNEL);
445 445
446 if (!priv) 446 if (!priv)
diff --git a/arch/cris/arch-v32/drivers/axisflashmap.c b/arch/cris/arch-v32/drivers/axisflashmap.c
index 41952320e00a..5180d45412fc 100644
--- a/arch/cris/arch-v32/drivers/axisflashmap.c
+++ b/arch/cris/arch-v32/drivers/axisflashmap.c
@@ -427,7 +427,7 @@ static int __init init_axis_flash(void)
427#else 427#else
428 struct mtd_info *mtd_ram; 428 struct mtd_info *mtd_ram;
429 429
430 mtd_ram = (struct mtd_info *)kmalloc(sizeof(struct mtd_info), 430 mtd_ram = kmalloc(sizeof(struct mtd_info),
431 GFP_KERNEL); 431 GFP_KERNEL);
432 if (!mtd_ram) { 432 if (!mtd_ram) {
433 panic("axisflashmap couldn't allocate memory for " 433 panic("axisflashmap couldn't allocate memory for "
diff --git a/arch/cris/arch-v32/drivers/gpio.c b/arch/cris/arch-v32/drivers/gpio.c
index c3f876b4da6b..08d36f0955c6 100644
--- a/arch/cris/arch-v32/drivers/gpio.c
+++ b/arch/cris/arch-v32/drivers/gpio.c
@@ -423,7 +423,7 @@ gpio_open(struct inode *inode, struct file *filp)
423 if (p > GPIO_MINOR_LAST) 423 if (p > GPIO_MINOR_LAST)
424 return -EINVAL; 424 return -EINVAL;
425 425
426 priv = (struct gpio_private *)kmalloc(sizeof(struct gpio_private), 426 priv = kmalloc(sizeof(struct gpio_private),
427 GFP_KERNEL); 427 GFP_KERNEL);
428 428
429 if (!priv) 429 if (!priv)
diff --git a/arch/cris/arch-v32/kernel/signal.c b/arch/cris/arch-v32/kernel/signal.c
index 99e59b3eacf8..7cd6ac803409 100644
--- a/arch/cris/arch-v32/kernel/signal.c
+++ b/arch/cris/arch-v32/kernel/signal.c
@@ -686,7 +686,7 @@ keep_debug_flags(unsigned long oldccs, unsigned long oldspc,
686int __init 686int __init
687cris_init_signal(void) 687cris_init_signal(void)
688{ 688{
689 u16* data = (u16*)kmalloc(PAGE_SIZE, GFP_KERNEL); 689 u16* data = kmalloc(PAGE_SIZE, GFP_KERNEL);
690 690
691 /* This is movu.w __NR_sigreturn, r9; break 13; */ 691 /* This is movu.w __NR_sigreturn, r9; break 13; */
692 data[0] = 0x9c5f; 692 data[0] = 0x9c5f;
diff --git a/arch/cris/kernel/profile.c b/arch/cris/kernel/profile.c
index 69c52189f044..f60ab785f235 100644
--- a/arch/cris/kernel/profile.c
+++ b/arch/cris/kernel/profile.c
@@ -59,7 +59,7 @@ static int
59__init init_cris_profile(void) 59__init init_cris_profile(void)
60{ 60{
61 struct proc_dir_entry *entry; 61 struct proc_dir_entry *entry;
62 sample_buffer = (char*)kmalloc(SAMPLE_BUFFER_SIZE, GFP_KERNEL); 62 sample_buffer = kmalloc(SAMPLE_BUFFER_SIZE, GFP_KERNEL);
63 sample_buffer_pos = sample_buffer; 63 sample_buffer_pos = sample_buffer;
64 entry = create_proc_entry("system_profile", S_IWUSR | S_IRUGO, NULL); 64 entry = create_proc_entry("system_profile", S_IWUSR | S_IRUGO, NULL);
65 if (entry) { 65 if (entry) {
diff --git a/arch/frv/kernel/pm.c b/arch/frv/kernel/pm.c
index c1d9fc8f1a85..ee677ced7b68 100644
--- a/arch/frv/kernel/pm.c
+++ b/arch/frv/kernel/pm.c
@@ -223,7 +223,7 @@ static int cmode_procctl(ctl_table *ctl, int write, struct file *filp,
223 223
224static int cmode_sysctl(ctl_table *table, int __user *name, int nlen, 224static int cmode_sysctl(ctl_table *table, int __user *name, int nlen,
225 void __user *oldval, size_t __user *oldlenp, 225 void __user *oldval, size_t __user *oldlenp,
226 void __user *newval, size_t newlen, void **context) 226 void __user *newval, size_t newlen)
227{ 227{
228 if (oldval && oldlenp) { 228 if (oldval && oldlenp) {
229 size_t oldlen; 229 size_t oldlen;
@@ -326,7 +326,7 @@ static int p0_procctl(ctl_table *ctl, int write, struct file *filp,
326 326
327static int p0_sysctl(ctl_table *table, int __user *name, int nlen, 327static int p0_sysctl(ctl_table *table, int __user *name, int nlen,
328 void __user *oldval, size_t __user *oldlenp, 328 void __user *oldval, size_t __user *oldlenp,
329 void __user *newval, size_t newlen, void **context) 329 void __user *newval, size_t newlen)
330{ 330{
331 if (oldval && oldlenp) { 331 if (oldval && oldlenp) {
332 size_t oldlen; 332 size_t oldlen;
@@ -370,7 +370,7 @@ static int cm_procctl(ctl_table *ctl, int write, struct file *filp,
370 370
371static int cm_sysctl(ctl_table *table, int __user *name, int nlen, 371static int cm_sysctl(ctl_table *table, int __user *name, int nlen,
372 void __user *oldval, size_t __user *oldlenp, 372 void __user *oldval, size_t __user *oldlenp,
373 void __user *newval, size_t newlen, void **context) 373 void __user *newval, size_t newlen)
374{ 374{
375 if (oldval && oldlenp) { 375 if (oldval && oldlenp) {
376 size_t oldlen; 376 size_t oldlen;
diff --git a/arch/h8300/kernel/ints.c b/arch/h8300/kernel/ints.c
index 1bfc77e391d5..587ef7f4fcc7 100644
--- a/arch/h8300/kernel/ints.c
+++ b/arch/h8300/kernel/ints.c
@@ -141,7 +141,7 @@ int request_irq(unsigned int irq,
141 return -EBUSY; 141 return -EBUSY;
142 142
143 if (use_kmalloc) 143 if (use_kmalloc)
144 irq_handle = (irq_handler_t *)kmalloc(sizeof(irq_handler_t), GFP_ATOMIC); 144 irq_handle = kmalloc(sizeof(irq_handler_t), GFP_ATOMIC);
145 else { 145 else {
146 /* use bootmem allocater */ 146 /* use bootmem allocater */
147 irq_handle = (irq_handler_t *)alloc_bootmem(sizeof(irq_handler_t)); 147 irq_handle = (irq_handler_t *)alloc_bootmem(sizeof(irq_handler_t));
diff --git a/arch/h8300/platform/h8s/ints.c b/arch/h8300/platform/h8s/ints.c
index 270440de4610..567f681ddfec 100644
--- a/arch/h8300/platform/h8s/ints.c
+++ b/arch/h8300/platform/h8s/ints.c
@@ -176,7 +176,7 @@ int request_irq(unsigned int irq,
176 } 176 }
177 177
178 if (use_kmalloc) 178 if (use_kmalloc)
179 irq_handle = (irq_handler_t *)kmalloc(sizeof(irq_handler_t), GFP_ATOMIC); 179 irq_handle = kmalloc(sizeof(irq_handler_t), GFP_ATOMIC);
180 else { 180 else {
181 /* use bootmem allocater */ 181 /* use bootmem allocater */
182 irq_handle = (irq_handler_t *)alloc_bootmem(sizeof(irq_handler_t)); 182 irq_handle = (irq_handler_t *)alloc_bootmem(sizeof(irq_handler_t));
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index a97847da9ed5..b75cff25de4b 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -1604,7 +1604,7 @@ static int do_open(struct inode * inode, struct file * filp)
1604{ 1604{
1605 struct apm_user * as; 1605 struct apm_user * as;
1606 1606
1607 as = (struct apm_user *)kmalloc(sizeof(*as), GFP_KERNEL); 1607 as = kmalloc(sizeof(*as), GFP_KERNEL);
1608 if (as == NULL) { 1608 if (as == NULL) {
1609 printk(KERN_ERR "apm: cannot allocate struct of size %d bytes\n", 1609 printk(KERN_ERR "apm: cannot allocate struct of size %d bytes\n",
1610 sizeof(*as)); 1610 sizeof(*as));
diff --git a/arch/i386/kernel/cpu/cpufreq/Kconfig b/arch/i386/kernel/cpu/cpufreq/Kconfig
index ccc1edff5c97..5299c5bf4454 100644
--- a/arch/i386/kernel/cpu/cpufreq/Kconfig
+++ b/arch/i386/kernel/cpu/cpufreq/Kconfig
@@ -17,6 +17,7 @@ config X86_ACPI_CPUFREQ
17 help 17 help
18 This driver adds a CPUFreq driver which utilizes the ACPI 18 This driver adds a CPUFreq driver which utilizes the ACPI
19 Processor Performance States. 19 Processor Performance States.
20 This driver also supports Intel Enhanced Speedstep.
20 21
21 For details, take a look at <file:Documentation/cpu-freq/>. 22 For details, take a look at <file:Documentation/cpu-freq/>.
22 23
@@ -121,11 +122,14 @@ config X86_SPEEDSTEP_CENTRINO
121 If in doubt, say N. 122 If in doubt, say N.
122 123
123config X86_SPEEDSTEP_CENTRINO_ACPI 124config X86_SPEEDSTEP_CENTRINO_ACPI
124 bool "Use ACPI tables to decode valid frequency/voltage pairs" 125 bool "Use ACPI tables to decode valid frequency/voltage (deprecated)"
125 depends on X86_SPEEDSTEP_CENTRINO && ACPI_PROCESSOR 126 depends on X86_SPEEDSTEP_CENTRINO && ACPI_PROCESSOR
126 depends on !(X86_SPEEDSTEP_CENTRINO = y && ACPI_PROCESSOR = m) 127 depends on !(X86_SPEEDSTEP_CENTRINO = y && ACPI_PROCESSOR = m)
127 default y 128 default y
128 help 129 help
130 This is deprecated and this functionality is now merged into
131 acpi_cpufreq (X86_ACPI_CPUFREQ). Use that driver instead of
132 speedstep_centrino.
129 Use primarily the information provided in the BIOS ACPI tables 133 Use primarily the information provided in the BIOS ACPI tables
130 to determine valid CPU frequency and voltage pairings. It is 134 to determine valid CPU frequency and voltage pairings. It is
131 required for the driver to work on non-Banias CPUs. 135 required for the driver to work on non-Banias CPUs.
diff --git a/arch/i386/kernel/cpu/cpufreq/Makefile b/arch/i386/kernel/cpu/cpufreq/Makefile
index 2e894f1c8910..8de3abe322a9 100644
--- a/arch/i386/kernel/cpu/cpufreq/Makefile
+++ b/arch/i386/kernel/cpu/cpufreq/Makefile
@@ -7,9 +7,9 @@ obj-$(CONFIG_SC520_CPUFREQ) += sc520_freq.o
7obj-$(CONFIG_X86_LONGRUN) += longrun.o 7obj-$(CONFIG_X86_LONGRUN) += longrun.o
8obj-$(CONFIG_X86_GX_SUSPMOD) += gx-suspmod.o 8obj-$(CONFIG_X86_GX_SUSPMOD) += gx-suspmod.o
9obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o 9obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o
10obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
11obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o 10obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o
12obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o 11obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o
13obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o 12obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o
13obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
14obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o 14obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
15obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o 15obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 57c880bf0bd6..18f4715c655d 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -1,9 +1,10 @@
1/* 1/*
2 * acpi-cpufreq.c - ACPI Processor P-States Driver ($Revision: 1.3 $) 2 * acpi-cpufreq.c - ACPI Processor P-States Driver ($Revision: 1.4 $)
3 * 3 *
4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6 * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de> 6 * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
7 * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com>
7 * 8 *
8 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 9 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9 * 10 *
@@ -27,202 +28,387 @@
27#include <linux/kernel.h> 28#include <linux/kernel.h>
28#include <linux/module.h> 29#include <linux/module.h>
29#include <linux/init.h> 30#include <linux/init.h>
31#include <linux/smp.h>
32#include <linux/sched.h>
30#include <linux/cpufreq.h> 33#include <linux/cpufreq.h>
31#include <linux/proc_fs.h>
32#include <linux/seq_file.h>
33#include <linux/compiler.h> 34#include <linux/compiler.h>
34#include <linux/sched.h> /* current */
35#include <linux/dmi.h> 35#include <linux/dmi.h>
36#include <asm/io.h>
37#include <asm/delay.h>
38#include <asm/uaccess.h>
39 36
40#include <linux/acpi.h> 37#include <linux/acpi.h>
41#include <acpi/processor.h> 38#include <acpi/processor.h>
42 39
40#include <asm/io.h>
41#include <asm/msr.h>
42#include <asm/processor.h>
43#include <asm/cpufeature.h>
44#include <asm/delay.h>
45#include <asm/uaccess.h>
46
43#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "acpi-cpufreq", msg) 47#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "acpi-cpufreq", msg)
44 48
45MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski"); 49MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
46MODULE_DESCRIPTION("ACPI Processor P-States Driver"); 50MODULE_DESCRIPTION("ACPI Processor P-States Driver");
47MODULE_LICENSE("GPL"); 51MODULE_LICENSE("GPL");
48 52
53enum {
54 UNDEFINED_CAPABLE = 0,
55 SYSTEM_INTEL_MSR_CAPABLE,
56 SYSTEM_IO_CAPABLE,
57};
58
59#define INTEL_MSR_RANGE (0xffff)
60#define CPUID_6_ECX_APERFMPERF_CAPABILITY (0x1)
49 61
50struct cpufreq_acpi_io { 62struct acpi_cpufreq_data {
51 struct acpi_processor_performance *acpi_data; 63 struct acpi_processor_performance *acpi_data;
52 struct cpufreq_frequency_table *freq_table; 64 struct cpufreq_frequency_table *freq_table;
53 unsigned int resume; 65 unsigned int max_freq;
66 unsigned int resume;
67 unsigned int cpu_feature;
54}; 68};
55 69
56static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS]; 70static struct acpi_cpufreq_data *drv_data[NR_CPUS];
57static struct acpi_processor_performance *acpi_perf_data[NR_CPUS]; 71static struct acpi_processor_performance *acpi_perf_data[NR_CPUS];
58 72
59static struct cpufreq_driver acpi_cpufreq_driver; 73static struct cpufreq_driver acpi_cpufreq_driver;
60 74
61static unsigned int acpi_pstate_strict; 75static unsigned int acpi_pstate_strict;
62 76
63static int 77static int check_est_cpu(unsigned int cpuid)
64acpi_processor_write_port( 78{
65 u16 port, 79 struct cpuinfo_x86 *cpu = &cpu_data[cpuid];
66 u8 bit_width, 80
67 u32 value) 81 if (cpu->x86_vendor != X86_VENDOR_INTEL ||
82 !cpu_has(cpu, X86_FEATURE_EST))
83 return 0;
84
85 return 1;
86}
87
88static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
89{
90 struct acpi_processor_performance *perf;
91 int i;
92
93 perf = data->acpi_data;
94
95 for (i=0; i<perf->state_count; i++) {
96 if (value == perf->states[i].status)
97 return data->freq_table[i].frequency;
98 }
99 return 0;
100}
101
102static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
68{ 103{
69 if (bit_width <= 8) { 104 int i;
105 struct acpi_processor_performance *perf;
106
107 msr &= INTEL_MSR_RANGE;
108 perf = data->acpi_data;
109
110 for (i=0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
111 if (msr == perf->states[data->freq_table[i].index].status)
112 return data->freq_table[i].frequency;
113 }
114 return data->freq_table[0].frequency;
115}
116
117static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
118{
119 switch (data->cpu_feature) {
120 case SYSTEM_INTEL_MSR_CAPABLE:
121 return extract_msr(val, data);
122 case SYSTEM_IO_CAPABLE:
123 return extract_io(val, data);
124 default:
125 return 0;
126 }
127}
128
129static void wrport(u16 port, u8 bit_width, u32 value)
130{
131 if (bit_width <= 8)
70 outb(value, port); 132 outb(value, port);
71 } else if (bit_width <= 16) { 133 else if (bit_width <= 16)
72 outw(value, port); 134 outw(value, port);
73 } else if (bit_width <= 32) { 135 else if (bit_width <= 32)
74 outl(value, port); 136 outl(value, port);
75 } else {
76 return -ENODEV;
77 }
78 return 0;
79} 137}
80 138
81static int 139static void rdport(u16 port, u8 bit_width, u32 * ret)
82acpi_processor_read_port(
83 u16 port,
84 u8 bit_width,
85 u32 *ret)
86{ 140{
87 *ret = 0; 141 *ret = 0;
88 if (bit_width <= 8) { 142 if (bit_width <= 8)
89 *ret = inb(port); 143 *ret = inb(port);
90 } else if (bit_width <= 16) { 144 else if (bit_width <= 16)
91 *ret = inw(port); 145 *ret = inw(port);
92 } else if (bit_width <= 32) { 146 else if (bit_width <= 32)
93 *ret = inl(port); 147 *ret = inl(port);
94 } else { 148}
95 return -ENODEV; 149
150struct msr_addr {
151 u32 reg;
152};
153
154struct io_addr {
155 u16 port;
156 u8 bit_width;
157};
158
159typedef union {
160 struct msr_addr msr;
161 struct io_addr io;
162} drv_addr_union;
163
164struct drv_cmd {
165 unsigned int type;
166 cpumask_t mask;
167 drv_addr_union addr;
168 u32 val;
169};
170
171static void do_drv_read(struct drv_cmd *cmd)
172{
173 u32 h;
174
175 switch (cmd->type) {
176 case SYSTEM_INTEL_MSR_CAPABLE:
177 rdmsr(cmd->addr.msr.reg, cmd->val, h);
178 break;
179 case SYSTEM_IO_CAPABLE:
180 rdport(cmd->addr.io.port, cmd->addr.io.bit_width, &cmd->val);
181 break;
182 default:
183 break;
96 } 184 }
97 return 0;
98} 185}
99 186
100static int 187static void do_drv_write(struct drv_cmd *cmd)
101acpi_processor_set_performance (
102 struct cpufreq_acpi_io *data,
103 unsigned int cpu,
104 int state)
105{ 188{
106 u16 port = 0; 189 u32 h = 0;
107 u8 bit_width = 0; 190
108 int i = 0; 191 switch (cmd->type) {
109 int ret = 0; 192 case SYSTEM_INTEL_MSR_CAPABLE:
110 u32 value = 0; 193 wrmsr(cmd->addr.msr.reg, cmd->val, h);
111 int retval; 194 break;
112 struct acpi_processor_performance *perf; 195 case SYSTEM_IO_CAPABLE:
113 196 wrport(cmd->addr.io.port, cmd->addr.io.bit_width, cmd->val);
114 dprintk("acpi_processor_set_performance\n"); 197 break;
115 198 default:
116 retval = 0; 199 break;
117 perf = data->acpi_data;
118 if (state == perf->state) {
119 if (unlikely(data->resume)) {
120 dprintk("Called after resume, resetting to P%d\n", state);
121 data->resume = 0;
122 } else {
123 dprintk("Already at target state (P%d)\n", state);
124 return (retval);
125 }
126 } 200 }
201}
127 202
128 dprintk("Transitioning from P%d to P%d\n", perf->state, state); 203static void drv_read(struct drv_cmd *cmd)
204{
205 cpumask_t saved_mask = current->cpus_allowed;
206 cmd->val = 0;
129 207
130 /* 208 set_cpus_allowed(current, cmd->mask);
131 * First we write the target state's 'control' value to the 209 do_drv_read(cmd);
132 * control_register. 210 set_cpus_allowed(current, saved_mask);
133 */ 211}
212
213static void drv_write(struct drv_cmd *cmd)
214{
215 cpumask_t saved_mask = current->cpus_allowed;
216 unsigned int i;
217
218 for_each_cpu_mask(i, cmd->mask) {
219 set_cpus_allowed(current, cpumask_of_cpu(i));
220 do_drv_write(cmd);
221 }
222
223 set_cpus_allowed(current, saved_mask);
224 return;
225}
226
227static u32 get_cur_val(cpumask_t mask)
228{
229 struct acpi_processor_performance *perf;
230 struct drv_cmd cmd;
231
232 if (unlikely(cpus_empty(mask)))
233 return 0;
234
235 switch (drv_data[first_cpu(mask)]->cpu_feature) {
236 case SYSTEM_INTEL_MSR_CAPABLE:
237 cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
238 cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
239 break;
240 case SYSTEM_IO_CAPABLE:
241 cmd.type = SYSTEM_IO_CAPABLE;
242 perf = drv_data[first_cpu(mask)]->acpi_data;
243 cmd.addr.io.port = perf->control_register.address;
244 cmd.addr.io.bit_width = perf->control_register.bit_width;
245 break;
246 default:
247 return 0;
248 }
249
250 cmd.mask = mask;
134 251
135 port = perf->control_register.address; 252 drv_read(&cmd);
136 bit_width = perf->control_register.bit_width;
137 value = (u32) perf->states[state].control;
138 253
139 dprintk("Writing 0x%08x to port 0x%04x\n", value, port); 254 dprintk("get_cur_val = %u\n", cmd.val);
140 255
141 ret = acpi_processor_write_port(port, bit_width, value); 256 return cmd.val;
142 if (ret) { 257}
143 dprintk("Invalid port width 0x%04x\n", bit_width); 258
144 return (ret); 259/*
260 * Return the measured active (C0) frequency on this CPU since last call
261 * to this function.
262 * Input: cpu number
263 * Return: Average CPU frequency in terms of max frequency (zero on error)
264 *
265 * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance
266 * over a period of time, while CPU is in C0 state.
267 * IA32_MPERF counts at the rate of max advertised frequency
268 * IA32_APERF counts at the rate of actual CPU frequency
269 * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
270 * no meaning should be associated with absolute values of these MSRs.
271 */
272static unsigned int get_measured_perf(unsigned int cpu)
273{
274 union {
275 struct {
276 u32 lo;
277 u32 hi;
278 } split;
279 u64 whole;
280 } aperf_cur, mperf_cur;
281
282 cpumask_t saved_mask;
283 unsigned int perf_percent;
284 unsigned int retval;
285
286 saved_mask = current->cpus_allowed;
287 set_cpus_allowed(current, cpumask_of_cpu(cpu));
288 if (get_cpu() != cpu) {
289 /* We were not able to run on requested processor */
290 put_cpu();
291 return 0;
145 } 292 }
146 293
294 rdmsr(MSR_IA32_APERF, aperf_cur.split.lo, aperf_cur.split.hi);
295 rdmsr(MSR_IA32_MPERF, mperf_cur.split.lo, mperf_cur.split.hi);
296
297 wrmsr(MSR_IA32_APERF, 0,0);
298 wrmsr(MSR_IA32_MPERF, 0,0);
299
300#ifdef __i386__
147 /* 301 /*
148 * Assume the write went through when acpi_pstate_strict is not used. 302 * We dont want to do 64 bit divide with 32 bit kernel
149 * As read status_register is an expensive operation and there 303 * Get an approximate value. Return failure in case we cannot get
150 * are no specific error cases where an IO port write will fail. 304 * an approximate value.
151 */ 305 */
152 if (acpi_pstate_strict) { 306 if (unlikely(aperf_cur.split.hi || mperf_cur.split.hi)) {
153 /* Then we read the 'status_register' and compare the value 307 int shift_count;
154 * with the target state's 'status' to make sure the 308 u32 h;
155 * transition was successful. 309
156 * Note that we'll poll for up to 1ms (100 cycles of 10us) 310 h = max_t(u32, aperf_cur.split.hi, mperf_cur.split.hi);
157 * before giving up. 311 shift_count = fls(h);
158 */ 312
159 313 aperf_cur.whole >>= shift_count;
160 port = perf->status_register.address; 314 mperf_cur.whole >>= shift_count;
161 bit_width = perf->status_register.bit_width; 315 }
162 316
163 dprintk("Looking for 0x%08x from port 0x%04x\n", 317 if (((unsigned long)(-1) / 100) < aperf_cur.split.lo) {
164 (u32) perf->states[state].status, port); 318 int shift_count = 7;
165 319 aperf_cur.split.lo >>= shift_count;
166 for (i = 0; i < 100; i++) { 320 mperf_cur.split.lo >>= shift_count;
167 ret = acpi_processor_read_port(port, bit_width, &value); 321 }
168 if (ret) { 322
169 dprintk("Invalid port width 0x%04x\n", bit_width); 323 if (aperf_cur.split.lo && mperf_cur.split.lo)
170 return (ret); 324 perf_percent = (aperf_cur.split.lo * 100) / mperf_cur.split.lo;
171 } 325 else
172 if (value == (u32) perf->states[state].status) 326 perf_percent = 0;
173 break; 327
174 udelay(10); 328#else
175 } 329 if (unlikely(((unsigned long)(-1) / 100) < aperf_cur.whole)) {
176 } else { 330 int shift_count = 7;
177 value = (u32) perf->states[state].status; 331 aperf_cur.whole >>= shift_count;
332 mperf_cur.whole >>= shift_count;
178 } 333 }
179 334
180 if (unlikely(value != (u32) perf->states[state].status)) { 335 if (aperf_cur.whole && mperf_cur.whole)
181 printk(KERN_WARNING "acpi-cpufreq: Transition failed\n"); 336 perf_percent = (aperf_cur.whole * 100) / mperf_cur.whole;
182 retval = -ENODEV; 337 else
183 return (retval); 338 perf_percent = 0;
339
340#endif
341
342 retval = drv_data[cpu]->max_freq * perf_percent / 100;
343
344 put_cpu();
345 set_cpus_allowed(current, saved_mask);
346
347 dprintk("cpu %d: performance percent %d\n", cpu, perf_percent);
348 return retval;
349}
350
351static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
352{
353 struct acpi_cpufreq_data *data = drv_data[cpu];
354 unsigned int freq;
355
356 dprintk("get_cur_freq_on_cpu (%d)\n", cpu);
357
358 if (unlikely(data == NULL ||
359 data->acpi_data == NULL || data->freq_table == NULL)) {
360 return 0;
184 } 361 }
185 362
186 dprintk("Transition successful after %d microseconds\n", i * 10); 363 freq = extract_freq(get_cur_val(cpumask_of_cpu(cpu)), data);
364 dprintk("cur freq = %u\n", freq);
187 365
188 perf->state = state; 366 return freq;
189 return (retval);
190} 367}
191 368
369static unsigned int check_freqs(cpumask_t mask, unsigned int freq,
370 struct acpi_cpufreq_data *data)
371{
372 unsigned int cur_freq;
373 unsigned int i;
374
375 for (i=0; i<100; i++) {
376 cur_freq = extract_freq(get_cur_val(mask), data);
377 if (cur_freq == freq)
378 return 1;
379 udelay(10);
380 }
381 return 0;
382}
192 383
193static int 384static int acpi_cpufreq_target(struct cpufreq_policy *policy,
194acpi_cpufreq_target ( 385 unsigned int target_freq, unsigned int relation)
195 struct cpufreq_policy *policy,
196 unsigned int target_freq,
197 unsigned int relation)
198{ 386{
199 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; 387 struct acpi_cpufreq_data *data = drv_data[policy->cpu];
200 struct acpi_processor_performance *perf; 388 struct acpi_processor_performance *perf;
201 struct cpufreq_freqs freqs; 389 struct cpufreq_freqs freqs;
202 cpumask_t online_policy_cpus; 390 cpumask_t online_policy_cpus;
203 cpumask_t saved_mask; 391 struct drv_cmd cmd;
204 cpumask_t set_mask; 392 unsigned int msr;
205 cpumask_t covered_cpus;
206 unsigned int cur_state = 0;
207 unsigned int next_state = 0; 393 unsigned int next_state = 0;
208 unsigned int result = 0; 394 unsigned int next_perf_state = 0;
209 unsigned int j; 395 unsigned int i;
210 unsigned int tmp; 396 int result = 0;
211 397
212 dprintk("acpi_cpufreq_setpolicy\n"); 398 dprintk("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);
213 399
214 result = cpufreq_frequency_table_target(policy, 400 if (unlikely(data == NULL ||
215 data->freq_table, 401 data->acpi_data == NULL || data->freq_table == NULL)) {
216 target_freq, 402 return -ENODEV;
217 relation, 403 }
218 &next_state);
219 if (unlikely(result))
220 return (result);
221 404
222 perf = data->acpi_data; 405 perf = data->acpi_data;
223 cur_state = perf->state; 406 result = cpufreq_frequency_table_target(policy,
224 freqs.old = data->freq_table[cur_state].frequency; 407 data->freq_table,
225 freqs.new = data->freq_table[next_state].frequency; 408 target_freq,
409 relation, &next_state);
410 if (unlikely(result))
411 return -ENODEV;
226 412
227#ifdef CONFIG_HOTPLUG_CPU 413#ifdef CONFIG_HOTPLUG_CPU
228 /* cpufreq holds the hotplug lock, so we are safe from here on */ 414 /* cpufreq holds the hotplug lock, so we are safe from here on */
@@ -231,106 +417,84 @@ acpi_cpufreq_target (
231 online_policy_cpus = policy->cpus; 417 online_policy_cpus = policy->cpus;
232#endif 418#endif
233 419
234 for_each_cpu_mask(j, online_policy_cpus) { 420 next_perf_state = data->freq_table[next_state].index;
235 freqs.cpu = j; 421 if (perf->state == next_perf_state) {
236 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 422 if (unlikely(data->resume)) {
423 dprintk("Called after resume, resetting to P%d\n",
424 next_perf_state);
425 data->resume = 0;
426 } else {
427 dprintk("Already at target state (P%d)\n",
428 next_perf_state);
429 return 0;
430 }
237 } 431 }
238 432
239 /* 433 switch (data->cpu_feature) {
240 * We need to call driver->target() on all or any CPU in 434 case SYSTEM_INTEL_MSR_CAPABLE:
241 * policy->cpus, depending on policy->shared_type. 435 cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
242 */ 436 cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
243 saved_mask = current->cpus_allowed; 437 msr =
244 cpus_clear(covered_cpus); 438 (u32) perf->states[next_perf_state].
245 for_each_cpu_mask(j, online_policy_cpus) { 439 control & INTEL_MSR_RANGE;
246 /* 440 cmd.val = (cmd.val & ~INTEL_MSR_RANGE) | msr;
247 * Support for SMP systems. 441 break;
248 * Make sure we are running on CPU that wants to change freq 442 case SYSTEM_IO_CAPABLE:
249 */ 443 cmd.type = SYSTEM_IO_CAPABLE;
250 cpus_clear(set_mask); 444 cmd.addr.io.port = perf->control_register.address;
251 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) 445 cmd.addr.io.bit_width = perf->control_register.bit_width;
252 cpus_or(set_mask, set_mask, online_policy_cpus); 446 cmd.val = (u32) perf->states[next_perf_state].control;
253 else 447 break;
254 cpu_set(j, set_mask); 448 default:
255 449 return -ENODEV;
256 set_cpus_allowed(current, set_mask); 450 }
257 if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
258 dprintk("couldn't limit to CPUs in this domain\n");
259 result = -EAGAIN;
260 break;
261 }
262 451
263 result = acpi_processor_set_performance (data, j, next_state); 452 cpus_clear(cmd.mask);
264 if (result) {
265 result = -EAGAIN;
266 break;
267 }
268 453
269 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) 454 if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
270 break; 455 cmd.mask = online_policy_cpus;
271 456 else
272 cpu_set(j, covered_cpus); 457 cpu_set(policy->cpu, cmd.mask);
273 }
274 458
275 for_each_cpu_mask(j, online_policy_cpus) { 459 freqs.old = data->freq_table[perf->state].frequency;
276 freqs.cpu = j; 460 freqs.new = data->freq_table[next_perf_state].frequency;
277 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 461 for_each_cpu_mask(i, cmd.mask) {
462 freqs.cpu = i;
463 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
278 } 464 }
279 465
280 if (unlikely(result)) { 466 drv_write(&cmd);
281 /*
282 * We have failed halfway through the frequency change.
283 * We have sent callbacks to online_policy_cpus and
284 * acpi_processor_set_performance() has been called on
285 * coverd_cpus. Best effort undo..
286 */
287
288 if (!cpus_empty(covered_cpus)) {
289 for_each_cpu_mask(j, covered_cpus) {
290 policy->cpu = j;
291 acpi_processor_set_performance (data,
292 j,
293 cur_state);
294 }
295 }
296 467
297 tmp = freqs.new; 468 if (acpi_pstate_strict) {
298 freqs.new = freqs.old; 469 if (!check_freqs(cmd.mask, freqs.new, data)) {
299 freqs.old = tmp; 470 dprintk("acpi_cpufreq_target failed (%d)\n",
300 for_each_cpu_mask(j, online_policy_cpus) { 471 policy->cpu);
301 freqs.cpu = j; 472 return -EAGAIN;
302 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
303 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
304 } 473 }
305 } 474 }
306 475
307 set_cpus_allowed(current, saved_mask); 476 for_each_cpu_mask(i, cmd.mask) {
308 return (result); 477 freqs.cpu = i;
309} 478 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
479 }
480 perf->state = next_perf_state;
310 481
482 return result;
483}
311 484
312static int 485static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
313acpi_cpufreq_verify (
314 struct cpufreq_policy *policy)
315{ 486{
316 unsigned int result = 0; 487 struct acpi_cpufreq_data *data = drv_data[policy->cpu];
317 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
318 488
319 dprintk("acpi_cpufreq_verify\n"); 489 dprintk("acpi_cpufreq_verify\n");
320 490
321 result = cpufreq_frequency_table_verify(policy, 491 return cpufreq_frequency_table_verify(policy, data->freq_table);
322 data->freq_table);
323
324 return (result);
325} 492}
326 493
327
328static unsigned long 494static unsigned long
329acpi_cpufreq_guess_freq ( 495acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
330 struct cpufreq_acpi_io *data,
331 unsigned int cpu)
332{ 496{
333 struct acpi_processor_performance *perf = data->acpi_data; 497 struct acpi_processor_performance *perf = data->acpi_data;
334 498
335 if (cpu_khz) { 499 if (cpu_khz) {
336 /* search the closest match to cpu_khz */ 500 /* search the closest match to cpu_khz */
@@ -338,16 +502,16 @@ acpi_cpufreq_guess_freq (
338 unsigned long freq; 502 unsigned long freq;
339 unsigned long freqn = perf->states[0].core_frequency * 1000; 503 unsigned long freqn = perf->states[0].core_frequency * 1000;
340 504
341 for (i = 0; i < (perf->state_count - 1); i++) { 505 for (i=0; i<(perf->state_count-1); i++) {
342 freq = freqn; 506 freq = freqn;
343 freqn = perf->states[i+1].core_frequency * 1000; 507 freqn = perf->states[i+1].core_frequency * 1000;
344 if ((2 * cpu_khz) > (freqn + freq)) { 508 if ((2 * cpu_khz) > (freqn + freq)) {
345 perf->state = i; 509 perf->state = i;
346 return (freq); 510 return freq;
347 } 511 }
348 } 512 }
349 perf->state = perf->state_count - 1; 513 perf->state = perf->state_count-1;
350 return (freqn); 514 return freqn;
351 } else { 515 } else {
352 /* assume CPU is at P0... */ 516 /* assume CPU is at P0... */
353 perf->state = 0; 517 perf->state = 0;
@@ -355,7 +519,6 @@ acpi_cpufreq_guess_freq (
355 } 519 }
356} 520}
357 521
358
359/* 522/*
360 * acpi_cpufreq_early_init - initialize ACPI P-States library 523 * acpi_cpufreq_early_init - initialize ACPI P-States library
361 * 524 *
@@ -364,30 +527,34 @@ acpi_cpufreq_guess_freq (
364 * do _PDC and _PSD and find out the processor dependency for the 527 * do _PDC and _PSD and find out the processor dependency for the
365 * actual init that will happen later... 528 * actual init that will happen later...
366 */ 529 */
367static int acpi_cpufreq_early_init_acpi(void) 530static int acpi_cpufreq_early_init(void)
368{ 531{
369 struct acpi_processor_performance *data; 532 struct acpi_processor_performance *data;
370 unsigned int i, j; 533 cpumask_t covered;
534 unsigned int i, j;
371 535
372 dprintk("acpi_cpufreq_early_init\n"); 536 dprintk("acpi_cpufreq_early_init\n");
373 537
374 for_each_possible_cpu(i) { 538 for_each_possible_cpu(i) {
375 data = kzalloc(sizeof(struct acpi_processor_performance), 539 data = kzalloc(sizeof(struct acpi_processor_performance),
376 GFP_KERNEL); 540 GFP_KERNEL);
377 if (!data) { 541 if (!data) {
378 for_each_possible_cpu(j) { 542 for_each_cpu_mask(j, covered) {
379 kfree(acpi_perf_data[j]); 543 kfree(acpi_perf_data[j]);
380 acpi_perf_data[j] = NULL; 544 acpi_perf_data[j] = NULL;
381 } 545 }
382 return (-ENOMEM); 546 return -ENOMEM;
383 } 547 }
384 acpi_perf_data[i] = data; 548 acpi_perf_data[i] = data;
549 cpu_set(i, covered);
385 } 550 }
386 551
387 /* Do initialization in ACPI core */ 552 /* Do initialization in ACPI core */
388 return acpi_processor_preregister_performance(acpi_perf_data); 553 acpi_processor_preregister_performance(acpi_perf_data);
554 return 0;
389} 555}
390 556
557#ifdef CONFIG_SMP
391/* 558/*
392 * Some BIOSes do SW_ANY coordination internally, either set it up in hw 559 * Some BIOSes do SW_ANY coordination internally, either set it up in hw
393 * or do it in BIOS firmware and won't inform about it to OS. If not 560 * or do it in BIOS firmware and won't inform about it to OS. If not
@@ -414,39 +581,42 @@ static struct dmi_system_id sw_any_bug_dmi_table[] = {
414 }, 581 },
415 { } 582 { }
416}; 583};
584#endif
417 585
418static int 586static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
419acpi_cpufreq_cpu_init (
420 struct cpufreq_policy *policy)
421{ 587{
422 unsigned int i; 588 unsigned int i;
423 unsigned int cpu = policy->cpu; 589 unsigned int valid_states = 0;
424 struct cpufreq_acpi_io *data; 590 unsigned int cpu = policy->cpu;
425 unsigned int result = 0; 591 struct acpi_cpufreq_data *data;
592 unsigned int result = 0;
426 struct cpuinfo_x86 *c = &cpu_data[policy->cpu]; 593 struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
427 struct acpi_processor_performance *perf; 594 struct acpi_processor_performance *perf;
428 595
429 dprintk("acpi_cpufreq_cpu_init\n"); 596 dprintk("acpi_cpufreq_cpu_init\n");
430 597
431 if (!acpi_perf_data[cpu]) 598 if (!acpi_perf_data[cpu])
432 return (-ENODEV); 599 return -ENODEV;
433 600
434 data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL); 601 data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
435 if (!data) 602 if (!data)
436 return (-ENOMEM); 603 return -ENOMEM;
437 604
438 data->acpi_data = acpi_perf_data[cpu]; 605 data->acpi_data = acpi_perf_data[cpu];
439 acpi_io_data[cpu] = data; 606 drv_data[cpu] = data;
440 607
441 result = acpi_processor_register_performance(data->acpi_data, cpu); 608 if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
609 acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
442 610
611 result = acpi_processor_register_performance(data->acpi_data, cpu);
443 if (result) 612 if (result)
444 goto err_free; 613 goto err_free;
445 614
446 perf = data->acpi_data; 615 perf = data->acpi_data;
447 policy->shared_type = perf->shared_type; 616 policy->shared_type = perf->shared_type;
617
448 /* 618 /*
449 * Will let policy->cpus know about dependency only when software 619 * Will let policy->cpus know about dependency only when software
450 * coordination is required. 620 * coordination is required.
451 */ 621 */
452 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || 622 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
@@ -462,10 +632,6 @@ acpi_cpufreq_cpu_init (
462 } 632 }
463#endif 633#endif
464 634
465 if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
466 acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
467 }
468
469 /* capability check */ 635 /* capability check */
470 if (perf->state_count <= 1) { 636 if (perf->state_count <= 1) {
471 dprintk("No P-States\n"); 637 dprintk("No P-States\n");
@@ -473,17 +639,33 @@ acpi_cpufreq_cpu_init (
473 goto err_unreg; 639 goto err_unreg;
474 } 640 }
475 641
476 if ((perf->control_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO) || 642 if (perf->control_register.space_id != perf->status_register.space_id) {
477 (perf->status_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO)) { 643 result = -ENODEV;
478 dprintk("Unsupported address space [%d, %d]\n", 644 goto err_unreg;
479 (u32) (perf->control_register.space_id), 645 }
480 (u32) (perf->status_register.space_id)); 646
647 switch (perf->control_register.space_id) {
648 case ACPI_ADR_SPACE_SYSTEM_IO:
649 dprintk("SYSTEM IO addr space\n");
650 data->cpu_feature = SYSTEM_IO_CAPABLE;
651 break;
652 case ACPI_ADR_SPACE_FIXED_HARDWARE:
653 dprintk("HARDWARE addr space\n");
654 if (!check_est_cpu(cpu)) {
655 result = -ENODEV;
656 goto err_unreg;
657 }
658 data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
659 break;
660 default:
661 dprintk("Unknown addr space %d\n",
662 (u32) (perf->control_register.space_id));
481 result = -ENODEV; 663 result = -ENODEV;
482 goto err_unreg; 664 goto err_unreg;
483 } 665 }
484 666
485 /* alloc freq_table */ 667 data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
486 data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * (perf->state_count + 1), GFP_KERNEL); 668 (perf->state_count+1), GFP_KERNEL);
487 if (!data->freq_table) { 669 if (!data->freq_table) {
488 result = -ENOMEM; 670 result = -ENOMEM;
489 goto err_unreg; 671 goto err_unreg;
@@ -492,129 +674,140 @@ acpi_cpufreq_cpu_init (
492 /* detect transition latency */ 674 /* detect transition latency */
493 policy->cpuinfo.transition_latency = 0; 675 policy->cpuinfo.transition_latency = 0;
494 for (i=0; i<perf->state_count; i++) { 676 for (i=0; i<perf->state_count; i++) {
495 if ((perf->states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency) 677 if ((perf->states[i].transition_latency * 1000) >
496 policy->cpuinfo.transition_latency = perf->states[i].transition_latency * 1000; 678 policy->cpuinfo.transition_latency)
679 policy->cpuinfo.transition_latency =
680 perf->states[i].transition_latency * 1000;
497 } 681 }
498 policy->governor = CPUFREQ_DEFAULT_GOVERNOR; 682 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
499 683
500 /* The current speed is unknown and not detectable by ACPI... */ 684 data->max_freq = perf->states[0].core_frequency * 1000;
501 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
502
503 /* table init */ 685 /* table init */
504 for (i=0; i<=perf->state_count; i++) 686 for (i=0; i<perf->state_count; i++) {
505 { 687 if (i>0 && perf->states[i].core_frequency ==
506 data->freq_table[i].index = i; 688 perf->states[i-1].core_frequency)
507 if (i<perf->state_count) 689 continue;
508 data->freq_table[i].frequency = perf->states[i].core_frequency * 1000; 690
509 else 691 data->freq_table[valid_states].index = i;
510 data->freq_table[i].frequency = CPUFREQ_TABLE_END; 692 data->freq_table[valid_states].frequency =
693 perf->states[i].core_frequency * 1000;
694 valid_states++;
511 } 695 }
696 data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
512 697
513 result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); 698 result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
514 if (result) { 699 if (result)
515 goto err_freqfree; 700 goto err_freqfree;
701
702 switch (data->cpu_feature) {
703 case ACPI_ADR_SPACE_SYSTEM_IO:
704 /* Current speed is unknown and not detectable by IO port */
705 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
706 break;
707 case ACPI_ADR_SPACE_FIXED_HARDWARE:
708 acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
709 get_cur_freq_on_cpu(cpu);
710 break;
711 default:
712 break;
516 } 713 }
517 714
518 /* notify BIOS that we exist */ 715 /* notify BIOS that we exist */
519 acpi_processor_notify_smm(THIS_MODULE); 716 acpi_processor_notify_smm(THIS_MODULE);
520 717
521 printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management activated.\n", 718 /* Check for APERF/MPERF support in hardware */
522 cpu); 719 if (c->x86_vendor == X86_VENDOR_INTEL && c->cpuid_level >= 6) {
720 unsigned int ecx;
721 ecx = cpuid_ecx(6);
722 if (ecx & CPUID_6_ECX_APERFMPERF_CAPABILITY)
723 acpi_cpufreq_driver.getavg = get_measured_perf;
724 }
725
726 dprintk("CPU%u - ACPI performance management activated.\n", cpu);
523 for (i = 0; i < perf->state_count; i++) 727 for (i = 0; i < perf->state_count; i++)
524 dprintk(" %cP%d: %d MHz, %d mW, %d uS\n", 728 dprintk(" %cP%d: %d MHz, %d mW, %d uS\n",
525 (i == perf->state?'*':' '), i, 729 (i == perf->state ? '*' : ' '), i,
526 (u32) perf->states[i].core_frequency, 730 (u32) perf->states[i].core_frequency,
527 (u32) perf->states[i].power, 731 (u32) perf->states[i].power,
528 (u32) perf->states[i].transition_latency); 732 (u32) perf->states[i].transition_latency);
529 733
530 cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu); 734 cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
531 735
532 /* 736 /*
533 * the first call to ->target() should result in us actually 737 * the first call to ->target() should result in us actually
534 * writing something to the appropriate registers. 738 * writing something to the appropriate registers.
535 */ 739 */
536 data->resume = 1; 740 data->resume = 1;
537
538 return (result);
539 741
540 err_freqfree: 742 return result;
743
744err_freqfree:
541 kfree(data->freq_table); 745 kfree(data->freq_table);
542 err_unreg: 746err_unreg:
543 acpi_processor_unregister_performance(perf, cpu); 747 acpi_processor_unregister_performance(perf, cpu);
544 err_free: 748err_free:
545 kfree(data); 749 kfree(data);
546 acpi_io_data[cpu] = NULL; 750 drv_data[cpu] = NULL;
547 751
548 return (result); 752 return result;
549} 753}
550 754
551 755static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
552static int
553acpi_cpufreq_cpu_exit (
554 struct cpufreq_policy *policy)
555{ 756{
556 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; 757 struct acpi_cpufreq_data *data = drv_data[policy->cpu];
557
558 758
559 dprintk("acpi_cpufreq_cpu_exit\n"); 759 dprintk("acpi_cpufreq_cpu_exit\n");
560 760
561 if (data) { 761 if (data) {
562 cpufreq_frequency_table_put_attr(policy->cpu); 762 cpufreq_frequency_table_put_attr(policy->cpu);
563 acpi_io_data[policy->cpu] = NULL; 763 drv_data[policy->cpu] = NULL;
564 acpi_processor_unregister_performance(data->acpi_data, policy->cpu); 764 acpi_processor_unregister_performance(data->acpi_data,
765 policy->cpu);
565 kfree(data); 766 kfree(data);
566 } 767 }
567 768
568 return (0); 769 return 0;
569} 770}
570 771
571static int 772static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
572acpi_cpufreq_resume (
573 struct cpufreq_policy *policy)
574{ 773{
575 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; 774 struct acpi_cpufreq_data *data = drv_data[policy->cpu];
576
577 775
578 dprintk("acpi_cpufreq_resume\n"); 776 dprintk("acpi_cpufreq_resume\n");
579 777
580 data->resume = 1; 778 data->resume = 1;
581 779
582 return (0); 780 return 0;
583} 781}
584 782
585 783static struct freq_attr *acpi_cpufreq_attr[] = {
586static struct freq_attr* acpi_cpufreq_attr[] = {
587 &cpufreq_freq_attr_scaling_available_freqs, 784 &cpufreq_freq_attr_scaling_available_freqs,
588 NULL, 785 NULL,
589}; 786};
590 787
591static struct cpufreq_driver acpi_cpufreq_driver = { 788static struct cpufreq_driver acpi_cpufreq_driver = {
592 .verify = acpi_cpufreq_verify, 789 .verify = acpi_cpufreq_verify,
593 .target = acpi_cpufreq_target, 790 .target = acpi_cpufreq_target,
594 .init = acpi_cpufreq_cpu_init, 791 .init = acpi_cpufreq_cpu_init,
595 .exit = acpi_cpufreq_cpu_exit, 792 .exit = acpi_cpufreq_cpu_exit,
596 .resume = acpi_cpufreq_resume, 793 .resume = acpi_cpufreq_resume,
597 .name = "acpi-cpufreq", 794 .name = "acpi-cpufreq",
598 .owner = THIS_MODULE, 795 .owner = THIS_MODULE,
599 .attr = acpi_cpufreq_attr, 796 .attr = acpi_cpufreq_attr,
600}; 797};
601 798
602 799static int __init acpi_cpufreq_init(void)
603static int __init
604acpi_cpufreq_init (void)
605{ 800{
606 dprintk("acpi_cpufreq_init\n"); 801 dprintk("acpi_cpufreq_init\n");
607 802
608 acpi_cpufreq_early_init_acpi(); 803 acpi_cpufreq_early_init();
609 804
610 return cpufreq_register_driver(&acpi_cpufreq_driver); 805 return cpufreq_register_driver(&acpi_cpufreq_driver);
611} 806}
612 807
613 808static void __exit acpi_cpufreq_exit(void)
614static void __exit
615acpi_cpufreq_exit (void)
616{ 809{
617 unsigned int i; 810 unsigned int i;
618 dprintk("acpi_cpufreq_exit\n"); 811 dprintk("acpi_cpufreq_exit\n");
619 812
620 cpufreq_unregister_driver(&acpi_cpufreq_driver); 813 cpufreq_unregister_driver(&acpi_cpufreq_driver);
@@ -627,7 +820,9 @@ acpi_cpufreq_exit (void)
627} 820}
628 821
629module_param(acpi_pstate_strict, uint, 0644); 822module_param(acpi_pstate_strict, uint, 0644);
630MODULE_PARM_DESC(acpi_pstate_strict, "value 0 or non-zero. non-zero -> strict ACPI checks are performed during frequency changes."); 823MODULE_PARM_DESC(acpi_pstate_strict,
824 "value 0 or non-zero. non-zero -> strict ACPI checks are "
825 "performed during frequency changes.");
631 826
632late_initcall(acpi_cpufreq_init); 827late_initcall(acpi_cpufreq_init);
633module_exit(acpi_cpufreq_exit); 828module_exit(acpi_cpufreq_exit);
diff --git a/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c b/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
index 92afa3bc84f1..6667e9cceb9f 100644
--- a/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
+++ b/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
@@ -447,7 +447,6 @@ static int __init cpufreq_gx_init(void)
447 int ret; 447 int ret;
448 struct gxfreq_params *params; 448 struct gxfreq_params *params;
449 struct pci_dev *gx_pci; 449 struct pci_dev *gx_pci;
450 u32 class_rev;
451 450
452 /* Test if we have the right hardware */ 451 /* Test if we have the right hardware */
453 if ((gx_pci = gx_detect_chipset()) == NULL) 452 if ((gx_pci = gx_detect_chipset()) == NULL)
@@ -472,8 +471,7 @@ static int __init cpufreq_gx_init(void)
472 pci_read_config_byte(params->cs55x0, PCI_PMER2, &(params->pci_pmer2)); 471 pci_read_config_byte(params->cs55x0, PCI_PMER2, &(params->pci_pmer2));
473 pci_read_config_byte(params->cs55x0, PCI_MODON, &(params->on_duration)); 472 pci_read_config_byte(params->cs55x0, PCI_MODON, &(params->on_duration));
474 pci_read_config_byte(params->cs55x0, PCI_MODOFF, &(params->off_duration)); 473 pci_read_config_byte(params->cs55x0, PCI_MODOFF, &(params->off_duration));
475 pci_read_config_dword(params->cs55x0, PCI_CLASS_REVISION, &class_rev); 474 pci_read_config_byte(params->cs55x0, PCI_REVISION_ID, &params->pci_rev);
476 params->pci_rev = class_rev && 0xff;
477 475
478 if ((ret = cpufreq_register_driver(&gx_suspmod_driver))) { 476 if ((ret = cpufreq_register_driver(&gx_suspmod_driver))) {
479 kfree(params); 477 kfree(params);
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.c b/arch/i386/kernel/cpu/cpufreq/longhaul.c
index 7233abe5d695..c548daad3476 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.c
@@ -410,7 +410,7 @@ static int __init longhaul_get_ranges(void)
410 maxmult=longhaul_get_cpu_mult(); 410 maxmult=longhaul_get_cpu_mult();
411 411
412 /* Starting with the 1.2GHz parts, theres a 200MHz bus. */ 412 /* Starting with the 1.2GHz parts, theres a 200MHz bus. */
413 if ((cpu_khz/1000) > 1200) 413 if ((cpu_khz/maxmult) > 13400)
414 fsb = 200; 414 fsb = 200;
415 else 415 else
416 fsb = eblcr_fsb_table_v2[longhaul.bits.MaxMHzFSB]; 416 fsb = eblcr_fsb_table_v2[longhaul.bits.MaxMHzFSB];
@@ -583,6 +583,10 @@ static int enable_arbiter_disable(void)
583 if (dev == NULL) { 583 if (dev == NULL) {
584 reg = 0x76; 584 reg = 0x76;
585 dev = pci_find_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_862X_0, NULL); 585 dev = pci_find_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_862X_0, NULL);
586 /* Find CN400 V-Link host bridge */
587 if (dev == NULL)
588 dev = pci_find_device(PCI_VENDOR_ID_VIA, 0x7259, NULL);
589
586 } 590 }
587 if (dev != NULL) { 591 if (dev != NULL) {
588 /* Enable access to port 0x22 */ 592 /* Enable access to port 0x22 */
@@ -734,7 +738,7 @@ print_support_type:
734 return 0; 738 return 0;
735 739
736err_acpi: 740err_acpi:
737 printk(KERN_ERR PFX "No ACPI support. No VT8601 or VT8623 northbridge. Aborting.\n"); 741 printk(KERN_ERR PFX "No ACPI support. Unsupported northbridge. Aborting.\n");
738 return -ENODEV; 742 return -ENODEV;
739} 743}
740 744
diff --git a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
index 304d2eaa4a1b..bec50170b75a 100644
--- a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
@@ -163,29 +163,27 @@ static int cpufreq_p4_verify(struct cpufreq_policy *policy)
163 163
164static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) 164static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
165{ 165{
166 if ((c->x86 == 0x06) && (c->x86_model == 0x09)) { 166 if (c->x86 == 0x06) {
167 /* Pentium M (Banias) */ 167 if (cpu_has(c, X86_FEATURE_EST))
168 printk(KERN_WARNING PFX "Warning: Pentium M detected. " 168 printk(KERN_WARNING PFX "Warning: EST-capable CPU detected. "
169 "The speedstep_centrino module offers voltage scaling" 169 "The acpi-cpufreq module offers voltage scaling"
170 " in addition of frequency scaling. You should use " 170 " in addition of frequency scaling. You should use "
171 "that instead of p4-clockmod, if possible.\n"); 171 "that instead of p4-clockmod, if possible.\n");
172 return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PM); 172 switch (c->x86_model) {
173 } 173 case 0x0E: /* Core */
174 174 case 0x0F: /* Core Duo */
175 if ((c->x86 == 0x06) && (c->x86_model == 0x0D)) { 175 p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
176 /* Pentium M (Dothan) */ 176 return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PCORE);
177 printk(KERN_WARNING PFX "Warning: Pentium M detected. " 177 case 0x0D: /* Pentium M (Dothan) */
178 "The speedstep_centrino module offers voltage scaling" 178 p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
179 " in addition of frequency scaling. You should use " 179 /* fall through */
180 "that instead of p4-clockmod, if possible.\n"); 180 case 0x09: /* Pentium M (Banias) */
181 /* on P-4s, the TSC runs with constant frequency independent whether 181 return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PM);
182 * throttling is active or not. */ 182 }
183 p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
184 return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PM);
185 } 183 }
186 184
187 if (c->x86 != 0xF) { 185 if (c->x86 != 0xF) {
188 printk(KERN_WARNING PFX "Unknown p4-clockmod-capable CPU. Please send an e-mail to <linux@brodo.de>\n"); 186 printk(KERN_WARNING PFX "Unknown p4-clockmod-capable CPU. Please send an e-mail to <cpufreq@lists.linux.org.uk>\n");
189 return 0; 187 return 0;
190 } 188 }
191 189
diff --git a/arch/i386/kernel/cpu/cpufreq/sc520_freq.c b/arch/i386/kernel/cpu/cpufreq/sc520_freq.c
index ef457d50f4ac..b8fb4b521c62 100644
--- a/arch/i386/kernel/cpu/cpufreq/sc520_freq.c
+++ b/arch/i386/kernel/cpu/cpufreq/sc520_freq.c
@@ -153,6 +153,7 @@ static struct cpufreq_driver sc520_freq_driver = {
153static int __init sc520_freq_init(void) 153static int __init sc520_freq_init(void)
154{ 154{
155 struct cpuinfo_x86 *c = cpu_data; 155 struct cpuinfo_x86 *c = cpu_data;
156 int err;
156 157
157 /* Test if we have the right hardware */ 158 /* Test if we have the right hardware */
158 if(c->x86_vendor != X86_VENDOR_AMD || 159 if(c->x86_vendor != X86_VENDOR_AMD ||
@@ -166,7 +167,11 @@ static int __init sc520_freq_init(void)
166 return -ENOMEM; 167 return -ENOMEM;
167 } 168 }
168 169
169 return cpufreq_register_driver(&sc520_freq_driver); 170 err = cpufreq_register_driver(&sc520_freq_driver);
171 if (err)
172 iounmap(cpuctl);
173
174 return err;
170} 175}
171 176
172 177
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index e8993baf3d14..5113e9231634 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -36,6 +36,7 @@
36 36
37#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg) 37#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg)
38 38
39#define INTEL_MSR_RANGE (0xffff)
39 40
40struct cpu_id 41struct cpu_id
41{ 42{
@@ -379,6 +380,7 @@ static int centrino_cpu_early_init_acpi(void)
379} 380}
380 381
381 382
383#ifdef CONFIG_SMP
382/* 384/*
383 * Some BIOSes do SW_ANY coordination internally, either set it up in hw 385 * Some BIOSes do SW_ANY coordination internally, either set it up in hw
384 * or do it in BIOS firmware and won't inform about it to OS. If not 386 * or do it in BIOS firmware and won't inform about it to OS. If not
@@ -392,7 +394,6 @@ static int sw_any_bug_found(struct dmi_system_id *d)
392 return 0; 394 return 0;
393} 395}
394 396
395
396static struct dmi_system_id sw_any_bug_dmi_table[] = { 397static struct dmi_system_id sw_any_bug_dmi_table[] = {
397 { 398 {
398 .callback = sw_any_bug_found, 399 .callback = sw_any_bug_found,
@@ -405,7 +406,7 @@ static struct dmi_system_id sw_any_bug_dmi_table[] = {
405 }, 406 },
406 { } 407 { }
407}; 408};
408 409#endif
409 410
410/* 411/*
411 * centrino_cpu_init_acpi - register with ACPI P-States library 412 * centrino_cpu_init_acpi - register with ACPI P-States library
@@ -463,8 +464,9 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
463 } 464 }
464 465
465 for (i=0; i<p->state_count; i++) { 466 for (i=0; i<p->state_count; i++) {
466 if (p->states[i].control != p->states[i].status) { 467 if ((p->states[i].control & INTEL_MSR_RANGE) !=
467 dprintk("Different control (%llu) and status values (%llu)\n", 468 (p->states[i].status & INTEL_MSR_RANGE)) {
469 dprintk("Different MSR bits in control (%llu) and status (%llu)\n",
468 p->states[i].control, p->states[i].status); 470 p->states[i].control, p->states[i].status);
469 result = -EINVAL; 471 result = -EINVAL;
470 goto err_unreg; 472 goto err_unreg;
@@ -500,7 +502,7 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
500 } 502 }
501 503
502 for (i=0; i<p->state_count; i++) { 504 for (i=0; i<p->state_count; i++) {
503 centrino_model[cpu]->op_points[i].index = p->states[i].control; 505 centrino_model[cpu]->op_points[i].index = p->states[i].control & INTEL_MSR_RANGE;
504 centrino_model[cpu]->op_points[i].frequency = p->states[i].core_frequency * 1000; 506 centrino_model[cpu]->op_points[i].frequency = p->states[i].core_frequency * 1000;
505 dprintk("adding state %i with frequency %u and control value %04x\n", 507 dprintk("adding state %i with frequency %u and control value %04x\n",
506 i, centrino_model[cpu]->op_points[i].frequency, centrino_model[cpu]->op_points[i].index); 508 i, centrino_model[cpu]->op_points[i].frequency, centrino_model[cpu]->op_points[i].index);
@@ -531,6 +533,9 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
531 533
532 /* notify BIOS that we exist */ 534 /* notify BIOS that we exist */
533 acpi_processor_notify_smm(THIS_MODULE); 535 acpi_processor_notify_smm(THIS_MODULE);
536 printk("speedstep-centrino with X86_SPEEDSTEP_CENTRINO_ACPI"
537 "config is deprecated.\n "
538 "Use X86_ACPI_CPUFREQ (acpi-cpufreq instead.\n" );
534 539
535 return 0; 540 return 0;
536 541
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
index 4f46cac155c4..d59277c00911 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
@@ -123,6 +123,36 @@ static unsigned int pentiumM_get_frequency(void)
123 return (msr_tmp * 100 * 1000); 123 return (msr_tmp * 100 * 1000);
124} 124}
125 125
126static unsigned int pentium_core_get_frequency(void)
127{
128 u32 fsb = 0;
129 u32 msr_lo, msr_tmp;
130
131 rdmsr(MSR_FSB_FREQ, msr_lo, msr_tmp);
132 /* see table B-2 of 25366920.pdf */
133 switch (msr_lo & 0x07) {
134 case 5:
135 fsb = 100000;
136 break;
137 case 1:
138 fsb = 133333;
139 break;
140 case 3:
141 fsb = 166667;
142 break;
143 default:
144 printk(KERN_ERR "PCORE - MSR_FSB_FREQ undefined value");
145 }
146
147 rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
148 dprintk("PCORE - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp);
149
150 msr_tmp = (msr_lo >> 22) & 0x1f;
151 dprintk("bits 22-26 are 0x%x, speed is %u\n", msr_tmp, (msr_tmp * fsb));
152
153 return (msr_tmp * fsb);
154}
155
126 156
127static unsigned int pentium4_get_frequency(void) 157static unsigned int pentium4_get_frequency(void)
128{ 158{
@@ -174,6 +204,8 @@ static unsigned int pentium4_get_frequency(void)
174unsigned int speedstep_get_processor_frequency(unsigned int processor) 204unsigned int speedstep_get_processor_frequency(unsigned int processor)
175{ 205{
176 switch (processor) { 206 switch (processor) {
207 case SPEEDSTEP_PROCESSOR_PCORE:
208 return pentium_core_get_frequency();
177 case SPEEDSTEP_PROCESSOR_PM: 209 case SPEEDSTEP_PROCESSOR_PM:
178 return pentiumM_get_frequency(); 210 return pentiumM_get_frequency();
179 case SPEEDSTEP_PROCESSOR_P4D: 211 case SPEEDSTEP_PROCESSOR_P4D:
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h
index b735429c50b4..b11bcc608cac 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h
@@ -22,6 +22,7 @@
22 * the speedstep_get_processor_frequency() call. */ 22 * the speedstep_get_processor_frequency() call. */
23#define SPEEDSTEP_PROCESSOR_PM 0xFFFFFF03 /* Pentium M */ 23#define SPEEDSTEP_PROCESSOR_PM 0xFFFFFF03 /* Pentium M */
24#define SPEEDSTEP_PROCESSOR_P4D 0xFFFFFF04 /* desktop P4 */ 24#define SPEEDSTEP_PROCESSOR_P4D 0xFFFFFF04 /* desktop P4 */
25#define SPEEDSTEP_PROCESSOR_PCORE 0xFFFFFF05 /* Core */
25 26
26/* speedstep states -- only two of them */ 27/* speedstep states -- only two of them */
27 28
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c b/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
index c28333d53646..ff0d89806114 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
@@ -360,9 +360,6 @@ static int __init speedstep_init(void)
360 case SPEEDSTEP_PROCESSOR_PIII_C: 360 case SPEEDSTEP_PROCESSOR_PIII_C:
361 case SPEEDSTEP_PROCESSOR_PIII_C_EARLY: 361 case SPEEDSTEP_PROCESSOR_PIII_C_EARLY:
362 break; 362 break;
363 case SPEEDSTEP_PROCESSOR_P4M:
364 printk(KERN_INFO "speedstep-smi: you're trying to use this cpufreq driver on a Pentium 4-based CPU. Most likely it will not work.\n");
365 break;
366 default: 363 default:
367 speedstep_processor = 0; 364 speedstep_processor = 0;
368 } 365 }
diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c
index 972346604f9d..47ffec57c0cb 100644
--- a/arch/i386/kernel/microcode.c
+++ b/arch/i386/kernel/microcode.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Intel CPU Microcode Update Driver for Linux 2 * Intel CPU Microcode Update Driver for Linux
3 * 3 *
4 * Copyright (C) 2000-2004 Tigran Aivazian 4 * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
5 * 2006 Shaohua Li <shaohua.li@intel.com> 5 * 2006 Shaohua Li <shaohua.li@intel.com>
6 * 6 *
7 * This driver allows to upgrade microcode on Intel processors 7 * This driver allows to upgrade microcode on Intel processors
@@ -92,7 +92,7 @@
92#include <asm/processor.h> 92#include <asm/processor.h>
93 93
94MODULE_DESCRIPTION("Intel CPU (IA-32) Microcode Update Driver"); 94MODULE_DESCRIPTION("Intel CPU (IA-32) Microcode Update Driver");
95MODULE_AUTHOR("Tigran Aivazian <tigran@veritas.com>"); 95MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>");
96MODULE_LICENSE("GPL"); 96MODULE_LICENSE("GPL");
97 97
98#define MICROCODE_VERSION "1.14a" 98#define MICROCODE_VERSION "1.14a"
@@ -752,7 +752,7 @@ static int __init microcode_init (void)
752 register_hotcpu_notifier(&mc_cpu_notifier); 752 register_hotcpu_notifier(&mc_cpu_notifier);
753 753
754 printk(KERN_INFO 754 printk(KERN_INFO
755 "IA-32 Microcode Update Driver: v" MICROCODE_VERSION " <tigran@veritas.com>\n"); 755 "IA-32 Microcode Update Driver: v" MICROCODE_VERSION " <tigran@aivazian.fsnet.co.uk>\n");
756 return 0; 756 return 0;
757} 757}
758 758
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index b0f84e5778ad..aef39be81361 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -69,9 +69,7 @@ static int __devinitdata smp_b_stepping;
69 69
70/* Number of siblings per CPU package */ 70/* Number of siblings per CPU package */
71int smp_num_siblings = 1; 71int smp_num_siblings = 1;
72#ifdef CONFIG_SMP
73EXPORT_SYMBOL(smp_num_siblings); 72EXPORT_SYMBOL(smp_num_siblings);
74#endif
75 73
76/* Last level cache ID of each logical CPU */ 74/* Last level cache ID of each logical CPU */
77int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID}; 75int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index 1f16ebb9a800..324ea7565e2c 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -488,7 +488,7 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
488 488
489#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK)) 489#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
490 490
491static void rs_set_termios(struct tty_struct *tty, struct termios *old_termios) 491static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
492{ 492{
493 unsigned int cflag = tty->termios->c_cflag; 493 unsigned int cflag = tty->termios->c_cflag;
494 494
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 8ae384eb5357..098ee605bf5e 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq/
29obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o 29obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
30obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o 30obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o
31obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o 31obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
32obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
32obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o 33obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o
33obj-$(CONFIG_AUDIT) += audit.o 34obj-$(CONFIG_AUDIT) += audit.o
34obj-$(CONFIG_PCI_MSI) += msi_ia64.o 35obj-$(CONFIG_PCI_MSI) += msi_ia64.o
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index 0aabedf95dad..bc2f64d72244 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -19,29 +19,11 @@
19 19
20#include <asm/kdebug.h> 20#include <asm/kdebug.h>
21#include <asm/mca.h> 21#include <asm/mca.h>
22#include <asm/uaccess.h>
23 22
24int kdump_status[NR_CPUS]; 23int kdump_status[NR_CPUS];
25atomic_t kdump_cpu_freezed; 24atomic_t kdump_cpu_freezed;
26atomic_t kdump_in_progress; 25atomic_t kdump_in_progress;
27int kdump_on_init = 1; 26int kdump_on_init = 1;
28ssize_t
29copy_oldmem_page(unsigned long pfn, char *buf,
30 size_t csize, unsigned long offset, int userbuf)
31{
32 void *vaddr;
33
34 if (!csize)
35 return 0;
36 vaddr = __va(pfn<<PAGE_SHIFT);
37 if (userbuf) {
38 if (copy_to_user(buf, (vaddr + offset), csize)) {
39 return -EFAULT;
40 }
41 } else
42 memcpy(buf, (vaddr + offset), csize);
43 return csize;
44}
45 27
46static inline Elf64_Word 28static inline Elf64_Word
47*append_elf_note(Elf64_Word *buf, char *name, unsigned type, void *data, 29*append_elf_note(Elf64_Word *buf, char *name, unsigned type, void *data,
@@ -225,14 +207,10 @@ static ctl_table sys_table[] = {
225static int 207static int
226machine_crash_setup(void) 208machine_crash_setup(void)
227{ 209{
228 char *from = strstr(saved_command_line, "elfcorehdr=");
229 static struct notifier_block kdump_init_notifier_nb = { 210 static struct notifier_block kdump_init_notifier_nb = {
230 .notifier_call = kdump_init_notifier, 211 .notifier_call = kdump_init_notifier,
231 }; 212 };
232 int ret; 213 int ret;
233 if (from)
234 elfcorehdr_addr = memparse(from+11, &from);
235 saved_max_pfn = (unsigned long)-1;
236 if((ret = register_die_notifier(&kdump_init_notifier_nb)) != 0) 214 if((ret = register_die_notifier(&kdump_init_notifier_nb)) != 0)
237 return ret; 215 return ret;
238#ifdef CONFIG_SYSCTL 216#ifdef CONFIG_SYSCTL
diff --git a/arch/ia64/kernel/crash_dump.c b/arch/ia64/kernel/crash_dump.c
new file mode 100644
index 000000000000..83b8c91c1408
--- /dev/null
+++ b/arch/ia64/kernel/crash_dump.c
@@ -0,0 +1,48 @@
1/*
2 * kernel/crash_dump.c - Memory preserving reboot related code.
3 *
4 * Created by: Simon Horman <horms@verge.net.au>
5 * Original code moved from kernel/crash.c
6 * Original code comment copied from the i386 version of this file
7 */
8
9#include <linux/errno.h>
10#include <linux/types.h>
11
12#include <linux/uaccess.h>
13
14/**
15 * copy_oldmem_page - copy one page from "oldmem"
16 * @pfn: page frame number to be copied
17 * @buf: target memory address for the copy; this can be in kernel address
18 * space or user address space (see @userbuf)
19 * @csize: number of bytes to copy
20 * @offset: offset in bytes into the page (based on pfn) to begin the copy
21 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
22 * otherwise @buf is in kernel address space, use memcpy().
23 *
24 * Copy a page from "oldmem". For this page, there is no pte mapped
25 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
26 *
27 * Calling copy_to_user() in atomic context is not desirable. Hence first
28 * copying the data to a pre-allocated kernel page and then copying to user
29 * space in non-atomic context.
30 */
31ssize_t
32copy_oldmem_page(unsigned long pfn, char *buf,
33 size_t csize, unsigned long offset, int userbuf)
34{
35 void *vaddr;
36
37 if (!csize)
38 return 0;
39 vaddr = __va(pfn<<PAGE_SHIFT);
40 if (userbuf) {
41 if (copy_to_user(buf, (vaddr + offset), csize)) {
42 return -EFAULT;
43 }
44 } else
45 memcpy(buf, (vaddr + offset), csize);
46 return csize;
47}
48
diff --git a/arch/ia64/kernel/jprobes.S b/arch/ia64/kernel/jprobes.S
index 5cd6226f44f2..621630256c4a 100644
--- a/arch/ia64/kernel/jprobes.S
+++ b/arch/ia64/kernel/jprobes.S
@@ -45,13 +45,14 @@
45 * to the correct location. 45 * to the correct location.
46 */ 46 */
47#include <asm/asmmacro.h> 47#include <asm/asmmacro.h>
48#include <asm-ia64/break.h>
48 49
49 /* 50 /*
50 * void jprobe_break(void) 51 * void jprobe_break(void)
51 */ 52 */
52 .section .kprobes.text, "ax" 53 .section .kprobes.text, "ax"
53ENTRY(jprobe_break) 54ENTRY(jprobe_break)
54 break.m 0x80300 55 break.m __IA64_BREAK_JPROBE
55END(jprobe_break) 56END(jprobe_break)
56 57
57 /* 58 /*
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 76e778951e20..6cb56dd4056d 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -88,6 +88,7 @@ static void __kprobes update_kprobe_inst_flag(uint template, uint slot,
88{ 88{
89 p->ainsn.inst_flag = 0; 89 p->ainsn.inst_flag = 0;
90 p->ainsn.target_br_reg = 0; 90 p->ainsn.target_br_reg = 0;
91 p->ainsn.slot = slot;
91 92
92 /* Check for Break instruction 93 /* Check for Break instruction
93 * Bits 37:40 Major opcode to be zero 94 * Bits 37:40 Major opcode to be zero
@@ -129,48 +130,6 @@ static void __kprobes update_kprobe_inst_flag(uint template, uint slot,
129 130
130/* 131/*
131 * In this function we check to see if the instruction 132 * In this function we check to see if the instruction
132 * on which we are inserting kprobe is supported.
133 * Returns 0 if supported
134 * Returns -EINVAL if unsupported
135 */
136static int __kprobes unsupported_inst(uint template, uint slot,
137 uint major_opcode,
138 unsigned long kprobe_inst,
139 unsigned long addr)
140{
141 if (bundle_encoding[template][slot] == I) {
142 switch (major_opcode) {
143 case 0x0: //I_UNIT_MISC_OPCODE:
144 /*
145 * Check for Integer speculation instruction
146 * - Bit 33-35 to be equal to 0x1
147 */
148 if (((kprobe_inst >> 33) & 0x7) == 1) {
149 printk(KERN_WARNING
150 "Kprobes on speculation inst at <0x%lx> not supported\n",
151 addr);
152 return -EINVAL;
153 }
154
155 /*
156 * IP relative mov instruction
157 * - Bit 27-35 to be equal to 0x30
158 */
159 if (((kprobe_inst >> 27) & 0x1FF) == 0x30) {
160 printk(KERN_WARNING
161 "Kprobes on \"mov r1=ip\" at <0x%lx> not supported\n",
162 addr);
163 return -EINVAL;
164
165 }
166 }
167 }
168 return 0;
169}
170
171
172/*
173 * In this function we check to see if the instruction
174 * (qp) cmpx.crel.ctype p1,p2=r2,r3 133 * (qp) cmpx.crel.ctype p1,p2=r2,r3
175 * on which we are inserting kprobe is cmp instruction 134 * on which we are inserting kprobe is cmp instruction
176 * with ctype as unc. 135 * with ctype as unc.
@@ -206,26 +165,136 @@ out:
206} 165}
207 166
208/* 167/*
168 * In this function we check to see if the instruction
169 * on which we are inserting kprobe is supported.
170 * Returns qp value if supported
171 * Returns -EINVAL if unsupported
172 */
173static int __kprobes unsupported_inst(uint template, uint slot,
174 uint major_opcode,
175 unsigned long kprobe_inst,
176 unsigned long addr)
177{
178 int qp;
179
180 qp = kprobe_inst & 0x3f;
181 if (is_cmp_ctype_unc_inst(template, slot, major_opcode, kprobe_inst)) {
182 if (slot == 1 && qp) {
183 printk(KERN_WARNING "Kprobes on cmp unc"
184 "instruction on slot 1 at <0x%lx>"
185 "is not supported\n", addr);
186 return -EINVAL;
187
188 }
189 qp = 0;
190 }
191 else if (bundle_encoding[template][slot] == I) {
192 if (major_opcode == 0) {
193 /*
194 * Check for Integer speculation instruction
195 * - Bit 33-35 to be equal to 0x1
196 */
197 if (((kprobe_inst >> 33) & 0x7) == 1) {
198 printk(KERN_WARNING
199 "Kprobes on speculation inst at <0x%lx> not supported\n",
200 addr);
201 return -EINVAL;
202 }
203 /*
204 * IP relative mov instruction
205 * - Bit 27-35 to be equal to 0x30
206 */
207 if (((kprobe_inst >> 27) & 0x1FF) == 0x30) {
208 printk(KERN_WARNING
209 "Kprobes on \"mov r1=ip\" at <0x%lx> not supported\n",
210 addr);
211 return -EINVAL;
212
213 }
214 }
215 else if ((major_opcode == 5) && !(kprobe_inst & (0xFUl << 33)) &&
216 (kprobe_inst & (0x1UL << 12))) {
217 /* test bit instructions, tbit,tnat,tf
218 * bit 33-36 to be equal to 0
219 * bit 12 to be equal to 1
220 */
221 if (slot == 1 && qp) {
222 printk(KERN_WARNING "Kprobes on test bit"
223 "instruction on slot at <0x%lx>"
224 "is not supported\n", addr);
225 return -EINVAL;
226 }
227 qp = 0;
228 }
229 }
230 else if (bundle_encoding[template][slot] == B) {
231 if (major_opcode == 7) {
232 /* IP-Relative Predict major code is 7 */
233 printk(KERN_WARNING "Kprobes on IP-Relative"
234 "Predict is not supported\n");
235 return -EINVAL;
236 }
237 else if (major_opcode == 2) {
238 /* Indirect Predict, major code is 2
239 * bit 27-32 to be equal to 10 or 11
240 */
241 int x6=(kprobe_inst >> 27) & 0x3F;
242 if ((x6 == 0x10) || (x6 == 0x11)) {
243 printk(KERN_WARNING "Kprobes on"
244 "Indirect Predict is not supported\n");
245 return -EINVAL;
246 }
247 }
248 }
249 /* kernel does not use float instruction, here for safety kprobe
250 * will judge whether it is fcmp/flass/float approximation instruction
251 */
252 else if (unlikely(bundle_encoding[template][slot] == F)) {
253 if ((major_opcode == 4 || major_opcode == 5) &&
254 (kprobe_inst & (0x1 << 12))) {
255 /* fcmp/fclass unc instruction */
256 if (slot == 1 && qp) {
257 printk(KERN_WARNING "Kprobes on fcmp/fclass "
258 "instruction on slot at <0x%lx> "
259 "is not supported\n", addr);
260 return -EINVAL;
261
262 }
263 qp = 0;
264 }
265 if ((major_opcode == 0 || major_opcode == 1) &&
266 (kprobe_inst & (0x1UL << 33))) {
267 /* float Approximation instruction */
268 if (slot == 1 && qp) {
269 printk(KERN_WARNING "Kprobes on float Approx "
270 "instr at <0x%lx> is not supported\n",
271 addr);
272 return -EINVAL;
273 }
274 qp = 0;
275 }
276 }
277 return qp;
278}
279
280/*
209 * In this function we override the bundle with 281 * In this function we override the bundle with
210 * the break instruction at the given slot. 282 * the break instruction at the given slot.
211 */ 283 */
212static void __kprobes prepare_break_inst(uint template, uint slot, 284static void __kprobes prepare_break_inst(uint template, uint slot,
213 uint major_opcode, 285 uint major_opcode,
214 unsigned long kprobe_inst, 286 unsigned long kprobe_inst,
215 struct kprobe *p) 287 struct kprobe *p,
288 int qp)
216{ 289{
217 unsigned long break_inst = BREAK_INST; 290 unsigned long break_inst = BREAK_INST;
218 bundle_t *bundle = &p->opcode.bundle; 291 bundle_t *bundle = &p->opcode.bundle;
219 292
220 /* 293 /*
221 * Copy the original kprobe_inst qualifying predicate(qp) 294 * Copy the original kprobe_inst qualifying predicate(qp)
222 * to the break instruction iff !is_cmp_ctype_unc_inst 295 * to the break instruction
223 * because for cmp instruction with ctype equal to unc,
224 * which is a special instruction always needs to be
225 * executed regradless of qp
226 */ 296 */
227 if (!is_cmp_ctype_unc_inst(template, slot, major_opcode, kprobe_inst)) 297 break_inst |= qp;
228 break_inst |= (0x3f & kprobe_inst);
229 298
230 switch (slot) { 299 switch (slot) {
231 case 0: 300 case 0:
@@ -296,12 +365,6 @@ static int __kprobes valid_kprobe_addr(int template, int slot,
296 return -EINVAL; 365 return -EINVAL;
297 } 366 }
298 367
299 if (slot == 1 && bundle_encoding[template][1] != L) {
300 printk(KERN_WARNING "Inserting kprobes on slot #1 "
301 "is not supported\n");
302 return -EINVAL;
303 }
304
305 return 0; 368 return 0;
306} 369}
307 370
@@ -427,6 +490,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
427 unsigned long kprobe_inst=0; 490 unsigned long kprobe_inst=0;
428 unsigned int slot = addr & 0xf, template, major_opcode = 0; 491 unsigned int slot = addr & 0xf, template, major_opcode = 0;
429 bundle_t *bundle; 492 bundle_t *bundle;
493 int qp;
430 494
431 bundle = &((kprobe_opcode_t *)kprobe_addr)->bundle; 495 bundle = &((kprobe_opcode_t *)kprobe_addr)->bundle;
432 template = bundle->quad0.template; 496 template = bundle->quad0.template;
@@ -441,9 +505,9 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
441 /* Get kprobe_inst and major_opcode from the bundle */ 505 /* Get kprobe_inst and major_opcode from the bundle */
442 get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode); 506 get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);
443 507
444 if (unsupported_inst(template, slot, major_opcode, kprobe_inst, addr)) 508 qp = unsupported_inst(template, slot, major_opcode, kprobe_inst, addr);
445 return -EINVAL; 509 if (qp < 0)
446 510 return -EINVAL;
447 511
448 p->ainsn.insn = get_insn_slot(); 512 p->ainsn.insn = get_insn_slot();
449 if (!p->ainsn.insn) 513 if (!p->ainsn.insn)
@@ -451,30 +515,56 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
451 memcpy(&p->opcode, kprobe_addr, sizeof(kprobe_opcode_t)); 515 memcpy(&p->opcode, kprobe_addr, sizeof(kprobe_opcode_t));
452 memcpy(p->ainsn.insn, kprobe_addr, sizeof(kprobe_opcode_t)); 516 memcpy(p->ainsn.insn, kprobe_addr, sizeof(kprobe_opcode_t));
453 517
454 prepare_break_inst(template, slot, major_opcode, kprobe_inst, p); 518 prepare_break_inst(template, slot, major_opcode, kprobe_inst, p, qp);
455 519
456 return 0; 520 return 0;
457} 521}
458 522
459void __kprobes arch_arm_kprobe(struct kprobe *p) 523void __kprobes arch_arm_kprobe(struct kprobe *p)
460{ 524{
461 unsigned long addr = (unsigned long)p->addr; 525 unsigned long arm_addr;
462 unsigned long arm_addr = addr & ~0xFULL; 526 bundle_t *src, *dest;
527
528 arm_addr = ((unsigned long)p->addr) & ~0xFUL;
529 dest = &((kprobe_opcode_t *)arm_addr)->bundle;
530 src = &p->opcode.bundle;
463 531
464 flush_icache_range((unsigned long)p->ainsn.insn, 532 flush_icache_range((unsigned long)p->ainsn.insn,
465 (unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t)); 533 (unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
466 memcpy((char *)arm_addr, &p->opcode, sizeof(kprobe_opcode_t)); 534 switch (p->ainsn.slot) {
535 case 0:
536 dest->quad0.slot0 = src->quad0.slot0;
537 break;
538 case 1:
539 dest->quad1.slot1_p1 = src->quad1.slot1_p1;
540 break;
541 case 2:
542 dest->quad1.slot2 = src->quad1.slot2;
543 break;
544 }
467 flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t)); 545 flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
468} 546}
469 547
470void __kprobes arch_disarm_kprobe(struct kprobe *p) 548void __kprobes arch_disarm_kprobe(struct kprobe *p)
471{ 549{
472 unsigned long addr = (unsigned long)p->addr; 550 unsigned long arm_addr;
473 unsigned long arm_addr = addr & ~0xFULL; 551 bundle_t *src, *dest;
474 552
553 arm_addr = ((unsigned long)p->addr) & ~0xFUL;
554 dest = &((kprobe_opcode_t *)arm_addr)->bundle;
475 /* p->ainsn.insn contains the original unaltered kprobe_opcode_t */ 555 /* p->ainsn.insn contains the original unaltered kprobe_opcode_t */
476 memcpy((char *) arm_addr, (char *) p->ainsn.insn, 556 src = &p->ainsn.insn->bundle;
477 sizeof(kprobe_opcode_t)); 557 switch (p->ainsn.slot) {
558 case 0:
559 dest->quad0.slot0 = src->quad0.slot0;
560 break;
561 case 1:
562 dest->quad1.slot1_p1 = src->quad1.slot1_p1;
563 break;
564 case 2:
565 dest->quad1.slot2 = src->quad1.slot2;
566 break;
567 }
478 flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t)); 568 flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
479} 569}
480 570
@@ -807,7 +897,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
807 switch(val) { 897 switch(val) {
808 case DIE_BREAK: 898 case DIE_BREAK:
809 /* err is break number from ia64_bad_break() */ 899 /* err is break number from ia64_bad_break() */
810 if (args->err == 0x80200 || args->err == 0x80300 || args->err == 0) 900 if ((args->err >> 12) == (__IA64_BREAK_KPROBE >> 12)
901 || args->err == __IA64_BREAK_JPROBE
902 || args->err == 0)
811 if (pre_kprobes_handler(args)) 903 if (pre_kprobes_handler(args))
812 ret = NOTIFY_STOP; 904 ret = NOTIFY_STOP;
813 break; 905 break;
diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c
index 468233fa2cee..e2ccc9f660c5 100644
--- a/arch/ia64/kernel/machine_kexec.c
+++ b/arch/ia64/kernel/machine_kexec.c
@@ -19,8 +19,11 @@
19#include <asm/delay.h> 19#include <asm/delay.h>
20#include <asm/meminit.h> 20#include <asm/meminit.h>
21 21
22typedef void (*relocate_new_kernel_t)(unsigned long, unsigned long, 22typedef NORET_TYPE void (*relocate_new_kernel_t)(
23 struct ia64_boot_param *, unsigned long); 23 unsigned long indirection_page,
24 unsigned long start_address,
25 struct ia64_boot_param *boot_param,
26 unsigned long pal_addr) ATTRIB_NORET;
24 27
25struct kimage *ia64_kimage; 28struct kimage *ia64_kimage;
26 29
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 87c1c4f42872..a76add3e76a2 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1239,7 +1239,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
1239 } else { 1239 } else {
1240 /* Dump buffered message to console */ 1240 /* Dump buffered message to console */
1241 ia64_mlogbuf_finish(1); 1241 ia64_mlogbuf_finish(1);
1242#ifdef CONFIG_CRASH_DUMP 1242#ifdef CONFIG_KEXEC
1243 atomic_set(&kdump_in_progress, 1); 1243 atomic_set(&kdump_in_progress, 1);
1244 monarch_cpu = -1; 1244 monarch_cpu = -1;
1245#endif 1245#endif
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 14e1200376a9..ad567b8d432e 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -256,7 +256,7 @@ reserve_memory (void)
256 256
257#ifdef CONFIG_KEXEC 257#ifdef CONFIG_KEXEC
258 /* crashkernel=size@offset specifies the size to reserve for a crash 258 /* crashkernel=size@offset specifies the size to reserve for a crash
259 * kernel.(offset is ingored for keep compatibility with other archs) 259 * kernel. If offset is 0, then it is determined automatically.
260 * By reserving this memory we guarantee that linux never set's it 260 * By reserving this memory we guarantee that linux never set's it
261 * up as a DMA target.Useful for holding code to do something 261 * up as a DMA target.Useful for holding code to do something
262 * appropriate after a kernel panic. 262 * appropriate after a kernel panic.
@@ -266,10 +266,16 @@ reserve_memory (void)
266 unsigned long base, size; 266 unsigned long base, size;
267 if (from) { 267 if (from) {
268 size = memparse(from + 12, &from); 268 size = memparse(from + 12, &from);
269 if (*from == '@')
270 base = memparse(from+1, &from);
271 else
272 base = 0;
269 if (size) { 273 if (size) {
270 sort_regions(rsvd_region, n); 274 if (!base) {
271 base = kdump_find_rsvd_region(size, 275 sort_regions(rsvd_region, n);
272 rsvd_region, n); 276 base = kdump_find_rsvd_region(size,
277 rsvd_region, n);
278 }
273 if (base != ~0UL) { 279 if (base != ~0UL) {
274 rsvd_region[n].start = 280 rsvd_region[n].start =
275 (unsigned long)__va(base); 281 (unsigned long)__va(base);
@@ -434,6 +440,21 @@ static __init int setup_nomca(char *s)
434} 440}
435early_param("nomca", setup_nomca); 441early_param("nomca", setup_nomca);
436 442
443#ifdef CONFIG_PROC_VMCORE
444/* elfcorehdr= specifies the location of elf core header
445 * stored by the crashed kernel.
446 */
447static int __init parse_elfcorehdr(char *arg)
448{
449 if (!arg)
450 return -EINVAL;
451
452 elfcorehdr_addr = memparse(arg, &arg);
453 return 0;
454}
455early_param("elfcorehdr", parse_elfcorehdr);
456#endif /* CONFIG_PROC_VMCORE */
457
437void __init 458void __init
438setup_arch (char **cmdline_p) 459setup_arch (char **cmdline_p)
439{ 460{
@@ -653,6 +674,7 @@ get_model_name(__u8 family, __u8 model)
653{ 674{
654 char brand[128]; 675 char brand[128];
655 676
677 memcpy(brand, "Unknown", 8);
656 if (ia64_pal_get_brand_info(brand)) { 678 if (ia64_pal_get_brand_info(brand)) {
657 if (family == 0x7) 679 if (family == 0x7)
658 memcpy(brand, "Merced", 7); 680 memcpy(brand, "Merced", 7);
@@ -660,8 +682,7 @@ get_model_name(__u8 family, __u8 model)
660 case 0: memcpy(brand, "McKinley", 9); break; 682 case 0: memcpy(brand, "McKinley", 9); break;
661 case 1: memcpy(brand, "Madison", 8); break; 683 case 1: memcpy(brand, "Madison", 8); break;
662 case 2: memcpy(brand, "Madison up to 9M cache", 23); break; 684 case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
663 } else 685 }
664 memcpy(brand, "Unknown", 8);
665 } 686 }
666 if (brandname[0] == '\0') 687 if (brandname[0] == '\0')
667 return strcpy(brandname, brand); 688 return strcpy(brandname, brand);
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index b1b9aa4364b9..f4c7f7769cf7 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -157,7 +157,7 @@ handle_IPI (int irq, void *dev_id)
157 case IPI_CPU_STOP: 157 case IPI_CPU_STOP:
158 stop_this_cpu(); 158 stop_this_cpu();
159 break; 159 break;
160#ifdef CONFIG_CRASH_DUMP 160#ifdef CONFIG_KEXEC
161 case IPI_KDUMP_CPU_STOP: 161 case IPI_KDUMP_CPU_STOP:
162 unw_init_running(kdump_cpu_freeze, NULL); 162 unw_init_running(kdump_cpu_freeze, NULL);
163 break; 163 break;
@@ -219,7 +219,7 @@ send_IPI_self (int op)
219 send_IPI_single(smp_processor_id(), op); 219 send_IPI_single(smp_processor_id(), op);
220} 220}
221 221
222#ifdef CONFIG_CRASH_DUMP 222#ifdef CONFIG_KEXEC
223void 223void
224kdump_smp_send_stop() 224kdump_smp_send_stop()
225{ 225{
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index fffa9e0826bc..ab684747036f 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -307,6 +307,15 @@ fp_emulate (int fp_fault, void *bundle, long *ipsr, long *fpsr, long *isr, long
307 return ret.status; 307 return ret.status;
308} 308}
309 309
310struct fpu_swa_msg {
311 unsigned long count;
312 unsigned long time;
313};
314static DEFINE_PER_CPU(struct fpu_swa_msg, cpulast);
315DECLARE_PER_CPU(struct fpu_swa_msg, cpulast);
316static struct fpu_swa_msg last __cacheline_aligned;
317
318
310/* 319/*
311 * Handle floating-point assist faults and traps. 320 * Handle floating-point assist faults and traps.
312 */ 321 */
@@ -316,8 +325,6 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
316 long exception, bundle[2]; 325 long exception, bundle[2];
317 unsigned long fault_ip; 326 unsigned long fault_ip;
318 struct siginfo siginfo; 327 struct siginfo siginfo;
319 static int fpu_swa_count = 0;
320 static unsigned long last_time;
321 328
322 fault_ip = regs->cr_iip; 329 fault_ip = regs->cr_iip;
323 if (!fp_fault && (ia64_psr(regs)->ri == 0)) 330 if (!fp_fault && (ia64_psr(regs)->ri == 0))
@@ -325,14 +332,37 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
325 if (copy_from_user(bundle, (void __user *) fault_ip, sizeof(bundle))) 332 if (copy_from_user(bundle, (void __user *) fault_ip, sizeof(bundle)))
326 return -1; 333 return -1;
327 334
328 if (jiffies - last_time > 5*HZ) 335 if (!(current->thread.flags & IA64_THREAD_FPEMU_NOPRINT)) {
329 fpu_swa_count = 0; 336 unsigned long count, current_jiffies = jiffies;
330 if ((fpu_swa_count < 4) && !(current->thread.flags & IA64_THREAD_FPEMU_NOPRINT)) { 337 struct fpu_swa_msg *cp = &__get_cpu_var(cpulast);
331 last_time = jiffies; 338
332 ++fpu_swa_count; 339 if (unlikely(current_jiffies > cp->time))
333 printk(KERN_WARNING 340 cp->count = 0;
334 "%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n", 341 if (unlikely(cp->count < 5)) {
335 current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri, isr); 342 cp->count++;
343 cp->time = current_jiffies + 5 * HZ;
344
345 /* minimize races by grabbing a copy of count BEFORE checking last.time. */
346 count = last.count;
347 barrier();
348
349 /*
350 * Lower 4 bits are used as a count. Upper bits are a sequence
351 * number that is updated when count is reset. The cmpxchg will
352 * fail is seqno has changed. This minimizes mutiple cpus
353 * reseting the count.
354 */
355 if (current_jiffies > last.time)
356 (void) cmpxchg_acq(&last.count, count, 16 + (count & ~15));
357
358 /* used fetchadd to atomically update the count */
359 if ((last.count & 15) < 5 && (ia64_fetchadd(1, &last.count, acq) & 15) < 5) {
360 last.time = current_jiffies + 5 * HZ;
361 printk(KERN_WARNING
362 "%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n",
363 current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri, isr);
364 }
365 }
336 } 366 }
337 367
338 exception = fp_emulate(fp_fault, bundle, &regs->cr_ipsr, &regs->ar_fpsr, &isr, &regs->pr, 368 exception = fp_emulate(fp_fault, bundle, &regs->cr_ipsr, &regs->ar_fpsr, &isr, &regs->pr,
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 82deaa3a7c48..1e79551231b9 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -174,6 +174,12 @@ find_memory (void)
174 reserve_bootmem(bootmap_start, bootmap_size); 174 reserve_bootmem(bootmap_start, bootmap_size);
175 175
176 find_initrd(); 176 find_initrd();
177
178#ifdef CONFIG_CRASH_DUMP
179 /* If we are doing a crash dump, we still need to know the real mem
180 * size before original memory map is * reset. */
181 saved_max_pfn = max_pfn;
182#endif
177} 183}
178 184
179#ifdef CONFIG_SMP 185#ifdef CONFIG_SMP
@@ -226,7 +232,6 @@ void __init
226paging_init (void) 232paging_init (void)
227{ 233{
228 unsigned long max_dma; 234 unsigned long max_dma;
229 unsigned long nid = 0;
230 unsigned long max_zone_pfns[MAX_NR_ZONES]; 235 unsigned long max_zone_pfns[MAX_NR_ZONES];
231 236
232 num_physpages = 0; 237 num_physpages = 0;
@@ -238,7 +243,7 @@ paging_init (void)
238 max_zone_pfns[ZONE_NORMAL] = max_low_pfn; 243 max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
239 244
240#ifdef CONFIG_VIRTUAL_MEM_MAP 245#ifdef CONFIG_VIRTUAL_MEM_MAP
241 efi_memmap_walk(register_active_ranges, &nid); 246 efi_memmap_walk(register_active_ranges, NULL);
242 efi_memmap_walk(find_largest_hole, (u64 *)&max_gap); 247 efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
243 if (max_gap < LARGE_GAP) { 248 if (max_gap < LARGE_GAP) {
244 vmem_map = (struct page *) 0; 249 vmem_map = (struct page *) 0;
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 56dc2024220e..1a3d8a2feb94 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -595,14 +595,9 @@ find_largest_hole (u64 start, u64 end, void *arg)
595} 595}
596 596
597int __init 597int __init
598register_active_ranges(u64 start, u64 end, void *nid) 598register_active_ranges(u64 start, u64 end, void *arg)
599{ 599{
600 BUG_ON(nid == NULL); 600 add_active_range(0, __pa(start) >> PAGE_SHIFT, __pa(end) >> PAGE_SHIFT);
601 BUG_ON(*(unsigned long *)nid >= MAX_NUMNODES);
602
603 add_active_range(*(unsigned long *)nid,
604 __pa(start) >> PAGE_SHIFT,
605 __pa(end) >> PAGE_SHIFT);
606 return 0; 601 return 0;
607} 602}
608#endif /* CONFIG_VIRTUAL_MEM_MAP */ 603#endif /* CONFIG_VIRTUAL_MEM_MAP */
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index a934ad069425..8571e52c2efd 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -580,7 +580,7 @@ void __cpuinit sn_cpu_init(void)
580 int slice; 580 int slice;
581 int cnode; 581 int cnode;
582 int i; 582 int i;
583 static int wars_have_been_checked; 583 static int wars_have_been_checked, set_cpu0_number;
584 584
585 cpuid = smp_processor_id(); 585 cpuid = smp_processor_id();
586 if (cpuid == 0 && IS_MEDUSA()) { 586 if (cpuid == 0 && IS_MEDUSA()) {
@@ -605,8 +605,16 @@ void __cpuinit sn_cpu_init(void)
605 /* 605 /*
606 * Don't check status. The SAL call is not supported on all PROMs 606 * Don't check status. The SAL call is not supported on all PROMs
607 * but a failure is harmless. 607 * but a failure is harmless.
608 * Architechtuallly, cpu_init is always called twice on cpu 0. We
609 * should set cpu_number on cpu 0 once.
608 */ 610 */
609 (void) ia64_sn_set_cpu_number(cpuid); 611 if (cpuid == 0) {
612 if (!set_cpu0_number) {
613 (void) ia64_sn_set_cpu_number(cpuid);
614 set_cpu0_number = 1;
615 }
616 } else
617 (void) ia64_sn_set_cpu_number(cpuid);
610 618
611 /* 619 /*
612 * The boot cpu makes this call again after platform initialization is 620 * The boot cpu makes this call again after platform initialization is
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 462ea178f49a..33367996d72d 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -189,7 +189,7 @@ static void print_pci_topology(struct seq_file *s)
189 int e; 189 int e;
190 190
191 for (sz = PAGE_SIZE; sz < 16 * PAGE_SIZE; sz += PAGE_SIZE) { 191 for (sz = PAGE_SIZE; sz < 16 * PAGE_SIZE; sz += PAGE_SIZE) {
192 if (!(p = (char *)kmalloc(sz, GFP_KERNEL))) 192 if (!(p = kmalloc(sz, GFP_KERNEL)))
193 break; 193 break;
194 e = ia64_sn_ioif_get_pci_topology(__pa(p), sz); 194 e = ia64_sn_ioif_get_pci_topology(__pa(p), sz);
195 if (e == SALRET_OK) 195 if (e == SALRET_OK)
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index 1f3540826e68..c08db9c2375d 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -632,7 +632,7 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
632 ch->number, ch->partid); 632 ch->number, ch->partid);
633 633
634 spin_unlock_irqrestore(&ch->lock, *irq_flags); 634 spin_unlock_irqrestore(&ch->lock, *irq_flags);
635 xpc_create_kthreads(ch, 1); 635 xpc_create_kthreads(ch, 1, 0);
636 spin_lock_irqsave(&ch->lock, *irq_flags); 636 spin_lock_irqsave(&ch->lock, *irq_flags);
637} 637}
638 638
@@ -754,12 +754,12 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
754 754
755 /* make sure all activity has settled down first */ 755 /* make sure all activity has settled down first */
756 756
757 if (atomic_read(&ch->references) > 0 || 757 if (atomic_read(&ch->kthreads_assigned) > 0 ||
758 ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && 758 atomic_read(&ch->references) > 0) {
759 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE))) {
760 return; 759 return;
761 } 760 }
762 DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0); 761 DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
762 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));
763 763
764 if (part->act_state == XPC_P_DEACTIVATING) { 764 if (part->act_state == XPC_P_DEACTIVATING) {
765 /* can't proceed until the other side disengages from us */ 765 /* can't proceed until the other side disengages from us */
@@ -1651,6 +1651,11 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
1651 /* wake all idle kthreads so they can exit */ 1651 /* wake all idle kthreads so they can exit */
1652 if (atomic_read(&ch->kthreads_idle) > 0) { 1652 if (atomic_read(&ch->kthreads_idle) > 0) {
1653 wake_up_all(&ch->idle_wq); 1653 wake_up_all(&ch->idle_wq);
1654
1655 } else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
1656 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
1657 /* start a kthread that will do the xpcDisconnecting callout */
1658 xpc_create_kthreads(ch, 1, 1);
1654 } 1659 }
1655 1660
1656 /* wake those waiting to allocate an entry from the local msg queue */ 1661 /* wake those waiting to allocate an entry from the local msg queue */
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index fa96dfc0e1aa..7a387d237363 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -681,7 +681,7 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed)
681 dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n", 681 dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
682 needed, ch->partid, ch->number); 682 needed, ch->partid, ch->number);
683 683
684 xpc_create_kthreads(ch, needed); 684 xpc_create_kthreads(ch, needed, 0);
685} 685}
686 686
687 687
@@ -775,26 +775,28 @@ xpc_daemonize_kthread(void *args)
775 xpc_kthread_waitmsgs(part, ch); 775 xpc_kthread_waitmsgs(part, ch);
776 } 776 }
777 777
778 if (atomic_dec_return(&ch->kthreads_assigned) == 0) { 778 /* let registerer know that connection is disconnecting */
779 spin_lock_irqsave(&ch->lock, irq_flags);
780 if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
781 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
782 ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
783 spin_unlock_irqrestore(&ch->lock, irq_flags);
784 779
785 xpc_disconnect_callout(ch, xpcDisconnecting); 780 spin_lock_irqsave(&ch->lock, irq_flags);
786 781 if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
787 spin_lock_irqsave(&ch->lock, irq_flags); 782 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
788 ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE; 783 ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
789 }
790 spin_unlock_irqrestore(&ch->lock, irq_flags); 784 spin_unlock_irqrestore(&ch->lock, irq_flags);
785
786 xpc_disconnect_callout(ch, xpcDisconnecting);
787
788 spin_lock_irqsave(&ch->lock, irq_flags);
789 ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
790 }
791 spin_unlock_irqrestore(&ch->lock, irq_flags);
792
793 if (atomic_dec_return(&ch->kthreads_assigned) == 0) {
791 if (atomic_dec_return(&part->nchannels_engaged) == 0) { 794 if (atomic_dec_return(&part->nchannels_engaged) == 0) {
792 xpc_mark_partition_disengaged(part); 795 xpc_mark_partition_disengaged(part);
793 xpc_IPI_send_disengage(part); 796 xpc_IPI_send_disengage(part);
794 } 797 }
795 } 798 }
796 799
797
798 xpc_msgqueue_deref(ch); 800 xpc_msgqueue_deref(ch);
799 801
800 dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n", 802 dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
@@ -818,7 +820,8 @@ xpc_daemonize_kthread(void *args)
818 * partition. 820 * partition.
819 */ 821 */
820void 822void
821xpc_create_kthreads(struct xpc_channel *ch, int needed) 823xpc_create_kthreads(struct xpc_channel *ch, int needed,
824 int ignore_disconnecting)
822{ 825{
823 unsigned long irq_flags; 826 unsigned long irq_flags;
824 pid_t pid; 827 pid_t pid;
@@ -833,16 +836,38 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed)
833 * kthread. That kthread is responsible for doing the 836 * kthread. That kthread is responsible for doing the
834 * counterpart to the following before it exits. 837 * counterpart to the following before it exits.
835 */ 838 */
839 if (ignore_disconnecting) {
840 if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
841 /* kthreads assigned had gone to zero */
842 BUG_ON(!(ch->flags &
843 XPC_C_DISCONNECTINGCALLOUT_MADE));
844 break;
845 }
846
847 } else if (ch->flags & XPC_C_DISCONNECTING) {
848 break;
849
850 } else if (atomic_inc_return(&ch->kthreads_assigned) == 1) {
851 if (atomic_inc_return(&part->nchannels_engaged) == 1)
852 xpc_mark_partition_engaged(part);
853 }
836 (void) xpc_part_ref(part); 854 (void) xpc_part_ref(part);
837 xpc_msgqueue_ref(ch); 855 xpc_msgqueue_ref(ch);
838 if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
839 atomic_inc_return(&part->nchannels_engaged) == 1) {
840 xpc_mark_partition_engaged(part);
841 }
842 856
843 pid = kernel_thread(xpc_daemonize_kthread, (void *) args, 0); 857 pid = kernel_thread(xpc_daemonize_kthread, (void *) args, 0);
844 if (pid < 0) { 858 if (pid < 0) {
845 /* the fork failed */ 859 /* the fork failed */
860
861 /*
862 * NOTE: if (ignore_disconnecting &&
863 * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
864 * then we'll deadlock if all other kthreads assigned
865 * to this channel are blocked in the channel's
866 * registerer, because the only thing that will unblock
867 * them is the xpcDisconnecting callout that this
868 * failed kernel_thread would have made.
869 */
870
846 if (atomic_dec_return(&ch->kthreads_assigned) == 0 && 871 if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
847 atomic_dec_return(&part->nchannels_engaged) == 0) { 872 atomic_dec_return(&part->nchannels_engaged) == 0) {
848 xpc_mark_partition_disengaged(part); 873 xpc_mark_partition_disengaged(part);
@@ -857,9 +882,6 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed)
857 * Flag this as an error only if we have an 882 * Flag this as an error only if we have an
858 * insufficient #of kthreads for the channel 883 * insufficient #of kthreads for the channel
859 * to function. 884 * to function.
860 *
861 * No xpc_msgqueue_ref() is needed here since
862 * the channel mgr is doing this.
863 */ 885 */
864 spin_lock_irqsave(&ch->lock, irq_flags); 886 spin_lock_irqsave(&ch->lock, irq_flags);
865 XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources, 887 XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources,
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
index b54ef1726c55..46b7d6035aab 100644
--- a/arch/m68k/mm/kmap.c
+++ b/arch/m68k/mm/kmap.c
@@ -59,7 +59,7 @@ static struct vm_struct *get_io_area(unsigned long size)
59 unsigned long addr; 59 unsigned long addr;
60 struct vm_struct **p, *tmp, *area; 60 struct vm_struct **p, *tmp, *area;
61 61
62 area = (struct vm_struct *)kmalloc(sizeof(*area), GFP_KERNEL); 62 area = kmalloc(sizeof(*area), GFP_KERNEL);
63 if (!area) 63 if (!area)
64 return NULL; 64 return NULL;
65 addr = KMAP_START; 65 addr = KMAP_START;
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index 1f9300f37f52..96e941084c04 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -644,7 +644,85 @@ CONFIG_CONNECTOR=m
644# 644#
645# Memory Technology Devices (MTD) 645# Memory Technology Devices (MTD)
646# 646#
647# CONFIG_MTD is not set 647CONFIG_MTD=y
648# CONFIG_MTD_DEBUG is not set
649# CONFIG_MTD_CONCAT is not set
650CONFIG_MTD_PARTITIONS=y
651# CONFIG_MTD_REDBOOT_PARTS is not set
652# CONFIG_MTD_CMDLINE_PARTS is not set
653
654#
655# User Modules And Translation Layers
656#
657CONFIG_MTD_CHAR=y
658CONFIG_MTD_BLOCK=y
659# CONFIG_FTL is not set
660# CONFIG_NFTL is not set
661# CONFIG_INFTL is not set
662# CONFIG_RFD_FTL is not set
663# CONFIG_SSFDC is not set
664
665#
666# RAM/ROM/Flash chip drivers
667#
668CONFIG_MTD_CFI=y
669# CONFIG_MTD_JEDECPROBE is not set
670CONFIG_MTD_GEN_PROBE=y
671# CONFIG_MTD_CFI_ADV_OPTIONS is not set
672CONFIG_MTD_MAP_BANK_WIDTH_1=y
673CONFIG_MTD_MAP_BANK_WIDTH_2=y
674CONFIG_MTD_MAP_BANK_WIDTH_4=y
675# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
676# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
677# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
678CONFIG_MTD_CFI_I1=y
679CONFIG_MTD_CFI_I2=y
680# CONFIG_MTD_CFI_I4 is not set
681# CONFIG_MTD_CFI_I8 is not set
682CONFIG_MTD_CFI_INTELEXT=y
683CONFIG_MTD_CFI_AMDSTD=y
684CONFIG_MTD_CFI_STAA=y
685CONFIG_MTD_CFI_UTIL=y
686# CONFIG_MTD_RAM is not set
687# CONFIG_MTD_ROM is not set
688# CONFIG_MTD_ABSENT is not set
689# CONFIG_MTD_OBSOLETE_CHIPS is not set
690
691#
692# Mapping drivers for chip access
693#
694# CONFIG_MTD_COMPLEX_MAPPINGS is not set
695CONFIG_MTD_PHYSMAP=y
696CONFIG_MTD_PHYSMAP_START=0x0
697CONFIG_MTD_PHYSMAP_LEN=0x0
698CONFIG_MTD_PHYSMAP_BANKWIDTH=0
699# CONFIG_MTD_PLATRAM is not set
700
701#
702# Self-contained MTD device drivers
703#
704# CONFIG_MTD_PMC551 is not set
705# CONFIG_MTD_SLRAM is not set
706# CONFIG_MTD_PHRAM is not set
707# CONFIG_MTD_MTDRAM is not set
708# CONFIG_MTD_BLOCK2MTD is not set
709
710#
711# Disk-On-Chip Device Drivers
712#
713# CONFIG_MTD_DOC2000 is not set
714# CONFIG_MTD_DOC2001 is not set
715# CONFIG_MTD_DOC2001PLUS is not set
716
717#
718# NAND Flash Device Drivers
719#
720# CONFIG_MTD_NAND is not set
721
722#
723# OneNAND Flash Device Drivers
724#
725# CONFIG_MTD_ONENAND is not set
648 726
649# 727#
650# Parallel port support 728# Parallel port support
diff --git a/arch/mips/kernel/apm.c b/arch/mips/kernel/apm.c
index 528e731049c1..ba16d07588cb 100644
--- a/arch/mips/kernel/apm.c
+++ b/arch/mips/kernel/apm.c
@@ -356,7 +356,7 @@ static int apm_open(struct inode * inode, struct file * filp)
356{ 356{
357 struct apm_user *as; 357 struct apm_user *as;
358 358
359 as = (struct apm_user *)kzalloc(sizeof(*as), GFP_KERNEL); 359 as = kzalloc(sizeof(*as), GFP_KERNEL);
360 if (as) { 360 if (as) {
361 /* 361 /*
362 * XXX - this is a tiny bit broken, when we consider BSD 362 * XXX - this is a tiny bit broken, when we consider BSD
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
index 2c82412b9efe..5929f883e46b 100644
--- a/arch/mips/kernel/kspd.c
+++ b/arch/mips/kernel/kspd.c
@@ -301,7 +301,7 @@ static void sp_cleanup(void)
301 for (;;) { 301 for (;;) {
302 unsigned long set; 302 unsigned long set;
303 i = j * __NFDBITS; 303 i = j * __NFDBITS;
304 if (i >= fdt->max_fdset || i >= fdt->max_fds) 304 if (i >= fdt->max_fds)
305 break; 305 break;
306 set = fdt->open_fds->fds_bits[j++]; 306 set = fdt->open_fds->fds_bits[j++];
307 while (set) { 307 while (set) {
diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
index 621037db2290..060563a712b6 100644
--- a/arch/mips/kernel/reset.c
+++ b/arch/mips/kernel/reset.c
@@ -23,6 +23,8 @@ void (*_machine_restart)(char *command);
23void (*_machine_halt)(void); 23void (*_machine_halt)(void);
24void (*pm_power_off)(void); 24void (*pm_power_off)(void);
25 25
26EXPORT_SYMBOL(pm_power_off);
27
26void machine_restart(char *command) 28void machine_restart(char *command)
27{ 29{
28 if (_machine_restart) 30 if (_machine_restart)
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 79f0317d84ac..cecff24cc972 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -112,6 +112,7 @@ SECTIONS
112 /* .exit.text is discarded at runtime, not link time, to deal with 112 /* .exit.text is discarded at runtime, not link time, to deal with
113 references from .rodata */ 113 references from .rodata */
114 .exit.text : { *(.exit.text) } 114 .exit.text : { *(.exit.text) }
115 .exit.data : { *(.exit.data) }
115 . = ALIGN(_PAGE_SIZE); 116 . = ALIGN(_PAGE_SIZE);
116 __initramfs_start = .; 117 __initramfs_start = .;
117 .init.ramfs : { *(.init.ramfs) } 118 .init.ramfs : { *(.init.ramfs) }
@@ -139,7 +140,6 @@ SECTIONS
139 140
140 /* Sections to be discarded */ 141 /* Sections to be discarded */
141 /DISCARD/ : { 142 /DISCARD/ : {
142 *(.exit.data)
143 *(.exitcall.exit) 143 *(.exitcall.exit)
144 144
145 /* ABI crap starts here */ 145 /* ABI crap starts here */
diff --git a/arch/mips/lasat/sysctl.c b/arch/mips/lasat/sysctl.c
index da35d4555491..12878359f2c8 100644
--- a/arch/mips/lasat/sysctl.c
+++ b/arch/mips/lasat/sysctl.c
@@ -40,12 +40,12 @@ static DEFINE_MUTEX(lasat_info_mutex);
40/* Strategy function to write EEPROM after changing string entry */ 40/* Strategy function to write EEPROM after changing string entry */
41int sysctl_lasatstring(ctl_table *table, int *name, int nlen, 41int sysctl_lasatstring(ctl_table *table, int *name, int nlen,
42 void *oldval, size_t *oldlenp, 42 void *oldval, size_t *oldlenp,
43 void *newval, size_t newlen, void **context) 43 void *newval, size_t newlen)
44{ 44{
45 int r; 45 int r;
46 mutex_lock(&lasat_info_mutex); 46 mutex_lock(&lasat_info_mutex);
47 r = sysctl_string(table, name, 47 r = sysctl_string(table, name,
48 nlen, oldval, oldlenp, newval, newlen, context); 48 nlen, oldval, oldlenp, newval, newlen);
49 if (r < 0) { 49 if (r < 0) {
50 mutex_unlock(&lasat_info_mutex); 50 mutex_unlock(&lasat_info_mutex);
51 return r; 51 return r;
@@ -119,11 +119,11 @@ int proc_dolasatrtc(ctl_table *table, int write, struct file *filp,
119/* Sysctl for setting the IP addresses */ 119/* Sysctl for setting the IP addresses */
120int sysctl_lasat_intvec(ctl_table *table, int *name, int nlen, 120int sysctl_lasat_intvec(ctl_table *table, int *name, int nlen,
121 void *oldval, size_t *oldlenp, 121 void *oldval, size_t *oldlenp,
122 void *newval, size_t newlen, void **context) 122 void *newval, size_t newlen)
123{ 123{
124 int r; 124 int r;
125 mutex_lock(&lasat_info_mutex); 125 mutex_lock(&lasat_info_mutex);
126 r = sysctl_intvec(table, name, nlen, oldval, oldlenp, newval, newlen, context); 126 r = sysctl_intvec(table, name, nlen, oldval, oldlenp, newval, newlen);
127 if (r < 0) { 127 if (r < 0) {
128 mutex_unlock(&lasat_info_mutex); 128 mutex_unlock(&lasat_info_mutex);
129 return r; 129 return r;
@@ -139,14 +139,14 @@ int sysctl_lasat_intvec(ctl_table *table, int *name, int nlen,
139/* Same for RTC */ 139/* Same for RTC */
140int sysctl_lasat_rtc(ctl_table *table, int *name, int nlen, 140int sysctl_lasat_rtc(ctl_table *table, int *name, int nlen,
141 void *oldval, size_t *oldlenp, 141 void *oldval, size_t *oldlenp,
142 void *newval, size_t newlen, void **context) 142 void *newval, size_t newlen)
143{ 143{
144 int r; 144 int r;
145 mutex_lock(&lasat_info_mutex); 145 mutex_lock(&lasat_info_mutex);
146 rtctmp = ds1603_read(); 146 rtctmp = ds1603_read();
147 if (rtctmp < 0) 147 if (rtctmp < 0)
148 rtctmp = 0; 148 rtctmp = 0;
149 r = sysctl_intvec(table, name, nlen, oldval, oldlenp, newval, newlen, context); 149 r = sysctl_intvec(table, name, nlen, oldval, oldlenp, newval, newlen);
150 if (r < 0) { 150 if (r < 0) {
151 mutex_unlock(&lasat_info_mutex); 151 mutex_unlock(&lasat_info_mutex);
152 return r; 152 return r;
@@ -251,13 +251,12 @@ int proc_lasat_ip(ctl_table *table, int write, struct file *filp,
251 251
252static int sysctl_lasat_eeprom_value(ctl_table *table, int *name, int nlen, 252static int sysctl_lasat_eeprom_value(ctl_table *table, int *name, int nlen,
253 void *oldval, size_t *oldlenp, 253 void *oldval, size_t *oldlenp,
254 void *newval, size_t newlen, 254 void *newval, size_t newlen)
255 void **context)
256{ 255{
257 int r; 256 int r;
258 257
259 mutex_lock(&lasat_info_mutex); 258 mutex_lock(&lasat_info_mutex);
260 r = sysctl_intvec(table, name, nlen, oldval, oldlenp, newval, newlen, context); 259 r = sysctl_intvec(table, name, nlen, oldval, oldlenp, newval, newlen);
261 if (r < 0) { 260 if (r < 0) {
262 mutex_unlock(&lasat_info_mutex); 261 mutex_unlock(&lasat_info_mutex);
263 return r; 262 return r;
diff --git a/arch/mips/lib/csum_partial_copy.c b/arch/mips/lib/csum_partial_copy.c
index 1720f2ceeeae..06771040a267 100644
--- a/arch/mips/lib/csum_partial_copy.c
+++ b/arch/mips/lib/csum_partial_copy.c
@@ -7,6 +7,7 @@
7 * Copyright (C) 1998, 1999 Ralf Baechle 7 * Copyright (C) 1998, 1999 Ralf Baechle
8 */ 8 */
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/module.h>
10#include <linux/types.h> 11#include <linux/types.h>
11#include <asm/byteorder.h> 12#include <asm/byteorder.h>
12#include <asm/string.h> 13#include <asm/string.h>
@@ -29,6 +30,8 @@ __wsum csum_partial_copy_nocheck(const void *src,
29 return sum; 30 return sum;
30} 31}
31 32
33EXPORT_SYMBOL(csum_partial_copy_nocheck);
34
32/* 35/*
33 * Copy from userspace and compute checksum. If we catch an exception 36 * Copy from userspace and compute checksum. If we catch an exception
34 * then zero the rest of the buffer. 37 * then zero the rest of the buffer.
diff --git a/arch/mips/mips-boards/malta/Makefile b/arch/mips/mips-boards/malta/Makefile
index 77ee5c6d33c1..b662c75fb28e 100644
--- a/arch/mips/mips-boards/malta/Makefile
+++ b/arch/mips/mips-boards/malta/Makefile
@@ -19,5 +19,5 @@
19# under Linux. 19# under Linux.
20# 20#
21 21
22obj-y := malta_int.o malta_setup.o 22obj-y := malta_int.o malta_mtd.o malta_setup.o
23obj-$(CONFIG_SMP) += malta_smp.o 23obj-$(CONFIG_SMP) += malta_smp.o
diff --git a/arch/mips/mips-boards/malta/malta_setup.c b/arch/mips/mips-boards/malta/malta_setup.c
index 282f3e52eea3..56ea76679cd4 100644
--- a/arch/mips/mips-boards/malta/malta_setup.c
+++ b/arch/mips/mips-boards/malta/malta_setup.c
@@ -21,13 +21,6 @@
21#include <linux/pci.h> 21#include <linux/pci.h>
22#include <linux/screen_info.h> 22#include <linux/screen_info.h>
23 23
24#ifdef CONFIG_MTD
25#include <linux/mtd/partitions.h>
26#include <linux/mtd/physmap.h>
27#include <linux/mtd/mtd.h>
28#include <linux/mtd/map.h>
29#endif
30
31#include <asm/cpu.h> 24#include <asm/cpu.h>
32#include <asm/bootinfo.h> 25#include <asm/bootinfo.h>
33#include <asm/irq.h> 26#include <asm/irq.h>
@@ -58,30 +51,6 @@ struct resource standard_io_resources[] = {
58 { .name = "dma2", .start = 0xc0, .end = 0xdf, .flags = IORESOURCE_BUSY }, 51 { .name = "dma2", .start = 0xc0, .end = 0xdf, .flags = IORESOURCE_BUSY },
59}; 52};
60 53
61#ifdef CONFIG_MTD
62static struct mtd_partition malta_mtd_partitions[] = {
63 {
64 .name = "YAMON",
65 .offset = 0x0,
66 .size = 0x100000,
67 .mask_flags = MTD_WRITEABLE
68 },
69 {
70 .name = "User FS",
71 .offset = 0x100000,
72 .size = 0x2e0000
73 },
74 {
75 .name = "Board Config",
76 .offset = 0x3e0000,
77 .size = 0x020000,
78 .mask_flags = MTD_WRITEABLE
79 }
80};
81
82#define number_partitions (sizeof(malta_mtd_partitions)/sizeof(struct mtd_partition))
83#endif
84
85const char *get_system_type(void) 54const char *get_system_type(void)
86{ 55{
87 return "MIPS Malta"; 56 return "MIPS Malta";
@@ -211,14 +180,6 @@ void __init plat_mem_setup(void)
211#endif 180#endif
212#endif 181#endif
213 182
214#ifdef CONFIG_MTD
215 /*
216 * Support for MTD on Malta. Use the generic physmap driver
217 */
218 physmap_configure(0x1e000000, 0x400000, 4, NULL);
219 physmap_set_partitions(malta_mtd_partitions, number_partitions);
220#endif
221
222 mips_reboot_setup(); 183 mips_reboot_setup();
223 184
224 board_time_init = mips_time_init; 185 board_time_init = mips_time_init;
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index caf807ded514..1f954a238a63 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -32,6 +32,7 @@ void (*local_flush_data_cache_page)(void * addr);
32void (*flush_data_cache_page)(unsigned long addr); 32void (*flush_data_cache_page)(unsigned long addr);
33void (*flush_icache_all)(void); 33void (*flush_icache_all)(void);
34 34
35EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
35EXPORT_SYMBOL(flush_data_cache_page); 36EXPORT_SYMBOL(flush_data_cache_page);
36 37
37#ifdef CONFIG_DMA_NONCOHERENT 38#ifdef CONFIG_DMA_NONCOHERENT
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 9e29ba9205f0..ea2d15370bb7 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -316,7 +316,7 @@ static int __init page_is_ram(unsigned long pagenr)
316void __init paging_init(void) 316void __init paging_init(void)
317{ 317{
318 unsigned long zones_size[MAX_NR_ZONES] = { 0, }; 318 unsigned long zones_size[MAX_NR_ZONES] = { 0, };
319 unsigned long max_dma, high, low; 319 unsigned long max_dma, low;
320#ifndef CONFIG_FLATMEM 320#ifndef CONFIG_FLATMEM
321 unsigned long zholes_size[MAX_NR_ZONES] = { 0, }; 321 unsigned long zholes_size[MAX_NR_ZONES] = { 0, };
322 unsigned long i, j, pfn; 322 unsigned long i, j, pfn;
@@ -331,7 +331,6 @@ void __init paging_init(void)
331 331
332 max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; 332 max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
333 low = max_low_pfn; 333 low = max_low_pfn;
334 high = highend_pfn;
335 334
336#ifdef CONFIG_ISA 335#ifdef CONFIG_ISA
337 if (low < max_dma) 336 if (low < max_dma)
@@ -344,13 +343,13 @@ void __init paging_init(void)
344 zones_size[ZONE_DMA] = low; 343 zones_size[ZONE_DMA] = low;
345#endif 344#endif
346#ifdef CONFIG_HIGHMEM 345#ifdef CONFIG_HIGHMEM
347 if (cpu_has_dc_aliases) { 346 zones_size[ZONE_HIGHMEM] = highend_pfn - highstart_pfn;
348 printk(KERN_WARNING "This processor doesn't support highmem."); 347
349 if (high - low) 348 if (cpu_has_dc_aliases && zones_size[ZONE_HIGHMEM]) {
350 printk(" %ldk highmem ignored", high - low); 349 printk(KERN_WARNING "This processor doesn't support highmem."
351 printk("\n"); 350 " %ldk highmem ignored\n", zones_size[ZONE_HIGHMEM]);
352 } else 351 zones_size[ZONE_HIGHMEM] = 0;
353 zones_size[ZONE_HIGHMEM] = high - low; 352 }
354#endif 353#endif
355 354
356#ifdef CONFIG_FLATMEM 355#ifdef CONFIG_FLATMEM
diff --git a/arch/parisc/hpux/sys_hpux.c b/arch/parisc/hpux/sys_hpux.c
index d88309209f56..04c2ff444396 100644
--- a/arch/parisc/hpux/sys_hpux.c
+++ b/arch/parisc/hpux/sys_hpux.c
@@ -475,7 +475,7 @@ int hpux_sysfs(int opcode, unsigned long arg1, unsigned long arg2)
475 printk(KERN_DEBUG "len of arg1 = %d\n", len); 475 printk(KERN_DEBUG "len of arg1 = %d\n", len);
476 if (len == 0) 476 if (len == 0)
477 return 0; 477 return 0;
478 fsname = (char *) kmalloc(len, GFP_KERNEL); 478 fsname = kmalloc(len, GFP_KERNEL);
479 if ( !fsname ) { 479 if ( !fsname ) {
480 printk(KERN_DEBUG "failed to kmalloc fsname\n"); 480 printk(KERN_DEBUG "failed to kmalloc fsname\n");
481 return 0; 481 return 0;
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index 920bdbf8404f..c10ab47d81fa 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -343,7 +343,7 @@ void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct
343 struct pt_regs *r = &t->thread.regs; 343 struct pt_regs *r = &t->thread.regs;
344 struct pt_regs *r2; 344 struct pt_regs *r2;
345 345
346 r2 = (struct pt_regs *)kmalloc(sizeof(struct pt_regs), GFP_KERNEL); 346 r2 = kmalloc(sizeof(struct pt_regs), GFP_KERNEL);
347 if (!r2) 347 if (!r2)
348 return; 348 return;
349 *r2 = *r; 349 *r2 = *r;
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 56c3c4065eb0..8699dadcd096 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -107,6 +107,11 @@ config AUDIT_ARCH
107 bool 107 bool
108 default y 108 default y
109 109
110config GENERIC_BUG
111 bool
112 default y
113 depends on BUG
114
110config DEFAULT_UIMAGE 115config DEFAULT_UIMAGE
111 bool 116 bool
112 help 117 help
@@ -478,6 +483,7 @@ config PPC_MAPLE
478 select PPC_UDBG_16550 483 select PPC_UDBG_16550
479 select PPC_970_NAP 484 select PPC_970_NAP
480 select PPC_NATIVE 485 select PPC_NATIVE
486 select PPC_RTAS
481 default n 487 default n
482 help 488 help
483 This option enables support for the Maple 970FX Evaluation Board. 489 This option enables support for the Maple 970FX Evaluation Board.
@@ -714,7 +720,7 @@ config FORCE_MAX_ZONEORDER
714 720
715config MATH_EMULATION 721config MATH_EMULATION
716 bool "Math emulation" 722 bool "Math emulation"
717 depends on 4xx || 8xx || E200 || E500 723 depends on 4xx || 8xx || E200 || PPC_83xx || E500
718 ---help--- 724 ---help---
719 Some PowerPC chips designed for embedded applications do not have 725 Some PowerPC chips designed for embedded applications do not have
720 a floating-point unit and therefore do not implement the 726 a floating-point unit and therefore do not implement the
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index f2d888e014a9..70ed61337f5c 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -157,6 +157,7 @@ CONFIG_SPU_BASE=y
157CONFIG_PS3_HTAB_SIZE=20 157CONFIG_PS3_HTAB_SIZE=20
158CONFIG_PS3_DYNAMIC_DMA=y 158CONFIG_PS3_DYNAMIC_DMA=y
159CONFIG_PS3_USE_LPAR_ADDR=y 159CONFIG_PS3_USE_LPAR_ADDR=y
160CONFIG_PS3_VUART=y
160 161
161# 162#
162# Kernel options 163# Kernel options
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 4fe53d08ab81..d2ded19e4064 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -77,6 +77,7 @@ endif
77 77
78ifeq ($(CONFIG_PPC_ISERIES),y) 78ifeq ($(CONFIG_PPC_ISERIES),y)
79extra-y += lparmap.s 79extra-y += lparmap.s
80$(obj)/head_64.o: $(obj)/lparmap.s
80AFLAGS_head_64.o += -I$(obj) 81AFLAGS_head_64.o += -I$(obj)
81endif 82endif
82 83
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 9d1614c3ce67..b742013bb9da 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -833,7 +833,7 @@ static struct cpu_spec cpu_specs[] = {
833 .pvr_mask = 0x7fff0000, 833 .pvr_mask = 0x7fff0000,
834 .pvr_value = 0x00840000, 834 .pvr_value = 0x00840000,
835 .cpu_name = "e300c2", 835 .cpu_name = "e300c2",
836 .cpu_features = CPU_FTRS_E300, 836 .cpu_features = CPU_FTRS_E300C2,
837 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 837 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
838 .icache_bsize = 32, 838 .icache_bsize = 32,
839 .dcache_bsize = 32, 839 .dcache_bsize = 32,
@@ -1136,8 +1136,7 @@ static struct cpu_spec cpu_specs[] = {
1136 .pvr_mask = 0xff000fff, 1136 .pvr_mask = 0xff000fff,
1137 .pvr_value = 0x53000890, 1137 .pvr_value = 0x53000890,
1138 .cpu_name = "440SPe Rev. A", 1138 .cpu_name = "440SPe Rev. A",
1139 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 1139 .cpu_features = CPU_FTRS_44X,
1140 CPU_FTR_USE_TB,
1141 .cpu_user_features = COMMON_USER_BOOKE, 1140 .cpu_user_features = COMMON_USER_BOOKE,
1142 .icache_bsize = 32, 1141 .icache_bsize = 32,
1143 .dcache_bsize = 32, 1142 .dcache_bsize = 32,
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index d88e182e40b3..9417cf5b4b7e 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -437,6 +437,13 @@ Alignment:
437/* Floating-point unavailable */ 437/* Floating-point unavailable */
438 . = 0x800 438 . = 0x800
439FPUnavailable: 439FPUnavailable:
440BEGIN_FTR_SECTION
441/*
442 * Certain Freescale cores don't have a FPU and treat fp instructions
443 * as a FP Unavailable exception. Redirect to illegal/emulation handling.
444 */
445 b ProgramCheck
446END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
440 EXCEPTION_PROLOG 447 EXCEPTION_PROLOG
441 bne load_up_fpu /* if from user, just load it up */ 448 bne load_up_fpu /* if from user, just load it up */
442 addi r3,r1,STACK_FRAME_OVERHEAD 449 addi r3,r1,STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index e2c3c6a85f33..8339fd609de0 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -23,6 +23,7 @@
23#include <linux/string.h> 23#include <linux/string.h>
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/cache.h> 25#include <linux/cache.h>
26#include <linux/bug.h>
26 27
27#include "setup.h" 28#include "setup.h"
28 29
@@ -290,23 +291,11 @@ int module_finalize(const Elf_Ehdr *hdr,
290 struct module *me) 291 struct module *me)
291{ 292{
292 const Elf_Shdr *sect; 293 const Elf_Shdr *sect;
294 int err;
293 295
294 me->arch.bug_table = NULL; 296 err = module_bug_finalize(hdr, sechdrs, me);
295 me->arch.num_bugs = 0; 297 if (err) /* never true, currently */
296 298 return err;
297 /* Find the __bug_table section, if present */
298 sect = find_section(hdr, sechdrs, "__bug_table");
299 if (sect != NULL) {
300 me->arch.bug_table = (void *) sect->sh_addr;
301 me->arch.num_bugs = sect->sh_size / sizeof(struct bug_entry);
302 }
303
304 /*
305 * Strictly speaking this should have a spinlock to protect against
306 * traversals, but since we only traverse on BUG()s, a spinlock
307 * could potentially lead to deadlock and thus be counter-productive.
308 */
309 list_add(&me->arch.bug_list, &module_bug_list);
310 299
311 /* Apply feature fixups */ 300 /* Apply feature fixups */
312 sect = find_section(hdr, sechdrs, "__ftr_fixup"); 301 sect = find_section(hdr, sechdrs, "__ftr_fixup");
@@ -320,7 +309,7 @@ int module_finalize(const Elf_Ehdr *hdr,
320 309
321void module_arch_cleanup(struct module *mod) 310void module_arch_cleanup(struct module *mod)
322{ 311{
323 list_del(&mod->arch.bug_list); 312 module_bug_cleanup(mod);
324} 313}
325 314
326struct bug_entry *module_find_bug(unsigned long bugaddr) 315struct bug_entry *module_find_bug(unsigned long bugaddr)
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 8dd1f0aae5d6..75c7c4f19280 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -20,6 +20,7 @@
20#include <linux/moduleloader.h> 20#include <linux/moduleloader.h>
21#include <linux/err.h> 21#include <linux/err.h>
22#include <linux/vmalloc.h> 22#include <linux/vmalloc.h>
23#include <linux/bug.h>
23#include <asm/module.h> 24#include <asm/module.h>
24#include <asm/uaccess.h> 25#include <asm/uaccess.h>
25#include <asm/firmware.h> 26#include <asm/firmware.h>
@@ -439,23 +440,11 @@ int module_finalize(const Elf_Ehdr *hdr,
439 const Elf_Shdr *sechdrs, struct module *me) 440 const Elf_Shdr *sechdrs, struct module *me)
440{ 441{
441 const Elf_Shdr *sect; 442 const Elf_Shdr *sect;
443 int err;
442 444
443 me->arch.bug_table = NULL; 445 err = module_bug_finalize(hdr, sechdrs, me);
444 me->arch.num_bugs = 0; 446 if (err)
445 447 return err;
446 /* Find the __bug_table section, if present */
447 sect = find_section(hdr, sechdrs, "__bug_table");
448 if (sect != NULL) {
449 me->arch.bug_table = (void *) sect->sh_addr;
450 me->arch.num_bugs = sect->sh_size / sizeof(struct bug_entry);
451 }
452
453 /*
454 * Strictly speaking this should have a spinlock to protect against
455 * traversals, but since we only traverse on BUG()s, a spinlock
456 * could potentially lead to deadlock and thus be counter-productive.
457 */
458 list_add(&me->arch.bug_list, &module_bug_list);
459 448
460 /* Apply feature fixups */ 449 /* Apply feature fixups */
461 sect = find_section(hdr, sechdrs, "__ftr_fixup"); 450 sect = find_section(hdr, sechdrs, "__ftr_fixup");
@@ -475,7 +464,7 @@ int module_finalize(const Elf_Ehdr *hdr,
475 464
476void module_arch_cleanup(struct module *mod) 465void module_arch_cleanup(struct module *mod)
477{ 466{
478 list_del(&mod->arch.bug_list); 467 module_bug_cleanup(mod);
479} 468}
480 469
481struct bug_entry *module_find_bug(unsigned long bugaddr) 470struct bug_entry *module_find_bug(unsigned long bugaddr)
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 6960f090991e..869cebbba967 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -505,7 +505,7 @@ static int nvram_scan_partitions(void)
505 return -ENODEV; 505 return -ENODEV;
506 total_size = ppc_md.nvram_size(); 506 total_size = ppc_md.nvram_size();
507 507
508 header = (char *) kmalloc(NVRAM_HEADER_LEN, GFP_KERNEL); 508 header = kmalloc(NVRAM_HEADER_LEN, GFP_KERNEL);
509 if (!header) { 509 if (!header) {
510 printk(KERN_ERR "nvram_scan_partitions: Failed kmalloc\n"); 510 printk(KERN_ERR "nvram_scan_partitions: Failed kmalloc\n");
511 return -ENOMEM; 511 return -ENOMEM;
@@ -574,7 +574,7 @@ static int __init nvram_init(void)
574 } 574 }
575 575
576 /* initialize our anchor for the nvram partition list */ 576 /* initialize our anchor for the nvram partition list */
577 nvram_part = (struct nvram_partition *) kmalloc(sizeof(struct nvram_partition), GFP_KERNEL); 577 nvram_part = kmalloc(sizeof(struct nvram_partition), GFP_KERNEL);
578 if (!nvram_part) { 578 if (!nvram_part) {
579 printk(KERN_ERR "nvram_init: Failed kmalloc\n"); 579 printk(KERN_ERR "nvram_init: Failed kmalloc\n");
580 return -ENOMEM; 580 return -ENOMEM;
diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c
index 8a06724e029e..e921514e655b 100644
--- a/arch/powerpc/kernel/of_device.c
+++ b/arch/powerpc/kernel/of_device.c
@@ -109,9 +109,7 @@ int of_device_register(struct of_device *ofdev)
109 if (rc) 109 if (rc)
110 return rc; 110 return rc;
111 111
112 device_create_file(&ofdev->dev, &dev_attr_devspec); 112 return device_create_file(&ofdev->dev, &dev_attr_devspec);
113
114 return 0;
115} 113}
116 114
117void of_device_unregister(struct of_device *ofdev) 115void of_device_unregister(struct of_device *ofdev)
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index b3189d0161b8..3002ea3a61a2 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -169,7 +169,7 @@ static void of_platform_make_bus_id(struct of_device *dev)
169 char *name = dev->dev.bus_id; 169 char *name = dev->dev.bus_id;
170 const u32 *reg; 170 const u32 *reg;
171 u64 addr; 171 u64 addr;
172 long magic; 172 int magic;
173 173
174 /* 174 /*
175 * If it's a DCR based device, use 'd' for native DCRs 175 * If it's a DCR based device, use 'd' for native DCRs
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 2f54cd81dea5..2847cd51a2d7 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -670,7 +670,7 @@ pcibios_make_OF_bus_map(void)
670 struct pci_controller* hose; 670 struct pci_controller* hose;
671 struct property *map_prop; 671 struct property *map_prop;
672 672
673 pci_to_OF_bus_map = (u8*)kmalloc(pci_bus_count, GFP_KERNEL); 673 pci_to_OF_bus_map = kmalloc(pci_bus_count, GFP_KERNEL);
674 if (!pci_to_OF_bus_map) { 674 if (!pci_to_OF_bus_map) {
675 printk(KERN_ERR "Can't allocate OF bus map !\n"); 675 printk(KERN_ERR "Can't allocate OF bus map !\n");
676 return; 676 return;
@@ -736,25 +736,51 @@ scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void*
736 return NULL; 736 return NULL;
737} 737}
738 738
739static int 739static struct device_node *scan_OF_for_pci_dev(struct device_node *parent,
740scan_OF_pci_childs_iterator(struct device_node* node, void* data) 740 unsigned int devfn)
741{ 741{
742 const unsigned int *reg; 742 struct device_node *np = NULL;
743 u8* fdata = (u8*)data; 743 const u32 *reg;
744 744 unsigned int psize;
745 reg = get_property(node, "reg", NULL); 745
746 if (reg && ((reg[0] >> 8) & 0xff) == fdata[1] 746 while ((np = of_get_next_child(parent, np)) != NULL) {
747 && ((reg[0] >> 16) & 0xff) == fdata[0]) 747 reg = get_property(np, "reg", &psize);
748 return 1; 748 if (reg == NULL || psize < 4)
749 return 0; 749 continue;
750 if (((reg[0] >> 8) & 0xff) == devfn)
751 return np;
752 }
753 return NULL;
750} 754}
751 755
752static struct device_node* 756
753scan_OF_childs_for_device(struct device_node* node, u8 bus, u8 dev_fn) 757static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus)
754{ 758{
755 u8 filter_data[2] = {bus, dev_fn}; 759 struct device_node *parent, *np;
760
761 /* Are we a root bus ? */
762 if (bus->self == NULL || bus->parent == NULL) {
763 struct pci_controller *hose = pci_bus_to_hose(bus->number);
764 if (hose == NULL)
765 return NULL;
766 return of_node_get(hose->arch_data);
767 }
768
769 /* not a root bus, we need to get our parent */
770 parent = scan_OF_for_pci_bus(bus->parent);
771 if (parent == NULL)
772 return NULL;
773
774 /* now iterate for children for a match */
775 np = scan_OF_for_pci_dev(parent, bus->self->devfn);
776 of_node_put(parent);
756 777
757 return scan_OF_pci_childs(node, scan_OF_pci_childs_iterator, filter_data); 778 /* sanity check */
779 if (strcmp(np->type, "pci") != 0)
780 printk(KERN_WARNING "pci: wrong type \"%s\" for bridge %s\n",
781 np->type, np->full_name);
782
783 return np;
758} 784}
759 785
760/* 786/*
@@ -763,43 +789,25 @@ scan_OF_childs_for_device(struct device_node* node, u8 bus, u8 dev_fn)
763struct device_node * 789struct device_node *
764pci_busdev_to_OF_node(struct pci_bus *bus, int devfn) 790pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
765{ 791{
766 struct pci_controller *hose; 792 struct device_node *parent, *np;
767 struct device_node *node;
768 int busnr;
769 793
770 if (!have_of) 794 if (!have_of)
771 return NULL; 795 return NULL;
772
773 /* Lookup the hose */
774 busnr = bus->number;
775 hose = pci_bus_to_hose(busnr);
776 if (!hose)
777 return NULL;
778 796
779 /* Check it has an OF node associated */ 797 DBG("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn);
780 node = (struct device_node *) hose->arch_data; 798 parent = scan_OF_for_pci_bus(bus);
781 if (!node) 799 if (parent == NULL)
782 return NULL; 800 return NULL;
783 801 DBG(" parent is %s\n", parent ? parent->full_name : "<NULL>");
784 /* Fixup bus number according to what OF think it is. */ 802 np = scan_OF_for_pci_dev(parent, devfn);
785#ifdef CONFIG_PPC_PMAC 803 of_node_put(parent);
786 /* The G5 need a special case here. Basically, we don't remap all 804 DBG(" result is %s\n", np ? np->full_name : "<NULL>");
787 * busses on it so we don't create the pci-OF-map. However, we do 805
788 * remap the AGP bus and so have to deal with it. A future better 806 /* XXX most callers don't release the returned node
789 * fix has to be done by making the remapping per-host and always 807 * mostly because ppc64 doesn't increase the refcount,
790 * filling the pci_to_OF map. --BenH 808 * we need to fix that.
791 */ 809 */
792 if (machine_is(powermac) && busnr >= 0xf0) 810 return np;
793 busnr -= 0xf0;
794 else
795#endif
796 if (pci_to_OF_bus_map)
797 busnr = pci_to_OF_bus_map[busnr];
798 if (busnr == 0xff)
799 return NULL;
800
801 /* Now, lookup childs of the hose */
802 return scan_OF_childs_for_device(node->child, busnr, devfn);
803} 811}
804EXPORT_SYMBOL(pci_busdev_to_OF_node); 812EXPORT_SYMBOL(pci_busdev_to_OF_node);
805 813
@@ -1544,7 +1552,7 @@ pci_resource_to_bus(struct pci_dev *pdev, struct resource *res)
1544 1552
1545 1553
1546static struct resource *__pci_mmap_make_offset(struct pci_dev *dev, 1554static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
1547 unsigned long *offset, 1555 resource_size_t *offset,
1548 enum pci_mmap_state mmap_state) 1556 enum pci_mmap_state mmap_state)
1549{ 1557{
1550 struct pci_controller *hose = pci_bus_to_hose(dev->bus->number); 1558 struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
@@ -1556,7 +1564,9 @@ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
1556 1564
1557 /* If memory, add on the PCI bridge address offset */ 1565 /* If memory, add on the PCI bridge address offset */
1558 if (mmap_state == pci_mmap_mem) { 1566 if (mmap_state == pci_mmap_mem) {
1567#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
1559 *offset += hose->pci_mem_offset; 1568 *offset += hose->pci_mem_offset;
1569#endif
1560 res_bit = IORESOURCE_MEM; 1570 res_bit = IORESOURCE_MEM;
1561 } else { 1571 } else {
1562 io_offset = hose->io_base_virt - (void __iomem *)_IO_BASE; 1572 io_offset = hose->io_base_virt - (void __iomem *)_IO_BASE;
@@ -1624,9 +1634,6 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
1624 else 1634 else
1625 prot |= _PAGE_GUARDED; 1635 prot |= _PAGE_GUARDED;
1626 1636
1627 printk("PCI map for %s:%llx, prot: %lx\n", pci_name(dev),
1628 (unsigned long long)rp->start, prot);
1629
1630 return __pgprot(prot); 1637 return __pgprot(prot);
1631} 1638}
1632 1639
@@ -1695,7 +1702,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
1695 enum pci_mmap_state mmap_state, 1702 enum pci_mmap_state mmap_state,
1696 int write_combine) 1703 int write_combine)
1697{ 1704{
1698 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; 1705 resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT;
1699 struct resource *rp; 1706 struct resource *rp;
1700 int ret; 1707 int ret;
1701 1708
@@ -1808,22 +1815,42 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
1808 resource_size_t *start, resource_size_t *end) 1815 resource_size_t *start, resource_size_t *end)
1809{ 1816{
1810 struct pci_controller *hose = pci_bus_to_hose(dev->bus->number); 1817 struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
1811 unsigned long offset = 0; 1818 resource_size_t offset = 0;
1812 1819
1813 if (hose == NULL) 1820 if (hose == NULL)
1814 return; 1821 return;
1815 1822
1816 if (rsrc->flags & IORESOURCE_IO) 1823 if (rsrc->flags & IORESOURCE_IO)
1817 offset = (void __iomem *)_IO_BASE - hose->io_base_virt 1824 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
1818 + hose->io_base_phys; 1825
1826 /* We pass a fully fixed up address to userland for MMIO instead of
1827 * a BAR value because X is lame and expects to be able to use that
1828 * to pass to /dev/mem !
1829 *
1830 * That means that we'll have potentially 64 bits values where some
1831 * userland apps only expect 32 (like X itself since it thinks only
1832 * Sparc has 64 bits MMIO) but if we don't do that, we break it on
1833 * 32 bits CHRPs :-(
1834 *
1835 * Hopefully, the sysfs insterface is immune to that gunk. Once X
1836 * has been fixed (and the fix spread enough), we can re-enable the
1837 * 2 lines below and pass down a BAR value to userland. In that case
1838 * we'll also have to re-enable the matching code in
1839 * __pci_mmap_make_offset().
1840 *
1841 * BenH.
1842 */
1843#if 0
1844 else if (rsrc->flags & IORESOURCE_MEM)
1845 offset = hose->pci_mem_offset;
1846#endif
1819 1847
1820 *start = rsrc->start + offset; 1848 *start = rsrc->start - offset;
1821 *end = rsrc->end + offset; 1849 *end = rsrc->end - offset;
1822} 1850}
1823 1851
1824void __init 1852void __init pci_init_resource(struct resource *res, resource_size_t start,
1825pci_init_resource(struct resource *res, unsigned long start, unsigned long end, 1853 resource_size_t end, int flags, char *name)
1826 int flags, char *name)
1827{ 1854{
1828 res->start = start; 1855 res->start = start;
1829 res->end = end; 1856 res->end = end;
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 6fa9a0a5c8db..a6b7692c7269 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -682,7 +682,7 @@ int pci_proc_domain(struct pci_bus *bus)
682 * Returns negative error code on failure, zero on success. 682 * Returns negative error code on failure, zero on success.
683 */ 683 */
684static struct resource *__pci_mmap_make_offset(struct pci_dev *dev, 684static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
685 unsigned long *offset, 685 resource_size_t *offset,
686 enum pci_mmap_state mmap_state) 686 enum pci_mmap_state mmap_state)
687{ 687{
688 struct pci_controller *hose = pci_bus_to_host(dev->bus); 688 struct pci_controller *hose = pci_bus_to_host(dev->bus);
@@ -694,7 +694,9 @@ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
694 694
695 /* If memory, add on the PCI bridge address offset */ 695 /* If memory, add on the PCI bridge address offset */
696 if (mmap_state == pci_mmap_mem) { 696 if (mmap_state == pci_mmap_mem) {
697#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
697 *offset += hose->pci_mem_offset; 698 *offset += hose->pci_mem_offset;
699#endif
698 res_bit = IORESOURCE_MEM; 700 res_bit = IORESOURCE_MEM;
699 } else { 701 } else {
700 io_offset = (unsigned long)hose->io_base_virt - pci_io_base; 702 io_offset = (unsigned long)hose->io_base_virt - pci_io_base;
@@ -762,9 +764,6 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
762 else 764 else
763 prot |= _PAGE_GUARDED; 765 prot |= _PAGE_GUARDED;
764 766
765 printk(KERN_DEBUG "PCI map for %s:%lx, prot: %lx\n", pci_name(dev), rp->start,
766 prot);
767
768 return __pgprot(prot); 767 return __pgprot(prot);
769} 768}
770 769
@@ -832,7 +831,7 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
832int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, 831int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
833 enum pci_mmap_state mmap_state, int write_combine) 832 enum pci_mmap_state mmap_state, int write_combine)
834{ 833{
835 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; 834 resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT;
836 struct resource *rp; 835 struct resource *rp;
837 int ret; 836 int ret;
838 837
@@ -1333,20 +1332,41 @@ EXPORT_SYMBOL(pci_read_irq_line);
1333 1332
1334void pci_resource_to_user(const struct pci_dev *dev, int bar, 1333void pci_resource_to_user(const struct pci_dev *dev, int bar,
1335 const struct resource *rsrc, 1334 const struct resource *rsrc,
1336 u64 *start, u64 *end) 1335 resource_size_t *start, resource_size_t *end)
1337{ 1336{
1338 struct pci_controller *hose = pci_bus_to_host(dev->bus); 1337 struct pci_controller *hose = pci_bus_to_host(dev->bus);
1339 unsigned long offset = 0; 1338 resource_size_t offset = 0;
1340 1339
1341 if (hose == NULL) 1340 if (hose == NULL)
1342 return; 1341 return;
1343 1342
1344 if (rsrc->flags & IORESOURCE_IO) 1343 if (rsrc->flags & IORESOURCE_IO)
1345 offset = pci_io_base - (unsigned long)hose->io_base_virt + 1344 offset = (unsigned long)hose->io_base_virt - pci_io_base;
1346 hose->io_base_phys; 1345
1346 /* We pass a fully fixed up address to userland for MMIO instead of
1347 * a BAR value because X is lame and expects to be able to use that
1348 * to pass to /dev/mem !
1349 *
1350 * That means that we'll have potentially 64 bits values where some
1351 * userland apps only expect 32 (like X itself since it thinks only
1352 * Sparc has 64 bits MMIO) but if we don't do that, we break it on
1353 * 32 bits CHRPs :-(
1354 *
1355 * Hopefully, the sysfs insterface is immune to that gunk. Once X
1356 * has been fixed (and the fix spread enough), we can re-enable the
1357 * 2 lines below and pass down a BAR value to userland. In that case
1358 * we'll also have to re-enable the matching code in
1359 * __pci_mmap_make_offset().
1360 *
1361 * BenH.
1362 */
1363#if 0
1364 else if (rsrc->flags & IORESOURCE_MEM)
1365 offset = hose->pci_mem_offset;
1366#endif
1347 1367
1348 *start = rsrc->start + offset; 1368 *start = rsrc->start - offset;
1349 *end = rsrc->end + offset; 1369 *end = rsrc->end - offset;
1350} 1370}
1351 1371
1352struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node) 1372struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 9179f0739ea2..95776b6af4e2 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -208,7 +208,7 @@ EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */
208extern long *intercept_table; 208extern long *intercept_table;
209EXPORT_SYMBOL(intercept_table); 209EXPORT_SYMBOL(intercept_table);
210#endif /* CONFIG_PPC_STD_MMU_32 */ 210#endif /* CONFIG_PPC_STD_MMU_32 */
211#if defined(CONFIG_40x) || defined(CONFIG_BOOKE) 211#ifdef CONFIG_PPC_DCR_NATIVE
212EXPORT_SYMBOL(__mtdcr); 212EXPORT_SYMBOL(__mtdcr);
213EXPORT_SYMBOL(__mfdcr); 213EXPORT_SYMBOL(__mfdcr);
214#endif 214#endif
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index c18dbe77fdc2..1fc732a552db 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -804,6 +804,56 @@ static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
804 return of_read_ulong(p, s); 804 return of_read_ulong(p, s);
805} 805}
806 806
807#ifdef CONFIG_PPC_PSERIES
808/*
809 * Interpret the ibm,dynamic-memory property in the
810 * /ibm,dynamic-reconfiguration-memory node.
811 * This contains a list of memory blocks along with NUMA affinity
812 * information.
813 */
814static int __init early_init_dt_scan_drconf_memory(unsigned long node)
815{
816 cell_t *dm, *ls;
817 unsigned long l, n;
818 unsigned long base, size, lmb_size, flags;
819
820 ls = (cell_t *)of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
821 if (ls == NULL || l < dt_root_size_cells * sizeof(cell_t))
822 return 0;
823 lmb_size = dt_mem_next_cell(dt_root_size_cells, &ls);
824
825 dm = (cell_t *)of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l);
826 if (dm == NULL || l < sizeof(cell_t))
827 return 0;
828
829 n = *dm++; /* number of entries */
830 if (l < (n * (dt_root_addr_cells + 4) + 1) * sizeof(cell_t))
831 return 0;
832
833 for (; n != 0; --n) {
834 base = dt_mem_next_cell(dt_root_addr_cells, &dm);
835 flags = dm[3];
836 /* skip DRC index, pad, assoc. list index, flags */
837 dm += 4;
838 /* skip this block if the reserved bit is set in flags (0x80)
839 or if the block is not assigned to this partition (0x8) */
840 if ((flags & 0x80) || !(flags & 0x8))
841 continue;
842 size = lmb_size;
843 if (iommu_is_off) {
844 if (base >= 0x80000000ul)
845 continue;
846 if ((base + size) > 0x80000000ul)
847 size = 0x80000000ul - base;
848 }
849 lmb_add(base, size);
850 }
851 lmb_dump_all();
852 return 0;
853}
854#else
855#define early_init_dt_scan_drconf_memory(node) 0
856#endif /* CONFIG_PPC_PSERIES */
807 857
808static int __init early_init_dt_scan_memory(unsigned long node, 858static int __init early_init_dt_scan_memory(unsigned long node,
809 const char *uname, int depth, void *data) 859 const char *uname, int depth, void *data)
@@ -812,6 +862,11 @@ static int __init early_init_dt_scan_memory(unsigned long node,
812 cell_t *reg, *endp; 862 cell_t *reg, *endp;
813 unsigned long l; 863 unsigned long l;
814 864
865 /* Look for the ibm,dynamic-reconfiguration-memory node */
866 if (depth == 1 &&
867 strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0)
868 return early_init_dt_scan_drconf_memory(node);
869
815 /* We are scanning "memory" nodes only */ 870 /* We are scanning "memory" nodes only */
816 if (type == NULL) { 871 if (type == NULL) {
817 /* 872 /*
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 46cf32670ddb..520ef42f642e 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -679,7 +679,7 @@ static unsigned char ibm_architecture_vec[] = {
679 /* option vector 5: PAPR/OF options */ 679 /* option vector 5: PAPR/OF options */
680 3 - 2, /* length */ 680 3 - 2, /* length */
681 0, /* don't ignore, don't halt */ 681 0, /* don't ignore, don't halt */
682 OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES, 682 OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY,
683}; 683};
684 684
685/* Old method - ELF header with PT_NOTE sections */ 685/* Old method - ELF header with PT_NOTE sections */
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 387ed0d9ad61..76b5d7ebdcc6 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -303,6 +303,12 @@ int rtas_token(const char *service)
303} 303}
304EXPORT_SYMBOL(rtas_token); 304EXPORT_SYMBOL(rtas_token);
305 305
306int rtas_service_present(const char *service)
307{
308 return rtas_token(service) != RTAS_UNKNOWN_SERVICE;
309}
310EXPORT_SYMBOL(rtas_service_present);
311
306#ifdef CONFIG_RTAS_ERROR_LOGGING 312#ifdef CONFIG_RTAS_ERROR_LOGGING
307/* 313/*
308 * Return the firmware-specified size of the error log buffer 314 * Return the firmware-specified size of the error log buffer
@@ -810,32 +816,6 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
810 return 0; 816 return 0;
811} 817}
812 818
813#ifdef CONFIG_HOTPLUG_CPU
814/* This version can't take the spinlock, because it never returns */
815static struct rtas_args rtas_stop_self_args = {
816 /* The token is initialized for real in setup_system() */
817 .token = RTAS_UNKNOWN_SERVICE,
818 .nargs = 0,
819 .nret = 1,
820 .rets = &rtas_stop_self_args.args[0],
821};
822
823void rtas_stop_self(void)
824{
825 struct rtas_args *rtas_args = &rtas_stop_self_args;
826
827 local_irq_disable();
828
829 BUG_ON(rtas_args->token == RTAS_UNKNOWN_SERVICE);
830
831 printk("cpu %u (hwid %u) Ready to die...\n",
832 smp_processor_id(), hard_smp_processor_id());
833 enter_rtas(__pa(rtas_args));
834
835 panic("Alas, I survived.\n");
836}
837#endif
838
839/* 819/*
840 * Call early during boot, before mem init or bootmem, to retrieve the RTAS 820 * Call early during boot, before mem init or bootmem, to retrieve the RTAS
841 * informations from the device-tree and allocate the RMO buffer for userland 821 * informations from the device-tree and allocate the RMO buffer for userland
@@ -880,9 +860,6 @@ void __init rtas_initialize(void)
880#endif 860#endif
881 rtas_rmo_buf = lmb_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region); 861 rtas_rmo_buf = lmb_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);
882 862
883#ifdef CONFIG_HOTPLUG_CPU
884 rtas_stop_self_args.token = rtas_token("stop-self");
885#endif /* CONFIG_HOTPLUG_CPU */
886#ifdef CONFIG_RTAS_ERROR_LOGGING 863#ifdef CONFIG_RTAS_ERROR_LOGGING
887 rtas_last_error_token = rtas_token("rtas-last-error"); 864 rtas_last_error_token = rtas_token("rtas-last-error");
888#endif 865#endif
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 63ed265b7f09..400ab2b946e7 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -181,6 +181,8 @@ SYSFS_PMCSETUP(pmc6, SPRN_PMC6);
181SYSFS_PMCSETUP(pmc7, SPRN_PMC7); 181SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
182SYSFS_PMCSETUP(pmc8, SPRN_PMC8); 182SYSFS_PMCSETUP(pmc8, SPRN_PMC8);
183SYSFS_PMCSETUP(purr, SPRN_PURR); 183SYSFS_PMCSETUP(purr, SPRN_PURR);
184SYSFS_PMCSETUP(spurr, SPRN_SPURR);
185SYSFS_PMCSETUP(dscr, SPRN_DSCR);
184 186
185static SYSDEV_ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0); 187static SYSDEV_ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0);
186static SYSDEV_ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1); 188static SYSDEV_ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1);
@@ -194,6 +196,8 @@ static SYSDEV_ATTR(pmc6, 0600, show_pmc6, store_pmc6);
194static SYSDEV_ATTR(pmc7, 0600, show_pmc7, store_pmc7); 196static SYSDEV_ATTR(pmc7, 0600, show_pmc7, store_pmc7);
195static SYSDEV_ATTR(pmc8, 0600, show_pmc8, store_pmc8); 197static SYSDEV_ATTR(pmc8, 0600, show_pmc8, store_pmc8);
196static SYSDEV_ATTR(purr, 0600, show_purr, NULL); 198static SYSDEV_ATTR(purr, 0600, show_purr, NULL);
199static SYSDEV_ATTR(spurr, 0600, show_spurr, NULL);
200static SYSDEV_ATTR(dscr, 0600, show_dscr, store_dscr);
197 201
198static void register_cpu_online(unsigned int cpu) 202static void register_cpu_online(unsigned int cpu)
199{ 203{
@@ -231,6 +235,12 @@ static void register_cpu_online(unsigned int cpu)
231 235
232 if (cpu_has_feature(CPU_FTR_PURR)) 236 if (cpu_has_feature(CPU_FTR_PURR))
233 sysdev_create_file(s, &attr_purr); 237 sysdev_create_file(s, &attr_purr);
238
239 if (cpu_has_feature(CPU_FTR_SPURR))
240 sysdev_create_file(s, &attr_spurr);
241
242 if (cpu_has_feature(CPU_FTR_DSCR))
243 sysdev_create_file(s, &attr_dscr);
234} 244}
235 245
236#ifdef CONFIG_HOTPLUG_CPU 246#ifdef CONFIG_HOTPLUG_CPU
@@ -272,6 +282,12 @@ static void unregister_cpu_online(unsigned int cpu)
272 282
273 if (cpu_has_feature(CPU_FTR_PURR)) 283 if (cpu_has_feature(CPU_FTR_PURR))
274 sysdev_remove_file(s, &attr_purr); 284 sysdev_remove_file(s, &attr_purr);
285
286 if (cpu_has_feature(CPU_FTR_SPURR))
287 sysdev_remove_file(s, &attr_spurr);
288
289 if (cpu_has_feature(CPU_FTR_DSCR))
290 sysdev_remove_file(s, &attr_dscr);
275} 291}
276#endif /* CONFIG_HOTPLUG_CPU */ 292#endif /* CONFIG_HOTPLUG_CPU */
277 293
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 0d4e203fa7a0..535f50665647 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -32,6 +32,7 @@
32#include <linux/kprobes.h> 32#include <linux/kprobes.h>
33#include <linux/kexec.h> 33#include <linux/kexec.h>
34#include <linux/backlight.h> 34#include <linux/backlight.h>
35#include <linux/bug.h>
35 36
36#include <asm/kdebug.h> 37#include <asm/kdebug.h>
37#include <asm/pgtable.h> 38#include <asm/pgtable.h>
@@ -727,54 +728,9 @@ static int emulate_instruction(struct pt_regs *regs)
727 return -EINVAL; 728 return -EINVAL;
728} 729}
729 730
730/* 731int is_valid_bugaddr(unsigned long addr)
731 * Look through the list of trap instructions that are used for BUG(),
732 * BUG_ON() and WARN_ON() and see if we hit one. At this point we know
733 * that the exception was caused by a trap instruction of some kind.
734 * Returns 1 if we should continue (i.e. it was a WARN_ON) or 0
735 * otherwise.
736 */
737extern struct bug_entry __start___bug_table[], __stop___bug_table[];
738
739#ifndef CONFIG_MODULES
740#define module_find_bug(x) NULL
741#endif
742
743struct bug_entry *find_bug(unsigned long bugaddr)
744{ 732{
745 struct bug_entry *bug; 733 return is_kernel_addr(addr);
746
747 for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
748 if (bugaddr == bug->bug_addr)
749 return bug;
750 return module_find_bug(bugaddr);
751}
752
753static int check_bug_trap(struct pt_regs *regs)
754{
755 struct bug_entry *bug;
756 unsigned long addr;
757
758 if (regs->msr & MSR_PR)
759 return 0; /* not in kernel */
760 addr = regs->nip; /* address of trap instruction */
761 if (addr < PAGE_OFFSET)
762 return 0;
763 bug = find_bug(regs->nip);
764 if (bug == NULL)
765 return 0;
766 if (bug->line & BUG_WARNING_TRAP) {
767 /* this is a WARN_ON rather than BUG/BUG_ON */
768 printk(KERN_ERR "Badness in %s at %s:%ld\n",
769 bug->function, bug->file,
770 bug->line & ~BUG_WARNING_TRAP);
771 dump_stack();
772 return 1;
773 }
774 printk(KERN_CRIT "kernel BUG in %s at %s:%ld!\n",
775 bug->function, bug->file, bug->line);
776
777 return 0;
778} 734}
779 735
780void __kprobes program_check_exception(struct pt_regs *regs) 736void __kprobes program_check_exception(struct pt_regs *regs)
@@ -782,6 +738,8 @@ void __kprobes program_check_exception(struct pt_regs *regs)
782 unsigned int reason = get_reason(regs); 738 unsigned int reason = get_reason(regs);
783 extern int do_mathemu(struct pt_regs *regs); 739 extern int do_mathemu(struct pt_regs *regs);
784 740
741 /* We can now get here via a FP Unavailable exception if the core
742 * has no FPU, in that case no reason flags will be set */
785#ifdef CONFIG_MATH_EMULATION 743#ifdef CONFIG_MATH_EMULATION
786 /* (reason & REASON_ILLEGAL) would be the obvious thing here, 744 /* (reason & REASON_ILLEGAL) would be the obvious thing here,
787 * but there seems to be a hardware bug on the 405GP (RevD) 745 * but there seems to be a hardware bug on the 405GP (RevD)
@@ -808,7 +766,9 @@ void __kprobes program_check_exception(struct pt_regs *regs)
808 return; 766 return;
809 if (debugger_bpt(regs)) 767 if (debugger_bpt(regs))
810 return; 768 return;
811 if (check_bug_trap(regs)) { 769
770 if (!(regs->msr & MSR_PR) && /* not user-mode */
771 report_bug(regs->nip) == BUG_TRAP_TYPE_WARN) {
812 regs->nip += 4; 772 regs->nip += 4;
813 return; 773 return;
814 } 774 }
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 04b98671a060..04b8e71bf5b0 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -62,11 +62,7 @@ SECTIONS
62 __stop___ex_table = .; 62 __stop___ex_table = .;
63 } 63 }
64 64
65 __bug_table : { 65 BUG_TABLE
66 __start___bug_table = .;
67 *(__bug_table)
68 __stop___bug_table = .;
69 }
70 66
71/* 67/*
72 * Init sections discarded at runtime 68 * Init sections discarded at runtime
diff --git a/arch/powerpc/mm/imalloc.c b/arch/powerpc/mm/imalloc.c
index add8c1a9af68..c831815c31f0 100644
--- a/arch/powerpc/mm/imalloc.c
+++ b/arch/powerpc/mm/imalloc.c
@@ -138,7 +138,7 @@ static struct vm_struct * split_im_region(unsigned long v_addr,
138 struct vm_struct *vm2 = NULL; 138 struct vm_struct *vm2 = NULL;
139 struct vm_struct *new_vm = NULL; 139 struct vm_struct *new_vm = NULL;
140 140
141 vm1 = (struct vm_struct *) kmalloc(sizeof(*vm1), GFP_KERNEL); 141 vm1 = kmalloc(sizeof(*vm1), GFP_KERNEL);
142 if (vm1 == NULL) { 142 if (vm1 == NULL) {
143 printk(KERN_ERR "%s() out of memory\n", __FUNCTION__); 143 printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
144 return NULL; 144 return NULL;
@@ -172,7 +172,7 @@ static struct vm_struct * split_im_region(unsigned long v_addr,
172 * uppermost remainder, and use existing parent one for the 172 * uppermost remainder, and use existing parent one for the
173 * lower remainder of parent range 173 * lower remainder of parent range
174 */ 174 */
175 vm2 = (struct vm_struct *) kmalloc(sizeof(*vm2), GFP_KERNEL); 175 vm2 = kmalloc(sizeof(*vm2), GFP_KERNEL);
176 if (vm2 == NULL) { 176 if (vm2 == NULL) {
177 printk(KERN_ERR "%s() out of memory\n", __FUNCTION__); 177 printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
178 kfree(vm1); 178 kfree(vm1);
@@ -206,7 +206,7 @@ static struct vm_struct * __add_new_im_area(unsigned long req_addr,
206 break; 206 break;
207 } 207 }
208 208
209 area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL); 209 area = kmalloc(sizeof(*area), GFP_KERNEL);
210 if (!area) 210 if (!area)
211 return NULL; 211 return NULL;
212 area->flags = 0; 212 area->flags = 0;
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 9da01dc8cfd9..262790910ff2 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -295,6 +295,63 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
295 return lmb_end_of_DRAM() - start; 295 return lmb_end_of_DRAM() - start;
296} 296}
297 297
298/*
299 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
300 * node. This assumes n_mem_{addr,size}_cells have been set.
301 */
302static void __init parse_drconf_memory(struct device_node *memory)
303{
304 const unsigned int *lm, *dm, *aa;
305 unsigned int ls, ld, la;
306 unsigned int n, aam, aalen;
307 unsigned long lmb_size, size;
308 int nid, default_nid = 0;
309 unsigned int start, ai, flags;
310
311 lm = get_property(memory, "ibm,lmb-size", &ls);
312 dm = get_property(memory, "ibm,dynamic-memory", &ld);
313 aa = get_property(memory, "ibm,associativity-lookup-arrays", &la);
314 if (!lm || !dm || !aa ||
315 ls < sizeof(unsigned int) || ld < sizeof(unsigned int) ||
316 la < 2 * sizeof(unsigned int))
317 return;
318
319 lmb_size = read_n_cells(n_mem_size_cells, &lm);
320 n = *dm++; /* number of LMBs */
321 aam = *aa++; /* number of associativity lists */
322 aalen = *aa++; /* length of each associativity list */
323 if (ld < (n * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int) ||
324 la < (aam * aalen + 2) * sizeof(unsigned int))
325 return;
326
327 for (; n != 0; --n) {
328 start = read_n_cells(n_mem_addr_cells, &dm);
329 ai = dm[2];
330 flags = dm[3];
331 dm += 4;
332 /* 0x80 == reserved, 0x8 = assigned to us */
333 if ((flags & 0x80) || !(flags & 0x8))
334 continue;
335 nid = default_nid;
336 /* flags & 0x40 means associativity index is invalid */
337 if (min_common_depth > 0 && min_common_depth <= aalen &&
338 (flags & 0x40) == 0 && ai < aam) {
339 /* this is like of_node_to_nid_single */
340 nid = aa[ai * aalen + min_common_depth - 1];
341 if (nid == 0xffff || nid >= MAX_NUMNODES)
342 nid = default_nid;
343 }
344 node_set_online(nid);
345
346 size = numa_enforce_memory_limit(start, lmb_size);
347 if (!size)
348 continue;
349
350 add_active_range(nid, start >> PAGE_SHIFT,
351 (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));
352 }
353}
354
298static int __init parse_numa_properties(void) 355static int __init parse_numa_properties(void)
299{ 356{
300 struct device_node *cpu = NULL; 357 struct device_node *cpu = NULL;
@@ -385,6 +442,14 @@ new_range:
385 goto new_range; 442 goto new_range;
386 } 443 }
387 444
445 /*
446 * Now do the same thing for each LMB listed in the ibm,dynamic-memory
447 * property in the ibm,dynamic-reconfiguration-memory node.
448 */
449 memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
450 if (memory)
451 parse_drconf_memory(memory);
452
388 return 0; 453 return 0;
389} 454}
390 455
diff --git a/arch/powerpc/platforms/4xx/Kconfig b/arch/powerpc/platforms/4xx/Kconfig
index ed39d6a3d22a..2f2a13ed7667 100644
--- a/arch/powerpc/platforms/4xx/Kconfig
+++ b/arch/powerpc/platforms/4xx/Kconfig
@@ -179,7 +179,7 @@ config BIOS_FIXUP
179# OAK doesn't exist but wanted to keep this around for any future 403GCX boards 179# OAK doesn't exist but wanted to keep this around for any future 403GCX boards
180config 403GCX 180config 403GCX
181 bool 181 bool
182 depends OAK 182 depends on OAK
183 default y 183 default y
184 184
185config 405EP 185config 405EP
diff --git a/arch/powerpc/platforms/52xx/lite5200.c b/arch/powerpc/platforms/52xx/lite5200.c
index a375c15b4315..eaff71e74fb0 100644
--- a/arch/powerpc/platforms/52xx/lite5200.c
+++ b/arch/powerpc/platforms/52xx/lite5200.c
@@ -40,8 +40,6 @@
40#include <asm/prom.h> 40#include <asm/prom.h>
41#include <asm/udbg.h> 41#include <asm/udbg.h>
42#include <sysdev/fsl_soc.h> 42#include <sysdev/fsl_soc.h>
43#include <asm/qe.h>
44#include <asm/qe_ic.h>
45#include <asm/of_platform.h> 43#include <asm/of_platform.h>
46 44
47#include <asm/mpc52xx.h> 45#include <asm/mpc52xx.h>
diff --git a/arch/powerpc/platforms/cell/cbe_thermal.c b/arch/powerpc/platforms/cell/cbe_thermal.c
index 616a0a3fd0e2..70e0d968d30f 100644
--- a/arch/powerpc/platforms/cell/cbe_thermal.c
+++ b/arch/powerpc/platforms/cell/cbe_thermal.c
@@ -115,6 +115,7 @@ static struct sysdev_attribute attr_spu_temperature = {
115 115
116static struct attribute *spu_attributes[] = { 116static struct attribute *spu_attributes[] = {
117 &attr_spu_temperature.attr, 117 &attr_spu_temperature.attr,
118 NULL,
118}; 119};
119 120
120static struct attribute_group spu_attribute_group = { 121static struct attribute_group spu_attribute_group = {
@@ -135,6 +136,7 @@ static struct sysdev_attribute attr_ppe_temperature1 = {
135static struct attribute *ppe_attributes[] = { 136static struct attribute *ppe_attributes[] = {
136 &attr_ppe_temperature0.attr, 137 &attr_ppe_temperature0.attr,
137 &attr_ppe_temperature1.attr, 138 &attr_ppe_temperature1.attr,
139 NULL,
138}; 140};
139 141
140static struct attribute_group ppe_attribute_group = { 142static struct attribute_group ppe_attribute_group = {
diff --git a/arch/powerpc/platforms/cell/pmu.c b/arch/powerpc/platforms/cell/pmu.c
index 99c612025e8f..d04ae1671e6c 100644
--- a/arch/powerpc/platforms/cell/pmu.c
+++ b/arch/powerpc/platforms/cell/pmu.c
@@ -382,11 +382,14 @@ static irqreturn_t cbe_pm_irq(int irq, void *dev_id)
382 return IRQ_HANDLED; 382 return IRQ_HANDLED;
383} 383}
384 384
385int __init cbe_init_pm_irq(void) 385static int __init cbe_init_pm_irq(void)
386{ 386{
387 unsigned int irq; 387 unsigned int irq;
388 int rc, node; 388 int rc, node;
389 389
390 if (!machine_is(cell))
391 return 0;
392
390 for_each_node(node) { 393 for_each_node(node) {
391 irq = irq_create_mapping(NULL, IIC_IRQ_IOEX_PMI | 394 irq = irq_create_mapping(NULL, IIC_IRQ_IOEX_PMI |
392 (node << IIC_IRQ_NODE_SHIFT)); 395 (node << IIC_IRQ_NODE_SHIFT));
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index 26945c491f6b..725e19561159 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -147,7 +147,7 @@ static int spufs_arch_notes_size(void)
147 struct fdtable *fdt = files_fdtable(current->files); 147 struct fdtable *fdt = files_fdtable(current->files);
148 int size = 0, fd; 148 int size = 0, fd;
149 149
150 for (fd = 0; fd < fdt->max_fdset && fd < fdt->max_fds; fd++) { 150 for (fd = 0; fd < fdt->max_fds; fd++) {
151 if (FD_ISSET(fd, fdt->open_fds)) { 151 if (FD_ISSET(fd, fdt->open_fds)) {
152 struct file *file = fcheck(fd); 152 struct file *file = fcheck(fd);
153 153
diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig
index ddbe398fbd48..b3c2ce4cb7a8 100644
--- a/arch/powerpc/platforms/embedded6xx/Kconfig
+++ b/arch/powerpc/platforms/embedded6xx/Kconfig
@@ -35,7 +35,7 @@ config HDPU
35 Select HDPU if configuring a Sky Computers Compute Blade. 35 Select HDPU if configuring a Sky Computers Compute Blade.
36 36
37config HDPU_FEATURES 37config HDPU_FEATURES
38 depends HDPU 38 depends on HDPU
39 tristate "HDPU-Features" 39 tristate "HDPU-Features"
40 help 40 help
41 Select to enable HDPU enhanced features. 41 Select to enable HDPU enhanced features.
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index 3a32deda765d..3f6a69f67195 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -562,7 +562,7 @@ void __init maple_pci_init(void)
562 for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) { 562 for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) {
563 if (np->name == NULL) 563 if (np->name == NULL)
564 continue; 564 continue;
565 if (strcmp(np->name, "pci") == 0) { 565 if (!strcmp(np->name, "pci") || !strcmp(np->name, "pcie")) {
566 if (add_bridge(np) == 0) 566 if (add_bridge(np) == 0)
567 of_node_get(np); 567 of_node_get(np);
568 } 568 }
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index 094989d50bab..f12d5c69e74d 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -60,6 +60,7 @@
60#include <asm/of_device.h> 60#include <asm/of_device.h>
61#include <asm/lmb.h> 61#include <asm/lmb.h>
62#include <asm/mpic.h> 62#include <asm/mpic.h>
63#include <asm/rtas.h>
63#include <asm/udbg.h> 64#include <asm/udbg.h>
64 65
65#include "maple.h" 66#include "maple.h"
@@ -166,6 +167,16 @@ struct smp_ops_t maple_smp_ops = {
166}; 167};
167#endif /* CONFIG_SMP */ 168#endif /* CONFIG_SMP */
168 169
170static void __init maple_use_rtas_reboot_and_halt_if_present(void)
171{
172 if (rtas_service_present("system-reboot") &&
173 rtas_service_present("power-off")) {
174 ppc_md.restart = rtas_restart;
175 ppc_md.power_off = rtas_power_off;
176 ppc_md.halt = rtas_halt;
177 }
178}
179
169void __init maple_setup_arch(void) 180void __init maple_setup_arch(void)
170{ 181{
171 /* init to some ~sane value until calibrate_delay() runs */ 182 /* init to some ~sane value until calibrate_delay() runs */
@@ -181,6 +192,7 @@ void __init maple_setup_arch(void)
181#ifdef CONFIG_DUMMY_CONSOLE 192#ifdef CONFIG_DUMMY_CONSOLE
182 conswitchp = &dummy_con; 193 conswitchp = &dummy_con;
183#endif 194#endif
195 maple_use_rtas_reboot_and_halt_if_present();
184 196
185 printk(KERN_DEBUG "Using native/NAP idle loop\n"); 197 printk(KERN_DEBUG "Using native/NAP idle loop\n");
186} 198}
diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig
index 451bfcd5502e..de52ec4e9e58 100644
--- a/arch/powerpc/platforms/ps3/Kconfig
+++ b/arch/powerpc/platforms/ps3/Kconfig
@@ -40,4 +40,15 @@ config PS3_USE_LPAR_ADDR
40 40
41 If you have any doubt, choose the default y. 41 If you have any doubt, choose the default y.
42 42
43config PS3_VUART
44 depends on PPC_PS3
45 bool "PS3 Virtual UART support"
46 default y
47 help
48 Include support for the PS3 Virtual UART.
49
50 This support is required for several system services
51 including the System Manager and AV Settings. In
52 general, all users will say Y.
53
43endmenu 54endmenu
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 997243a91be8..69590fbf83da 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -10,6 +10,8 @@ obj-$(CONFIG_XICS) += xics.o
10obj-$(CONFIG_SCANLOG) += scanlog.o 10obj-$(CONFIG_SCANLOG) += scanlog.o
11obj-$(CONFIG_EEH) += eeh.o eeh_cache.o eeh_driver.o eeh_event.o 11obj-$(CONFIG_EEH) += eeh.o eeh_cache.o eeh_driver.o eeh_event.o
12 12
13obj-$(CONFIG_HOTPLUG_CPU) += hotplug-cpu.o
14
13obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o 15obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
14obj-$(CONFIG_HVCS) += hvcserver.o 16obj-$(CONFIG_HVCS) += hvcserver.o
15obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o 17obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 3c2d63ebf787..da6e5362e7cd 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -337,6 +337,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
337 printk (KERN_ERR "EEH: Device driver ignored %d bad reads, panicing\n", 337 printk (KERN_ERR "EEH: Device driver ignored %d bad reads, panicing\n",
338 pdn->eeh_check_count); 338 pdn->eeh_check_count);
339 dump_stack(); 339 dump_stack();
340 msleep(5000);
340 341
341 /* re-read the slot reset state */ 342 /* re-read the slot reset state */
342 if (read_slot_reset_state(pdn, rets) != 0) 343 if (read_slot_reset_state(pdn, rets) != 0)
diff --git a/arch/powerpc/platforms/pseries/eeh_cache.c b/arch/powerpc/platforms/pseries/eeh_cache.c
index b6b462d3c604..f2bae04424f8 100644
--- a/arch/powerpc/platforms/pseries/eeh_cache.c
+++ b/arch/powerpc/platforms/pseries/eeh_cache.c
@@ -153,7 +153,7 @@ pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
153 return piar; 153 return piar;
154 } 154 }
155 } 155 }
156 piar = (struct pci_io_addr_range *)kmalloc(sizeof(struct pci_io_addr_range), GFP_ATOMIC); 156 piar = kmalloc(sizeof(struct pci_io_addr_range), GFP_ATOMIC);
157 if (!piar) 157 if (!piar)
158 return NULL; 158 return NULL;
159 159
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
index c2bc9904f1cb..cbd6b0711ab4 100644
--- a/arch/powerpc/platforms/pseries/eeh_driver.c
+++ b/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -170,14 +170,19 @@ static void eeh_report_reset(struct pci_dev *dev, void *userdata)
170static void eeh_report_resume(struct pci_dev *dev, void *userdata) 170static void eeh_report_resume(struct pci_dev *dev, void *userdata)
171{ 171{
172 struct pci_driver *driver = dev->driver; 172 struct pci_driver *driver = dev->driver;
173 struct device_node *dn = pci_device_to_OF_node(dev);
173 174
174 dev->error_state = pci_channel_io_normal; 175 dev->error_state = pci_channel_io_normal;
175 176
176 if (!driver) 177 if (!driver)
177 return; 178 return;
178 if (!driver->err_handler) 179
179 return; 180 if ((PCI_DN(dn)->eeh_mode) & EEH_MODE_IRQ_DISABLED) {
180 if (!driver->err_handler->resume) 181 PCI_DN(dn)->eeh_mode &= ~EEH_MODE_IRQ_DISABLED;
182 enable_irq(dev->irq);
183 }
184 if (!driver->err_handler ||
185 !driver->err_handler->resume)
181 return; 186 return;
182 187
183 driver->err_handler->resume(dev); 188 driver->err_handler->resume(dev);
@@ -407,6 +412,8 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
407 412
408 if (rc) 413 if (rc)
409 result = PCI_ERS_RESULT_NEED_RESET; 414 result = PCI_ERS_RESULT_NEED_RESET;
415 else
416 result = PCI_ERS_RESULT_RECOVERED;
410 } 417 }
411 418
412 /* If any device has a hard failure, then shut off everything. */ 419 /* If any device has a hard failure, then shut off everything. */
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
new file mode 100644
index 000000000000..f460b9cbfd46
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -0,0 +1,275 @@
1/*
2 * pseries CPU Hotplug infrastructure.
3 *
4 * Split out from arch/powerpc/platforms/pseries/setup.c
5 * arch/powerpc/kernel/rtas.c, and arch/powerpc/platforms/pseries/smp.c
6 *
7 * Peter Bergner, IBM March 2001.
8 * Copyright (C) 2001 IBM.
9 * Dave Engebretsen, Peter Bergner, and
10 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
11 * Plus various changes from other IBM teams...
12 *
13 * Copyright (C) 2006 Michael Ellerman, IBM Corporation
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 */
20
21#include <linux/kernel.h>
22#include <linux/delay.h>
23#include <linux/cpu.h>
24#include <asm/system.h>
25#include <asm/prom.h>
26#include <asm/rtas.h>
27#include <asm/firmware.h>
28#include <asm/machdep.h>
29#include <asm/vdso_datapage.h>
30#include <asm/pSeries_reconfig.h>
31#include "xics.h"
32
33/* This version can't take the spinlock, because it never returns */
34static struct rtas_args rtas_stop_self_args = {
35 .token = RTAS_UNKNOWN_SERVICE,
36 .nargs = 0,
37 .nret = 1,
38 .rets = &rtas_stop_self_args.args[0],
39};
40
41static void rtas_stop_self(void)
42{
43 struct rtas_args *args = &rtas_stop_self_args;
44
45 local_irq_disable();
46
47 BUG_ON(args->token == RTAS_UNKNOWN_SERVICE);
48
49 printk("cpu %u (hwid %u) Ready to die...\n",
50 smp_processor_id(), hard_smp_processor_id());
51 enter_rtas(__pa(args));
52
53 panic("Alas, I survived.\n");
54}
55
56static void pseries_mach_cpu_die(void)
57{
58 local_irq_disable();
59 idle_task_exit();
60 xics_teardown_cpu(0);
61 rtas_stop_self();
62 /* Should never get here... */
63 BUG();
64 for(;;);
65}
66
67static int qcss_tok; /* query-cpu-stopped-state token */
68
69/* Get state of physical CPU.
70 * Return codes:
71 * 0 - The processor is in the RTAS stopped state
72 * 1 - stop-self is in progress
73 * 2 - The processor is not in the RTAS stopped state
74 * -1 - Hardware Error
75 * -2 - Hardware Busy, Try again later.
76 */
77static int query_cpu_stopped(unsigned int pcpu)
78{
79 int cpu_status, status;
80
81 status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
82 if (status != 0) {
83 printk(KERN_ERR
84 "RTAS query-cpu-stopped-state failed: %i\n", status);
85 return status;
86 }
87
88 return cpu_status;
89}
90
91static int pseries_cpu_disable(void)
92{
93 int cpu = smp_processor_id();
94
95 cpu_clear(cpu, cpu_online_map);
96 vdso_data->processorCount--;
97
98 /*fix boot_cpuid here*/
99 if (cpu == boot_cpuid)
100 boot_cpuid = any_online_cpu(cpu_online_map);
101
102 /* FIXME: abstract this to not be platform specific later on */
103 xics_migrate_irqs_away();
104 return 0;
105}
106
107static void pseries_cpu_die(unsigned int cpu)
108{
109 int tries;
110 int cpu_status;
111 unsigned int pcpu = get_hard_smp_processor_id(cpu);
112
113 for (tries = 0; tries < 25; tries++) {
114 cpu_status = query_cpu_stopped(pcpu);
115 if (cpu_status == 0 || cpu_status == -1)
116 break;
117 msleep(200);
118 }
119 if (cpu_status != 0) {
120 printk("Querying DEAD? cpu %i (%i) shows %i\n",
121 cpu, pcpu, cpu_status);
122 }
123
 124 /* Isolation and deallocation are definitely done by
125 * drslot_chrp_cpu. If they were not they would be
126 * done here. Change isolate state to Isolate and
127 * change allocation-state to Unusable.
128 */
129 paca[cpu].cpu_start = 0;
130}
131
132/*
133 * Update cpu_present_map and paca(s) for a new cpu node. The wrinkle
134 * here is that a cpu device node may represent up to two logical cpus
135 * in the SMT case. We must honor the assumption in other code that
136 * the logical ids for sibling SMT threads x and y are adjacent, such
137 * that x^1 == y and y^1 == x.
138 */
139static int pseries_add_processor(struct device_node *np)
140{
141 unsigned int cpu;
142 cpumask_t candidate_map, tmp = CPU_MASK_NONE;
143 int err = -ENOSPC, len, nthreads, i;
144 const u32 *intserv;
145
146 intserv = get_property(np, "ibm,ppc-interrupt-server#s", &len);
147 if (!intserv)
148 return 0;
149
150 nthreads = len / sizeof(u32);
151 for (i = 0; i < nthreads; i++)
152 cpu_set(i, tmp);
153
154 lock_cpu_hotplug();
155
156 BUG_ON(!cpus_subset(cpu_present_map, cpu_possible_map));
157
158 /* Get a bitmap of unoccupied slots. */
159 cpus_xor(candidate_map, cpu_possible_map, cpu_present_map);
160 if (cpus_empty(candidate_map)) {
161 /* If we get here, it most likely means that NR_CPUS is
162 * less than the partition's max processors setting.
163 */
164 printk(KERN_ERR "Cannot add cpu %s; this system configuration"
165 " supports %d logical cpus.\n", np->full_name,
166 cpus_weight(cpu_possible_map));
167 goto out_unlock;
168 }
169
170 while (!cpus_empty(tmp))
171 if (cpus_subset(tmp, candidate_map))
172 /* Found a range where we can insert the new cpu(s) */
173 break;
174 else
175 cpus_shift_left(tmp, tmp, nthreads);
176
177 if (cpus_empty(tmp)) {
178 printk(KERN_ERR "Unable to find space in cpu_present_map for"
179 " processor %s with %d thread(s)\n", np->name,
180 nthreads);
181 goto out_unlock;
182 }
183
184 for_each_cpu_mask(cpu, tmp) {
185 BUG_ON(cpu_isset(cpu, cpu_present_map));
186 cpu_set(cpu, cpu_present_map);
187 set_hard_smp_processor_id(cpu, *intserv++);
188 }
189 err = 0;
190out_unlock:
191 unlock_cpu_hotplug();
192 return err;
193}
194
195/*
196 * Update the present map for a cpu node which is going away, and set
197 * the hard id in the paca(s) to -1 to be consistent with boot time
198 * convention for non-present cpus.
199 */
200static void pseries_remove_processor(struct device_node *np)
201{
202 unsigned int cpu;
203 int len, nthreads, i;
204 const u32 *intserv;
205
206 intserv = get_property(np, "ibm,ppc-interrupt-server#s", &len);
207 if (!intserv)
208 return;
209
210 nthreads = len / sizeof(u32);
211
212 lock_cpu_hotplug();
213 for (i = 0; i < nthreads; i++) {
214 for_each_present_cpu(cpu) {
215 if (get_hard_smp_processor_id(cpu) != intserv[i])
216 continue;
217 BUG_ON(cpu_online(cpu));
218 cpu_clear(cpu, cpu_present_map);
219 set_hard_smp_processor_id(cpu, -1);
220 break;
221 }
222 if (cpu == NR_CPUS)
223 printk(KERN_WARNING "Could not find cpu to remove "
224 "with physical id 0x%x\n", intserv[i]);
225 }
226 unlock_cpu_hotplug();
227}
228
229static int pseries_smp_notifier(struct notifier_block *nb,
230 unsigned long action, void *node)
231{
232 int err = NOTIFY_OK;
233
234 switch (action) {
235 case PSERIES_RECONFIG_ADD:
236 if (pseries_add_processor(node))
237 err = NOTIFY_BAD;
238 break;
239 case PSERIES_RECONFIG_REMOVE:
240 pseries_remove_processor(node);
241 break;
242 default:
243 err = NOTIFY_DONE;
244 break;
245 }
246 return err;
247}
248
249static struct notifier_block pseries_smp_nb = {
250 .notifier_call = pseries_smp_notifier,
251};
252
253static int __init pseries_cpu_hotplug_init(void)
254{
255 rtas_stop_self_args.token = rtas_token("stop-self");
256 qcss_tok = rtas_token("query-cpu-stopped-state");
257
258 if (rtas_stop_self_args.token == RTAS_UNKNOWN_SERVICE ||
259 qcss_tok == RTAS_UNKNOWN_SERVICE) {
260 printk(KERN_INFO "CPU Hotplug not supported by firmware "
261 "- disabling.\n");
262 return 0;
263 }
264
265 ppc_md.cpu_die = pseries_mach_cpu_die;
266 smp_ops->cpu_disable = pseries_cpu_disable;
267 smp_ops->cpu_die = pseries_cpu_die;
268
269 /* Processors can be added/removed only on LPAR */
270 if (firmware_has_feature(FW_FEATURE_LPAR))
271 pSeries_reconfig_notifier_register(&pseries_smp_nb);
272
273 return 0;
274}
275arch_initcall(pseries_cpu_hotplug_init);
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 0dc2548ca9bc..042ecae107ac 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -347,21 +347,6 @@ static int __init pSeries_init_panel(void)
347} 347}
348arch_initcall(pSeries_init_panel); 348arch_initcall(pSeries_init_panel);
349 349
350#ifdef CONFIG_HOTPLUG_CPU
351static void pSeries_mach_cpu_die(void)
352{
353 local_irq_disable();
354 idle_task_exit();
355 xics_teardown_cpu(0);
356 rtas_stop_self();
357 /* Should never get here... */
358 BUG();
359 for(;;);
360}
361#else
362#define pSeries_mach_cpu_die NULL
363#endif
364
365static int pseries_set_dabr(unsigned long dabr) 350static int pseries_set_dabr(unsigned long dabr)
366{ 351{
367 return plpar_hcall_norets(H_SET_DABR, dabr); 352 return plpar_hcall_norets(H_SET_DABR, dabr);
@@ -437,19 +422,14 @@ static int __init pSeries_probe_hypertas(unsigned long node,
437 if (of_get_flat_dt_prop(node, "ibm,hypertas-functions", NULL) != NULL) 422 if (of_get_flat_dt_prop(node, "ibm,hypertas-functions", NULL) != NULL)
438 powerpc_firmware_features |= FW_FEATURE_LPAR; 423 powerpc_firmware_features |= FW_FEATURE_LPAR;
439 424
440 if (firmware_has_feature(FW_FEATURE_LPAR))
441 hpte_init_lpar();
442 else
443 hpte_init_native();
444
445 return 1; 425 return 1;
446} 426}
447 427
448static int __init pSeries_probe(void) 428static int __init pSeries_probe(void)
449{ 429{
450 unsigned long root = of_get_flat_dt_root(); 430 unsigned long root = of_get_flat_dt_root();
451 char *dtype = of_get_flat_dt_prop(of_get_flat_dt_root(), 431 char *dtype = of_get_flat_dt_prop(root, "device_type", NULL);
452 "device_type", NULL); 432
453 if (dtype == NULL) 433 if (dtype == NULL)
454 return 0; 434 return 0;
455 if (strcmp(dtype, "chrp")) 435 if (strcmp(dtype, "chrp"))
@@ -467,6 +447,11 @@ static int __init pSeries_probe(void)
467 /* Now try to figure out if we are running on LPAR */ 447 /* Now try to figure out if we are running on LPAR */
468 of_scan_flat_dt(pSeries_probe_hypertas, NULL); 448 of_scan_flat_dt(pSeries_probe_hypertas, NULL);
469 449
450 if (firmware_has_feature(FW_FEATURE_LPAR))
451 hpte_init_lpar();
452 else
453 hpte_init_native();
454
470 DBG("Machine is%s LPAR !\n", 455 DBG("Machine is%s LPAR !\n",
471 (powerpc_firmware_features & FW_FEATURE_LPAR) ? "" : " not"); 456 (powerpc_firmware_features & FW_FEATURE_LPAR) ? "" : " not");
472 457
@@ -561,7 +546,6 @@ define_machine(pseries) {
561 .power_off = rtas_power_off, 546 .power_off = rtas_power_off,
562 .halt = rtas_halt, 547 .halt = rtas_halt,
563 .panic = rtas_os_term, 548 .panic = rtas_os_term,
564 .cpu_die = pSeries_mach_cpu_die,
565 .get_boot_time = rtas_get_boot_time, 549 .get_boot_time = rtas_get_boot_time,
566 .get_rtc_time = rtas_get_rtc_time, 550 .get_rtc_time = rtas_get_rtc_time,
567 .set_rtc_time = rtas_set_rtc_time, 551 .set_rtc_time = rtas_set_rtc_time,
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index c6624b8a0e77..4408518eaebe 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -64,197 +64,6 @@ static cpumask_t of_spin_map;
64 64
65extern void generic_secondary_smp_init(unsigned long); 65extern void generic_secondary_smp_init(unsigned long);
66 66
67#ifdef CONFIG_HOTPLUG_CPU
68
69/* Get state of physical CPU.
70 * Return codes:
71 * 0 - The processor is in the RTAS stopped state
72 * 1 - stop-self is in progress
73 * 2 - The processor is not in the RTAS stopped state
74 * -1 - Hardware Error
75 * -2 - Hardware Busy, Try again later.
76 */
77static int query_cpu_stopped(unsigned int pcpu)
78{
79 int cpu_status;
80 int status, qcss_tok;
81
82 qcss_tok = rtas_token("query-cpu-stopped-state");
83 if (qcss_tok == RTAS_UNKNOWN_SERVICE)
84 return -1;
85 status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
86 if (status != 0) {
87 printk(KERN_ERR
88 "RTAS query-cpu-stopped-state failed: %i\n", status);
89 return status;
90 }
91
92 return cpu_status;
93}
94
95static int pSeries_cpu_disable(void)
96{
97 int cpu = smp_processor_id();
98
99 cpu_clear(cpu, cpu_online_map);
100 vdso_data->processorCount--;
101
102 /*fix boot_cpuid here*/
103 if (cpu == boot_cpuid)
104 boot_cpuid = any_online_cpu(cpu_online_map);
105
106 /* FIXME: abstract this to not be platform specific later on */
107 xics_migrate_irqs_away();
108 return 0;
109}
110
111static void pSeries_cpu_die(unsigned int cpu)
112{
113 int tries;
114 int cpu_status;
115 unsigned int pcpu = get_hard_smp_processor_id(cpu);
116
117 for (tries = 0; tries < 25; tries++) {
118 cpu_status = query_cpu_stopped(pcpu);
119 if (cpu_status == 0 || cpu_status == -1)
120 break;
121 msleep(200);
122 }
123 if (cpu_status != 0) {
124 printk("Querying DEAD? cpu %i (%i) shows %i\n",
125 cpu, pcpu, cpu_status);
126 }
127
128 /* Isolation and deallocation are definatly done by
129 * drslot_chrp_cpu. If they were not they would be
130 * done here. Change isolate state to Isolate and
131 * change allocation-state to Unusable.
132 */
133 paca[cpu].cpu_start = 0;
134}
135
136/*
137 * Update cpu_present_map and paca(s) for a new cpu node. The wrinkle
138 * here is that a cpu device node may represent up to two logical cpus
139 * in the SMT case. We must honor the assumption in other code that
140 * the logical ids for sibling SMT threads x and y are adjacent, such
141 * that x^1 == y and y^1 == x.
142 */
143static int pSeries_add_processor(struct device_node *np)
144{
145 unsigned int cpu;
146 cpumask_t candidate_map, tmp = CPU_MASK_NONE;
147 int err = -ENOSPC, len, nthreads, i;
148 const u32 *intserv;
149
150 intserv = get_property(np, "ibm,ppc-interrupt-server#s", &len);
151 if (!intserv)
152 return 0;
153
154 nthreads = len / sizeof(u32);
155 for (i = 0; i < nthreads; i++)
156 cpu_set(i, tmp);
157
158 lock_cpu_hotplug();
159
160 BUG_ON(!cpus_subset(cpu_present_map, cpu_possible_map));
161
162 /* Get a bitmap of unoccupied slots. */
163 cpus_xor(candidate_map, cpu_possible_map, cpu_present_map);
164 if (cpus_empty(candidate_map)) {
165 /* If we get here, it most likely means that NR_CPUS is
166 * less than the partition's max processors setting.
167 */
168 printk(KERN_ERR "Cannot add cpu %s; this system configuration"
169 " supports %d logical cpus.\n", np->full_name,
170 cpus_weight(cpu_possible_map));
171 goto out_unlock;
172 }
173
174 while (!cpus_empty(tmp))
175 if (cpus_subset(tmp, candidate_map))
176 /* Found a range where we can insert the new cpu(s) */
177 break;
178 else
179 cpus_shift_left(tmp, tmp, nthreads);
180
181 if (cpus_empty(tmp)) {
182 printk(KERN_ERR "Unable to find space in cpu_present_map for"
183 " processor %s with %d thread(s)\n", np->name,
184 nthreads);
185 goto out_unlock;
186 }
187
188 for_each_cpu_mask(cpu, tmp) {
189 BUG_ON(cpu_isset(cpu, cpu_present_map));
190 cpu_set(cpu, cpu_present_map);
191 set_hard_smp_processor_id(cpu, *intserv++);
192 }
193 err = 0;
194out_unlock:
195 unlock_cpu_hotplug();
196 return err;
197}
198
199/*
200 * Update the present map for a cpu node which is going away, and set
201 * the hard id in the paca(s) to -1 to be consistent with boot time
202 * convention for non-present cpus.
203 */
204static void pSeries_remove_processor(struct device_node *np)
205{
206 unsigned int cpu;
207 int len, nthreads, i;
208 const u32 *intserv;
209
210 intserv = get_property(np, "ibm,ppc-interrupt-server#s", &len);
211 if (!intserv)
212 return;
213
214 nthreads = len / sizeof(u32);
215
216 lock_cpu_hotplug();
217 for (i = 0; i < nthreads; i++) {
218 for_each_present_cpu(cpu) {
219 if (get_hard_smp_processor_id(cpu) != intserv[i])
220 continue;
221 BUG_ON(cpu_online(cpu));
222 cpu_clear(cpu, cpu_present_map);
223 set_hard_smp_processor_id(cpu, -1);
224 break;
225 }
226 if (cpu == NR_CPUS)
227 printk(KERN_WARNING "Could not find cpu to remove "
228 "with physical id 0x%x\n", intserv[i]);
229 }
230 unlock_cpu_hotplug();
231}
232
233static int pSeries_smp_notifier(struct notifier_block *nb, unsigned long action, void *node)
234{
235 int err = NOTIFY_OK;
236
237 switch (action) {
238 case PSERIES_RECONFIG_ADD:
239 if (pSeries_add_processor(node))
240 err = NOTIFY_BAD;
241 break;
242 case PSERIES_RECONFIG_REMOVE:
243 pSeries_remove_processor(node);
244 break;
245 default:
246 err = NOTIFY_DONE;
247 break;
248 }
249 return err;
250}
251
252static struct notifier_block pSeries_smp_nb = {
253 .notifier_call = pSeries_smp_notifier,
254};
255
256#endif /* CONFIG_HOTPLUG_CPU */
257
258/** 67/**
259 * smp_startup_cpu() - start the given cpu 68 * smp_startup_cpu() - start the given cpu
260 * 69 *
@@ -422,15 +231,6 @@ static void __init smp_init_pseries(void)
422 231
423 DBG(" -> smp_init_pSeries()\n"); 232 DBG(" -> smp_init_pSeries()\n");
424 233
425#ifdef CONFIG_HOTPLUG_CPU
426 smp_ops->cpu_disable = pSeries_cpu_disable;
427 smp_ops->cpu_die = pSeries_cpu_die;
428
429 /* Processors can be added/removed only on LPAR */
430 if (firmware_has_feature(FW_FEATURE_LPAR))
431 pSeries_reconfig_notifier_register(&pSeries_smp_nb);
432#endif
433
434 /* Mark threads which are still spinning in hold loops. */ 234 /* Mark threads which are still spinning in hold loops. */
435 if (cpu_has_feature(CPU_FTR_SMT)) { 235 if (cpu_has_feature(CPU_FTR_SMT)) {
436 for_each_present_cpu(i) { 236 for_each_present_cpu(i) {
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 6cc34597a620..04d4917eb303 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -5,7 +5,8 @@ endif
5obj-$(CONFIG_MPIC) += mpic.o 5obj-$(CONFIG_MPIC) += mpic.o
6obj-$(CONFIG_PPC_INDIRECT_PCI) += indirect_pci.o 6obj-$(CONFIG_PPC_INDIRECT_PCI) += indirect_pci.o
7obj-$(CONFIG_PPC_MPC106) += grackle.o 7obj-$(CONFIG_PPC_MPC106) += grackle.o
8obj-$(CONFIG_PPC_DCR) += dcr.o dcr-low.o 8obj-$(CONFIG_PPC_DCR) += dcr.o
9obj-$(CONFIG_PPC_DCR_NATIVE) += dcr-low.o
9obj-$(CONFIG_U3_DART) += dart_iommu.o 10obj-$(CONFIG_U3_DART) += dart_iommu.o
10obj-$(CONFIG_MMIO_NVRAM) += mmio_nvram.o 11obj-$(CONFIG_MMIO_NVRAM) += mmio_nvram.o
11obj-$(CONFIG_FSL_SOC) += fsl_soc.o 12obj-$(CONFIG_FSL_SOC) += fsl_soc.o
diff --git a/arch/powerpc/sysdev/dcr.S b/arch/powerpc/sysdev/dcr.S
deleted file mode 100644
index 2078f39e2f17..000000000000
--- a/arch/powerpc/sysdev/dcr.S
+++ /dev/null
@@ -1,39 +0,0 @@
1/*
2 * "Indirect" DCR access
3 *
4 * Copyright (c) 2004 Eugene Surovegin <ebs@ebshome.net>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#include <asm/ppc_asm.h>
13#include <asm/processor.h>
14
15#define DCR_ACCESS_PROLOG(table) \
16 rlwinm r3,r3,4,18,27; \
17 lis r5,table@h; \
18 ori r5,r5,table@l; \
19 add r3,r3,r5; \
20 mtctr r3; \
21 bctr
22
23_GLOBAL(__mfdcr)
24 DCR_ACCESS_PROLOG(__mfdcr_table)
25
26_GLOBAL(__mtdcr)
27 DCR_ACCESS_PROLOG(__mtdcr_table)
28
29__mfdcr_table:
30 mfdcr r3,0; blr
31__mtdcr_table:
32 mtdcr 0,r4; blr
33
34dcr = 1
35 .rept 1023
36 mfdcr r3,dcr; blr
37 mtdcr dcr,r4; blr
38 dcr = dcr + 1
39 .endr
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
index 6995f51b9488..74e48d94f27c 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c
@@ -223,23 +223,15 @@ static void qe_ic_mask_irq(unsigned int virq)
223 qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg, 223 qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
224 temp & ~qe_ic_info[src].mask); 224 temp & ~qe_ic_info[src].mask);
225 225
226 spin_unlock_irqrestore(&qe_ic_lock, flags); 226 /* Flush the above write before enabling interrupts; otherwise,
227} 227 * spurious interrupts will sometimes happen. To be 100% sure
228 228 * that the write has reached the device before interrupts are
229static void qe_ic_mask_irq_and_ack(unsigned int virq) 229 * enabled, the mask register would have to be read back; however,
230{ 230 * this is not required for correctness, only to avoid wasting
231 struct qe_ic *qe_ic = qe_ic_from_irq(virq); 231 * time on a large number of spurious interrupts. In testing,
232 unsigned int src = virq_to_hw(virq); 232 * a sync reduced the observed spurious interrupts to zero.
233 unsigned long flags; 233 */
234 u32 temp; 234 mb();
235
236 spin_lock_irqsave(&qe_ic_lock, flags);
237
238 temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
239 qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
240 temp & ~qe_ic_info[src].mask);
241
242 /* There is nothing to do for ack here, ack is handled in ISR */
243 235
244 spin_unlock_irqrestore(&qe_ic_lock, flags); 236 spin_unlock_irqrestore(&qe_ic_lock, flags);
245} 237}
@@ -248,7 +240,7 @@ static struct irq_chip qe_ic_irq_chip = {
248 .typename = " QEIC ", 240 .typename = " QEIC ",
249 .unmask = qe_ic_unmask_irq, 241 .unmask = qe_ic_unmask_irq,
250 .mask = qe_ic_mask_irq, 242 .mask = qe_ic_mask_irq,
251 .mask_ack = qe_ic_mask_irq_and_ack, 243 .mask_ack = qe_ic_mask_irq,
252}; 244};
253 245
254static int qe_ic_host_match(struct irq_host *h, struct device_node *node) 246static int qe_ic_host_match(struct irq_host *h, struct device_node *node)
@@ -331,34 +323,22 @@ unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
331 return irq_linear_revmap(qe_ic->irqhost, irq); 323 return irq_linear_revmap(qe_ic->irqhost, irq);
332} 324}
333 325
334/* FIXME: We mask all the QE Low interrupts while handling. We should
335 * let other interrupt come in, but BAD interrupts are generated */
336void fastcall qe_ic_cascade_low(unsigned int irq, struct irq_desc *desc) 326void fastcall qe_ic_cascade_low(unsigned int irq, struct irq_desc *desc)
337{ 327{
338 struct qe_ic *qe_ic = desc->handler_data; 328 struct qe_ic *qe_ic = desc->handler_data;
339 struct irq_chip *chip = irq_desc[irq].chip;
340
341 unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic); 329 unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
342 330
343 chip->mask_ack(irq);
344 if (cascade_irq != NO_IRQ) 331 if (cascade_irq != NO_IRQ)
345 generic_handle_irq(cascade_irq); 332 generic_handle_irq(cascade_irq);
346 chip->unmask(irq);
347} 333}
348 334
349/* FIXME: We mask all the QE High interrupts while handling. We should
350 * let other interrupt come in, but BAD interrupts are generated */
351void fastcall qe_ic_cascade_high(unsigned int irq, struct irq_desc *desc) 335void fastcall qe_ic_cascade_high(unsigned int irq, struct irq_desc *desc)
352{ 336{
353 struct qe_ic *qe_ic = desc->handler_data; 337 struct qe_ic *qe_ic = desc->handler_data;
354 struct irq_chip *chip = irq_desc[irq].chip;
355
356 unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic); 338 unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
357 339
358 chip->mask_ack(irq);
359 if (cascade_irq != NO_IRQ) 340 if (cascade_irq != NO_IRQ)
360 generic_handle_irq(cascade_irq); 341 generic_handle_irq(cascade_irq);
361 chip->unmask(irq);
362} 342}
363 343
364void __init qe_ic_init(struct device_node *node, unsigned int flags) 344void __init qe_ic_init(struct device_node *node, unsigned int flags)
diff --git a/arch/powerpc/sysdev/rom.c b/arch/powerpc/sysdev/rom.c
index bf5b3f10e6c6..c855a3b298a3 100644
--- a/arch/powerpc/sysdev/rom.c
+++ b/arch/powerpc/sysdev/rom.c
@@ -9,6 +9,7 @@
9 9
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <asm/of_device.h> 11#include <asm/of_device.h>
12#include <asm/of_platform.h>
12 13
13static int __init powerpc_flash_init(void) 14static int __init powerpc_flash_init(void)
14{ 15{
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index a34ed49e0356..77540a2f7704 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -22,6 +22,7 @@
22#include <linux/sysrq.h> 22#include <linux/sysrq.h>
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/irq.h> 24#include <linux/irq.h>
25#include <linux/bug.h>
25 26
26#include <asm/ptrace.h> 27#include <asm/ptrace.h>
27#include <asm/string.h> 28#include <asm/string.h>
@@ -35,7 +36,6 @@
35#include <asm/cputable.h> 36#include <asm/cputable.h>
36#include <asm/rtas.h> 37#include <asm/rtas.h>
37#include <asm/sstep.h> 38#include <asm/sstep.h>
38#include <asm/bug.h>
39#include <asm/irq_regs.h> 39#include <asm/irq_regs.h>
40#include <asm/spu.h> 40#include <asm/spu.h>
41#include <asm/spu_priv1.h> 41#include <asm/spu_priv1.h>
@@ -1346,7 +1346,7 @@ static void backtrace(struct pt_regs *excp)
1346 1346
1347static void print_bug_trap(struct pt_regs *regs) 1347static void print_bug_trap(struct pt_regs *regs)
1348{ 1348{
1349 struct bug_entry *bug; 1349 const struct bug_entry *bug;
1350 unsigned long addr; 1350 unsigned long addr;
1351 1351
1352 if (regs->msr & MSR_PR) 1352 if (regs->msr & MSR_PR)
@@ -1357,11 +1357,11 @@ static void print_bug_trap(struct pt_regs *regs)
1357 bug = find_bug(regs->nip); 1357 bug = find_bug(regs->nip);
1358 if (bug == NULL) 1358 if (bug == NULL)
1359 return; 1359 return;
1360 if (bug->line & BUG_WARNING_TRAP) 1360 if (is_warning_bug(bug))
1361 return; 1361 return;
1362 1362
1363 printf("kernel BUG in %s at %s:%d!\n", 1363 printf("kernel BUG at %s:%u!\n",
1364 bug->function, bug->file, (unsigned int)bug->line); 1364 bug->file, bug->line);
1365} 1365}
1366 1366
1367void excprint(struct pt_regs *fp) 1367void excprint(struct pt_regs *fp)
diff --git a/arch/ppc/8260_io/fcc_enet.c b/arch/ppc/8260_io/fcc_enet.c
index 709952c25f29..06b84c372e58 100644
--- a/arch/ppc/8260_io/fcc_enet.c
+++ b/arch/ppc/8260_io/fcc_enet.c
@@ -1892,10 +1892,10 @@ init_fcc_param(fcc_info_t *fip, struct net_device *dev,
1892 /* Allocate space for the buffer descriptors from regular memory. 1892 /* Allocate space for the buffer descriptors from regular memory.
1893 * Initialize base addresses for the buffer descriptors. 1893 * Initialize base addresses for the buffer descriptors.
1894 */ 1894 */
1895 cep->rx_bd_base = (cbd_t *)kmalloc(sizeof(cbd_t) * RX_RING_SIZE, 1895 cep->rx_bd_base = kmalloc(sizeof(cbd_t) * RX_RING_SIZE,
1896 GFP_KERNEL | GFP_DMA); 1896 GFP_KERNEL | GFP_DMA);
1897 ep->fen_genfcc.fcc_rbase = __pa(cep->rx_bd_base); 1897 ep->fen_genfcc.fcc_rbase = __pa(cep->rx_bd_base);
1898 cep->tx_bd_base = (cbd_t *)kmalloc(sizeof(cbd_t) * TX_RING_SIZE, 1898 cep->tx_bd_base = kmalloc(sizeof(cbd_t) * TX_RING_SIZE,
1899 GFP_KERNEL | GFP_DMA); 1899 GFP_KERNEL | GFP_DMA);
1900 ep->fen_genfcc.fcc_tbase = __pa(cep->tx_bd_base); 1900 ep->fen_genfcc.fcc_tbase = __pa(cep->tx_bd_base);
1901 1901
diff --git a/arch/ppc/8xx_io/cs4218_tdm.c b/arch/ppc/8xx_io/cs4218_tdm.c
index c71ef3c2e7bf..b7bb5f0b3c5f 100644
--- a/arch/ppc/8xx_io/cs4218_tdm.c
+++ b/arch/ppc/8xx_io/cs4218_tdm.c
@@ -2601,7 +2601,7 @@ int __init tdm8xx_sound_init(void)
2601 /* Initialize beep stuff */ 2601 /* Initialize beep stuff */
2602 orig_mksound = kd_mksound; 2602 orig_mksound = kd_mksound;
2603 kd_mksound = cs_mksound; 2603 kd_mksound = cs_mksound;
2604 beep_buf = (short *) kmalloc(BEEP_BUFLEN * 4, GFP_KERNEL); 2604 beep_buf = kmalloc(BEEP_BUFLEN * 4, GFP_KERNEL);
2605 if (beep_buf == NULL) 2605 if (beep_buf == NULL)
2606 printk(KERN_WARNING "dmasound: no memory for " 2606 printk(KERN_WARNING "dmasound: no memory for "
2607 "beep buffer\n"); 2607 "beep buffer\n");
diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig
index 692b5ba53209..8eb82efe05a1 100644
--- a/arch/ppc/Kconfig
+++ b/arch/ppc/Kconfig
@@ -624,7 +624,7 @@ config HDPU
624 Select HDPU if configuring a Sky Computers Compute Blade. 624 Select HDPU if configuring a Sky Computers Compute Blade.
625 625
626config HDPU_FEATURES 626config HDPU_FEATURES
627 depends HDPU 627 depends on HDPU
628 tristate "HDPU-Features" 628 tristate "HDPU-Features"
629 help 629 help
630 Select to enable HDPU enhanced features. 630 Select to enable HDPU enhanced features.
@@ -735,7 +735,7 @@ config LITE5200
735 735
736config LITE5200B 736config LITE5200B
737 bool "Freescale LITE5200B" 737 bool "Freescale LITE5200B"
738 depends LITE5200 738 depends on LITE5200
739 help 739 help
740 Support for the LITE5200B dev board for the MPC5200 from Freescale. 740 Support for the LITE5200B dev board for the MPC5200 from Freescale.
741 This is the new board with 2 PCI slots. 741 This is the new board with 2 PCI slots.
diff --git a/arch/ppc/kernel/pci.c b/arch/ppc/kernel/pci.c
index 63808e01cb0b..5e723c4c2571 100644
--- a/arch/ppc/kernel/pci.c
+++ b/arch/ppc/kernel/pci.c
@@ -879,7 +879,7 @@ pci_resource_to_bus(struct pci_dev *pdev, struct resource *res)
879 879
880 880
881static struct resource *__pci_mmap_make_offset(struct pci_dev *dev, 881static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
882 unsigned long *offset, 882 resource_size_t *offset,
883 enum pci_mmap_state mmap_state) 883 enum pci_mmap_state mmap_state)
884{ 884{
885 struct pci_controller *hose = pci_bus_to_hose(dev->bus->number); 885 struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
@@ -891,7 +891,9 @@ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
891 891
892 /* If memory, add on the PCI bridge address offset */ 892 /* If memory, add on the PCI bridge address offset */
893 if (mmap_state == pci_mmap_mem) { 893 if (mmap_state == pci_mmap_mem) {
894#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
894 *offset += hose->pci_mem_offset; 895 *offset += hose->pci_mem_offset;
896#endif
895 res_bit = IORESOURCE_MEM; 897 res_bit = IORESOURCE_MEM;
896 } else { 898 } else {
897 io_offset = hose->io_base_virt - ___IO_BASE; 899 io_offset = hose->io_base_virt - ___IO_BASE;
@@ -1030,7 +1032,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
1030 enum pci_mmap_state mmap_state, 1032 enum pci_mmap_state mmap_state,
1031 int write_combine) 1033 int write_combine)
1032{ 1034{
1033 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; 1035 resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT;
1034 struct resource *rp; 1036 struct resource *rp;
1035 int ret; 1037 int ret;
1036 1038
@@ -1132,21 +1134,42 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
1132 resource_size_t *start, resource_size_t *end) 1134 resource_size_t *start, resource_size_t *end)
1133{ 1135{
1134 struct pci_controller *hose = pci_bus_to_hose(dev->bus->number); 1136 struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
1135 unsigned long offset = 0; 1137 resource_size_t offset = 0;
1136 1138
1137 if (hose == NULL) 1139 if (hose == NULL)
1138 return; 1140 return;
1139 1141
1140 if (rsrc->flags & IORESOURCE_IO) 1142 if (rsrc->flags & IORESOURCE_IO)
1141 offset = ___IO_BASE - hose->io_base_virt + hose->io_base_phys; 1143 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
1144
1145 /* We pass a fully fixed up address to userland for MMIO instead of
1146 * a BAR value because X is lame and expects to be able to use that
1147 * to pass to /dev/mem !
1148 *
1149 * That means that we'll have potentially 64 bits values where some
1150 * userland apps only expect 32 (like X itself since it thinks only
1151 * Sparc has 64 bits MMIO) but if we don't do that, we break it on
1152 * 32 bits CHRPs :-(
1153 *
1154 * Hopefully, the sysfs insterface is immune to that gunk. Once X
1155 * has been fixed (and the fix spread enough), we can re-enable the
1156 * 2 lines below and pass down a BAR value to userland. In that case
1157 * we'll also have to re-enable the matching code in
1158 * __pci_mmap_make_offset().
1159 *
1160 * BenH.
1161 */
1162#if 0
1163 else if (rsrc->flags & IORESOURCE_MEM)
1164 offset = hose->pci_mem_offset;
1165#endif
1142 1166
1143 *start = rsrc->start + offset; 1167 *start = rsrc->start - offset;
1144 *end = rsrc->end + offset; 1168 *end = rsrc->end - offset;
1145} 1169}
1146 1170
1147void __init 1171void __init pci_init_resource(struct resource *res, resource_size_t start,
1148pci_init_resource(struct resource *res, unsigned long start, unsigned long end, 1172 resource_size_t end, int flags, char *name)
1149 int flags, char *name)
1150{ 1173{
1151 res->start = start; 1174 res->start = start;
1152 res->end = end; 1175 res->end = end;
diff --git a/arch/ppc/platforms/4xx/Kconfig b/arch/ppc/platforms/4xx/Kconfig
index 293bd489e7d9..6980de420e92 100644
--- a/arch/ppc/platforms/4xx/Kconfig
+++ b/arch/ppc/platforms/4xx/Kconfig
@@ -189,7 +189,7 @@ config BIOS_FIXUP
189# OAK doesn't exist but wanted to keep this around for any future 403GCX boards 189# OAK doesn't exist but wanted to keep this around for any future 403GCX boards
190config 403GCX 190config 403GCX
191 bool 191 bool
192 depends OAK 192 depends on OAK
193 default y 193 default y
194 194
195config 405EP 195config 405EP
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index ff690564edbd..12272361c018 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -407,7 +407,7 @@ config APPLDATA_BASE
407 407
408config APPLDATA_MEM 408config APPLDATA_MEM
409 tristate "Monitor memory management statistics" 409 tristate "Monitor memory management statistics"
410 depends on APPLDATA_BASE 410 depends on APPLDATA_BASE && VM_EVENT_COUNTERS
411 help 411 help
412 This provides memory management related data to the Linux - VM Monitor 412 This provides memory management related data to the Linux - VM Monitor
413 Stream, like paging/swapping rate, memory utilisation, etc. 413 Stream, like paging/swapping rate, memory utilisation, etc.
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index ef5266fbce62..bb57bc0e3fc8 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -191,13 +191,13 @@ debug_areas_alloc(int pages_per_area, int nr_areas)
191 debug_entry_t*** areas; 191 debug_entry_t*** areas;
192 int i,j; 192 int i,j;
193 193
194 areas = (debug_entry_t ***) kmalloc(nr_areas * 194 areas = kmalloc(nr_areas *
195 sizeof(debug_entry_t**), 195 sizeof(debug_entry_t**),
196 GFP_KERNEL); 196 GFP_KERNEL);
197 if (!areas) 197 if (!areas)
198 goto fail_malloc_areas; 198 goto fail_malloc_areas;
199 for (i = 0; i < nr_areas; i++) { 199 for (i = 0; i < nr_areas; i++) {
200 areas[i] = (debug_entry_t**) kmalloc(pages_per_area * 200 areas[i] = kmalloc(pages_per_area *
201 sizeof(debug_entry_t*),GFP_KERNEL); 201 sizeof(debug_entry_t*),GFP_KERNEL);
202 if (!areas[i]) { 202 if (!areas[i]) {
203 goto fail_malloc_areas2; 203 goto fail_malloc_areas2;
@@ -242,7 +242,7 @@ debug_info_alloc(char *name, int pages_per_area, int nr_areas, int buf_size,
242 242
243 /* alloc everything */ 243 /* alloc everything */
244 244
245 rc = (debug_info_t*) kmalloc(sizeof(debug_info_t), GFP_KERNEL); 245 rc = kmalloc(sizeof(debug_info_t), GFP_KERNEL);
246 if(!rc) 246 if(!rc)
247 goto fail_malloc_rc; 247 goto fail_malloc_rc;
248 rc->active_entries = kcalloc(nr_areas, sizeof(int), GFP_KERNEL); 248 rc->active_entries = kcalloc(nr_areas, sizeof(int), GFP_KERNEL);
@@ -634,7 +634,7 @@ found:
634 rc = -ENOMEM; 634 rc = -ENOMEM;
635 goto out; 635 goto out;
636 } 636 }
637 p_info = (file_private_info_t *) kmalloc(sizeof(file_private_info_t), 637 p_info = kmalloc(sizeof(file_private_info_t),
638 GFP_KERNEL); 638 GFP_KERNEL);
639 if(!p_info){ 639 if(!p_info){
640 if(debug_info_snapshot) 640 if(debug_info_snapshot)
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
index 4faf96f8a834..bc5beaa8f98e 100644
--- a/arch/s390/kernel/s390_ext.c
+++ b/arch/s390/kernel/s390_ext.c
@@ -37,7 +37,7 @@ int register_external_interrupt(__u16 code, ext_int_handler_t handler)
37 ext_int_info_t *p; 37 ext_int_info_t *p;
38 int index; 38 int index;
39 39
40 p = (ext_int_info_t *) kmalloc(sizeof(ext_int_info_t), GFP_ATOMIC); 40 p = kmalloc(sizeof(ext_int_info_t), GFP_ATOMIC);
41 if (p == NULL) 41 if (p == NULL)
42 return -ENOMEM; 42 return -ENOMEM;
43 p->code = code; 43 p->code = code;
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 8e24c40662e3..3aa3b885ab36 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -479,7 +479,7 @@ config SH_CLK_MD
479 int "CPU Mode Pin Setting" 479 int "CPU Mode Pin Setting"
480 depends on CPU_SUBTYPE_SH7619 || CPU_SUBTYPE_SH7206 480 depends on CPU_SUBTYPE_SH7619 || CPU_SUBTYPE_SH7206
481 help 481 help
482 MD2 - MD0 Setting. 482 MD2 - MD0 pin setting.
483 483
484menu "CPU Frequency scaling" 484menu "CPU Frequency scaling"
485 485
@@ -580,18 +580,6 @@ config NR_CPUS
580 580
581source "kernel/Kconfig.preempt" 581source "kernel/Kconfig.preempt"
582 582
583config CPU_HAS_SR_RB
584 bool "CPU has SR.RB"
585 depends on CPU_SH3 || CPU_SH4
586 default y
587 help
588 This will enable the use of SR.RB register bank usage. Processors
589 that are lacking this bit must have another method in place for
590 accomplishing what is taken care of by the banked registers.
591
592 See <file:Documentation/sh/register-banks.txt> for further
593 information on SR.RB and register banking in the kernel in general.
594
595config NODES_SHIFT 583config NODES_SHIFT
596 int 584 int
597 default "1" 585 default "1"
diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
index 66a25ef4ef1b..87902e0298e2 100644
--- a/arch/sh/Kconfig.debug
+++ b/arch/sh/Kconfig.debug
@@ -31,7 +31,8 @@ config EARLY_SCIF_CONSOLE_PORT
31 hex "SCIF port for early console" 31 hex "SCIF port for early console"
32 depends on EARLY_SCIF_CONSOLE 32 depends on EARLY_SCIF_CONSOLE
33 default "0xffe00000" if CPU_SUBTYPE_SH7780 33 default "0xffe00000" if CPU_SUBTYPE_SH7780
34 default "0xfffe9800" if CPU_SUBTYPE_SH72060 34 default "0xfffe9800" if CPU_SUBTYPE_SH7206
35 default "0xf8420000" if CPU_SUBTYPE_SH7619
35 default "0xffe80000" if CPU_SH4 36 default "0xffe80000" if CPU_SH4
36 37
37config EARLY_PRINTK 38config EARLY_PRINTK
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index d10bba5e1074..c1dbef212634 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -179,7 +179,7 @@ maketools: include/linux/version.h FORCE
179 179
180all: zImage 180all: zImage
181 181
182zImage: vmlinux 182zImage uImage uImage.srec vmlinux.srec: vmlinux
183 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 183 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
184 184
185compressed: zImage 185compressed: zImage
@@ -190,5 +190,8 @@ archclean:
190CLEAN_FILES += include/asm-sh/machtypes.h 190CLEAN_FILES += include/asm-sh/machtypes.h
191 191
192define archhelp 192define archhelp
193 @echo ' zImage - Compressed kernel image (arch/sh/boot/zImage)' 193 @echo '* zImage - Compressed kernel image'
194 @echo ' vmlinux.srec - Create an ELF S-record'
195 @echo ' uImage - Create a bootable image for U-Boot'
196 @echo ' uImage.srec - Create an S-record for U-Boot'
194endef 197endef
diff --git a/arch/sh/boards/landisk/irq.c b/arch/sh/boards/landisk/irq.c
index 8f2e1c68b90f..3eba6d086d7f 100644
--- a/arch/sh/boards/landisk/irq.c
+++ b/arch/sh/boards/landisk/irq.c
@@ -16,8 +16,8 @@
16 */ 16 */
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/irq.h> 18#include <linux/irq.h>
19#include <asm/io.h> 19#include <linux/interrupt.h>
20#include <asm/irq.h> 20#include <linux/io.h>
21#include <asm/landisk/iodata_landisk.h> 21#include <asm/landisk/iodata_landisk.h>
22 22
23static void enable_landisk_irq(unsigned int irq); 23static void enable_landisk_irq(unsigned int irq);
diff --git a/arch/sh/boards/se/7206/irq.c b/arch/sh/boards/se/7206/irq.c
index 3fb0c5f5b23a..27da88486f73 100644
--- a/arch/sh/boards/se/7206/irq.c
+++ b/arch/sh/boards/se/7206/irq.c
@@ -10,6 +10,7 @@
10#include <linux/irq.h> 10#include <linux/irq.h>
11#include <linux/io.h> 11#include <linux/io.h>
12#include <linux/irq.h> 12#include <linux/irq.h>
13#include <linux/interrupt.h>
13#include <asm/se7206.h> 14#include <asm/se7206.h>
14 15
15#define INTSTS0 0x31800000 16#define INTSTS0 0x31800000
@@ -18,6 +19,13 @@
18#define INTMSK1 0x31800006 19#define INTMSK1 0x31800006
19#define INTSEL 0x31800008 20#define INTSEL 0x31800008
20 21
22#define IRQ0_IRQ 64
23#define IRQ1_IRQ 65
24#define IRQ3_IRQ 67
25
26#define INTC_IPR01 0xfffe0818
27#define INTC_ICR1 0xfffe0802
28
21static void disable_se7206_irq(unsigned int irq) 29static void disable_se7206_irq(unsigned int irq)
22{ 30{
23 unsigned short val; 31 unsigned short val;
@@ -39,7 +47,7 @@ static void disable_se7206_irq(unsigned int irq)
39 case IRQ1_IRQ: 47 case IRQ1_IRQ:
40 msk0 |= 0x000f; 48 msk0 |= 0x000f;
41 break; 49 break;
42 case IRQ2_IRQ: 50 case IRQ3_IRQ:
43 msk0 |= 0x0f00; 51 msk0 |= 0x0f00;
44 msk1 |= 0x00ff; 52 msk1 |= 0x00ff;
45 break; 53 break;
@@ -70,7 +78,7 @@ static void enable_se7206_irq(unsigned int irq)
70 case IRQ1_IRQ: 78 case IRQ1_IRQ:
71 msk0 &= ~0x000f; 79 msk0 &= ~0x000f;
72 break; 80 break;
73 case IRQ2_IRQ: 81 case IRQ3_IRQ:
74 msk0 &= ~0x0f00; 82 msk0 &= ~0x0f00;
75 msk1 &= ~0x00ff; 83 msk1 &= ~0x00ff;
76 break; 84 break;
@@ -96,7 +104,7 @@ static void eoi_se7206_irq(unsigned int irq)
96 case IRQ1_IRQ: 104 case IRQ1_IRQ:
97 sts0 &= ~0x000f; 105 sts0 &= ~0x000f;
98 break; 106 break;
99 case IRQ2_IRQ: 107 case IRQ3_IRQ:
100 sts0 &= ~0x0f00; 108 sts0 &= ~0x0f00;
101 sts1 &= ~0x00ff; 109 sts1 &= ~0x00ff;
102 break; 110 break;
@@ -106,7 +114,7 @@ static void eoi_se7206_irq(unsigned int irq)
106} 114}
107 115
108static struct irq_chip se7206_irq_chip __read_mostly = { 116static struct irq_chip se7206_irq_chip __read_mostly = {
109 .name = "SE7206-FPGA-IRQ", 117 .name = "SE7206-FPGA",
110 .mask = disable_se7206_irq, 118 .mask = disable_se7206_irq,
111 .unmask = enable_se7206_irq, 119 .unmask = enable_se7206_irq,
112 .mask_ack = disable_se7206_irq, 120 .mask_ack = disable_se7206_irq,
diff --git a/arch/sh/boards/se/7619/Makefile b/arch/sh/boards/se/7619/Makefile
index 3666eca8a658..d21775c28cda 100644
--- a/arch/sh/boards/se/7619/Makefile
+++ b/arch/sh/boards/se/7619/Makefile
@@ -2,4 +2,4 @@
2# Makefile for the 7619 SolutionEngine specific parts of the kernel 2# Makefile for the 7619 SolutionEngine specific parts of the kernel
3# 3#
4 4
5obj-y := setup.o io.o 5obj-y := setup.o
diff --git a/arch/sh/boards/se/7619/io.c b/arch/sh/boards/se/7619/io.c
deleted file mode 100644
index 176f1f39cd9d..000000000000
--- a/arch/sh/boards/se/7619/io.c
+++ /dev/null
@@ -1,102 +0,0 @@
1/*
2 *
3 * linux/arch/sh/boards/se/7619/io.c
4 *
5 * Copyright (C) 2006 Yoshinori Sato
6 *
7 * I/O routine for Hitachi 7619 SolutionEngine.
8 *
9 */
10
11#include <linux/kernel.h>
12#include <linux/types.h>
13#include <asm/io.h>
14#include <asm/se7619.h>
15#include <asm/irq.h>
16
17/* FIXME: M3A-ZAB7 Compact Flash Slot support */
18
19static inline void delay(void)
20{
21 ctrl_inw(0xa0000000); /* Uncached ROM area (P2) */
22}
23
24#define badio(name,port) \
25 printk("bad I/O operation (%s) for port 0x%lx at 0x%08x\n", \
26 #name, (port), (__u32) __builtin_return_address(0))
27
28unsigned char se7619___inb(unsigned long port)
29{
30 badio(inb, port);
31 return 0;
32}
33
34unsigned char se7619___inb_p(unsigned long port)
35{
36 badio(inb_p, port);
37 delay();
38 return 0;
39}
40
41unsigned short se7619___inw(unsigned long port)
42{
43 badio(inw, port);
44 return 0;
45}
46
47unsigned int se7619___inl(unsigned long port)
48{
49 badio(inl, port);
50 return 0;
51}
52
53void se7619___outb(unsigned char value, unsigned long port)
54{
55 badio(outb, port);
56}
57
58void se7619___outb_p(unsigned char value, unsigned long port)
59{
60 badio(outb_p, port);
61 delay();
62}
63
64void se7619___outw(unsigned short value, unsigned long port)
65{
66 badio(outw, port);
67}
68
69void se7619___outl(unsigned int value, unsigned long port)
70{
71 badio(outl, port);
72}
73
74void se7619___insb(unsigned long port, void *addr, unsigned long count)
75{
76 badio(inw, port);
77}
78
79void se7619___insw(unsigned long port, void *addr, unsigned long count)
80{
81 badio(inw, port);
82}
83
84void se7619___insl(unsigned long port, void *addr, unsigned long count)
85{
86 badio(insl, port);
87}
88
89void se7619___outsb(unsigned long port, const void *addr, unsigned long count)
90{
91 badio(insl, port);
92}
93
94void se7619___outsw(unsigned long port, const void *addr, unsigned long count)
95{
96 badio(insl, port);
97}
98
99void se7619___outsl(unsigned long port, const void *addr, unsigned long count)
100{
101 badio(outsw, port);
102}
diff --git a/arch/sh/boards/se/7619/setup.c b/arch/sh/boards/se/7619/setup.c
index e627b26de0d0..52d2c4d5d2fa 100644
--- a/arch/sh/boards/se/7619/setup.c
+++ b/arch/sh/boards/se/7619/setup.c
@@ -9,7 +9,6 @@
9#include <linux/init.h> 9#include <linux/init.h>
10#include <linux/platform_device.h> 10#include <linux/platform_device.h>
11#include <asm/io.h> 11#include <asm/io.h>
12#include <asm/se7619.h>
13#include <asm/machvec.h> 12#include <asm/machvec.h>
14 13
15/* 14/*
@@ -19,25 +18,5 @@
19struct sh_machine_vector mv_se __initmv = { 18struct sh_machine_vector mv_se __initmv = {
20 .mv_name = "SolutionEngine", 19 .mv_name = "SolutionEngine",
21 .mv_nr_irqs = 108, 20 .mv_nr_irqs = 108,
22 .mv_inb = se7619___inb,
23 .mv_inw = se7619___inw,
24 .mv_inl = se7619___inl,
25 .mv_outb = se7619___outb,
26 .mv_outw = se7619___outw,
27 .mv_outl = se7619___outl,
28
29 .mv_inb_p = se7619___inb_p,
30 .mv_inw_p = se7619___inw,
31 .mv_inl_p = se7619___inl,
32 .mv_outb_p = se7619___outb_p,
33 .mv_outw_p = se7619___outw,
34 .mv_outl_p = se7619___outl,
35
36 .mv_insb = se7619___insb,
37 .mv_insw = se7619___insw,
38 .mv_insl = se7619___insl,
39 .mv_outsb = se7619___outsb,
40 .mv_outsw = se7619___outsw,
41 .mv_outsl = se7619___outsl,
42}; 21};
43ALIAS_MV(se) 22ALIAS_MV(se)
diff --git a/arch/sh/boot/Makefile b/arch/sh/boot/Makefile
index 60797b31089c..11dc272c618e 100644
--- a/arch/sh/boot/Makefile
+++ b/arch/sh/boot/Makefile
@@ -8,13 +8,49 @@
8# Copyright (C) 1999 Stuart Menefy 8# Copyright (C) 1999 Stuart Menefy
9# 9#
10 10
11targets := zImage 11MKIMAGE := $(srctree)/scripts/mkuboot.sh
12
13#
14# Assign safe dummy values if these variables are not defined,
15# in order to suppress error message.
16#
17CONFIG_PAGE_OFFSET ?= 0x80000000
18CONFIG_MEMORY_START ?= 0x0c000000
19CONFIG_BOOT_LINK_OFFSET ?= 0x00800000
20CONFIG_ZERO_PAGE_OFFSET ?= 0x00001000
21
22export CONFIG_PAGE_OFFSET CONFIG_MEMORY_START CONFIG_BOOT_LINK_OFFSET \
23 CONFIG_ZERO_PAGE_OFFSET
24
25targets := zImage vmlinux.srec uImage uImage.srec
12subdir- := compressed 26subdir- := compressed
13 27
14$(obj)/zImage: $(obj)/compressed/vmlinux FORCE 28$(obj)/zImage: $(obj)/compressed/vmlinux FORCE
15 $(call if_changed,objcopy) 29 $(call if_changed,objcopy)
16 @echo 'Kernel: $@ is ready' 30 @echo ' Kernel: $@ is ready'
17 31
18$(obj)/compressed/vmlinux: FORCE 32$(obj)/compressed/vmlinux: FORCE
19 $(Q)$(MAKE) $(build)=$(obj)/compressed $@ 33 $(Q)$(MAKE) $(build)=$(obj)/compressed $@
20 34
35KERNEL_LOAD := $(shell printf "0x%8x" $$[$(CONFIG_PAGE_OFFSET) + \
36 $(CONFIG_MEMORY_START) + \
37 $(CONFIG_ZERO_PAGE_OFFSET)+0x1000])
38
39quiet_cmd_uimage = UIMAGE $@
40 cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A sh -O linux -T kernel \
41 -C gzip -a $(KERNEL_LOAD) -e $(KERNEL_LOAD) \
42 -n 'Linux-$(KERNELRELEASE)' -d $< $@
43
44$(obj)/uImage: $(obj)/zImage FORCE
45 $(call if_changed,uimage)
46 @echo ' Image $@ is ready'
47
48OBJCOPYFLAGS_vmlinux.srec := -I binary -O srec
49$(obj)/vmlinux.srec: $(obj)/compressed/vmlinux
50 $(call if_changed,objcopy)
51
52OBJCOPYFLAGS_uImage.srec := -I binary -O srec
53$(obj)/uImage.srec: $(obj)/uImage
54 $(call if_changed,objcopy)
55
56clean-files += uImage uImage.srec vmlinux.srec
diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile
index e5f443790079..d9512416f885 100644
--- a/arch/sh/boot/compressed/Makefile
+++ b/arch/sh/boot/compressed/Makefile
@@ -15,13 +15,7 @@ endif
15 15
16# 16#
17# IMAGE_OFFSET is the load offset of the compression loader 17# IMAGE_OFFSET is the load offset of the compression loader
18# Assign dummy values if these 2 variables are not defined,
19# in order to suppress error message.
20# 18#
21CONFIG_PAGE_OFFSET ?= 0x80000000
22CONFIG_MEMORY_START ?= 0x0c000000
23CONFIG_BOOT_LINK_OFFSET ?= 0x00800000
24
25IMAGE_OFFSET := $(shell printf "0x%08x" $$[$(CONFIG_PAGE_OFFSET) + \ 19IMAGE_OFFSET := $(shell printf "0x%08x" $$[$(CONFIG_PAGE_OFFSET) + \
26 $(CONFIG_MEMORY_START) + \ 20 $(CONFIG_MEMORY_START) + \
27 $(CONFIG_BOOT_LINK_OFFSET)]) 21 $(CONFIG_BOOT_LINK_OFFSET)])
diff --git a/arch/sh/boot/compressed/head.S b/arch/sh/boot/compressed/head.S
index 4c26a192277d..a8399b013729 100644
--- a/arch/sh/boot/compressed/head.S
+++ b/arch/sh/boot/compressed/head.S
@@ -8,6 +8,7 @@
8.text 8.text
9 9
10#include <linux/linkage.h> 10#include <linux/linkage.h>
11#include <asm/page.h>
11 12
12 .global startup 13 .global startup
13startup: 14startup:
@@ -97,7 +98,7 @@ init_stack_addr:
97decompress_kernel_addr: 98decompress_kernel_addr:
98 .long decompress_kernel 99 .long decompress_kernel
99kernel_start_addr: 100kernel_start_addr:
100 .long _text+0x1000 101 .long _text+PAGE_SIZE
101 102
102 .align 9 103 .align 9
103fake_headers_as_bzImage: 104fake_headers_as_bzImage:
diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c
index 35452d85b7f7..df65e305acf7 100644
--- a/arch/sh/boot/compressed/misc.c
+++ b/arch/sh/boot/compressed/misc.c
@@ -13,6 +13,7 @@
13 13
14#include <asm/uaccess.h> 14#include <asm/uaccess.h>
15#include <asm/addrspace.h> 15#include <asm/addrspace.h>
16#include <asm/page.h>
16#ifdef CONFIG_SH_STANDARD_BIOS 17#ifdef CONFIG_SH_STANDARD_BIOS
17#include <asm/sh_bios.h> 18#include <asm/sh_bios.h>
18#endif 19#endif
@@ -229,7 +230,7 @@ long* stack_start = &user_stack[STACK_SIZE];
229void decompress_kernel(void) 230void decompress_kernel(void)
230{ 231{
231 output_data = 0; 232 output_data = 0;
232 output_ptr = P2SEGADDR((unsigned long)&_text+0x1000); 233 output_ptr = P2SEGADDR((unsigned long)&_text+PAGE_SIZE);
233 free_mem_ptr = (unsigned long)&_end; 234 free_mem_ptr = (unsigned long)&_end;
234 free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; 235 free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
235 236
diff --git a/arch/sh/configs/landisk_defconfig b/arch/sh/configs/landisk_defconfig
index 238c0f109907..e7f8ddb0ada4 100644
--- a/arch/sh/configs/landisk_defconfig
+++ b/arch/sh/configs/landisk_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.18 3# Linux kernel version: 2.6.19
4# Tue Oct 3 11:14:13 2006 4# Thu Dec 7 17:13:04 2006
5# 5#
6CONFIG_SUPERH=y 6CONFIG_SUPERH=y
7CONFIG_RWSEM_GENERIC_SPINLOCK=y 7CONFIG_RWSEM_GENERIC_SPINLOCK=y
@@ -10,6 +10,9 @@ CONFIG_GENERIC_HWEIGHT=y
10CONFIG_GENERIC_HARDIRQS=y 10CONFIG_GENERIC_HARDIRQS=y
11CONFIG_GENERIC_IRQ_PROBE=y 11CONFIG_GENERIC_IRQ_PROBE=y
12CONFIG_GENERIC_CALIBRATE_DELAY=y 12CONFIG_GENERIC_CALIBRATE_DELAY=y
13# CONFIG_GENERIC_TIME is not set
14CONFIG_STACKTRACE_SUPPORT=y
15CONFIG_LOCKDEP_SUPPORT=y
13CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 16CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
14 17
15# 18#
@@ -33,6 +36,7 @@ CONFIG_SYSVIPC=y
33# CONFIG_UTS_NS is not set 36# CONFIG_UTS_NS is not set
34# CONFIG_AUDIT is not set 37# CONFIG_AUDIT is not set
35# CONFIG_IKCONFIG is not set 38# CONFIG_IKCONFIG is not set
39CONFIG_SYSFS_DEPRECATED=y
36# CONFIG_RELAY is not set 40# CONFIG_RELAY is not set
37CONFIG_INITRAMFS_SOURCE="" 41CONFIG_INITRAMFS_SOURCE=""
38CONFIG_CC_OPTIMIZE_FOR_SIZE=y 42CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@@ -114,6 +118,8 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
114CONFIG_SH_LANDISK=y 118CONFIG_SH_LANDISK=y
115# CONFIG_SH_TITAN is not set 119# CONFIG_SH_TITAN is not set
116# CONFIG_SH_SHMIN is not set 120# CONFIG_SH_SHMIN is not set
121# CONFIG_SH_7206_SOLUTION_ENGINE is not set
122# CONFIG_SH_7619_SOLUTION_ENGINE is not set
117# CONFIG_SH_UNKNOWN is not set 123# CONFIG_SH_UNKNOWN is not set
118 124
119# 125#
@@ -125,6 +131,12 @@ CONFIG_CPU_SH4=y
125# SH-2 Processor Support 131# SH-2 Processor Support
126# 132#
127# CONFIG_CPU_SUBTYPE_SH7604 is not set 133# CONFIG_CPU_SUBTYPE_SH7604 is not set
134# CONFIG_CPU_SUBTYPE_SH7619 is not set
135
136#
137# SH-2A Processor Support
138#
139# CONFIG_CPU_SUBTYPE_SH7206 is not set
128 140
129# 141#
130# SH-3 Processor Support 142# SH-3 Processor Support
@@ -160,6 +172,7 @@ CONFIG_CPU_SUBTYPE_SH7751R=y
160# 172#
161# CONFIG_CPU_SUBTYPE_SH7770 is not set 173# CONFIG_CPU_SUBTYPE_SH7770 is not set
162# CONFIG_CPU_SUBTYPE_SH7780 is not set 174# CONFIG_CPU_SUBTYPE_SH7780 is not set
175# CONFIG_CPU_SUBTYPE_SH7785 is not set
163 176
164# 177#
165# SH4AL-DSP Processor Support 178# SH4AL-DSP Processor Support
@@ -175,6 +188,9 @@ CONFIG_PAGE_OFFSET=0x80000000
175CONFIG_MEMORY_START=0x0c000000 188CONFIG_MEMORY_START=0x0c000000
176CONFIG_MEMORY_SIZE=0x04000000 189CONFIG_MEMORY_SIZE=0x04000000
177CONFIG_VSYSCALL=y 190CONFIG_VSYSCALL=y
191CONFIG_PAGE_SIZE_4KB=y
192# CONFIG_PAGE_SIZE_8KB is not set
193# CONFIG_PAGE_SIZE_64KB is not set
178CONFIG_SELECT_MEMORY_MODEL=y 194CONFIG_SELECT_MEMORY_MODEL=y
179CONFIG_FLATMEM_MANUAL=y 195CONFIG_FLATMEM_MANUAL=y
180# CONFIG_DISCONTIGMEM_MANUAL is not set 196# CONFIG_DISCONTIGMEM_MANUAL is not set
@@ -196,16 +212,21 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
196# Processor features 212# Processor features
197# 213#
198CONFIG_CPU_LITTLE_ENDIAN=y 214CONFIG_CPU_LITTLE_ENDIAN=y
215# CONFIG_CPU_BIG_ENDIAN is not set
199CONFIG_SH_FPU=y 216CONFIG_SH_FPU=y
200# CONFIG_SH_DSP is not set 217# CONFIG_SH_DSP is not set
201# CONFIG_SH_STORE_QUEUES is not set 218# CONFIG_SH_STORE_QUEUES is not set
202CONFIG_CPU_HAS_INTEVT=y 219CONFIG_CPU_HAS_INTEVT=y
220CONFIG_CPU_HAS_IPR_IRQ=y
203CONFIG_CPU_HAS_SR_RB=y 221CONFIG_CPU_HAS_SR_RB=y
222CONFIG_CPU_HAS_PTEA=y
204 223
205# 224#
206# Timer support 225# Timer support
207# 226#
208CONFIG_SH_TMU=y 227CONFIG_SH_TMU=y
228CONFIG_SH_TIMER_IRQ=16
229# CONFIG_NO_IDLE_HZ is not set
209CONFIG_SH_PCLK_FREQ=33333333 230CONFIG_SH_PCLK_FREQ=33333333
210 231
211# 232#
@@ -216,9 +237,7 @@ CONFIG_SH_PCLK_FREQ=33333333
216# 237#
217# DMA support 238# DMA support
218# 239#
219CONFIG_SH_DMA=y 240# CONFIG_SH_DMA is not set
220CONFIG_NR_ONCHIP_DMA_CHANNELS=4
221# CONFIG_NR_DMA_CHANNELS_BOOL is not set
222 241
223# 242#
224# Companion Chips 243# Companion Chips
@@ -227,6 +246,11 @@ CONFIG_NR_ONCHIP_DMA_CHANNELS=4
227CONFIG_HEARTBEAT=y 246CONFIG_HEARTBEAT=y
228 247
229# 248#
249# Additional SuperH Device Drivers
250#
251# CONFIG_PUSH_SWITCH is not set
252
253#
230# Kernel features 254# Kernel features
231# 255#
232# CONFIG_HZ_100 is not set 256# CONFIG_HZ_100 is not set
@@ -340,11 +364,13 @@ CONFIG_IP_PNP=y
340# CONFIG_INET_TUNNEL is not set 364# CONFIG_INET_TUNNEL is not set
341CONFIG_INET_XFRM_MODE_TRANSPORT=y 365CONFIG_INET_XFRM_MODE_TRANSPORT=y
342CONFIG_INET_XFRM_MODE_TUNNEL=y 366CONFIG_INET_XFRM_MODE_TUNNEL=y
367CONFIG_INET_XFRM_MODE_BEET=y
343CONFIG_INET_DIAG=y 368CONFIG_INET_DIAG=y
344CONFIG_INET_TCP_DIAG=y 369CONFIG_INET_TCP_DIAG=y
345# CONFIG_TCP_CONG_ADVANCED is not set 370# CONFIG_TCP_CONG_ADVANCED is not set
346CONFIG_TCP_CONG_CUBIC=y 371CONFIG_TCP_CONG_CUBIC=y
347CONFIG_DEFAULT_TCP_CONG="cubic" 372CONFIG_DEFAULT_TCP_CONG="cubic"
373# CONFIG_TCP_MD5SIG is not set
348 374
349# 375#
350# IP: Virtual Server Configuration 376# IP: Virtual Server Configuration
@@ -361,24 +387,12 @@ CONFIG_NETFILTER=y
361# Core Netfilter Configuration 387# Core Netfilter Configuration
362# 388#
363# CONFIG_NETFILTER_NETLINK is not set 389# CONFIG_NETFILTER_NETLINK is not set
390# CONFIG_NF_CONNTRACK_ENABLED is not set
364# CONFIG_NETFILTER_XTABLES is not set 391# CONFIG_NETFILTER_XTABLES is not set
365 392
366# 393#
367# IP: Netfilter Configuration 394# IP: Netfilter Configuration
368# 395#
369CONFIG_IP_NF_CONNTRACK=m
370CONFIG_IP_NF_CT_ACCT=y
371CONFIG_IP_NF_CONNTRACK_MARK=y
372# CONFIG_IP_NF_CONNTRACK_EVENTS is not set
373# CONFIG_IP_NF_CT_PROTO_SCTP is not set
374CONFIG_IP_NF_FTP=m
375CONFIG_IP_NF_IRC=m
376# CONFIG_IP_NF_NETBIOS_NS is not set
377CONFIG_IP_NF_TFTP=m
378CONFIG_IP_NF_AMANDA=m
379# CONFIG_IP_NF_PPTP is not set
380# CONFIG_IP_NF_H323 is not set
381# CONFIG_IP_NF_SIP is not set
382CONFIG_IP_NF_QUEUE=m 396CONFIG_IP_NF_QUEUE=m
383 397
384# 398#
@@ -477,6 +491,12 @@ CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
477# CONFIG_ATA_OVER_ETH is not set 491# CONFIG_ATA_OVER_ETH is not set
478 492
479# 493#
494# Misc devices
495#
496# CONFIG_SGI_IOC4 is not set
497# CONFIG_TIFM_CORE is not set
498
499#
480# ATA/ATAPI/MFM/RLL support 500# ATA/ATAPI/MFM/RLL support
481# 501#
482CONFIG_IDE=y 502CONFIG_IDE=y
@@ -519,6 +539,7 @@ CONFIG_BLK_DEV_AEC62XX=y
519# CONFIG_BLK_DEV_CS5530 is not set 539# CONFIG_BLK_DEV_CS5530 is not set
520# CONFIG_BLK_DEV_HPT34X is not set 540# CONFIG_BLK_DEV_HPT34X is not set
521# CONFIG_BLK_DEV_HPT366 is not set 541# CONFIG_BLK_DEV_HPT366 is not set
542# CONFIG_BLK_DEV_JMICRON is not set
522# CONFIG_BLK_DEV_SC1200 is not set 543# CONFIG_BLK_DEV_SC1200 is not set
523# CONFIG_BLK_DEV_PIIX is not set 544# CONFIG_BLK_DEV_PIIX is not set
524# CONFIG_BLK_DEV_IT821X is not set 545# CONFIG_BLK_DEV_IT821X is not set
@@ -542,6 +563,7 @@ CONFIG_IDEDMA_AUTO=y
542# 563#
543# CONFIG_RAID_ATTRS is not set 564# CONFIG_RAID_ATTRS is not set
544CONFIG_SCSI=y 565CONFIG_SCSI=y
566# CONFIG_SCSI_TGT is not set
545# CONFIG_SCSI_NETLINK is not set 567# CONFIG_SCSI_NETLINK is not set
546CONFIG_SCSI_PROC_FS=y 568CONFIG_SCSI_PROC_FS=y
547 569
@@ -561,6 +583,7 @@ CONFIG_BLK_DEV_SD=y
561CONFIG_SCSI_MULTI_LUN=y 583CONFIG_SCSI_MULTI_LUN=y
562# CONFIG_SCSI_CONSTANTS is not set 584# CONFIG_SCSI_CONSTANTS is not set
563# CONFIG_SCSI_LOGGING is not set 585# CONFIG_SCSI_LOGGING is not set
586# CONFIG_SCSI_SCAN_ASYNC is not set
564 587
565# 588#
566# SCSI Transports 589# SCSI Transports
@@ -602,12 +625,12 @@ CONFIG_SCSI_MULTI_LUN=y
602# CONFIG_SCSI_NCR53C406A is not set 625# CONFIG_SCSI_NCR53C406A is not set
603# CONFIG_SCSI_STEX is not set 626# CONFIG_SCSI_STEX is not set
604# CONFIG_SCSI_SYM53C8XX_2 is not set 627# CONFIG_SCSI_SYM53C8XX_2 is not set
605# CONFIG_SCSI_IPR is not set
606# CONFIG_SCSI_PAS16 is not set 628# CONFIG_SCSI_PAS16 is not set
607# CONFIG_SCSI_PSI240I is not set 629# CONFIG_SCSI_PSI240I is not set
608# CONFIG_SCSI_QLOGIC_FAS is not set 630# CONFIG_SCSI_QLOGIC_FAS is not set
609# CONFIG_SCSI_QLOGIC_1280 is not set 631# CONFIG_SCSI_QLOGIC_1280 is not set
610# CONFIG_SCSI_QLA_FC is not set 632# CONFIG_SCSI_QLA_FC is not set
633# CONFIG_SCSI_QLA_ISCSI is not set
611# CONFIG_SCSI_LPFC is not set 634# CONFIG_SCSI_LPFC is not set
612# CONFIG_SCSI_SYM53C416 is not set 635# CONFIG_SCSI_SYM53C416 is not set
613# CONFIG_SCSI_DC395x is not set 636# CONFIG_SCSI_DC395x is not set
@@ -615,6 +638,7 @@ CONFIG_SCSI_MULTI_LUN=y
615# CONFIG_SCSI_T128 is not set 638# CONFIG_SCSI_T128 is not set
616# CONFIG_SCSI_NSP32 is not set 639# CONFIG_SCSI_NSP32 is not set
617# CONFIG_SCSI_DEBUG is not set 640# CONFIG_SCSI_DEBUG is not set
641# CONFIG_SCSI_SRP is not set
618 642
619# 643#
620# PCMCIA SCSI adapter support 644# PCMCIA SCSI adapter support
@@ -757,6 +781,7 @@ CONFIG_8139CP=y
757# CONFIG_IXGB is not set 781# CONFIG_IXGB is not set
758# CONFIG_S2IO is not set 782# CONFIG_S2IO is not set
759# CONFIG_MYRI10GE is not set 783# CONFIG_MYRI10GE is not set
784# CONFIG_NETXEN_NIC is not set
760 785
761# 786#
762# Token Ring devices 787# Token Ring devices
@@ -871,10 +896,6 @@ CONFIG_HW_RANDOM=y
871# CONFIG_DTLK is not set 896# CONFIG_DTLK is not set
872# CONFIG_R3964 is not set 897# CONFIG_R3964 is not set
873# CONFIG_APPLICOM is not set 898# CONFIG_APPLICOM is not set
874
875#
876# Ftape, the floppy tape device driver
877#
878# CONFIG_DRM is not set 899# CONFIG_DRM is not set
879 900
880# 901#
@@ -889,7 +910,6 @@ CONFIG_HW_RANDOM=y
889# TPM devices 910# TPM devices
890# 911#
891# CONFIG_TCG_TPM is not set 912# CONFIG_TCG_TPM is not set
892# CONFIG_TELCLOCK is not set
893 913
894# 914#
895# I2C support 915# I2C support
@@ -905,6 +925,7 @@ CONFIG_HW_RANDOM=y
905# 925#
906# Dallas's 1-wire bus 926# Dallas's 1-wire bus
907# 927#
928# CONFIG_W1 is not set
908 929
909# 930#
910# Hardware Monitoring support 931# Hardware Monitoring support
@@ -917,10 +938,6 @@ CONFIG_HWMON=y
917# CONFIG_HWMON_DEBUG_CHIP is not set 938# CONFIG_HWMON_DEBUG_CHIP is not set
918 939
919# 940#
920# Misc devices
921#
922
923#
924# Multimedia devices 941# Multimedia devices
925# 942#
926CONFIG_VIDEO_DEV=m 943CONFIG_VIDEO_DEV=m
@@ -1037,6 +1054,7 @@ CONFIG_USB=y
1037CONFIG_USB_DEVICEFS=y 1054CONFIG_USB_DEVICEFS=y
1038# CONFIG_USB_BANDWIDTH is not set 1055# CONFIG_USB_BANDWIDTH is not set
1039# CONFIG_USB_DYNAMIC_MINORS is not set 1056# CONFIG_USB_DYNAMIC_MINORS is not set
1057# CONFIG_USB_MULTITHREAD_PROBE is not set
1040# CONFIG_USB_OTG is not set 1058# CONFIG_USB_OTG is not set
1041 1059
1042# 1060#
@@ -1106,7 +1124,6 @@ CONFIG_USB_HIDINPUT=y
1106# CONFIG_USB_ATI_REMOTE2 is not set 1124# CONFIG_USB_ATI_REMOTE2 is not set
1107# CONFIG_USB_KEYSPAN_REMOTE is not set 1125# CONFIG_USB_KEYSPAN_REMOTE is not set
1108# CONFIG_USB_APPLETOUCH is not set 1126# CONFIG_USB_APPLETOUCH is not set
1109# CONFIG_USB_TRANCEVIBRATOR is not set
1110 1127
1111# 1128#
1112# USB Imaging devices 1129# USB Imaging devices
@@ -1121,6 +1138,7 @@ CONFIG_USB_HIDINPUT=y
1121# CONFIG_USB_KAWETH is not set 1138# CONFIG_USB_KAWETH is not set
1122CONFIG_USB_PEGASUS=m 1139CONFIG_USB_PEGASUS=m
1123CONFIG_USB_RTL8150=m 1140CONFIG_USB_RTL8150=m
1141# CONFIG_USB_USBNET_MII is not set
1124# CONFIG_USB_USBNET is not set 1142# CONFIG_USB_USBNET is not set
1125CONFIG_USB_MON=y 1143CONFIG_USB_MON=y
1126 1144
@@ -1156,6 +1174,7 @@ CONFIG_USB_SERIAL_FTDI_SIO=m
1156# CONFIG_USB_SERIAL_KLSI is not set 1174# CONFIG_USB_SERIAL_KLSI is not set
1157# CONFIG_USB_SERIAL_KOBIL_SCT is not set 1175# CONFIG_USB_SERIAL_KOBIL_SCT is not set
1158# CONFIG_USB_SERIAL_MCT_U232 is not set 1176# CONFIG_USB_SERIAL_MCT_U232 is not set
1177# CONFIG_USB_SERIAL_MOS7720 is not set
1159# CONFIG_USB_SERIAL_MOS7840 is not set 1178# CONFIG_USB_SERIAL_MOS7840 is not set
1160# CONFIG_USB_SERIAL_NAVMAN is not set 1179# CONFIG_USB_SERIAL_NAVMAN is not set
1161CONFIG_USB_SERIAL_PL2303=m 1180CONFIG_USB_SERIAL_PL2303=m
@@ -1167,6 +1186,7 @@ CONFIG_USB_SERIAL_PL2303=m
1167# CONFIG_USB_SERIAL_XIRCOM is not set 1186# CONFIG_USB_SERIAL_XIRCOM is not set
1168# CONFIG_USB_SERIAL_OPTION is not set 1187# CONFIG_USB_SERIAL_OPTION is not set
1169# CONFIG_USB_SERIAL_OMNINET is not set 1188# CONFIG_USB_SERIAL_OMNINET is not set
1189# CONFIG_USB_SERIAL_DEBUG is not set
1170 1190
1171# 1191#
1172# USB Miscellaneous drivers 1192# USB Miscellaneous drivers
@@ -1188,6 +1208,7 @@ CONFIG_USB_EMI26=m
1188CONFIG_USB_SISUSBVGA=m 1208CONFIG_USB_SISUSBVGA=m
1189CONFIG_USB_SISUSBVGA_CON=y 1209CONFIG_USB_SISUSBVGA_CON=y
1190# CONFIG_USB_LD is not set 1210# CONFIG_USB_LD is not set
1211# CONFIG_USB_TRANCEVIBRATOR is not set
1191# CONFIG_USB_TEST is not set 1212# CONFIG_USB_TEST is not set
1192 1213
1193# 1214#
@@ -1254,6 +1275,7 @@ CONFIG_EXT3_FS=y
1254CONFIG_EXT3_FS_XATTR=y 1275CONFIG_EXT3_FS_XATTR=y
1255# CONFIG_EXT3_FS_POSIX_ACL is not set 1276# CONFIG_EXT3_FS_POSIX_ACL is not set
1256# CONFIG_EXT3_FS_SECURITY is not set 1277# CONFIG_EXT3_FS_SECURITY is not set
1278# CONFIG_EXT4DEV_FS is not set
1257CONFIG_JBD=y 1279CONFIG_JBD=y
1258# CONFIG_JBD_DEBUG is not set 1280# CONFIG_JBD_DEBUG is not set
1259CONFIG_FS_MBCACHE=y 1281CONFIG_FS_MBCACHE=y
@@ -1264,6 +1286,7 @@ CONFIG_REISERFS_FS=y
1264# CONFIG_JFS_FS is not set 1286# CONFIG_JFS_FS is not set
1265# CONFIG_FS_POSIX_ACL is not set 1287# CONFIG_FS_POSIX_ACL is not set
1266# CONFIG_XFS_FS is not set 1288# CONFIG_XFS_FS is not set
1289# CONFIG_GFS2_FS is not set
1267# CONFIG_OCFS2_FS is not set 1290# CONFIG_OCFS2_FS is not set
1268# CONFIG_MINIX_FS is not set 1291# CONFIG_MINIX_FS is not set
1269CONFIG_ROMFS_FS=y 1292CONFIG_ROMFS_FS=y
@@ -1414,6 +1437,7 @@ CONFIG_NLS_CODEPAGE_932=y
1414# 1437#
1415# Kernel hacking 1438# Kernel hacking
1416# 1439#
1440CONFIG_TRACE_IRQFLAGS_SUPPORT=y
1417# CONFIG_PRINTK_TIME is not set 1441# CONFIG_PRINTK_TIME is not set
1418CONFIG_ENABLE_MUST_CHECK=y 1442CONFIG_ENABLE_MUST_CHECK=y
1419# CONFIG_MAGIC_SYSRQ is not set 1443# CONFIG_MAGIC_SYSRQ is not set
@@ -1422,6 +1446,7 @@ CONFIG_ENABLE_MUST_CHECK=y
1422CONFIG_LOG_BUF_SHIFT=14 1446CONFIG_LOG_BUF_SHIFT=14
1423# CONFIG_DEBUG_BUGVERBOSE is not set 1447# CONFIG_DEBUG_BUGVERBOSE is not set
1424# CONFIG_DEBUG_FS is not set 1448# CONFIG_DEBUG_FS is not set
1449# CONFIG_HEADERS_CHECK is not set
1425CONFIG_SH_STANDARD_BIOS=y 1450CONFIG_SH_STANDARD_BIOS=y
1426# CONFIG_EARLY_SCIF_CONSOLE is not set 1451# CONFIG_EARLY_SCIF_CONSOLE is not set
1427# CONFIG_EARLY_PRINTK is not set 1452# CONFIG_EARLY_PRINTK is not set
@@ -1445,6 +1470,4 @@ CONFIG_SH_STANDARD_BIOS=y
1445# CONFIG_CRC16 is not set 1470# CONFIG_CRC16 is not set
1446CONFIG_CRC32=y 1471CONFIG_CRC32=y
1447# CONFIG_LIBCRC32C is not set 1472# CONFIG_LIBCRC32C is not set
1448CONFIG_TEXTSEARCH=y
1449CONFIG_TEXTSEARCH_KMP=m
1450CONFIG_PLIST=y 1473CONFIG_PLIST=y
diff --git a/arch/sh/configs/se7206_defconfig b/arch/sh/configs/se7206_defconfig
index 36cec0b6e7c1..87ab9080fd1d 100644
--- a/arch/sh/configs/se7206_defconfig
+++ b/arch/sh/configs/se7206_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.19-rc4 3# Linux kernel version: 2.6.19
4# Sun Nov 5 16:20:10 2006 4# Wed Dec 6 14:40:15 2006
5# 5#
6CONFIG_SUPERH=y 6CONFIG_SUPERH=y
7CONFIG_RWSEM_GENERIC_SPINLOCK=y 7CONFIG_RWSEM_GENERIC_SPINLOCK=y
@@ -11,6 +11,8 @@ CONFIG_GENERIC_HARDIRQS=y
11CONFIG_GENERIC_IRQ_PROBE=y 11CONFIG_GENERIC_IRQ_PROBE=y
12CONFIG_GENERIC_CALIBRATE_DELAY=y 12CONFIG_GENERIC_CALIBRATE_DELAY=y
13# CONFIG_GENERIC_TIME is not set 13# CONFIG_GENERIC_TIME is not set
14CONFIG_STACKTRACE_SUPPORT=y
15CONFIG_LOCKDEP_SUPPORT=y
14CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 16CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
15 17
16# 18#
@@ -34,24 +36,23 @@ CONFIG_LOCALVERSION=""
34# CONFIG_IKCONFIG is not set 36# CONFIG_IKCONFIG is not set
35# CONFIG_RELAY is not set 37# CONFIG_RELAY is not set
36CONFIG_INITRAMFS_SOURCE="" 38CONFIG_INITRAMFS_SOURCE=""
37# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 39CONFIG_CC_OPTIMIZE_FOR_SIZE=y
38CONFIG_SYSCTL=y 40CONFIG_SYSCTL=y
39CONFIG_EMBEDDED=y 41CONFIG_EMBEDDED=y
40CONFIG_UID16=y 42# CONFIG_UID16 is not set
41# CONFIG_SYSCTL_SYSCALL is not set 43# CONFIG_SYSCTL_SYSCALL is not set
42CONFIG_KALLSYMS=y 44# CONFIG_KALLSYMS is not set
43# CONFIG_KALLSYMS_EXTRA_PASS is not set
44# CONFIG_HOTPLUG is not set 45# CONFIG_HOTPLUG is not set
45CONFIG_PRINTK=y 46CONFIG_PRINTK=y
46CONFIG_BUG=y 47CONFIG_BUG=y
47CONFIG_ELF_CORE=y 48# CONFIG_ELF_CORE is not set
48CONFIG_BASE_FULL=y 49# CONFIG_BASE_FULL is not set
49# CONFIG_FUTEX is not set 50# CONFIG_FUTEX is not set
50# CONFIG_EPOLL is not set 51# CONFIG_EPOLL is not set
51CONFIG_SLAB=y 52CONFIG_SLAB=y
52CONFIG_VM_EVENT_COUNTERS=y 53# CONFIG_VM_EVENT_COUNTERS is not set
53CONFIG_TINY_SHMEM=y 54CONFIG_TINY_SHMEM=y
54CONFIG_BASE_SMALL=0 55CONFIG_BASE_SMALL=1
55# CONFIG_SLOB is not set 56# CONFIG_SLOB is not set
56 57
57# 58#
@@ -160,6 +161,7 @@ CONFIG_CPU_SUBTYPE_SH7206=y
160# 161#
161# CONFIG_CPU_SUBTYPE_SH7770 is not set 162# CONFIG_CPU_SUBTYPE_SH7770 is not set
162# CONFIG_CPU_SUBTYPE_SH7780 is not set 163# CONFIG_CPU_SUBTYPE_SH7780 is not set
164# CONFIG_CPU_SUBTYPE_SH7785 is not set
163 165
164# 166#
165# SH4AL-DSP Processor Support 167# SH4AL-DSP Processor Support
@@ -172,7 +174,10 @@ CONFIG_CPU_SUBTYPE_SH7206=y
172# 174#
173CONFIG_PAGE_OFFSET=0x00000000 175CONFIG_PAGE_OFFSET=0x00000000
174CONFIG_MEMORY_START=0x0c000000 176CONFIG_MEMORY_START=0x0c000000
175CONFIG_MEMORY_SIZE=0x02000000 177CONFIG_MEMORY_SIZE=0x04000000
178CONFIG_PAGE_SIZE_4KB=y
179# CONFIG_PAGE_SIZE_8KB is not set
180# CONFIG_PAGE_SIZE_64KB is not set
176CONFIG_SELECT_MEMORY_MODEL=y 181CONFIG_SELECT_MEMORY_MODEL=y
177CONFIG_FLATMEM_MANUAL=y 182CONFIG_FLATMEM_MANUAL=y
178# CONFIG_DISCONTIGMEM_MANUAL is not set 183# CONFIG_DISCONTIGMEM_MANUAL is not set
@@ -194,6 +199,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
194# Processor features 199# Processor features
195# 200#
196# CONFIG_CPU_LITTLE_ENDIAN is not set 201# CONFIG_CPU_LITTLE_ENDIAN is not set
202CONFIG_CPU_BIG_ENDIAN=y
197# CONFIG_SH_FPU is not set 203# CONFIG_SH_FPU is not set
198# CONFIG_SH_FPU_EMU is not set 204# CONFIG_SH_FPU_EMU is not set
199# CONFIG_SH_DSP is not set 205# CONFIG_SH_DSP is not set
@@ -203,6 +209,8 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
203# 209#
204CONFIG_SH_CMT=y 210CONFIG_SH_CMT=y
205# CONFIG_SH_MTU2 is not set 211# CONFIG_SH_MTU2 is not set
212CONFIG_SH_TIMER_IRQ=140
213# CONFIG_NO_IDLE_HZ is not set
206CONFIG_SH_PCLK_FREQ=33333333 214CONFIG_SH_PCLK_FREQ=33333333
207CONFIG_SH_CLK_MD=6 215CONFIG_SH_CLK_MD=6
208 216
@@ -222,6 +230,11 @@ CONFIG_SH_CLK_MD=6
222# CONFIG_HD6446X_SERIES is not set 230# CONFIG_HD6446X_SERIES is not set
223 231
224# 232#
233# Additional SuperH Device Drivers
234#
235# CONFIG_PUSH_SWITCH is not set
236
237#
225# Kernel features 238# Kernel features
226# 239#
227CONFIG_HZ_100=y 240CONFIG_HZ_100=y
@@ -279,9 +292,6 @@ CONFIG_NET=y
279# CONFIG_NETDEBUG is not set 292# CONFIG_NETDEBUG is not set
280# CONFIG_PACKET is not set 293# CONFIG_PACKET is not set
281# CONFIG_UNIX is not set 294# CONFIG_UNIX is not set
282CONFIG_XFRM=y
283# CONFIG_XFRM_USER is not set
284# CONFIG_XFRM_SUB_POLICY is not set
285# CONFIG_NET_KEY is not set 295# CONFIG_NET_KEY is not set
286CONFIG_INET=y 296CONFIG_INET=y
287# CONFIG_IP_MULTICAST is not set 297# CONFIG_IP_MULTICAST is not set
@@ -297,9 +307,9 @@ CONFIG_IP_FIB_HASH=y
297# CONFIG_INET_IPCOMP is not set 307# CONFIG_INET_IPCOMP is not set
298# CONFIG_INET_XFRM_TUNNEL is not set 308# CONFIG_INET_XFRM_TUNNEL is not set
299# CONFIG_INET_TUNNEL is not set 309# CONFIG_INET_TUNNEL is not set
300CONFIG_INET_XFRM_MODE_TRANSPORT=y 310# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
301CONFIG_INET_XFRM_MODE_TUNNEL=y 311# CONFIG_INET_XFRM_MODE_TUNNEL is not set
302CONFIG_INET_XFRM_MODE_BEET=y 312# CONFIG_INET_XFRM_MODE_BEET is not set
303# CONFIG_INET_DIAG is not set 313# CONFIG_INET_DIAG is not set
304# CONFIG_TCP_CONG_ADVANCED is not set 314# CONFIG_TCP_CONG_ADVANCED is not set
305CONFIG_TCP_CONG_CUBIC=y 315CONFIG_TCP_CONG_CUBIC=y
@@ -371,7 +381,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
371# 381#
372CONFIG_MTD=y 382CONFIG_MTD=y
373# CONFIG_MTD_DEBUG is not set 383# CONFIG_MTD_DEBUG is not set
374# CONFIG_MTD_CONCAT is not set 384CONFIG_MTD_CONCAT=y
375CONFIG_MTD_PARTITIONS=y 385CONFIG_MTD_PARTITIONS=y
376CONFIG_MTD_REDBOOT_PARTS=y 386CONFIG_MTD_REDBOOT_PARTS=y
377CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1 387CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
@@ -422,7 +432,7 @@ CONFIG_MTD_CFI_UTIL=y
422# CONFIG_MTD_COMPLEX_MAPPINGS is not set 432# CONFIG_MTD_COMPLEX_MAPPINGS is not set
423CONFIG_MTD_PHYSMAP=y 433CONFIG_MTD_PHYSMAP=y
424CONFIG_MTD_PHYSMAP_START=0x20000000 434CONFIG_MTD_PHYSMAP_START=0x20000000
425CONFIG_MTD_PHYSMAP_LEN=0x1000000 435CONFIG_MTD_PHYSMAP_LEN=0x01000000
426CONFIG_MTD_PHYSMAP_BANKWIDTH=4 436CONFIG_MTD_PHYSMAP_BANKWIDTH=4
427# CONFIG_MTD_SOLUTIONENGINE is not set 437# CONFIG_MTD_SOLUTIONENGINE is not set
428# CONFIG_MTD_UCLINUX is not set 438# CONFIG_MTD_UCLINUX is not set
@@ -468,10 +478,7 @@ CONFIG_MTD_PHYSMAP_BANKWIDTH=4
468# CONFIG_BLK_DEV_COW_COMMON is not set 478# CONFIG_BLK_DEV_COW_COMMON is not set
469# CONFIG_BLK_DEV_LOOP is not set 479# CONFIG_BLK_DEV_LOOP is not set
470# CONFIG_BLK_DEV_NBD is not set 480# CONFIG_BLK_DEV_NBD is not set
471CONFIG_BLK_DEV_RAM=y 481# CONFIG_BLK_DEV_RAM is not set
472CONFIG_BLK_DEV_RAM_COUNT=16
473CONFIG_BLK_DEV_RAM_SIZE=4096
474CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
475# CONFIG_BLK_DEV_INITRD is not set 482# CONFIG_BLK_DEV_INITRD is not set
476# CONFIG_CDROM_PKTCDVD is not set 483# CONFIG_CDROM_PKTCDVD is not set
477# CONFIG_ATA_OVER_ETH is not set 484# CONFIG_ATA_OVER_ETH is not set
@@ -519,7 +526,50 @@ CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
519# 526#
520# Network device support 527# Network device support
521# 528#
522# CONFIG_NETDEVICES is not set 529CONFIG_NETDEVICES=y
530# CONFIG_DUMMY is not set
531# CONFIG_BONDING is not set
532# CONFIG_EQUALIZER is not set
533# CONFIG_TUN is not set
534
535#
536# PHY device support
537#
538# CONFIG_PHYLIB is not set
539
540#
541# Ethernet (10 or 100Mbit)
542#
543CONFIG_NET_ETHERNET=y
544CONFIG_MII=y
545# CONFIG_STNIC is not set
546CONFIG_SMC91X=y
547
548#
549# Ethernet (1000 Mbit)
550#
551
552#
553# Ethernet (10000 Mbit)
554#
555
556#
557# Token Ring devices
558#
559
560#
561# Wireless LAN (non-hamradio)
562#
563# CONFIG_NET_RADIO is not set
564
565#
566# Wan interfaces
567#
568# CONFIG_WAN is not set
569# CONFIG_PPP is not set
570# CONFIG_SLIP is not set
571# CONFIG_SHAPER is not set
572# CONFIG_NETCONSOLE is not set
523# CONFIG_NETPOLL is not set 573# CONFIG_NETPOLL is not set
524# CONFIG_NET_POLL_CONTROLLER is not set 574# CONFIG_NET_POLL_CONTROLLER is not set
525 575
@@ -536,7 +586,26 @@ CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
536# 586#
537# Input device support 587# Input device support
538# 588#
539# CONFIG_INPUT is not set 589CONFIG_INPUT=y
590# CONFIG_INPUT_FF_MEMLESS is not set
591
592#
593# Userland interfaces
594#
595# CONFIG_INPUT_MOUSEDEV is not set
596# CONFIG_INPUT_JOYDEV is not set
597# CONFIG_INPUT_TSDEV is not set
598# CONFIG_INPUT_EVDEV is not set
599# CONFIG_INPUT_EVBUG is not set
600
601#
602# Input Device Drivers
603#
604# CONFIG_INPUT_KEYBOARD is not set
605# CONFIG_INPUT_MOUSE is not set
606# CONFIG_INPUT_JOYSTICK is not set
607# CONFIG_INPUT_TOUCHSCREEN is not set
608# CONFIG_INPUT_MISC is not set
540 609
541# 610#
542# Hardware I/O ports 611# Hardware I/O ports
@@ -564,8 +633,7 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y
564CONFIG_SERIAL_CORE=y 633CONFIG_SERIAL_CORE=y
565CONFIG_SERIAL_CORE_CONSOLE=y 634CONFIG_SERIAL_CORE_CONSOLE=y
566# CONFIG_UNIX98_PTYS is not set 635# CONFIG_UNIX98_PTYS is not set
567CONFIG_LEGACY_PTYS=y 636# CONFIG_LEGACY_PTYS is not set
568CONFIG_LEGACY_PTY_COUNT=256
569 637
570# 638#
571# IPMI 639# IPMI
@@ -576,7 +644,7 @@ CONFIG_LEGACY_PTY_COUNT=256
576# Watchdog Cards 644# Watchdog Cards
577# 645#
578# CONFIG_WATCHDOG is not set 646# CONFIG_WATCHDOG is not set
579CONFIG_HW_RANDOM=y 647# CONFIG_HW_RANDOM is not set
580# CONFIG_GEN_RTC is not set 648# CONFIG_GEN_RTC is not set
581# CONFIG_DTLK is not set 649# CONFIG_DTLK is not set
582# CONFIG_R3964 is not set 650# CONFIG_R3964 is not set
@@ -610,12 +678,8 @@ CONFIG_HW_RANDOM=y
610# 678#
611# Hardware Monitoring support 679# Hardware Monitoring support
612# 680#
613CONFIG_HWMON=y 681# CONFIG_HWMON is not set
614# CONFIG_HWMON_VID is not set 682# CONFIG_HWMON_VID is not set
615# CONFIG_SENSORS_ABITUGURU is not set
616# CONFIG_SENSORS_F71805F is not set
617# CONFIG_SENSORS_VT1211 is not set
618# CONFIG_HWMON_DEBUG_CHIP is not set
619 683
620# 684#
621# Multimedia devices 685# Multimedia devices
@@ -630,7 +694,7 @@ CONFIG_HWMON=y
630# 694#
631# Graphics support 695# Graphics support
632# 696#
633CONFIG_FIRMWARE_EDID=y 697# CONFIG_FIRMWARE_EDID is not set
634# CONFIG_FB is not set 698# CONFIG_FB is not set
635 699
636# 700#
@@ -701,8 +765,7 @@ CONFIG_FIRMWARE_EDID=y
701# 765#
702# File systems 766# File systems
703# 767#
704CONFIG_EXT2_FS=y 768# CONFIG_EXT2_FS is not set
705# CONFIG_EXT2_FS_XATTR is not set
706# CONFIG_EXT3_FS is not set 769# CONFIG_EXT3_FS is not set
707# CONFIG_EXT4DEV_FS is not set 770# CONFIG_EXT4DEV_FS is not set
708# CONFIG_REISERFS_FS is not set 771# CONFIG_REISERFS_FS is not set
@@ -755,7 +818,7 @@ CONFIG_RAMFS=y
755# CONFIG_EFS_FS is not set 818# CONFIG_EFS_FS is not set
756# CONFIG_JFFS_FS is not set 819# CONFIG_JFFS_FS is not set
757# CONFIG_JFFS2_FS is not set 820# CONFIG_JFFS2_FS is not set
758CONFIG_CRAMFS=y 821# CONFIG_CRAMFS is not set
759# CONFIG_VXFS_FS is not set 822# CONFIG_VXFS_FS is not set
760# CONFIG_HPFS_FS is not set 823# CONFIG_HPFS_FS is not set
761# CONFIG_QNX4FS_FS is not set 824# CONFIG_QNX4FS_FS is not set
@@ -793,8 +856,9 @@ CONFIG_MSDOS_PARTITION=y
793# 856#
794# Kernel hacking 857# Kernel hacking
795# 858#
859CONFIG_TRACE_IRQFLAGS_SUPPORT=y
796# CONFIG_PRINTK_TIME is not set 860# CONFIG_PRINTK_TIME is not set
797CONFIG_ENABLE_MUST_CHECK=y 861# CONFIG_ENABLE_MUST_CHECK is not set
798# CONFIG_MAGIC_SYSRQ is not set 862# CONFIG_MAGIC_SYSRQ is not set
799# CONFIG_UNUSED_SYMBOLS is not set 863# CONFIG_UNUSED_SYMBOLS is not set
800# CONFIG_DEBUG_KERNEL is not set 864# CONFIG_DEBUG_KERNEL is not set
@@ -819,7 +883,7 @@ CONFIG_LOG_BUF_SHIFT=14
819# 883#
820# Library routines 884# Library routines
821# 885#
822CONFIG_CRC_CCITT=y 886# CONFIG_CRC_CCITT is not set
823# CONFIG_CRC16 is not set 887# CONFIG_CRC16 is not set
824CONFIG_CRC32=y 888CONFIG_CRC32=y
825# CONFIG_LIBCRC32C is not set 889# CONFIG_LIBCRC32C is not set
diff --git a/arch/sh/configs/se7619_defconfig b/arch/sh/configs/se7619_defconfig
new file mode 100644
index 000000000000..20ac7f4c53fb
--- /dev/null
+++ b/arch/sh/configs/se7619_defconfig
@@ -0,0 +1,744 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.19
4# Wed Dec 6 16:35:36 2006
5#
6CONFIG_SUPERH=y
7CONFIG_RWSEM_GENERIC_SPINLOCK=y
8CONFIG_GENERIC_FIND_NEXT_BIT=y
9CONFIG_GENERIC_HWEIGHT=y
10CONFIG_GENERIC_HARDIRQS=y
11CONFIG_GENERIC_IRQ_PROBE=y
12CONFIG_GENERIC_CALIBRATE_DELAY=y
13# CONFIG_GENERIC_TIME is not set
14CONFIG_STACKTRACE_SUPPORT=y
15CONFIG_LOCKDEP_SUPPORT=y
16CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
17
18#
19# Code maturity level options
20#
21CONFIG_EXPERIMENTAL=y
22CONFIG_BROKEN_ON_SMP=y
23CONFIG_INIT_ENV_ARG_LIMIT=32
24
25#
26# General setup
27#
28CONFIG_LOCALVERSION=""
29# CONFIG_LOCALVERSION_AUTO is not set
30# CONFIG_SYSVIPC is not set
31# CONFIG_BSD_PROCESS_ACCT is not set
32# CONFIG_UTS_NS is not set
33# CONFIG_IKCONFIG is not set
34# CONFIG_RELAY is not set
35CONFIG_INITRAMFS_SOURCE=""
36CONFIG_CC_OPTIMIZE_FOR_SIZE=y
37CONFIG_SYSCTL=y
38CONFIG_EMBEDDED=y
39# CONFIG_UID16 is not set
40# CONFIG_SYSCTL_SYSCALL is not set
41# CONFIG_KALLSYMS is not set
42# CONFIG_HOTPLUG is not set
43CONFIG_PRINTK=y
44CONFIG_BUG=y
45# CONFIG_ELF_CORE is not set
46# CONFIG_BASE_FULL is not set
47# CONFIG_FUTEX is not set
48# CONFIG_EPOLL is not set
49CONFIG_SLAB=y
50# CONFIG_VM_EVENT_COUNTERS is not set
51CONFIG_TINY_SHMEM=y
52CONFIG_BASE_SMALL=1
53# CONFIG_SLOB is not set
54
55#
56# Loadable module support
57#
58# CONFIG_MODULES is not set
59
60#
61# Block layer
62#
63CONFIG_BLOCK=y
64# CONFIG_LBD is not set
65# CONFIG_LSF is not set
66
67#
68# IO Schedulers
69#
70CONFIG_IOSCHED_NOOP=y
71# CONFIG_IOSCHED_AS is not set
72# CONFIG_IOSCHED_DEADLINE is not set
73# CONFIG_IOSCHED_CFQ is not set
74# CONFIG_DEFAULT_AS is not set
75# CONFIG_DEFAULT_DEADLINE is not set
76# CONFIG_DEFAULT_CFQ is not set
77CONFIG_DEFAULT_NOOP=y
78CONFIG_DEFAULT_IOSCHED="noop"
79
80#
81# System type
82#
83# CONFIG_SH_SOLUTION_ENGINE is not set
84# CONFIG_SH_7751_SOLUTION_ENGINE is not set
85# CONFIG_SH_7300_SOLUTION_ENGINE is not set
86# CONFIG_SH_7343_SOLUTION_ENGINE is not set
87# CONFIG_SH_73180_SOLUTION_ENGINE is not set
88# CONFIG_SH_7751_SYSTEMH is not set
89# CONFIG_SH_HP6XX is not set
90# CONFIG_SH_EC3104 is not set
91# CONFIG_SH_SATURN is not set
92# CONFIG_SH_DREAMCAST is not set
93# CONFIG_SH_BIGSUR is not set
94# CONFIG_SH_MPC1211 is not set
95# CONFIG_SH_SH03 is not set
96# CONFIG_SH_SECUREEDGE5410 is not set
97# CONFIG_SH_HS7751RVOIP is not set
98# CONFIG_SH_7710VOIPGW is not set
99# CONFIG_SH_RTS7751R2D is not set
100# CONFIG_SH_R7780RP is not set
101# CONFIG_SH_EDOSK7705 is not set
102# CONFIG_SH_SH4202_MICRODEV is not set
103# CONFIG_SH_LANDISK is not set
104# CONFIG_SH_TITAN is not set
105# CONFIG_SH_SHMIN is not set
106# CONFIG_SH_7206_SOLUTION_ENGINE is not set
107CONFIG_SH_7619_SOLUTION_ENGINE=y
108# CONFIG_SH_UNKNOWN is not set
109
110#
111# Processor selection
112#
113CONFIG_CPU_SH2=y
114
115#
116# SH-2 Processor Support
117#
118# CONFIG_CPU_SUBTYPE_SH7604 is not set
119CONFIG_CPU_SUBTYPE_SH7619=y
120
121#
122# SH-2A Processor Support
123#
124# CONFIG_CPU_SUBTYPE_SH7206 is not set
125
126#
127# SH-3 Processor Support
128#
129# CONFIG_CPU_SUBTYPE_SH7300 is not set
130# CONFIG_CPU_SUBTYPE_SH7705 is not set
131# CONFIG_CPU_SUBTYPE_SH7706 is not set
132# CONFIG_CPU_SUBTYPE_SH7707 is not set
133# CONFIG_CPU_SUBTYPE_SH7708 is not set
134# CONFIG_CPU_SUBTYPE_SH7709 is not set
135# CONFIG_CPU_SUBTYPE_SH7710 is not set
136
137#
138# SH-4 Processor Support
139#
140# CONFIG_CPU_SUBTYPE_SH7750 is not set
141# CONFIG_CPU_SUBTYPE_SH7091 is not set
142# CONFIG_CPU_SUBTYPE_SH7750R is not set
143# CONFIG_CPU_SUBTYPE_SH7750S is not set
144# CONFIG_CPU_SUBTYPE_SH7751 is not set
145# CONFIG_CPU_SUBTYPE_SH7751R is not set
146# CONFIG_CPU_SUBTYPE_SH7760 is not set
147# CONFIG_CPU_SUBTYPE_SH4_202 is not set
148
149#
150# ST40 Processor Support
151#
152# CONFIG_CPU_SUBTYPE_ST40STB1 is not set
153# CONFIG_CPU_SUBTYPE_ST40GX1 is not set
154
155#
156# SH-4A Processor Support
157#
158# CONFIG_CPU_SUBTYPE_SH7770 is not set
159# CONFIG_CPU_SUBTYPE_SH7780 is not set
160# CONFIG_CPU_SUBTYPE_SH7785 is not set
161
162#
163# SH4AL-DSP Processor Support
164#
165# CONFIG_CPU_SUBTYPE_SH73180 is not set
166# CONFIG_CPU_SUBTYPE_SH7343 is not set
167
168#
169# Memory management options
170#
171CONFIG_PAGE_OFFSET=0x00000000
172CONFIG_MEMORY_START=0x0c000000
173CONFIG_MEMORY_SIZE=0x04000000
174CONFIG_PAGE_SIZE_4KB=y
175# CONFIG_PAGE_SIZE_8KB is not set
176# CONFIG_PAGE_SIZE_64KB is not set
177CONFIG_SELECT_MEMORY_MODEL=y
178CONFIG_FLATMEM_MANUAL=y
179# CONFIG_DISCONTIGMEM_MANUAL is not set
180# CONFIG_SPARSEMEM_MANUAL is not set
181CONFIG_FLATMEM=y
182CONFIG_FLAT_NODE_MEM_MAP=y
183# CONFIG_SPARSEMEM_STATIC is not set
184CONFIG_SPLIT_PTLOCK_CPUS=4
185# CONFIG_RESOURCES_64BIT is not set
186
187#
188# Cache configuration
189#
190# CONFIG_SH_DIRECT_MAPPED is not set
191CONFIG_SH_WRITETHROUGH=y
192# CONFIG_SH_OCRAM is not set
193
194#
195# Processor features
196#
197# CONFIG_CPU_LITTLE_ENDIAN is not set
198CONFIG_CPU_BIG_ENDIAN=y
199# CONFIG_SH_FPU is not set
200# CONFIG_SH_FPU_EMU is not set
201# CONFIG_SH_DSP is not set
202
203#
204# Timer support
205#
206CONFIG_SH_CMT=y
207CONFIG_SH_TIMER_IRQ=86
208# CONFIG_NO_IDLE_HZ is not set
209CONFIG_SH_PCLK_FREQ=31250000
210CONFIG_SH_CLK_MD=5
211
212#
213# CPU Frequency scaling
214#
215# CONFIG_CPU_FREQ is not set
216
217#
218# DMA support
219#
220# CONFIG_SH_DMA is not set
221
222#
223# Companion Chips
224#
225# CONFIG_HD6446X_SERIES is not set
226
227#
228# Additional SuperH Device Drivers
229#
230# CONFIG_PUSH_SWITCH is not set
231
232#
233# Kernel features
234#
235CONFIG_HZ_100=y
236# CONFIG_HZ_250 is not set
237# CONFIG_HZ_1000 is not set
238CONFIG_HZ=100
239# CONFIG_KEXEC is not set
240# CONFIG_SMP is not set
241CONFIG_PREEMPT_NONE=y
242# CONFIG_PREEMPT_VOLUNTARY is not set
243# CONFIG_PREEMPT is not set
244
245#
246# Boot options
247#
248CONFIG_ZERO_PAGE_OFFSET=0x00001000
249CONFIG_BOOT_LINK_OFFSET=0x00800000
250# CONFIG_UBC_WAKEUP is not set
251# CONFIG_CMDLINE_BOOL is not set
252
253#
254# Bus options
255#
256# CONFIG_PCI is not set
257
258#
259# PCCARD (PCMCIA/CardBus) support
260#
261
262#
263# PCI Hotplug Support
264#
265
266#
267# Executable file formats
268#
269CONFIG_BINFMT_FLAT=y
270CONFIG_BINFMT_ZFLAT=y
271# CONFIG_BINFMT_SHARED_FLAT is not set
272# CONFIG_BINFMT_MISC is not set
273
274#
275# Power management options (EXPERIMENTAL)
276#
277# CONFIG_PM is not set
278
279#
280# Networking
281#
282# CONFIG_NET is not set
283
284#
285# Device Drivers
286#
287
288#
289# Generic Driver Options
290#
291# CONFIG_STANDALONE is not set
292# CONFIG_PREVENT_FIRMWARE_BUILD is not set
293# CONFIG_SYS_HYPERVISOR is not set
294
295#
296# Connector - unified userspace <-> kernelspace linker
297#
298
299#
300# Memory Technology Devices (MTD)
301#
302CONFIG_MTD=y
303# CONFIG_MTD_DEBUG is not set
304CONFIG_MTD_CONCAT=y
305CONFIG_MTD_PARTITIONS=y
306CONFIG_MTD_REDBOOT_PARTS=y
307CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
308# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
309# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
310# CONFIG_MTD_CMDLINE_PARTS is not set
311
312#
313# User Modules And Translation Layers
314#
315CONFIG_MTD_CHAR=y
316CONFIG_MTD_BLOCK=y
317# CONFIG_FTL is not set
318# CONFIG_NFTL is not set
319# CONFIG_INFTL is not set
320# CONFIG_RFD_FTL is not set
321# CONFIG_SSFDC is not set
322
323#
324# RAM/ROM/Flash chip drivers
325#
326CONFIG_MTD_CFI=y
327# CONFIG_MTD_JEDECPROBE is not set
328CONFIG_MTD_GEN_PROBE=y
329# CONFIG_MTD_CFI_ADV_OPTIONS is not set
330CONFIG_MTD_MAP_BANK_WIDTH_1=y
331CONFIG_MTD_MAP_BANK_WIDTH_2=y
332CONFIG_MTD_MAP_BANK_WIDTH_4=y
333# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
334# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
335# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
336CONFIG_MTD_CFI_I1=y
337CONFIG_MTD_CFI_I2=y
338# CONFIG_MTD_CFI_I4 is not set
339# CONFIG_MTD_CFI_I8 is not set
340# CONFIG_MTD_CFI_INTELEXT is not set
341CONFIG_MTD_CFI_AMDSTD=y
342# CONFIG_MTD_CFI_STAA is not set
343CONFIG_MTD_CFI_UTIL=y
344# CONFIG_MTD_RAM is not set
345# CONFIG_MTD_ROM is not set
346# CONFIG_MTD_ABSENT is not set
347# CONFIG_MTD_OBSOLETE_CHIPS is not set
348
349#
350# Mapping drivers for chip access
351#
352# CONFIG_MTD_COMPLEX_MAPPINGS is not set
353CONFIG_MTD_PHYSMAP=y
354CONFIG_MTD_PHYSMAP_START=0xa0000000
355CONFIG_MTD_PHYSMAP_LEN=0x01000000
356CONFIG_MTD_PHYSMAP_BANKWIDTH=2
357# CONFIG_MTD_SOLUTIONENGINE is not set
358# CONFIG_MTD_UCLINUX is not set
359# CONFIG_MTD_PLATRAM is not set
360
361#
362# Self-contained MTD device drivers
363#
364# CONFIG_MTD_SLRAM is not set
365# CONFIG_MTD_PHRAM is not set
366# CONFIG_MTD_MTDRAM is not set
367# CONFIG_MTD_BLOCK2MTD is not set
368
369#
370# Disk-On-Chip Device Drivers
371#
372# CONFIG_MTD_DOC2000 is not set
373# CONFIG_MTD_DOC2001 is not set
374# CONFIG_MTD_DOC2001PLUS is not set
375
376#
377# NAND Flash Device Drivers
378#
379# CONFIG_MTD_NAND is not set
380
381#
382# OneNAND Flash Device Drivers
383#
384# CONFIG_MTD_ONENAND is not set
385
386#
387# Parallel port support
388#
389# CONFIG_PARPORT is not set
390
391#
392# Plug and Play support
393#
394
395#
396# Block devices
397#
398# CONFIG_BLK_DEV_COW_COMMON is not set
399# CONFIG_BLK_DEV_LOOP is not set
400# CONFIG_BLK_DEV_RAM is not set
401# CONFIG_BLK_DEV_INITRD is not set
402# CONFIG_CDROM_PKTCDVD is not set
403
404#
405# Misc devices
406#
407# CONFIG_TIFM_CORE is not set
408
409#
410# ATA/ATAPI/MFM/RLL support
411#
412# CONFIG_IDE is not set
413
414#
415# SCSI device support
416#
417# CONFIG_RAID_ATTRS is not set
418# CONFIG_SCSI is not set
419# CONFIG_SCSI_NETLINK is not set
420
421#
422# Serial ATA (prod) and Parallel ATA (experimental) drivers
423#
424# CONFIG_ATA is not set
425
426#
427# Multi-device support (RAID and LVM)
428#
429# CONFIG_MD is not set
430
431#
432# Fusion MPT device support
433#
434# CONFIG_FUSION is not set
435
436#
437# IEEE 1394 (FireWire) support
438#
439
440#
441# I2O device support
442#
443
444#
445# ISDN subsystem
446#
447
448#
449# Telephony Support
450#
451# CONFIG_PHONE is not set
452
453#
454# Input device support
455#
456CONFIG_INPUT=y
457# CONFIG_INPUT_FF_MEMLESS is not set
458
459#
460# Userland interfaces
461#
462# CONFIG_INPUT_MOUSEDEV is not set
463# CONFIG_INPUT_JOYDEV is not set
464# CONFIG_INPUT_TSDEV is not set
465# CONFIG_INPUT_EVDEV is not set
466# CONFIG_INPUT_EVBUG is not set
467
468#
469# Input Device Drivers
470#
471# CONFIG_INPUT_KEYBOARD is not set
472# CONFIG_INPUT_MOUSE is not set
473# CONFIG_INPUT_JOYSTICK is not set
474# CONFIG_INPUT_TOUCHSCREEN is not set
475# CONFIG_INPUT_MISC is not set
476
477#
478# Hardware I/O ports
479#
480# CONFIG_SERIO is not set
481# CONFIG_GAMEPORT is not set
482
483#
484# Character devices
485#
486# CONFIG_VT is not set
487# CONFIG_SERIAL_NONSTANDARD is not set
488
489#
490# Serial drivers
491#
492# CONFIG_SERIAL_8250 is not set
493
494#
495# Non-8250 serial port support
496#
497CONFIG_SERIAL_SH_SCI=y
498CONFIG_SERIAL_SH_SCI_NR_UARTS=3
499CONFIG_SERIAL_SH_SCI_CONSOLE=y
500CONFIG_SERIAL_CORE=y
501CONFIG_SERIAL_CORE_CONSOLE=y
502# CONFIG_UNIX98_PTYS is not set
503# CONFIG_LEGACY_PTYS is not set
504
505#
506# IPMI
507#
508# CONFIG_IPMI_HANDLER is not set
509
510#
511# Watchdog Cards
512#
513# CONFIG_WATCHDOG is not set
514# CONFIG_HW_RANDOM is not set
515# CONFIG_GEN_RTC is not set
516# CONFIG_DTLK is not set
517# CONFIG_R3964 is not set
518
519#
520# Ftape, the floppy tape device driver
521#
522# CONFIG_RAW_DRIVER is not set
523
524#
525# TPM devices
526#
527# CONFIG_TCG_TPM is not set
528
529#
530# I2C support
531#
532# CONFIG_I2C is not set
533
534#
535# SPI support
536#
537# CONFIG_SPI is not set
538# CONFIG_SPI_MASTER is not set
539
540#
541# Dallas's 1-wire bus
542#
543# CONFIG_W1 is not set
544
545#
546# Hardware Monitoring support
547#
548# CONFIG_HWMON is not set
549# CONFIG_HWMON_VID is not set
550
551#
552# Multimedia devices
553#
554# CONFIG_VIDEO_DEV is not set
555
556#
557# Digital Video Broadcasting Devices
558#
559
560#
561# Graphics support
562#
563# CONFIG_FIRMWARE_EDID is not set
564# CONFIG_FB is not set
565
566#
567# Sound
568#
569# CONFIG_SOUND is not set
570
571#
572# USB support
573#
574# CONFIG_USB_ARCH_HAS_HCD is not set
575# CONFIG_USB_ARCH_HAS_OHCI is not set
576# CONFIG_USB_ARCH_HAS_EHCI is not set
577
578#
579# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
580#
581
582#
583# USB Gadget Support
584#
585# CONFIG_USB_GADGET is not set
586
587#
588# MMC/SD Card support
589#
590# CONFIG_MMC is not set
591
592#
593# LED devices
594#
595# CONFIG_NEW_LEDS is not set
596
597#
598# LED drivers
599#
600
601#
602# LED Triggers
603#
604
605#
606# InfiniBand support
607#
608
609#
610# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
611#
612
613#
614# Real Time Clock
615#
616# CONFIG_RTC_CLASS is not set
617
618#
619# DMA Engine support
620#
621# CONFIG_DMA_ENGINE is not set
622
623#
624# DMA Clients
625#
626
627#
628# DMA Devices
629#
630
631#
632# File systems
633#
634# CONFIG_EXT2_FS is not set
635# CONFIG_EXT3_FS is not set
636# CONFIG_EXT4DEV_FS is not set
637# CONFIG_REISERFS_FS is not set
638# CONFIG_JFS_FS is not set
639# CONFIG_FS_POSIX_ACL is not set
640# CONFIG_XFS_FS is not set
641# CONFIG_GFS2_FS is not set
642# CONFIG_MINIX_FS is not set
643CONFIG_ROMFS_FS=y
644# CONFIG_INOTIFY is not set
645# CONFIG_QUOTA is not set
646# CONFIG_DNOTIFY is not set
647# CONFIG_AUTOFS_FS is not set
648# CONFIG_AUTOFS4_FS is not set
649# CONFIG_FUSE_FS is not set
650
651#
652# CD-ROM/DVD Filesystems
653#
654# CONFIG_ISO9660_FS is not set
655# CONFIG_UDF_FS is not set
656
657#
658# DOS/FAT/NT Filesystems
659#
660# CONFIG_MSDOS_FS is not set
661# CONFIG_VFAT_FS is not set
662# CONFIG_NTFS_FS is not set
663
664#
665# Pseudo filesystems
666#
667CONFIG_PROC_FS=y
668CONFIG_PROC_SYSCTL=y
669# CONFIG_SYSFS is not set
670# CONFIG_TMPFS is not set
671# CONFIG_HUGETLBFS is not set
672# CONFIG_HUGETLB_PAGE is not set
673CONFIG_RAMFS=y
674
675#
676# Miscellaneous filesystems
677#
678# CONFIG_ADFS_FS is not set
679# CONFIG_AFFS_FS is not set
680# CONFIG_HFS_FS is not set
681# CONFIG_HFSPLUS_FS is not set
682# CONFIG_BEFS_FS is not set
683# CONFIG_BFS_FS is not set
684# CONFIG_EFS_FS is not set
685# CONFIG_JFFS_FS is not set
686# CONFIG_JFFS2_FS is not set
687# CONFIG_CRAMFS is not set
688# CONFIG_VXFS_FS is not set
689# CONFIG_HPFS_FS is not set
690# CONFIG_QNX4FS_FS is not set
691# CONFIG_SYSV_FS is not set
692# CONFIG_UFS_FS is not set
693
694#
695# Partition Types
696#
697# CONFIG_PARTITION_ADVANCED is not set
698CONFIG_MSDOS_PARTITION=y
699
700#
701# Native Language Support
702#
703# CONFIG_NLS is not set
704
705#
706# Profiling support
707#
708# CONFIG_PROFILING is not set
709
710#
711# Kernel hacking
712#
713CONFIG_TRACE_IRQFLAGS_SUPPORT=y
714# CONFIG_PRINTK_TIME is not set
715# CONFIG_ENABLE_MUST_CHECK is not set
716# CONFIG_MAGIC_SYSRQ is not set
717# CONFIG_UNUSED_SYMBOLS is not set
718# CONFIG_DEBUG_KERNEL is not set
719CONFIG_LOG_BUF_SHIFT=14
720# CONFIG_DEBUG_BUGVERBOSE is not set
721# CONFIG_UNWIND_INFO is not set
722# CONFIG_HEADERS_CHECK is not set
723# CONFIG_SH_STANDARD_BIOS is not set
724# CONFIG_EARLY_SCIF_CONSOLE is not set
725# CONFIG_KGDB is not set
726
727#
728# Security options
729#
730# CONFIG_KEYS is not set
731
732#
733# Cryptographic options
734#
735# CONFIG_CRYPTO is not set
736
737#
738# Library routines
739#
740# CONFIG_CRC_CCITT is not set
741# CONFIG_CRC16 is not set
742CONFIG_CRC32=y
743# CONFIG_LIBCRC32C is not set
744CONFIG_ZLIB_INFLATE=y
diff --git a/arch/sh/drivers/push-switch.c b/arch/sh/drivers/push-switch.c
index f2b9157c314f..b3d20c0e021f 100644
--- a/arch/sh/drivers/push-switch.c
+++ b/arch/sh/drivers/push-switch.c
@@ -14,7 +14,7 @@
14#include <asm/push-switch.h> 14#include <asm/push-switch.h>
15 15
16#define DRV_NAME "push-switch" 16#define DRV_NAME "push-switch"
17#define DRV_VERSION "0.1.0" 17#define DRV_VERSION "0.1.1"
18 18
19static ssize_t switch_show(struct device *dev, 19static ssize_t switch_show(struct device *dev,
20 struct device_attribute *attr, 20 struct device_attribute *attr,
@@ -32,10 +32,10 @@ static void switch_timer(unsigned long data)
32 schedule_work(&psw->work); 32 schedule_work(&psw->work);
33} 33}
34 34
35static void switch_work_handler(void *data) 35static void switch_work_handler(struct work_struct *work)
36{ 36{
37 struct platform_device *pdev = data; 37 struct push_switch *psw = container_of(work, struct push_switch, work);
38 struct push_switch *psw = platform_get_drvdata(pdev); 38 struct platform_device *pdev = psw->pdev;
39 39
40 psw->state = 0; 40 psw->state = 0;
41 41
@@ -76,12 +76,15 @@ static int switch_drv_probe(struct platform_device *pdev)
76 } 76 }
77 } 77 }
78 78
79 INIT_WORK(&psw->work, switch_work_handler, pdev); 79 INIT_WORK(&psw->work, switch_work_handler);
80 init_timer(&psw->debounce); 80 init_timer(&psw->debounce);
81 81
82 psw->debounce.function = switch_timer; 82 psw->debounce.function = switch_timer;
83 psw->debounce.data = (unsigned long)psw; 83 psw->debounce.data = (unsigned long)psw;
84 84
85 /* Workqueue API brain-damage */
86 psw->pdev = pdev;
87
85 platform_set_drvdata(pdev, psw); 88 platform_set_drvdata(pdev, psw);
86 89
87 return 0; 90 return 0;
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
index 0582e6712b79..d055a3ea6b4b 100644
--- a/arch/sh/kernel/cpu/Makefile
+++ b/arch/sh/kernel/cpu/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_CPU_SH2) = sh2/
6obj-$(CONFIG_CPU_SH2A) = sh2a/ 6obj-$(CONFIG_CPU_SH2A) = sh2a/
7obj-$(CONFIG_CPU_SH3) = sh3/ 7obj-$(CONFIG_CPU_SH3) = sh3/
8obj-$(CONFIG_CPU_SH4) = sh4/ 8obj-$(CONFIG_CPU_SH4) = sh4/
9obj-$(CONFIG_CPU_SH4A) += sh4a/
9 10
10obj-$(CONFIG_UBC_WAKEUP) += ubc.o 11obj-$(CONFIG_UBC_WAKEUP) += ubc.o
11obj-$(CONFIG_SH_ADC) += adc.o 12obj-$(CONFIG_SH_ADC) += adc.o
diff --git a/arch/sh/kernel/cpu/sh2/entry.S b/arch/sh/kernel/cpu/sh2/entry.S
index 34d51b3745ea..d51fa5e9904a 100644
--- a/arch/sh/kernel/cpu/sh2/entry.S
+++ b/arch/sh/kernel/cpu/sh2/entry.S
@@ -177,15 +177,21 @@ interrupt_entry:
1777: .long do_IRQ 1777: .long do_IRQ
1788: .long do_exception_error 1788: .long do_exception_error
179 179
180trap_entry: 180trap_entry:
181 add #-0x10,r9 181 /* verbose BUG trapa entry check */
182 mov #0x3e,r8
183 cmp/ge r8,r9
184 bf/s 1f
185 add #-0x10,r9
186 add #0x10,r9
1871:
182 shll2 r9 ! TRA 188 shll2 r9 ! TRA
183 mov #OFF_TRA,r8 189 mov #OFF_TRA,r8
184 add r15,r8 190 add r15,r8
185 mov.l r9,@r8 191 mov.l r9,@r8
186 mov r9,r8 192 mov r9,r8
187#ifdef CONFIG_TRACE_IRQFLAGS 193#ifdef CONFIG_TRACE_IRQFLAGS
188 mov.l 5f, r9 194 mov.l 2f, r9
189 jsr @r9 195 jsr @r9
190 nop 196 nop
191#endif 197#endif
@@ -194,12 +200,8 @@ trap_entry:
194 nop 200 nop
195 201
196 .align 2 202 .align 2
1971: .long syscall_exit
1982: .long break_point_trap_software
1993: .long NR_syscalls
2004: .long sys_call_table
201#ifdef CONFIG_TRACE_IRQFLAGS 203#ifdef CONFIG_TRACE_IRQFLAGS
2025: .long trace_hardirqs_on 2042: .long trace_hardirqs_on
203#endif 205#endif
204 206
205#if defined(CONFIG_SH_STANDARD_BIOS) 207#if defined(CONFIG_SH_STANDARD_BIOS)
@@ -264,7 +266,7 @@ ENTRY(address_error_handler)
264restore_all: 266restore_all:
265 cli 267 cli
266#ifdef CONFIG_TRACE_IRQFLAGS 268#ifdef CONFIG_TRACE_IRQFLAGS
267 mov.l 3f, r0 269 mov.l 1f, r0
268 jsr @r0 270 jsr @r0
269 nop 271 nop
270#endif 272#endif
@@ -309,20 +311,14 @@ restore_all:
309 mov.l @r15,r15 311 mov.l @r15,r15
310 rte 312 rte
311 nop 313 nop
3122:
313 mov.l 1f,r8
314 mov.l 2f,r9
315 jmp @r9
316 lds r8,pr
317 314
318 .align 2 315#ifdef CONFIG_TRACE_IRQFLAGS
3161: .long trace_hardirqs_off
317#endif
319$current_thread_info: 318$current_thread_info:
320 .long __current_thread_info 319 .long __current_thread_info
321$cpu_mode: 320$cpu_mode:
322 .long __cpu_mode 321 .long __cpu_mode
323#ifdef CONFIG_TRACE_IRQFLAGS
3243: .long trace_hardirqs_off
325#endif
326 322
327! common exception handler 323! common exception handler
328#include "../../entry-common.S" 324#include "../../entry-common.S"
diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
index 82c2d905152f..79283e6c1d8f 100644
--- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c
+++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
@@ -51,3 +51,44 @@ static int __init sh7619_devices_setup(void)
51 ARRAY_SIZE(sh7619_devices)); 51 ARRAY_SIZE(sh7619_devices));
52} 52}
53__initcall(sh7619_devices_setup); 53__initcall(sh7619_devices_setup);
54
55#define INTC_IPRC 0xf8080000UL
56#define INTC_IPRD 0xf8080002UL
57
58#define CMI0_IRQ 86
59
60#define SCIF0_ERI_IRQ 88
61#define SCIF0_RXI_IRQ 89
62#define SCIF0_BRI_IRQ 90
63#define SCIF0_TXI_IRQ 91
64
65#define SCIF1_ERI_IRQ 92
66#define SCIF1_RXI_IRQ 93
67#define SCIF1_BRI_IRQ 94
68#define SCIF1_TXI_IRQ 95
69
70#define SCIF2_BRI_IRQ 96
71#define SCIF2_ERI_IRQ 97
72#define SCIF2_RXI_IRQ 98
73#define SCIF2_TXI_IRQ 99
74
75static struct ipr_data sh7619_ipr_map[] = {
76 { CMI0_IRQ, INTC_IPRC, 1, 2 },
77 { SCIF0_ERI_IRQ, INTC_IPRD, 3, 3 },
78 { SCIF0_RXI_IRQ, INTC_IPRD, 3, 3 },
79 { SCIF0_BRI_IRQ, INTC_IPRD, 3, 3 },
80 { SCIF0_TXI_IRQ, INTC_IPRD, 3, 3 },
81 { SCIF1_ERI_IRQ, INTC_IPRD, 2, 3 },
82 { SCIF1_RXI_IRQ, INTC_IPRD, 2, 3 },
83 { SCIF1_BRI_IRQ, INTC_IPRD, 2, 3 },
84 { SCIF1_TXI_IRQ, INTC_IPRD, 2, 3 },
85 { SCIF2_ERI_IRQ, INTC_IPRD, 1, 3 },
86 { SCIF2_RXI_IRQ, INTC_IPRD, 1, 3 },
87 { SCIF2_BRI_IRQ, INTC_IPRD, 1, 3 },
88 { SCIF2_TXI_IRQ, INTC_IPRD, 1, 3 },
89};
90
91void __init init_IRQ_ipr(void)
92{
93 make_ipr_irq(sh7619_ipr_map, ARRAY_SIZE(sh7619_ipr_map));
94}
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
index cdfeef49e62e..4b60fcc7d667 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
@@ -17,22 +17,22 @@ static struct plat_sci_port sci_platform_data[] = {
17 .mapbase = 0xfffe8000, 17 .mapbase = 0xfffe8000,
18 .flags = UPF_BOOT_AUTOCONF, 18 .flags = UPF_BOOT_AUTOCONF,
19 .type = PORT_SCIF, 19 .type = PORT_SCIF,
20 .irqs = { 240, 241, 242, 243}, 20 .irqs = { 241, 242, 243, 240},
21 }, { 21 }, {
22 .mapbase = 0xfffe8800, 22 .mapbase = 0xfffe8800,
23 .flags = UPF_BOOT_AUTOCONF, 23 .flags = UPF_BOOT_AUTOCONF,
24 .type = PORT_SCIF, 24 .type = PORT_SCIF,
25 .irqs = { 244, 245, 246, 247}, 25 .irqs = { 247, 244, 245, 246},
26 }, { 26 }, {
27 .mapbase = 0xfffe9000, 27 .mapbase = 0xfffe9000,
28 .flags = UPF_BOOT_AUTOCONF, 28 .flags = UPF_BOOT_AUTOCONF,
29 .type = PORT_SCIF, 29 .type = PORT_SCIF,
30 .irqs = { 248, 249, 250, 251}, 30 .irqs = { 249, 250, 251, 248},
31 }, { 31 }, {
32 .mapbase = 0xfffe9800, 32 .mapbase = 0xfffe9800,
33 .flags = UPF_BOOT_AUTOCONF, 33 .flags = UPF_BOOT_AUTOCONF,
34 .type = PORT_SCIF, 34 .type = PORT_SCIF,
35 .irqs = { 252, 253, 254, 255}, 35 .irqs = { 253, 254, 255, 252},
36 }, { 36 }, {
37 .flags = 0, 37 .flags = 0,
38 } 38 }
@@ -56,3 +56,57 @@ static int __init sh7206_devices_setup(void)
56 ARRAY_SIZE(sh7206_devices)); 56 ARRAY_SIZE(sh7206_devices));
57} 57}
58__initcall(sh7206_devices_setup); 58__initcall(sh7206_devices_setup);
59
60#define INTC_IPR08 0xfffe0c04UL
61#define INTC_IPR09 0xfffe0c06UL
62#define INTC_IPR14 0xfffe0c10UL
63
64#define CMI0_IRQ 140
65
66#define MTU1_TGI1A 164
67
68#define SCIF0_BRI_IRQ 240
69#define SCIF0_ERI_IRQ 241
70#define SCIF0_RXI_IRQ 242
71#define SCIF0_TXI_IRQ 243
72
73#define SCIF1_BRI_IRQ 244
74#define SCIF1_ERI_IRQ 245
75#define SCIF1_RXI_IRQ 246
76#define SCIF1_TXI_IRQ 247
77
78#define SCIF2_BRI_IRQ 248
79#define SCIF2_ERI_IRQ 249
80#define SCIF2_RXI_IRQ 250
81#define SCIF2_TXI_IRQ 251
82
83#define SCIF3_BRI_IRQ 252
84#define SCIF3_ERI_IRQ 253
85#define SCIF3_RXI_IRQ 254
86#define SCIF3_TXI_IRQ 255
87
88static struct ipr_data sh7206_ipr_map[] = {
89 { CMI0_IRQ, INTC_IPR08, 3, 2 },
90 { MTU2_TGI1A, INTC_IPR09, 1, 2 },
91 { SCIF0_ERI_IRQ, INTC_IPR14, 3, 3 },
92 { SCIF0_RXI_IRQ, INTC_IPR14, 3, 3 },
93 { SCIF0_BRI_IRQ, INTC_IPR14, 3, 3 },
94 { SCIF0_TXI_IRQ, INTC_IPR14, 3, 3 },
95 { SCIF1_ERI_IRQ, INTC_IPR14, 2, 3 },
96 { SCIF1_RXI_IRQ, INTC_IPR14, 2, 3 },
97 { SCIF1_BRI_IRQ, INTC_IPR14, 2, 3 },
98 { SCIF1_TXI_IRQ, INTC_IPR14, 2, 3 },
99 { SCIF2_ERI_IRQ, INTC_IPR14, 1, 3 },
100 { SCIF2_RXI_IRQ, INTC_IPR14, 1, 3 },
101 { SCIF2_BRI_IRQ, INTC_IPR14, 1, 3 },
102 { SCIF2_TXI_IRQ, INTC_IPR14, 1, 3 },
103 { SCIF3_ERI_IRQ, INTC_IPR14, 0, 3 },
104 { SCIF3_RXI_IRQ, INTC_IPR14, 0, 3 },
105 { SCIF3_BRI_IRQ, INTC_IPR14, 0, 3 },
106 { SCIF3_TXI_IRQ, INTC_IPR14, 0, 3 },
107};
108
109void __init init_IRQ_ipr(void)
110{
111 make_ipr_irq(sh7206_ipr_map, ARRAY_SIZE(sh7206_ipr_map));
112}
diff --git a/arch/sh/kernel/cpu/sh4/Makefile b/arch/sh/kernel/cpu/sh4/Makefile
index 6e415baf04b4..19ca68c71884 100644
--- a/arch/sh/kernel/cpu/sh4/Makefile
+++ b/arch/sh/kernel/cpu/sh4/Makefile
@@ -12,17 +12,12 @@ obj-$(CONFIG_SH_STORE_QUEUES) += sq.o
12obj-$(CONFIG_CPU_SUBTYPE_SH7750) += setup-sh7750.o 12obj-$(CONFIG_CPU_SUBTYPE_SH7750) += setup-sh7750.o
13obj-$(CONFIG_CPU_SUBTYPE_SH7751) += setup-sh7750.o 13obj-$(CONFIG_CPU_SUBTYPE_SH7751) += setup-sh7750.o
14obj-$(CONFIG_CPU_SUBTYPE_SH7760) += setup-sh7760.o 14obj-$(CONFIG_CPU_SUBTYPE_SH7760) += setup-sh7760.o
15obj-$(CONFIG_CPU_SUBTYPE_SH7770) += setup-sh7770.o
16obj-$(CONFIG_CPU_SUBTYPE_SH7780) += setup-sh7780.o
17obj-$(CONFIG_CPU_SUBTYPE_SH73180) += setup-sh73180.o
18obj-$(CONFIG_CPU_SUBTYPE_SH7343) += setup-sh7343.o
19obj-$(CONFIG_CPU_SUBTYPE_SH4_202) += setup-sh4-202.o 15obj-$(CONFIG_CPU_SUBTYPE_SH4_202) += setup-sh4-202.o
20 16
21# Primary on-chip clocks (common) 17# Primary on-chip clocks (common)
18ifndef CONFIG_CPU_SH4A
22clock-$(CONFIG_CPU_SH4) := clock-sh4.o 19clock-$(CONFIG_CPU_SH4) := clock-sh4.o
23clock-$(CONFIG_CPU_SUBTYPE_SH73180) := clock-sh73180.o 20endif
24clock-$(CONFIG_CPU_SUBTYPE_SH7770) := clock-sh7770.o
25clock-$(CONFIG_CPU_SUBTYPE_SH7780) := clock-sh7780.o
26 21
27# Additional clocks by subtype 22# Additional clocks by subtype
28clock-$(CONFIG_CPU_SUBTYPE_SH4_202) += clock-sh4-202.o 23clock-$(CONFIG_CPU_SUBTYPE_SH4_202) += clock-sh4-202.o
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
index afe0f1b1c030..9031a22a2ce7 100644
--- a/arch/sh/kernel/cpu/sh4/probe.c
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -119,11 +119,20 @@ int __init detect_cpu_and_cache_system(void)
119 break; 119 break;
120 case 0x3000: 120 case 0x3000:
121 case 0x3003: 121 case 0x3003:
122 case 0x3009:
122 cpu_data->type = CPU_SH7343; 123 cpu_data->type = CPU_SH7343;
123 cpu_data->icache.ways = 4; 124 cpu_data->icache.ways = 4;
124 cpu_data->dcache.ways = 4; 125 cpu_data->dcache.ways = 4;
125 cpu_data->flags |= CPU_HAS_LLSC; 126 cpu_data->flags |= CPU_HAS_LLSC;
126 break; 127 break;
128 case 0x3008:
129 if (prr == 0xa0) {
130 cpu_data->type = CPU_SH7722;
131 cpu_data->icache.ways = 4;
132 cpu_data->dcache.ways = 4;
133 cpu_data->flags |= CPU_HAS_LLSC;
134 }
135 break;
127 case 0x8000: 136 case 0x8000:
128 cpu_data->type = CPU_ST40RA; 137 cpu_data->type = CPU_ST40RA;
129 cpu_data->flags |= CPU_HAS_FPU; 138 cpu_data->flags |= CPU_HAS_FPU;
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
index bbcb06f18b04..cbac27634c0b 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
@@ -14,6 +14,36 @@
14#include <linux/io.h> 14#include <linux/io.h>
15#include <asm/sci.h> 15#include <asm/sci.h>
16 16
17static struct resource rtc_resources[] = {
18 [0] = {
19 .start = 0xffc80000,
20 .end = 0xffc80000 + 0x58 - 1,
21 .flags = IORESOURCE_IO,
22 },
23 [1] = {
24 /* Period IRQ */
25 .start = 21,
26 .flags = IORESOURCE_IRQ,
27 },
28 [2] = {
29 /* Carry IRQ */
30 .start = 22,
31 .flags = IORESOURCE_IRQ,
32 },
33 [3] = {
34 /* Alarm IRQ */
35 .start = 20,
36 .flags = IORESOURCE_IRQ,
37 },
38};
39
40static struct platform_device rtc_device = {
41 .name = "sh-rtc",
42 .id = -1,
43 .num_resources = ARRAY_SIZE(rtc_resources),
44 .resource = rtc_resources,
45};
46
17static struct plat_sci_port sci_platform_data[] = { 47static struct plat_sci_port sci_platform_data[] = {
18 { 48 {
19 .mapbase = 0xffe00000, 49 .mapbase = 0xffe00000,
@@ -39,6 +69,7 @@ static struct platform_device sci_device = {
39}; 69};
40 70
41static struct platform_device *sh7750_devices[] __initdata = { 71static struct platform_device *sh7750_devices[] __initdata = {
72 &rtc_device,
42 &sci_device, 73 &sci_device,
43}; 74};
44 75
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
index 0c9ea38d2caa..d7fff752e569 100644
--- a/arch/sh/kernel/cpu/sh4/sq.c
+++ b/arch/sh/kernel/cpu/sh4/sq.c
@@ -111,8 +111,9 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags)
111 111
112 vma->phys_addr = map->addr; 112 vma->phys_addr = map->addr;
113 113
114 if (remap_area_pages((unsigned long)vma->addr, vma->phys_addr, 114 if (ioremap_page_range((unsigned long)vma->addr,
115 map->size, flags)) { 115 (unsigned long)vma->addr + map->size,
116 vma->phys_addr, __pgprot(flags))) {
116 vunmap(vma->addr); 117 vunmap(vma->addr);
117 return -EAGAIN; 118 return -EAGAIN;
118 } 119 }
@@ -176,7 +177,7 @@ unsigned long sq_remap(unsigned long phys, unsigned int size,
176 177
177 map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT); 178 map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);
178 179
179 ret = __sq_remap(map, flags); 180 ret = __sq_remap(map, pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
180 if (unlikely(ret != 0)) 181 if (unlikely(ret != 0))
181 goto out; 182 goto out;
182 183
diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile
new file mode 100644
index 000000000000..a8f493f2f21f
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/Makefile
@@ -0,0 +1,19 @@
1#
2# Makefile for the Linux/SuperH SH-4 backends.
3#
4
5# CPU subtype setup
6obj-$(CONFIG_CPU_SUBTYPE_SH7770) += setup-sh7770.o
7obj-$(CONFIG_CPU_SUBTYPE_SH7780) += setup-sh7780.o
8obj-$(CONFIG_CPU_SUBTYPE_SH73180) += setup-sh73180.o
9obj-$(CONFIG_CPU_SUBTYPE_SH7343) += setup-sh7343.o
10obj-$(CONFIG_CPU_SUBTYPE_SH7722) += setup-sh7722.o
11
12# Primary on-chip clocks (common)
13clock-$(CONFIG_CPU_SUBTYPE_SH73180) := clock-sh73180.o
14clock-$(CONFIG_CPU_SUBTYPE_SH7770) := clock-sh7770.o
15clock-$(CONFIG_CPU_SUBTYPE_SH7780) := clock-sh7780.o
16clock-$(CONFIG_CPU_SUBTYPE_SH7343) := clock-sh7343.o
17clock-$(CONFIG_CPU_SUBTYPE_SH7722) := clock-sh7343.o
18
19obj-y += $(clock-y)
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh73180.c b/arch/sh/kernel/cpu/sh4a/clock-sh73180.c
index 2fa5cb2ae68d..2fa5cb2ae68d 100644
--- a/arch/sh/kernel/cpu/sh4/clock-sh73180.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh73180.c
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7343.c b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
new file mode 100644
index 000000000000..1707a213f0cf
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
@@ -0,0 +1,99 @@
1/*
2 * arch/sh/kernel/cpu/sh4/clock-sh7343.c
3 *
4 * SH7343/SH7722 support for the clock framework
5 *
6 * Copyright (C) 2006 Paul Mundt
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12#include <linux/init.h>
13#include <linux/kernel.h>
14#include <linux/io.h>
15#include <asm/clock.h>
16#include <asm/freq.h>
17
18/*
19 * SH7343/SH7722 uses a common set of multipliers and divisors, so this
20 * is quite simple..
21 */
22static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
23static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 };
24
25#define pll_calc() (((ctrl_inl(FRQCR) >> 24) & 0x1f) + 1)
26
27static void master_clk_init(struct clk *clk)
28{
29 clk->parent = clk_get(NULL, "cpu_clk");
30}
31
32static void master_clk_recalc(struct clk *clk)
33{
34 int idx = (ctrl_inl(FRQCR) & 0x000f);
35 clk->rate *= clk->parent->rate * multipliers[idx] / divisors[idx];
36}
37
38static struct clk_ops sh7343_master_clk_ops = {
39 .init = master_clk_init,
40 .recalc = master_clk_recalc,
41};
42
43static void module_clk_init(struct clk *clk)
44{
45 clk->parent = NULL;
46 clk->rate = CONFIG_SH_PCLK_FREQ;
47}
48
49static struct clk_ops sh7343_module_clk_ops = {
50 .init = module_clk_init,
51};
52
53static void bus_clk_init(struct clk *clk)
54{
55 clk->parent = clk_get(NULL, "cpu_clk");
56}
57
58static void bus_clk_recalc(struct clk *clk)
59{
60 int idx = (ctrl_inl(FRQCR) >> 8) & 0x000f;
61 clk->rate = clk->parent->rate * multipliers[idx] / divisors[idx];
62}
63
64static struct clk_ops sh7343_bus_clk_ops = {
65 .init = bus_clk_init,
66 .recalc = bus_clk_recalc,
67};
68
69static void cpu_clk_init(struct clk *clk)
70{
71 clk->parent = clk_get(NULL, "module_clk");
72 clk->flags |= CLK_RATE_PROPAGATES;
73 clk_set_rate(clk, clk_get_rate(clk));
74}
75
76static void cpu_clk_recalc(struct clk *clk)
77{
78 int idx = (ctrl_inl(FRQCR) >> 20) & 0x000f;
79 clk->rate = clk->parent->rate * pll_calc() *
80 multipliers[idx] / divisors[idx];
81}
82
83static struct clk_ops sh7343_cpu_clk_ops = {
84 .init = cpu_clk_init,
85 .recalc = cpu_clk_recalc,
86};
87
88static struct clk_ops *sh7343_clk_ops[] = {
89 &sh7343_master_clk_ops,
90 &sh7343_module_clk_ops,
91 &sh7343_bus_clk_ops,
92 &sh7343_cpu_clk_ops,
93};
94
95void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
96{
97 if (idx < ARRAY_SIZE(sh7343_clk_ops))
98 *ops = sh7343_clk_ops[idx];
99}
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh7770.c b/arch/sh/kernel/cpu/sh4a/clock-sh7770.c
index c8694bac6477..c8694bac6477 100644
--- a/arch/sh/kernel/cpu/sh4/clock-sh7770.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7770.c
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh7780.c b/arch/sh/kernel/cpu/sh4a/clock-sh7780.c
index 9e6a216750c8..9e6a216750c8 100644
--- a/arch/sh/kernel/cpu/sh4/clock-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7780.c
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh73180.c b/arch/sh/kernel/cpu/sh4a/setup-sh73180.c
index cc9ea1e2e5df..cc9ea1e2e5df 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh73180.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh73180.c
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7343.c b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
index 91d61cf91ba1..91d61cf91ba1 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7343.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
new file mode 100644
index 000000000000..1143fbf65faf
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
@@ -0,0 +1,80 @@
1/*
2 * SH7722 Setup
3 *
4 * Copyright (C) 2006 Paul Mundt
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10#include <linux/platform_device.h>
11#include <linux/init.h>
12#include <linux/serial.h>
13#include <asm/sci.h>
14
15static struct plat_sci_port sci_platform_data[] = {
16 {
17 .mapbase = 0xffe00000,
18 .flags = UPF_BOOT_AUTOCONF,
19 .type = PORT_SCIF,
20 .irqs = { 80, 81, 83, 82 },
21 }, {
22 .flags = 0,
23 }
24};
25
26static struct platform_device sci_device = {
27 .name = "sh-sci",
28 .id = -1,
29 .dev = {
30 .platform_data = sci_platform_data,
31 },
32};
33
34static struct platform_device *sh7722_devices[] __initdata = {
35 &sci_device,
36};
37
38static int __init sh7722_devices_setup(void)
39{
40 return platform_add_devices(sh7722_devices,
41 ARRAY_SIZE(sh7722_devices));
42}
43__initcall(sh7722_devices_setup);
44
45static struct ipr_data sh7722_ipr_map[] = {
46 /* IRQ, IPR-idx, shift, prio */
47 { 16, 0, 12, 2 }, /* TMU0 */
48 { 17, 0, 8, 2 }, /* TMU1 */
49 { 80, 6, 12, 3 }, /* SCIF ERI */
50 { 81, 6, 12, 3 }, /* SCIF RXI */
51 { 82, 6, 12, 3 }, /* SCIF BRI */
52 { 83, 6, 12, 3 }, /* SCIF TXI */
53};
54
55static unsigned long ipr_offsets[] = {
56 0xa4080000, /* 0: IPRA */
57 0xa4080004, /* 1: IPRB */
58 0xa4080008, /* 2: IPRC */
59 0xa408000c, /* 3: IPRD */
60 0xa4080010, /* 4: IPRE */
61 0xa4080014, /* 5: IPRF */
62 0xa4080018, /* 6: IPRG */
63 0xa408001c, /* 7: IPRH */
64 0xa4080020, /* 8: IPRI */
65 0xa4080024, /* 9: IPRJ */
66 0xa4080028, /* 10: IPRK */
67 0xa408002c, /* 11: IPRL */
68};
69
70unsigned int map_ipridx_to_addr(int idx)
71{
72 if (unlikely(idx >= ARRAY_SIZE(ipr_offsets)))
73 return 0;
74 return ipr_offsets[idx];
75}
76
77void __init init_IRQ_ipr(void)
78{
79 make_ipr_irq(sh7722_ipr_map, ARRAY_SIZE(sh7722_ipr_map));
80}
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7770.c b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
index 6a04cc5f5aca..6a04cc5f5aca 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7770.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
index 9aeaa2ddaa28..9aeaa2ddaa28 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c
index 60340823798a..560b91cdd15c 100644
--- a/arch/sh/kernel/early_printk.c
+++ b/arch/sh/kernel/early_printk.c
@@ -144,16 +144,16 @@ static struct console *early_console =
144 ; 144 ;
145 145
146static int __initdata keep_early; 146static int __initdata keep_early;
147static int early_console_initialized;
147 148
148int __init setup_early_printk(char *opt) 149int __init setup_early_printk(char *buf)
149{ 150{
150 char *space; 151 if (!buf)
151 char buf[256]; 152 return 0;
152 153
153 strlcpy(buf, opt, sizeof(buf)); 154 if (early_console_initialized)
154 space = strchr(buf, ' '); 155 return 0;
155 if (space) 156 early_console_initialized = 1;
156 *space = 0;
157 157
158 if (strstr(buf, "keep")) 158 if (strstr(buf, "keep"))
159 keep_early = 1; 159 keep_early = 1;
@@ -175,12 +175,14 @@ int __init setup_early_printk(char *opt)
175 if (likely(early_console)) 175 if (likely(early_console))
176 register_console(early_console); 176 register_console(early_console);
177 177
178 return 1; 178 return 0;
179} 179}
180__setup("earlyprintk=", setup_early_printk); 180early_param("earlyprintk", setup_early_printk);
181 181
182void __init disable_early_printk(void) 182void __init disable_early_printk(void)
183{ 183{
184 if (!early_console_initialized || !early_console)
185 return;
184 if (!keep_early) { 186 if (!keep_early) {
185 printk("disabling early console\n"); 187 printk("disabling early console\n");
186 unregister_console(early_console); 188 unregister_console(early_console);
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index 29136a35d7c7..fc279aeb73ab 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -79,18 +79,29 @@ debug_kernel_sw:
79 .align 2 79 .align 2
803: .long kgdb_handle_exception 803: .long kgdb_handle_exception
81#endif /* CONFIG_SH_KGDB */ 81#endif /* CONFIG_SH_KGDB */
82 82#ifdef CONFIG_SH_STANDARD_BIOS
83 bra debug_kernel_fw
84 nop
85#endif
83#endif /* CONFIG_SH_STANDARD_BIOS || CONFIG_SH_KGDB */ 86#endif /* CONFIG_SH_STANDARD_BIOS || CONFIG_SH_KGDB */
84 87
85
86 .align 2 88 .align 2
87debug_trap: 89debug_trap:
88#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB) 90#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB)
91 mov r8, r0
92 shlr2 r0
93 cmp/eq #0x3f, r0 ! sh_bios() trap
94 bf 1f
95#ifdef CONFIG_SH_KGDB
96 cmp/eq #0xff, r0 ! XXX: KGDB trap, fix for SH-2.
97 bf 1f
98#endif
89 mov #OFF_SR, r0 99 mov #OFF_SR, r0
90 mov.l @(r0,r15), r0 ! get status register 100 mov.l @(r0,r15), r0 ! get status register
91 shll r0 101 shll r0
92 shll r0 ! kernel space? 102 shll r0 ! kernel space?
93 bt/s debug_kernel 103 bt/s debug_kernel
1041:
94#endif 105#endif
95 mov.l @r15, r0 ! Restore R0 value 106 mov.l @r15, r0 ! Restore R0 value
96 mov.l 1f, r8 107 mov.l 1f, r8
diff --git a/arch/sh/kernel/head.S b/arch/sh/kernel/head.S
index 6aca4bc6ec5d..71a3ad7d283e 100644
--- a/arch/sh/kernel/head.S
+++ b/arch/sh/kernel/head.S
@@ -33,7 +33,8 @@ ENTRY(empty_zero_page)
33 .long 0x00360000 /* INITRD_START */ 33 .long 0x00360000 /* INITRD_START */
34 .long 0x000a0000 /* INITRD_SIZE */ 34 .long 0x000a0000 /* INITRD_SIZE */
35 .long 0 35 .long 0
36 .balign PAGE_SIZE,0,PAGE_SIZE 361:
37 .skip PAGE_SIZE - empty_zero_page - 1b
37 38
38 .text 39 .text
39/* 40/*
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index f3e2631be144..486c06e18033 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -470,9 +470,10 @@ unsigned long get_wchan(struct task_struct *p)
470 */ 470 */
471 pc = thread_saved_pc(p); 471 pc = thread_saved_pc(p);
472 if (in_sched_functions(pc)) { 472 if (in_sched_functions(pc)) {
473 schedule_frame = ((unsigned long *)(long)p->thread.sp)[1]; 473 schedule_frame = (unsigned long)p->thread.sp;
474 return (unsigned long)((unsigned long *)schedule_frame)[1]; 474 return ((unsigned long *)schedule_frame)[21];
475 } 475 }
476
476 return pc; 477 return pc;
477} 478}
478 479
@@ -498,6 +499,16 @@ asmlinkage void break_point_trap_software(unsigned long r4, unsigned long r5,
498{ 499{
499 struct pt_regs *regs = RELOC_HIDE(&__regs, 0); 500 struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
500 501
502 /* Rewind */
501 regs->pc -= 2; 503 regs->pc -= 2;
504
505#ifdef CONFIG_BUG
506 if (__kernel_text_address(instruction_pointer(regs))) {
507 u16 insn = *(u16 *)instruction_pointer(regs);
508 if (insn == TRAPA_BUG_OPCODE)
509 handle_BUG(regs);
510 }
511#endif
512
502 force_sig(SIGTRAP, current); 513 force_sig(SIGTRAP, current);
503} 514}
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index f8dd6b7bfab0..225f9ea5cdd7 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -84,8 +84,7 @@ unsigned long memory_start, memory_end;
84 84
85static inline void parse_cmdline (char ** cmdline_p, char mv_name[MV_NAME_SIZE], 85static inline void parse_cmdline (char ** cmdline_p, char mv_name[MV_NAME_SIZE],
86 struct sh_machine_vector** mvp, 86 struct sh_machine_vector** mvp,
87 unsigned long *mv_io_base, 87 unsigned long *mv_io_base)
88 int *mv_mmio_enable)
89{ 88{
90 char c = ' ', *to = command_line, *from = COMMAND_LINE; 89 char c = ' ', *to = command_line, *from = COMMAND_LINE;
91 int len = 0; 90 int len = 0;
@@ -112,23 +111,6 @@ static inline void parse_cmdline (char ** cmdline_p, char mv_name[MV_NAME_SIZE],
112 } 111 }
113 } 112 }
114 113
115#ifdef CONFIG_EARLY_PRINTK
116 if (c == ' ' && !memcmp(from, "earlyprintk=", 12)) {
117 char *ep_end;
118
119 if (to != command_line)
120 to--;
121
122 from += 12;
123 ep_end = strchr(from, ' ');
124
125 setup_early_printk(from);
126 printk("early console enabled\n");
127
128 from = ep_end;
129 }
130#endif
131
132 if (c == ' ' && !memcmp(from, "sh_mv=", 6)) { 114 if (c == ' ' && !memcmp(from, "sh_mv=", 6)) {
133 char* mv_end; 115 char* mv_end;
134 char* mv_comma; 116 char* mv_comma;
@@ -145,7 +127,6 @@ static inline void parse_cmdline (char ** cmdline_p, char mv_name[MV_NAME_SIZE],
145 int ints[3]; 127 int ints[3];
146 get_options(mv_comma+1, ARRAY_SIZE(ints), ints); 128 get_options(mv_comma+1, ARRAY_SIZE(ints), ints);
147 *mv_io_base = ints[1]; 129 *mv_io_base = ints[1];
148 *mv_mmio_enable = ints[2];
149 mv_len = mv_comma - from; 130 mv_len = mv_comma - from;
150 } else { 131 } else {
151 mv_len = mv_end - from; 132 mv_len = mv_end - from;
@@ -158,6 +139,7 @@ static inline void parse_cmdline (char ** cmdline_p, char mv_name[MV_NAME_SIZE],
158 139
159 *mvp = get_mv_byname(mv_name); 140 *mvp = get_mv_byname(mv_name);
160 } 141 }
142
161 c = *(from++); 143 c = *(from++);
162 if (!c) 144 if (!c)
163 break; 145 break;
@@ -177,9 +159,8 @@ static int __init sh_mv_setup(char **cmdline_p)
177 struct sh_machine_vector *mv = NULL; 159 struct sh_machine_vector *mv = NULL;
178 char mv_name[MV_NAME_SIZE] = ""; 160 char mv_name[MV_NAME_SIZE] = "";
179 unsigned long mv_io_base = 0; 161 unsigned long mv_io_base = 0;
180 int mv_mmio_enable = 0;
181 162
182 parse_cmdline(cmdline_p, mv_name, &mv, &mv_io_base, &mv_mmio_enable); 163 parse_cmdline(cmdline_p, mv_name, &mv, &mv_io_base);
183 164
184#ifdef CONFIG_SH_UNKNOWN 165#ifdef CONFIG_SH_UNKNOWN
185 if (mv == NULL) { 166 if (mv == NULL) {
@@ -258,6 +239,7 @@ void __init setup_arch(char **cmdline_p)
258 239
259 sh_mv_setup(cmdline_p); 240 sh_mv_setup(cmdline_p);
260 241
242
261 /* 243 /*
262 * Find the highest page frame number we have available 244 * Find the highest page frame number we have available
263 */ 245 */
@@ -305,6 +287,7 @@ void __init setup_arch(char **cmdline_p)
305 PFN_PHYS(pages)); 287 PFN_PHYS(pages));
306 } 288 }
307 289
290
308 /* 291 /*
309 * Reserve the kernel text and 292 * Reserve the kernel text and
310 * Reserve the bootmem bitmap. We do this in two steps (first step 293 * Reserve the bootmem bitmap. We do this in two steps (first step
@@ -325,14 +308,18 @@ void __init setup_arch(char **cmdline_p)
325 ROOT_DEV = MKDEV(RAMDISK_MAJOR, 0); 308 ROOT_DEV = MKDEV(RAMDISK_MAJOR, 0);
326 if (&__rd_start != &__rd_end) { 309 if (&__rd_start != &__rd_end) {
327 LOADER_TYPE = 1; 310 LOADER_TYPE = 1;
328 INITRD_START = PHYSADDR((unsigned long)&__rd_start) - __MEMORY_START; 311 INITRD_START = PHYSADDR((unsigned long)&__rd_start) -
329 INITRD_SIZE = (unsigned long)&__rd_end - (unsigned long)&__rd_start; 312 __MEMORY_START;
313 INITRD_SIZE = (unsigned long)&__rd_end -
314 (unsigned long)&__rd_start;
330 } 315 }
331 316
332 if (LOADER_TYPE && INITRD_START) { 317 if (LOADER_TYPE && INITRD_START) {
333 if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) { 318 if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
334 reserve_bootmem_node(NODE_DATA(0), INITRD_START+__MEMORY_START, INITRD_SIZE); 319 reserve_bootmem_node(NODE_DATA(0), INITRD_START +
335 initrd_start = INITRD_START + PAGE_OFFSET + __MEMORY_START; 320 __MEMORY_START, INITRD_SIZE);
321 initrd_start = INITRD_START + PAGE_OFFSET +
322 __MEMORY_START;
336 initrd_end = initrd_start + INITRD_SIZE; 323 initrd_end = initrd_start + INITRD_SIZE;
337 } else { 324 } else {
338 printk("initrd extends beyond end of memory " 325 printk("initrd extends beyond end of memory "
@@ -404,7 +391,7 @@ static const char *cpu_name[] = {
404 [CPU_SH4_202] = "SH4-202", [CPU_SH4_501] = "SH4-501", 391 [CPU_SH4_202] = "SH4-202", [CPU_SH4_501] = "SH4-501",
405 [CPU_SH7770] = "SH7770", [CPU_SH7780] = "SH7780", 392 [CPU_SH7770] = "SH7770", [CPU_SH7780] = "SH7780",
406 [CPU_SH7781] = "SH7781", [CPU_SH7343] = "SH7343", 393 [CPU_SH7781] = "SH7781", [CPU_SH7343] = "SH7343",
407 [CPU_SH7785] = "SH7785", 394 [CPU_SH7785] = "SH7785", [CPU_SH7722] = "SH7722",
408 [CPU_SH_NONE] = "Unknown" 395 [CPU_SH_NONE] = "Unknown"
409}; 396};
410 397
diff --git a/arch/sh/kernel/sh_ksyms.c b/arch/sh/kernel/sh_ksyms.c
index ceee79143401..e6106239a0fe 100644
--- a/arch/sh/kernel/sh_ksyms.c
+++ b/arch/sh/kernel/sh_ksyms.c
@@ -70,13 +70,26 @@ DECLARE_EXPORT(__sdivsi3);
70DECLARE_EXPORT(__ashrdi3); 70DECLARE_EXPORT(__ashrdi3);
71DECLARE_EXPORT(__ashldi3); 71DECLARE_EXPORT(__ashldi3);
72DECLARE_EXPORT(__lshrdi3); 72DECLARE_EXPORT(__lshrdi3);
73DECLARE_EXPORT(__movstr);
74DECLARE_EXPORT(__movstrSI16); 73DECLARE_EXPORT(__movstrSI16);
74#if __GNUC__ == 4
75DECLARE_EXPORT(__movmem);
76#else
77DECLARE_EXPORT(__movstr);
78#endif
75 79
76#ifdef CONFIG_CPU_SH4 80#ifdef CONFIG_CPU_SH4
81#if __GNUC__ == 4
82DECLARE_EXPORT(__movmem_i4_even);
83DECLARE_EXPORT(__movmem_i4_odd);
84DECLARE_EXPORT(__movmemSI12_i4);
85DECLARE_EXPORT(__sdivsi3_i4i);
86DECLARE_EXPORT(__udiv_qrnnd_16);
87DECLARE_EXPORT(__udivsi3_i4i);
88#else /* GCC 3.x */
77DECLARE_EXPORT(__movstr_i4_even); 89DECLARE_EXPORT(__movstr_i4_even);
78DECLARE_EXPORT(__movstr_i4_odd); 90DECLARE_EXPORT(__movstr_i4_odd);
79DECLARE_EXPORT(__movstrSI12_i4); 91DECLARE_EXPORT(__movstrSI12_i4);
92#endif /* __GNUC__ == 4 */
80#endif 93#endif
81 94
82#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB) 95#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
diff --git a/arch/sh/kernel/signal.c b/arch/sh/kernel/signal.c
index bb1c480a59c7..379c88bf5d9a 100644
--- a/arch/sh/kernel/signal.c
+++ b/arch/sh/kernel/signal.c
@@ -101,7 +101,7 @@ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
101 */ 101 */
102 102
103#define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */ 103#define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */
104#if defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH2A) 104#if defined(CONFIG_CPU_SH2)
105#define TRAP_NOARG 0xc320 /* Syscall w/no args (NR in R3) */ 105#define TRAP_NOARG 0xc320 /* Syscall w/no args (NR in R3) */
106#else 106#else
107#define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) */ 107#define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) */
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index 5083b6ed4b39..e18f183e1035 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -314,6 +314,12 @@ asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
314#endif 314#endif
315} 315}
316 316
317#if defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH2A)
318#define SYSCALL_ARG3 "trapa #0x23"
319#else
320#define SYSCALL_ARG3 "trapa #0x13"
321#endif
322
317/* 323/*
318 * Do a system call from kernel instead of calling sys_execve so we 324 * Do a system call from kernel instead of calling sys_execve so we
319 * end up with proper pt_regs. 325 * end up with proper pt_regs.
@@ -324,7 +330,7 @@ int kernel_execve(const char *filename, char *const argv[], char *const envp[])
324 register long __sc4 __asm__ ("r4") = (long) filename; 330 register long __sc4 __asm__ ("r4") = (long) filename;
325 register long __sc5 __asm__ ("r5") = (long) argv; 331 register long __sc5 __asm__ ("r5") = (long) argv;
326 register long __sc6 __asm__ ("r6") = (long) envp; 332 register long __sc6 __asm__ ("r6") = (long) envp;
327 __asm__ __volatile__ ("trapa #0x13" : "=z" (__sc0) 333 __asm__ __volatile__ (SYSCALL_ARG3 : "=z" (__sc0)
328 : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6) 334 : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6)
329 : "memory"); 335 : "memory");
330 return __sc0; 336 return __sc0;
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index 3762d9dc2046..ec110157992d 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -19,6 +19,7 @@
19#include <linux/kallsyms.h> 19#include <linux/kallsyms.h>
20#include <linux/io.h> 20#include <linux/io.h>
21#include <linux/debug_locks.h> 21#include <linux/debug_locks.h>
22#include <linux/limits.h>
22#include <asm/system.h> 23#include <asm/system.h>
23#include <asm/uaccess.h> 24#include <asm/uaccess.h>
24 25
@@ -129,6 +130,40 @@ static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
129 return -EFAULT; 130 return -EFAULT;
130} 131}
131 132
133#ifdef CONFIG_BUG
134#ifdef CONFIG_DEBUG_BUGVERBOSE
135static inline void do_bug_verbose(struct pt_regs *regs)
136{
137 struct bug_frame f;
138 long len;
139
140 if (__copy_from_user(&f, (const void __user *)regs->pc,
141 sizeof(struct bug_frame)))
142 return;
143
144 len = __strnlen_user(f.file, PATH_MAX) - 1;
145 if (unlikely(len < 0 || len >= PATH_MAX))
146 f.file = "<bad filename>";
147 len = __strnlen_user(f.func, PATH_MAX) - 1;
148 if (unlikely(len < 0 || len >= PATH_MAX))
149 f.func = "<bad function>";
150
151 printk(KERN_ALERT "kernel BUG in %s() at %s:%d!\n",
152 f.func, f.file, f.line);
153}
154#else
155static inline void do_bug_verbose(struct pt_regs *regs)
156{
157}
158#endif /* CONFIG_DEBUG_BUGVERBOSE */
159#endif /* CONFIG_BUG */
160
161void handle_BUG(struct pt_regs *regs)
162{
163 do_bug_verbose(regs);
164 die("Kernel BUG", regs, TRAPA_BUG_OPCODE & 0xff);
165}
166
132/* 167/*
133 * handle an instruction that does an unaligned memory access by emulating the 168 * handle an instruction that does an unaligned memory access by emulating the
134 * desired behaviour 169 * desired behaviour
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index 77b4026d5688..f34bdcc33a7d 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -51,7 +51,7 @@ SECTIONS
51 } 51 }
52 52
53 . = ALIGN(PAGE_SIZE); 53 . = ALIGN(PAGE_SIZE);
54 .data.page_aligned : { *(.data.idt) } 54 .data.page_aligned : { *(.data.page_aligned) }
55 55
56 . = ALIGN(32); 56 . = ALIGN(32);
57 __per_cpu_start = .; 57 __per_cpu_start = .;
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 4e0362f50384..29f4ee35c6dc 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -35,6 +35,9 @@ config CPU_SUBTYPE_ST40
35 select CPU_SH4 35 select CPU_SH4
36 select CPU_HAS_INTC2_IRQ 36 select CPU_HAS_INTC2_IRQ
37 37
38config CPU_SHX2
39 bool
40
38# 41#
39# Processor subtypes 42# Processor subtypes
40# 43#
@@ -180,6 +183,7 @@ config CPU_SUBTYPE_SH7780
180config CPU_SUBTYPE_SH7785 183config CPU_SUBTYPE_SH7785
181 bool "Support SH7785 processor" 184 bool "Support SH7785 processor"
182 select CPU_SH4A 185 select CPU_SH4A
186 select CPU_SHX2
183 select CPU_HAS_INTC2_IRQ 187 select CPU_HAS_INTC2_IRQ
184 188
185comment "SH4AL-DSP Processor Support" 189comment "SH4AL-DSP Processor Support"
@@ -192,6 +196,12 @@ config CPU_SUBTYPE_SH7343
192 bool "Support SH7343 processor" 196 bool "Support SH7343 processor"
193 select CPU_SH4AL_DSP 197 select CPU_SH4AL_DSP
194 198
199config CPU_SUBTYPE_SH7722
200 bool "Support SH7722 processor"
201 select CPU_SH4AL_DSP
202 select CPU_SHX2
203 select CPU_HAS_IPR_IRQ
204
195endmenu 205endmenu
196 206
197menu "Memory management options" 207menu "Memory management options"
@@ -250,7 +260,7 @@ config 32BIT
250 260
251config X2TLB 261config X2TLB
252 bool "Enable extended TLB mode" 262 bool "Enable extended TLB mode"
253 depends on CPU_SUBTYPE_SH7785 && MMU && EXPERIMENTAL 263 depends on CPU_SHX2 && MMU && EXPERIMENTAL
254 help 264 help
255 Selecting this option will enable the extended mode of the SH-X2 265 Selecting this option will enable the extended mode of the SH-X2
256 TLB. For legacy SH-X behaviour and interoperability, say N. For 266 TLB. For legacy SH-X behaviour and interoperability, say N. For
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index ae531affccbd..c6955157c989 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -107,7 +107,7 @@ void __init p3_cache_init(void)
107 107
108 emit_cache_params(); 108 emit_cache_params();
109 109
110 if (remap_area_pages(P3SEG, 0, PAGE_SIZE * 4, _PAGE_CACHABLE)) 110 if (ioremap_page_range(P3SEG, P3SEG + (PAGE_SIZE * 4), 0, PAGE_KERNEL))
111 panic("%s failed.", __FUNCTION__); 111 panic("%s failed.", __FUNCTION__);
112 112
113 for (i = 0; i < cpu_data->dcache.n_aliases; i++) 113 for (i = 0; i < cpu_data->dcache.n_aliases; i++)
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 59f4cc18235b..29bd37b1488e 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -77,6 +77,7 @@ void show_mem(void)
77 printk("%d pages swap cached\n",cached); 77 printk("%d pages swap cached\n",cached);
78} 78}
79 79
80#ifdef CONFIG_MMU
80static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot) 81static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
81{ 82{
82 pgd_t *pgd; 83 pgd_t *pgd;
@@ -139,6 +140,7 @@ void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
139 140
140 set_pte_phys(address, phys, prot); 141 set_pte_phys(address, phys, prot);
141} 142}
143#endif /* CONFIG_MMU */
142 144
143/* References to section boundaries */ 145/* References to section boundaries */
144 146
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 54d51b404603..cbbc98846b00 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -317,9 +317,8 @@ void *sbus_alloc_consistent(struct sbus_dev *sdev, long len, u32 *dma_addrp)
317 if ((va = __get_free_pages(GFP_KERNEL|__GFP_COMP, order)) == 0) 317 if ((va = __get_free_pages(GFP_KERNEL|__GFP_COMP, order)) == 0)
318 goto err_nopages; 318 goto err_nopages;
319 319
320 if ((res = kmalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) 320 if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL)
321 goto err_nomem; 321 goto err_nomem;
322 memset((char*)res, 0, sizeof(struct resource));
323 322
324 if (allocate_resource(&_sparc_dvma, res, len_total, 323 if (allocate_resource(&_sparc_dvma, res, len_total,
325 _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) { 324 _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
@@ -589,12 +588,11 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
589 return NULL; 588 return NULL;
590 } 589 }
591 590
592 if ((res = kmalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) { 591 if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
593 free_pages(va, order); 592 free_pages(va, order);
594 printk("pci_alloc_consistent: no core\n"); 593 printk("pci_alloc_consistent: no core\n");
595 return NULL; 594 return NULL;
596 } 595 }
597 memset((char*)res, 0, sizeof(struct resource));
598 596
599 if (allocate_resource(&_sparc_dvma, res, len_total, 597 if (allocate_resource(&_sparc_dvma, res, len_total,
600 _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) { 598 _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
diff --git a/arch/sparc/kernel/irq.c b/arch/sparc/kernel/irq.c
index c8cb211b9072..5b4841d067c1 100644
--- a/arch/sparc/kernel/irq.c
+++ b/arch/sparc/kernel/irq.c
@@ -425,7 +425,7 @@ int request_fast_irq(unsigned int irq,
425 } 425 }
426 426
427 if (action == NULL) 427 if (action == NULL)
428 action = (struct irqaction *)kmalloc(sizeof(struct irqaction), 428 action = kmalloc(sizeof(struct irqaction),
429 GFP_ATOMIC); 429 GFP_ATOMIC);
430 430
431 if (!action) { 431 if (!action) {
@@ -528,7 +528,7 @@ int request_irq(unsigned int irq,
528 } 528 }
529 529
530 if (action == NULL) 530 if (action == NULL)
531 action = (struct irqaction *)kmalloc(sizeof(struct irqaction), 531 action = kmalloc(sizeof(struct irqaction),
532 GFP_ATOMIC); 532 GFP_ATOMIC);
533 533
534 if (!action) { 534 if (!action) {
diff --git a/arch/sparc/kernel/of_device.c b/arch/sparc/kernel/of_device.c
index 46200c43ffb1..dab6169e31ca 100644
--- a/arch/sparc/kernel/of_device.c
+++ b/arch/sparc/kernel/of_device.c
@@ -793,10 +793,9 @@ struct of_device* of_platform_device_create(struct device_node *np,
793{ 793{
794 struct of_device *dev; 794 struct of_device *dev;
795 795
796 dev = kmalloc(sizeof(*dev), GFP_KERNEL); 796 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
797 if (!dev) 797 if (!dev)
798 return NULL; 798 return NULL;
799 memset(dev, 0, sizeof(*dev));
800 799
801 dev->dev.parent = parent; 800 dev->dev.parent = parent;
802 dev->dev.bus = bus; 801 dev->dev.bus = bus;
diff --git a/arch/sparc/kernel/ptrace.c b/arch/sparc/kernel/ptrace.c
index 1baf13ed5c3a..003f8eed32f4 100644
--- a/arch/sparc/kernel/ptrace.c
+++ b/arch/sparc/kernel/ptrace.c
@@ -289,7 +289,10 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
289 289
290 if (request == PTRACE_TRACEME) { 290 if (request == PTRACE_TRACEME) {
291 ret = ptrace_traceme(); 291 ret = ptrace_traceme();
292 pt_succ_return(regs, 0); 292 if (ret < 0)
293 pt_error_return(regs, -ret);
294 else
295 pt_succ_return(regs, 0);
293 goto out; 296 goto out;
294 } 297 }
295 298
diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c
index d4f9da8170c5..0e27e226e0e2 100644
--- a/arch/sparc/kernel/sun4d_irq.c
+++ b/arch/sparc/kernel/sun4d_irq.c
@@ -327,7 +327,7 @@ int sun4d_request_irq(unsigned int irq,
327 } 327 }
328 328
329 if (action == NULL) 329 if (action == NULL)
330 action = (struct irqaction *)kmalloc(sizeof(struct irqaction), 330 action = kmalloc(sizeof(struct irqaction),
331 GFP_ATOMIC); 331 GFP_ATOMIC);
332 332
333 if (!action) { 333 if (!action) {
@@ -545,8 +545,11 @@ void __init sun4d_init_sbi_irq(void)
545 nsbi = 0; 545 nsbi = 0;
546 for_each_sbus(sbus) 546 for_each_sbus(sbus)
547 nsbi++; 547 nsbi++;
548 sbus_actions = (struct sbus_action *)kmalloc (nsbi * 8 * 4 * sizeof(struct sbus_action), GFP_ATOMIC); 548 sbus_actions = kzalloc (nsbi * 8 * 4 * sizeof(struct sbus_action), GFP_ATOMIC);
549 memset (sbus_actions, 0, (nsbi * 8 * 4 * sizeof(struct sbus_action))); 549 if (!sbus_actions) {
550 prom_printf("SUN4D: Cannot allocate sbus_actions, halting.\n");
551 prom_halt();
552 }
550 for_each_sbus(sbus) { 553 for_each_sbus(sbus) {
551#ifdef CONFIG_SMP 554#ifdef CONFIG_SMP
552 extern unsigned char boot_cpu_id; 555 extern unsigned char boot_cpu_id;
diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c
index 2bb1309003dd..4ccda77d08d6 100644
--- a/arch/sparc/mm/io-unit.c
+++ b/arch/sparc/mm/io-unit.c
@@ -22,6 +22,7 @@
22#include <asm/cacheflush.h> 22#include <asm/cacheflush.h>
23#include <asm/tlbflush.h> 23#include <asm/tlbflush.h>
24#include <asm/dma.h> 24#include <asm/dma.h>
25#include <asm/oplib.h>
25 26
26/* #define IOUNIT_DEBUG */ 27/* #define IOUNIT_DEBUG */
27#ifdef IOUNIT_DEBUG 28#ifdef IOUNIT_DEBUG
@@ -41,9 +42,12 @@ iounit_init(int sbi_node, int io_node, struct sbus_bus *sbus)
41 struct linux_prom_registers iommu_promregs[PROMREG_MAX]; 42 struct linux_prom_registers iommu_promregs[PROMREG_MAX];
42 struct resource r; 43 struct resource r;
43 44
44 iounit = kmalloc(sizeof(struct iounit_struct), GFP_ATOMIC); 45 iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
46 if (!iounit) {
47 prom_printf("SUN4D: Cannot alloc iounit, halting.\n");
48 prom_halt();
49 }
45 50
46 memset(iounit, 0, sizeof(*iounit));
47 iounit->limit[0] = IOUNIT_BMAP1_START; 51 iounit->limit[0] = IOUNIT_BMAP1_START;
48 iounit->limit[1] = IOUNIT_BMAP2_START; 52 iounit->limit[1] = IOUNIT_BMAP2_START;
49 iounit->limit[2] = IOUNIT_BMAPM_START; 53 iounit->limit[2] = IOUNIT_BMAPM_START;
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index d391d11f245a..d41f66ac7fff 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -26,6 +26,14 @@ config MMU
26 bool 26 bool
27 default y 27 default y
28 28
29config STACKTRACE_SUPPORT
30 bool
31 default y
32
33config LOCKDEP_SUPPORT
34 bool
35 default y
36
29config TIME_INTERPOLATION 37config TIME_INTERPOLATION
30 bool 38 bool
31 default y 39 default y
diff --git a/arch/sparc64/Kconfig.debug b/arch/sparc64/Kconfig.debug
index afe0a7720a26..1f130f3b6c24 100644
--- a/arch/sparc64/Kconfig.debug
+++ b/arch/sparc64/Kconfig.debug
@@ -1,5 +1,9 @@
1menu "Kernel hacking" 1menu "Kernel hacking"
2 2
3config TRACE_IRQFLAGS_SUPPORT
4 bool
5 default y
6
3source "lib/Kconfig.debug" 7source "lib/Kconfig.debug"
4 8
5config DEBUG_STACK_USAGE 9config DEBUG_STACK_USAGE
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
index 2f4612fa81f2..0f0d38f6197c 100644
--- a/arch/sparc64/defconfig
+++ b/arch/sparc64/defconfig
@@ -1,24 +1,29 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.19-rc2 3# Linux kernel version: 2.6.19
4# Tue Oct 17 19:29:20 2006 4# Sat Dec 9 15:41:30 2006
5# 5#
6CONFIG_SPARC=y 6CONFIG_SPARC=y
7CONFIG_SPARC64=y 7CONFIG_SPARC64=y
8CONFIG_64BIT=y 8CONFIG_64BIT=y
9CONFIG_MMU=y 9CONFIG_MMU=y
10CONFIG_STACKTRACE_SUPPORT=y
11CONFIG_LOCKDEP_SUPPORT=y
10CONFIG_TIME_INTERPOLATION=y 12CONFIG_TIME_INTERPOLATION=y
11CONFIG_ARCH_MAY_HAVE_PC_FDC=y 13CONFIG_ARCH_MAY_HAVE_PC_FDC=y
14# CONFIG_ARCH_HAS_ILOG2_U32 is not set
15# CONFIG_ARCH_HAS_ILOG2_U64 is not set
12CONFIG_AUDIT_ARCH=y 16CONFIG_AUDIT_ARCH=y
13CONFIG_SPARC64_PAGE_SIZE_8KB=y 17CONFIG_SPARC64_PAGE_SIZE_8KB=y
14# CONFIG_SPARC64_PAGE_SIZE_64KB is not set 18# CONFIG_SPARC64_PAGE_SIZE_64KB is not set
15# CONFIG_SPARC64_PAGE_SIZE_512KB is not set 19# CONFIG_SPARC64_PAGE_SIZE_512KB is not set
16# CONFIG_SPARC64_PAGE_SIZE_4MB is not set 20# CONFIG_SPARC64_PAGE_SIZE_4MB is not set
17CONFIG_SECCOMP=y 21CONFIG_SECCOMP=y
18# CONFIG_HZ_100 is not set 22CONFIG_HZ_100=y
19CONFIG_HZ_250=y 23# CONFIG_HZ_250 is not set
24# CONFIG_HZ_300 is not set
20# CONFIG_HZ_1000 is not set 25# CONFIG_HZ_1000 is not set
21CONFIG_HZ=250 26CONFIG_HZ=100
22CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 27CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
23 28
24# 29#
@@ -42,13 +47,14 @@ CONFIG_POSIX_MQUEUE=y
42# CONFIG_UTS_NS is not set 47# CONFIG_UTS_NS is not set
43# CONFIG_AUDIT is not set 48# CONFIG_AUDIT is not set
44# CONFIG_IKCONFIG is not set 49# CONFIG_IKCONFIG is not set
50CONFIG_SYSFS_DEPRECATED=y
45CONFIG_RELAY=y 51CONFIG_RELAY=y
46CONFIG_INITRAMFS_SOURCE="" 52CONFIG_INITRAMFS_SOURCE=""
47CONFIG_CC_OPTIMIZE_FOR_SIZE=y 53CONFIG_CC_OPTIMIZE_FOR_SIZE=y
48CONFIG_SYSCTL=y 54CONFIG_SYSCTL=y
49# CONFIG_EMBEDDED is not set 55# CONFIG_EMBEDDED is not set
50CONFIG_UID16=y 56CONFIG_UID16=y
51# CONFIG_SYSCTL_SYSCALL is not set 57CONFIG_SYSCTL_SYSCALL=y
52CONFIG_KALLSYMS=y 58CONFIG_KALLSYMS=y
53# CONFIG_KALLSYMS_ALL is not set 59# CONFIG_KALLSYMS_ALL is not set
54# CONFIG_KALLSYMS_EXTRA_PASS is not set 60# CONFIG_KALLSYMS_EXTRA_PASS is not set
@@ -203,6 +209,7 @@ CONFIG_INET_TCP_DIAG=y
203# CONFIG_TCP_CONG_ADVANCED is not set 209# CONFIG_TCP_CONG_ADVANCED is not set
204CONFIG_TCP_CONG_CUBIC=y 210CONFIG_TCP_CONG_CUBIC=y
205CONFIG_DEFAULT_TCP_CONG="cubic" 211CONFIG_DEFAULT_TCP_CONG="cubic"
212# CONFIG_TCP_MD5SIG is not set
206CONFIG_IPV6=m 213CONFIG_IPV6=m
207CONFIG_IPV6_PRIVACY=y 214CONFIG_IPV6_PRIVACY=y
208CONFIG_IPV6_ROUTER_PREF=y 215CONFIG_IPV6_ROUTER_PREF=y
@@ -219,7 +226,6 @@ CONFIG_INET6_XFRM_MODE_BEET=m
219# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set 226# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
220CONFIG_IPV6_SIT=m 227CONFIG_IPV6_SIT=m
221CONFIG_IPV6_TUNNEL=m 228CONFIG_IPV6_TUNNEL=m
222# CONFIG_IPV6_SUBTREES is not set
223# CONFIG_IPV6_MULTIPLE_TABLES is not set 229# CONFIG_IPV6_MULTIPLE_TABLES is not set
224# CONFIG_NETWORK_SECMARK is not set 230# CONFIG_NETWORK_SECMARK is not set
225# CONFIG_NETFILTER is not set 231# CONFIG_NETFILTER is not set
@@ -238,6 +244,8 @@ CONFIG_IP_DCCP_CCID2=m
238# CONFIG_IP_DCCP_CCID2_DEBUG is not set 244# CONFIG_IP_DCCP_CCID2_DEBUG is not set
239CONFIG_IP_DCCP_CCID3=m 245CONFIG_IP_DCCP_CCID3=m
240CONFIG_IP_DCCP_TFRC_LIB=m 246CONFIG_IP_DCCP_TFRC_LIB=m
247# CONFIG_IP_DCCP_CCID3_DEBUG is not set
248CONFIG_IP_DCCP_CCID3_RTO=100
241 249
242# 250#
243# DCCP Kernel Hacking 251# DCCP Kernel Hacking
@@ -405,6 +413,7 @@ CONFIG_IDEDMA_AUTO=y
405# 413#
406CONFIG_RAID_ATTRS=m 414CONFIG_RAID_ATTRS=m
407CONFIG_SCSI=y 415CONFIG_SCSI=y
416# CONFIG_SCSI_TGT is not set
408CONFIG_SCSI_NETLINK=y 417CONFIG_SCSI_NETLINK=y
409CONFIG_SCSI_PROC_FS=y 418CONFIG_SCSI_PROC_FS=y
410 419
@@ -425,6 +434,7 @@ CONFIG_CHR_DEV_SG=m
425CONFIG_SCSI_MULTI_LUN=y 434CONFIG_SCSI_MULTI_LUN=y
426CONFIG_SCSI_CONSTANTS=y 435CONFIG_SCSI_CONSTANTS=y
427# CONFIG_SCSI_LOGGING is not set 436# CONFIG_SCSI_LOGGING is not set
437# CONFIG_SCSI_SCAN_ASYNC is not set
428 438
429# 439#
430# SCSI Transports 440# SCSI Transports
@@ -468,6 +478,7 @@ CONFIG_ISCSI_TCP=m
468# CONFIG_SCSI_DC390T is not set 478# CONFIG_SCSI_DC390T is not set
469# CONFIG_SCSI_DEBUG is not set 479# CONFIG_SCSI_DEBUG is not set
470# CONFIG_SCSI_SUNESP is not set 480# CONFIG_SCSI_SUNESP is not set
481# CONFIG_SCSI_SRP is not set
471 482
472# 483#
473# Serial ATA (prod) and Parallel ATA (experimental) drivers 484# Serial ATA (prod) and Parallel ATA (experimental) drivers
@@ -598,6 +609,7 @@ CONFIG_BNX2=m
598# CONFIG_IXGB is not set 609# CONFIG_IXGB is not set
599# CONFIG_S2IO is not set 610# CONFIG_S2IO is not set
600# CONFIG_MYRI10GE is not set 611# CONFIG_MYRI10GE is not set
612# CONFIG_NETXEN_NIC is not set
601 613
602# 614#
603# Token Ring devices 615# Token Ring devices
@@ -724,10 +736,6 @@ CONFIG_RTC=y
724# CONFIG_DTLK is not set 736# CONFIG_DTLK is not set
725# CONFIG_R3964 is not set 737# CONFIG_R3964 is not set
726# CONFIG_APPLICOM is not set 738# CONFIG_APPLICOM is not set
727
728#
729# Ftape, the floppy tape device driver
730#
731# CONFIG_DRM is not set 739# CONFIG_DRM is not set
732# CONFIG_RAW_DRIVER is not set 740# CONFIG_RAW_DRIVER is not set
733 741
@@ -1039,6 +1047,11 @@ CONFIG_SND_SUN_CS4231=m
1039# CONFIG_SOUND_PRIME is not set 1047# CONFIG_SOUND_PRIME is not set
1040 1048
1041# 1049#
1050# HID Devices
1051#
1052CONFIG_HID=y
1053
1054#
1042# USB support 1055# USB support
1043# 1056#
1044CONFIG_USB_ARCH_HAS_HCD=y 1057CONFIG_USB_ARCH_HAS_HCD=y
@@ -1053,6 +1066,7 @@ CONFIG_USB=y
1053CONFIG_USB_DEVICEFS=y 1066CONFIG_USB_DEVICEFS=y
1054# CONFIG_USB_BANDWIDTH is not set 1067# CONFIG_USB_BANDWIDTH is not set
1055# CONFIG_USB_DYNAMIC_MINORS is not set 1068# CONFIG_USB_DYNAMIC_MINORS is not set
1069# CONFIG_USB_MULTITHREAD_PROBE is not set
1056# CONFIG_USB_OTG is not set 1070# CONFIG_USB_OTG is not set
1057 1071
1058# 1072#
@@ -1089,8 +1103,7 @@ CONFIG_USB_UHCI_HCD=m
1089# USB Input Devices 1103# USB Input Devices
1090# 1104#
1091CONFIG_USB_HID=y 1105CONFIG_USB_HID=y
1092CONFIG_USB_HIDINPUT=y 1106# CONFIG_USB_HID_POWERBOOK is not set
1093# CONFIG_USB_HIDINPUT_POWERBOOK is not set
1094# CONFIG_HID_FF is not set 1107# CONFIG_HID_FF is not set
1095CONFIG_USB_HIDDEV=y 1108CONFIG_USB_HIDDEV=y
1096# CONFIG_USB_AIPTEK is not set 1109# CONFIG_USB_AIPTEK is not set
@@ -1119,6 +1132,7 @@ CONFIG_USB_HIDDEV=y
1119# CONFIG_USB_KAWETH is not set 1132# CONFIG_USB_KAWETH is not set
1120# CONFIG_USB_PEGASUS is not set 1133# CONFIG_USB_PEGASUS is not set
1121# CONFIG_USB_RTL8150 is not set 1134# CONFIG_USB_RTL8150 is not set
1135# CONFIG_USB_USBNET_MII is not set
1122# CONFIG_USB_USBNET is not set 1136# CONFIG_USB_USBNET is not set
1123# CONFIG_USB_MON is not set 1137# CONFIG_USB_MON is not set
1124 1138
@@ -1364,6 +1378,11 @@ CONFIG_NLS_DEFAULT="iso8859-1"
1364# CONFIG_NLS_UTF8 is not set 1378# CONFIG_NLS_UTF8 is not set
1365 1379
1366# 1380#
1381# Distributed Lock Manager
1382#
1383# CONFIG_DLM is not set
1384
1385#
1367# Instrumentation Support 1386# Instrumentation Support
1368# 1387#
1369CONFIG_PROFILING=y 1388CONFIG_PROFILING=y
@@ -1373,6 +1392,7 @@ CONFIG_KPROBES=y
1373# 1392#
1374# Kernel hacking 1393# Kernel hacking
1375# 1394#
1395CONFIG_TRACE_IRQFLAGS_SUPPORT=y
1376CONFIG_PRINTK_TIME=y 1396CONFIG_PRINTK_TIME=y
1377CONFIG_ENABLE_MUST_CHECK=y 1397CONFIG_ENABLE_MUST_CHECK=y
1378CONFIG_MAGIC_SYSRQ=y 1398CONFIG_MAGIC_SYSRQ=y
@@ -1387,6 +1407,8 @@ CONFIG_SCHEDSTATS=y
1387# CONFIG_DEBUG_SPINLOCK is not set 1407# CONFIG_DEBUG_SPINLOCK is not set
1388# CONFIG_DEBUG_MUTEXES is not set 1408# CONFIG_DEBUG_MUTEXES is not set
1389# CONFIG_DEBUG_RWSEMS is not set 1409# CONFIG_DEBUG_RWSEMS is not set
1410# CONFIG_DEBUG_LOCK_ALLOC is not set
1411# CONFIG_PROVE_LOCKING is not set
1390# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1412# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1391# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 1413# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1392# CONFIG_DEBUG_KOBJECT is not set 1414# CONFIG_DEBUG_KOBJECT is not set
@@ -1420,8 +1442,9 @@ CONFIG_CRYPTO=y
1420CONFIG_CRYPTO_ALGAPI=y 1442CONFIG_CRYPTO_ALGAPI=y
1421CONFIG_CRYPTO_BLKCIPHER=y 1443CONFIG_CRYPTO_BLKCIPHER=y
1422CONFIG_CRYPTO_HASH=y 1444CONFIG_CRYPTO_HASH=y
1423CONFIG_CRYPTO_MANAGER=m 1445CONFIG_CRYPTO_MANAGER=y
1424CONFIG_CRYPTO_HMAC=y 1446CONFIG_CRYPTO_HMAC=y
1447CONFIG_CRYPTO_XCBC=y
1425CONFIG_CRYPTO_NULL=m 1448CONFIG_CRYPTO_NULL=m
1426CONFIG_CRYPTO_MD4=y 1449CONFIG_CRYPTO_MD4=y
1427CONFIG_CRYPTO_MD5=y 1450CONFIG_CRYPTO_MD5=y
@@ -1430,8 +1453,10 @@ CONFIG_CRYPTO_SHA256=m
1430CONFIG_CRYPTO_SHA512=m 1453CONFIG_CRYPTO_SHA512=m
1431CONFIG_CRYPTO_WP512=m 1454CONFIG_CRYPTO_WP512=m
1432CONFIG_CRYPTO_TGR192=m 1455CONFIG_CRYPTO_TGR192=m
1456CONFIG_CRYPTO_GF128MUL=m
1433CONFIG_CRYPTO_ECB=m 1457CONFIG_CRYPTO_ECB=m
1434CONFIG_CRYPTO_CBC=y 1458CONFIG_CRYPTO_CBC=y
1459CONFIG_CRYPTO_LRW=m
1435CONFIG_CRYPTO_DES=y 1460CONFIG_CRYPTO_DES=y
1436CONFIG_CRYPTO_BLOWFISH=m 1461CONFIG_CRYPTO_BLOWFISH=m
1437CONFIG_CRYPTO_TWOFISH=m 1462CONFIG_CRYPTO_TWOFISH=m
@@ -1456,6 +1481,7 @@ CONFIG_CRYPTO_TEST=m
1456# 1481#
1457# Library routines 1482# Library routines
1458# 1483#
1484CONFIG_BITREVERSE=y
1459CONFIG_CRC_CCITT=m 1485CONFIG_CRC_CCITT=m
1460CONFIG_CRC16=m 1486CONFIG_CRC16=m
1461CONFIG_CRC32=y 1487CONFIG_CRC32=y
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
index e1eabebaed39..eff0c01d3579 100644
--- a/arch/sparc64/kernel/Makefile
+++ b/arch/sparc64/kernel/Makefile
@@ -14,6 +14,7 @@ obj-y := process.o setup.o cpu.o idprom.o \
14 power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o \ 14 power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o \
15 visemul.o prom.o of_device.o 15 visemul.o prom.o of_device.o
16 16
17obj-$(CONFIG_STACKTRACE) += stacktrace.o
17obj-$(CONFIG_PCI) += ebus.o isa.o pci_common.o pci_iommu.o \ 18obj-$(CONFIG_PCI) += ebus.o isa.o pci_common.o pci_iommu.o \
18 pci_psycho.o pci_sabre.o pci_schizo.o \ 19 pci_psycho.o pci_sabre.o pci_schizo.o \
19 pci_sun4v.o pci_sun4v_asm.o 20 pci_sun4v.o pci_sun4v_asm.o
diff --git a/arch/sparc64/kernel/chmc.c b/arch/sparc64/kernel/chmc.c
index 259f37e516f5..9699abeb9907 100644
--- a/arch/sparc64/kernel/chmc.c
+++ b/arch/sparc64/kernel/chmc.c
@@ -341,7 +341,7 @@ static void fetch_decode_regs(struct mctrl_info *mp)
341 341
342static int init_one_mctrl(struct device_node *dp) 342static int init_one_mctrl(struct device_node *dp)
343{ 343{
344 struct mctrl_info *mp = kmalloc(sizeof(*mp), GFP_KERNEL); 344 struct mctrl_info *mp = kzalloc(sizeof(*mp), GFP_KERNEL);
345 int portid = of_getintprop_default(dp, "portid", -1); 345 int portid = of_getintprop_default(dp, "portid", -1);
346 struct linux_prom64_registers *regs; 346 struct linux_prom64_registers *regs;
347 void *pval; 347 void *pval;
@@ -349,7 +349,6 @@ static int init_one_mctrl(struct device_node *dp)
349 349
350 if (!mp) 350 if (!mp)
351 return -1; 351 return -1;
352 memset(mp, 0, sizeof(*mp));
353 if (portid == -1) 352 if (portid == -1)
354 goto fail; 353 goto fail;
355 354
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 6f28bec0a9bf..c15a3edcb826 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -597,7 +597,12 @@ __spitfire_cee_trap_continue:
5971: ba,pt %xcc, etrap_irq 5971: ba,pt %xcc, etrap_irq
598 rd %pc, %g7 598 rd %pc, %g7
599 599
6002: mov %l4, %o1 6002:
601#ifdef CONFIG_TRACE_IRQFLAGS
602 call trace_hardirqs_off
603 nop
604#endif
605 mov %l4, %o1
601 mov %l5, %o2 606 mov %l5, %o2
602 call spitfire_access_error 607 call spitfire_access_error
603 add %sp, PTREGS_OFF, %o0 608 add %sp, PTREGS_OFF, %o0
@@ -824,6 +829,10 @@ do_cheetah_plus_data_parity:
824 wrpr %g0, 15, %pil 829 wrpr %g0, 15, %pil
825 ba,pt %xcc, etrap_irq 830 ba,pt %xcc, etrap_irq
826 rd %pc, %g7 831 rd %pc, %g7
832#ifdef CONFIG_TRACE_IRQFLAGS
833 call trace_hardirqs_off
834 nop
835#endif
827 mov 0x0, %o0 836 mov 0x0, %o0
828 call cheetah_plus_parity_error 837 call cheetah_plus_parity_error
829 add %sp, PTREGS_OFF, %o1 838 add %sp, PTREGS_OFF, %o1
@@ -855,6 +864,10 @@ do_cheetah_plus_insn_parity:
855 wrpr %g0, 15, %pil 864 wrpr %g0, 15, %pil
856 ba,pt %xcc, etrap_irq 865 ba,pt %xcc, etrap_irq
857 rd %pc, %g7 866 rd %pc, %g7
867#ifdef CONFIG_TRACE_IRQFLAGS
868 call trace_hardirqs_off
869 nop
870#endif
858 mov 0x1, %o0 871 mov 0x1, %o0
859 call cheetah_plus_parity_error 872 call cheetah_plus_parity_error
860 add %sp, PTREGS_OFF, %o1 873 add %sp, PTREGS_OFF, %o1
@@ -1183,6 +1196,10 @@ c_fast_ecc:
1183 wrpr %g0, 15, %pil 1196 wrpr %g0, 15, %pil
1184 ba,pt %xcc, etrap_irq 1197 ba,pt %xcc, etrap_irq
1185 rd %pc, %g7 1198 rd %pc, %g7
1199#ifdef CONFIG_TRACE_IRQFLAGS
1200 call trace_hardirqs_off
1201 nop
1202#endif
1186 mov %l4, %o1 1203 mov %l4, %o1
1187 mov %l5, %o2 1204 mov %l5, %o2
1188 call cheetah_fecc_handler 1205 call cheetah_fecc_handler
@@ -1211,6 +1228,10 @@ c_cee:
1211 wrpr %g0, 15, %pil 1228 wrpr %g0, 15, %pil
1212 ba,pt %xcc, etrap_irq 1229 ba,pt %xcc, etrap_irq
1213 rd %pc, %g7 1230 rd %pc, %g7
1231#ifdef CONFIG_TRACE_IRQFLAGS
1232 call trace_hardirqs_off
1233 nop
1234#endif
1214 mov %l4, %o1 1235 mov %l4, %o1
1215 mov %l5, %o2 1236 mov %l5, %o2
1216 call cheetah_cee_handler 1237 call cheetah_cee_handler
@@ -1239,6 +1260,10 @@ c_deferred:
1239 wrpr %g0, 15, %pil 1260 wrpr %g0, 15, %pil
1240 ba,pt %xcc, etrap_irq 1261 ba,pt %xcc, etrap_irq
1241 rd %pc, %g7 1262 rd %pc, %g7
1263#ifdef CONFIG_TRACE_IRQFLAGS
1264 call trace_hardirqs_off
1265 nop
1266#endif
1242 mov %l4, %o1 1267 mov %l4, %o1
1243 mov %l5, %o2 1268 mov %l5, %o2
1244 call cheetah_deferred_handler 1269 call cheetah_deferred_handler
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index c8e9dc9d68a9..03ffaf895a22 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -489,6 +489,14 @@ tlb_fixup_done:
489 call __bzero 489 call __bzero
490 sub %o1, %o0, %o1 490 sub %o1, %o0, %o1
491 491
492#ifdef CONFIG_LOCKDEP
493 /* We have this call this super early, as even prom_init can grab
494 * spinlocks and thus call into the lockdep code.
495 */
496 call lockdep_init
497 nop
498#endif
499
492 mov %l6, %o1 ! OpenPROM stack 500 mov %l6, %o1 ! OpenPROM stack
493 call prom_init 501 call prom_init
494 mov %l7, %o0 ! OpenPROM cif handler 502 mov %l7, %o0 ! OpenPROM cif handler
diff --git a/arch/sparc64/kernel/isa.c b/arch/sparc64/kernel/isa.c
index f028e68b23f2..ad1c4f55420f 100644
--- a/arch/sparc64/kernel/isa.c
+++ b/arch/sparc64/kernel/isa.c
@@ -72,14 +72,12 @@ static void __init isa_fill_children(struct sparc_isa_device *parent_isa_dev)
72 struct linux_prom_registers *regs; 72 struct linux_prom_registers *regs;
73 struct sparc_isa_device *isa_dev; 73 struct sparc_isa_device *isa_dev;
74 74
75 isa_dev = kmalloc(sizeof(*isa_dev), GFP_KERNEL); 75 isa_dev = kzalloc(sizeof(*isa_dev), GFP_KERNEL);
76 if (!isa_dev) { 76 if (!isa_dev) {
77 fatal_err("cannot allocate child isa_dev"); 77 fatal_err("cannot allocate child isa_dev");
78 prom_halt(); 78 prom_halt();
79 } 79 }
80 80
81 memset(isa_dev, 0, sizeof(*isa_dev));
82
83 /* Link it in to parent. */ 81 /* Link it in to parent. */
84 isa_dev->next = parent_isa_dev->child; 82 isa_dev->next = parent_isa_dev->child;
85 parent_isa_dev->child = isa_dev; 83 parent_isa_dev->child = isa_dev;
@@ -104,14 +102,12 @@ static void __init isa_fill_devices(struct sparc_isa_bridge *isa_br)
104 struct linux_prom_registers *regs; 102 struct linux_prom_registers *regs;
105 struct sparc_isa_device *isa_dev; 103 struct sparc_isa_device *isa_dev;
106 104
107 isa_dev = kmalloc(sizeof(*isa_dev), GFP_KERNEL); 105 isa_dev = kzalloc(sizeof(*isa_dev), GFP_KERNEL);
108 if (!isa_dev) { 106 if (!isa_dev) {
109 printk(KERN_DEBUG "ISA: cannot allocate isa_dev"); 107 printk(KERN_DEBUG "ISA: cannot allocate isa_dev");
110 return; 108 return;
111 } 109 }
112 110
113 memset(isa_dev, 0, sizeof(*isa_dev));
114
115 isa_dev->ofdev.node = dp; 111 isa_dev->ofdev.node = dp;
116 isa_dev->ofdev.dev.parent = &isa_br->ofdev.dev; 112 isa_dev->ofdev.dev.parent = &isa_br->ofdev.dev;
117 isa_dev->ofdev.dev.bus = &isa_bus_type; 113 isa_dev->ofdev.dev.bus = &isa_bus_type;
@@ -180,14 +176,12 @@ void __init isa_init(void)
180 pbm = pdev_cookie->pbm; 176 pbm = pdev_cookie->pbm;
181 dp = pdev_cookie->prom_node; 177 dp = pdev_cookie->prom_node;
182 178
183 isa_br = kmalloc(sizeof(*isa_br), GFP_KERNEL); 179 isa_br = kzalloc(sizeof(*isa_br), GFP_KERNEL);
184 if (!isa_br) { 180 if (!isa_br) {
185 printk(KERN_DEBUG "isa: cannot allocate sparc_isa_bridge"); 181 printk(KERN_DEBUG "isa: cannot allocate sparc_isa_bridge");
186 return; 182 return;
187 } 183 }
188 184
189 memset(isa_br, 0, sizeof(*isa_br));
190
191 isa_br->ofdev.node = dp; 185 isa_br->ofdev.node = dp;
192 isa_br->ofdev.dev.parent = &pdev->dev; 186 isa_br->ofdev.dev.parent = &pdev->dev;
193 isa_br->ofdev.dev.bus = &isa_bus_type; 187 isa_br->ofdev.dev.bus = &isa_bus_type;
diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c
index 8e75ed762fd8..ae221f0d4a6f 100644
--- a/arch/sparc64/kernel/kprobes.c
+++ b/arch/sparc64/kernel/kprobes.c
@@ -45,7 +45,11 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
45int __kprobes arch_prepare_kprobe(struct kprobe *p) 45int __kprobes arch_prepare_kprobe(struct kprobe *p)
46{ 46{
47 p->ainsn.insn[0] = *p->addr; 47 p->ainsn.insn[0] = *p->addr;
48 flushi(&p->ainsn.insn[0]);
49
48 p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2; 50 p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2;
51 flushi(&p->ainsn.insn[1]);
52
49 p->opcode = *p->addr; 53 p->opcode = *p->addr;
50 return 0; 54 return 0;
51} 55}
@@ -185,16 +189,19 @@ no_kprobe:
185/* If INSN is a relative control transfer instruction, 189/* If INSN is a relative control transfer instruction,
186 * return the corrected branch destination value. 190 * return the corrected branch destination value.
187 * 191 *
188 * The original INSN location was REAL_PC, it actually 192 * regs->tpc and regs->tnpc still hold the values of the
189 * executed at PC and produced destination address NPC. 193 * program counters at the time of trap due to the execution
194 * of the BREAKPOINT_INSTRUCTION_2 at p->ainsn.insn[1]
195 *
190 */ 196 */
191static unsigned long __kprobes relbranch_fixup(u32 insn, unsigned long real_pc, 197static unsigned long __kprobes relbranch_fixup(u32 insn, struct kprobe *p,
192 unsigned long pc, 198 struct pt_regs *regs)
193 unsigned long npc)
194{ 199{
200 unsigned long real_pc = (unsigned long) p->addr;
201
195 /* Branch not taken, no mods necessary. */ 202 /* Branch not taken, no mods necessary. */
196 if (npc == pc + 0x4UL) 203 if (regs->tnpc == regs->tpc + 0x4UL)
197 return real_pc + 0x4UL; 204 return real_pc + 0x8UL;
198 205
199 /* The three cases are call, branch w/prediction, 206 /* The three cases are call, branch w/prediction,
200 * and traditional branch. 207 * and traditional branch.
@@ -202,14 +209,21 @@ static unsigned long __kprobes relbranch_fixup(u32 insn, unsigned long real_pc,
202 if ((insn & 0xc0000000) == 0x40000000 || 209 if ((insn & 0xc0000000) == 0x40000000 ||
203 (insn & 0xc1c00000) == 0x00400000 || 210 (insn & 0xc1c00000) == 0x00400000 ||
204 (insn & 0xc1c00000) == 0x00800000) { 211 (insn & 0xc1c00000) == 0x00800000) {
212 unsigned long ainsn_addr;
213
214 ainsn_addr = (unsigned long) &p->ainsn.insn[0];
215
205 /* The instruction did all the work for us 216 /* The instruction did all the work for us
206 * already, just apply the offset to the correct 217 * already, just apply the offset to the correct
207 * instruction location. 218 * instruction location.
208 */ 219 */
209 return (real_pc + (npc - pc)); 220 return (real_pc + (regs->tnpc - ainsn_addr));
210 } 221 }
211 222
212 return real_pc + 0x4UL; 223 /* It is jmpl or some other absolute PC modification instruction,
224 * leave NPC as-is.
225 */
226 return regs->tnpc;
213} 227}
214 228
215/* If INSN is an instruction which writes it's PC location 229/* If INSN is an instruction which writes it's PC location
@@ -220,12 +234,12 @@ static void __kprobes retpc_fixup(struct pt_regs *regs, u32 insn,
220{ 234{
221 unsigned long *slot = NULL; 235 unsigned long *slot = NULL;
222 236
223 /* Simplest cast is call, which always uses %o7 */ 237 /* Simplest case is 'call', which always uses %o7 */
224 if ((insn & 0xc0000000) == 0x40000000) { 238 if ((insn & 0xc0000000) == 0x40000000) {
225 slot = &regs->u_regs[UREG_I7]; 239 slot = &regs->u_regs[UREG_I7];
226 } 240 }
227 241
228 /* Jmpl encodes the register inside of the opcode */ 242 /* 'jmpl' encodes the register inside of the opcode */
229 if ((insn & 0xc1f80000) == 0x81c00000) { 243 if ((insn & 0xc1f80000) == 0x81c00000) {
230 unsigned long rd = ((insn >> 25) & 0x1f); 244 unsigned long rd = ((insn >> 25) & 0x1f);
231 245
@@ -247,11 +261,11 @@ static void __kprobes retpc_fixup(struct pt_regs *regs, u32 insn,
247 261
248/* 262/*
249 * Called after single-stepping. p->addr is the address of the 263 * Called after single-stepping. p->addr is the address of the
250 * instruction whose first byte has been replaced by the breakpoint 264 * instruction which has been replaced by the breakpoint
251 * instruction. To avoid the SMP problems that can occur when we 265 * instruction. To avoid the SMP problems that can occur when we
252 * temporarily put back the original opcode to single-step, we 266 * temporarily put back the original opcode to single-step, we
253 * single-stepped a copy of the instruction. The address of this 267 * single-stepped a copy of the instruction. The address of this
254 * copy is p->ainsn.insn. 268 * copy is &p->ainsn.insn[0].
255 * 269 *
256 * This function prepares to return from the post-single-step 270 * This function prepares to return from the post-single-step
257 * breakpoint trap. 271 * breakpoint trap.
@@ -261,11 +275,11 @@ static void __kprobes resume_execution(struct kprobe *p,
261{ 275{
262 u32 insn = p->ainsn.insn[0]; 276 u32 insn = p->ainsn.insn[0];
263 277
278 regs->tnpc = relbranch_fixup(insn, p, regs);
279
280 /* This assignment must occur after relbranch_fixup() */
264 regs->tpc = kcb->kprobe_orig_tnpc; 281 regs->tpc = kcb->kprobe_orig_tnpc;
265 regs->tnpc = relbranch_fixup(insn, 282
266 (unsigned long) p->addr,
267 (unsigned long) &p->ainsn.insn[0],
268 regs->tnpc);
269 retpc_fixup(regs, insn, (unsigned long) p->addr); 283 retpc_fixup(regs, insn, (unsigned long) p->addr);
270 284
271 regs->tstate = ((regs->tstate & ~TSTATE_PIL) | 285 regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
@@ -430,17 +444,8 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
430 struct jprobe *jp = container_of(p, struct jprobe, kp); 444 struct jprobe *jp = container_of(p, struct jprobe, kp);
431 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 445 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
432 446
433 kcb->jprobe_saved_regs_location = regs;
434 memcpy(&(kcb->jprobe_saved_regs), regs, sizeof(*regs)); 447 memcpy(&(kcb->jprobe_saved_regs), regs, sizeof(*regs));
435 448
436 /* Save a whole stack frame, this gets arguments
437 * pushed onto the stack after using up all the
438 * arg registers.
439 */
440 memcpy(&(kcb->jprobe_saved_stack),
441 (char *) (regs->u_regs[UREG_FP] + STACK_BIAS),
442 sizeof(kcb->jprobe_saved_stack));
443
444 regs->tpc = (unsigned long) jp->entry; 449 regs->tpc = (unsigned long) jp->entry;
445 regs->tnpc = ((unsigned long) jp->entry) + 0x4UL; 450 regs->tnpc = ((unsigned long) jp->entry) + 0x4UL;
446 regs->tstate |= TSTATE_PIL; 451 regs->tstate |= TSTATE_PIL;
@@ -450,10 +455,19 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
450 455
451void __kprobes jprobe_return(void) 456void __kprobes jprobe_return(void)
452{ 457{
453 __asm__ __volatile__( 458 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
454 ".globl jprobe_return_trap_instruction\n" 459 register unsigned long orig_fp asm("g1");
460
461 orig_fp = kcb->jprobe_saved_regs.u_regs[UREG_FP];
462 __asm__ __volatile__("\n"
463"1: cmp %%sp, %0\n\t"
464 "blu,a,pt %%xcc, 1b\n\t"
465 " restore\n\t"
466 ".globl jprobe_return_trap_instruction\n"
455"jprobe_return_trap_instruction:\n\t" 467"jprobe_return_trap_instruction:\n\t"
456 "ta 0x70"); 468 "ta 0x70"
469 : /* no outputs */
470 : "r" (orig_fp));
457} 471}
458 472
459extern void jprobe_return_trap_instruction(void); 473extern void jprobe_return_trap_instruction(void);
@@ -466,26 +480,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
466 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 480 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
467 481
468 if (addr == (u32 *) jprobe_return_trap_instruction) { 482 if (addr == (u32 *) jprobe_return_trap_instruction) {
469 if (kcb->jprobe_saved_regs_location != regs) {
470 printk("JPROBE: Current regs (%p) does not match "
471 "saved regs (%p).\n",
472 regs, kcb->jprobe_saved_regs_location);
473 printk("JPROBE: Saved registers\n");
474 __show_regs(kcb->jprobe_saved_regs_location);
475 printk("JPROBE: Current registers\n");
476 __show_regs(regs);
477 BUG();
478 }
479 /* Restore old register state. Do pt_regs
480 * first so that UREG_FP is the original one for
481 * the stack frame restore.
482 */
483 memcpy(regs, &(kcb->jprobe_saved_regs), sizeof(*regs)); 483 memcpy(regs, &(kcb->jprobe_saved_regs), sizeof(*regs));
484
485 memcpy((char *) (regs->u_regs[UREG_FP] + STACK_BIAS),
486 &(kcb->jprobe_saved_stack),
487 sizeof(kcb->jprobe_saved_stack));
488
489 preempt_enable_no_resched(); 484 preempt_enable_no_resched();
490 return 1; 485 return 1;
491 } 486 }
diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c
index 8cc14fc6b6f1..cec0eceae552 100644
--- a/arch/sparc64/kernel/of_device.c
+++ b/arch/sparc64/kernel/of_device.c
@@ -1007,10 +1007,9 @@ struct of_device* of_platform_device_create(struct device_node *np,
1007{ 1007{
1008 struct of_device *dev; 1008 struct of_device *dev;
1009 1009
1010 dev = kmalloc(sizeof(*dev), GFP_KERNEL); 1010 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1011 if (!dev) 1011 if (!dev)
1012 return NULL; 1012 return NULL;
1013 memset(dev, 0, sizeof(*dev));
1014 1013
1015 dev->dev.parent = parent; 1014 dev->dev.parent = parent;
1016 dev->dev.bus = bus; 1015 dev->dev.bus = bus;
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index 03ad4c06758e..6b04794b7a97 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -798,7 +798,7 @@ static struct pci_ops pci_sun4v_ops = {
798static void pbm_scan_bus(struct pci_controller_info *p, 798static void pbm_scan_bus(struct pci_controller_info *p,
799 struct pci_pbm_info *pbm) 799 struct pci_pbm_info *pbm)
800{ 800{
801 struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL); 801 struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
802 802
803 if (!cookie) { 803 if (!cookie) {
804 prom_printf("%s: Critical allocation failure.\n", pbm->name); 804 prom_printf("%s: Critical allocation failure.\n", pbm->name);
@@ -806,7 +806,6 @@ static void pbm_scan_bus(struct pci_controller_info *p,
806 } 806 }
807 807
808 /* All we care about is the PBM. */ 808 /* All we care about is the PBM. */
809 memset(cookie, 0, sizeof(*cookie));
810 cookie->pbm = pbm; 809 cookie->pbm = pbm;
811 810
812 pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, p->pci_ops, pbm); 811 pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, p->pci_ops, pbm);
@@ -1048,12 +1047,11 @@ static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
1048 /* Allocate and initialize the free area map. */ 1047 /* Allocate and initialize the free area map. */
1049 sz = num_tsb_entries / 8; 1048 sz = num_tsb_entries / 8;
1050 sz = (sz + 7UL) & ~7UL; 1049 sz = (sz + 7UL) & ~7UL;
1051 iommu->arena.map = kmalloc(sz, GFP_KERNEL); 1050 iommu->arena.map = kzalloc(sz, GFP_KERNEL);
1052 if (!iommu->arena.map) { 1051 if (!iommu->arena.map) {
1053 prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n"); 1052 prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
1054 prom_halt(); 1053 prom_halt();
1055 } 1054 }
1056 memset(iommu->arena.map, 0, sz);
1057 iommu->arena.limit = num_tsb_entries; 1055 iommu->arena.limit = num_tsb_entries;
1058 1056
1059 sz = probe_existing_entries(pbm, iommu); 1057 sz = probe_existing_entries(pbm, iommu);
@@ -1164,24 +1162,20 @@ void sun4v_pci_init(struct device_node *dp, char *model_name)
1164 per_cpu(pci_iommu_batch, i).pglist = (u64 *) page; 1162 per_cpu(pci_iommu_batch, i).pglist = (u64 *) page;
1165 } 1163 }
1166 1164
1167 p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); 1165 p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
1168 if (!p) 1166 if (!p)
1169 goto fatal_memory_error; 1167 goto fatal_memory_error;
1170 1168
1171 memset(p, 0, sizeof(*p)); 1169 iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
1172
1173 iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
1174 if (!iommu) 1170 if (!iommu)
1175 goto fatal_memory_error; 1171 goto fatal_memory_error;
1176 1172
1177 memset(iommu, 0, sizeof(*iommu));
1178 p->pbm_A.iommu = iommu; 1173 p->pbm_A.iommu = iommu;
1179 1174
1180 iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC); 1175 iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
1181 if (!iommu) 1176 if (!iommu)
1182 goto fatal_memory_error; 1177 goto fatal_memory_error;
1183 1178
1184 memset(iommu, 0, sizeof(*iommu));
1185 p->pbm_B.iommu = iommu; 1179 p->pbm_B.iommu = iommu;
1186 1180
1187 p->next = pci_controller_root; 1181 p->next = pci_controller_root;
diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c
index d31975e6d6f6..81111a12f0a8 100644
--- a/arch/sparc64/kernel/ptrace.c
+++ b/arch/sparc64/kernel/ptrace.c
@@ -202,7 +202,10 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
202#endif 202#endif
203 if (request == PTRACE_TRACEME) { 203 if (request == PTRACE_TRACEME) {
204 ret = ptrace_traceme(); 204 ret = ptrace_traceme();
205 pt_succ_return(regs, 0); 205 if (ret < 0)
206 pt_error_return(regs, -ret);
207 else
208 pt_succ_return(regs, 0);
206 goto out; 209 goto out;
207 } 210 }
208 211
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S
index 3522cd66f3bb..079d18a11d24 100644
--- a/arch/sparc64/kernel/rtrap.S
+++ b/arch/sparc64/kernel/rtrap.S
@@ -165,14 +165,26 @@ rtrap:
165__handle_softirq_continue: 165__handle_softirq_continue:
166rtrap_xcall: 166rtrap_xcall:
167 sethi %hi(0xf << 20), %l4 167 sethi %hi(0xf << 20), %l4
168 andcc %l1, TSTATE_PRIV, %l3
169 and %l1, %l4, %l4 168 and %l1, %l4, %l4
169 andn %l1, %l4, %l1
170 srl %l4, 20, %l4
171#ifdef CONFIG_TRACE_IRQFLAGS
172 brnz,pn %l4, rtrap_no_irq_enable
173 nop
174 call trace_hardirqs_on
175 nop
176 wrpr %l4, %pil
177rtrap_no_irq_enable:
178#endif
179 andcc %l1, TSTATE_PRIV, %l3
170 bne,pn %icc, to_kernel 180 bne,pn %icc, to_kernel
171 andn %l1, %l4, %l1 181 nop
172 182
173 /* We must hold IRQs off and atomically test schedule+signal 183 /* We must hold IRQs off and atomically test schedule+signal
174 * state, then hold them off all the way back to userspace. 184 * state, then hold them off all the way back to userspace.
175 * If we are returning to kernel, none of this matters. 185 * If we are returning to kernel, none of this matters. Note
186 * that we are disabling interrupts via PSTATE_IE, not using
187 * %pil.
176 * 188 *
177 * If we do not do this, there is a window where we would do 189 * If we do not do this, there is a window where we would do
178 * the tests, later the signal/resched event arrives but we do 190 * the tests, later the signal/resched event arrives but we do
@@ -256,7 +268,6 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
256 268
257 ld [%sp + PTREGS_OFF + PT_V9_Y], %o3 269 ld [%sp + PTREGS_OFF + PT_V9_Y], %o3
258 wr %o3, %g0, %y 270 wr %o3, %g0, %y
259 srl %l4, 20, %l4
260 wrpr %l4, 0x0, %pil 271 wrpr %l4, 0x0, %pil
261 wrpr %g0, 0x1, %tl 272 wrpr %g0, 0x1, %tl
262 wrpr %l1, %g0, %tstate 273 wrpr %l1, %g0, %tstate
@@ -374,8 +385,8 @@ to_kernel:
374 ldx [%g6 + TI_FLAGS], %l5 385 ldx [%g6 + TI_FLAGS], %l5
375 andcc %l5, _TIF_NEED_RESCHED, %g0 386 andcc %l5, _TIF_NEED_RESCHED, %g0
376 be,pt %xcc, kern_fpucheck 387 be,pt %xcc, kern_fpucheck
377 srl %l4, 20, %l5 388 nop
378 cmp %l5, 0 389 cmp %l4, 0
379 bne,pn %xcc, kern_fpucheck 390 bne,pn %xcc, kern_fpucheck
380 sethi %hi(PREEMPT_ACTIVE), %l6 391 sethi %hi(PREEMPT_ACTIVE), %l6
381 stw %l6, [%g6 + TI_PRE_COUNT] 392 stw %l6, [%g6 + TI_PRE_COUNT]
diff --git a/arch/sparc64/kernel/stacktrace.c b/arch/sparc64/kernel/stacktrace.c
new file mode 100644
index 000000000000..c4d15f2762b9
--- /dev/null
+++ b/arch/sparc64/kernel/stacktrace.c
@@ -0,0 +1,41 @@
1#include <linux/sched.h>
2#include <linux/stacktrace.h>
3#include <linux/thread_info.h>
4#include <asm/ptrace.h>
5
6void save_stack_trace(struct stack_trace *trace, struct task_struct *task)
7{
8 unsigned long ksp, fp, thread_base;
9 struct thread_info *tp;
10
11 if (!task)
12 task = current;
13 tp = task_thread_info(task);
14 if (task == current) {
15 flushw_all();
16 __asm__ __volatile__(
17 "mov %%fp, %0"
18 : "=r" (ksp)
19 );
20 } else
21 ksp = tp->ksp;
22
23 fp = ksp + STACK_BIAS;
24 thread_base = (unsigned long) tp;
25 do {
26 struct reg_window *rw;
27
28 /* Bogus frame pointer? */
29 if (fp < (thread_base + sizeof(struct thread_info)) ||
30 fp >= (thread_base + THREAD_SIZE))
31 break;
32
33 rw = (struct reg_window *) fp;
34 if (trace->skip > 0)
35 trace->skip--;
36 else
37 trace->entries[trace->nr_entries++] = rw->ins[7];
38
39 fp = rw->ins[6] + STACK_BIAS;
40 } while (trace->nr_entries < trace->max_entries);
41}
diff --git a/arch/sparc64/kernel/sun4v_ivec.S b/arch/sparc64/kernel/sun4v_ivec.S
index 49703c3c5769..405855dd886b 100644
--- a/arch/sparc64/kernel/sun4v_ivec.S
+++ b/arch/sparc64/kernel/sun4v_ivec.S
@@ -190,7 +190,10 @@ sun4v_res_mondo:
190 mov %g1, %g4 190 mov %g1, %g4
191 ba,pt %xcc, etrap_irq 191 ba,pt %xcc, etrap_irq
192 rd %pc, %g7 192 rd %pc, %g7
193 193#ifdef CONFIG_TRACE_IRQFLAGS
194 call trace_hardirqs_off
195 nop
196#endif
194 /* Log the event. */ 197 /* Log the event. */
195 add %sp, PTREGS_OFF, %o0 198 add %sp, PTREGS_OFF, %o0
196 call sun4v_resum_error 199 call sun4v_resum_error
@@ -216,7 +219,10 @@ sun4v_res_mondo_queue_full:
216 wrpr %g0, 15, %pil 219 wrpr %g0, 15, %pil
217 ba,pt %xcc, etrap_irq 220 ba,pt %xcc, etrap_irq
218 rd %pc, %g7 221 rd %pc, %g7
219 222#ifdef CONFIG_TRACE_IRQFLAGS
223 call trace_hardirqs_off
224 nop
225#endif
220 call sun4v_resum_overflow 226 call sun4v_resum_overflow
221 add %sp, PTREGS_OFF, %o0 227 add %sp, PTREGS_OFF, %o0
222 228
@@ -295,7 +301,10 @@ sun4v_nonres_mondo:
295 mov %g1, %g4 301 mov %g1, %g4
296 ba,pt %xcc, etrap_irq 302 ba,pt %xcc, etrap_irq
297 rd %pc, %g7 303 rd %pc, %g7
298 304#ifdef CONFIG_TRACE_IRQFLAGS
305 call trace_hardirqs_off
306 nop
307#endif
299 /* Log the event. */ 308 /* Log the event. */
300 add %sp, PTREGS_OFF, %o0 309 add %sp, PTREGS_OFF, %o0
301 call sun4v_nonresum_error 310 call sun4v_nonresum_error
@@ -321,7 +330,10 @@ sun4v_nonres_mondo_queue_full:
321 wrpr %g0, 15, %pil 330 wrpr %g0, 15, %pil
322 ba,pt %xcc, etrap_irq 331 ba,pt %xcc, etrap_irq
323 rd %pc, %g7 332 rd %pc, %g7
324 333#ifdef CONFIG_TRACE_IRQFLAGS
334 call trace_hardirqs_off
335 nop
336#endif
325 call sun4v_nonresum_overflow 337 call sun4v_nonresum_overflow
326 add %sp, PTREGS_OFF, %o0 338 add %sp, PTREGS_OFF, %o0
327 339
diff --git a/arch/sparc64/kernel/sys_sunos32.c b/arch/sparc64/kernel/sys_sunos32.c
index 4446f66590fa..2ebc2c051383 100644
--- a/arch/sparc64/kernel/sys_sunos32.c
+++ b/arch/sparc64/kernel/sys_sunos32.c
@@ -1055,7 +1055,7 @@ asmlinkage int sunos_msgsys(int op, u32 arg1, u32 arg2, u32 arg3, u32 arg4)
1055 break; 1055 break;
1056 case 2: 1056 case 2:
1057 rval = -EFAULT; 1057 rval = -EFAULT;
1058 kmbuf = (struct msgbuf *)kmalloc(sizeof(struct msgbuf) + arg3, 1058 kmbuf = kmalloc(sizeof(struct msgbuf) + arg3,
1059 GFP_KERNEL); 1059 GFP_KERNEL);
1060 if (!kmbuf) 1060 if (!kmbuf)
1061 break; 1061 break;
@@ -1078,7 +1078,7 @@ asmlinkage int sunos_msgsys(int op, u32 arg1, u32 arg2, u32 arg3, u32 arg4)
1078 break; 1078 break;
1079 case 3: 1079 case 3:
1080 rval = -EFAULT; 1080 rval = -EFAULT;
1081 kmbuf = (struct msgbuf *)kmalloc(sizeof(struct msgbuf) + arg3, 1081 kmbuf = kmalloc(sizeof(struct msgbuf) + arg3,
1082 GFP_KERNEL); 1082 GFP_KERNEL);
1083 if (!kmbuf || sunos_msgbuf_get((struct msgbuf32 __user *)(unsigned long)arg2, 1083 if (!kmbuf || sunos_msgbuf_get((struct msgbuf32 __user *)(unsigned long)arg2,
1084 kmbuf, arg3)) 1084 kmbuf, arg3))
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index fe1796c939c3..ad67784292db 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -10,7 +10,7 @@
10 */ 10 */
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/sched.h> /* for jiffies */ 13#include <linux/sched.h>
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/kallsyms.h> 15#include <linux/kallsyms.h>
16#include <linux/signal.h> 16#include <linux/signal.h>
@@ -1873,6 +1873,16 @@ void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
1873 1873
1874 put_cpu(); 1874 put_cpu();
1875 1875
1876 if (ent->err_type == SUN4V_ERR_TYPE_WARNING_RES) {
1877 /* If err_type is 0x4, it's a powerdown request. Do
1878 * not do the usual resumable error log because that
1879 * makes it look like some abnormal error.
1880 */
1881 printk(KERN_INFO "Power down request...\n");
1882 kill_cad_pid(SIGINT, 1);
1883 return;
1884 }
1885
1876 sun4v_log_error(regs, &local_copy, cpu, 1886 sun4v_log_error(regs, &local_copy, cpu,
1877 KERN_ERR "RESUMABLE ERROR", 1887 KERN_ERR "RESUMABLE ERROR",
1878 &sun4v_resum_oflow_cnt); 1888 &sun4v_resum_oflow_cnt);
@@ -2261,8 +2271,12 @@ void die_if_kernel(char *str, struct pt_regs *regs)
2261 do_exit(SIGSEGV); 2271 do_exit(SIGSEGV);
2262} 2272}
2263 2273
2274#define VIS_OPCODE_MASK ((0x3 << 30) | (0x3f << 19))
2275#define VIS_OPCODE_VAL ((0x2 << 30) | (0x36 << 19))
2276
2264extern int handle_popc(u32 insn, struct pt_regs *regs); 2277extern int handle_popc(u32 insn, struct pt_regs *regs);
2265extern int handle_ldf_stq(u32 insn, struct pt_regs *regs); 2278extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
2279extern int vis_emul(struct pt_regs *, unsigned int);
2266 2280
2267void do_illegal_instruction(struct pt_regs *regs) 2281void do_illegal_instruction(struct pt_regs *regs)
2268{ 2282{
@@ -2287,10 +2301,18 @@ void do_illegal_instruction(struct pt_regs *regs)
2287 if (handle_ldf_stq(insn, regs)) 2301 if (handle_ldf_stq(insn, regs))
2288 return; 2302 return;
2289 } else if (tlb_type == hypervisor) { 2303 } else if (tlb_type == hypervisor) {
2290 extern int vis_emul(struct pt_regs *, unsigned int); 2304 if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) {
2305 if (!vis_emul(regs, insn))
2306 return;
2307 } else {
2308 struct fpustate *f = FPUSTATE;
2291 2309
2292 if (!vis_emul(regs, insn)) 2310 /* XXX maybe verify XFSR bits like
2293 return; 2311 * XXX do_fpother() does?
2312 */
2313 if (do_mathemu(regs, f))
2314 return;
2315 }
2294 } 2316 }
2295 } 2317 }
2296 info.si_signo = SIGILL; 2318 info.si_signo = SIGILL;
diff --git a/arch/sparc64/kernel/unaligned.c b/arch/sparc64/kernel/unaligned.c
index a9b765271b85..bc18d480dd1c 100644
--- a/arch/sparc64/kernel/unaligned.c
+++ b/arch/sparc64/kernel/unaligned.c
@@ -243,7 +243,7 @@ static inline int ok_for_kernel(unsigned int insn)
243 return !floating_point_load_or_store_p(insn); 243 return !floating_point_load_or_store_p(insn);
244} 244}
245 245
246static void kernel_mna_trap_fault(void) 246static void kernel_mna_trap_fault(int fixup_tstate_asi)
247{ 247{
248 struct pt_regs *regs = current_thread_info()->kern_una_regs; 248 struct pt_regs *regs = current_thread_info()->kern_una_regs;
249 unsigned int insn = current_thread_info()->kern_una_insn; 249 unsigned int insn = current_thread_info()->kern_una_insn;
@@ -274,18 +274,15 @@ static void kernel_mna_trap_fault(void)
274 regs->tpc = entry->fixup; 274 regs->tpc = entry->fixup;
275 regs->tnpc = regs->tpc + 4; 275 regs->tnpc = regs->tpc + 4;
276 276
277 regs->tstate &= ~TSTATE_ASI; 277 if (fixup_tstate_asi) {
278 regs->tstate |= (ASI_AIUS << 24UL); 278 regs->tstate &= ~TSTATE_ASI;
279 regs->tstate |= (ASI_AIUS << 24UL);
280 }
279} 281}
280 282
281asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) 283static void log_unaligned(struct pt_regs *regs)
282{ 284{
283 static unsigned long count, last_time; 285 static unsigned long count, last_time;
284 enum direction dir = decode_direction(insn);
285 int size = decode_access_size(insn);
286
287 current_thread_info()->kern_una_regs = regs;
288 current_thread_info()->kern_una_insn = insn;
289 286
290 if (jiffies - last_time > 5 * HZ) 287 if (jiffies - last_time > 5 * HZ)
291 count = 0; 288 count = 0;
@@ -295,6 +292,28 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
295 printk("Kernel unaligned access at TPC[%lx] ", regs->tpc); 292 printk("Kernel unaligned access at TPC[%lx] ", regs->tpc);
296 print_symbol("%s\n", regs->tpc); 293 print_symbol("%s\n", regs->tpc);
297 } 294 }
295}
296
297asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
298{
299 enum direction dir = decode_direction(insn);
300 int size = decode_access_size(insn);
301 int orig_asi, asi;
302
303 current_thread_info()->kern_una_regs = regs;
304 current_thread_info()->kern_una_insn = insn;
305
306 orig_asi = asi = decode_asi(insn, regs);
307
308 /* If this is a {get,put}_user() on an unaligned userspace pointer,
309 * just signal a fault and do not log the event.
310 */
311 if (asi == ASI_AIUS) {
312 kernel_mna_trap_fault(0);
313 return;
314 }
315
316 log_unaligned(regs);
298 317
299 if (!ok_for_kernel(insn) || dir == both) { 318 if (!ok_for_kernel(insn) || dir == both) {
300 printk("Unsupported unaligned load/store trap for kernel " 319 printk("Unsupported unaligned load/store trap for kernel "
@@ -302,10 +321,10 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
302 unaligned_panic("Kernel does fpu/atomic " 321 unaligned_panic("Kernel does fpu/atomic "
303 "unaligned load/store.", regs); 322 "unaligned load/store.", regs);
304 323
305 kernel_mna_trap_fault(); 324 kernel_mna_trap_fault(0);
306 } else { 325 } else {
307 unsigned long addr, *reg_addr; 326 unsigned long addr, *reg_addr;
308 int orig_asi, asi, err; 327 int err;
309 328
310 addr = compute_effective_address(regs, insn, 329 addr = compute_effective_address(regs, insn,
311 ((insn >> 25) & 0x1f)); 330 ((insn >> 25) & 0x1f));
@@ -315,7 +334,6 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
315 regs->tpc, dirstrings[dir], addr, size, 334 regs->tpc, dirstrings[dir], addr, size,
316 regs->u_regs[UREG_RETPC]); 335 regs->u_regs[UREG_RETPC]);
317#endif 336#endif
318 orig_asi = asi = decode_asi(insn, regs);
319 switch (asi) { 337 switch (asi) {
320 case ASI_NL: 338 case ASI_NL:
321 case ASI_AIUPL: 339 case ASI_AIUPL:
@@ -365,7 +383,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
365 /* Not reached... */ 383 /* Not reached... */
366 } 384 }
367 if (unlikely(err)) 385 if (unlikely(err))
368 kernel_mna_trap_fault(); 386 kernel_mna_trap_fault(1);
369 else 387 else
370 advance(regs); 388 advance(regs);
371 } 389 }
diff --git a/arch/sparc64/kernel/visemul.c b/arch/sparc64/kernel/visemul.c
index 84fedaa38aae..c3fd64706b53 100644
--- a/arch/sparc64/kernel/visemul.c
+++ b/arch/sparc64/kernel/visemul.c
@@ -128,9 +128,6 @@
128/* 001001100 - Permute bytes as specified by GSR.MASK */ 128/* 001001100 - Permute bytes as specified by GSR.MASK */
129#define BSHUFFLE_OPF 0x04c 129#define BSHUFFLE_OPF 0x04c
130 130
131#define VIS_OPCODE_MASK ((0x3 << 30) | (0x3f << 19))
132#define VIS_OPCODE_VAL ((0x2 << 30) | (0x36 << 19))
133
134#define VIS_OPF_SHIFT 5 131#define VIS_OPF_SHIFT 5
135#define VIS_OPF_MASK (0x1ff << VIS_OPF_SHIFT) 132#define VIS_OPF_MASK (0x1ff << VIS_OPF_SHIFT)
136 133
@@ -810,9 +807,6 @@ int vis_emul(struct pt_regs *regs, unsigned int insn)
810 if (get_user(insn, (u32 __user *) pc)) 807 if (get_user(insn, (u32 __user *) pc))
811 return -EFAULT; 808 return -EFAULT;
812 809
813 if ((insn & VIS_OPCODE_MASK) != VIS_OPCODE_VAL)
814 return -EINVAL;
815
816 opf = (insn & VIS_OPF_MASK) >> VIS_OPF_SHIFT; 810 opf = (insn & VIS_OPF_MASK) >> VIS_OPF_SHIFT;
817 switch (opf) { 811 switch (opf) {
818 default: 812 default:
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index d70b60a3bbcc..737c26923c09 100644
--- a/arch/sparc64/mm/ultra.S
+++ b/arch/sparc64/mm/ultra.S
@@ -477,6 +477,10 @@ xcall_sync_tick:
477 sethi %hi(109f), %g7 477 sethi %hi(109f), %g7
478 b,pt %xcc, etrap_irq 478 b,pt %xcc, etrap_irq
479109: or %g7, %lo(109b), %g7 479109: or %g7, %lo(109b), %g7
480#ifdef CONFIG_TRACE_IRQFLAGS
481 call trace_hardirqs_off
482 nop
483#endif
480 call smp_synchronize_tick_client 484 call smp_synchronize_tick_client
481 nop 485 nop
482 clr %l6 486 clr %l6
@@ -508,6 +512,10 @@ xcall_report_regs:
508 sethi %hi(109f), %g7 512 sethi %hi(109f), %g7
509 b,pt %xcc, etrap_irq 513 b,pt %xcc, etrap_irq
510109: or %g7, %lo(109b), %g7 514109: or %g7, %lo(109b), %g7
515#ifdef CONFIG_TRACE_IRQFLAGS
516 call trace_hardirqs_off
517 nop
518#endif
511 call __show_regs 519 call __show_regs
512 add %sp, PTREGS_OFF, %o0 520 add %sp, PTREGS_OFF, %o0
513 clr %l6 521 clr %l6
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index 286bc0b3207f..afe3d427ddfa 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -72,9 +72,11 @@ static int uml_net_rx(struct net_device *dev)
72 return pkt_len; 72 return pkt_len;
73} 73}
74 74
75static void uml_dev_close(void* dev) 75static void uml_dev_close(struct work_struct *work)
76{ 76{
77 dev_close( (struct net_device *) dev); 77 struct uml_net_private *lp =
78 container_of(work, struct uml_net_private, work);
79 dev_close(lp->dev);
78} 80}
79 81
80irqreturn_t uml_net_interrupt(int irq, void *dev_id) 82irqreturn_t uml_net_interrupt(int irq, void *dev_id)
@@ -89,7 +91,6 @@ irqreturn_t uml_net_interrupt(int irq, void *dev_id)
89 spin_lock(&lp->lock); 91 spin_lock(&lp->lock);
90 while((err = uml_net_rx(dev)) > 0) ; 92 while((err = uml_net_rx(dev)) > 0) ;
91 if(err < 0) { 93 if(err < 0) {
92 DECLARE_WORK(close_work, uml_dev_close, dev);
93 printk(KERN_ERR 94 printk(KERN_ERR
94 "Device '%s' read returned %d, shutting it down\n", 95 "Device '%s' read returned %d, shutting it down\n",
95 dev->name, err); 96 dev->name, err);
@@ -97,9 +98,10 @@ irqreturn_t uml_net_interrupt(int irq, void *dev_id)
97 * again lp->lock. 98 * again lp->lock.
98 * And dev_close() can be safely called multiple times on the 99 * And dev_close() can be safely called multiple times on the
99 * same device, since it tests for (dev->flags & IFF_UP). So 100 * same device, since it tests for (dev->flags & IFF_UP). So
100 * there's no harm in delaying the device shutdown. */ 101 * there's no harm in delaying the device shutdown.
101 schedule_work(&close_work); 102 * Furthermore, the workqueue will not re-enqueue an already
102#error this is not permitted - close_work will go out of scope 103 * enqueued work item. */
104 schedule_work(&lp->work);
103 goto out; 105 goto out;
104 } 106 }
105 reactivate_fd(lp->fd, UM_ETH_IRQ); 107 reactivate_fd(lp->fd, UM_ETH_IRQ);
@@ -334,13 +336,12 @@ static int eth_configure(int n, void *init, char *mac,
334 size = transport->private_size + sizeof(struct uml_net_private) + 336 size = transport->private_size + sizeof(struct uml_net_private) +
335 sizeof(((struct uml_net_private *) 0)->user); 337 sizeof(((struct uml_net_private *) 0)->user);
336 338
337 device = kmalloc(sizeof(*device), GFP_KERNEL); 339 device = kzalloc(sizeof(*device), GFP_KERNEL);
338 if (device == NULL) { 340 if (device == NULL) {
339 printk(KERN_ERR "eth_configure failed to allocate uml_net\n"); 341 printk(KERN_ERR "eth_configure failed to allocate uml_net\n");
340 return(1); 342 return(1);
341 } 343 }
342 344
343 memset(device, 0, sizeof(*device));
344 INIT_LIST_HEAD(&device->list); 345 INIT_LIST_HEAD(&device->list);
345 device->index = n; 346 device->index = n;
346 347
@@ -366,6 +367,7 @@ static int eth_configure(int n, void *init, char *mac,
366 /* This points to the transport private data. It's still clear, but we 367 /* This points to the transport private data. It's still clear, but we
367 * must memset it to 0 *now*. Let's help the drivers. */ 368 * must memset it to 0 *now*. Let's help the drivers. */
368 memset(lp, 0, size); 369 memset(lp, 0, size);
370 INIT_WORK(&lp->work, uml_dev_close);
369 371
370 /* sysfs register */ 372 /* sysfs register */
371 if (!driver_registered) { 373 if (!driver_registered) {
diff --git a/arch/um/include/net_kern.h b/arch/um/include/net_kern.h
index 280459fb0b26..218f8b47fdcd 100644
--- a/arch/um/include/net_kern.h
+++ b/arch/um/include/net_kern.h
@@ -11,6 +11,7 @@
11#include <linux/skbuff.h> 11#include <linux/skbuff.h>
12#include <linux/socket.h> 12#include <linux/socket.h>
13#include <linux/list.h> 13#include <linux/list.h>
14#include <linux/workqueue.h>
14 15
15struct uml_net { 16struct uml_net {
16 struct list_head list; 17 struct list_head list;
@@ -26,6 +27,7 @@ struct uml_net_private {
26 struct net_device *dev; 27 struct net_device *dev;
27 struct timer_list tl; 28 struct timer_list tl;
28 struct net_device_stats stats; 29 struct net_device_stats stats;
30 struct work_struct work;
29 int fd; 31 int fd;
30 unsigned char mac[ETH_ALEN]; 32 unsigned char mac[ETH_ALEN];
31 unsigned short (*protocol)(struct sk_buff *); 33 unsigned short (*protocol)(struct sk_buff *);
diff --git a/arch/um/sys-i386/ldt.c b/arch/um/sys-i386/ldt.c
index 49057d8bc668..5db7737df0ff 100644
--- a/arch/um/sys-i386/ldt.c
+++ b/arch/um/sys-i386/ldt.c
@@ -166,7 +166,7 @@ static long read_ldt_from_host(void __user * ptr, unsigned long bytecount)
166 struct ptrace_ldt ptrace_ldt = (struct ptrace_ldt) { 166 struct ptrace_ldt ptrace_ldt = (struct ptrace_ldt) {
167 .func = 0, 167 .func = 0,
168 .bytecount = bytecount, 168 .bytecount = bytecount,
169 .ptr = (void *)kmalloc(bytecount, GFP_KERNEL)}; 169 .ptr = kmalloc(bytecount, GFP_KERNEL)};
170 u32 cpu; 170 u32 cpu;
171 171
172 if(ptrace_ldt.ptr == NULL) 172 if(ptrace_ldt.ptr == NULL)
@@ -426,7 +426,7 @@ void ldt_get_host_info(void)
426 host_ldt_entries = dummy_list; 426 host_ldt_entries = dummy_list;
427 else { 427 else {
428 size = (size + 1) * sizeof(dummy_list[0]); 428 size = (size + 1) * sizeof(dummy_list[0]);
429 host_ldt_entries = (short *)kmalloc(size, GFP_KERNEL); 429 host_ldt_entries = kmalloc(size, GFP_KERNEL);
430 if(host_ldt_entries == NULL) { 430 if(host_ldt_entries == NULL) {
431 printk("ldt_get_host_info: couldn't allocate host ldt list\n"); 431 printk("ldt_get_host_info: couldn't allocate host ldt list\n");
432 goto out_free; 432 goto out_free;
diff --git a/arch/v850/Kconfig b/arch/v850/Kconfig
index bcf825875d17..f0d4d72e560f 100644
--- a/arch/v850/Kconfig
+++ b/arch/v850/Kconfig
@@ -105,17 +105,17 @@ menu "Processor type and features"
105 # currently support 105 # currently support
106 config V850E_MA1 106 config V850E_MA1
107 bool 107 bool
108 depends RTE_CB_MA1 108 depends on RTE_CB_MA1
109 default y 109 default y
110 # Similarly for the RTE-V850E/NB85E-CB - V850E/TEG 110 # Similarly for the RTE-V850E/NB85E-CB - V850E/TEG
111 config V850E_TEG 111 config V850E_TEG
112 bool 112 bool
113 depends RTE_CB_NB85E 113 depends on RTE_CB_NB85E
114 default y 114 default y
115 # ... and the RTE-V850E/ME2-CB - V850E/ME2 115 # ... and the RTE-V850E/ME2-CB - V850E/ME2
116 config V850E_ME2 116 config V850E_ME2
117 bool 117 bool
118 depends RTE_CB_ME2 118 depends on RTE_CB_ME2
119 default y 119 default y
120 120
121 121
@@ -123,7 +123,7 @@ menu "Processor type and features"
123 123
124 config V850E2_SIM85E2 124 config V850E2_SIM85E2
125 bool 125 bool
126 depends V850E2_SIM85E2C || V850E2_SIM85E2S 126 depends on V850E2_SIM85E2C || V850E2_SIM85E2S
127 default y 127 default y
128 128
129 129
@@ -132,7 +132,7 @@ menu "Processor type and features"
132 # V850E2 processors 132 # V850E2 processors
133 config V850E2 133 config V850E2
134 bool 134 bool
135 depends V850E2_SIM85E2 || V850E2_FPGA85E2C || V850E2_ANNA 135 depends on V850E2_SIM85E2 || V850E2_FPGA85E2C || V850E2_ANNA
136 default y 136 default y
137 137
138 138
@@ -141,7 +141,7 @@ menu "Processor type and features"
141 # Boards in the RTE-x-CB series 141 # Boards in the RTE-x-CB series
142 config RTE_CB 142 config RTE_CB
143 bool 143 bool
144 depends RTE_CB_MA1 || RTE_CB_NB85E || RTE_CB_ME2 144 depends on RTE_CB_MA1 || RTE_CB_NB85E || RTE_CB_ME2
145 default y 145 default y
146 146
147 config RTE_CB_MULTI 147 config RTE_CB_MULTI
@@ -149,28 +149,28 @@ menu "Processor type and features"
149 # RTE_CB_NB85E can either have multi ROM support or not, but 149 # RTE_CB_NB85E can either have multi ROM support or not, but
150 # other platforms (currently only RTE_CB_MA1) require it. 150 # other platforms (currently only RTE_CB_MA1) require it.
151 prompt "Multi monitor ROM support" if RTE_CB_NB85E 151 prompt "Multi monitor ROM support" if RTE_CB_NB85E
152 depends RTE_CB_MA1 || RTE_CB_NB85E 152 depends on RTE_CB_MA1 || RTE_CB_NB85E
153 default y 153 default y
154 154
155 config RTE_CB_MULTI_DBTRAP 155 config RTE_CB_MULTI_DBTRAP
156 bool "Pass illegal insn trap / dbtrap to kernel" 156 bool "Pass illegal insn trap / dbtrap to kernel"
157 depends RTE_CB_MULTI 157 depends on RTE_CB_MULTI
158 default n 158 default n
159 159
160 config RTE_CB_MA1_KSRAM 160 config RTE_CB_MA1_KSRAM
161 bool "Kernel in SRAM (limits size of kernel)" 161 bool "Kernel in SRAM (limits size of kernel)"
162 depends RTE_CB_MA1 && RTE_CB_MULTI 162 depends on RTE_CB_MA1 && RTE_CB_MULTI
163 default n 163 default n
164 164
165 config RTE_MB_A_PCI 165 config RTE_MB_A_PCI
166 bool "Mother-A PCI support" 166 bool "Mother-A PCI support"
167 depends RTE_CB 167 depends on RTE_CB
168 default y 168 default y
169 169
170 # The GBUS is used to talk to the RTE-MOTHER-A board 170 # The GBUS is used to talk to the RTE-MOTHER-A board
171 config RTE_GBUS_INT 171 config RTE_GBUS_INT
172 bool 172 bool
173 depends RTE_MB_A_PCI 173 depends on RTE_MB_A_PCI
174 default y 174 default y
175 175
176 # The only PCI bus we support is on the RTE-MOTHER-A board 176 # The only PCI bus we support is on the RTE-MOTHER-A board
@@ -209,7 +209,7 @@ menu "Processor type and features"
209 209
210 config ROM_KERNEL 210 config ROM_KERNEL
211 bool "Kernel in ROM" 211 bool "Kernel in ROM"
212 depends V850E2_ANNA || V850E_AS85EP1 || RTE_CB_ME2 212 depends on V850E2_ANNA || V850E_AS85EP1 || RTE_CB_ME2
213 213
214 # Some platforms pre-zero memory, in which case the kernel doesn't need to 214 # Some platforms pre-zero memory, in which case the kernel doesn't need to
215 config ZERO_BSS 215 config ZERO_BSS
@@ -225,10 +225,10 @@ menu "Processor type and features"
225 225
226 config V850E_HIGHRES_TIMER 226 config V850E_HIGHRES_TIMER
227 bool "High resolution timer support" 227 bool "High resolution timer support"
228 depends V850E_TIMER_D 228 depends on V850E_TIMER_D
229 config TIME_BOOTUP 229 config TIME_BOOTUP
230 bool "Time bootup" 230 bool "Time bootup"
231 depends V850E_HIGHRES_TIMER 231 depends on V850E_HIGHRES_TIMER
232 232
233 config RESET_GUARD 233 config RESET_GUARD
234 bool "Reset Guard" 234 bool "Reset Guard"
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 3ac581d17202..d4275537b25b 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -584,7 +584,7 @@ config SECCOMP
584 If unsure, say Y. Only embedded should say N here. 584 If unsure, say Y. Only embedded should say N here.
585 585
586config CC_STACKPROTECTOR 586config CC_STACKPROTECTOR
587 bool "Enable -fstack-protector buffer overflow detection (EXPRIMENTAL)" 587 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
588 depends on EXPERIMENTAL 588 depends on EXPERIMENTAL
589 help 589 help
590 This option turns on the -fstack-protector GCC feature. This 590 This option turns on the -fstack-protector GCC feature. This
diff --git a/arch/x86_64/kernel/cpufreq/Kconfig b/arch/x86_64/kernel/cpufreq/Kconfig
index 81f1562e5393..3abcfa3e1ed7 100644
--- a/arch/x86_64/kernel/cpufreq/Kconfig
+++ b/arch/x86_64/kernel/cpufreq/Kconfig
@@ -27,10 +27,13 @@ config X86_POWERNOW_K8_ACPI
27 default y 27 default y
28 28
29config X86_SPEEDSTEP_CENTRINO 29config X86_SPEEDSTEP_CENTRINO
30 tristate "Intel Enhanced SpeedStep" 30 tristate "Intel Enhanced SpeedStep (deprecated)"
31 select CPU_FREQ_TABLE 31 select CPU_FREQ_TABLE
32 depends on ACPI_PROCESSOR 32 depends on ACPI_PROCESSOR
33 help 33 help
34 This is deprecated and this functionality is now merged into
35 acpi_cpufreq (X86_ACPI_CPUFREQ). Use that driver instead of
36 speedstep_centrino.
34 This adds the CPUFreq driver for Enhanced SpeedStep enabled 37 This adds the CPUFreq driver for Enhanced SpeedStep enabled
35 mobile CPUs. This means Intel Pentium M (Centrino) CPUs 38 mobile CPUs. This means Intel Pentium M (Centrino) CPUs
36 or 64bit enabled Intel Xeons. 39 or 64bit enabled Intel Xeons.
@@ -50,6 +53,7 @@ config X86_ACPI_CPUFREQ
50 help 53 help
51 This driver adds a CPUFreq driver which utilizes the ACPI 54 This driver adds a CPUFreq driver which utilizes the ACPI
52 Processor Performance States. 55 Processor Performance States.
56 This driver also supports Intel Enhanced Speedstep.
53 57
54 For details, take a look at <file:Documentation/cpu-freq/>. 58 For details, take a look at <file:Documentation/cpu-freq/>.
55 59
diff --git a/arch/x86_64/kernel/cpufreq/Makefile b/arch/x86_64/kernel/cpufreq/Makefile
index d8b593879224..753ce1dd418e 100644
--- a/arch/x86_64/kernel/cpufreq/Makefile
+++ b/arch/x86_64/kernel/cpufreq/Makefile
@@ -5,8 +5,8 @@
5SRCDIR := ../../../i386/kernel/cpu/cpufreq 5SRCDIR := ../../../i386/kernel/cpu/cpufreq
6 6
7obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o 7obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
8obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
9obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o 8obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o
9obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
10obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o 10obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
11obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o 11obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o
12 12
diff --git a/arch/x86_64/kernel/vsyscall.c b/arch/x86_64/kernel/vsyscall.c
index 4a673f5397a0..2433d6fc68b1 100644
--- a/arch/x86_64/kernel/vsyscall.c
+++ b/arch/x86_64/kernel/vsyscall.c
@@ -225,8 +225,7 @@ out:
225 225
226static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen, 226static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen,
227 void __user *oldval, size_t __user *oldlenp, 227 void __user *oldval, size_t __user *oldlenp,
228 void __user *newval, size_t newlen, 228 void __user *newval, size_t newlen)
229 void **context)
230{ 229{
231 return -ENOSYS; 230 return -ENOSYS;
232} 231}
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 9eccfbd1b536..2e74cb0b7807 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -48,25 +48,10 @@ menu "Processor type and features"
48 48
49choice 49choice
50 prompt "Xtensa Processor Configuration" 50 prompt "Xtensa Processor Configuration"
51 default XTENSA_CPU_LINUX_BE 51 default XTENSA_VARIANT_FSF
52 52
53config XTENSA_CPU_LINUX_BE 53config XTENSA_VARIANT_FSF
54 bool "linux_be" 54 bool "fsf"
55 ---help---
56 The linux_be processor configuration is the baseline Xtensa
57 configurations included in this kernel and also used by
58 binutils, gcc, and gdb. It contains no TIE, no coprocessors,
59 and the following configuration options:
60
61 Code Density Option 2 Misc Special Registers
62 NSA/NSAU Instructions 128-bit Data Bus Width
63 Processor ID 8K, 2-way I and D Caches
64 Zero-Overhead Loops 2 Inst Address Break Registers
65 Big Endian 2 Data Address Break Registers
66 64 General-Purpose Registers JTAG Interface and Trace Port
67 17 Interrupts MMU w/ TLBs and Autorefill
68 3 Interrupt Levels 8 Autorefill Ways (I/D TLBs)
69 3 Timers Unaligned Exceptions
70endchoice 55endchoice
71 56
72config MMU 57config MMU
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index 3a3a4c66ef87..95f836db38fa 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -11,13 +11,13 @@
11# this architecture 11# this architecture
12 12
13# Core configuration. 13# Core configuration.
14# (Use CPU=<xtensa_config> to use another default compiler.) 14# (Use VAR=<xtensa_config> to use another default compiler.)
15 15
16cpu-$(CONFIG_XTENSA_CPU_LINUX_BE) := linux_be 16variant-$(CONFIG_XTENSA_VARIANT_FSF) := fsf
17cpu-$(CONFIG_XTENSA_CPU_LINUX_CUSTOM) := linux_custom 17variant-$(CONFIG_XTENSA_VARIANT_LINUX_CUSTOM) := custom
18 18
19CPU = $(cpu-y) 19VARIANT = $(variant-y)
20export CPU 20export VARIANT
21 21
22# Platform configuration 22# Platform configuration
23 23
@@ -27,8 +27,6 @@ platform-$(CONFIG_XTENSA_PLATFORM_ISS) := iss
27PLATFORM = $(platform-y) 27PLATFORM = $(platform-y)
28export PLATFORM 28export PLATFORM
29 29
30CPPFLAGS += $(if $(KBUILD_SRC),-I$(srctree)/include/asm-xtensa/)
31CPPFLAGS += -Iinclude/asm
32CFLAGS += -pipe -mlongcalls 30CFLAGS += -pipe -mlongcalls
33 31
34KBUILD_DEFCONFIG := iss_defconfig 32KBUILD_DEFCONFIG := iss_defconfig
@@ -41,12 +39,12 @@ core-$(CONFIG_EMBEDDED_RAMDISK) += arch/xtensa/boot/ramdisk/
41 39
42# Test for cross compiling 40# Test for cross compiling
43 41
44ifneq ($(CPU),) 42ifneq ($(VARIANT),)
45 COMPILE_ARCH = $(shell uname -m) 43 COMPILE_ARCH = $(shell uname -m)
46 44
47 ifneq ($(COMPILE_ARCH), xtensa) 45 ifneq ($(COMPILE_ARCH), xtensa)
48 ifndef CROSS_COMPILE 46 ifndef CROSS_COMPILE
49 CROSS_COMPILE = xtensa_$(CPU)- 47 CROSS_COMPILE = xtensa_$(VARIANT)-
50 endif 48 endif
51 endif 49 endif
52endif 50endif
@@ -68,14 +66,13 @@ archinc := include/asm-xtensa
68 66
69archprepare: $(archinc)/.platform 67archprepare: $(archinc)/.platform
70 68
71# Update machine cpu and platform symlinks if something which affects 69# Update processor variant and platform symlinks if something which affects
72# them changed. 70# them changed.
73 71
74$(archinc)/.platform: $(wildcard include/config/arch/*.h) include/config/auto.conf 72$(archinc)/.platform: $(wildcard include/config/arch/*.h) include/config/auto.conf
75 @echo ' SYMLINK $(archinc)/xtensa/config -> $(archinc)/xtensa/config-$(CPU)' 73 @echo ' SYMLINK $(archinc)/variant -> $(archinc)/variant-$(VARIANT)'
76 $(Q)mkdir -p $(archinc) 74 $(Q)mkdir -p $(archinc)
77 $(Q)mkdir -p $(archinc)/xtensa 75 $(Q)ln -fsn $(srctree)/$(archinc)/variant-$(VARIANT) $(archinc)/variant
78 $(Q)ln -fsn $(srctree)/$(archinc)/xtensa/config-$(CPU) $(archinc)/xtensa/config
79 @echo ' SYMLINK $(archinc)/platform -> $(archinc)/platform-$(PLATFORM)' 76 @echo ' SYMLINK $(archinc)/platform -> $(archinc)/platform-$(PLATFORM)'
80 $(Q)ln -fsn $(srctree)/$(archinc)/platform-$(PLATFORM) $(archinc)/platform 77 $(Q)ln -fsn $(srctree)/$(archinc)/platform-$(PLATFORM) $(archinc)/platform
81 @touch $@ 78 @touch $@
@@ -89,7 +86,7 @@ zImage zImage.initrd: vmlinux
89 $(Q)$(MAKE) $(build)=$(boot) $@ 86 $(Q)$(MAKE) $(build)=$(boot) $@
90 87
91CLEAN_FILES += arch/xtensa/vmlinux.lds \ 88CLEAN_FILES += arch/xtensa/vmlinux.lds \
92 $(archinc)/platform $(archinc)/xtensa/config \ 89 $(archinc)/platform $(archinc)/variant \
93 $(archinc)/.platform 90 $(archinc)/.platform
94 91
95define archhelp 92define archhelp
diff --git a/arch/xtensa/boot/boot-elf/bootstrap.S b/arch/xtensa/boot/boot-elf/bootstrap.S
index f857fc760aa8..464298bc348b 100644
--- a/arch/xtensa/boot/boot-elf/bootstrap.S
+++ b/arch/xtensa/boot/boot-elf/bootstrap.S
@@ -1,7 +1,4 @@
1 1
2#include <xtensa/config/specreg.h>
3#include <xtensa/config/core.h>
4
5#include <asm/bootparam.h> 2#include <asm/bootparam.h>
6 3
7 4
diff --git a/arch/xtensa/boot/boot-redboot/bootstrap.S b/arch/xtensa/boot/boot-redboot/bootstrap.S
index ee636b0da81c..84848123e2a8 100644
--- a/arch/xtensa/boot/boot-redboot/bootstrap.S
+++ b/arch/xtensa/boot/boot-redboot/bootstrap.S
@@ -1,9 +1,7 @@
1 1#include <asm/variant/core.h>
2#define _ASMLANGUAGE 2#include <asm/regs.h>
3#include <xtensa/config/specreg.h> 3#include <asm/asmmacro.h>
4#include <xtensa/config/core.h> 4#include <asm/cacheasm.h>
5#include <xtensa/cacheasm.h>
6
7 /* 5 /*
8 * RB-Data: RedBoot data/bss 6 * RB-Data: RedBoot data/bss
9 * P: Boot-Parameters 7 * P: Boot-Parameters
@@ -77,8 +75,14 @@ _start:
77 /* Note: The assembler cannot relax "addi a0, a0, ..." to an 75 /* Note: The assembler cannot relax "addi a0, a0, ..." to an
78 l32r, so we load to a4 first. */ 76 l32r, so we load to a4 first. */
79 77
80 addi a4, a0, __start - __start_a0 78 # addi a4, a0, __start - __start_a0
81 mov a0, a4 79 # mov a0, a4
80
81 movi a4, __start
82 movi a5, __start_a0
83 add a4, a0, a4
84 sub a0, a4, a5
85
82 movi a4, __start 86 movi a4, __start
83 movi a5, __reloc_end 87 movi a5, __reloc_end
84 88
@@ -106,9 +110,13 @@ _start:
106 /* We have to flush and invalidate the caches here before we jump. */ 110 /* We have to flush and invalidate the caches here before we jump. */
107 111
108#if XCHAL_DCACHE_IS_WRITEBACK 112#if XCHAL_DCACHE_IS_WRITEBACK
109 dcache_writeback_all a5, a6 113
114 ___flush_dcache_all a5 a6
115
110#endif 116#endif
111 icache_invalidate_all a5, a6 117
118 ___invalidate_icache_all a5 a6
119 isync
112 120
113 movi a11, _reloc 121 movi a11, _reloc
114 jx a11 122 jx a11
@@ -209,9 +217,14 @@ _reloc:
209 /* jump to the kernel */ 217 /* jump to the kernel */
2102: 2182:
211#if XCHAL_DCACHE_IS_WRITEBACK 219#if XCHAL_DCACHE_IS_WRITEBACK
212 dcache_writeback_all a5, a6 220
221 ___flush_dcache_all a5 a6
222
213#endif 223#endif
214 icache_invalidate_all a5, a6 224
225 ___invalidate_icache_all a5 a6
226
227 isync
215 228
216 movi a5, __start 229 movi a5, __start
217 movi a3, boot_initrd_start 230 movi a3, boot_initrd_start
diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig
index 802621dd4867..f19854035e61 100644
--- a/arch/xtensa/configs/iss_defconfig
+++ b/arch/xtensa/configs/iss_defconfig
@@ -53,11 +53,7 @@ CONFIG_CC_ALIGN_JUMPS=0
53# 53#
54# Processor type and features 54# Processor type and features
55# 55#
56CONFIG_XTENSA_ARCH_LINUX_BE=y 56CONFIG_XTENSA_VARIANT_FSF=y
57# CONFIG_XTENSA_ARCH_LINUX_LE is not set
58# CONFIG_XTENSA_ARCH_LINUX_TEST is not set
59# CONFIG_XTENSA_ARCH_S5 is not set
60# CONFIG_XTENSA_CUSTOM is not set
61CONFIG_MMU=y 57CONFIG_MMU=y
62# CONFIG_XTENSA_UNALIGNED_USER is not set 58# CONFIG_XTENSA_UNALIGNED_USER is not set
63# CONFIG_PREEMPT is not set 59# CONFIG_PREEMPT is not set
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index d573017a5dde..71f733c4f66d 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -6,7 +6,7 @@ extra-y := head.o vmlinux.lds
6 6
7 7
8obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o semaphore.o \ 8obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o semaphore.o \
9 setup.o signal.o syscalls.o time.o traps.o vectors.o platform.o \ 9 setup.o signal.o syscall.o time.o traps.o vectors.o platform.o \
10 pci-dma.o 10 pci-dma.o
11 11
12## windowspill.o 12## windowspill.o
diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S
index a4956578a24d..33d6e9d2e83c 100644
--- a/arch/xtensa/kernel/align.S
+++ b/arch/xtensa/kernel/align.S
@@ -16,14 +16,9 @@
16 */ 16 */
17 17
18#include <linux/linkage.h> 18#include <linux/linkage.h>
19#include <asm/ptrace.h>
20#include <asm/ptrace.h>
21#include <asm/current.h> 19#include <asm/current.h>
22#include <asm/asm-offsets.h> 20#include <asm/asm-offsets.h>
23#include <asm/pgtable.h>
24#include <asm/processor.h> 21#include <asm/processor.h>
25#include <asm/page.h>
26#include <asm/thread_info.h>
27 22
28#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION 23#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
29 24
@@ -216,7 +211,7 @@ ENTRY(fast_unaligned)
216 211
217 extui a5, a4, INSN_OP0, 4 # get insn.op0 nibble 212 extui a5, a4, INSN_OP0, 4 # get insn.op0 nibble
218 213
219#if XCHAL_HAVE_NARROW 214#if XCHAL_HAVE_DENSITY
220 _beqi a5, OP0_L32I_N, .Lload # L32I.N, jump 215 _beqi a5, OP0_L32I_N, .Lload # L32I.N, jump
221 addi a6, a5, -OP0_S32I_N 216 addi a6, a5, -OP0_S32I_N
222 _beqz a6, .Lstore # S32I.N, do a store 217 _beqz a6, .Lstore # S32I.N, do a store
@@ -251,7 +246,7 @@ ENTRY(fast_unaligned)
251#endif 246#endif
252 __src_b a3, a5, a6 # a3 has the data word 247 __src_b a3, a5, a6 # a3 has the data word
253 248
254#if XCHAL_HAVE_NARROW 249#if XCHAL_HAVE_DENSITY
255 addi a7, a7, 2 # increment PC (assume 16-bit insn) 250 addi a7, a7, 2 # increment PC (assume 16-bit insn)
256 251
257 extui a5, a4, INSN_OP0, 4 252 extui a5, a4, INSN_OP0, 4
@@ -279,14 +274,14 @@ ENTRY(fast_unaligned)
279 274
2801: 2751:
281 276
282#if XCHAL_HAVE_LOOP 277#if XCHAL_HAVE_LOOPS
283 rsr a3, LEND # check if we reached LEND 278 rsr a5, LEND # check if we reached LEND
284 bne a7, a3, 1f 279 bne a7, a5, 1f
285 rsr a3, LCOUNT # and LCOUNT != 0 280 rsr a5, LCOUNT # and LCOUNT != 0
286 beqz a3, 1f 281 beqz a5, 1f
287 addi a3, a3, -1 # decrement LCOUNT and set 282 addi a5, a5, -1 # decrement LCOUNT and set
288 rsr a7, LBEG # set PC to LBEGIN 283 rsr a7, LBEG # set PC to LBEGIN
289 wsr a3, LCOUNT 284 wsr a5, LCOUNT
290#endif 285#endif
291 286
2921: wsr a7, EPC_1 # skip load instruction 2871: wsr a7, EPC_1 # skip load instruction
@@ -336,7 +331,7 @@ ENTRY(fast_unaligned)
336 331
337 movi a6, 0 # mask: ffffffff:00000000 332 movi a6, 0 # mask: ffffffff:00000000
338 333
339#if XCHAL_HAVE_NARROW 334#if XCHAL_HAVE_DENSITY
340 addi a7, a7, 2 # incr. PC,assume 16-bit instruction 335 addi a7, a7, 2 # incr. PC,assume 16-bit instruction
341 336
342 extui a5, a4, INSN_OP0, 4 # extract OP0 337 extui a5, a4, INSN_OP0, 4 # extract OP0
@@ -359,14 +354,14 @@ ENTRY(fast_unaligned)
359 /* Get memory address */ 354 /* Get memory address */
360 355
3611: 3561:
362#if XCHAL_HAVE_LOOP 357#if XCHAL_HAVE_LOOPS
363 rsr a3, LEND # check if we reached LEND 358 rsr a4, LEND # check if we reached LEND
364 bne a7, a3, 1f 359 bne a7, a4, 1f
365 rsr a3, LCOUNT # and LCOUNT != 0 360 rsr a4, LCOUNT # and LCOUNT != 0
366 beqz a3, 1f 361 beqz a4, 1f
367 addi a3, a3, -1 # decrement LCOUNT and set 362 addi a4, a4, -1 # decrement LCOUNT and set
368 rsr a7, LBEG # set PC to LBEGIN 363 rsr a7, LBEG # set PC to LBEGIN
369 wsr a3, LCOUNT 364 wsr a4, LCOUNT
370#endif 365#endif
371 366
3721: wsr a7, EPC_1 # skip store instruction 3671: wsr a7, EPC_1 # skip store instruction
@@ -416,6 +411,7 @@ ENTRY(fast_unaligned)
416 411
417 /* Restore working register */ 412 /* Restore working register */
418 413
414 l32i a8, a2, PT_AREG8
419 l32i a7, a2, PT_AREG7 415 l32i a7, a2, PT_AREG7
420 l32i a6, a2, PT_AREG6 416 l32i a6, a2, PT_AREG6
421 l32i a5, a2, PT_AREG5 417 l32i a5, a2, PT_AREG5
@@ -446,7 +442,7 @@ ENTRY(fast_unaligned)
446 mov a1, a2 442 mov a1, a2
447 443
448 rsr a0, PS 444 rsr a0, PS
449 bbsi.l a2, PS_UM_SHIFT, 1f # jump if user mode 445 bbsi.l a2, PS_UM_BIT, 1f # jump if user mode
450 446
451 movi a0, _kernel_exception 447 movi a0, _kernel_exception
452 jx a0 448 jx a0
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
index 7cd1d7f8f608..b256cfbef344 100644
--- a/arch/xtensa/kernel/asm-offsets.c
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -87,6 +87,11 @@ int main(void)
87 DEFINE(MM_CONTEXT, offsetof (struct mm_struct, context)); 87 DEFINE(MM_CONTEXT, offsetof (struct mm_struct, context));
88 BLANK(); 88 BLANK();
89 DEFINE(PT_SINGLESTEP_BIT, PT_SINGLESTEP_BIT); 89 DEFINE(PT_SINGLESTEP_BIT, PT_SINGLESTEP_BIT);
90
91 /* constants */
92 DEFINE(_CLONE_VM, CLONE_VM);
93 DEFINE(_CLONE_UNTRACED, CLONE_UNTRACED);
94
90 return 0; 95 return 0;
91} 96}
92 97
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
index cf5a93fb6a2e..01bcb9fcfcbd 100644
--- a/arch/xtensa/kernel/coprocessor.S
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -90,7 +90,6 @@ ENTRY(enable_coprocessor)
90 rsync 90 rsync
91 retw 91 retw
92 92
93#endif
94 93
95ENTRY(save_coprocessor_extra) 94ENTRY(save_coprocessor_extra)
96 entry sp, 16 95 entry sp, 16
@@ -197,4 +196,5 @@ _xtensa_reginfo_tables:
197 XCHAL_CP7_SA_CONTENTS_LIBDB 196 XCHAL_CP7_SA_CONTENTS_LIBDB
198 .word 0xFC000000 /* invalid register number,marks end of table*/ 197 .word 0xFC000000 /* invalid register number,marks end of table*/
199_xtensa_reginfo_table_end: 198_xtensa_reginfo_table_end:
199#endif
200 200
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 89e409e9e0de..9e271ba009bf 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -24,7 +24,7 @@
24#include <asm/pgtable.h> 24#include <asm/pgtable.h>
25#include <asm/page.h> 25#include <asm/page.h>
26#include <asm/signal.h> 26#include <asm/signal.h>
27#include <xtensa/coreasm.h> 27#include <asm/tlbflush.h>
28 28
29/* Unimplemented features. */ 29/* Unimplemented features. */
30 30
@@ -364,7 +364,7 @@ common_exception:
364 movi a2, 1 364 movi a2, 1
365 extui a3, a3, 0, 1 # a3 = PS.INTLEVEL[0] 365 extui a3, a3, 0, 1 # a3 = PS.INTLEVEL[0]
366 moveqz a3, a2, a0 # a3 = 1 iff interrupt exception 366 moveqz a3, a2, a0 # a3 = 1 iff interrupt exception
367 movi a2, PS_WOE_MASK 367 movi a2, 1 << PS_WOE_BIT
368 or a3, a3, a2 368 or a3, a3, a2
369 rsr a0, EXCCAUSE 369 rsr a0, EXCCAUSE
370 xsr a3, PS 370 xsr a3, PS
@@ -399,7 +399,7 @@ common_exception_return:
399 /* Jump if we are returning from kernel exceptions. */ 399 /* Jump if we are returning from kernel exceptions. */
400 400
4011: l32i a3, a1, PT_PS 4011: l32i a3, a1, PT_PS
402 _bbsi.l a3, PS_UM_SHIFT, 2f 402 _bbsi.l a3, PS_UM_BIT, 2f
403 j kernel_exception_exit 403 j kernel_exception_exit
404 404
405 /* Specific to a user exception exit: 405 /* Specific to a user exception exit:
@@ -422,7 +422,7 @@ common_exception_return:
422 * (Hint: There is only one user exception frame on stack) 422 * (Hint: There is only one user exception frame on stack)
423 */ 423 */
424 424
425 movi a3, PS_WOE_MASK 425 movi a3, 1 << PS_WOE_BIT
426 426
427 _bbsi.l a4, TIF_NEED_RESCHED, 3f 427 _bbsi.l a4, TIF_NEED_RESCHED, 3f
428 _bbci.l a4, TIF_SIGPENDING, 4f 428 _bbci.l a4, TIF_SIGPENDING, 4f
@@ -694,7 +694,7 @@ common_exception_exit:
694ENTRY(debug_exception) 694ENTRY(debug_exception)
695 695
696 rsr a0, EPS + XCHAL_DEBUGLEVEL 696 rsr a0, EPS + XCHAL_DEBUGLEVEL
697 bbsi.l a0, PS_EXCM_SHIFT, 1f # exception mode 697 bbsi.l a0, PS_EXCM_BIT, 1f # exception mode
698 698
699 /* Set EPC_1 and EXCCAUSE */ 699 /* Set EPC_1 and EXCCAUSE */
700 700
@@ -707,7 +707,7 @@ ENTRY(debug_exception)
707 707
708 /* Restore PS to the value before the debug exc but with PS.EXCM set.*/ 708 /* Restore PS to the value before the debug exc but with PS.EXCM set.*/
709 709
710 movi a2, 1 << PS_EXCM_SHIFT 710 movi a2, 1 << PS_EXCM_BIT
711 or a2, a0, a2 711 or a2, a0, a2
712 movi a0, debug_exception # restore a3, debug jump vector 712 movi a0, debug_exception # restore a3, debug jump vector
713 wsr a2, PS 713 wsr a2, PS
@@ -715,7 +715,7 @@ ENTRY(debug_exception)
715 715
716 /* Switch to kernel/user stack, restore jump vector, and save a0 */ 716 /* Switch to kernel/user stack, restore jump vector, and save a0 */
717 717
718 bbsi.l a2, PS_UM_SHIFT, 2f # jump if user mode 718 bbsi.l a2, PS_UM_BIT, 2f # jump if user mode
719 719
720 addi a2, a1, -16-PT_SIZE # assume kernel stack 720 addi a2, a1, -16-PT_SIZE # assume kernel stack
721 s32i a0, a2, PT_AREG0 721 s32i a0, a2, PT_AREG0
@@ -778,7 +778,7 @@ ENTRY(unrecoverable_exception)
778 wsr a1, WINDOWBASE 778 wsr a1, WINDOWBASE
779 rsync 779 rsync
780 780
781 movi a1, PS_WOE_MASK | 1 781 movi a1, (1 << PS_WOE_BIT) | 1
782 wsr a1, PS 782 wsr a1, PS
783 rsync 783 rsync
784 784
@@ -1004,13 +1004,10 @@ ENTRY(fast_syscall_kernel)
1004 1004
1005 rsr a0, DEPC # get syscall-nr 1005 rsr a0, DEPC # get syscall-nr
1006 _beqz a0, fast_syscall_spill_registers 1006 _beqz a0, fast_syscall_spill_registers
1007 1007 _beqi a0, __NR_xtensa, fast_syscall_xtensa
1008 addi a0, a0, -__NR_sysxtensa
1009 _beqz a0, fast_syscall_sysxtensa
1010 1008
1011 j kernel_exception 1009 j kernel_exception
1012 1010
1013
1014ENTRY(fast_syscall_user) 1011ENTRY(fast_syscall_user)
1015 1012
1016 /* Skip syscall. */ 1013 /* Skip syscall. */
@@ -1024,9 +1021,7 @@ ENTRY(fast_syscall_user)
1024 1021
1025 rsr a0, DEPC # get syscall-nr 1022 rsr a0, DEPC # get syscall-nr
1026 _beqz a0, fast_syscall_spill_registers 1023 _beqz a0, fast_syscall_spill_registers
1027 1024 _beqi a0, __NR_xtensa, fast_syscall_xtensa
1028 addi a0, a0, -__NR_sysxtensa
1029 _beqz a0, fast_syscall_sysxtensa
1030 1025
1031 j user_exception 1026 j user_exception
1032 1027
@@ -1047,18 +1042,19 @@ ENTRY(fast_syscall_unrecoverable)
1047/* 1042/*
1048 * sysxtensa syscall handler 1043 * sysxtensa syscall handler
1049 * 1044 *
1050 * int sysxtensa (XTENSA_ATOMIC_SET, ptr, val, unused); 1045 * int sysxtensa (SYS_XTENSA_ATOMIC_SET, ptr, val, unused);
1051 * int sysxtensa (XTENSA_ATOMIC_ADD, ptr, val, unused); 1046 * int sysxtensa (SYS_XTENSA_ATOMIC_ADD, ptr, val, unused);
1052 * int sysxtensa (XTENSA_ATOMIC_EXG_ADD, ptr, val, unused); 1047 * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val, unused);
1053 * int sysxtensa (XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval); 1048 * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
1054 * a2 a6 a3 a4 a5 1049 * a2 a6 a3 a4 a5
1055 * 1050 *
1056 * Entry condition: 1051 * Entry condition:
1057 * 1052 *
1058 * a0: trashed, original value saved on stack (PT_AREG0) 1053 * a0: a2 (syscall-nr), original value saved on stack (PT_AREG0)
1059 * a1: a1 1054 * a1: a1
1060 * a2: new stack pointer, original in DEPC 1055 * a2: new stack pointer, original in a0 and DEPC
1061 * a3: dispatch table 1056 * a3: dispatch table, original in excsave_1
1057 * a4..a15: unchanged
1062 * depc: a2, original value saved on stack (PT_DEPC) 1058 * depc: a2, original value saved on stack (PT_DEPC)
1063 * excsave_1: a3 1059 * excsave_1: a3
1064 * 1060 *
@@ -1091,59 +1087,62 @@ ENTRY(fast_syscall_unrecoverable)
1091#define CATCH \ 1087#define CATCH \
109267: 108867:
1093 1089
1094ENTRY(fast_syscall_sysxtensa) 1090ENTRY(fast_syscall_xtensa)
1095
1096 _beqz a6, 1f
1097 _blti a6, SYSXTENSA_COUNT, 2f
1098 1091
10991: j user_exception 1092 xsr a3, EXCSAVE_1 # restore a3, excsave1
1100
11012: xsr a3, EXCSAVE_1 # restore a3, excsave1
1102 s32i a7, a2, PT_AREG7
1103 1093
1094 s32i a7, a2, PT_AREG7 # we need an additional register
1104 movi a7, 4 # sizeof(unsigned int) 1095 movi a7, 4 # sizeof(unsigned int)
1105 access_ok a0, a3, a7, a2, .Leac 1096 access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp
1106 1097
1107 _beqi a6, SYSXTENSA_ATOMIC_SET, .Lset 1098 addi a6, a6, -1 # assuming SYS_XTENSA_ATOMIC_SET = 1
1108 _beqi a6, SYSXTENSA_ATOMIC_EXG_ADD, .Lexg 1099 _bgeui a6, SYS_XTENSA_COUNT - 1, .Lill
1109 _beqi a6, SYSXTENSA_ATOMIC_ADD, .Ladd 1100 _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP - 1, .Lnswp
1110 1101
1111 /* Fall through for SYSXTENSA_ATOMIC_CMP_SWP */ 1102 /* Fall through for ATOMIC_CMP_SWP. */
1112 1103
1113.Lswp: /* Atomic compare and swap */ 1104.Lswp: /* Atomic compare and swap */
1114 1105
1115TRY l32i a7, a3, 0 # read old value 1106TRY l32i a0, a3, 0 # read old value
1116 bne a7, a4, 1f # same as old value? jump 1107 bne a0, a4, 1f # same as old value? jump
1117 s32i a5, a3, 0 # different, modify value 1108TRY s32i a5, a3, 0 # different, modify value
1118 movi a7, 1 # and return 1 1109 l32i a7, a2, PT_AREG7 # restore a7
1119 j .Lret 1110 l32i a0, a2, PT_AREG0 # restore a0
1120 1111 movi a2, 1 # and return 1
11211: movi a7, 0 # same values: return 0 1112 addi a6, a6, 1 # restore a6 (really necessary?)
1122 j .Lret 1113 rfe
1123
1124.Ladd: /* Atomic add */
1125.Lexg: /* Atomic (exchange) add */
1126 1114
1127TRY l32i a7, a3, 0 11151: l32i a7, a2, PT_AREG7 # restore a7
1128 add a4, a4, a7 1116 l32i a0, a2, PT_AREG0 # restore a0
1129 s32i a4, a3, 0 1117 movi a2, 0 # return 0 (note that we cannot set
1130 j .Lret 1118 addi a6, a6, 1 # restore a6 (really necessary?)
1119 rfe
1131 1120
1132.Lset: /* Atomic set */ 1121.Lnswp: /* Atomic set, add, and exg_add. */
1133 1122
1134TRY l32i a7, a3, 0 # read old value as return value 1123TRY l32i a7, a3, 0 # orig
1135 s32i a4, a3, 0 # write new value 1124 add a0, a4, a7 # + arg
1125 moveqz a0, a4, a6 # set
1126TRY s32i a0, a3, 0 # write new value
1136 1127
1137.Lret: mov a0, a2 1128 mov a0, a2
1138 mov a2, a7 1129 mov a2, a7
1139 l32i a7, a0, PT_AREG7 1130 l32i a7, a0, PT_AREG7 # restore a7
1140 l32i a3, a0, PT_AREG3 1131 l32i a0, a0, PT_AREG0 # restore a0
1141 l32i a0, a0, PT_AREG0 1132 addi a6, a6, 1 # restore a6 (really necessary?)
1142 rfe 1133 rfe
1143 1134
1144CATCH 1135CATCH
1145.Leac: movi a7, -EFAULT 1136.Leac: l32i a7, a2, PT_AREG7 # restore a7
1146 j .Lret 1137 l32i a0, a2, PT_AREG0 # restore a0
1138 movi a2, -EFAULT
1139 rfe
1140
1141.Lill: l32i a7, a2, PT_AREG0 # restore a7
1142 l32i a0, a2, PT_AREG0 # restore a0
1143 movi a2, -EINVAL
1144 rfe
1145
1147 1146
1148 1147
1149 1148
@@ -1491,7 +1490,7 @@ ENTRY(_spill_registers)
1491 */ 1490 */
1492 1491
1493 rsr a0, PS 1492 rsr a0, PS
1494 _bbci.l a0, PS_UM_SHIFT, 1f 1493 _bbci.l a0, PS_UM_BIT, 1f
1495 1494
1496 /* User space: Setup a dummy frame and kill application. 1495 /* User space: Setup a dummy frame and kill application.
1497 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer. 1496 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
@@ -1510,7 +1509,7 @@ ENTRY(_spill_registers)
1510 l32i a1, a3, EXC_TABLE_KSTK 1509 l32i a1, a3, EXC_TABLE_KSTK
1511 wsr a3, EXCSAVE_1 1510 wsr a3, EXCSAVE_1
1512 1511
1513 movi a4, PS_WOE_MASK | 1 1512 movi a4, (1 << PS_WOE_BIT) | 1
1514 wsr a4, PS 1513 wsr a4, PS
1515 rsync 1514 rsync
1516 1515
@@ -1612,7 +1611,7 @@ ENTRY(fast_second_level_miss)
1612 rsr a1, PTEVADDR 1611 rsr a1, PTEVADDR
1613 srli a1, a1, PAGE_SHIFT 1612 srli a1, a1, PAGE_SHIFT
1614 slli a1, a1, PAGE_SHIFT # ptevaddr & PAGE_MASK 1613 slli a1, a1, PAGE_SHIFT # ptevaddr & PAGE_MASK
1615 addi a1, a1, DTLB_WAY_PGTABLE # ... + way_number 1614 addi a1, a1, DTLB_WAY_PGD # ... + way_number
1616 1615
1617 wdtlb a0, a1 1616 wdtlb a0, a1
1618 dsync 1617 dsync
@@ -1654,7 +1653,7 @@ ENTRY(fast_second_level_miss)
1654 mov a1, a2 1653 mov a1, a2
1655 1654
1656 rsr a2, PS 1655 rsr a2, PS
1657 bbsi.l a2, PS_UM_SHIFT, 1f 1656 bbsi.l a2, PS_UM_BIT, 1f
1658 j _kernel_exception 1657 j _kernel_exception
16591: j _user_exception 16581: j _user_exception
1660 1659
@@ -1753,7 +1752,7 @@ ENTRY(fast_store_prohibited)
1753 mov a1, a2 1752 mov a1, a2
1754 1753
1755 rsr a2, PS 1754 rsr a2, PS
1756 bbsi.l a2, PS_UM_SHIFT, 1f 1755 bbsi.l a2, PS_UM_BIT, 1f
1757 j _kernel_exception 1756 j _kernel_exception
17581: j _user_exception 17571: j _user_exception
1759 1758
@@ -1907,6 +1906,103 @@ ENTRY(fast_coprocessor)
1907#endif /* XCHAL_EXTRA_SA_SIZE */ 1906#endif /* XCHAL_EXTRA_SA_SIZE */
1908 1907
1909/* 1908/*
1909 * System Calls.
1910 *
1911 * void system_call (struct pt_regs* regs, int exccause)
1912 * a2 a3
1913 */
1914
1915ENTRY(system_call)
1916 entry a1, 32
1917
1918 /* regs->syscall = regs->areg[2] */
1919
1920 l32i a3, a2, PT_AREG2
1921 mov a6, a2
1922 movi a4, do_syscall_trace_enter
1923 s32i a3, a2, PT_SYSCALL
1924 callx4 a4
1925
1926 /* syscall = sys_call_table[syscall_nr] */
1927
1928 movi a4, sys_call_table;
1929 movi a5, __NR_syscall_count
1930 movi a6, -ENOSYS
1931 bgeu a3, a5, 1f
1932
1933 addx4 a4, a3, a4
1934 l32i a4, a4, 0
1935 movi a5, sys_ni_syscall;
1936 beq a4, a5, 1f
1937
1938 /* Load args: arg0 - arg5 are passed via regs. */
1939
1940 l32i a6, a2, PT_AREG6
1941 l32i a7, a2, PT_AREG3
1942 l32i a8, a2, PT_AREG4
1943 l32i a9, a2, PT_AREG5
1944 l32i a10, a2, PT_AREG8
1945 l32i a11, a2, PT_AREG9
1946
1947 /* Pass one additional argument to the syscall: pt_regs (on stack) */
1948 s32i a2, a1, 0
1949
1950 callx4 a4
1951
19521: /* regs->areg[2] = return_value */
1953
1954 s32i a6, a2, PT_AREG2
1955 movi a4, do_syscall_trace_leave
1956 mov a6, a2
1957 callx4 a4
1958 retw
1959
1960
1961/*
1962 * Create a kernel thread
1963 *
1964 * int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
1965 * a2 a2 a3 a4
1966 */
1967
1968ENTRY(kernel_thread)
1969 entry a1, 16
1970
1971 mov a5, a2 # preserve fn over syscall
1972 mov a7, a3 # preserve args over syscall
1973
1974 movi a3, _CLONE_VM | _CLONE_UNTRACED
1975 movi a2, __NR_clone
1976 or a6, a4, a3 # arg0: flags
1977 mov a3, a1 # arg1: sp
1978 syscall
1979
1980 beq a3, a1, 1f # branch if parent
1981 mov a6, a7 # args
1982 callx4 a5 # fn(args)
1983
1984 movi a2, __NR_exit
1985 syscall # return value of fn(args) still in a6
1986
19871: retw
1988
1989/*
1990 * Do a system call from kernel instead of calling sys_execve, so we end up
1991 * with proper pt_regs.
1992 *
1993 * int kernel_execve(const char *fname, char *const argv[], charg *const envp[])
1994 * a2 a2 a3 a4
1995 */
1996
1997ENTRY(kernel_execve)
1998 entry a1, 16
1999 mov a6, a2 # arg0 is in a6
2000 movi a2, __NR_execve
2001 syscall
2002
2003 retw
2004
2005/*
1910 * Task switch. 2006 * Task switch.
1911 * 2007 *
1912 * struct task* _switch_to (struct task* prev, struct task* next) 2008 * struct task* _switch_to (struct task* prev, struct task* next)
@@ -1924,7 +2020,7 @@ ENTRY(_switch_to)
1924 2020
1925 /* Disable ints while we manipulate the stack pointer; spill regs. */ 2021 /* Disable ints while we manipulate the stack pointer; spill regs. */
1926 2022
1927 movi a5, PS_EXCM_MASK | LOCKLEVEL 2023 movi a5, (1 << PS_EXCM_BIT) | LOCKLEVEL
1928 xsr a5, PS 2024 xsr a5, PS
1929 rsr a3, EXCSAVE_1 2025 rsr a3, EXCSAVE_1
1930 rsync 2026 rsync
@@ -1964,33 +2060,9 @@ ENTRY(ret_from_fork)
1964 movi a4, schedule_tail 2060 movi a4, schedule_tail
1965 callx4 a4 2061 callx4 a4
1966 2062
1967 movi a4, do_syscall_trace 2063 movi a4, do_syscall_trace_leave
2064 mov a6, a1
1968 callx4 a4 2065 callx4 a4
1969 2066
1970 j common_exception_return 2067 j common_exception_return
1971 2068
1972
1973
1974/*
1975 * Table of syscalls
1976 */
1977
1978.data
1979.align 4
1980.global sys_call_table
1981sys_call_table:
1982
1983#define SYSCALL(call, narg) .word call
1984#include "syscalls.h"
1985
1986/*
1987 * Number of arguments of each syscall
1988 */
1989
1990.global sys_narg_table
1991sys_narg_table:
1992
1993#undef SYSCALL
1994#define SYSCALL(call, narg) .byte narg
1995#include "syscalls.h"
1996
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index c07cb2522993..ea89910efa44 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -15,9 +15,9 @@
15 * Kevin Chea 15 * Kevin Chea
16 */ 16 */
17 17
18#include <xtensa/cacheasm.h>
19#include <asm/processor.h> 18#include <asm/processor.h>
20#include <asm/page.h> 19#include <asm/page.h>
20#include <asm/cacheasm.h>
21 21
22/* 22/*
23 * This module contains the entry code for kernel images. It performs the 23 * This module contains the entry code for kernel images. It performs the
@@ -32,13 +32,6 @@
32 * 32 *
33 */ 33 */
34 34
35 .macro iterate from, to , cmd
36 .ifeq ((\to - \from) & ~0xfff)
37 \cmd \from
38 iterate "(\from+1)", \to, \cmd
39 .endif
40 .endm
41
42/* 35/*
43 * _start 36 * _start
44 * 37 *
@@ -64,7 +57,7 @@ _startup:
64 57
65 /* Disable interrupts and exceptions. */ 58 /* Disable interrupts and exceptions. */
66 59
67 movi a0, XCHAL_PS_EXCM_MASK 60 movi a0, LOCKLEVEL
68 wsr a0, PS 61 wsr a0, PS
69 62
70 /* Preserve the pointer to the boot parameter list in EXCSAVE_1 */ 63 /* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
@@ -91,11 +84,11 @@ _startup:
91 movi a1, 15 84 movi a1, 15
92 wsr a0, ICOUNTLEVEL 85 wsr a0, ICOUNTLEVEL
93 86
94 .macro reset_dbreak num 87 .set _index, 0
95 wsr a0, DBREAKC + \num 88 .rept XCHAL_NUM_DBREAK - 1
96 .endm 89 wsr a0, DBREAKC + _index
97 90 .set _index, _index + 1
98 iterate 0, XCHAL_NUM_IBREAK-1, reset_dbreak 91 .endr
99#endif 92#endif
100 93
101 /* Clear CCOUNT (not really necessary, but nice) */ 94 /* Clear CCOUNT (not really necessary, but nice) */
@@ -110,10 +103,11 @@ _startup:
110 103
111 /* Disable all timers. */ 104 /* Disable all timers. */
112 105
113 .macro reset_timer num 106 .set _index, 0
114 wsr a0, CCOMPARE_0 + \num 107 .rept XCHAL_NUM_TIMERS - 1
115 .endm 108 wsr a0, CCOMPARE + _index
116 iterate 0, XCHAL_NUM_TIMERS-1, reset_timer 109 .set _index, _index + 1
110 .endr
117 111
118 /* Interrupt initialization. */ 112 /* Interrupt initialization. */
119 113
@@ -139,12 +133,21 @@ _startup:
139 rsync 133 rsync
140 134
141 /* Initialize the caches. 135 /* Initialize the caches.
142 * Does not include flushing writeback d-cache. 136 * a2, a3 are just working registers (clobbered).
143 * a6, a7 are just working registers (clobbered).
144 */ 137 */
145 138
146 icache_reset a2, a3 139#if XCHAL_DCACHE_LINE_LOCKABLE
147 dcache_reset a2, a3 140 ___unlock_dcache_all a2 a3
141#endif
142
143#if XCHAL_ICACHE_LINE_LOCKABLE
144 ___unlock_icache_all a2 a3
145#endif
146
147 ___invalidate_dcache_all a2 a3
148 ___invalidate_icache_all a2 a3
149
150 isync
148 151
149 /* Unpack data sections 152 /* Unpack data sections
150 * 153 *
@@ -181,9 +184,9 @@ _startup:
181 movi a2, _bss_start # start of BSS 184 movi a2, _bss_start # start of BSS
182 movi a3, _bss_end # end of BSS 185 movi a3, _bss_end # end of BSS
183 186
1841: addi a2, a2, 4 187 __loopt a2, a3, a4, 2
185 s32i a0, a2, 0 188 s32i a0, a2, 0
186 blt a2, a3, 1b 189 __endla a2, a4, 4
187 190
188#if XCHAL_DCACHE_IS_WRITEBACK 191#if XCHAL_DCACHE_IS_WRITEBACK
189 192
@@ -191,7 +194,7 @@ _startup:
191 * instructions/data are available. 194 * instructions/data are available.
192 */ 195 */
193 196
194 dcache_writeback_all a2, a3 197 ___flush_dcache_all a2 a3
195#endif 198#endif
196 199
197 /* Setup stack and enable window exceptions (keep irqs disabled) */ 200 /* Setup stack and enable window exceptions (keep irqs disabled) */
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index 1cf744ee0959..c9ea73b7031b 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -4,7 +4,7 @@
4 * Xtensa built-in interrupt controller and some generic functions copied 4 * Xtensa built-in interrupt controller and some generic functions copied
5 * from i386. 5 * from i386.
6 * 6 *
7 * Copyright (C) 2002 - 2005 Tensilica, Inc. 7 * Copyright (C) 2002 - 2006 Tensilica, Inc.
8 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar 8 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
9 * 9 *
10 * 10 *
@@ -22,11 +22,6 @@
22#include <asm/uaccess.h> 22#include <asm/uaccess.h>
23#include <asm/platform.h> 23#include <asm/platform.h>
24 24
25static void enable_xtensa_irq(unsigned int irq);
26static void disable_xtensa_irq(unsigned int irq);
27static void mask_and_ack_xtensa(unsigned int irq);
28static void end_xtensa_irq(unsigned int irq);
29
30static unsigned int cached_irq_mask; 25static unsigned int cached_irq_mask;
31 26
32atomic_t irq_err_count; 27atomic_t irq_err_count;
@@ -46,8 +41,16 @@ void ack_bad_irq(unsigned int irq)
46 * handlers). 41 * handlers).
47 */ 42 */
48 43
49unsigned int do_IRQ(int irq, struct pt_regs *regs) 44asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
50{ 45{
46 struct pt_regs *old_regs = set_irq_regs(regs);
47 struct irq_desc *desc = irq_desc + irq;
48
49 if (irq >= NR_IRQS) {
50 printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
51 __FUNCTION__, irq);
52 }
53
51 irq_enter(); 54 irq_enter();
52 55
53#ifdef CONFIG_DEBUG_STACKOVERFLOW 56#ifdef CONFIG_DEBUG_STACKOVERFLOW
@@ -63,12 +66,10 @@ unsigned int do_IRQ(int irq, struct pt_regs *regs)
63 sp - sizeof(struct thread_info)); 66 sp - sizeof(struct thread_info));
64 } 67 }
65#endif 68#endif
66 69 desc->handle_irq(irq, desc);
67 __do_IRQ(irq, regs);
68 70
69 irq_exit(); 71 irq_exit();
70 72 set_irq_regs(old_regs);
71 return 1;
72} 73}
73 74
74/* 75/*
@@ -118,72 +119,68 @@ skip:
118 } 119 }
119 return 0; 120 return 0;
120} 121}
121/* shutdown is same as "disable" */
122#define shutdown_xtensa_irq disable_xtensa_irq
123 122
124static unsigned int startup_xtensa_irq(unsigned int irq) 123static void xtensa_irq_mask(unsigned int irq)
125{
126 enable_xtensa_irq(irq);
127 return 0; /* never anything pending */
128}
129
130static struct hw_interrupt_type xtensa_irq_type = {
131 "Xtensa-IRQ",
132 startup_xtensa_irq,
133 shutdown_xtensa_irq,
134 enable_xtensa_irq,
135 disable_xtensa_irq,
136 mask_and_ack_xtensa,
137 end_xtensa_irq
138};
139
140static inline void mask_irq(unsigned int irq)
141{ 124{
142 cached_irq_mask &= ~(1 << irq); 125 cached_irq_mask &= ~(1 << irq);
143 set_sr (cached_irq_mask, INTENABLE); 126 set_sr (cached_irq_mask, INTENABLE);
144} 127}
145 128
146static inline void unmask_irq(unsigned int irq) 129static void xtensa_irq_unmask(unsigned int irq)
147{ 130{
148 cached_irq_mask |= 1 << irq; 131 cached_irq_mask |= 1 << irq;
149 set_sr (cached_irq_mask, INTENABLE); 132 set_sr (cached_irq_mask, INTENABLE);
150} 133}
151 134
152static void disable_xtensa_irq(unsigned int irq) 135static void xtensa_irq_ack(unsigned int irq)
153{ 136{
154 unsigned long flags; 137 set_sr(1 << irq, INTCLEAR);
155 local_save_flags(flags);
156 mask_irq(irq);
157 local_irq_restore(flags);
158} 138}
159 139
160static void enable_xtensa_irq(unsigned int irq) 140static int xtensa_irq_retrigger(unsigned int irq)
161{ 141{
162 unsigned long flags; 142 set_sr (1 << irq, INTSET);
163 local_save_flags(flags); 143 return 1;
164 unmask_irq(irq);
165 local_irq_restore(flags);
166}
167
168static void mask_and_ack_xtensa(unsigned int irq)
169{
170 disable_xtensa_irq(irq);
171} 144}
172 145
173static void end_xtensa_irq(unsigned int irq)
174{
175 enable_xtensa_irq(irq);
176}
177 146
147static struct irq_chip xtensa_irq_chip = {
148 .name = "xtensa",
149 .mask = xtensa_irq_mask,
150 .unmask = xtensa_irq_unmask,
151 .ack = xtensa_irq_ack,
152 .retrigger = xtensa_irq_retrigger,
153};
178 154
179void __init init_IRQ(void) 155void __init init_IRQ(void)
180{ 156{
181 int i; 157 int index;
182 158
183 for (i=0; i < XTENSA_NR_IRQS; i++) 159 for (index = 0; index < XTENSA_NR_IRQS; index++) {
184 irq_desc[i].chip = &xtensa_irq_type; 160 int mask = 1 << index;
185 161
186 cached_irq_mask = 0; 162 if (mask & XCHAL_INTTYPE_MASK_SOFTWARE)
163 set_irq_chip_and_handler(index, &xtensa_irq_chip,
164 handle_simple_irq);
187 165
188 platform_init_irq(); 166 else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE)
167 set_irq_chip_and_handler(index, &xtensa_irq_chip,
168 handle_edge_irq);
169
170 else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL)
171 set_irq_chip_and_handler(index, &xtensa_irq_chip,
172 handle_level_irq);
173
174 else if (mask & XCHAL_INTTYPE_MASK_TIMER)
175 set_irq_chip_and_handler(index, &xtensa_irq_chip,
176 handle_edge_irq);
177
178 else /* XCHAL_INTTYPE_MASK_WRITE_ERROR */
179 /* XCHAL_INTTYPE_MASK_NMI */
180
181 set_irq_chip_and_handler(index, &xtensa_irq_chip,
182 handle_level_irq);
183 }
184
185 cached_irq_mask = 0;
189} 186}
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index 6648fa9d9192..ca76f071666e 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * arch/xtensa/kernel/pci-dma.c 2 * arch/xtensa/pci-dma.c
3 * 3 *
4 * DMA coherent memory allocation. 4 * DMA coherent memory allocation.
5 * 5 *
@@ -29,28 +29,48 @@
29 */ 29 */
30 30
31void * 31void *
32dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp) 32dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag)
33{ 33{
34 void *ret; 34 unsigned long ret;
35 unsigned long uncached = 0;
35 36
36 /* ignore region speicifiers */ 37 /* ignore region speicifiers */
37 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
38 38
39 if (dev == NULL || (*dev->dma_mask < 0xffffffff)) 39 flag &= ~(__GFP_DMA | __GFP_HIGHMEM);
40 gfp |= GFP_DMA;
41 ret = (void *)__get_free_pages(gfp, get_order(size));
42 40
43 if (ret != NULL) { 41 if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
44 memset(ret, 0, size); 42 flag |= GFP_DMA;
45 *handle = virt_to_bus(ret); 43 ret = (unsigned long)__get_free_pages(flag, get_order(size));
44
45 if (ret == 0)
46 return NULL;
47
48 /* We currently don't support coherent memory outside KSEG */
49
50 if (ret < XCHAL_KSEG_CACHED_VADDR
51 || ret >= XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE)
52 BUG();
53
54
55 if (ret != 0) {
56 memset((void*) ret, 0, size);
57 uncached = ret+XCHAL_KSEG_BYPASS_VADDR-XCHAL_KSEG_CACHED_VADDR;
58 *handle = virt_to_bus((void*)ret);
59 __flush_invalidate_dcache_range(ret, size);
46 } 60 }
47 return (void*) BYPASS_ADDR((unsigned long)ret); 61
62 return (void*)uncached;
48} 63}
49 64
50void dma_free_coherent(struct device *hwdev, size_t size, 65void dma_free_coherent(struct device *hwdev, size_t size,
51 void *vaddr, dma_addr_t dma_handle) 66 void *vaddr, dma_addr_t dma_handle)
52{ 67{
53 free_pages(CACHED_ADDR((unsigned long)vaddr), get_order(size)); 68 long addr=(long)vaddr+XCHAL_KSEG_CACHED_VADDR-XCHAL_KSEG_BYPASS_VADDR;
69
70 if (addr < 0 || addr >= XCHAL_KSEG_SIZE)
71 BUG();
72
73 free_pages(addr, get_order(size));
54} 74}
55 75
56 76
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index a7c4178c2a8c..795bd5ac6f4c 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -1,4 +1,3 @@
1// TODO verify coprocessor handling
2/* 1/*
3 * arch/xtensa/kernel/process.c 2 * arch/xtensa/kernel/process.c
4 * 3 *
@@ -43,7 +42,7 @@
43#include <asm/irq.h> 42#include <asm/irq.h>
44#include <asm/atomic.h> 43#include <asm/atomic.h>
45#include <asm/asm-offsets.h> 44#include <asm/asm-offsets.h>
46#include <asm/coprocessor.h> 45#include <asm/regs.h>
47 46
48extern void ret_from_fork(void); 47extern void ret_from_fork(void);
49 48
@@ -67,25 +66,6 @@ void (*pm_power_off)(void) = NULL;
67EXPORT_SYMBOL(pm_power_off); 66EXPORT_SYMBOL(pm_power_off);
68 67
69 68
70#if XCHAL_CP_NUM > 0
71
72/*
73 * Coprocessor ownership.
74 */
75
76coprocessor_info_t coprocessor_info[] = {
77 { 0, XTENSA_CPE_CP0_OFFSET },
78 { 0, XTENSA_CPE_CP1_OFFSET },
79 { 0, XTENSA_CPE_CP2_OFFSET },
80 { 0, XTENSA_CPE_CP3_OFFSET },
81 { 0, XTENSA_CPE_CP4_OFFSET },
82 { 0, XTENSA_CPE_CP5_OFFSET },
83 { 0, XTENSA_CPE_CP6_OFFSET },
84 { 0, XTENSA_CPE_CP7_OFFSET },
85};
86
87#endif
88
89/* 69/*
90 * Powermanagement idle function, if any is provided by the platform. 70 * Powermanagement idle function, if any is provided by the platform.
91 */ 71 */
@@ -110,12 +90,10 @@ void cpu_idle(void)
110 90
111void exit_thread(void) 91void exit_thread(void)
112{ 92{
113 release_coprocessors(current); /* Empty macro if no CPs are defined */
114} 93}
115 94
116void flush_thread(void) 95void flush_thread(void)
117{ 96{
118 release_coprocessors(current); /* Empty macro if no CPs are defined */
119} 97}
120 98
121/* 99/*
@@ -183,36 +161,6 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
183 161
184 162
185/* 163/*
186 * Create a kernel thread
187 */
188
189int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
190{
191 long retval;
192 __asm__ __volatile__
193 ("mov a5, %4\n\t" /* preserve fn in a5 */
194 "mov a6, %3\n\t" /* preserve and setup arg in a6 */
195 "movi a2, %1\n\t" /* load __NR_clone for syscall*/
196 "mov a3, sp\n\t" /* sp check and sys_clone */
197 "mov a4, %5\n\t" /* load flags for syscall */
198 "syscall\n\t"
199 "beq a3, sp, 1f\n\t" /* branch if parent */
200 "callx4 a5\n\t" /* call fn */
201 "movi a2, %2\n\t" /* load __NR_exit for syscall */
202 "mov a3, a6\n\t" /* load fn return value */
203 "syscall\n"
204 "1:\n\t"
205 "mov %0, a2\n\t" /* parent returns zero */
206 :"=r" (retval)
207 :"i" (__NR_clone), "i" (__NR_exit),
208 "r" (arg), "r" (fn),
209 "r" (flags | CLONE_VM)
210 : "a2", "a3", "a4", "a5", "a6" );
211 return retval;
212}
213
214
215/*
216 * These bracket the sleeping functions.. 164 * These bracket the sleeping functions..
217 */ 165 */
218 166
@@ -275,7 +223,7 @@ void do_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
275 */ 223 */
276 224
277 elfregs->pc = regs->pc; 225 elfregs->pc = regs->pc;
278 elfregs->ps = (regs->ps & ~XCHAL_PS_EXCM_MASK); 226 elfregs->ps = (regs->ps & ~(1 << PS_EXCM_BIT));
279 elfregs->exccause = regs->exccause; 227 elfregs->exccause = regs->exccause;
280 elfregs->excvaddr = regs->excvaddr; 228 elfregs->excvaddr = regs->excvaddr;
281 elfregs->windowbase = regs->windowbase; 229 elfregs->windowbase = regs->windowbase;
@@ -325,7 +273,7 @@ void do_restore_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
325 */ 273 */
326 274
327 regs->pc = elfregs->pc; 275 regs->pc = elfregs->pc;
328 regs->ps = (elfregs->ps | XCHAL_PS_EXCM_MASK); 276 regs->ps = (elfregs->ps | (1 << PS_EXCM_BIT));
329 regs->exccause = elfregs->exccause; 277 regs->exccause = elfregs->exccause;
330 regs->excvaddr = elfregs->excvaddr; 278 regs->excvaddr = elfregs->excvaddr;
331 regs->windowbase = elfregs->windowbase; 279 regs->windowbase = elfregs->windowbase;
@@ -459,16 +407,7 @@ int do_restore_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
459int 407int
460dump_task_fpu(struct pt_regs *regs, struct task_struct *task, elf_fpregset_t *r) 408dump_task_fpu(struct pt_regs *regs, struct task_struct *task, elf_fpregset_t *r)
461{ 409{
462/* see asm/coprocessor.h for this magic number 16 */
463#if XTENSA_CP_EXTRA_SIZE > 16
464 do_save_fpregs (r, regs, task);
465
466 /* For now, bit 16 means some extra state may be present: */
467// FIXME!! need to track to return more accurate mask
468 return 0x10000 | XCHAL_CP_MASK;
469#else
470 return 0; /* no coprocessors active on this processor */ 410 return 0; /* no coprocessors active on this processor */
471#endif
472} 411}
473 412
474/* 413/*
@@ -483,3 +422,44 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
483{ 422{
484 return dump_task_fpu(regs, current, r); 423 return dump_task_fpu(regs, current, r);
485} 424}
425
426asmlinkage
427long xtensa_clone(unsigned long clone_flags, unsigned long newsp,
428 void __user *parent_tid, void *child_tls,
429 void __user *child_tid, long a5,
430 struct pt_regs *regs)
431{
432 if (!newsp)
433 newsp = regs->areg[1];
434 return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
435}
436
437/*
438 * * xtensa_execve() executes a new program.
439 * */
440
441asmlinkage
442long xtensa_execve(char __user *name, char __user * __user *argv,
443 char __user * __user *envp,
444 long a3, long a4, long a5,
445 struct pt_regs *regs)
446{
447 long error;
448 char * filename;
449
450 filename = getname(name);
451 error = PTR_ERR(filename);
452 if (IS_ERR(filename))
453 goto out;
454 // FIXME: release coprocessor??
455 error = do_execve(filename, argv, envp, regs);
456 if (error == 0) {
457 task_lock(current);
458 current->ptrace &= ~PT_DTRACE;
459 task_unlock(current);
460 }
461 putname(filename);
462out:
463 return error;
464}
465
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index 9aea23cc0dc5..8b6d3d0623b6 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -96,7 +96,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
96 /* Note: PS.EXCM is not set while user task is running; 96 /* Note: PS.EXCM is not set while user task is running;
97 * its being set in regs is for exception handling 97 * its being set in regs is for exception handling
98 * convenience. */ 98 * convenience. */
99 tmp = (regs->ps & ~XCHAL_PS_EXCM_MASK); 99 tmp = (regs->ps & ~(1 << PS_EXCM_BIT));
100 break; 100 break;
101 case REG_WB: 101 case REG_WB:
102 tmp = regs->windowbase; 102 tmp = regs->windowbase;
@@ -332,12 +332,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
332 332
333void do_syscall_trace(void) 333void do_syscall_trace(void)
334{ 334{
335 if (!test_thread_flag(TIF_SYSCALL_TRACE))
336 return;
337
338 if (!(current->ptrace & PT_PTRACED))
339 return;
340
341 /* 335 /*
342 * The 0x80 provides a way for the tracing parent to distinguish 336 * The 0x80 provides a way for the tracing parent to distinguish
343 * between a syscall stop and SIGTRAP delivery 337 * between a syscall stop and SIGTRAP delivery
@@ -354,3 +348,23 @@ void do_syscall_trace(void)
354 current->exit_code = 0; 348 current->exit_code = 0;
355 } 349 }
356} 350}
351
352void do_syscall_trace_enter(struct pt_regs *regs)
353{
354 if (test_thread_flag(TIF_SYSCALL_TRACE)
355 && (current->ptrace & PT_PTRACED))
356 do_syscall_trace();
357
358#if 0
359 if (unlikely(current->audit_context))
360 audit_syscall_entry(current, AUDIT_ARCH_XTENSA..);
361#endif
362}
363
364void do_syscall_trace_leave(struct pt_regs *regs)
365{
366 if ((test_thread_flag(TIF_SYSCALL_TRACE))
367 && (current->ptrace & PT_PTRACED))
368 do_syscall_trace();
369}
370
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index c99ab72b41b6..b6374c09de20 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -42,8 +42,6 @@
42#include <asm/page.h> 42#include <asm/page.h>
43#include <asm/setup.h> 43#include <asm/setup.h>
44 44
45#include <xtensa/config/system.h>
46
47#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) 45#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
48struct screen_info screen_info = { 0, 24, 0, 0, 0, 80, 0, 0, 0, 24, 1, 16}; 46struct screen_info screen_info = { 0, 24, 0, 0, 0, 80, 0, 0, 0, 24, 1, 16};
49#endif 47#endif
@@ -336,7 +334,7 @@ c_show(struct seq_file *f, void *slot)
336 /* high-level stuff */ 334 /* high-level stuff */
337 seq_printf(f,"processor\t: 0\n" 335 seq_printf(f,"processor\t: 0\n"
338 "vendor_id\t: Tensilica\n" 336 "vendor_id\t: Tensilica\n"
339 "model\t\t: Xtensa " XCHAL_HW_RELEASE_NAME "\n" 337 "model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n"
340 "core ID\t\t: " XCHAL_CORE_ID "\n" 338 "core ID\t\t: " XCHAL_CORE_ID "\n"
341 "build ID\t: 0x%x\n" 339 "build ID\t: 0x%x\n"
342 "byte order\t: %s\n" 340 "byte order\t: %s\n"
@@ -420,25 +418,6 @@ c_show(struct seq_file *f, void *slot)
420 XCHAL_NUM_TIMERS, 418 XCHAL_NUM_TIMERS,
421 XCHAL_DEBUGLEVEL); 419 XCHAL_DEBUGLEVEL);
422 420
423 /* Coprocessors */
424#if XCHAL_HAVE_CP
425 seq_printf(f, "coprocessors\t: %d\n", XCHAL_CP_NUM);
426#else
427 seq_printf(f, "coprocessors\t: none\n");
428#endif
429
430 /* {I,D}{RAM,ROM} and XLMI */
431 seq_printf(f,"inst ROMs\t: %d\n"
432 "inst RAMs\t: %d\n"
433 "data ROMs\t: %d\n"
434 "data RAMs\t: %d\n"
435 "XLMI ports\t: %d\n",
436 XCHAL_NUM_IROM,
437 XCHAL_NUM_IRAM,
438 XCHAL_NUM_DROM,
439 XCHAL_NUM_DRAM,
440 XCHAL_NUM_XLMI);
441
442 /* Cache */ 421 /* Cache */
443 seq_printf(f,"icache line size: %d\n" 422 seq_printf(f,"icache line size: %d\n"
444 "icache ways\t: %d\n" 423 "icache ways\t: %d\n"
@@ -466,24 +445,6 @@ c_show(struct seq_file *f, void *slot)
466 XCHAL_DCACHE_WAYS, 445 XCHAL_DCACHE_WAYS,
467 XCHAL_DCACHE_SIZE); 446 XCHAL_DCACHE_SIZE);
468 447
469 /* MMU */
470 seq_printf(f,"ASID bits\t: %d\n"
471 "ASID invalid\t: %d\n"
472 "ASID kernel\t: %d\n"
473 "rings\t\t: %d\n"
474 "itlb ways\t: %d\n"
475 "itlb AR ways\t: %d\n"
476 "dtlb ways\t: %d\n"
477 "dtlb AR ways\t: %d\n",
478 XCHAL_MMU_ASID_BITS,
479 XCHAL_MMU_ASID_INVALID,
480 XCHAL_MMU_ASID_KERNEL,
481 XCHAL_MMU_RINGS,
482 XCHAL_ITLB_WAYS,
483 XCHAL_ITLB_ARF_WAYS,
484 XCHAL_DTLB_WAYS,
485 XCHAL_DTLB_ARF_WAYS);
486
487 return 0; 448 return 0;
488} 449}
489 450
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index c494f0826fc5..c6d9880a4cdb 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -12,8 +12,8 @@
12 * 12 *
13 */ 13 */
14 14
15#include <xtensa/config/core.h> 15#include <asm/variant/core.h>
16#include <xtensa/hal.h> 16#include <asm/coprocessor.h>
17#include <linux/sched.h> 17#include <linux/sched.h>
18#include <linux/mm.h> 18#include <linux/mm.h>
19#include <linux/smp.h> 19#include <linux/smp.h>
@@ -46,7 +46,7 @@ extern struct task_struct *coproc_owners[];
46 * Atomically swap in the new signal mask, and wait for a signal. 46 * Atomically swap in the new signal mask, and wait for a signal.
47 */ 47 */
48 48
49int sys_sigsuspend(struct pt_regs *regs) 49int xtensa_sigsuspend(struct pt_regs *regs)
50{ 50{
51 old_sigset_t mask = (old_sigset_t) regs->areg[3]; 51 old_sigset_t mask = (old_sigset_t) regs->areg[3];
52 sigset_t saveset; 52 sigset_t saveset;
@@ -68,7 +68,7 @@ int sys_sigsuspend(struct pt_regs *regs)
68} 68}
69 69
70asmlinkage int 70asmlinkage int
71sys_rt_sigsuspend(struct pt_regs *regs) 71xtensa_rt_sigsuspend(struct pt_regs *regs)
72{ 72{
73 sigset_t *unewset = (sigset_t *) regs->areg[4]; 73 sigset_t *unewset = (sigset_t *) regs->areg[4];
74 size_t sigsetsize = (size_t) regs->areg[3]; 74 size_t sigsetsize = (size_t) regs->areg[3];
@@ -96,7 +96,7 @@ sys_rt_sigsuspend(struct pt_regs *regs)
96} 96}
97 97
98asmlinkage int 98asmlinkage int
99sys_sigaction(int sig, const struct old_sigaction *act, 99xtensa_sigaction(int sig, const struct old_sigaction *act,
100 struct old_sigaction *oact) 100 struct old_sigaction *oact)
101{ 101{
102 struct k_sigaction new_ka, old_ka; 102 struct k_sigaction new_ka, old_ka;
@@ -128,7 +128,7 @@ sys_sigaction(int sig, const struct old_sigaction *act,
128} 128}
129 129
130asmlinkage int 130asmlinkage int
131sys_sigaltstack(struct pt_regs *regs) 131xtensa_sigaltstack(struct pt_regs *regs)
132{ 132{
133 const stack_t *uss = (stack_t *) regs->areg[4]; 133 const stack_t *uss = (stack_t *) regs->areg[4];
134 stack_t *uoss = (stack_t *) regs->areg[3]; 134 stack_t *uoss = (stack_t *) regs->areg[3];
@@ -216,8 +216,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
216 * handler, or the user mode value doesn't matter (e.g. PS.OWB). 216 * handler, or the user mode value doesn't matter (e.g. PS.OWB).
217 */ 217 */
218 err |= __get_user(ps, &sc->sc_ps); 218 err |= __get_user(ps, &sc->sc_ps);
219 regs->ps = (regs->ps & ~XCHAL_PS_CALLINC_MASK) 219 regs->ps = (regs->ps & ~PS_CALLINC_MASK)
220 | (ps & XCHAL_PS_CALLINC_MASK); 220 | (ps & PS_CALLINC_MASK);
221 221
222 /* Additional corruption checks */ 222 /* Additional corruption checks */
223 223
@@ -280,7 +280,7 @@ flush_my_cpstate(struct task_struct *tsk)
280static int 280static int
281save_cpextra (struct _cpstate *buf) 281save_cpextra (struct _cpstate *buf)
282{ 282{
283#if (XCHAL_EXTRA_SA_SIZE == 0) && (XCHAL_CP_NUM == 0) 283#if XCHAL_CP_NUM == 0
284 return 0; 284 return 0;
285#else 285#else
286 286
@@ -350,7 +350,7 @@ setup_sigcontext(struct sigcontext *sc, struct _cpstate *cpstate,
350 return err; 350 return err;
351} 351}
352 352
353asmlinkage int sys_sigreturn(struct pt_regs *regs) 353asmlinkage int xtensa_sigreturn(struct pt_regs *regs)
354{ 354{
355 struct sigframe *frame = (struct sigframe *)regs->areg[1]; 355 struct sigframe *frame = (struct sigframe *)regs->areg[1];
356 sigset_t set; 356 sigset_t set;
@@ -382,7 +382,7 @@ badframe:
382 return 0; 382 return 0;
383} 383}
384 384
385asmlinkage int sys_rt_sigreturn(struct pt_regs *regs) 385asmlinkage int xtensa_rt_sigreturn(struct pt_regs *regs)
386{ 386{
387 struct rt_sigframe *frame = (struct rt_sigframe *)regs->areg[1]; 387 struct rt_sigframe *frame = (struct rt_sigframe *)regs->areg[1];
388 sigset_t set; 388 sigset_t set;
@@ -497,8 +497,10 @@ gen_return_code(unsigned char *codemem, unsigned int use_rt_sigreturn)
497 497
498 /* Flush generated code out of the data cache */ 498 /* Flush generated code out of the data cache */
499 499
500 if (err == 0) 500 if (err == 0) {
501 __flush_invalidate_cache_range((unsigned long)codemem, 6UL); 501 __invalidate_icache_range((unsigned long)codemem, 6UL);
502 __flush_invalidate_dcache_range((unsigned long)codemem, 6UL);
503 }
502 504
503 return err; 505 return err;
504} 506}
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
new file mode 100644
index 000000000000..fe3834bc1dbf
--- /dev/null
+++ b/arch/xtensa/kernel/syscall.c
@@ -0,0 +1,95 @@
1/*
2 * arch/xtensa/kernel/syscall.c
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2001 - 2005 Tensilica Inc.
9 * Copyright (C) 2000 Silicon Graphics, Inc.
10 * Copyright (C) 1995 - 2000 by Ralf Baechle
11 *
12 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
13 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
14 * Chris Zankel <chris@zankel.net>
15 * Kevin Chea
16 *
17 */
18#include <asm/uaccess.h>
19#include <asm/syscall.h>
20#include <asm/unistd.h>
21#include <linux/linkage.h>
22#include <linux/stringify.h>
23#include <linux/errno.h>
24#include <linux/syscalls.h>
25#include <linux/file.h>
26#include <linux/fs.h>
27#include <linux/mman.h>
28#include <linux/shm.h>
29
30typedef void (*syscall_t)(void);
31
32syscall_t sys_call_table[__NR_syscall_count] /* FIXME __cacheline_aligned */= {
33 [0 ... __NR_syscall_count - 1] = (syscall_t)&sys_ni_syscall,
34
35#undef __SYSCALL
36#define __SYSCALL(nr,symbol,nargs) [ nr ] = (syscall_t)symbol,
37#undef _XTENSA_UNISTD_H
38#undef __KERNEL_SYSCALLS__
39#include <asm/unistd.h>
40};
41
42/*
43 * xtensa_pipe() is the normal C calling standard for creating a pipe. It's not
44 * the way unix traditional does this, though.
45 */
46
47asmlinkage long xtensa_pipe(int __user *userfds)
48{
49 int fd[2];
50 int error;
51
52 error = do_pipe(fd);
53 if (!error) {
54 if (copy_to_user(userfds, fd, 2 * sizeof(int)))
55 error = -EFAULT;
56 }
57 return error;
58}
59
60
61asmlinkage long xtensa_mmap2(unsigned long addr, unsigned long len,
62 unsigned long prot, unsigned long flags,
63 unsigned long fd, unsigned long pgoff)
64{
65 int error = -EBADF;
66 struct file * file = NULL;
67
68 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
69 if (!(flags & MAP_ANONYMOUS)) {
70 file = fget(fd);
71 if (!file)
72 goto out;
73 }
74
75 down_write(&current->mm->mmap_sem);
76 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
77 up_write(&current->mm->mmap_sem);
78
79 if (file)
80 fput(file);
81out:
82 return error;
83}
84
85asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
86{
87 unsigned long ret;
88 long err;
89
90 err = do_shmat(shmid, shmaddr, shmflg, &ret);
91 if (err)
92 return err;
93 return (long)ret;
94}
95
diff --git a/arch/xtensa/kernel/syscalls.c b/arch/xtensa/kernel/syscalls.c
deleted file mode 100644
index f49cb239e603..000000000000
--- a/arch/xtensa/kernel/syscalls.c
+++ /dev/null
@@ -1,288 +0,0 @@
1/*
2 * arch/xtensa/kernel/syscalls.c
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2001 - 2005 Tensilica Inc.
9 * Copyright (C) 2000 Silicon Graphics, Inc.
10 * Copyright (C) 1995 - 2000 by Ralf Baechle
11 *
12 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
13 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
14 * Chris Zankel <chris@zankel.net>
15 * Kevin Chea
16 *
17 */
18
19#define DEBUG 0
20
21#include <linux/linkage.h>
22#include <linux/mm.h>
23#include <linux/smp.h>
24#include <linux/smp_lock.h>
25#include <linux/mman.h>
26#include <linux/sched.h>
27#include <linux/file.h>
28#include <linux/slab.h>
29#include <linux/utsname.h>
30#include <linux/unistd.h>
31#include <linux/stringify.h>
32#include <linux/syscalls.h>
33#include <linux/sem.h>
34#include <linux/msg.h>
35#include <linux/shm.h>
36#include <linux/errno.h>
37#include <asm/ptrace.h>
38#include <asm/signal.h>
39#include <asm/uaccess.h>
40#include <asm/hardirq.h>
41#include <asm/mman.h>
42#include <asm/shmparam.h>
43#include <asm/page.h>
44
45extern void do_syscall_trace(void);
46typedef int (*syscall_t)(void *a0,...);
47extern syscall_t sys_call_table[];
48extern unsigned char sys_narg_table[];
49
50/*
51 * sys_pipe() is the normal C calling standard for creating a pipe. It's not
52 * the way unix traditional does this, though.
53 */
54
55int sys_pipe(int __user *userfds)
56{
57 int fd[2];
58 int error;
59
60 error = do_pipe(fd);
61 if (!error) {
62 if (copy_to_user(userfds, fd, 2 * sizeof(int)))
63 error = -EFAULT;
64 }
65 return error;
66}
67
68/*
69 * Common code for old and new mmaps.
70 */
71long sys_mmap(unsigned long addr, unsigned long len, unsigned long prot,
72 unsigned long flags, unsigned long fd, unsigned long pgoff)
73{
74 int error = -EBADF;
75 struct file * file = NULL;
76
77 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
78 if (!(flags & MAP_ANONYMOUS)) {
79 file = fget(fd);
80 if (!file)
81 goto out;
82 }
83
84 down_write(&current->mm->mmap_sem);
85 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
86 up_write(&current->mm->mmap_sem);
87
88 if (file)
89 fput(file);
90out:
91 return error;
92}
93
94int sys_clone(struct pt_regs *regs)
95{
96 unsigned long clone_flags;
97 unsigned long newsp;
98 int __user *parent_tidptr, *child_tidptr;
99 clone_flags = regs->areg[4];
100 newsp = regs->areg[3];
101 parent_tidptr = (int __user *)regs->areg[5];
102 child_tidptr = (int __user *)regs->areg[6];
103 if (!newsp)
104 newsp = regs->areg[1];
105 return do_fork(clone_flags,newsp,regs,0,parent_tidptr,child_tidptr);
106}
107
108/*
109 * sys_execve() executes a new program.
110 */
111
112int sys_execve(struct pt_regs *regs)
113{
114 int error;
115 char * filename;
116
117 filename = getname((char *) (long)regs->areg[5]);
118 error = PTR_ERR(filename);
119 if (IS_ERR(filename))
120 goto out;
121 error = do_execve(filename, (char **) (long)regs->areg[3],
122 (char **) (long)regs->areg[4], regs);
123 putname(filename);
124
125out:
126 return error;
127}
128
129int sys_uname(struct old_utsname * name)
130{
131 if (name && !copy_to_user(name, utsname(), sizeof (*name)))
132 return 0;
133 return -EFAULT;
134}
135
136/*
137 * Build the string table for the builtin "poor man's strace".
138 */
139
140#if DEBUG
141#define SYSCALL(fun, narg) #fun,
142static char *sfnames[] = {
143#include "syscalls.h"
144};
145#undef SYS
146#endif
147
148void system_call (struct pt_regs *regs)
149{
150 syscall_t syscall;
151 unsigned long parm0, parm1, parm2, parm3, parm4, parm5;
152 int nargs, res;
153 unsigned int syscallnr;
154 int ps;
155
156#if DEBUG
157 int i;
158 unsigned long parms[6];
159 char *sysname;
160#endif
161
162 regs->syscall = regs->areg[2];
163
164 do_syscall_trace();
165
166 /* Have to load after syscall_trace because strace
167 * sometimes changes regs->syscall.
168 */
169 syscallnr = regs->syscall;
170
171 parm0 = parm1 = parm2 = parm3 = parm4 = parm5 = 0;
172
173 /* Restore interrupt level to syscall invoker's.
174 * If this were in assembly, we wouldn't disable
175 * interrupts in the first place:
176 */
177 local_save_flags (ps);
178 local_irq_restore((ps & ~XCHAL_PS_INTLEVEL_MASK) |
179 (regs->ps & XCHAL_PS_INTLEVEL_MASK) );
180
181 if (syscallnr > __NR_Linux_syscalls) {
182 regs->areg[2] = -ENOSYS;
183 return;
184 }
185
186 syscall = sys_call_table[syscallnr];
187 nargs = sys_narg_table[syscallnr];
188
189 if (syscall == NULL) {
190 regs->areg[2] = -ENOSYS;
191 return;
192 }
193
194 /* There shouldn't be more than six arguments in the table! */
195
196 if (nargs > 6)
197 panic("Internal error - too many syscall arguments (%d)!\n",
198 nargs);
199
200 /* Linux takes system-call arguments in registers. The ABI
201 * and Xtensa software conventions require the system-call
202 * number in a2. If an argument exists in a2, we move it to
203 * the next available register. Note that for improved
204 * efficiency, we do NOT shift all parameters down one
205 * register to maintain the original order.
206 *
207 * At best case (zero arguments), we just write the syscall
208 * number to a2. At worst case (1 to 6 arguments), we move
209 * the argument in a2 to the next available register, then
210 * write the syscall number to a2.
211 *
212 * For clarity, the following truth table enumerates all
213 * possibilities.
214 *
215 * arguments syscall number arg0, arg1, arg2, arg3, arg4, arg5
216 * --------- -------------- ----------------------------------
217 * 0 a2
218 * 1 a2 a3
219 * 2 a2 a4, a3
220 * 3 a2 a5, a3, a4
221 * 4 a2 a6, a3, a4, a5
222 * 5 a2 a7, a3, a4, a5, a6
223 * 6 a2 a8, a3, a4, a5, a6, a7
224 */
225 if (nargs) {
226 parm0 = regs->areg[nargs+2];
227 parm1 = regs->areg[3];
228 parm2 = regs->areg[4];
229 parm3 = regs->areg[5];
230 parm4 = regs->areg[6];
231 parm5 = regs->areg[7];
232 } else /* nargs == 0 */
233 parm0 = (unsigned long) regs;
234
235#if DEBUG
236 parms[0] = parm0;
237 parms[1] = parm1;
238 parms[2] = parm2;
239 parms[3] = parm3;
240 parms[4] = parm4;
241 parms[5] = parm5;
242
243 sysname = sfnames[syscallnr];
244 if (strncmp(sysname, "sys_", 4) == 0)
245 sysname = sysname + 4;
246
247 printk("\017SYSCALL:I:%x:%d:%s %s(", regs->pc, current->pid,
248 current->comm, sysname);
249 for (i = 0; i < nargs; i++)
250 printk((i>0) ? ", %#lx" : "%#lx", parms[i]);
251 printk(")\n");
252#endif
253
254 res = syscall((void *)parm0, parm1, parm2, parm3, parm4, parm5);
255
256#if DEBUG
257 printk("\017SYSCALL:O:%d:%s %s(",current->pid, current->comm, sysname);
258 for (i = 0; i < nargs; i++)
259 printk((i>0) ? ", %#lx" : "%#lx", parms[i]);
260 if (res < 4096)
261 printk(") = %d\n", res);
262 else
263 printk(") = %#x\n", res);
264#endif /* DEBUG */
265
266 regs->areg[2] = res;
267 do_syscall_trace();
268}
269
270/*
271 * Do a system call from kernel instead of calling sys_execve so we
272 * end up with proper pt_regs.
273 */
274int kernel_execve(const char *filename, char *const argv[], char *const envp[])
275{
276 long __res;
277 asm volatile (
278 " mov a5, %2 \n"
279 " mov a4, %4 \n"
280 " mov a3, %3 \n"
281 " movi a2, %1 \n"
282 " syscall \n"
283 " mov %0, a2 \n"
284 : "=a" (__res)
285 : "i" (__NR_execve), "a" (filename), "a" (argv), "a" (envp)
286 : "a2", "a3", "a4", "a5");
287 return __res;
288}
diff --git a/arch/xtensa/kernel/syscalls.h b/arch/xtensa/kernel/syscalls.h
deleted file mode 100644
index 216c10a31501..000000000000
--- a/arch/xtensa/kernel/syscalls.h
+++ /dev/null
@@ -1,247 +0,0 @@
1/*
2 * arch/xtensa/kernel/syscalls.h
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1995, 1996, 1997, 1998 by Ralf Baechle
9 * Copyright (C) 2001 - 2005 Tensilica Inc.
10 *
11 * Changes by Joe Taylor <joe@tensilica.com>
12 */
13
14/*
15 * This file is being included twice - once to build a list of all
16 * syscalls and once to build a table of how many arguments each syscall
17 * accepts. Syscalls that receive a pointer to the saved registers are
18 * marked as having zero arguments.
19 *
20 * The binary compatibility calls are in a separate list.
21 *
22 * Entry '0' used to be system_call. It's removed to disable indirect
23 * system calls for now so user tasks can't recurse. See mips'
24 * sys_syscall for a comparable example.
25 */
26
27SYSCALL(0, 0) /* 00 */
28SYSCALL(sys_exit, 1)
29SYSCALL(sys_ni_syscall, 0)
30SYSCALL(sys_read, 3)
31SYSCALL(sys_write, 3)
32SYSCALL(sys_open, 3) /* 05 */
33SYSCALL(sys_close, 1)
34SYSCALL(sys_ni_syscall, 3)
35SYSCALL(sys_creat, 2)
36SYSCALL(sys_link, 2)
37SYSCALL(sys_unlink, 1) /* 10 */
38SYSCALL(sys_execve, 0)
39SYSCALL(sys_chdir, 1)
40SYSCALL(sys_ni_syscall, 1)
41SYSCALL(sys_mknod, 3)
42SYSCALL(sys_chmod, 2) /* 15 */
43SYSCALL(sys_lchown, 3)
44SYSCALL(sys_ni_syscall, 0)
45SYSCALL(sys_newstat, 2)
46SYSCALL(sys_lseek, 3)
47SYSCALL(sys_getpid, 0) /* 20 */
48SYSCALL(sys_mount, 5)
49SYSCALL(sys_ni_syscall, 1)
50SYSCALL(sys_setuid, 1)
51SYSCALL(sys_getuid, 0)
52SYSCALL(sys_ni_syscall, 1) /* 25 */
53SYSCALL(sys_ptrace, 4)
54SYSCALL(sys_ni_syscall, 1)
55SYSCALL(sys_newfstat, 2)
56SYSCALL(sys_ni_syscall, 0)
57SYSCALL(sys_utime, 2) /* 30 */
58SYSCALL(sys_ni_syscall, 0)
59SYSCALL(sys_ni_syscall, 0)
60SYSCALL(sys_access, 2)
61SYSCALL(sys_ni_syscall, 1)
62SYSCALL(sys_ni_syscall, 0) /* 35 */
63SYSCALL(sys_sync, 0)
64SYSCALL(sys_kill, 2)
65SYSCALL(sys_rename, 2)
66SYSCALL(sys_mkdir, 2)
67SYSCALL(sys_rmdir, 1) /* 40 */
68SYSCALL(sys_dup, 1)
69SYSCALL(sys_pipe, 1)
70SYSCALL(sys_times, 1)
71SYSCALL(sys_ni_syscall, 0)
72SYSCALL(sys_brk, 1) /* 45 */
73SYSCALL(sys_setgid, 1)
74SYSCALL(sys_getgid, 0)
75SYSCALL(sys_ni_syscall, 0)
76SYSCALL(sys_geteuid, 0)
77SYSCALL(sys_getegid, 0) /* 50 */
78SYSCALL(sys_acct, 1)
79SYSCALL(sys_umount, 2)
80SYSCALL(sys_ni_syscall, 0)
81SYSCALL(sys_ioctl, 3)
82SYSCALL(sys_fcntl, 3) /* 55 */
83SYSCALL(sys_ni_syscall, 2)
84SYSCALL(sys_setpgid, 2)
85SYSCALL(sys_ni_syscall, 0)
86SYSCALL(sys_ni_syscall, 0)
87SYSCALL(sys_umask, 1) /* 60 */
88SYSCALL(sys_chroot, 1)
89SYSCALL(sys_ustat, 2)
90SYSCALL(sys_dup2, 2)
91SYSCALL(sys_getppid, 0)
92SYSCALL(sys_ni_syscall, 0) /* 65 */
93SYSCALL(sys_setsid, 0)
94SYSCALL(sys_sigaction, 3)
95SYSCALL(sys_ni_syscall, 0)
96SYSCALL(sys_ni_syscall, 1)
97SYSCALL(sys_setreuid, 2) /* 70 */
98SYSCALL(sys_setregid, 2)
99SYSCALL(sys_sigsuspend, 0)
100SYSCALL(sys_ni_syscall, 1)
101SYSCALL(sys_sethostname, 2)
102SYSCALL(sys_setrlimit, 2) /* 75 */
103SYSCALL(sys_getrlimit, 2)
104SYSCALL(sys_getrusage, 2)
105SYSCALL(sys_gettimeofday, 2)
106SYSCALL(sys_settimeofday, 2)
107SYSCALL(sys_getgroups, 2) /* 80 */
108SYSCALL(sys_setgroups, 2)
109SYSCALL(sys_ni_syscall, 0)
110SYSCALL(sys_symlink, 2)
111SYSCALL(sys_newlstat, 2)
112SYSCALL(sys_readlink, 3) /* 85 */
113SYSCALL(sys_uselib, 1)
114SYSCALL(sys_swapon, 2)
115SYSCALL(sys_reboot, 3)
116SYSCALL(sys_ni_syscall, 3)
117SYSCALL(sys_ni_syscall, 6) /* 90 */
118SYSCALL(sys_munmap, 2)
119SYSCALL(sys_truncate, 2)
120SYSCALL(sys_ftruncate, 2)
121SYSCALL(sys_fchmod, 2)
122SYSCALL(sys_fchown, 3) /* 95 */
123SYSCALL(sys_getpriority, 2)
124SYSCALL(sys_setpriority, 3)
125SYSCALL(sys_ni_syscall, 0)
126SYSCALL(sys_statfs, 2)
127SYSCALL(sys_fstatfs, 2) /* 100 */
128SYSCALL(sys_ni_syscall, 3)
129SYSCALL(sys_ni_syscall, 2)
130SYSCALL(sys_syslog, 3)
131SYSCALL(sys_setitimer, 3)
132SYSCALL(sys_getitimer, 2) /* 105 */
133SYSCALL(sys_newstat, 2)
134SYSCALL(sys_newlstat, 2)
135SYSCALL(sys_newfstat, 2)
136SYSCALL(sys_uname, 1)
137SYSCALL(sys_ni_syscall, 0) /* 110 */
138SYSCALL(sys_vhangup, 0)
139SYSCALL(sys_ni_syscall, 0)
140SYSCALL(sys_ni_syscall, 0)
141SYSCALL(sys_wait4, 4)
142SYSCALL(sys_swapoff, 1) /* 115 */
143SYSCALL(sys_sysinfo, 1)
144SYSCALL(sys_ni_syscall, 0)
145SYSCALL(sys_fsync, 1)
146SYSCALL(sys_sigreturn, 0)
147SYSCALL(sys_clone, 0) /* 120 */
148SYSCALL(sys_setdomainname, 2)
149SYSCALL(sys_newuname, 1)
150SYSCALL(sys_ni_syscall, 0)
151SYSCALL(sys_adjtimex, 1)
152SYSCALL(sys_mprotect, 3) /* 125 */
153SYSCALL(sys_ni_syscall, 3)
154SYSCALL(sys_ni_syscall, 2)
155SYSCALL(sys_init_module, 2)
156SYSCALL(sys_delete_module, 1)
157SYSCALL(sys_ni_syscall, 1) /* 130 */
158SYSCALL(sys_quotactl, 0)
159SYSCALL(sys_getpgid, 1)
160SYSCALL(sys_fchdir, 1)
161SYSCALL(sys_bdflush, 2)
162SYSCALL(sys_sysfs, 3) /* 135 */
163SYSCALL(sys_personality, 1)
164SYSCALL(sys_ni_syscall, 0)
165SYSCALL(sys_setfsuid, 1)
166SYSCALL(sys_setfsgid, 1)
167SYSCALL(sys_llseek, 5) /* 140 */
168SYSCALL(sys_getdents, 3)
169SYSCALL(sys_select, 5)
170SYSCALL(sys_flock, 2)
171SYSCALL(sys_msync, 3)
172SYSCALL(sys_readv, 3) /* 145 */
173SYSCALL(sys_writev, 3)
174SYSCALL(sys_ni_syscall, 3)
175SYSCALL(sys_ni_syscall, 3)
176SYSCALL(sys_ni_syscall, 4) /* handled in fast syscall handler. */
177SYSCALL(sys_ni_syscall, 0) /* 150 */
178SYSCALL(sys_getsid, 1)
179SYSCALL(sys_fdatasync, 1)
180SYSCALL(sys_sysctl, 1)
181SYSCALL(sys_mlock, 2)
182SYSCALL(sys_munlock, 2) /* 155 */
183SYSCALL(sys_mlockall, 1)
184SYSCALL(sys_munlockall, 0)
185SYSCALL(sys_sched_setparam,2)
186SYSCALL(sys_sched_getparam,2)
187SYSCALL(sys_sched_setscheduler,3) /* 160 */
188SYSCALL(sys_sched_getscheduler,1)
189SYSCALL(sys_sched_yield,0)
190SYSCALL(sys_sched_get_priority_max,1)
191SYSCALL(sys_sched_get_priority_min,1)
192SYSCALL(sys_sched_rr_get_interval,2) /* 165 */
193SYSCALL(sys_nanosleep,2)
194SYSCALL(sys_mremap,4)
195SYSCALL(sys_accept, 3)
196SYSCALL(sys_bind, 3)
197SYSCALL(sys_connect, 3) /* 170 */
198SYSCALL(sys_getpeername, 3)
199SYSCALL(sys_getsockname, 3)
200SYSCALL(sys_getsockopt, 5)
201SYSCALL(sys_listen, 2)
202SYSCALL(sys_recv, 4) /* 175 */
203SYSCALL(sys_recvfrom, 6)
204SYSCALL(sys_recvmsg, 3)
205SYSCALL(sys_send, 4)
206SYSCALL(sys_sendmsg, 3)
207SYSCALL(sys_sendto, 6) /* 180 */
208SYSCALL(sys_setsockopt, 5)
209SYSCALL(sys_shutdown, 2)
210SYSCALL(sys_socket, 3)
211SYSCALL(sys_socketpair, 4)
212SYSCALL(sys_setresuid, 3) /* 185 */
213SYSCALL(sys_getresuid, 3)
214SYSCALL(sys_ni_syscall, 5)
215SYSCALL(sys_poll, 3)
216SYSCALL(sys_nfsservctl, 3)
217SYSCALL(sys_setresgid, 3) /* 190 */
218SYSCALL(sys_getresgid, 3)
219SYSCALL(sys_prctl, 5)
220SYSCALL(sys_rt_sigreturn, 0)
221SYSCALL(sys_rt_sigaction, 4)
222SYSCALL(sys_rt_sigprocmask, 4) /* 195 */
223SYSCALL(sys_rt_sigpending, 2)
224SYSCALL(sys_rt_sigtimedwait, 4)
225SYSCALL(sys_rt_sigqueueinfo, 3)
226SYSCALL(sys_rt_sigsuspend, 0)
227SYSCALL(sys_pread64, 5) /* 200 */
228SYSCALL(sys_pwrite64, 5)
229SYSCALL(sys_chown, 3)
230SYSCALL(sys_getcwd, 2)
231SYSCALL(sys_capget, 2)
232SYSCALL(sys_capset, 2) /* 205 */
233SYSCALL(sys_sigaltstack, 0)
234SYSCALL(sys_sendfile, 4)
235SYSCALL(sys_ni_syscall, 0)
236SYSCALL(sys_ni_syscall, 0)
237SYSCALL(sys_mmap, 6) /* 210 */
238SYSCALL(sys_truncate64, 2)
239SYSCALL(sys_ftruncate64, 2)
240SYSCALL(sys_stat64, 2)
241SYSCALL(sys_lstat64, 2)
242SYSCALL(sys_fstat64, 2) /* 215 */
243SYSCALL(sys_pivot_root, 2)
244SYSCALL(sys_mincore, 3)
245SYSCALL(sys_madvise, 3)
246SYSCALL(sys_getdents64, 3)
247SYSCALL(sys_ni_syscall, 0) /* 220 */
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index 37347e369987..a350431363a0 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -47,7 +47,7 @@ unsigned long long sched_clock(void)
47 return (unsigned long long)jiffies * (1000000000 / HZ); 47 return (unsigned long long)jiffies * (1000000000 / HZ);
48} 48}
49 49
50static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs); 50static irqreturn_t timer_interrupt(int irq, void *dev_id);
51static struct irqaction timer_irqaction = { 51static struct irqaction timer_irqaction = {
52 .handler = timer_interrupt, 52 .handler = timer_interrupt,
53 .flags = IRQF_DISABLED, 53 .flags = IRQF_DISABLED,
@@ -150,7 +150,7 @@ EXPORT_SYMBOL(do_gettimeofday);
150 * The timer interrupt is called HZ times per second. 150 * The timer interrupt is called HZ times per second.
151 */ 151 */
152 152
153irqreturn_t timer_interrupt (int irq, void *dev_id, struct pt_regs *regs) 153irqreturn_t timer_interrupt (int irq, void *dev_id)
154{ 154{
155 155
156 unsigned long next; 156 unsigned long next;
@@ -160,9 +160,9 @@ irqreturn_t timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
160again: 160again:
161 while ((signed long)(get_ccount() - next) > 0) { 161 while ((signed long)(get_ccount() - next) > 0) {
162 162
163 profile_tick(CPU_PROFILING, regs); 163 profile_tick(CPU_PROFILING);
164#ifndef CONFIG_SMP 164#ifndef CONFIG_SMP
165 update_process_times(user_mode(regs)); 165 update_process_times(user_mode(get_irq_regs()));
166#endif 166#endif
167 167
168 write_seqlock(&xtime_lock); 168 write_seqlock(&xtime_lock);
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index ce077d6bf3a0..693ab268485e 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -75,7 +75,7 @@ extern void system_call (struct pt_regs*);
75#define USER 0x02 75#define USER 0x02
76 76
77#define COPROCESSOR(x) \ 77#define COPROCESSOR(x) \
78{ XCHAL_EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER, fast_coprocessor } 78{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER, fast_coprocessor }
79 79
80typedef struct { 80typedef struct {
81 int cause; 81 int cause;
@@ -85,38 +85,38 @@ typedef struct {
85 85
86dispatch_init_table_t __init dispatch_init_table[] = { 86dispatch_init_table_t __init dispatch_init_table[] = {
87 87
88{ XCHAL_EXCCAUSE_ILLEGAL_INSTRUCTION, 0, do_illegal_instruction}, 88{ EXCCAUSE_ILLEGAL_INSTRUCTION, 0, do_illegal_instruction},
89{ XCHAL_EXCCAUSE_SYSTEM_CALL, KRNL, fast_syscall_kernel }, 89{ EXCCAUSE_SYSTEM_CALL, KRNL, fast_syscall_kernel },
90{ XCHAL_EXCCAUSE_SYSTEM_CALL, USER, fast_syscall_user }, 90{ EXCCAUSE_SYSTEM_CALL, USER, fast_syscall_user },
91{ XCHAL_EXCCAUSE_SYSTEM_CALL, 0, system_call }, 91{ EXCCAUSE_SYSTEM_CALL, 0, system_call },
92/* XCHAL_EXCCAUSE_INSTRUCTION_FETCH unhandled */ 92/* EXCCAUSE_INSTRUCTION_FETCH unhandled */
93/* XCHAL_EXCCAUSE_LOAD_STORE_ERROR unhandled*/ 93/* EXCCAUSE_LOAD_STORE_ERROR unhandled*/
94{ XCHAL_EXCCAUSE_LEVEL1_INTERRUPT, 0, do_interrupt }, 94{ EXCCAUSE_LEVEL1_INTERRUPT, 0, do_interrupt },
95{ XCHAL_EXCCAUSE_ALLOCA, USER|KRNL, fast_alloca }, 95{ EXCCAUSE_ALLOCA, USER|KRNL, fast_alloca },
96/* XCHAL_EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */ 96/* EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */
97/* XCHAL_EXCCAUSE_PRIVILEGED unhandled */ 97/* EXCCAUSE_PRIVILEGED unhandled */
98#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION 98#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
99#ifdef CONFIG_UNALIGNED_USER 99#ifdef CONFIG_UNALIGNED_USER
100{ XCHAL_EXCCAUSE_UNALIGNED, USER, fast_unaligned }, 100{ EXCCAUSE_UNALIGNED, USER, fast_unaligned },
101#else 101#else
102{ XCHAL_EXCCAUSE_UNALIGNED, 0, do_unaligned_user }, 102{ EXCCAUSE_UNALIGNED, 0, do_unaligned_user },
103#endif 103#endif
104{ XCHAL_EXCCAUSE_UNALIGNED, KRNL, fast_unaligned }, 104{ EXCCAUSE_UNALIGNED, KRNL, fast_unaligned },
105#endif 105#endif
106{ XCHAL_EXCCAUSE_ITLB_MISS, 0, do_page_fault }, 106{ EXCCAUSE_ITLB_MISS, 0, do_page_fault },
107{ XCHAL_EXCCAUSE_ITLB_MISS, USER|KRNL, fast_second_level_miss}, 107{ EXCCAUSE_ITLB_MISS, USER|KRNL, fast_second_level_miss},
108{ XCHAL_EXCCAUSE_ITLB_MULTIHIT, 0, do_multihit }, 108{ EXCCAUSE_ITLB_MULTIHIT, 0, do_multihit },
109{ XCHAL_EXCCAUSE_ITLB_PRIVILEGE, 0, do_page_fault }, 109{ EXCCAUSE_ITLB_PRIVILEGE, 0, do_page_fault },
110/* XCHAL_EXCCAUSE_SIZE_RESTRICTION unhandled */ 110/* EXCCAUSE_SIZE_RESTRICTION unhandled */
111{ XCHAL_EXCCAUSE_FETCH_CACHE_ATTRIBUTE, 0, do_page_fault }, 111{ EXCCAUSE_FETCH_CACHE_ATTRIBUTE, 0, do_page_fault },
112{ XCHAL_EXCCAUSE_DTLB_MISS, USER|KRNL, fast_second_level_miss}, 112{ EXCCAUSE_DTLB_MISS, USER|KRNL, fast_second_level_miss},
113{ XCHAL_EXCCAUSE_DTLB_MISS, 0, do_page_fault }, 113{ EXCCAUSE_DTLB_MISS, 0, do_page_fault },
114{ XCHAL_EXCCAUSE_DTLB_MULTIHIT, 0, do_multihit }, 114{ EXCCAUSE_DTLB_MULTIHIT, 0, do_multihit },
115{ XCHAL_EXCCAUSE_DTLB_PRIVILEGE, 0, do_page_fault }, 115{ EXCCAUSE_DTLB_PRIVILEGE, 0, do_page_fault },
116/* XCHAL_EXCCAUSE_DTLB_SIZE_RESTRICTION unhandled */ 116/* EXCCAUSE_DTLB_SIZE_RESTRICTION unhandled */
117{ XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE, USER|KRNL, fast_store_prohibited }, 117{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, USER|KRNL, fast_store_prohibited },
118{ XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0, do_page_fault }, 118{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0, do_page_fault },
119{ XCHAL_EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0, do_page_fault }, 119{ EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0, do_page_fault },
120/* XCCHAL_EXCCAUSE_FLOATING_POINT unhandled */ 120/* XCCHAL_EXCCAUSE_FLOATING_POINT unhandled */
121#if (XCHAL_CP_MASK & 1) 121#if (XCHAL_CP_MASK & 1)
122COPROCESSOR(0), 122COPROCESSOR(0),
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index 0e74397bfa2b..eb2d7bb69ee0 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -53,6 +53,8 @@
53#include <asm/thread_info.h> 53#include <asm/thread_info.h>
54#include <asm/processor.h> 54#include <asm/processor.h>
55 55
56#define WINDOW_VECTORS_SIZE 0x180
57
56 58
57/* 59/*
58 * User exception vector. (Exceptions with PS.UM == 1, PS.EXCM == 0) 60 * User exception vector. (Exceptions with PS.UM == 1, PS.EXCM == 0)
@@ -210,7 +212,7 @@ ENTRY(_DoubleExceptionVector)
210 /* Check for kernel double exception (usually fatal). */ 212 /* Check for kernel double exception (usually fatal). */
211 213
212 rsr a3, PS 214 rsr a3, PS
213 _bbci.l a3, PS_UM_SHIFT, .Lksp 215 _bbci.l a3, PS_UM_BIT, .Lksp
214 216
215 /* Check if we are currently handling a window exception. */ 217 /* Check if we are currently handling a window exception. */
216 /* Note: We don't need to indicate that we enter a critical section. */ 218 /* Note: We don't need to indicate that we enter a critical section. */
@@ -219,7 +221,7 @@ ENTRY(_DoubleExceptionVector)
219 221
220 movi a3, XCHAL_WINDOW_VECTORS_VADDR 222 movi a3, XCHAL_WINDOW_VECTORS_VADDR
221 _bltu a0, a3, .Lfixup 223 _bltu a0, a3, .Lfixup
222 addi a3, a3, XSHAL_WINDOW_VECTORS_SIZE 224 addi a3, a3, WINDOW_VECTORS_SIZE
223 _bgeu a0, a3, .Lfixup 225 _bgeu a0, a3, .Lfixup
224 226
225 /* Window overflow/underflow exception. Get stack pointer. */ 227 /* Window overflow/underflow exception. Get stack pointer. */
@@ -245,7 +247,7 @@ ENTRY(_DoubleExceptionVector)
245 247
246 wsr a2, DEPC # save stack pointer temporarily 248 wsr a2, DEPC # save stack pointer temporarily
247 rsr a0, PS 249 rsr a0, PS
248 extui a0, a0, XCHAL_PS_OWB_SHIFT, XCHAL_PS_OWB_BITS 250 extui a0, a0, PS_OWB_SHIFT, 4
249 wsr a0, WINDOWBASE 251 wsr a0, WINDOWBASE
250 rsync 252 rsync
251 253
@@ -312,8 +314,8 @@ ENTRY(_DoubleExceptionVector)
312.Lksp: /* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */ 314.Lksp: /* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */
313 315
314 rsr a3, EXCCAUSE 316 rsr a3, EXCCAUSE
315 beqi a3, XCHAL_EXCCAUSE_ITLB_MISS, 1f 317 beqi a3, EXCCAUSE_ITLB_MISS, 1f
316 addi a3, a3, -XCHAL_EXCCAUSE_DTLB_MISS 318 addi a3, a3, -EXCCAUSE_DTLB_MISS
317 bnez a3, .Lunrecoverable 319 bnez a3, .Lunrecoverable
3181: movi a3, fast_second_level_miss_double_kernel 3201: movi a3, fast_second_level_miss_double_kernel
319 jx a3 321 jx a3
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index cfe75f528725..a36c104c3a52 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -16,19 +16,17 @@
16 16
17#include <asm-generic/vmlinux.lds.h> 17#include <asm-generic/vmlinux.lds.h>
18 18
19#define _NOCLANGUAGE 19#include <asm/variant/core.h>
20#include <xtensa/config/core.h>
21#include <xtensa/config/system.h>
22OUTPUT_ARCH(xtensa) 20OUTPUT_ARCH(xtensa)
23ENTRY(_start) 21ENTRY(_start)
24 22
25#if XCHAL_MEMORY_ORDER == XTHAL_BIGENDIAN 23#ifdef __XTENSA_EB__
26jiffies = jiffies_64 + 4; 24jiffies = jiffies_64 + 4;
27#else 25#else
28jiffies = jiffies_64; 26jiffies = jiffies_64;
29#endif 27#endif
30 28
31#define KERNELOFFSET 0x1000 29#define KERNELOFFSET 0xd0001000
32 30
33/* Note: In the following macros, it would be nice to specify only the 31/* Note: In the following macros, it would be nice to specify only the
34 vector name and section kind and construct "sym" and "section" using 32 vector name and section kind and construct "sym" and "section" using
@@ -75,7 +73,7 @@ jiffies = jiffies_64;
75 73
76SECTIONS 74SECTIONS
77{ 75{
78 . = XCHAL_KSEG_CACHED_VADDR + KERNELOFFSET; 76 . = KERNELOFFSET;
79 /* .text section */ 77 /* .text section */
80 78
81 _text = .; 79 _text = .;
@@ -159,7 +157,7 @@ SECTIONS
159 157
160 /* Initialization code and data: */ 158 /* Initialization code and data: */
161 159
162 . = ALIGN(1<<XCHAL_MMU_MIN_PTE_PAGE_SIZE); 160 . = ALIGN(1 << 12);
163 __init_begin = .; 161 __init_begin = .;
164 .init.text : { 162 .init.text : {
165 _sinittext = .; 163 _sinittext = .;
@@ -223,32 +221,32 @@ SECTIONS
223 .dummy) 221 .dummy)
224 SECTION_VECTOR (_DebugInterruptVector_literal, 222 SECTION_VECTOR (_DebugInterruptVector_literal,
225 .DebugInterruptVector.literal, 223 .DebugInterruptVector.literal,
226 XCHAL_INTLEVEL_VECTOR_VADDR(XCHAL_DEBUGLEVEL) - 4, 224 XCHAL_DEBUG_VECTOR_VADDR - 4,
227 SIZEOF(.WindowVectors.text), 225 SIZEOF(.WindowVectors.text),
228 .WindowVectors.text) 226 .WindowVectors.text)
229 SECTION_VECTOR (_DebugInterruptVector_text, 227 SECTION_VECTOR (_DebugInterruptVector_text,
230 .DebugInterruptVector.text, 228 .DebugInterruptVector.text,
231 XCHAL_INTLEVEL_VECTOR_VADDR(XCHAL_DEBUGLEVEL), 229 XCHAL_DEBUG_VECTOR_VADDR,
232 4, 230 4,
233 .DebugInterruptVector.literal) 231 .DebugInterruptVector.literal)
234 SECTION_VECTOR (_KernelExceptionVector_literal, 232 SECTION_VECTOR (_KernelExceptionVector_literal,
235 .KernelExceptionVector.literal, 233 .KernelExceptionVector.literal,
236 XCHAL_KERNELEXC_VECTOR_VADDR - 4, 234 XCHAL_KERNEL_VECTOR_VADDR - 4,
237 SIZEOF(.DebugInterruptVector.text), 235 SIZEOF(.DebugInterruptVector.text),
238 .DebugInterruptVector.text) 236 .DebugInterruptVector.text)
239 SECTION_VECTOR (_KernelExceptionVector_text, 237 SECTION_VECTOR (_KernelExceptionVector_text,
240 .KernelExceptionVector.text, 238 .KernelExceptionVector.text,
241 XCHAL_KERNELEXC_VECTOR_VADDR, 239 XCHAL_KERNEL_VECTOR_VADDR,
242 4, 240 4,
243 .KernelExceptionVector.literal) 241 .KernelExceptionVector.literal)
244 SECTION_VECTOR (_UserExceptionVector_literal, 242 SECTION_VECTOR (_UserExceptionVector_literal,
245 .UserExceptionVector.literal, 243 .UserExceptionVector.literal,
246 XCHAL_USEREXC_VECTOR_VADDR - 4, 244 XCHAL_USER_VECTOR_VADDR - 4,
247 SIZEOF(.KernelExceptionVector.text), 245 SIZEOF(.KernelExceptionVector.text),
248 .KernelExceptionVector.text) 246 .KernelExceptionVector.text)
249 SECTION_VECTOR (_UserExceptionVector_text, 247 SECTION_VECTOR (_UserExceptionVector_text,
250 .UserExceptionVector.text, 248 .UserExceptionVector.text,
251 XCHAL_USEREXC_VECTOR_VADDR, 249 XCHAL_USER_VECTOR_VADDR,
252 4, 250 4,
253 .UserExceptionVector.literal) 251 .UserExceptionVector.literal)
254 SECTION_VECTOR (_DoubleExceptionVector_literal, 252 SECTION_VECTOR (_DoubleExceptionVector_literal,
@@ -263,7 +261,7 @@ SECTIONS
263 .DoubleExceptionVector.literal) 261 .DoubleExceptionVector.literal)
264 262
265 . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; 263 . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
266 . = ALIGN(1<<XCHAL_MMU_MIN_PTE_PAGE_SIZE); 264 . = ALIGN(1 << 12);
267 265
268 __init_end = .; 266 __init_end = .;
269 267
diff --git a/arch/xtensa/lib/checksum.S b/arch/xtensa/lib/checksum.S
index e2d64dfd530c..9d9cd990afa6 100644
--- a/arch/xtensa/lib/checksum.S
+++ b/arch/xtensa/lib/checksum.S
@@ -16,8 +16,7 @@
16 16
17#include <asm/errno.h> 17#include <asm/errno.h>
18#include <linux/linkage.h> 18#include <linux/linkage.h>
19#define _ASMLANGUAGE 19#include <asm/variant/core.h>
20#include <xtensa/config/core.h>
21 20
22/* 21/*
23 * computes a partial checksum, e.g. for TCP/UDP fragments 22 * computes a partial checksum, e.g. for TCP/UDP fragments
diff --git a/arch/xtensa/lib/memcopy.S b/arch/xtensa/lib/memcopy.S
index e8f6d7eb7222..ddda8f4bc862 100644
--- a/arch/xtensa/lib/memcopy.S
+++ b/arch/xtensa/lib/memcopy.S
@@ -9,7 +9,7 @@
9 * Copyright (C) 2002 - 2005 Tensilica Inc. 9 * Copyright (C) 2002 - 2005 Tensilica Inc.
10 */ 10 */
11 11
12#include <xtensa/coreasm.h> 12#include <asm/variant/core.h>
13 13
14 .macro src_b r, w0, w1 14 .macro src_b r, w0, w1
15#ifdef __XTENSA_EB__ 15#ifdef __XTENSA_EB__
diff --git a/arch/xtensa/lib/memset.S b/arch/xtensa/lib/memset.S
index 4de25134bc62..56a17495b2db 100644
--- a/arch/xtensa/lib/memset.S
+++ b/arch/xtensa/lib/memset.S
@@ -11,7 +11,7 @@
11 * Copyright (C) 2002 Tensilica Inc. 11 * Copyright (C) 2002 Tensilica Inc.
12 */ 12 */
13 13
14#include <xtensa/coreasm.h> 14#include <asm/variant/core.h>
15 15
16/* 16/*
17 * void *memset(void *dst, int c, size_t length) 17 * void *memset(void *dst, int c, size_t length)
diff --git a/arch/xtensa/lib/strncpy_user.S b/arch/xtensa/lib/strncpy_user.S
index 71d55df43893..a834057bda6b 100644
--- a/arch/xtensa/lib/strncpy_user.S
+++ b/arch/xtensa/lib/strncpy_user.S
@@ -11,7 +11,7 @@
11 * Copyright (C) 2002 Tensilica Inc. 11 * Copyright (C) 2002 Tensilica Inc.
12 */ 12 */
13 13
14#include <xtensa/coreasm.h> 14#include <asm/variant/core.h>
15#include <linux/errno.h> 15#include <linux/errno.h>
16 16
17/* Load or store instructions that may cause exceptions use the EX macro. */ 17/* Load or store instructions that may cause exceptions use the EX macro. */
diff --git a/arch/xtensa/lib/strnlen_user.S b/arch/xtensa/lib/strnlen_user.S
index cdff4d670f3b..5e9c1e709b2e 100644
--- a/arch/xtensa/lib/strnlen_user.S
+++ b/arch/xtensa/lib/strnlen_user.S
@@ -11,7 +11,7 @@
11 * Copyright (C) 2002 Tensilica Inc. 11 * Copyright (C) 2002 Tensilica Inc.
12 */ 12 */
13 13
14#include <xtensa/coreasm.h> 14#include <asm/variant/core.h>
15 15
16/* Load or store instructions that may cause exceptions use the EX macro. */ 16/* Load or store instructions that may cause exceptions use the EX macro. */
17 17
diff --git a/arch/xtensa/lib/usercopy.S b/arch/xtensa/lib/usercopy.S
index 4641ef510f0e..a8ab1d4fe0ae 100644
--- a/arch/xtensa/lib/usercopy.S
+++ b/arch/xtensa/lib/usercopy.S
@@ -53,7 +53,7 @@
53 * a11/ original length 53 * a11/ original length
54 */ 54 */
55 55
56#include <xtensa/coreasm.h> 56#include <asm/variant/core.h>
57 57
58#ifdef __XTENSA_EB__ 58#ifdef __XTENSA_EB__
59#define ALIGN(R, W0, W1) src R, W0, W1 59#define ALIGN(R, W0, W1) src R, W0, W1
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index dd0dbec2e57e..3dc6f2f07bbe 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -21,7 +21,7 @@
21#include <asm/system.h> 21#include <asm/system.h>
22#include <asm/pgalloc.h> 22#include <asm/pgalloc.h>
23 23
24unsigned long asid_cache = ASID_FIRST_VERSION; 24unsigned long asid_cache = ASID_USER_FIRST;
25void bad_page_fault(struct pt_regs*, unsigned long, int); 25void bad_page_fault(struct pt_regs*, unsigned long, int);
26 26
27/* 27/*
@@ -58,10 +58,10 @@ void do_page_fault(struct pt_regs *regs)
58 return; 58 return;
59 } 59 }
60 60
61 is_write = (exccause == XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0; 61 is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
62 is_exec = (exccause == XCHAL_EXCCAUSE_ITLB_PRIVILEGE || 62 is_exec = (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
63 exccause == XCHAL_EXCCAUSE_ITLB_MISS || 63 exccause == EXCCAUSE_ITLB_MISS ||
64 exccause == XCHAL_EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0; 64 exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;
65 65
66#if 0 66#if 0
67 printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid, 67 printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid,
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 660ef058c149..e1ec2d1e8189 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -141,8 +141,8 @@ void __init bootmem_init(void)
141 if (min_low_pfn > max_pfn) 141 if (min_low_pfn > max_pfn)
142 panic("No memory found!\n"); 142 panic("No memory found!\n");
143 143
144 max_low_pfn = max_pfn < MAX_LOW_MEMORY >> PAGE_SHIFT ? 144 max_low_pfn = max_pfn < MAX_MEM_PFN >> PAGE_SHIFT ?
145 max_pfn : MAX_LOW_MEMORY >> PAGE_SHIFT; 145 max_pfn : MAX_MEM_PFN >> PAGE_SHIFT;
146 146
147 /* Find an area to use for the bootmem bitmap. */ 147 /* Find an area to use for the bootmem bitmap. */
148 148
@@ -215,7 +215,7 @@ void __init init_mmu (void)
215 215
216 /* Set rasid register to a known value. */ 216 /* Set rasid register to a known value. */
217 217
218 set_rasid_register (ASID_ALL_RESERVED); 218 set_rasid_register (ASID_USER_FIRST);
219 219
220 /* Set PTEVADDR special register to the start of the page 220 /* Set PTEVADDR special register to the start of the page
221 * table, which is in kernel mappable space (ie. not 221 * table, which is in kernel mappable space (ie. not
diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S
index 327c0f17187c..ae085332c607 100644
--- a/arch/xtensa/mm/misc.S
+++ b/arch/xtensa/mm/misc.S
@@ -19,9 +19,8 @@
19#include <linux/linkage.h> 19#include <linux/linkage.h>
20#include <asm/page.h> 20#include <asm/page.h>
21#include <asm/pgtable.h> 21#include <asm/pgtable.h>
22 22#include <asm/asmmacro.h>
23#include <xtensa/cacheasm.h> 23#include <asm/cacheasm.h>
24#include <xtensa/cacheattrasm.h>
25 24
26/* clear_page (page) */ 25/* clear_page (page) */
27 26
@@ -74,104 +73,66 @@ ENTRY(copy_page)
74 73
75 retw 74 retw
76 75
77
78/* 76/*
79 * void __flush_invalidate_cache_all(void) 77 * void __invalidate_icache_page(ulong start)
80 */ 78 */
81 79
82ENTRY(__flush_invalidate_cache_all) 80ENTRY(__invalidate_icache_page)
83 entry sp, 16 81 entry sp, 16
84 dcache_writeback_inv_all a2, a3
85 icache_invalidate_all a2, a3
86 retw
87 82
88/* 83 ___invalidate_icache_page a2 a3
89 * void __invalidate_icache_all(void) 84 isync
90 */
91 85
92ENTRY(__invalidate_icache_all)
93 entry sp, 16
94 icache_invalidate_all a2, a3
95 retw 86 retw
96 87
97/* 88/*
98 * void __flush_invalidate_dcache_all(void) 89 * void __invalidate_dcache_page(ulong start)
99 */ 90 */
100 91
101ENTRY(__flush_invalidate_dcache_all) 92ENTRY(__invalidate_dcache_page)
102 entry sp, 16 93 entry sp, 16
103 dcache_writeback_inv_all a2, a3
104 retw
105
106 94
107/* 95 ___invalidate_dcache_page a2 a3
108 * void __flush_invalidate_cache_range(ulong start, ulong size) 96 dsync
109 */
110 97
111ENTRY(__flush_invalidate_cache_range)
112 entry sp, 16
113 mov a4, a2
114 mov a5, a3
115 dcache_writeback_inv_region a4, a5, a6
116 icache_invalidate_region a2, a3, a4
117 retw 98 retw
118 99
119/* 100/*
120 * void __invalidate_icache_page(ulong start) 101 * void __flush_invalidate_dcache_page(ulong start)
121 */ 102 */
122 103
123ENTRY(__invalidate_icache_page) 104ENTRY(__flush_invalidate_dcache_page)
124 entry sp, 16 105 entry sp, 16
125 movi a3, PAGE_SIZE
126 icache_invalidate_region a2, a3, a4
127 retw
128 106
129/* 107 ___flush_invalidate_dcache_page a2 a3
130 * void __invalidate_dcache_page(ulong start)
131 */
132 108
133ENTRY(__invalidate_dcache_page) 109 dsync
134 entry sp, 16
135 movi a3, PAGE_SIZE
136 dcache_invalidate_region a2, a3, a4
137 retw 110 retw
138 111
139/* 112/*
140 * void __invalidate_icache_range(ulong start, ulong size) 113 * void __flush_dcache_page(ulong start)
141 */ 114 */
142 115
143ENTRY(__invalidate_icache_range) 116ENTRY(__flush_dcache_page)
144 entry sp, 16 117 entry sp, 16
145 icache_invalidate_region a2, a3, a4
146 retw
147 118
148/* 119 ___flush_dcache_page a2 a3
149 * void __invalidate_dcache_range(ulong start, ulong size)
150 */
151 120
152ENTRY(__invalidate_dcache_range) 121 dsync
153 entry sp, 16
154 dcache_invalidate_region a2, a3, a4
155 retw 122 retw
156 123
157/*
158 * void __flush_dcache_page(ulong start)
159 */
160 124
161ENTRY(__flush_dcache_page)
162 entry sp, 16
163 movi a3, PAGE_SIZE
164 dcache_writeback_region a2, a3, a4
165 retw
166 125
167/* 126/*
168 * void __flush_invalidate_dcache_page(ulong start) 127 * void __invalidate_icache_range(ulong start, ulong size)
169 */ 128 */
170 129
171ENTRY(__flush_invalidate_dcache_page) 130ENTRY(__invalidate_icache_range)
172 entry sp, 16 131 entry sp, 16
173 movi a3, PAGE_SIZE 132
174 dcache_writeback_inv_region a2, a3, a4 133 ___invalidate_icache_range a2 a3 a4
134 isync
135
175 retw 136 retw
176 137
177/* 138/*
@@ -180,195 +141,69 @@ ENTRY(__flush_invalidate_dcache_page)
180 141
181ENTRY(__flush_invalidate_dcache_range) 142ENTRY(__flush_invalidate_dcache_range)
182 entry sp, 16 143 entry sp, 16
183 dcache_writeback_inv_region a2, a3, a4
184 retw
185 144
186/* 145 ___flush_invalidate_dcache_range a2 a3 a4
187 * void __invalidate_dcache_all(void) 146 dsync
188 */
189 147
190ENTRY(__invalidate_dcache_all)
191 entry sp, 16
192 dcache_invalidate_all a2, a3
193 retw 148 retw
194 149
195/* 150/*
196 * void __flush_invalidate_dcache_page_phys(ulong start) 151 * void _flush_dcache_range(ulong start, ulong size)
197 */ 152 */
198 153
199ENTRY(__flush_invalidate_dcache_page_phys) 154ENTRY(__flush_dcache_range)
200 entry sp, 16 155 entry sp, 16
201 156
202 movi a3, XCHAL_DCACHE_SIZE 157 ___flush_dcache_range a2 a3 a4
203 movi a4, PAGE_MASK | 1
204 addi a2, a2, 1
205
2061: addi a3, a3, -XCHAL_DCACHE_LINESIZE
207
208 ldct a6, a3
209 dsync 158 dsync
210 and a6, a6, a4
211 beq a6, a2, 2f
212 bgeui a3, 2, 1b
213 retw
214 159
2152: diwbi a3, 0
216 bgeui a3, 2, 1b
217 retw 160 retw
218 161
219ENTRY(check_dcache_low0) 162/*
220 entry sp, 16 163 * void _invalidate_dcache_range(ulong start, ulong size)
221 164 */
222 movi a3, XCHAL_DCACHE_SIZE / 4
223 movi a4, PAGE_MASK | 1
224 addi a2, a2, 1
225
2261: addi a3, a3, -XCHAL_DCACHE_LINESIZE
227
228 ldct a6, a3
229 dsync
230 and a6, a6, a4
231 beq a6, a2, 2f
232 bgeui a3, 2, 1b
233 retw
234
2352: j 2b
236
237ENTRY(check_dcache_high0)
238 entry sp, 16
239
240 movi a5, XCHAL_DCACHE_SIZE / 4
241 movi a3, XCHAL_DCACHE_SIZE / 2
242 movi a4, PAGE_MASK | 1
243 addi a2, a2, 1
244
2451: addi a3, a3, -XCHAL_DCACHE_LINESIZE
246 addi a5, a5, -XCHAL_DCACHE_LINESIZE
247
248 ldct a6, a3
249 dsync
250 and a6, a6, a4
251 beq a6, a2, 2f
252 bgeui a5, 2, 1b
253 retw
254
2552: j 2b
256 165
257ENTRY(check_dcache_low1) 166ENTRY(__invalidate_dcache_range)
258 entry sp, 16 167 entry sp, 16
259 168
260 movi a5, XCHAL_DCACHE_SIZE / 4 169 ___invalidate_dcache_range a2 a3 a4
261 movi a3, XCHAL_DCACHE_SIZE * 3 / 4
262 movi a4, PAGE_MASK | 1
263 addi a2, a2, 1
264 170
2651: addi a3, a3, -XCHAL_DCACHE_LINESIZE
266 addi a5, a5, -XCHAL_DCACHE_LINESIZE
267 171
268 ldct a6, a3
269 dsync
270 and a6, a6, a4
271 beq a6, a2, 2f
272 bgeui a5, 2, 1b
273 retw 172 retw
274 173
2752: j 2b 174/*
175 * void _invalidate_icache_all(void)
176 */
276 177
277ENTRY(check_dcache_high1) 178ENTRY(__invalidate_icache_all)
278 entry sp, 16 179 entry sp, 16
279 180
280 movi a5, XCHAL_DCACHE_SIZE / 4 181 ___invalidate_icache_all a2 a3
281 movi a3, XCHAL_DCACHE_SIZE 182 isync
282 movi a4, PAGE_MASK | 1
283 addi a2, a2, 1
284
2851: addi a3, a3, -XCHAL_DCACHE_LINESIZE
286 addi a5, a5, -XCHAL_DCACHE_LINESIZE
287 183
288 ldct a6, a3
289 dsync
290 and a6, a6, a4
291 beq a6, a2, 2f
292 bgeui a5, 2, 1b
293 retw 184 retw
294 185
2952: j 2b
296
297
298/* 186/*
299 * void __invalidate_icache_page_phys(ulong start) 187 * void _flush_invalidate_dcache_all(void)
300 */ 188 */
301 189
302ENTRY(__invalidate_icache_page_phys) 190ENTRY(__flush_invalidate_dcache_all)
303 entry sp, 16 191 entry sp, 16
304 192
305 movi a3, XCHAL_ICACHE_SIZE 193 ___flush_invalidate_dcache_all a2 a3
306 movi a4, PAGE_MASK | 1 194 dsync
307 addi a2, a2, 1
308
3091: addi a3, a3, -XCHAL_ICACHE_LINESIZE
310
311 lict a6, a3
312 isync
313 and a6, a6, a4
314 beq a6, a2, 2f
315 bgeui a3, 2, 1b
316 retw
317 195
3182: iii a3, 0
319 bgeui a3, 2, 1b
320 retw 196 retw
321 197
198/*
199 * void _invalidate_dcache_all(void)
200 */
322 201
323#if 0 202ENTRY(__invalidate_dcache_all)
324
325 movi a3, XCHAL_DCACHE_WAYS - 1
326 movi a4, PAGE_SIZE
327
3281: mov a5, a2
329 add a6, a2, a4
330
3312: diwbi a5, 0
332 diwbi a5, XCHAL_DCACHE_LINESIZE
333 diwbi a5, XCHAL_DCACHE_LINESIZE * 2
334 diwbi a5, XCHAL_DCACHE_LINESIZE * 3
335
336 addi a5, a5, XCHAL_DCACHE_LINESIZE * 4
337 blt a5, a6, 2b
338
339 addi a3, a3, -1
340 addi a2, a2, XCHAL_DCACHE_SIZE / XCHAL_DCACHE_WAYS
341 bgez a3, 1b
342
343 retw
344
345ENTRY(__invalidate_icache_page_index)
346 entry sp, 16 203 entry sp, 16
347 204
348 movi a3, XCHAL_ICACHE_WAYS - 1 205 ___invalidate_dcache_all a2 a3
349 movi a4, PAGE_SIZE 206 dsync
350
3511: mov a5, a2
352 add a6, a2, a4
353
3542: iii a5, 0
355 iii a5, XCHAL_ICACHE_LINESIZE
356 iii a5, XCHAL_ICACHE_LINESIZE * 2
357 iii a5, XCHAL_ICACHE_LINESIZE * 3
358
359 addi a5, a5, XCHAL_ICACHE_LINESIZE * 4
360 blt a5, a6, 2b
361
362 addi a3, a3, -1
363 addi a2, a2, XCHAL_ICACHE_SIZE / XCHAL_ICACHE_WAYS
364 bgez a3, 2b
365 207
366 retw 208 retw
367 209
368#endif
369
370
371
372
373
374
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index 0fefb8666874..239461d8ea88 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -24,12 +24,12 @@
24 24
25static inline void __flush_itlb_all (void) 25static inline void __flush_itlb_all (void)
26{ 26{
27 int way, index; 27 int w, i;
28 28
29 for (way = 0; way < XCHAL_ITLB_ARF_WAYS; way++) { 29 for (w = 0; w < ITLB_ARF_WAYS; w++) {
30 for (index = 0; index < ITLB_ENTRIES_PER_ARF_WAY; index++) { 30 for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) {
31 int entry = way + (index << PAGE_SHIFT); 31 int e = w + (i << PAGE_SHIFT);
32 invalidate_itlb_entry_no_isync (entry); 32 invalidate_itlb_entry_no_isync(e);
33 } 33 }
34 } 34 }
35 asm volatile ("isync\n"); 35 asm volatile ("isync\n");
@@ -37,12 +37,12 @@ static inline void __flush_itlb_all (void)
37 37
38static inline void __flush_dtlb_all (void) 38static inline void __flush_dtlb_all (void)
39{ 39{
40 int way, index; 40 int w, i;
41 41
42 for (way = 0; way < XCHAL_DTLB_ARF_WAYS; way++) { 42 for (w = 0; w < DTLB_ARF_WAYS; w++) {
43 for (index = 0; index < DTLB_ENTRIES_PER_ARF_WAY; index++) { 43 for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) {
44 int entry = way + (index << PAGE_SHIFT); 44 int e = w + (i << PAGE_SHIFT);
45 invalidate_dtlb_entry_no_isync (entry); 45 invalidate_dtlb_entry_no_isync(e);
46 } 46 }
47 } 47 }
48 asm volatile ("isync\n"); 48 asm volatile ("isync\n");
@@ -63,21 +63,25 @@ void flush_tlb_all (void)
63 63
64void flush_tlb_mm(struct mm_struct *mm) 64void flush_tlb_mm(struct mm_struct *mm)
65{ 65{
66#if 0
67 printk("[tlbmm<%lx>]\n", (unsigned long)mm->context);
68#endif
69
70 if (mm == current->active_mm) { 66 if (mm == current->active_mm) {
71 int flags; 67 int flags;
72 local_save_flags(flags); 68 local_save_flags(flags);
73 get_new_mmu_context(mm, asid_cache); 69 __get_new_mmu_context(mm);
74 set_rasid_register(ASID_INSERT(mm->context)); 70 __load_mmu_context(mm);
75 local_irq_restore(flags); 71 local_irq_restore(flags);
76 } 72 }
77 else 73 else
78 mm->context = 0; 74 mm->context = 0;
79} 75}
80 76
77#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
78#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
79#if _ITLB_ENTRIES > _DTLB_ENTRIES
80# define _TLB_ENTRIES _ITLB_ENTRIES
81#else
82# define _TLB_ENTRIES _DTLB_ENTRIES
83#endif
84
81void flush_tlb_range (struct vm_area_struct *vma, 85void flush_tlb_range (struct vm_area_struct *vma,
82 unsigned long start, unsigned long end) 86 unsigned long start, unsigned long end)
83{ 87{
@@ -93,7 +97,7 @@ void flush_tlb_range (struct vm_area_struct *vma,
93#endif 97#endif
94 local_save_flags(flags); 98 local_save_flags(flags);
95 99
96 if (end-start + (PAGE_SIZE-1) <= SMALLEST_NTLB_ENTRIES << PAGE_SHIFT) { 100 if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) {
97 int oldpid = get_rasid_register(); 101 int oldpid = get_rasid_register();
98 set_rasid_register (ASID_INSERT(mm->context)); 102 set_rasid_register (ASID_INSERT(mm->context));
99 start &= PAGE_MASK; 103 start &= PAGE_MASK;
@@ -111,9 +115,7 @@ void flush_tlb_range (struct vm_area_struct *vma,
111 115
112 set_rasid_register(oldpid); 116 set_rasid_register(oldpid);
113 } else { 117 } else {
114 get_new_mmu_context(mm, asid_cache); 118 flush_tlb_mm(mm);
115 if (mm == current->active_mm)
116 set_rasid_register(ASID_INSERT(mm->context));
117 } 119 }
118 local_irq_restore(flags); 120 local_irq_restore(flags);
119} 121}
@@ -123,10 +125,6 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
123 struct mm_struct* mm = vma->vm_mm; 125 struct mm_struct* mm = vma->vm_mm;
124 unsigned long flags; 126 unsigned long flags;
125 int oldpid; 127 int oldpid;
126#if 0
127 printk("[tlbpage<%02lx,%08lx>]\n",
128 (unsigned long)mm->context, page);
129#endif
130 128
131 if(mm->context == NO_CONTEXT) 129 if(mm->context == NO_CONTEXT)
132 return; 130 return;
@@ -142,404 +140,5 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
142 set_rasid_register(oldpid); 140 set_rasid_register(oldpid);
143 141
144 local_irq_restore(flags); 142 local_irq_restore(flags);
145
146#if 0
147 flush_tlb_all();
148 return;
149#endif
150}
151
152
153#ifdef DEBUG_TLB
154
155#define USE_ITLB 0
156#define USE_DTLB 1
157
158struct way_config_t {
159 int indicies;
160 int indicies_log2;
161 int pgsz_log2;
162 int arf;
163};
164
165static struct way_config_t itlb[XCHAL_ITLB_WAYS] =
166{
167 { XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES),
168 XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES_LOG2),
169 XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, PAGESZ_LOG2_MIN),
170 XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ARF)
171 },
172 { XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES),
173 XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES_LOG2),
174 XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, PAGESZ_LOG2_MIN),
175 XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ARF)
176 },
177 { XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES),
178 XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES_LOG2),
179 XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, PAGESZ_LOG2_MIN),
180 XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ARF)
181 },
182 { XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES),
183 XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES_LOG2),
184 XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, PAGESZ_LOG2_MIN),
185 XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ARF)
186 },
187 { XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES),
188 XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES_LOG2),
189 XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, PAGESZ_LOG2_MIN),
190 XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ARF)
191 },
192 { XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES),
193 XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES_LOG2),
194 XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, PAGESZ_LOG2_MIN),
195 XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ARF)
196 },
197 { XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES),
198 XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES_LOG2),
199 XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, PAGESZ_LOG2_MIN),
200 XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ARF)
201 }
202};
203
204static struct way_config_t dtlb[XCHAL_DTLB_WAYS] =
205{
206 { XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES),
207 XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES_LOG2),
208 XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, PAGESZ_LOG2_MIN),
209 XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ARF)
210 },
211 { XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES),
212 XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES_LOG2),
213 XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, PAGESZ_LOG2_MIN),
214 XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ARF)
215 },
216 { XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES),
217 XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES_LOG2),
218 XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, PAGESZ_LOG2_MIN),
219 XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ARF)
220 },
221 { XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES),
222 XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES_LOG2),
223 XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, PAGESZ_LOG2_MIN),
224 XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ARF)
225 },
226 { XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES),
227 XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES_LOG2),
228 XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, PAGESZ_LOG2_MIN),
229 XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ARF)
230 },
231 { XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES),
232 XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES_LOG2),
233 XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, PAGESZ_LOG2_MIN),
234 XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ARF)
235 },
236 { XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES),
237 XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES_LOG2),
238 XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, PAGESZ_LOG2_MIN),
239 XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ARF)
240 },
241 { XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES),
242 XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES_LOG2),
243 XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, PAGESZ_LOG2_MIN),
244 XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ARF)
245 },
246 { XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES),
247 XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES_LOG2),
248 XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, PAGESZ_LOG2_MIN),
249 XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ARF)
250 },
251 { XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES),
252 XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES_LOG2),
253 XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, PAGESZ_LOG2_MIN),
254 XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ARF)
255 }
256};
257
258/* Total number of entries: */
259#define ITLB_TOTAL_ENTRIES \
260 XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES) + \
261 XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES) + \
262 XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES) + \
263 XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES) + \
264 XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES) + \
265 XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES) + \
266 XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES)
267#define DTLB_TOTAL_ENTRIES \
268 XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES) + \
269 XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES) + \
270 XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES) + \
271 XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES) + \
272 XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES) + \
273 XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES) + \
274 XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES) + \
275 XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES) + \
276 XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES) + \
277 XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES)
278
279
280typedef struct {
281 unsigned va;
282 unsigned pa;
283 unsigned char asid;
284 unsigned char ca;
285 unsigned char way;
286 unsigned char index;
287 unsigned char pgsz_log2; /* 0 .. 32 */
288 unsigned char type; /* 0=ITLB 1=DTLB */
289} tlb_dump_entry_t;
290
291/* Return -1 if a precedes b, +1 if a follows b, 0 if same: */
292int cmp_tlb_dump_info( tlb_dump_entry_t *a, tlb_dump_entry_t *b )
293{
294 if (a->asid < b->asid) return -1;
295 if (a->asid > b->asid) return 1;
296 if (a->va < b->va) return -1;
297 if (a->va > b->va) return 1;
298 if (a->pa < b->pa) return -1;
299 if (a->pa > b->pa) return 1;
300 if (a->ca < b->ca) return -1;
301 if (a->ca > b->ca) return 1;
302 if (a->way < b->way) return -1;
303 if (a->way > b->way) return 1;
304 if (a->index < b->index) return -1;
305 if (a->index > b->index) return 1;
306 return 0;
307}
308
309void sort_tlb_dump_info( tlb_dump_entry_t *t, int n )
310{
311 int i, j;
312 /* Simple O(n*n) sort: */
313 for (i = 0; i < n-1; i++)
314 for (j = i+1; j < n; j++)
315 if (cmp_tlb_dump_info(t+i, t+j) > 0) {
316 tlb_dump_entry_t tmp = t[i];
317 t[i] = t[j];
318 t[j] = tmp;
319 }
320}
321
322
323static tlb_dump_entry_t itlb_dump_info[ITLB_TOTAL_ENTRIES];
324static tlb_dump_entry_t dtlb_dump_info[DTLB_TOTAL_ENTRIES];
325
326
327static inline char *way_type (int type)
328{
329 return type ? "autorefill" : "non-autorefill";
330}
331
332void print_entry (struct way_config_t *way_info,
333 unsigned int way,
334 unsigned int index,
335 unsigned int virtual,
336 unsigned int translation)
337{
338 char valid_chr;
339 unsigned int va, pa, asid, ca;
340
341 va = virtual &
342 ~((1 << (way_info->pgsz_log2 + way_info->indicies_log2)) - 1);
343 asid = virtual & ((1 << XCHAL_MMU_ASID_BITS) - 1);
344 pa = translation & ~((1 << way_info->pgsz_log2) - 1);
345 ca = translation & ((1 << XCHAL_MMU_CA_BITS) - 1);
346 valid_chr = asid ? 'V' : 'I';
347
348 /* Compute and incorporate the effect of the index bits on the
349 * va. It's more useful for kernel debugging, since we always
350 * want to know the effective va anyway. */
351
352 va += index << way_info->pgsz_log2;
353
354 printk ("\t[%d,%d] (%c) vpn 0x%.8x ppn 0x%.8x asid 0x%.2x am 0x%x\n",
355 way, index, valid_chr, va, pa, asid, ca);
356}
357
358void print_itlb_entry (struct way_config_t *way_info, int way, int index)
359{
360 print_entry (way_info, way, index,
361 read_itlb_virtual (way + (index << way_info->pgsz_log2)),
362 read_itlb_translation (way + (index << way_info->pgsz_log2)));
363}
364
365void print_dtlb_entry (struct way_config_t *way_info, int way, int index)
366{
367 print_entry (way_info, way, index,
368 read_dtlb_virtual (way + (index << way_info->pgsz_log2)),
369 read_dtlb_translation (way + (index << way_info->pgsz_log2)));
370}
371
372void dump_itlb (void)
373{
374 int way, index;
375
376 printk ("\nITLB: ways = %d\n", XCHAL_ITLB_WAYS);
377
378 for (way = 0; way < XCHAL_ITLB_WAYS; way++) {
379 printk ("\nWay: %d, Entries: %d, MinPageSize: %d, Type: %s\n",
380 way, itlb[way].indicies,
381 itlb[way].pgsz_log2, way_type(itlb[way].arf));
382 for (index = 0; index < itlb[way].indicies; index++) {
383 print_itlb_entry(&itlb[way], way, index);
384 }
385 }
386}
387
388void dump_dtlb (void)
389{
390 int way, index;
391
392 printk ("\nDTLB: ways = %d\n", XCHAL_DTLB_WAYS);
393
394 for (way = 0; way < XCHAL_DTLB_WAYS; way++) {
395 printk ("\nWay: %d, Entries: %d, MinPageSize: %d, Type: %s\n",
396 way, dtlb[way].indicies,
397 dtlb[way].pgsz_log2, way_type(dtlb[way].arf));
398 for (index = 0; index < dtlb[way].indicies; index++) {
399 print_dtlb_entry(&dtlb[way], way, index);
400 }
401 }
402}
403
404void dump_tlb (tlb_dump_entry_t *tinfo, struct way_config_t *config,
405 int entries, int ways, int type, int show_invalid)
406{
407 tlb_dump_entry_t *e = tinfo;
408 int way, i;
409
410 /* Gather all info: */
411 for (way = 0; way < ways; way++) {
412 struct way_config_t *cfg = config + way;
413 for (i = 0; i < cfg->indicies; i++) {
414 unsigned wayindex = way + (i << cfg->pgsz_log2);
415 unsigned vv = (type ? read_dtlb_virtual (wayindex)
416 : read_itlb_virtual (wayindex));
417 unsigned pp = (type ? read_dtlb_translation (wayindex)
418 : read_itlb_translation (wayindex));
419
420 /* Compute and incorporate the effect of the index bits on the
421 * va. It's more useful for kernel debugging, since we always
422 * want to know the effective va anyway. */
423
424 e->va = (vv & ~((1 << (cfg->pgsz_log2 + cfg->indicies_log2)) - 1));
425 e->va += (i << cfg->pgsz_log2);
426 e->pa = (pp & ~((1 << cfg->pgsz_log2) - 1));
427 e->asid = (vv & ((1 << XCHAL_MMU_ASID_BITS) - 1));
428 e->ca = (pp & ((1 << XCHAL_MMU_CA_BITS) - 1));
429 e->way = way;
430 e->index = i;
431 e->pgsz_log2 = cfg->pgsz_log2;
432 e->type = type;
433 e++;
434 }
435 }
436#if 1
437 /* Sort by ASID and VADDR: */
438 sort_tlb_dump_info (tinfo, entries);
439#endif
440
441 /* Display all sorted info: */
442 printk ("\n%cTLB dump:\n", (type ? 'D' : 'I'));
443 for (e = tinfo, i = 0; i < entries; i++, e++) {
444#if 0
445 if (e->asid == 0 && !show_invalid)
446 continue;
447#endif
448 printk ("%c way=%d i=%d ASID=%02X V=%08X -> P=%08X CA=%X (%d %cB)\n",
449 (e->type ? 'D' : 'I'), e->way, e->index,
450 e->asid, e->va, e->pa, e->ca,
451 (1 << (e->pgsz_log2 % 10)),
452 " kMG"[e->pgsz_log2 / 10]
453 );
454 }
455}
456
457void dump_tlbs2 (int showinv)
458{
459 dump_tlb (itlb_dump_info, itlb, ITLB_TOTAL_ENTRIES, XCHAL_ITLB_WAYS, 0, showinv);
460 dump_tlb (dtlb_dump_info, dtlb, DTLB_TOTAL_ENTRIES, XCHAL_DTLB_WAYS, 1, showinv);
461}
462
463void dump_all_tlbs (void)
464{
465 dump_tlbs2 (1);
466}
467
468void dump_valid_tlbs (void)
469{
470 dump_tlbs2 (0);
471} 143}
472 144
473
474void dump_tlbs (void)
475{
476 dump_itlb();
477 dump_dtlb();
478}
479
480void dump_cache_tag(int dcache, int idx)
481{
482 int w, i, s, e;
483 unsigned long tag, index;
484 unsigned long num_lines, num_ways, cache_size, line_size;
485
486 num_ways = dcache ? XCHAL_DCACHE_WAYS : XCHAL_ICACHE_WAYS;
487 cache_size = dcache ? XCHAL_DCACHE_SIZE : XCHAL_ICACHE_SIZE;
488 line_size = dcache ? XCHAL_DCACHE_LINESIZE : XCHAL_ICACHE_LINESIZE;
489
490 num_lines = cache_size / num_ways;
491
492 s = 0; e = num_lines;
493
494 if (idx >= 0)
495 e = (s = idx * line_size) + 1;
496
497 for (i = s; i < e; i+= line_size) {
498 printk("\nline %#08x:", i);
499 for (w = 0; w < num_ways; w++) {
500 index = w * num_lines + i;
501 if (dcache)
502 __asm__ __volatile__("ldct %0, %1\n\t"
503 : "=a"(tag) : "a"(index));
504 else
505 __asm__ __volatile__("lict %0, %1\n\t"
506 : "=a"(tag) : "a"(index));
507
508 printk(" %#010lx", tag);
509 }
510 }
511 printk ("\n");
512}
513
514void dump_icache(int index)
515{
516 unsigned long data, addr;
517 int w, i;
518
519 const unsigned long num_ways = XCHAL_ICACHE_WAYS;
520 const unsigned long cache_size = XCHAL_ICACHE_SIZE;
521 const unsigned long line_size = XCHAL_ICACHE_LINESIZE;
522 const unsigned long num_lines = cache_size / num_ways / line_size;
523
524 for (w = 0; w < num_ways; w++) {
525 printk ("\nWay %d", w);
526
527 for (i = 0; i < line_size; i+= 4) {
528 addr = w * num_lines + index * line_size + i;
529 __asm__ __volatile__("licw %0, %1\n\t"
530 : "=a"(data) : "a"(addr));
531 printk(" %#010lx", data);
532 }
533 }
534 printk ("\n");
535}
536
537void dump_cache_tags(void)
538{
539 printk("Instruction cache\n");
540 dump_cache_tag(0, -1);
541 printk("Data cache\n");
542 dump_cache_tag(1, -1);
543}
544
545#endif
diff --git a/arch/xtensa/platform-iss/console.c b/arch/xtensa/platform-iss/console.c
index 5c947cae7520..2f4f20ffe666 100644
--- a/arch/xtensa/platform-iss/console.c
+++ b/arch/xtensa/platform-iss/console.c
@@ -25,11 +25,15 @@
25#include <asm/uaccess.h> 25#include <asm/uaccess.h>
26#include <asm/irq.h> 26#include <asm/irq.h>
27 27
28#include <xtensa/simcall.h> 28#include <asm/platform/simcall.h>
29 29
30#include <linux/tty.h> 30#include <linux/tty.h>
31#include <linux/tty_flip.h> 31#include <linux/tty_flip.h>
32 32
33#ifdef SERIAL_INLINE
34#define _INLINE_ inline
35#endif
36
33#define SERIAL_MAX_NUM_LINES 1 37#define SERIAL_MAX_NUM_LINES 1
34#define SERIAL_TIMER_VALUE (20 * HZ) 38#define SERIAL_TIMER_VALUE (20 * HZ)
35 39
@@ -191,7 +195,7 @@ static int rs_read_proc(char *page, char **start, off_t off, int count,
191} 195}
192 196
193 197
194static const struct tty_operations serial_ops = { 198static struct tty_operations serial_ops = {
195 .open = rs_open, 199 .open = rs_open,
196 .close = rs_close, 200 .close = rs_close,
197 .write = rs_write, 201 .write = rs_write,
diff --git a/arch/xtensa/platform-iss/network.c b/arch/xtensa/platform-iss/network.c
index 15d64414bd60..8ebfc8761229 100644
--- a/arch/xtensa/platform-iss/network.c
+++ b/arch/xtensa/platform-iss/network.c
@@ -34,7 +34,7 @@
34#include <linux/timer.h> 34#include <linux/timer.h>
35#include <linux/platform_device.h> 35#include <linux/platform_device.h>
36 36
37#include <xtensa/simcall.h> 37#include <asm/platform/simcall.h>
38 38
39#define DRIVER_NAME "iss-netdev" 39#define DRIVER_NAME "iss-netdev"
40#define ETH_MAX_PACKET 1500 40#define ETH_MAX_PACKET 1500
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 4f83fd922377..a541b42c08e3 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -25,6 +25,7 @@
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/swap.h> 26#include <linux/swap.h>
27#include <linux/writeback.h> 27#include <linux/writeback.h>
28#include <linux/task_io_accounting_ops.h>
28#include <linux/interrupt.h> 29#include <linux/interrupt.h>
29#include <linux/cpu.h> 30#include <linux/cpu.h>
30#include <linux/blktrace_api.h> 31#include <linux/blktrace_api.h>
@@ -128,13 +129,6 @@ struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
128} 129}
129EXPORT_SYMBOL(blk_get_backing_dev_info); 130EXPORT_SYMBOL(blk_get_backing_dev_info);
130 131
131void blk_queue_activity_fn(request_queue_t *q, activity_fn *fn, void *data)
132{
133 q->activity_fn = fn;
134 q->activity_data = data;
135}
136EXPORT_SYMBOL(blk_queue_activity_fn);
137
138/** 132/**
139 * blk_queue_prep_rq - set a prepare_request function for queue 133 * blk_queue_prep_rq - set a prepare_request function for queue
140 * @q: queue 134 * @q: queue
@@ -237,8 +231,6 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
237 * by default assume old behaviour and bounce for any highmem page 231 * by default assume old behaviour and bounce for any highmem page
238 */ 232 */
239 blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); 233 blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
240
241 blk_queue_activity_fn(q, NULL, NULL);
242} 234}
243 235
244EXPORT_SYMBOL(blk_queue_make_request); 236EXPORT_SYMBOL(blk_queue_make_request);
@@ -2695,9 +2687,6 @@ static inline void add_request(request_queue_t * q, struct request * req)
2695{ 2687{
2696 drive_stat_acct(req, req->nr_sectors, 1); 2688 drive_stat_acct(req, req->nr_sectors, 1);
2697 2689
2698 if (q->activity_fn)
2699 q->activity_fn(q->activity_data, rq_data_dir(req));
2700
2701 /* 2690 /*
2702 * elevator indicated where it wants this request to be 2691 * elevator indicated where it wants this request to be
2703 * inserted at elevator_merge time 2692 * inserted at elevator_merge time
@@ -3235,10 +3224,12 @@ void submit_bio(int rw, struct bio *bio)
3235 BIO_BUG_ON(!bio->bi_size); 3224 BIO_BUG_ON(!bio->bi_size);
3236 BIO_BUG_ON(!bio->bi_io_vec); 3225 BIO_BUG_ON(!bio->bi_io_vec);
3237 bio->bi_rw |= rw; 3226 bio->bi_rw |= rw;
3238 if (rw & WRITE) 3227 if (rw & WRITE) {
3239 count_vm_events(PGPGOUT, count); 3228 count_vm_events(PGPGOUT, count);
3240 else 3229 } else {
3230 task_io_account_read(bio->bi_size);
3241 count_vm_events(PGPGIN, count); 3231 count_vm_events(PGPGIN, count);
3232 }
3242 3233
3243 if (unlikely(block_dump)) { 3234 if (unlikely(block_dump)) {
3244 char b[BDEVNAME_SIZE]; 3235 char b[BDEVNAME_SIZE];
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index b3e210723a71..f322b6a441d8 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -228,6 +228,7 @@ static int sg_io(struct file *file, request_queue_t *q,
228 struct request *rq; 228 struct request *rq;
229 char sense[SCSI_SENSE_BUFFERSIZE]; 229 char sense[SCSI_SENSE_BUFFERSIZE];
230 unsigned char cmd[BLK_MAX_CDB]; 230 unsigned char cmd[BLK_MAX_CDB];
231 struct bio *bio;
231 232
232 if (hdr->interface_id != 'S') 233 if (hdr->interface_id != 'S')
233 return -EINVAL; 234 return -EINVAL;
@@ -270,13 +271,6 @@ static int sg_io(struct file *file, request_queue_t *q,
270 271
271 rq->cmd_type = REQ_TYPE_BLOCK_PC; 272 rq->cmd_type = REQ_TYPE_BLOCK_PC;
272 273
273 /*
274 * bounce this after holding a reference to the original bio, it's
275 * needed for proper unmapping
276 */
277 if (rq->bio)
278 blk_queue_bounce(q, &rq->bio);
279
280 rq->timeout = jiffies_to_msecs(hdr->timeout); 274 rq->timeout = jiffies_to_msecs(hdr->timeout);
281 if (!rq->timeout) 275 if (!rq->timeout)
282 rq->timeout = q->sg_timeout; 276 rq->timeout = q->sg_timeout;
@@ -308,6 +302,7 @@ static int sg_io(struct file *file, request_queue_t *q,
308 if (ret) 302 if (ret)
309 goto out; 303 goto out;
310 304
305 bio = rq->bio;
311 rq->retries = 0; 306 rq->retries = 0;
312 307
313 start_time = jiffies; 308 start_time = jiffies;
@@ -338,6 +333,7 @@ static int sg_io(struct file *file, request_queue_t *q,
338 hdr->sb_len_wr = len; 333 hdr->sb_len_wr = len;
339 } 334 }
340 335
336 rq->bio = bio;
341 if (blk_rq_unmap_user(rq)) 337 if (blk_rq_unmap_user(rq))
342 ret = -EFAULT; 338 ret = -EFAULT;
343 339
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 034c939bf91a..6e93004f2181 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -17,7 +17,6 @@
17#include <linux/crypto.h> 17#include <linux/crypto.h>
18#include <linux/errno.h> 18#include <linux/errno.h>
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/io.h>
21#include <linux/module.h> 20#include <linux/module.h>
22#include <linux/scatterlist.h> 21#include <linux/scatterlist.h>
23#include <linux/seq_file.h> 22#include <linux/seq_file.h>
diff --git a/crypto/sha512.c b/crypto/sha512.c
index 2dfe7f170b48..15eab9db9be4 100644
--- a/crypto/sha512.c
+++ b/crypto/sha512.c
@@ -24,7 +24,7 @@
24 24
25#define SHA384_DIGEST_SIZE 48 25#define SHA384_DIGEST_SIZE 48
26#define SHA512_DIGEST_SIZE 64 26#define SHA512_DIGEST_SIZE 64
27#define SHA384_HMAC_BLOCK_SIZE 96 27#define SHA384_HMAC_BLOCK_SIZE 128
28#define SHA512_HMAC_BLOCK_SIZE 128 28#define SHA512_HMAC_BLOCK_SIZE 128
29 29
30struct sha512_ctx { 30struct sha512_ctx {
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 4929e923b5c6..e7da9fa724ec 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -80,4 +80,6 @@ source "drivers/rtc/Kconfig"
80 80
81source "drivers/dma/Kconfig" 81source "drivers/dma/Kconfig"
82 82
83source "drivers/kvm/Kconfig"
84
83endmenu 85endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 50f76da598c9..0dd96d1afd39 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_SPI) += spi/
43obj-$(CONFIG_PCCARD) += pcmcia/ 43obj-$(CONFIG_PCCARD) += pcmcia/
44obj-$(CONFIG_DIO) += dio/ 44obj-$(CONFIG_DIO) += dio/
45obj-$(CONFIG_SBUS) += sbus/ 45obj-$(CONFIG_SBUS) += sbus/
46obj-$(CONFIG_KVM) += kvm/
46obj-$(CONFIG_ZORRO) += zorro/ 47obj-$(CONFIG_ZORRO) += zorro/
47obj-$(CONFIG_MAC) += macintosh/ 48obj-$(CONFIG_MAC) += macintosh/
48obj-$(CONFIG_ATA_OVER_ETH) += block/aoe/ 49obj-$(CONFIG_ATA_OVER_ETH) += block/aoe/
diff --git a/drivers/acorn/block/fd1772.c b/drivers/acorn/block/fd1772.c
index 048542341204..674bf81c6e66 100644
--- a/drivers/acorn/block/fd1772.c
+++ b/drivers/acorn/block/fd1772.c
@@ -1549,12 +1549,12 @@ int fd1772_init(void)
1549#ifdef TRACKBUFFER 1549#ifdef TRACKBUFFER
1550 BufferDrive = BufferSide = BufferTrack = -1; 1550 BufferDrive = BufferSide = BufferTrack = -1;
1551 /* Atari uses 512 - I want to eventually cope with 1K sectors */ 1551 /* Atari uses 512 - I want to eventually cope with 1K sectors */
1552 DMABuffer = (char *)kmalloc((FD1772_MAX_SECTORS+1)*512,GFP_KERNEL); 1552 DMABuffer = kmalloc((FD1772_MAX_SECTORS+1)*512,GFP_KERNEL);
1553 TrackBuffer = DMABuffer + 512; 1553 TrackBuffer = DMABuffer + 512;
1554#else 1554#else
1555 /* Allocate memory for the DMAbuffer - on the Atari this takes it 1555 /* Allocate memory for the DMAbuffer - on the Atari this takes it
1556 out of some special memory... */ 1556 out of some special memory... */
1557 DMABuffer = (char *) kmalloc(2048); /* Copes with pretty large sectors */ 1557 DMABuffer = kmalloc(2048); /* Copes with pretty large sectors */
1558#endif 1558#endif
1559 err = -ENOMEM; 1559 err = -ENOMEM;
1560 if (!DMAbuffer) 1560 if (!DMAbuffer)
diff --git a/drivers/acorn/char/i2c.c b/drivers/acorn/char/i2c.c
index bdb9c8b78ed8..9e584a7af434 100644
--- a/drivers/acorn/char/i2c.c
+++ b/drivers/acorn/char/i2c.c
@@ -360,7 +360,7 @@ static int __init i2c_ioc_init(void)
360 if (ret >= 0){ 360 if (ret >= 0){
361 ret = misc_register(&rtc_dev); 361 ret = misc_register(&rtc_dev);
362 if(ret < 0) 362 if(ret < 0)
363 i2c_bit_del_bus(&ioc_ops); 363 i2c_del_adapter(&ioc_ops);
364 } 364 }
365 365
366 return ret; 366 return ret;
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index 9021e34d2096..90786d7a20bb 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -551,7 +551,7 @@ static int adma_port_start(struct ata_port *ap)
551 return rc; 551 return rc;
552 adma_enter_reg_mode(ap); 552 adma_enter_reg_mode(ap);
553 rc = -ENOMEM; 553 rc = -ENOMEM;
554 pp = kcalloc(1, sizeof(*pp), GFP_KERNEL); 554 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
555 if (!pp) 555 if (!pp)
556 goto err_out; 556 goto err_out;
557 pp->pkt = dma_alloc_coherent(dev, ADMA_PKT_BYTES, &pp->pkt_dma, 557 pp->pkt = dma_alloc_coherent(dev, ADMA_PKT_BYTES, &pp->pkt_dma,
@@ -672,7 +672,7 @@ static int adma_ata_init_one(struct pci_dev *pdev,
672 if (rc) 672 if (rc)
673 goto err_out_iounmap; 673 goto err_out_iounmap;
674 674
675 probe_ent = kcalloc(1, sizeof(*probe_ent), GFP_KERNEL); 675 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
676 if (probe_ent == NULL) { 676 if (probe_ent == NULL) {
677 rc = -ENOMEM; 677 rc = -ENOMEM;
678 goto err_out_iounmap; 678 goto err_out_iounmap;
diff --git a/drivers/atm/.gitignore b/drivers/atm/.gitignore
index a165b7167714..fc0ae5eb05d8 100644
--- a/drivers/atm/.gitignore
+++ b/drivers/atm/.gitignore
@@ -2,4 +2,4 @@
2fore200e_mkfirm 2fore200e_mkfirm
3fore200e_pca_fw.c 3fore200e_pca_fw.c
4pca200e.bin 4pca200e.bin
5 5pca200e_ecd.bin2
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index bc1b13c8f5d7..5aab7bd473ac 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -1832,7 +1832,7 @@ static int __devinit eni_start(struct atm_dev *dev)
1832 /* initialize memory management */ 1832 /* initialize memory management */
1833 buffer_mem = eni_dev->mem - (buf - eni_dev->ram); 1833 buffer_mem = eni_dev->mem - (buf - eni_dev->ram);
1834 eni_dev->free_list_size = buffer_mem/MID_MIN_BUF_SIZE/2; 1834 eni_dev->free_list_size = buffer_mem/MID_MIN_BUF_SIZE/2;
1835 eni_dev->free_list = (struct eni_free *) kmalloc( 1835 eni_dev->free_list = kmalloc(
1836 sizeof(struct eni_free)*(eni_dev->free_list_size+1),GFP_KERNEL); 1836 sizeof(struct eni_free)*(eni_dev->free_list_size+1),GFP_KERNEL);
1837 if (!eni_dev->free_list) { 1837 if (!eni_dev->free_list) {
1838 printk(KERN_ERR DEV_LABEL "(itf %d): couldn't get free page\n", 1838 printk(KERN_ERR DEV_LABEL "(itf %d): couldn't get free page\n",
@@ -2232,7 +2232,7 @@ static int __devinit eni_init_one(struct pci_dev *pci_dev,
2232 goto out0; 2232 goto out0;
2233 } 2233 }
2234 2234
2235 eni_dev = (struct eni_dev *) kmalloc(sizeof(struct eni_dev),GFP_KERNEL); 2235 eni_dev = kmalloc(sizeof(struct eni_dev),GFP_KERNEL);
2236 if (!eni_dev) goto out0; 2236 if (!eni_dev) goto out0;
2237 if (!cpu_zeroes) { 2237 if (!cpu_zeroes) {
2238 cpu_zeroes = pci_alloc_consistent(pci_dev,ENI_ZEROES_SIZE, 2238 cpu_zeroes = pci_alloc_consistent(pci_dev,ENI_ZEROES_SIZE,
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 7d9b4e52f0bf..db33f6f4dd2a 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -2351,7 +2351,7 @@ he_open(struct atm_vcc *vcc)
2351 2351
2352 cid = he_mkcid(he_dev, vpi, vci); 2352 cid = he_mkcid(he_dev, vpi, vci);
2353 2353
2354 he_vcc = (struct he_vcc *) kmalloc(sizeof(struct he_vcc), GFP_ATOMIC); 2354 he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2355 if (he_vcc == NULL) { 2355 if (he_vcc == NULL) {
2356 hprintk("unable to allocate he_vcc during open\n"); 2356 hprintk("unable to allocate he_vcc during open\n");
2357 return -ENOMEM; 2357 return -ENOMEM;
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index 267825501dfe..09f477d4237a 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -2602,7 +2602,7 @@ static int __devinit lanai_init_one(struct pci_dev *pci,
2602 struct atm_dev *atmdev; 2602 struct atm_dev *atmdev;
2603 int result; 2603 int result;
2604 2604
2605 lanai = (struct lanai_dev *) kmalloc(sizeof(*lanai), GFP_KERNEL); 2605 lanai = kmalloc(sizeof(*lanai), GFP_KERNEL);
2606 if (lanai == NULL) { 2606 if (lanai == NULL) {
2607 printk(KERN_ERR DEV_LABEL 2607 printk(KERN_ERR DEV_LABEL
2608 ": couldn't allocate dev_data structure!\n"); 2608 ": couldn't allocate dev_data structure!\n");
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index bd0904594805..aab9b3733d52 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -997,7 +997,7 @@ static scq_info *get_scq(int size, u32 scd)
997 if (size != VBR_SCQSIZE && size != CBR_SCQSIZE) 997 if (size != VBR_SCQSIZE && size != CBR_SCQSIZE)
998 return NULL; 998 return NULL;
999 999
1000 scq = (scq_info *) kmalloc(sizeof(scq_info), GFP_KERNEL); 1000 scq = kmalloc(sizeof(scq_info), GFP_KERNEL);
1001 if (scq == NULL) 1001 if (scq == NULL)
1002 return NULL; 1002 return NULL;
1003 scq->org = kmalloc(2 * size, GFP_KERNEL); 1003 scq->org = kmalloc(2 * size, GFP_KERNEL);
@@ -1006,7 +1006,7 @@ static scq_info *get_scq(int size, u32 scd)
1006 kfree(scq); 1006 kfree(scq);
1007 return NULL; 1007 return NULL;
1008 } 1008 }
1009 scq->skb = (struct sk_buff **) kmalloc(sizeof(struct sk_buff *) * 1009 scq->skb = kmalloc(sizeof(struct sk_buff *) *
1010 (size / NS_SCQE_SIZE), GFP_KERNEL); 1010 (size / NS_SCQE_SIZE), GFP_KERNEL);
1011 if (scq->skb == NULL) 1011 if (scq->skb == NULL)
1012 { 1012 {
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 7df0f373188e..756d4f760da3 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -996,7 +996,7 @@ static int start_tx(struct atm_dev *dev)
996 996
997 DPRINTK("start_tx\n"); 997 DPRINTK("start_tx\n");
998 zatm_dev = ZATM_DEV(dev); 998 zatm_dev = ZATM_DEV(dev);
999 zatm_dev->tx_map = (struct atm_vcc **) kmalloc(sizeof(struct atm_vcc *)* 999 zatm_dev->tx_map = kmalloc(sizeof(struct atm_vcc *)*
1000 zatm_dev->chans,GFP_KERNEL); 1000 zatm_dev->chans,GFP_KERNEL);
1001 if (!zatm_dev->tx_map) return -ENOMEM; 1001 if (!zatm_dev->tx_map) return -ENOMEM;
1002 zatm_dev->tx_bw = ATM_OC3_PCR; 1002 zatm_dev->tx_bw = ATM_OC3_PCR;
@@ -1591,7 +1591,7 @@ static int __devinit zatm_init_one(struct pci_dev *pci_dev,
1591 struct zatm_dev *zatm_dev; 1591 struct zatm_dev *zatm_dev;
1592 int ret = -ENOMEM; 1592 int ret = -ENOMEM;
1593 1593
1594 zatm_dev = (struct zatm_dev *) kmalloc(sizeof(*zatm_dev), GFP_KERNEL); 1594 zatm_dev = kmalloc(sizeof(*zatm_dev), GFP_KERNEL);
1595 if (!zatm_dev) { 1595 if (!zatm_dev) {
1596 printk(KERN_EMERG "%s: memory shortage\n", DEV_LABEL); 1596 printk(KERN_EMERG "%s: memory shortage\n", DEV_LABEL);
1597 goto out; 1597 goto out;
diff --git a/drivers/base/dmapool.c b/drivers/base/dmapool.c
index dbe0735f8c9e..f95d50277274 100644
--- a/drivers/base/dmapool.c
+++ b/drivers/base/dmapool.c
@@ -173,7 +173,7 @@ pool_alloc_page (struct dma_pool *pool, gfp_t mem_flags)
173 mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG; 173 mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
174 mapsize *= sizeof (long); 174 mapsize *= sizeof (long);
175 175
176 page = (struct dma_page *) kmalloc (mapsize + sizeof *page, mem_flags); 176 page = kmalloc(mapsize + sizeof *page, mem_flags);
177 if (!page) 177 if (!page)
178 return NULL; 178 return NULL;
179 page->vaddr = dma_alloc_coherent (pool->dev, 179 page->vaddr = dma_alloc_coherent (pool->dev,
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index ce9cfcb6071c..58c1debf86f1 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -28,13 +28,6 @@ config ATARI_FLOPPY
28 tristate "Atari floppy support" 28 tristate "Atari floppy support"
29 depends on ATARI 29 depends on ATARI
30 30
31config BLK_DEV_SWIM_IOP
32 bool "Macintosh IIfx/Quadra 900/Quadra 950 floppy support (EXPERIMENTAL)"
33 depends on MAC && EXPERIMENTAL && BROKEN
34 help
35 Say Y here to support the SWIM (Super Woz Integrated Machine) IOP
36 floppy controller on the Macintosh IIfx and Quadra 900/950.
37
38config MAC_FLOPPY 31config MAC_FLOPPY
39 tristate "Support for PowerMac floppy" 32 tristate "Support for PowerMac floppy"
40 depends on PPC_PMAC && !PPC_PMAC64 33 depends on PPC_PMAC && !PPC_PMAC64
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 410f259a8031..dd88e33c1eb1 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -9,7 +9,6 @@ obj-$(CONFIG_MAC_FLOPPY) += swim3.o
9obj-$(CONFIG_BLK_DEV_FD) += floppy.o 9obj-$(CONFIG_BLK_DEV_FD) += floppy.o
10obj-$(CONFIG_AMIGA_FLOPPY) += amiflop.o 10obj-$(CONFIG_AMIGA_FLOPPY) += amiflop.o
11obj-$(CONFIG_ATARI_FLOPPY) += ataflop.o 11obj-$(CONFIG_ATARI_FLOPPY) += ataflop.o
12obj-$(CONFIG_BLK_DEV_SWIM_IOP) += swim_iop.o
13obj-$(CONFIG_ATARI_ACSI) += acsi.o 12obj-$(CONFIG_ATARI_ACSI) += acsi.o
14obj-$(CONFIG_ATARI_SLM) += acsi_slm.o 13obj-$(CONFIG_ATARI_SLM) += acsi_slm.o
15obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o 14obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index ee159edb6b88..acb2fa9cf6b1 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1039,7 +1039,7 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
1039 status = -ENOMEM; 1039 status = -ENOMEM;
1040 goto cleanup1; 1040 goto cleanup1;
1041 } 1041 }
1042 buff_size = (int *)kmalloc(MAXSGENTRIES * sizeof(int), 1042 buff_size = kmalloc(MAXSGENTRIES * sizeof(int),
1043 GFP_KERNEL); 1043 GFP_KERNEL);
1044 if (!buff_size) { 1044 if (!buff_size) {
1045 status = -ENOMEM; 1045 status = -ENOMEM;
@@ -2837,7 +2837,7 @@ static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2837 if (err) { 2837 if (err) {
2838 printk(KERN_ERR "cciss: Cannot obtain PCI resources, " 2838 printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
2839 "aborting\n"); 2839 "aborting\n");
2840 goto err_out_disable_pdev; 2840 return err;
2841 } 2841 }
2842 2842
2843 subsystem_vendor_id = pdev->subsystem_vendor; 2843 subsystem_vendor_id = pdev->subsystem_vendor;
@@ -2865,7 +2865,7 @@ static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2865#ifdef CCISS_DEBUG 2865#ifdef CCISS_DEBUG
2866 printk("address 0 = %x\n", c->paddr); 2866 printk("address 0 = %x\n", c->paddr);
2867#endif /* CCISS_DEBUG */ 2867#endif /* CCISS_DEBUG */
2868 c->vaddr = remap_pci_mem(c->paddr, 200); 2868 c->vaddr = remap_pci_mem(c->paddr, 0x250);
2869 2869
2870 /* Wait for the board to become ready. (PCI hotplug needs this.) 2870 /* Wait for the board to become ready. (PCI hotplug needs this.)
2871 * We poll for up to 120 secs, once per 100ms. */ 2871 * We poll for up to 120 secs, once per 100ms. */
@@ -3005,10 +3005,11 @@ static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
3005 return 0; 3005 return 0;
3006 3006
3007 err_out_free_res: 3007 err_out_free_res:
3008 /*
3009 * Deliberately omit pci_disable_device(): it does something nasty to
3010 * Smart Array controllers that pci_enable_device does not undo
3011 */
3008 pci_release_regions(pdev); 3012 pci_release_regions(pdev);
3009
3010 err_out_disable_pdev:
3011 pci_disable_device(pdev);
3012 return err; 3013 return err;
3013} 3014}
3014 3015
@@ -3382,8 +3383,11 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3382 if (drv->queue) 3383 if (drv->queue)
3383 blk_cleanup_queue(drv->queue); 3384 blk_cleanup_queue(drv->queue);
3384 } 3385 }
3386 /*
3387 * Deliberately omit pci_disable_device(): it does something nasty to
3388 * Smart Array controllers that pci_enable_device does not undo
3389 */
3385 pci_release_regions(pdev); 3390 pci_release_regions(pdev);
3386 pci_disable_device(pdev);
3387 pci_set_drvdata(pdev, NULL); 3391 pci_set_drvdata(pdev, NULL);
3388 free_hba(i); 3392 free_hba(i);
3389 return -1; 3393 return -1;
@@ -3452,8 +3456,11 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
3452#ifdef CONFIG_CISS_SCSI_TAPE 3456#ifdef CONFIG_CISS_SCSI_TAPE
3453 kfree(hba[i]->scsi_rejects.complete); 3457 kfree(hba[i]->scsi_rejects.complete);
3454#endif 3458#endif
3459 /*
3460 * Deliberately omit pci_disable_device(): it does something nasty to
3461 * Smart Array controllers that pci_enable_device does not undo
3462 */
3455 pci_release_regions(pdev); 3463 pci_release_regions(pdev);
3456 pci_disable_device(pdev);
3457 pci_set_drvdata(pdev, NULL); 3464 pci_set_drvdata(pdev, NULL);
3458 free_hba(i); 3465 free_hba(i);
3459} 3466}
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index d5f519ebbc08..b94cd1c32131 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -1625,7 +1625,7 @@ static void start_fwbk(int ctlr)
1625 " processing\n"); 1625 " processing\n");
1626 /* Command does not return anything, but idasend command needs a 1626 /* Command does not return anything, but idasend command needs a
1627 buffer */ 1627 buffer */
1628 id_ctlr_buf = (id_ctlr_t *)kmalloc(sizeof(id_ctlr_t), GFP_KERNEL); 1628 id_ctlr_buf = kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
1629 if(id_ctlr_buf==NULL) 1629 if(id_ctlr_buf==NULL)
1630 { 1630 {
1631 printk(KERN_WARNING "cpqarray: Out of memory. " 1631 printk(KERN_WARNING "cpqarray: Out of memory. "
@@ -1660,14 +1660,14 @@ static void getgeometry(int ctlr)
1660 1660
1661 info_p->log_drv_map = 0; 1661 info_p->log_drv_map = 0;
1662 1662
1663 id_ldrive = (id_log_drv_t *)kmalloc(sizeof(id_log_drv_t), GFP_KERNEL); 1663 id_ldrive = kmalloc(sizeof(id_log_drv_t), GFP_KERNEL);
1664 if(id_ldrive == NULL) 1664 if(id_ldrive == NULL)
1665 { 1665 {
1666 printk( KERN_ERR "cpqarray: out of memory.\n"); 1666 printk( KERN_ERR "cpqarray: out of memory.\n");
1667 return; 1667 return;
1668 } 1668 }
1669 1669
1670 id_ctlr_buf = (id_ctlr_t *)kmalloc(sizeof(id_ctlr_t), GFP_KERNEL); 1670 id_ctlr_buf = kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
1671 if(id_ctlr_buf == NULL) 1671 if(id_ctlr_buf == NULL)
1672 { 1672 {
1673 kfree(id_ldrive); 1673 kfree(id_ldrive);
@@ -1675,7 +1675,7 @@ static void getgeometry(int ctlr)
1675 return; 1675 return;
1676 } 1676 }
1677 1677
1678 id_lstatus_buf = (sense_log_drv_stat_t *)kmalloc(sizeof(sense_log_drv_stat_t), GFP_KERNEL); 1678 id_lstatus_buf = kmalloc(sizeof(sense_log_drv_stat_t), GFP_KERNEL);
1679 if(id_lstatus_buf == NULL) 1679 if(id_lstatus_buf == NULL)
1680 { 1680 {
1681 kfree(id_ctlr_buf); 1681 kfree(id_ctlr_buf);
@@ -1684,7 +1684,7 @@ static void getgeometry(int ctlr)
1684 return; 1684 return;
1685 } 1685 }
1686 1686
1687 sense_config_buf = (config_t *)kmalloc(sizeof(config_t), GFP_KERNEL); 1687 sense_config_buf = kmalloc(sizeof(config_t), GFP_KERNEL);
1688 if(sense_config_buf == NULL) 1688 if(sense_config_buf == NULL)
1689 { 1689 {
1690 kfree(id_lstatus_buf); 1690 kfree(id_lstatus_buf);
diff --git a/drivers/block/swim_iop.c b/drivers/block/swim_iop.c
deleted file mode 100644
index ed7b06cf3e68..000000000000
--- a/drivers/block/swim_iop.c
+++ /dev/null
@@ -1,578 +0,0 @@
1/*
2 * Driver for the SWIM (Super Woz Integrated Machine) IOP
3 * floppy controller on the Macintosh IIfx and Quadra 900/950
4 *
5 * Written by Joshua M. Thompson (funaho@jurai.org)
6 * based on the SWIM3 driver (c) 1996 by Paul Mackerras.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * 1999-06-12 (jmt) - Initial implementation.
14 */
15
16/*
17 * -------------------
18 * Theory of Operation
19 * -------------------
20 *
21 * Since the SWIM IOP is message-driven we implement a simple request queue
22 * system. One outstanding request may be queued at any given time (this is
23 * an IOP limitation); only when that request has completed can a new request
24 * be sent.
25 */
26
27#include <linux/stddef.h>
28#include <linux/kernel.h>
29#include <linux/sched.h>
30#include <linux/timer.h>
31#include <linux/delay.h>
32#include <linux/fd.h>
33#include <linux/ioctl.h>
34#include <linux/blkdev.h>
35#include <asm/io.h>
36#include <asm/uaccess.h>
37#include <asm/mac_iop.h>
38#include <asm/swim_iop.h>
39
40#define DRIVER_VERSION "Version 0.1 (1999-06-12)"
41
42#define MAX_FLOPPIES 4
43
44enum swim_state {
45 idle,
46 available,
47 revalidating,
48 transferring,
49 ejecting
50};
51
52struct floppy_state {
53 enum swim_state state;
54 int drive_num; /* device number */
55 int secpercyl; /* disk geometry information */
56 int secpertrack;
57 int total_secs;
58 int write_prot; /* 1 if write-protected, 0 if not, -1 dunno */
59 int ref_count;
60 struct timer_list timeout;
61 int ejected;
62 struct wait_queue *wait;
63 int wanted;
64 int timeout_pending;
65};
66
67struct swim_iop_req {
68 int sent;
69 int complete;
70 __u8 command[32];
71 struct floppy_state *fs;
72 void (*done)(struct swim_iop_req *);
73};
74
75static struct swim_iop_req *current_req;
76static int floppy_count;
77
78static struct floppy_state floppy_states[MAX_FLOPPIES];
79static DEFINE_SPINLOCK(swim_iop_lock);
80
81#define CURRENT elv_next_request(swim_queue)
82
83static char *drive_names[7] = {
84 "not installed", /* DRV_NONE */
85 "unknown (1)", /* DRV_UNKNOWN */
86 "a 400K drive", /* DRV_400K */
87 "an 800K drive" /* DRV_800K */
88 "unknown (4)", /* ???? */
89 "an FDHD", /* DRV_FDHD */
90 "unknown (6)", /* ???? */
91 "an Apple HD20" /* DRV_HD20 */
92};
93
94int swimiop_init(void);
95static void swimiop_init_request(struct swim_iop_req *);
96static int swimiop_send_request(struct swim_iop_req *);
97static void swimiop_receive(struct iop_msg *);
98static void swimiop_status_update(int, struct swim_drvstatus *);
99static int swimiop_eject(struct floppy_state *fs);
100
101static int floppy_ioctl(struct inode *inode, struct file *filp,
102 unsigned int cmd, unsigned long param);
103static int floppy_open(struct inode *inode, struct file *filp);
104static int floppy_release(struct inode *inode, struct file *filp);
105static int floppy_check_change(struct gendisk *disk);
106static int floppy_revalidate(struct gendisk *disk);
107static int grab_drive(struct floppy_state *fs, enum swim_state state,
108 int interruptible);
109static void release_drive(struct floppy_state *fs);
110static void set_timeout(struct floppy_state *fs, int nticks,
111 void (*proc)(unsigned long));
112static void fd_request_timeout(unsigned long);
113static void do_fd_request(request_queue_t * q);
114static void start_request(struct floppy_state *fs);
115
116static struct block_device_operations floppy_fops = {
117 .open = floppy_open,
118 .release = floppy_release,
119 .ioctl = floppy_ioctl,
120 .media_changed = floppy_check_change,
121 .revalidate_disk= floppy_revalidate,
122};
123
124static struct request_queue *swim_queue;
125/*
126 * SWIM IOP initialization
127 */
128
129int swimiop_init(void)
130{
131 volatile struct swim_iop_req req;
132 struct swimcmd_status *cmd = (struct swimcmd_status *) &req.command[0];
133 struct swim_drvstatus *ds = &cmd->status;
134 struct floppy_state *fs;
135 int i;
136
137 current_req = NULL;
138 floppy_count = 0;
139
140 if (!iop_ism_present)
141 return -ENODEV;
142
143 if (register_blkdev(FLOPPY_MAJOR, "fd"))
144 return -EBUSY;
145
146 swim_queue = blk_init_queue(do_fd_request, &swim_iop_lock);
147 if (!swim_queue) {
148 unregister_blkdev(FLOPPY_MAJOR, "fd");
149 return -ENOMEM;
150 }
151
152 printk("SWIM-IOP: %s by Joshua M. Thompson (funaho@jurai.org)\n",
153 DRIVER_VERSION);
154
155 if (iop_listen(SWIM_IOP, SWIM_CHAN, swimiop_receive, "SWIM") != 0) {
156 printk(KERN_ERR "SWIM-IOP: IOP channel already in use; can't initialize.\n");
157 unregister_blkdev(FLOPPY_MAJOR, "fd");
158 blk_cleanup_queue(swim_queue);
159 return -EBUSY;
160 }
161
162 printk(KERN_ERR "SWIM_IOP: probing for installed drives.\n");
163
164 for (i = 0 ; i < MAX_FLOPPIES ; i++) {
165 memset(&floppy_states[i], 0, sizeof(struct floppy_state));
166 fs = &floppy_states[floppy_count];
167
168 swimiop_init_request(&req);
169 cmd->code = CMD_STATUS;
170 cmd->drive_num = i + 1;
171 if (swimiop_send_request(&req) != 0) continue;
172 while (!req.complete);
173 if (cmd->error != 0) {
174 printk(KERN_ERR "SWIM-IOP: probe on drive %d returned error %d\n", i, (uint) cmd->error);
175 continue;
176 }
177 if (ds->installed != 0x01) continue;
178 printk("SWIM-IOP: drive %d is %s (%s, %s, %s, %s)\n", i,
179 drive_names[ds->info.type],
180 ds->info.external? "ext" : "int",
181 ds->info.scsi? "scsi" : "floppy",
182 ds->info.fixed? "fixed" : "removable",
183 ds->info.secondary? "secondary" : "primary");
184 swimiop_status_update(floppy_count, ds);
185 fs->state = idle;
186
187 init_timer(&fs->timeout);
188 floppy_count++;
189 }
190 printk("SWIM-IOP: detected %d installed drives.\n", floppy_count);
191
192 for (i = 0; i < floppy_count; i++) {
193 struct gendisk *disk = alloc_disk(1);
194 if (!disk)
195 continue;
196 disk->major = FLOPPY_MAJOR;
197 disk->first_minor = i;
198 disk->fops = &floppy_fops;
199 sprintf(disk->disk_name, "fd%d", i);
200 disk->private_data = &floppy_states[i];
201 disk->queue = swim_queue;
202 set_capacity(disk, 2880 * 2);
203 add_disk(disk);
204 }
205
206 return 0;
207}
208
209static void swimiop_init_request(struct swim_iop_req *req)
210{
211 req->sent = 0;
212 req->complete = 0;
213 req->done = NULL;
214}
215
216static int swimiop_send_request(struct swim_iop_req *req)
217{
218 unsigned long flags;
219 int err;
220
221 /* It's doubtful an interrupt routine would try to send */
222 /* a SWIM request, but I'd rather play it safe here. */
223
224 local_irq_save(flags);
225
226 if (current_req != NULL) {
227 local_irq_restore(flags);
228 return -ENOMEM;
229 }
230
231 current_req = req;
232
233 /* Interrupts should be back on for iop_send_message() */
234
235 local_irq_restore(flags);
236
237 err = iop_send_message(SWIM_IOP, SWIM_CHAN, (void *) req,
238 sizeof(req->command), (__u8 *) &req->command[0],
239 swimiop_receive);
240
241 /* No race condition here; we own current_req at this point */
242
243 if (err) {
244 current_req = NULL;
245 } else {
246 req->sent = 1;
247 }
248 return err;
249}
250
251/*
252 * Receive a SWIM message from the IOP.
253 *
254 * This will be called in two cases:
255 *
256 * 1. A message has been successfully sent to the IOP.
257 * 2. An unsolicited message was received from the IOP.
258 */
259
260void swimiop_receive(struct iop_msg *msg)
261{
262 struct swim_iop_req *req;
263 struct swimmsg_status *sm;
264 struct swim_drvstatus *ds;
265
266 req = current_req;
267
268 switch(msg->status) {
269 case IOP_MSGSTATUS_COMPLETE:
270 memcpy(&req->command[0], &msg->reply[0], sizeof(req->command));
271 req->complete = 1;
272 if (req->done) (*req->done)(req);
273 current_req = NULL;
274 break;
275 case IOP_MSGSTATUS_UNSOL:
276 sm = (struct swimmsg_status *) &msg->message[0];
277 ds = &sm->status;
278 swimiop_status_update(sm->drive_num, ds);
279 iop_complete_message(msg);
280 break;
281 }
282}
283
284static void swimiop_status_update(int drive_num, struct swim_drvstatus *ds)
285{
286 struct floppy_state *fs = &floppy_states[drive_num];
287
288 fs->write_prot = (ds->write_prot == 0x80);
289 if ((ds->disk_in_drive != 0x01) && (ds->disk_in_drive != 0x02)) {
290 fs->ejected = 1;
291 } else {
292 fs->ejected = 0;
293 }
294 switch(ds->info.type) {
295 case DRV_400K:
296 fs->secpercyl = 10;
297 fs->secpertrack = 10;
298 fs->total_secs = 800;
299 break;
300 case DRV_800K:
301 fs->secpercyl = 20;
302 fs->secpertrack = 10;
303 fs->total_secs = 1600;
304 break;
305 case DRV_FDHD:
306 fs->secpercyl = 36;
307 fs->secpertrack = 18;
308 fs->total_secs = 2880;
309 break;
310 default:
311 fs->secpercyl = 0;
312 fs->secpertrack = 0;
313 fs->total_secs = 0;
314 break;
315 }
316}
317
318static int swimiop_eject(struct floppy_state *fs)
319{
320 int err, n;
321 struct swim_iop_req req;
322 struct swimcmd_eject *cmd = (struct swimcmd_eject *) &req.command[0];
323
324 err = grab_drive(fs, ejecting, 1);
325 if (err) return err;
326
327 swimiop_init_request(&req);
328 cmd->code = CMD_EJECT;
329 cmd->drive_num = fs->drive_num;
330 err = swimiop_send_request(&req);
331 if (err) {
332 release_drive(fs);
333 return err;
334 }
335 for (n = 2*HZ; n > 0; --n) {
336 if (req.complete) break;
337 if (signal_pending(current)) {
338 err = -EINTR;
339 break;
340 }
341 schedule_timeout_interruptible(1);
342 }
343 release_drive(fs);
344 return cmd->error;
345}
346
347static struct floppy_struct floppy_type =
348 { 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,NULL }; /* 7 1.44MB 3.5" */
349
350static int floppy_ioctl(struct inode *inode, struct file *filp,
351 unsigned int cmd, unsigned long param)
352{
353 struct floppy_state *fs = inode->i_bdev->bd_disk->private_data;
354 int err;
355
356 if ((cmd & 0x80) && !capable(CAP_SYS_ADMIN))
357 return -EPERM;
358
359 switch (cmd) {
360 case FDEJECT:
361 if (fs->ref_count != 1)
362 return -EBUSY;
363 err = swimiop_eject(fs);
364 return err;
365 case FDGETPRM:
366 if (copy_to_user((void *) param, (void *) &floppy_type,
367 sizeof(struct floppy_struct)))
368 return -EFAULT;
369 return 0;
370 }
371 return -ENOTTY;
372}
373
374static int floppy_open(struct inode *inode, struct file *filp)
375{
376 struct floppy_state *fs = inode->i_bdev->bd_disk->private_data;
377
378 if (fs->ref_count == -1 || filp->f_flags & O_EXCL)
379 return -EBUSY;
380
381 if ((filp->f_flags & O_NDELAY) == 0 && (filp->f_mode & 3)) {
382 check_disk_change(inode->i_bdev);
383 if (fs->ejected)
384 return -ENXIO;
385 }
386
387 if ((filp->f_mode & 2) && fs->write_prot)
388 return -EROFS;
389
390 if (filp->f_flags & O_EXCL)
391 fs->ref_count = -1;
392 else
393 ++fs->ref_count;
394
395 return 0;
396}
397
398static int floppy_release(struct inode *inode, struct file *filp)
399{
400 struct floppy_state *fs = inode->i_bdev->bd_disk->private_data;
401 if (fs->ref_count > 0)
402 fs->ref_count--;
403 return 0;
404}
405
406static int floppy_check_change(struct gendisk *disk)
407{
408 struct floppy_state *fs = disk->private_data;
409 return fs->ejected;
410}
411
412static int floppy_revalidate(struct gendisk *disk)
413{
414 struct floppy_state *fs = disk->private_data;
415 grab_drive(fs, revalidating, 0);
416 /* yadda, yadda */
417 release_drive(fs);
418 return 0;
419}
420
421static void floppy_off(unsigned int nr)
422{
423}
424
425static int grab_drive(struct floppy_state *fs, enum swim_state state,
426 int interruptible)
427{
428 unsigned long flags;
429
430 local_irq_save(flags);
431 if (fs->state != idle) {
432 ++fs->wanted;
433 while (fs->state != available) {
434 if (interruptible && signal_pending(current)) {
435 --fs->wanted;
436 local_irq_restore(flags);
437 return -EINTR;
438 }
439 interruptible_sleep_on(&fs->wait);
440 }
441 --fs->wanted;
442 }
443 fs->state = state;
444 local_irq_restore(flags);
445 return 0;
446}
447
448static void release_drive(struct floppy_state *fs)
449{
450 unsigned long flags;
451
452 local_irq_save(flags);
453 fs->state = idle;
454 start_request(fs);
455 local_irq_restore(flags);
456}
457
458static void set_timeout(struct floppy_state *fs, int nticks,
459 void (*proc)(unsigned long))
460{
461 unsigned long flags;
462
463 local_irq_save(flags);
464 if (fs->timeout_pending)
465 del_timer(&fs->timeout);
466 init_timer(&fs->timeout);
467 fs->timeout.expires = jiffies + nticks;
468 fs->timeout.function = proc;
469 fs->timeout.data = (unsigned long) fs;
470 add_timer(&fs->timeout);
471 fs->timeout_pending = 1;
472 local_irq_restore(flags);
473}
474
475static void do_fd_request(request_queue_t * q)
476{
477 int i;
478
479 for (i = 0 ; i < floppy_count ; i++) {
480 start_request(&floppy_states[i]);
481 }
482}
483
484static void fd_request_complete(struct swim_iop_req *req)
485{
486 struct floppy_state *fs = req->fs;
487 struct swimcmd_rw *cmd = (struct swimcmd_rw *) &req->command[0];
488
489 del_timer(&fs->timeout);
490 fs->timeout_pending = 0;
491 fs->state = idle;
492 if (cmd->error) {
493 printk(KERN_ERR "SWIM-IOP: error %d on read/write request.\n", cmd->error);
494 end_request(CURRENT, 0);
495 } else {
496 CURRENT->sector += cmd->num_blocks;
497 CURRENT->current_nr_sectors -= cmd->num_blocks;
498 if (CURRENT->current_nr_sectors <= 0) {
499 end_request(CURRENT, 1);
500 return;
501 }
502 }
503 start_request(fs);
504}
505
506static void fd_request_timeout(unsigned long data)
507{
508 struct floppy_state *fs = (struct floppy_state *) data;
509
510 fs->timeout_pending = 0;
511 end_request(CURRENT, 0);
512 fs->state = idle;
513}
514
515static void start_request(struct floppy_state *fs)
516{
517 volatile struct swim_iop_req req;
518 struct swimcmd_rw *cmd = (struct swimcmd_rw *) &req.command[0];
519
520 if (fs->state == idle && fs->wanted) {
521 fs->state = available;
522 wake_up(&fs->wait);
523 return;
524 }
525 while (CURRENT && fs->state == idle) {
526 if (CURRENT->bh && !buffer_locked(CURRENT->bh))
527 panic("floppy: block not locked");
528#if 0
529 printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%ld buf=%p\n",
530 CURRENT->rq_disk->disk_name, CURRENT->cmd,
531 CURRENT->sector, CURRENT->nr_sectors, CURRENT->buffer);
532 printk(" errors=%d current_nr_sectors=%ld\n",
533 CURRENT->errors, CURRENT->current_nr_sectors);
534#endif
535
536 if (CURRENT->sector < 0 || CURRENT->sector >= fs->total_secs) {
537 end_request(CURRENT, 0);
538 continue;
539 }
540 if (CURRENT->current_nr_sectors == 0) {
541 end_request(CURRENT, 1);
542 continue;
543 }
544 if (fs->ejected) {
545 end_request(CURRENT, 0);
546 continue;
547 }
548
549 swimiop_init_request(&req);
550 req.fs = fs;
551 req.done = fd_request_complete;
552
553 if (CURRENT->cmd == WRITE) {
554 if (fs->write_prot) {
555 end_request(CURRENT, 0);
556 continue;
557 }
558 cmd->code = CMD_WRITE;
559 } else {
560 cmd->code = CMD_READ;
561
562 }
563 cmd->drive_num = fs->drive_num;
564 cmd->buffer = CURRENT->buffer;
565 cmd->first_block = CURRENT->sector;
566 cmd->num_blocks = CURRENT->current_nr_sectors;
567
568 if (swimiop_send_request(&req)) {
569 end_request(CURRENT, 0);
570 continue;
571 }
572
573 set_timeout(fs, HZ*CURRENT->current_nr_sectors,
574 fd_request_timeout);
575
576 fs->state = transferring;
577 }
578}
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 2df5cf4ec743..e4a2f8f3a1d7 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1810,7 +1810,7 @@ static int dvd_read_disckey(struct cdrom_device_info *cdi, dvd_struct *s)
1810 1810
1811 size = sizeof(s->disckey.value) + 4; 1811 size = sizeof(s->disckey.value) + 4;
1812 1812
1813 if ((buf = (u_char *) kmalloc(size, GFP_KERNEL)) == NULL) 1813 if ((buf = kmalloc(size, GFP_KERNEL)) == NULL)
1814 return -ENOMEM; 1814 return -ENOMEM;
1815 1815
1816 init_cdrom_command(&cgc, buf, size, CGC_DATA_READ); 1816 init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
@@ -1861,7 +1861,7 @@ static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s)
1861 1861
1862 size = sizeof(s->manufact.value) + 4; 1862 size = sizeof(s->manufact.value) + 4;
1863 1863
1864 if ((buf = (u_char *) kmalloc(size, GFP_KERNEL)) == NULL) 1864 if ((buf = kmalloc(size, GFP_KERNEL)) == NULL)
1865 return -ENOMEM; 1865 return -ENOMEM;
1866 1866
1867 init_cdrom_command(&cgc, buf, size, CGC_DATA_READ); 1867 init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
@@ -2849,7 +2849,7 @@ static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
2849 /* FIXME: we need upper bound checking, too!! */ 2849 /* FIXME: we need upper bound checking, too!! */
2850 if (lba < 0) 2850 if (lba < 0)
2851 return -EINVAL; 2851 return -EINVAL;
2852 cgc.buffer = (char *) kmalloc(blocksize, GFP_KERNEL); 2852 cgc.buffer = kmalloc(blocksize, GFP_KERNEL);
2853 if (cgc.buffer == NULL) 2853 if (cgc.buffer == NULL)
2854 return -ENOMEM; 2854 return -ENOMEM;
2855 memset(&sense, 0, sizeof(sense)); 2855 memset(&sense, 0, sizeof(sense));
@@ -3031,7 +3031,7 @@ static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
3031 int size = sizeof(dvd_struct); 3031 int size = sizeof(dvd_struct);
3032 if (!CDROM_CAN(CDC_DVD)) 3032 if (!CDROM_CAN(CDC_DVD))
3033 return -ENOSYS; 3033 return -ENOSYS;
3034 if ((s = (dvd_struct *) kmalloc(size, GFP_KERNEL)) == NULL) 3034 if ((s = kmalloc(size, GFP_KERNEL)) == NULL)
3035 return -ENOMEM; 3035 return -ENOMEM;
3036 cdinfo(CD_DO_IOCTL, "entering DVD_READ_STRUCT\n"); 3036 cdinfo(CD_DO_IOCTL, "entering DVD_READ_STRUCT\n");
3037 if (copy_from_user(s, (dvd_struct __user *)arg, size)) { 3037 if (copy_from_user(s, (dvd_struct __user *)arg, size)) {
diff --git a/drivers/cdrom/cm206.c b/drivers/cdrom/cm206.c
index e6d8e9ededea..b6c61bbb20e1 100644
--- a/drivers/cdrom/cm206.c
+++ b/drivers/cdrom/cm206.c
@@ -1420,7 +1420,7 @@ int __init cm206_init(void)
1420 return -EIO; 1420 return -EIO;
1421 } 1421 }
1422 printk(" adapter at 0x%x", cm206_base); 1422 printk(" adapter at 0x%x", cm206_base);
1423 cd = (struct cm206_struct *) kmalloc(size, GFP_KERNEL); 1423 cd = kmalloc(size, GFP_KERNEL);
1424 if (!cd) 1424 if (!cd)
1425 goto out_base; 1425 goto out_base;
1426 /* Now we have found the adaptor card, try to reset it. As we have 1426 /* Now we have found the adaptor card, try to reset it. As we have
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index b10f4d8fdc7f..9e43e39dc35c 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -97,7 +97,7 @@ config SERIAL_NONSTANDARD
97 97
98config COMPUTONE 98config COMPUTONE
99 tristate "Computone IntelliPort Plus serial support" 99 tristate "Computone IntelliPort Plus serial support"
100 depends on SERIAL_NONSTANDARD 100 depends on SERIAL_NONSTANDARD && (ISA || EISA || PCI)
101 ---help--- 101 ---help---
102 This driver supports the entire family of Intelliport II/Plus 102 This driver supports the entire family of Intelliport II/Plus
103 controllers with the exception of the MicroChannel controllers and 103 controllers with the exception of the MicroChannel controllers and
@@ -203,7 +203,7 @@ config MOXA_SMARTIO
203 203
204config MOXA_SMARTIO_NEW 204config MOXA_SMARTIO_NEW
205 tristate "Moxa SmartIO support v. 2.0 (EXPERIMENTAL)" 205 tristate "Moxa SmartIO support v. 2.0 (EXPERIMENTAL)"
206 depends on SERIAL_NONSTANDARD 206 depends on SERIAL_NONSTANDARD && (PCI || EISA || ISA)
207 help 207 help
208 Say Y here if you have a Moxa SmartIO multiport serial card and/or 208 Say Y here if you have a Moxa SmartIO multiport serial card and/or
209 want to help develop a new version of this driver. 209 want to help develop a new version of this driver.
@@ -218,7 +218,7 @@ config MOXA_SMARTIO_NEW
218 218
219config ISI 219config ISI
220 tristate "Multi-Tech multiport card support (EXPERIMENTAL)" 220 tristate "Multi-Tech multiport card support (EXPERIMENTAL)"
221 depends on SERIAL_NONSTANDARD 221 depends on SERIAL_NONSTANDARD && PCI
222 select FW_LOADER 222 select FW_LOADER
223 help 223 help
224 This is a driver for the Multi-Tech cards which provide several 224 This is a driver for the Multi-Tech cards which provide several
@@ -312,7 +312,7 @@ config SPECIALIX_RTSCTS
312 312
313config SX 313config SX
314 tristate "Specialix SX (and SI) card support" 314 tristate "Specialix SX (and SI) card support"
315 depends on SERIAL_NONSTANDARD 315 depends on SERIAL_NONSTANDARD && (PCI || EISA || ISA)
316 help 316 help
317 This is a driver for the SX and SI multiport serial cards. 317 This is a driver for the SX and SI multiport serial cards.
318 Please read the file <file:Documentation/sx.txt> for details. 318 Please read the file <file:Documentation/sx.txt> for details.
@@ -867,7 +867,7 @@ config SONYPI
867 867
868config TANBAC_TB0219 868config TANBAC_TB0219
869 tristate "TANBAC TB0219 base board support" 869 tristate "TANBAC TB0219 base board support"
870 depends TANBAC_TB022X 870 depends on TANBAC_TB022X
871 select GPIO_VR41XX 871 select GPIO_VR41XX
872 872
873source "drivers/char/agp/Kconfig" 873source "drivers/char/agp/Kconfig"
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index c603bf291580..a9f9c48c2424 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -86,7 +86,7 @@ config AGP_NVIDIA
86 86
87config AGP_SIS 87config AGP_SIS
88 tristate "SiS chipset support" 88 tristate "SiS chipset support"
89 depends on AGP 89 depends on AGP && X86
90 help 90 help
91 This option gives you AGP support for the GLX component of 91 This option gives you AGP support for the GLX component of
92 X on Silicon Integrated Systems [SiS] chipsets. 92 X on Silicon Integrated Systems [SiS] chipsets.
@@ -103,7 +103,7 @@ config AGP_SWORKS
103 103
104config AGP_VIA 104config AGP_VIA
105 tristate "VIA chipset support" 105 tristate "VIA chipset support"
106 depends on AGP 106 depends on AGP && X86
107 help 107 help
108 This option gives you AGP support for the GLX component of 108 This option gives you AGP support for the GLX component of
109 X on VIA MVP3/Apollo Pro chipsets. 109 X on VIA MVP3/Apollo Pro chipsets.
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 5ff457b41efb..883a36a27833 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -419,6 +419,31 @@ static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_
419 *requested_mode &= ~AGP2_RESERVED_MASK; 419 *requested_mode &= ~AGP2_RESERVED_MASK;
420 } 420 }
421 421
422 /*
423 * Some dumb bridges are programmed to disobey the AGP2 spec.
424 * This is likely a BIOS misprogramming rather than poweron default, or
425 * it would be a lot more common.
426 * https://bugs.freedesktop.org/show_bug.cgi?id=8816
427 * AGPv2 spec 6.1.9 states:
428 * The RATE field indicates the data transfer rates supported by this
429 * device. A.G.P. devices must report all that apply.
430 * Fix them up as best we can.
431 */
432 switch (*bridge_agpstat & 7) {
433 case 4:
434 *bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
435 printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate"
436 "Fixing up support for x2 & x1\n");
437 break;
438 case 2:
439 *bridge_agpstat |= AGPSTAT2_1X;
440 printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate"
441 "Fixing up support for x1\n");
442 break;
443 default:
444 break;
445 }
446
422 /* Check the speed bits make sense. Only one should be set. */ 447 /* Check the speed bits make sense. Only one should be set. */
423 tmp = *requested_mode & 7; 448 tmp = *requested_mode & 7;
424 switch (tmp) { 449 switch (tmp) {
diff --git a/drivers/char/consolemap.c b/drivers/char/consolemap.c
index 04a12027a740..b99b7561260d 100644
--- a/drivers/char/consolemap.c
+++ b/drivers/char/consolemap.c
@@ -443,7 +443,7 @@ int con_clear_unimap(struct vc_data *vc, struct unimapinit *ui)
443 p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc; 443 p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc;
444 if (p && p->readonly) return -EIO; 444 if (p && p->readonly) return -EIO;
445 if (!p || --p->refcount) { 445 if (!p || --p->refcount) {
446 q = (struct uni_pagedir *)kmalloc(sizeof(*p), GFP_KERNEL); 446 q = kmalloc(sizeof(*p), GFP_KERNEL);
447 if (!q) { 447 if (!q) {
448 if (p) p->refcount++; 448 if (p) p->refcount++;
449 return -ENOMEM; 449 return -ENOMEM;
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index 6c59baa887a8..e736119b6497 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -37,8 +37,10 @@
37#define BT_DEBUG_ENABLE 1 /* Generic messages */ 37#define BT_DEBUG_ENABLE 1 /* Generic messages */
38#define BT_DEBUG_MSG 2 /* Prints all request/response buffers */ 38#define BT_DEBUG_MSG 2 /* Prints all request/response buffers */
39#define BT_DEBUG_STATES 4 /* Verbose look at state changes */ 39#define BT_DEBUG_STATES 4 /* Verbose look at state changes */
40/* BT_DEBUG_OFF must be zero to correspond to the default uninitialized
41 value */
40 42
41static int bt_debug = BT_DEBUG_OFF; 43static int bt_debug; /* 0 == BT_DEBUG_OFF */
42 44
43module_param(bt_debug, int, 0644); 45module_param(bt_debug, int, 0644);
44MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states"); 46MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index e257835a9a73..ff2d052177cb 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -834,7 +834,7 @@ static const struct file_operations ipmi_fops = {
834 834
835#define DEVICE_NAME "ipmidev" 835#define DEVICE_NAME "ipmidev"
836 836
837static int ipmi_major = 0; 837static int ipmi_major;
838module_param(ipmi_major, int, 0); 838module_param(ipmi_major, int, 0);
839MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device. By" 839MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device. By"
840 " default, or if you set it to zero, it will choose the next" 840 " default, or if you set it to zero, it will choose the next"
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 5703ee28e1cc..4e4691a53890 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -53,10 +53,10 @@
53static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); 53static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
54static int ipmi_init_msghandler(void); 54static int ipmi_init_msghandler(void);
55 55
56static int initialized = 0; 56static int initialized;
57 57
58#ifdef CONFIG_PROC_FS 58#ifdef CONFIG_PROC_FS
59static struct proc_dir_entry *proc_ipmi_root = NULL; 59static struct proc_dir_entry *proc_ipmi_root;
60#endif /* CONFIG_PROC_FS */ 60#endif /* CONFIG_PROC_FS */
61 61
62/* Remain in auto-maintenance mode for this amount of time (in ms). */ 62/* Remain in auto-maintenance mode for this amount of time (in ms). */
@@ -2142,8 +2142,7 @@ cleanup_bmc_device(struct kref *ref)
2142 bmc = container_of(ref, struct bmc_device, refcount); 2142 bmc = container_of(ref, struct bmc_device, refcount);
2143 2143
2144 remove_files(bmc); 2144 remove_files(bmc);
2145 if (bmc->dev) 2145 platform_device_unregister(bmc->dev);
2146 platform_device_unregister(bmc->dev);
2147 kfree(bmc); 2146 kfree(bmc);
2148} 2147}
2149 2148
@@ -2341,8 +2340,7 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
2341 2340
2342 while (ipmi_find_bmc_prod_dev_id(&ipmidriver, 2341 while (ipmi_find_bmc_prod_dev_id(&ipmidriver,
2343 bmc->id.product_id, 2342 bmc->id.product_id,
2344 bmc->id.device_id)) 2343 bmc->id.device_id)) {
2345 {
2346 if (!warn_printed) { 2344 if (!warn_printed) {
2347 printk(KERN_WARNING PFX 2345 printk(KERN_WARNING PFX
2348 "This machine has two different BMCs" 2346 "This machine has two different BMCs"
@@ -4043,7 +4041,7 @@ static void send_panic_events(char *str)
4043} 4041}
4044#endif /* CONFIG_IPMI_PANIC_EVENT */ 4042#endif /* CONFIG_IPMI_PANIC_EVENT */
4045 4043
4046static int has_panicked = 0; 4044static int has_panicked;
4047 4045
4048static int panic_event(struct notifier_block *this, 4046static int panic_event(struct notifier_block *this,
4049 unsigned long event, 4047 unsigned long event,
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index 597eb4f88b84..9d23136e598a 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -58,10 +58,10 @@ static int poweroff_powercycle;
58static int ifnum_to_use = -1; 58static int ifnum_to_use = -1;
59 59
60/* Our local state. */ 60/* Our local state. */
61static int ready = 0; 61static int ready;
62static ipmi_user_t ipmi_user; 62static ipmi_user_t ipmi_user;
63static int ipmi_ifnum; 63static int ipmi_ifnum;
64static void (*specific_poweroff_func)(ipmi_user_t user) = NULL; 64static void (*specific_poweroff_func)(ipmi_user_t user);
65 65
66/* Holds the old poweroff function so we can restore it on removal. */ 66/* Holds the old poweroff function so we can restore it on removal. */
67static void (*old_poweroff_func)(void); 67static void (*old_poweroff_func)(void);
@@ -182,7 +182,7 @@ static int ipmi_request_in_rc_mode(ipmi_user_t user,
182#define IPMI_MOTOROLA_MANUFACTURER_ID 0x0000A1 182#define IPMI_MOTOROLA_MANUFACTURER_ID 0x0000A1
183#define IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID 0x0051 183#define IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID 0x0051
184 184
185static void (*atca_oem_poweroff_hook)(ipmi_user_t user) = NULL; 185static void (*atca_oem_poweroff_hook)(ipmi_user_t user);
186 186
187static void pps_poweroff_atca (ipmi_user_t user) 187static void pps_poweroff_atca (ipmi_user_t user)
188{ 188{
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 81a0c89598e7..f1afd26a509f 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -845,7 +845,7 @@ static void request_events(void *send_info)
845 atomic_set(&smi_info->req_events, 1); 845 atomic_set(&smi_info->req_events, 1);
846} 846}
847 847
848static int initialized = 0; 848static int initialized;
849 849
850static void smi_timeout(unsigned long data) 850static void smi_timeout(unsigned long data)
851{ 851{
@@ -1018,17 +1018,17 @@ static int num_ports;
1018static int irqs[SI_MAX_PARMS]; 1018static int irqs[SI_MAX_PARMS];
1019static int num_irqs; 1019static int num_irqs;
1020static int regspacings[SI_MAX_PARMS]; 1020static int regspacings[SI_MAX_PARMS];
1021static int num_regspacings = 0; 1021static int num_regspacings;
1022static int regsizes[SI_MAX_PARMS]; 1022static int regsizes[SI_MAX_PARMS];
1023static int num_regsizes = 0; 1023static int num_regsizes;
1024static int regshifts[SI_MAX_PARMS]; 1024static int regshifts[SI_MAX_PARMS];
1025static int num_regshifts = 0; 1025static int num_regshifts;
1026static int slave_addrs[SI_MAX_PARMS]; 1026static int slave_addrs[SI_MAX_PARMS];
1027static int num_slave_addrs = 0; 1027static int num_slave_addrs;
1028 1028
1029#define IPMI_IO_ADDR_SPACE 0 1029#define IPMI_IO_ADDR_SPACE 0
1030#define IPMI_MEM_ADDR_SPACE 1 1030#define IPMI_MEM_ADDR_SPACE 1
1031static char *addr_space_to_str[] = { "I/O", "mem" }; 1031static char *addr_space_to_str[] = { "i/o", "mem" };
1032 1032
1033static int hotmod_handler(const char *val, struct kernel_param *kp); 1033static int hotmod_handler(const char *val, struct kernel_param *kp);
1034 1034
@@ -1397,20 +1397,7 @@ static struct hotmod_vals hotmod_as[] = {
1397 { "i/o", IPMI_IO_ADDR_SPACE }, 1397 { "i/o", IPMI_IO_ADDR_SPACE },
1398 { NULL } 1398 { NULL }
1399}; 1399};
1400static int ipmi_strcasecmp(const char *s1, const char *s2) 1400
1401{
1402 while (*s1 || *s2) {
1403 if (!*s1)
1404 return -1;
1405 if (!*s2)
1406 return 1;
1407 if (*s1 != *s2)
1408 return *s1 - *s2;
1409 s1++;
1410 s2++;
1411 }
1412 return 0;
1413}
1414static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr) 1401static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
1415{ 1402{
1416 char *s; 1403 char *s;
@@ -1424,7 +1411,7 @@ static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
1424 *s = '\0'; 1411 *s = '\0';
1425 s++; 1412 s++;
1426 for (i = 0; hotmod_ops[i].name; i++) { 1413 for (i = 0; hotmod_ops[i].name; i++) {
1427 if (ipmi_strcasecmp(*curr, v[i].name) == 0) { 1414 if (strcmp(*curr, v[i].name) == 0) {
1428 *val = v[i].val; 1415 *val = v[i].val;
1429 *curr = s; 1416 *curr = s;
1430 return 0; 1417 return 0;
@@ -1435,10 +1422,34 @@ static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
1435 return -EINVAL; 1422 return -EINVAL;
1436} 1423}
1437 1424
1425static int check_hotmod_int_op(const char *curr, const char *option,
1426 const char *name, int *val)
1427{
1428 char *n;
1429
1430 if (strcmp(curr, name) == 0) {
1431 if (!option) {
1432 printk(KERN_WARNING PFX
1433 "No option given for '%s'\n",
1434 curr);
1435 return -EINVAL;
1436 }
1437 *val = simple_strtoul(option, &n, 0);
1438 if ((*n != '\0') || (*option == '\0')) {
1439 printk(KERN_WARNING PFX
1440 "Bad option given for '%s'\n",
1441 curr);
1442 return -EINVAL;
1443 }
1444 return 1;
1445 }
1446 return 0;
1447}
1448
1438static int hotmod_handler(const char *val, struct kernel_param *kp) 1449static int hotmod_handler(const char *val, struct kernel_param *kp)
1439{ 1450{
1440 char *str = kstrdup(val, GFP_KERNEL); 1451 char *str = kstrdup(val, GFP_KERNEL);
1441 int rv = -EINVAL; 1452 int rv;
1442 char *next, *curr, *s, *n, *o; 1453 char *next, *curr, *s, *n, *o;
1443 enum hotmod_op op; 1454 enum hotmod_op op;
1444 enum si_type si_type; 1455 enum si_type si_type;
@@ -1450,13 +1461,15 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
1450 int irq; 1461 int irq;
1451 int ipmb; 1462 int ipmb;
1452 int ival; 1463 int ival;
1464 int len;
1453 struct smi_info *info; 1465 struct smi_info *info;
1454 1466
1455 if (!str) 1467 if (!str)
1456 return -ENOMEM; 1468 return -ENOMEM;
1457 1469
1458 /* Kill any trailing spaces, as we can get a "\n" from echo. */ 1470 /* Kill any trailing spaces, as we can get a "\n" from echo. */
1459 ival = strlen(str) - 1; 1471 len = strlen(str);
1472 ival = len - 1;
1460 while ((ival >= 0) && isspace(str[ival])) { 1473 while ((ival >= 0) && isspace(str[ival])) {
1461 str[ival] = '\0'; 1474 str[ival] = '\0';
1462 ival--; 1475 ival--;
@@ -1513,35 +1526,37 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
1513 *o = '\0'; 1526 *o = '\0';
1514 o++; 1527 o++;
1515 } 1528 }
1516#define HOTMOD_INT_OPT(name, val) \ 1529 rv = check_hotmod_int_op(curr, o, "rsp", &regspacing);
1517 if (ipmi_strcasecmp(curr, name) == 0) { \ 1530 if (rv < 0)
1518 if (!o) { \
1519 printk(KERN_WARNING PFX \
1520 "No option given for '%s'\n", \
1521 curr); \
1522 goto out; \
1523 } \
1524 val = simple_strtoul(o, &n, 0); \
1525 if ((*n != '\0') || (*o == '\0')) { \
1526 printk(KERN_WARNING PFX \
1527 "Bad option given for '%s'\n", \
1528 curr); \
1529 goto out; \
1530 } \
1531 }
1532
1533 HOTMOD_INT_OPT("rsp", regspacing)
1534 else HOTMOD_INT_OPT("rsi", regsize)
1535 else HOTMOD_INT_OPT("rsh", regshift)
1536 else HOTMOD_INT_OPT("irq", irq)
1537 else HOTMOD_INT_OPT("ipmb", ipmb)
1538 else {
1539 printk(KERN_WARNING PFX
1540 "Invalid hotmod option '%s'\n",
1541 curr);
1542 goto out; 1531 goto out;
1543 } 1532 else if (rv)
1544#undef HOTMOD_INT_OPT 1533 continue;
1534 rv = check_hotmod_int_op(curr, o, "rsi", &regsize);
1535 if (rv < 0)
1536 goto out;
1537 else if (rv)
1538 continue;
1539 rv = check_hotmod_int_op(curr, o, "rsh", &regshift);
1540 if (rv < 0)
1541 goto out;
1542 else if (rv)
1543 continue;
1544 rv = check_hotmod_int_op(curr, o, "irq", &irq);
1545 if (rv < 0)
1546 goto out;
1547 else if (rv)
1548 continue;
1549 rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb);
1550 if (rv < 0)
1551 goto out;
1552 else if (rv)
1553 continue;
1554
1555 rv = -EINVAL;
1556 printk(KERN_WARNING PFX
1557 "Invalid hotmod option '%s'\n",
1558 curr);
1559 goto out;
1545 } 1560 }
1546 1561
1547 if (op == HM_ADD) { 1562 if (op == HM_ADD) {
@@ -1590,6 +1605,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
1590 mutex_unlock(&smi_infos_lock); 1605 mutex_unlock(&smi_infos_lock);
1591 } 1606 }
1592 } 1607 }
1608 rv = len;
1593 out: 1609 out:
1594 kfree(str); 1610 kfree(str);
1595 return rv; 1611 return rv;
@@ -1610,11 +1626,11 @@ static __devinit void hardcode_find_bmc(void)
1610 1626
1611 info->addr_source = "hardcoded"; 1627 info->addr_source = "hardcoded";
1612 1628
1613 if (!si_type[i] || ipmi_strcasecmp(si_type[i], "kcs") == 0) { 1629 if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
1614 info->si_type = SI_KCS; 1630 info->si_type = SI_KCS;
1615 } else if (ipmi_strcasecmp(si_type[i], "smic") == 0) { 1631 } else if (strcmp(si_type[i], "smic") == 0) {
1616 info->si_type = SI_SMIC; 1632 info->si_type = SI_SMIC;
1617 } else if (ipmi_strcasecmp(si_type[i], "bt") == 0) { 1633 } else if (strcmp(si_type[i], "bt") == 0) {
1618 info->si_type = SI_BT; 1634 info->si_type = SI_BT;
1619 } else { 1635 } else {
1620 printk(KERN_WARNING 1636 printk(KERN_WARNING
@@ -1668,7 +1684,7 @@ static __devinit void hardcode_find_bmc(void)
1668/* Once we get an ACPI failure, we don't try any more, because we go 1684/* Once we get an ACPI failure, we don't try any more, because we go
1669 through the tables sequentially. Once we don't find a table, there 1685 through the tables sequentially. Once we don't find a table, there
1670 are no more. */ 1686 are no more. */
1671static int acpi_failure = 0; 1687static int acpi_failure;
1672 1688
1673/* For GPE-type interrupts. */ 1689/* For GPE-type interrupts. */
1674static u32 ipmi_acpi_gpe(void *context) 1690static u32 ipmi_acpi_gpe(void *context)
@@ -1779,7 +1795,6 @@ struct SPMITable {
1779static __devinit int try_init_acpi(struct SPMITable *spmi) 1795static __devinit int try_init_acpi(struct SPMITable *spmi)
1780{ 1796{
1781 struct smi_info *info; 1797 struct smi_info *info;
1782 char *io_type;
1783 u8 addr_space; 1798 u8 addr_space;
1784 1799
1785 if (spmi->IPMIlegacy != 1) { 1800 if (spmi->IPMIlegacy != 1) {
@@ -1843,11 +1858,9 @@ static __devinit int try_init_acpi(struct SPMITable *spmi)
1843 info->io.regshift = spmi->addr.register_bit_offset; 1858 info->io.regshift = spmi->addr.register_bit_offset;
1844 1859
1845 if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { 1860 if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1846 io_type = "memory";
1847 info->io_setup = mem_setup; 1861 info->io_setup = mem_setup;
1848 info->io.addr_type = IPMI_IO_ADDR_SPACE; 1862 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1849 } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) { 1863 } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1850 io_type = "I/O";
1851 info->io_setup = port_setup; 1864 info->io_setup = port_setup;
1852 info->io.addr_type = IPMI_MEM_ADDR_SPACE; 1865 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1853 } else { 1866 } else {
@@ -2773,8 +2786,7 @@ static __devinit int init_ipmi_si(void)
2773#endif 2786#endif
2774 2787
2775#ifdef CONFIG_ACPI 2788#ifdef CONFIG_ACPI
2776 if (si_trydefaults) 2789 acpi_find_bmc();
2777 acpi_find_bmc();
2778#endif 2790#endif
2779 2791
2780#ifdef CONFIG_PCI 2792#ifdef CONFIG_PCI
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 90fb2a541916..78280380a905 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -134,14 +134,14 @@
134 134
135static int nowayout = WATCHDOG_NOWAYOUT; 135static int nowayout = WATCHDOG_NOWAYOUT;
136 136
137static ipmi_user_t watchdog_user = NULL; 137static ipmi_user_t watchdog_user;
138static int watchdog_ifnum; 138static int watchdog_ifnum;
139 139
140/* Default the timeout to 10 seconds. */ 140/* Default the timeout to 10 seconds. */
141static int timeout = 10; 141static int timeout = 10;
142 142
143/* The pre-timeout is disabled by default. */ 143/* The pre-timeout is disabled by default. */
144static int pretimeout = 0; 144static int pretimeout;
145 145
146/* Default action is to reset the board on a timeout. */ 146/* Default action is to reset the board on a timeout. */
147static unsigned char action_val = WDOG_TIMEOUT_RESET; 147static unsigned char action_val = WDOG_TIMEOUT_RESET;
@@ -156,10 +156,10 @@ static unsigned char preop_val = WDOG_PREOP_NONE;
156 156
157static char preop[16] = "preop_none"; 157static char preop[16] = "preop_none";
158static DEFINE_SPINLOCK(ipmi_read_lock); 158static DEFINE_SPINLOCK(ipmi_read_lock);
159static char data_to_read = 0; 159static char data_to_read;
160static DECLARE_WAIT_QUEUE_HEAD(read_q); 160static DECLARE_WAIT_QUEUE_HEAD(read_q);
161static struct fasync_struct *fasync_q = NULL; 161static struct fasync_struct *fasync_q;
162static char pretimeout_since_last_heartbeat = 0; 162static char pretimeout_since_last_heartbeat;
163static char expect_close; 163static char expect_close;
164 164
165static int ifnum_to_use = -1; 165static int ifnum_to_use = -1;
@@ -177,7 +177,7 @@ static void ipmi_unregister_watchdog(int ipmi_intf);
177 177
178/* If true, the driver will start running as soon as it is configured 178/* If true, the driver will start running as soon as it is configured
179 and ready. */ 179 and ready. */
180static int start_now = 0; 180static int start_now;
181 181
182static int set_param_int(const char *val, struct kernel_param *kp) 182static int set_param_int(const char *val, struct kernel_param *kp)
183{ 183{
@@ -300,16 +300,16 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
300static unsigned char ipmi_watchdog_state = WDOG_TIMEOUT_NONE; 300static unsigned char ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
301 301
302/* If shutting down via IPMI, we ignore the heartbeat. */ 302/* If shutting down via IPMI, we ignore the heartbeat. */
303static int ipmi_ignore_heartbeat = 0; 303static int ipmi_ignore_heartbeat;
304 304
305/* Is someone using the watchdog? Only one user is allowed. */ 305/* Is someone using the watchdog? Only one user is allowed. */
306static unsigned long ipmi_wdog_open = 0; 306static unsigned long ipmi_wdog_open;
307 307
308/* If set to 1, the heartbeat command will set the state to reset and 308/* If set to 1, the heartbeat command will set the state to reset and
309 start the timer. The timer doesn't normally run when the driver is 309 start the timer. The timer doesn't normally run when the driver is
310 first opened until the heartbeat is set the first time, this 310 first opened until the heartbeat is set the first time, this
311 variable is used to accomplish this. */ 311 variable is used to accomplish this. */
312static int ipmi_start_timer_on_heartbeat = 0; 312static int ipmi_start_timer_on_heartbeat;
313 313
314/* IPMI version of the BMC. */ 314/* IPMI version of the BMC. */
315static unsigned char ipmi_version_major; 315static unsigned char ipmi_version_major;
diff --git a/drivers/char/lcd.c b/drivers/char/lcd.c
index da601fd6c07a..d649abbf0857 100644
--- a/drivers/char/lcd.c
+++ b/drivers/char/lcd.c
@@ -459,7 +459,7 @@ static int lcd_ioctl(struct inode *inode, struct file *file,
459 (&display, (struct lcd_display *) arg, 459 (&display, (struct lcd_display *) arg,
460 sizeof(struct lcd_display))) 460 sizeof(struct lcd_display)))
461 return -EFAULT; 461 return -EFAULT;
462 rom = (unsigned char *) kmalloc((128), GFP_ATOMIC); 462 rom = kmalloc((128), GFP_ATOMIC);
463 if (rom == NULL) { 463 if (rom == NULL) {
464 printk(KERN_ERR LCD "kmalloc() failed in %s\n", 464 printk(KERN_ERR LCD "kmalloc() failed in %s\n",
465 __FUNCTION__); 465 __FUNCTION__);
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index b70b5388b5a8..b51d08be0bcf 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -525,7 +525,7 @@ static int lp_open(struct inode * inode, struct file * file)
525 return -EIO; 525 return -EIO;
526 } 526 }
527 } 527 }
528 lp_table[minor].lp_buffer = (char *) kmalloc(LP_BUFFER_SIZE, GFP_KERNEL); 528 lp_table[minor].lp_buffer = kmalloc(LP_BUFFER_SIZE, GFP_KERNEL);
529 if (!lp_table[minor].lp_buffer) { 529 if (!lp_table[minor].lp_buffer) {
530 LP_F(minor) &= ~LP_BUSY; 530 LP_F(minor) &= ~LP_BUSY;
531 return -ENOMEM; 531 return -ENOMEM;
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 089020e0ee5a..4f1813e04754 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -646,7 +646,8 @@ static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
646 count = size; 646 count = size;
647 647
648 zap_page_range(vma, addr, count, NULL); 648 zap_page_range(vma, addr, count, NULL);
649 zeromap_page_range(vma, addr, count, PAGE_COPY); 649 if (zeromap_page_range(vma, addr, count, PAGE_COPY))
650 break;
650 651
651 size -= count; 652 size -= count;
652 buf += count; 653 buf += count;
@@ -713,11 +714,14 @@ out:
713 714
714static int mmap_zero(struct file * file, struct vm_area_struct * vma) 715static int mmap_zero(struct file * file, struct vm_area_struct * vma)
715{ 716{
717 int err;
718
716 if (vma->vm_flags & VM_SHARED) 719 if (vma->vm_flags & VM_SHARED)
717 return shmem_zero_setup(vma); 720 return shmem_zero_setup(vma);
718 if (zeromap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot)) 721 err = zeromap_page_range(vma, vma->vm_start,
719 return -EAGAIN; 722 vma->vm_end - vma->vm_start, vma->vm_page_prot);
720 return 0; 723 BUG_ON(err == -EEXIST);
724 return err;
721} 725}
722#else /* CONFIG_MMU */ 726#else /* CONFIG_MMU */
723static ssize_t read_zero(struct file * file, char * buf, 727static ssize_t read_zero(struct file * file, char * buf,
diff --git a/drivers/char/mxser_new.c b/drivers/char/mxser_new.c
index efa8076c33e0..cd989dce7c53 100644
--- a/drivers/char/mxser_new.c
+++ b/drivers/char/mxser_new.c
@@ -315,6 +315,7 @@ static struct mxser_mon_ext mon_data_ext;
315static int mxser_set_baud_method[MXSER_PORTS + 1]; 315static int mxser_set_baud_method[MXSER_PORTS + 1];
316static spinlock_t gm_lock; 316static spinlock_t gm_lock;
317 317
318#ifdef CONFIG_PCI
318static int CheckIsMoxaMust(int io) 319static int CheckIsMoxaMust(int io)
319{ 320{
320 u8 oldmcr, hwid; 321 u8 oldmcr, hwid;
@@ -337,6 +338,7 @@ static int CheckIsMoxaMust(int io)
337 } 338 }
338 return MOXA_OTHER_UART; 339 return MOXA_OTHER_UART;
339} 340}
341#endif
340 342
341static void process_txrx_fifo(struct mxser_port *info) 343static void process_txrx_fifo(struct mxser_port *info)
342{ 344{
@@ -2380,9 +2382,11 @@ static void mxser_release_res(struct mxser_board *brd, struct pci_dev *pdev,
2380 if (irq) 2382 if (irq)
2381 free_irq(brd->irq, brd); 2383 free_irq(brd->irq, brd);
2382 if (pdev != NULL) { /* PCI */ 2384 if (pdev != NULL) { /* PCI */
2385#ifdef CONFIG_PCI
2383 pci_release_region(pdev, 2); 2386 pci_release_region(pdev, 2);
2384 pci_release_region(pdev, 3); 2387 pci_release_region(pdev, 3);
2385 pci_dev_put(pdev); 2388 pci_dev_put(pdev);
2389#endif
2386 } else { 2390 } else {
2387 release_region(brd->ports[0].ioaddr, 8 * brd->info->nports); 2391 release_region(brd->ports[0].ioaddr, 8 * brd->info->nports);
2388 release_region(brd->vector, 1); 2392 release_region(brd->vector, 1);
@@ -2546,6 +2550,7 @@ static int __init mxser_get_ISA_conf(int cap, struct mxser_board *brd)
2546static int __devinit mxser_probe(struct pci_dev *pdev, 2550static int __devinit mxser_probe(struct pci_dev *pdev,
2547 const struct pci_device_id *ent) 2551 const struct pci_device_id *ent)
2548{ 2552{
2553#ifdef CONFIG_PCI
2549 struct mxser_board *brd; 2554 struct mxser_board *brd;
2550 unsigned int i, j; 2555 unsigned int i, j;
2551 unsigned long ioaddress; 2556 unsigned long ioaddress;
@@ -2644,6 +2649,9 @@ err_relio:
2644 brd->info = NULL; 2649 brd->info = NULL;
2645err: 2650err:
2646 return retval; 2651 return retval;
2652#else
2653 return -ENODEV;
2654#endif
2647} 2655}
2648 2656
2649static void __devexit mxser_remove(struct pci_dev *pdev) 2657static void __devexit mxser_remove(struct pci_dev *pdev)
diff --git a/drivers/char/n_r3964.c b/drivers/char/n_r3964.c
index 103d338f21e2..dc6d41841457 100644
--- a/drivers/char/n_r3964.c
+++ b/drivers/char/n_r3964.c
@@ -125,8 +125,8 @@ static void transmit_block(struct r3964_info *pInfo);
125static void receive_char(struct r3964_info *pInfo, const unsigned char c); 125static void receive_char(struct r3964_info *pInfo, const unsigned char c);
126static void receive_error(struct r3964_info *pInfo, const char flag); 126static void receive_error(struct r3964_info *pInfo, const char flag);
127static void on_timeout(unsigned long priv); 127static void on_timeout(unsigned long priv);
128static int enable_signals(struct r3964_info *pInfo, pid_t pid, int arg); 128static int enable_signals(struct r3964_info *pInfo, struct pid *pid, int arg);
129static int read_telegram(struct r3964_info *pInfo, pid_t pid, unsigned char __user *buf); 129static int read_telegram(struct r3964_info *pInfo, struct pid *pid, unsigned char __user *buf);
130static void add_msg(struct r3964_client_info *pClient, int msg_id, int arg, 130static void add_msg(struct r3964_client_info *pClient, int msg_id, int arg,
131 int error_code, struct r3964_block_header *pBlock); 131 int error_code, struct r3964_block_header *pBlock);
132static struct r3964_message* remove_msg(struct r3964_info *pInfo, 132static struct r3964_message* remove_msg(struct r3964_info *pInfo,
@@ -829,7 +829,7 @@ static void on_timeout(unsigned long priv)
829} 829}
830 830
831static struct r3964_client_info *findClient( 831static struct r3964_client_info *findClient(
832 struct r3964_info *pInfo, pid_t pid) 832 struct r3964_info *pInfo, struct pid *pid)
833{ 833{
834 struct r3964_client_info *pClient; 834 struct r3964_client_info *pClient;
835 835
@@ -843,7 +843,7 @@ static struct r3964_client_info *findClient(
843 return NULL; 843 return NULL;
844} 844}
845 845
846static int enable_signals(struct r3964_info *pInfo, pid_t pid, int arg) 846static int enable_signals(struct r3964_info *pInfo, struct pid *pid, int arg)
847{ 847{
848 struct r3964_client_info *pClient; 848 struct r3964_client_info *pClient;
849 struct r3964_client_info **ppClient; 849 struct r3964_client_info **ppClient;
@@ -858,7 +858,7 @@ static int enable_signals(struct r3964_info *pInfo, pid_t pid, int arg)
858 858
859 if(pClient->pid == pid) 859 if(pClient->pid == pid)
860 { 860 {
861 TRACE_PS("removing client %d from client list", pid); 861 TRACE_PS("removing client %d from client list", pid_nr(pid));
862 *ppClient = pClient->next; 862 *ppClient = pClient->next;
863 while(pClient->msg_count) 863 while(pClient->msg_count)
864 { 864 {
@@ -869,6 +869,7 @@ static int enable_signals(struct r3964_info *pInfo, pid_t pid, int arg)
869 TRACE_M("enable_signals - msg kfree %p",pMsg); 869 TRACE_M("enable_signals - msg kfree %p",pMsg);
870 } 870 }
871 } 871 }
872 put_pid(pClient->pid);
872 kfree(pClient); 873 kfree(pClient);
873 TRACE_M("enable_signals - kfree %p",pClient); 874 TRACE_M("enable_signals - kfree %p",pClient);
874 return 0; 875 return 0;
@@ -892,10 +893,10 @@ static int enable_signals(struct r3964_info *pInfo, pid_t pid, int arg)
892 if(pClient==NULL) 893 if(pClient==NULL)
893 return -ENOMEM; 894 return -ENOMEM;
894 895
895 TRACE_PS("add client %d to client list", pid); 896 TRACE_PS("add client %d to client list", pid_nr(pid));
896 spin_lock_init(&pClient->lock); 897 spin_lock_init(&pClient->lock);
897 pClient->sig_flags=arg; 898 pClient->sig_flags=arg;
898 pClient->pid = pid; 899 pClient->pid = get_pid(pid);
899 pClient->next=pInfo->firstClient; 900 pClient->next=pInfo->firstClient;
900 pClient->first_msg = NULL; 901 pClient->first_msg = NULL;
901 pClient->last_msg = NULL; 902 pClient->last_msg = NULL;
@@ -908,7 +909,7 @@ static int enable_signals(struct r3964_info *pInfo, pid_t pid, int arg)
908 return 0; 909 return 0;
909} 910}
910 911
911static int read_telegram(struct r3964_info *pInfo, pid_t pid, unsigned char __user *buf) 912static int read_telegram(struct r3964_info *pInfo, struct pid *pid, unsigned char __user *buf)
912{ 913{
913 struct r3964_client_info *pClient; 914 struct r3964_client_info *pClient;
914 struct r3964_block_header *block; 915 struct r3964_block_header *block;
@@ -1005,7 +1006,7 @@ queue_the_message:
1005 /* Send SIGIO signal to client process: */ 1006 /* Send SIGIO signal to client process: */
1006 if(pClient->sig_flags & R3964_USE_SIGIO) 1007 if(pClient->sig_flags & R3964_USE_SIGIO)
1007 { 1008 {
1008 kill_proc(pClient->pid, SIGIO, 1); 1009 kill_pid(pClient->pid, SIGIO, 1);
1009 } 1010 }
1010} 1011}
1011 1012
@@ -1042,7 +1043,7 @@ static void remove_client_block(struct r3964_info *pInfo,
1042{ 1043{
1043 struct r3964_block_header *block; 1044 struct r3964_block_header *block;
1044 1045
1045 TRACE_PS("remove_client_block PID %d", pClient->pid); 1046 TRACE_PS("remove_client_block PID %d", pid_nr(pClient->pid));
1046 1047
1047 block=pClient->next_block_to_read; 1048 block=pClient->next_block_to_read;
1048 if(block) 1049 if(block)
@@ -1157,6 +1158,7 @@ static void r3964_close(struct tty_struct *tty)
1157 TRACE_M("r3964_close - msg kfree %p",pMsg); 1158 TRACE_M("r3964_close - msg kfree %p",pMsg);
1158 } 1159 }
1159 } 1160 }
1161 put_pid(pClient->pid);
1160 kfree(pClient); 1162 kfree(pClient);
1161 TRACE_M("r3964_close - client kfree %p",pClient); 1163 TRACE_M("r3964_close - client kfree %p",pClient);
1162 pClient=pNext; 1164 pClient=pNext;
@@ -1193,12 +1195,11 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
1193 struct r3964_client_message theMsg; 1195 struct r3964_client_message theMsg;
1194 DECLARE_WAITQUEUE (wait, current); 1196 DECLARE_WAITQUEUE (wait, current);
1195 1197
1196 int pid = current->pid;
1197 int count; 1198 int count;
1198 1199
1199 TRACE_L("read()"); 1200 TRACE_L("read()");
1200 1201
1201 pClient=findClient(pInfo, pid); 1202 pClient=findClient(pInfo, task_pid(current));
1202 if(pClient) 1203 if(pClient)
1203 { 1204 {
1204 pMsg = remove_msg(pInfo, pClient); 1205 pMsg = remove_msg(pInfo, pClient);
@@ -1252,7 +1253,6 @@ static ssize_t r3964_write(struct tty_struct * tty, struct file * file,
1252 struct r3964_block_header *pHeader; 1253 struct r3964_block_header *pHeader;
1253 struct r3964_client_info *pClient; 1254 struct r3964_client_info *pClient;
1254 unsigned char *new_data; 1255 unsigned char *new_data;
1255 int pid;
1256 1256
1257 TRACE_L("write request, %d characters", count); 1257 TRACE_L("write request, %d characters", count);
1258/* 1258/*
@@ -1295,9 +1295,7 @@ static ssize_t r3964_write(struct tty_struct * tty, struct file * file,
1295 pHeader->locks = 0; 1295 pHeader->locks = 0;
1296 pHeader->owner = NULL; 1296 pHeader->owner = NULL;
1297 1297
1298 pid=current->pid; 1298 pClient=findClient(pInfo, task_pid(current));
1299
1300 pClient=findClient(pInfo, pid);
1301 if(pClient) 1299 if(pClient)
1302 { 1300 {
1303 pHeader->owner = pClient; 1301 pHeader->owner = pClient;
@@ -1328,7 +1326,7 @@ static int r3964_ioctl(struct tty_struct * tty, struct file * file,
1328 switch(cmd) 1326 switch(cmd)
1329 { 1327 {
1330 case R3964_ENABLE_SIGNALS: 1328 case R3964_ENABLE_SIGNALS:
1331 return enable_signals(pInfo, current->pid, arg); 1329 return enable_signals(pInfo, task_pid(current), arg);
1332 case R3964_SETPRIORITY: 1330 case R3964_SETPRIORITY:
1333 if(arg<R3964_MASTER || arg>R3964_SLAVE) 1331 if(arg<R3964_MASTER || arg>R3964_SLAVE)
1334 return -EINVAL; 1332 return -EINVAL;
@@ -1341,7 +1339,7 @@ static int r3964_ioctl(struct tty_struct * tty, struct file * file,
1341 pInfo->flags &= ~R3964_BCC; 1339 pInfo->flags &= ~R3964_BCC;
1342 return 0; 1340 return 0;
1343 case R3964_READ_TELEGRAM: 1341 case R3964_READ_TELEGRAM:
1344 return read_telegram(pInfo, current->pid, (unsigned char __user *)arg); 1342 return read_telegram(pInfo, task_pid(current), (unsigned char __user *)arg);
1345 default: 1343 default:
1346 return -ENOIOCTLCMD; 1344 return -ENOIOCTLCMD;
1347 } 1345 }
@@ -1357,7 +1355,6 @@ static unsigned int r3964_poll(struct tty_struct * tty, struct file * file,
1357 struct poll_table_struct *wait) 1355 struct poll_table_struct *wait)
1358{ 1356{
1359 struct r3964_info *pInfo=(struct r3964_info*)tty->disc_data; 1357 struct r3964_info *pInfo=(struct r3964_info*)tty->disc_data;
1360 int pid=current->pid;
1361 struct r3964_client_info *pClient; 1358 struct r3964_client_info *pClient;
1362 struct r3964_message *pMsg=NULL; 1359 struct r3964_message *pMsg=NULL;
1363 unsigned long flags; 1360 unsigned long flags;
@@ -1365,7 +1362,7 @@ static unsigned int r3964_poll(struct tty_struct * tty, struct file * file,
1365 1362
1366 TRACE_L("POLL"); 1363 TRACE_L("POLL");
1367 1364
1368 pClient=findClient(pInfo,pid); 1365 pClient=findClient(pInfo, task_pid(current));
1369 if(pClient) 1366 if(pClient)
1370 { 1367 {
1371 poll_wait(file, &pInfo->read_wait, wait); 1368 poll_wait(file, &pInfo->read_wait, wait);
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
index e96a00fe1389..2bdb0144a22e 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/char/n_tty.c
@@ -1151,7 +1151,6 @@ static int copy_from_read_buf(struct tty_struct *tty,
1151 n = min(*nr, n); 1151 n = min(*nr, n);
1152 spin_unlock_irqrestore(&tty->read_lock, flags); 1152 spin_unlock_irqrestore(&tty->read_lock, flags);
1153 if (n) { 1153 if (n) {
1154 mb();
1155 retval = copy_to_user(*b, &tty->read_buf[tty->read_tail], n); 1154 retval = copy_to_user(*b, &tty->read_buf[tty->read_tail], n);
1156 n -= retval; 1155 n -= retval;
1157 spin_lock_irqsave(&tty->read_lock, flags); 1156 spin_lock_irqsave(&tty->read_lock, flags);
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 5152cedd8878..f108c136800a 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -541,7 +541,7 @@ static int mgslpc_probe(struct pcmcia_device *link)
541 if (debug_level >= DEBUG_LEVEL_INFO) 541 if (debug_level >= DEBUG_LEVEL_INFO)
542 printk("mgslpc_attach\n"); 542 printk("mgslpc_attach\n");
543 543
544 info = (MGSLPC_INFO *)kmalloc(sizeof(MGSLPC_INFO), GFP_KERNEL); 544 info = kmalloc(sizeof(MGSLPC_INFO), GFP_KERNEL);
545 if (!info) { 545 if (!info) {
546 printk("Error can't allocate device instance data\n"); 546 printk("Error can't allocate device instance data\n");
547 return -ENOMEM; 547 return -ENOMEM;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 092a01cc02da..13d0b1350a62 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1203,7 +1203,7 @@ static int proc_do_uuid(ctl_table *table, int write, struct file *filp,
1203 1203
1204static int uuid_strategy(ctl_table *table, int __user *name, int nlen, 1204static int uuid_strategy(ctl_table *table, int __user *name, int nlen,
1205 void __user *oldval, size_t __user *oldlenp, 1205 void __user *oldval, size_t __user *oldlenp,
1206 void __user *newval, size_t newlen, void **context) 1206 void __user *newval, size_t newlen)
1207{ 1207{
1208 unsigned char tmp_uuid[16], *uuid; 1208 unsigned char tmp_uuid[16], *uuid;
1209 unsigned int len; 1209 unsigned int len;
diff --git a/drivers/char/rio/riocmd.c b/drivers/char/rio/riocmd.c
index 167ebc84e8d7..245f03195b7c 100644
--- a/drivers/char/rio/riocmd.c
+++ b/drivers/char/rio/riocmd.c
@@ -556,7 +556,7 @@ struct CmdBlk *RIOGetCmdBlk(void)
556{ 556{
557 struct CmdBlk *CmdBlkP; 557 struct CmdBlk *CmdBlkP;
558 558
559 CmdBlkP = (struct CmdBlk *)kmalloc(sizeof(struct CmdBlk), GFP_ATOMIC); 559 CmdBlkP = kmalloc(sizeof(struct CmdBlk), GFP_ATOMIC);
560 if (CmdBlkP) 560 if (CmdBlkP)
561 memset(CmdBlkP, 0, sizeof(struct CmdBlk)); 561 memset(CmdBlkP, 0, sizeof(struct CmdBlk));
562 return CmdBlkP; 562 return CmdBlkP;
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index 66a7385bc34a..e1d70e8b6268 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -113,7 +113,7 @@ static int rtc_has_irq = 1;
113#define hpet_set_rtc_irq_bit(arg) 0 113#define hpet_set_rtc_irq_bit(arg) 0
114#define hpet_rtc_timer_init() do { } while (0) 114#define hpet_rtc_timer_init() do { } while (0)
115#define hpet_rtc_dropped_irq() 0 115#define hpet_rtc_dropped_irq() 0
116static inline irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) {return 0;} 116static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) {return 0;}
117#else 117#else
118extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id); 118extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id);
119#endif 119#endif
@@ -165,7 +165,9 @@ static void mask_rtc_irq_bit(unsigned char bit)
165} 165}
166#endif 166#endif
167 167
168#ifdef CONFIG_PROC_FS
168static int rtc_proc_open(struct inode *inode, struct file *file); 169static int rtc_proc_open(struct inode *inode, struct file *file);
170#endif
169 171
170/* 172/*
171 * Bits in rtc_status. (6 bits of room for future expansion) 173 * Bits in rtc_status. (6 bits of room for future expansion)
@@ -906,6 +908,7 @@ static struct miscdevice rtc_dev = {
906 .fops = &rtc_fops, 908 .fops = &rtc_fops,
907}; 909};
908 910
911#ifdef CONFIG_PROC_FS
909static const struct file_operations rtc_proc_fops = { 912static const struct file_operations rtc_proc_fops = {
910 .owner = THIS_MODULE, 913 .owner = THIS_MODULE,
911 .open = rtc_proc_open, 914 .open = rtc_proc_open,
@@ -913,14 +916,13 @@ static const struct file_operations rtc_proc_fops = {
913 .llseek = seq_lseek, 916 .llseek = seq_lseek,
914 .release = single_release, 917 .release = single_release,
915}; 918};
916
917#if defined(RTC_IRQ) && !defined(__sparc__)
918static irq_handler_t rtc_int_handler_ptr;
919#endif 919#endif
920 920
921static int __init rtc_init(void) 921static int __init rtc_init(void)
922{ 922{
923#ifdef CONFIG_PROC_FS
923 struct proc_dir_entry *ent; 924 struct proc_dir_entry *ent;
925#endif
924#if defined(__alpha__) || defined(__mips__) 926#if defined(__alpha__) || defined(__mips__)
925 unsigned int year, ctrl; 927 unsigned int year, ctrl;
926 char *guess = NULL; 928 char *guess = NULL;
@@ -932,9 +934,11 @@ static int __init rtc_init(void)
932 struct sparc_isa_bridge *isa_br; 934 struct sparc_isa_bridge *isa_br;
933 struct sparc_isa_device *isa_dev; 935 struct sparc_isa_device *isa_dev;
934#endif 936#endif
935#endif 937#else
936#ifndef __sparc__
937 void *r; 938 void *r;
939#ifdef RTC_IRQ
940 irq_handler_t rtc_int_handler_ptr;
941#endif
938#endif 942#endif
939 943
940#ifdef __sparc__ 944#ifdef __sparc__
@@ -958,6 +962,7 @@ static int __init rtc_init(void)
958 } 962 }
959 } 963 }
960#endif 964#endif
965 rtc_has_irq = 0;
961 printk(KERN_ERR "rtc_init: no PC rtc found\n"); 966 printk(KERN_ERR "rtc_init: no PC rtc found\n");
962 return -EIO; 967 return -EIO;
963 968
@@ -972,6 +977,7 @@ found:
972 * PCI Slot 2 INTA# (and some INTx# in Slot 1). 977 * PCI Slot 2 INTA# (and some INTx# in Slot 1).
973 */ 978 */
974 if (request_irq(rtc_irq, rtc_interrupt, IRQF_SHARED, "rtc", (void *)&rtc_port)) { 979 if (request_irq(rtc_irq, rtc_interrupt, IRQF_SHARED, "rtc", (void *)&rtc_port)) {
980 rtc_has_irq = 0;
975 printk(KERN_ERR "rtc: cannot register IRQ %d\n", rtc_irq); 981 printk(KERN_ERR "rtc: cannot register IRQ %d\n", rtc_irq);
976 return -EIO; 982 return -EIO;
977 } 983 }
@@ -982,6 +988,9 @@ no_irq:
982 else 988 else
983 r = request_mem_region(RTC_PORT(0), RTC_IO_EXTENT, "rtc"); 989 r = request_mem_region(RTC_PORT(0), RTC_IO_EXTENT, "rtc");
984 if (!r) { 990 if (!r) {
991#ifdef RTC_IRQ
992 rtc_has_irq = 0;
993#endif
985 printk(KERN_ERR "rtc: I/O resource %lx is not free.\n", 994 printk(KERN_ERR "rtc: I/O resource %lx is not free.\n",
986 (long)(RTC_PORT(0))); 995 (long)(RTC_PORT(0)));
987 return -EIO; 996 return -EIO;
@@ -996,6 +1005,7 @@ no_irq:
996 1005
997 if(request_irq(RTC_IRQ, rtc_int_handler_ptr, IRQF_DISABLED, "rtc", NULL)) { 1006 if(request_irq(RTC_IRQ, rtc_int_handler_ptr, IRQF_DISABLED, "rtc", NULL)) {
998 /* Yeah right, seeing as irq 8 doesn't even hit the bus. */ 1007 /* Yeah right, seeing as irq 8 doesn't even hit the bus. */
1008 rtc_has_irq = 0;
999 printk(KERN_ERR "rtc: IRQ %d is not free.\n", RTC_IRQ); 1009 printk(KERN_ERR "rtc: IRQ %d is not free.\n", RTC_IRQ);
1000 if (RTC_IOMAPPED) 1010 if (RTC_IOMAPPED)
1001 release_region(RTC_PORT(0), RTC_IO_EXTENT); 1011 release_region(RTC_PORT(0), RTC_IO_EXTENT);
@@ -1012,21 +1022,19 @@ no_irq:
1012 if (misc_register(&rtc_dev)) { 1022 if (misc_register(&rtc_dev)) {
1013#ifdef RTC_IRQ 1023#ifdef RTC_IRQ
1014 free_irq(RTC_IRQ, NULL); 1024 free_irq(RTC_IRQ, NULL);
1025 rtc_has_irq = 0;
1015#endif 1026#endif
1016 release_region(RTC_PORT(0), RTC_IO_EXTENT); 1027 release_region(RTC_PORT(0), RTC_IO_EXTENT);
1017 return -ENODEV; 1028 return -ENODEV;
1018 } 1029 }
1019 1030
1031#ifdef CONFIG_PROC_FS
1020 ent = create_proc_entry("driver/rtc", 0, NULL); 1032 ent = create_proc_entry("driver/rtc", 0, NULL);
1021 if (!ent) { 1033 if (ent)
1022#ifdef RTC_IRQ 1034 ent->proc_fops = &rtc_proc_fops;
1023 free_irq(RTC_IRQ, NULL); 1035 else
1036 printk(KERN_WARNING "rtc: Failed to register with procfs.\n");
1024#endif 1037#endif
1025 release_region(RTC_PORT(0), RTC_IO_EXTENT);
1026 misc_deregister(&rtc_dev);
1027 return -ENOMEM;
1028 }
1029 ent->proc_fops = &rtc_proc_fops;
1030 1038
1031#if defined(__alpha__) || defined(__mips__) 1039#if defined(__alpha__) || defined(__mips__)
1032 rtc_freq = HZ; 1040 rtc_freq = HZ;
@@ -1159,6 +1167,7 @@ static void rtc_dropped_irq(unsigned long data)
1159} 1167}
1160#endif 1168#endif
1161 1169
1170#ifdef CONFIG_PROC_FS
1162/* 1171/*
1163 * Info exported via "/proc/driver/rtc". 1172 * Info exported via "/proc/driver/rtc".
1164 */ 1173 */
@@ -1243,6 +1252,7 @@ static int rtc_proc_open(struct inode *inode, struct file *file)
1243{ 1252{
1244 return single_open(file, rtc_proc_show, NULL); 1253 return single_open(file, rtc_proc_show, NULL);
1245} 1254}
1255#endif
1246 1256
1247void rtc_get_rtc_time(struct rtc_time *rtc_tm) 1257void rtc_get_rtc_time(struct rtc_time *rtc_tm)
1248{ 1258{
diff --git a/drivers/char/sx.c b/drivers/char/sx.c
index a3008ce13015..1da92a689ae4 100644
--- a/drivers/char/sx.c
+++ b/drivers/char/sx.c
@@ -2498,8 +2498,10 @@ static void __devexit sx_remove_card(struct sx_board *board,
2498 /* It is safe/allowed to del_timer a non-active timer */ 2498 /* It is safe/allowed to del_timer a non-active timer */
2499 del_timer(&board->timer); 2499 del_timer(&board->timer);
2500 if (pdev) { 2500 if (pdev) {
2501#ifdef CONFIG_PCI
2501 pci_iounmap(pdev, board->base); 2502 pci_iounmap(pdev, board->base);
2502 pci_release_region(pdev, IS_CF_BOARD(board) ? 3 : 2); 2503 pci_release_region(pdev, IS_CF_BOARD(board) ? 3 : 2);
2504#endif
2503 } else { 2505 } else {
2504 iounmap(board->base); 2506 iounmap(board->base);
2505 release_region(board->hw_base, board->hw_len); 2507 release_region(board->hw_base, board->hw_len);
@@ -2601,6 +2603,7 @@ static struct eisa_driver sx_eisadriver = {
2601 2603
2602#endif 2604#endif
2603 2605
2606#ifdef CONFIG_PCI
2604 /******************************************************** 2607 /********************************************************
2605 * Setting bit 17 in the CNTRL register of the PLX 9050 * 2608 * Setting bit 17 in the CNTRL register of the PLX 9050 *
2606 * chip forces a retry on writes while a read is pending.* 2609 * chip forces a retry on writes while a read is pending.*
@@ -2632,10 +2635,12 @@ static void __devinit fix_sx_pci(struct pci_dev *pdev, struct sx_board *board)
2632 } 2635 }
2633 iounmap(rebase); 2636 iounmap(rebase);
2634} 2637}
2638#endif
2635 2639
2636static int __devinit sx_pci_probe(struct pci_dev *pdev, 2640static int __devinit sx_pci_probe(struct pci_dev *pdev,
2637 const struct pci_device_id *ent) 2641 const struct pci_device_id *ent)
2638{ 2642{
2643#ifdef CONFIG_PCI
2639 struct sx_board *board; 2644 struct sx_board *board;
2640 unsigned int i, reg; 2645 unsigned int i, reg;
2641 int retval = -EIO; 2646 int retval = -EIO;
@@ -2700,6 +2705,9 @@ err_flag:
2700 board->flags &= ~SX_BOARD_PRESENT; 2705 board->flags &= ~SX_BOARD_PRESENT;
2701err: 2706err:
2702 return retval; 2707 return retval;
2708#else
2709 return -ENODEV;
2710#endif
2703} 2711}
2704 2712
2705static void __devexit sx_pci_remove(struct pci_dev *pdev) 2713static void __devexit sx_pci_remove(struct pci_dev *pdev)
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index acc6fab601cc..3fa625db9e4b 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -4332,7 +4332,7 @@ static struct mgsl_struct* mgsl_allocate_device(void)
4332{ 4332{
4333 struct mgsl_struct *info; 4333 struct mgsl_struct *info;
4334 4334
4335 info = (struct mgsl_struct *)kmalloc(sizeof(struct mgsl_struct), 4335 info = kmalloc(sizeof(struct mgsl_struct),
4336 GFP_KERNEL); 4336 GFP_KERNEL);
4337 4337
4338 if (!info) { 4338 if (!info) {
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index 53e8ccf94fe3..8f4d67afe5bf 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -2730,7 +2730,7 @@ static int startup(SLMP_INFO * info)
2730 return 0; 2730 return 0;
2731 2731
2732 if (!info->tx_buf) { 2732 if (!info->tx_buf) {
2733 info->tx_buf = (unsigned char *)kmalloc(info->max_frame_size, GFP_KERNEL); 2733 info->tx_buf = kmalloc(info->max_frame_size, GFP_KERNEL);
2734 if (!info->tx_buf) { 2734 if (!info->tx_buf) {
2735 printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n", 2735 printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
2736 __FILE__,__LINE__,info->device_name); 2736 __FILE__,__LINE__,info->device_name);
@@ -3798,7 +3798,7 @@ static SLMP_INFO *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev)
3798{ 3798{
3799 SLMP_INFO *info; 3799 SLMP_INFO *info;
3800 3800
3801 info = (SLMP_INFO *)kmalloc(sizeof(SLMP_INFO), 3801 info = kmalloc(sizeof(SLMP_INFO),
3802 GFP_KERNEL); 3802 GFP_KERNEL);
3803 3803
3804 if (!info) { 3804 if (!info) {
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 05810c8d20bc..13935235e066 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -41,7 +41,34 @@
41#include <asm/irq_regs.h> 41#include <asm/irq_regs.h>
42 42
43/* Whether we react on sysrq keys or just ignore them */ 43/* Whether we react on sysrq keys or just ignore them */
44int sysrq_enabled = 1; 44int __read_mostly __sysrq_enabled = 1;
45
46static int __read_mostly sysrq_always_enabled;
47
48int sysrq_on(void)
49{
50 return __sysrq_enabled || sysrq_always_enabled;
51}
52
53/*
54 * A value of 1 means 'all', other nonzero values are an op mask:
55 */
56static inline int sysrq_on_mask(int mask)
57{
58 return sysrq_always_enabled || __sysrq_enabled == 1 ||
59 (__sysrq_enabled & mask);
60}
61
62static int __init sysrq_always_enabled_setup(char *str)
63{
64 sysrq_always_enabled = 1;
65 printk(KERN_INFO "debug: sysrq always enabled.\n");
66
67 return 1;
68}
69
70__setup("sysrq_always_enabled", sysrq_always_enabled_setup);
71
45 72
46static void sysrq_handle_loglevel(int key, struct tty_struct *tty) 73static void sysrq_handle_loglevel(int key, struct tty_struct *tty)
47{ 74{
@@ -379,8 +406,7 @@ void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
379 * Should we check for enabled operations (/proc/sysrq-trigger 406 * Should we check for enabled operations (/proc/sysrq-trigger
380 * should not) and is the invoked operation enabled? 407 * should not) and is the invoked operation enabled?
381 */ 408 */
382 if (!check_mask || sysrq_enabled == 1 || 409 if (!check_mask || sysrq_on_mask(op_p->enable_mask)) {
383 (sysrq_enabled & op_p->enable_mask)) {
384 printk("%s\n", op_p->action_msg); 410 printk("%s\n", op_p->action_msg);
385 console_loglevel = orig_log_level; 411 console_loglevel = orig_log_level;
386 op_p->handler(key, tty); 412 op_p->handler(key, tty);
@@ -414,9 +440,8 @@ void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
414 */ 440 */
415void handle_sysrq(int key, struct tty_struct *tty) 441void handle_sysrq(int key, struct tty_struct *tty)
416{ 442{
417 if (!sysrq_enabled) 443 if (sysrq_on())
418 return; 444 __handle_sysrq(key, tty, 1);
419 __handle_sysrq(key, tty, 1);
420} 445}
421EXPORT_SYMBOL(handle_sysrq); 446EXPORT_SYMBOL(handle_sysrq);
422 447
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 4044c864fdd4..47a6eacb10bc 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -3335,18 +3335,13 @@ static void __do_SAK(struct work_struct *work)
3335 int session; 3335 int session;
3336 int i; 3336 int i;
3337 struct file *filp; 3337 struct file *filp;
3338 struct tty_ldisc *disc;
3339 struct fdtable *fdt; 3338 struct fdtable *fdt;
3340 3339
3341 if (!tty) 3340 if (!tty)
3342 return; 3341 return;
3343 session = tty->session; 3342 session = tty->session;
3344 3343
3345 /* We don't want an ldisc switch during this */ 3344 tty_ldisc_flush(tty);
3346 disc = tty_ldisc_ref(tty);
3347 if (disc && disc->flush_buffer)
3348 disc->flush_buffer(tty);
3349 tty_ldisc_deref(disc);
3350 3345
3351 if (tty->driver->flush_buffer) 3346 if (tty->driver->flush_buffer)
3352 tty->driver->flush_buffer(tty); 3347 tty->driver->flush_buffer(tty);
@@ -3821,6 +3816,7 @@ struct tty_struct *get_current_tty(void)
3821 barrier(); 3816 barrier();
3822 return tty; 3817 return tty;
3823} 3818}
3819EXPORT_SYMBOL_GPL(get_current_tty);
3824 3820
3825/* 3821/*
3826 * Initialize the console device. This is called *early*, so 3822 * Initialize the console device. This is called *early*, so
diff --git a/drivers/char/viocons.c b/drivers/char/viocons.c
index 6d2e314860df..0e0da443cbd5 100644
--- a/drivers/char/viocons.c
+++ b/drivers/char/viocons.c
@@ -61,10 +61,7 @@
61static DEFINE_SPINLOCK(consolelock); 61static DEFINE_SPINLOCK(consolelock);
62static DEFINE_SPINLOCK(consoleloglock); 62static DEFINE_SPINLOCK(consoleloglock);
63 63
64#ifdef CONFIG_MAGIC_SYSRQ
65static int vio_sysrq_pressed; 64static int vio_sysrq_pressed;
66extern int sysrq_enabled;
67#endif
68 65
69#define VIOCHAR_NUM_BUF 16 66#define VIOCHAR_NUM_BUF 16
70 67
@@ -936,8 +933,10 @@ static void vioHandleData(struct HvLpEvent *event)
936 */ 933 */
937 num_pushed = 0; 934 num_pushed = 0;
938 for (index = 0; index < cevent->len; index++) { 935 for (index = 0; index < cevent->len; index++) {
939#ifdef CONFIG_MAGIC_SYSRQ 936 /*
940 if (sysrq_enabled) { 937 * Will be optimized away if !CONFIG_MAGIC_SYSRQ:
938 */
939 if (sysrq_on()) {
941 /* 0x0f is the ascii character for ^O */ 940 /* 0x0f is the ascii character for ^O */
942 if (cevent->data[index] == '\x0f') { 941 if (cevent->data[index] == '\x0f') {
943 vio_sysrq_pressed = 1; 942 vio_sysrq_pressed = 1;
@@ -956,7 +955,6 @@ static void vioHandleData(struct HvLpEvent *event)
956 continue; 955 continue;
957 } 956 }
958 } 957 }
959#endif
960 /* 958 /*
961 * The sysrq sequence isn't included in this check if 959 * The sysrq sequence isn't included in this check if
962 * sysrq is enabled and compiled into the kernel because 960 * sysrq is enabled and compiled into the kernel because
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index a8239dac994f..06c32a3e3ca4 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -784,7 +784,7 @@ int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines)
784 if (new_cols == vc->vc_cols && new_rows == vc->vc_rows) 784 if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
785 return 0; 785 return 0;
786 786
787 newscreen = (unsigned short *) kmalloc(new_screen_size, GFP_USER); 787 newscreen = kmalloc(new_screen_size, GFP_USER);
788 if (!newscreen) 788 if (!newscreen)
789 return -ENOMEM; 789 return -ENOMEM;
790 790
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index ac5d60edbafa..dc8368ebb1ac 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -129,7 +129,7 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
129 !capable(CAP_SYS_RESOURCE)) 129 !capable(CAP_SYS_RESOURCE))
130 return -EPERM; 130 return -EPERM;
131 131
132 key_map = (ushort *) kmalloc(sizeof(plain_map), 132 key_map = kmalloc(sizeof(plain_map),
133 GFP_KERNEL); 133 GFP_KERNEL);
134 if (!key_map) 134 if (!key_map)
135 return -ENOMEM; 135 return -ENOMEM;
@@ -259,7 +259,7 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
259 sz = 256; 259 sz = 256;
260 while (sz < funcbufsize - funcbufleft + delta) 260 while (sz < funcbufsize - funcbufleft + delta)
261 sz <<= 1; 261 sz <<= 1;
262 fnw = (char *) kmalloc(sz, GFP_KERNEL); 262 fnw = kmalloc(sz, GFP_KERNEL);
263 if(!fnw) { 263 if(!fnw) {
264 ret = -ENOMEM; 264 ret = -ENOMEM;
265 goto reterr; 265 goto reterr;
@@ -1087,7 +1087,7 @@ static void complete_change_console(struct vc_data *vc)
1087 switch_screen(vc); 1087 switch_screen(vc);
1088 1088
1089 /* 1089 /*
1090 * This can't appear below a successful kill_proc(). If it did, 1090 * This can't appear below a successful kill_pid(). If it did,
1091 * then the *blank_screen operation could occur while X, having 1091 * then the *blank_screen operation could occur while X, having
1092 * received acqsig, is waking up on another processor. This 1092 * received acqsig, is waking up on another processor. This
1093 * condition can lead to overlapping accesses to the VGA range 1093 * condition can lead to overlapping accesses to the VGA range
@@ -1110,7 +1110,7 @@ static void complete_change_console(struct vc_data *vc)
1110 */ 1110 */
1111 if (vc->vt_mode.mode == VT_PROCESS) { 1111 if (vc->vt_mode.mode == VT_PROCESS) {
1112 /* 1112 /*
1113 * Send the signal as privileged - kill_proc() will 1113 * Send the signal as privileged - kill_pid() will
1114 * tell us if the process has gone or something else 1114 * tell us if the process has gone or something else
1115 * is awry 1115 * is awry
1116 */ 1116 */
@@ -1170,7 +1170,7 @@ void change_console(struct vc_data *new_vc)
1170 vc = vc_cons[fg_console].d; 1170 vc = vc_cons[fg_console].d;
1171 if (vc->vt_mode.mode == VT_PROCESS) { 1171 if (vc->vt_mode.mode == VT_PROCESS) {
1172 /* 1172 /*
1173 * Send the signal as privileged - kill_proc() will 1173 * Send the signal as privileged - kill_pid() will
1174 * tell us if the process has gone or something else 1174 * tell us if the process has gone or something else
1175 * is awry 1175 * is awry
1176 */ 1176 */
diff --git a/drivers/char/watchdog/at91rm9200_wdt.c b/drivers/char/watchdog/at91rm9200_wdt.c
index cb86967e2c5f..38bd37372599 100644
--- a/drivers/char/watchdog/at91rm9200_wdt.c
+++ b/drivers/char/watchdog/at91rm9200_wdt.c
@@ -203,9 +203,9 @@ static int __init at91wdt_probe(struct platform_device *pdev)
203{ 203{
204 int res; 204 int res;
205 205
206 if (at91wdt_miscdev.dev) 206 if (at91wdt_miscdev.parent)
207 return -EBUSY; 207 return -EBUSY;
208 at91wdt_miscdev.dev = &pdev->dev; 208 at91wdt_miscdev.parent = &pdev->dev;
209 209
210 res = misc_register(&at91wdt_miscdev); 210 res = misc_register(&at91wdt_miscdev);
211 if (res) 211 if (res)
@@ -221,7 +221,7 @@ static int __exit at91wdt_remove(struct platform_device *pdev)
221 221
222 res = misc_deregister(&at91wdt_miscdev); 222 res = misc_deregister(&at91wdt_miscdev);
223 if (!res) 223 if (!res)
224 at91wdt_miscdev.dev = NULL; 224 at91wdt_miscdev.parent = NULL;
225 225
226 return res; 226 return res;
227} 227}
diff --git a/drivers/char/watchdog/mpcore_wdt.c b/drivers/char/watchdog/mpcore_wdt.c
index 3404a9c67f08..e88947f8fe53 100644
--- a/drivers/char/watchdog/mpcore_wdt.c
+++ b/drivers/char/watchdog/mpcore_wdt.c
@@ -347,7 +347,7 @@ static int __devinit mpcore_wdt_probe(struct platform_device *dev)
347 goto err_free; 347 goto err_free;
348 } 348 }
349 349
350 mpcore_wdt_miscdev.dev = &dev->dev; 350 mpcore_wdt_miscdev.parent = &dev->dev;
351 ret = misc_register(&mpcore_wdt_miscdev); 351 ret = misc_register(&mpcore_wdt_miscdev);
352 if (ret) { 352 if (ret) {
353 dev_printk(KERN_ERR, _dev, "cannot register miscdev on minor=%d (err=%d)\n", 353 dev_printk(KERN_ERR, _dev, "cannot register miscdev on minor=%d (err=%d)\n",
diff --git a/drivers/char/watchdog/omap_wdt.c b/drivers/char/watchdog/omap_wdt.c
index 5dbd7dc2936f..6c6f97332dbb 100644
--- a/drivers/char/watchdog/omap_wdt.c
+++ b/drivers/char/watchdog/omap_wdt.c
@@ -290,7 +290,7 @@ static int __init omap_wdt_probe(struct platform_device *pdev)
290 omap_wdt_disable(); 290 omap_wdt_disable();
291 omap_wdt_adjust_timeout(timer_margin); 291 omap_wdt_adjust_timeout(timer_margin);
292 292
293 omap_wdt_miscdev.dev = &pdev->dev; 293 omap_wdt_miscdev.parent = &pdev->dev;
294 ret = misc_register(&omap_wdt_miscdev); 294 ret = misc_register(&omap_wdt_miscdev);
295 if (ret) 295 if (ret)
296 goto fail; 296 goto fail;
diff --git a/drivers/char/watchdog/pcwd_usb.c b/drivers/char/watchdog/pcwd_usb.c
index 61138726b501..2da5ac99687c 100644
--- a/drivers/char/watchdog/pcwd_usb.c
+++ b/drivers/char/watchdog/pcwd_usb.c
@@ -42,6 +42,7 @@
42#include <asm/uaccess.h> 42#include <asm/uaccess.h>
43#include <linux/usb.h> 43#include <linux/usb.h>
44#include <linux/mutex.h> 44#include <linux/mutex.h>
45#include <linux/hid.h> /* For HID_REQ_SET_REPORT & HID_DT_REPORT */
45 46
46 47
47#ifdef CONFIG_USB_DEBUG 48#ifdef CONFIG_USB_DEBUG
@@ -109,10 +110,6 @@ MODULE_DEVICE_TABLE (usb, usb_pcwd_table);
109#define CMD_ENABLE_WATCHDOG 0x30 /* Enable / Disable Watchdog */ 110#define CMD_ENABLE_WATCHDOG 0x30 /* Enable / Disable Watchdog */
110#define CMD_DISABLE_WATCHDOG CMD_ENABLE_WATCHDOG 111#define CMD_DISABLE_WATCHDOG CMD_ENABLE_WATCHDOG
111 112
112/* Some defines that I like to be somewhere else like include/linux/usb_hid.h */
113#define HID_REQ_SET_REPORT 0x09
114#define HID_DT_REPORT (USB_TYPE_CLASS | 0x02)
115
116/* We can only use 1 card due to the /dev/watchdog restriction */ 113/* We can only use 1 card due to the /dev/watchdog restriction */
117static int cards_found; 114static int cards_found;
118 115
diff --git a/drivers/char/watchdog/rm9k_wdt.c b/drivers/char/watchdog/rm9k_wdt.c
index ec3909371c21..7576a13e86bc 100644
--- a/drivers/char/watchdog/rm9k_wdt.c
+++ b/drivers/char/watchdog/rm9k_wdt.c
@@ -47,7 +47,7 @@
47 47
48 48
49/* Function prototypes */ 49/* Function prototypes */
50static irqreturn_t wdt_gpi_irqhdl(int, void *, struct pt_regs *); 50static irqreturn_t wdt_gpi_irqhdl(int, void *);
51static void wdt_gpi_start(void); 51static void wdt_gpi_start(void);
52static void wdt_gpi_stop(void); 52static void wdt_gpi_stop(void);
53static void wdt_gpi_set_timeout(unsigned int); 53static void wdt_gpi_set_timeout(unsigned int);
@@ -94,8 +94,28 @@ module_param(nowayout, bool, 0444);
94MODULE_PARM_DESC(nowayout, "Watchdog cannot be disabled once started"); 94MODULE_PARM_DESC(nowayout, "Watchdog cannot be disabled once started");
95 95
96 96
97/* Kernel interfaces */
98static struct file_operations fops = {
99 .owner = THIS_MODULE,
100 .open = wdt_gpi_open,
101 .release = wdt_gpi_release,
102 .write = wdt_gpi_write,
103 .unlocked_ioctl = wdt_gpi_ioctl,
104};
105
106static struct miscdevice miscdev = {
107 .minor = WATCHDOG_MINOR,
108 .name = wdt_gpi_name,
109 .fops = &fops,
110};
111
112static struct notifier_block wdt_gpi_shutdown = {
113 .notifier_call = wdt_gpi_notify,
114};
115
116
97/* Interrupt handler */ 117/* Interrupt handler */
98static irqreturn_t wdt_gpi_irqhdl(int irq, void *ctxt, struct pt_regs *regs) 118static irqreturn_t wdt_gpi_irqhdl(int irq, void *ctxt)
99{ 119{
100 if (!unlikely(__raw_readl(wd_regs + 0x0008) & 0x1)) 120 if (!unlikely(__raw_readl(wd_regs + 0x0008) & 0x1))
101 return IRQ_NONE; 121 return IRQ_NONE;
@@ -312,26 +332,6 @@ wdt_gpi_notify(struct notifier_block *this, unsigned long code, void *unused)
312} 332}
313 333
314 334
315/* Kernel interfaces */
316static struct file_operations fops = {
317 .owner = THIS_MODULE,
318 .open = wdt_gpi_open,
319 .release = wdt_gpi_release,
320 .write = wdt_gpi_write,
321 .unlocked_ioctl = wdt_gpi_ioctl,
322};
323
324static struct miscdevice miscdev = {
325 .minor = WATCHDOG_MINOR,
326 .name = wdt_gpi_name,
327 .fops = &fops,
328};
329
330static struct notifier_block wdt_gpi_shutdown = {
331 .notifier_call = wdt_gpi_notify,
332};
333
334
335/* Init & exit procedures */ 335/* Init & exit procedures */
336static const struct resource * 336static const struct resource *
337wdt_gpi_get_resource(struct platform_device *pdv, const char *name, 337wdt_gpi_get_resource(struct platform_device *pdv, const char *name,
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index 8ab61ef97b4c..b6bcdbbf57b3 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -77,11 +77,11 @@ static struct clocksource clocksource_acpi_pm = {
77 77
78 78
79#ifdef CONFIG_PCI 79#ifdef CONFIG_PCI
80static int acpi_pm_good; 80static int __devinitdata acpi_pm_good;
81static int __init acpi_pm_good_setup(char *__str) 81static int __init acpi_pm_good_setup(char *__str)
82{ 82{
83 acpi_pm_good = 1; 83 acpi_pm_good = 1;
84 return 1; 84 return 1;
85} 85}
86__setup("acpi_pm_good", acpi_pm_good_setup); 86__setup("acpi_pm_good", acpi_pm_good_setup);
87 87
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 47ab42db122a..9fb2edf36611 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -29,7 +29,8 @@
29#include <linux/completion.h> 29#include <linux/completion.h>
30#include <linux/mutex.h> 30#include <linux/mutex.h>
31 31
32#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "cpufreq-core", msg) 32#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
33 "cpufreq-core", msg)
33 34
34/** 35/**
35 * The "cpufreq driver" - the arch- or hardware-dependent low 36 * The "cpufreq driver" - the arch- or hardware-dependent low
@@ -151,7 +152,8 @@ static void cpufreq_debug_disable_ratelimit(void)
151 spin_unlock_irqrestore(&disable_ratelimit_lock, flags); 152 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
152} 153}
153 154
154void cpufreq_debug_printk(unsigned int type, const char *prefix, const char *fmt, ...) 155void cpufreq_debug_printk(unsigned int type, const char *prefix,
156 const char *fmt, ...)
155{ 157{
156 char s[256]; 158 char s[256];
157 va_list args; 159 va_list args;
@@ -161,7 +163,8 @@ void cpufreq_debug_printk(unsigned int type, const char *prefix, const char *fmt
161 WARN_ON(!prefix); 163 WARN_ON(!prefix);
162 if (type & debug) { 164 if (type & debug) {
163 spin_lock_irqsave(&disable_ratelimit_lock, flags); 165 spin_lock_irqsave(&disable_ratelimit_lock, flags);
164 if (!disable_ratelimit && debug_ratelimit && !printk_ratelimit()) { 166 if (!disable_ratelimit && debug_ratelimit
167 && !printk_ratelimit()) {
165 spin_unlock_irqrestore(&disable_ratelimit_lock, flags); 168 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
166 return; 169 return;
167 } 170 }
@@ -182,10 +185,12 @@ EXPORT_SYMBOL(cpufreq_debug_printk);
182 185
183 186
184module_param(debug, uint, 0644); 187module_param(debug, uint, 0644);
185MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core, 2 to debug drivers, and 4 to debug governors."); 188MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core,"
189 " 2 to debug drivers, and 4 to debug governors.");
186 190
187module_param(debug_ratelimit, uint, 0644); 191module_param(debug_ratelimit, uint, 0644);
188MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging: set to 0 to disable ratelimiting."); 192MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging:"
193 " set to 0 to disable ratelimiting.");
189 194
190#else /* !CONFIG_CPU_FREQ_DEBUG */ 195#else /* !CONFIG_CPU_FREQ_DEBUG */
191 196
@@ -219,17 +224,23 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
219 if (!l_p_j_ref_freq) { 224 if (!l_p_j_ref_freq) {
220 l_p_j_ref = loops_per_jiffy; 225 l_p_j_ref = loops_per_jiffy;
221 l_p_j_ref_freq = ci->old; 226 l_p_j_ref_freq = ci->old;
222 dprintk("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq); 227 dprintk("saving %lu as reference value for loops_per_jiffy;"
228 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
223 } 229 }
224 if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) || 230 if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) ||
225 (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) || 231 (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
226 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) { 232 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
227 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, ci->new); 233 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
228 dprintk("scaling loops_per_jiffy to %lu for frequency %u kHz\n", loops_per_jiffy, ci->new); 234 ci->new);
235 dprintk("scaling loops_per_jiffy to %lu"
236 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
229 } 237 }
230} 238}
231#else 239#else
232static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) { return; } 240static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
241{
242 return;
243}
233#endif 244#endif
234 245
235 246
@@ -316,7 +327,8 @@ static int cpufreq_parse_governor (char *str_governor, unsigned int *policy,
316 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) { 327 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
317 *policy = CPUFREQ_POLICY_PERFORMANCE; 328 *policy = CPUFREQ_POLICY_PERFORMANCE;
318 err = 0; 329 err = 0;
319 } else if (!strnicmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) { 330 } else if (!strnicmp(str_governor, "powersave",
331 CPUFREQ_NAME_LEN)) {
320 *policy = CPUFREQ_POLICY_POWERSAVE; 332 *policy = CPUFREQ_POLICY_POWERSAVE;
321 err = 0; 333 err = 0;
322 } 334 }
@@ -328,7 +340,8 @@ static int cpufreq_parse_governor (char *str_governor, unsigned int *policy,
328 t = __find_governor(str_governor); 340 t = __find_governor(str_governor);
329 341
330 if (t == NULL) { 342 if (t == NULL) {
331 char *name = kasprintf(GFP_KERNEL, "cpufreq_%s", str_governor); 343 char *name = kasprintf(GFP_KERNEL, "cpufreq_%s",
344 str_governor);
332 345
333 if (name) { 346 if (name) {
334 int ret; 347 int ret;
@@ -361,7 +374,8 @@ extern struct sysdev_class cpu_sysdev_class;
361 374
362 375
363/** 376/**
364 * cpufreq_per_cpu_attr_read() / show_##file_name() - print out cpufreq information 377 * cpufreq_per_cpu_attr_read() / show_##file_name() -
378 * print out cpufreq information
365 * 379 *
366 * Write out information from cpufreq_driver->policy[cpu]; object must be 380 * Write out information from cpufreq_driver->policy[cpu]; object must be
367 * "unsigned int". 381 * "unsigned int".
@@ -380,7 +394,8 @@ show_one(scaling_min_freq, min);
380show_one(scaling_max_freq, max); 394show_one(scaling_max_freq, max);
381show_one(scaling_cur_freq, cur); 395show_one(scaling_cur_freq, cur);
382 396
383static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy); 397static int __cpufreq_set_policy(struct cpufreq_policy *data,
398 struct cpufreq_policy *policy);
384 399
385/** 400/**
386 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access 401 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
@@ -416,7 +431,8 @@ store_one(scaling_max_freq,max);
416/** 431/**
417 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware 432 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
418 */ 433 */
419static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy, char *buf) 434static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy,
435 char *buf)
420{ 436{
421 unsigned int cur_freq = cpufreq_get(policy->cpu); 437 unsigned int cur_freq = cpufreq_get(policy->cpu);
422 if (!cur_freq) 438 if (!cur_freq)
@@ -428,7 +444,8 @@ static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy, char *buf)
428/** 444/**
429 * show_scaling_governor - show the current policy for the specified CPU 445 * show_scaling_governor - show the current policy for the specified CPU
430 */ 446 */
431static ssize_t show_scaling_governor (struct cpufreq_policy * policy, char *buf) 447static ssize_t show_scaling_governor (struct cpufreq_policy * policy,
448 char *buf)
432{ 449{
433 if(policy->policy == CPUFREQ_POLICY_POWERSAVE) 450 if(policy->policy == CPUFREQ_POLICY_POWERSAVE)
434 return sprintf(buf, "powersave\n"); 451 return sprintf(buf, "powersave\n");
@@ -458,7 +475,8 @@ static ssize_t store_scaling_governor (struct cpufreq_policy * policy,
458 if (ret != 1) 475 if (ret != 1)
459 return -EINVAL; 476 return -EINVAL;
460 477
461 if (cpufreq_parse_governor(str_governor, &new_policy.policy, &new_policy.governor)) 478 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
479 &new_policy.governor))
462 return -EINVAL; 480 return -EINVAL;
463 481
464 lock_cpu_hotplug(); 482 lock_cpu_hotplug();
@@ -474,7 +492,10 @@ static ssize_t store_scaling_governor (struct cpufreq_policy * policy,
474 492
475 unlock_cpu_hotplug(); 493 unlock_cpu_hotplug();
476 494
477 return ret ? ret : count; 495 if (ret)
496 return ret;
497 else
498 return count;
478} 499}
479 500
480/** 501/**
@@ -488,7 +509,7 @@ static ssize_t show_scaling_driver (struct cpufreq_policy * policy, char *buf)
488/** 509/**
489 * show_scaling_available_governors - show the available CPUfreq governors 510 * show_scaling_available_governors - show the available CPUfreq governors
490 */ 511 */
491static ssize_t show_scaling_available_governors (struct cpufreq_policy * policy, 512static ssize_t show_scaling_available_governors (struct cpufreq_policy *policy,
492 char *buf) 513 char *buf)
493{ 514{
494 ssize_t i = 0; 515 ssize_t i = 0;
@@ -574,7 +595,11 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf)
574 policy = cpufreq_cpu_get(policy->cpu); 595 policy = cpufreq_cpu_get(policy->cpu);
575 if (!policy) 596 if (!policy)
576 return -EINVAL; 597 return -EINVAL;
577 ret = fattr->show ? fattr->show(policy,buf) : -EIO; 598 if (fattr->show)
599 ret = fattr->show(policy, buf);
600 else
601 ret = -EIO;
602
578 cpufreq_cpu_put(policy); 603 cpufreq_cpu_put(policy);
579 return ret; 604 return ret;
580} 605}
@@ -588,7 +613,11 @@ static ssize_t store(struct kobject * kobj, struct attribute * attr,
588 policy = cpufreq_cpu_get(policy->cpu); 613 policy = cpufreq_cpu_get(policy->cpu);
589 if (!policy) 614 if (!policy)
590 return -EINVAL; 615 return -EINVAL;
591 ret = fattr->store ? fattr->store(policy,buf,count) : -EIO; 616 if (fattr->store)
617 ret = fattr->store(policy, buf, count);
618 else
619 ret = -EIO;
620
592 cpufreq_cpu_put(policy); 621 cpufreq_cpu_put(policy);
593 return ret; 622 return ret;
594} 623}
@@ -913,7 +942,8 @@ static void handle_update(struct work_struct *work)
913 * We adjust to current frequency first, and need to clean up later. So either call 942 * We adjust to current frequency first, and need to clean up later. So either call
914 * to cpufreq_update_policy() or schedule handle_update()). 943 * to cpufreq_update_policy() or schedule handle_update()).
915 */ 944 */
916static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, unsigned int new_freq) 945static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
946 unsigned int new_freq)
917{ 947{
918 struct cpufreq_freqs freqs; 948 struct cpufreq_freqs freqs;
919 949
@@ -938,16 +968,16 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, unsigne
938unsigned int cpufreq_quick_get(unsigned int cpu) 968unsigned int cpufreq_quick_get(unsigned int cpu)
939{ 969{
940 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 970 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
941 unsigned int ret = 0; 971 unsigned int ret_freq = 0;
942 972
943 if (policy) { 973 if (policy) {
944 mutex_lock(&policy->lock); 974 mutex_lock(&policy->lock);
945 ret = policy->cur; 975 ret_freq = policy->cur;
946 mutex_unlock(&policy->lock); 976 mutex_unlock(&policy->lock);
947 cpufreq_cpu_put(policy); 977 cpufreq_cpu_put(policy);
948 } 978 }
949 979
950 return (ret); 980 return (ret_freq);
951} 981}
952EXPORT_SYMBOL(cpufreq_quick_get); 982EXPORT_SYMBOL(cpufreq_quick_get);
953 983
@@ -961,7 +991,7 @@ EXPORT_SYMBOL(cpufreq_quick_get);
961unsigned int cpufreq_get(unsigned int cpu) 991unsigned int cpufreq_get(unsigned int cpu)
962{ 992{
963 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 993 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
964 unsigned int ret = 0; 994 unsigned int ret_freq = 0;
965 995
966 if (!policy) 996 if (!policy)
967 return 0; 997 return 0;
@@ -971,12 +1001,14 @@ unsigned int cpufreq_get(unsigned int cpu)
971 1001
972 mutex_lock(&policy->lock); 1002 mutex_lock(&policy->lock);
973 1003
974 ret = cpufreq_driver->get(cpu); 1004 ret_freq = cpufreq_driver->get(cpu);
975 1005
976 if (ret && policy->cur && !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { 1006 if (ret_freq && policy->cur &&
977 /* verify no discrepancy between actual and saved value exists */ 1007 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
978 if (unlikely(ret != policy->cur)) { 1008 /* verify no discrepancy between actual and
979 cpufreq_out_of_sync(cpu, policy->cur, ret); 1009 saved value exists */
1010 if (unlikely(ret_freq != policy->cur)) {
1011 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
980 schedule_work(&policy->update); 1012 schedule_work(&policy->update);
981 } 1013 }
982 } 1014 }
@@ -986,7 +1018,7 @@ unsigned int cpufreq_get(unsigned int cpu)
986out: 1018out:
987 cpufreq_cpu_put(policy); 1019 cpufreq_cpu_put(policy);
988 1020
989 return (ret); 1021 return (ret_freq);
990} 1022}
991EXPORT_SYMBOL(cpufreq_get); 1023EXPORT_SYMBOL(cpufreq_get);
992 1024
@@ -998,7 +1030,7 @@ EXPORT_SYMBOL(cpufreq_get);
998static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg) 1030static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg)
999{ 1031{
1000 int cpu = sysdev->id; 1032 int cpu = sysdev->id;
1001 unsigned int ret = 0; 1033 int ret = 0;
1002 unsigned int cur_freq = 0; 1034 unsigned int cur_freq = 0;
1003 struct cpufreq_policy *cpu_policy; 1035 struct cpufreq_policy *cpu_policy;
1004 1036
@@ -1080,7 +1112,7 @@ out:
1080static int cpufreq_resume(struct sys_device * sysdev) 1112static int cpufreq_resume(struct sys_device * sysdev)
1081{ 1113{
1082 int cpu = sysdev->id; 1114 int cpu = sysdev->id;
1083 unsigned int ret = 0; 1115 int ret = 0;
1084 struct cpufreq_policy *cpu_policy; 1116 struct cpufreq_policy *cpu_policy;
1085 1117
1086 dprintk("resuming cpu %u\n", cpu); 1118 dprintk("resuming cpu %u\n", cpu);
@@ -1276,22 +1308,45 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
1276} 1308}
1277EXPORT_SYMBOL_GPL(cpufreq_driver_target); 1309EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1278 1310
1311int cpufreq_driver_getavg(struct cpufreq_policy *policy)
1312{
1313 int ret = 0;
1314
1315 policy = cpufreq_cpu_get(policy->cpu);
1316 if (!policy)
1317 return -EINVAL;
1318
1319 mutex_lock(&policy->lock);
1320
1321 if (cpu_online(policy->cpu) && cpufreq_driver->getavg)
1322 ret = cpufreq_driver->getavg(policy->cpu);
1323
1324 mutex_unlock(&policy->lock);
1325
1326 cpufreq_cpu_put(policy);
1327 return ret;
1328}
1329EXPORT_SYMBOL_GPL(cpufreq_driver_getavg);
1330
1279/* 1331/*
1280 * Locking: Must be called with the lock_cpu_hotplug() lock held 1332 * Locking: Must be called with the lock_cpu_hotplug() lock held
1281 * when "event" is CPUFREQ_GOV_LIMITS 1333 * when "event" is CPUFREQ_GOV_LIMITS
1282 */ 1334 */
1283 1335
1284static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event) 1336static int __cpufreq_governor(struct cpufreq_policy *policy,
1337 unsigned int event)
1285{ 1338{
1286 int ret; 1339 int ret;
1287 1340
1288 if (!try_module_get(policy->governor->owner)) 1341 if (!try_module_get(policy->governor->owner))
1289 return -EINVAL; 1342 return -EINVAL;
1290 1343
1291 dprintk("__cpufreq_governor for CPU %u, event %u\n", policy->cpu, event); 1344 dprintk("__cpufreq_governor for CPU %u, event %u\n",
1345 policy->cpu, event);
1292 ret = policy->governor->governor(policy, event); 1346 ret = policy->governor->governor(policy, event);
1293 1347
1294 /* we keep one module reference alive for each CPU governed by this CPU */ 1348 /* we keep one module reference alive for
1349 each CPU governed by this CPU */
1295 if ((event != CPUFREQ_GOV_START) || ret) 1350 if ((event != CPUFREQ_GOV_START) || ret)
1296 module_put(policy->governor->owner); 1351 module_put(policy->governor->owner);
1297 if ((event == CPUFREQ_GOV_STOP) && !ret) 1352 if ((event == CPUFREQ_GOV_STOP) && !ret)
@@ -1367,9 +1422,12 @@ EXPORT_SYMBOL(cpufreq_get_policy);
1367 1422
1368 1423
1369/* 1424/*
1425 * data : current policy.
1426 * policy : policy to be set.
1370 * Locking: Must be called with the lock_cpu_hotplug() lock held 1427 * Locking: Must be called with the lock_cpu_hotplug() lock held
1371 */ 1428 */
1372static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy) 1429static int __cpufreq_set_policy(struct cpufreq_policy *data,
1430 struct cpufreq_policy *policy)
1373{ 1431{
1374 int ret = 0; 1432 int ret = 0;
1375 1433
@@ -1377,7 +1435,8 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_poli
1377 dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu, 1435 dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1378 policy->min, policy->max); 1436 policy->min, policy->max);
1379 1437
1380 memcpy(&policy->cpuinfo, &data->cpuinfo, sizeof(struct cpufreq_cpuinfo)); 1438 memcpy(&policy->cpuinfo, &data->cpuinfo,
1439 sizeof(struct cpufreq_cpuinfo));
1381 1440
1382 if (policy->min > data->min && policy->min > policy->max) { 1441 if (policy->min > data->min && policy->min > policy->max) {
1383 ret = -EINVAL; 1442 ret = -EINVAL;
@@ -1410,7 +1469,8 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_poli
1410 data->min = policy->min; 1469 data->min = policy->min;
1411 data->max = policy->max; 1470 data->max = policy->max;
1412 1471
1413 dprintk("new min and max freqs are %u - %u kHz\n", data->min, data->max); 1472 dprintk("new min and max freqs are %u - %u kHz\n",
1473 data->min, data->max);
1414 1474
1415 if (cpufreq_driver->setpolicy) { 1475 if (cpufreq_driver->setpolicy) {
1416 data->policy = policy->policy; 1476 data->policy = policy->policy;
@@ -1431,10 +1491,12 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_poli
1431 data->governor = policy->governor; 1491 data->governor = policy->governor;
1432 if (__cpufreq_governor(data, CPUFREQ_GOV_START)) { 1492 if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
1433 /* new governor failed, so re-start old one */ 1493 /* new governor failed, so re-start old one */
1434 dprintk("starting governor %s failed\n", data->governor->name); 1494 dprintk("starting governor %s failed\n",
1495 data->governor->name);
1435 if (old_gov) { 1496 if (old_gov) {
1436 data->governor = old_gov; 1497 data->governor = old_gov;
1437 __cpufreq_governor(data, CPUFREQ_GOV_START); 1498 __cpufreq_governor(data,
1499 CPUFREQ_GOV_START);
1438 } 1500 }
1439 ret = -EINVAL; 1501 ret = -EINVAL;
1440 goto error_out; 1502 goto error_out;
@@ -1524,7 +1586,8 @@ int cpufreq_update_policy(unsigned int cpu)
1524 data->cur = policy.cur; 1586 data->cur = policy.cur;
1525 } else { 1587 } else {
1526 if (data->cur != policy.cur) 1588 if (data->cur != policy.cur)
1527 cpufreq_out_of_sync(cpu, data->cur, policy.cur); 1589 cpufreq_out_of_sync(cpu, data->cur,
1590 policy.cur);
1528 } 1591 }
1529 } 1592 }
1530 1593
@@ -1626,8 +1689,10 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1626 1689
1627 /* if all ->init() calls failed, unregister */ 1690 /* if all ->init() calls failed, unregister */
1628 if (ret) { 1691 if (ret) {
1629 dprintk("no CPU initialized for driver %s\n", driver_data->name); 1692 dprintk("no CPU initialized for driver %s\n",
1630 sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver); 1693 driver_data->name);
1694 sysdev_driver_unregister(&cpu_sysdev_class,
1695 &cpufreq_sysdev_driver);
1631 1696
1632 spin_lock_irqsave(&cpufreq_driver_lock, flags); 1697 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1633 cpufreq_driver = NULL; 1698 cpufreq_driver = NULL;
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 5ef5ede5b884..eef0270c6f3d 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -44,15 +44,17 @@
44 * latency of the processor. The governor will work on any processor with 44 * latency of the processor. The governor will work on any processor with
45 * transition latency <= 10mS, using appropriate sampling 45 * transition latency <= 10mS, using appropriate sampling
46 * rate. 46 * rate.
47 * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) 47 * For CPUs with transition latency > 10mS (mostly drivers
48 * this governor will not work. 48 * with CPUFREQ_ETERNAL), this governor will not work.
49 * All times here are in uS. 49 * All times here are in uS.
50 */ 50 */
51static unsigned int def_sampling_rate; 51static unsigned int def_sampling_rate;
52#define MIN_SAMPLING_RATE_RATIO (2) 52#define MIN_SAMPLING_RATE_RATIO (2)
53/* for correct statistics, we need at least 10 ticks between each measure */ 53/* for correct statistics, we need at least 10 ticks between each measure */
54#define MIN_STAT_SAMPLING_RATE (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10)) 54#define MIN_STAT_SAMPLING_RATE \
55#define MIN_SAMPLING_RATE (def_sampling_rate / MIN_SAMPLING_RATE_RATIO) 55 (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
56#define MIN_SAMPLING_RATE \
57 (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
56#define MAX_SAMPLING_RATE (500 * def_sampling_rate) 58#define MAX_SAMPLING_RATE (500 * def_sampling_rate)
57#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000) 59#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
58#define DEF_SAMPLING_DOWN_FACTOR (1) 60#define DEF_SAMPLING_DOWN_FACTOR (1)
@@ -103,11 +105,16 @@ static struct dbs_tuners dbs_tuners_ins = {
103 105
104static inline unsigned int get_cpu_idle_time(unsigned int cpu) 106static inline unsigned int get_cpu_idle_time(unsigned int cpu)
105{ 107{
106 return kstat_cpu(cpu).cpustat.idle + 108 unsigned int add_nice = 0, ret;
109
110 if (dbs_tuners_ins.ignore_nice)
111 add_nice = kstat_cpu(cpu).cpustat.nice;
112
113 ret = kstat_cpu(cpu).cpustat.idle +
107 kstat_cpu(cpu).cpustat.iowait + 114 kstat_cpu(cpu).cpustat.iowait +
108 ( dbs_tuners_ins.ignore_nice ? 115 add_nice;
109 kstat_cpu(cpu).cpustat.nice : 116
110 0); 117 return ret;
111} 118}
112 119
113/************************** sysfs interface ************************/ 120/************************** sysfs interface ************************/
@@ -452,6 +459,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
452 unsigned int cpu = policy->cpu; 459 unsigned int cpu = policy->cpu;
453 struct cpu_dbs_info_s *this_dbs_info; 460 struct cpu_dbs_info_s *this_dbs_info;
454 unsigned int j; 461 unsigned int j;
462 int rc;
455 463
456 this_dbs_info = &per_cpu(cpu_dbs_info, cpu); 464 this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
457 465
@@ -468,6 +476,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
468 break; 476 break;
469 477
470 mutex_lock(&dbs_mutex); 478 mutex_lock(&dbs_mutex);
479
480 rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
481 if (rc) {
482 mutex_unlock(&dbs_mutex);
483 return rc;
484 }
485
471 for_each_cpu_mask(j, policy->cpus) { 486 for_each_cpu_mask(j, policy->cpus) {
472 struct cpu_dbs_info_s *j_dbs_info; 487 struct cpu_dbs_info_s *j_dbs_info;
473 j_dbs_info = &per_cpu(cpu_dbs_info, j); 488 j_dbs_info = &per_cpu(cpu_dbs_info, j);
@@ -480,7 +495,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
480 this_dbs_info->enable = 1; 495 this_dbs_info->enable = 1;
481 this_dbs_info->down_skip = 0; 496 this_dbs_info->down_skip = 0;
482 this_dbs_info->requested_freq = policy->cur; 497 this_dbs_info->requested_freq = policy->cur;
483 sysfs_create_group(&policy->kobj, &dbs_attr_group); 498
484 dbs_enable++; 499 dbs_enable++;
485 /* 500 /*
486 * Start the timerschedule work, when this governor 501 * Start the timerschedule work, when this governor
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index e1cc5113c2ae..f697449327c6 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -41,8 +41,10 @@
41static unsigned int def_sampling_rate; 41static unsigned int def_sampling_rate;
42#define MIN_SAMPLING_RATE_RATIO (2) 42#define MIN_SAMPLING_RATE_RATIO (2)
43/* for correct statistics, we need at least 10 ticks between each measure */ 43/* for correct statistics, we need at least 10 ticks between each measure */
44#define MIN_STAT_SAMPLING_RATE (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10)) 44#define MIN_STAT_SAMPLING_RATE \
45#define MIN_SAMPLING_RATE (def_sampling_rate / MIN_SAMPLING_RATE_RATIO) 45 (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
46#define MIN_SAMPLING_RATE \
47 (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
46#define MAX_SAMPLING_RATE (500 * def_sampling_rate) 48#define MAX_SAMPLING_RATE (500 * def_sampling_rate)
47#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000) 49#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
48#define TRANSITION_LATENCY_LIMIT (10 * 1000) 50#define TRANSITION_LATENCY_LIMIT (10 * 1000)
@@ -206,7 +208,8 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
206 ret = sscanf(buf, "%u", &input); 208 ret = sscanf(buf, "%u", &input);
207 209
208 mutex_lock(&dbs_mutex); 210 mutex_lock(&dbs_mutex);
209 if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) { 211 if (ret != 1 || input > MAX_SAMPLING_RATE
212 || input < MIN_SAMPLING_RATE) {
210 mutex_unlock(&dbs_mutex); 213 mutex_unlock(&dbs_mutex);
211 return -EINVAL; 214 return -EINVAL;
212 } 215 }
@@ -397,8 +400,15 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
397 * policy. To be safe, we focus 10 points under the threshold. 400 * policy. To be safe, we focus 10 points under the threshold.
398 */ 401 */
399 if (load < (dbs_tuners_ins.up_threshold - 10)) { 402 if (load < (dbs_tuners_ins.up_threshold - 10)) {
400 unsigned int freq_next = (policy->cur * load) / 403 unsigned int freq_next, freq_cur;
404
405 freq_cur = cpufreq_driver_getavg(policy);
406 if (!freq_cur)
407 freq_cur = policy->cur;
408
409 freq_next = (freq_cur * load) /
401 (dbs_tuners_ins.up_threshold - 10); 410 (dbs_tuners_ins.up_threshold - 10);
411
402 if (!dbs_tuners_ins.powersave_bias) { 412 if (!dbs_tuners_ins.powersave_bias) {
403 __cpufreq_driver_target(policy, freq_next, 413 __cpufreq_driver_target(policy, freq_next,
404 CPUFREQ_RELATION_L); 414 CPUFREQ_RELATION_L);
@@ -472,6 +482,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
472 unsigned int cpu = policy->cpu; 482 unsigned int cpu = policy->cpu;
473 struct cpu_dbs_info_s *this_dbs_info; 483 struct cpu_dbs_info_s *this_dbs_info;
474 unsigned int j; 484 unsigned int j;
485 int rc;
475 486
476 this_dbs_info = &per_cpu(cpu_dbs_info, cpu); 487 this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
477 488
@@ -494,12 +505,23 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
494 if (dbs_enable == 1) { 505 if (dbs_enable == 1) {
495 kondemand_wq = create_workqueue("kondemand"); 506 kondemand_wq = create_workqueue("kondemand");
496 if (!kondemand_wq) { 507 if (!kondemand_wq) {
497 printk(KERN_ERR "Creation of kondemand failed\n"); 508 printk(KERN_ERR
509 "Creation of kondemand failed\n");
498 dbs_enable--; 510 dbs_enable--;
499 mutex_unlock(&dbs_mutex); 511 mutex_unlock(&dbs_mutex);
500 return -ENOSPC; 512 return -ENOSPC;
501 } 513 }
502 } 514 }
515
516 rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
517 if (rc) {
518 if (dbs_enable == 1)
519 destroy_workqueue(kondemand_wq);
520 dbs_enable--;
521 mutex_unlock(&dbs_mutex);
522 return rc;
523 }
524
503 for_each_cpu_mask(j, policy->cpus) { 525 for_each_cpu_mask(j, policy->cpus) {
504 struct cpu_dbs_info_s *j_dbs_info; 526 struct cpu_dbs_info_s *j_dbs_info;
505 j_dbs_info = &per_cpu(cpu_dbs_info, j); 527 j_dbs_info = &per_cpu(cpu_dbs_info, j);
@@ -509,7 +531,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
509 j_dbs_info->prev_cpu_wall = get_jiffies_64(); 531 j_dbs_info->prev_cpu_wall = get_jiffies_64();
510 } 532 }
511 this_dbs_info->enable = 1; 533 this_dbs_info->enable = 1;
512 sysfs_create_group(&policy->kobj, &dbs_attr_group);
513 /* 534 /*
514 * Start the timerschedule work, when this governor 535 * Start the timerschedule work, when this governor
515 * is used for first time 536 * is used for first time
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c
index de91e3371ef8..e8e1451ef1c1 100644
--- a/drivers/cpufreq/cpufreq_performance.c
+++ b/drivers/cpufreq/cpufreq_performance.c
@@ -15,7 +15,8 @@
15#include <linux/cpufreq.h> 15#include <linux/cpufreq.h>
16#include <linux/init.h> 16#include <linux/init.h>
17 17
18#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "performance", msg) 18#define dprintk(msg...) \
19 cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "performance", msg)
19 20
20 21
21static int cpufreq_governor_performance(struct cpufreq_policy *policy, 22static int cpufreq_governor_performance(struct cpufreq_policy *policy,
@@ -24,8 +25,10 @@ static int cpufreq_governor_performance(struct cpufreq_policy *policy,
24 switch (event) { 25 switch (event) {
25 case CPUFREQ_GOV_START: 26 case CPUFREQ_GOV_START:
26 case CPUFREQ_GOV_LIMITS: 27 case CPUFREQ_GOV_LIMITS:
27 dprintk("setting to %u kHz because of event %u\n", policy->max, event); 28 dprintk("setting to %u kHz because of event %u\n",
28 __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H); 29 policy->max, event);
30 __cpufreq_driver_target(policy, policy->max,
31 CPUFREQ_RELATION_H);
29 break; 32 break;
30 default: 33 default:
31 break; 34 break;
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c
index 0a2596044e65..13fe06b94b0a 100644
--- a/drivers/cpufreq/cpufreq_powersave.c
+++ b/drivers/cpufreq/cpufreq_powersave.c
@@ -15,7 +15,8 @@
15#include <linux/cpufreq.h> 15#include <linux/cpufreq.h>
16#include <linux/init.h> 16#include <linux/init.h>
17 17
18#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "powersave", msg) 18#define dprintk(msg...) \
19 cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "powersave", msg)
19 20
20static int cpufreq_governor_powersave(struct cpufreq_policy *policy, 21static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
21 unsigned int event) 22 unsigned int event)
@@ -23,8 +24,10 @@ static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
23 switch (event) { 24 switch (event) {
24 case CPUFREQ_GOV_START: 25 case CPUFREQ_GOV_START:
25 case CPUFREQ_GOV_LIMITS: 26 case CPUFREQ_GOV_LIMITS:
26 dprintk("setting to %u kHz because of event %u\n", policy->min, event); 27 dprintk("setting to %u kHz because of event %u\n",
27 __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L); 28 policy->min, event);
29 __cpufreq_driver_target(policy, policy->min,
30 CPUFREQ_RELATION_L);
28 break; 31 break;
29 default: 32 default:
30 break; 33 break;
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index c2ecc599dc5f..6742b1adf2c8 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -351,8 +351,8 @@ __init cpufreq_stats_init(void)
351 351
352 register_hotcpu_notifier(&cpufreq_stat_cpu_notifier); 352 register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
353 for_each_online_cpu(cpu) { 353 for_each_online_cpu(cpu) {
354 cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_ONLINE, 354 cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier,
355 (void *)(long)cpu); 355 CPU_ONLINE, (void *)(long)cpu);
356 } 356 }
357 return 0; 357 return 0;
358} 358}
@@ -368,14 +368,15 @@ __exit cpufreq_stats_exit(void)
368 unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier); 368 unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
369 lock_cpu_hotplug(); 369 lock_cpu_hotplug();
370 for_each_online_cpu(cpu) { 370 for_each_online_cpu(cpu) {
371 cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_DEAD, 371 cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier,
372 (void *)(long)cpu); 372 CPU_DEAD, (void *)(long)cpu);
373 } 373 }
374 unlock_cpu_hotplug(); 374 unlock_cpu_hotplug();
375} 375}
376 376
377MODULE_AUTHOR ("Zou Nan hai <nanhai.zou@intel.com>"); 377MODULE_AUTHOR ("Zou Nan hai <nanhai.zou@intel.com>");
378MODULE_DESCRIPTION ("'cpufreq_stats' - A driver to export cpufreq stats through sysfs filesystem"); 378MODULE_DESCRIPTION ("'cpufreq_stats' - A driver to export cpufreq stats"
379 "through sysfs filesystem");
379MODULE_LICENSE ("GPL"); 380MODULE_LICENSE ("GPL");
380 381
381module_init(cpufreq_stats_init); 382module_init(cpufreq_stats_init);
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index a06c204589cd..2a4eb0bfaf30 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -131,19 +131,26 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
131 unsigned int event) 131 unsigned int event)
132{ 132{
133 unsigned int cpu = policy->cpu; 133 unsigned int cpu = policy->cpu;
134 int rc = 0;
135
134 switch (event) { 136 switch (event) {
135 case CPUFREQ_GOV_START: 137 case CPUFREQ_GOV_START:
136 if (!cpu_online(cpu)) 138 if (!cpu_online(cpu))
137 return -EINVAL; 139 return -EINVAL;
138 BUG_ON(!policy->cur); 140 BUG_ON(!policy->cur);
139 mutex_lock(&userspace_mutex); 141 mutex_lock(&userspace_mutex);
142 rc = sysfs_create_file (&policy->kobj,
143 &freq_attr_scaling_setspeed.attr);
144 if (rc)
145 goto start_out;
146
140 cpu_is_managed[cpu] = 1; 147 cpu_is_managed[cpu] = 1;
141 cpu_min_freq[cpu] = policy->min; 148 cpu_min_freq[cpu] = policy->min;
142 cpu_max_freq[cpu] = policy->max; 149 cpu_max_freq[cpu] = policy->max;
143 cpu_cur_freq[cpu] = policy->cur; 150 cpu_cur_freq[cpu] = policy->cur;
144 cpu_set_freq[cpu] = policy->cur; 151 cpu_set_freq[cpu] = policy->cur;
145 sysfs_create_file (&policy->kobj, &freq_attr_scaling_setspeed.attr);
146 dprintk("managing cpu %u started (%u - %u kHz, currently %u kHz)\n", cpu, cpu_min_freq[cpu], cpu_max_freq[cpu], cpu_cur_freq[cpu]); 152 dprintk("managing cpu %u started (%u - %u kHz, currently %u kHz)\n", cpu, cpu_min_freq[cpu], cpu_max_freq[cpu], cpu_cur_freq[cpu]);
153start_out:
147 mutex_unlock(&userspace_mutex); 154 mutex_unlock(&userspace_mutex);
148 break; 155 break;
149 case CPUFREQ_GOV_STOP: 156 case CPUFREQ_GOV_STOP:
@@ -180,7 +187,7 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
180 mutex_unlock(&userspace_mutex); 187 mutex_unlock(&userspace_mutex);
181 break; 188 break;
182 } 189 }
183 return 0; 190 return rc;
184} 191}
185 192
186 193
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 551f4ccf87fd..e7490925fdcf 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -9,7 +9,8 @@
9#include <linux/init.h> 9#include <linux/init.h>
10#include <linux/cpufreq.h> 10#include <linux/cpufreq.h>
11 11
12#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "freq-table", msg) 12#define dprintk(msg...) \
13 cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "freq-table", msg)
13 14
14/********************************************************************* 15/*********************************************************************
15 * FREQUENCY TABLE HELPERS * 16 * FREQUENCY TABLE HELPERS *
@@ -29,7 +30,8 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
29 30
30 continue; 31 continue;
31 } 32 }
32 dprintk("table entry %u: %u kHz, %u index\n", i, freq, table[i].index); 33 dprintk("table entry %u: %u kHz, %u index\n",
34 i, freq, table[i].index);
33 if (freq < min_freq) 35 if (freq < min_freq)
34 min_freq = freq; 36 min_freq = freq;
35 if (freq > max_freq) 37 if (freq > max_freq)
@@ -54,13 +56,14 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
54 unsigned int i; 56 unsigned int i;
55 unsigned int count = 0; 57 unsigned int count = 0;
56 58
57 dprintk("request for verification of policy (%u - %u kHz) for cpu %u\n", policy->min, policy->max, policy->cpu); 59 dprintk("request for verification of policy (%u - %u kHz) for cpu %u\n",
60 policy->min, policy->max, policy->cpu);
58 61
59 if (!cpu_online(policy->cpu)) 62 if (!cpu_online(policy->cpu))
60 return -EINVAL; 63 return -EINVAL;
61 64
62 cpufreq_verify_within_limits(policy, 65 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
63 policy->cpuinfo.min_freq, policy->cpuinfo.max_freq); 66 policy->cpuinfo.max_freq);
64 67
65 for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { 68 for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
66 unsigned int freq = table[i].frequency; 69 unsigned int freq = table[i].frequency;
@@ -75,10 +78,11 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
75 if (!count) 78 if (!count)
76 policy->max = next_larger; 79 policy->max = next_larger;
77 80
78 cpufreq_verify_within_limits(policy, 81 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
79 policy->cpuinfo.min_freq, policy->cpuinfo.max_freq); 82 policy->cpuinfo.max_freq);
80 83
81 dprintk("verification lead to (%u - %u kHz) for cpu %u\n", policy->min, policy->max, policy->cpu); 84 dprintk("verification lead to (%u - %u kHz) for cpu %u\n",
85 policy->min, policy->max, policy->cpu);
82 86
83 return 0; 87 return 0;
84} 88}
@@ -101,7 +105,8 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
101 }; 105 };
102 unsigned int i; 106 unsigned int i;
103 107
104 dprintk("request for target %u kHz (relation: %u) for cpu %u\n", target_freq, relation, policy->cpu); 108 dprintk("request for target %u kHz (relation: %u) for cpu %u\n",
109 target_freq, relation, policy->cpu);
105 110
106 switch (relation) { 111 switch (relation) {
107 case CPUFREQ_RELATION_H: 112 case CPUFREQ_RELATION_H:
@@ -192,7 +197,10 @@ static ssize_t show_available_freqs (struct cpufreq_policy *policy, char *buf)
192} 197}
193 198
194struct freq_attr cpufreq_freq_attr_scaling_available_freqs = { 199struct freq_attr cpufreq_freq_attr_scaling_available_freqs = {
195 .attr = { .name = "scaling_available_frequencies", .mode = 0444, .owner=THIS_MODULE }, 200 .attr = { .name = "scaling_available_frequencies",
201 .mode = 0444,
202 .owner=THIS_MODULE
203 },
196 .show = show_available_freqs, 204 .show = show_available_freqs,
197}; 205};
198EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs); 206EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index e816535ab305..879250d3d069 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -53,7 +53,7 @@ config CRYPTO_DEV_PADLOCK_SHA
53 53
54config CRYPTO_DEV_GEODE 54config CRYPTO_DEV_GEODE
55 tristate "Support for the Geode LX AES engine" 55 tristate "Support for the Geode LX AES engine"
56 depends on CRYPTO && X86_32 56 depends on CRYPTO && X86_32 && PCI
57 select CRYPTO_ALGAPI 57 select CRYPTO_ALGAPI
58 select CRYPTO_BLKCIPHER 58 select CRYPTO_BLKCIPHER
59 default m 59 default m
diff --git a/drivers/fc4/fc.c b/drivers/fc4/fc.c
index ca4e67a022d0..22b62b3cd14e 100644
--- a/drivers/fc4/fc.c
+++ b/drivers/fc4/fc.c
@@ -266,7 +266,7 @@ static void fcp_report_map_done(fc_channel *fc, int i, int status)
266 printk ("FC: Bad magic from REPORT_AL_MAP on %s - %08x\n", fc->name, p->magic); 266 printk ("FC: Bad magic from REPORT_AL_MAP on %s - %08x\n", fc->name, p->magic);
267 fc->state = FC_STATE_OFFLINE; 267 fc->state = FC_STATE_OFFLINE;
268 } else { 268 } else {
269 fc->posmap = (fcp_posmap *)kzalloc(sizeof(fcp_posmap)+p->len, GFP_KERNEL); 269 fc->posmap = kzalloc(sizeof(fcp_posmap)+p->len, GFP_KERNEL);
270 if (!fc->posmap) { 270 if (!fc->posmap) {
271 printk("FC: Not enough memory, offlining channel\n"); 271 printk("FC: Not enough memory, offlining channel\n");
272 fc->state = FC_STATE_OFFLINE; 272 fc->state = FC_STATE_OFFLINE;
@@ -355,7 +355,7 @@ void fcp_register(fc_channel *fc, u8 type, int unregister)
355 for (i = fc->can_queue; i < fc->scsi_bitmap_end; i++) 355 for (i = fc->can_queue; i < fc->scsi_bitmap_end; i++)
356 set_bit (i, fc->scsi_bitmap); 356 set_bit (i, fc->scsi_bitmap);
357 fc->scsi_free = fc->can_queue; 357 fc->scsi_free = fc->can_queue;
358 fc->cmd_slots = (fcp_cmnd **)kzalloc(slots * sizeof(fcp_cmnd*), GFP_KERNEL); 358 fc->cmd_slots = kzalloc(slots * sizeof(fcp_cmnd*), GFP_KERNEL);
359 fc->abort_count = 0; 359 fc->abort_count = 0;
360 } else { 360 } else {
361 fc->scsi_name[0] = 0; 361 fc->scsi_name[0] = 0;
@@ -933,7 +933,7 @@ int fcp_scsi_dev_reset(struct scsi_cmnd *SCpnt)
933 DECLARE_MUTEX_LOCKED(sem); 933 DECLARE_MUTEX_LOCKED(sem);
934 934
935 if (!fc->rst_pkt) { 935 if (!fc->rst_pkt) {
936 fc->rst_pkt = (struct scsi_cmnd *) kmalloc(sizeof(SCpnt), GFP_KERNEL); 936 fc->rst_pkt = kmalloc(sizeof(SCpnt), GFP_KERNEL);
937 if (!fc->rst_pkt) return FAILED; 937 if (!fc->rst_pkt) return FAILED;
938 938
939 fcmd = FCP_CMND(fc->rst_pkt); 939 fcmd = FCP_CMND(fc->rst_pkt);
@@ -1107,7 +1107,7 @@ int fc_do_plogi(fc_channel *fc, unsigned char alpa, fc_wwn *node, fc_wwn *nport)
1107 logi *l; 1107 logi *l;
1108 int status; 1108 int status;
1109 1109
1110 l = (logi *)kzalloc(2 * sizeof(logi), GFP_KERNEL); 1110 l = kzalloc(2 * sizeof(logi), GFP_KERNEL);
1111 if (!l) return -ENOMEM; 1111 if (!l) return -ENOMEM;
1112 l->code = LS_PLOGI; 1112 l->code = LS_PLOGI;
1113 memcpy (&l->nport_wwn, &fc->wwn_nport, sizeof(fc_wwn)); 1113 memcpy (&l->nport_wwn, &fc->wwn_nport, sizeof(fc_wwn));
@@ -1141,7 +1141,7 @@ int fc_do_prli(fc_channel *fc, unsigned char alpa)
1141 prli *p; 1141 prli *p;
1142 int status; 1142 int status;
1143 1143
1144 p = (prli *)kzalloc(2 * sizeof(prli), GFP_KERNEL); 1144 p = kzalloc(2 * sizeof(prli), GFP_KERNEL);
1145 if (!p) return -ENOMEM; 1145 if (!p) return -ENOMEM;
1146 p->code = LS_PRLI; 1146 p->code = LS_PRLI;
1147 p->params[0] = 0x08002000; 1147 p->params[0] = 0x08002000;
diff --git a/drivers/i2c/algos/Kconfig b/drivers/i2c/algos/Kconfig
index c034820615bb..af0203409dd1 100644
--- a/drivers/i2c/algos/Kconfig
+++ b/drivers/i2c/algos/Kconfig
@@ -38,17 +38,6 @@ config I2C_ALGOPCA
38 This support is also available as a module. If so, the module 38 This support is also available as a module. If so, the module
39 will be called i2c-algo-pca. 39 will be called i2c-algo-pca.
40 40
41config I2C_ALGOITE
42 tristate "ITE I2C Algorithm"
43 depends on MIPS_ITE8172 && I2C
44 help
45 This supports the use of the ITE8172 I2C interface found on some MIPS
46 systems. Say Y if you have one of these. You should also say Y for
47 the ITE I2C peripheral driver support below.
48
49 This support is also available as a module. If so, the module
50 will be called i2c-algo-ite.
51
52config I2C_ALGO8XX 41config I2C_ALGO8XX
53 tristate "MPC8xx CPM I2C interface" 42 tristate "MPC8xx CPM I2C interface"
54 depends on 8xx && I2C 43 depends on 8xx && I2C
diff --git a/drivers/i2c/algos/Makefile b/drivers/i2c/algos/Makefile
index 208be04a3dbd..cac1051bd4f1 100644
--- a/drivers/i2c/algos/Makefile
+++ b/drivers/i2c/algos/Makefile
@@ -5,7 +5,6 @@
5obj-$(CONFIG_I2C_ALGOBIT) += i2c-algo-bit.o 5obj-$(CONFIG_I2C_ALGOBIT) += i2c-algo-bit.o
6obj-$(CONFIG_I2C_ALGOPCF) += i2c-algo-pcf.o 6obj-$(CONFIG_I2C_ALGOPCF) += i2c-algo-pcf.o
7obj-$(CONFIG_I2C_ALGOPCA) += i2c-algo-pca.o 7obj-$(CONFIG_I2C_ALGOPCA) += i2c-algo-pca.o
8obj-$(CONFIG_I2C_ALGOITE) += i2c-algo-ite.o
9obj-$(CONFIG_I2C_ALGO_SGI) += i2c-algo-sgi.o 8obj-$(CONFIG_I2C_ALGO_SGI) += i2c-algo-sgi.o
10 9
11ifeq ($(CONFIG_I2C_DEBUG_ALGO),y) 10ifeq ($(CONFIG_I2C_DEBUG_ALGO),y)
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index 21c36bfb5e6b..95aa5395a5be 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -540,15 +540,7 @@ int i2c_bit_add_bus(struct i2c_adapter *adap)
540 540
541 return i2c_add_adapter(adap); 541 return i2c_add_adapter(adap);
542} 542}
543
544
545int i2c_bit_del_bus(struct i2c_adapter *adap)
546{
547 return i2c_del_adapter(adap);
548}
549
550EXPORT_SYMBOL(i2c_bit_add_bus); 543EXPORT_SYMBOL(i2c_bit_add_bus);
551EXPORT_SYMBOL(i2c_bit_del_bus);
552 544
553MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>"); 545MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>");
554MODULE_DESCRIPTION("I2C-Bus bit-banging algorithm"); 546MODULE_DESCRIPTION("I2C-Bus bit-banging algorithm");
diff --git a/drivers/i2c/algos/i2c-algo-ite.c b/drivers/i2c/algos/i2c-algo-ite.c
deleted file mode 100644
index 70d8eefb5efc..000000000000
--- a/drivers/i2c/algos/i2c-algo-ite.c
+++ /dev/null
@@ -1,806 +0,0 @@
1/*
2 -------------------------------------------------------------------------
3 i2c-algo-ite.c i2c driver algorithms for ITE adapters
4
5 Hai-Pao Fan, MontaVista Software, Inc.
6 hpfan@mvista.com or source@mvista.com
7
8 Copyright 2000 MontaVista Software Inc.
9
10 ---------------------------------------------------------------------------
11 This file was highly leveraged from i2c-algo-pcf.c, which was created
12 by Simon G. Vogl and Hans Berglund:
13
14
15 Copyright (C) 1995-1997 Simon G. Vogl
16 1998-2000 Hans Berglund
17
18 This program is free software; you can redistribute it and/or modify
19 it under the terms of the GNU General Public License as published by
20 the Free Software Foundation; either version 2 of the License, or
21 (at your option) any later version.
22
23 This program is distributed in the hope that it will be useful,
24 but WITHOUT ANY WARRANTY; without even the implied warranty of
25 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 GNU General Public License for more details.
27
28 You should have received a copy of the GNU General Public License
29 along with this program; if not, write to the Free Software
30 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
31/* ------------------------------------------------------------------------- */
32
33/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and
34 Frodo Looijaard <frodol@dds.nl> ,and also from Martin Bailey
35 <mbailey@littlefeet-inc.com> */
36
37#include <linux/kernel.h>
38#include <linux/module.h>
39#include <linux/delay.h>
40#include <linux/slab.h>
41#include <linux/init.h>
42#include <asm/uaccess.h>
43#include <linux/ioport.h>
44#include <linux/errno.h>
45#include <linux/sched.h>
46
47#include <linux/i2c.h>
48#include <linux/i2c-algo-ite.h>
49#include "i2c-algo-ite.h"
50
51#define PM_DSR IT8172_PCI_IO_BASE + IT_PM_DSR
52#define PM_IBSR IT8172_PCI_IO_BASE + IT_PM_DSR + 0x04
53#define GPIO_CCR IT8172_PCI_IO_BASE + IT_GPCCR
54
55#define DEB2(x) if (i2c_debug>=2) x
56#define DEB3(x) if (i2c_debug>=3) x /* print several statistical values*/
57#define DEF_TIMEOUT 16
58
59
60/* module parameters:
61 */
62static int i2c_debug;
63static int iic_test; /* see if the line-setting functions work */
64
65/* --- setting states on the bus with the right timing: --------------- */
66
67#define get_clock(adap) adap->getclock(adap->data)
68#define iic_outw(adap, reg, val) adap->setiic(adap->data, reg, val)
69#define iic_inw(adap, reg) adap->getiic(adap->data, reg)
70
71
72/* --- other auxiliary functions -------------------------------------- */
73
74static void iic_start(struct i2c_algo_iic_data *adap)
75{
76 iic_outw(adap,ITE_I2CHCR,ITE_CMD);
77}
78
79static void iic_stop(struct i2c_algo_iic_data *adap)
80{
81 iic_outw(adap,ITE_I2CHCR,0);
82 iic_outw(adap,ITE_I2CHSR,ITE_I2CHSR_TDI);
83}
84
85static void iic_reset(struct i2c_algo_iic_data *adap)
86{
87 iic_outw(adap, PM_IBSR, iic_inw(adap, PM_IBSR) | 0x80);
88}
89
90
91static int wait_for_bb(struct i2c_algo_iic_data *adap)
92{
93 int timeout = DEF_TIMEOUT;
94 short status;
95
96 status = iic_inw(adap, ITE_I2CHSR);
97#ifndef STUB_I2C
98 while (timeout-- && (status & ITE_I2CHSR_HB)) {
99 udelay(1000); /* How much is this? */
100 status = iic_inw(adap, ITE_I2CHSR);
101 }
102#endif
103 if (timeout<=0) {
104 printk(KERN_ERR "Timeout, host is busy\n");
105 iic_reset(adap);
106 }
107 return(timeout<=0);
108}
109
110/* After we issue a transaction on the IIC bus, this function
111 * is called. It puts this process to sleep until we get an interrupt from
112 * from the controller telling us that the transaction we requested in complete.
113 */
114static int wait_for_pin(struct i2c_algo_iic_data *adap, short *status) {
115
116 int timeout = DEF_TIMEOUT;
117
118 timeout = wait_for_bb(adap);
119 if (timeout) {
120 DEB2(printk("Timeout waiting for host not busy\n");)
121 return -EIO;
122 }
123 timeout = DEF_TIMEOUT;
124
125 *status = iic_inw(adap, ITE_I2CHSR);
126#ifndef STUB_I2C
127 while (timeout-- && !(*status & ITE_I2CHSR_TDI)) {
128 adap->waitforpin();
129 *status = iic_inw(adap, ITE_I2CHSR);
130 }
131#endif
132 if (timeout <= 0)
133 return(-1);
134 else
135 return(0);
136}
137
138static int wait_for_fe(struct i2c_algo_iic_data *adap, short *status)
139{
140 int timeout = DEF_TIMEOUT;
141
142 *status = iic_inw(adap, ITE_I2CFSR);
143#ifndef STUB_I2C
144 while (timeout-- && (*status & ITE_I2CFSR_FE)) {
145 udelay(1000);
146 iic_inw(adap, ITE_I2CFSR);
147 }
148#endif
149 if (timeout <= 0)
150 return(-1);
151 else
152 return(0);
153}
154
155static int iic_init (struct i2c_algo_iic_data *adap)
156{
157 short i;
158
159 /* Clear bit 7 to set I2C to normal operation mode */
160 i=iic_inw(adap, PM_DSR)& 0xff7f;
161 iic_outw(adap, PM_DSR, i);
162
163 /* set IT_GPCCR port C bit 2&3 as function 2 */
164 i = iic_inw(adap, GPIO_CCR) & 0xfc0f;
165 iic_outw(adap,GPIO_CCR,i);
166
167 /* Clear slave address/sub-address */
168 iic_outw(adap,ITE_I2CSAR, 0);
169 iic_outw(adap,ITE_I2CSSAR, 0);
170
171 /* Set clock counter register */
172 iic_outw(adap,ITE_I2CCKCNT, get_clock(adap));
173
174 /* Set START/reSTART/STOP time registers */
175 iic_outw(adap,ITE_I2CSHDR, 0x0a);
176 iic_outw(adap,ITE_I2CRSUR, 0x0a);
177 iic_outw(adap,ITE_I2CPSUR, 0x0a);
178
179 /* Enable interrupts on completing the current transaction */
180 iic_outw(adap,ITE_I2CHCR, ITE_I2CHCR_IE | ITE_I2CHCR_HCE);
181
182 /* Clear transfer count */
183 iic_outw(adap,ITE_I2CFBCR, 0x0);
184
185 DEB2(printk("iic_init: Initialized IIC on ITE 0x%x\n",
186 iic_inw(adap, ITE_I2CHSR)));
187 return 0;
188}
189
190
191/*
192 * Sanity check for the adapter hardware - check the reaction of
193 * the bus lines only if it seems to be idle.
194 */
195static int test_bus(struct i2c_algo_iic_data *adap, char *name) {
196#if 0
197 int scl,sda;
198 sda=getsda(adap);
199 if (adap->getscl==NULL) {
200 printk("test_bus: Warning: Adapter can't read from clock line - skipping test.\n");
201 return 0;
202 }
203 scl=getscl(adap);
204 printk("test_bus: Adapter: %s scl: %d sda: %d -- testing...\n",
205 name,getscl(adap),getsda(adap));
206 if (!scl || !sda ) {
207 printk("test_bus: %s seems to be busy.\n",adap->name);
208 goto bailout;
209 }
210 sdalo(adap);
211 printk("test_bus:1 scl: %d sda: %d\n", getscl(adap),
212 getsda(adap));
213 if ( 0 != getsda(adap) ) {
214 printk("test_bus: %s SDA stuck high!\n",name);
215 sdahi(adap);
216 goto bailout;
217 }
218 if ( 0 == getscl(adap) ) {
219 printk("test_bus: %s SCL unexpected low while pulling SDA low!\n",
220 name);
221 goto bailout;
222 }
223 sdahi(adap);
224 printk("test_bus:2 scl: %d sda: %d\n", getscl(adap),
225 getsda(adap));
226 if ( 0 == getsda(adap) ) {
227 printk("test_bus: %s SDA stuck low!\n",name);
228 sdahi(adap);
229 goto bailout;
230 }
231 if ( 0 == getscl(adap) ) {
232 printk("test_bus: %s SCL unexpected low while SDA high!\n",
233 adap->name);
234 goto bailout;
235 }
236 scllo(adap);
237 printk("test_bus:3 scl: %d sda: %d\n", getscl(adap),
238 getsda(adap));
239 if ( 0 != getscl(adap) ) {
240
241 sclhi(adap);
242 goto bailout;
243 }
244 if ( 0 == getsda(adap) ) {
245 printk("test_bus: %s SDA unexpected low while pulling SCL low!\n",
246 name);
247 goto bailout;
248 }
249 sclhi(adap);
250 printk("test_bus:4 scl: %d sda: %d\n", getscl(adap),
251 getsda(adap));
252 if ( 0 == getscl(adap) ) {
253 printk("test_bus: %s SCL stuck low!\n",name);
254 sclhi(adap);
255 goto bailout;
256 }
257 if ( 0 == getsda(adap) ) {
258 printk("test_bus: %s SDA unexpected low while SCL high!\n",
259 name);
260 goto bailout;
261 }
262 printk("test_bus: %s passed test.\n",name);
263 return 0;
264bailout:
265 sdahi(adap);
266 sclhi(adap);
267 return -ENODEV;
268#endif
269 return (0);
270}
271
272/* ----- Utility functions
273 */
274
275
276/* Verify the device we want to talk to on the IIC bus really exists. */
277static inline int try_address(struct i2c_algo_iic_data *adap,
278 unsigned int addr, int retries)
279{
280 int i, ret = -1;
281 short status;
282
283 for (i=0;i<retries;i++) {
284 iic_outw(adap, ITE_I2CSAR, addr);
285 iic_start(adap);
286 if (wait_for_pin(adap, &status) == 0) {
287 if ((status & ITE_I2CHSR_DNE) == 0) {
288 iic_stop(adap);
289 iic_outw(adap, ITE_I2CFCR, ITE_I2CFCR_FLUSH);
290 ret=1;
291 break; /* success! */
292 }
293 }
294 iic_stop(adap);
295 udelay(adap->udelay);
296 }
297 DEB2(if (i) printk("try_address: needed %d retries for 0x%x\n",i,
298 addr));
299 return ret;
300}
301
302
303static int iic_sendbytes(struct i2c_adapter *i2c_adap,const char *buf,
304 int count)
305{
306 struct i2c_algo_iic_data *adap = i2c_adap->algo_data;
307 int wrcount=0, timeout;
308 short status;
309 int loops, remainder, i, j;
310 union {
311 char byte[2];
312 unsigned short word;
313 } tmp;
314
315 iic_outw(adap, ITE_I2CSSAR, (unsigned short)buf[wrcount++]);
316 count--;
317 if (count == 0)
318 return -EIO;
319
320 loops = count / 32; /* 32-byte FIFO */
321 remainder = count % 32;
322
323 if(loops) {
324 for(i=0; i<loops; i++) {
325
326 iic_outw(adap, ITE_I2CFBCR, 32);
327 for(j=0; j<32/2; j++) {
328 tmp.byte[1] = buf[wrcount++];
329 tmp.byte[0] = buf[wrcount++];
330 iic_outw(adap, ITE_I2CFDR, tmp.word);
331 }
332
333 /* status FIFO overrun */
334 iic_inw(adap, ITE_I2CFSR);
335 iic_inw(adap, ITE_I2CFBCR);
336
337 iic_outw(adap, ITE_I2CHCR, ITE_WRITE); /* Issue WRITE command */
338
339 /* Wait for transmission to complete */
340 timeout = wait_for_pin(adap, &status);
341 if(timeout) {
342 iic_stop(adap);
343 printk("iic_sendbytes: %s write timeout.\n", i2c_adap->name);
344 return -EREMOTEIO; /* got a better one ?? */
345 }
346 if (status & ITE_I2CHSR_DB) {
347 iic_stop(adap);
348 printk("iic_sendbytes: %s write error - no ack.\n", i2c_adap->name);
349 return -EREMOTEIO; /* got a better one ?? */
350 }
351 }
352 }
353 if(remainder) {
354 iic_outw(adap, ITE_I2CFBCR, remainder);
355 for(i=0; i<remainder/2; i++) {
356 tmp.byte[1] = buf[wrcount++];
357 tmp.byte[0] = buf[wrcount++];
358 iic_outw(adap, ITE_I2CFDR, tmp.word);
359 }
360
361 /* status FIFO overrun */
362 iic_inw(adap, ITE_I2CFSR);
363 iic_inw(adap, ITE_I2CFBCR);
364
365 iic_outw(adap, ITE_I2CHCR, ITE_WRITE); /* Issue WRITE command */
366
367 timeout = wait_for_pin(adap, &status);
368 if(timeout) {
369 iic_stop(adap);
370 printk("iic_sendbytes: %s write timeout.\n", i2c_adap->name);
371 return -EREMOTEIO; /* got a better one ?? */
372 }
373#ifndef STUB_I2C
374 if (status & ITE_I2CHSR_DB) {
375 iic_stop(adap);
376 printk("iic_sendbytes: %s write error - no ack.\n", i2c_adap->name);
377 return -EREMOTEIO; /* got a better one ?? */
378 }
379#endif
380 }
381 iic_stop(adap);
382 return wrcount;
383}
384
385
386static int iic_readbytes(struct i2c_adapter *i2c_adap, char *buf, int count,
387 int sread)
388{
389 int rdcount=0, i, timeout;
390 short status;
391 struct i2c_algo_iic_data *adap = i2c_adap->algo_data;
392 int loops, remainder, j;
393 union {
394 char byte[2];
395 unsigned short word;
396 } tmp;
397
398 loops = count / 32; /* 32-byte FIFO */
399 remainder = count % 32;
400
401 if(loops) {
402 for(i=0; i<loops; i++) {
403 iic_outw(adap, ITE_I2CFBCR, 32);
404 if (sread)
405 iic_outw(adap, ITE_I2CHCR, ITE_SREAD);
406 else
407 iic_outw(adap, ITE_I2CHCR, ITE_READ); /* Issue READ command */
408
409 timeout = wait_for_pin(adap, &status);
410 if(timeout) {
411 iic_stop(adap);
412 printk("iic_readbytes: %s read timeout.\n", i2c_adap->name);
413 return (-1);
414 }
415#ifndef STUB_I2C
416 if (status & ITE_I2CHSR_DB) {
417 iic_stop(adap);
418 printk("iic_readbytes: %s read error - no ack.\n", i2c_adap->name);
419 return (-1);
420 }
421#endif
422
423 timeout = wait_for_fe(adap, &status);
424 if(timeout) {
425 iic_stop(adap);
426 printk("iic_readbytes: %s FIFO is empty\n", i2c_adap->name);
427 return (-1);
428 }
429
430 for(j=0; j<32/2; j++) {
431 tmp.word = iic_inw(adap, ITE_I2CFDR);
432 buf[rdcount++] = tmp.byte[1];
433 buf[rdcount++] = tmp.byte[0];
434 }
435
436 /* status FIFO underrun */
437 iic_inw(adap, ITE_I2CFSR);
438
439 }
440 }
441
442
443 if(remainder) {
444 remainder=(remainder+1)/2 * 2;
445 iic_outw(adap, ITE_I2CFBCR, remainder);
446 if (sread)
447 iic_outw(adap, ITE_I2CHCR, ITE_SREAD);
448 else
449 iic_outw(adap, ITE_I2CHCR, ITE_READ); /* Issue READ command */
450
451 timeout = wait_for_pin(adap, &status);
452 if(timeout) {
453 iic_stop(adap);
454 printk("iic_readbytes: %s read timeout.\n", i2c_adap->name);
455 return (-1);
456 }
457#ifndef STUB_I2C
458 if (status & ITE_I2CHSR_DB) {
459 iic_stop(adap);
460 printk("iic_readbytes: %s read error - no ack.\n", i2c_adap->name);
461 return (-1);
462 }
463#endif
464 timeout = wait_for_fe(adap, &status);
465 if(timeout) {
466 iic_stop(adap);
467 printk("iic_readbytes: %s FIFO is empty\n", i2c_adap->name);
468 return (-1);
469 }
470
471 for(i=0; i<(remainder+1)/2; i++) {
472 tmp.word = iic_inw(adap, ITE_I2CFDR);
473 buf[rdcount++] = tmp.byte[1];
474 buf[rdcount++] = tmp.byte[0];
475 }
476
477 /* status FIFO underrun */
478 iic_inw(adap, ITE_I2CFSR);
479
480 }
481
482 iic_stop(adap);
483 return rdcount;
484}
485
486
487/* This function implements combined transactions. Combined
488 * transactions consist of combinations of reading and writing blocks of data.
489 * Each transfer (i.e. a read or a write) is separated by a repeated start
490 * condition.
491 */
492#if 0
493static int iic_combined_transaction(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num)
494{
495 int i;
496 struct i2c_msg *pmsg;
497 int ret;
498
499 DEB2(printk("Beginning combined transaction\n"));
500
501 for(i=0; i<(num-1); i++) {
502 pmsg = &msgs[i];
503 if(pmsg->flags & I2C_M_RD) {
504 DEB2(printk(" This one is a read\n"));
505 ret = iic_readbytes(i2c_adap, pmsg->buf, pmsg->len, IIC_COMBINED_XFER);
506 }
507 else if(!(pmsg->flags & I2C_M_RD)) {
508 DEB2(printk("This one is a write\n"));
509 ret = iic_sendbytes(i2c_adap, pmsg->buf, pmsg->len, IIC_COMBINED_XFER);
510 }
511 }
512 /* Last read or write segment needs to be terminated with a stop */
513 pmsg = &msgs[i];
514
515 if(pmsg->flags & I2C_M_RD) {
516 DEB2(printk("Doing the last read\n"));
517 ret = iic_readbytes(i2c_adap, pmsg->buf, pmsg->len, IIC_SINGLE_XFER);
518 }
519 else if(!(pmsg->flags & I2C_M_RD)) {
520 DEB2(printk("Doing the last write\n"));
521 ret = iic_sendbytes(i2c_adap, pmsg->buf, pmsg->len, IIC_SINGLE_XFER);
522 }
523
524 return ret;
525}
526#endif
527
528
529/* Whenever we initiate a transaction, the first byte clocked
530 * onto the bus after the start condition is the address (7 bit) of the
531 * device we want to talk to. This function manipulates the address specified
532 * so that it makes sense to the hardware when written to the IIC peripheral.
533 *
534 * Note: 10 bit addresses are not supported in this driver, although they are
535 * supported by the hardware. This functionality needs to be implemented.
536 */
537static inline int iic_doAddress(struct i2c_algo_iic_data *adap,
538 struct i2c_msg *msg, int retries)
539{
540 unsigned short flags = msg->flags;
541 unsigned int addr;
542 int ret;
543
544/* Ten bit addresses not supported right now */
545 if ( (flags & I2C_M_TEN) ) {
546#if 0
547 addr = 0xf0 | (( msg->addr >> 7) & 0x03);
548 DEB2(printk("addr0: %d\n",addr));
549 ret = try_address(adap, addr, retries);
550 if (ret!=1) {
551 printk("iic_doAddress: died at extended address code.\n");
552 return -EREMOTEIO;
553 }
554 iic_outw(adap,msg->addr & 0x7f);
555 if (ret != 1) {
556 printk("iic_doAddress: died at 2nd address code.\n");
557 return -EREMOTEIO;
558 }
559 if ( flags & I2C_M_RD ) {
560 i2c_repstart(adap);
561 addr |= 0x01;
562 ret = try_address(adap, addr, retries);
563 if (ret!=1) {
564 printk("iic_doAddress: died at extended address code.\n");
565 return -EREMOTEIO;
566 }
567 }
568#endif
569 } else {
570
571 addr = ( msg->addr << 1 );
572
573#if 0
574 if (flags & I2C_M_RD )
575 addr |= 1;
576 if (flags & I2C_M_REV_DIR_ADDR )
577 addr ^= 1;
578#endif
579
580 if (iic_inw(adap, ITE_I2CSAR) != addr) {
581 iic_outw(adap, ITE_I2CSAR, addr);
582 ret = try_address(adap, addr, retries);
583 if (ret!=1) {
584 printk("iic_doAddress: died at address code.\n");
585 return -EREMOTEIO;
586 }
587 }
588
589 }
590
591 return 0;
592}
593
594
595/* Description: Prepares the controller for a transaction (clearing status
596 * registers, data buffers, etc), and then calls either iic_readbytes or
597 * iic_sendbytes to do the actual transaction.
598 *
599 * still to be done: Before we issue a transaction, we should
600 * verify that the bus is not busy or in some unknown state.
601 */
602static int iic_xfer(struct i2c_adapter *i2c_adap,
603 struct i2c_msg *msgs,
604 int num)
605{
606 struct i2c_algo_iic_data *adap = i2c_adap->algo_data;
607 struct i2c_msg *pmsg;
608 int i = 0;
609 int ret, timeout;
610
611 pmsg = &msgs[i];
612
613 if(!pmsg->len) {
614 DEB2(printk("iic_xfer: read/write length is 0\n");)
615 return -EIO;
616 }
617 if(!(pmsg->flags & I2C_M_RD) && (!(pmsg->len)%2) ) {
618 DEB2(printk("iic_xfer: write buffer length is not odd\n");)
619 return -EIO;
620 }
621
622 /* Wait for any pending transfers to complete */
623 timeout = wait_for_bb(adap);
624 if (timeout) {
625 DEB2(printk("iic_xfer: Timeout waiting for host not busy\n");)
626 return -EIO;
627 }
628
629 /* Flush FIFO */
630 iic_outw(adap, ITE_I2CFCR, ITE_I2CFCR_FLUSH);
631
632 /* Load address */
633 ret = iic_doAddress(adap, pmsg, i2c_adap->retries);
634 if (ret)
635 return -EIO;
636
637#if 0
638 /* Combined transaction (read and write) */
639 if(num > 1) {
640 DEB2(printk("iic_xfer: Call combined transaction\n"));
641 ret = iic_combined_transaction(i2c_adap, msgs, num);
642 }
643#endif
644
645 DEB3(printk("iic_xfer: Msg %d, addr=0x%x, flags=0x%x, len=%d\n",
646 i, msgs[i].addr, msgs[i].flags, msgs[i].len);)
647
648 if(pmsg->flags & I2C_M_RD) /* Read */
649 ret = iic_readbytes(i2c_adap, pmsg->buf, pmsg->len, 0);
650 else { /* Write */
651 udelay(1000);
652 ret = iic_sendbytes(i2c_adap, pmsg->buf, pmsg->len);
653 }
654
655 if (ret != pmsg->len)
656 DEB3(printk("iic_xfer: error or fail on read/write %d bytes.\n",ret));
657 else
658 DEB3(printk("iic_xfer: read/write %d bytes.\n",ret));
659
660 return ret;
661}
662
663
664/* Implements device specific ioctls. Higher level ioctls can
665 * be found in i2c-core.c and are typical of any i2c controller (specifying
666 * slave address, timeouts, etc). These ioctls take advantage of any hardware
667 * features built into the controller for which this algorithm-adapter set
668 * was written. These ioctls allow you to take control of the data and clock
669 * lines and set the either high or low,
670 * similar to a GPIO pin.
671 */
672static int algo_control(struct i2c_adapter *adapter,
673 unsigned int cmd, unsigned long arg)
674{
675
676 struct i2c_algo_iic_data *adap = adapter->algo_data;
677 struct i2c_iic_msg s_msg;
678 char *buf;
679 int ret;
680
681 if (cmd == I2C_SREAD) {
682 if(copy_from_user(&s_msg, (struct i2c_iic_msg *)arg,
683 sizeof(struct i2c_iic_msg)))
684 return -EFAULT;
685 buf = kmalloc(s_msg.len, GFP_KERNEL);
686 if (buf== NULL)
687 return -ENOMEM;
688
689 /* Flush FIFO */
690 iic_outw(adap, ITE_I2CFCR, ITE_I2CFCR_FLUSH);
691
692 /* Load address */
693 iic_outw(adap, ITE_I2CSAR,s_msg.addr<<1);
694 iic_outw(adap, ITE_I2CSSAR,s_msg.waddr & 0xff);
695
696 ret = iic_readbytes(adapter, buf, s_msg.len, 1);
697 if (ret>=0) {
698 if(copy_to_user( s_msg.buf, buf, s_msg.len) )
699 ret = -EFAULT;
700 }
701 kfree(buf);
702 }
703 return 0;
704}
705
706
707static u32 iic_func(struct i2c_adapter *adap)
708{
709 return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR |
710 I2C_FUNC_PROTOCOL_MANGLING;
711}
712
713/* -----exported algorithm data: ------------------------------------- */
714
715static struct i2c_algorithm iic_algo = {
716 .master_xfer = iic_xfer,
717 .algo_control = algo_control, /* ioctl */
718 .functionality = iic_func,
719};
720
721
722/*
723 * registering functions to load algorithms at runtime
724 */
725int i2c_iic_add_bus(struct i2c_adapter *adap)
726{
727 struct i2c_algo_iic_data *iic_adap = adap->algo_data;
728
729 if (iic_test) {
730 int ret = test_bus(iic_adap, adap->name);
731 if (ret<0)
732 return -ENODEV;
733 }
734
735 DEB2(printk("i2c-algo-ite: hw routines for %s registered.\n",
736 adap->name));
737
738 /* register new adapter to i2c module... */
739 adap->algo = &iic_algo;
740
741 adap->timeout = 100; /* default values, should */
742 adap->retries = 3; /* be replaced by defines */
743 adap->flags = 0;
744
745 iic_init(iic_adap);
746 return i2c_add_adapter(adap);
747}
748
749
750int i2c_iic_del_bus(struct i2c_adapter *adap)
751{
752 int res;
753 if ((res = i2c_del_adapter(adap)) < 0)
754 return res;
755 DEB2(printk("i2c-algo-ite: adapter unregistered: %s\n",adap->name));
756
757 return 0;
758}
759
760
761int __init i2c_algo_iic_init (void)
762{
763 printk(KERN_INFO "ITE iic (i2c) algorithm module\n");
764 return 0;
765}
766
767
768void i2c_algo_iic_exit(void)
769{
770 return;
771}
772
773
774EXPORT_SYMBOL(i2c_iic_add_bus);
775EXPORT_SYMBOL(i2c_iic_del_bus);
776
777/* The MODULE_* macros resolve to nothing if MODULES is not defined
778 * when this file is compiled.
779 */
780MODULE_AUTHOR("MontaVista Software <www.mvista.com>");
781MODULE_DESCRIPTION("ITE iic algorithm");
782MODULE_LICENSE("GPL");
783
784module_param(iic_test, bool, 0);
785module_param(i2c_debug, int, S_IRUGO | S_IWUSR);
786
787MODULE_PARM_DESC(iic_test, "Test if the I2C bus is available");
788MODULE_PARM_DESC(i2c_debug,
789 "debug level - 0 off; 1 normal; 2,3 more verbose; 9 iic-protocol");
790
791
792/* This function resolves to init_module (the function invoked when a module
793 * is loaded via insmod) when this file is compiled with MODULES defined.
794 * Otherwise (i.e. if you want this driver statically linked to the kernel),
795 * a pointer to this function is stored in a table and called
796 * during the initialization of the kernel (in do_basic_setup in /init/main.c)
797 *
798 * All this functionality is complements of the macros defined in linux/init.h
799 */
800module_init(i2c_algo_iic_init);
801
802
803/* If MODULES is defined when this file is compiled, then this function will
804 * resolved to cleanup_module.
805 */
806module_exit(i2c_algo_iic_exit);
diff --git a/drivers/i2c/algos/i2c-algo-ite.h b/drivers/i2c/algos/i2c-algo-ite.h
deleted file mode 100644
index a8ca3c9b546a..000000000000
--- a/drivers/i2c/algos/i2c-algo-ite.h
+++ /dev/null
@@ -1,117 +0,0 @@
1/*
2 --------------------------------------------------------------------
3 i2c-ite.h: Global defines for the I2C controller on board the
4 ITE MIPS processor.
5 --------------------------------------------------------------------
6 Hai-Pao Fan, MontaVista Software, Inc.
7 hpfan@mvista.com or source@mvista.com
8
9 Copyright 2001 MontaVista Software Inc.
10
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
17 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
18 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
19 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
22 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
23 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *
27 * You should have received a copy of the GNU General Public License along
28 * with this program; if not, write to the Free Software Foundation, Inc.,
29 * 675 Mass Ave, Cambridge, MA 02139, USA.
30
31 */
32
33#ifndef I2C_ITE_H
34#define I2C_ITE_H 1
35
36#include <asm/it8172/it8172.h>
37
38/* I2C Registers */
39#define ITE_I2CHCR IT8172_PCI_IO_BASE + IT_I2C_BASE + 0x30
40#define ITE_I2CHSR IT8172_PCI_IO_BASE + IT_I2C_BASE + 0x34
41#define ITE_I2CSAR IT8172_PCI_IO_BASE + IT_I2C_BASE + 0x38
42#define ITE_I2CSSAR IT8172_PCI_IO_BASE + IT_I2C_BASE + 0x3c
43#define ITE_I2CCKCNT IT8172_PCI_IO_BASE + IT_I2C_BASE + 0x48
44#define ITE_I2CSHDR IT8172_PCI_IO_BASE + IT_I2C_BASE + 0x4c
45#define ITE_I2CRSUR IT8172_PCI_IO_BASE + IT_I2C_BASE + 0x50
46#define ITE_I2CPSUR IT8172_PCI_IO_BASE + IT_I2C_BASE + 0x54
47
48#define ITE_I2CFDR IT8172_PCI_IO_BASE + IT_I2C_BASE + 0x70
49#define ITE_I2CFBCR IT8172_PCI_IO_BASE + IT_I2C_BASE + 0x74
50#define ITE_I2CFCR IT8172_PCI_IO_BASE + IT_I2C_BASE + 0x78
51#define ITE_I2CFSR IT8172_PCI_IO_BASE + IT_I2C_BASE + 0x7c
52
53
54/* Host Control Register ITE_I2CHCR */
55#define ITE_I2CHCR_HCE 0x01 /* Enable I2C Host Controller */
56#define ITE_I2CHCR_IE 0x02 /* Enable the interrupt after completing
57 the current transaction */
58#define ITE_I2CHCR_CP_W 0x00 /* bit2-4 000 - Write */
59#define ITE_I2CHCR_CP_R 0x08 /* 010 - Current address read */
60#define ITE_I2CHCR_CP_S 0x10 /* 100 - Sequential read */
61#define ITE_I2CHCR_ST 0x20 /* Initiates the I2C host controller to execute
62 the command and send the data programmed in
63 all required registers to I2C bus */
64#define ITE_CMD ITE_I2CHCR_HCE | ITE_I2CHCR_IE | ITE_I2CHCR_ST
65#define ITE_WRITE ITE_CMD | ITE_I2CHCR_CP_W
66#define ITE_READ ITE_CMD | ITE_I2CHCR_CP_R
67#define ITE_SREAD ITE_CMD | ITE_I2CHCR_CP_S
68
69/* Host Status Register ITE_I2CHSR */
70#define ITE_I2CHSR_DB 0x01 /* Device is busy, receives NACK response except
71 in the first and last bytes */
72#define ITE_I2CHSR_DNE 0x02 /* Target address on I2C bus does not exist */
73#define ITE_I2CHSR_TDI 0x04 /* R/W Transaction on I2C bus was completed */
74#define ITE_I2CHSR_HB 0x08 /* Host controller is processing transactions */
75#define ITE_I2CHSR_FER 0x10 /* Error occurs in the FIFO */
76
77/* Slave Address Register ITE_I2CSAR */
78#define ITE_I2CSAR_SA_MASK 0xfe /* Target I2C device address */
79#define ITE_I2CSAR_ASO 0x0100 /* Output 1/0 to I2CAS port when the
80 next slave address is addressed */
81
82/* Slave Sub-address Register ITE_I2CSSAR */
83#define ITE_I2CSSAR_SUBA_MASK 0xff /* Target I2C device sub-address */
84
85/* Clock Counter Register ITE_I2CCKCNT */
86#define ITE_I2CCKCNT_STOP 0x00 /* stop I2C clock */
87#define ITE_I2CCKCNT_HPCC_MASK 0x7f /* SCL high period counter */
88#define ITE_I2CCKCNT_LPCC_MASK 0x7f00 /* SCL low period counter */
89
90/* START Hold Time Register ITE_I2CSHDR */
91/* value is counted based on 16 MHz internal clock */
92#define ITE_I2CSHDR_FM 0x0a /* START condition at fast mode */
93#define ITE_I2CSHDR_SM 0x47 /* START contition at standard mode */
94
95/* (Repeated) START Setup Time Register ITE_I2CRSUR */
96/* value is counted based on 16 MHz internal clock */
97#define ITE_I2CRSUR_FM 0x0a /* repeated START condition at fast mode */
98#define ITE_I2CRSUR_SM 0x50 /* repeated START condition at standard mode */
99
100/* STOP setup Time Register ITE_I2CPSUR */
101
102/* FIFO Data Register ITE_I2CFDR */
103#define ITE_I2CFDR_MASK 0xff
104
105/* FIFO Byte Count Register ITE_I2CFBCR */
106#define ITE_I2CFBCR_MASK 0x3f
107
108/* FIFO Control Register ITE_I2CFCR */
109#define ITE_I2CFCR_FLUSH 0x01 /* Flush FIFO and reset the FIFO point
110 and I2CFSR */
111/* FIFO Status Register ITE_I2CFSR */
112#define ITE_I2CFSR_FO 0x01 /* FIFO is overrun when write */
113#define ITE_I2CFSR_FU 0x02 /* FIFO is underrun when read */
114#define ITE_I2CFSR_FF 0x04 /* FIFO is full when write */
115#define ITE_I2CFSR_FE 0x08 /* FIFO is empty when read */
116
117#endif /* I2C_ITE_H */
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index 9081c9fbcd29..36fdf971f080 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -381,14 +381,7 @@ int i2c_pca_add_bus(struct i2c_adapter *adap)
381 381
382 return rval; 382 return rval;
383} 383}
384
385int i2c_pca_del_bus(struct i2c_adapter *adap)
386{
387 return i2c_del_adapter(adap);
388}
389
390EXPORT_SYMBOL(i2c_pca_add_bus); 384EXPORT_SYMBOL(i2c_pca_add_bus);
391EXPORT_SYMBOL(i2c_pca_del_bus);
392 385
393MODULE_AUTHOR("Ian Campbell <icampbell@arcom.com>"); 386MODULE_AUTHOR("Ian Campbell <icampbell@arcom.com>");
394MODULE_DESCRIPTION("I2C-Bus PCA9564 algorithm"); 387MODULE_DESCRIPTION("I2C-Bus PCA9564 algorithm");
diff --git a/drivers/i2c/algos/i2c-algo-pcf.c b/drivers/i2c/algos/i2c-algo-pcf.c
index 3b2003398966..ecb2c2d7d540 100644
--- a/drivers/i2c/algos/i2c-algo-pcf.c
+++ b/drivers/i2c/algos/i2c-algo-pcf.c
@@ -486,15 +486,7 @@ int i2c_pcf_add_bus(struct i2c_adapter *adap)
486 486
487 return rval; 487 return rval;
488} 488}
489
490
491int i2c_pcf_del_bus(struct i2c_adapter *adap)
492{
493 return i2c_del_adapter(adap);
494}
495
496EXPORT_SYMBOL(i2c_pcf_add_bus); 489EXPORT_SYMBOL(i2c_pcf_add_bus);
497EXPORT_SYMBOL(i2c_pcf_del_bus);
498 490
499MODULE_AUTHOR("Hans Berglund <hb@spacetec.no>"); 491MODULE_AUTHOR("Hans Berglund <hb@spacetec.no>");
500MODULE_DESCRIPTION("I2C-Bus PCF8584 algorithm"); 492MODULE_DESCRIPTION("I2C-Bus PCF8584 algorithm");
diff --git a/drivers/i2c/algos/i2c-algo-sgi.c b/drivers/i2c/algos/i2c-algo-sgi.c
index 490d99997fd0..ac2d5053078a 100644
--- a/drivers/i2c/algos/i2c-algo-sgi.c
+++ b/drivers/i2c/algos/i2c-algo-sgi.c
@@ -171,15 +171,7 @@ int i2c_sgi_add_bus(struct i2c_adapter *adap)
171 171
172 return i2c_add_adapter(adap); 172 return i2c_add_adapter(adap);
173} 173}
174
175
176int i2c_sgi_del_bus(struct i2c_adapter *adap)
177{
178 return i2c_del_adapter(adap);
179}
180
181EXPORT_SYMBOL(i2c_sgi_add_bus); 174EXPORT_SYMBOL(i2c_sgi_add_bus);
182EXPORT_SYMBOL(i2c_sgi_del_bus);
183 175
184MODULE_AUTHOR("Ladislav Michl <ladis@linux-mips.org>"); 176MODULE_AUTHOR("Ladislav Michl <ladis@linux-mips.org>");
185MODULE_DESCRIPTION("I2C-Bus SGI algorithm"); 177MODULE_DESCRIPTION("I2C-Bus SGI algorithm");
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 90f91d039ee2..e1989f3a2684 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -74,6 +74,13 @@ config I2C_AMD8111
74 This driver can also be built as a module. If so, the module 74 This driver can also be built as a module. If so, the module
75 will be called i2c-amd8111. 75 will be called i2c-amd8111.
76 76
77config I2C_AT91
78 tristate "Atmel AT91 I2C Two-Wire interface (TWI)"
79 depends on I2C && ARCH_AT91 && EXPERIMENTAL
80 help
81 This supports the use of the I2C interface on Atmel AT91
82 processors.
83
77config I2C_AU1550 84config I2C_AU1550
78 tristate "Au1550/Au1200 SMBus interface" 85 tristate "Au1550/Au1200 SMBus interface"
79 depends on I2C && (SOC_AU1550 || SOC_AU1200) 86 depends on I2C && (SOC_AU1550 || SOC_AU1200)
@@ -209,18 +216,6 @@ config I2C_ISA
209 tristate 216 tristate
210 depends on I2C 217 depends on I2C
211 218
212config I2C_ITE
213 tristate "ITE I2C Adapter"
214 depends on I2C && MIPS_ITE8172
215 select I2C_ALGOITE
216 help
217 This supports the ITE8172 I2C peripheral found on some MIPS
218 systems. Say Y if you have one of these. You should also say Y for
219 the ITE I2C driver algorithm support above.
220
221 This support is also available as a module. If so, the module
222 will be called i2c-ite.
223
224config I2C_IXP4XX 219config I2C_IXP4XX
225 tristate "IXP4xx GPIO-Based I2C Interface" 220 tristate "IXP4xx GPIO-Based I2C Interface"
226 depends on I2C && ARCH_IXP4XX 221 depends on I2C && ARCH_IXP4XX
@@ -481,6 +476,17 @@ config I2C_STUB
481 476
482 If you don't know what to do here, definitely say N. 477 If you don't know what to do here, definitely say N.
483 478
479config I2C_VERSATILE
480 tristate "ARM Versatile/Realview I2C bus support"
481 depends on I2C && (ARCH_VERSATILE || ARCH_REALVIEW)
482 select I2C_ALGOBIT
483 help
484 Say yes if you want to support the I2C serial bus on ARMs Versatile
485 range of platforms.
486
487 This driver can also be built as a module. If so, the module
488 will be called i2c-versatile.
489
484config I2C_VIA 490config I2C_VIA
485 tristate "VIA 82C586B" 491 tristate "VIA 82C586B"
486 depends on I2C && PCI && EXPERIMENTAL 492 depends on I2C && PCI && EXPERIMENTAL
@@ -548,4 +554,23 @@ config I2C_MV64XXX
548 This driver can also be built as a module. If so, the module 554 This driver can also be built as a module. If so, the module
549 will be called i2c-mv64xxx. 555 will be called i2c-mv64xxx.
550 556
557config I2C_PNX
558 tristate "I2C bus support for Philips PNX targets"
559 depends on ARCH_PNX4008 && I2C
560 help
561 This driver supports the Philips IP3204 I2C IP block master and/or
562 slave controller
563
564 This driver can also be built as a module. If so, the module
565 will be called i2c-pnx.
566
567config I2C_PNX_EARLY
568 bool "Early initialization for I2C on PNXxxxx"
569 depends on I2C_PNX=y
570 help
571 Under certain circumstances one may need to make sure I2C on PNXxxxx
572 is initialized earlier than some other driver that depends on it
573 (for instance, that might be USB in case of PNX4008). With this
574 option turned on you can guarantee that.
575
551endmenu 576endmenu
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 493c87289b62..37196c1d0794 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_I2C_ALI15X3) += i2c-ali15x3.o
8obj-$(CONFIG_I2C_AMD756) += i2c-amd756.o 8obj-$(CONFIG_I2C_AMD756) += i2c-amd756.o
9obj-$(CONFIG_I2C_AMD756_S4882) += i2c-amd756-s4882.o 9obj-$(CONFIG_I2C_AMD756_S4882) += i2c-amd756-s4882.o
10obj-$(CONFIG_I2C_AMD8111) += i2c-amd8111.o 10obj-$(CONFIG_I2C_AMD8111) += i2c-amd8111.o
11obj-$(CONFIG_I2C_AT91) += i2c-at91.o
11obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o 12obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o
12obj-$(CONFIG_I2C_ELEKTOR) += i2c-elektor.o 13obj-$(CONFIG_I2C_ELEKTOR) += i2c-elektor.o
13obj-$(CONFIG_I2C_HYDRA) += i2c-hydra.o 14obj-$(CONFIG_I2C_HYDRA) += i2c-hydra.o
@@ -16,7 +17,6 @@ obj-$(CONFIG_I2C_I810) += i2c-i810.o
16obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o 17obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o
17obj-$(CONFIG_I2C_IOP3XX) += i2c-iop3xx.o 18obj-$(CONFIG_I2C_IOP3XX) += i2c-iop3xx.o
18obj-$(CONFIG_I2C_ISA) += i2c-isa.o 19obj-$(CONFIG_I2C_ISA) += i2c-isa.o
19obj-$(CONFIG_I2C_ITE) += i2c-ite.o
20obj-$(CONFIG_I2C_IXP2000) += i2c-ixp2000.o 20obj-$(CONFIG_I2C_IXP2000) += i2c-ixp2000.o
21obj-$(CONFIG_I2C_IXP4XX) += i2c-ixp4xx.o 21obj-$(CONFIG_I2C_IXP4XX) += i2c-ixp4xx.o
22obj-$(CONFIG_I2C_POWERMAC) += i2c-powermac.o 22obj-$(CONFIG_I2C_POWERMAC) += i2c-powermac.o
@@ -29,6 +29,7 @@ obj-$(CONFIG_I2C_PARPORT) += i2c-parport.o
29obj-$(CONFIG_I2C_PARPORT_LIGHT) += i2c-parport-light.o 29obj-$(CONFIG_I2C_PARPORT_LIGHT) += i2c-parport-light.o
30obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o 30obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o
31obj-$(CONFIG_I2C_PIIX4) += i2c-piix4.o 31obj-$(CONFIG_I2C_PIIX4) += i2c-piix4.o
32obj-$(CONFIG_I2C_PNX) += i2c-pnx.o
32obj-$(CONFIG_I2C_PROSAVAGE) += i2c-prosavage.o 33obj-$(CONFIG_I2C_PROSAVAGE) += i2c-prosavage.o
33obj-$(CONFIG_I2C_PXA) += i2c-pxa.o 34obj-$(CONFIG_I2C_PXA) += i2c-pxa.o
34obj-$(CONFIG_I2C_RPXLITE) += i2c-rpx.o 35obj-$(CONFIG_I2C_RPXLITE) += i2c-rpx.o
@@ -39,6 +40,7 @@ obj-$(CONFIG_I2C_SIS5595) += i2c-sis5595.o
39obj-$(CONFIG_I2C_SIS630) += i2c-sis630.o 40obj-$(CONFIG_I2C_SIS630) += i2c-sis630.o
40obj-$(CONFIG_I2C_SIS96X) += i2c-sis96x.o 41obj-$(CONFIG_I2C_SIS96X) += i2c-sis96x.o
41obj-$(CONFIG_I2C_STUB) += i2c-stub.o 42obj-$(CONFIG_I2C_STUB) += i2c-stub.o
43obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o
42obj-$(CONFIG_I2C_VIA) += i2c-via.o 44obj-$(CONFIG_I2C_VIA) += i2c-via.o
43obj-$(CONFIG_I2C_VIAPRO) += i2c-viapro.o 45obj-$(CONFIG_I2C_VIAPRO) += i2c-viapro.o
44obj-$(CONFIG_I2C_VOODOO3) += i2c-voodoo3.o 46obj-$(CONFIG_I2C_VOODOO3) += i2c-voodoo3.o
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
new file mode 100644
index 000000000000..67f91bdda089
--- /dev/null
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -0,0 +1,325 @@
1/*
2 i2c Support for Atmel's AT91 Two-Wire Interface (TWI)
3
4 Copyright (C) 2004 Rick Bronson
5 Converted to 2.6 by Andrew Victor <andrew@sanpeople.com>
6
7 Borrowed heavily from original work by:
8 Copyright (C) 2000 Philip Edelbrock <phil@stimpy.netroedge.com>
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2 of the License, or
13 (at your option) any later version.
14*/
15
16#include <linux/module.h>
17#include <linux/version.h>
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/pci.h>
21#include <linux/types.h>
22#include <linux/delay.h>
23#include <linux/i2c.h>
24#include <linux/init.h>
25#include <linux/clk.h>
26#include <linux/platform_device.h>
27
28#include <asm/io.h>
29
30#include <asm/arch/at91_twi.h>
31#include <asm/arch/board.h>
32#include <asm/arch/cpu.h>
33
34#define TWI_CLOCK 100000 /* Hz. max 400 Kbits/sec */
35
36
37static struct clk *twi_clk;
38static void __iomem *twi_base;
39
40#define at91_twi_read(reg) __raw_readl(twi_base + (reg))
41#define at91_twi_write(reg, val) __raw_writel((val), twi_base + (reg))
42
43
44/*
45 * Initialize the TWI hardware registers.
46 */
47static void __devinit at91_twi_hwinit(void)
48{
49 unsigned long cdiv, ckdiv;
50
51 at91_twi_write(AT91_TWI_IDR, 0xffffffff); /* Disable all interrupts */
52 at91_twi_write(AT91_TWI_CR, AT91_TWI_SWRST); /* Reset peripheral */
53 at91_twi_write(AT91_TWI_CR, AT91_TWI_MSEN); /* Set Master mode */
54
55 /* Calcuate clock dividers */
56 cdiv = (clk_get_rate(twi_clk) / (2 * TWI_CLOCK)) - 3;
57 cdiv = cdiv + 1; /* round up */
58 ckdiv = 0;
59 while (cdiv > 255) {
60 ckdiv++;
61 cdiv = cdiv >> 1;
62 }
63
64 if (cpu_is_at91rm9200()) { /* AT91RM9200 Errata #22 */
65 if (ckdiv > 5) {
66 printk(KERN_ERR "AT91 I2C: Invalid TWI_CLOCK value!\n");
67 ckdiv = 5;
68 }
69 }
70
71 at91_twi_write(AT91_TWI_CWGR, (ckdiv << 16) | (cdiv << 8) | cdiv);
72}
73
74/*
75 * Poll the i2c status register until the specified bit is set.
76 * Returns 0 if timed out (100 msec).
77 */
78static short at91_poll_status(unsigned long bit)
79{
80 int loop_cntr = 10000;
81
82 do {
83 udelay(10);
84 } while (!(at91_twi_read(AT91_TWI_SR) & bit) && (--loop_cntr > 0));
85
86 return (loop_cntr > 0);
87}
88
89static int xfer_read(struct i2c_adapter *adap, unsigned char *buf, int length)
90{
91 /* Send Start */
92 at91_twi_write(AT91_TWI_CR, AT91_TWI_START);
93
94 /* Read data */
95 while (length--) {
96 if (!length) /* need to send Stop before reading last byte */
97 at91_twi_write(AT91_TWI_CR, AT91_TWI_STOP);
98 if (!at91_poll_status(AT91_TWI_RXRDY)) {
99 dev_dbg(&adap->dev, "RXRDY timeout\n");
100 return -ETIMEDOUT;
101 }
102 *buf++ = (at91_twi_read(AT91_TWI_RHR) & 0xff);
103 }
104
105 return 0;
106}
107
108static int xfer_write(struct i2c_adapter *adap, unsigned char *buf, int length)
109{
110 /* Load first byte into transmitter */
111 at91_twi_write(AT91_TWI_THR, *buf++);
112
113 /* Send Start */
114 at91_twi_write(AT91_TWI_CR, AT91_TWI_START);
115
116 do {
117 if (!at91_poll_status(AT91_TWI_TXRDY)) {
118 dev_dbg(&adap->dev, "TXRDY timeout\n");
119 return -ETIMEDOUT;
120 }
121
122 length--; /* byte was transmitted */
123
124 if (length > 0) /* more data to send? */
125 at91_twi_write(AT91_TWI_THR, *buf++);
126 } while (length);
127
128 /* Send Stop */
129 at91_twi_write(AT91_TWI_CR, AT91_TWI_STOP);
130
131 return 0;
132}
133
134/*
135 * Generic i2c master transfer entrypoint.
136 *
137 * Note: We do not use Atmel's feature of storing the "internal device address".
138 * Instead the "internal device address" has to be written using a seperate
139 * i2c message.
140 * http://lists.arm.linux.org.uk/pipermail/linux-arm-kernel/2004-September/024411.html
141 */
142static int at91_xfer(struct i2c_adapter *adap, struct i2c_msg *pmsg, int num)
143{
144 int i, ret;
145
146 dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);
147
148 for (i = 0; i < num; i++) {
149 dev_dbg(&adap->dev, " #%d: %sing %d byte%s %s 0x%02x\n", i,
150 pmsg->flags & I2C_M_RD ? "read" : "writ",
151 pmsg->len, pmsg->len > 1 ? "s" : "",
152 pmsg->flags & I2C_M_RD ? "from" : "to", pmsg->addr);
153
154 at91_twi_write(AT91_TWI_MMR, (pmsg->addr << 16)
155 | ((pmsg->flags & I2C_M_RD) ? AT91_TWI_MREAD : 0));
156
157 if (pmsg->len && pmsg->buf) { /* sanity check */
158 if (pmsg->flags & I2C_M_RD)
159 ret = xfer_read(adap, pmsg->buf, pmsg->len);
160 else
161 ret = xfer_write(adap, pmsg->buf, pmsg->len);
162
163 if (ret)
164 return ret;
165
166 /* Wait until transfer is finished */
167 if (!at91_poll_status(AT91_TWI_TXCOMP)) {
168 dev_dbg(&adap->dev, "TXCOMP timeout\n");
169 return -ETIMEDOUT;
170 }
171 }
172 dev_dbg(&adap->dev, "transfer complete\n");
173 pmsg++; /* next message */
174 }
175 return i;
176}
177
178/*
179 * Return list of supported functionality.
180 */
181static u32 at91_func(struct i2c_adapter *adapter)
182{
183 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
184}
185
186static struct i2c_algorithm at91_algorithm = {
187 .master_xfer = at91_xfer,
188 .functionality = at91_func,
189};
190
191/*
192 * Main initialization routine.
193 */
194static int __devinit at91_i2c_probe(struct platform_device *pdev)
195{
196 struct i2c_adapter *adapter;
197 struct resource *res;
198 int rc;
199
200 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
201 if (!res)
202 return -ENXIO;
203
204 if (!request_mem_region(res->start, res->end - res->start + 1, "at91_i2c"))
205 return -EBUSY;
206
207 twi_base = ioremap(res->start, res->end - res->start + 1);
208 if (!twi_base) {
209 rc = -ENOMEM;
210 goto fail0;
211 }
212
213 twi_clk = clk_get(NULL, "twi_clk");
214 if (IS_ERR(twi_clk)) {
215 dev_err(&pdev->dev, "no clock defined\n");
216 rc = -ENODEV;
217 goto fail1;
218 }
219
220 adapter = kzalloc(sizeof(struct i2c_adapter), GFP_KERNEL);
221 if (adapter == NULL) {
222 dev_err(&pdev->dev, "can't allocate inteface!\n");
223 rc = -ENOMEM;
224 goto fail2;
225 }
226 sprintf(adapter->name, "AT91");
227 adapter->algo = &at91_algorithm;
228 adapter->class = I2C_CLASS_HWMON;
229 adapter->dev.parent = &pdev->dev;
230
231 platform_set_drvdata(pdev, adapter);
232
233 clk_enable(twi_clk); /* enable peripheral clock */
234 at91_twi_hwinit(); /* initialize TWI controller */
235
236 rc = i2c_add_adapter(adapter);
237 if (rc) {
238 dev_err(&pdev->dev, "Adapter %s registration failed\n",
239 adapter->name);
240 goto fail3;
241 }
242
243 dev_info(&pdev->dev, "AT91 i2c bus driver.\n");
244 return 0;
245
246fail3:
247 platform_set_drvdata(pdev, NULL);
248 kfree(adapter);
249 clk_disable(twi_clk);
250fail2:
251 clk_put(twi_clk);
252fail1:
253 iounmap(twi_base);
254fail0:
255 release_mem_region(res->start, res->end - res->start + 1);
256
257 return rc;
258}
259
260static int __devexit at91_i2c_remove(struct platform_device *pdev)
261{
262 struct i2c_adapter *adapter = platform_get_drvdata(pdev);
263 struct resource *res;
264 int rc;
265
266 rc = i2c_del_adapter(adapter);
267 platform_set_drvdata(pdev, NULL);
268
269 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
270 iounmap(twi_base);
271 release_mem_region(res->start, res->end - res->start + 1);
272
273 clk_disable(twi_clk); /* disable peripheral clock */
274 clk_put(twi_clk);
275
276 return rc;
277}
278
279#ifdef CONFIG_PM
280
281/* NOTE: could save a few mA by keeping clock off outside of at91_xfer... */
282
283static int at91_i2c_suspend(struct platform_device *pdev, pm_message_t mesg)
284{
285 clk_disable(twi_clk);
286 return 0;
287}
288
289static int at91_i2c_resume(struct platform_device *pdev)
290{
291 return clk_enable(twi_clk);
292}
293
294#else
295#define at91_i2c_suspend NULL
296#define at91_i2c_resume NULL
297#endif
298
299static struct platform_driver at91_i2c_driver = {
300 .probe = at91_i2c_probe,
301 .remove = __devexit_p(at91_i2c_remove),
302 .suspend = at91_i2c_suspend,
303 .resume = at91_i2c_resume,
304 .driver = {
305 .name = "at91_i2c",
306 .owner = THIS_MODULE,
307 },
308};
309
310static int __init at91_i2c_init(void)
311{
312 return platform_driver_register(&at91_i2c_driver);
313}
314
315static void __exit at91_i2c_exit(void)
316{
317 platform_driver_unregister(&at91_i2c_driver);
318}
319
320module_init(at91_i2c_init);
321module_exit(at91_i2c_exit);
322
323MODULE_AUTHOR("Rick Bronson");
324MODULE_DESCRIPTION("I2C (TWI) driver for Atmel AT91");
325MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-elektor.c b/drivers/i2c/busses/i2c-elektor.c
index a591fe685f06..834967464814 100644
--- a/drivers/i2c/busses/i2c-elektor.c
+++ b/drivers/i2c/busses/i2c-elektor.c
@@ -293,7 +293,7 @@ static int __init i2c_pcfisa_init(void)
293 293
294static void i2c_pcfisa_exit(void) 294static void i2c_pcfisa_exit(void)
295{ 295{
296 i2c_pcf_del_bus(&pcf_isa_ops); 296 i2c_del_adapter(&pcf_isa_ops);
297 297
298 if (irq > 0) { 298 if (irq > 0) {
299 disable_irq(irq); 299 disable_irq(irq);
diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c
index 457d48a0ab9d..9832f773651d 100644
--- a/drivers/i2c/busses/i2c-hydra.c
+++ b/drivers/i2c/busses/i2c-hydra.c
@@ -146,7 +146,7 @@ static int __devinit hydra_probe(struct pci_dev *dev,
146static void __devexit hydra_remove(struct pci_dev *dev) 146static void __devexit hydra_remove(struct pci_dev *dev)
147{ 147{
148 pdregw(hydra_bit_data.data, 0); /* clear SCLK_OE and SDAT_OE */ 148 pdregw(hydra_bit_data.data, 0); /* clear SCLK_OE and SDAT_OE */
149 i2c_bit_del_bus(&hydra_adap); 149 i2c_del_adapter(&hydra_adap);
150 iounmap(hydra_bit_data.data); 150 iounmap(hydra_bit_data.data);
151 release_mem_region(pci_resource_start(dev, 0)+ 151 release_mem_region(pci_resource_start(dev, 0)+
152 offsetof(struct Hydra, CachePD), 4); 152 offsetof(struct Hydra, CachePD), 4);
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index c7be2fdbd86b..ae625b854470 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -470,12 +470,20 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
470 int err; 470 int err;
471 471
472 I801_dev = dev; 472 I801_dev = dev;
473 if ((dev->device == PCI_DEVICE_ID_INTEL_82801DB_3) || 473 switch (dev->device) {
474 (dev->device == PCI_DEVICE_ID_INTEL_82801EB_3) || 474 case PCI_DEVICE_ID_INTEL_82801DB_3:
475 (dev->device == PCI_DEVICE_ID_INTEL_ESB_4)) 475 case PCI_DEVICE_ID_INTEL_82801EB_3:
476 case PCI_DEVICE_ID_INTEL_ESB_4:
477 case PCI_DEVICE_ID_INTEL_ICH6_16:
478 case PCI_DEVICE_ID_INTEL_ICH7_17:
479 case PCI_DEVICE_ID_INTEL_ESB2_17:
480 case PCI_DEVICE_ID_INTEL_ICH8_5:
481 case PCI_DEVICE_ID_INTEL_ICH9_6:
476 isich4 = 1; 482 isich4 = 1;
477 else 483 break;
484 default:
478 isich4 = 0; 485 isich4 = 0;
486 }
479 487
480 err = pci_enable_device(dev); 488 err = pci_enable_device(dev);
481 if (err) { 489 if (err) {
diff --git a/drivers/i2c/busses/i2c-i810.c b/drivers/i2c/busses/i2c-i810.c
index b66fb6bb1870..10c98bc88aa6 100644
--- a/drivers/i2c/busses/i2c-i810.c
+++ b/drivers/i2c/busses/i2c-i810.c
@@ -219,14 +219,14 @@ static int __devinit i810_probe(struct pci_dev *dev, const struct pci_device_id
219 return retval; 219 return retval;
220 retval = i2c_bit_add_bus(&i810_ddc_adapter); 220 retval = i2c_bit_add_bus(&i810_ddc_adapter);
221 if (retval) 221 if (retval)
222 i2c_bit_del_bus(&i810_i2c_adapter); 222 i2c_del_adapter(&i810_i2c_adapter);
223 return retval; 223 return retval;
224} 224}
225 225
226static void __devexit i810_remove(struct pci_dev *dev) 226static void __devexit i810_remove(struct pci_dev *dev)
227{ 227{
228 i2c_bit_del_bus(&i810_ddc_adapter); 228 i2c_del_adapter(&i810_ddc_adapter);
229 i2c_bit_del_bus(&i810_i2c_adapter); 229 i2c_del_adapter(&i810_i2c_adapter);
230 iounmap(ioaddr); 230 iounmap(ioaddr);
231} 231}
232 232
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 781a99c1647a..1898e9987021 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -680,6 +680,12 @@ static int __devinit iic_probe(struct ocp_device *ocp){
680 dev->idx = ocp->def->index; 680 dev->idx = ocp->def->index;
681 ocp_set_drvdata(ocp, dev); 681 ocp_set_drvdata(ocp, dev);
682 682
683 if (!request_mem_region(ocp->def->paddr, sizeof(struct iic_regs),
684 "ibm_iic")) {
685 ret = -EBUSY;
686 goto fail1;
687 }
688
683 if (!(dev->vaddr = ioremap(ocp->def->paddr, sizeof(struct iic_regs)))){ 689 if (!(dev->vaddr = ioremap(ocp->def->paddr, sizeof(struct iic_regs)))){
684 printk(KERN_CRIT "ibm-iic%d: failed to ioremap device registers\n", 690 printk(KERN_CRIT "ibm-iic%d: failed to ioremap device registers\n",
685 dev->idx); 691 dev->idx);
@@ -750,6 +756,8 @@ fail:
750 756
751 iounmap(dev->vaddr); 757 iounmap(dev->vaddr);
752fail2: 758fail2:
759 release_mem_region(ocp->def->paddr, sizeof(struct iic_regs));
760fail1:
753 ocp_set_drvdata(ocp, NULL); 761 ocp_set_drvdata(ocp, NULL);
754 kfree(dev); 762 kfree(dev);
755 return ret; 763 return ret;
@@ -777,6 +785,7 @@ static void __devexit iic_remove(struct ocp_device *ocp)
777 free_irq(dev->irq, dev); 785 free_irq(dev->irq, dev);
778 } 786 }
779 iounmap(dev->vaddr); 787 iounmap(dev->vaddr);
788 release_mem_region(ocp->def->paddr, sizeof(struct iic_regs));
780 kfree(dev); 789 kfree(dev);
781 } 790 }
782} 791}
diff --git a/drivers/i2c/busses/i2c-ite.c b/drivers/i2c/busses/i2c-ite.c
deleted file mode 100644
index f7d71869b3b9..000000000000
--- a/drivers/i2c/busses/i2c-ite.c
+++ /dev/null
@@ -1,278 +0,0 @@
1/*
2 -------------------------------------------------------------------------
3 i2c-adap-ite.c i2c-hw access for the IIC peripheral on the ITE MIPS system
4 -------------------------------------------------------------------------
5 Hai-Pao Fan, MontaVista Software, Inc.
6 hpfan@mvista.com or source@mvista.com
7
8 Copyright 2001 MontaVista Software Inc.
9
10 ----------------------------------------------------------------------------
11 This file was highly leveraged from i2c-elektor.c, which was created
12 by Simon G. Vogl and Hans Berglund:
13
14
15 Copyright (C) 1995-97 Simon G. Vogl
16 1998-99 Hans Berglund
17
18 This program is free software; you can redistribute it and/or modify
19 it under the terms of the GNU General Public License as published by
20 the Free Software Foundation; either version 2 of the License, or
21 (at your option) any later version.
22
23 This program is distributed in the hope that it will be useful,
24 but WITHOUT ANY WARRANTY; without even the implied warranty of
25 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 GNU General Public License for more details.
27
28 You should have received a copy of the GNU General Public License
29 along with this program; if not, write to the Free Software
30 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
31/* ------------------------------------------------------------------------- */
32
33/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even
34 Frodo Looijaard <frodol@dds.nl> */
35
36#include <linux/kernel.h>
37#include <linux/ioport.h>
38#include <linux/module.h>
39#include <linux/delay.h>
40#include <linux/slab.h>
41#include <linux/init.h>
42#include <linux/wait.h>
43#include <asm/irq.h>
44#include <asm/io.h>
45
46#include <linux/i2c.h>
47#include <linux/i2c-algo-ite.h>
48#include <linux/i2c-adap-ite.h>
49#include "../i2c-ite.h"
50
51#define DEFAULT_BASE 0x14014030
52#define ITE_IIC_IO_SIZE 0x40
53#define DEFAULT_IRQ 0
54#define DEFAULT_CLOCK 0x1b0e /* default 16MHz/(27+14) = 400KHz */
55#define DEFAULT_OWN 0x55
56
57static int base;
58static int irq;
59static int clock;
60static int own;
61
62static struct iic_ite gpi;
63static wait_queue_head_t iic_wait;
64static int iic_pending;
65static spinlock_t lock;
66
67/* ----- local functions ---------------------------------------------- */
68
69static void iic_ite_setiic(void *data, int ctl, short val)
70{
71 unsigned long j = jiffies + 10;
72
73 pr_debug(" Write 0x%02x to 0x%x\n",(unsigned short)val, ctl&0xff);
74#ifdef DEBUG
75 while (time_before(jiffies, j))
76 schedule();
77#endif
78 outw(val,ctl);
79}
80
81static short iic_ite_getiic(void *data, int ctl)
82{
83 short val;
84
85 val = inw(ctl);
86 pr_debug("Read 0x%02x from 0x%x\n",(unsigned short)val, ctl&0xff);
87 return (val);
88}
89
90/* Return our slave address. This is the address
91 * put on the I2C bus when another master on the bus wants to address us
92 * as a slave
93 */
94static int iic_ite_getown(void *data)
95{
96 return (gpi.iic_own);
97}
98
99
100static int iic_ite_getclock(void *data)
101{
102 return (gpi.iic_clock);
103}
104
105
106/* Put this process to sleep. We will wake up when the
107 * IIC controller interrupts.
108 */
109static void iic_ite_waitforpin(void) {
110 DEFINE_WAIT(wait);
111 int timeout = 2;
112 unsigned long flags;
113
114 /* If interrupts are enabled (which they are), then put the process to
115 * sleep. This process will be awakened by two events -- either the
116 * the IIC peripheral interrupts or the timeout expires.
117 * If interrupts are not enabled then delay for a reasonable amount
118 * of time and return.
119 */
120 if (gpi.iic_irq > 0) {
121 spin_lock_irqsave(&lock, flags);
122 if (iic_pending == 0) {
123 spin_unlock_irqrestore(&lock, flags);
124 prepare_to_wait(&iic_wait, &wait, TASK_INTERRUPTIBLE);
125 if (schedule_timeout(timeout*HZ)) {
126 spin_lock_irqsave(&lock, flags);
127 if (iic_pending == 1) {
128 iic_pending = 0;
129 }
130 spin_unlock_irqrestore(&lock, flags);
131 }
132 finish_wait(&iic_wait, &wait);
133 } else {
134 iic_pending = 0;
135 spin_unlock_irqrestore(&lock, flags);
136 }
137 } else {
138 udelay(100);
139 }
140}
141
142
143static irqreturn_t iic_ite_handler(int this_irq, void *dev_id)
144{
145 spin_lock(&lock);
146 iic_pending = 1;
147 spin_unlock(&lock);
148
149 wake_up_interruptible(&iic_wait);
150
151 return IRQ_HANDLED;
152}
153
154
155/* Lock the region of memory where I/O registers exist. Request our
156 * interrupt line and register its associated handler.
157 */
158static int iic_hw_resrc_init(void)
159{
160 if (!request_region(gpi.iic_base, ITE_IIC_IO_SIZE, "i2c"))
161 return -ENODEV;
162
163 if (gpi.iic_irq <= 0)
164 return 0;
165
166 if (request_irq(gpi.iic_irq, iic_ite_handler, 0, "ITE IIC", 0) < 0)
167 gpi.iic_irq = 0;
168 else
169 enable_irq(gpi.iic_irq);
170
171 return 0;
172}
173
174
175static void iic_ite_release(void)
176{
177 if (gpi.iic_irq > 0) {
178 disable_irq(gpi.iic_irq);
179 free_irq(gpi.iic_irq, 0);
180 }
181 release_region(gpi.iic_base , 2);
182}
183
184/* ------------------------------------------------------------------------
185 * Encapsulate the above functions in the correct operations structure.
186 * This is only done when more than one hardware adapter is supported.
187 */
188static struct i2c_algo_iic_data iic_ite_data = {
189 NULL,
190 iic_ite_setiic,
191 iic_ite_getiic,
192 iic_ite_getown,
193 iic_ite_getclock,
194 iic_ite_waitforpin,
195 80, 80, 100, /* waits, timeout */
196};
197
198static struct i2c_adapter iic_ite_ops = {
199 .owner = THIS_MODULE,
200 .id = I2C_HW_I_IIC,
201 .algo_data = &iic_ite_data,
202 .name = "ITE IIC adapter",
203};
204
205/* Called when the module is loaded. This function starts the
206 * cascade of calls up through the hierarchy of i2c modules (i.e. up to the
207 * algorithm layer and into to the core layer)
208 */
209static int __init iic_ite_init(void)
210{
211
212 struct iic_ite *piic = &gpi;
213
214 printk(KERN_INFO "Initialize ITE IIC adapter module\n");
215 if (base == 0)
216 piic->iic_base = DEFAULT_BASE;
217 else
218 piic->iic_base = base;
219
220 if (irq == 0)
221 piic->iic_irq = DEFAULT_IRQ;
222 else
223 piic->iic_irq = irq;
224
225 if (clock == 0)
226 piic->iic_clock = DEFAULT_CLOCK;
227 else
228 piic->iic_clock = clock;
229
230 if (own == 0)
231 piic->iic_own = DEFAULT_OWN;
232 else
233 piic->iic_own = own;
234
235 iic_ite_data.data = (void *)piic;
236 init_waitqueue_head(&iic_wait);
237 spin_lock_init(&lock);
238 if (iic_hw_resrc_init() == 0) {
239 if (i2c_iic_add_bus(&iic_ite_ops) < 0)
240 return -ENODEV;
241 } else {
242 return -ENODEV;
243 }
244 printk(KERN_INFO " found device at %#x irq %d.\n",
245 piic->iic_base, piic->iic_irq);
246 return 0;
247}
248
249
250static void iic_ite_exit(void)
251{
252 i2c_iic_del_bus(&iic_ite_ops);
253 iic_ite_release();
254}
255
256/* If modules is NOT defined when this file is compiled, then the MODULE_*
257 * macros will resolve to nothing
258 */
259MODULE_AUTHOR("MontaVista Software <www.mvista.com>");
260MODULE_DESCRIPTION("I2C-Bus adapter routines for ITE IIC bus adapter");
261MODULE_LICENSE("GPL");
262
263module_param(base, int, 0);
264module_param(irq, int, 0);
265module_param(clock, int, 0);
266module_param(own, int, 0);
267
268
269/* Called when module is loaded or when kernel is initialized.
270 * If MODULES is defined when this file is compiled, then this function will
271 * resolve to init_module (the function called when insmod is invoked for a
272 * module). Otherwise, this function is called early in the boot, when the
273 * kernel is intialized. Check out /include/init.h to see how this works.
274 */
275module_init(iic_ite_init);
276
277/* Resolves to module_cleanup when MODULES is defined. */
278module_exit(iic_ite_exit);
diff --git a/drivers/i2c/busses/i2c-ixp2000.c b/drivers/i2c/busses/i2c-ixp2000.c
index dd3f4cd3aa68..efa3ecc5522a 100644
--- a/drivers/i2c/busses/i2c-ixp2000.c
+++ b/drivers/i2c/busses/i2c-ixp2000.c
@@ -90,7 +90,7 @@ static int ixp2000_i2c_remove(struct platform_device *plat_dev)
90 90
91 platform_set_drvdata(plat_dev, NULL); 91 platform_set_drvdata(plat_dev, NULL);
92 92
93 i2c_bit_del_bus(&drv_data->adapter); 93 i2c_del_adapter(&drv_data->adapter);
94 94
95 kfree(drv_data); 95 kfree(drv_data);
96 96
diff --git a/drivers/i2c/busses/i2c-ixp4xx.c b/drivers/i2c/busses/i2c-ixp4xx.c
index 68fe863f9d54..08e89b83984a 100644
--- a/drivers/i2c/busses/i2c-ixp4xx.c
+++ b/drivers/i2c/busses/i2c-ixp4xx.c
@@ -91,7 +91,7 @@ static int ixp4xx_i2c_remove(struct platform_device *plat_dev)
91 91
92 platform_set_drvdata(plat_dev, NULL); 92 platform_set_drvdata(plat_dev, NULL);
93 93
94 i2c_bit_del_bus(&drv_data->adapter); 94 i2c_del_adapter(&drv_data->adapter);
95 95
96 kfree(drv_data); 96 kfree(drv_data);
97 97
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index e0292e414ab2..ad37c10e7fec 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -35,7 +35,7 @@
35 nForce4 MCP55 0368 35 nForce4 MCP55 0368
36 36
37 This driver supports the 2 SMBuses that are included in the MCP of the 37 This driver supports the 2 SMBuses that are included in the MCP of the
38 nForce2/3/4 chipsets. 38 nForce2/3/4/5xx chipsets.
39*/ 39*/
40 40
41/* Note: we assume there can only be one nForce2, with two SMBus interfaces */ 41/* Note: we assume there can only be one nForce2, with two SMBus interfaces */
@@ -52,8 +52,8 @@
52#include <asm/io.h> 52#include <asm/io.h>
53 53
54MODULE_LICENSE("GPL"); 54MODULE_LICENSE("GPL");
55MODULE_AUTHOR ("Hans-Frieder Vogt <hfvogt@arcor.de>"); 55MODULE_AUTHOR ("Hans-Frieder Vogt <hfvogt@gmx.net>");
56MODULE_DESCRIPTION("nForce2 SMBus driver"); 56MODULE_DESCRIPTION("nForce2/3/4/5xx SMBus driver");
57 57
58 58
59struct nforce2_smbus { 59struct nforce2_smbus {
@@ -80,9 +80,6 @@ struct nforce2_smbus {
80#define NVIDIA_SMB_ADDR (smbus->base + 0x02) /* address */ 80#define NVIDIA_SMB_ADDR (smbus->base + 0x02) /* address */
81#define NVIDIA_SMB_CMD (smbus->base + 0x03) /* command */ 81#define NVIDIA_SMB_CMD (smbus->base + 0x03) /* command */
82#define NVIDIA_SMB_DATA (smbus->base + 0x04) /* 32 data registers */ 82#define NVIDIA_SMB_DATA (smbus->base + 0x04) /* 32 data registers */
83#define NVIDIA_SMB_BCNT (smbus->base + 0x24) /* number of data bytes */
84#define NVIDIA_SMB_ALRM_A (smbus->base + 0x25) /* alarm address */
85#define NVIDIA_SMB_ALRM_D (smbus->base + 0x26) /* 2 bytes alarm data */
86 83
87#define NVIDIA_SMB_STS_DONE 0x80 84#define NVIDIA_SMB_STS_DONE 0x80
88#define NVIDIA_SMB_STS_ALRM 0x40 85#define NVIDIA_SMB_STS_ALRM 0x40
@@ -95,40 +92,17 @@ struct nforce2_smbus {
95#define NVIDIA_SMB_PRTCL_BYTE 0x04 92#define NVIDIA_SMB_PRTCL_BYTE 0x04
96#define NVIDIA_SMB_PRTCL_BYTE_DATA 0x06 93#define NVIDIA_SMB_PRTCL_BYTE_DATA 0x06
97#define NVIDIA_SMB_PRTCL_WORD_DATA 0x08 94#define NVIDIA_SMB_PRTCL_WORD_DATA 0x08
98#define NVIDIA_SMB_PRTCL_BLOCK_DATA 0x0a
99#define NVIDIA_SMB_PRTCL_PROC_CALL 0x0c
100#define NVIDIA_SMB_PRTCL_BLOCK_PROC_CALL 0x0d
101#define NVIDIA_SMB_PRTCL_I2C_BLOCK_DATA 0x4a
102#define NVIDIA_SMB_PRTCL_PEC 0x80 95#define NVIDIA_SMB_PRTCL_PEC 0x80
103 96
104static struct pci_driver nforce2_driver; 97static struct pci_driver nforce2_driver;
105 98
106static s32 nforce2_access(struct i2c_adapter *adap, u16 addr, 99/* Return -1 on error */
107 unsigned short flags, char read_write,
108 u8 command, int size, union i2c_smbus_data *data);
109static u32 nforce2_func(struct i2c_adapter *adapter);
110
111
112static const struct i2c_algorithm smbus_algorithm = {
113 .smbus_xfer = nforce2_access,
114 .functionality = nforce2_func,
115};
116
117static struct i2c_adapter nforce2_adapter = {
118 .owner = THIS_MODULE,
119 .class = I2C_CLASS_HWMON,
120 .algo = &smbus_algorithm,
121};
122
123/* Return -1 on error. See smbus.h for more information */
124static s32 nforce2_access(struct i2c_adapter * adap, u16 addr, 100static s32 nforce2_access(struct i2c_adapter * adap, u16 addr,
125 unsigned short flags, char read_write, 101 unsigned short flags, char read_write,
126 u8 command, int size, union i2c_smbus_data * data) 102 u8 command, int size, union i2c_smbus_data * data)
127{ 103{
128 struct nforce2_smbus *smbus = adap->algo_data; 104 struct nforce2_smbus *smbus = adap->algo_data;
129 unsigned char protocol, pec, temp; 105 unsigned char protocol, pec, temp;
130 unsigned char len = 0; /* to keep the compiler quiet */
131 int i;
132 106
133 protocol = (read_write == I2C_SMBUS_READ) ? NVIDIA_SMB_PRTCL_READ : 107 protocol = (read_write == I2C_SMBUS_READ) ? NVIDIA_SMB_PRTCL_READ :
134 NVIDIA_SMB_PRTCL_WRITE; 108 NVIDIA_SMB_PRTCL_WRITE;
@@ -163,35 +137,6 @@ static s32 nforce2_access(struct i2c_adapter * adap, u16 addr,
163 protocol |= NVIDIA_SMB_PRTCL_WORD_DATA | pec; 137 protocol |= NVIDIA_SMB_PRTCL_WORD_DATA | pec;
164 break; 138 break;
165 139
166 case I2C_SMBUS_BLOCK_DATA:
167 outb_p(command, NVIDIA_SMB_CMD);
168 if (read_write == I2C_SMBUS_WRITE) {
169 len = min_t(u8, data->block[0], 32);
170 outb_p(len, NVIDIA_SMB_BCNT);
171 for (i = 0; i < len; i++)
172 outb_p(data->block[i + 1], NVIDIA_SMB_DATA+i);
173 }
174 protocol |= NVIDIA_SMB_PRTCL_BLOCK_DATA | pec;
175 break;
176
177 case I2C_SMBUS_I2C_BLOCK_DATA:
178 len = min_t(u8, data->block[0], 32);
179 outb_p(command, NVIDIA_SMB_CMD);
180 outb_p(len, NVIDIA_SMB_BCNT);
181 if (read_write == I2C_SMBUS_WRITE)
182 for (i = 0; i < len; i++)
183 outb_p(data->block[i + 1], NVIDIA_SMB_DATA+i);
184 protocol |= NVIDIA_SMB_PRTCL_I2C_BLOCK_DATA;
185 break;
186
187 case I2C_SMBUS_PROC_CALL:
188 dev_err(&adap->dev, "I2C_SMBUS_PROC_CALL not supported!\n");
189 return -1;
190
191 case I2C_SMBUS_BLOCK_PROC_CALL:
192 dev_err(&adap->dev, "I2C_SMBUS_BLOCK_PROC_CALL not supported!\n");
193 return -1;
194
195 default: 140 default:
196 dev_err(&adap->dev, "Unsupported transaction %d\n", size); 141 dev_err(&adap->dev, "Unsupported transaction %d\n", size);
197 return -1; 142 return -1;
@@ -227,19 +172,8 @@ static s32 nforce2_access(struct i2c_adapter * adap, u16 addr,
227 break; 172 break;
228 173
229 case I2C_SMBUS_WORD_DATA: 174 case I2C_SMBUS_WORD_DATA:
230 /* case I2C_SMBUS_PROC_CALL: not supported */
231 data->word = inb_p(NVIDIA_SMB_DATA) | (inb_p(NVIDIA_SMB_DATA+1) << 8); 175 data->word = inb_p(NVIDIA_SMB_DATA) | (inb_p(NVIDIA_SMB_DATA+1) << 8);
232 break; 176 break;
233
234 case I2C_SMBUS_BLOCK_DATA:
235 /* case I2C_SMBUS_BLOCK_PROC_CALL: not supported */
236 len = inb_p(NVIDIA_SMB_BCNT);
237 len = min_t(u8, len, 32);
238 case I2C_SMBUS_I2C_BLOCK_DATA:
239 for (i = 0; i < len; i++)
240 data->block[i+1] = inb_p(NVIDIA_SMB_DATA + i);
241 data->block[0] = len;
242 break;
243 } 177 }
244 178
245 return 0; 179 return 0;
@@ -250,10 +184,14 @@ static u32 nforce2_func(struct i2c_adapter *adapter)
250{ 184{
251 /* other functionality might be possible, but is not tested */ 185 /* other functionality might be possible, but is not tested */
252 return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | 186 return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
253 I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA /* | 187 I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA;
254 I2C_FUNC_SMBUS_BLOCK_DATA */;
255} 188}
256 189
190static struct i2c_algorithm smbus_algorithm = {
191 .smbus_xfer = nforce2_access,
192 .functionality = nforce2_func,
193};
194
257 195
258static struct pci_device_id nforce2_ids[] = { 196static struct pci_device_id nforce2_ids[] = {
259 { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS) }, 197 { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS) },
@@ -267,7 +205,6 @@ static struct pci_device_id nforce2_ids[] = {
267 { 0 } 205 { 0 }
268}; 206};
269 207
270
271MODULE_DEVICE_TABLE (pci, nforce2_ids); 208MODULE_DEVICE_TABLE (pci, nforce2_ids);
272 209
273 210
@@ -291,7 +228,7 @@ static int __devinit nforce2_probe_smb (struct pci_dev *dev, int bar,
291 } 228 }
292 229
293 smbus->base = iobase & PCI_BASE_ADDRESS_IO_MASK; 230 smbus->base = iobase & PCI_BASE_ADDRESS_IO_MASK;
294 smbus->size = 8; 231 smbus->size = 64;
295 } 232 }
296 smbus->dev = dev; 233 smbus->dev = dev;
297 234
@@ -300,7 +237,9 @@ static int __devinit nforce2_probe_smb (struct pci_dev *dev, int bar,
300 smbus->base, smbus->base+smbus->size-1, name); 237 smbus->base, smbus->base+smbus->size-1, name);
301 return -1; 238 return -1;
302 } 239 }
303 smbus->adapter = nforce2_adapter; 240 smbus->adapter.owner = THIS_MODULE;
241 smbus->adapter.class = I2C_CLASS_HWMON;
242 smbus->adapter.algo = &smbus_algorithm;
304 smbus->adapter.algo_data = smbus; 243 smbus->adapter.algo_data = smbus;
305 smbus->adapter.dev.parent = &dev->dev; 244 smbus->adapter.dev.parent = &dev->dev;
306 snprintf(smbus->adapter.name, I2C_NAME_SIZE, 245 snprintf(smbus->adapter.name, I2C_NAME_SIZE,
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index dec04da0455c..bcd8367cede1 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -231,8 +231,8 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
231 * 13 2 1 231 * 13 2 1
232 * 19.2 2 1 232 * 19.2 2 1
233 */ 233 */
234 if (fclk_rate > 16000000) 234 if (fclk_rate > 12000000)
235 psc = (fclk_rate + 8000000) / 12000000; 235 psc = fclk_rate / 12000000;
236 } 236 }
237 237
238 /* Setup clock prescaler to obtain approx 12MHz I2C module clock: */ 238 /* Setup clock prescaler to obtain approx 12MHz I2C module clock: */
diff --git a/drivers/i2c/busses/i2c-parport-light.c b/drivers/i2c/busses/i2c-parport-light.c
index 5eb2bd294fd9..4bc42810b9aa 100644
--- a/drivers/i2c/busses/i2c-parport-light.c
+++ b/drivers/i2c/busses/i2c-parport-light.c
@@ -163,7 +163,7 @@ static void __exit i2c_parport_exit(void)
163 if (adapter_parm[type].init.val) 163 if (adapter_parm[type].init.val)
164 line_set(0, &adapter_parm[type].init); 164 line_set(0, &adapter_parm[type].init);
165 165
166 i2c_bit_del_bus(&parport_adapter); 166 i2c_del_adapter(&parport_adapter);
167 release_region(base, 3); 167 release_region(base, 3);
168} 168}
169 169
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index 48a829431c7b..66696a40c7b5 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -218,7 +218,7 @@ static void i2c_parport_detach (struct parport *port)
218 if (adapter_parm[type].init.val) 218 if (adapter_parm[type].init.val)
219 line_set(port, 0, &adapter_parm[type].init); 219 line_set(port, 0, &adapter_parm[type].init);
220 220
221 i2c_bit_del_bus(&adapter->adapter); 221 i2c_del_adapter(&adapter->adapter);
222 parport_unregister_device(adapter->pdev); 222 parport_unregister_device(adapter->pdev);
223 if (prev) 223 if (prev)
224 prev->next = adapter->next; 224 prev->next = adapter->next;
diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
index 407840b6a260..cc6536a19eca 100644
--- a/drivers/i2c/busses/i2c-pca-isa.c
+++ b/drivers/i2c/busses/i2c-pca-isa.c
@@ -156,7 +156,7 @@ static int __init pca_isa_init(void)
156 156
157static void pca_isa_exit(void) 157static void pca_isa_exit(void)
158{ 158{
159 i2c_pca_del_bus(&pca_isa_ops); 159 i2c_del_adapter(&pca_isa_ops);
160 160
161 if (irq > 0) { 161 if (irq > 0) {
162 disable_irq(irq); 162 disable_irq(irq);
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
new file mode 100644
index 000000000000..de0bca77e926
--- /dev/null
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -0,0 +1,708 @@
1/*
2 * Provides I2C support for Philips PNX010x/PNX4008 boards.
3 *
4 * Authors: Dennis Kovalev <dkovalev@ru.mvista.com>
5 * Vitaly Wool <vwool@ru.mvista.com>
6 *
7 * 2004-2006 (c) MontaVista Software, Inc. This file is licensed under
8 * the terms of the GNU General Public License version 2. This program
9 * is licensed "as is" without any warranty of any kind, whether express
10 * or implied.
11 */
12
13#include <linux/module.h>
14#include <linux/interrupt.h>
15#include <linux/ioport.h>
16#include <linux/delay.h>
17#include <linux/i2c.h>
18#include <linux/timer.h>
19#include <linux/completion.h>
20#include <linux/platform_device.h>
21#include <linux/i2c-pnx.h>
22#include <asm/hardware.h>
23#include <asm/irq.h>
24#include <asm/uaccess.h>
25
26#define I2C_PNX_TIMEOUT 10 /* msec */
27#define I2C_PNX_SPEED_KHZ 100
28#define I2C_PNX_REGION_SIZE 0x100
29#define PNX_DEFAULT_FREQ 13 /* MHz */
30
31static inline int wait_timeout(long timeout, struct i2c_pnx_algo_data *data)
32{
33 while (timeout > 0 &&
34 (ioread32(I2C_REG_STS(data)) & mstatus_active)) {
35 mdelay(1);
36 timeout--;
37 }
38 return (timeout <= 0);
39}
40
41static inline int wait_reset(long timeout, struct i2c_pnx_algo_data *data)
42{
43 while (timeout > 0 &&
44 (ioread32(I2C_REG_CTL(data)) & mcntrl_reset)) {
45 mdelay(1);
46 timeout--;
47 }
48 return (timeout <= 0);
49}
50
51static inline void i2c_pnx_arm_timer(struct i2c_adapter *adap)
52{
53 struct i2c_pnx_algo_data *data = adap->algo_data;
54 struct timer_list *timer = &data->mif.timer;
55 int expires = I2C_PNX_TIMEOUT / (1000 / HZ);
56
57 del_timer_sync(timer);
58
59 dev_dbg(&adap->dev, "Timer armed at %lu plus %u jiffies.\n",
60 jiffies, expires);
61
62 timer->expires = jiffies + expires;
63 timer->data = (unsigned long)adap;
64
65 add_timer(timer);
66}
67
68/**
69 * i2c_pnx_start - start a device
70 * @slave_addr: slave address
71 * @adap: pointer to adapter structure
72 *
73 * Generate a START signal in the desired mode.
74 */
75static int i2c_pnx_start(unsigned char slave_addr, struct i2c_adapter *adap)
76{
77 struct i2c_pnx_algo_data *alg_data = adap->algo_data;
78
79 dev_dbg(&adap->dev, "%s(): addr 0x%x mode %d\n", __FUNCTION__,
80 slave_addr, alg_data->mif.mode);
81
82 /* Check for 7 bit slave addresses only */
83 if (slave_addr & ~0x7f) {
84 dev_err(&adap->dev, "%s: Invalid slave address %x. "
85 "Only 7-bit addresses are supported\n",
86 adap->name, slave_addr);
87 return -EINVAL;
88 }
89
90 /* First, make sure bus is idle */
91 if (wait_timeout(I2C_PNX_TIMEOUT, alg_data)) {
92 /* Somebody else is monopolizing the bus */
93 dev_err(&adap->dev, "%s: Bus busy. Slave addr = %02x, "
94 "cntrl = %x, stat = %x\n",
95 adap->name, slave_addr,
96 ioread32(I2C_REG_CTL(alg_data)),
97 ioread32(I2C_REG_STS(alg_data)));
98 return -EBUSY;
99 } else if (ioread32(I2C_REG_STS(alg_data)) & mstatus_afi) {
100 /* Sorry, we lost the bus */
101 dev_err(&adap->dev, "%s: Arbitration failure. "
102 "Slave addr = %02x\n", adap->name, slave_addr);
103 return -EIO;
104 }
105
106 /*
107 * OK, I2C is enabled and we have the bus.
108 * Clear the current TDI and AFI status flags.
109 */
110 iowrite32(ioread32(I2C_REG_STS(alg_data)) | mstatus_tdi | mstatus_afi,
111 I2C_REG_STS(alg_data));
112
113 dev_dbg(&adap->dev, "%s(): sending %#x\n", __FUNCTION__,
114 (slave_addr << 1) | start_bit | alg_data->mif.mode);
115
116 /* Write the slave address, START bit and R/W bit */
117 iowrite32((slave_addr << 1) | start_bit | alg_data->mif.mode,
118 I2C_REG_TX(alg_data));
119
120 dev_dbg(&adap->dev, "%s(): exit\n", __FUNCTION__);
121
122 return 0;
123}
124
125/**
126 * i2c_pnx_stop - stop a device
127 * @adap: pointer to I2C adapter structure
128 *
129 * Generate a STOP signal to terminate the master transaction.
130 */
131static void i2c_pnx_stop(struct i2c_adapter *adap)
132{
133 struct i2c_pnx_algo_data *alg_data = adap->algo_data;
134 /* Only 1 msec max timeout due to interrupt context */
135 long timeout = 1000;
136
137 dev_dbg(&adap->dev, "%s(): entering: stat = %04x.\n",
138 __FUNCTION__, ioread32(I2C_REG_STS(alg_data)));
139
140 /* Write a STOP bit to TX FIFO */
141 iowrite32(0xff | stop_bit, I2C_REG_TX(alg_data));
142
143 /* Wait until the STOP is seen. */
144 while (timeout > 0 &&
145 (ioread32(I2C_REG_STS(alg_data)) & mstatus_active)) {
146 /* may be called from interrupt context */
147 udelay(1);
148 timeout--;
149 }
150
151 dev_dbg(&adap->dev, "%s(): exiting: stat = %04x.\n",
152 __FUNCTION__, ioread32(I2C_REG_STS(alg_data)));
153}
154
155/**
156 * i2c_pnx_master_xmit - transmit data to slave
157 * @adap: pointer to I2C adapter structure
158 *
159 * Sends one byte of data to the slave
160 */
161static int i2c_pnx_master_xmit(struct i2c_adapter *adap)
162{
163 struct i2c_pnx_algo_data *alg_data = adap->algo_data;
164 u32 val;
165
166 dev_dbg(&adap->dev, "%s(): entering: stat = %04x.\n",
167 __FUNCTION__, ioread32(I2C_REG_STS(alg_data)));
168
169 if (alg_data->mif.len > 0) {
170 /* We still have something to talk about... */
171 val = *alg_data->mif.buf++;
172
173 if (alg_data->mif.len == 1) {
174 val |= stop_bit;
175 if (!alg_data->last)
176 val |= start_bit;
177 }
178
179 alg_data->mif.len--;
180 iowrite32(val, I2C_REG_TX(alg_data));
181
182 dev_dbg(&adap->dev, "%s(): xmit %#x [%d]\n", __FUNCTION__,
183 val, alg_data->mif.len + 1);
184
185 if (alg_data->mif.len == 0) {
186 if (alg_data->last) {
187 /* Wait until the STOP is seen. */
188 if (wait_timeout(I2C_PNX_TIMEOUT, alg_data))
189 dev_err(&adap->dev, "The bus is still "
190 "active after timeout\n");
191 }
192 /* Disable master interrupts */
193 iowrite32(ioread32(I2C_REG_CTL(alg_data)) &
194 ~(mcntrl_afie | mcntrl_naie | mcntrl_drmie),
195 I2C_REG_CTL(alg_data));
196
197 del_timer_sync(&alg_data->mif.timer);
198
199 dev_dbg(&adap->dev, "%s(): Waking up xfer routine.\n",
200 __FUNCTION__);
201
202 complete(&alg_data->mif.complete);
203 }
204 } else if (alg_data->mif.len == 0) {
205 /* zero-sized transfer */
206 i2c_pnx_stop(adap);
207
208 /* Disable master interrupts. */
209 iowrite32(ioread32(I2C_REG_CTL(alg_data)) &
210 ~(mcntrl_afie | mcntrl_naie | mcntrl_drmie),
211 I2C_REG_CTL(alg_data));
212
213 /* Stop timer. */
214 del_timer_sync(&alg_data->mif.timer);
215 dev_dbg(&adap->dev, "%s(): Waking up xfer routine after "
216 "zero-xfer.\n", __FUNCTION__);
217
218 complete(&alg_data->mif.complete);
219 }
220
221 dev_dbg(&adap->dev, "%s(): exiting: stat = %04x.\n",
222 __FUNCTION__, ioread32(I2C_REG_STS(alg_data)));
223
224 return 0;
225}
226
227/**
228 * i2c_pnx_master_rcv - receive data from slave
229 * @adap: pointer to I2C adapter structure
230 *
231 * Reads one byte data from the slave
232 */
233static int i2c_pnx_master_rcv(struct i2c_adapter *adap)
234{
235 struct i2c_pnx_algo_data *alg_data = adap->algo_data;
236 unsigned int val = 0;
237 u32 ctl = 0;
238
239 dev_dbg(&adap->dev, "%s(): entering: stat = %04x.\n",
240 __FUNCTION__, ioread32(I2C_REG_STS(alg_data)));
241
242 /* Check, whether there is already data,
243 * or we didn't 'ask' for it yet.
244 */
245 if (ioread32(I2C_REG_STS(alg_data)) & mstatus_rfe) {
246 dev_dbg(&adap->dev, "%s(): Write dummy data to fill "
247 "Rx-fifo...\n", __FUNCTION__);
248
249 if (alg_data->mif.len == 1) {
250 /* Last byte, do not acknowledge next rcv. */
251 val |= stop_bit;
252 if (!alg_data->last)
253 val |= start_bit;
254
255 /*
256 * Enable interrupt RFDAIE (data in Rx fifo),
257 * and disable DRMIE (need data for Tx)
258 */
259 ctl = ioread32(I2C_REG_CTL(alg_data));
260 ctl |= mcntrl_rffie | mcntrl_daie;
261 ctl &= ~mcntrl_drmie;
262 iowrite32(ctl, I2C_REG_CTL(alg_data));
263 }
264
265 /*
266 * Now we'll 'ask' for data:
267 * For each byte we want to receive, we must
268 * write a (dummy) byte to the Tx-FIFO.
269 */
270 iowrite32(val, I2C_REG_TX(alg_data));
271
272 return 0;
273 }
274
275 /* Handle data. */
276 if (alg_data->mif.len > 0) {
277 val = ioread32(I2C_REG_RX(alg_data));
278 *alg_data->mif.buf++ = (u8) (val & 0xff);
279 dev_dbg(&adap->dev, "%s(): rcv 0x%x [%d]\n", __FUNCTION__, val,
280 alg_data->mif.len);
281
282 alg_data->mif.len--;
283 if (alg_data->mif.len == 0) {
284 if (alg_data->last)
285 /* Wait until the STOP is seen. */
286 if (wait_timeout(I2C_PNX_TIMEOUT, alg_data))
287 dev_err(&adap->dev, "The bus is still "
288 "active after timeout\n");
289
290 /* Disable master interrupts */
291 ctl = ioread32(I2C_REG_CTL(alg_data));
292 ctl &= ~(mcntrl_afie | mcntrl_naie | mcntrl_rffie |
293 mcntrl_drmie | mcntrl_daie);
294 iowrite32(ctl, I2C_REG_CTL(alg_data));
295
296 /* Kill timer. */
297 del_timer_sync(&alg_data->mif.timer);
298 complete(&alg_data->mif.complete);
299 }
300 }
301
302 dev_dbg(&adap->dev, "%s(): exiting: stat = %04x.\n",
303 __FUNCTION__, ioread32(I2C_REG_STS(alg_data)));
304
305 return 0;
306}
307
308static irqreturn_t
309i2c_pnx_interrupt(int irq, void *dev_id, struct pt_regs *regs)
310{
311 u32 stat, ctl;
312 struct i2c_adapter *adap = dev_id;
313 struct i2c_pnx_algo_data *alg_data = adap->algo_data;
314
315 dev_dbg(&adap->dev, "%s(): mstat = %x mctrl = %x, mode = %d\n",
316 __FUNCTION__,
317 ioread32(I2C_REG_STS(alg_data)),
318 ioread32(I2C_REG_CTL(alg_data)),
319 alg_data->mif.mode);
320 stat = ioread32(I2C_REG_STS(alg_data));
321
322 /* let's see what kind of event this is */
323 if (stat & mstatus_afi) {
324 /* We lost arbitration in the midst of a transfer */
325 alg_data->mif.ret = -EIO;
326
327 /* Disable master interrupts. */
328 ctl = ioread32(I2C_REG_CTL(alg_data));
329 ctl &= ~(mcntrl_afie | mcntrl_naie | mcntrl_rffie |
330 mcntrl_drmie);
331 iowrite32(ctl, I2C_REG_CTL(alg_data));
332
333 /* Stop timer, to prevent timeout. */
334 del_timer_sync(&alg_data->mif.timer);
335 complete(&alg_data->mif.complete);
336 } else if (stat & mstatus_nai) {
337 /* Slave did not acknowledge, generate a STOP */
338 dev_dbg(&adap->dev, "%s(): "
339 "Slave did not acknowledge, generating a STOP.\n",
340 __FUNCTION__);
341 i2c_pnx_stop(adap);
342
343 /* Disable master interrupts. */
344 ctl = ioread32(I2C_REG_CTL(alg_data));
345 ctl &= ~(mcntrl_afie | mcntrl_naie | mcntrl_rffie |
346 mcntrl_drmie);
347 iowrite32(ctl, I2C_REG_CTL(alg_data));
348
349 /* Our return value. */
350 alg_data->mif.ret = -EIO;
351
352 /* Stop timer, to prevent timeout. */
353 del_timer_sync(&alg_data->mif.timer);
354 complete(&alg_data->mif.complete);
355 } else {
356 /*
357 * Two options:
358 * - Master Tx needs data.
359 * - There is data in the Rx-fifo
360 * The latter is only the case if we have requested for data,
361 * via a dummy write. (See 'i2c_pnx_master_rcv'.)
362 * We therefore check, as a sanity check, whether that interrupt
363 * has been enabled.
364 */
365 if ((stat & mstatus_drmi) || !(stat & mstatus_rfe)) {
366 if (alg_data->mif.mode == I2C_SMBUS_WRITE) {
367 i2c_pnx_master_xmit(adap);
368 } else if (alg_data->mif.mode == I2C_SMBUS_READ) {
369 i2c_pnx_master_rcv(adap);
370 }
371 }
372 }
373
374 /* Clear TDI and AFI bits */
375 stat = ioread32(I2C_REG_STS(alg_data));
376 iowrite32(stat | mstatus_tdi | mstatus_afi, I2C_REG_STS(alg_data));
377
378 dev_dbg(&adap->dev, "%s(): exiting, stat = %x ctrl = %x.\n",
379 __FUNCTION__, ioread32(I2C_REG_STS(alg_data)),
380 ioread32(I2C_REG_CTL(alg_data)));
381
382 return IRQ_HANDLED;
383}
384
385static void i2c_pnx_timeout(unsigned long data)
386{
387 struct i2c_adapter *adap = (struct i2c_adapter *)data;
388 struct i2c_pnx_algo_data *alg_data = adap->algo_data;
389 u32 ctl;
390
391 dev_err(&adap->dev, "Master timed out. stat = %04x, cntrl = %04x. "
392 "Resetting master...\n",
393 ioread32(I2C_REG_STS(alg_data)),
394 ioread32(I2C_REG_CTL(alg_data)));
395
396 /* Reset master and disable interrupts */
397 ctl = ioread32(I2C_REG_CTL(alg_data));
398 ctl &= ~(mcntrl_afie | mcntrl_naie | mcntrl_rffie | mcntrl_drmie);
399 iowrite32(ctl, I2C_REG_CTL(alg_data));
400
401 ctl |= mcntrl_reset;
402 iowrite32(ctl, I2C_REG_CTL(alg_data));
403 wait_reset(I2C_PNX_TIMEOUT, alg_data);
404 alg_data->mif.ret = -EIO;
405 complete(&alg_data->mif.complete);
406}
407
408static inline void bus_reset_if_active(struct i2c_adapter *adap)
409{
410 struct i2c_pnx_algo_data *alg_data = adap->algo_data;
411 u32 stat;
412
413 if ((stat = ioread32(I2C_REG_STS(alg_data))) & mstatus_active) {
414 dev_err(&adap->dev,
415 "%s: Bus is still active after xfer. Reset it...\n",
416 adap->name);
417 iowrite32(ioread32(I2C_REG_CTL(alg_data)) | mcntrl_reset,
418 I2C_REG_CTL(alg_data));
419 wait_reset(I2C_PNX_TIMEOUT, alg_data);
420 } else if (!(stat & mstatus_rfe) || !(stat & mstatus_tfe)) {
421 /* If there is data in the fifo's after transfer,
422 * flush fifo's by reset.
423 */
424 iowrite32(ioread32(I2C_REG_CTL(alg_data)) | mcntrl_reset,
425 I2C_REG_CTL(alg_data));
426 wait_reset(I2C_PNX_TIMEOUT, alg_data);
427 } else if (stat & mstatus_nai) {
428 iowrite32(ioread32(I2C_REG_CTL(alg_data)) | mcntrl_reset,
429 I2C_REG_CTL(alg_data));
430 wait_reset(I2C_PNX_TIMEOUT, alg_data);
431 }
432}
433
434/**
435 * i2c_pnx_xfer - generic transfer entry point
436 * @adap: pointer to I2C adapter structure
437 * @msgs: array of messages
438 * @num: number of messages
439 *
440 * Initiates the transfer
441 */
442static int
443i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
444{
445 struct i2c_msg *pmsg;
446 int rc = 0, completed = 0, i;
447 struct i2c_pnx_algo_data *alg_data = adap->algo_data;
448 u32 stat = ioread32(I2C_REG_STS(alg_data));
449
450 dev_dbg(&adap->dev, "%s(): entering: %d messages, stat = %04x.\n",
451 __FUNCTION__, num, ioread32(I2C_REG_STS(alg_data)));
452
453 bus_reset_if_active(adap);
454
455 /* Process transactions in a loop. */
456 for (i = 0; rc >= 0 && i < num; i++) {
457 u8 addr;
458
459 pmsg = &msgs[i];
460 addr = pmsg->addr;
461
462 if (pmsg->flags & I2C_M_TEN) {
463 dev_err(&adap->dev,
464 "%s: 10 bits addr not supported!\n",
465 adap->name);
466 rc = -EINVAL;
467 break;
468 }
469
470 alg_data->mif.buf = pmsg->buf;
471 alg_data->mif.len = pmsg->len;
472 alg_data->mif.mode = (pmsg->flags & I2C_M_RD) ?
473 I2C_SMBUS_READ : I2C_SMBUS_WRITE;
474 alg_data->mif.ret = 0;
475 alg_data->last = (i == num - 1);
476
477 dev_dbg(&adap->dev, "%s(): mode %d, %d bytes\n", __FUNCTION__,
478 alg_data->mif.mode,
479 alg_data->mif.len);
480
481 i2c_pnx_arm_timer(adap);
482
483 /* initialize the completion var */
484 init_completion(&alg_data->mif.complete);
485
486 /* Enable master interrupt */
487 iowrite32(ioread32(I2C_REG_CTL(alg_data)) | mcntrl_afie |
488 mcntrl_naie | mcntrl_drmie,
489 I2C_REG_CTL(alg_data));
490
491 /* Put start-code and slave-address on the bus. */
492 rc = i2c_pnx_start(addr, adap);
493 if (rc < 0)
494 break;
495
496 /* Wait for completion */
497 wait_for_completion(&alg_data->mif.complete);
498
499 if (!(rc = alg_data->mif.ret))
500 completed++;
501 dev_dbg(&adap->dev, "%s(): Complete, return code = %d.\n",
502 __FUNCTION__, rc);
503
504 /* Clear TDI and AFI bits in case they are set. */
505 if ((stat = ioread32(I2C_REG_STS(alg_data))) & mstatus_tdi) {
506 dev_dbg(&adap->dev,
507 "%s: TDI still set... clearing now.\n",
508 adap->name);
509 iowrite32(stat, I2C_REG_STS(alg_data));
510 }
511 if ((stat = ioread32(I2C_REG_STS(alg_data))) & mstatus_afi) {
512 dev_dbg(&adap->dev,
513 "%s: AFI still set... clearing now.\n",
514 adap->name);
515 iowrite32(stat, I2C_REG_STS(alg_data));
516 }
517 }
518
519 bus_reset_if_active(adap);
520
521 /* Cleanup to be sure... */
522 alg_data->mif.buf = NULL;
523 alg_data->mif.len = 0;
524
525 dev_dbg(&adap->dev, "%s(): exiting, stat = %x\n",
526 __FUNCTION__, ioread32(I2C_REG_STS(alg_data)));
527
528 if (completed != num)
529 return ((rc < 0) ? rc : -EREMOTEIO);
530
531 return num;
532}
533
534static u32 i2c_pnx_func(struct i2c_adapter *adapter)
535{
536 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
537}
538
539static struct i2c_algorithm pnx_algorithm = {
540 .master_xfer = i2c_pnx_xfer,
541 .functionality = i2c_pnx_func,
542};
543
544static int i2c_pnx_controller_suspend(struct platform_device *pdev,
545 pm_message_t state)
546{
547 struct i2c_pnx_data *i2c_pnx = platform_get_drvdata(pdev);
548 return i2c_pnx->suspend(pdev, state);
549}
550
551static int i2c_pnx_controller_resume(struct platform_device *pdev)
552{
553 struct i2c_pnx_data *i2c_pnx = platform_get_drvdata(pdev);
554 return i2c_pnx->resume(pdev);
555}
556
557static int __devinit i2c_pnx_probe(struct platform_device *pdev)
558{
559 unsigned long tmp;
560 int ret = 0;
561 struct i2c_pnx_algo_data *alg_data;
562 int freq_mhz;
563 struct i2c_pnx_data *i2c_pnx = pdev->dev.platform_data;
564
565 if (!i2c_pnx || !i2c_pnx->adapter) {
566 dev_err(&pdev->dev, "%s: no platform data supplied\n",
567 __FUNCTION__);
568 ret = -EINVAL;
569 goto out;
570 }
571
572 platform_set_drvdata(pdev, i2c_pnx);
573
574 if (i2c_pnx->calculate_input_freq)
575 freq_mhz = i2c_pnx->calculate_input_freq(pdev);
576 else {
577 freq_mhz = PNX_DEFAULT_FREQ;
578 dev_info(&pdev->dev, "Setting bus frequency to default value: "
579 "%d MHz", freq_mhz);
580 }
581
582 i2c_pnx->adapter->algo = &pnx_algorithm;
583
584 alg_data = i2c_pnx->adapter->algo_data;
585 init_timer(&alg_data->mif.timer);
586 alg_data->mif.timer.function = i2c_pnx_timeout;
587 alg_data->mif.timer.data = (unsigned long)i2c_pnx->adapter;
588
589 /* Register I/O resource */
590 if (!request_region(alg_data->base, I2C_PNX_REGION_SIZE, pdev->name)) {
591 dev_err(&pdev->dev,
592 "I/O region 0x%08x for I2C already in use.\n",
593 alg_data->base);
594 ret = -ENODEV;
595 goto out_drvdata;
596 }
597
598 if (!(alg_data->ioaddr =
599 (u32)ioremap(alg_data->base, I2C_PNX_REGION_SIZE))) {
600 dev_err(&pdev->dev, "Couldn't ioremap I2C I/O region\n");
601 ret = -ENOMEM;
602 goto out_release;
603 }
604
605 i2c_pnx->set_clock_run(pdev);
606
607 /*
608 * Clock Divisor High This value is the number of system clocks
609 * the serial clock (SCL) will be high.
610 * For example, if the system clock period is 50 ns and the maximum
611 * desired serial period is 10000 ns (100 kHz), then CLKHI would be
612 * set to 0.5*(f_sys/f_i2c)-2=0.5*(20e6/100e3)-2=98. The actual value
613 * programmed into CLKHI will vary from this slightly due to
614 * variations in the output pad's rise and fall times as well as
615 * the deglitching filter length.
616 */
617
618 tmp = ((freq_mhz * 1000) / I2C_PNX_SPEED_KHZ) / 2 - 2;
619 iowrite32(tmp, I2C_REG_CKH(alg_data));
620 iowrite32(tmp, I2C_REG_CKL(alg_data));
621
622 iowrite32(mcntrl_reset, I2C_REG_CTL(alg_data));
623 if (wait_reset(I2C_PNX_TIMEOUT, alg_data)) {
624 ret = -ENODEV;
625 goto out_unmap;
626 }
627 init_completion(&alg_data->mif.complete);
628
629 ret = request_irq(alg_data->irq, i2c_pnx_interrupt,
630 0, pdev->name, i2c_pnx->adapter);
631 if (ret)
632 goto out_clock;
633
634 /* Register this adapter with the I2C subsystem */
635 i2c_pnx->adapter->dev.parent = &pdev->dev;
636 ret = i2c_add_adapter(i2c_pnx->adapter);
637 if (ret < 0) {
638 dev_err(&pdev->dev, "I2C: Failed to add bus\n");
639 goto out_irq;
640 }
641
642 dev_dbg(&pdev->dev, "%s: Master at %#8x, irq %d.\n",
643 i2c_pnx->adapter->name, alg_data->base, alg_data->irq);
644
645 return 0;
646
647out_irq:
648 free_irq(alg_data->irq, alg_data);
649out_clock:
650 i2c_pnx->set_clock_stop(pdev);
651out_unmap:
652 iounmap((void *)alg_data->ioaddr);
653out_release:
654 release_region(alg_data->base, I2C_PNX_REGION_SIZE);
655out_drvdata:
656 platform_set_drvdata(pdev, NULL);
657out:
658 return ret;
659}
660
661static int __devexit i2c_pnx_remove(struct platform_device *pdev)
662{
663 struct i2c_pnx_data *i2c_pnx = platform_get_drvdata(pdev);
664 struct i2c_adapter *adap = i2c_pnx->adapter;
665 struct i2c_pnx_algo_data *alg_data = adap->algo_data;
666
667 free_irq(alg_data->irq, alg_data);
668 i2c_del_adapter(adap);
669 i2c_pnx->set_clock_stop(pdev);
670 iounmap((void *)alg_data->ioaddr);
671 release_region(alg_data->base, I2C_PNX_REGION_SIZE);
672 platform_set_drvdata(pdev, NULL);
673
674 return 0;
675}
676
677static struct platform_driver i2c_pnx_driver = {
678 .driver = {
679 .name = "pnx-i2c",
680 .owner = THIS_MODULE,
681 },
682 .probe = i2c_pnx_probe,
683 .remove = __devexit_p(i2c_pnx_remove),
684 .suspend = i2c_pnx_controller_suspend,
685 .resume = i2c_pnx_controller_resume,
686};
687
688static int __init i2c_adap_pnx_init(void)
689{
690 return platform_driver_register(&i2c_pnx_driver);
691}
692
693static void __exit i2c_adap_pnx_exit(void)
694{
695 platform_driver_unregister(&i2c_pnx_driver);
696}
697
698MODULE_AUTHOR("Vitaly Wool, Dennis Kovalev <source@mvista.com>");
699MODULE_DESCRIPTION("I2C driver for Philips IP3204-based I2C busses");
700MODULE_LICENSE("GPL");
701
702#ifdef CONFIG_I2C_PNX_EARLY
703/* We need to make sure I2C is initialized before USB */
704subsys_initcall(i2c_adap_pnx_init);
705#else
706module_init(i2c_adap_pnx_init);
707#endif
708module_exit(i2c_adap_pnx_exit);
diff --git a/drivers/i2c/busses/i2c-prosavage.c b/drivers/i2c/busses/i2c-prosavage.c
index 7745e21874a8..07c1f1e27df1 100644
--- a/drivers/i2c/busses/i2c-prosavage.c
+++ b/drivers/i2c/busses/i2c-prosavage.c
@@ -212,7 +212,7 @@ static void prosavage_remove(struct pci_dev *dev)
212 if (chip->i2c_bus[i].adap_ok == 0) 212 if (chip->i2c_bus[i].adap_ok == 0)
213 continue; 213 continue;
214 214
215 ret = i2c_bit_del_bus(&chip->i2c_bus[i].adap); 215 ret = i2c_del_adapter(&chip->i2c_bus[i].adap);
216 if (ret) { 216 if (ret) {
217 dev_err(&dev->dev, "%s not removed\n", 217 dev_err(&dev->dev, "%s not removed\n",
218 chip->i2c_bus[i].adap.name); 218 chip->i2c_bus[i].adap.name);
diff --git a/drivers/i2c/busses/i2c-savage4.c b/drivers/i2c/busses/i2c-savage4.c
index 209f47ea1750..844b4ff90893 100644
--- a/drivers/i2c/busses/i2c-savage4.c
+++ b/drivers/i2c/busses/i2c-savage4.c
@@ -173,7 +173,7 @@ static int __devinit savage4_probe(struct pci_dev *dev, const struct pci_device_
173 173
174static void __devexit savage4_remove(struct pci_dev *dev) 174static void __devexit savage4_remove(struct pci_dev *dev)
175{ 175{
176 i2c_bit_del_bus(&savage4_i2c_adapter); 176 i2c_del_adapter(&savage4_i2c_adapter);
177 iounmap(ioaddr); 177 iounmap(ioaddr);
178} 178}
179 179
diff --git a/drivers/i2c/busses/i2c-versatile.c b/drivers/i2c/busses/i2c-versatile.c
new file mode 100644
index 000000000000..081d9578ce10
--- /dev/null
+++ b/drivers/i2c/busses/i2c-versatile.c
@@ -0,0 +1,153 @@
1/*
2 * i2c-versatile.c
3 *
4 * Copyright (C) 2006 ARM Ltd.
5 * written by Russell King, Deep Blue Solutions Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/i2c.h>
14#include <linux/i2c-algo-bit.h>
15#include <linux/init.h>
16#include <linux/platform_device.h>
17
18#include <asm/io.h>
19
20#define I2C_CONTROL 0x00
21#define I2C_CONTROLS 0x00
22#define I2C_CONTROLC 0x04
23#define SCL (1 << 0)
24#define SDA (1 << 1)
25
26struct i2c_versatile {
27 struct i2c_adapter adap;
28 struct i2c_algo_bit_data algo;
29 void __iomem *base;
30};
31
32static void i2c_versatile_setsda(void *data, int state)
33{
34 struct i2c_versatile *i2c = data;
35
36 writel(SDA, i2c->base + (state ? I2C_CONTROLS : I2C_CONTROLC));
37}
38
39static void i2c_versatile_setscl(void *data, int state)
40{
41 struct i2c_versatile *i2c = data;
42
43 writel(SCL, i2c->base + (state ? I2C_CONTROLS : I2C_CONTROLC));
44}
45
46static int i2c_versatile_getsda(void *data)
47{
48 struct i2c_versatile *i2c = data;
49 return !!(readl(i2c->base + I2C_CONTROL) & SDA);
50}
51
52static int i2c_versatile_getscl(void *data)
53{
54 struct i2c_versatile *i2c = data;
55 return !!(readl(i2c->base + I2C_CONTROL) & SCL);
56}
57
58static struct i2c_algo_bit_data i2c_versatile_algo = {
59 .setsda = i2c_versatile_setsda,
60 .setscl = i2c_versatile_setscl,
61 .getsda = i2c_versatile_getsda,
62 .getscl = i2c_versatile_getscl,
63 .udelay = 30,
64 .timeout = HZ,
65};
66
67static int i2c_versatile_probe(struct platform_device *dev)
68{
69 struct i2c_versatile *i2c;
70 struct resource *r;
71 int ret;
72
73 r = platform_get_resource(dev, IORESOURCE_MEM, 0);
74 if (!r) {
75 ret = -EINVAL;
76 goto err_out;
77 }
78
79 if (!request_mem_region(r->start, r->end - r->start + 1, "versatile-i2c")) {
80 ret = -EBUSY;
81 goto err_out;
82 }
83
84 i2c = kzalloc(sizeof(struct i2c_versatile), GFP_KERNEL);
85 if (!i2c) {
86 ret = -ENOMEM;
87 goto err_release;
88 }
89
90 i2c->base = ioremap(r->start, r->end - r->start + 1);
91 if (!i2c->base) {
92 ret = -ENOMEM;
93 goto err_free;
94 }
95
96 writel(SCL | SDA, i2c->base + I2C_CONTROLS);
97
98 i2c->adap.owner = THIS_MODULE;
99 strlcpy(i2c->adap.name, "Versatile I2C adapter", sizeof(i2c->adap.name));
100 i2c->adap.algo_data = &i2c->algo;
101 i2c->adap.dev.parent = &dev->dev;
102 i2c->algo = i2c_versatile_algo;
103 i2c->algo.data = i2c;
104
105 ret = i2c_bit_add_bus(&i2c->adap);
106 if (ret >= 0) {
107 platform_set_drvdata(dev, i2c);
108 return 0;
109 }
110
111 iounmap(i2c->base);
112 err_free:
113 kfree(i2c);
114 err_release:
115 release_mem_region(r->start, r->end - r->start + 1);
116 err_out:
117 return ret;
118}
119
120static int i2c_versatile_remove(struct platform_device *dev)
121{
122 struct i2c_versatile *i2c = platform_get_drvdata(dev);
123
124 platform_set_drvdata(dev, NULL);
125
126 i2c_del_adapter(&i2c->adap);
127 return 0;
128}
129
130static struct platform_driver i2c_versatile_driver = {
131 .probe = i2c_versatile_probe,
132 .remove = i2c_versatile_remove,
133 .driver = {
134 .name = "versatile-i2c",
135 .owner = THIS_MODULE,
136 },
137};
138
139static int __init i2c_versatile_init(void)
140{
141 return platform_driver_register(&i2c_versatile_driver);
142}
143
144static void __exit i2c_versatile_exit(void)
145{
146 platform_driver_unregister(&i2c_versatile_driver);
147}
148
149module_init(i2c_versatile_init);
150module_exit(i2c_versatile_exit);
151
152MODULE_DESCRIPTION("ARM Versatile I2C bus driver");
153MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index 910e200ad500..15d7e00e47e6 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -151,7 +151,7 @@ static int __devinit vt586b_probe(struct pci_dev *dev, const struct pci_device_i
151 151
152static void __devexit vt586b_remove(struct pci_dev *dev) 152static void __devexit vt586b_remove(struct pci_dev *dev)
153{ 153{
154 i2c_bit_del_bus(&vt586b_adapter); 154 i2c_del_adapter(&vt586b_adapter);
155 release_region(I2C_DIR, IOSPACE); 155 release_region(I2C_DIR, IOSPACE);
156 pm_io_base = 0; 156 pm_io_base = 0;
157} 157}
diff --git a/drivers/i2c/busses/i2c-voodoo3.c b/drivers/i2c/busses/i2c-voodoo3.c
index 6c8d25183382..b0377b81744b 100644
--- a/drivers/i2c/busses/i2c-voodoo3.c
+++ b/drivers/i2c/busses/i2c-voodoo3.c
@@ -211,14 +211,14 @@ static int __devinit voodoo3_probe(struct pci_dev *dev, const struct pci_device_
211 return retval; 211 return retval;
212 retval = i2c_bit_add_bus(&voodoo3_ddc_adapter); 212 retval = i2c_bit_add_bus(&voodoo3_ddc_adapter);
213 if (retval) 213 if (retval)
214 i2c_bit_del_bus(&voodoo3_i2c_adapter); 214 i2c_del_adapter(&voodoo3_i2c_adapter);
215 return retval; 215 return retval;
216} 216}
217 217
218static void __devexit voodoo3_remove(struct pci_dev *dev) 218static void __devexit voodoo3_remove(struct pci_dev *dev)
219{ 219{
220 i2c_bit_del_bus(&voodoo3_i2c_adapter); 220 i2c_del_adapter(&voodoo3_i2c_adapter);
221 i2c_bit_del_bus(&voodoo3_ddc_adapter); 221 i2c_del_adapter(&voodoo3_ddc_adapter);
222 iounmap(ioaddr); 222 iounmap(ioaddr);
223} 223}
224 224
diff --git a/drivers/i2c/busses/scx200_i2c.c b/drivers/i2c/busses/scx200_i2c.c
index 8ddbae4fafe6..6cd96e43aa72 100644
--- a/drivers/i2c/busses/scx200_i2c.c
+++ b/drivers/i2c/busses/scx200_i2c.c
@@ -116,7 +116,7 @@ static int scx200_i2c_init(void)
116 116
117static void scx200_i2c_cleanup(void) 117static void scx200_i2c_cleanup(void)
118{ 118{
119 i2c_bit_del_bus(&scx200_i2c_ops); 119 i2c_del_adapter(&scx200_i2c_ops);
120} 120}
121 121
122module_init(scx200_i2c_init); 122module_init(scx200_i2c_init);
diff --git a/drivers/i2c/chips/ds1337.c b/drivers/i2c/chips/ds1337.c
index 93d483b8b770..ec17d6b684a2 100644
--- a/drivers/i2c/chips/ds1337.c
+++ b/drivers/i2c/chips/ds1337.c
@@ -347,13 +347,19 @@ static void ds1337_init_client(struct i2c_client *client)
347 347
348 if ((status & 0x80) || (control & 0x80)) { 348 if ((status & 0x80) || (control & 0x80)) {
349 /* RTC not running */ 349 /* RTC not running */
350 u8 buf[16]; 350 u8 buf[1+16]; /* First byte is interpreted as address */
351 struct i2c_msg msg[1]; 351 struct i2c_msg msg[1];
352 352
353 dev_dbg(&client->dev, "%s: RTC not running!\n", __FUNCTION__); 353 dev_dbg(&client->dev, "%s: RTC not running!\n", __FUNCTION__);
354 354
355 /* Initialize all, including STATUS and CONTROL to zero */ 355 /* Initialize all, including STATUS and CONTROL to zero */
356 memset(buf, 0, sizeof(buf)); 356 memset(buf, 0, sizeof(buf));
357
358 /* Write valid values in the date/time registers */
359 buf[1+DS1337_REG_DAY] = 1;
360 buf[1+DS1337_REG_DATE] = 1;
361 buf[1+DS1337_REG_MONTH] = 1;
362
357 msg[0].addr = client->addr; 363 msg[0].addr = client->addr;
358 msg[0].flags = 0; 364 msg[0].flags = 0;
359 msg[0].len = sizeof(buf); 365 msg[0].len = sizeof(buf);
diff --git a/drivers/i2c/chips/tps65010.c b/drivers/i2c/chips/tps65010.c
index 60bef94cd25f..4ee56def61f2 100644
--- a/drivers/i2c/chips/tps65010.c
+++ b/drivers/i2c/chips/tps65010.c
@@ -82,7 +82,7 @@ struct tps65010 {
82 struct i2c_client client; 82 struct i2c_client client;
83 struct mutex lock; 83 struct mutex lock;
84 int irq; 84 int irq;
85 struct work_struct work; 85 struct delayed_work work;
86 struct dentry *file; 86 struct dentry *file;
87 unsigned charging:1; 87 unsigned charging:1;
88 unsigned por:1; 88 unsigned por:1;
@@ -328,7 +328,7 @@ static void tps65010_interrupt(struct tps65010 *tps)
328{ 328{
329 u8 tmp = 0, mask, poll; 329 u8 tmp = 0, mask, poll;
330 330
331 /* IRQs won't trigger irqs for certain events, but we can get 331 /* IRQs won't trigger for certain events, but we can get
332 * others by polling (normally, with external power applied). 332 * others by polling (normally, with external power applied).
333 */ 333 */
334 poll = 0; 334 poll = 0;
@@ -411,10 +411,11 @@ static void tps65010_interrupt(struct tps65010 *tps)
411} 411}
412 412
413/* handle IRQs and polling using keventd for now */ 413/* handle IRQs and polling using keventd for now */
414static void tps65010_work(void *_tps) 414static void tps65010_work(struct work_struct *work)
415{ 415{
416 struct tps65010 *tps = _tps; 416 struct tps65010 *tps;
417 417
418 tps = container_of(work, struct tps65010, work.work);
418 mutex_lock(&tps->lock); 419 mutex_lock(&tps->lock);
419 420
420 tps65010_interrupt(tps); 421 tps65010_interrupt(tps);
@@ -452,7 +453,7 @@ static irqreturn_t tps65010_irq(int irq, void *_tps)
452 453
453 disable_irq_nosync(irq); 454 disable_irq_nosync(irq);
454 set_bit(FLAG_IRQ_ENABLE, &tps->flags); 455 set_bit(FLAG_IRQ_ENABLE, &tps->flags);
455 (void) schedule_work(&tps->work); 456 (void) schedule_work(&tps->work.work);
456 return IRQ_HANDLED; 457 return IRQ_HANDLED;
457} 458}
458 459
@@ -465,13 +466,15 @@ static int __exit tps65010_detach_client(struct i2c_client *client)
465 struct tps65010 *tps; 466 struct tps65010 *tps;
466 467
467 tps = container_of(client, struct tps65010, client); 468 tps = container_of(client, struct tps65010, client);
469 free_irq(tps->irq, tps);
468#ifdef CONFIG_ARM 470#ifdef CONFIG_ARM
469 if (machine_is_omap_h2()) 471 if (machine_is_omap_h2())
470 omap_free_gpio(58); 472 omap_free_gpio(58);
471 if (machine_is_omap_osk()) 473 if (machine_is_omap_osk())
472 omap_free_gpio(OMAP_MPUIO(1)); 474 omap_free_gpio(OMAP_MPUIO(1));
473#endif 475#endif
474 free_irq(tps->irq, tps); 476 cancel_delayed_work(&tps->work);
477 flush_scheduled_work();
475 debugfs_remove(tps->file); 478 debugfs_remove(tps->file);
476 if (i2c_detach_client(client) == 0) 479 if (i2c_detach_client(client) == 0)
477 kfree(tps); 480 kfree(tps);
@@ -505,7 +508,7 @@ tps65010_probe(struct i2c_adapter *bus, int address, int kind)
505 return 0; 508 return 0;
506 509
507 mutex_init(&tps->lock); 510 mutex_init(&tps->lock);
508 INIT_WORK(&tps->work, tps65010_work, tps); 511 INIT_DELAYED_WORK(&tps->work, tps65010_work);
509 tps->irq = -1; 512 tps->irq = -1;
510 tps->client.addr = address; 513 tps->client.addr = address;
511 tps->client.adapter = bus; 514 tps->client.adapter = bus;
@@ -620,7 +623,7 @@ tps65010_probe(struct i2c_adapter *bus, int address, int kind)
620 (void) i2c_smbus_write_byte_data(&tps->client, TPS_MASK3, 0x0f 623 (void) i2c_smbus_write_byte_data(&tps->client, TPS_MASK3, 0x0f
621 | i2c_smbus_read_byte_data(&tps->client, TPS_MASK3)); 624 | i2c_smbus_read_byte_data(&tps->client, TPS_MASK3));
622 625
623 tps65010_work(tps); 626 tps65010_work(&tps->work.work);
624 627
625 tps->file = debugfs_create_file(DRIVER_NAME, S_IRUGO, NULL, 628 tps->file = debugfs_create_file(DRIVER_NAME, S_IRUGO, NULL,
626 tps, DEBUG_FOPS); 629 tps, DEBUG_FOPS);
@@ -672,7 +675,7 @@ int tps65010_set_vbus_draw(unsigned mA)
672 && test_and_set_bit( 675 && test_and_set_bit(
673 FLAG_VBUS_CHANGED, &the_tps->flags)) { 676 FLAG_VBUS_CHANGED, &the_tps->flags)) {
674 /* gadget drivers call this in_irq() */ 677 /* gadget drivers call this in_irq() */
675 (void) schedule_work(&the_tps->work); 678 (void) schedule_work(&the_tps->work.work);
676 } 679 }
677 local_irq_restore(flags); 680 local_irq_restore(flags);
678 681
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 7ca81f42d14b..3e31f1d265c9 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -127,20 +127,17 @@ static ssize_t show_client_name(struct device *dev, struct device_attribute *att
127 return sprintf(buf, "%s\n", client->name); 127 return sprintf(buf, "%s\n", client->name);
128} 128}
129 129
130/* 130/*
131 * We can't use the DEVICE_ATTR() macro here as we want the same filename for a 131 * We can't use the DEVICE_ATTR() macro here, as we used the same name for
132 * different type of a device. So beware if the DEVICE_ATTR() macro ever 132 * an i2c adapter attribute (above).
133 * changes, this definition will also have to change.
134 */ 133 */
135static struct device_attribute dev_attr_client_name = { 134static struct device_attribute dev_attr_client_name =
136 .attr = {.name = "name", .mode = S_IRUGO, .owner = THIS_MODULE }, 135 __ATTR(name, S_IRUGO, &show_client_name, NULL);
137 .show = &show_client_name,
138};
139 136
140 137
141/* --------------------------------------------------- 138/* ---------------------------------------------------
142 * registering functions 139 * registering functions
143 * --------------------------------------------------- 140 * ---------------------------------------------------
144 */ 141 */
145 142
146/* ----- 143/* -----
@@ -314,7 +311,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
314 res = driver_register(&driver->driver); 311 res = driver_register(&driver->driver);
315 if (res) 312 if (res)
316 return res; 313 return res;
317 314
318 mutex_lock(&core_lists); 315 mutex_lock(&core_lists);
319 316
320 list_add_tail(&driver->list,&drivers); 317 list_add_tail(&driver->list,&drivers);
@@ -338,13 +335,13 @@ int i2c_del_driver(struct i2c_driver *driver)
338 struct list_head *item1, *item2, *_n; 335 struct list_head *item1, *item2, *_n;
339 struct i2c_client *client; 336 struct i2c_client *client;
340 struct i2c_adapter *adap; 337 struct i2c_adapter *adap;
341 338
342 int res = 0; 339 int res = 0;
343 340
344 mutex_lock(&core_lists); 341 mutex_lock(&core_lists);
345 342
346 /* Have a look at each adapter, if clients of this driver are still 343 /* Have a look at each adapter, if clients of this driver are still
347 * attached. If so, detach them to be able to kill the driver 344 * attached. If so, detach them to be able to kill the driver
348 * afterwards. 345 * afterwards.
349 */ 346 */
350 list_for_each(item1,&adapters) { 347 list_for_each(item1,&adapters) {
@@ -419,14 +416,14 @@ int i2c_attach_client(struct i2c_client *client)
419 goto out_unlock; 416 goto out_unlock;
420 } 417 }
421 list_add_tail(&client->list,&adapter->clients); 418 list_add_tail(&client->list,&adapter->clients);
422 419
423 client->usage_count = 0; 420 client->usage_count = 0;
424 421
425 client->dev.parent = &client->adapter->dev; 422 client->dev.parent = &client->adapter->dev;
426 client->dev.driver = &client->driver->driver; 423 client->dev.driver = &client->driver->driver;
427 client->dev.bus = &i2c_bus_type; 424 client->dev.bus = &i2c_bus_type;
428 client->dev.release = &i2c_client_release; 425 client->dev.release = &i2c_client_release;
429 426
430 snprintf(&client->dev.bus_id[0], sizeof(client->dev.bus_id), 427 snprintf(&client->dev.bus_id[0], sizeof(client->dev.bus_id),
431 "%d-%04x", i2c_adapter_id(adapter), client->addr); 428 "%d-%04x", i2c_adapter_id(adapter), client->addr);
432 dev_dbg(&adapter->dev, "client [%s] registered with bus id %s\n", 429 dev_dbg(&adapter->dev, "client [%s] registered with bus id %s\n",
@@ -467,7 +464,7 @@ int i2c_detach_client(struct i2c_client *client)
467{ 464{
468 struct i2c_adapter *adapter = client->adapter; 465 struct i2c_adapter *adapter = client->adapter;
469 int res = 0; 466 int res = 0;
470 467
471 if (client->usage_count > 0) { 468 if (client->usage_count > 0) {
472 dev_warn(&client->dev, "Client [%s] still busy, " 469 dev_warn(&client->dev, "Client [%s] still busy, "
473 "can't detach\n", client->name); 470 "can't detach\n", client->name);
@@ -535,10 +532,10 @@ int i2c_release_client(struct i2c_client *client)
535 __FUNCTION__); 532 __FUNCTION__);
536 return -EPERM; 533 return -EPERM;
537 } 534 }
538 535
539 client->usage_count--; 536 client->usage_count--;
540 i2c_dec_use_client(client); 537 i2c_dec_use_client(client);
541 538
542 return 0; 539 return 0;
543} 540}
544 541
@@ -603,7 +600,7 @@ int i2c_transfer(struct i2c_adapter * adap, struct i2c_msg *msgs, int num)
603 } 600 }
604#endif 601#endif
605 602
606 mutex_lock(&adap->bus_lock); 603 mutex_lock_nested(&adap->bus_lock, adap->level);
607 ret = adap->algo->master_xfer(adap,msgs,num); 604 ret = adap->algo->master_xfer(adap,msgs,num);
608 mutex_unlock(&adap->bus_lock); 605 mutex_unlock(&adap->bus_lock);
609 606
@@ -624,7 +621,7 @@ int i2c_master_send(struct i2c_client *client,const char *buf ,int count)
624 msg.flags = client->flags & I2C_M_TEN; 621 msg.flags = client->flags & I2C_M_TEN;
625 msg.len = count; 622 msg.len = count;
626 msg.buf = (char *)buf; 623 msg.buf = (char *)buf;
627 624
628 ret = i2c_transfer(adap, &msg, 1); 625 ret = i2c_transfer(adap, &msg, 1);
629 626
630 /* If everything went ok (i.e. 1 msg transmitted), return #bytes 627 /* If everything went ok (i.e. 1 msg transmitted), return #bytes
@@ -757,7 +754,7 @@ int i2c_probe(struct i2c_adapter *adapter,
757 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_QUICK)) { 754 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_QUICK)) {
758 if (address_data->probe[0] == I2C_CLIENT_END 755 if (address_data->probe[0] == I2C_CLIENT_END
759 && address_data->normal_i2c[0] == I2C_CLIENT_END) 756 && address_data->normal_i2c[0] == I2C_CLIENT_END)
760 return 0; 757 return 0;
761 758
762 dev_warn(&adapter->dev, "SMBus Quick command not supported, " 759 dev_warn(&adapter->dev, "SMBus Quick command not supported, "
763 "can't probe for chips\n"); 760 "can't probe for chips\n");
@@ -817,7 +814,7 @@ int i2c_probe(struct i2c_adapter *adapter,
817struct i2c_adapter* i2c_get_adapter(int id) 814struct i2c_adapter* i2c_get_adapter(int id)
818{ 815{
819 struct i2c_adapter *adapter; 816 struct i2c_adapter *adapter;
820 817
821 mutex_lock(&core_lists); 818 mutex_lock(&core_lists);
822 adapter = (struct i2c_adapter *)idr_find(&i2c_adapter_idr, id); 819 adapter = (struct i2c_adapter *)idr_find(&i2c_adapter_idr, id);
823 if (adapter && !try_module_get(adapter->owner)) 820 if (adapter && !try_module_get(adapter->owner))
@@ -834,14 +831,14 @@ void i2c_put_adapter(struct i2c_adapter *adap)
834 831
835/* The SMBus parts */ 832/* The SMBus parts */
836 833
837#define POLY (0x1070U << 3) 834#define POLY (0x1070U << 3)
838static u8 835static u8
839crc8(u16 data) 836crc8(u16 data)
840{ 837{
841 int i; 838 int i;
842 839
843 for(i = 0; i < 8; i++) { 840 for(i = 0; i < 8; i++) {
844 if (data & 0x8000) 841 if (data & 0x8000)
845 data = data ^ POLY; 842 data = data ^ POLY;
846 data = data << 1; 843 data = data << 1;
847 } 844 }
@@ -891,13 +888,13 @@ static int i2c_smbus_check_pec(u8 cpec, struct i2c_msg *msg)
891 rpec, cpec); 888 rpec, cpec);
892 return -1; 889 return -1;
893 } 890 }
894 return 0; 891 return 0;
895} 892}
896 893
897s32 i2c_smbus_write_quick(struct i2c_client *client, u8 value) 894s32 i2c_smbus_write_quick(struct i2c_client *client, u8 value)
898{ 895{
899 return i2c_smbus_xfer(client->adapter,client->addr,client->flags, 896 return i2c_smbus_xfer(client->adapter,client->addr,client->flags,
900 value,0,I2C_SMBUS_QUICK,NULL); 897 value,0,I2C_SMBUS_QUICK,NULL);
901} 898}
902 899
903s32 i2c_smbus_read_byte(struct i2c_client *client) 900s32 i2c_smbus_read_byte(struct i2c_client *client)
@@ -996,11 +993,11 @@ s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client, u8 command,
996 I2C_SMBUS_I2C_BLOCK_DATA, &data); 993 I2C_SMBUS_I2C_BLOCK_DATA, &data);
997} 994}
998 995
999/* Simulate a SMBus command using the i2c protocol 996/* Simulate a SMBus command using the i2c protocol
1000 No checking of parameters is done! */ 997 No checking of parameters is done! */
1001static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr, 998static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr,
1002 unsigned short flags, 999 unsigned short flags,
1003 char read_write, u8 command, int size, 1000 char read_write, u8 command, int size,
1004 union i2c_smbus_data * data) 1001 union i2c_smbus_data * data)
1005{ 1002{
1006 /* So we need to generate a series of msgs. In the case of writing, we 1003 /* So we need to generate a series of msgs. In the case of writing, we
@@ -1010,7 +1007,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr,
1010 unsigned char msgbuf0[I2C_SMBUS_BLOCK_MAX+3]; 1007 unsigned char msgbuf0[I2C_SMBUS_BLOCK_MAX+3];
1011 unsigned char msgbuf1[I2C_SMBUS_BLOCK_MAX+2]; 1008 unsigned char msgbuf1[I2C_SMBUS_BLOCK_MAX+2];
1012 int num = read_write == I2C_SMBUS_READ?2:1; 1009 int num = read_write == I2C_SMBUS_READ?2:1;
1013 struct i2c_msg msg[2] = { { addr, flags, 1, msgbuf0 }, 1010 struct i2c_msg msg[2] = { { addr, flags, 1, msgbuf0 },
1014 { addr, flags | I2C_M_RD, 0, msgbuf1 } 1011 { addr, flags | I2C_M_RD, 0, msgbuf1 }
1015 }; 1012 };
1016 int i; 1013 int i;
@@ -1103,14 +1100,14 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr,
1103 if (i) { 1100 if (i) {
1104 /* Compute PEC if first message is a write */ 1101 /* Compute PEC if first message is a write */
1105 if (!(msg[0].flags & I2C_M_RD)) { 1102 if (!(msg[0].flags & I2C_M_RD)) {
1106 if (num == 1) /* Write only */ 1103 if (num == 1) /* Write only */
1107 i2c_smbus_add_pec(&msg[0]); 1104 i2c_smbus_add_pec(&msg[0]);
1108 else /* Write followed by read */ 1105 else /* Write followed by read */
1109 partial_pec = i2c_smbus_msg_pec(0, &msg[0]); 1106 partial_pec = i2c_smbus_msg_pec(0, &msg[0]);
1110 } 1107 }
1111 /* Ask for PEC if last message is a read */ 1108 /* Ask for PEC if last message is a read */
1112 if (msg[num-1].flags & I2C_M_RD) 1109 if (msg[num-1].flags & I2C_M_RD)
1113 msg[num-1].len++; 1110 msg[num-1].len++;
1114 } 1111 }
1115 1112
1116 if (i2c_transfer(adapter, msg, num) < 0) 1113 if (i2c_transfer(adapter, msg, num) < 0)
@@ -1130,7 +1127,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr,
1130 case I2C_SMBUS_BYTE_DATA: 1127 case I2C_SMBUS_BYTE_DATA:
1131 data->byte = msgbuf1[0]; 1128 data->byte = msgbuf1[0];
1132 break; 1129 break;
1133 case I2C_SMBUS_WORD_DATA: 1130 case I2C_SMBUS_WORD_DATA:
1134 case I2C_SMBUS_PROC_CALL: 1131 case I2C_SMBUS_PROC_CALL:
1135 data->word = msgbuf1[0] | (msgbuf1[1] << 8); 1132 data->word = msgbuf1[0] | (msgbuf1[1] << 8);
1136 break; 1133 break;
@@ -1146,7 +1143,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr,
1146 1143
1147 1144
1148s32 i2c_smbus_xfer(struct i2c_adapter * adapter, u16 addr, unsigned short flags, 1145s32 i2c_smbus_xfer(struct i2c_adapter * adapter, u16 addr, unsigned short flags,
1149 char read_write, u8 command, int size, 1146 char read_write, u8 command, int size,
1150 union i2c_smbus_data * data) 1147 union i2c_smbus_data * data)
1151{ 1148{
1152 s32 res; 1149 s32 res;
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 2e22a2ffa606..ac5bd2a7ca99 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -1,5 +1,5 @@
1/* 1/*
2 i2c-dev.c - i2c-bus driver, char device interface 2 i2c-dev.c - i2c-bus driver, char device interface
3 3
4 Copyright (C) 1995-97 Simon G. Vogl 4 Copyright (C) 1995-97 Simon G. Vogl
5 Copyright (C) 1998-99 Frodo Looijaard <frodol@dds.nl> 5 Copyright (C) 1998-99 Frodo Looijaard <frodol@dds.nl>
@@ -90,6 +90,7 @@ static void return_i2c_dev(struct i2c_dev *i2c_dev)
90 spin_lock(&i2c_dev_list_lock); 90 spin_lock(&i2c_dev_list_lock);
91 list_del(&i2c_dev->list); 91 list_del(&i2c_dev->list);
92 spin_unlock(&i2c_dev_list_lock); 92 spin_unlock(&i2c_dev_list_lock);
93 kfree(i2c_dev);
93} 94}
94 95
95static ssize_t show_adapter_name(struct device *dev, 96static ssize_t show_adapter_name(struct device *dev,
@@ -172,7 +173,7 @@ static int i2cdev_ioctl(struct inode *inode, struct file *file,
172 switch ( cmd ) { 173 switch ( cmd ) {
173 case I2C_SLAVE: 174 case I2C_SLAVE:
174 case I2C_SLAVE_FORCE: 175 case I2C_SLAVE_FORCE:
175 if ((arg > 0x3ff) || 176 if ((arg > 0x3ff) ||
176 (((client->flags & I2C_M_TEN) == 0) && arg > 0x7f)) 177 (((client->flags & I2C_M_TEN) == 0) && arg > 0x7f))
177 return -EINVAL; 178 return -EINVAL;
178 if ((cmd == I2C_SLAVE) && i2c_check_addr(client->adapter,arg)) 179 if ((cmd == I2C_SLAVE) && i2c_check_addr(client->adapter,arg))
@@ -193,12 +194,11 @@ static int i2cdev_ioctl(struct inode *inode, struct file *file,
193 return 0; 194 return 0;
194 case I2C_FUNCS: 195 case I2C_FUNCS:
195 funcs = i2c_get_functionality(client->adapter); 196 funcs = i2c_get_functionality(client->adapter);
196 return (copy_to_user((unsigned long __user *)arg, &funcs, 197 return put_user(funcs, (unsigned long __user *)arg);
197 sizeof(unsigned long)))?-EFAULT:0;
198 198
199 case I2C_RDWR: 199 case I2C_RDWR:
200 if (copy_from_user(&rdwr_arg, 200 if (copy_from_user(&rdwr_arg,
201 (struct i2c_rdwr_ioctl_data __user *)arg, 201 (struct i2c_rdwr_ioctl_data __user *)arg,
202 sizeof(rdwr_arg))) 202 sizeof(rdwr_arg)))
203 return -EFAULT; 203 return -EFAULT;
204 204
@@ -206,9 +206,9 @@ static int i2cdev_ioctl(struct inode *inode, struct file *file,
206 * be sent at once */ 206 * be sent at once */
207 if (rdwr_arg.nmsgs > I2C_RDRW_IOCTL_MAX_MSGS) 207 if (rdwr_arg.nmsgs > I2C_RDRW_IOCTL_MAX_MSGS)
208 return -EINVAL; 208 return -EINVAL;
209 209
210 rdwr_pa = (struct i2c_msg *) 210 rdwr_pa = (struct i2c_msg *)
211 kmalloc(rdwr_arg.nmsgs * sizeof(struct i2c_msg), 211 kmalloc(rdwr_arg.nmsgs * sizeof(struct i2c_msg),
212 GFP_KERNEL); 212 GFP_KERNEL);
213 213
214 if (rdwr_pa == NULL) return -ENOMEM; 214 if (rdwr_pa == NULL) return -ENOMEM;
@@ -278,9 +278,9 @@ static int i2cdev_ioctl(struct inode *inode, struct file *file,
278 (struct i2c_smbus_ioctl_data __user *) arg, 278 (struct i2c_smbus_ioctl_data __user *) arg,
279 sizeof(struct i2c_smbus_ioctl_data))) 279 sizeof(struct i2c_smbus_ioctl_data)))
280 return -EFAULT; 280 return -EFAULT;
281 if ((data_arg.size != I2C_SMBUS_BYTE) && 281 if ((data_arg.size != I2C_SMBUS_BYTE) &&
282 (data_arg.size != I2C_SMBUS_QUICK) && 282 (data_arg.size != I2C_SMBUS_QUICK) &&
283 (data_arg.size != I2C_SMBUS_BYTE_DATA) && 283 (data_arg.size != I2C_SMBUS_BYTE_DATA) &&
284 (data_arg.size != I2C_SMBUS_WORD_DATA) && 284 (data_arg.size != I2C_SMBUS_WORD_DATA) &&
285 (data_arg.size != I2C_SMBUS_PROC_CALL) && 285 (data_arg.size != I2C_SMBUS_PROC_CALL) &&
286 (data_arg.size != I2C_SMBUS_BLOCK_DATA) && 286 (data_arg.size != I2C_SMBUS_BLOCK_DATA) &&
@@ -291,11 +291,11 @@ static int i2cdev_ioctl(struct inode *inode, struct file *file,
291 data_arg.size); 291 data_arg.size);
292 return -EINVAL; 292 return -EINVAL;
293 } 293 }
294 /* Note that I2C_SMBUS_READ and I2C_SMBUS_WRITE are 0 and 1, 294 /* Note that I2C_SMBUS_READ and I2C_SMBUS_WRITE are 0 and 1,
295 so the check is valid if size==I2C_SMBUS_QUICK too. */ 295 so the check is valid if size==I2C_SMBUS_QUICK too. */
296 if ((data_arg.read_write != I2C_SMBUS_READ) && 296 if ((data_arg.read_write != I2C_SMBUS_READ) &&
297 (data_arg.read_write != I2C_SMBUS_WRITE)) { 297 (data_arg.read_write != I2C_SMBUS_WRITE)) {
298 dev_dbg(&client->adapter->dev, 298 dev_dbg(&client->adapter->dev,
299 "read_write out of range (%x) in ioctl I2C_SMBUS.\n", 299 "read_write out of range (%x) in ioctl I2C_SMBUS.\n",
300 data_arg.read_write); 300 data_arg.read_write);
301 return -EINVAL; 301 return -EINVAL;
@@ -304,7 +304,7 @@ static int i2cdev_ioctl(struct inode *inode, struct file *file,
304 /* Note that command values are always valid! */ 304 /* Note that command values are always valid! */
305 305
306 if ((data_arg.size == I2C_SMBUS_QUICK) || 306 if ((data_arg.size == I2C_SMBUS_QUICK) ||
307 ((data_arg.size == I2C_SMBUS_BYTE) && 307 ((data_arg.size == I2C_SMBUS_BYTE) &&
308 (data_arg.read_write == I2C_SMBUS_WRITE))) 308 (data_arg.read_write == I2C_SMBUS_WRITE)))
309 /* These are special: we do not use data */ 309 /* These are special: we do not use data */
310 return i2c_smbus_xfer(client->adapter, client->addr, 310 return i2c_smbus_xfer(client->adapter, client->addr,
@@ -322,14 +322,14 @@ static int i2cdev_ioctl(struct inode *inode, struct file *file,
322 if ((data_arg.size == I2C_SMBUS_BYTE_DATA) || 322 if ((data_arg.size == I2C_SMBUS_BYTE_DATA) ||
323 (data_arg.size == I2C_SMBUS_BYTE)) 323 (data_arg.size == I2C_SMBUS_BYTE))
324 datasize = sizeof(data_arg.data->byte); 324 datasize = sizeof(data_arg.data->byte);
325 else if ((data_arg.size == I2C_SMBUS_WORD_DATA) || 325 else if ((data_arg.size == I2C_SMBUS_WORD_DATA) ||
326 (data_arg.size == I2C_SMBUS_PROC_CALL)) 326 (data_arg.size == I2C_SMBUS_PROC_CALL))
327 datasize = sizeof(data_arg.data->word); 327 datasize = sizeof(data_arg.data->word);
328 else /* size == smbus block, i2c block, or block proc. call */ 328 else /* size == smbus block, i2c block, or block proc. call */
329 datasize = sizeof(data_arg.data->block); 329 datasize = sizeof(data_arg.data->block);
330 330
331 if ((data_arg.size == I2C_SMBUS_PROC_CALL) || 331 if ((data_arg.size == I2C_SMBUS_PROC_CALL) ||
332 (data_arg.size == I2C_SMBUS_BLOCK_PROC_CALL) || 332 (data_arg.size == I2C_SMBUS_BLOCK_PROC_CALL) ||
333 (data_arg.read_write == I2C_SMBUS_WRITE)) { 333 (data_arg.read_write == I2C_SMBUS_WRITE)) {
334 if (copy_from_user(&temp, data_arg.data, datasize)) 334 if (copy_from_user(&temp, data_arg.data, datasize))
335 return -EFAULT; 335 return -EFAULT;
@@ -337,8 +337,8 @@ static int i2cdev_ioctl(struct inode *inode, struct file *file,
337 res = i2c_smbus_xfer(client->adapter,client->addr,client->flags, 337 res = i2c_smbus_xfer(client->adapter,client->addr,client->flags,
338 data_arg.read_write, 338 data_arg.read_write,
339 data_arg.command,data_arg.size,&temp); 339 data_arg.command,data_arg.size,&temp);
340 if (! res && ((data_arg.size == I2C_SMBUS_PROC_CALL) || 340 if (! res && ((data_arg.size == I2C_SMBUS_PROC_CALL) ||
341 (data_arg.size == I2C_SMBUS_BLOCK_PROC_CALL) || 341 (data_arg.size == I2C_SMBUS_BLOCK_PROC_CALL) ||
342 (data_arg.read_write == I2C_SMBUS_READ))) { 342 (data_arg.read_write == I2C_SMBUS_READ))) {
343 if (copy_to_user(data_arg.data, &temp, datasize)) 343 if (copy_to_user(data_arg.data, &temp, datasize))
344 return -EFAULT; 344 return -EFAULT;
@@ -417,8 +417,8 @@ static int i2cdev_attach_adapter(struct i2c_adapter *adap)
417 i2c_dev->dev = device_create(i2c_dev_class, &adap->dev, 417 i2c_dev->dev = device_create(i2c_dev_class, &adap->dev,
418 MKDEV(I2C_MAJOR, adap->nr), 418 MKDEV(I2C_MAJOR, adap->nr),
419 "i2c-%d", adap->nr); 419 "i2c-%d", adap->nr);
420 if (!i2c_dev->dev) { 420 if (IS_ERR(i2c_dev->dev)) {
421 res = -ENODEV; 421 res = PTR_ERR(i2c_dev->dev);
422 goto error; 422 goto error;
423 } 423 }
424 res = device_create_file(i2c_dev->dev, &dev_attr_name); 424 res = device_create_file(i2c_dev->dev, &dev_attr_name);
@@ -432,7 +432,6 @@ error_destroy:
432 device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr)); 432 device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr));
433error: 433error:
434 return_i2c_dev(i2c_dev); 434 return_i2c_dev(i2c_dev);
435 kfree(i2c_dev);
436 return res; 435 return res;
437} 436}
438 437
@@ -447,7 +446,6 @@ static int i2cdev_detach_adapter(struct i2c_adapter *adap)
447 device_remove_file(i2c_dev->dev, &dev_attr_name); 446 device_remove_file(i2c_dev->dev, &dev_attr_name);
448 return_i2c_dev(i2c_dev); 447 return_i2c_dev(i2c_dev);
449 device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr)); 448 device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr));
450 kfree(i2c_dev);
451 449
452 pr_debug("i2c-dev: adapter [%s] unregistered\n", adap->name); 450 pr_debug("i2c-dev: adapter [%s] unregistered\n", adap->name);
453 return 0; 451 return 0;
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index e23bc0d62159..3f828052f8d2 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -796,7 +796,7 @@ endchoice
796config BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ 796config BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ
797 int "Maximum transfer size (KB) per request (up to 128)" 797 int "Maximum transfer size (KB) per request (up to 128)"
798 default "128" 798 default "128"
799 depends BLK_DEV_IDE_AU1XXX 799 depends on BLK_DEV_IDE_AU1XXX
800 800
801config IDE_ARM 801config IDE_ARM
802 def_bool ARM && (ARCH_A5K || ARCH_CLPS7500 || ARCH_RPC || ARCH_SHARK) 802 def_bool ARM && (ARCH_A5K || ARCH_CLPS7500 || ARCH_RPC || ARCH_SHARK)
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 88214943d00a..5969cec58dc1 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -687,8 +687,15 @@ static void ide_dump_status_no_sense(ide_drive_t *drive, const char *msg, u8 sta
687static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret) 687static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
688{ 688{
689 struct request *rq = HWGROUP(drive)->rq; 689 struct request *rq = HWGROUP(drive)->rq;
690 ide_hwif_t *hwif = HWIF(drive);
690 int stat, err, sense_key; 691 int stat, err, sense_key;
691 692
693 /* We may have bogus DMA interrupts in PIO state here */
694 if (HWIF(drive)->dma_status && hwif->atapi_irq_bogon) {
695 stat = hwif->INB(hwif->dma_status);
696 /* Should we force the bit as well ? */
697 hwif->OUTB(stat, hwif->dma_status);
698 }
692 /* Check for errors. */ 699 /* Check for errors. */
693 stat = HWIF(drive)->INB(IDE_STATUS_REG); 700 stat = HWIF(drive)->INB(IDE_STATUS_REG);
694 if (stat_ret) 701 if (stat_ret)
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index e3a267622bb6..d33717c8afd4 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -2147,7 +2147,7 @@ static int ide_floppy_probe(ide_drive_t *drive)
2147 printk("ide-floppy: passing drive %s to ide-scsi emulation.\n", drive->name); 2147 printk("ide-floppy: passing drive %s to ide-scsi emulation.\n", drive->name);
2148 goto failed; 2148 goto failed;
2149 } 2149 }
2150 if ((floppy = (idefloppy_floppy_t *) kzalloc (sizeof (idefloppy_floppy_t), GFP_KERNEL)) == NULL) { 2150 if ((floppy = kzalloc(sizeof (idefloppy_floppy_t), GFP_KERNEL)) == NULL) {
2151 printk (KERN_ERR "ide-floppy: %s: Can't allocate a floppy structure\n", drive->name); 2151 printk (KERN_ERR "ide-floppy: %s: Can't allocate a floppy structure\n", drive->name);
2152 goto failed; 2152 goto failed;
2153 } 2153 }
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index dad9c47ebb69..5a5c565a32a8 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1000,10 +1000,6 @@ static int ide_init_queue(ide_drive_t *drive)
1000 /* needs drive->queue to be set */ 1000 /* needs drive->queue to be set */
1001 ide_toggle_bounce(drive, 1); 1001 ide_toggle_bounce(drive, 1);
1002 1002
1003 /* enable led activity for disk drives only */
1004 if (drive->media == ide_disk && hwif->led_act)
1005 blk_queue_activity_fn(q, hwif->led_act, drive);
1006
1007 return 0; 1003 return 0;
1008} 1004}
1009 1005
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index e2f4bb549063..b3bcd1d7315e 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -2573,11 +2573,11 @@ static idetape_stage_t *__idetape_kmalloc_stage (idetape_tape_t *tape, int full,
2573 int pages = tape->pages_per_stage; 2573 int pages = tape->pages_per_stage;
2574 char *b_data = NULL; 2574 char *b_data = NULL;
2575 2575
2576 if ((stage = (idetape_stage_t *) kmalloc (sizeof (idetape_stage_t),GFP_KERNEL)) == NULL) 2576 if ((stage = kmalloc(sizeof (idetape_stage_t),GFP_KERNEL)) == NULL)
2577 return NULL; 2577 return NULL;
2578 stage->next = NULL; 2578 stage->next = NULL;
2579 2579
2580 bh = stage->bh = (struct idetape_bh *)kmalloc(sizeof(struct idetape_bh), GFP_KERNEL); 2580 bh = stage->bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
2581 if (bh == NULL) 2581 if (bh == NULL)
2582 goto abort; 2582 goto abort;
2583 bh->b_reqnext = NULL; 2583 bh->b_reqnext = NULL;
@@ -2607,7 +2607,7 @@ static idetape_stage_t *__idetape_kmalloc_stage (idetape_tape_t *tape, int full,
2607 continue; 2607 continue;
2608 } 2608 }
2609 prev_bh = bh; 2609 prev_bh = bh;
2610 if ((bh = (struct idetape_bh *)kmalloc(sizeof(struct idetape_bh), GFP_KERNEL)) == NULL) { 2610 if ((bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL)) == NULL) {
2611 free_page((unsigned long) b_data); 2611 free_page((unsigned long) b_data);
2612 goto abort; 2612 goto abort;
2613 } 2613 }
@@ -4860,7 +4860,7 @@ static int ide_tape_probe(ide_drive_t *drive)
4860 printk(KERN_WARNING "ide-tape: Use drive %s with ide-scsi emulation and osst.\n", drive->name); 4860 printk(KERN_WARNING "ide-tape: Use drive %s with ide-scsi emulation and osst.\n", drive->name);
4861 printk(KERN_WARNING "ide-tape: OnStream support will be removed soon from ide-tape!\n"); 4861 printk(KERN_WARNING "ide-tape: OnStream support will be removed soon from ide-tape!\n");
4862 } 4862 }
4863 tape = (idetape_tape_t *) kzalloc (sizeof (idetape_tape_t), GFP_KERNEL); 4863 tape = kzalloc(sizeof (idetape_tape_t), GFP_KERNEL);
4864 if (tape == NULL) { 4864 if (tape == NULL) {
4865 printk(KERN_ERR "ide-tape: %s: Can't allocate a tape structure\n", drive->name); 4865 printk(KERN_ERR "ide-tape: %s: Can't allocate a tape structure\n", drive->name);
4866 goto failed; 4866 goto failed;
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
index e993a51f250e..08119da06d54 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/pci/hpt366.c
@@ -4,6 +4,7 @@
4 * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org> 4 * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
5 * Portions Copyright (C) 2001 Sun Microsystems, Inc. 5 * Portions Copyright (C) 2001 Sun Microsystems, Inc.
6 * Portions Copyright (C) 2003 Red Hat Inc 6 * Portions Copyright (C) 2003 Red Hat Inc
7 * Portions Copyright (C) 2005-2006 MontaVista Software, Inc.
7 * 8 *
8 * Thanks to HighPoint Technologies for their assistance, and hardware. 9 * Thanks to HighPoint Technologies for their assistance, and hardware.
9 * Special Thanks to Jon Burchmore in SanDiego for the deep pockets, his 10 * Special Thanks to Jon Burchmore in SanDiego for the deep pockets, his
@@ -11,9 +12,11 @@
11 * development and support. 12 * development and support.
12 * 13 *
13 * 14 *
14 * Highpoint have their own driver (source except for the raid part) 15 * HighPoint has its own drivers (open source except for the RAID part)
15 * available from http://www.highpoint-tech.com/hpt3xx-opensource-v131.tgz 16 * available from http://www.highpoint-tech.com/BIOS%20+%20Driver/.
16 * This may be useful to anyone wanting to work on the mainstream hpt IDE. 17 * This may be useful to anyone wanting to work on this driver, however do not
18 * trust them too much since the code tends to become less and less meaningful
19 * as the time passes... :-/
17 * 20 *
18 * Note that final HPT370 support was done by force extraction of GPL. 21 * Note that final HPT370 support was done by force extraction of GPL.
19 * 22 *
@@ -52,6 +55,29 @@
52 * keeping me sane. 55 * keeping me sane.
53 * Alan Cox <alan@redhat.com> 56 * Alan Cox <alan@redhat.com>
54 * 57 *
58 * - fix the clock turnaround code: it was writing to the wrong ports when
59 * called for the secondary channel, caching the current clock mode per-
60 * channel caused the cached register value to get out of sync with the
61 * actual one, the channels weren't serialized, the turnaround shouldn't
62 * be done on 66 MHz PCI bus
63 * - avoid calibrating PLL twice as the second time results in a wrong PCI
64 * frequency and thus in the wrong timings for the secondary channel
65 * - disable UltraATA/133 for HPT372 by default (50 MHz DPLL clock do not
66 * allow for this speed anyway)
67 * - add support for HPT302N and HPT371N clocking (the same as for HPT372N)
68 * - HPT371/N are single channel chips, so avoid touching the primary channel
69 * which exists only virtually (there's no pins for it)
70 * - fix/remove bad/unused timing tables and use one set of tables for the whole
71 * HPT37x chip family; save space by introducing the separate transfer mode
72 * table in which the mode lookup is done
73 * - use f_CNT value saved by the HighPoint BIOS as reading it directly gives
74 * the wrong PCI frequency since DPLL has already been calibrated by BIOS
75 * - fix the hotswap code: it caused RESET- to glitch when tristating the bus,
76 * and for HPT36x the obsolete HDIO_TRISTATE_HWIF handler was called instead
77 * - pass to init_chipset() handlers a copy of the IDE PCI device structure as
78 * they tamper with its fields
79 * <source@mvista.com>
80 *
55 */ 81 */
56 82
57 83
@@ -76,8 +102,8 @@
76 102
77/* various tuning parameters */ 103/* various tuning parameters */
78#define HPT_RESET_STATE_ENGINE 104#define HPT_RESET_STATE_ENGINE
79#undef HPT_DELAY_INTERRUPT 105#undef HPT_DELAY_INTERRUPT
80#undef HPT_SERIALIZE_IO 106#define HPT_SERIALIZE_IO 0
81 107
82static const char *quirk_drives[] = { 108static const char *quirk_drives[] = {
83 "QUANTUM FIREBALLlct08 08", 109 "QUANTUM FIREBALLlct08 08",
@@ -141,305 +167,175 @@ static const char *bad_ata33[] = {
141 NULL 167 NULL
142}; 168};
143 169
144struct chipset_bus_clock_list_entry { 170static u8 xfer_speeds[] = {
145 u8 xfer_speed; 171 XFER_UDMA_6,
146 unsigned int chipset_settings; 172 XFER_UDMA_5,
173 XFER_UDMA_4,
174 XFER_UDMA_3,
175 XFER_UDMA_2,
176 XFER_UDMA_1,
177 XFER_UDMA_0,
178
179 XFER_MW_DMA_2,
180 XFER_MW_DMA_1,
181 XFER_MW_DMA_0,
182
183 XFER_PIO_4,
184 XFER_PIO_3,
185 XFER_PIO_2,
186 XFER_PIO_1,
187 XFER_PIO_0
147}; 188};
148 189
149/* key for bus clock timings 190/* Key for bus clock timings
150 * bit 191 * 36x 37x
151 * 0:3 data_high_time. inactive time of DIOW_/DIOR_ for PIO and MW 192 * bits bits
152 * DMA. cycles = value + 1 193 * 0:3 0:3 data_high_time. Inactive time of DIOW_/DIOR_ for PIO and MW DMA.
153 * 4:8 data_low_time. active time of DIOW_/DIOR_ for PIO and MW 194 * cycles = value + 1
154 * DMA. cycles = value + 1 195 * 4:7 4:8 data_low_time. Active time of DIOW_/DIOR_ for PIO and MW DMA.
155 * 9:12 cmd_high_time. inactive time of DIOW_/DIOR_ during task file 196 * cycles = value + 1
156 * register access. 197 * 8:11 9:12 cmd_high_time. Inactive time of DIOW_/DIOR_ during task file
157 * 13:17 cmd_low_time. active time of DIOW_/DIOR_ during task file 198 * register access.
158 * register access. 199 * 12:15 13:17 cmd_low_time. Active time of DIOW_/DIOR_ during task file
159 * 18:21 udma_cycle_time. clock freq and clock cycles for UDMA xfer. 200 * register access.
160 * during task file register access. 201 * 16:18 18:20 udma_cycle_time. Clock cycles for UDMA xfer.
161 * 22:24 pre_high_time. time to initialize 1st cycle for PIO and MW DMA 202 * - 21 CLK frequency: 0=ATA clock, 1=dual ATA clock.
162 * xfer. 203 * 19:21 22:24 pre_high_time. Time to initialize the 1st cycle for PIO and
163 * 25:27 cmd_pre_high_time. time to initialize 1st PIO cycle for task 204 * MW DMA xfer.
164 * register access. 205 * 22:24 25:27 cmd_pre_high_time. Time to initialize the 1st PIO cycle for
165 * 28 UDMA enable 206 * task file register access.
166 * 29 DMA enable 207 * 28 28 UDMA enable.
167 * 30 PIO_MST enable. if set, the chip is in bus master mode during 208 * 29 29 DMA enable.
168 * PIO. 209 * 30 30 PIO MST enable. If set, the chip is in bus master mode during
169 * 31 FIFO enable. 210 * PIO xfer.
211 * 31 31 FIFO enable.
170 */ 212 */
171static struct chipset_bus_clock_list_entry forty_base_hpt366[] = {
172 { XFER_UDMA_4, 0x900fd943 },
173 { XFER_UDMA_3, 0x900ad943 },
174 { XFER_UDMA_2, 0x900bd943 },
175 { XFER_UDMA_1, 0x9008d943 },
176 { XFER_UDMA_0, 0x9008d943 },
177
178 { XFER_MW_DMA_2, 0xa008d943 },
179 { XFER_MW_DMA_1, 0xa010d955 },
180 { XFER_MW_DMA_0, 0xa010d9fc },
181
182 { XFER_PIO_4, 0xc008d963 },
183 { XFER_PIO_3, 0xc010d974 },
184 { XFER_PIO_2, 0xc010d997 },
185 { XFER_PIO_1, 0xc010d9c7 },
186 { XFER_PIO_0, 0xc018d9d9 },
187 { 0, 0x0120d9d9 }
188};
189
190static struct chipset_bus_clock_list_entry thirty_three_base_hpt366[] = {
191 { XFER_UDMA_4, 0x90c9a731 },
192 { XFER_UDMA_3, 0x90cfa731 },
193 { XFER_UDMA_2, 0x90caa731 },
194 { XFER_UDMA_1, 0x90cba731 },
195 { XFER_UDMA_0, 0x90c8a731 },
196
197 { XFER_MW_DMA_2, 0xa0c8a731 },
198 { XFER_MW_DMA_1, 0xa0c8a732 }, /* 0xa0c8a733 */
199 { XFER_MW_DMA_0, 0xa0c8a797 },
200
201 { XFER_PIO_4, 0xc0c8a731 },
202 { XFER_PIO_3, 0xc0c8a742 },
203 { XFER_PIO_2, 0xc0d0a753 },
204 { XFER_PIO_1, 0xc0d0a7a3 }, /* 0xc0d0a793 */
205 { XFER_PIO_0, 0xc0d0a7aa }, /* 0xc0d0a7a7 */
206 { 0, 0x0120a7a7 }
207};
208
209static struct chipset_bus_clock_list_entry twenty_five_base_hpt366[] = {
210 { XFER_UDMA_4, 0x90c98521 },
211 { XFER_UDMA_3, 0x90cf8521 },
212 { XFER_UDMA_2, 0x90cf8521 },
213 { XFER_UDMA_1, 0x90cb8521 },
214 { XFER_UDMA_0, 0x90cb8521 },
215
216 { XFER_MW_DMA_2, 0xa0ca8521 },
217 { XFER_MW_DMA_1, 0xa0ca8532 },
218 { XFER_MW_DMA_0, 0xa0ca8575 },
219
220 { XFER_PIO_4, 0xc0ca8521 },
221 { XFER_PIO_3, 0xc0ca8532 },
222 { XFER_PIO_2, 0xc0ca8542 },
223 { XFER_PIO_1, 0xc0d08572 },
224 { XFER_PIO_0, 0xc0d08585 },
225 { 0, 0x01208585 }
226};
227
228/* from highpoint documentation. these are old values */
229static struct chipset_bus_clock_list_entry thirty_three_base_hpt370[] = {
230/* { XFER_UDMA_5, 0x1A85F442, 0x16454e31 }, */
231 { XFER_UDMA_5, 0x16454e31 },
232 { XFER_UDMA_4, 0x16454e31 },
233 { XFER_UDMA_3, 0x166d4e31 },
234 { XFER_UDMA_2, 0x16494e31 },
235 { XFER_UDMA_1, 0x164d4e31 },
236 { XFER_UDMA_0, 0x16514e31 },
237
238 { XFER_MW_DMA_2, 0x26514e21 },
239 { XFER_MW_DMA_1, 0x26514e33 },
240 { XFER_MW_DMA_0, 0x26514e97 },
241
242 { XFER_PIO_4, 0x06514e21 },
243 { XFER_PIO_3, 0x06514e22 },
244 { XFER_PIO_2, 0x06514e33 },
245 { XFER_PIO_1, 0x06914e43 },
246 { XFER_PIO_0, 0x06914e57 },
247 { 0, 0x06514e57 }
248};
249
250static struct chipset_bus_clock_list_entry sixty_six_base_hpt370[] = {
251 { XFER_UDMA_5, 0x14846231 },
252 { XFER_UDMA_4, 0x14886231 },
253 { XFER_UDMA_3, 0x148c6231 },
254 { XFER_UDMA_2, 0x148c6231 },
255 { XFER_UDMA_1, 0x14906231 },
256 { XFER_UDMA_0, 0x14986231 },
257
258 { XFER_MW_DMA_2, 0x26514e21 },
259 { XFER_MW_DMA_1, 0x26514e33 },
260 { XFER_MW_DMA_0, 0x26514e97 },
261
262 { XFER_PIO_4, 0x06514e21 },
263 { XFER_PIO_3, 0x06514e22 },
264 { XFER_PIO_2, 0x06514e33 },
265 { XFER_PIO_1, 0x06914e43 },
266 { XFER_PIO_0, 0x06914e57 },
267 { 0, 0x06514e57 }
268};
269
270/* these are the current (4 sep 2001) timings from highpoint */
271static struct chipset_bus_clock_list_entry thirty_three_base_hpt370a[] = {
272 { XFER_UDMA_5, 0x12446231 },
273 { XFER_UDMA_4, 0x12446231 },
274 { XFER_UDMA_3, 0x126c6231 },
275 { XFER_UDMA_2, 0x12486231 },
276 { XFER_UDMA_1, 0x124c6233 },
277 { XFER_UDMA_0, 0x12506297 },
278
279 { XFER_MW_DMA_2, 0x22406c31 },
280 { XFER_MW_DMA_1, 0x22406c33 },
281 { XFER_MW_DMA_0, 0x22406c97 },
282
283 { XFER_PIO_4, 0x06414e31 },
284 { XFER_PIO_3, 0x06414e42 },
285 { XFER_PIO_2, 0x06414e53 },
286 { XFER_PIO_1, 0x06814e93 },
287 { XFER_PIO_0, 0x06814ea7 },
288 { 0, 0x06814ea7 }
289};
290
291/* 2x 33MHz timings */
292static struct chipset_bus_clock_list_entry sixty_six_base_hpt370a[] = {
293 { XFER_UDMA_5, 0x1488e673 },
294 { XFER_UDMA_4, 0x1488e673 },
295 { XFER_UDMA_3, 0x1498e673 },
296 { XFER_UDMA_2, 0x1490e673 },
297 { XFER_UDMA_1, 0x1498e677 },
298 { XFER_UDMA_0, 0x14a0e73f },
299
300 { XFER_MW_DMA_2, 0x2480fa73 },
301 { XFER_MW_DMA_1, 0x2480fa77 },
302 { XFER_MW_DMA_0, 0x2480fb3f },
303
304 { XFER_PIO_4, 0x0c82be73 },
305 { XFER_PIO_3, 0x0c82be95 },
306 { XFER_PIO_2, 0x0c82beb7 },
307 { XFER_PIO_1, 0x0d02bf37 },
308 { XFER_PIO_0, 0x0d02bf5f },
309 { 0, 0x0d02bf5f }
310};
311 213
312static struct chipset_bus_clock_list_entry fifty_base_hpt370a[] = { 214static u32 forty_base_hpt36x[] = {
313 { XFER_UDMA_5, 0x12848242 }, 215 /* XFER_UDMA_6 */ 0x900fd943,
314 { XFER_UDMA_4, 0x12ac8242 }, 216 /* XFER_UDMA_5 */ 0x900fd943,
315 { XFER_UDMA_3, 0x128c8242 }, 217 /* XFER_UDMA_4 */ 0x900fd943,
316 { XFER_UDMA_2, 0x120c8242 }, 218 /* XFER_UDMA_3 */ 0x900ad943,
317 { XFER_UDMA_1, 0x12148254 }, 219 /* XFER_UDMA_2 */ 0x900bd943,
318 { XFER_UDMA_0, 0x121882ea }, 220 /* XFER_UDMA_1 */ 0x9008d943,
319 221 /* XFER_UDMA_0 */ 0x9008d943,
320 { XFER_MW_DMA_2, 0x22808242 }, 222
321 { XFER_MW_DMA_1, 0x22808254 }, 223 /* XFER_MW_DMA_2 */ 0xa008d943,
322 { XFER_MW_DMA_0, 0x228082ea }, 224 /* XFER_MW_DMA_1 */ 0xa010d955,
323 225 /* XFER_MW_DMA_0 */ 0xa010d9fc,
324 { XFER_PIO_4, 0x0a81f442 }, 226
325 { XFER_PIO_3, 0x0a81f443 }, 227 /* XFER_PIO_4 */ 0xc008d963,
326 { XFER_PIO_2, 0x0a81f454 }, 228 /* XFER_PIO_3 */ 0xc010d974,
327 { XFER_PIO_1, 0x0ac1f465 }, 229 /* XFER_PIO_2 */ 0xc010d997,
328 { XFER_PIO_0, 0x0ac1f48a }, 230 /* XFER_PIO_1 */ 0xc010d9c7,
329 { 0, 0x0ac1f48a } 231 /* XFER_PIO_0 */ 0xc018d9d9
330}; 232};
331 233
332static struct chipset_bus_clock_list_entry thirty_three_base_hpt372[] = { 234static u32 thirty_three_base_hpt36x[] = {
333 { XFER_UDMA_6, 0x1c81dc62 }, 235 /* XFER_UDMA_6 */ 0x90c9a731,
334 { XFER_UDMA_5, 0x1c6ddc62 }, 236 /* XFER_UDMA_5 */ 0x90c9a731,
335 { XFER_UDMA_4, 0x1c8ddc62 }, 237 /* XFER_UDMA_4 */ 0x90c9a731,
336 { XFER_UDMA_3, 0x1c8edc62 }, /* checkme */ 238 /* XFER_UDMA_3 */ 0x90cfa731,
337 { XFER_UDMA_2, 0x1c91dc62 }, 239 /* XFER_UDMA_2 */ 0x90caa731,
338 { XFER_UDMA_1, 0x1c9adc62 }, /* checkme */ 240 /* XFER_UDMA_1 */ 0x90cba731,
339 { XFER_UDMA_0, 0x1c82dc62 }, /* checkme */ 241 /* XFER_UDMA_0 */ 0x90c8a731,
340 242
341 { XFER_MW_DMA_2, 0x2c829262 }, 243 /* XFER_MW_DMA_2 */ 0xa0c8a731,
342 { XFER_MW_DMA_1, 0x2c829266 }, /* checkme */ 244 /* XFER_MW_DMA_1 */ 0xa0c8a732, /* 0xa0c8a733 */
343 { XFER_MW_DMA_0, 0x2c82922e }, /* checkme */ 245 /* XFER_MW_DMA_0 */ 0xa0c8a797,
344 246
345 { XFER_PIO_4, 0x0c829c62 }, 247 /* XFER_PIO_4 */ 0xc0c8a731,
346 { XFER_PIO_3, 0x0c829c84 }, 248 /* XFER_PIO_3 */ 0xc0c8a742,
347 { XFER_PIO_2, 0x0c829ca6 }, 249 /* XFER_PIO_2 */ 0xc0d0a753,
348 { XFER_PIO_1, 0x0d029d26 }, 250 /* XFER_PIO_1 */ 0xc0d0a7a3, /* 0xc0d0a793 */
349 { XFER_PIO_0, 0x0d029d5e }, 251 /* XFER_PIO_0 */ 0xc0d0a7aa /* 0xc0d0a7a7 */
350 { 0, 0x0d029d5e }
351}; 252};
352 253
353static struct chipset_bus_clock_list_entry fifty_base_hpt372[] = { 254static u32 twenty_five_base_hpt36x[] = {
354 { XFER_UDMA_5, 0x12848242 }, 255 /* XFER_UDMA_6 */ 0x90c98521,
355 { XFER_UDMA_4, 0x12ac8242 }, 256 /* XFER_UDMA_5 */ 0x90c98521,
356 { XFER_UDMA_3, 0x128c8242 }, 257 /* XFER_UDMA_4 */ 0x90c98521,
357 { XFER_UDMA_2, 0x120c8242 }, 258 /* XFER_UDMA_3 */ 0x90cf8521,
358 { XFER_UDMA_1, 0x12148254 }, 259 /* XFER_UDMA_2 */ 0x90cf8521,
359 { XFER_UDMA_0, 0x121882ea }, 260 /* XFER_UDMA_1 */ 0x90cb8521,
360 261 /* XFER_UDMA_0 */ 0x90cb8521,
361 { XFER_MW_DMA_2, 0x22808242 }, 262
362 { XFER_MW_DMA_1, 0x22808254 }, 263 /* XFER_MW_DMA_2 */ 0xa0ca8521,
363 { XFER_MW_DMA_0, 0x228082ea }, 264 /* XFER_MW_DMA_1 */ 0xa0ca8532,
364 265 /* XFER_MW_DMA_0 */ 0xa0ca8575,
365 { XFER_PIO_4, 0x0a81f442 }, 266
366 { XFER_PIO_3, 0x0a81f443 }, 267 /* XFER_PIO_4 */ 0xc0ca8521,
367 { XFER_PIO_2, 0x0a81f454 }, 268 /* XFER_PIO_3 */ 0xc0ca8532,
368 { XFER_PIO_1, 0x0ac1f465 }, 269 /* XFER_PIO_2 */ 0xc0ca8542,
369 { XFER_PIO_0, 0x0ac1f48a }, 270 /* XFER_PIO_1 */ 0xc0d08572,
370 { 0, 0x0a81f443 } 271 /* XFER_PIO_0 */ 0xc0d08585
371}; 272};
372 273
373static struct chipset_bus_clock_list_entry sixty_six_base_hpt372[] = { 274static u32 thirty_three_base_hpt37x[] = {
374 { XFER_UDMA_6, 0x1c869c62 }, 275 /* XFER_UDMA_6 */ 0x12446231, /* 0x12646231 ?? */
375 { XFER_UDMA_5, 0x1cae9c62 }, 276 /* XFER_UDMA_5 */ 0x12446231,
376 { XFER_UDMA_4, 0x1c8a9c62 }, 277 /* XFER_UDMA_4 */ 0x12446231,
377 { XFER_UDMA_3, 0x1c8e9c62 }, 278 /* XFER_UDMA_3 */ 0x126c6231,
378 { XFER_UDMA_2, 0x1c929c62 }, 279 /* XFER_UDMA_2 */ 0x12486231,
379 { XFER_UDMA_1, 0x1c9a9c62 }, 280 /* XFER_UDMA_1 */ 0x124c6233,
380 { XFER_UDMA_0, 0x1c829c62 }, 281 /* XFER_UDMA_0 */ 0x12506297,
381 282
382 { XFER_MW_DMA_2, 0x2c829c62 }, 283 /* XFER_MW_DMA_2 */ 0x22406c31,
383 { XFER_MW_DMA_1, 0x2c829c66 }, 284 /* XFER_MW_DMA_1 */ 0x22406c33,
384 { XFER_MW_DMA_0, 0x2c829d2e }, 285 /* XFER_MW_DMA_0 */ 0x22406c97,
385 286
386 { XFER_PIO_4, 0x0c829c62 }, 287 /* XFER_PIO_4 */ 0x06414e31,
387 { XFER_PIO_3, 0x0c829c84 }, 288 /* XFER_PIO_3 */ 0x06414e42,
388 { XFER_PIO_2, 0x0c829ca6 }, 289 /* XFER_PIO_2 */ 0x06414e53,
389 { XFER_PIO_1, 0x0d029d26 }, 290 /* XFER_PIO_1 */ 0x06814e93,
390 { XFER_PIO_0, 0x0d029d5e }, 291 /* XFER_PIO_0 */ 0x06814ea7
391 { 0, 0x0d029d26 }
392}; 292};
393 293
394static struct chipset_bus_clock_list_entry thirty_three_base_hpt374[] = { 294static u32 fifty_base_hpt37x[] = {
395 { XFER_UDMA_6, 0x12808242 }, 295 /* XFER_UDMA_6 */ 0x12848242,
396 { XFER_UDMA_5, 0x12848242 }, 296 /* XFER_UDMA_5 */ 0x12848242,
397 { XFER_UDMA_4, 0x12ac8242 }, 297 /* XFER_UDMA_4 */ 0x12ac8242,
398 { XFER_UDMA_3, 0x128c8242 }, 298 /* XFER_UDMA_3 */ 0x128c8242,
399 { XFER_UDMA_2, 0x120c8242 }, 299 /* XFER_UDMA_2 */ 0x120c8242,
400 { XFER_UDMA_1, 0x12148254 }, 300 /* XFER_UDMA_1 */ 0x12148254,
401 { XFER_UDMA_0, 0x121882ea }, 301 /* XFER_UDMA_0 */ 0x121882ea,
402 302
403 { XFER_MW_DMA_2, 0x22808242 }, 303 /* XFER_MW_DMA_2 */ 0x22808242,
404 { XFER_MW_DMA_1, 0x22808254 }, 304 /* XFER_MW_DMA_1 */ 0x22808254,
405 { XFER_MW_DMA_0, 0x228082ea }, 305 /* XFER_MW_DMA_0 */ 0x228082ea,
406 306
407 { XFER_PIO_4, 0x0a81f442 }, 307 /* XFER_PIO_4 */ 0x0a81f442,
408 { XFER_PIO_3, 0x0a81f443 }, 308 /* XFER_PIO_3 */ 0x0a81f443,
409 { XFER_PIO_2, 0x0a81f454 }, 309 /* XFER_PIO_2 */ 0x0a81f454,
410 { XFER_PIO_1, 0x0ac1f465 }, 310 /* XFER_PIO_1 */ 0x0ac1f465,
411 { XFER_PIO_0, 0x0ac1f48a }, 311 /* XFER_PIO_0 */ 0x0ac1f48a
412 { 0, 0x06814e93 }
413}; 312};
414 313
415/* FIXME: 50MHz timings for HPT374 */ 314static u32 sixty_six_base_hpt37x[] = {
416 315 /* XFER_UDMA_6 */ 0x1c869c62,
417#if 0 316 /* XFER_UDMA_5 */ 0x1cae9c62, /* 0x1c8a9c62 */
418static struct chipset_bus_clock_list_entry sixty_six_base_hpt374[] = { 317 /* XFER_UDMA_4 */ 0x1c8a9c62,
419 { XFER_UDMA_6, 0x12406231 }, /* checkme */ 318 /* XFER_UDMA_3 */ 0x1c8e9c62,
420 { XFER_UDMA_5, 0x12446231 }, /* 0x14846231 */ 319 /* XFER_UDMA_2 */ 0x1c929c62,
421 { XFER_UDMA_4, 0x16814ea7 }, /* 0x14886231 */ 320 /* XFER_UDMA_1 */ 0x1c9a9c62,
422 { XFER_UDMA_3, 0x16814ea7 }, /* 0x148c6231 */ 321 /* XFER_UDMA_0 */ 0x1c829c62,
423 { XFER_UDMA_2, 0x16814ea7 }, /* 0x148c6231 */ 322
424 { XFER_UDMA_1, 0x16814ea7 }, /* 0x14906231 */ 323 /* XFER_MW_DMA_2 */ 0x2c829c62,
425 { XFER_UDMA_0, 0x16814ea7 }, /* 0x14986231 */ 324 /* XFER_MW_DMA_1 */ 0x2c829c66,
426 { XFER_MW_DMA_2, 0x16814ea7 }, /* 0x26514e21 */ 325 /* XFER_MW_DMA_0 */ 0x2c829d2e,
427 { XFER_MW_DMA_1, 0x16814ea7 }, /* 0x26514e97 */ 326
428 { XFER_MW_DMA_0, 0x16814ea7 }, /* 0x26514e97 */ 327 /* XFER_PIO_4 */ 0x0c829c62,
429 { XFER_PIO_4, 0x06814ea7 }, /* 0x06514e21 */ 328 /* XFER_PIO_3 */ 0x0c829c84,
430 { XFER_PIO_3, 0x06814ea7 }, /* 0x06514e22 */ 329 /* XFER_PIO_2 */ 0x0c829ca6,
431 { XFER_PIO_2, 0x06814ea7 }, /* 0x06514e33 */ 330 /* XFER_PIO_1 */ 0x0d029d26,
432 { XFER_PIO_1, 0x06814ea7 }, /* 0x06914e43 */ 331 /* XFER_PIO_0 */ 0x0d029d5e
433 { XFER_PIO_0, 0x06814ea7 }, /* 0x06914e57 */
434 { 0, 0x06814ea7 }
435}; 332};
436#endif
437 333
438#define HPT366_DEBUG_DRIVE_INFO 0 334#define HPT366_DEBUG_DRIVE_INFO 0
439#define HPT374_ALLOW_ATA133_6 0 335#define HPT374_ALLOW_ATA133_6 0
440#define HPT371_ALLOW_ATA133_6 0 336#define HPT371_ALLOW_ATA133_6 0
441#define HPT302_ALLOW_ATA133_6 0 337#define HPT302_ALLOW_ATA133_6 0
442#define HPT372_ALLOW_ATA133_6 1 338#define HPT372_ALLOW_ATA133_6 0
443#define HPT370_ALLOW_ATA100_5 1 339#define HPT370_ALLOW_ATA100_5 1
444#define HPT366_ALLOW_ATA66_4 1 340#define HPT366_ALLOW_ATA66_4 1
445#define HPT366_ALLOW_ATA66_3 1 341#define HPT366_ALLOW_ATA66_3 1
@@ -461,9 +357,10 @@ struct hpt_info
461 int revision; /* Chipset revision */ 357 int revision; /* Chipset revision */
462 int flags; /* Chipset properties */ 358 int flags; /* Chipset properties */
463#define PLL_MODE 1 359#define PLL_MODE 1
464#define IS_372N 2 360#define IS_3xxN 2
361#define PCI_66MHZ 4
465 /* Speed table */ 362 /* Speed table */
466 struct chipset_bus_clock_list_entry *speed; 363 u32 *speed;
467}; 364};
468 365
469/* 366/*
@@ -600,12 +497,20 @@ static int check_in_drive_lists (ide_drive_t *drive, const char **list)
600 return 0; 497 return 0;
601} 498}
602 499
603static unsigned int pci_bus_clock_list (u8 speed, struct chipset_bus_clock_list_entry * chipset_table) 500static u32 pci_bus_clock_list(u8 speed, u32 *chipset_table)
604{ 501{
605 for ( ; chipset_table->xfer_speed ; chipset_table++) 502 int i;
606 if (chipset_table->xfer_speed == speed) 503
607 return chipset_table->chipset_settings; 504 /*
608 return chipset_table->chipset_settings; 505 * Lookup the transfer mode table to get the index into
506 * the timing table.
507 *
508 * NOTE: For XFER_PIO_SLOW, PIO mode 0 timings will be used.
509 */
510 for (i = 0; i < ARRAY_SIZE(xfer_speeds) - 1; i++)
511 if (xfer_speeds[i] == speed)
512 break;
513 return chipset_table[i];
609} 514}
610 515
611static int hpt36x_tune_chipset(ide_drive_t *drive, u8 xferspeed) 516static int hpt36x_tune_chipset(ide_drive_t *drive, u8 xferspeed)
@@ -956,156 +861,127 @@ static int hpt374_ide_dma_end (ide_drive_t *drive)
956} 861}
957 862
958/** 863/**
959 * hpt372n_set_clock - perform clock switching dance 864 * hpt3xxn_set_clock - perform clock switching dance
960 * @drive: Drive to switch 865 * @hwif: hwif to switch
961 * @mode: Switching mode (0x21 for write, 0x23 otherwise) 866 * @mode: clocking mode (0x21 for write, 0x23 otherwise)
962 * 867 *
963 * Switch the DPLL clock on the HPT372N devices. This is a 868 * Switch the DPLL clock on the HPT3xxN devices. This is a right mess.
964 * right mess. 869 * NOTE: avoid touching the disabled primary channel on HPT371N -- it
870 * doesn't physically exist anyway...
965 */ 871 */
966 872
967static void hpt372n_set_clock(ide_drive_t *drive, int mode) 873static void hpt3xxn_set_clock(ide_hwif_t *hwif, u8 mode)
968{ 874{
969 ide_hwif_t *hwif = HWIF(drive); 875 u8 mcr1, scr2 = hwif->INB(hwif->dma_master + 0x7b);
970 876
971 /* FIXME: should we check for DMA active and BUG() */ 877 if ((scr2 & 0x7f) == mode)
878 return;
879
880 /* MISC. control register 1 has the channel enable bit... */
881 mcr1 = hwif->INB(hwif->dma_master + 0x70);
882
972 /* Tristate the bus */ 883 /* Tristate the bus */
973 outb(0x80, hwif->dma_base+0x73); 884 if (mcr1 & 0x04)
974 outb(0x80, hwif->dma_base+0x77); 885 hwif->OUTB(0x80, hwif->dma_master + 0x73);
975 886 hwif->OUTB(0x80, hwif->dma_master + 0x77);
887
976 /* Switch clock and reset channels */ 888 /* Switch clock and reset channels */
977 outb(mode, hwif->dma_base+0x7B); 889 hwif->OUTB(mode, hwif->dma_master + 0x7b);
978 outb(0xC0, hwif->dma_base+0x79); 890 hwif->OUTB(0xc0, hwif->dma_master + 0x79);
979 891
980 /* Reset state machines */ 892 /* Reset state machines */
981 outb(0x37, hwif->dma_base+0x70); 893 if (mcr1 & 0x04)
982 outb(0x37, hwif->dma_base+0x74); 894 hwif->OUTB(0x37, hwif->dma_master + 0x70);
983 895 hwif->OUTB(0x37, hwif->dma_master + 0x74);
896
984 /* Complete reset */ 897 /* Complete reset */
985 outb(0x00, hwif->dma_base+0x79); 898 hwif->OUTB(0x00, hwif->dma_master + 0x79);
986 899
987 /* Reconnect channels to bus */ 900 /* Reconnect channels to bus */
988 outb(0x00, hwif->dma_base+0x73); 901 if (mcr1 & 0x04)
989 outb(0x00, hwif->dma_base+0x77); 902 hwif->OUTB(0x00, hwif->dma_master + 0x73);
903 hwif->OUTB(0x00, hwif->dma_master + 0x77);
990} 904}
991 905
992/** 906/**
993 * hpt372n_rw_disk - prepare for I/O 907 * hpt3xxn_rw_disk - prepare for I/O
994 * @drive: drive for command 908 * @drive: drive for command
995 * @rq: block request structure 909 * @rq: block request structure
996 * 910 *
997 * This is called when a disk I/O is issued to the 372N. 911 * This is called when a disk I/O is issued to HPT3xxN.
998 * We need it because of the clock switching. 912 * We need it because of the clock switching.
999 */ 913 */
1000 914
1001static void hpt372n_rw_disk(ide_drive_t *drive, struct request *rq) 915static void hpt3xxn_rw_disk(ide_drive_t *drive, struct request *rq)
1002{
1003 ide_hwif_t *hwif = drive->hwif;
1004 int wantclock;
1005
1006 wantclock = rq_data_dir(rq) ? 0x23 : 0x21;
1007
1008 if (hwif->config_data != wantclock) {
1009 hpt372n_set_clock(drive, wantclock);
1010 hwif->config_data = wantclock;
1011 }
1012}
1013
1014/*
1015 * Since SUN Cobalt is attempting to do this operation, I should disclose
1016 * this has been a long time ago Thu Jul 27 16:40:57 2000 was the patch date
1017 * HOTSWAP ATA Infrastructure.
1018 */
1019
1020static void hpt3xx_reset (ide_drive_t *drive)
1021{
1022}
1023
1024static int hpt3xx_tristate (ide_drive_t * drive, int state)
1025{ 916{
1026 ide_hwif_t *hwif = HWIF(drive); 917 ide_hwif_t *hwif = HWIF(drive);
1027 struct pci_dev *dev = hwif->pci_dev; 918 u8 wantclock = rq_data_dir(rq) ? 0x23 : 0x21;
1028 u8 reg59h = 0, reset = (hwif->channel) ? 0x80 : 0x40;
1029 u8 regXXh = 0, state_reg= (hwif->channel) ? 0x57 : 0x53;
1030
1031 pci_read_config_byte(dev, 0x59, &reg59h);
1032 pci_read_config_byte(dev, state_reg, &regXXh);
1033 919
1034 if (state) { 920 hpt3xxn_set_clock(hwif, wantclock);
1035 (void) ide_do_reset(drive);
1036 pci_write_config_byte(dev, state_reg, regXXh|0x80);
1037 pci_write_config_byte(dev, 0x59, reg59h|reset);
1038 } else {
1039 pci_write_config_byte(dev, 0x59, reg59h & ~(reset));
1040 pci_write_config_byte(dev, state_reg, regXXh & ~(0x80));
1041 (void) ide_do_reset(drive);
1042 }
1043 return 0;
1044} 921}
1045 922
1046/* 923/*
1047 * set/get power state for a drive. 924 * Set/get power state for a drive.
1048 * turning the power off does the following things:
1049 * 1) soft-reset the drive
1050 * 2) tri-states the ide bus
1051 * 925 *
1052 * when we turn things back on, we need to re-initialize things. 926 * When we turn the power back on, we need to re-initialize things.
1053 */ 927 */
1054#define TRISTATE_BIT 0x8000 928#define TRISTATE_BIT 0x8000
1055static int hpt370_busproc(ide_drive_t * drive, int state) 929
930static int hpt3xx_busproc(ide_drive_t *drive, int state)
1056{ 931{
1057 ide_hwif_t *hwif = drive->hwif; 932 ide_hwif_t *hwif = drive->hwif;
1058 struct pci_dev *dev = hwif->pci_dev; 933 struct pci_dev *dev = hwif->pci_dev;
1059 u8 tristate = 0, resetmask = 0, bus_reg = 0; 934 u8 tristate, resetmask, bus_reg = 0;
1060 u16 tri_reg; 935 u16 tri_reg = 0;
1061 936
1062 hwif->bus_state = state; 937 hwif->bus_state = state;
1063 938
1064 if (hwif->channel) { 939 if (hwif->channel) {
1065 /* secondary channel */ 940 /* secondary channel */
1066 tristate = 0x56; 941 tristate = 0x56;
1067 resetmask = 0x80; 942 resetmask = 0x80;
1068 } else { 943 } else {
1069 /* primary channel */ 944 /* primary channel */
1070 tristate = 0x52; 945 tristate = 0x52;
1071 resetmask = 0x40; 946 resetmask = 0x40;
1072 } 947 }
1073 948
1074 /* grab status */ 949 /* Grab the status. */
1075 pci_read_config_word(dev, tristate, &tri_reg); 950 pci_read_config_word(dev, tristate, &tri_reg);
1076 pci_read_config_byte(dev, 0x59, &bus_reg); 951 pci_read_config_byte(dev, 0x59, &bus_reg);
1077 952
1078 /* set the state. we don't set it if we don't need to do so. 953 /*
1079 * make sure that the drive knows that it has failed if it's off */ 954 * Set the state. We don't set it if we don't need to do so.
955 * Make sure that the drive knows that it has failed if it's off.
956 */
1080 switch (state) { 957 switch (state) {
1081 case BUSSTATE_ON: 958 case BUSSTATE_ON:
1082 hwif->drives[0].failures = 0; 959 if (!(bus_reg & resetmask))
1083 hwif->drives[1].failures = 0;
1084 if ((bus_reg & resetmask) == 0)
1085 return 0; 960 return 0;
1086 tri_reg &= ~TRISTATE_BIT; 961 hwif->drives[0].failures = hwif->drives[1].failures = 0;
1087 bus_reg &= ~resetmask; 962
1088 break; 963 pci_write_config_byte(dev, 0x59, bus_reg & ~resetmask);
964 pci_write_config_word(dev, tristate, tri_reg & ~TRISTATE_BIT);
965 return 0;
1089 case BUSSTATE_OFF: 966 case BUSSTATE_OFF:
1090 hwif->drives[0].failures = hwif->drives[0].max_failures + 1; 967 if ((bus_reg & resetmask) && !(tri_reg & TRISTATE_BIT))
1091 hwif->drives[1].failures = hwif->drives[1].max_failures + 1;
1092 if ((tri_reg & TRISTATE_BIT) == 0 && (bus_reg & resetmask))
1093 return 0; 968 return 0;
1094 tri_reg &= ~TRISTATE_BIT; 969 tri_reg &= ~TRISTATE_BIT;
1095 bus_reg |= resetmask;
1096 break; 970 break;
1097 case BUSSTATE_TRISTATE: 971 case BUSSTATE_TRISTATE:
1098 hwif->drives[0].failures = hwif->drives[0].max_failures + 1; 972 if ((bus_reg & resetmask) && (tri_reg & TRISTATE_BIT))
1099 hwif->drives[1].failures = hwif->drives[1].max_failures + 1;
1100 if ((tri_reg & TRISTATE_BIT) && (bus_reg & resetmask))
1101 return 0; 973 return 0;
1102 tri_reg |= TRISTATE_BIT; 974 tri_reg |= TRISTATE_BIT;
1103 bus_reg |= resetmask;
1104 break; 975 break;
976 default:
977 return -EINVAL;
1105 } 978 }
1106 pci_write_config_byte(dev, 0x59, bus_reg);
1107 pci_write_config_word(dev, tristate, tri_reg);
1108 979
980 hwif->drives[0].failures = hwif->drives[0].max_failures + 1;
981 hwif->drives[1].failures = hwif->drives[1].max_failures + 1;
982
983 pci_write_config_word(dev, tristate, tri_reg);
984 pci_write_config_byte(dev, 0x59, bus_reg | resetmask);
1109 return 0; 985 return 0;
1110} 986}
1111 987
@@ -1119,14 +995,14 @@ static void __devinit hpt366_clocking(ide_hwif_t *hwif)
1119 /* detect bus speed by looking at control reg timing: */ 995 /* detect bus speed by looking at control reg timing: */
1120 switch((reg1 >> 8) & 7) { 996 switch((reg1 >> 8) & 7) {
1121 case 5: 997 case 5:
1122 info->speed = forty_base_hpt366; 998 info->speed = forty_base_hpt36x;
1123 break; 999 break;
1124 case 9: 1000 case 9:
1125 info->speed = twenty_five_base_hpt366; 1001 info->speed = twenty_five_base_hpt36x;
1126 break; 1002 break;
1127 case 7: 1003 case 7:
1128 default: 1004 default:
1129 info->speed = thirty_three_base_hpt366; 1005 info->speed = thirty_three_base_hpt36x;
1130 break; 1006 break;
1131 } 1007 }
1132} 1008}
@@ -1136,9 +1012,9 @@ static void __devinit hpt37x_clocking(ide_hwif_t *hwif)
1136 struct hpt_info *info = ide_get_hwifdata(hwif); 1012 struct hpt_info *info = ide_get_hwifdata(hwif);
1137 struct pci_dev *dev = hwif->pci_dev; 1013 struct pci_dev *dev = hwif->pci_dev;
1138 int adjust, i; 1014 int adjust, i;
1139 u16 freq; 1015 u16 freq = 0;
1140 u32 pll; 1016 u32 pll, temp = 0;
1141 u8 reg5bh; 1017 u8 reg5bh = 0, mcr1 = 0;
1142 1018
1143 /* 1019 /*
1144 * default to pci clock. make sure MA15/16 are set to output 1020 * default to pci clock. make sure MA15/16 are set to output
@@ -1151,27 +1027,40 @@ static void __devinit hpt37x_clocking(ide_hwif_t *hwif)
1151 pci_write_config_byte(dev, 0x5b, 0x23); 1027 pci_write_config_byte(dev, 0x5b, 0x23);
1152 1028
1153 /* 1029 /*
1154 * set up the PLL. we need to adjust it so that it's stable. 1030 * We'll have to read f_CNT value in order to determine
1155 * freq = Tpll * 192 / Tpci 1031 * the PCI clock frequency according to the following ratio:
1032 *
1033 * f_CNT = Fpci * 192 / Fdpll
1034 *
1035 * First try reading the register in which the HighPoint BIOS
1036 * saves f_CNT value before reprogramming the DPLL from its
1037 * default setting (which differs for the various chips).
1038 * NOTE: This register is only accessible via I/O space.
1156 * 1039 *
1157 * Todo. For non x86 should probably check the dword is 1040 * In case the signature check fails, we'll have to resort to
1158 * set to 0xABCDExxx indicating the BIOS saved f_CNT 1041 * reading the f_CNT register itself in hopes that nobody has
1042 * touched the DPLL yet...
1159 */ 1043 */
1160 pci_read_config_word(dev, 0x78, &freq); 1044 temp = inl(pci_resource_start(dev, 4) + 0x90);
1161 freq &= 0x1FF; 1045 if ((temp & 0xFFFFF000) != 0xABCDE000) {
1162 1046 printk(KERN_WARNING "HPT37X: no clock data saved by BIOS\n");
1047
1048 /* Calculate the average value of f_CNT */
1049 for (temp = i = 0; i < 128; i++) {
1050 pci_read_config_word(dev, 0x78, &freq);
1051 temp += freq & 0x1ff;
1052 mdelay(1);
1053 }
1054 freq = temp / 128;
1055 } else
1056 freq = temp & 0x1ff;
1057
1163 /* 1058 /*
1164 * The 372N uses different PCI clock information and has 1059 * HPT3xxN chips use different PCI clock information.
1165 * some other complications 1060 * Currently we always set up the PLL for them.
1166 * On PCI33 timing we must clock switch
1167 * On PCI66 timing we must NOT use the PCI clock
1168 *
1169 * Currently we always set up the PLL for the 372N
1170 */ 1061 */
1171 1062
1172 if(info->flags & IS_372N) 1063 if (info->flags & IS_3xxN) {
1173 {
1174 printk(KERN_INFO "hpt: HPT372N detected, using 372N timing.\n");
1175 if(freq < 0x55) 1064 if(freq < 0x55)
1176 pll = F_LOW_PCI_33; 1065 pll = F_LOW_PCI_33;
1177 else if(freq < 0x70) 1066 else if(freq < 0x70)
@@ -1180,10 +1069,8 @@ static void __devinit hpt37x_clocking(ide_hwif_t *hwif)
1180 pll = F_LOW_PCI_50; 1069 pll = F_LOW_PCI_50;
1181 else 1070 else
1182 pll = F_LOW_PCI_66; 1071 pll = F_LOW_PCI_66;
1183 1072
1184 printk(KERN_INFO "FREQ: %d PLL: %d\n", freq, pll); 1073 printk(KERN_INFO "HPT3xxN detected, FREQ: %d, PLL: %d\n", freq, pll);
1185
1186 /* We always use the pll not the PCI clock on 372N */
1187 } 1074 }
1188 else 1075 else
1189 { 1076 {
@@ -1197,41 +1084,22 @@ static void __devinit hpt37x_clocking(ide_hwif_t *hwif)
1197 pll = F_LOW_PCI_66; 1084 pll = F_LOW_PCI_66;
1198 1085
1199 if (pll == F_LOW_PCI_33) { 1086 if (pll == F_LOW_PCI_33) {
1200 if (info->revision >= 8) 1087 info->speed = thirty_three_base_hpt37x;
1201 info->speed = thirty_three_base_hpt374;
1202 else if (info->revision >= 5)
1203 info->speed = thirty_three_base_hpt372;
1204 else if (info->revision >= 4)
1205 info->speed = thirty_three_base_hpt370a;
1206 else
1207 info->speed = thirty_three_base_hpt370;
1208 printk(KERN_DEBUG "HPT37X: using 33MHz PCI clock\n"); 1088 printk(KERN_DEBUG "HPT37X: using 33MHz PCI clock\n");
1209 } else if (pll == F_LOW_PCI_40) { 1089 } else if (pll == F_LOW_PCI_40) {
1210 /* Unsupported */ 1090 /* Unsupported */
1211 } else if (pll == F_LOW_PCI_50) { 1091 } else if (pll == F_LOW_PCI_50) {
1212 if (info->revision >= 8) 1092 info->speed = fifty_base_hpt37x;
1213 info->speed = fifty_base_hpt370a;
1214 else if (info->revision >= 5)
1215 info->speed = fifty_base_hpt372;
1216 else if (info->revision >= 4)
1217 info->speed = fifty_base_hpt370a;
1218 else
1219 info->speed = fifty_base_hpt370a;
1220 printk(KERN_DEBUG "HPT37X: using 50MHz PCI clock\n"); 1093 printk(KERN_DEBUG "HPT37X: using 50MHz PCI clock\n");
1221 } else { 1094 } else {
1222 if (info->revision >= 8) { 1095 info->speed = sixty_six_base_hpt37x;
1223 printk(KERN_ERR "HPT37x: 66MHz timings are not supported.\n");
1224 }
1225 else if (info->revision >= 5)
1226 info->speed = sixty_six_base_hpt372;
1227 else if (info->revision >= 4)
1228 info->speed = sixty_six_base_hpt370a;
1229 else
1230 info->speed = sixty_six_base_hpt370;
1231 printk(KERN_DEBUG "HPT37X: using 66MHz PCI clock\n"); 1096 printk(KERN_DEBUG "HPT37X: using 66MHz PCI clock\n");
1232 } 1097 }
1233 } 1098 }
1234 1099
1100 if (pll == F_LOW_PCI_66)
1101 info->flags |= PCI_66MHZ;
1102
1235 /* 1103 /*
1236 * only try the pll if we don't have a table for the clock 1104 * only try the pll if we don't have a table for the clock
1237 * speed that we're running at. NOTE: the internal PLL will 1105 * speed that we're running at. NOTE: the internal PLL will
@@ -1248,11 +1116,8 @@ static void __devinit hpt37x_clocking(ide_hwif_t *hwif)
1248 info->flags |= PLL_MODE; 1116 info->flags |= PLL_MODE;
1249 1117
1250 /* 1118 /*
1251 * FIXME: make this work correctly, esp with 372N as per 1119 * Adjust the PLL based upon the PCI clock, enable it, and
1252 * reference driver code. 1120 * wait for stabilization...
1253 *
1254 * adjust PLL based upon PCI clock, enable it, and wait for
1255 * stabilization.
1256 */ 1121 */
1257 adjust = 0; 1122 adjust = 0;
1258 freq = (pll < F_LOW_PCI_50) ? 2 : 4; 1123 freq = (pll < F_LOW_PCI_50) ? 2 : 4;
@@ -1275,22 +1140,12 @@ static void __devinit hpt37x_clocking(ide_hwif_t *hwif)
1275 pci_write_config_dword(dev, 0x5c, 1140 pci_write_config_dword(dev, 0x5c,
1276 pll & ~0x100); 1141 pll & ~0x100);
1277 pci_write_config_byte(dev, 0x5b, 0x21); 1142 pci_write_config_byte(dev, 0x5b, 0x21);
1278 if (info->revision >= 8) 1143
1279 info->speed = fifty_base_hpt370a; 1144 info->speed = fifty_base_hpt37x;
1280 else if (info->revision >= 5)
1281 info->speed = fifty_base_hpt372;
1282 else if (info->revision >= 4)
1283 info->speed = fifty_base_hpt370a;
1284 else
1285 info->speed = fifty_base_hpt370a;
1286 printk("HPT37X: using 50MHz internal PLL\n"); 1145 printk("HPT37X: using 50MHz internal PLL\n");
1287 goto init_hpt37X_done; 1146 goto init_hpt37X_done;
1288 } 1147 }
1289 } 1148 }
1290 if (!pci_get_drvdata(dev)) {
1291 printk("No Clock Stabilization!!!\n");
1292 return;
1293 }
1294pll_recal: 1149pll_recal:
1295 if (adjust & 1) 1150 if (adjust & 1)
1296 pll -= (adjust >> 1); 1151 pll -= (adjust >> 1);
@@ -1300,11 +1155,16 @@ pll_recal:
1300 1155
1301init_hpt37X_done: 1156init_hpt37X_done:
1302 if (!info->speed) 1157 if (!info->speed)
1303 printk(KERN_ERR "HPT37X%s: unknown bus timing [%d %d].\n", 1158 printk(KERN_ERR "HPT37x%s: unknown bus timing [%d %d].\n",
1304 (info->flags & IS_372N)?"N":"", pll, freq); 1159 (info->flags & IS_3xxN) ? "N" : "", pll, freq);
1305 /* reset state engine */ 1160 /*
1306 pci_write_config_byte(dev, 0x50, 0x37); 1161 * Reset the state engines.
1307 pci_write_config_byte(dev, 0x54, 0x37); 1162 * NOTE: avoid accidentally enabling the primary channel on HPT371N.
1163 */
1164 pci_read_config_byte(dev, 0x50, &mcr1);
1165 if (mcr1 & 0x04)
1166 pci_write_config_byte(dev, 0x50, 0x37);
1167 pci_write_config_byte(dev, 0x54, 0x37);
1308 udelay(100); 1168 udelay(100);
1309} 1169}
1310 1170
@@ -1367,6 +1227,7 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
1367 struct pci_dev *dev = hwif->pci_dev; 1227 struct pci_dev *dev = hwif->pci_dev;
1368 struct hpt_info *info = ide_get_hwifdata(hwif); 1228 struct hpt_info *info = ide_get_hwifdata(hwif);
1369 u8 ata66 = 0, regmask = (hwif->channel) ? 0x01 : 0x02; 1229 u8 ata66 = 0, regmask = (hwif->channel) ? 0x01 : 0x02;
1230 int serialize = HPT_SERIALIZE_IO;
1370 1231
1371 hwif->tuneproc = &hpt3xx_tune_drive; 1232 hwif->tuneproc = &hpt3xx_tune_drive;
1372 hwif->speedproc = &hpt3xx_tune_chipset; 1233 hwif->speedproc = &hpt3xx_tune_chipset;
@@ -1374,8 +1235,20 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
1374 hwif->intrproc = &hpt3xx_intrproc; 1235 hwif->intrproc = &hpt3xx_intrproc;
1375 hwif->maskproc = &hpt3xx_maskproc; 1236 hwif->maskproc = &hpt3xx_maskproc;
1376 1237
1377 if(info->flags & IS_372N) 1238 /*
1378 hwif->rw_disk = &hpt372n_rw_disk; 1239 * HPT3xxN chips have some complications:
1240 *
1241 * - on 33 MHz PCI we must clock switch
1242 * - on 66 MHz PCI we must NOT use the PCI clock
1243 */
1244 if ((info->flags & (IS_3xxN | PCI_66MHZ)) == IS_3xxN) {
1245 /*
1246 * Clock is shared between the channels,
1247 * so we'll have to serialize them... :-(
1248 */
1249 serialize = 1;
1250 hwif->rw_disk = &hpt3xxn_rw_disk;
1251 }
1379 1252
1380 /* 1253 /*
1381 * The HPT37x uses the CBLID pins as outputs for MA15/MA16 1254 * The HPT37x uses the CBLID pins as outputs for MA15/MA16
@@ -1418,29 +1291,15 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
1418 PCI_FUNC(hwif->pci_dev->devfn)); 1291 PCI_FUNC(hwif->pci_dev->devfn));
1419#endif /* DEBUG */ 1292#endif /* DEBUG */
1420 1293
1421#ifdef HPT_SERIALIZE_IO 1294 /* Serialize access to this device */
1422 /* serialize access to this device */ 1295 if (serialize && hwif->mate)
1423 if (hwif->mate)
1424 hwif->serialized = hwif->mate->serialized = 1; 1296 hwif->serialized = hwif->mate->serialized = 1;
1425#endif
1426 1297
1427 if (info->revision >= 3) { 1298 /*
1428 u8 reg5ah = 0; 1299 * Set up ioctl for power status.
1429 pci_write_config_byte(dev, 0x5a, reg5ah & ~0x10); 1300 * NOTE: power affects both drives on each channel.
1430 /* 1301 */
1431 * set up ioctl for power status. 1302 hwif->busproc = &hpt3xx_busproc;
1432 * note: power affects both
1433 * drives on each channel
1434 */
1435 hwif->resetproc = &hpt3xx_reset;
1436 hwif->busproc = &hpt370_busproc;
1437 } else if (info->revision >= 2) {
1438 hwif->resetproc = &hpt3xx_reset;
1439 hwif->busproc = &hpt3xx_tristate;
1440 } else {
1441 hwif->resetproc = &hpt3xx_reset;
1442 hwif->busproc = &hpt3xx_tristate;
1443 }
1444 1303
1445 if (!hwif->dma_base) { 1304 if (!hwif->dma_base) {
1446 hwif->drives[0].autotune = 1; 1305 hwif->drives[0].autotune = 1;
@@ -1490,7 +1349,7 @@ static void __devinit init_dma_hpt366(ide_hwif_t *hwif, unsigned long dmabase)
1490 return; 1349 return;
1491 1350
1492 if(info->speed == NULL) { 1351 if(info->speed == NULL) {
1493 printk(KERN_WARNING "hpt: no known IDE timings, disabling DMA.\n"); 1352 printk(KERN_WARNING "hpt366: no known IDE timings, disabling DMA.\n");
1494 return; 1353 return;
1495 } 1354 }
1496 1355
@@ -1519,9 +1378,10 @@ static void __devinit init_dma_hpt366(ide_hwif_t *hwif, unsigned long dmabase)
1519 1378
1520static void __devinit init_iops_hpt366(ide_hwif_t *hwif) 1379static void __devinit init_iops_hpt366(ide_hwif_t *hwif)
1521{ 1380{
1522 struct hpt_info *info = kzalloc(sizeof(struct hpt_info), GFP_KERNEL); 1381 struct hpt_info *info = kzalloc(sizeof(struct hpt_info), GFP_KERNEL);
1523 unsigned long dmabase = pci_resource_start(hwif->pci_dev, 4); 1382 struct pci_dev *dev = hwif->pci_dev;
1524 u8 did, rid; 1383 u16 did = dev->device;
1384 u8 rid = 0;
1525 1385
1526 if(info == NULL) { 1386 if(info == NULL) {
1527 printk(KERN_WARNING "hpt366: out of memory.\n"); 1387 printk(KERN_WARNING "hpt366: out of memory.\n");
@@ -1529,15 +1389,22 @@ static void __devinit init_iops_hpt366(ide_hwif_t *hwif)
1529 } 1389 }
1530 ide_set_hwifdata(hwif, info); 1390 ide_set_hwifdata(hwif, info);
1531 1391
1532 if(dmabase) { 1392 /* Avoid doing the same thing twice. */
1533 did = inb(dmabase + 0x22); 1393 if (hwif->channel && hwif->mate) {
1534 rid = inb(dmabase + 0x28); 1394 memcpy(info, ide_get_hwifdata(hwif->mate), sizeof(struct hpt_info));
1535 1395 return;
1536 if((did == 4 && rid == 6) || (did == 5 && rid > 1))
1537 info->flags |= IS_372N;
1538 } 1396 }
1539 1397
1540 info->revision = hpt_revision(hwif->pci_dev); 1398 pci_read_config_byte(dev, PCI_CLASS_REVISION, &rid);
1399
1400 if (( did == PCI_DEVICE_ID_TTI_HPT366 && rid == 6) ||
1401 ((did == PCI_DEVICE_ID_TTI_HPT372 ||
1402 did == PCI_DEVICE_ID_TTI_HPT302 ||
1403 did == PCI_DEVICE_ID_TTI_HPT371) && rid > 1) ||
1404 did == PCI_DEVICE_ID_TTI_HPT372N)
1405 info->flags |= IS_3xxN;
1406
1407 info->revision = hpt_revision(dev);
1541 1408
1542 if (info->revision >= 3) 1409 if (info->revision >= 3)
1543 hpt37x_clocking(hwif); 1410 hpt37x_clocking(hwif);
@@ -1574,6 +1441,23 @@ static int __devinit init_setup_hpt37x(struct pci_dev *dev, ide_pci_device_t *d)
1574 return ide_setup_pci_device(dev, d); 1441 return ide_setup_pci_device(dev, d);
1575} 1442}
1576 1443
1444static int __devinit init_setup_hpt371(struct pci_dev *dev, ide_pci_device_t *d)
1445{
1446 u8 mcr1 = 0;
1447
1448 /*
1449 * HPT371 chips physically have only one channel, the secondary one,
1450 * but the primary channel registers do exist! Go figure...
1451 * So, we manually disable the non-existing channel here
1452 * (if the BIOS hasn't done this already).
1453 */
1454 pci_read_config_byte(dev, 0x50, &mcr1);
1455 if (mcr1 & 0x04)
1456 pci_write_config_byte(dev, 0x50, (mcr1 & ~0x04));
1457
1458 return ide_setup_pci_device(dev, d);
1459}
1460
1577static int __devinit init_setup_hpt366(struct pci_dev *dev, ide_pci_device_t *d) 1461static int __devinit init_setup_hpt366(struct pci_dev *dev, ide_pci_device_t *d)
1578{ 1462{
1579 struct pci_dev *findev = NULL; 1463 struct pci_dev *findev = NULL;
@@ -1661,13 +1545,14 @@ static ide_pci_device_t hpt366_chipsets[] __devinitdata = {
1661 .bootable = OFF_BOARD, 1545 .bootable = OFF_BOARD,
1662 },{ /* 3 */ 1546 },{ /* 3 */
1663 .name = "HPT371", 1547 .name = "HPT371",
1664 .init_setup = init_setup_hpt37x, 1548 .init_setup = init_setup_hpt371,
1665 .init_chipset = init_chipset_hpt366, 1549 .init_chipset = init_chipset_hpt366,
1666 .init_iops = init_iops_hpt366, 1550 .init_iops = init_iops_hpt366,
1667 .init_hwif = init_hwif_hpt366, 1551 .init_hwif = init_hwif_hpt366,
1668 .init_dma = init_dma_hpt366, 1552 .init_dma = init_dma_hpt366,
1669 .channels = 2, 1553 .channels = 2,
1670 .autodma = AUTODMA, 1554 .autodma = AUTODMA,
1555 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1671 .bootable = OFF_BOARD, 1556 .bootable = OFF_BOARD,
1672 },{ /* 4 */ 1557 },{ /* 4 */
1673 .name = "HPT374", 1558 .name = "HPT374",
@@ -1699,13 +1584,16 @@ static ide_pci_device_t hpt366_chipsets[] __devinitdata = {
1699 * 1584 *
1700 * Called when the PCI registration layer (or the IDE initialization) 1585 * Called when the PCI registration layer (or the IDE initialization)
1701 * finds a device matching our IDE device tables. 1586 * finds a device matching our IDE device tables.
1587 *
1588 * NOTE: since we'll have to modify some fields of the ide_pci_device_t
1589 * structure depending on the chip's revision, we'd better pass a local
1590 * copy down the call chain...
1702 */ 1591 */
1703
1704static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_device_id *id) 1592static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_device_id *id)
1705{ 1593{
1706 ide_pci_device_t *d = &hpt366_chipsets[id->driver_data]; 1594 ide_pci_device_t d = hpt366_chipsets[id->driver_data];
1707 1595
1708 return d->init_setup(dev, d); 1596 return d.init_setup(dev, &d);
1709} 1597}
1710 1598
1711static struct pci_device_id hpt366_pci_tbl[] = { 1599static struct pci_device_id hpt366_pci_tbl[] = {
diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pci/pdc202xx_new.c
index 3ca581063f72..7cb48576e479 100644
--- a/drivers/ide/pci/pdc202xx_new.c
+++ b/drivers/ide/pci/pdc202xx_new.c
@@ -39,6 +39,14 @@
39 39
40#define PDC202_DEBUG_CABLE 0 40#define PDC202_DEBUG_CABLE 0
41 41
42#undef DEBUG
43
44#ifdef DEBUG
45#define DBG(fmt, args...) printk("%s: " fmt, __FUNCTION__, ## args)
46#else
47#define DBG(fmt, args...)
48#endif
49
42static const char *pdc_quirk_drives[] = { 50static const char *pdc_quirk_drives[] = {
43 "QUANTUM FIREBALLlct08 08", 51 "QUANTUM FIREBALLlct08 08",
44 "QUANTUM FIREBALLP KA6.4", 52 "QUANTUM FIREBALLP KA6.4",
@@ -51,37 +59,11 @@ static const char *pdc_quirk_drives[] = {
51 NULL 59 NULL
52}; 60};
53 61
54#define set_2regs(a, b) \ 62static u8 max_dma_rate(struct pci_dev *pdev)
55 do { \
56 hwif->OUTB((a + adj), indexreg); \
57 hwif->OUTB(b, datareg); \
58 } while(0)
59
60#define set_ultra(a, b, c) \
61 do { \
62 set_2regs(0x10,(a)); \
63 set_2regs(0x11,(b)); \
64 set_2regs(0x12,(c)); \
65 } while(0)
66
67#define set_ata2(a, b) \
68 do { \
69 set_2regs(0x0e,(a)); \
70 set_2regs(0x0f,(b)); \
71 } while(0)
72
73#define set_pio(a, b, c) \
74 do { \
75 set_2regs(0x0c,(a)); \
76 set_2regs(0x0d,(b)); \
77 set_2regs(0x13,(c)); \
78 } while(0)
79
80static u8 pdcnew_ratemask (ide_drive_t *drive)
81{ 63{
82 u8 mode; 64 u8 mode;
83 65
84 switch(HWIF(drive)->pci_dev->device) { 66 switch(pdev->device) {
85 case PCI_DEVICE_ID_PROMISE_20277: 67 case PCI_DEVICE_ID_PROMISE_20277:
86 case PCI_DEVICE_ID_PROMISE_20276: 68 case PCI_DEVICE_ID_PROMISE_20276:
87 case PCI_DEVICE_ID_PROMISE_20275: 69 case PCI_DEVICE_ID_PROMISE_20275:
@@ -96,12 +78,21 @@ static u8 pdcnew_ratemask (ide_drive_t *drive)
96 default: 78 default:
97 return 0; 79 return 0;
98 } 80 }
99 if (!eighty_ninty_three(drive)) 81
100 mode = min(mode, (u8)1);
101 return mode; 82 return mode;
102} 83}
103 84
104static int check_in_drive_lists (ide_drive_t *drive, const char **list) 85static u8 pdcnew_ratemask(ide_drive_t *drive)
86{
87 u8 mode = max_dma_rate(HWIF(drive)->pci_dev);
88
89 if (!eighty_ninty_three(drive))
90 mode = min_t(u8, mode, 1);
91
92 return mode;
93}
94
95static int check_in_drive_lists(ide_drive_t *drive, const char **list)
105{ 96{
106 struct hd_driveid *id = drive->id; 97 struct hd_driveid *id = drive->id;
107 98
@@ -121,43 +112,141 @@ static int check_in_drive_lists (ide_drive_t *drive, const char **list)
121 return 0; 112 return 0;
122} 113}
123 114
124static int pdcnew_new_tune_chipset (ide_drive_t *drive, u8 xferspeed) 115/**
116 * get_indexed_reg - Get indexed register
117 * @hwif: for the port address
118 * @index: index of the indexed register
119 */
120static u8 get_indexed_reg(ide_hwif_t *hwif, u8 index)
121{
122 u8 value;
123
124 hwif->OUTB(index, hwif->dma_vendor1);
125 value = hwif->INB(hwif->dma_vendor3);
126
127 DBG("index[%02X] value[%02X]\n", index, value);
128 return value;
129}
130
131/**
132 * set_indexed_reg - Set indexed register
133 * @hwif: for the port address
134 * @index: index of the indexed register
135 */
136static void set_indexed_reg(ide_hwif_t *hwif, u8 index, u8 value)
137{
138 hwif->OUTB(index, hwif->dma_vendor1);
139 hwif->OUTB(value, hwif->dma_vendor3);
140 DBG("index[%02X] value[%02X]\n", index, value);
141}
142
143/*
144 * ATA Timing Tables based on 133 MHz PLL output clock.
145 *
146 * If the PLL outputs 100 MHz clock, the ASIC hardware will set
147 * the timing registers automatically when "set features" command is
148 * issued to the device. However, if the PLL output clock is 133 MHz,
149 * the following tables must be used.
150 */
151static struct pio_timing {
152 u8 reg0c, reg0d, reg13;
153} pio_timings [] = {
154 { 0xfb, 0x2b, 0xac }, /* PIO mode 0, IORDY off, Prefetch off */
155 { 0x46, 0x29, 0xa4 }, /* PIO mode 1, IORDY off, Prefetch off */
156 { 0x23, 0x26, 0x64 }, /* PIO mode 2, IORDY off, Prefetch off */
157 { 0x27, 0x0d, 0x35 }, /* PIO mode 3, IORDY on, Prefetch off */
158 { 0x23, 0x09, 0x25 }, /* PIO mode 4, IORDY on, Prefetch off */
159};
160
161static struct mwdma_timing {
162 u8 reg0e, reg0f;
163} mwdma_timings [] = {
164 { 0xdf, 0x5f }, /* MWDMA mode 0 */
165 { 0x6b, 0x27 }, /* MWDMA mode 1 */
166 { 0x69, 0x25 }, /* MWDMA mode 2 */
167};
168
169static struct udma_timing {
170 u8 reg10, reg11, reg12;
171} udma_timings [] = {
172 { 0x4a, 0x0f, 0xd5 }, /* UDMA mode 0 */
173 { 0x3a, 0x0a, 0xd0 }, /* UDMA mode 1 */
174 { 0x2a, 0x07, 0xcd }, /* UDMA mode 2 */
175 { 0x1a, 0x05, 0xcd }, /* UDMA mode 3 */
176 { 0x1a, 0x03, 0xcd }, /* UDMA mode 4 */
177 { 0x1a, 0x02, 0xcb }, /* UDMA mode 5 */
178 { 0x1a, 0x01, 0xcb }, /* UDMA mode 6 */
179};
180
181static int pdcnew_tune_chipset(ide_drive_t *drive, u8 speed)
125{ 182{
126 ide_hwif_t *hwif = HWIF(drive); 183 ide_hwif_t *hwif = HWIF(drive);
127 unsigned long indexreg = hwif->dma_vendor1; 184 u8 adj = (drive->dn & 1) ? 0x08 : 0x00;
128 unsigned long datareg = hwif->dma_vendor3; 185 int err;
129 u8 thold = 0x10;
130 u8 adj = (drive->dn%2) ? 0x08 : 0x00;
131 u8 speed = ide_rate_filter(pdcnew_ratemask(drive), xferspeed);
132
133 if (speed == XFER_UDMA_2) {
134 hwif->OUTB((thold + adj), indexreg);
135 hwif->OUTB((hwif->INB(datareg) & 0x7f), datareg);
136 }
137 186
138 switch (speed) { 187 speed = ide_rate_filter(pdcnew_ratemask(drive), speed);
139 case XFER_UDMA_7: 188
140 speed = XFER_UDMA_6; 189 /*
141 case XFER_UDMA_6: set_ultra(0x1a, 0x01, 0xcb); break; 190 * Issue SETFEATURES_XFER to the drive first. PDC202xx hardware will
142 case XFER_UDMA_5: set_ultra(0x1a, 0x02, 0xcb); break; 191 * automatically set the timing registers based on 100 MHz PLL output.
143 case XFER_UDMA_4: set_ultra(0x1a, 0x03, 0xcd); break; 192 */
144 case XFER_UDMA_3: set_ultra(0x1a, 0x05, 0xcd); break; 193 err = ide_config_drive_speed(drive, speed);
145 case XFER_UDMA_2: set_ultra(0x2a, 0x07, 0xcd); break; 194
146 case XFER_UDMA_1: set_ultra(0x3a, 0x0a, 0xd0); break; 195 /*
147 case XFER_UDMA_0: set_ultra(0x4a, 0x0f, 0xd5); break; 196 * As we set up the PLL to output 133 MHz for UltraDMA/133 capable
148 case XFER_MW_DMA_2: set_ata2(0x69, 0x25); break; 197 * chips, we must override the default register settings...
149 case XFER_MW_DMA_1: set_ata2(0x6b, 0x27); break; 198 */
150 case XFER_MW_DMA_0: set_ata2(0xdf, 0x5f); break; 199 if (max_dma_rate(hwif->pci_dev) == 4) {
151 case XFER_PIO_4: set_pio(0x23, 0x09, 0x25); break; 200 u8 mode = speed & 0x07;
152 case XFER_PIO_3: set_pio(0x27, 0x0d, 0x35); break; 201
153 case XFER_PIO_2: set_pio(0x23, 0x26, 0x64); break; 202 switch (speed) {
154 case XFER_PIO_1: set_pio(0x46, 0x29, 0xa4); break; 203 case XFER_UDMA_6:
155 case XFER_PIO_0: set_pio(0xfb, 0x2b, 0xac); break; 204 case XFER_UDMA_5:
156 default: 205 case XFER_UDMA_4:
157 ; 206 case XFER_UDMA_3:
158 } 207 case XFER_UDMA_2:
208 case XFER_UDMA_1:
209 case XFER_UDMA_0:
210 set_indexed_reg(hwif, 0x10 + adj,
211 udma_timings[mode].reg10);
212 set_indexed_reg(hwif, 0x11 + adj,
213 udma_timings[mode].reg11);
214 set_indexed_reg(hwif, 0x12 + adj,
215 udma_timings[mode].reg12);
216 break;
217
218 case XFER_MW_DMA_2:
219 case XFER_MW_DMA_1:
220 case XFER_MW_DMA_0:
221 set_indexed_reg(hwif, 0x0e + adj,
222 mwdma_timings[mode].reg0e);
223 set_indexed_reg(hwif, 0x0f + adj,
224 mwdma_timings[mode].reg0f);
225 break;
226 case XFER_PIO_4:
227 case XFER_PIO_3:
228 case XFER_PIO_2:
229 case XFER_PIO_1:
230 case XFER_PIO_0:
231 set_indexed_reg(hwif, 0x0c + adj,
232 pio_timings[mode].reg0c);
233 set_indexed_reg(hwif, 0x0d + adj,
234 pio_timings[mode].reg0d);
235 set_indexed_reg(hwif, 0x13 + adj,
236 pio_timings[mode].reg13);
237 break;
238 default:
239 printk(KERN_ERR "pdc202xx_new: "
240 "Unknown speed %d ignored\n", speed);
241 }
242 } else if (speed == XFER_UDMA_2) {
243 /* Set tHOLD bit to 0 if using UDMA mode 2 */
244 u8 tmp = get_indexed_reg(hwif, 0x10 + adj);
159 245
160 return (ide_config_drive_speed(drive, speed)); 246 set_indexed_reg(hwif, 0x10 + adj, tmp & 0x7f);
247 }
248
249 return err;
161} 250}
162 251
163/* 0 1 2 3 4 5 6 7 8 252/* 0 1 2 3 4 5 6 7 8
@@ -170,36 +259,42 @@ static int pdcnew_new_tune_chipset (ide_drive_t *drive, u8 xferspeed)
170static void pdcnew_tune_drive(ide_drive_t *drive, u8 pio) 259static void pdcnew_tune_drive(ide_drive_t *drive, u8 pio)
171{ 260{
172 pio = ide_get_best_pio_mode(drive, pio, 4, NULL); 261 pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
173 (void)pdcnew_new_tune_chipset(drive, XFER_PIO_0 + pio); 262 (void)pdcnew_tune_chipset(drive, XFER_PIO_0 + pio);
174} 263}
175 264
176static u8 pdcnew_new_cable_detect (ide_hwif_t *hwif) 265static u8 pdcnew_cable_detect(ide_hwif_t *hwif)
177{ 266{
178 hwif->OUTB(0x0b, hwif->dma_vendor1); 267 return get_indexed_reg(hwif, 0x0b) & 0x04;
179 return ((u8)((hwif->INB(hwif->dma_vendor3) & 0x04)));
180} 268}
181static int config_chipset_for_dma (ide_drive_t *drive) 269
270static int config_chipset_for_dma(ide_drive_t *drive)
182{ 271{
183 struct hd_driveid *id = drive->id; 272 struct hd_driveid *id = drive->id;
184 ide_hwif_t *hwif = HWIF(drive); 273 ide_hwif_t *hwif = HWIF(drive);
185 u8 speed = -1; 274 u8 ultra_66 = (id->dma_ultra & 0x0078) ? 1 : 0;
186 u8 cable; 275 u8 cable = pdcnew_cable_detect(hwif);
187 276 u8 speed;
188 u8 ultra_66 = ((id->dma_ultra & 0x0010) ||
189 (id->dma_ultra & 0x0008)) ? 1 : 0;
190
191 cable = pdcnew_new_cable_detect(hwif);
192 277
193 if (ultra_66 && cable) { 278 if (ultra_66 && cable) {
194 printk(KERN_WARNING "Warning: %s channel requires an 80-pin cable for operation.\n", hwif->channel ? "Secondary":"Primary"); 279 printk(KERN_WARNING "Warning: %s channel "
280 "requires an 80-pin cable for operation.\n",
281 hwif->channel ? "Secondary" : "Primary");
195 printk(KERN_WARNING "%s reduced to Ultra33 mode.\n", drive->name); 282 printk(KERN_WARNING "%s reduced to Ultra33 mode.\n", drive->name);
196 } 283 }
197 284
198 if (drive->media != ide_disk) 285 if (drive->media != ide_disk)
199 return 0; 286 return 0;
200 if (id->capability & 4) { /* IORDY_EN & PREFETCH_EN */ 287
201 hwif->OUTB((0x13 + ((drive->dn%2) ? 0x08 : 0x00)), hwif->dma_vendor1); 288 if (id->capability & 4) {
202 hwif->OUTB((hwif->INB(hwif->dma_vendor3)|0x03), hwif->dma_vendor3); 289 /*
290 * Set IORDY_EN & PREFETCH_EN (this seems to have
291 * NO real effect since this register is reloaded
292 * by hardware when the transfer mode is selected)
293 */
294 u8 tmp, adj = (drive->dn & 1) ? 0x08 : 0x00;
295
296 tmp = get_indexed_reg(hwif, 0x13 + adj);
297 set_indexed_reg(hwif, 0x13 + adj, tmp | 0x03);
203 } 298 }
204 299
205 speed = ide_dma_speed(drive, pdcnew_ratemask(drive)); 300 speed = ide_dma_speed(drive, pdcnew_ratemask(drive));
@@ -211,7 +306,7 @@ static int config_chipset_for_dma (ide_drive_t *drive)
211 return ide_dma_enable(drive); 306 return ide_dma_enable(drive);
212} 307}
213 308
214static int pdcnew_config_drive_xfer_rate (ide_drive_t *drive) 309static int pdcnew_config_drive_xfer_rate(ide_drive_t *drive)
215{ 310{
216 ide_hwif_t *hwif = HWIF(drive); 311 ide_hwif_t *hwif = HWIF(drive);
217 struct hd_driveid *id = drive->id; 312 struct hd_driveid *id = drive->id;
@@ -236,9 +331,9 @@ fast_ata_pio:
236 return 0; 331 return 0;
237} 332}
238 333
239static int pdcnew_quirkproc (ide_drive_t *drive) 334static int pdcnew_quirkproc(ide_drive_t *drive)
240{ 335{
241 return ((int) check_in_drive_lists(drive, pdc_quirk_drives)); 336 return check_in_drive_lists(drive, pdc_quirk_drives);
242} 337}
243 338
244static int pdcnew_ide_dma_lostirq(ide_drive_t *drive) 339static int pdcnew_ide_dma_lostirq(ide_drive_t *drive)
@@ -255,21 +350,100 @@ static int pdcnew_ide_dma_timeout(ide_drive_t *drive)
255 return __ide_dma_timeout(drive); 350 return __ide_dma_timeout(drive);
256} 351}
257 352
258static void pdcnew_new_reset (ide_drive_t *drive) 353static void pdcnew_reset(ide_drive_t *drive)
259{ 354{
260 /* 355 /*
261 * Deleted this because it is redundant from the caller. 356 * Deleted this because it is redundant from the caller.
262 */ 357 */
263 printk(KERN_WARNING "PDC202XX: %s channel reset.\n", 358 printk(KERN_WARNING "pdc202xx_new: %s channel reset.\n",
264 HWIF(drive)->channel ? "Secondary" : "Primary"); 359 HWIF(drive)->channel ? "Secondary" : "Primary");
265} 360}
266 361
362/**
363 * read_counter - Read the byte count registers
364 * @dma_base: for the port address
365 */
366static long __devinit read_counter(u32 dma_base)
367{
368 u32 pri_dma_base = dma_base, sec_dma_base = dma_base + 0x08;
369 u8 cnt0, cnt1, cnt2, cnt3;
370 long count = 0, last;
371 int retry = 3;
372
373 do {
374 last = count;
375
376 /* Read the current count */
377 outb(0x20, pri_dma_base + 0x01);
378 cnt0 = inb(pri_dma_base + 0x03);
379 outb(0x21, pri_dma_base + 0x01);
380 cnt1 = inb(pri_dma_base + 0x03);
381 outb(0x20, sec_dma_base + 0x01);
382 cnt2 = inb(sec_dma_base + 0x03);
383 outb(0x21, sec_dma_base + 0x01);
384 cnt3 = inb(sec_dma_base + 0x03);
385
386 count = (cnt3 << 23) | (cnt2 << 15) | (cnt1 << 8) | cnt0;
387
388 /*
389 * The 30-bit decrementing counter is read in 4 pieces.
390 * Incorrect value may be read when the most significant bytes
391 * are changing...
392 */
393 } while (retry-- && (((last ^ count) & 0x3fff8000) || last < count));
394
395 DBG("cnt0[%02X] cnt1[%02X] cnt2[%02X] cnt3[%02X]\n",
396 cnt0, cnt1, cnt2, cnt3);
397
398 return count;
399}
400
401/**
402 * detect_pll_input_clock - Detect the PLL input clock in Hz.
403 * @dma_base: for the port address
404 * E.g. 16949000 on 33 MHz PCI bus, i.e. half of the PCI clock.
405 */
406static long __devinit detect_pll_input_clock(unsigned long dma_base)
407{
408 long start_count, end_count;
409 long pll_input;
410 u8 scr1;
411
412 start_count = read_counter(dma_base);
413
414 /* Start the test mode */
415 outb(0x01, dma_base + 0x01);
416 scr1 = inb(dma_base + 0x03);
417 DBG("scr1[%02X]\n", scr1);
418 outb(scr1 | 0x40, dma_base + 0x03);
419
420 /* Let the counter run for 10 ms. */
421 mdelay(10);
422
423 end_count = read_counter(dma_base);
424
425 /* Stop the test mode */
426 outb(0x01, dma_base + 0x01);
427 scr1 = inb(dma_base + 0x03);
428 DBG("scr1[%02X]\n", scr1);
429 outb(scr1 & ~0x40, dma_base + 0x03);
430
431 /*
432 * Calculate the input clock in Hz
433 * (the clock counter is 30 bit wide and counts down)
434 */
435 pll_input = ((start_count - end_count) & 0x3ffffff) * 100;
436
437 DBG("start[%ld] end[%ld]\n", start_count, end_count);
438
439 return pll_input;
440}
441
267#ifdef CONFIG_PPC_PMAC 442#ifdef CONFIG_PPC_PMAC
268static void __devinit apple_kiwi_init(struct pci_dev *pdev) 443static void __devinit apple_kiwi_init(struct pci_dev *pdev)
269{ 444{
270 struct device_node *np = pci_device_to_OF_node(pdev); 445 struct device_node *np = pci_device_to_OF_node(pdev);
271 unsigned int class_rev = 0; 446 unsigned int class_rev = 0;
272 void __iomem *mmio;
273 u8 conf; 447 u8 conf;
274 448
275 if (np == NULL || !device_is_compatible(np, "kiwi-root")) 449 if (np == NULL || !device_is_compatible(np, "kiwi-root"))
@@ -280,30 +454,20 @@ static void __devinit apple_kiwi_init(struct pci_dev *pdev)
280 454
281 if (class_rev >= 0x03) { 455 if (class_rev >= 0x03) {
282 /* Setup chip magic config stuff (from darwin) */ 456 /* Setup chip magic config stuff (from darwin) */
283 pci_read_config_byte(pdev, 0x40, &conf); 457 pci_read_config_byte (pdev, 0x40, &conf);
284 pci_write_config_byte(pdev, 0x40, conf | 0x01); 458 pci_write_config_byte(pdev, 0x40, (conf | 0x01));
285 }
286 mmio = ioremap(pci_resource_start(pdev, 5),
287 pci_resource_len(pdev, 5));
288
289 /* Setup some PLL stuffs */
290 switch (pdev->device) {
291 case PCI_DEVICE_ID_PROMISE_20270:
292 writew(0x0d2b, mmio + 0x1202);
293 mdelay(30);
294 break;
295 case PCI_DEVICE_ID_PROMISE_20271:
296 writew(0x0826, mmio + 0x1202);
297 mdelay(30);
298 break;
299 } 459 }
300
301 iounmap(mmio);
302} 460}
303#endif /* CONFIG_PPC_PMAC */ 461#endif /* CONFIG_PPC_PMAC */
304 462
305static unsigned int __devinit init_chipset_pdcnew(struct pci_dev *dev, const char *name) 463static unsigned int __devinit init_chipset_pdcnew(struct pci_dev *dev, const char *name)
306{ 464{
465 unsigned long dma_base = pci_resource_start(dev, 4);
466 unsigned long sec_dma_base = dma_base + 0x08;
467 long pll_input, pll_output, ratio;
468 int f, r;
469 u8 pll_ctl0, pll_ctl1;
470
307 if (dev->resource[PCI_ROM_RESOURCE].start) { 471 if (dev->resource[PCI_ROM_RESOURCE].start) {
308 pci_write_config_dword(dev, PCI_ROM_ADDRESS, 472 pci_write_config_dword(dev, PCI_ROM_ADDRESS,
309 dev->resource[PCI_ROM_RESOURCE].start | PCI_ROM_ADDRESS_ENABLE); 473 dev->resource[PCI_ROM_RESOURCE].start | PCI_ROM_ADDRESS_ENABLE);
@@ -315,6 +479,106 @@ static unsigned int __devinit init_chipset_pdcnew(struct pci_dev *dev, const cha
315 apple_kiwi_init(dev); 479 apple_kiwi_init(dev);
316#endif 480#endif
317 481
482 /* Calculate the required PLL output frequency */
483 switch(max_dma_rate(dev)) {
484 case 4: /* it's 133 MHz for Ultra133 chips */
485 pll_output = 133333333;
486 break;
487 case 3: /* and 100 MHz for Ultra100 chips */
488 default:
489 pll_output = 100000000;
490 break;
491 }
492
493 /*
494 * Detect PLL input clock.
495 * On some systems, where PCI bus is running at non-standard clock rate
496 * (e.g. 25 or 40 MHz), we have to adjust the cycle time.
497 * PDC20268 and newer chips employ PLL circuit to help correct timing
498 * registers setting.
499 */
500 pll_input = detect_pll_input_clock(dma_base);
501 printk("%s: PLL input clock is %ld kHz\n", name, pll_input / 1000);
502
503 /* Sanity check */
504 if (unlikely(pll_input < 5000000L || pll_input > 70000000L)) {
505 printk(KERN_ERR "%s: Bad PLL input clock %ld Hz, giving up!\n",
506 name, pll_input);
507 goto out;
508 }
509
510#ifdef DEBUG
511 DBG("pll_output is %ld Hz\n", pll_output);
512
513 /* Show the current clock value of PLL control register
514 * (maybe already configured by the BIOS)
515 */
516 outb(0x02, sec_dma_base + 0x01);
517 pll_ctl0 = inb(sec_dma_base + 0x03);
518 outb(0x03, sec_dma_base + 0x01);
519 pll_ctl1 = inb(sec_dma_base + 0x03);
520
521 DBG("pll_ctl[%02X][%02X]\n", pll_ctl0, pll_ctl1);
522#endif
523
524 /*
525 * Calculate the ratio of F, R and NO
526 * POUT = (F + 2) / (( R + 2) * NO)
527 */
528 ratio = pll_output / (pll_input / 1000);
529 if (ratio < 8600L) { /* 8.6x */
530 /* Using NO = 0x01, R = 0x0d */
531 r = 0x0d;
532 } else if (ratio < 12900L) { /* 12.9x */
533 /* Using NO = 0x01, R = 0x08 */
534 r = 0x08;
535 } else if (ratio < 16100L) { /* 16.1x */
536 /* Using NO = 0x01, R = 0x06 */
537 r = 0x06;
538 } else if (ratio < 64000L) { /* 64x */
539 r = 0x00;
540 } else {
541 /* Invalid ratio */
542 printk(KERN_ERR "%s: Bad ratio %ld, giving up!\n", name, ratio);
543 goto out;
544 }
545
546 f = (ratio * (r + 2)) / 1000 - 2;
547
548 DBG("F[%d] R[%d] ratio*1000[%ld]\n", f, r, ratio);
549
550 if (unlikely(f < 0 || f > 127)) {
551 /* Invalid F */
552 printk(KERN_ERR "%s: F[%d] invalid!\n", name, f);
553 goto out;
554 }
555
556 pll_ctl0 = (u8) f;
557 pll_ctl1 = (u8) r;
558
559 DBG("Writing pll_ctl[%02X][%02X]\n", pll_ctl0, pll_ctl1);
560
561 outb(0x02, sec_dma_base + 0x01);
562 outb(pll_ctl0, sec_dma_base + 0x03);
563 outb(0x03, sec_dma_base + 0x01);
564 outb(pll_ctl1, sec_dma_base + 0x03);
565
566 /* Wait the PLL circuit to be stable */
567 mdelay(30);
568
569#ifdef DEBUG
570 /*
571 * Show the current clock value of PLL control register
572 */
573 outb(0x02, sec_dma_base + 0x01);
574 pll_ctl0 = inb(sec_dma_base + 0x03);
575 outb(0x03, sec_dma_base + 0x01);
576 pll_ctl1 = inb(sec_dma_base + 0x03);
577
578 DBG("pll_ctl[%02X][%02X]\n", pll_ctl0, pll_ctl1);
579#endif
580
581 out:
318 return dev->irq; 582 return dev->irq;
319} 583}
320 584
@@ -324,8 +588,8 @@ static void __devinit init_hwif_pdc202new(ide_hwif_t *hwif)
324 588
325 hwif->tuneproc = &pdcnew_tune_drive; 589 hwif->tuneproc = &pdcnew_tune_drive;
326 hwif->quirkproc = &pdcnew_quirkproc; 590 hwif->quirkproc = &pdcnew_quirkproc;
327 hwif->speedproc = &pdcnew_new_tune_chipset; 591 hwif->speedproc = &pdcnew_tune_chipset;
328 hwif->resetproc = &pdcnew_new_reset; 592 hwif->resetproc = &pdcnew_reset;
329 593
330 hwif->drives[0].autotune = hwif->drives[1].autotune = 1; 594 hwif->drives[0].autotune = hwif->drives[1].autotune = 1;
331 595
@@ -337,11 +601,14 @@ static void __devinit init_hwif_pdc202new(ide_hwif_t *hwif)
337 hwif->ide_dma_check = &pdcnew_config_drive_xfer_rate; 601 hwif->ide_dma_check = &pdcnew_config_drive_xfer_rate;
338 hwif->ide_dma_lostirq = &pdcnew_ide_dma_lostirq; 602 hwif->ide_dma_lostirq = &pdcnew_ide_dma_lostirq;
339 hwif->ide_dma_timeout = &pdcnew_ide_dma_timeout; 603 hwif->ide_dma_timeout = &pdcnew_ide_dma_timeout;
340 if (!(hwif->udma_four)) 604
341 hwif->udma_four = (pdcnew_new_cable_detect(hwif)) ? 0 : 1; 605 if (!hwif->udma_four)
606 hwif->udma_four = pdcnew_cable_detect(hwif) ? 0 : 1;
607
342 if (!noautodma) 608 if (!noautodma)
343 hwif->autodma = 1; 609 hwif->autodma = 1;
344 hwif->drives[0].autodma = hwif->drives[1].autodma = hwif->autodma; 610 hwif->drives[0].autodma = hwif->drives[1].autodma = hwif->autodma;
611
345#if PDC202_DEBUG_CABLE 612#if PDC202_DEBUG_CABLE
346 printk(KERN_DEBUG "%s: %s-pin cable\n", 613 printk(KERN_DEBUG "%s: %s-pin cable\n",
347 hwif->name, hwif->udma_four ? "80" : "40"); 614 hwif->name, hwif->udma_four ? "80" : "40");
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c
index cdc3aab9ebcb..b1e9a8eba6b6 100644
--- a/drivers/ide/pci/piix.c
+++ b/drivers/ide/pci/piix.c
@@ -505,6 +505,10 @@ static void __devinit init_hwif_piix(ide_hwif_t *hwif)
505 /* This is a painful system best to let it self tune for now */ 505 /* This is a painful system best to let it self tune for now */
506 return; 506 return;
507 } 507 }
508 /* ESB2 appears to generate spurious DMA interrupts in PIO mode
509 when in native mode */
510 if (hwif->pci_dev->device == PCI_DEVICE_ID_INTEL_ESB2_18)
511 hwif->atapi_irq_bogon = 1;
508 512
509 hwif->autodma = 0; 513 hwif->autodma = 0;
510 hwif->tuneproc = &piix_tune_drive; 514 hwif->tuneproc = &piix_tune_drive;
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
index 0719b6484824..695e23904d30 100644
--- a/drivers/ide/setup-pci.c
+++ b/drivers/ide/setup-pci.c
@@ -844,11 +844,11 @@ void __init ide_scan_pcibus (int scan_direction)
844 844
845 pre_init = 0; 845 pre_init = 0;
846 if (!scan_direction) { 846 if (!scan_direction) {
847 while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 847 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
848 ide_scan_pcidev(dev); 848 ide_scan_pcidev(dev);
849 } 849 }
850 } else { 850 } else {
851 while ((dev = pci_find_device_reverse(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 851 while ((dev = pci_get_device_reverse(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
852 ide_scan_pcidev(dev); 852 ide_scan_pcidev(dev);
853 } 853 }
854 } 854 }
diff --git a/drivers/ieee1394/pcilynx.c b/drivers/ieee1394/pcilynx.c
index 13a617917bf2..fbb7f14ec509 100644
--- a/drivers/ieee1394/pcilynx.c
+++ b/drivers/ieee1394/pcilynx.c
@@ -1485,7 +1485,7 @@ static int __devinit add_card(struct pci_dev *dev,
1485 1485
1486 } 1486 }
1487 1487
1488 i2c_bit_del_bus(i2c_ad); 1488 i2c_del_adapter(i2c_ad);
1489 kfree(i2c_ad); 1489 kfree(i2c_ad);
1490 } 1490 }
1491 } 1491 }
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 163d991eb8c9..50fb1cd447b7 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -1,9 +1,11 @@
1infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := ib_addr.o rdma_cm.o 1infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := ib_addr.o rdma_cm.o
2user_access-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_ucm.o
2 3
3obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \ 4obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \
4 ib_cm.o iw_cm.o $(infiniband-y) 5 ib_cm.o iw_cm.o $(infiniband-y)
5obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o 6obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o
6obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o 7obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
8 $(user_access-y)
7 9
8ib_core-y := packer.o ud_header.o verbs.o sysfs.o \ 10ib_core-y := packer.o ud_header.o verbs.o sysfs.o \
9 device.o fmr_pool.o cache.o 11 device.o fmr_pool.o cache.o
@@ -18,6 +20,8 @@ iw_cm-y := iwcm.o
18 20
19rdma_cm-y := cma.o 21rdma_cm-y := cma.o
20 22
23rdma_ucm-y := ucma.o
24
21ib_addr-y := addr.o 25ib_addr-y := addr.o
22 26
23ib_umad-y := user_mad.o 27ib_umad-y := user_mad.o
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 79c937bf6962..d446998b12a4 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3289,6 +3289,10 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
3289 3289
3290 spin_lock_irqsave(&cm_id_priv->lock, flags); 3290 spin_lock_irqsave(&cm_id_priv->lock, flags);
3291 switch (cm_id_priv->id.state) { 3291 switch (cm_id_priv->id.state) {
3292 /* Allow transition to RTS before sending REP */
3293 case IB_CM_REQ_RCVD:
3294 case IB_CM_MRA_REQ_SENT:
3295
3292 case IB_CM_REP_RCVD: 3296 case IB_CM_REP_RCVD:
3293 case IB_CM_MRA_REP_SENT: 3297 case IB_CM_MRA_REP_SENT:
3294 case IB_CM_REP_SENT: 3298 case IB_CM_REP_SENT:
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 985a6b564d8f..533193d4e5df 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -70,6 +70,7 @@ static DEFINE_MUTEX(lock);
70static struct workqueue_struct *cma_wq; 70static struct workqueue_struct *cma_wq;
71static DEFINE_IDR(sdp_ps); 71static DEFINE_IDR(sdp_ps);
72static DEFINE_IDR(tcp_ps); 72static DEFINE_IDR(tcp_ps);
73static DEFINE_IDR(udp_ps);
73 74
74struct cma_device { 75struct cma_device {
75 struct list_head list; 76 struct list_head list;
@@ -133,7 +134,6 @@ struct rdma_id_private {
133 134
134 u32 seq_num; 135 u32 seq_num;
135 u32 qp_num; 136 u32 qp_num;
136 enum ib_qp_type qp_type;
137 u8 srq; 137 u8 srq;
138}; 138};
139 139
@@ -392,7 +392,6 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
392 392
393 id->qp = qp; 393 id->qp = qp;
394 id_priv->qp_num = qp->qp_num; 394 id_priv->qp_num = qp->qp_num;
395 id_priv->qp_type = qp->qp_type;
396 id_priv->srq = (qp->srq != NULL); 395 id_priv->srq = (qp->srq != NULL);
397 return 0; 396 return 0;
398err: 397err:
@@ -510,9 +509,17 @@ static inline int cma_any_addr(struct sockaddr *addr)
510 return cma_zero_addr(addr) || cma_loopback_addr(addr); 509 return cma_zero_addr(addr) || cma_loopback_addr(addr);
511} 510}
512 511
512static inline __be16 cma_port(struct sockaddr *addr)
513{
514 if (addr->sa_family == AF_INET)
515 return ((struct sockaddr_in *) addr)->sin_port;
516 else
517 return ((struct sockaddr_in6 *) addr)->sin6_port;
518}
519
513static inline int cma_any_port(struct sockaddr *addr) 520static inline int cma_any_port(struct sockaddr *addr)
514{ 521{
515 return !((struct sockaddr_in *) addr)->sin_port; 522 return !cma_port(addr);
516} 523}
517 524
518static int cma_get_net_info(void *hdr, enum rdma_port_space ps, 525static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
@@ -594,20 +601,6 @@ static inline int cma_user_data_offset(enum rdma_port_space ps)
594 } 601 }
595} 602}
596 603
597static int cma_notify_user(struct rdma_id_private *id_priv,
598 enum rdma_cm_event_type type, int status,
599 void *data, u8 data_len)
600{
601 struct rdma_cm_event event;
602
603 event.event = type;
604 event.status = status;
605 event.private_data = data;
606 event.private_data_len = data_len;
607
608 return id_priv->id.event_handler(&id_priv->id, &event);
609}
610
611static void cma_cancel_route(struct rdma_id_private *id_priv) 604static void cma_cancel_route(struct rdma_id_private *id_priv)
612{ 605{
613 switch (rdma_node_get_transport(id_priv->id.device->node_type)) { 606 switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
@@ -776,63 +769,61 @@ static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
776 return 0; 769 return 0;
777} 770}
778 771
779static int cma_rtu_recv(struct rdma_id_private *id_priv) 772static void cma_set_rep_event_data(struct rdma_cm_event *event,
773 struct ib_cm_rep_event_param *rep_data,
774 void *private_data)
780{ 775{
781 int ret; 776 event->param.conn.private_data = private_data;
782 777 event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
783 ret = cma_modify_qp_rts(&id_priv->id); 778 event->param.conn.responder_resources = rep_data->responder_resources;
784 if (ret) 779 event->param.conn.initiator_depth = rep_data->initiator_depth;
785 goto reject; 780 event->param.conn.flow_control = rep_data->flow_control;
786 781 event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
787 return 0; 782 event->param.conn.srq = rep_data->srq;
788reject: 783 event->param.conn.qp_num = rep_data->remote_qpn;
789 cma_modify_qp_err(&id_priv->id);
790 ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
791 NULL, 0, NULL, 0);
792 return ret;
793} 784}
794 785
795static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) 786static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
796{ 787{
797 struct rdma_id_private *id_priv = cm_id->context; 788 struct rdma_id_private *id_priv = cm_id->context;
798 enum rdma_cm_event_type event; 789 struct rdma_cm_event event;
799 u8 private_data_len = 0; 790 int ret = 0;
800 int ret = 0, status = 0;
801 791
802 atomic_inc(&id_priv->dev_remove); 792 atomic_inc(&id_priv->dev_remove);
803 if (!cma_comp(id_priv, CMA_CONNECT)) 793 if (!cma_comp(id_priv, CMA_CONNECT))
804 goto out; 794 goto out;
805 795
796 memset(&event, 0, sizeof event);
806 switch (ib_event->event) { 797 switch (ib_event->event) {
807 case IB_CM_REQ_ERROR: 798 case IB_CM_REQ_ERROR:
808 case IB_CM_REP_ERROR: 799 case IB_CM_REP_ERROR:
809 event = RDMA_CM_EVENT_UNREACHABLE; 800 event.event = RDMA_CM_EVENT_UNREACHABLE;
810 status = -ETIMEDOUT; 801 event.status = -ETIMEDOUT;
811 break; 802 break;
812 case IB_CM_REP_RECEIVED: 803 case IB_CM_REP_RECEIVED:
813 status = cma_verify_rep(id_priv, ib_event->private_data); 804 event.status = cma_verify_rep(id_priv, ib_event->private_data);
814 if (status) 805 if (event.status)
815 event = RDMA_CM_EVENT_CONNECT_ERROR; 806 event.event = RDMA_CM_EVENT_CONNECT_ERROR;
816 else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) { 807 else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
817 status = cma_rep_recv(id_priv); 808 event.status = cma_rep_recv(id_priv);
818 event = status ? RDMA_CM_EVENT_CONNECT_ERROR : 809 event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
819 RDMA_CM_EVENT_ESTABLISHED; 810 RDMA_CM_EVENT_ESTABLISHED;
820 } else 811 } else
821 event = RDMA_CM_EVENT_CONNECT_RESPONSE; 812 event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
822 private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE; 813 cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
814 ib_event->private_data);
823 break; 815 break;
824 case IB_CM_RTU_RECEIVED: 816 case IB_CM_RTU_RECEIVED:
825 status = cma_rtu_recv(id_priv); 817 case IB_CM_USER_ESTABLISHED:
826 event = status ? RDMA_CM_EVENT_CONNECT_ERROR : 818 event.event = RDMA_CM_EVENT_ESTABLISHED;
827 RDMA_CM_EVENT_ESTABLISHED;
828 break; 819 break;
829 case IB_CM_DREQ_ERROR: 820 case IB_CM_DREQ_ERROR:
830 status = -ETIMEDOUT; /* fall through */ 821 event.status = -ETIMEDOUT; /* fall through */
831 case IB_CM_DREQ_RECEIVED: 822 case IB_CM_DREQ_RECEIVED:
832 case IB_CM_DREP_RECEIVED: 823 case IB_CM_DREP_RECEIVED:
833 if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT)) 824 if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
834 goto out; 825 goto out;
835 event = RDMA_CM_EVENT_DISCONNECTED; 826 event.event = RDMA_CM_EVENT_DISCONNECTED;
836 break; 827 break;
837 case IB_CM_TIMEWAIT_EXIT: 828 case IB_CM_TIMEWAIT_EXIT:
838 case IB_CM_MRA_RECEIVED: 829 case IB_CM_MRA_RECEIVED:
@@ -840,9 +831,10 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
840 goto out; 831 goto out;
841 case IB_CM_REJ_RECEIVED: 832 case IB_CM_REJ_RECEIVED:
842 cma_modify_qp_err(&id_priv->id); 833 cma_modify_qp_err(&id_priv->id);
843 status = ib_event->param.rej_rcvd.reason; 834 event.status = ib_event->param.rej_rcvd.reason;
844 event = RDMA_CM_EVENT_REJECTED; 835 event.event = RDMA_CM_EVENT_REJECTED;
845 private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE; 836 event.param.conn.private_data = ib_event->private_data;
837 event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
846 break; 838 break;
847 default: 839 default:
848 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d", 840 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
@@ -850,8 +842,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
850 goto out; 842 goto out;
851 } 843 }
852 844
853 ret = cma_notify_user(id_priv, event, status, ib_event->private_data, 845 ret = id_priv->id.event_handler(&id_priv->id, &event);
854 private_data_len);
855 if (ret) { 846 if (ret) {
856 /* Destroy the CM ID by returning a non-zero value. */ 847 /* Destroy the CM ID by returning a non-zero value. */
857 id_priv->cm_id.ib = NULL; 848 id_priv->cm_id.ib = NULL;
@@ -865,8 +856,8 @@ out:
865 return ret; 856 return ret;
866} 857}
867 858
868static struct rdma_id_private *cma_new_id(struct rdma_cm_id *listen_id, 859static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
869 struct ib_cm_event *ib_event) 860 struct ib_cm_event *ib_event)
870{ 861{
871 struct rdma_id_private *id_priv; 862 struct rdma_id_private *id_priv;
872 struct rdma_cm_id *id; 863 struct rdma_cm_id *id;
@@ -913,9 +904,61 @@ err:
913 return NULL; 904 return NULL;
914} 905}
915 906
907static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
908 struct ib_cm_event *ib_event)
909{
910 struct rdma_id_private *id_priv;
911 struct rdma_cm_id *id;
912 union cma_ip_addr *src, *dst;
913 __u16 port;
914 u8 ip_ver;
915 int ret;
916
917 id = rdma_create_id(listen_id->event_handler, listen_id->context,
918 listen_id->ps);
919 if (IS_ERR(id))
920 return NULL;
921
922
923 if (cma_get_net_info(ib_event->private_data, listen_id->ps,
924 &ip_ver, &port, &src, &dst))
925 goto err;
926
927 cma_save_net_info(&id->route.addr, &listen_id->route.addr,
928 ip_ver, port, src, dst);
929
930 ret = rdma_translate_ip(&id->route.addr.src_addr,
931 &id->route.addr.dev_addr);
932 if (ret)
933 goto err;
934
935 id_priv = container_of(id, struct rdma_id_private, id);
936 id_priv->state = CMA_CONNECT;
937 return id_priv;
938err:
939 rdma_destroy_id(id);
940 return NULL;
941}
942
943static void cma_set_req_event_data(struct rdma_cm_event *event,
944 struct ib_cm_req_event_param *req_data,
945 void *private_data, int offset)
946{
947 event->param.conn.private_data = private_data + offset;
948 event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
949 event->param.conn.responder_resources = req_data->responder_resources;
950 event->param.conn.initiator_depth = req_data->initiator_depth;
951 event->param.conn.flow_control = req_data->flow_control;
952 event->param.conn.retry_count = req_data->retry_count;
953 event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
954 event->param.conn.srq = req_data->srq;
955 event->param.conn.qp_num = req_data->remote_qpn;
956}
957
916static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) 958static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
917{ 959{
918 struct rdma_id_private *listen_id, *conn_id; 960 struct rdma_id_private *listen_id, *conn_id;
961 struct rdma_cm_event event;
919 int offset, ret; 962 int offset, ret;
920 963
921 listen_id = cm_id->context; 964 listen_id = cm_id->context;
@@ -925,7 +968,19 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
925 goto out; 968 goto out;
926 } 969 }
927 970
928 conn_id = cma_new_id(&listen_id->id, ib_event); 971 memset(&event, 0, sizeof event);
972 offset = cma_user_data_offset(listen_id->id.ps);
973 event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
974 if (listen_id->id.ps == RDMA_PS_UDP) {
975 conn_id = cma_new_udp_id(&listen_id->id, ib_event);
976 event.param.ud.private_data = ib_event->private_data + offset;
977 event.param.ud.private_data_len =
978 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
979 } else {
980 conn_id = cma_new_conn_id(&listen_id->id, ib_event);
981 cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
982 ib_event->private_data, offset);
983 }
929 if (!conn_id) { 984 if (!conn_id) {
930 ret = -ENOMEM; 985 ret = -ENOMEM;
931 goto out; 986 goto out;
@@ -942,10 +997,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
942 cm_id->context = conn_id; 997 cm_id->context = conn_id;
943 cm_id->cm_handler = cma_ib_handler; 998 cm_id->cm_handler = cma_ib_handler;
944 999
945 offset = cma_user_data_offset(listen_id->id.ps); 1000 ret = conn_id->id.event_handler(&conn_id->id, &event);
946 ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0,
947 ib_event->private_data + offset,
948 IB_CM_REQ_PRIVATE_DATA_SIZE - offset);
949 if (!ret) 1001 if (!ret)
950 goto out; 1002 goto out;
951 1003
@@ -964,8 +1016,7 @@ out:
964 1016
965static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr) 1017static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
966{ 1018{
967 return cpu_to_be64(((u64)ps << 16) + 1019 return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr)));
968 be16_to_cpu(((struct sockaddr_in *) addr)->sin_port));
969} 1020}
970 1021
971static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr, 1022static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
@@ -1021,15 +1072,16 @@ static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
1021static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) 1072static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
1022{ 1073{
1023 struct rdma_id_private *id_priv = iw_id->context; 1074 struct rdma_id_private *id_priv = iw_id->context;
1024 enum rdma_cm_event_type event = 0; 1075 struct rdma_cm_event event;
1025 struct sockaddr_in *sin; 1076 struct sockaddr_in *sin;
1026 int ret = 0; 1077 int ret = 0;
1027 1078
1079 memset(&event, 0, sizeof event);
1028 atomic_inc(&id_priv->dev_remove); 1080 atomic_inc(&id_priv->dev_remove);
1029 1081
1030 switch (iw_event->event) { 1082 switch (iw_event->event) {
1031 case IW_CM_EVENT_CLOSE: 1083 case IW_CM_EVENT_CLOSE:
1032 event = RDMA_CM_EVENT_DISCONNECTED; 1084 event.event = RDMA_CM_EVENT_DISCONNECTED;
1033 break; 1085 break;
1034 case IW_CM_EVENT_CONNECT_REPLY: 1086 case IW_CM_EVENT_CONNECT_REPLY:
1035 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr; 1087 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
@@ -1037,20 +1089,21 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
1037 sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr; 1089 sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
1038 *sin = iw_event->remote_addr; 1090 *sin = iw_event->remote_addr;
1039 if (iw_event->status) 1091 if (iw_event->status)
1040 event = RDMA_CM_EVENT_REJECTED; 1092 event.event = RDMA_CM_EVENT_REJECTED;
1041 else 1093 else
1042 event = RDMA_CM_EVENT_ESTABLISHED; 1094 event.event = RDMA_CM_EVENT_ESTABLISHED;
1043 break; 1095 break;
1044 case IW_CM_EVENT_ESTABLISHED: 1096 case IW_CM_EVENT_ESTABLISHED:
1045 event = RDMA_CM_EVENT_ESTABLISHED; 1097 event.event = RDMA_CM_EVENT_ESTABLISHED;
1046 break; 1098 break;
1047 default: 1099 default:
1048 BUG_ON(1); 1100 BUG_ON(1);
1049 } 1101 }
1050 1102
1051 ret = cma_notify_user(id_priv, event, iw_event->status, 1103 event.status = iw_event->status;
1052 iw_event->private_data, 1104 event.param.conn.private_data = iw_event->private_data;
1053 iw_event->private_data_len); 1105 event.param.conn.private_data_len = iw_event->private_data_len;
1106 ret = id_priv->id.event_handler(&id_priv->id, &event);
1054 if (ret) { 1107 if (ret) {
1055 /* Destroy the CM ID by returning a non-zero value. */ 1108 /* Destroy the CM ID by returning a non-zero value. */
1056 id_priv->cm_id.iw = NULL; 1109 id_priv->cm_id.iw = NULL;
@@ -1071,6 +1124,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
1071 struct rdma_id_private *listen_id, *conn_id; 1124 struct rdma_id_private *listen_id, *conn_id;
1072 struct sockaddr_in *sin; 1125 struct sockaddr_in *sin;
1073 struct net_device *dev = NULL; 1126 struct net_device *dev = NULL;
1127 struct rdma_cm_event event;
1074 int ret; 1128 int ret;
1075 1129
1076 listen_id = cm_id->context; 1130 listen_id = cm_id->context;
@@ -1124,9 +1178,11 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
1124 sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr; 1178 sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
1125 *sin = iw_event->remote_addr; 1179 *sin = iw_event->remote_addr;
1126 1180
1127 ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0, 1181 memset(&event, 0, sizeof event);
1128 iw_event->private_data, 1182 event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
1129 iw_event->private_data_len); 1183 event.param.conn.private_data = iw_event->private_data;
1184 event.param.conn.private_data_len = iw_event->private_data_len;
1185 ret = conn_id->id.event_handler(&conn_id->id, &event);
1130 if (ret) { 1186 if (ret) {
1131 /* User wants to destroy the CM ID */ 1187 /* User wants to destroy the CM ID */
1132 conn_id->cm_id.iw = NULL; 1188 conn_id->cm_id.iw = NULL;
@@ -1515,8 +1571,9 @@ static void addr_handler(int status, struct sockaddr *src_addr,
1515 struct rdma_dev_addr *dev_addr, void *context) 1571 struct rdma_dev_addr *dev_addr, void *context)
1516{ 1572{
1517 struct rdma_id_private *id_priv = context; 1573 struct rdma_id_private *id_priv = context;
1518 enum rdma_cm_event_type event; 1574 struct rdma_cm_event event;
1519 1575
1576 memset(&event, 0, sizeof event);
1520 atomic_inc(&id_priv->dev_remove); 1577 atomic_inc(&id_priv->dev_remove);
1521 1578
1522 /* 1579 /*
@@ -1536,14 +1593,15 @@ static void addr_handler(int status, struct sockaddr *src_addr,
1536 if (status) { 1593 if (status) {
1537 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND)) 1594 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
1538 goto out; 1595 goto out;
1539 event = RDMA_CM_EVENT_ADDR_ERROR; 1596 event.event = RDMA_CM_EVENT_ADDR_ERROR;
1597 event.status = status;
1540 } else { 1598 } else {
1541 memcpy(&id_priv->id.route.addr.src_addr, src_addr, 1599 memcpy(&id_priv->id.route.addr.src_addr, src_addr,
1542 ip_addr_size(src_addr)); 1600 ip_addr_size(src_addr));
1543 event = RDMA_CM_EVENT_ADDR_RESOLVED; 1601 event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
1544 } 1602 }
1545 1603
1546 if (cma_notify_user(id_priv, event, status, NULL, 0)) { 1604 if (id_priv->id.event_handler(&id_priv->id, &event)) {
1547 cma_exch(id_priv, CMA_DESTROYING); 1605 cma_exch(id_priv, CMA_DESTROYING);
1548 cma_release_remove(id_priv); 1606 cma_release_remove(id_priv);
1549 cma_deref_id(id_priv); 1607 cma_deref_id(id_priv);
@@ -1733,6 +1791,9 @@ static int cma_get_port(struct rdma_id_private *id_priv)
1733 case RDMA_PS_TCP: 1791 case RDMA_PS_TCP:
1734 ps = &tcp_ps; 1792 ps = &tcp_ps;
1735 break; 1793 break;
1794 case RDMA_PS_UDP:
1795 ps = &udp_ps;
1796 break;
1736 default: 1797 default:
1737 return -EPROTONOSUPPORT; 1798 return -EPROTONOSUPPORT;
1738 } 1799 }
@@ -1821,6 +1882,110 @@ static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
1821 return 0; 1882 return 0;
1822} 1883}
1823 1884
1885static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
1886 struct ib_cm_event *ib_event)
1887{
1888 struct rdma_id_private *id_priv = cm_id->context;
1889 struct rdma_cm_event event;
1890 struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
1891 int ret = 0;
1892
1893 memset(&event, 0, sizeof event);
1894 atomic_inc(&id_priv->dev_remove);
1895 if (!cma_comp(id_priv, CMA_CONNECT))
1896 goto out;
1897
1898 switch (ib_event->event) {
1899 case IB_CM_SIDR_REQ_ERROR:
1900 event.event = RDMA_CM_EVENT_UNREACHABLE;
1901 event.status = -ETIMEDOUT;
1902 break;
1903 case IB_CM_SIDR_REP_RECEIVED:
1904 event.param.ud.private_data = ib_event->private_data;
1905 event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
1906 if (rep->status != IB_SIDR_SUCCESS) {
1907 event.event = RDMA_CM_EVENT_UNREACHABLE;
1908 event.status = ib_event->param.sidr_rep_rcvd.status;
1909 break;
1910 }
1911 if (rep->qkey != RDMA_UD_QKEY) {
1912 event.event = RDMA_CM_EVENT_UNREACHABLE;
1913 event.status = -EINVAL;
1914 break;
1915 }
1916 ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
1917 id_priv->id.route.path_rec,
1918 &event.param.ud.ah_attr);
1919 event.param.ud.qp_num = rep->qpn;
1920 event.param.ud.qkey = rep->qkey;
1921 event.event = RDMA_CM_EVENT_ESTABLISHED;
1922 event.status = 0;
1923 break;
1924 default:
1925 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
1926 ib_event->event);
1927 goto out;
1928 }
1929
1930 ret = id_priv->id.event_handler(&id_priv->id, &event);
1931 if (ret) {
1932 /* Destroy the CM ID by returning a non-zero value. */
1933 id_priv->cm_id.ib = NULL;
1934 cma_exch(id_priv, CMA_DESTROYING);
1935 cma_release_remove(id_priv);
1936 rdma_destroy_id(&id_priv->id);
1937 return ret;
1938 }
1939out:
1940 cma_release_remove(id_priv);
1941 return ret;
1942}
1943
1944static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
1945 struct rdma_conn_param *conn_param)
1946{
1947 struct ib_cm_sidr_req_param req;
1948 struct rdma_route *route;
1949 int ret;
1950
1951 req.private_data_len = sizeof(struct cma_hdr) +
1952 conn_param->private_data_len;
1953 req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
1954 if (!req.private_data)
1955 return -ENOMEM;
1956
1957 if (conn_param->private_data && conn_param->private_data_len)
1958 memcpy((void *) req.private_data + sizeof(struct cma_hdr),
1959 conn_param->private_data, conn_param->private_data_len);
1960
1961 route = &id_priv->id.route;
1962 ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route);
1963 if (ret)
1964 goto out;
1965
1966 id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
1967 cma_sidr_rep_handler, id_priv);
1968 if (IS_ERR(id_priv->cm_id.ib)) {
1969 ret = PTR_ERR(id_priv->cm_id.ib);
1970 goto out;
1971 }
1972
1973 req.path = route->path_rec;
1974 req.service_id = cma_get_service_id(id_priv->id.ps,
1975 &route->addr.dst_addr);
1976 req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
1977 req.max_cm_retries = CMA_MAX_CM_RETRIES;
1978
1979 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
1980 if (ret) {
1981 ib_destroy_cm_id(id_priv->cm_id.ib);
1982 id_priv->cm_id.ib = NULL;
1983 }
1984out:
1985 kfree(req.private_data);
1986 return ret;
1987}
1988
1824static int cma_connect_ib(struct rdma_id_private *id_priv, 1989static int cma_connect_ib(struct rdma_id_private *id_priv,
1825 struct rdma_conn_param *conn_param) 1990 struct rdma_conn_param *conn_param)
1826{ 1991{
@@ -1860,7 +2025,7 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
1860 req.service_id = cma_get_service_id(id_priv->id.ps, 2025 req.service_id = cma_get_service_id(id_priv->id.ps,
1861 &route->addr.dst_addr); 2026 &route->addr.dst_addr);
1862 req.qp_num = id_priv->qp_num; 2027 req.qp_num = id_priv->qp_num;
1863 req.qp_type = id_priv->qp_type; 2028 req.qp_type = IB_QPT_RC;
1864 req.starting_psn = id_priv->seq_num; 2029 req.starting_psn = id_priv->seq_num;
1865 req.responder_resources = conn_param->responder_resources; 2030 req.responder_resources = conn_param->responder_resources;
1866 req.initiator_depth = conn_param->initiator_depth; 2031 req.initiator_depth = conn_param->initiator_depth;
@@ -1937,13 +2102,15 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
1937 2102
1938 if (!id->qp) { 2103 if (!id->qp) {
1939 id_priv->qp_num = conn_param->qp_num; 2104 id_priv->qp_num = conn_param->qp_num;
1940 id_priv->qp_type = conn_param->qp_type;
1941 id_priv->srq = conn_param->srq; 2105 id_priv->srq = conn_param->srq;
1942 } 2106 }
1943 2107
1944 switch (rdma_node_get_transport(id->device->node_type)) { 2108 switch (rdma_node_get_transport(id->device->node_type)) {
1945 case RDMA_TRANSPORT_IB: 2109 case RDMA_TRANSPORT_IB:
1946 ret = cma_connect_ib(id_priv, conn_param); 2110 if (id->ps == RDMA_PS_UDP)
2111 ret = cma_resolve_ib_udp(id_priv, conn_param);
2112 else
2113 ret = cma_connect_ib(id_priv, conn_param);
1947 break; 2114 break;
1948 case RDMA_TRANSPORT_IWARP: 2115 case RDMA_TRANSPORT_IWARP:
1949 ret = cma_connect_iw(id_priv, conn_param); 2116 ret = cma_connect_iw(id_priv, conn_param);
@@ -1966,11 +2133,25 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
1966 struct rdma_conn_param *conn_param) 2133 struct rdma_conn_param *conn_param)
1967{ 2134{
1968 struct ib_cm_rep_param rep; 2135 struct ib_cm_rep_param rep;
1969 int ret; 2136 struct ib_qp_attr qp_attr;
2137 int qp_attr_mask, ret;
1970 2138
1971 ret = cma_modify_qp_rtr(&id_priv->id); 2139 if (id_priv->id.qp) {
1972 if (ret) 2140 ret = cma_modify_qp_rtr(&id_priv->id);
1973 return ret; 2141 if (ret)
2142 goto out;
2143
2144 qp_attr.qp_state = IB_QPS_RTS;
2145 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, &qp_attr,
2146 &qp_attr_mask);
2147 if (ret)
2148 goto out;
2149
2150 qp_attr.max_rd_atomic = conn_param->initiator_depth;
2151 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
2152 if (ret)
2153 goto out;
2154 }
1974 2155
1975 memset(&rep, 0, sizeof rep); 2156 memset(&rep, 0, sizeof rep);
1976 rep.qp_num = id_priv->qp_num; 2157 rep.qp_num = id_priv->qp_num;
@@ -1985,7 +2166,9 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
1985 rep.rnr_retry_count = conn_param->rnr_retry_count; 2166 rep.rnr_retry_count = conn_param->rnr_retry_count;
1986 rep.srq = id_priv->srq ? 1 : 0; 2167 rep.srq = id_priv->srq ? 1 : 0;
1987 2168
1988 return ib_send_cm_rep(id_priv->cm_id.ib, &rep); 2169 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
2170out:
2171 return ret;
1989} 2172}
1990 2173
1991static int cma_accept_iw(struct rdma_id_private *id_priv, 2174static int cma_accept_iw(struct rdma_id_private *id_priv,
@@ -2010,6 +2193,24 @@ static int cma_accept_iw(struct rdma_id_private *id_priv,
2010 return iw_cm_accept(id_priv->cm_id.iw, &iw_param); 2193 return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
2011} 2194}
2012 2195
2196static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
2197 enum ib_cm_sidr_status status,
2198 const void *private_data, int private_data_len)
2199{
2200 struct ib_cm_sidr_rep_param rep;
2201
2202 memset(&rep, 0, sizeof rep);
2203 rep.status = status;
2204 if (status == IB_SIDR_SUCCESS) {
2205 rep.qp_num = id_priv->qp_num;
2206 rep.qkey = RDMA_UD_QKEY;
2207 }
2208 rep.private_data = private_data;
2209 rep.private_data_len = private_data_len;
2210
2211 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
2212}
2213
2013int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) 2214int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2014{ 2215{
2015 struct rdma_id_private *id_priv; 2216 struct rdma_id_private *id_priv;
@@ -2021,13 +2222,16 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2021 2222
2022 if (!id->qp && conn_param) { 2223 if (!id->qp && conn_param) {
2023 id_priv->qp_num = conn_param->qp_num; 2224 id_priv->qp_num = conn_param->qp_num;
2024 id_priv->qp_type = conn_param->qp_type;
2025 id_priv->srq = conn_param->srq; 2225 id_priv->srq = conn_param->srq;
2026 } 2226 }
2027 2227
2028 switch (rdma_node_get_transport(id->device->node_type)) { 2228 switch (rdma_node_get_transport(id->device->node_type)) {
2029 case RDMA_TRANSPORT_IB: 2229 case RDMA_TRANSPORT_IB:
2030 if (conn_param) 2230 if (id->ps == RDMA_PS_UDP)
2231 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
2232 conn_param->private_data,
2233 conn_param->private_data_len);
2234 else if (conn_param)
2031 ret = cma_accept_ib(id_priv, conn_param); 2235 ret = cma_accept_ib(id_priv, conn_param);
2032 else 2236 else
2033 ret = cma_rep_recv(id_priv); 2237 ret = cma_rep_recv(id_priv);
@@ -2051,6 +2255,27 @@ reject:
2051} 2255}
2052EXPORT_SYMBOL(rdma_accept); 2256EXPORT_SYMBOL(rdma_accept);
2053 2257
2258int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
2259{
2260 struct rdma_id_private *id_priv;
2261 int ret;
2262
2263 id_priv = container_of(id, struct rdma_id_private, id);
2264 if (!cma_comp(id_priv, CMA_CONNECT))
2265 return -EINVAL;
2266
2267 switch (id->device->node_type) {
2268 case RDMA_NODE_IB_CA:
2269 ret = ib_cm_notify(id_priv->cm_id.ib, event);
2270 break;
2271 default:
2272 ret = 0;
2273 break;
2274 }
2275 return ret;
2276}
2277EXPORT_SYMBOL(rdma_notify);
2278
2054int rdma_reject(struct rdma_cm_id *id, const void *private_data, 2279int rdma_reject(struct rdma_cm_id *id, const void *private_data,
2055 u8 private_data_len) 2280 u8 private_data_len)
2056{ 2281{
@@ -2063,9 +2288,13 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
2063 2288
2064 switch (rdma_node_get_transport(id->device->node_type)) { 2289 switch (rdma_node_get_transport(id->device->node_type)) {
2065 case RDMA_TRANSPORT_IB: 2290 case RDMA_TRANSPORT_IB:
2066 ret = ib_send_cm_rej(id_priv->cm_id.ib, 2291 if (id->ps == RDMA_PS_UDP)
2067 IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, 2292 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
2068 private_data, private_data_len); 2293 private_data, private_data_len);
2294 else
2295 ret = ib_send_cm_rej(id_priv->cm_id.ib,
2296 IB_CM_REJ_CONSUMER_DEFINED, NULL,
2297 0, private_data, private_data_len);
2069 break; 2298 break;
2070 case RDMA_TRANSPORT_IWARP: 2299 case RDMA_TRANSPORT_IWARP:
2071 ret = iw_cm_reject(id_priv->cm_id.iw, 2300 ret = iw_cm_reject(id_priv->cm_id.iw,
@@ -2136,6 +2365,7 @@ static void cma_add_one(struct ib_device *device)
2136 2365
2137static int cma_remove_id_dev(struct rdma_id_private *id_priv) 2366static int cma_remove_id_dev(struct rdma_id_private *id_priv)
2138{ 2367{
2368 struct rdma_cm_event event;
2139 enum cma_state state; 2369 enum cma_state state;
2140 2370
2141 /* Record that we want to remove the device */ 2371 /* Record that we want to remove the device */
@@ -2150,8 +2380,9 @@ static int cma_remove_id_dev(struct rdma_id_private *id_priv)
2150 if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL)) 2380 if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
2151 return 0; 2381 return 0;
2152 2382
2153 return cma_notify_user(id_priv, RDMA_CM_EVENT_DEVICE_REMOVAL, 2383 memset(&event, 0, sizeof event);
2154 0, NULL, 0); 2384 event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
2385 return id_priv->id.event_handler(&id_priv->id, &event);
2155} 2386}
2156 2387
2157static void cma_process_remove(struct cma_device *cma_dev) 2388static void cma_process_remove(struct cma_device *cma_dev)
@@ -2233,6 +2464,7 @@ static void cma_cleanup(void)
2233 destroy_workqueue(cma_wq); 2464 destroy_workqueue(cma_wq);
2234 idr_destroy(&sdp_ps); 2465 idr_destroy(&sdp_ps);
2235 idr_destroy(&tcp_ps); 2466 idr_destroy(&tcp_ps);
2467 idr_destroy(&udp_ps);
2236} 2468}
2237 2469
2238module_init(cma_init); 2470module_init(cma_init);
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 86a3b2d401db..8926a2bd4a87 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -394,20 +394,12 @@ EXPORT_SYMBOL(ib_destroy_fmr_pool);
394 */ 394 */
395int ib_flush_fmr_pool(struct ib_fmr_pool *pool) 395int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
396{ 396{
397 int serial; 397 int serial = atomic_inc_return(&pool->req_ser);
398
399 atomic_inc(&pool->req_ser);
400 /*
401 * It's OK if someone else bumps req_ser again here -- we'll
402 * just wait a little longer.
403 */
404 serial = atomic_read(&pool->req_ser);
405 398
406 wake_up_process(pool->thread); 399 wake_up_process(pool->thread);
407 400
408 if (wait_event_interruptible(pool->force_wait, 401 if (wait_event_interruptible(pool->force_wait,
409 atomic_read(&pool->flush_ser) - 402 atomic_read(&pool->flush_ser) - serial >= 0))
410 atomic_read(&pool->req_ser) >= 0))
411 return -EINTR; 403 return -EINTR;
412 404
413 return 0; 405 return 0;
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 15f38d94b3a8..5ed141ebd1c8 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -998,17 +998,17 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
998 998
999 mad_agent = mad_send_wr->send_buf.mad_agent; 999 mad_agent = mad_send_wr->send_buf.mad_agent;
1000 sge = mad_send_wr->sg_list; 1000 sge = mad_send_wr->sg_list;
1001 sge[0].addr = dma_map_single(mad_agent->device->dma_device, 1001 sge[0].addr = ib_dma_map_single(mad_agent->device,
1002 mad_send_wr->send_buf.mad, 1002 mad_send_wr->send_buf.mad,
1003 sge[0].length, 1003 sge[0].length,
1004 DMA_TO_DEVICE); 1004 DMA_TO_DEVICE);
1005 pci_unmap_addr_set(mad_send_wr, header_mapping, sge[0].addr); 1005 mad_send_wr->header_mapping = sge[0].addr;
1006 1006
1007 sge[1].addr = dma_map_single(mad_agent->device->dma_device, 1007 sge[1].addr = ib_dma_map_single(mad_agent->device,
1008 ib_get_payload(mad_send_wr), 1008 ib_get_payload(mad_send_wr),
1009 sge[1].length, 1009 sge[1].length,
1010 DMA_TO_DEVICE); 1010 DMA_TO_DEVICE);
1011 pci_unmap_addr_set(mad_send_wr, payload_mapping, sge[1].addr); 1011 mad_send_wr->payload_mapping = sge[1].addr;
1012 1012
1013 spin_lock_irqsave(&qp_info->send_queue.lock, flags); 1013 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1014 if (qp_info->send_queue.count < qp_info->send_queue.max_active) { 1014 if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
@@ -1026,12 +1026,12 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
1026 } 1026 }
1027 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); 1027 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1028 if (ret) { 1028 if (ret) {
1029 dma_unmap_single(mad_agent->device->dma_device, 1029 ib_dma_unmap_single(mad_agent->device,
1030 pci_unmap_addr(mad_send_wr, header_mapping), 1030 mad_send_wr->header_mapping,
1031 sge[0].length, DMA_TO_DEVICE); 1031 sge[0].length, DMA_TO_DEVICE);
1032 dma_unmap_single(mad_agent->device->dma_device, 1032 ib_dma_unmap_single(mad_agent->device,
1033 pci_unmap_addr(mad_send_wr, payload_mapping), 1033 mad_send_wr->payload_mapping,
1034 sge[1].length, DMA_TO_DEVICE); 1034 sge[1].length, DMA_TO_DEVICE);
1035 } 1035 }
1036 return ret; 1036 return ret;
1037} 1037}
@@ -1850,11 +1850,11 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1850 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header, 1850 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
1851 mad_list); 1851 mad_list);
1852 recv = container_of(mad_priv_hdr, struct ib_mad_private, header); 1852 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
1853 dma_unmap_single(port_priv->device->dma_device, 1853 ib_dma_unmap_single(port_priv->device,
1854 pci_unmap_addr(&recv->header, mapping), 1854 recv->header.mapping,
1855 sizeof(struct ib_mad_private) - 1855 sizeof(struct ib_mad_private) -
1856 sizeof(struct ib_mad_private_header), 1856 sizeof(struct ib_mad_private_header),
1857 DMA_FROM_DEVICE); 1857 DMA_FROM_DEVICE);
1858 1858
1859 /* Setup MAD receive work completion from "normal" work completion */ 1859 /* Setup MAD receive work completion from "normal" work completion */
1860 recv->header.wc = *wc; 1860 recv->header.wc = *wc;
@@ -2080,12 +2080,12 @@ static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
2080 qp_info = send_queue->qp_info; 2080 qp_info = send_queue->qp_info;
2081 2081
2082retry: 2082retry:
2083 dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device, 2083 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2084 pci_unmap_addr(mad_send_wr, header_mapping), 2084 mad_send_wr->header_mapping,
2085 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE); 2085 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2086 dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device, 2086 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2087 pci_unmap_addr(mad_send_wr, payload_mapping), 2087 mad_send_wr->payload_mapping,
2088 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE); 2088 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2089 queued_send_wr = NULL; 2089 queued_send_wr = NULL;
2090 spin_lock_irqsave(&send_queue->lock, flags); 2090 spin_lock_irqsave(&send_queue->lock, flags);
2091 list_del(&mad_list->list); 2091 list_del(&mad_list->list);
@@ -2528,13 +2528,12 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2528 break; 2528 break;
2529 } 2529 }
2530 } 2530 }
2531 sg_list.addr = dma_map_single(qp_info->port_priv-> 2531 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2532 device->dma_device, 2532 &mad_priv->grh,
2533 &mad_priv->grh, 2533 sizeof *mad_priv -
2534 sizeof *mad_priv - 2534 sizeof mad_priv->header,
2535 sizeof mad_priv->header, 2535 DMA_FROM_DEVICE);
2536 DMA_FROM_DEVICE); 2536 mad_priv->header.mapping = sg_list.addr;
2537 pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
2538 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list; 2537 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
2539 mad_priv->header.mad_list.mad_queue = recv_queue; 2538 mad_priv->header.mad_list.mad_queue = recv_queue;
2540 2539
@@ -2549,12 +2548,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2549 list_del(&mad_priv->header.mad_list.list); 2548 list_del(&mad_priv->header.mad_list.list);
2550 recv_queue->count--; 2549 recv_queue->count--;
2551 spin_unlock_irqrestore(&recv_queue->lock, flags); 2550 spin_unlock_irqrestore(&recv_queue->lock, flags);
2552 dma_unmap_single(qp_info->port_priv->device->dma_device, 2551 ib_dma_unmap_single(qp_info->port_priv->device,
2553 pci_unmap_addr(&mad_priv->header, 2552 mad_priv->header.mapping,
2554 mapping), 2553 sizeof *mad_priv -
2555 sizeof *mad_priv - 2554 sizeof mad_priv->header,
2556 sizeof mad_priv->header, 2555 DMA_FROM_DEVICE);
2557 DMA_FROM_DEVICE);
2558 kmem_cache_free(ib_mad_cache, mad_priv); 2556 kmem_cache_free(ib_mad_cache, mad_priv);
2559 printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret); 2557 printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
2560 break; 2558 break;
@@ -2586,11 +2584,11 @@ static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2586 /* Remove from posted receive MAD list */ 2584 /* Remove from posted receive MAD list */
2587 list_del(&mad_list->list); 2585 list_del(&mad_list->list);
2588 2586
2589 dma_unmap_single(qp_info->port_priv->device->dma_device, 2587 ib_dma_unmap_single(qp_info->port_priv->device,
2590 pci_unmap_addr(&recv->header, mapping), 2588 recv->header.mapping,
2591 sizeof(struct ib_mad_private) - 2589 sizeof(struct ib_mad_private) -
2592 sizeof(struct ib_mad_private_header), 2590 sizeof(struct ib_mad_private_header),
2593 DMA_FROM_DEVICE); 2591 DMA_FROM_DEVICE);
2594 kmem_cache_free(ib_mad_cache, recv); 2592 kmem_cache_free(ib_mad_cache, recv);
2595 } 2593 }
2596 2594
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index d5548e73e068..de89717f49fe 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -73,7 +73,7 @@ struct ib_mad_private_header {
73 struct ib_mad_list_head mad_list; 73 struct ib_mad_list_head mad_list;
74 struct ib_mad_recv_wc recv_wc; 74 struct ib_mad_recv_wc recv_wc;
75 struct ib_wc wc; 75 struct ib_wc wc;
76 DECLARE_PCI_UNMAP_ADDR(mapping) 76 u64 mapping;
77} __attribute__ ((packed)); 77} __attribute__ ((packed));
78 78
79struct ib_mad_private { 79struct ib_mad_private {
@@ -126,8 +126,8 @@ struct ib_mad_send_wr_private {
126 struct list_head agent_list; 126 struct list_head agent_list;
127 struct ib_mad_agent_private *mad_agent_priv; 127 struct ib_mad_agent_private *mad_agent_priv;
128 struct ib_mad_send_buf send_buf; 128 struct ib_mad_send_buf send_buf;
129 DECLARE_PCI_UNMAP_ADDR(header_mapping) 129 u64 header_mapping;
130 DECLARE_PCI_UNMAP_ADDR(payload_mapping) 130 u64 payload_mapping;
131 struct ib_send_wr send_wr; 131 struct ib_send_wr send_wr;
132 struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG]; 132 struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
133 __be64 tid; 133 __be64 tid;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
new file mode 100644
index 000000000000..81a5cdc5733a
--- /dev/null
+++ b/drivers/infiniband/core/ucma.c
@@ -0,0 +1,874 @@
1/*
2 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/completion.h>
34#include <linux/mutex.h>
35#include <linux/poll.h>
36#include <linux/idr.h>
37#include <linux/in.h>
38#include <linux/in6.h>
39#include <linux/miscdevice.h>
40
41#include <rdma/rdma_user_cm.h>
42#include <rdma/ib_marshall.h>
43#include <rdma/rdma_cm.h>
44
45MODULE_AUTHOR("Sean Hefty");
46MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
47MODULE_LICENSE("Dual BSD/GPL");
48
49enum {
50 UCMA_MAX_BACKLOG = 128
51};
52
53struct ucma_file {
54 struct mutex mut;
55 struct file *filp;
56 struct list_head ctx_list;
57 struct list_head event_list;
58 wait_queue_head_t poll_wait;
59};
60
61struct ucma_context {
62 int id;
63 struct completion comp;
64 atomic_t ref;
65 int events_reported;
66 int backlog;
67
68 struct ucma_file *file;
69 struct rdma_cm_id *cm_id;
70 u64 uid;
71
72 struct list_head list;
73};
74
75struct ucma_event {
76 struct ucma_context *ctx;
77 struct list_head list;
78 struct rdma_cm_id *cm_id;
79 struct rdma_ucm_event_resp resp;
80};
81
82static DEFINE_MUTEX(mut);
83static DEFINE_IDR(ctx_idr);
84
85static inline struct ucma_context *_ucma_find_context(int id,
86 struct ucma_file *file)
87{
88 struct ucma_context *ctx;
89
90 ctx = idr_find(&ctx_idr, id);
91 if (!ctx)
92 ctx = ERR_PTR(-ENOENT);
93 else if (ctx->file != file)
94 ctx = ERR_PTR(-EINVAL);
95 return ctx;
96}
97
98static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
99{
100 struct ucma_context *ctx;
101
102 mutex_lock(&mut);
103 ctx = _ucma_find_context(id, file);
104 if (!IS_ERR(ctx))
105 atomic_inc(&ctx->ref);
106 mutex_unlock(&mut);
107 return ctx;
108}
109
110static void ucma_put_ctx(struct ucma_context *ctx)
111{
112 if (atomic_dec_and_test(&ctx->ref))
113 complete(&ctx->comp);
114}
115
116static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
117{
118 struct ucma_context *ctx;
119 int ret;
120
121 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
122 if (!ctx)
123 return NULL;
124
125 atomic_set(&ctx->ref, 1);
126 init_completion(&ctx->comp);
127 ctx->file = file;
128
129 do {
130 ret = idr_pre_get(&ctx_idr, GFP_KERNEL);
131 if (!ret)
132 goto error;
133
134 mutex_lock(&mut);
135 ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
136 mutex_unlock(&mut);
137 } while (ret == -EAGAIN);
138
139 if (ret)
140 goto error;
141
142 list_add_tail(&ctx->list, &file->ctx_list);
143 return ctx;
144
145error:
146 kfree(ctx);
147 return NULL;
148}
149
150static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
151 struct rdma_conn_param *src)
152{
153 if (src->private_data_len)
154 memcpy(dst->private_data, src->private_data,
155 src->private_data_len);
156 dst->private_data_len = src->private_data_len;
157 dst->responder_resources =src->responder_resources;
158 dst->initiator_depth = src->initiator_depth;
159 dst->flow_control = src->flow_control;
160 dst->retry_count = src->retry_count;
161 dst->rnr_retry_count = src->rnr_retry_count;
162 dst->srq = src->srq;
163 dst->qp_num = src->qp_num;
164}
165
166static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
167 struct rdma_ud_param *src)
168{
169 if (src->private_data_len)
170 memcpy(dst->private_data, src->private_data,
171 src->private_data_len);
172 dst->private_data_len = src->private_data_len;
173 ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
174 dst->qp_num = src->qp_num;
175 dst->qkey = src->qkey;
176}
177
178static void ucma_set_event_context(struct ucma_context *ctx,
179 struct rdma_cm_event *event,
180 struct ucma_event *uevent)
181{
182 uevent->ctx = ctx;
183 uevent->resp.uid = ctx->uid;
184 uevent->resp.id = ctx->id;
185}
186
187static int ucma_event_handler(struct rdma_cm_id *cm_id,
188 struct rdma_cm_event *event)
189{
190 struct ucma_event *uevent;
191 struct ucma_context *ctx = cm_id->context;
192 int ret = 0;
193
194 uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
195 if (!uevent)
196 return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;
197
198 uevent->cm_id = cm_id;
199 ucma_set_event_context(ctx, event, uevent);
200 uevent->resp.event = event->event;
201 uevent->resp.status = event->status;
202 if (cm_id->ps == RDMA_PS_UDP)
203 ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
204 else
205 ucma_copy_conn_event(&uevent->resp.param.conn,
206 &event->param.conn);
207
208 mutex_lock(&ctx->file->mut);
209 if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
210 if (!ctx->backlog) {
211 ret = -EDQUOT;
212 goto out;
213 }
214 ctx->backlog--;
215 }
216 list_add_tail(&uevent->list, &ctx->file->event_list);
217 wake_up_interruptible(&ctx->file->poll_wait);
218out:
219 mutex_unlock(&ctx->file->mut);
220 return ret;
221}
222
223static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
224 int in_len, int out_len)
225{
226 struct ucma_context *ctx;
227 struct rdma_ucm_get_event cmd;
228 struct ucma_event *uevent;
229 int ret = 0;
230 DEFINE_WAIT(wait);
231
232 if (out_len < sizeof uevent->resp)
233 return -ENOSPC;
234
235 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
236 return -EFAULT;
237
238 mutex_lock(&file->mut);
239 while (list_empty(&file->event_list)) {
240 if (file->filp->f_flags & O_NONBLOCK) {
241 ret = -EAGAIN;
242 break;
243 }
244
245 if (signal_pending(current)) {
246 ret = -ERESTARTSYS;
247 break;
248 }
249
250 prepare_to_wait(&file->poll_wait, &wait, TASK_INTERRUPTIBLE);
251 mutex_unlock(&file->mut);
252 schedule();
253 mutex_lock(&file->mut);
254 finish_wait(&file->poll_wait, &wait);
255 }
256
257 if (ret)
258 goto done;
259
260 uevent = list_entry(file->event_list.next, struct ucma_event, list);
261
262 if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
263 ctx = ucma_alloc_ctx(file);
264 if (!ctx) {
265 ret = -ENOMEM;
266 goto done;
267 }
268 uevent->ctx->backlog++;
269 ctx->cm_id = uevent->cm_id;
270 ctx->cm_id->context = ctx;
271 uevent->resp.id = ctx->id;
272 }
273
274 if (copy_to_user((void __user *)(unsigned long)cmd.response,
275 &uevent->resp, sizeof uevent->resp)) {
276 ret = -EFAULT;
277 goto done;
278 }
279
280 list_del(&uevent->list);
281 uevent->ctx->events_reported++;
282 kfree(uevent);
283done:
284 mutex_unlock(&file->mut);
285 return ret;
286}
287
288static ssize_t ucma_create_id(struct ucma_file *file,
289 const char __user *inbuf,
290 int in_len, int out_len)
291{
292 struct rdma_ucm_create_id cmd;
293 struct rdma_ucm_create_id_resp resp;
294 struct ucma_context *ctx;
295 int ret;
296
297 if (out_len < sizeof(resp))
298 return -ENOSPC;
299
300 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
301 return -EFAULT;
302
303 mutex_lock(&file->mut);
304 ctx = ucma_alloc_ctx(file);
305 mutex_unlock(&file->mut);
306 if (!ctx)
307 return -ENOMEM;
308
309 ctx->uid = cmd.uid;
310 ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps);
311 if (IS_ERR(ctx->cm_id)) {
312 ret = PTR_ERR(ctx->cm_id);
313 goto err1;
314 }
315
316 resp.id = ctx->id;
317 if (copy_to_user((void __user *)(unsigned long)cmd.response,
318 &resp, sizeof(resp))) {
319 ret = -EFAULT;
320 goto err2;
321 }
322 return 0;
323
324err2:
325 rdma_destroy_id(ctx->cm_id);
326err1:
327 mutex_lock(&mut);
328 idr_remove(&ctx_idr, ctx->id);
329 mutex_unlock(&mut);
330 kfree(ctx);
331 return ret;
332}
333
334static void ucma_cleanup_events(struct ucma_context *ctx)
335{
336 struct ucma_event *uevent, *tmp;
337
338 list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
339 if (uevent->ctx != ctx)
340 continue;
341
342 list_del(&uevent->list);
343
344 /* clear incoming connections. */
345 if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
346 rdma_destroy_id(uevent->cm_id);
347
348 kfree(uevent);
349 }
350}
351
352static int ucma_free_ctx(struct ucma_context *ctx)
353{
354 int events_reported;
355
356 /* No new events will be generated after destroying the id. */
357 rdma_destroy_id(ctx->cm_id);
358
359 /* Cleanup events not yet reported to the user. */
360 mutex_lock(&ctx->file->mut);
361 ucma_cleanup_events(ctx);
362 list_del(&ctx->list);
363 mutex_unlock(&ctx->file->mut);
364
365 events_reported = ctx->events_reported;
366 kfree(ctx);
367 return events_reported;
368}
369
370static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
371 int in_len, int out_len)
372{
373 struct rdma_ucm_destroy_id cmd;
374 struct rdma_ucm_destroy_id_resp resp;
375 struct ucma_context *ctx;
376 int ret = 0;
377
378 if (out_len < sizeof(resp))
379 return -ENOSPC;
380
381 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
382 return -EFAULT;
383
384 mutex_lock(&mut);
385 ctx = _ucma_find_context(cmd.id, file);
386 if (!IS_ERR(ctx))
387 idr_remove(&ctx_idr, ctx->id);
388 mutex_unlock(&mut);
389
390 if (IS_ERR(ctx))
391 return PTR_ERR(ctx);
392
393 ucma_put_ctx(ctx);
394 wait_for_completion(&ctx->comp);
395 resp.events_reported = ucma_free_ctx(ctx);
396
397 if (copy_to_user((void __user *)(unsigned long)cmd.response,
398 &resp, sizeof(resp)))
399 ret = -EFAULT;
400
401 return ret;
402}
403
404static ssize_t ucma_bind_addr(struct ucma_file *file, const char __user *inbuf,
405 int in_len, int out_len)
406{
407 struct rdma_ucm_bind_addr cmd;
408 struct ucma_context *ctx;
409 int ret;
410
411 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
412 return -EFAULT;
413
414 ctx = ucma_get_ctx(file, cmd.id);
415 if (IS_ERR(ctx))
416 return PTR_ERR(ctx);
417
418 ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
419 ucma_put_ctx(ctx);
420 return ret;
421}
422
423static ssize_t ucma_resolve_addr(struct ucma_file *file,
424 const char __user *inbuf,
425 int in_len, int out_len)
426{
427 struct rdma_ucm_resolve_addr cmd;
428 struct ucma_context *ctx;
429 int ret;
430
431 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
432 return -EFAULT;
433
434 ctx = ucma_get_ctx(file, cmd.id);
435 if (IS_ERR(ctx))
436 return PTR_ERR(ctx);
437
438 ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
439 (struct sockaddr *) &cmd.dst_addr,
440 cmd.timeout_ms);
441 ucma_put_ctx(ctx);
442 return ret;
443}
444
445static ssize_t ucma_resolve_route(struct ucma_file *file,
446 const char __user *inbuf,
447 int in_len, int out_len)
448{
449 struct rdma_ucm_resolve_route cmd;
450 struct ucma_context *ctx;
451 int ret;
452
453 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
454 return -EFAULT;
455
456 ctx = ucma_get_ctx(file, cmd.id);
457 if (IS_ERR(ctx))
458 return PTR_ERR(ctx);
459
460 ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
461 ucma_put_ctx(ctx);
462 return ret;
463}
464
465static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
466 struct rdma_route *route)
467{
468 struct rdma_dev_addr *dev_addr;
469
470 resp->num_paths = route->num_paths;
471 switch (route->num_paths) {
472 case 0:
473 dev_addr = &route->addr.dev_addr;
474 ib_addr_get_dgid(dev_addr,
475 (union ib_gid *) &resp->ib_route[0].dgid);
476 ib_addr_get_sgid(dev_addr,
477 (union ib_gid *) &resp->ib_route[0].sgid);
478 resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
479 break;
480 case 2:
481 ib_copy_path_rec_to_user(&resp->ib_route[1],
482 &route->path_rec[1]);
483 /* fall through */
484 case 1:
485 ib_copy_path_rec_to_user(&resp->ib_route[0],
486 &route->path_rec[0]);
487 break;
488 default:
489 break;
490 }
491}
492
493static ssize_t ucma_query_route(struct ucma_file *file,
494 const char __user *inbuf,
495 int in_len, int out_len)
496{
497 struct rdma_ucm_query_route cmd;
498 struct rdma_ucm_query_route_resp resp;
499 struct ucma_context *ctx;
500 struct sockaddr *addr;
501 int ret = 0;
502
503 if (out_len < sizeof(resp))
504 return -ENOSPC;
505
506 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
507 return -EFAULT;
508
509 ctx = ucma_get_ctx(file, cmd.id);
510 if (IS_ERR(ctx))
511 return PTR_ERR(ctx);
512
513 memset(&resp, 0, sizeof resp);
514 addr = &ctx->cm_id->route.addr.src_addr;
515 memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
516 sizeof(struct sockaddr_in) :
517 sizeof(struct sockaddr_in6));
518 addr = &ctx->cm_id->route.addr.dst_addr;
519 memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
520 sizeof(struct sockaddr_in) :
521 sizeof(struct sockaddr_in6));
522 if (!ctx->cm_id->device)
523 goto out;
524
525 resp.node_guid = ctx->cm_id->device->node_guid;
526 resp.port_num = ctx->cm_id->port_num;
527 switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
528 case RDMA_TRANSPORT_IB:
529 ucma_copy_ib_route(&resp, &ctx->cm_id->route);
530 break;
531 default:
532 break;
533 }
534
535out:
536 if (copy_to_user((void __user *)(unsigned long)cmd.response,
537 &resp, sizeof(resp)))
538 ret = -EFAULT;
539
540 ucma_put_ctx(ctx);
541 return ret;
542}
543
544static void ucma_copy_conn_param(struct rdma_conn_param *dst,
545 struct rdma_ucm_conn_param *src)
546{
547 dst->private_data = src->private_data;
548 dst->private_data_len = src->private_data_len;
549 dst->responder_resources =src->responder_resources;
550 dst->initiator_depth = src->initiator_depth;
551 dst->flow_control = src->flow_control;
552 dst->retry_count = src->retry_count;
553 dst->rnr_retry_count = src->rnr_retry_count;
554 dst->srq = src->srq;
555 dst->qp_num = src->qp_num;
556}
557
558static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
559 int in_len, int out_len)
560{
561 struct rdma_ucm_connect cmd;
562 struct rdma_conn_param conn_param;
563 struct ucma_context *ctx;
564 int ret;
565
566 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
567 return -EFAULT;
568
569 if (!cmd.conn_param.valid)
570 return -EINVAL;
571
572 ctx = ucma_get_ctx(file, cmd.id);
573 if (IS_ERR(ctx))
574 return PTR_ERR(ctx);
575
576 ucma_copy_conn_param(&conn_param, &cmd.conn_param);
577 ret = rdma_connect(ctx->cm_id, &conn_param);
578 ucma_put_ctx(ctx);
579 return ret;
580}
581
/*
 * LISTEN command handler: put the context's rdma_cm_id into listening
 * state.  The user-supplied backlog is clamped into (0, UCMA_MAX_BACKLOG)
 * before use, so userspace cannot request an unbounded backlog.
 */
582static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
583			   int in_len, int out_len)
584{
585	struct rdma_ucm_listen cmd;
586	struct ucma_context *ctx;
587	int ret;
588
589	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
590		return -EFAULT;
591
592	ctx = ucma_get_ctx(file, cmd.id);
593	if (IS_ERR(ctx))
594		return PTR_ERR(ctx);
595
	/* Out-of-range (including 0) requests fall back to the maximum. */
596	ctx->backlog = cmd.backlog > 0 && cmd.backlog < UCMA_MAX_BACKLOG ?
597		       cmd.backlog : UCMA_MAX_BACKLOG;
598	ret = rdma_listen(ctx->cm_id, ctx->backlog);
599	ucma_put_ctx(ctx);
600	return ret;
601}
602
/*
 * ACCEPT command handler: accept an incoming connection request on the
 * context's rdma_cm_id.  Connection parameters are optional; when they
 * are supplied the context's uid is also updated from the command.
 */
603static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
604			   int in_len, int out_len)
605{
606	struct rdma_ucm_accept cmd;
607	struct rdma_conn_param conn_param;
608	struct ucma_context *ctx;
609	int ret;
610
611	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
612		return -EFAULT;
613
614	ctx = ucma_get_ctx(file, cmd.id);
615	if (IS_ERR(ctx))
616		return PTR_ERR(ctx);
617
	/* With parameters: record the caller's uid and accept with them;
	 * without: accept using the defaults already on the cm_id. */
618	if (cmd.conn_param.valid) {
619		ctx->uid = cmd.uid;
620		ucma_copy_conn_param(&conn_param, &cmd.conn_param);
621		ret = rdma_accept(ctx->cm_id, &conn_param);
622	} else
623		ret = rdma_accept(ctx->cm_id, NULL);
624
625	ucma_put_ctx(ctx);
626	return ret;
627}
628
629static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
630 int in_len, int out_len)
631{
632 struct rdma_ucm_reject cmd;
633 struct ucma_context *ctx;
634 int ret;
635
636 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
637 return -EFAULT;
638
639 ctx = ucma_get_ctx(file, cmd.id);
640 if (IS_ERR(ctx))
641 return PTR_ERR(ctx);
642
643 ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
644 ucma_put_ctx(ctx);
645 return ret;
646}
647
648static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
649 int in_len, int out_len)
650{
651 struct rdma_ucm_disconnect cmd;
652 struct ucma_context *ctx;
653 int ret;
654
655 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
656 return -EFAULT;
657
658 ctx = ucma_get_ctx(file, cmd.id);
659 if (IS_ERR(ctx))
660 return PTR_ERR(ctx);
661
662 ret = rdma_disconnect(ctx->cm_id);
663 ucma_put_ctx(ctx);
664 return ret;
665}
666
667static ssize_t ucma_init_qp_attr(struct ucma_file *file,
668 const char __user *inbuf,
669 int in_len, int out_len)
670{
671 struct rdma_ucm_init_qp_attr cmd;
672 struct ib_uverbs_qp_attr resp;
673 struct ucma_context *ctx;
674 struct ib_qp_attr qp_attr;
675 int ret;
676
677 if (out_len < sizeof(resp))
678 return -ENOSPC;
679
680 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
681 return -EFAULT;
682
683 ctx = ucma_get_ctx(file, cmd.id);
684 if (IS_ERR(ctx))
685 return PTR_ERR(ctx);
686
687 resp.qp_attr_mask = 0;
688 memset(&qp_attr, 0, sizeof qp_attr);
689 qp_attr.qp_state = cmd.qp_state;
690 ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
691 if (ret)
692 goto out;
693
694 ib_copy_qp_attr_to_user(&resp, &qp_attr);
695 if (copy_to_user((void __user *)(unsigned long)cmd.response,
696 &resp, sizeof(resp)))
697 ret = -EFAULT;
698
699out:
700 ucma_put_ctx(ctx);
701 return ret;
702}
703
704static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
705 int in_len, int out_len)
706{
707 struct rdma_ucm_notify cmd;
708 struct ucma_context *ctx;
709 int ret;
710
711 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
712 return -EFAULT;
713
714 ctx = ucma_get_ctx(file, cmd.id);
715 if (IS_ERR(ctx))
716 return PTR_ERR(ctx);
717
718 ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
719 ucma_put_ctx(ctx);
720 return ret;
721}
722
/*
 * Dispatch table for the write() command interface, indexed by the
 * hdr.cmd opcode validated in ucma_write().  NULL slots are opcodes
 * that are defined in the ABI but not yet implemented; ucma_write()
 * returns -ENOSYS for them.
 */
723static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
724				   const char __user *inbuf,
725				   int in_len, int out_len) = {
726	[RDMA_USER_CM_CMD_CREATE_ID]	= ucma_create_id,
727	[RDMA_USER_CM_CMD_DESTROY_ID]	= ucma_destroy_id,
728	[RDMA_USER_CM_CMD_BIND_ADDR]	= ucma_bind_addr,
729	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	= ucma_resolve_addr,
730	[RDMA_USER_CM_CMD_RESOLVE_ROUTE]= ucma_resolve_route,
731	[RDMA_USER_CM_CMD_QUERY_ROUTE]	= ucma_query_route,
732	[RDMA_USER_CM_CMD_CONNECT]	= ucma_connect,
733	[RDMA_USER_CM_CMD_LISTEN]	= ucma_listen,
734	[RDMA_USER_CM_CMD_ACCEPT]	= ucma_accept,
735	[RDMA_USER_CM_CMD_REJECT]	= ucma_reject,
736	[RDMA_USER_CM_CMD_DISCONNECT]	= ucma_disconnect,
737	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	= ucma_init_qp_attr,
738	[RDMA_USER_CM_CMD_GET_EVENT]	= ucma_get_event,
739	[RDMA_USER_CM_CMD_GET_OPTION]	= NULL,
740	[RDMA_USER_CM_CMD_SET_OPTION]	= NULL,
741	[RDMA_USER_CM_CMD_NOTIFY]	= ucma_notify,
742};
743
/*
 * write() entry point for /dev/infiniband/rdma_cm.  Every command is a
 * fixed header followed by an opcode-specific payload; the header is
 * validated here and the payload is handed to the matching handler in
 * ucma_cmd_table[].  On handler success the whole write length is
 * consumed; otherwise the handler's errno is returned.
 */
744static ssize_t ucma_write(struct file *filp, const char __user *buf,
745			  size_t len, loff_t *pos)
746{
747	struct ucma_file *file = filp->private_data;
748	struct rdma_ucm_cmd_hdr hdr;
749	ssize_t ret;
750
	/* Must contain at least a complete command header. */
751	if (len < sizeof(hdr))
752		return -EINVAL;
753
754	if (copy_from_user(&hdr, buf, sizeof(hdr)))
755		return -EFAULT;
756
	/* Reject opcodes outside the dispatch table before indexing it. */
757	if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
758		return -EINVAL;
759
	/* The declared payload must fit within the bytes actually written. */
760	if (hdr.in + sizeof(hdr) > len)
761		return -EINVAL;
762
	/* NULL slot: ABI-defined but unimplemented command. */
763	if (!ucma_cmd_table[hdr.cmd])
764		return -ENOSYS;
765
766	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
767	if (!ret)
768		ret = len;
769
770	return ret;
771}
772
773static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
774{
775 struct ucma_file *file = filp->private_data;
776 unsigned int mask = 0;
777
778 poll_wait(filp, &file->poll_wait, wait);
779
780 if (!list_empty(&file->event_list))
781 mask = POLLIN | POLLRDNORM;
782
783 return mask;
784}
785
786static int ucma_open(struct inode *inode, struct file *filp)
787{
788 struct ucma_file *file;
789
790 file = kmalloc(sizeof *file, GFP_KERNEL);
791 if (!file)
792 return -ENOMEM;
793
794 INIT_LIST_HEAD(&file->event_list);
795 INIT_LIST_HEAD(&file->ctx_list);
796 init_waitqueue_head(&file->poll_wait);
797 mutex_init(&file->mut);
798
799 filp->private_data = file;
800 file->filp = filp;
801 return 0;
802}
803
/*
 * release() entry point: tear down every context still owned by this
 * open file, then free the per-file state.
 */
804static int ucma_close(struct inode *inode, struct file *filp)
805{
806	struct ucma_file *file = filp->private_data;
807	struct ucma_context *ctx, *tmp;
808
809	mutex_lock(&file->mut);
810	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		/* NOTE(review): file->mut is dropped around each teardown —
		 * presumably because ucma_free_ctx() can sleep and/or takes
		 * file->mut itself (its body is not visible here); confirm
		 * against ucma_free_ctx() before changing this ordering. */
811		mutex_unlock(&file->mut);
812
		/* Remove the id under the global idr lock so no new lookups
		 * can find this context while it is being freed. */
813		mutex_lock(&mut);
814		idr_remove(&ctx_idr, ctx->id);
815		mutex_unlock(&mut);
816
817		ucma_free_ctx(ctx);
818		mutex_lock(&file->mut);
819	}
820	mutex_unlock(&file->mut);
821	kfree(file);
822	return 0;
823}
824
/*
 * File operations for the rdma_cm misc device.  Commands are issued via
 * write(); completions/events are retrieved through the GET_EVENT
 * command and readiness is signalled via poll().
 */
825static struct file_operations ucma_fops = {
826	.owner 	 = THIS_MODULE,
827	.open 	 = ucma_open,
828	.release = ucma_close,
829	.write	 = ucma_write,
830	.poll    = ucma_poll,
831};
832
/* Misc character device "rdma_cm" with a dynamically assigned minor. */
833static struct miscdevice ucma_misc = {
834	.minor	= MISC_DYNAMIC_MINOR,
835	.name	= "rdma_cm",
836	.fops	= &ucma_fops,
837};
838
/*
 * sysfs "abi_version" attribute (read-only): lets userspace librdmacm
 * verify it speaks the same command ABI as the kernel module.
 */
839static ssize_t show_abi_version(struct device *dev,
840				struct device_attribute *attr,
841				char *buf)
842{
843	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
844}
845static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
846
/*
 * Module init: register the misc device, then expose the ABI version
 * attribute.  On attribute failure the misc device is unregistered
 * again so the module loads all-or-nothing.
 */
847static int __init ucma_init(void)
848{
849	int ret;
850
851	ret = misc_register(&ucma_misc);
852	if (ret)
853		return ret;
854
855	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
856	if (ret) {
857		printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
858		goto err;
859	}
860	return 0;
861err:
862	misc_deregister(&ucma_misc);
863	return ret;
864}
865
/*
 * Module exit: undo ucma_init() in reverse order, then release the idr
 * bookkeeping.  By this point all files are closed, so the idr should
 * already be empty; idr_destroy() frees its internal caches.
 */
866static void __exit ucma_cleanup(void)
867{
868	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
869	misc_deregister(&ucma_misc);
870	idr_destroy(&ctx_idr);
871}
872
873module_init(ucma_init);
874module_exit(ucma_cleanup);
diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
index ce46b13ae02b..5440da0e59b4 100644
--- a/drivers/infiniband/core/uverbs_marshall.c
+++ b/drivers/infiniband/core/uverbs_marshall.c
@@ -32,8 +32,8 @@
32 32
33#include <rdma/ib_marshall.h> 33#include <rdma/ib_marshall.h>
34 34
35static void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst, 35void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
36 struct ib_ah_attr *src) 36 struct ib_ah_attr *src)
37{ 37{
38 memcpy(dst->grh.dgid, src->grh.dgid.raw, sizeof src->grh.dgid); 38 memcpy(dst->grh.dgid, src->grh.dgid.raw, sizeof src->grh.dgid);
39 dst->grh.flow_label = src->grh.flow_label; 39 dst->grh.flow_label = src->grh.flow_label;
@@ -47,6 +47,7 @@ static void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
47 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0; 47 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
48 dst->port_num = src->port_num; 48 dst->port_num = src->port_num;
49} 49}
50EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
50 51
51void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst, 52void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
52 struct ib_qp_attr *src) 53 struct ib_qp_attr *src)
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c
index db12cc0841df..c95fe952abd5 100644
--- a/drivers/infiniband/core/uverbs_mem.c
+++ b/drivers/infiniband/core/uverbs_mem.c
@@ -52,8 +52,8 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
52 int i; 52 int i;
53 53
54 list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) { 54 list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
55 dma_unmap_sg(dev->dma_device, chunk->page_list, 55 ib_dma_unmap_sg(dev, chunk->page_list,
56 chunk->nents, DMA_BIDIRECTIONAL); 56 chunk->nents, DMA_BIDIRECTIONAL);
57 for (i = 0; i < chunk->nents; ++i) { 57 for (i = 0; i < chunk->nents; ++i) {
58 if (umem->writable && dirty) 58 if (umem->writable && dirty)
59 set_page_dirty_lock(chunk->page_list[i].page); 59 set_page_dirty_lock(chunk->page_list[i].page);
@@ -136,10 +136,10 @@ int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
136 chunk->page_list[i].length = PAGE_SIZE; 136 chunk->page_list[i].length = PAGE_SIZE;
137 } 137 }
138 138
139 chunk->nmap = dma_map_sg(dev->dma_device, 139 chunk->nmap = ib_dma_map_sg(dev,
140 &chunk->page_list[0], 140 &chunk->page_list[0],
141 chunk->nents, 141 chunk->nents,
142 DMA_BIDIRECTIONAL); 142 DMA_BIDIRECTIONAL);
143 if (chunk->nmap <= 0) { 143 if (chunk->nmap <= 0) {
144 for (i = 0; i < chunk->nents; ++i) 144 for (i = 0; i < chunk->nents; ++i)
145 put_page(chunk->page_list[i].page); 145 put_page(chunk->page_list[i].page);
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c
index 179d005ed4a5..420c1380f5c3 100644
--- a/drivers/infiniband/hw/amso1100/c2_qp.c
+++ b/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -161,8 +161,10 @@ int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
161 161
162 if (attr_mask & IB_QP_STATE) { 162 if (attr_mask & IB_QP_STATE) {
163 /* Ensure the state is valid */ 163 /* Ensure the state is valid */
164 if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR) 164 if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR) {
165 return -EINVAL; 165 err = -EINVAL;
166 goto bail0;
167 }
166 168
167 wr.next_qp_state = cpu_to_be32(to_c2_state(attr->qp_state)); 169 wr.next_qp_state = cpu_to_be32(to_c2_state(attr->qp_state));
168 170
@@ -184,9 +186,10 @@ int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
184 if (attr->cur_qp_state != IB_QPS_RTR && 186 if (attr->cur_qp_state != IB_QPS_RTR &&
185 attr->cur_qp_state != IB_QPS_RTS && 187 attr->cur_qp_state != IB_QPS_RTS &&
186 attr->cur_qp_state != IB_QPS_SQD && 188 attr->cur_qp_state != IB_QPS_SQD &&
187 attr->cur_qp_state != IB_QPS_SQE) 189 attr->cur_qp_state != IB_QPS_SQE) {
188 return -EINVAL; 190 err = -EINVAL;
189 else 191 goto bail0;
192 } else
190 wr.next_qp_state = 193 wr.next_qp_state =
191 cpu_to_be32(to_c2_state(attr->cur_qp_state)); 194 cpu_to_be32(to_c2_state(attr->cur_qp_state));
192 195
diff --git a/drivers/infiniband/hw/ipath/Makefile b/drivers/infiniband/hw/ipath/Makefile
index 7dc10551cf18..ec2e603ea241 100644
--- a/drivers/infiniband/hw/ipath/Makefile
+++ b/drivers/infiniband/hw/ipath/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_INFINIBAND_IPATH) += ib_ipath.o
6ib_ipath-y := \ 6ib_ipath-y := \
7 ipath_cq.o \ 7 ipath_cq.o \
8 ipath_diag.o \ 8 ipath_diag.o \
9 ipath_dma.o \
9 ipath_driver.o \ 10 ipath_driver.o \
10 ipath_eeprom.o \ 11 ipath_eeprom.o \
11 ipath_file_ops.o \ 12 ipath_file_ops.o \
diff --git a/drivers/infiniband/hw/ipath/ipath_dma.c b/drivers/infiniband/hw/ipath/ipath_dma.c
new file mode 100644
index 000000000000..6e0f2b8918ce
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_dma.c
@@ -0,0 +1,189 @@
1/*
2 * Copyright (c) 2006 QLogic, Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <rdma/ib_verbs.h>
34
35#include "ipath_verbs.h"
36
37#define BAD_DMA_ADDRESS ((u64) 0)
38
39/*
40 * The following functions implement driver specific replacements
41 * for the ib_dma_*() functions.
42 *
43 * These functions return kernel virtual addresses instead of
44 * device bus addresses since the driver uses the CPU to copy
45 * data instead of using hardware DMA.
46 */
47
48static int ipath_mapping_error(struct ib_device *dev, u64 dma_addr)
49{
50 return dma_addr == BAD_DMA_ADDRESS;
51}
52
53static u64 ipath_dma_map_single(struct ib_device *dev,
54 void *cpu_addr, size_t size,
55 enum dma_data_direction direction)
56{
57 BUG_ON(!valid_dma_direction(direction));
58 return (u64) cpu_addr;
59}
60
61static void ipath_dma_unmap_single(struct ib_device *dev,
62 u64 addr, size_t size,
63 enum dma_data_direction direction)
64{
65 BUG_ON(!valid_dma_direction(direction));
66}
67
68static u64 ipath_dma_map_page(struct ib_device *dev,
69 struct page *page,
70 unsigned long offset,
71 size_t size,
72 enum dma_data_direction direction)
73{
74 u64 addr;
75
76 BUG_ON(!valid_dma_direction(direction));
77
78 if (offset + size > PAGE_SIZE) {
79 addr = BAD_DMA_ADDRESS;
80 goto done;
81 }
82
83 addr = (u64) page_address(page);
84 if (addr)
85 addr += offset;
86 /* TODO: handle highmem pages */
87
88done:
89 return addr;
90}
91
92static void ipath_dma_unmap_page(struct ib_device *dev,
93 u64 addr, size_t size,
94 enum dma_data_direction direction)
95{
96 BUG_ON(!valid_dma_direction(direction));
97}
98
99int ipath_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents,
100 enum dma_data_direction direction)
101{
102 u64 addr;
103 int i;
104 int ret = nents;
105
106 BUG_ON(!valid_dma_direction(direction));
107
108 for (i = 0; i < nents; i++) {
109 addr = (u64) page_address(sg[i].page);
110 /* TODO: handle highmem pages */
111 if (!addr) {
112 ret = 0;
113 break;
114 }
115 }
116 return ret;
117}
118
119static void ipath_unmap_sg(struct ib_device *dev,
120 struct scatterlist *sg, int nents,
121 enum dma_data_direction direction)
122{
123 BUG_ON(!valid_dma_direction(direction));
124}
125
126static u64 ipath_sg_dma_address(struct ib_device *dev, struct scatterlist *sg)
127{
128 u64 addr = (u64) page_address(sg->page);
129
130 if (addr)
131 addr += sg->offset;
132 return addr;
133}
134
135static unsigned int ipath_sg_dma_len(struct ib_device *dev,
136 struct scatterlist *sg)
137{
138 return sg->length;
139}
140
141static void ipath_sync_single_for_cpu(struct ib_device *dev,
142 u64 addr,
143 size_t size,
144 enum dma_data_direction dir)
145{
146}
147
148static void ipath_sync_single_for_device(struct ib_device *dev,
149 u64 addr,
150 size_t size,
151 enum dma_data_direction dir)
152{
153}
154
155static void *ipath_dma_alloc_coherent(struct ib_device *dev, size_t size,
156 u64 *dma_handle, gfp_t flag)
157{
158 struct page *p;
159 void *addr = NULL;
160
161 p = alloc_pages(flag, get_order(size));
162 if (p)
163 addr = page_address(p);
164 if (dma_handle)
165 *dma_handle = (u64) addr;
166 return addr;
167}
168
169static void ipath_dma_free_coherent(struct ib_device *dev, size_t size,
170 void *cpu_addr, dma_addr_t dma_handle)
171{
172 free_pages((unsigned long) cpu_addr, get_order(size));
173}
174
175struct ib_dma_mapping_ops ipath_dma_mapping_ops = {
176 ipath_mapping_error,
177 ipath_dma_map_single,
178 ipath_dma_unmap_single,
179 ipath_dma_map_page,
180 ipath_dma_unmap_page,
181 ipath_map_sg,
182 ipath_unmap_sg,
183 ipath_sg_dma_address,
184 ipath_sg_dma_len,
185 ipath_sync_single_for_cpu,
186 ipath_sync_single_for_device,
187 ipath_dma_alloc_coherent,
188 ipath_dma_free_coherent
189};
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 1aeddb48e355..ae7f21a0cdc0 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -1825,8 +1825,6 @@ void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno,
1825 */ 1825 */
1826void ipath_shutdown_device(struct ipath_devdata *dd) 1826void ipath_shutdown_device(struct ipath_devdata *dd)
1827{ 1827{
1828 u64 val;
1829
1830 ipath_dbg("Shutting down the device\n"); 1828 ipath_dbg("Shutting down the device\n");
1831 1829
1832 dd->ipath_flags |= IPATH_LINKUNK; 1830 dd->ipath_flags |= IPATH_LINKUNK;
@@ -1849,7 +1847,7 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
1849 */ 1847 */
1850 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0ULL); 1848 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0ULL);
1851 /* flush it */ 1849 /* flush it */
1852 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 1850 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1853 /* 1851 /*
1854 * enough for anything that's going to trickle out to have actually 1852 * enough for anything that's going to trickle out to have actually
1855 * done so. 1853 * done so.
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 340f27e3ebff..b932bcb67a5e 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -699,7 +699,6 @@ static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
699 int start_stop) 699 int start_stop)
700{ 700{
701 struct ipath_devdata *dd = pd->port_dd; 701 struct ipath_devdata *dd = pd->port_dd;
702 u64 tval;
703 702
704 ipath_cdbg(PROC, "%sabling rcv for unit %u port %u:%u\n", 703 ipath_cdbg(PROC, "%sabling rcv for unit %u port %u:%u\n",
705 start_stop ? "en" : "dis", dd->ipath_unit, 704 start_stop ? "en" : "dis", dd->ipath_unit,
@@ -729,7 +728,7 @@ static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
729 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 728 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
730 dd->ipath_rcvctrl); 729 dd->ipath_rcvctrl);
731 /* now be sure chip saw it before we return */ 730 /* now be sure chip saw it before we return */
732 tval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 731 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
733 if (start_stop) { 732 if (start_stop) {
734 /* 733 /*
735 * And try to be sure that tail reg update has happened too. 734 * And try to be sure that tail reg update has happened too.
@@ -738,7 +737,7 @@ static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
738 * in memory copy, since we could overwrite an update by the 737 * in memory copy, since we could overwrite an update by the
739 * chip if we did. 738 * chip if we did.
740 */ 739 */
741 tval = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port); 740 ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
742 } 741 }
743 /* always; new head should be equal to new tail; see above */ 742 /* always; new head should be equal to new tail; see above */
744bail: 743bail:
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6110.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c
index e57c7a351cb5..7468477ba837 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6110.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c
@@ -1447,7 +1447,7 @@ static void ipath_ht_tidtemplate(struct ipath_devdata *dd)
1447static int ipath_ht_early_init(struct ipath_devdata *dd) 1447static int ipath_ht_early_init(struct ipath_devdata *dd)
1448{ 1448{
1449 u32 __iomem *piobuf; 1449 u32 __iomem *piobuf;
1450 u32 pioincr, val32, egrsize; 1450 u32 pioincr, val32;
1451 int i; 1451 int i;
1452 1452
1453 /* 1453 /*
@@ -1467,7 +1467,6 @@ static int ipath_ht_early_init(struct ipath_devdata *dd)
1467 * errors interrupts if we ever see one). 1467 * errors interrupts if we ever see one).
1468 */ 1468 */
1469 dd->ipath_rcvegrbufsize = dd->ipath_piosize2k; 1469 dd->ipath_rcvegrbufsize = dd->ipath_piosize2k;
1470 egrsize = dd->ipath_rcvegrbufsize;
1471 1470
1472 /* 1471 /*
1473 * the min() check here is currently a nop, but it may not 1472 * the min() check here is currently a nop, but it may not
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c
index 6af89683f710..ae8bf9950c6d 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6120.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c
@@ -602,7 +602,7 @@ static void ipath_pe_init_hwerrors(struct ipath_devdata *dd)
602 */ 602 */
603static int ipath_pe_bringup_serdes(struct ipath_devdata *dd) 603static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
604{ 604{
605 u64 val, tmp, config1, prev_val; 605 u64 val, config1, prev_val;
606 int ret = 0; 606 int ret = 0;
607 607
608 ipath_dbg("Trying to bringup serdes\n"); 608 ipath_dbg("Trying to bringup serdes\n");
@@ -633,7 +633,7 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
633 | INFINIPATH_SERDC0_L1PWR_DN; 633 | INFINIPATH_SERDC0_L1PWR_DN;
634 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); 634 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
635 /* be sure chip saw it */ 635 /* be sure chip saw it */
636 tmp = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 636 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
637 udelay(5); /* need pll reset set at least for a bit */ 637 udelay(5); /* need pll reset set at least for a bit */
638 /* 638 /*
639 * after PLL is reset, set the per-lane Resets and TxIdle and 639 * after PLL is reset, set the per-lane Resets and TxIdle and
@@ -647,7 +647,7 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
647 "and txidle (%llx)\n", (unsigned long long) val); 647 "and txidle (%llx)\n", (unsigned long long) val);
648 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); 648 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
649 /* be sure chip saw it */ 649 /* be sure chip saw it */
650 tmp = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 650 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
651 /* need PLL reset clear for at least 11 usec before lane 651 /* need PLL reset clear for at least 11 usec before lane
652 * resets cleared; give it a few more to be sure */ 652 * resets cleared; give it a few more to be sure */
653 udelay(15); 653 udelay(15);
@@ -851,12 +851,12 @@ static int ipath_setup_pe_config(struct ipath_devdata *dd,
851 int pos, ret; 851 int pos, ret;
852 852
853 dd->ipath_msi_lo = 0; /* used as a flag during reset processing */ 853 dd->ipath_msi_lo = 0; /* used as a flag during reset processing */
854 dd->ipath_irq = pdev->irq;
855 ret = pci_enable_msi(dd->pcidev); 854 ret = pci_enable_msi(dd->pcidev);
856 if (ret) 855 if (ret)
857 ipath_dev_err(dd, "pci_enable_msi failed: %d, " 856 ipath_dev_err(dd, "pci_enable_msi failed: %d, "
858 "interrupts may not work\n", ret); 857 "interrupts may not work\n", ret);
859 /* continue even if it fails, we may still be OK... */ 858 /* continue even if it fails, we may still be OK... */
859 dd->ipath_irq = pdev->irq;
860 860
861 if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) { 861 if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) {
862 u16 control; 862 u16 control;
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index d819cca524cd..d4f6b5239ef8 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -347,10 +347,9 @@ done:
347static int init_chip_reset(struct ipath_devdata *dd, 347static int init_chip_reset(struct ipath_devdata *dd,
348 struct ipath_portdata **pdp) 348 struct ipath_portdata **pdp)
349{ 349{
350 struct ipath_portdata *pd;
351 u32 rtmp; 350 u32 rtmp;
352 351
353 *pdp = pd = dd->ipath_pd[0]; 352 *pdp = dd->ipath_pd[0];
354 /* ensure chip does no sends or receives while we re-initialize */ 353 /* ensure chip does no sends or receives while we re-initialize */
355 dd->ipath_control = dd->ipath_sendctrl = dd->ipath_rcvctrl = 0U; 354 dd->ipath_control = dd->ipath_sendctrl = dd->ipath_rcvctrl = 0U;
356 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 0); 355 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 0);
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 5652a550d442..72b9e279d19d 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -598,10 +598,9 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
598 * on close 598 * on close
599 */ 599 */
600 if (errs & INFINIPATH_E_RRCVHDRFULL) { 600 if (errs & INFINIPATH_E_RRCVHDRFULL) {
601 int any;
602 u32 hd, tl; 601 u32 hd, tl;
603 ipath_stats.sps_hdrqfull++; 602 ipath_stats.sps_hdrqfull++;
604 for (any = i = 0; i < dd->ipath_cfgports; i++) { 603 for (i = 0; i < dd->ipath_cfgports; i++) {
605 struct ipath_portdata *pd = dd->ipath_pd[i]; 604 struct ipath_portdata *pd = dd->ipath_pd[i];
606 if (i == 0) { 605 if (i == 0) {
607 hd = dd->ipath_port0head; 606 hd = dd->ipath_port0head;
diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c
index 9a6cbd05adcd..851763d7d2db 100644
--- a/drivers/infiniband/hw/ipath/ipath_keys.c
+++ b/drivers/infiniband/hw/ipath/ipath_keys.c
@@ -134,7 +134,7 @@ int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
134 */ 134 */
135 if (sge->lkey == 0) { 135 if (sge->lkey == 0) {
136 isge->mr = NULL; 136 isge->mr = NULL;
137 isge->vaddr = bus_to_virt(sge->addr); 137 isge->vaddr = (void *) sge->addr;
138 isge->length = sge->length; 138 isge->length = sge->length;
139 isge->sge_length = sge->length; 139 isge->sge_length = sge->length;
140 ret = 1; 140 ret = 1;
@@ -202,12 +202,12 @@ int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,
202 int ret; 202 int ret;
203 203
204 /* 204 /*
205 * We use RKEY == zero for physical addresses 205 * We use RKEY == zero for kernel virtual addresses
206 * (see ipath_get_dma_mr). 206 * (see ipath_get_dma_mr and ipath_dma.c).
207 */ 207 */
208 if (rkey == 0) { 208 if (rkey == 0) {
209 sge->mr = NULL; 209 sge->mr = NULL;
210 sge->vaddr = phys_to_virt(vaddr); 210 sge->vaddr = (void *) vaddr;
211 sge->length = len; 211 sge->length = len;
212 sge->sge_length = len; 212 sge->sge_length = len;
213 ss->sg_list = NULL; 213 ss->sg_list = NULL;
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
index a0673c1eef71..8cc8598d6c69 100644
--- a/drivers/infiniband/hw/ipath/ipath_mr.c
+++ b/drivers/infiniband/hw/ipath/ipath_mr.c
@@ -54,6 +54,8 @@ static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr)
54 * @acc: access flags 54 * @acc: access flags
55 * 55 *
56 * Returns the memory region on success, otherwise returns an errno. 56 * Returns the memory region on success, otherwise returns an errno.
57 * Note that all DMA addresses should be created via the
58 * struct ib_dma_mapping_ops functions (see ipath_dma.c).
57 */ 59 */
58struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc) 60struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc)
59{ 61{
@@ -149,8 +151,7 @@ struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
149 m = 0; 151 m = 0;
150 n = 0; 152 n = 0;
151 for (i = 0; i < num_phys_buf; i++) { 153 for (i = 0; i < num_phys_buf; i++) {
152 mr->mr.map[m]->segs[n].vaddr = 154 mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
153 phys_to_virt(buffer_list[i].addr);
154 mr->mr.map[m]->segs[n].length = buffer_list[i].size; 155 mr->mr.map[m]->segs[n].length = buffer_list[i].size;
155 mr->mr.length += buffer_list[i].size; 156 mr->mr.length += buffer_list[i].size;
156 n++; 157 n++;
@@ -347,7 +348,7 @@ int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list,
347 n = 0; 348 n = 0;
348 ps = 1 << fmr->page_shift; 349 ps = 1 << fmr->page_shift;
349 for (i = 0; i < list_len; i++) { 350 for (i = 0; i < list_len; i++) {
350 fmr->mr.map[m]->segs[n].vaddr = phys_to_virt(page_list[i]); 351 fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
351 fmr->mr.map[m]->segs[n].length = ps; 352 fmr->mr.map[m]->segs[n].length = ps;
352 if (++n == IPATH_SEGSZ) { 353 if (++n == IPATH_SEGSZ) {
353 m++; 354 m++;
diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c
index 182de34f9f47..ffa6318ad0cc 100644
--- a/drivers/infiniband/hw/ipath/ipath_sysfs.c
+++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c
@@ -215,7 +215,6 @@ static ssize_t store_mlid(struct device *dev,
215 size_t count) 215 size_t count)
216{ 216{
217 struct ipath_devdata *dd = dev_get_drvdata(dev); 217 struct ipath_devdata *dd = dev_get_drvdata(dev);
218 int unit;
219 u16 mlid; 218 u16 mlid;
220 int ret; 219 int ret;
221 220
@@ -223,8 +222,6 @@ static ssize_t store_mlid(struct device *dev,
223 if (ret < 0 || mlid < IPATH_MULTICAST_LID_BASE) 222 if (ret < 0 || mlid < IPATH_MULTICAST_LID_BASE)
224 goto invalid; 223 goto invalid;
225 224
226 unit = dd->ipath_unit;
227
228 dd->ipath_mlid = mlid; 225 dd->ipath_mlid = mlid;
229 226
230 goto bail; 227 goto bail;
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index acdee33ee1f8..2aaacdb7e52a 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -1599,6 +1599,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
1599 dev->detach_mcast = ipath_multicast_detach; 1599 dev->detach_mcast = ipath_multicast_detach;
1600 dev->process_mad = ipath_process_mad; 1600 dev->process_mad = ipath_process_mad;
1601 dev->mmap = ipath_mmap; 1601 dev->mmap = ipath_mmap;
1602 dev->dma_ops = &ipath_dma_mapping_ops;
1602 1603
1603 snprintf(dev->node_desc, sizeof(dev->node_desc), 1604 snprintf(dev->node_desc, sizeof(dev->node_desc),
1604 IPATH_IDSTR " %s", init_utsname()->nodename); 1605 IPATH_IDSTR " %s", init_utsname()->nodename);
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 8039f6e5f0c8..c0c8d5b24a7d 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -812,4 +812,6 @@ extern unsigned int ib_ipath_max_srq_wrs;
812 812
813extern const u32 ib_ipath_rnr_table[]; 813extern const u32 ib_ipath_rnr_table[];
814 814
815extern struct ib_dma_mapping_ops ipath_dma_mapping_ops;
816
815#endif /* IPATH_VERBS_H */ 817#endif /* IPATH_VERBS_H */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 99547996aba2..07deee8f81ce 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -105,12 +105,12 @@ struct ipoib_mcast;
105 105
106struct ipoib_rx_buf { 106struct ipoib_rx_buf {
107 struct sk_buff *skb; 107 struct sk_buff *skb;
108 dma_addr_t mapping; 108 u64 mapping;
109}; 109};
110 110
111struct ipoib_tx_buf { 111struct ipoib_tx_buf {
112 struct sk_buff *skb; 112 struct sk_buff *skb;
113 DECLARE_PCI_UNMAP_ADDR(mapping) 113 u64 mapping;
114}; 114};
115 115
116/* 116/*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index f10fba5d3265..59d9594ed6d9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -109,9 +109,8 @@ static int ipoib_ib_post_receive(struct net_device *dev, int id)
109 ret = ib_post_recv(priv->qp, &param, &bad_wr); 109 ret = ib_post_recv(priv->qp, &param, &bad_wr);
110 if (unlikely(ret)) { 110 if (unlikely(ret)) {
111 ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret); 111 ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
112 dma_unmap_single(priv->ca->dma_device, 112 ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping,
113 priv->rx_ring[id].mapping, 113 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
114 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
115 dev_kfree_skb_any(priv->rx_ring[id].skb); 114 dev_kfree_skb_any(priv->rx_ring[id].skb);
116 priv->rx_ring[id].skb = NULL; 115 priv->rx_ring[id].skb = NULL;
117 } 116 }
@@ -123,7 +122,7 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
123{ 122{
124 struct ipoib_dev_priv *priv = netdev_priv(dev); 123 struct ipoib_dev_priv *priv = netdev_priv(dev);
125 struct sk_buff *skb; 124 struct sk_buff *skb;
126 dma_addr_t addr; 125 u64 addr;
127 126
128 skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4); 127 skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4);
129 if (!skb) 128 if (!skb)
@@ -136,10 +135,9 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
136 */ 135 */
137 skb_reserve(skb, 4); 136 skb_reserve(skb, 4);
138 137
139 addr = dma_map_single(priv->ca->dma_device, 138 addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE,
140 skb->data, IPOIB_BUF_SIZE, 139 DMA_FROM_DEVICE);
141 DMA_FROM_DEVICE); 140 if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
142 if (unlikely(dma_mapping_error(addr))) {
143 dev_kfree_skb_any(skb); 141 dev_kfree_skb_any(skb);
144 return -EIO; 142 return -EIO;
145 } 143 }
@@ -174,7 +172,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
174 struct ipoib_dev_priv *priv = netdev_priv(dev); 172 struct ipoib_dev_priv *priv = netdev_priv(dev);
175 unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV; 173 unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
176 struct sk_buff *skb; 174 struct sk_buff *skb;
177 dma_addr_t addr; 175 u64 addr;
178 176
179 ipoib_dbg_data(priv, "recv completion: id %d, op %d, status: %d\n", 177 ipoib_dbg_data(priv, "recv completion: id %d, op %d, status: %d\n",
180 wr_id, wc->opcode, wc->status); 178 wr_id, wc->opcode, wc->status);
@@ -193,8 +191,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
193 ipoib_warn(priv, "failed recv event " 191 ipoib_warn(priv, "failed recv event "
194 "(status=%d, wrid=%d vend_err %x)\n", 192 "(status=%d, wrid=%d vend_err %x)\n",
195 wc->status, wr_id, wc->vendor_err); 193 wc->status, wr_id, wc->vendor_err);
196 dma_unmap_single(priv->ca->dma_device, addr, 194 ib_dma_unmap_single(priv->ca, addr,
197 IPOIB_BUF_SIZE, DMA_FROM_DEVICE); 195 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
198 dev_kfree_skb_any(skb); 196 dev_kfree_skb_any(skb);
199 priv->rx_ring[wr_id].skb = NULL; 197 priv->rx_ring[wr_id].skb = NULL;
200 return; 198 return;
@@ -212,8 +210,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
212 ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n", 210 ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
213 wc->byte_len, wc->slid); 211 wc->byte_len, wc->slid);
214 212
215 dma_unmap_single(priv->ca->dma_device, addr, 213 ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
216 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
217 214
218 skb_put(skb, wc->byte_len); 215 skb_put(skb, wc->byte_len);
219 skb_pull(skb, IB_GRH_BYTES); 216 skb_pull(skb, IB_GRH_BYTES);
@@ -261,10 +258,8 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
261 258
262 tx_req = &priv->tx_ring[wr_id]; 259 tx_req = &priv->tx_ring[wr_id];
263 260
264 dma_unmap_single(priv->ca->dma_device, 261 ib_dma_unmap_single(priv->ca, tx_req->mapping,
265 pci_unmap_addr(tx_req, mapping), 262 tx_req->skb->len, DMA_TO_DEVICE);
266 tx_req->skb->len,
267 DMA_TO_DEVICE);
268 263
269 ++priv->stats.tx_packets; 264 ++priv->stats.tx_packets;
270 priv->stats.tx_bytes += tx_req->skb->len; 265 priv->stats.tx_bytes += tx_req->skb->len;
@@ -311,7 +306,7 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
311static inline int post_send(struct ipoib_dev_priv *priv, 306static inline int post_send(struct ipoib_dev_priv *priv,
312 unsigned int wr_id, 307 unsigned int wr_id,
313 struct ib_ah *address, u32 qpn, 308 struct ib_ah *address, u32 qpn,
314 dma_addr_t addr, int len) 309 u64 addr, int len)
315{ 310{
316 struct ib_send_wr *bad_wr; 311 struct ib_send_wr *bad_wr;
317 312
@@ -330,7 +325,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
330{ 325{
331 struct ipoib_dev_priv *priv = netdev_priv(dev); 326 struct ipoib_dev_priv *priv = netdev_priv(dev);
332 struct ipoib_tx_buf *tx_req; 327 struct ipoib_tx_buf *tx_req;
333 dma_addr_t addr; 328 u64 addr;
334 329
335 if (unlikely(skb->len > dev->mtu + INFINIBAND_ALEN)) { 330 if (unlikely(skb->len > dev->mtu + INFINIBAND_ALEN)) {
336 ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n", 331 ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
@@ -353,21 +348,20 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
353 */ 348 */
354 tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)]; 349 tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
355 tx_req->skb = skb; 350 tx_req->skb = skb;
356 addr = dma_map_single(priv->ca->dma_device, skb->data, skb->len, 351 addr = ib_dma_map_single(priv->ca, skb->data, skb->len,
357 DMA_TO_DEVICE); 352 DMA_TO_DEVICE);
358 if (unlikely(dma_mapping_error(addr))) { 353 if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
359 ++priv->stats.tx_errors; 354 ++priv->stats.tx_errors;
360 dev_kfree_skb_any(skb); 355 dev_kfree_skb_any(skb);
361 return; 356 return;
362 } 357 }
363 pci_unmap_addr_set(tx_req, mapping, addr); 358 tx_req->mapping = addr;
364 359
365 if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1), 360 if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
366 address->ah, qpn, addr, skb->len))) { 361 address->ah, qpn, addr, skb->len))) {
367 ipoib_warn(priv, "post_send failed\n"); 362 ipoib_warn(priv, "post_send failed\n");
368 ++priv->stats.tx_errors; 363 ++priv->stats.tx_errors;
369 dma_unmap_single(priv->ca->dma_device, addr, skb->len, 364 ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
370 DMA_TO_DEVICE);
371 dev_kfree_skb_any(skb); 365 dev_kfree_skb_any(skb);
372 } else { 366 } else {
373 dev->trans_start = jiffies; 367 dev->trans_start = jiffies;
@@ -538,24 +532,27 @@ int ipoib_ib_dev_stop(struct net_device *dev)
538 while ((int) priv->tx_tail - (int) priv->tx_head < 0) { 532 while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
539 tx_req = &priv->tx_ring[priv->tx_tail & 533 tx_req = &priv->tx_ring[priv->tx_tail &
540 (ipoib_sendq_size - 1)]; 534 (ipoib_sendq_size - 1)];
541 dma_unmap_single(priv->ca->dma_device, 535 ib_dma_unmap_single(priv->ca,
542 pci_unmap_addr(tx_req, mapping), 536 tx_req->mapping,
543 tx_req->skb->len, 537 tx_req->skb->len,
544 DMA_TO_DEVICE); 538 DMA_TO_DEVICE);
545 dev_kfree_skb_any(tx_req->skb); 539 dev_kfree_skb_any(tx_req->skb);
546 ++priv->tx_tail; 540 ++priv->tx_tail;
547 } 541 }
548 542
549 for (i = 0; i < ipoib_recvq_size; ++i) 543 for (i = 0; i < ipoib_recvq_size; ++i) {
550 if (priv->rx_ring[i].skb) { 544 struct ipoib_rx_buf *rx_req;
551 dma_unmap_single(priv->ca->dma_device, 545
552 pci_unmap_addr(&priv->rx_ring[i], 546 rx_req = &priv->rx_ring[i];
553 mapping), 547 if (!rx_req->skb)
554 IPOIB_BUF_SIZE, 548 continue;
555 DMA_FROM_DEVICE); 549 ib_dma_unmap_single(priv->ca,
556 dev_kfree_skb_any(priv->rx_ring[i].skb); 550 rx_req->mapping,
557 priv->rx_ring[i].skb = NULL; 551 IPOIB_BUF_SIZE,
558 } 552 DMA_FROM_DEVICE);
553 dev_kfree_skb_any(rx_req->skb);
554 rx_req->skb = NULL;
555 }
559 556
560 goto timeout; 557 goto timeout;
561 } 558 }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index c09280243726..705eb1d0e554 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -497,8 +497,6 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
497 return; 497 return;
498 } 498 }
499 499
500 skb_queue_head_init(&neigh->queue);
501
502 /* 500 /*
503 * We can only be called from ipoib_start_xmit, so we're 501 * We can only be called from ipoib_start_xmit, so we're
504 * inside tx_lock -- no need to save/restore flags. 502 * inside tx_lock -- no need to save/restore flags.
@@ -806,6 +804,7 @@ struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour)
806 804
807 neigh->neighbour = neighbour; 805 neigh->neighbour = neighbour;
808 *to_ipoib_neigh(neighbour) = neigh; 806 *to_ipoib_neigh(neighbour) = neigh;
807 skb_queue_head_init(&neigh->queue);
809 808
810 return neigh; 809 return neigh;
811} 810}
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 234e5b061a75..cae8c96a55f8 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -182,7 +182,7 @@ struct iser_regd_buf {
182 struct iser_mem_reg reg; /* memory registration info */ 182 struct iser_mem_reg reg; /* memory registration info */
183 void *virt_addr; 183 void *virt_addr;
184 struct iser_device *device; /* device->device for dma_unmap */ 184 struct iser_device *device; /* device->device for dma_unmap */
185 dma_addr_t dma_addr; /* if non zero, addr for dma_unmap */ 185 u64 dma_addr; /* if non zero, addr for dma_unmap */
186 enum dma_data_direction direction; /* direction for dma_unmap */ 186 enum dma_data_direction direction; /* direction for dma_unmap */
187 unsigned int data_size; 187 unsigned int data_size;
188 atomic_t ref_count; /* refcount, freed when dec to 0 */ 188 atomic_t ref_count; /* refcount, freed when dec to 0 */
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 9b3d79c796c8..e73c87b9be43 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -487,10 +487,8 @@ int iser_send_control(struct iscsi_conn *conn,
487 struct iscsi_iser_conn *iser_conn = conn->dd_data; 487 struct iscsi_iser_conn *iser_conn = conn->dd_data;
488 struct iser_desc *mdesc = mtask->dd_data; 488 struct iser_desc *mdesc = mtask->dd_data;
489 struct iser_dto *send_dto = NULL; 489 struct iser_dto *send_dto = NULL;
490 unsigned int itt;
491 unsigned long data_seg_len; 490 unsigned long data_seg_len;
492 int err = 0; 491 int err = 0;
493 unsigned char opcode;
494 struct iser_regd_buf *regd_buf; 492 struct iser_regd_buf *regd_buf;
495 struct iser_device *device; 493 struct iser_device *device;
496 494
@@ -512,8 +510,6 @@ int iser_send_control(struct iscsi_conn *conn,
512 510
513 iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE); 511 iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);
514 512
515 itt = ntohl(mtask->hdr->itt);
516 opcode = mtask->hdr->opcode & ISCSI_OPCODE_MASK;
517 data_seg_len = ntoh24(mtask->hdr->dlength); 513 data_seg_len = ntoh24(mtask->hdr->dlength);
518 514
519 if (data_seg_len > 0) { 515 if (data_seg_len > 0) {
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 3aedd59b8a84..fc9f1fd0ae54 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -52,7 +52,7 @@
52 */ 52 */
53int iser_regd_buff_release(struct iser_regd_buf *regd_buf) 53int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
54{ 54{
55 struct device *dma_device; 55 struct ib_device *dev;
56 56
57 if ((atomic_read(&regd_buf->ref_count) == 0) || 57 if ((atomic_read(&regd_buf->ref_count) == 0) ||
58 atomic_dec_and_test(&regd_buf->ref_count)) { 58 atomic_dec_and_test(&regd_buf->ref_count)) {
@@ -61,8 +61,8 @@ int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
61 iser_unreg_mem(&regd_buf->reg); 61 iser_unreg_mem(&regd_buf->reg);
62 62
63 if (regd_buf->dma_addr) { 63 if (regd_buf->dma_addr) {
64 dma_device = regd_buf->device->ib_device->dma_device; 64 dev = regd_buf->device->ib_device;
65 dma_unmap_single(dma_device, 65 ib_dma_unmap_single(dev,
66 regd_buf->dma_addr, 66 regd_buf->dma_addr,
67 regd_buf->data_size, 67 regd_buf->data_size,
68 regd_buf->direction); 68 regd_buf->direction);
@@ -84,12 +84,12 @@ void iser_reg_single(struct iser_device *device,
84 struct iser_regd_buf *regd_buf, 84 struct iser_regd_buf *regd_buf,
85 enum dma_data_direction direction) 85 enum dma_data_direction direction)
86{ 86{
87 dma_addr_t dma_addr; 87 u64 dma_addr;
88 88
89 dma_addr = dma_map_single(device->ib_device->dma_device, 89 dma_addr = ib_dma_map_single(device->ib_device,
90 regd_buf->virt_addr, 90 regd_buf->virt_addr,
91 regd_buf->data_size, direction); 91 regd_buf->data_size, direction);
92 BUG_ON(dma_mapping_error(dma_addr)); 92 BUG_ON(ib_dma_mapping_error(device->ib_device, dma_addr));
93 93
94 regd_buf->reg.lkey = device->mr->lkey; 94 regd_buf->reg.lkey = device->mr->lkey;
95 regd_buf->reg.len = regd_buf->data_size; 95 regd_buf->reg.len = regd_buf->data_size;
@@ -107,7 +107,7 @@ int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
107 enum iser_data_dir cmd_dir) 107 enum iser_data_dir cmd_dir)
108{ 108{
109 int dma_nents; 109 int dma_nents;
110 struct device *dma_device; 110 struct ib_device *dev;
111 char *mem = NULL; 111 char *mem = NULL;
112 struct iser_data_buf *data = &iser_ctask->data[cmd_dir]; 112 struct iser_data_buf *data = &iser_ctask->data[cmd_dir];
113 unsigned long cmd_data_len = data->data_len; 113 unsigned long cmd_data_len = data->data_len;
@@ -147,17 +147,12 @@ int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
147 147
148 iser_ctask->data_copy[cmd_dir].copy_buf = mem; 148 iser_ctask->data_copy[cmd_dir].copy_buf = mem;
149 149
150 dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device; 150 dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
151 151 dma_nents = ib_dma_map_sg(dev,
152 if (cmd_dir == ISER_DIR_OUT) 152 &iser_ctask->data_copy[cmd_dir].sg_single,
153 dma_nents = dma_map_sg(dma_device, 153 1,
154 &iser_ctask->data_copy[cmd_dir].sg_single, 154 (cmd_dir == ISER_DIR_OUT) ?
155 1, DMA_TO_DEVICE); 155 DMA_TO_DEVICE : DMA_FROM_DEVICE);
156 else
157 dma_nents = dma_map_sg(dma_device,
158 &iser_ctask->data_copy[cmd_dir].sg_single,
159 1, DMA_FROM_DEVICE);
160
161 BUG_ON(dma_nents == 0); 156 BUG_ON(dma_nents == 0);
162 157
163 iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents; 158 iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents;
@@ -170,19 +165,16 @@ int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
170void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, 165void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
171 enum iser_data_dir cmd_dir) 166 enum iser_data_dir cmd_dir)
172{ 167{
173 struct device *dma_device; 168 struct ib_device *dev;
174 struct iser_data_buf *mem_copy; 169 struct iser_data_buf *mem_copy;
175 unsigned long cmd_data_len; 170 unsigned long cmd_data_len;
176 171
177 dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device; 172 dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
178 mem_copy = &iser_ctask->data_copy[cmd_dir]; 173 mem_copy = &iser_ctask->data_copy[cmd_dir];
179 174
180 if (cmd_dir == ISER_DIR_OUT) 175 ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
181 dma_unmap_sg(dma_device, &mem_copy->sg_single, 1, 176 (cmd_dir == ISER_DIR_OUT) ?
182 DMA_TO_DEVICE); 177 DMA_TO_DEVICE : DMA_FROM_DEVICE);
183 else
184 dma_unmap_sg(dma_device, &mem_copy->sg_single, 1,
185 DMA_FROM_DEVICE);
186 178
187 if (cmd_dir == ISER_DIR_IN) { 179 if (cmd_dir == ISER_DIR_IN) {
188 char *mem; 180 char *mem;
@@ -231,11 +223,12 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
231 * consecutive elements. Also, it handles one entry SG. 223 * consecutive elements. Also, it handles one entry SG.
232 */ 224 */
233static int iser_sg_to_page_vec(struct iser_data_buf *data, 225static int iser_sg_to_page_vec(struct iser_data_buf *data,
234 struct iser_page_vec *page_vec) 226 struct iser_page_vec *page_vec,
227 struct ib_device *ibdev)
235{ 228{
236 struct scatterlist *sg = (struct scatterlist *)data->buf; 229 struct scatterlist *sg = (struct scatterlist *)data->buf;
237 dma_addr_t first_addr, last_addr, page; 230 u64 first_addr, last_addr, page;
238 int start_aligned, end_aligned; 231 int end_aligned;
239 unsigned int cur_page = 0; 232 unsigned int cur_page = 0;
240 unsigned long total_sz = 0; 233 unsigned long total_sz = 0;
241 int i; 234 int i;
@@ -244,19 +237,21 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
244 page_vec->offset = (u64) sg[0].offset & ~MASK_4K; 237 page_vec->offset = (u64) sg[0].offset & ~MASK_4K;
245 238
246 for (i = 0; i < data->dma_nents; i++) { 239 for (i = 0; i < data->dma_nents; i++) {
247 total_sz += sg_dma_len(&sg[i]); 240 unsigned int dma_len = ib_sg_dma_len(ibdev, &sg[i]);
241
242 total_sz += dma_len;
248 243
249 first_addr = sg_dma_address(&sg[i]); 244 first_addr = ib_sg_dma_address(ibdev, &sg[i]);
250 last_addr = first_addr + sg_dma_len(&sg[i]); 245 last_addr = first_addr + dma_len;
251 246
252 start_aligned = !(first_addr & ~MASK_4K);
253 end_aligned = !(last_addr & ~MASK_4K); 247 end_aligned = !(last_addr & ~MASK_4K);
254 248
255 /* continue to collect page fragments till aligned or SG ends */ 249 /* continue to collect page fragments till aligned or SG ends */
256 while (!end_aligned && (i + 1 < data->dma_nents)) { 250 while (!end_aligned && (i + 1 < data->dma_nents)) {
257 i++; 251 i++;
258 total_sz += sg_dma_len(&sg[i]); 252 dma_len = ib_sg_dma_len(ibdev, &sg[i]);
259 last_addr = sg_dma_address(&sg[i]) + sg_dma_len(&sg[i]); 253 total_sz += dma_len;
254 last_addr = ib_sg_dma_address(ibdev, &sg[i]) + dma_len;
260 end_aligned = !(last_addr & ~MASK_4K); 255 end_aligned = !(last_addr & ~MASK_4K);
261 } 256 }
262 257
@@ -288,10 +283,11 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
288 * the number of entries which are aligned correctly. Supports the case where 283 * the number of entries which are aligned correctly. Supports the case where
289 * consecutive SG elements are actually fragments of the same physcial page. 284 * consecutive SG elements are actually fragments of the same physcial page.
290 */ 285 */
291static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data) 286static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
287 struct ib_device *ibdev)
292{ 288{
293 struct scatterlist *sg; 289 struct scatterlist *sg;
294 dma_addr_t end_addr, next_addr; 290 u64 end_addr, next_addr;
295 int i, cnt; 291 int i, cnt;
296 unsigned int ret_len = 0; 292 unsigned int ret_len = 0;
297 293
@@ -303,12 +299,12 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data)
303 (unsigned long)page_to_phys(sg[i].page), 299 (unsigned long)page_to_phys(sg[i].page),
304 (unsigned long)sg[i].offset, 300 (unsigned long)sg[i].offset,
305 (unsigned long)sg[i].length); */ 301 (unsigned long)sg[i].length); */
306 end_addr = sg_dma_address(&sg[i]) + 302 end_addr = ib_sg_dma_address(ibdev, &sg[i]) +
307 sg_dma_len(&sg[i]); 303 ib_sg_dma_len(ibdev, &sg[i]);
308 /* iser_dbg("Checking sg iobuf end address " 304 /* iser_dbg("Checking sg iobuf end address "
309 "0x%08lX\n", end_addr); */ 305 "0x%08lX\n", end_addr); */
310 if (i + 1 < data->dma_nents) { 306 if (i + 1 < data->dma_nents) {
311 next_addr = sg_dma_address(&sg[i+1]); 307 next_addr = ib_sg_dma_address(ibdev, &sg[i+1]);
312 /* are i, i+1 fragments of the same page? */ 308 /* are i, i+1 fragments of the same page? */
313 if (end_addr == next_addr) 309 if (end_addr == next_addr)
314 continue; 310 continue;
@@ -325,7 +321,8 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data)
325 return ret_len; 321 return ret_len;
326} 322}
327 323
328static void iser_data_buf_dump(struct iser_data_buf *data) 324static void iser_data_buf_dump(struct iser_data_buf *data,
325 struct ib_device *ibdev)
329{ 326{
330 struct scatterlist *sg = (struct scatterlist *)data->buf; 327 struct scatterlist *sg = (struct scatterlist *)data->buf;
331 int i; 328 int i;
@@ -333,9 +330,9 @@ static void iser_data_buf_dump(struct iser_data_buf *data)
333 for (i = 0; i < data->dma_nents; i++) 330 for (i = 0; i < data->dma_nents; i++)
334 iser_err("sg[%d] dma_addr:0x%lX page:0x%p " 331 iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
335 "off:0x%x sz:0x%x dma_len:0x%x\n", 332 "off:0x%x sz:0x%x dma_len:0x%x\n",
336 i, (unsigned long)sg_dma_address(&sg[i]), 333 i, (unsigned long)ib_sg_dma_address(ibdev, &sg[i]),
337 sg[i].page, sg[i].offset, 334 sg[i].page, sg[i].offset,
338 sg[i].length,sg_dma_len(&sg[i])); 335 sg[i].length, ib_sg_dma_len(ibdev, &sg[i]));
339} 336}
340 337
341static void iser_dump_page_vec(struct iser_page_vec *page_vec) 338static void iser_dump_page_vec(struct iser_page_vec *page_vec)
@@ -349,7 +346,8 @@ static void iser_dump_page_vec(struct iser_page_vec *page_vec)
349} 346}
350 347
351static void iser_page_vec_build(struct iser_data_buf *data, 348static void iser_page_vec_build(struct iser_data_buf *data,
352 struct iser_page_vec *page_vec) 349 struct iser_page_vec *page_vec,
350 struct ib_device *ibdev)
353{ 351{
354 int page_vec_len = 0; 352 int page_vec_len = 0;
355 353
@@ -357,14 +355,14 @@ static void iser_page_vec_build(struct iser_data_buf *data,
357 page_vec->offset = 0; 355 page_vec->offset = 0;
358 356
359 iser_dbg("Translating sg sz: %d\n", data->dma_nents); 357 iser_dbg("Translating sg sz: %d\n", data->dma_nents);
360 page_vec_len = iser_sg_to_page_vec(data,page_vec); 358 page_vec_len = iser_sg_to_page_vec(data, page_vec, ibdev);
361 iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents,page_vec_len); 359 iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents,page_vec_len);
362 360
363 page_vec->length = page_vec_len; 361 page_vec->length = page_vec_len;
364 362
365 if (page_vec_len * SIZE_4K < page_vec->data_size) { 363 if (page_vec_len * SIZE_4K < page_vec->data_size) {
366 iser_err("page_vec too short to hold this SG\n"); 364 iser_err("page_vec too short to hold this SG\n");
367 iser_data_buf_dump(data); 365 iser_data_buf_dump(data, ibdev);
368 iser_dump_page_vec(page_vec); 366 iser_dump_page_vec(page_vec);
369 BUG(); 367 BUG();
370 } 368 }
@@ -375,13 +373,12 @@ int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
375 enum iser_data_dir iser_dir, 373 enum iser_data_dir iser_dir,
376 enum dma_data_direction dma_dir) 374 enum dma_data_direction dma_dir)
377{ 375{
378 struct device *dma_device; 376 struct ib_device *dev;
379 377
380 iser_ctask->dir[iser_dir] = 1; 378 iser_ctask->dir[iser_dir] = 1;
381 dma_device = 379 dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
382 iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
383 380
384 data->dma_nents = dma_map_sg(dma_device, data->buf, data->size, dma_dir); 381 data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
385 if (data->dma_nents == 0) { 382 if (data->dma_nents == 0) {
386 iser_err("dma_map_sg failed!!!\n"); 383 iser_err("dma_map_sg failed!!!\n");
387 return -EINVAL; 384 return -EINVAL;
@@ -391,20 +388,19 @@ int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
391 388
392void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask) 389void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
393{ 390{
394 struct device *dma_device; 391 struct ib_device *dev;
395 struct iser_data_buf *data; 392 struct iser_data_buf *data;
396 393
397 dma_device = 394 dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
398 iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
399 395
400 if (iser_ctask->dir[ISER_DIR_IN]) { 396 if (iser_ctask->dir[ISER_DIR_IN]) {
401 data = &iser_ctask->data[ISER_DIR_IN]; 397 data = &iser_ctask->data[ISER_DIR_IN];
402 dma_unmap_sg(dma_device, data->buf, data->size, DMA_FROM_DEVICE); 398 ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
403 } 399 }
404 400
405 if (iser_ctask->dir[ISER_DIR_OUT]) { 401 if (iser_ctask->dir[ISER_DIR_OUT]) {
406 data = &iser_ctask->data[ISER_DIR_OUT]; 402 data = &iser_ctask->data[ISER_DIR_OUT];
407 dma_unmap_sg(dma_device, data->buf, data->size, DMA_TO_DEVICE); 403 ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
408 } 404 }
409} 405}
410 406
@@ -419,6 +415,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
419{ 415{
420 struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn; 416 struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn;
421 struct iser_device *device = ib_conn->device; 417 struct iser_device *device = ib_conn->device;
418 struct ib_device *ibdev = device->ib_device;
422 struct iser_data_buf *mem = &iser_ctask->data[cmd_dir]; 419 struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
423 struct iser_regd_buf *regd_buf; 420 struct iser_regd_buf *regd_buf;
424 int aligned_len; 421 int aligned_len;
@@ -428,11 +425,11 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
428 425
429 regd_buf = &iser_ctask->rdma_regd[cmd_dir]; 426 regd_buf = &iser_ctask->rdma_regd[cmd_dir];
430 427
431 aligned_len = iser_data_buf_aligned_len(mem); 428 aligned_len = iser_data_buf_aligned_len(mem, ibdev);
432 if (aligned_len != mem->dma_nents) { 429 if (aligned_len != mem->dma_nents) {
433 iser_err("rdma alignment violation %d/%d aligned\n", 430 iser_err("rdma alignment violation %d/%d aligned\n",
434 aligned_len, mem->size); 431 aligned_len, mem->size);
435 iser_data_buf_dump(mem); 432 iser_data_buf_dump(mem, ibdev);
436 433
437 /* unmap the command data before accessing it */ 434 /* unmap the command data before accessing it */
438 iser_dma_unmap_task_data(iser_ctask); 435 iser_dma_unmap_task_data(iser_ctask);
@@ -450,8 +447,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
450 447
451 regd_buf->reg.lkey = device->mr->lkey; 448 regd_buf->reg.lkey = device->mr->lkey;
452 regd_buf->reg.rkey = device->mr->rkey; 449 regd_buf->reg.rkey = device->mr->rkey;
453 regd_buf->reg.len = sg_dma_len(&sg[0]); 450 regd_buf->reg.len = ib_sg_dma_len(ibdev, &sg[0]);
454 regd_buf->reg.va = sg_dma_address(&sg[0]); 451 regd_buf->reg.va = ib_sg_dma_address(ibdev, &sg[0]);
455 regd_buf->reg.is_fmr = 0; 452 regd_buf->reg.is_fmr = 0;
456 453
457 iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X " 454 iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
@@ -461,10 +458,10 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
461 (unsigned long)regd_buf->reg.va, 458 (unsigned long)regd_buf->reg.va,
462 (unsigned long)regd_buf->reg.len); 459 (unsigned long)regd_buf->reg.len);
463 } else { /* use FMR for multiple dma entries */ 460 } else { /* use FMR for multiple dma entries */
464 iser_page_vec_build(mem, ib_conn->page_vec); 461 iser_page_vec_build(mem, ib_conn->page_vec, ibdev);
465 err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg); 462 err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
466 if (err) { 463 if (err) {
467 iser_data_buf_dump(mem); 464 iser_data_buf_dump(mem, ibdev);
468 iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents, 465 iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
469 ntoh24(iser_ctask->desc.iscsi_header.dlength)); 466 ntoh24(iser_ctask->desc.iscsi_header.dlength));
470 iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n", 467 iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index a6289595557b..e9b6a6f07dd7 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -122,9 +122,8 @@ static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
122 if (!iu->buf) 122 if (!iu->buf)
123 goto out_free_iu; 123 goto out_free_iu;
124 124
125 iu->dma = dma_map_single(host->dev->dev->dma_device, 125 iu->dma = ib_dma_map_single(host->dev->dev, iu->buf, size, direction);
126 iu->buf, size, direction); 126 if (ib_dma_mapping_error(host->dev->dev, iu->dma))
127 if (dma_mapping_error(iu->dma))
128 goto out_free_buf; 127 goto out_free_buf;
129 128
130 iu->size = size; 129 iu->size = size;
@@ -145,8 +144,7 @@ static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
145 if (!iu) 144 if (!iu)
146 return; 145 return;
147 146
148 dma_unmap_single(host->dev->dev->dma_device, 147 ib_dma_unmap_single(host->dev->dev, iu->dma, iu->size, iu->direction);
149 iu->dma, iu->size, iu->direction);
150 kfree(iu->buf); 148 kfree(iu->buf);
151 kfree(iu); 149 kfree(iu);
152} 150}
@@ -482,8 +480,8 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
482 scat = &req->fake_sg; 480 scat = &req->fake_sg;
483 } 481 }
484 482
485 dma_unmap_sg(target->srp_host->dev->dev->dma_device, scat, nents, 483 ib_dma_unmap_sg(target->srp_host->dev->dev, scat, nents,
486 scmnd->sc_data_direction); 484 scmnd->sc_data_direction);
487} 485}
488 486
489static void srp_remove_req(struct srp_target_port *target, struct srp_request *req) 487static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
@@ -595,23 +593,26 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
595 int i, j; 593 int i, j;
596 int ret; 594 int ret;
597 struct srp_device *dev = target->srp_host->dev; 595 struct srp_device *dev = target->srp_host->dev;
596 struct ib_device *ibdev = dev->dev;
598 597
599 if (!dev->fmr_pool) 598 if (!dev->fmr_pool)
600 return -ENODEV; 599 return -ENODEV;
601 600
602 if ((sg_dma_address(&scat[0]) & ~dev->fmr_page_mask) && 601 if ((ib_sg_dma_address(ibdev, &scat[0]) & ~dev->fmr_page_mask) &&
603 mellanox_workarounds && !memcmp(&target->ioc_guid, mellanox_oui, 3)) 602 mellanox_workarounds && !memcmp(&target->ioc_guid, mellanox_oui, 3))
604 return -EINVAL; 603 return -EINVAL;
605 604
606 len = page_cnt = 0; 605 len = page_cnt = 0;
607 for (i = 0; i < sg_cnt; ++i) { 606 for (i = 0; i < sg_cnt; ++i) {
608 if (sg_dma_address(&scat[i]) & ~dev->fmr_page_mask) { 607 unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
608
609 if (ib_sg_dma_address(ibdev, &scat[i]) & ~dev->fmr_page_mask) {
609 if (i > 0) 610 if (i > 0)
610 return -EINVAL; 611 return -EINVAL;
611 else 612 else
612 ++page_cnt; 613 ++page_cnt;
613 } 614 }
614 if ((sg_dma_address(&scat[i]) + sg_dma_len(&scat[i])) & 615 if ((ib_sg_dma_address(ibdev, &scat[i]) + dma_len) &
615 ~dev->fmr_page_mask) { 616 ~dev->fmr_page_mask) {
616 if (i < sg_cnt - 1) 617 if (i < sg_cnt - 1)
617 return -EINVAL; 618 return -EINVAL;
@@ -619,7 +620,7 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
619 ++page_cnt; 620 ++page_cnt;
620 } 621 }
621 622
622 len += sg_dma_len(&scat[i]); 623 len += dma_len;
623 } 624 }
624 625
625 page_cnt += len >> dev->fmr_page_shift; 626 page_cnt += len >> dev->fmr_page_shift;
@@ -631,10 +632,14 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
631 return -ENOMEM; 632 return -ENOMEM;
632 633
633 page_cnt = 0; 634 page_cnt = 0;
634 for (i = 0; i < sg_cnt; ++i) 635 for (i = 0; i < sg_cnt; ++i) {
635 for (j = 0; j < sg_dma_len(&scat[i]); j += dev->fmr_page_size) 636 unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
637
638 for (j = 0; j < dma_len; j += dev->fmr_page_size)
636 dma_pages[page_cnt++] = 639 dma_pages[page_cnt++] =
637 (sg_dma_address(&scat[i]) & dev->fmr_page_mask) + j; 640 (ib_sg_dma_address(ibdev, &scat[i]) &
641 dev->fmr_page_mask) + j;
642 }
638 643
639 req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool, 644 req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool,
640 dma_pages, page_cnt, io_addr); 645 dma_pages, page_cnt, io_addr);
@@ -644,7 +649,8 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
644 goto out; 649 goto out;
645 } 650 }
646 651
647 buf->va = cpu_to_be64(sg_dma_address(&scat[0]) & ~dev->fmr_page_mask); 652 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, &scat[0]) &
653 ~dev->fmr_page_mask);
648 buf->key = cpu_to_be32(req->fmr->fmr->rkey); 654 buf->key = cpu_to_be32(req->fmr->fmr->rkey);
649 buf->len = cpu_to_be32(len); 655 buf->len = cpu_to_be32(len);
650 656
@@ -663,6 +669,8 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
663 struct srp_cmd *cmd = req->cmd->buf; 669 struct srp_cmd *cmd = req->cmd->buf;
664 int len, nents, count; 670 int len, nents, count;
665 u8 fmt = SRP_DATA_DESC_DIRECT; 671 u8 fmt = SRP_DATA_DESC_DIRECT;
672 struct srp_device *dev;
673 struct ib_device *ibdev;
666 674
667 if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE) 675 if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
668 return sizeof (struct srp_cmd); 676 return sizeof (struct srp_cmd);
@@ -687,8 +695,10 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
687 sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen); 695 sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen);
688 } 696 }
689 697
690 count = dma_map_sg(target->srp_host->dev->dev->dma_device, 698 dev = target->srp_host->dev;
691 scat, nents, scmnd->sc_data_direction); 699 ibdev = dev->dev;
700
701 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
692 702
693 fmt = SRP_DATA_DESC_DIRECT; 703 fmt = SRP_DATA_DESC_DIRECT;
694 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf); 704 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
@@ -702,9 +712,9 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
702 */ 712 */
703 struct srp_direct_buf *buf = (void *) cmd->add_data; 713 struct srp_direct_buf *buf = (void *) cmd->add_data;
704 714
705 buf->va = cpu_to_be64(sg_dma_address(scat)); 715 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
706 buf->key = cpu_to_be32(target->srp_host->dev->mr->rkey); 716 buf->key = cpu_to_be32(dev->mr->rkey);
707 buf->len = cpu_to_be32(sg_dma_len(scat)); 717 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
708 } else if (srp_map_fmr(target, scat, count, req, 718 } else if (srp_map_fmr(target, scat, count, req,
709 (void *) cmd->add_data)) { 719 (void *) cmd->add_data)) {
710 /* 720 /*
@@ -722,13 +732,14 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
722 count * sizeof (struct srp_direct_buf); 732 count * sizeof (struct srp_direct_buf);
723 733
724 for (i = 0; i < count; ++i) { 734 for (i = 0; i < count; ++i) {
735 unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
736
725 buf->desc_list[i].va = 737 buf->desc_list[i].va =
726 cpu_to_be64(sg_dma_address(&scat[i])); 738 cpu_to_be64(ib_sg_dma_address(ibdev, &scat[i]));
727 buf->desc_list[i].key = 739 buf->desc_list[i].key =
728 cpu_to_be32(target->srp_host->dev->mr->rkey); 740 cpu_to_be32(dev->mr->rkey);
729 buf->desc_list[i].len = 741 buf->desc_list[i].len = cpu_to_be32(dma_len);
730 cpu_to_be32(sg_dma_len(&scat[i])); 742 datalen += dma_len;
731 datalen += sg_dma_len(&scat[i]);
732 } 743 }
733 744
734 if (scmnd->sc_data_direction == DMA_TO_DEVICE) 745 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
@@ -808,13 +819,15 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
808 819
809static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) 820static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
810{ 821{
822 struct ib_device *dev;
811 struct srp_iu *iu; 823 struct srp_iu *iu;
812 u8 opcode; 824 u8 opcode;
813 825
814 iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV]; 826 iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];
815 827
816 dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma, 828 dev = target->srp_host->dev->dev;
817 target->max_ti_iu_len, DMA_FROM_DEVICE); 829 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
830 DMA_FROM_DEVICE);
818 831
819 opcode = *(u8 *) iu->buf; 832 opcode = *(u8 *) iu->buf;
820 833
@@ -850,8 +863,8 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
850 break; 863 break;
851 } 864 }
852 865
853 dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma, 866 ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
854 target->max_ti_iu_len, DMA_FROM_DEVICE); 867 DMA_FROM_DEVICE);
855} 868}
856 869
857static void srp_completion(struct ib_cq *cq, void *target_ptr) 870static void srp_completion(struct ib_cq *cq, void *target_ptr)
@@ -969,6 +982,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
969 struct srp_request *req; 982 struct srp_request *req;
970 struct srp_iu *iu; 983 struct srp_iu *iu;
971 struct srp_cmd *cmd; 984 struct srp_cmd *cmd;
985 struct ib_device *dev;
972 int len; 986 int len;
973 987
974 if (target->state == SRP_TARGET_CONNECTING) 988 if (target->state == SRP_TARGET_CONNECTING)
@@ -985,8 +999,9 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
985 if (!iu) 999 if (!iu)
986 goto err; 1000 goto err;
987 1001
988 dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma, 1002 dev = target->srp_host->dev->dev;
989 srp_max_iu_len, DMA_TO_DEVICE); 1003 ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
1004 DMA_TO_DEVICE);
990 1005
991 req = list_entry(target->free_reqs.next, struct srp_request, list); 1006 req = list_entry(target->free_reqs.next, struct srp_request, list);
992 1007
@@ -1018,8 +1033,8 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
1018 goto err_unmap; 1033 goto err_unmap;
1019 } 1034 }
1020 1035
1021 dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma, 1036 ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
1022 srp_max_iu_len, DMA_TO_DEVICE); 1037 DMA_TO_DEVICE);
1023 1038
1024 if (__srp_post_send(target, iu, len)) { 1039 if (__srp_post_send(target, iu, len)) {
1025 printk(KERN_ERR PFX "Send failed\n"); 1040 printk(KERN_ERR PFX "Send failed\n");
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index d4e35ef51374..868a540ef7cd 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -161,7 +161,7 @@ struct srp_target_port {
161}; 161};
162 162
163struct srp_iu { 163struct srp_iu {
164 dma_addr_t dma; 164 u64 dma;
165 void *buf; 165 void *buf;
166 size_t size; 166 size_t size;
167 enum dma_data_direction direction; 167 enum dma_data_direction direction;
diff --git a/drivers/input/keyboard/hilkbd.c b/drivers/input/keyboard/hilkbd.c
index 54bc569db4b0..35461eab2faf 100644
--- a/drivers/input/keyboard/hilkbd.c
+++ b/drivers/input/keyboard/hilkbd.c
@@ -23,7 +23,12 @@
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/hil.h> 25#include <linux/hil.h>
26#include <linux/io.h>
26#include <linux/spinlock.h> 27#include <linux/spinlock.h>
28#include <asm/irq.h>
29#ifdef CONFIG_HP300
30#include <asm/hwtest.h>
31#endif
27 32
28 33
29MODULE_AUTHOR("Philip Blundell, Matthew Wilcox, Helge Deller"); 34MODULE_AUTHOR("Philip Blundell, Matthew Wilcox, Helge Deller");
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 29ca0ab0acb8..6b46c9bf1d20 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -146,7 +146,7 @@ config TOUCHSCREEN_TOUCHWIN
146 146
147config TOUCHSCREEN_UCB1400 147config TOUCHSCREEN_UCB1400
148 tristate "Philips UCB1400 touchscreen" 148 tristate "Philips UCB1400 touchscreen"
149 select SND_AC97_BUS 149 select AC97_BUS
150 help 150 help
151 This enables support for the Philips UCB1400 touchscreen interface. 151 This enables support for the Philips UCB1400 touchscreen interface.
152 The UCB1400 is an AC97 audio codec. The touchscreen interface 152 The UCB1400 is an AC97 audio codec. The touchscreen interface
diff --git a/drivers/isdn/act2000/act2000_isa.c b/drivers/isdn/act2000/act2000_isa.c
index 3cac23739344..09ea50dd3459 100644
--- a/drivers/isdn/act2000/act2000_isa.c
+++ b/drivers/isdn/act2000/act2000_isa.c
@@ -408,7 +408,7 @@ act2000_isa_download(act2000_card * card, act2000_ddef __user * cb)
408 p = cblock.buffer; 408 p = cblock.buffer;
409 if (!access_ok(VERIFY_READ, p, length)) 409 if (!access_ok(VERIFY_READ, p, length))
410 return -EFAULT; 410 return -EFAULT;
411 buf = (u_char *) kmalloc(1024, GFP_KERNEL); 411 buf = kmalloc(1024, GFP_KERNEL);
412 if (!buf) 412 if (!buf)
413 return -ENOMEM; 413 return -ENOMEM;
414 timeout = 0; 414 timeout = 0;
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index 097bfa7bc323..c4d438c17dab 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -2013,7 +2013,7 @@ static int capidrv_addcontr(u16 contr, struct capi_profile *profp)
2013 strcpy(card->name, id); 2013 strcpy(card->name, id);
2014 card->contrnr = contr; 2014 card->contrnr = contr;
2015 card->nbchan = profp->nbchannel; 2015 card->nbchan = profp->nbchannel;
2016 card->bchans = (capidrv_bchan *) kmalloc(sizeof(capidrv_bchan) * card->nbchan, GFP_ATOMIC); 2016 card->bchans = kmalloc(sizeof(capidrv_bchan) * card->nbchan, GFP_ATOMIC);
2017 if (!card->bchans) { 2017 if (!card->bchans) {
2018 printk(KERN_WARNING 2018 printk(KERN_WARNING
2019 "capidrv: (%s) Could not allocate bchan-structs.\n", id); 2019 "capidrv: (%s) Could not allocate bchan-structs.\n", id);
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c
index 399b316111f7..06967da7c4a8 100644
--- a/drivers/isdn/divert/divert_procfs.c
+++ b/drivers/isdn/divert/divert_procfs.c
@@ -45,7 +45,7 @@ put_info_buffer(char *cp)
45 return; 45 return;
46 if (!*cp) 46 if (!*cp)
47 return; 47 return;
48 if (!(ib = (struct divert_info *) kmalloc(sizeof(struct divert_info) + strlen(cp), GFP_ATOMIC))) 48 if (!(ib = kmalloc(sizeof(struct divert_info) + strlen(cp), GFP_ATOMIC)))
49 return; /* no memory */ 49 return; /* no memory */
50 strcpy(ib->info_start, cp); /* set output string */ 50 strcpy(ib->info_start, cp); /* set output string */
51 ib->next = NULL; 51 ib->next = NULL;
diff --git a/drivers/isdn/divert/isdn_divert.c b/drivers/isdn/divert/isdn_divert.c
index 03319ea5aa0c..7d97d54588d9 100644
--- a/drivers/isdn/divert/isdn_divert.c
+++ b/drivers/isdn/divert/isdn_divert.c
@@ -153,7 +153,7 @@ int cf_command(int drvid, int mode,
153 *ielenp = p - ielenp - 1; /* set total IE length */ 153 *ielenp = p - ielenp - 1; /* set total IE length */
154 154
155 /* allocate mem for information struct */ 155 /* allocate mem for information struct */
156 if (!(cs = (struct call_struc *) kmalloc(sizeof(struct call_struc), GFP_ATOMIC))) 156 if (!(cs = kmalloc(sizeof(struct call_struc), GFP_ATOMIC)))
157 return(-ENOMEM); /* no memory */ 157 return(-ENOMEM); /* no memory */
158 init_timer(&cs->timer); 158 init_timer(&cs->timer);
159 cs->info[0] = '\0'; 159 cs->info[0] = '\0';
@@ -276,7 +276,7 @@ int insertrule(int idx, divert_rule *newrule)
276{ struct deflect_struc *ds,*ds1=NULL; 276{ struct deflect_struc *ds,*ds1=NULL;
277 unsigned long flags; 277 unsigned long flags;
278 278
279 if (!(ds = (struct deflect_struc *) kmalloc(sizeof(struct deflect_struc), 279 if (!(ds = kmalloc(sizeof(struct deflect_struc),
280 GFP_KERNEL))) 280 GFP_KERNEL)))
281 return(-ENOMEM); /* no memory */ 281 return(-ENOMEM); /* no memory */
282 282
@@ -451,7 +451,7 @@ static int isdn_divert_icall(isdn_ctrl *ic)
451 if (dv->rule.action == DEFLECT_PROCEED) 451 if (dv->rule.action == DEFLECT_PROCEED)
452 if ((!if_used) || ((!extern_wait_max) && (!dv->rule.waittime))) 452 if ((!if_used) || ((!extern_wait_max) && (!dv->rule.waittime)))
453 return(0); /* no external deflection needed */ 453 return(0); /* no external deflection needed */
454 if (!(cs = (struct call_struc *) kmalloc(sizeof(struct call_struc), GFP_ATOMIC))) 454 if (!(cs = kmalloc(sizeof(struct call_struc), GFP_ATOMIC)))
455 return(0); /* no memory */ 455 return(0); /* no memory */
456 init_timer(&cs->timer); 456 init_timer(&cs->timer);
457 cs->info[0] = '\0'; 457 cs->info[0] = '\0';
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 63b629b1cdb2..b5e7f9c7d74e 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -1853,20 +1853,24 @@ static int gigaset_write_cmd(struct cardstate *cs,
1853{ 1853{
1854 struct cmdbuf_t *cb; 1854 struct cmdbuf_t *cb;
1855 unsigned long flags; 1855 unsigned long flags;
1856 int status; 1856 int rc;
1857 1857
1858 gigaset_dbg_buffer(atomic_read(&cs->mstate) != MS_LOCKED ? 1858 gigaset_dbg_buffer(atomic_read(&cs->mstate) != MS_LOCKED ?
1859 DEBUG_TRANSCMD : DEBUG_LOCKCMD, 1859 DEBUG_TRANSCMD : DEBUG_LOCKCMD,
1860 "CMD Transmit", len, buf); 1860 "CMD Transmit", len, buf);
1861 1861
1862 if (len <= 0) 1862 if (len <= 0) {
1863 return 0; /* nothing to do */ 1863 /* nothing to do */
1864 rc = 0;
1865 goto notqueued;
1866 }
1864 1867
1865 if (len > IF_WRITEBUF) 1868 if (len > IF_WRITEBUF)
1866 len = IF_WRITEBUF; 1869 len = IF_WRITEBUF;
1867 if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) { 1870 if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) {
1868 dev_err(cs->dev, "%s: out of memory\n", __func__); 1871 dev_err(cs->dev, "%s: out of memory\n", __func__);
1869 return -ENOMEM; 1872 rc = -ENOMEM;
1873 goto notqueued;
1870 } 1874 }
1871 1875
1872 memcpy(cb->buf, buf, len); 1876 memcpy(cb->buf, buf, len);
@@ -1891,11 +1895,21 @@ static int gigaset_write_cmd(struct cardstate *cs,
1891 if (unlikely(!cs->connected)) { 1895 if (unlikely(!cs->connected)) {
1892 spin_unlock_irqrestore(&cs->lock, flags); 1896 spin_unlock_irqrestore(&cs->lock, flags);
1893 gig_dbg(DEBUG_USBREQ, "%s: not connected", __func__); 1897 gig_dbg(DEBUG_USBREQ, "%s: not connected", __func__);
1898 /* flush command queue */
1899 spin_lock_irqsave(&cs->cmdlock, flags);
1900 while (cs->cmdbuf != NULL)
1901 complete_cb(cs);
1902 spin_unlock_irqrestore(&cs->cmdlock, flags);
1894 return -ENODEV; 1903 return -ENODEV;
1895 } 1904 }
1896 status = start_cbsend(cs); 1905 rc = start_cbsend(cs);
1897 spin_unlock_irqrestore(&cs->lock, flags); 1906 spin_unlock_irqrestore(&cs->lock, flags);
1898 return status < 0 ? status : len; 1907 return rc < 0 ? rc : len;
1908
1909notqueued: /* request handled without queuing */
1910 if (wake_tasklet)
1911 tasklet_schedule(wake_tasklet);
1912 return rc;
1899} 1913}
1900 1914
1901/* gigaset_write_room 1915/* gigaset_write_room
@@ -1964,20 +1978,15 @@ static int gigaset_freebcshw(struct bc_state *bcs)
1964 1978
1965 /* kill URBs and tasklets before freeing - better safe than sorry */ 1979 /* kill URBs and tasklets before freeing - better safe than sorry */
1966 atomic_set(&ubc->running, 0); 1980 atomic_set(&ubc->running, 0);
1967 for (i = 0; i < BAS_OUTURBS; ++i) 1981 gig_dbg(DEBUG_INIT, "%s: killing iso URBs", __func__);
1968 if (ubc->isoouturbs[i].urb) { 1982 for (i = 0; i < BAS_OUTURBS; ++i) {
1969 gig_dbg(DEBUG_INIT, "%s: killing iso out URB %d", 1983 usb_kill_urb(ubc->isoouturbs[i].urb);
1970 __func__, i); 1984 usb_free_urb(ubc->isoouturbs[i].urb);
1971 usb_kill_urb(ubc->isoouturbs[i].urb); 1985 }
1972 usb_free_urb(ubc->isoouturbs[i].urb); 1986 for (i = 0; i < BAS_INURBS; ++i) {
1973 } 1987 usb_kill_urb(ubc->isoinurbs[i]);
1974 for (i = 0; i < BAS_INURBS; ++i) 1988 usb_free_urb(ubc->isoinurbs[i]);
1975 if (ubc->isoinurbs[i]) { 1989 }
1976 gig_dbg(DEBUG_INIT, "%s: killing iso in URB %d",
1977 __func__, i);
1978 usb_kill_urb(ubc->isoinurbs[i]);
1979 usb_free_urb(ubc->isoinurbs[i]);
1980 }
1981 tasklet_kill(&ubc->sent_tasklet); 1990 tasklet_kill(&ubc->sent_tasklet);
1982 tasklet_kill(&ubc->rcvd_tasklet); 1991 tasklet_kill(&ubc->rcvd_tasklet);
1983 kfree(ubc->isooutbuf); 1992 kfree(ubc->isooutbuf);
@@ -2099,55 +2108,32 @@ static void freeurbs(struct cardstate *cs)
2099 struct bas_bc_state *ubc; 2108 struct bas_bc_state *ubc;
2100 int i, j; 2109 int i, j;
2101 2110
2111 gig_dbg(DEBUG_INIT, "%s: killing URBs", __func__);
2102 for (j = 0; j < 2; ++j) { 2112 for (j = 0; j < 2; ++j) {
2103 ubc = cs->bcs[j].hw.bas; 2113 ubc = cs->bcs[j].hw.bas;
2104 for (i = 0; i < BAS_OUTURBS; ++i) 2114 for (i = 0; i < BAS_OUTURBS; ++i) {
2105 if (ubc->isoouturbs[i].urb) { 2115 usb_kill_urb(ubc->isoouturbs[i].urb);
2106 usb_kill_urb(ubc->isoouturbs[i].urb); 2116 usb_free_urb(ubc->isoouturbs[i].urb);
2107 gig_dbg(DEBUG_INIT, 2117 ubc->isoouturbs[i].urb = NULL;
2108 "%s: isoc output URB %d/%d unlinked", 2118 }
2109 __func__, j, i); 2119 for (i = 0; i < BAS_INURBS; ++i) {
2110 usb_free_urb(ubc->isoouturbs[i].urb); 2120 usb_kill_urb(ubc->isoinurbs[i]);
2111 ubc->isoouturbs[i].urb = NULL; 2121 usb_free_urb(ubc->isoinurbs[i]);
2112 } 2122 ubc->isoinurbs[i] = NULL;
2113 for (i = 0; i < BAS_INURBS; ++i) 2123 }
2114 if (ubc->isoinurbs[i]) {
2115 usb_kill_urb(ubc->isoinurbs[i]);
2116 gig_dbg(DEBUG_INIT,
2117 "%s: isoc input URB %d/%d unlinked",
2118 __func__, j, i);
2119 usb_free_urb(ubc->isoinurbs[i]);
2120 ubc->isoinurbs[i] = NULL;
2121 }
2122 }
2123 if (ucs->urb_int_in) {
2124 usb_kill_urb(ucs->urb_int_in);
2125 gig_dbg(DEBUG_INIT, "%s: interrupt input URB unlinked",
2126 __func__);
2127 usb_free_urb(ucs->urb_int_in);
2128 ucs->urb_int_in = NULL;
2129 }
2130 if (ucs->urb_cmd_out) {
2131 usb_kill_urb(ucs->urb_cmd_out);
2132 gig_dbg(DEBUG_INIT, "%s: command output URB unlinked",
2133 __func__);
2134 usb_free_urb(ucs->urb_cmd_out);
2135 ucs->urb_cmd_out = NULL;
2136 }
2137 if (ucs->urb_cmd_in) {
2138 usb_kill_urb(ucs->urb_cmd_in);
2139 gig_dbg(DEBUG_INIT, "%s: command input URB unlinked",
2140 __func__);
2141 usb_free_urb(ucs->urb_cmd_in);
2142 ucs->urb_cmd_in = NULL;
2143 }
2144 if (ucs->urb_ctrl) {
2145 usb_kill_urb(ucs->urb_ctrl);
2146 gig_dbg(DEBUG_INIT, "%s: control output URB unlinked",
2147 __func__);
2148 usb_free_urb(ucs->urb_ctrl);
2149 ucs->urb_ctrl = NULL;
2150 } 2124 }
2125 usb_kill_urb(ucs->urb_int_in);
2126 usb_free_urb(ucs->urb_int_in);
2127 ucs->urb_int_in = NULL;
2128 usb_kill_urb(ucs->urb_cmd_out);
2129 usb_free_urb(ucs->urb_cmd_out);
2130 ucs->urb_cmd_out = NULL;
2131 usb_kill_urb(ucs->urb_cmd_in);
2132 usb_free_urb(ucs->urb_cmd_in);
2133 ucs->urb_cmd_in = NULL;
2134 usb_kill_urb(ucs->urb_ctrl);
2135 usb_free_urb(ucs->urb_ctrl);
2136 ucs->urb_ctrl = NULL;
2151} 2137}
2152 2138
2153/* gigaset_probe 2139/* gigaset_probe
diff --git a/drivers/isdn/hisax/Kconfig b/drivers/isdn/hisax/Kconfig
index 6fa12cc8e4ff..34ab5f7dcabc 100644
--- a/drivers/isdn/hisax/Kconfig
+++ b/drivers/isdn/hisax/Kconfig
@@ -110,7 +110,7 @@ config HISAX_16_3
110 110
111config HISAX_TELESPCI 111config HISAX_TELESPCI
112 bool "Teles PCI" 112 bool "Teles PCI"
113 depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || FRV)) 113 depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV))
114 help 114 help
115 This enables HiSax support for the Teles PCI. 115 This enables HiSax support for the Teles PCI.
116 See <file:Documentation/isdn/README.HiSax> on how to configure it. 116 See <file:Documentation/isdn/README.HiSax> on how to configure it.
@@ -238,7 +238,7 @@ config HISAX_MIC
238 238
239config HISAX_NETJET 239config HISAX_NETJET
240 bool "NETjet card" 240 bool "NETjet card"
241 depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || FRV)) 241 depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV))
242 help 242 help
243 This enables HiSax support for the NetJet from Traverse 243 This enables HiSax support for the NetJet from Traverse
244 Technologies. 244 Technologies.
@@ -249,7 +249,7 @@ config HISAX_NETJET
249 249
250config HISAX_NETJET_U 250config HISAX_NETJET_U
251 bool "NETspider U card" 251 bool "NETspider U card"
252 depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || FRV)) 252 depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV))
253 help 253 help
254 This enables HiSax support for the Netspider U interface ISDN card 254 This enables HiSax support for the Netspider U interface ISDN card
255 from Traverse Technologies. 255 from Traverse Technologies.
@@ -317,7 +317,7 @@ config HISAX_GAZEL
317 317
318config HISAX_HFC_PCI 318config HISAX_HFC_PCI
319 bool "HFC PCI-Bus cards" 319 bool "HFC PCI-Bus cards"
320 depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || FRV)) 320 depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV))
321 help 321 help
322 This enables HiSax support for the HFC-S PCI 2BDS0 based cards. 322 This enables HiSax support for the HFC-S PCI 2BDS0 based cards.
323 323
@@ -344,7 +344,7 @@ config HISAX_HFC_SX
344 344
345config HISAX_ENTERNOW_PCI 345config HISAX_ENTERNOW_PCI
346 bool "Formula-n enter:now PCI card" 346 bool "Formula-n enter:now PCI card"
347 depends on HISAX_NETJET && PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || FRV)) 347 depends on HISAX_NETJET && PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV))
348 help 348 help
349 This enables HiSax support for the Formula-n enter:now PCI 349 This enables HiSax support for the Formula-n enter:now PCI
350 ISDN card. 350 ISDN card.
diff --git a/drivers/isdn/hysdn/hysdn_procconf.c b/drivers/isdn/hysdn/hysdn_procconf.c
index 8e2b03889f3c..94a935089119 100644
--- a/drivers/isdn/hysdn/hysdn_procconf.c
+++ b/drivers/isdn/hysdn/hysdn_procconf.c
@@ -275,7 +275,7 @@ hysdn_conf_open(struct inode *ino, struct file *filep)
275 } else if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) { 275 } else if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
276 /* read access -> output card info data */ 276 /* read access -> output card info data */
277 277
278 if (!(tmp = (char *) kmalloc(INFO_OUT_LEN * 2 + 2, GFP_KERNEL))) { 278 if (!(tmp = kmalloc(INFO_OUT_LEN * 2 + 2, GFP_KERNEL))) {
279 unlock_kernel(); 279 unlock_kernel();
280 return (-EFAULT); /* out of memory */ 280 return (-EFAULT); /* out of memory */
281 } 281 }
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c
index f241f5e551cb..375d956884d7 100644
--- a/drivers/isdn/hysdn/hysdn_proclog.c
+++ b/drivers/isdn/hysdn/hysdn_proclog.c
@@ -111,7 +111,7 @@ put_log_buffer(hysdn_card * card, char *cp)
111 if (pd->if_used <= 0) 111 if (pd->if_used <= 0)
112 return; /* no open file for read */ 112 return; /* no open file for read */
113 113
114 if (!(ib = (struct log_data *) kmalloc(sizeof(struct log_data) + strlen(cp), GFP_ATOMIC))) 114 if (!(ib = kmalloc(sizeof(struct log_data) + strlen(cp), GFP_ATOMIC)))
115 return; /* no memory */ 115 return; /* no memory */
116 strcpy(ib->log_start, cp); /* set output string */ 116 strcpy(ib->log_start, cp); /* set output string */
117 ib->next = NULL; 117 ib->next = NULL;
diff --git a/drivers/isdn/i4l/isdn_audio.c b/drivers/isdn/i4l/isdn_audio.c
index 2cc56d6a9fae..fb350c567c6b 100644
--- a/drivers/isdn/i4l/isdn_audio.c
+++ b/drivers/isdn/i4l/isdn_audio.c
@@ -328,7 +328,7 @@ adpcm_state *
328isdn_audio_adpcm_init(adpcm_state * s, int nbits) 328isdn_audio_adpcm_init(adpcm_state * s, int nbits)
329{ 329{
330 if (!s) 330 if (!s)
331 s = (adpcm_state *) kmalloc(sizeof(adpcm_state), GFP_ATOMIC); 331 s = kmalloc(sizeof(adpcm_state), GFP_ATOMIC);
332 if (s) { 332 if (s) {
333 s->a = 0; 333 s->a = 0;
334 s->d = 5; 334 s->d = 5;
@@ -343,7 +343,7 @@ dtmf_state *
343isdn_audio_dtmf_init(dtmf_state * s) 343isdn_audio_dtmf_init(dtmf_state * s)
344{ 344{
345 if (!s) 345 if (!s)
346 s = (dtmf_state *) kmalloc(sizeof(dtmf_state), GFP_ATOMIC); 346 s = kmalloc(sizeof(dtmf_state), GFP_ATOMIC);
347 if (s) { 347 if (s) {
348 s->idx = 0; 348 s->idx = 0;
349 s->last = ' '; 349 s->last = ' ';
@@ -621,7 +621,7 @@ silence_state *
621isdn_audio_silence_init(silence_state * s) 621isdn_audio_silence_init(silence_state * s)
622{ 622{
623 if (!s) 623 if (!s)
624 s = (silence_state *) kmalloc(sizeof(silence_state), GFP_ATOMIC); 624 s = kmalloc(sizeof(silence_state), GFP_ATOMIC);
625 if (s) { 625 if (s) {
626 s->idx = 0; 626 s->idx = 0;
627 s->state = 0; 627 s->state = 0;
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index c36c817578cb..838b3734e2b6 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -2948,7 +2948,7 @@ isdn_net_addphone(isdn_net_ioctl_phone * phone)
2948 isdn_net_phone *n; 2948 isdn_net_phone *n;
2949 2949
2950 if (p) { 2950 if (p) {
2951 if (!(n = (isdn_net_phone *) kmalloc(sizeof(isdn_net_phone), GFP_KERNEL))) 2951 if (!(n = kmalloc(sizeof(isdn_net_phone), GFP_KERNEL)))
2952 return -ENOMEM; 2952 return -ENOMEM;
2953 strcpy(n->num, phone->phone); 2953 strcpy(n->num, phone->phone);
2954 n->next = p->local->phone[phone->outgoing & 1]; 2954 n->next = p->local->phone[phone->outgoing & 1];
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index 43811795b46b..1726131b20be 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -717,7 +717,7 @@ isdn_ppp_fill_rq(unsigned char *buf, int len, int proto, int slot)
717 printk(KERN_DEBUG "ippp: device not activated.\n"); 717 printk(KERN_DEBUG "ippp: device not activated.\n");
718 return 0; 718 return 0;
719 } 719 }
720 nbuf = (unsigned char *) kmalloc(len + 4, GFP_ATOMIC); 720 nbuf = kmalloc(len + 4, GFP_ATOMIC);
721 if (!nbuf) { 721 if (!nbuf) {
722 printk(KERN_WARNING "ippp: Can't alloc buf\n"); 722 printk(KERN_WARNING "ippp: Can't alloc buf\n");
723 return 0; 723 return 0;
diff --git a/drivers/isdn/pcbit/layer2.c b/drivers/isdn/pcbit/layer2.c
index 6ff85574e941..eafcce5e656a 100644
--- a/drivers/isdn/pcbit/layer2.c
+++ b/drivers/isdn/pcbit/layer2.c
@@ -100,7 +100,7 @@ pcbit_l2_write(struct pcbit_dev *dev, ulong msg, ushort refnum,
100 dev_kfree_skb(skb); 100 dev_kfree_skb(skb);
101 return -1; 101 return -1;
102 } 102 }
103 if ((frame = (struct frame_buf *) kmalloc(sizeof(struct frame_buf), 103 if ((frame = kmalloc(sizeof(struct frame_buf),
104 GFP_ATOMIC)) == NULL) { 104 GFP_ATOMIC)) == NULL) {
105 printk(KERN_WARNING "pcbit_2_write: kmalloc failed\n"); 105 printk(KERN_WARNING "pcbit_2_write: kmalloc failed\n");
106 dev_kfree_skb(skb); 106 dev_kfree_skb(skb);
diff --git a/drivers/kvm/Kconfig b/drivers/kvm/Kconfig
new file mode 100644
index 000000000000..703cc88d1ef9
--- /dev/null
+++ b/drivers/kvm/Kconfig
@@ -0,0 +1,37 @@
1#
2# KVM configuration
3#
4menu "Virtualization"
5
6config KVM
7 tristate "Kernel-based Virtual Machine (KVM) support"
8 depends on X86 && EXPERIMENTAL
9 ---help---
10 Support hosting fully virtualized guest machines using hardware
11 virtualization extensions. You will need a fairly recent
12 processor equipped with virtualization extensions. You will also
13 need to select one or more of the processor modules below.
14
15 This module provides access to the hardware capabilities through
16 a character device node named /dev/kvm.
17
18 To compile this as a module, choose M here: the module
19 will be called kvm.
20
21 If unsure, say N.
22
23config KVM_INTEL
24 tristate "KVM for Intel processors support"
25 depends on KVM
26 ---help---
27 Provides support for KVM on Intel processors equipped with the VT
28 extensions.
29
30config KVM_AMD
31 tristate "KVM for AMD processors support"
32 depends on KVM
33 ---help---
34 Provides support for KVM on AMD processors equipped with the AMD-V
35 (SVM) extensions.
36
37endmenu
diff --git a/drivers/kvm/Makefile b/drivers/kvm/Makefile
new file mode 100644
index 000000000000..c0a789fa9d65
--- /dev/null
+++ b/drivers/kvm/Makefile
@@ -0,0 +1,10 @@
1#
2# Makefile for Kernel-based Virtual Machine module
3#
4
5kvm-objs := kvm_main.o mmu.o x86_emulate.o
6obj-$(CONFIG_KVM) += kvm.o
7kvm-intel-objs = vmx.o
8obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
9kvm-amd-objs = svm.o
10obj-$(CONFIG_KVM_AMD) += kvm-amd.o
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
new file mode 100644
index 000000000000..930e04ce1af6
--- /dev/null
+++ b/drivers/kvm/kvm.h
@@ -0,0 +1,551 @@
1#ifndef __KVM_H
2#define __KVM_H
3
4/*
5 * This work is licensed under the terms of the GNU GPL, version 2. See
6 * the COPYING file in the top-level directory.
7 */
8
9#include <linux/types.h>
10#include <linux/list.h>
11#include <linux/mutex.h>
12#include <linux/spinlock.h>
13#include <linux/mm.h>
14
15#include "vmx.h"
16#include <linux/kvm.h>
17
18#define CR0_PE_MASK (1ULL << 0)
19#define CR0_TS_MASK (1ULL << 3)
20#define CR0_NE_MASK (1ULL << 5)
21#define CR0_WP_MASK (1ULL << 16)
22#define CR0_NW_MASK (1ULL << 29)
23#define CR0_CD_MASK (1ULL << 30)
24#define CR0_PG_MASK (1ULL << 31)
25
26#define CR3_WPT_MASK (1ULL << 3)
27#define CR3_PCD_MASK (1ULL << 4)
28
29#define CR3_RESEVED_BITS 0x07ULL
30#define CR3_L_MODE_RESEVED_BITS (~((1ULL << 40) - 1) | 0x0fe7ULL)
31#define CR3_FLAGS_MASK ((1ULL << 5) - 1)
32
33#define CR4_VME_MASK (1ULL << 0)
34#define CR4_PSE_MASK (1ULL << 4)
35#define CR4_PAE_MASK (1ULL << 5)
36#define CR4_PGE_MASK (1ULL << 7)
37#define CR4_VMXE_MASK (1ULL << 13)
38
39#define KVM_GUEST_CR0_MASK \
40 (CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK \
41 | CR0_NW_MASK | CR0_CD_MASK)
42#define KVM_VM_CR0_ALWAYS_ON \
43 (CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK)
44#define KVM_GUEST_CR4_MASK \
45 (CR4_PSE_MASK | CR4_PAE_MASK | CR4_PGE_MASK | CR4_VMXE_MASK | CR4_VME_MASK)
46#define KVM_PMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK)
47#define KVM_RMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK | CR4_VME_MASK)
48
49#define INVALID_PAGE (~(hpa_t)0)
50#define UNMAPPED_GVA (~(gpa_t)0)
51
52#define KVM_MAX_VCPUS 1
53#define KVM_MEMORY_SLOTS 4
54#define KVM_NUM_MMU_PAGES 256
55
56#define FX_IMAGE_SIZE 512
57#define FX_IMAGE_ALIGN 16
58#define FX_BUF_SIZE (2 * FX_IMAGE_SIZE + FX_IMAGE_ALIGN)
59
60#define DE_VECTOR 0
61#define DF_VECTOR 8
62#define TS_VECTOR 10
63#define NP_VECTOR 11
64#define SS_VECTOR 12
65#define GP_VECTOR 13
66#define PF_VECTOR 14
67
68#define SELECTOR_TI_MASK (1 << 2)
69#define SELECTOR_RPL_MASK 0x03
70
71#define IOPL_SHIFT 12
72
73/*
74 * Address types:
75 *
76 * gva - guest virtual address
77 * gpa - guest physical address
78 * gfn - guest frame number
79 * hva - host virtual address
80 * hpa - host physical address
81 * hfn - host frame number
82 */
83
84typedef unsigned long gva_t;
85typedef u64 gpa_t;
86typedef unsigned long gfn_t;
87
88typedef unsigned long hva_t;
89typedef u64 hpa_t;
90typedef unsigned long hfn_t;
91
92struct kvm_mmu_page {
93 struct list_head link;
94 hpa_t page_hpa;
95 unsigned long slot_bitmap; /* One bit set per slot which has memory
96 * in this shadow page.
97 */
98 int global; /* Set if all ptes in this page are global */
99 u64 *parent_pte;
100};
101
102struct vmcs {
103 u32 revision_id;
104 u32 abort;
105 char data[0];
106};
107
108#define vmx_msr_entry kvm_msr_entry
109
110struct kvm_vcpu;
111
112/*
113 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
114 * 32-bit). The kvm_mmu structure abstracts the details of the current mmu
115 * mode.
116 */
117struct kvm_mmu {
118 void (*new_cr3)(struct kvm_vcpu *vcpu);
119 int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
120 void (*inval_page)(struct kvm_vcpu *vcpu, gva_t gva);
121 void (*free)(struct kvm_vcpu *vcpu);
122 gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
123 hpa_t root_hpa;
124 int root_level;
125 int shadow_root_level;
126};
127
128struct kvm_guest_debug {
129 int enabled;
130 unsigned long bp[4];
131 int singlestep;
132};
133
134enum {
135 VCPU_REGS_RAX = 0,
136 VCPU_REGS_RCX = 1,
137 VCPU_REGS_RDX = 2,
138 VCPU_REGS_RBX = 3,
139 VCPU_REGS_RSP = 4,
140 VCPU_REGS_RBP = 5,
141 VCPU_REGS_RSI = 6,
142 VCPU_REGS_RDI = 7,
143#ifdef CONFIG_X86_64
144 VCPU_REGS_R8 = 8,
145 VCPU_REGS_R9 = 9,
146 VCPU_REGS_R10 = 10,
147 VCPU_REGS_R11 = 11,
148 VCPU_REGS_R12 = 12,
149 VCPU_REGS_R13 = 13,
150 VCPU_REGS_R14 = 14,
151 VCPU_REGS_R15 = 15,
152#endif
153 NR_VCPU_REGS
154};
155
156enum {
157 VCPU_SREG_CS,
158 VCPU_SREG_DS,
159 VCPU_SREG_ES,
160 VCPU_SREG_FS,
161 VCPU_SREG_GS,
162 VCPU_SREG_SS,
163 VCPU_SREG_TR,
164 VCPU_SREG_LDTR,
165};
166
167struct kvm_vcpu {
168 struct kvm *kvm;
169 union {
170 struct vmcs *vmcs;
171 struct vcpu_svm *svm;
172 };
173 struct mutex mutex;
174 int cpu;
175 int launched;
176 unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
177#define NR_IRQ_WORDS KVM_IRQ_BITMAP_SIZE(unsigned long)
178 unsigned long irq_pending[NR_IRQ_WORDS];
179 unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
180 unsigned long rip; /* needs vcpu_load_rsp_rip() */
181
182 unsigned long cr0;
183 unsigned long cr2;
184 unsigned long cr3;
185 unsigned long cr4;
186 unsigned long cr8;
187 u64 shadow_efer;
188 u64 apic_base;
189 int nmsrs;
190 struct vmx_msr_entry *guest_msrs;
191 struct vmx_msr_entry *host_msrs;
192
193 struct list_head free_pages;
194 struct kvm_mmu_page page_header_buf[KVM_NUM_MMU_PAGES];
195 struct kvm_mmu mmu;
196
197 struct kvm_guest_debug guest_debug;
198
199 char fx_buf[FX_BUF_SIZE];
200 char *host_fx_image;
201 char *guest_fx_image;
202
203 int mmio_needed;
204 int mmio_read_completed;
205 int mmio_is_write;
206 int mmio_size;
207 unsigned char mmio_data[8];
208 gpa_t mmio_phys_addr;
209
210 struct {
211 int active;
212 u8 save_iopl;
213 struct kvm_save_segment {
214 u16 selector;
215 unsigned long base;
216 u32 limit;
217 u32 ar;
218 } tr, es, ds, fs, gs;
219 } rmode;
220};
221
222struct kvm_memory_slot {
223 gfn_t base_gfn;
224 unsigned long npages;
225 unsigned long flags;
226 struct page **phys_mem;
227 unsigned long *dirty_bitmap;
228};
229
230struct kvm {
231 spinlock_t lock; /* protects everything except vcpus */
232 int nmemslots;
233 struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS];
234 struct list_head active_mmu_pages;
235 struct kvm_vcpu vcpus[KVM_MAX_VCPUS];
236 int memory_config_version;
237 int busy;
238};
239
240struct kvm_stat {
241 u32 pf_fixed;
242 u32 pf_guest;
243 u32 tlb_flush;
244 u32 invlpg;
245
246 u32 exits;
247 u32 io_exits;
248 u32 mmio_exits;
249 u32 signal_exits;
250 u32 irq_exits;
251};
252
253struct descriptor_table {
254 u16 limit;
255 unsigned long base;
256} __attribute__((packed));
257
258struct kvm_arch_ops {
259 int (*cpu_has_kvm_support)(void); /* __init */
260 int (*disabled_by_bios)(void); /* __init */
261 void (*hardware_enable)(void *dummy); /* __init */
262 void (*hardware_disable)(void *dummy);
263 int (*hardware_setup)(void); /* __init */
264 void (*hardware_unsetup)(void); /* __exit */
265
266 int (*vcpu_create)(struct kvm_vcpu *vcpu);
267 void (*vcpu_free)(struct kvm_vcpu *vcpu);
268
269 struct kvm_vcpu *(*vcpu_load)(struct kvm_vcpu *vcpu);
270 void (*vcpu_put)(struct kvm_vcpu *vcpu);
271
272 int (*set_guest_debug)(struct kvm_vcpu *vcpu,
273 struct kvm_debug_guest *dbg);
274 int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
275 int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
276 u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
277 void (*get_segment)(struct kvm_vcpu *vcpu,
278 struct kvm_segment *var, int seg);
279 void (*set_segment)(struct kvm_vcpu *vcpu,
280 struct kvm_segment *var, int seg);
281 int (*is_long_mode)(struct kvm_vcpu *vcpu);
282 void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
283 void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
284 void (*set_cr0_no_modeswitch)(struct kvm_vcpu *vcpu,
285 unsigned long cr0);
286 void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
287 void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
288 void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
289 void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
290 void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
291 void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
292 void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
293 unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
294 void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
295 int *exception);
296 void (*cache_regs)(struct kvm_vcpu *vcpu);
297 void (*decache_regs)(struct kvm_vcpu *vcpu);
298 unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
299 void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
300
301 void (*invlpg)(struct kvm_vcpu *vcpu, gva_t addr);
302 void (*tlb_flush)(struct kvm_vcpu *vcpu);
303 void (*inject_page_fault)(struct kvm_vcpu *vcpu,
304 unsigned long addr, u32 err_code);
305
306 void (*inject_gp)(struct kvm_vcpu *vcpu, unsigned err_code);
307
308 int (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
309 int (*vcpu_setup)(struct kvm_vcpu *vcpu);
310 void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
311};
312
313extern struct kvm_stat kvm_stat;
314extern struct kvm_arch_ops *kvm_arch_ops;
315
316#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
317#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)
318
319int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module);
320void kvm_exit_arch(void);
321
322void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
323int kvm_mmu_init(struct kvm_vcpu *vcpu);
324
325int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
326void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
327
328hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa);
329#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
330#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
/* An hpa with the top bit set (HPA_ERR_MASK) encodes a translation failure. */
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
332hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva);
333
334void kvm_emulator_want_group7_invlpg(void);
335
336extern hpa_t bad_page_address;
337
/* Map a guest frame number to its backing host page within @slot.
 * Caller must ensure gfn lies inside the slot's range; no bounds check here. */
static inline struct page *gfn_to_page(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return slot->phys_mem[gfn - slot->base_gfn];
}
342
343struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
344void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
345
346enum emulation_result {
347 EMULATE_DONE, /* no further processing */
348 EMULATE_DO_MMIO, /* kvm_run filled with mmio request */
349 EMULATE_FAIL, /* can't emulate this instruction */
350};
351
352int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
353 unsigned long cr2, u16 error_code);
354void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
355void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
356void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
357 unsigned long *rflags);
358
359unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
360void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
361 unsigned long *rflags);
362
363struct x86_emulate_ctxt;
364
365int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
366int emulate_clts(struct kvm_vcpu *vcpu);
367int emulator_get_dr(struct x86_emulate_ctxt* ctxt, int dr,
368 unsigned long *dest);
369int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
370 unsigned long value);
371
372void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
373void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr0);
374void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr0);
375void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr0);
376void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
377
378#ifdef CONFIG_X86_64
379void set_efer(struct kvm_vcpu *vcpu, u64 efer);
380#endif
381
382void fx_init(struct kvm_vcpu *vcpu);
383
384void load_msrs(struct vmx_msr_entry *e, int n);
385void save_msrs(struct vmx_msr_entry *e, int n);
386void kvm_resched(struct kvm_vcpu *vcpu);
387
388int kvm_read_guest(struct kvm_vcpu *vcpu,
389 gva_t addr,
390 unsigned long size,
391 void *dest);
392
393int kvm_write_guest(struct kvm_vcpu *vcpu,
394 gva_t addr,
395 unsigned long size,
396 void *data);
397
398unsigned long segment_base(u16 selector);
399
400static inline struct page *_gfn_to_page(struct kvm *kvm, gfn_t gfn)
401{
402 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
403 return (slot) ? slot->phys_mem[gfn - slot->base_gfn] : NULL;
404}
405
/* True if the guest has CR4.PAE set (physical address extension). */
static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return vcpu->cr4 & CR4_PAE_MASK;
}
410
/* True if the guest has CR4.PSE set (page size extension). */
static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return vcpu->cr4 & CR4_PSE_MASK;
}
415
/* True if the guest has paging enabled (CR0.PG). */
static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return vcpu->cr0 & CR0_PG_MASK;
}
420
/* Index of @slot within kvm->memslots[], derived by pointer arithmetic.
 * @slot must point into that array. */
static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	return slot - kvm->memslots;
}
425
/* Recover the kvm_mmu_page tracking structure for a shadow page from the
 * struct page's private field (set when the shadow page was allocated —
 * presumably by the MMU code; not visible in this header). */
static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page->private;
}
432
/* Read the current FS segment selector. */
static inline u16 read_fs(void)
{
	u16 seg;
	asm ("mov %%fs, %0" : "=g"(seg));
	return seg;
}
439
/* Read the current GS segment selector. */
static inline u16 read_gs(void)
{
	u16 seg;
	asm ("mov %%gs, %0" : "=g"(seg));
	return seg;
}
446
/* Read the current LDT selector (SLDT instruction). */
static inline u16 read_ldt(void)
{
	u16 ldt;
	asm ("sldt %0" : "=g"(ldt));
	return ldt;
}
453
/* Load @sel into the FS segment register. */
static inline void load_fs(u16 sel)
{
	asm ("mov %0, %%fs" : : "rm"(sel));
}
458
/* Load @sel into the GS segment register. */
static inline void load_gs(u16 sel)
{
	asm ("mov %0, %%gs" : : "rm"(sel));
}
463
464#ifndef load_ldt
/* Load @sel as the LDT selector (LLDT instruction). */
static inline void load_ldt(u16 sel)
{
	asm ("lldt %0" : : "g"(sel));
}
469#endif
470
/* Store the host IDT limit/base into @table (SIDT instruction). */
static inline void get_idt(struct descriptor_table *table)
{
	asm ("sidt %0" : "=m"(*table));
}
475
/* Store the host GDT limit/base into @table (SGDT instruction). */
static inline void get_gdt(struct descriptor_table *table)
{
	asm ("sgdt %0" : "=m"(*table));
}
480
/* Base address of the current task register (TR) segment, resolved
 * through the descriptor tables via segment_base(). */
static inline unsigned long read_tr_base(void)
{
	u16 tr;
	asm ("str %0" : "=g"(tr));
	return segment_base(tr);
}
487
488#ifdef CONFIG_X86_64
/* Read a model-specific register (64-bit build only). */
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
496#endif
497
/* Save FPU/SSE state to @image via FXSAVE.
 * NOTE(review): FXSAVE requires a 16-byte-aligned 512-byte buffer — callers
 * use FX_IMAGE_ALIGN/FX_IMAGE_SIZE for this; confirm at call sites. */
static inline void fx_save(void *image)
{
	asm ("fxsave (%0)":: "r" (image));
}
502
/* Restore FPU/SSE state from @image via FXRSTOR (same alignment rules as
 * fx_save). */
static inline void fx_restore(void *image)
{
	asm ("fxrstor (%0)":: "r" (image));
}
507
/* Reset the FPU to its power-on default state (FINIT). */
static inline void fpu_init(void)
{
	asm ("finit");
}
512
/* Initial RDX value presented to the guest after reset: family/model
 * identification for a P6-class CPU. */
static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}
517
518#define ASM_VMX_VMCLEAR_RAX ".byte 0x66, 0x0f, 0xc7, 0x30"
519#define ASM_VMX_VMLAUNCH ".byte 0x0f, 0x01, 0xc2"
520#define ASM_VMX_VMRESUME ".byte 0x0f, 0x01, 0xc3"
521#define ASM_VMX_VMPTRLD_RAX ".byte 0x0f, 0xc7, 0x30"
522#define ASM_VMX_VMREAD_RDX_RAX ".byte 0x0f, 0x78, 0xd0"
523#define ASM_VMX_VMWRITE_RAX_RDX ".byte 0x0f, 0x79, 0xd0"
524#define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4"
525#define ASM_VMX_VMXOFF ".byte 0x0f, 0x01, 0xc4"
526#define ASM_VMX_VMXON_RAX ".byte 0xf3, 0x0f, 0xc7, 0x30"
527
528#define MSR_IA32_TIME_STAMP_COUNTER 0x010
529
530#define TSS_IOPB_BASE_OFFSET 0x66
531#define TSS_BASE_SIZE 0x68
532#define TSS_IOPB_SIZE (65536 / 8)
533#define TSS_REDIRECTION_SIZE (256 / 8)
534#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
535
536#ifdef CONFIG_X86_64
537
538/*
539 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64. Therefore
540 * we need to allocate shadow page tables in the first 4GB of memory, which
541 * happens to fit the DMA32 zone.
542 */
543#define GFP_KVM_MMU (GFP_KERNEL | __GFP_DMA32)
544
545#else
546
547#define GFP_KVM_MMU GFP_KERNEL
548
549#endif
550
551#endif
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
new file mode 100644
index 000000000000..fd1bb870545c
--- /dev/null
+++ b/drivers/kvm/kvm_main.c
@@ -0,0 +1,1917 @@
1/*
2 * Kernel-based Virtual Machine driver for Linux
3 *
4 * This module enables machines with Intel VT-x extensions to run virtual
5 * machines without emulation or binary translation.
6 *
7 * Copyright (C) 2006 Qumranet, Inc.
8 *
9 * Authors:
10 * Avi Kivity <avi@qumranet.com>
11 * Yaniv Kamay <yaniv@qumranet.com>
12 *
13 * This work is licensed under the terms of the GNU GPL, version 2. See
14 * the COPYING file in the top-level directory.
15 *
16 */
17
18#include "kvm.h"
19
20#include <linux/kvm.h>
21#include <linux/module.h>
22#include <linux/errno.h>
23#include <asm/processor.h>
24#include <linux/percpu.h>
25#include <linux/gfp.h>
26#include <asm/msr.h>
27#include <linux/mm.h>
28#include <linux/miscdevice.h>
29#include <linux/vmalloc.h>
30#include <asm/uaccess.h>
31#include <linux/reboot.h>
32#include <asm/io.h>
33#include <linux/debugfs.h>
34#include <linux/highmem.h>
35#include <linux/file.h>
36#include <asm/desc.h>
37
38#include "x86_emulate.h"
39#include "segment_descriptor.h"
40
41MODULE_AUTHOR("Qumranet");
42MODULE_LICENSE("GPL");
43
44struct kvm_arch_ops *kvm_arch_ops;
45struct kvm_stat kvm_stat;
46EXPORT_SYMBOL_GPL(kvm_stat);
47
48static struct kvm_stats_debugfs_item {
49 const char *name;
50 u32 *data;
51 struct dentry *dentry;
52} debugfs_entries[] = {
53 { "pf_fixed", &kvm_stat.pf_fixed },
54 { "pf_guest", &kvm_stat.pf_guest },
55 { "tlb_flush", &kvm_stat.tlb_flush },
56 { "invlpg", &kvm_stat.invlpg },
57 { "exits", &kvm_stat.exits },
58 { "io_exits", &kvm_stat.io_exits },
59 { "mmio_exits", &kvm_stat.mmio_exits },
60 { "signal_exits", &kvm_stat.signal_exits },
61 { "irq_exits", &kvm_stat.irq_exits },
62 { 0, 0 }
63};
64
65static struct dentry *debugfs_dir;
66
67#define MAX_IO_MSRS 256
68
69#define CR0_RESEVED_BITS 0xffffffff1ffaffc0ULL
70#define LMSW_GUEST_MASK 0x0eULL
71#define CR4_RESEVED_BITS (~((1ULL << 11) - 1))
72#define CR8_RESEVED_BITS (~0x0fULL)
73#define EFER_RESERVED_BITS 0xfffffffffffff2fe
74
75#ifdef CONFIG_X86_64
76// LDT or TSS descriptor in the GDT. 16 bytes.
77struct segment_descriptor_64 {
78 struct segment_descriptor s;
79 u32 base_higher;
80 u32 pad_zero;
81};
82
83#endif
84
/*
 * Resolve the linear base address of segment @selector by walking the
 * host GDT (or LDT when selector bit 2 is set).  Returns 0 for a null
 * selector.  On x86_64, system descriptors (LDT/TSS types 2, 9, 11) are
 * 16 bytes and carry an extra base_higher word.
 */
unsigned long segment_base(u16 selector)
{
	struct descriptor_table gdt;
	struct segment_descriptor *d;
	unsigned long table_base;
	typedef unsigned long ul;
	unsigned long v;

	if (selector == 0)
		return 0;

	asm ("sgdt %0" : "=m"(gdt));
	table_base = gdt.base;

	if (selector & 4) {          /* from ldt */
		u16 ldt_selector;

		/* The LDT's own base is found recursively via its GDT entry. */
		asm ("sldt %0" : "=g"(ldt_selector));
		table_base = segment_base(ldt_selector);
	}
	/* Selector bits 3..15 index the table; mask off TI/RPL. */
	d = (struct segment_descriptor *)(table_base + (selector & ~7));
	v = d->base_low | ((ul)d->base_mid << 16) | ((ul)d->base_high << 24);
#ifdef CONFIG_X86_64
	if (d->system == 0
	    && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((ul)((struct segment_descriptor_64 *)d)->base_higher) << 32;
#endif
	return v;
}
EXPORT_SYMBOL_GPL(segment_base);
115
/*
 * Copy up to @size bytes from guest virtual address @addr into @dest,
 * translating and mapping one guest page at a time.
 *
 * Returns the number of bytes actually copied; stops early at the first
 * guest address that fails to translate (is_error_hpa).
 */
int kvm_read_guest(struct kvm_vcpu *vcpu,
			     gva_t addr,
			     unsigned long size,
			     void *dest)
{
	unsigned char *host_buf = dest;
	unsigned long req_size = size;

	while (size) {
		hpa_t paddr;
		unsigned now;
		unsigned offset;
		hva_t guest_buf;

		paddr = gva_to_hpa(vcpu, addr);

		if (is_error_hpa(paddr))
			break;

		/* Temporarily map the backing host page. */
		guest_buf = (hva_t)kmap_atomic(
					pfn_to_page(paddr >> PAGE_SHIFT),
					KM_USER0);
		offset = addr & ~PAGE_MASK;
		guest_buf |= offset;
		/* Never cross a page boundary in a single copy. */
		now = min(size, PAGE_SIZE - offset);
		memcpy(host_buf, (void*)guest_buf, now);
		host_buf += now;
		addr += now;
		size -= now;
		kunmap_atomic((void *)(guest_buf & PAGE_MASK), KM_USER0);
	}
	return req_size - size;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);
150
/*
 * Copy up to @size bytes from @data into guest virtual address @addr,
 * translating and mapping one guest page at a time (mirror of
 * kvm_read_guest).  Returns the number of bytes actually written.
 */
int kvm_write_guest(struct kvm_vcpu *vcpu,
			     gva_t addr,
			     unsigned long size,
			     void *data)
{
	unsigned char *host_buf = data;
	unsigned long req_size = size;

	while (size) {
		hpa_t paddr;
		unsigned now;
		unsigned offset;
		hva_t guest_buf;

		paddr = gva_to_hpa(vcpu, addr);

		if (is_error_hpa(paddr))
			break;

		guest_buf = (hva_t)kmap_atomic(
				pfn_to_page(paddr >> PAGE_SHIFT), KM_USER0);
		offset = addr & ~PAGE_MASK;
		guest_buf |= offset;
		/* Never cross a page boundary in a single copy. */
		now = min(size, PAGE_SIZE - offset);
		memcpy((void*)guest_buf, host_buf, now);
		host_buf += now;
		addr += now;
		size -= now;
		kunmap_atomic((void *)(guest_buf & PAGE_MASK), KM_USER0);
	}
	return req_size - size;
}
EXPORT_SYMBOL_GPL(kvm_write_guest);
184
/* Index of @vcpu within its kvm's vcpus[] array. */
static int vcpu_slot(struct kvm_vcpu *vcpu)
{
	return vcpu - vcpu->kvm->vcpus;
}
189
190/*
191 * Switches to specified vcpu, until a matching vcpu_put()
192 */
193static struct kvm_vcpu *vcpu_load(struct kvm *kvm, int vcpu_slot)
194{
195 struct kvm_vcpu *vcpu = &kvm->vcpus[vcpu_slot];
196
197 mutex_lock(&vcpu->mutex);
198 if (unlikely(!vcpu->vmcs)) {
199 mutex_unlock(&vcpu->mutex);
200 return 0;
201 }
202 return kvm_arch_ops->vcpu_load(vcpu);
203}
204
/* Release a vcpu obtained with vcpu_load(): arch teardown, then unlock. */
static void vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_arch_ops->vcpu_put(vcpu);
	mutex_unlock(&vcpu->mutex);
}
210
/*
 * open() handler for /dev/kvm: allocate and initialize a zeroed struct kvm
 * (one VM per open file) and stash it in the file's private data.
 */
static int kvm_dev_open(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	int i;

	if (!kvm)
		return -ENOMEM;

	spin_lock_init(&kvm->lock);
	INIT_LIST_HEAD(&kvm->active_mmu_pages);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		struct kvm_vcpu *vcpu = &kvm->vcpus[i];

		mutex_init(&vcpu->mutex);
		/* No shadow page tables until the vcpu is set up. */
		vcpu->mmu.root_hpa = INVALID_PAGE;
		INIT_LIST_HEAD(&vcpu->free_pages);
	}
	filp->private_data = kvm;
	return 0;
}
231
232/*
233 * Free any memory in @free but not in @dont.
234 */
235static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
236 struct kvm_memory_slot *dont)
237{
238 int i;
239
240 if (!dont || free->phys_mem != dont->phys_mem)
241 if (free->phys_mem) {
242 for (i = 0; i < free->npages; ++i)
243 __free_page(free->phys_mem[i]);
244 vfree(free->phys_mem);
245 }
246
247 if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
248 vfree(free->dirty_bitmap);
249
250 free->phys_mem = 0;
251 free->npages = 0;
252 free->dirty_bitmap = 0;
253}
254
255static void kvm_free_physmem(struct kvm *kvm)
256{
257 int i;
258
259 for (i = 0; i < kvm->nmemslots; ++i)
260 kvm_free_physmem_slot(&kvm->memslots[i], 0);
261}
262
/* Tear down one vcpu: arch-specific state first, then its MMU. */
static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_arch_ops->vcpu_free(vcpu);
	kvm_mmu_destroy(vcpu);
}
268
269static void kvm_free_vcpus(struct kvm *kvm)
270{
271 unsigned int i;
272
273 for (i = 0; i < KVM_MAX_VCPUS; ++i)
274 kvm_free_vcpu(&kvm->vcpus[i]);
275}
276
/* release() handler for /dev/kvm: destroy the VM created at open(). */
static int kvm_dev_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	kfree(kvm);
	return 0;
}
286
/* Queue a general-protection fault (error code 0) for the guest. */
static void inject_gp(struct kvm_vcpu *vcpu)
{
	kvm_arch_ops->inject_gp(vcpu, 0);
}
291
/*
 * Check the four PAE page-directory-pointer entries referenced by @cr3
 * for reserved bits.  Returns nonzero if any present PDPTE has a
 * reserved bit set (i.e. loading this cr3 must raise #GP).
 */
static int pdptrs_have_reserved_bits_set(struct kvm_vcpu *vcpu,
					 unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	/* CR3 bits 5-11 select the 32-byte-aligned PDPT within the page. */
	unsigned offset = (cr3 & (PAGE_SIZE-1)) >> 5;
	int i;
	u64 pdpte;
	u64 *pdpt;
	struct kvm_memory_slot *memslot;

	spin_lock(&vcpu->kvm->lock);
	memslot = gfn_to_memslot(vcpu->kvm, pdpt_gfn);
	/* FIXME: !memslot - emulate? 0xff? */
	pdpt = kmap_atomic(gfn_to_page(memslot, pdpt_gfn), KM_USER0);

	for (i = 0; i < 4; ++i) {
		pdpte = pdpt[offset + i];
		/* Present (bit 0) with any reserved bit set -> fail. */
		if ((pdpte & 1) && (pdpte & 0xfffffff0000001e6ull))
			break;
	}

	kunmap_atomic(pdpt, KM_USER0);
	spin_unlock(&vcpu->kvm->lock);

	/* i != 4 means the loop broke out on a bad entry. */
	return i != 4;
}
318
/*
 * Emulate a guest MOV to CR0: validate the new value (injecting #GP on
 * architecturally illegal combinations), hand it to the arch layer, and
 * rebuild the shadow MMU context since the paging mode may have changed.
 */
void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (cr0 & CR0_RESEVED_BITS) {
		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
		       cr0, vcpu->cr0);
		inject_gp(vcpu);
		return;
	}

	/* NW=1 with CD=0 is an illegal cache configuration. */
	if ((cr0 & CR0_NW_MASK) && !(cr0 & CR0_CD_MASK)) {
		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
		inject_gp(vcpu);
		return;
	}

	/* Paging requires protected mode. */
	if ((cr0 & CR0_PG_MASK) && !(cr0 & CR0_PE_MASK)) {
		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
		       "and a clear PE flag\n");
		inject_gp(vcpu);
		return;
	}

	/* Extra checks when paging is being turned on. */
	if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) {
#ifdef CONFIG_X86_64
		if ((vcpu->shadow_efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu)) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while PAE is disabled\n");
				inject_gp(vcpu);
				return;
			}
			kvm_arch_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while CS.L == 1\n");
				inject_gp(vcpu);
				return;

			}
		} else
#endif
		if (is_pae(vcpu) &&
			    pdptrs_have_reserved_bits_set(vcpu, vcpu->cr3)) {
			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
			       "reserved bits\n");
			inject_gp(vcpu);
			return;
		}

	}

	kvm_arch_ops->set_cr0(vcpu, cr0);
	vcpu->cr0 = cr0;

	/* Paging mode may have changed; rebuild the shadow MMU. */
	spin_lock(&vcpu->kvm->lock);
	kvm_mmu_reset_context(vcpu);
	spin_unlock(&vcpu->kvm->lock);
	return;
}
EXPORT_SYMBOL_GPL(set_cr0);
381
/* Emulate LMSW: replaces only the low 4 bits of CR0 (PE/MP/EM/TS). */
void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(lmsw);
387
388void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
389{
390 if (cr4 & CR4_RESEVED_BITS) {
391 printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
392 inject_gp(vcpu);
393 return;
394 }
395
396 if (kvm_arch_ops->is_long_mode(vcpu)) {
397 if (!(cr4 & CR4_PAE_MASK)) {
398 printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
399 "in long mode\n");
400 inject_gp(vcpu);
401 return;
402 }
403 } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & CR4_PAE_MASK)
404 && pdptrs_have_reserved_bits_set(vcpu, vcpu->cr3)) {
405 printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
406 inject_gp(vcpu);
407 }
408
409 if (cr4 & CR4_VMXE_MASK) {
410 printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
411 inject_gp(vcpu);
412 return;
413 }
414 kvm_arch_ops->set_cr4(vcpu, cr4);
415 spin_lock(&vcpu->kvm->lock);
416 kvm_mmu_reset_context(vcpu);
417 spin_unlock(&vcpu->kvm->lock);
418}
419EXPORT_SYMBOL_GPL(set_cr4);
420
/*
 * Emulate a guest MOV to CR3: validate reserved bits (mode-dependent),
 * then install the new page-table root via the MMU's new_cr3 hook.
 */
void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (kvm_arch_ops->is_long_mode(vcpu)) {
		if ( cr3 & CR3_L_MODE_RESEVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			inject_gp(vcpu);
			return;
		}
	} else {
		if (cr3 & CR3_RESEVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			inject_gp(vcpu);
			return;
		}
		/* PAE mode: the referenced PDPTEs must also be valid. */
		if (is_paging(vcpu) && is_pae(vcpu) &&
		    pdptrs_have_reserved_bits_set(vcpu, cr3)) {
			printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
			       "reserved bits\n");
			inject_gp(vcpu);
			return;
		}
	}

	vcpu->cr3 = cr3;
	spin_lock(&vcpu->kvm->lock);
	vcpu->mmu.new_cr3(vcpu);
	spin_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr3);
450
451void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
452{
453 if ( cr8 & CR8_RESEVED_BITS) {
454 printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
455 inject_gp(vcpu);
456 return;
457 }
458 vcpu->cr8 = cr8;
459}
460EXPORT_SYMBOL_GPL(set_cr8);
461
/*
 * Initialize the vcpu's guest FPU image: save the host state, reset the
 * FPU, capture the pristine state as the guest image, restore the host
 * state, then force MXCSR to its architectural default (0x1f80) and zero
 * the remainder of the 512-byte FXSAVE area.
 */
void fx_init(struct kvm_vcpu *vcpu)
{
	/* Layout of the fixed header of an FXSAVE image. */
	struct __attribute__ ((__packed__)) fx_image_s {
		u16 control; //fcw
		u16 status; //fsw
		u16 tag; // ftw
		u16 opcode; //fop
		u64 ip; // fpu ip
		u64 operand;// fpu dp
		u32 mxcsr;
		u32 mxcsr_mask;

	} *fx_image;

	fx_save(vcpu->host_fx_image);
	fpu_init();
	fx_save(vcpu->guest_fx_image);
	fx_restore(vcpu->host_fx_image);

	fx_image = (struct fx_image_s *)vcpu->guest_fx_image;
	fx_image->mxcsr = 0x1f80;
	memset(vcpu->guest_fx_image + sizeof(struct fx_image_s),
	       0, FX_IMAGE_SIZE - sizeof(struct fx_image_s));
}
EXPORT_SYMBOL_GPL(fx_init);
487
/*
 * Creates some virtual cpus.  Good luck creating more than one.
 *
 * Returns 0 on success, -EINVAL for a bad slot index, -EEXIST if the
 * slot is already populated, or the arch/mmu setup error.
 */
static int kvm_dev_ioctl_create_vcpu(struct kvm *kvm, int n)
{
	int r;
	struct kvm_vcpu *vcpu;

	r = -EINVAL;
	if (n < 0 || n >= KVM_MAX_VCPUS)
		goto out;

	vcpu = &kvm->vcpus[n];

	mutex_lock(&vcpu->mutex);

	/* A non-NULL vmcs marks the slot as already created. */
	if (vcpu->vmcs) {
		mutex_unlock(&vcpu->mutex);
		return -EEXIST;
	}

	/* Carve two 16-byte-aligned FXSAVE images out of fx_buf. */
	vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf,
					   FX_IMAGE_ALIGN);
	vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;

	vcpu->cpu = -1;  /* First load will set up TR */
	vcpu->kvm = kvm;
	r = kvm_arch_ops->vcpu_create(vcpu);
	if (r < 0)
		goto out_free_vcpus;

	kvm_arch_ops->vcpu_load(vcpu);

	r = kvm_arch_ops->vcpu_setup(vcpu);
	if (r >= 0)
		r = kvm_mmu_init(vcpu);

	vcpu_put(vcpu);

	if (r < 0)
		goto out_free_vcpus;

	return 0;

out_free_vcpus:
	kvm_free_vcpu(vcpu);
	mutex_unlock(&vcpu->mutex);
out:
	return r;
}
538
/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Allocations happen with kvm->lock dropped; memory_config_version is
 * re-checked afterwards and the whole operation retried ("raced:") if a
 * concurrent configuration change slipped in.
 */
static int kvm_dev_ioctl_set_memory_region(struct kvm *kvm,
					   struct kvm_memory_region *mem)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;
	int memory_config_version;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	/* npages == 0 means slot deletion; dirty logging is pointless then. */
	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

raced:
	spin_lock(&kvm->lock);

	memory_config_version = kvm->memory_config_version;
	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_unlock;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_unlock;
	}
	/*
	 * Do memory allocations outside lock.  memory_config_version will
	 * detect any races.
	 */
	spin_unlock(&kvm->lock);

	/* Deallocate if slot is being removed */
	if (!npages)
		new.phys_mem = 0;

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = 0;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
	if (npages && !new.phys_mem) {
		new.phys_mem = vmalloc(npages * sizeof(struct page *));

		if (!new.phys_mem)
			goto out_free;

		memset(new.phys_mem, 0, npages * sizeof(struct page *));
		for (i = 0; i < npages; ++i) {
			new.phys_mem[i] = alloc_page(GFP_HIGHUSER
						     | __GFP_ZERO);
			if (!new.phys_mem[i])
				goto out_free;
		}
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
	}

	spin_lock(&kvm->lock);

	/* Someone else changed the config while we allocated: retry. */
	if (memory_config_version != kvm->memory_config_version) {
		spin_unlock(&kvm->lock);
		kvm_free_physmem_slot(&new, &old);
		goto raced;
	}

	r = -EAGAIN;
	if (kvm->busy)
		goto out_unlock;

	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	*memslot = new;
	++kvm->memory_config_version;

	spin_unlock(&kvm->lock);

	/* Shadow page tables may reference the old layout: reset all vcpus. */
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		struct kvm_vcpu *vcpu;

		vcpu = vcpu_load(kvm, i);
		if (!vcpu)
			continue;
		kvm_mmu_reset_context(vcpu);
		vcpu_put(vcpu);
	}

	/* Free whatever the new slot no longer shares with the old one. */
	kvm_free_physmem_slot(&old, &new);
	return 0;

out_unlock:
	spin_unlock(&kvm->lock);
out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
682
/*
 * Get (and clear) the dirty memory log for a memory slot.
 *
 * Copies the slot's dirty bitmap to userspace; if any bit was set,
 * write-protects the slot and flushes every vcpu's TLB so future guest
 * writes fault and re-dirty the log.  Returns 0 on success, -EINVAL for
 * a bad slot index, -ENOENT if dirty logging is not enabled on the slot,
 * -EFAULT on a failed copy to userspace.
 */
static int kvm_dev_ioctl_get_dirty_log(struct kvm *kvm,
				       struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	spin_lock(&kvm->lock);

	/*
	 * Prevent changes to guest memory configuration even while the lock
	 * is not taken.
	 */
	++kvm->busy;
	spin_unlock(&kvm->lock);
	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	/* Bitmap length in bytes, rounded up to a whole byte. */
	n = ALIGN(memslot->npages, 8) / 8;

	/* Scan for any set bit; stop at the first non-zero word. */
	for (i = 0; !any && i < n; ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;


	if (any) {
		/* Remove write access so re-dirtied pages fault again. */
		spin_lock(&kvm->lock);
		kvm_mmu_slot_remove_write_access(kvm, log->slot);
		spin_unlock(&kvm->lock);
		memset(memslot->dirty_bitmap, 0, n);
		/* Flush stale writable translations on every vcpu. */
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			struct kvm_vcpu *vcpu = vcpu_load(kvm, i);

			if (!vcpu)
				continue;
			kvm_arch_ops->tlb_flush(vcpu);
			vcpu_put(vcpu);
		}
	}

	r = 0;

out:
	/* Drop the memory-config freeze taken at entry. */
	spin_lock(&kvm->lock);
	--kvm->busy;
	spin_unlock(&kvm->lock);
	return r;
}
744
745struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
746{
747 int i;
748
749 for (i = 0; i < kvm->nmemslots; ++i) {
750 struct kvm_memory_slot *memslot = &kvm->memslots[i];
751
752 if (gfn >= memslot->base_gfn
753 && gfn < memslot->base_gfn + memslot->npages)
754 return memslot;
755 }
756 return 0;
757}
758EXPORT_SYMBOL_GPL(gfn_to_memslot);
759
760void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
761{
762 int i;
763 struct kvm_memory_slot *memslot = 0;
764 unsigned long rel_gfn;
765
766 for (i = 0; i < kvm->nmemslots; ++i) {
767 memslot = &kvm->memslots[i];
768
769 if (gfn >= memslot->base_gfn
770 && gfn < memslot->base_gfn + memslot->npages) {
771
772 if (!memslot || !memslot->dirty_bitmap)
773 return;
774
775 rel_gfn = gfn - memslot->base_gfn;
776
777 /* avoid RMW */
778 if (!test_bit(rel_gfn, memslot->dirty_bitmap))
779 set_bit(rel_gfn, memslot->dirty_bitmap);
780 return;
781 }
782 }
783}
784
/*
 * Read @bytes of guest memory at guest-virtual @addr into @val, for the
 * instruction emulator.  Handles reads that straddle page boundaries by
 * looping one page-chunk at a time.
 *
 * Returns X86EMUL_CONTINUE on success, X86EMUL_PROPAGATE_FAULT when the
 * virtual address does not translate, X86EMUL_UNHANDLEABLE when the
 * physical frame is outside every memory slot.
 */
static int emulator_read_std(unsigned long addr,
			     unsigned long *val,
			     unsigned int bytes,
			     struct x86_emulate_ctxt *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;
	void *data = val;

	while (bytes) {
		gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
		unsigned offset = addr & (PAGE_SIZE-1);
		/* Clamp the chunk so it never crosses a page boundary. */
		unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
		unsigned long pfn;
		struct kvm_memory_slot *memslot;
		void *page;

		if (gpa == UNMAPPED_GVA)
			return X86EMUL_PROPAGATE_FAULT;
		pfn = gpa >> PAGE_SHIFT;
		memslot = gfn_to_memslot(vcpu->kvm, pfn);
		if (!memslot)
			return X86EMUL_UNHANDLEABLE;
		/* Short-lived atomic mapping; unmapped right after the copy. */
		page = kmap_atomic(gfn_to_page(memslot, pfn), KM_USER0);

		memcpy(data, page + offset, tocopy);

		kunmap_atomic(page, KM_USER0);

		bytes -= tocopy;
		data += tocopy;
		addr += tocopy;
	}

	return X86EMUL_CONTINUE;
}
820
821static int emulator_write_std(unsigned long addr,
822 unsigned long val,
823 unsigned int bytes,
824 struct x86_emulate_ctxt *ctxt)
825{
826 printk(KERN_ERR "emulator_write_std: addr %lx n %d\n",
827 addr, bytes);
828 return X86EMUL_UNHANDLEABLE;
829}
830
/*
 * Emulator read callback with MMIO support.
 *
 * Three cases:
 *  1. A previous MMIO read was completed by userspace: consume the
 *     buffered data from vcpu->mmio_data.
 *  2. The address is ordinary guest RAM: satisfy it directly via
 *     emulator_read_std().
 *  3. Otherwise treat it as MMIO: record the request on the vcpu and
 *     return X86EMUL_UNHANDLEABLE so the caller exits to userspace.
 */
static int emulator_read_emulated(unsigned long addr,
				  unsigned long *val,
				  unsigned int bytes,
				  struct x86_emulate_ctxt *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	if (vcpu->mmio_read_completed) {
		memcpy(val, vcpu->mmio_data, bytes);
		vcpu->mmio_read_completed = 0;
		return X86EMUL_CONTINUE;
	} else if (emulator_read_std(addr, val, bytes, ctxt)
		   == X86EMUL_CONTINUE)
		return X86EMUL_CONTINUE;
	else {
		gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
		if (gpa == UNMAPPED_GVA)
			return vcpu_printf(vcpu, "not present\n"), X86EMUL_PROPAGATE_FAULT;
		/* Hand the MMIO read to userspace to complete. */
		vcpu->mmio_needed = 1;
		vcpu->mmio_phys_addr = gpa;
		vcpu->mmio_size = bytes;
		vcpu->mmio_is_write = 0;

		return X86EMUL_UNHANDLEABLE;
	}
}
857
858static int emulator_write_emulated(unsigned long addr,
859 unsigned long val,
860 unsigned int bytes,
861 struct x86_emulate_ctxt *ctxt)
862{
863 struct kvm_vcpu *vcpu = ctxt->vcpu;
864 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
865
866 if (gpa == UNMAPPED_GVA)
867 return X86EMUL_PROPAGATE_FAULT;
868
869 vcpu->mmio_needed = 1;
870 vcpu->mmio_phys_addr = gpa;
871 vcpu->mmio_size = bytes;
872 vcpu->mmio_is_write = 1;
873 memcpy(vcpu->mmio_data, &val, bytes);
874
875 return X86EMUL_CONTINUE;
876}
877
878static int emulator_cmpxchg_emulated(unsigned long addr,
879 unsigned long old,
880 unsigned long new,
881 unsigned int bytes,
882 struct x86_emulate_ctxt *ctxt)
883{
884 static int reported;
885
886 if (!reported) {
887 reported = 1;
888 printk(KERN_WARNING "kvm: emulating exchange as write\n");
889 }
890 return emulator_write_emulated(addr, new, bytes, ctxt);
891}
892
893static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
894{
895 return kvm_arch_ops->get_segment_base(vcpu, seg);
896}
897
/*
 * Emulate the INVLPG instruction for guest-virtual @address.
 *
 * Invalidates the shadow-MMU mapping under kvm->lock, then asks the
 * arch layer to invalidate the hardware TLB entry (outside the lock).
 */
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
	spin_lock(&vcpu->kvm->lock);
	vcpu->mmu.inval_page(vcpu, address);
	spin_unlock(&vcpu->kvm->lock);
	kvm_arch_ops->invlpg(vcpu, address);
	return X86EMUL_CONTINUE;
}
906
907int emulate_clts(struct kvm_vcpu *vcpu)
908{
909 unsigned long cr0 = vcpu->cr0;
910
911 cr0 &= ~CR0_TS_MASK;
912 kvm_arch_ops->set_cr0(vcpu, cr0);
913 return X86EMUL_CONTINUE;
914}
915
916int emulator_get_dr(struct x86_emulate_ctxt* ctxt, int dr, unsigned long *dest)
917{
918 struct kvm_vcpu *vcpu = ctxt->vcpu;
919
920 switch (dr) {
921 case 0 ... 3:
922 *dest = kvm_arch_ops->get_dr(vcpu, dr);
923 return X86EMUL_CONTINUE;
924 default:
925 printk(KERN_DEBUG "%s: unexpected dr %u\n",
926 __FUNCTION__, dr);
927 return X86EMUL_UNHANDLEABLE;
928 }
929}
930
931int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
932{
933 unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
934 int exception;
935
936 kvm_arch_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
937 if (exception) {
938 /* FIXME: better handling */
939 return X86EMUL_UNHANDLEABLE;
940 }
941 return X86EMUL_CONTINUE;
942}
943
944static void report_emulation_failure(struct x86_emulate_ctxt *ctxt)
945{
946 static int reported;
947 u8 opcodes[4];
948 unsigned long rip = ctxt->vcpu->rip;
949 unsigned long rip_linear;
950
951 rip_linear = rip + get_segment_base(ctxt->vcpu, VCPU_SREG_CS);
952
953 if (reported)
954 return;
955
956 emulator_read_std(rip_linear, (void *)opcodes, 4, ctxt);
957
958 printk(KERN_ERR "emulation failed but !mmio_needed?"
959 " rip %lx %02x %02x %02x %02x\n",
960 rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
961 reported = 1;
962}
963
/* Callback table handed to the x86 instruction emulator. */
struct x86_emulate_ops emulate_ops = {
	.read_std            = emulator_read_std,
	.write_std           = emulator_write_std,
	.read_emulated       = emulator_read_emulated,
	.write_emulated      = emulator_write_emulated,
	.cmpxchg_emulated    = emulator_cmpxchg_emulated,
};
971
/*
 * Emulate the instruction at the current guest rip.
 *
 * Builds an emulation context (operating mode, segment bases, eflags),
 * runs x86_emulate_memop(), and translates the outcome:
 *   EMULATE_DONE     - instruction fully emulated, registers written back
 *   EMULATE_DO_MMIO  - an MMIO transaction was queued; @run (if non-NULL)
 *                      has been filled in for userspace to complete
 *   EMULATE_FAIL     - emulator gave up with no MMIO pending
 */
int emulate_instruction(struct kvm_vcpu *vcpu,
			struct kvm_run *run,
			unsigned long cr2,
			u16 error_code)
{
	struct x86_emulate_ctxt emulate_ctxt;
	int r;
	int cs_db, cs_l;

	kvm_arch_ops->cache_regs(vcpu);

	kvm_arch_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

	emulate_ctxt.vcpu = vcpu;
	emulate_ctxt.eflags = kvm_arch_ops->get_rflags(vcpu);
	emulate_ctxt.cr2 = cr2;
	/*
	 * Operating-mode selection: VM flag => real mode; otherwise the
	 * CS long bit selects 64-bit, and the CS default-size bit picks
	 * between 32- and 16-bit protected mode.
	 */
	emulate_ctxt.mode = (emulate_ctxt.eflags & X86_EFLAGS_VM)
		? X86EMUL_MODE_REAL : cs_l
		? X86EMUL_MODE_PROT64 : cs_db
		? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;

	/* Segment bases are architecturally zero in 64-bit mode. */
	if (emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
		emulate_ctxt.cs_base = 0;
		emulate_ctxt.ds_base = 0;
		emulate_ctxt.es_base = 0;
		emulate_ctxt.ss_base = 0;
	} else {
		emulate_ctxt.cs_base = get_segment_base(vcpu, VCPU_SREG_CS);
		emulate_ctxt.ds_base = get_segment_base(vcpu, VCPU_SREG_DS);
		emulate_ctxt.es_base = get_segment_base(vcpu, VCPU_SREG_ES);
		emulate_ctxt.ss_base = get_segment_base(vcpu, VCPU_SREG_SS);
	}

	/* FS/GS keep their bases even in 64-bit mode. */
	emulate_ctxt.gs_base = get_segment_base(vcpu, VCPU_SREG_GS);
	emulate_ctxt.fs_base = get_segment_base(vcpu, VCPU_SREG_FS);

	vcpu->mmio_is_write = 0;
	r = x86_emulate_memop(&emulate_ctxt, &emulate_ops);

	/* Export any pending MMIO transaction to the userspace run struct. */
	if ((r || vcpu->mmio_is_write) && run) {
		run->mmio.phys_addr = vcpu->mmio_phys_addr;
		memcpy(run->mmio.data, vcpu->mmio_data, 8);
		run->mmio.len = vcpu->mmio_size;
		run->mmio.is_write = vcpu->mmio_is_write;
	}

	if (r) {
		/* Failure with no MMIO pending is a genuine emulation bug. */
		if (!vcpu->mmio_needed) {
			report_emulation_failure(&emulate_ctxt);
			return EMULATE_FAIL;
		}
		return EMULATE_DO_MMIO;
	}

	kvm_arch_ops->decache_regs(vcpu);
	kvm_arch_ops->set_rflags(vcpu, emulate_ctxt.eflags);

	if (vcpu->mmio_is_write)
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(emulate_instruction);
1035
1036static u64 mk_cr_64(u64 curr_cr, u32 new_val)
1037{
1038 return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
1039}
1040
1041void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
1042{
1043 struct descriptor_table dt = { limit, base };
1044
1045 kvm_arch_ops->set_gdt(vcpu, &dt);
1046}
1047
1048void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
1049{
1050 struct descriptor_table dt = { limit, base };
1051
1052 kvm_arch_ops->set_idt(vcpu, &dt);
1053}
1054
1055void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
1056 unsigned long *rflags)
1057{
1058 lmsw(vcpu, msw);
1059 *rflags = kvm_arch_ops->get_rflags(vcpu);
1060}
1061
1062unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
1063{
1064 switch (cr) {
1065 case 0:
1066 return vcpu->cr0;
1067 case 2:
1068 return vcpu->cr2;
1069 case 3:
1070 return vcpu->cr3;
1071 case 4:
1072 return vcpu->cr4;
1073 default:
1074 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
1075 return 0;
1076 }
1077}
1078
/*
 * Real-mode write to control register @cr.
 *
 * CR0/CR4 writes merge the new 32-bit value into the existing 64-bit
 * register (mk_cr_64) and go through the full set_cr* paths; a CR0
 * write also refreshes the caller's rflags copy.  CR2 is stored
 * directly.  Unsupported registers just log.
 */
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
		     unsigned long *rflags)
{
	switch (cr) {
	case 0:
		set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
		*rflags = kvm_arch_ops->get_rflags(vcpu);
		break;
	case 2:
		vcpu->cr2 = val;
		break;
	case 3:
		set_cr3(vcpu, val);
		break;
	case 4:
		set_cr4(vcpu, mk_cr_64(vcpu->cr4, val));
		break;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
	}
}
1100
/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 *
 * Thin wrapper over the arch-specific MSR read; also matches the
 * do_msr callback signature used by msr_io().
 */
static int get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	return kvm_arch_ops->get_msr(vcpu, msr_index, pdata);
}
1110
1111#ifdef CONFIG_X86_64
1112
/*
 * Emulate a guest write to the EFER MSR.
 *
 * Injects #GP (like hardware) for reserved bits or for toggling LME
 * while paging is enabled; otherwise forwards the value to the arch
 * layer and updates the shadow copy, preserving the current LMA bit
 * (LMA is managed by the processor, not the guest write).
 */
void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & EFER_RESERVED_BITS) {
		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
		       efer);
		inject_gp(vcpu);
		return;
	}

	/* LME must not change while paging is on — #GP, as on hardware. */
	if (is_paging(vcpu)
	    && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
		inject_gp(vcpu);
		return;
	}

	kvm_arch_ops->set_efer(vcpu, efer);

	/* Keep the existing LMA bit; the guest cannot set it directly. */
	efer &= ~EFER_LMA;
	efer |= vcpu->shadow_efer & EFER_LMA;

	vcpu->shadow_efer = efer;
}
EXPORT_SYMBOL_GPL(set_efer);
1137
1138#endif
1139
/*
 * Writes msr value into into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 *
 * Thin wrapper over the arch-specific MSR write.
 */
static int set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	return kvm_arch_ops->set_msr(vcpu, msr_index, data);
}
1149
/*
 * Voluntarily yield the CPU from a vcpu context: drop the vcpu, let the
 * scheduler run, then re-acquire the same vcpu slot.
 */
void kvm_resched(struct kvm_vcpu *vcpu)
{
	vcpu_put(vcpu);
	cond_resched();
	/* Cannot fail - no vcpu unplug yet. */
	vcpu_load(vcpu->kvm, vcpu_slot(vcpu));
}
EXPORT_SYMBOL_GPL(kvm_resched);
1158
1159void load_msrs(struct vmx_msr_entry *e, int n)
1160{
1161 int i;
1162
1163 for (i = 0; i < n; ++i)
1164 wrmsrl(e[i].index, e[i].data);
1165}
1166EXPORT_SYMBOL_GPL(load_msrs);
1167
1168void save_msrs(struct vmx_msr_entry *e, int n)
1169{
1170 int i;
1171
1172 for (i = 0; i < n; ++i)
1173 rdmsrl(e[i].index, e[i].data);
1174}
1175EXPORT_SYMBOL_GPL(save_msrs);
1176
/*
 * KVM_RUN ioctl handler: enter guest execution on the requested vcpu.
 *
 * Before entering, applies any completion state from userspace: skips
 * the instruction if the previous exit was fully emulated there, and
 * feeds back MMIO read data.  Returns the arch-layer run result, or
 * -EINVAL / -ENOENT for a bad or absent vcpu.
 */
static int kvm_dev_ioctl_run(struct kvm *kvm, struct kvm_run *kvm_run)
{
	struct kvm_vcpu *vcpu;
	int r;

	if (kvm_run->vcpu < 0 || kvm_run->vcpu >= KVM_MAX_VCPUS)
		return -EINVAL;

	vcpu = vcpu_load(kvm, kvm_run->vcpu);
	if (!vcpu)
		return -ENOENT;

	/* Userspace emulated the exiting instruction; advance past it. */
	if (kvm_run->emulated) {
		kvm_arch_ops->skip_emulated_instruction(vcpu);
		kvm_run->emulated = 0;
	}

	/* Deliver data for an MMIO read that userspace just completed. */
	if (kvm_run->mmio_completed) {
		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
		vcpu->mmio_read_completed = 1;
	}

	vcpu->mmio_needed = 0;

	r = kvm_arch_ops->run(vcpu, kvm_run);

	vcpu_put(vcpu);
	return r;
}
1206
/*
 * KVM_GET_REGS ioctl handler: copy the vcpu's general-purpose registers,
 * rip and rflags into @regs.  Returns -EINVAL / -ENOENT for a bad or
 * absent vcpu, 0 on success.
 */
static int kvm_dev_ioctl_get_regs(struct kvm *kvm, struct kvm_regs *regs)
{
	struct kvm_vcpu *vcpu;

	if (regs->vcpu < 0 || regs->vcpu >= KVM_MAX_VCPUS)
		return -EINVAL;

	vcpu = vcpu_load(kvm, regs->vcpu);
	if (!vcpu)
		return -ENOENT;

	/* Pull the latest register state out of the hardware/arch cache. */
	kvm_arch_ops->cache_regs(vcpu);

	regs->rax = vcpu->regs[VCPU_REGS_RAX];
	regs->rbx = vcpu->regs[VCPU_REGS_RBX];
	regs->rcx = vcpu->regs[VCPU_REGS_RCX];
	regs->rdx = vcpu->regs[VCPU_REGS_RDX];
	regs->rsi = vcpu->regs[VCPU_REGS_RSI];
	regs->rdi = vcpu->regs[VCPU_REGS_RDI];
	regs->rsp = vcpu->regs[VCPU_REGS_RSP];
	regs->rbp = vcpu->regs[VCPU_REGS_RBP];
#ifdef CONFIG_X86_64
	regs->r8 = vcpu->regs[VCPU_REGS_R8];
	regs->r9 = vcpu->regs[VCPU_REGS_R9];
	regs->r10 = vcpu->regs[VCPU_REGS_R10];
	regs->r11 = vcpu->regs[VCPU_REGS_R11];
	regs->r12 = vcpu->regs[VCPU_REGS_R12];
	regs->r13 = vcpu->regs[VCPU_REGS_R13];
	regs->r14 = vcpu->regs[VCPU_REGS_R14];
	regs->r15 = vcpu->regs[VCPU_REGS_R15];
#endif

	regs->rip = vcpu->rip;
	regs->rflags = kvm_arch_ops->get_rflags(vcpu);

	/*
	 * Don't leak debug flags in case they were set for guest debugging
	 */
	if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
		regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);

	vcpu_put(vcpu);

	return 0;
}
1252
/*
 * KVM_SET_REGS ioctl handler: load the vcpu's general-purpose registers,
 * rip and rflags from @regs.  Returns -EINVAL / -ENOENT for a bad or
 * absent vcpu, 0 on success.
 */
static int kvm_dev_ioctl_set_regs(struct kvm *kvm, struct kvm_regs *regs)
{
	struct kvm_vcpu *vcpu;

	if (regs->vcpu < 0 || regs->vcpu >= KVM_MAX_VCPUS)
		return -EINVAL;

	vcpu = vcpu_load(kvm, regs->vcpu);
	if (!vcpu)
		return -ENOENT;

	vcpu->regs[VCPU_REGS_RAX] = regs->rax;
	vcpu->regs[VCPU_REGS_RBX] = regs->rbx;
	vcpu->regs[VCPU_REGS_RCX] = regs->rcx;
	vcpu->regs[VCPU_REGS_RDX] = regs->rdx;
	vcpu->regs[VCPU_REGS_RSI] = regs->rsi;
	vcpu->regs[VCPU_REGS_RDI] = regs->rdi;
	vcpu->regs[VCPU_REGS_RSP] = regs->rsp;
	vcpu->regs[VCPU_REGS_RBP] = regs->rbp;
#ifdef CONFIG_X86_64
	vcpu->regs[VCPU_REGS_R8] = regs->r8;
	vcpu->regs[VCPU_REGS_R9] = regs->r9;
	vcpu->regs[VCPU_REGS_R10] = regs->r10;
	vcpu->regs[VCPU_REGS_R11] = regs->r11;
	vcpu->regs[VCPU_REGS_R12] = regs->r12;
	vcpu->regs[VCPU_REGS_R13] = regs->r13;
	vcpu->regs[VCPU_REGS_R14] = regs->r14;
	vcpu->regs[VCPU_REGS_R15] = regs->r15;
#endif

	vcpu->rip = regs->rip;
	kvm_arch_ops->set_rflags(vcpu, regs->rflags);

	/* Push the updated register cache back to the hardware state. */
	kvm_arch_ops->decache_regs(vcpu);

	vcpu_put(vcpu);

	return 0;
}
1292
1293static void get_segment(struct kvm_vcpu *vcpu,
1294 struct kvm_segment *var, int seg)
1295{
1296 return kvm_arch_ops->get_segment(vcpu, var, seg);
1297}
1298
/*
 * KVM_GET_SREGS ioctl handler: copy the vcpu's segment registers,
 * descriptor tables, control registers, EFER, APIC base and pending
 * interrupt bitmap into @sregs.
 */
static int kvm_dev_ioctl_get_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
{
	struct kvm_vcpu *vcpu;
	struct descriptor_table dt;

	if (sregs->vcpu < 0 || sregs->vcpu >= KVM_MAX_VCPUS)
		return -EINVAL;
	vcpu = vcpu_load(kvm, sregs->vcpu);
	if (!vcpu)
		return -ENOENT;

	get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	kvm_arch_ops->get_idt(vcpu, &dt);
	sregs->idt.limit = dt.limit;
	sregs->idt.base = dt.base;
	kvm_arch_ops->get_gdt(vcpu, &dt);
	sregs->gdt.limit = dt.limit;
	sregs->gdt.base = dt.base;

	sregs->cr0 = vcpu->cr0;
	sregs->cr2 = vcpu->cr2;
	sregs->cr3 = vcpu->cr3;
	sregs->cr4 = vcpu->cr4;
	sregs->cr8 = vcpu->cr8;
	sregs->efer = vcpu->shadow_efer;
	sregs->apic_base = vcpu->apic_base;

	memcpy(sregs->interrupt_bitmap, vcpu->irq_pending,
	       sizeof sregs->interrupt_bitmap);

	vcpu_put(vcpu);

	return 0;
}
1342
1343static void set_segment(struct kvm_vcpu *vcpu,
1344 struct kvm_segment *var, int seg)
1345{
1346 return kvm_arch_ops->set_segment(vcpu, var, seg);
1347}
1348
/*
 * KVM_SET_SREGS ioctl handler: load segment registers, descriptor
 * tables, control registers, EFER, APIC base and pending-interrupt
 * bitmap from @sregs.  The shadow MMU context is rebuilt if any of
 * cr0/cr3/cr4/efer changed.
 */
static int kvm_dev_ioctl_set_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
{
	struct kvm_vcpu *vcpu;
	int mmu_reset_needed = 0;
	int i;
	struct descriptor_table dt;

	if (sregs->vcpu < 0 || sregs->vcpu >= KVM_MAX_VCPUS)
		return -EINVAL;
	vcpu = vcpu_load(kvm, sregs->vcpu);
	if (!vcpu)
		return -ENOENT;

	set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	dt.limit = sregs->idt.limit;
	dt.base = sregs->idt.base;
	kvm_arch_ops->set_idt(vcpu, &dt);
	dt.limit = sregs->gdt.limit;
	dt.base = sregs->gdt.base;
	kvm_arch_ops->set_gdt(vcpu, &dt);

	vcpu->cr2 = sregs->cr2;
	/* Track paging-related changes that invalidate the shadow MMU. */
	mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
	vcpu->cr3 = sregs->cr3;

	vcpu->cr8 = sregs->cr8;

	mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
#ifdef CONFIG_X86_64
	kvm_arch_ops->set_efer(vcpu, sregs->efer);
#endif
	vcpu->apic_base = sregs->apic_base;

	mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
	kvm_arch_ops->set_cr0_no_modeswitch(vcpu, sregs->cr0);

	mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
	kvm_arch_ops->set_cr4(vcpu, sregs->cr4);

	if (mmu_reset_needed)
		kvm_mmu_reset_context(vcpu);

	/* Rebuild the per-word pending-interrupt summary from the bitmap. */
	memcpy(vcpu->irq_pending, sregs->interrupt_bitmap,
	       sizeof vcpu->irq_pending);
	vcpu->irq_summary = 0;
	for (i = 0; i < NR_IRQ_WORDS; ++i)
		if (vcpu->irq_pending[i])
			__set_bit(i, &vcpu->irq_summary);

	vcpu_put(vcpu);

	return 0;
}
1411
/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 */
static u32 msrs_to_save[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_K6_STAR,
#ifdef CONFIG_X86_64
	/* Syscall/GS MSRs only exist in long mode. */
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TIME_STAMP_COUNTER,
};
1424
1425
1426/*
1427 * Adapt set_msr() to msr_io()'s calling convention
1428 */
1429static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
1430{
1431 return set_msr(vcpu, index, *data);
1432}
1433
/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * Applies @do_msr to each entry, stopping at the first failure.
 *
 * @return number of msrs set successfully (not an error code: a partial
 *         result is reported as the count processed before the failure).
 */
static int __msr_io(struct kvm *kvm, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
	struct kvm_vcpu *vcpu;
	int i;

	if (msrs->vcpu < 0 || msrs->vcpu >= KVM_MAX_VCPUS)
		return -EINVAL;

	vcpu = vcpu_load(kvm, msrs->vcpu);
	if (!vcpu)
		return -ENOENT;

	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;

	vcpu_put(vcpu);

	return i;
}
1462
/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * Copies the entry array in from userspace, runs __msr_io(), and (for
 * reads, @writeback) copies the results back out.
 *
 * @return number of msrs set successfully, or a negative errno
 *         (-EFAULT, -E2BIG, -ENOMEM).
 */
static int msr_io(struct kvm *kvm, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	int r, n;
	unsigned size;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
		goto out;

	/* Bound the allocation below; nmsrs comes from userspace. */
	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	r = -ENOMEM;
	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
	entries = vmalloc(size);
	if (!entries)
		goto out;

	r = -EFAULT;
	if (copy_from_user(entries, user_msrs->entries, size))
		goto out_free;

	r = n = __msr_io(kvm, &msrs, entries, do_msr);
	if (r < 0)
		goto out_free;

	r = -EFAULT;
	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		goto out_free;

	r = n;

out_free:
	vfree(entries);
out:
	return r;
}
1511
1512/*
1513 * Translate a guest virtual address to a guest physical address.
1514 */
1515static int kvm_dev_ioctl_translate(struct kvm *kvm, struct kvm_translation *tr)
1516{
1517 unsigned long vaddr = tr->linear_address;
1518 struct kvm_vcpu *vcpu;
1519 gpa_t gpa;
1520
1521 vcpu = vcpu_load(kvm, tr->vcpu);
1522 if (!vcpu)
1523 return -ENOENT;
1524 spin_lock(&kvm->lock);
1525 gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
1526 tr->physical_address = gpa;
1527 tr->valid = gpa != UNMAPPED_GVA;
1528 tr->writeable = 1;
1529 tr->usermode = 0;
1530 spin_unlock(&kvm->lock);
1531 vcpu_put(vcpu);
1532
1533 return 0;
1534}
1535
/*
 * KVM_INTERRUPT ioctl handler: queue interrupt vector irq->irq on the
 * given vcpu.  Returns -EINVAL for a bad vcpu index or vector,
 * -ENOENT if the vcpu does not exist, 0 on success.
 */
static int kvm_dev_ioctl_interrupt(struct kvm *kvm, struct kvm_interrupt *irq)
{
	struct kvm_vcpu *vcpu;

	if (irq->vcpu < 0 || irq->vcpu >= KVM_MAX_VCPUS)
		return -EINVAL;
	if (irq->irq < 0 || irq->irq >= 256)
		return -EINVAL;
	vcpu = vcpu_load(kvm, irq->vcpu);
	if (!vcpu)
		return -ENOENT;

	/* Mark the vector pending and flag its word in the summary bitmap. */
	set_bit(irq->irq, vcpu->irq_pending);
	set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);

	vcpu_put(vcpu);

	return 0;
}
1555
1556static int kvm_dev_ioctl_debug_guest(struct kvm *kvm,
1557 struct kvm_debug_guest *dbg)
1558{
1559 struct kvm_vcpu *vcpu;
1560 int r;
1561
1562 if (dbg->vcpu < 0 || dbg->vcpu >= KVM_MAX_VCPUS)
1563 return -EINVAL;
1564 vcpu = vcpu_load(kvm, dbg->vcpu);
1565 if (!vcpu)
1566 return -ENOENT;
1567
1568 r = kvm_arch_ops->set_guest_debug(vcpu, dbg);
1569
1570 vcpu_put(vcpu);
1571
1572 return r;
1573}
1574
/*
 * Top-level ioctl dispatcher for the /dev/kvm character device.
 *
 * Each case copies the argument struct in from userspace, calls the
 * matching handler, and (where the struct carries results) copies it
 * back out.  Returns 0 or the handler's negative errno; unknown ioctls
 * return -EINVAL.
 */
static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	int r = -EINVAL;

	switch (ioctl) {
	case KVM_CREATE_VCPU: {
		r = kvm_dev_ioctl_create_vcpu(kvm, arg);
		if (r)
			goto out;
		break;
	}
	case KVM_RUN: {
		struct kvm_run kvm_run;

		r = -EFAULT;
		if (copy_from_user(&kvm_run, (void *)arg, sizeof kvm_run))
			goto out;
		r = kvm_dev_ioctl_run(kvm, &kvm_run);
		/* Positive exit reasons still need the struct copied back. */
		if (r < 0)
			goto out;
		r = -EFAULT;
		if (copy_to_user((void *)arg, &kvm_run, sizeof kvm_run))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_REGS: {
		struct kvm_regs kvm_regs;

		r = -EFAULT;
		if (copy_from_user(&kvm_regs, (void *)arg, sizeof kvm_regs))
			goto out;
		r = kvm_dev_ioctl_get_regs(kvm, &kvm_regs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user((void *)arg, &kvm_regs, sizeof kvm_regs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs kvm_regs;

		r = -EFAULT;
		if (copy_from_user(&kvm_regs, (void *)arg, sizeof kvm_regs))
			goto out;
		r = kvm_dev_ioctl_set_regs(kvm, &kvm_regs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SREGS: {
		struct kvm_sregs kvm_sregs;

		r = -EFAULT;
		if (copy_from_user(&kvm_sregs, (void *)arg, sizeof kvm_sregs))
			goto out;
		r = kvm_dev_ioctl_get_sregs(kvm, &kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user((void *)arg, &kvm_sregs, sizeof kvm_sregs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		struct kvm_sregs kvm_sregs;

		r = -EFAULT;
		if (copy_from_user(&kvm_sregs, (void *)arg, sizeof kvm_sregs))
			goto out;
		r = kvm_dev_ioctl_set_sregs(kvm, &kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, (void *)arg, sizeof tr))
			goto out;
		r = kvm_dev_ioctl_translate(kvm, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user((void *)arg, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, (void *)arg, sizeof irq))
			goto out;
		r = kvm_dev_ioctl_interrupt(kvm, &irq);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_DEBUG_GUEST: {
		struct kvm_debug_guest dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, (void *)arg, sizeof dbg))
			goto out;
		r = kvm_dev_ioctl_debug_guest(kvm, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MEMORY_REGION: {
		struct kvm_memory_region kvm_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_mem, (void *)arg, sizeof kvm_mem))
			goto out;
		r = kvm_dev_ioctl_set_memory_region(kvm, &kvm_mem);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, (void *)arg, sizeof log))
			goto out;
		r = kvm_dev_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_MSRS:
		r = msr_io(kvm, (void __user *)arg, get_msr, 1);
		break;
	case KVM_SET_MSRS:
		r = msr_io(kvm, (void __user *)arg, do_set_msr, 0);
		break;
	case KVM_GET_MSR_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = (void __user *)arg;
		struct kvm_msr_list msr_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
			goto out;
		n = msr_list.nmsrs;
		/* Always report the required count, even on a short buffer. */
		msr_list.nmsrs = ARRAY_SIZE(msrs_to_save);
		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
			goto out;
		r = -E2BIG;
		if (n < ARRAY_SIZE(msrs_to_save))
			goto out;
		r = -EFAULT;
		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
				 sizeof msrs_to_save))
			goto out;
		r = 0;
		/*
		 * NOTE(review): no break here — control falls through to the
		 * empty default.  Harmless today, but fragile if a case is
		 * ever added below; confirm and add a break.
		 */
	}
	default:
		;
	}
out:
	return r;
}
1751
/*
 * nopage handler for mmap of /dev/kvm: map guest physical memory into
 * the userspace process.  The faulting offset within the vma is the
 * guest frame number; frames outside every memory slot raise SIGBUS.
 */
static struct page *kvm_dev_nopage(struct vm_area_struct *vma,
				   unsigned long address,
				   int *type)
{
	struct kvm *kvm = vma->vm_file->private_data;
	unsigned long pgoff;
	struct kvm_memory_slot *slot;
	struct page *page;

	*type = VM_FAULT_MINOR;
	/* Fault offset in pages, relative to the start of the mapping. */
	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	slot = gfn_to_memslot(kvm, pgoff);
	if (!slot)
		return NOPAGE_SIGBUS;
	page = gfn_to_page(slot, pgoff);
	if (!page)
		return NOPAGE_SIGBUS;
	/* Take a reference for the mapping; the mm drops it on unmap. */
	get_page(page);
	return page;
}
1772
/* vma operations for mmap of /dev/kvm; faults resolved by kvm_dev_nopage. */
static struct vm_operations_struct kvm_dev_vm_ops = {
	.nopage = kvm_dev_nopage,
};
1776
/* mmap handler for /dev/kvm: pages are populated lazily via nopage. */
static int kvm_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_dev_vm_ops;
	return 0;
}
1782
/* File operations for /dev/kvm; .owner is filled in by kvm_init_arch(). */
static struct file_operations kvm_chardev_ops = {
	.open		= kvm_dev_open,
	.release        = kvm_dev_release,
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
	.mmap           = kvm_dev_mmap,
};
1790
1791static struct miscdevice kvm_dev = {
1792 MISC_DYNAMIC_MINOR,
1793 "kvm",
1794 &kvm_chardev_ops,
1795};
1796
/*
 * Reboot notifier: disable hardware virtualization on all CPUs before
 * a restart.
 */
static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	if (val == SYS_RESTART) {
		/*
		 * Some (well, at least mine) BIOSes hang on reboot if
		 * in vmx root mode.
		 */
		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
		on_each_cpu(kvm_arch_ops->hardware_disable, 0, 0, 1);
	}
	return NOTIFY_OK;
}
1810
/* Registered in kvm_init_arch(); see kvm_reboot() above. */
static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};
1815
1816static __init void kvm_init_debug(void)
1817{
1818 struct kvm_stats_debugfs_item *p;
1819
1820 debugfs_dir = debugfs_create_dir("kvm", 0);
1821 for (p = debugfs_entries; p->name; ++p)
1822 p->dentry = debugfs_create_u32(p->name, 0444, debugfs_dir,
1823 p->data);
1824}
1825
1826static void kvm_exit_debug(void)
1827{
1828 struct kvm_stats_debugfs_item *p;
1829
1830 for (p = debugfs_entries; p->name; ++p)
1831 debugfs_remove(p->dentry);
1832 debugfs_remove(debugfs_dir);
1833}
1834
/* Host physical address of the zeroed fallback page; set in kvm_init(). */
hpa_t bad_page_address;
1836
/*
 * Register an architecture backend (VMX or SVM) and bring up KVM:
 * probe hardware support, enable virtualization on all CPUs, hook the
 * reboot notifier, and register the /dev/kvm misc device.  On misc
 * registration failure, everything is torn down in reverse order.
 */
int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
{
	int r;

	kvm_arch_ops = ops;

	if (!kvm_arch_ops->cpu_has_kvm_support()) {
		printk(KERN_ERR "kvm: no hardware support\n");
		return -EOPNOTSUPP;
	}
	if (kvm_arch_ops->disabled_by_bios()) {
		printk(KERN_ERR "kvm: disabled by bios\n");
		return -EOPNOTSUPP;
	}

	r = kvm_arch_ops->hardware_setup();
	if (r < 0)
	    return r;

	/* Turn on virtualization extensions on every online CPU. */
	on_each_cpu(kvm_arch_ops->hardware_enable, 0, 0, 1);
	register_reboot_notifier(&kvm_reboot_notifier);

	/* Pin the backend module while /dev/kvm is open. */
	kvm_chardev_ops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk (KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	return r;

out_free:
	/* Unwind in reverse order of the setup above. */
	unregister_reboot_notifier(&kvm_reboot_notifier);
	on_each_cpu(kvm_arch_ops->hardware_disable, 0, 0, 1);
	kvm_arch_ops->hardware_unsetup();
	return r;
}
1875
1876void kvm_exit_arch(void)
1877{
1878 misc_deregister(&kvm_dev);
1879
1880 unregister_reboot_notifier(&kvm_reboot_notifier);
1881 on_each_cpu(kvm_arch_ops->hardware_disable, 0, 0, 1);
1882 kvm_arch_ops->hardware_unsetup();
1883}
1884
/*
 * Module init: set up debugfs statistics and allocate the zeroed
 * "bad page" used as a fallback mapping target; its host physical
 * address is published in bad_page_address.
 */
static __init int kvm_init(void)
{
	static struct page *bad_page;
	int r = 0;

	kvm_init_debug();

	if ((bad_page = alloc_page(GFP_KERNEL)) == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_page_address = page_to_pfn(bad_page) << PAGE_SHIFT;
	/* GFP_KERNEL pages are not zeroed; clear it explicitly. */
	memset(__va(bad_page_address), 0, PAGE_SIZE);

	return r;

out:
	kvm_exit_debug();
	return r;
}
1906
1907static __exit void kvm_exit(void)
1908{
1909 kvm_exit_debug();
1910 __free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
1911}
1912
1913module_init(kvm_init)
1914module_exit(kvm_exit)
1915
1916EXPORT_SYMBOL_GPL(kvm_init_arch);
1917EXPORT_SYMBOL_GPL(kvm_exit_arch);
diff --git a/drivers/kvm/kvm_svm.h b/drivers/kvm/kvm_svm.h
new file mode 100644
index 000000000000..74cc862f4935
--- /dev/null
+++ b/drivers/kvm/kvm_svm.h
@@ -0,0 +1,44 @@
1#ifndef __KVM_SVM_H
2#define __KVM_SVM_H
3
4#include <linux/types.h>
5#include <linux/list.h>
6#include <asm/msr.h>
7
8#include "svm.h"
9#include "kvm.h"
10
11static const u32 host_save_msrs[] = {
12#ifdef CONFIG_X86_64
13 MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
14 MSR_FS_BASE, MSR_GS_BASE,
15#endif
16 MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
17 MSR_IA32_DEBUGCTLMSR, /*MSR_IA32_LASTBRANCHFROMIP,
18 MSR_IA32_LASTBRANCHTOIP, MSR_IA32_LASTINTFROMIP,MSR_IA32_LASTINTTOIP,*/
19};
20
21#define NR_HOST_SAVE_MSRS (sizeof(host_save_msrs) / sizeof(*host_save_msrs))
22#define NUM_DB_REGS 4
23
24struct vcpu_svm {
25 struct vmcb *vmcb;
26 unsigned long vmcb_pa;
27 struct svm_cpu_data *svm_data;
28 uint64_t asid_generation;
29
30 unsigned long cr0;
31 unsigned long cr4;
32 unsigned long db_regs[NUM_DB_REGS];
33
34 u64 next_rip;
35
36 u64 host_msrs[NR_HOST_SAVE_MSRS];
37 unsigned long host_cr2;
38 unsigned long host_db_regs[NUM_DB_REGS];
39 unsigned long host_dr6;
40 unsigned long host_dr7;
41};
42
43#endif
44
diff --git a/drivers/kvm/kvm_vmx.h b/drivers/kvm/kvm_vmx.h
new file mode 100644
index 000000000000..d139f73fb6e1
--- /dev/null
+++ b/drivers/kvm/kvm_vmx.h
@@ -0,0 +1,14 @@
#ifndef __KVM_VMX_H
#define __KVM_VMX_H

#ifdef CONFIG_X86_64
/*
 * avoid save/load MSR_SYSCALL_MASK and MSR_LSTAR by std vt
 * mechanism (cpu bug AA24)
 */
#define NR_BAD_MSRS 2
#else
/* No such MSRs exist on 32-bit hosts. */
#define NR_BAD_MSRS 0
#endif

#endif
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
new file mode 100644
index 000000000000..3d367cbfe1f9
--- /dev/null
+++ b/drivers/kvm/mmu.c
@@ -0,0 +1,686 @@
1/*
2 * Kernel-based Virtual Machine driver for Linux
3 *
4 * This module enables machines with Intel VT-x extensions to run virtual
5 * machines without emulation or binary translation.
6 *
7 * MMU support
8 *
9 * Copyright (C) 2006 Qumranet, Inc.
10 *
11 * Authors:
12 * Yaniv Kamay <yaniv@qumranet.com>
13 * Avi Kivity <avi@qumranet.com>
14 *
15 * This work is licensed under the terms of the GNU GPL, version 2. See
16 * the COPYING file in the top-level directory.
17 *
18 */
19#include <linux/types.h>
20#include <linux/string.h>
21#include <asm/page.h>
22#include <linux/mm.h>
23#include <linux/highmem.h>
24#include <linux/module.h>
25
26#include "vmx.h"
27#include "kvm.h"
28
29#define pgprintk(x...) do { } while (0)
30
31#define ASSERT(x) \
32 if (!(x)) { \
33 printk(KERN_WARNING "assertion failed %s:%d: %s\n", \
34 __FILE__, __LINE__, #x); \
35 }
36
37#define PT64_ENT_PER_PAGE 512
38#define PT32_ENT_PER_PAGE 1024
39
40#define PT_WRITABLE_SHIFT 1
41
42#define PT_PRESENT_MASK (1ULL << 0)
43#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
44#define PT_USER_MASK (1ULL << 2)
45#define PT_PWT_MASK (1ULL << 3)
46#define PT_PCD_MASK (1ULL << 4)
47#define PT_ACCESSED_MASK (1ULL << 5)
48#define PT_DIRTY_MASK (1ULL << 6)
49#define PT_PAGE_SIZE_MASK (1ULL << 7)
50#define PT_PAT_MASK (1ULL << 7)
51#define PT_GLOBAL_MASK (1ULL << 8)
52#define PT64_NX_MASK (1ULL << 63)
53
54#define PT_PAT_SHIFT 7
55#define PT_DIR_PAT_SHIFT 12
56#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)
57
58#define PT32_DIR_PSE36_SIZE 4
59#define PT32_DIR_PSE36_SHIFT 13
60#define PT32_DIR_PSE36_MASK (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
61
62
63#define PT32_PTE_COPY_MASK \
64 (PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_DIRTY_MASK | PT_GLOBAL_MASK)
65
66#define PT64_PTE_COPY_MASK (PT64_NX_MASK | PT32_PTE_COPY_MASK)
67
68#define PT_FIRST_AVAIL_BITS_SHIFT 9
69#define PT64_SECOND_AVAIL_BITS_SHIFT 52
70
71#define PT_SHADOW_PS_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
72#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
73
74#define PT_SHADOW_WRITABLE_SHIFT (PT_FIRST_AVAIL_BITS_SHIFT + 1)
75#define PT_SHADOW_WRITABLE_MASK (1ULL << PT_SHADOW_WRITABLE_SHIFT)
76
77#define PT_SHADOW_USER_SHIFT (PT_SHADOW_WRITABLE_SHIFT + 1)
78#define PT_SHADOW_USER_MASK (1ULL << (PT_SHADOW_USER_SHIFT))
79
80#define PT_SHADOW_BITS_OFFSET (PT_SHADOW_WRITABLE_SHIFT - PT_WRITABLE_SHIFT)
81
82#define VALID_PAGE(x) ((x) != INVALID_PAGE)
83
84#define PT64_LEVEL_BITS 9
85
86#define PT64_LEVEL_SHIFT(level) \
87 ( PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS )
88
89#define PT64_LEVEL_MASK(level) \
90 (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
91
92#define PT64_INDEX(address, level)\
93 (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
94
95
96#define PT32_LEVEL_BITS 10
97
98#define PT32_LEVEL_SHIFT(level) \
99 ( PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS )
100
101#define PT32_LEVEL_MASK(level) \
102 (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
103
104#define PT32_INDEX(address, level)\
105 (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
106
107
108#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & PAGE_MASK)
109#define PT64_DIR_BASE_ADDR_MASK \
110 (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
111
112#define PT32_BASE_ADDR_MASK PAGE_MASK
113#define PT32_DIR_BASE_ADDR_MASK \
114 (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
115
116
117#define PFERR_PRESENT_MASK (1U << 0)
118#define PFERR_WRITE_MASK (1U << 1)
119#define PFERR_USER_MASK (1U << 2)
120
121#define PT64_ROOT_LEVEL 4
122#define PT32_ROOT_LEVEL 2
123#define PT32E_ROOT_LEVEL 3
124
125#define PT_DIRECTORY_LEVEL 2
126#define PT_PAGE_TABLE_LEVEL 1
127
/* True when CR0.WP is set, i.e. supervisor writes honor read-only ptes. */
static int is_write_protection(struct kvm_vcpu *vcpu)
{
	return vcpu->cr0 & CR0_WP_MASK;
}

/* Stub: PSE36 support is simply assumed present for the guest. */
static int is_cpuid_PSE36(void)
{
	return 1;
}

/* Present bit (bit 0) of a pte. */
static int is_present_pte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

/* Writable bit (bit 1) of a pte. */
static int is_writeble_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

/* Shadow-only marker: pte refers to mmio rather than RAM. */
static int is_io_pte(unsigned long pte)
{
	return pte & PT_SHADOW_IO_MARK;
}
152
153static void kvm_mmu_free_page(struct kvm_vcpu *vcpu, hpa_t page_hpa)
154{
155 struct kvm_mmu_page *page_head = page_header(page_hpa);
156
157 list_del(&page_head->link);
158 page_head->page_hpa = page_hpa;
159 list_add(&page_head->link, &vcpu->free_pages);
160}
161
162static int is_empty_shadow_page(hpa_t page_hpa)
163{
164 u32 *pos;
165 u32 *end;
166 for (pos = __va(page_hpa), end = pos + PAGE_SIZE / sizeof(u32);
167 pos != end; pos++)
168 if (*pos != 0)
169 return 0;
170 return 1;
171}
172
173static hpa_t kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, u64 *parent_pte)
174{
175 struct kvm_mmu_page *page;
176
177 if (list_empty(&vcpu->free_pages))
178 return INVALID_PAGE;
179
180 page = list_entry(vcpu->free_pages.next, struct kvm_mmu_page, link);
181 list_del(&page->link);
182 list_add(&page->link, &vcpu->kvm->active_mmu_pages);
183 ASSERT(is_empty_shadow_page(page->page_hpa));
184 page->slot_bitmap = 0;
185 page->global = 1;
186 page->parent_pte = parent_pte;
187 return page->page_hpa;
188}
189
190static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
191{
192 int slot = memslot_id(kvm, gfn_to_memslot(kvm, gpa >> PAGE_SHIFT));
193 struct kvm_mmu_page *page_head = page_header(__pa(pte));
194
195 __set_bit(slot, &page_head->slot_bitmap);
196}
197
198hpa_t safe_gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
199{
200 hpa_t hpa = gpa_to_hpa(vcpu, gpa);
201
202 return is_error_hpa(hpa) ? bad_page_address | (gpa & ~PAGE_MASK): hpa;
203}
204
/*
 * Translate a guest physical address to a host physical address via the
 * memslot table.  Addresses outside every slot return the gpa with
 * HPA_ERR_MASK set; callers test with is_error_hpa().
 */
hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	struct kvm_memory_slot *slot;
	struct page *page;

	ASSERT((gpa & HPA_ERR_MASK) == 0);
	slot = gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT);
	if (!slot)
		return gpa | HPA_ERR_MASK;
	page = gfn_to_page(slot, gpa >> PAGE_SHIFT);
	/* Combine the host frame with the offset within the page. */
	return ((hpa_t)page_to_pfn(page) << PAGE_SHIFT)
		| (gpa & (PAGE_SIZE-1));
}
218
219hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
220{
221 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
222
223 if (gpa == UNMAPPED_GVA)
224 return UNMAPPED_GVA;
225 return gpa_to_hpa(vcpu, gpa);
226}
227
228
/*
 * Recursively free the shadow page table rooted at @page_hpa.
 * Leaf pages (level 1) are simply zeroed; interior pages have each
 * present entry cleared and its subtree released first.  Every page is
 * handed back to the vcpu's free list zeroed, which is what
 * is_empty_shadow_page() later asserts.
 */
static void release_pt_page_64(struct kvm_vcpu *vcpu, hpa_t page_hpa,
			       int level)
{
	ASSERT(vcpu);
	ASSERT(VALID_PAGE(page_hpa));
	ASSERT(level <= PT64_ROOT_LEVEL && level > 0);

	if (level == 1)
		memset(__va(page_hpa), 0, PAGE_SIZE);
	else {
		u64 *pos;
		u64 *end;

		for (pos = __va(page_hpa), end = pos + PT64_ENT_PER_PAGE;
		    pos != end; pos++) {
			u64 current_ent = *pos;

			/* Clear before descending so the page ends up empty. */
			*pos = 0;
			if (is_present_pte(current_ent))
				release_pt_page_64(vcpu,
						  current_ent &
						  PT64_BASE_ADDR_MASK,
						  level - 1);
		}
	}
	kvm_mmu_free_page(vcpu, page_hpa);
}
256
/* With paging off there is no guest cr3 state to react to. */
static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}

/*
 * Install a shadow mapping v -> p for a guest running with paging
 * disabled.  Walks (and builds, as needed) a 3-level shadow table from
 * the root; returns 0 on success or -ENOMEM when no shadow pages are
 * free (the caller then flushes and retries).
 */
static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
{
	int level = PT32E_ROOT_LEVEL;
	hpa_t table_addr = vcpu->mmu.root_hpa;

	for (; ; level--) {
		u32 index = PT64_INDEX(v, level);
		u64 *table;

		ASSERT(VALID_PAGE(table_addr));
		table = __va(table_addr);

		if (level == 1) {
			/* Leaf: map with full rwx-style access bits. */
			mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
			page_header_update_slot(vcpu->kvm, table, v);
			table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
								PT_USER_MASK;
			return 0;
		}

		if (table[index] == 0) {
			hpa_t new_table = kvm_mmu_alloc_page(vcpu,
							     &table[index]);

			if (!VALID_PAGE(new_table)) {
				pgprintk("nonpaging_map: ENOMEM\n");
				return -ENOMEM;
			}

			/* PDPTEs (top PAE level) may not carry RW/US bits. */
			if (level == PT32E_ROOT_LEVEL)
				table[index] = new_table | PT_PRESENT_MASK;
			else
				table[index] = new_table | PT_PRESENT_MASK |
						PT_WRITABLE_MASK | PT_USER_MASK;
		}
		table_addr = table[index] & PT64_BASE_ADDR_MASK;
	}
}
299
/*
 * Drop the entire shadow table and start over with a fresh root —
 * used to reclaim shadow pages when nonpaging_map() runs out.
 * Note vcpu->mmu.root_hpa keeps the clean root address; only the value
 * programmed into hardware cr3 gets the guest's PCD/PWT bits merged in.
 */
static void nonpaging_flush(struct kvm_vcpu *vcpu)
{
	hpa_t root = vcpu->mmu.root_hpa;

	++kvm_stat.tlb_flush;
	pgprintk("nonpaging_flush\n");
	ASSERT(VALID_PAGE(root));
	release_pt_page_64(vcpu, root, vcpu->mmu.shadow_root_level);
	root = kvm_mmu_alloc_page(vcpu, NULL);
	ASSERT(VALID_PAGE(root));
	vcpu->mmu.root_hpa = root;
	if (is_paging(vcpu))
		root |= (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK));
	kvm_arch_ops->set_cr3(vcpu, root);
	kvm_arch_ops->tlb_flush(vcpu);
}
316
/* With guest paging off, guest virtual == guest physical. */
static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	return vaddr;
}

/*
 * Shadow fault handler for a non-paging guest: map the faulting page.
 * Returns 1 when the address is backed by no memslot (mmio — caller
 * must emulate), 0 once the mapping is installed.  On shadow-page
 * exhaustion the whole shadow table is flushed and the map retried.
 */
static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
			       u32 error_code)
{
	int ret;
	gpa_t addr = gva;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));

	for (;;) {
		hpa_t paddr;

		paddr = gpa_to_hpa(vcpu , addr & PT64_BASE_ADDR_MASK);

		if (is_error_hpa(paddr))
			return 1;

		ret = nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
		if (ret) {
			/* Out of shadow pages: reclaim them all and retry. */
			nonpaging_flush(vcpu);
			continue;
		}
		break;
	}
	return ret;
}

/* Nothing to invalidate: the guest has no page tables to go stale. */
static void nonpaging_inval_page(struct kvm_vcpu *vcpu, gva_t addr)
{
}
352
353static void nonpaging_free(struct kvm_vcpu *vcpu)
354{
355 hpa_t root;
356
357 ASSERT(vcpu);
358 root = vcpu->mmu.root_hpa;
359 if (VALID_PAGE(root))
360 release_pt_page_64(vcpu, root, vcpu->mmu.shadow_root_level);
361 vcpu->mmu.root_hpa = INVALID_PAGE;
362}
363
/*
 * Set up the mmu context for a guest with paging disabled: install the
 * nonpaging callbacks, allocate a fresh shadow root and point hardware
 * cr3 at it.  Always returns 0.
 */
static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
	context->inval_page = nonpaging_inval_page;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->free = nonpaging_free;
	/* Shadowing always uses a PAE-style 3-level table here. */
	context->root_level = PT32E_ROOT_LEVEL;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = kvm_mmu_alloc_page(vcpu, NULL);
	ASSERT(VALID_PAGE(context->root_hpa));
	kvm_arch_ops->set_cr3(vcpu, context->root_hpa);
	return 0;
}
380
381
/*
 * Emulate a guest TLB flush: unlink and free every non-global shadow
 * page table page (level-1 pages — each is released with level 1),
 * clearing the parent shadow entry that pointed to it, then flush the
 * hardware TLB.  Pages marked global survive, mirroring x86 PGE.
 */
static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *page, *npage;

	list_for_each_entry_safe(page, npage, &vcpu->kvm->active_mmu_pages,
				 link) {
		if (page->global)
			continue;

		/* Roots have no parent pte; leave them in place. */
		if (!page->parent_pte)
			continue;

		*page->parent_pte = 0;
		release_pt_page_64(vcpu, page->page_hpa, 1);
	}
	++kvm_stat.tlb_flush;
	kvm_arch_ops->tlb_flush(vcpu);
}
400
/* A guest cr3 load acts as a TLB flush for non-global translations. */
static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
	kvm_mmu_flush_tlb(vcpu);
}

/* Clear the global flag on the shadow page containing this pte. */
static void mark_pagetable_nonglobal(void *shadow_pte)
{
	page_header(__pa(shadow_pte))->global = 0;
}
410
/*
 * Fill in the low half of a shadow pte, shared by the 32- and 64-bit
 * walkers.  @access_bits are the guest-granted permissions; they are
 * stashed in the pte's shadow-only bit range so write faults can later
 * distinguish "guest says read-only" from "we made it read-only".
 * A not-yet-dirty page is mapped read-only so the first write traps
 * and we can set the guest dirty bit.  Unbacked addresses become
 * non-present mmio ptes carrying the original gaddr.
 */
static inline void set_pte_common(struct kvm_vcpu *vcpu,
			     u64 *shadow_pte,
			     gpa_t gaddr,
			     int dirty,
			     u64 access_bits)
{
	hpa_t paddr;

	/* Preserve the guest's intent in the shadow-only bit range. */
	*shadow_pte |= access_bits << PT_SHADOW_BITS_OFFSET;
	if (!dirty)
		access_bits &= ~PT_WRITABLE_MASK;

	if (access_bits & PT_WRITABLE_MASK)
		mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);

	*shadow_pte |= access_bits;

	paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);

	if (!(*shadow_pte & PT_GLOBAL_MASK))
		mark_pagetable_nonglobal(shadow_pte);

	if (is_error_hpa(paddr)) {
		/* mmio: remember the guest address, leave non-present. */
		*shadow_pte |= gaddr;
		*shadow_pte |= PT_SHADOW_IO_MARK;
		*shadow_pte &= ~PT_PRESENT_MASK;
	} else {
		*shadow_pte |= paddr;
		page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
	}
}
442
/* Forward a page fault into the guest via the arch backend. */
static void inject_page_fault(struct kvm_vcpu *vcpu,
			     u64 addr,
			     u32 err_code)
{
	kvm_arch_ops->inject_page_fault(vcpu, addr, err_code);
}

/*
 * Handle a read fault on a shadow pte whose guest pte is user-visible
 * but which we mapped kernel-only.  Grant user access, but strip write
 * access so the next write still traps.  Returns 1 if fixed.
 */
static inline int fix_read_pf(u64 *shadow_ent)
{
	if ((*shadow_ent & PT_SHADOW_USER_MASK) &&
	    !(*shadow_ent & PT_USER_MASK)) {
		/*
		 * If supervisor write protect is disabled, we shadow kernel
		 * pages as user pages so we can trap the write access.
		 */
		*shadow_ent |= PT_USER_MASK;
		*shadow_ent &= ~PT_WRITABLE_MASK;

		return 1;

	}
	return 0;
}

/* Check whether a pte permits the given (write, user) access. */
static int may_access(u64 pte, int write, int user)
{

	if (user && !(pte & PT_USER_MASK))
		return 0;
	if (write && !(pte & PT_WRITABLE_MASK))
		return 0;
	return 1;
}
476
/*
 * Remove the shadow pte for @addr (invlpg emulation).  Walks the shadow
 * table from the root; stops early if the path is not present.  A
 * directory entry shadowing a guest large page (PT_SHADOW_PS_MARK) is
 * torn down together with its whole leaf page, followed by a TLB flush.
 */
static void paging_inval_page(struct kvm_vcpu *vcpu, gva_t addr)
{
	hpa_t page_addr = vcpu->mmu.root_hpa;
	int level = vcpu->mmu.shadow_root_level;

	++kvm_stat.invlpg;

	for (; ; level--) {
		u32 index = PT64_INDEX(addr, level);
		u64 *table = __va(page_addr);

		if (level == PT_PAGE_TABLE_LEVEL ) {
			table[index] = 0;
			return;
		}

		if (!is_present_pte(table[index]))
			return;

		page_addr = table[index] & PT64_BASE_ADDR_MASK;

		if (level == PT_DIRECTORY_LEVEL &&
			  (table[index] & PT_SHADOW_PS_MARK)) {
			/* Shadowed large page: drop the whole leaf table. */
			table[index] = 0;
			release_pt_page_64(vcpu, page_addr, PT_PAGE_TABLE_LEVEL);

			kvm_arch_ops->tlb_flush(vcpu);
			return;
		}
	}
}
511
/* Freeing the shadow table is identical whether the guest pages or not. */
static void paging_free(struct kvm_vcpu *vcpu)
{
	nonpaging_free(vcpu);
}
516
517#define PTTYPE 64
518#include "paging_tmpl.h"
519#undef PTTYPE
520
521#define PTTYPE 32
522#include "paging_tmpl.h"
523#undef PTTYPE
524
/*
 * Set up the mmu context for a long-mode guest: 4-level guest walk
 * shadowed by a 4-level shadow table.  Hardware cr3 gets the shadow
 * root plus the guest's PCD/PWT cache bits.  Always returns 0.
 */
static int paging64_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->mmu;

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging64_page_fault;
	context->inval_page = paging_inval_page;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->free = paging_free;
	context->root_level = PT64_ROOT_LEVEL;
	context->shadow_root_level = PT64_ROOT_LEVEL;
	context->root_hpa = kvm_mmu_alloc_page(vcpu, NULL);
	ASSERT(VALID_PAGE(context->root_hpa));
	kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
		    (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
	return 0;
}

/*
 * Set up the mmu context for a legacy 32-bit (non-PAE) guest: 2-level
 * guest walk shadowed by a 3-level (PAE) shadow table.  Always returns 0.
 */
static int paging32_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->mmu;

	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging32_page_fault;
	context->inval_page = paging_inval_page;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->free = paging_free;
	context->root_level = PT32_ROOT_LEVEL;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = kvm_mmu_alloc_page(vcpu, NULL);
	ASSERT(VALID_PAGE(context->root_hpa));
	kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
		    (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
	return 0;
}
561
562static int paging32E_init_context(struct kvm_vcpu *vcpu)
563{
564 int ret;
565
566 if ((ret = paging64_init_context(vcpu)))
567 return ret;
568
569 vcpu->mmu.root_level = PT32E_ROOT_LEVEL;
570 vcpu->mmu.shadow_root_level = PT32E_ROOT_LEVEL;
571 return 0;
572}
573
574static int init_kvm_mmu(struct kvm_vcpu *vcpu)
575{
576 ASSERT(vcpu);
577 ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
578
579 if (!is_paging(vcpu))
580 return nonpaging_init_context(vcpu);
581 else if (kvm_arch_ops->is_long_mode(vcpu))
582 return paging64_init_context(vcpu);
583 else if (is_pae(vcpu))
584 return paging32E_init_context(vcpu);
585 else
586 return paging32_init_context(vcpu);
587}
588
589static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
590{
591 ASSERT(vcpu);
592 if (VALID_PAGE(vcpu->mmu.root_hpa)) {
593 vcpu->mmu.free(vcpu);
594 vcpu->mmu.root_hpa = INVALID_PAGE;
595 }
596}
597
/*
 * Rebuild the mmu context from scratch — called when the guest changes
 * paging mode.  Returns the init result (0 on success).
 */
int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	destroy_kvm_mmu(vcpu);
	return init_kvm_mmu(vcpu);
}
603
604static void free_mmu_pages(struct kvm_vcpu *vcpu)
605{
606 while (!list_empty(&vcpu->free_pages)) {
607 struct kvm_mmu_page *page;
608
609 page = list_entry(vcpu->free_pages.next,
610 struct kvm_mmu_page, link);
611 list_del(&page->link);
612 __free_page(pfn_to_page(page->page_hpa >> PAGE_SHIFT));
613 page->page_hpa = INVALID_PAGE;
614 }
615}
616
617static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
618{
619 int i;
620
621 ASSERT(vcpu);
622
623 for (i = 0; i < KVM_NUM_MMU_PAGES; i++) {
624 struct page *page;
625 struct kvm_mmu_page *page_header = &vcpu->page_header_buf[i];
626
627 INIT_LIST_HEAD(&page_header->link);
628 if ((page = alloc_page(GFP_KVM_MMU)) == NULL)
629 goto error_1;
630 page->private = (unsigned long)page_header;
631 page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT;
632 memset(__va(page_header->page_hpa), 0, PAGE_SIZE);
633 list_add(&page_header->link, &vcpu->free_pages);
634 }
635 return 0;
636
637error_1:
638 free_mmu_pages(vcpu);
639 return -ENOMEM;
640}
641
642int kvm_mmu_init(struct kvm_vcpu *vcpu)
643{
644 int r;
645
646 ASSERT(vcpu);
647 ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
648 ASSERT(list_empty(&vcpu->free_pages));
649
650 if ((r = alloc_mmu_pages(vcpu)))
651 return r;
652
653 if ((r = init_kvm_mmu(vcpu))) {
654 free_mmu_pages(vcpu);
655 return r;
656 }
657 return 0;
658}
659
/* Counterpart of kvm_mmu_init(): drop the context, then the page pool. */
void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);

	destroy_kvm_mmu(vcpu);
	free_mmu_pages(vcpu);
}
667
/*
 * Make every shadow pte referring to memory slot @slot read-only —
 * used when dirty-page logging is enabled so writes start faulting.
 * Only shadow pages whose slot_bitmap has the slot set are scanned.
 */
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
	struct kvm_mmu_page *page;

	list_for_each_entry(page, &kvm->active_mmu_pages, link) {
		int i;
		u64 *pt;

		if (!test_bit(slot, &page->slot_bitmap))
			continue;

		pt = __va(page->page_hpa);
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
			/* avoid RMW */
			if (pt[i] & PT_WRITABLE_MASK)
				pt[i] &= ~PT_WRITABLE_MASK;

	}
}
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
new file mode 100644
index 000000000000..a9771b4c5bb8
--- /dev/null
+++ b/drivers/kvm/paging_tmpl.h
@@ -0,0 +1,391 @@
1/*
2 * Kernel-based Virtual Machine driver for Linux
3 *
4 * This module enables machines with Intel VT-x extensions to run virtual
5 * machines without emulation or binary translation.
6 *
7 * MMU support
8 *
9 * Copyright (C) 2006 Qumranet, Inc.
10 *
11 * Authors:
12 * Yaniv Kamay <yaniv@qumranet.com>
13 * Avi Kivity <avi@qumranet.com>
14 *
15 * This work is licensed under the terms of the GNU GPL, version 2. See
16 * the COPYING file in the top-level directory.
17 *
18 */
19
20/*
21 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
22 * so the code in this file is compiled twice, once per pte size.
23 */
24
25#if PTTYPE == 64
26 #define pt_element_t u64
27 #define guest_walker guest_walker64
28 #define FNAME(name) paging##64_##name
29 #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
30 #define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
31 #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
32 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
33 #define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
34 #define PT_PTE_COPY_MASK PT64_PTE_COPY_MASK
35#elif PTTYPE == 32
36 #define pt_element_t u32
37 #define guest_walker guest_walker32
38 #define FNAME(name) paging##32_##name
39 #define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
40 #define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
41 #define PT_INDEX(addr, level) PT32_INDEX(addr, level)
42 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
43 #define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
44 #define PT_PTE_COPY_MASK PT32_PTE_COPY_MASK
45#else
46 #error Invalid PTTYPE value
47#endif
48
/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;			/* current guest walk level */
	pt_element_t *table;		/* kmap'ed current guest table page */
	pt_element_t inherited_ar;	/* access rights ANDed along the walk */
};

/*
 * Start a guest page-table walk at cr3.  The table page is mapped with
 * kmap_atomic(KM_USER0); release_walker() must be called before any
 * path that could sleep.  In 32-bit PAE mode cr3 is only 32-byte
 * aligned, so the sub-page offset of cr3 is folded into the mapped
 * pointer (the ASSERT documents that this only happens for PAE).
 */
static void FNAME(init_walker)(struct guest_walker *walker,
			       struct kvm_vcpu *vcpu)
{
	hpa_t hpa;
	struct kvm_memory_slot *slot;

	walker->level = vcpu->mmu.root_level;
	slot = gfn_to_memslot(vcpu->kvm,
			      (vcpu->cr3 & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
	hpa = safe_gpa_to_hpa(vcpu, vcpu->cr3 & PT64_BASE_ADDR_MASK);
	walker->table = kmap_atomic(pfn_to_page(hpa >> PAGE_SHIFT), KM_USER0);

	ASSERT((!kvm_arch_ops->is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) == 0);

	walker->table = (pt_element_t *)( (unsigned long)walker->table |
		(unsigned long)(vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) );
	walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;
}

/* Drop the atomic kmap taken by init_walker()/fetch_guest(). */
static void FNAME(release_walker)(struct guest_walker *walker)
{
	kunmap_atomic(walker->table, KM_USER0);
}
83
/*
 * Build a shadow pte from a guest pte: copy the status bits, intersect
 * the guest access bits with those inherited from upper levels, and let
 * set_pte_common() fill in the host address and permission handling.
 */
static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte,
			   u64 *shadow_pte, u64 access_bits)
{
	ASSERT(*shadow_pte == 0);
	access_bits &= guest_pte;
	*shadow_pte = (guest_pte & PT_PTE_COPY_MASK);
	set_pte_common(vcpu, shadow_pte, guest_pte & PT_BASE_ADDR_MASK,
		       guest_pte & PT_DIRTY_MASK, access_bits);
}

/*
 * Build the shadow pte for one 4K slice (@index) of a guest large page.
 * For 32-bit guests with PSE36, the high physical address bits stored
 * in the pde are folded into the guest address.
 */
static void FNAME(set_pde)(struct kvm_vcpu *vcpu, u64 guest_pde,
			   u64 *shadow_pte, u64 access_bits,
			   int index)
{
	gpa_t gaddr;

	ASSERT(*shadow_pte == 0);
	access_bits &= guest_pde;
	gaddr = (guest_pde & PT_DIR_BASE_ADDR_MASK) + PAGE_SIZE * index;
	if (PTTYPE == 32 && is_cpuid_PSE36())
		gaddr |= (guest_pde & PT32_DIR_PSE36_MASK) <<
			(32 - PT32_DIR_PSE36_SHIFT);
	*shadow_pte = guest_pde & PT_PTE_COPY_MASK;
	set_pte_common(vcpu, shadow_pte, gaddr,
		       guest_pde & PT_DIRTY_MASK, access_bits);
}
110
/*
 * Fetch a guest pte from a specific level in the paging hierarchy.
 *
 * Descends from the walker's current position, remapping walker->table
 * at each level, and stops at @level, at a non-present entry, or at a
 * guest large page — whichever comes first.  Access rights are ANDed
 * into walker->inherited_ar along the way (PAE PDPTEs carry no rights,
 * hence the level-3 exception).  Returns a pointer into the currently
 * kmap'ed guest table; valid until the next fetch or release_walker().
 */
static pt_element_t *FNAME(fetch_guest)(struct kvm_vcpu *vcpu,
					struct guest_walker *walker,
					int level,
					gva_t addr)
{

	ASSERT(level > 0  && level <= walker->level);

	for (;;) {
		int index = PT_INDEX(addr, walker->level);
		hpa_t paddr;

		ASSERT(((unsigned long)walker->table & PAGE_MASK) ==
		       ((unsigned long)&walker->table[index] & PAGE_MASK));
		if (level == walker->level ||
		    !is_present_pte(walker->table[index]) ||
		    (walker->level == PT_DIRECTORY_LEVEL &&
		     (walker->table[index] & PT_PAGE_SIZE_MASK) &&
		     (PTTYPE == 64 || is_pse(vcpu))))
			return &walker->table[index];
		if (walker->level != 3 || kvm_arch_ops->is_long_mode(vcpu))
			walker->inherited_ar &= walker->table[index];
		paddr = safe_gpa_to_hpa(vcpu, walker->table[index] & PT_BASE_ADDR_MASK);
		kunmap_atomic(walker->table, KM_USER0);
		walker->table = kmap_atomic(pfn_to_page(paddr >> PAGE_SHIFT),
					    KM_USER0);
		--walker->level;
	}
}
143
/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 *
 * Walks the shadow table for @addr, building missing intermediate
 * shadow pages from the corresponding guest entries as it goes.  A
 * guest large page is shadowed by a full table of 4K ptes, with the
 * parent entry tagged PT_SHADOW_PS_MARK.  Returns the leaf shadow pte,
 * NULL when the guest mapping is not present (guest fault), or
 * ERR_PTR(-ENOMEM) when no shadow pages are free.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *walker)
{
	hpa_t shadow_addr;
	int level;
	u64 *prev_shadow_ent = NULL;

	shadow_addr = vcpu->mmu.root_hpa;
	level = vcpu->mmu.shadow_root_level;

	for (; ; level--) {
		u32 index = SHADOW_PT_INDEX(addr, level);
		u64 *shadow_ent = ((u64 *)__va(shadow_addr)) + index;
		pt_element_t *guest_ent;
		u64 shadow_pte;

		if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) {
			/* Already shadowed: just descend. */
			if (level == PT_PAGE_TABLE_LEVEL)
				return shadow_ent;
			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
			prev_shadow_ent = shadow_ent;
			continue;
		}

		/* The shadow is deeper than a 2-level guest walk. */
		if (PTTYPE == 32 && level > PT32_ROOT_LEVEL) {
			ASSERT(level == PT32E_ROOT_LEVEL);
			guest_ent = FNAME(fetch_guest)(vcpu, walker,
						       PT32_ROOT_LEVEL, addr);
		} else
			guest_ent = FNAME(fetch_guest)(vcpu, walker,
						       level, addr);

		if (!is_present_pte(*guest_ent))
			return NULL;

		/* Don't set accessed bit on PAE PDPTRs */
		if (vcpu->mmu.root_level != 3 || walker->level != 3)
			*guest_ent |= PT_ACCESSED_MASK;

		if (level == PT_PAGE_TABLE_LEVEL) {

			if (walker->level == PT_DIRECTORY_LEVEL) {
				/* Guest large page -> table of 4K shadows. */
				if (prev_shadow_ent)
					*prev_shadow_ent |= PT_SHADOW_PS_MARK;
				FNAME(set_pde)(vcpu, *guest_ent, shadow_ent,
					       walker->inherited_ar,
				               PT_INDEX(addr, PT_PAGE_TABLE_LEVEL));
			} else {
				ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
				FNAME(set_pte)(vcpu, *guest_ent, shadow_ent, walker->inherited_ar);
			}
			return shadow_ent;
		}

		shadow_addr = kvm_mmu_alloc_page(vcpu, shadow_ent);
		if (!VALID_PAGE(shadow_addr))
			return ERR_PTR(-ENOMEM);
		shadow_pte = shadow_addr | PT_PRESENT_MASK;
		/* PAE PDPTEs may not carry accessed/rw/user bits. */
		if (vcpu->mmu.root_level > 3 || level != 3)
			shadow_pte |= PT_ACCESSED_MASK
				| PT_WRITABLE_MASK | PT_USER_MASK;
		*shadow_ent = shadow_pte;
		prev_shadow_ent = shadow_ent;
	}
}
212
/*
 * The guest faulted for write. We need to
 *
 * - check write permissions
 * - update the guest pte dirty bit
 * - update our own dirty page tracking structures
 *
 * Returns 1 if the fault was fixed by granting write access to the
 * shadow pte, 0 if the fault must be handled by the guest instead.
 */
static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu,
			       u64 *shadow_ent,
			       struct guest_walker *walker,
			       gva_t addr,
			       int user)
{
	pt_element_t *guest_ent;
	int writable_shadow;
	gfn_t gfn;

	if (is_writeble_pte(*shadow_ent))
		return 0;

	/* Did the guest itself grant write access? */
	writable_shadow = *shadow_ent & PT_SHADOW_WRITABLE_MASK;
	if (user) {
		/*
		 * User mode access.  Fail if it's a kernel page or a read-only
		 * page.
		 */
		if (!(*shadow_ent & PT_SHADOW_USER_MASK) || !writable_shadow)
			return 0;
		ASSERT(*shadow_ent & PT_USER_MASK);
	} else
		/*
		 * Kernel mode access.  Fail if it's a read-only page and
		 * supervisor write protection is enabled.
		 */
		if (!writable_shadow) {
			if (is_write_protection(vcpu))
				return 0;
			/* CR0.WP clear: allow the write, hide from user. */
			*shadow_ent &= ~PT_USER_MASK;
		}

	guest_ent = FNAME(fetch_guest)(vcpu, walker, PT_PAGE_TABLE_LEVEL, addr);

	if (!is_present_pte(*guest_ent)) {
		/* Guest unmapped it meanwhile; drop the stale shadow. */
		*shadow_ent = 0;
		return 0;
	}

	gfn = (*guest_ent & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
	mark_page_dirty(vcpu->kvm, gfn);
	*shadow_ent |= PT_WRITABLE_MASK;
	*guest_ent |= PT_DIRTY_MASK;

	return 1;
}
267
/*
 * Page fault handler.  There are several causes for a page fault:
 * - there is no shadow pte for the guest pte
 * - write access through a shadow pte marked read only so that we can set
 *   the dirty bit
 * - write access to a shadow pte marked read only so we can update the page
 *   dirty bitmap, when userspace requests it
 * - mmio access; in this case we will never install a present shadow pte
 * - normal guest page fault due to the guest pte marked not present, not
 *   writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			       u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int pte_present = error_code & PFERR_PRESENT_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	struct guest_walker walker;
	u64 *shadow_pte;
	int fixed;

	/*
	 * Look up the shadow pte for the faulting address.
	 */
	for (;;) {
		FNAME(init_walker)(&walker, vcpu);
		shadow_pte = FNAME(fetch)(vcpu, addr, &walker);
		if (IS_ERR(shadow_pte)) {  /* must be -ENOMEM */
			/* Reclaim all shadow pages and retry the fetch. */
			nonpaging_flush(vcpu);
			FNAME(release_walker)(&walker);
			continue;
		}
		break;
	}

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!shadow_pte) {
		inject_page_fault(vcpu, addr, error_code);
		FNAME(release_walker)(&walker);
		return 0;
	}

	/*
	 * Update the shadow pte.
	 */
	if (write_fault)
		fixed = FNAME(fix_write_pf)(vcpu, shadow_pte, &walker, addr,
					    user_fault);
	else
		fixed = fix_read_pf(shadow_pte);

	FNAME(release_walker)(&walker);

	/*
	 * mmio: emulate if accessible, otherwise its a guest fault.
	 */
	if (is_io_pte(*shadow_pte)) {
		if (may_access(*shadow_pte, write_fault, user_fault))
			return 1;
		pgprintk("%s: io work, no access\n", __FUNCTION__);
		inject_page_fault(vcpu, addr,
				  error_code | PFERR_PRESENT_MASK);
		return 0;
	}

	/*
	 * pte not present, guest page fault.
	 */
	if (pte_present && !fixed) {
		inject_page_fault(vcpu, addr, error_code);
		return 0;
	}

	++kvm_stat.pf_fixed;

	return 0;
}
349
/*
 * Software guest-pagetable walk: translate a guest virtual address to a
 * guest physical address, honoring large pages (and PSE36 on 32-bit
 * guests).  Returns UNMAPPED_GVA when the guest pte is not present.
 */
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	struct guest_walker walker;
	pt_element_t guest_pte;
	gpa_t gpa;

	FNAME(init_walker)(&walker, vcpu);
	guest_pte = *FNAME(fetch_guest)(vcpu, &walker, PT_PAGE_TABLE_LEVEL,
					vaddr);
	FNAME(release_walker)(&walker);

	if (!is_present_pte(guest_pte))
		return UNMAPPED_GVA;

	/* walker.level stopped at the pde for a guest large page. */
	if (walker.level == PT_DIRECTORY_LEVEL) {
		ASSERT((guest_pte & PT_PAGE_SIZE_MASK));
		ASSERT(PTTYPE == 64 || is_pse(vcpu));

		gpa = (guest_pte & PT_DIR_BASE_ADDR_MASK) | (vaddr &
			(PT_LEVEL_MASK(PT_PAGE_TABLE_LEVEL) | ~PAGE_MASK));

		if (PTTYPE == 32 && is_cpuid_PSE36())
			gpa |= (guest_pte & PT32_DIR_PSE36_MASK) <<
					(32 - PT32_DIR_PSE36_SHIFT);
	} else {
		gpa = (guest_pte & PT_BASE_ADDR_MASK);
		gpa |= (vaddr & ~PAGE_MASK);
	}

	return gpa;
}
381
382#undef pt_element_t
383#undef guest_walker
384#undef FNAME
385#undef PT_BASE_ADDR_MASK
386#undef PT_INDEX
387#undef SHADOW_PT_INDEX
388#undef PT_LEVEL_MASK
389#undef PT_PTE_COPY_MASK
390#undef PT_NON_PTE_COPY_MASK
391#undef PT_DIR_BASE_ADDR_MASK
diff --git a/drivers/kvm/segment_descriptor.h b/drivers/kvm/segment_descriptor.h
new file mode 100644
index 000000000000..71fdf458619a
--- /dev/null
+++ b/drivers/kvm/segment_descriptor.h
@@ -0,0 +1,17 @@
/*
 * x86 GDT/LDT segment descriptor layout (legacy 8-byte format);
 * field order matches the hardware bit layout, hence the packing.
 */
struct segment_descriptor {
	u16 limit_low;
	u16 base_low;
	u8  base_mid;
	u8  type : 4;		/* segment type */
	u8  system : 1;		/* 0 = system, 1 = code/data */
	u8  dpl : 2;		/* descriptor privilege level */
	u8  present : 1;
	u8  limit_high : 4;
	u8  avl : 1;		/* available for OS use */
	u8  long_mode : 1;	/* 64-bit code segment (L bit) */
	u8  default_op : 1;	/* D/B: default operand size */
	u8  granularity : 1;	/* limit in 4K units when set */
	u8  base_high;
} __attribute__ ((packed));
16
17
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
new file mode 100644
index 000000000000..0e6bc8c649ce
--- /dev/null
+++ b/drivers/kvm/svm.c
@@ -0,0 +1,1641 @@
1/*
2 * Kernel-based Virtual Machine driver for Linux
3 *
4 * AMD SVM support
5 *
6 * Copyright (C) 2006 Qumranet, Inc.
7 *
8 * Authors:
9 * Yaniv Kamay <yaniv@qumranet.com>
10 * Avi Kivity <avi@qumranet.com>
11 *
12 * This work is licensed under the terms of the GNU GPL, version 2. See
13 * the COPYING file in the top-level directory.
14 *
15 */
16
17#include <linux/module.h>
18#include <linux/vmalloc.h>
19#include <linux/highmem.h>
20#include <asm/desc.h>
21
22#include "kvm_svm.h"
23#include "x86_emulate.h"
24
25MODULE_AUTHOR("Qumranet");
26MODULE_LICENSE("GPL");
27
#define IOPM_ALLOC_ORDER 2	/* page order of the I/O permission map */
#define MSRPM_ALLOC_ORDER 1	/* page order of the MSR permission map */

/* Exception vectors injected into the guest. */
#define DB_VECTOR 1
#define UD_VECTOR 6
#define GP_VECTOR 13

#define DR7_GD_MASK (1 << 13)	/* DR7 general-detect enable */
#define DR6_BD_MASK (1 << 13)	/* DR6 debug-register-access detected */
#define CR4_DE_MASK (1UL << 3)	/* CR4 debugging extensions */

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

/* EFER bits tracked on behalf of the guest. */
#define KVM_EFER_LMA (1 << 10)
#define KVM_EFER_LME (1 << 8)

/* Physical base addresses of the shared I/O and MSR permission maps. */
unsigned long iopm_base;
unsigned long msrpm_base;
47
/* Layout of a 64-bit LDT/TSS descriptor in the host GDT (16 bytes). */
struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

/* Per-CPU SVM state: ASID allocation bookkeeping and host save area. */
struct svm_cpu_data {
	int cpu;

	uint64_t asid_generation;	/* bumped whenever the ASID space wraps */
	uint32_t max_asid;		/* highest ASID this CPU supports */
	uint32_t next_asid;		/* next ASID to hand out */
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;		/* host state-save area (VM_HSAVE_PA) */
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

/* Argument/result pair for per-CPU init calls. */
struct svm_init_data {
	int cpu;
	int r;
};
74
/* Base MSR numbers of the ranges covered by the MSR permission map. */
static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS (sizeof(msrpm_ranges) / sizeof(*msrpm_ranges))
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)	/* 2 bits per MSR */

#define MAX_INST_SIZE 15	/* x86 architectural instruction length limit */
82
/*
 * Return the guest's current address size in bytes (2, 4 or 8),
 * derived from CR0.PE, EFLAGS.VM and the CS attributes.
 */
static unsigned get_addr_size(struct kvm_vcpu *vcpu)
{
	struct vmcb_save_area *sa = &vcpu->svm->vmcb->save;
	u16 cs_attrib;

	/* Real mode or virtual-8086 mode: 16-bit addressing. */
	if (!(sa->cr0 & CR0_PE_MASK) || (sa->rflags & X86_EFLAGS_VM))
		return 2;

	cs_attrib = sa->cs.attrib;

	/* CS.L selects 64-bit, otherwise CS.D/B selects 32 vs 16 bit. */
	return (cs_attrib & SVM_SELECTOR_L_MASK) ? 8 :
		(cs_attrib & SVM_SELECTOR_DB_MASK) ? 4 : 2;
}
96
/*
 * Remove and return the lowest-numbered pending interrupt from the
 * vcpu's irq_pending bitmap, keeping the summary word consistent.
 */
static inline u8 pop_irq(struct kvm_vcpu *vcpu)
{
	int word_index = __ffs(vcpu->irq_summary);
	int bit_index = __ffs(vcpu->irq_pending[word_index]);
	int irq = word_index * BITS_PER_LONG + bit_index;

	clear_bit(bit_index, &vcpu->irq_pending[word_index]);
	if (!vcpu->irq_pending[word_index])
		clear_bit(word_index, &vcpu->irq_summary);
	return irq;
}

/* Mark interrupt 'irq' pending in the vcpu's bitmap and summary word. */
static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
{
	set_bit(irq, vcpu->irq_pending);
	set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
}
114
/* Clear the global interrupt flag (SVM CLGI instruction). */
static inline void clgi(void)
{
	asm volatile (SVM_CLGI);
}

/* Set the global interrupt flag (SVM STGI instruction). */
static inline void stgi(void)
{
	asm volatile (SVM_STGI);
}

/* Invalidate the TLB entry for 'addr' in address space 'asid'. */
static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (SVM_INVLPGA :: "a"(addr), "c"(asid));
}

/* Read the host CR2 (page-fault address) register. */
static inline unsigned long kvm_read_cr2(void)
{
	unsigned long cr2;

	asm volatile ("mov %%cr2, %0" : "=r" (cr2));
	return cr2;
}

/* Write the host CR2 register. */
static inline void kvm_write_cr2(unsigned long val)
{
	asm volatile ("mov %0, %%cr2" :: "r" (val));
}

/* Read debug status register DR6. */
static inline unsigned long read_dr6(void)
{
	unsigned long dr6;

	asm volatile ("mov %%dr6, %0" : "=r" (dr6));
	return dr6;
}

/* Write debug status register DR6. */
static inline void write_dr6(unsigned long val)
{
	asm volatile ("mov %0, %%dr6" :: "r" (val));
}

/* Read debug control register DR7. */
static inline unsigned long read_dr7(void)
{
	unsigned long dr7;

	asm volatile ("mov %%dr7, %0" : "=r" (dr7));
	return dr7;
}

/* Write debug control register DR7. */
static inline void write_dr7(unsigned long val)
{
	asm volatile ("mov %0, %%dr7" :: "r" (val));
}
168
/* True when the guest is running in long mode (EFER.LMA set). */
static inline int svm_is_long_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->svm->vmcb->save.efer & KVM_EFER_LMA;
}

/*
 * Invalidate the vcpu's ASID so a fresh one is assigned before the
 * next vmrun; on SVM that is how guest TLB entries get flushed.
 */
static inline void force_new_asid(struct kvm_vcpu *vcpu)
{
	vcpu->svm->asid_generation--;
}

/* Flush the guest TLB by forcing an ASID change. */
static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}
183
/*
 * Set the guest's EFER.  LME is cleared whenever LMA is clear to keep
 * the shadow value consistent; SVME must stay set while the guest
 * runs under SVM, so it is forced on in the VMCB copy.
 */
static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (!(efer & KVM_EFER_LMA))
		efer &= ~KVM_EFER_LME;

	vcpu->svm->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
	vcpu->shadow_efer = efer;
}
192
/* Queue a #GP with an error code for injection on the next vmrun. */
static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
{
	vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
						SVM_EVTINJ_VALID_ERR |
						SVM_EVTINJ_TYPE_EXEPT |
						GP_VECTOR;
	vcpu->svm->vmcb->control.event_inj_err = error_code;
}

/* Queue a #UD (invalid opcode) exception for injection. */
static void inject_ud(struct kvm_vcpu *vcpu)
{
	vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
						SVM_EVTINJ_TYPE_EXEPT |
						UD_VECTOR;
}

/* Queue a #DB (debug) exception for injection. */
static void inject_db(struct kvm_vcpu *vcpu)
{
	vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
						SVM_EVTINJ_TYPE_EXEPT |
						DB_VECTOR;
}
215
/* True when the exit/injection info word describes a #PF exception. */
static int is_page_fault(uint32_t info)
{
	info &= SVM_EVTINJ_VEC_MASK | SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (PF_VECTOR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT);
}

/* True when the info word describes an external interrupt. */
static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}
227
/*
 * Advance the guest rip past the instruction that caused the current
 * exit.  next_rip must have been set by the exit handler; an advance
 * of more than 15 bytes (the x86 maximum) is logged as suspect.
 */
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	if (!vcpu->svm->next_rip) {
		printk(KERN_DEBUG "%s: NOP\n", __FUNCTION__);
		return;
	}
	if (vcpu->svm->next_rip - vcpu->svm->vmcb->save.rip > 15) {
		printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n",
		       __FUNCTION__,
		       vcpu->svm->vmcb->save.rip,
		       vcpu->svm->next_rip);
	}

	vcpu->rip = vcpu->svm->vmcb->save.rip = vcpu->svm->next_rip;
	/* Skipping the instruction also clears any interrupt shadow. */
	vcpu->svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
}
244
/*
 * Check that this CPU is an AMD part that advertises SVM support in
 * cpuid 0x80000001 ecx.  Returns 1 when SVM is usable, 0 otherwise.
 */
static int has_svm(void)
{
	uint32_t eax, ebx, ecx, edx;

	if (current_cpu_data.x86_vendor != X86_VENDOR_AMD) {
		printk(KERN_INFO "has_svm: not amd\n");
		return 0;
	}

	/* Make sure the SVM feature leaf (0x8000000a) exists at all. */
	cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
	if (eax < SVM_CPUID_FUNC) {
		printk(KERN_INFO "has_svm: can't execute cpuid_8000000a\n");
		return 0;
	}

	cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
	if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) {
		printk(KERN_DEBUG "has_svm: svm not available\n");
		return 0;
	}
	return 1;
}
267
268static void svm_hardware_disable(void *garbage)
269{
270 struct svm_cpu_data *svm_data
271 = per_cpu(svm_data, raw_smp_processor_id());
272
273 if (svm_data) {
274 uint64_t efer;
275
276 wrmsrl(MSR_VM_HSAVE_PA, 0);
277 rdmsrl(MSR_EFER, efer);
278 wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
279 per_cpu(svm_data, raw_smp_processor_id()) = 0;
280 __free_page(svm_data->save_area);
281 kfree(svm_data);
282 }
283}
284
/*
 * Enable SVM on the current CPU: initialize the per-CPU ASID state,
 * locate the host TSS descriptor in the GDT, set EFER.SVME and point
 * MSR_VM_HSAVE_PA at this CPU's host save area.  'garbage' is unused.
 */
static void svm_hardware_enable(void *garbage)
{

	struct svm_cpu_data *svm_data;
	uint64_t efer;
#ifdef CONFIG_X86_64
	struct desc_ptr gdt_descr;
#else
	struct Xgt_desc_struct gdt_descr;
#endif
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	if (!has_svm()) {
		printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
		return;
	}
	svm_data = per_cpu(svm_data, me);

	if (!svm_data) {
		printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
		       me);
		return;
	}

	svm_data->asid_generation = 1;
	svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	/* Start past max_asid so the first new_asid() wraps and flushes. */
	svm_data->next_asid = svm_data->max_asid + 1;

	asm volatile ( "sgdt %0" : "=m"(gdt_descr) );
	gdt = (struct desc_struct *)gdt_descr.address;
	svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	rdmsrl(MSR_EFER, efer);
	wrmsrl(MSR_EFER, efer | MSR_EFER_SVME_MASK);

	wrmsrl(MSR_VM_HSAVE_PA,
	       page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
}
324
/*
 * Allocate the per-CPU SVM state and its host save-area page for
 * 'cpu'.  Returns 0 on success, -ENOMEM on allocation failure.
 */
static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *svm_data;
	int r;

	svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!svm_data)
		return -ENOMEM;
	svm_data->cpu = cpu;
	svm_data->save_area = alloc_page(GFP_KERNEL);
	r = -ENOMEM;
	if (!svm_data->save_area)
		goto err_1;

	per_cpu(svm_data, cpu) = svm_data;

	return 0;

err_1:
	kfree(svm_data);
	return r;

}
348
/*
 * Configure the MSR permission map entry for 'msr'.  Each MSR owns
 * two adjacent bits: bit 0 intercepts reads, bit 1 intercepts writes;
 * read/write == 1 means "do not intercept".  Returns 1 when the MSR
 * fell inside a mapped range, 0 (with a debug message) otherwise.
 */
static int set_msr_interception(u32 *msrpm, unsigned msr,
				int read, int write)
{
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr >= msrpm_ranges[i] &&
		    msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
			/* Bit offset of this MSR's two-bit field in the map. */
			u32 msr_offset = (i * MSRS_IN_RANGE + msr -
					  msrpm_ranges[i]) * 2;

			u32 *base = msrpm + (msr_offset / 32);
			u32 msr_shift = msr_offset % 32;
			u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
			*base = (*base & ~(0x3 << msr_shift)) |
				(mask << msr_shift);
			return 1;
		}
	}
	printk(KERN_DEBUG "%s: not found 0x%x\n", __FUNCTION__, msr);
	return 0;
}
371
/*
 * Module-load time setup: allocate the I/O and MSR permission maps
 * (intercept everything by default, then punch pass-through holes for
 * the MSRs we handle directly) and initialize per-CPU state for every
 * online CPU.  Returns 0 or a negative errno; on failure everything
 * allocated so far is released via the goto-cleanup chain.
 */
static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	struct page *msrpm_pages;
	void *msrpm_va;
	int r;

	kvm_emulator_want_group7_invlpg();

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;
	/* All-ones == intercept every I/O port. */
	memset(page_address(iopm_pages), 0xff,
					PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;


	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);

	r = -ENOMEM;
	if (!msrpm_pages)
		goto err_1;

	msrpm_va = page_address(msrpm_pages);
	/* All-ones == intercept every MSR access by default. */
	memset(msrpm_va, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
	msrpm_base = page_to_pfn(msrpm_pages) << PAGE_SHIFT;

#ifdef CONFIG_X86_64
	set_msr_interception(msrpm_va, MSR_GS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_FS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_KERNEL_GS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_STAR, 1, 1);
	set_msr_interception(msrpm_va, MSR_LSTAR, 1, 1);
	set_msr_interception(msrpm_va, MSR_CSTAR, 1, 1);
	set_msr_interception(msrpm_va, MSR_SYSCALL_MASK, 1, 1);
#endif
	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_CS, 1, 1);
	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_ESP, 1, 1);
	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_EIP, 1, 1);

	for_each_online_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err_2;
	}
	return 0;

err_2:
	__free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
	msrpm_base = 0;
err_1:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}
429
/* Module-unload teardown: release the I/O and MSR permission maps. */
static __exit void svm_hardware_unsetup(void)
{
	__free_pages(pfn_to_page(msrpm_base >> PAGE_SHIFT), MSRPM_ALLOC_ORDER);
	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = msrpm_base = 0;
}
436
/* Reset a VMCB segment register to a real-mode read/write data segment. */
static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

/* Reset a VMCB system segment register (LDT/TSS) to the given type. */
static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}
453
/* Per-vcpu architecture setup hook; nothing extra to do for SVM. */
static int svm_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}
458
/*
 * Fill in a freshly zeroed VMCB with x86 reset-state registers and the
 * full set of intercepts KVM needs: cr/dr accesses, #PF, interrupts,
 * cpuid, hlt, invlpg, I/O, MSR accesses, task switches and all
 * SVM-specific instructions.
 */
static void init_vmcb(struct vmcb *vmcb)
{
	struct vmcb_control_area *control = &vmcb->control;
	struct vmcb_save_area *save = &vmcb->save;
	u64 tsc;

	control->intercept_cr_read = 	INTERCEPT_CR0_MASK |
					INTERCEPT_CR3_MASK |
					INTERCEPT_CR4_MASK;

	control->intercept_cr_write = 	INTERCEPT_CR0_MASK |
					INTERCEPT_CR3_MASK |
					INTERCEPT_CR4_MASK;

	control->intercept_dr_read = 	INTERCEPT_DR0_MASK |
					INTERCEPT_DR1_MASK |
					INTERCEPT_DR2_MASK |
					INTERCEPT_DR3_MASK;

	control->intercept_dr_write = 	INTERCEPT_DR0_MASK |
					INTERCEPT_DR1_MASK |
					INTERCEPT_DR2_MASK |
					INTERCEPT_DR3_MASK |
					INTERCEPT_DR5_MASK |
					INTERCEPT_DR7_MASK;

	control->intercept_exceptions = 1 << PF_VECTOR;


	control->intercept = 	(1ULL << INTERCEPT_INTR) |
				(1ULL << INTERCEPT_NMI) |
		/*
		 * selective cr0 intercept bug?
		 *    	0:   0f 22 d8                mov    %eax,%cr3
		 *	3:   0f 20 c0                mov    %cr0,%eax
		 *	6:   0d 00 00 00 80          or     $0x80000000,%eax
		 *	b:   0f 22 c0                mov    %eax,%cr0
		 * set cr3 ->interception
		 * get cr0 ->interception
		 * set cr0 -> no interception
		 */
		/*              (1ULL << INTERCEPT_SELECTIVE_CR0) | */
				(1ULL << INTERCEPT_CPUID) |
				(1ULL << INTERCEPT_HLT) |
				(1ULL << INTERCEPT_INVLPG) |
				(1ULL << INTERCEPT_INVLPGA) |
				(1ULL << INTERCEPT_IOIO_PROT) |
				(1ULL << INTERCEPT_MSR_PROT) |
				(1ULL << INTERCEPT_TASK_SWITCH) |
				(1ULL << INTERCEPT_VMRUN) |
				(1ULL << INTERCEPT_VMMCALL) |
				(1ULL << INTERCEPT_VMLOAD) |
				(1ULL << INTERCEPT_VMSAVE) |
				(1ULL << INTERCEPT_STGI) |
				(1ULL << INTERCEPT_CLGI) |
				(1ULL << INTERCEPT_SKINIT);

	control->iopm_base_pa = iopm_base;
	control->msrpm_base_pa = msrpm_base;
	/* Make the guest TSC start at zero. */
	rdtscll(tsc);
	control->tsc_offset = -tsc;
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	/* x86 reset vector: cs = f000:fff0. */
	save->cs.selector = 0xf000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;
	save->cs.base = 0xffff0000;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	save->efer = MSR_EFER_SVME_MASK;

	save->dr6 = 0xffff0ff0;
	save->dr7 = 0x400;
	save->rflags = 2;
	save->rip = 0x0000fff0;

	/*
	 * cr0 val on cpu init should be 0x60000010, we enable cpu
	 * cache by default. the orderly way is to enable cache in bios.
	 */
	save->cr0 = 0x00000010 | CR0_PG_MASK;
	save->cr4 = CR4_PAE_MASK;
	/* rdx = ?? */
}
556
557static int svm_create_vcpu(struct kvm_vcpu *vcpu)
558{
559 struct page *page;
560 int r;
561
562 r = -ENOMEM;
563 vcpu->svm = kzalloc(sizeof *vcpu->svm, GFP_KERNEL);
564 if (!vcpu->svm)
565 goto out1;
566 page = alloc_page(GFP_KERNEL);
567 if (!page)
568 goto out2;
569
570 vcpu->svm->vmcb = page_address(page);
571 memset(vcpu->svm->vmcb, 0, PAGE_SIZE);
572 vcpu->svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
573 vcpu->svm->cr0 = 0x00000010;
574 vcpu->svm->asid_generation = 0;
575 memset(vcpu->svm->db_regs, 0, sizeof(vcpu->svm->db_regs));
576 init_vmcb(vcpu->svm->vmcb);
577
578 return 0;
579
580out2:
581 kfree(vcpu->svm);
582out1:
583 return r;
584}
585
/* Free the vcpu's VMCB page and SVM state, if they were allocated. */
static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->svm)
		return;
	if (vcpu->svm->vmcb)
		__free_page(pfn_to_page(vcpu->svm->vmcb_pa >> PAGE_SHIFT));
	kfree(vcpu->svm);
}
594
/* Pin the vcpu to the current CPU (get_cpu disables preemption). */
static struct kvm_vcpu *svm_vcpu_load(struct kvm_vcpu *vcpu)
{
	get_cpu();
	return vcpu;
}

/* Release the vcpu (re-enables preemption). */
static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
	put_cpu();
}
605
/* Copy the registers the VMCB owns (rax/rsp/rip) into the vcpu. */
static void svm_cache_regs(struct kvm_vcpu *vcpu)
{
	vcpu->regs[VCPU_REGS_RAX] = vcpu->svm->vmcb->save.rax;
	vcpu->regs[VCPU_REGS_RSP] = vcpu->svm->vmcb->save.rsp;
	vcpu->rip = vcpu->svm->vmcb->save.rip;
}

/* Write the cached rax/rsp/rip back into the VMCB. */
static void svm_decache_regs(struct kvm_vcpu *vcpu)
{
	vcpu->svm->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX];
	vcpu->svm->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP];
	vcpu->svm->vmcb->save.rip = vcpu->rip;
}

/* Return the guest's rflags from the VMCB. */
static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
	return vcpu->svm->vmcb->save.rflags;
}

/* Store new guest rflags into the VMCB. */
static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	vcpu->svm->vmcb->save.rflags = rflags;
}
629
630static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
631{
632 struct vmcb_save_area *save = &vcpu->svm->vmcb->save;
633
634 switch (seg) {
635 case VCPU_SREG_CS: return &save->cs;
636 case VCPU_SREG_DS: return &save->ds;
637 case VCPU_SREG_ES: return &save->es;
638 case VCPU_SREG_FS: return &save->fs;
639 case VCPU_SREG_GS: return &save->gs;
640 case VCPU_SREG_SS: return &save->ss;
641 case VCPU_SREG_TR: return &save->tr;
642 case VCPU_SREG_LDTR: return &save->ldtr;
643 }
644 BUG();
645 return 0;
646}
647
/* Return the base address of the given guest segment. */
static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	return s->base;
}

/*
 * Unpack a VMCB segment register into the generic kvm_segment format,
 * splitting the packed attrib word into its architectural fields.
 */
static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
	var->unusable = !var->present;
}

/* Extract the D/B and L bits of the guest's CS segment. */
static void svm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	struct vmcb_seg *s = svm_seg(vcpu, VCPU_SREG_CS);

	*db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
	*l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
}
681
682static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
683{
684 dt->limit = vcpu->svm->vmcb->save.ldtr.limit;
685 dt->base = vcpu->svm->vmcb->save.ldtr.base;
686}
687
688static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
689{
690 vcpu->svm->vmcb->save.ldtr.limit = dt->limit;
691 vcpu->svm->vmcb->save.ldtr.base = dt->base ;
692}
693
/* Read the guest GDT limit/base from the VMCB. */
static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	dt->limit = vcpu->svm->vmcb->save.gdtr.limit;
	dt->base = vcpu->svm->vmcb->save.gdtr.base;
}

/* Write the guest GDT limit/base into the VMCB. */
static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	vcpu->svm->vmcb->save.gdtr.limit = dt->limit;
	vcpu->svm->vmcb->save.gdtr.base = dt->base ;
}
705
/*
 * Set the guest's CR0.  On 64-bit hosts this also tracks long-mode
 * transitions: enabling paging with EFER.LME set activates LMA, and
 * disabling paging deactivates LMA/LME.  The VMCB copy always has PG
 * set because the (shadow-paged) guest must run with paging on.
 */
static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
#ifdef CONFIG_X86_64
	if (vcpu->shadow_efer & KVM_EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) {
			vcpu->shadow_efer |= KVM_EFER_LMA;
			vcpu->svm->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME;
		}

		if (is_paging(vcpu) && !(cr0 & CR0_PG_MASK) ) {
			vcpu->shadow_efer &= ~KVM_EFER_LMA;
			vcpu->svm->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME);
		}
	}
#endif
	vcpu->svm->cr0 = cr0;
	vcpu->svm->vmcb->save.cr0 = cr0 | CR0_PG_MASK;
	vcpu->cr0 = cr0;
}

/*
 * Set the guest's CR4; PAE is forced on in the VMCB copy since the
 * shadow page tables use the 64-bit format.
 */
static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	vcpu->cr4 = cr4;
	vcpu->svm->vmcb->save.cr4 = cr4 | CR4_PAE_MASK;
}
731
/*
 * Pack a generic kvm_segment into the VMCB segment register format.
 * Writing CS also refreshes the saved CPL from the new CS DPL.
 */
static void svm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	s->base = var->base;
	s->limit = var->limit;
	s->selector = var->selector;
	if (var->unusable)
		s->attrib = 0;
	else {
		s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
		s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
		s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
		s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
		s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
		s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
		s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
	}
	if (seg == VCPU_SREG_CS)
		vcpu->svm->vmcb->save.cpl
			= (vcpu->svm->vmcb->save.cs.attrib
			   >> SVM_SELECTOR_DPL_SHIFT) & 3;

}
758
759/* FIXME:
760
761 vcpu->svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
762 vcpu->svm->vmcb->control.int_ctl |= (sregs->cr8 & V_TPR_MASK);
763
764*/
765
/* Guest debugging (hardware breakpoints) is not implemented on SVM. */
static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
	return -EOPNOTSUPP;
}

/* Restore the host MSRs previously captured by save_host_msrs(). */
static void load_host_msrs(struct kvm_vcpu *vcpu)
{
	int i;

	for ( i = 0; i < NR_HOST_SAVE_MSRS; i++)
		wrmsrl(host_save_msrs[i], vcpu->svm->host_msrs[i]);
}

/* Capture the host MSRs that the guest may clobber while running. */
static void save_host_msrs(struct kvm_vcpu *vcpu)
{
	int i;

	for ( i = 0; i < NR_HOST_SAVE_MSRS; i++)
		rdmsrl(host_save_msrs[i], vcpu->svm->host_msrs[i]);
}
786
/*
 * Assign the vcpu a fresh ASID on this CPU.  When the ASID space is
 * exhausted, start a new generation and request a full ASID flush on
 * the next vmrun.
 */
static void new_asid(struct kvm_vcpu *vcpu, struct svm_cpu_data *svm_data)
{
	if (svm_data->next_asid > svm_data->max_asid) {
		++svm_data->asid_generation;
		svm_data->next_asid = 1;
		vcpu->svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}

	vcpu->cpu = svm_data->cpu;
	vcpu->svm->asid_generation = svm_data->asid_generation;
	vcpu->svm->vmcb->control.asid = svm_data->next_asid++;
}

/* Invalidate a single guest TLB entry in the vcpu's current ASID. */
static void svm_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
	invlpga(address, vcpu->svm->vmcb->control.asid); /* is this needed? */
}
804
/* Read a guest debug register from the software shadow copy. */
static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
{
	return vcpu->svm->db_regs[dr];
}
809
/*
 * Write a guest debug register.  On success *exception is 0;
 * otherwise it names the vector to inject: #DB for a general-detect
 * trap, #UD for dr4/5 with CR4.DE set, #GP for setting reserved
 * upper dr7 bits.
 */
static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception)
{
	*exception = 0;

	/* DR7.GD: any debug-register access raises #DB with DR6.BD set. */
	if (vcpu->svm->vmcb->save.dr7 & DR7_GD_MASK) {
		vcpu->svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
		vcpu->svm->vmcb->save.dr6 |= DR6_BD_MASK;
		*exception = DB_VECTOR;
		return;
	}

	switch (dr) {
	case 0 ... 3:
		vcpu->svm->db_regs[dr] = value;
		return;
	case 4 ... 5:
		if (vcpu->cr4 & CR4_DE_MASK) {
			*exception = UD_VECTOR;
			return;
		}
		/*
		 * fall through: with CR4.DE clear, dr4/5 alias dr6/7.
		 * NOTE(review): dr6 itself hits the default branch
		 * (#UD) below — confirm that is intended.
		 */
	case 7: {
		if (value & ~((1ULL << 32) - 1)) {
			*exception = GP_VECTOR;
			return;
		}
		vcpu->svm->vmcb->save.dr7 = value;
		return;
	}
	default:
		printk(KERN_DEBUG "%s: unexpected dr %u\n",
		       __FUNCTION__, dr);
		*exception = UD_VECTOR;
		return;
	}
}
846
/*
 * Handle a guest page-fault intercept.  Re-queues any external
 * interrupt that was being delivered when the fault hit, then lets
 * the shadow MMU service the fault; unresolved faults go to the
 * instruction emulator (the MMIO path).  Returns 1 to resume the
 * guest, 0 to exit to userspace.
 */
static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 exit_int_info = vcpu->svm->vmcb->control.exit_int_info;
	u64 fault_address;
	u32 error_code;
	enum emulation_result er;

	if (is_external_interrupt(exit_int_info))
		push_irq(vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);

	spin_lock(&vcpu->kvm->lock);

	/* exit_info_2 holds the faulting address, exit_info_1 the code. */
	fault_address  = vcpu->svm->vmcb->control.exit_info_2;
	error_code = vcpu->svm->vmcb->control.exit_info_1;
	if (!vcpu->mmu.page_fault(vcpu, fault_address, error_code)) {
		spin_unlock(&vcpu->kvm->lock);
		return 1;
	}
	er = emulate_instruction(vcpu, kvm_run, fault_address, error_code);
	spin_unlock(&vcpu->kvm->lock);

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_DO_MMIO:
		++kvm_stat.mmio_exits;
		kvm_run->exit_reason = KVM_EXIT_MMIO;
		return 0;
	case EMULATE_FAIL:
		vcpu_printf(vcpu, "%s: emulate fail\n", __FUNCTION__);
		break;
	default:
		BUG();
	}

	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	return 0;
}
885
886static int io_get_override(struct kvm_vcpu *vcpu,
887 struct vmcb_seg **seg,
888 int *addr_override)
889{
890 u8 inst[MAX_INST_SIZE];
891 unsigned ins_length;
892 gva_t rip;
893 int i;
894
895 rip = vcpu->svm->vmcb->save.rip;
896 ins_length = vcpu->svm->next_rip - rip;
897 rip += vcpu->svm->vmcb->save.cs.base;
898
899 if (ins_length > MAX_INST_SIZE)
900 printk(KERN_DEBUG
901 "%s: inst length err, cs base 0x%llx rip 0x%llx "
902 "next rip 0x%llx ins_length %u\n",
903 __FUNCTION__,
904 vcpu->svm->vmcb->save.cs.base,
905 vcpu->svm->vmcb->save.rip,
906 vcpu->svm->vmcb->control.exit_info_2,
907 ins_length);
908
909 if (kvm_read_guest(vcpu, rip, ins_length, inst) != ins_length)
910 /* #PF */
911 return 0;
912
913 *addr_override = 0;
914 *seg = 0;
915 for (i = 0; i < ins_length; i++)
916 switch (inst[i]) {
917 case 0xf0:
918 case 0xf2:
919 case 0xf3:
920 case 0x66:
921 continue;
922 case 0x67:
923 *addr_override = 1;
924 continue;
925 case 0x2e:
926 *seg = &vcpu->svm->vmcb->save.cs;
927 continue;
928 case 0x36:
929 *seg = &vcpu->svm->vmcb->save.ss;
930 continue;
931 case 0x3e:
932 *seg = &vcpu->svm->vmcb->save.ds;
933 continue;
934 case 0x26:
935 *seg = &vcpu->svm->vmcb->save.es;
936 continue;
937 case 0x64:
938 *seg = &vcpu->svm->vmcb->save.fs;
939 continue;
940 case 0x65:
941 *seg = &vcpu->svm->vmcb->save.gs;
942 continue;
943 default:
944 return 1;
945 }
946 printk(KERN_DEBUG "%s: unexpected\n", __FUNCTION__);
947 return 0;
948}
949
950static unsigned long io_adress(struct kvm_vcpu *vcpu, int ins, u64 *address)
951{
952 unsigned long addr_mask;
953 unsigned long *reg;
954 struct vmcb_seg *seg;
955 int addr_override;
956 struct vmcb_save_area *save_area = &vcpu->svm->vmcb->save;
957 u16 cs_attrib = save_area->cs.attrib;
958 unsigned addr_size = get_addr_size(vcpu);
959
960 if (!io_get_override(vcpu, &seg, &addr_override))
961 return 0;
962
963 if (addr_override)
964 addr_size = (addr_size == 2) ? 4: (addr_size >> 1);
965
966 if (ins) {
967 reg = &vcpu->regs[VCPU_REGS_RDI];
968 seg = &vcpu->svm->vmcb->save.es;
969 } else {
970 reg = &vcpu->regs[VCPU_REGS_RSI];
971 seg = (seg) ? seg : &vcpu->svm->vmcb->save.ds;
972 }
973
974 addr_mask = ~0ULL >> (64 - (addr_size * 8));
975
976 if ((cs_attrib & SVM_SELECTOR_L_MASK) &&
977 !(vcpu->svm->vmcb->save.rflags & X86_EFLAGS_VM)) {
978 *address = (*reg & addr_mask);
979 return addr_mask;
980 }
981
982 if (!(seg->attrib & SVM_SELECTOR_P_SHIFT)) {
983 svm_inject_gp(vcpu, 0);
984 return 0;
985 }
986
987 *address = (*reg & addr_mask) + seg->base;
988 return addr_mask;
989}
990
/*
 * Handle an I/O intercept: decode the exit info into a KVM_EXIT_IO
 * request for userspace.  For string instructions the guest-linear
 * address (and, with rep, the count and direction) are filled in too.
 * Returns 0 to exit to userspace, 1 to resume the guest.
 */
static int io_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 io_info = vcpu->svm->vmcb->control.exit_info_1; /* address size bug? */
	int _in = io_info & SVM_IOIO_TYPE_MASK;

	++kvm_stat.io_exits;

	/* exit_info_2 holds the rip of the next instruction. */
	vcpu->svm->next_rip = vcpu->svm->vmcb->control.exit_info_2;

	kvm_run->exit_reason = KVM_EXIT_IO;
	kvm_run->io.port = io_info >> 16;
	kvm_run->io.direction = (_in) ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
	kvm_run->io.size = ((io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT);
	kvm_run->io.string = (io_info & SVM_IOIO_STR_MASK) != 0;
	kvm_run->io.rep = (io_info & SVM_IOIO_REP_MASK) != 0;

	if (kvm_run->io.string) {
		unsigned addr_mask;

		addr_mask = io_adress(vcpu, _in, &kvm_run->io.address);
		if (!addr_mask) {
			printk(KERN_DEBUG "%s: get io address failed\n", __FUNCTION__);
			return 1;
		}

		if (kvm_run->io.rep) {
			kvm_run->io.count = vcpu->regs[VCPU_REGS_RCX] & addr_mask;
			kvm_run->io.string_down = (vcpu->svm->vmcb->save.rflags
						   & X86_EFLAGS_DF) != 0;
		}
	} else {
		kvm_run->io.value = vcpu->svm->vmcb->save.rax;
	}
	return 0;
}
1026
1027
/* Exits that need no handling (INTR/NMI/SMI/INIT): just resume. */
static int nop_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	return 1;
}

/*
 * Handle hlt: skip the instruction, then resume if an interrupt can
 * wake the guest immediately, otherwise exit to userspace to idle.
 */
static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 1;
	skip_emulated_instruction(vcpu);
	if (vcpu->irq_summary && (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF))
		return 1;

	kvm_run->exit_reason = KVM_EXIT_HLT;
	return 0;
}

/* Privileged SVM instructions are not allowed to the guest: inject #UD. */
static int invalid_op_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	inject_ud(vcpu);
	return 1;
}

/* Hardware task switches are not supported; punt to userspace. */
static int task_switch_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	printk(KERN_DEBUG "%s: task swiche is unsupported\n", __FUNCTION__);
	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	return 0;
}

/* cpuid is serviced by userspace; record the 2-byte skip and exit. */
static int cpuid_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2;
	kvm_run->exit_reason = KVM_EXIT_CPUID;
	return 0;
}

/* Generic handler: run the intercepted instruction through the emulator. */
static int emulate_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	if (emulate_instruction(vcpu, 0, 0, 0) != EMULATE_DONE)
		printk(KERN_ERR "%s: failed\n", __FUNCTION__);
	return 1;
}
1070
/*
 * Service a guest rdmsr.  Machine-check, microcode and MTRR registers
 * read as zero; the TSC is offset-adjusted; syscall/sysenter MSRs come
 * from the VMCB.  Returns 0 with *data filled in, or 1 for an
 * unhandled MSR (caller injects #GP).
 */
static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
{
	switch (ecx) {
	case MSR_IA32_MC0_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MC0_MISC:
	case MSR_IA32_MC0_MISC+4:
	case MSR_IA32_MC0_MISC+8:
	case MSR_IA32_MC0_MISC+12:
	case MSR_IA32_MC0_MISC+16:
	case MSR_IA32_UCODE_REV:
	/* MTRR registers */
	case 0xfe:
	case 0x200 ... 0x2ff:
		*data = 0;
		break;
	case MSR_IA32_TIME_STAMP_COUNTER: {
		u64 tsc;

		/* Present the guest's view: host TSC plus the VMCB offset. */
		rdtscll(tsc);
		*data = vcpu->svm->vmcb->control.tsc_offset + tsc;
		break;
	}
	case MSR_EFER:
		*data = vcpu->shadow_efer;
		break;
	case MSR_IA32_APICBASE:
		*data = vcpu->apic_base;
		break;
#ifdef CONFIG_X86_64
	case MSR_STAR:
		*data = vcpu->svm->vmcb->save.star;
		break;
	case MSR_LSTAR:
		*data = vcpu->svm->vmcb->save.lstar;
		break;
	case MSR_CSTAR:
		*data = vcpu->svm->vmcb->save.cstar;
		break;
	case MSR_KERNEL_GS_BASE:
		*data = vcpu->svm->vmcb->save.kernel_gs_base;
		break;
	case MSR_SYSCALL_MASK:
		*data = vcpu->svm->vmcb->save.sfmask;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		*data = vcpu->svm->vmcb->save.sysenter_cs;
		break;
	case MSR_IA32_SYSENTER_EIP:
		*data = vcpu->svm->vmcb->save.sysenter_eip;
		break;
	case MSR_IA32_SYSENTER_ESP:
		*data = vcpu->svm->vmcb->save.sysenter_esp;
		break;
	default:
		printk(KERN_ERR "kvm: unhandled rdmsr: 0x%x\n", ecx);
		return 1;
	}
	return 0;
}
1133
/*
 * rdmsr intercept: read the MSR named by ecx into edx:eax and skip
 * the 2-byte instruction, or inject #GP for an unhandled MSR.
 */
static int rdmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
	u64 data;

	if (svm_get_msr(vcpu, ecx, &data))
		svm_inject_gp(vcpu, 0);
	else {
		vcpu->svm->vmcb->save.rax = data & 0xffffffff;
		vcpu->regs[VCPU_REGS_RDX] = data >> 32;
		vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2;
		skip_emulated_instruction(vcpu);
	}
	return 1;
}
1149
1150static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
1151{
1152 switch (ecx) {
1153#ifdef CONFIG_X86_64
1154 case MSR_EFER:
1155 set_efer(vcpu, data);
1156 break;
1157#endif
1158 case MSR_IA32_MC0_STATUS:
1159 printk(KERN_WARNING "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n"
1160 , __FUNCTION__, data);
1161 break;
1162 case MSR_IA32_TIME_STAMP_COUNTER: {
1163 u64 tsc;
1164
1165 rdtscll(tsc);
1166 vcpu->svm->vmcb->control.tsc_offset = data - tsc;
1167 break;
1168 }
1169 case MSR_IA32_UCODE_REV:
1170 case MSR_IA32_UCODE_WRITE:
1171 case 0x200 ... 0x2ff: /* MTRRs */
1172 break;
1173 case MSR_IA32_APICBASE:
1174 vcpu->apic_base = data;
1175 break;
1176#ifdef CONFIG_X86_64_
1177 case MSR_STAR:
1178 vcpu->svm->vmcb->save.star = data;
1179 break;
1180 case MSR_LSTAR:
1181 vcpu->svm->vmcb->save.lstar = data;
1182 break;
1183 case MSR_CSTAR:
1184 vcpu->svm->vmcb->save.cstar = data;
1185 break;
1186 case MSR_KERNEL_GS_BASE:
1187 vcpu->svm->vmcb->save.kernel_gs_base = data;
1188 break;
1189 case MSR_SYSCALL_MASK:
1190 vcpu->svm->vmcb->save.sfmask = data;
1191 break;
1192#endif
1193 case MSR_IA32_SYSENTER_CS:
1194 vcpu->svm->vmcb->save.sysenter_cs = data;
1195 break;
1196 case MSR_IA32_SYSENTER_EIP:
1197 vcpu->svm->vmcb->save.sysenter_eip = data;
1198 break;
1199 case MSR_IA32_SYSENTER_ESP:
1200 vcpu->svm->vmcb->save.sysenter_esp = data;
1201 break;
1202 default:
1203 printk(KERN_ERR "kvm: unhandled wrmsr: %x\n", ecx);
1204 return 1;
1205 }
1206 return 0;
1207}
1208
/*
 * wrmsr intercept: combine edx:eax into the value and hand it to
 * svm_set_msr(); skip the 2-byte instruction on success, inject #GP
 * on failure.
 */
static int wrmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
	u64 data = (vcpu->svm->vmcb->save.rax & -1u)
		| ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2;
	if (svm_set_msr(vcpu, ecx, data))
		svm_inject_gp(vcpu, 0);
	else
		skip_emulated_instruction(vcpu);
	return 1;
}

/* MSR intercept dispatcher: exit_info_1 is 1 for wrmsr, 0 for rdmsr. */
static int msr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	if (vcpu->svm->vmcb->control.exit_info_1)
		return wrmsr_interception(vcpu, kvm_run);
	else
		return rdmsr_interception(vcpu, kvm_run);
}
1229
/*
 * Dispatch table indexed by the VMCB exit code.  Entries left out of the
 * designated initializer are NULL and are treated as unknown exits by
 * handle_exit().
 */
static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
				 struct kvm_run *kvm_run) = {
	[SVM_EXIT_READ_CR0]           		= emulate_on_interception,
	[SVM_EXIT_READ_CR3]           		= emulate_on_interception,
	[SVM_EXIT_READ_CR4]           		= emulate_on_interception,
	/* for now: */
	[SVM_EXIT_WRITE_CR0]          		= emulate_on_interception,
	[SVM_EXIT_WRITE_CR3]          		= emulate_on_interception,
	[SVM_EXIT_WRITE_CR4]          		= emulate_on_interception,
	[SVM_EXIT_READ_DR0] 			= emulate_on_interception,
	[SVM_EXIT_READ_DR1]			= emulate_on_interception,
	[SVM_EXIT_READ_DR2]			= emulate_on_interception,
	[SVM_EXIT_READ_DR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR0]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR1]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR2]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR5]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR7]			= emulate_on_interception,
	[SVM_EXIT_EXCP_BASE + PF_VECTOR] 	= pf_interception,
	/* Host-handled events need no guest-visible action. */
	[SVM_EXIT_INTR] 			= nop_on_interception,
	[SVM_EXIT_NMI]				= nop_on_interception,
	[SVM_EXIT_SMI]				= nop_on_interception,
	[SVM_EXIT_INIT]				= nop_on_interception,
	/* [SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception, */
	[SVM_EXIT_CPUID]			= cpuid_interception,
	[SVM_EXIT_HLT]				= halt_interception,
	[SVM_EXIT_INVLPG]			= emulate_on_interception,
	[SVM_EXIT_INVLPGA]			= invalid_op_interception,
	[SVM_EXIT_IOIO] 		  	= io_interception,
	[SVM_EXIT_MSR]				= msr_interception,
	[SVM_EXIT_TASK_SWITCH]			= task_switch_interception,
	/* Nested SVM instructions are not supported; raise #UD. */
	[SVM_EXIT_VMRUN]			= invalid_op_interception,
	[SVM_EXIT_VMMCALL]			= invalid_op_interception,
	[SVM_EXIT_VMLOAD]			= invalid_op_interception,
	[SVM_EXIT_VMSAVE]			= invalid_op_interception,
	[SVM_EXIT_STGI]				= invalid_op_interception,
	[SVM_EXIT_CLGI]				= invalid_op_interception,
	[SVM_EXIT_SKINIT]			= invalid_op_interception,
};
1270
1271
1272static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1273{
1274 u32 exit_code = vcpu->svm->vmcb->control.exit_code;
1275
1276 kvm_run->exit_type = KVM_EXIT_TYPE_VM_EXIT;
1277
1278 if (is_external_interrupt(vcpu->svm->vmcb->control.exit_int_info) &&
1279 exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR)
1280 printk(KERN_ERR "%s: unexpected exit_ini_info 0x%x "
1281 "exit_code 0x%x\n",
1282 __FUNCTION__, vcpu->svm->vmcb->control.exit_int_info,
1283 exit_code);
1284
1285 if (exit_code >= sizeof(svm_exit_handlers) / sizeof(*svm_exit_handlers)
1286 || svm_exit_handlers[exit_code] == 0) {
1287 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
1288 printk(KERN_ERR "%s: 0x%x @ 0x%llx cr0 0x%lx rflags 0x%llx\n",
1289 __FUNCTION__,
1290 exit_code,
1291 vcpu->svm->vmcb->save.rip,
1292 vcpu->cr0,
1293 vcpu->svm->vmcb->save.rflags);
1294 return 0;
1295 }
1296
1297 return svm_exit_handlers[exit_code](vcpu, kvm_run);
1298}
1299
1300static void reload_tss(struct kvm_vcpu *vcpu)
1301{
1302 int cpu = raw_smp_processor_id();
1303
1304 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
1305 svm_data->tss_desc->type = 9; //available 32/64-bit TSS
1306 load_TR_desc();
1307}
1308
1309static void pre_svm_run(struct kvm_vcpu *vcpu)
1310{
1311 int cpu = raw_smp_processor_id();
1312
1313 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
1314
1315 vcpu->svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
1316 if (vcpu->cpu != cpu ||
1317 vcpu->svm->asid_generation != svm_data->asid_generation)
1318 new_asid(vcpu, svm_data);
1319}
1320
1321
/*
 * If the guest has a pending interrupt queued (irq_summary non-zero),
 * program the VMCB virtual-interrupt fields so it is injected on the
 * next VMRUN.
 */
static inline void kvm_try_inject_irq(struct kvm_vcpu *vcpu)
{
	struct vmcb_control_area *control;

	if (!vcpu->irq_summary)
		return;

	control = &vcpu->svm->vmcb->control;

	control->int_vector = pop_irq(vcpu);
	control->int_ctl &= ~V_INTR_PRIO_MASK;
	/*
	 * Priority is pinned at the maximum (0xf) rather than derived from
	 * the vector, so the virtual interrupt is always deliverable.
	 */
	control->int_ctl |= V_IRQ_MASK |
		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
}
1336
1337static void kvm_reput_irq(struct kvm_vcpu *vcpu)
1338{
1339 struct vmcb_control_area *control = &vcpu->svm->vmcb->control;
1340
1341 if (control->int_ctl & V_IRQ_MASK) {
1342 control->int_ctl &= ~V_IRQ_MASK;
1343 push_irq(vcpu, control->int_vector);
1344 }
1345}
1346
/* Save hardware debug registers DR0-DR3 into db_regs[0..3]. */
static void save_db_regs(unsigned long *db_regs)
{
	asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
	asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1]));
	asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2]));
	asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3]));
}
1354
/* Load hardware debug registers DR0-DR3 from db_regs[0..3]. */
static void load_db_regs(unsigned long *db_regs)
{
	asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0]));
	asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1]));
	asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2]));
	asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
}
1362
/*
 * The SVM world switch: save host state, VMRUN into the guest, restore
 * host state on #VMEXIT and dispatch the exit reason.  Loops back to
 * 'again' until an exit must be delivered to userspace or a signal is
 * pending.  Returns 0 (kvm_run filled in) or -EINTR.
 */
static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u16 fs_selector;
	u16 gs_selector;
	u16 ldt_selector;

again:
	kvm_try_inject_irq(vcpu);

	/* Block physical interrupts until STGI after the switch. */
	clgi();

	pre_svm_run(vcpu);

	/* Host state not preserved by VMRUN/VMEXIT must be saved by hand. */
	save_host_msrs(vcpu);
	fs_selector = read_fs();
	gs_selector = read_gs();
	ldt_selector = read_ldt();
	vcpu->svm->host_cr2 = kvm_read_cr2();
	vcpu->svm->host_dr6 = read_dr6();
	vcpu->svm->host_dr7 = read_dr7();
	vcpu->svm->vmcb->save.cr2 = vcpu->cr2;

	/* Swap in the guest's debug registers only if it actually uses them. */
	if (vcpu->svm->vmcb->save.dr7 & 0xff) {
		write_dr7(0);
		save_db_regs(vcpu->svm->host_db_regs);
		load_db_regs(vcpu->svm->db_regs);
	}
	asm volatile (
#ifdef CONFIG_X86_64
		"push %%rbx; push %%rcx; push %%rdx;"
		"push %%rsi; push %%rdi; push %%rbp;"
		"push %%r8; push %%r9; push %%r10; push %%r11;"
		"push %%r12; push %%r13; push %%r14; push %%r15;"
#else
		"push %%ebx; push %%ecx; push %%edx;"
		"push %%esi; push %%edi; push %%ebp;"
#endif

		/* Load guest GPRs from vcpu->regs (rax comes from the VMCB). */
#ifdef CONFIG_X86_64
		"mov %c[rbx](%[vcpu]), %%rbx \n\t"
		"mov %c[rcx](%[vcpu]), %%rcx \n\t"
		"mov %c[rdx](%[vcpu]), %%rdx \n\t"
		"mov %c[rsi](%[vcpu]), %%rsi \n\t"
		"mov %c[rdi](%[vcpu]), %%rdi \n\t"
		"mov %c[rbp](%[vcpu]), %%rbp \n\t"
		"mov %c[r8](%[vcpu]), %%r8 \n\t"
		"mov %c[r9](%[vcpu]), %%r9 \n\t"
		"mov %c[r10](%[vcpu]), %%r10 \n\t"
		"mov %c[r11](%[vcpu]), %%r11 \n\t"
		"mov %c[r12](%[vcpu]), %%r12 \n\t"
		"mov %c[r13](%[vcpu]), %%r13 \n\t"
		"mov %c[r14](%[vcpu]), %%r14 \n\t"
		"mov %c[r15](%[vcpu]), %%r15 \n\t"
#else
		"mov %c[rbx](%[vcpu]), %%ebx \n\t"
		"mov %c[rcx](%[vcpu]), %%ecx \n\t"
		"mov %c[rdx](%[vcpu]), %%edx \n\t"
		"mov %c[rsi](%[vcpu]), %%esi \n\t"
		"mov %c[rdi](%[vcpu]), %%edi \n\t"
		"mov %c[rbp](%[vcpu]), %%ebp \n\t"
#endif

#ifdef CONFIG_X86_64
		/* Enter guest mode */
		"push %%rax \n\t"
		"mov %c[svm](%[vcpu]), %%rax \n\t"
		"mov %c[vmcb](%%rax), %%rax \n\t"
		SVM_VMLOAD "\n\t"
		SVM_VMRUN "\n\t"
		SVM_VMSAVE "\n\t"
		"pop %%rax \n\t"
#else
		/* Enter guest mode */
		"push %%eax \n\t"
		"mov %c[svm](%[vcpu]), %%eax \n\t"
		"mov %c[vmcb](%%eax), %%eax \n\t"
		SVM_VMLOAD "\n\t"
		SVM_VMRUN "\n\t"
		SVM_VMSAVE "\n\t"
		"pop %%eax \n\t"
#endif

		/* Save guest registers, load host registers */
#ifdef CONFIG_X86_64
		"mov %%rbx, %c[rbx](%[vcpu]) \n\t"
		"mov %%rcx, %c[rcx](%[vcpu]) \n\t"
		"mov %%rdx, %c[rdx](%[vcpu]) \n\t"
		"mov %%rsi, %c[rsi](%[vcpu]) \n\t"
		"mov %%rdi, %c[rdi](%[vcpu]) \n\t"
		"mov %%rbp, %c[rbp](%[vcpu]) \n\t"
		"mov %%r8, %c[r8](%[vcpu]) \n\t"
		"mov %%r9, %c[r9](%[vcpu]) \n\t"
		"mov %%r10, %c[r10](%[vcpu]) \n\t"
		"mov %%r11, %c[r11](%[vcpu]) \n\t"
		"mov %%r12, %c[r12](%[vcpu]) \n\t"
		"mov %%r13, %c[r13](%[vcpu]) \n\t"
		"mov %%r14, %c[r14](%[vcpu]) \n\t"
		"mov %%r15, %c[r15](%[vcpu]) \n\t"

		"pop  %%r15; pop  %%r14; pop  %%r13; pop  %%r12;"
		"pop  %%r11; pop  %%r10; pop  %%r9;  pop  %%r8;"
		"pop  %%rbp; pop  %%rdi; pop  %%rsi;"
		"pop  %%rdx; pop  %%rcx; pop  %%rbx; \n\t"
#else
		"mov %%ebx, %c[rbx](%[vcpu]) \n\t"
		"mov %%ecx, %c[rcx](%[vcpu]) \n\t"
		"mov %%edx, %c[rdx](%[vcpu]) \n\t"
		"mov %%esi, %c[rsi](%[vcpu]) \n\t"
		"mov %%edi, %c[rdi](%[vcpu]) \n\t"
		"mov %%ebp, %c[rbp](%[vcpu]) \n\t"

		"pop  %%ebp; pop  %%edi; pop  %%esi;"
		"pop  %%edx; pop  %%ecx; pop  %%ebx; \n\t"
#endif
		:
		: [vcpu]"a"(vcpu),
		  [svm]"i"(offsetof(struct kvm_vcpu, svm)),
		  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
		  [rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])),
		  [rcx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RCX])),
		  [rdx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDX])),
		  [rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
		  [rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
		  [rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP]))
#ifdef CONFIG_X86_64
		  ,[r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])),
		  [r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])),
		  [r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
		  [r11]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R11])),
		  [r12]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R12])),
		  [r13]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R13])),
		  [r14]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R14])),
		  [r15]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R15]))
#endif
		: "cc", "memory" );

	/* Back from the guest: undo everything saved above, in order. */
	if ((vcpu->svm->vmcb->save.dr7 & 0xff))
		load_db_regs(vcpu->svm->host_db_regs);

	vcpu->cr2 = vcpu->svm->vmcb->save.cr2;

	write_dr6(vcpu->svm->host_dr6);
	write_dr7(vcpu->svm->host_dr7);
	kvm_write_cr2(vcpu->svm->host_cr2);

	load_fs(fs_selector);
	load_gs(gs_selector);
	load_ldt(ldt_selector);
	load_host_msrs(vcpu);

	reload_tss(vcpu);

	/* Re-enable interrupt delivery. */
	stgi();

	kvm_reput_irq(vcpu);

	vcpu->svm->next_rip = 0;

	/* VMRUN itself failed (invalid VMCB state). */
	if (vcpu->svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_type = KVM_EXIT_TYPE_FAIL_ENTRY;
		kvm_run->exit_reason = vcpu->svm->vmcb->control.exit_code;
		return 0;
	}

	if (handle_exit(vcpu, kvm_run)) {
		if (signal_pending(current)) {
			++kvm_stat.signal_exits;
			return -EINTR;
		}
		kvm_resched(vcpu);
		goto again;
	}
	return 0;
}
1537
/* Flush the guest TLB by forcing allocation of a new ASID. */
static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}
1542
/* Point the guest at a new page-table root and flush stale translations. */
static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	vcpu->svm->vmcb->save.cr3 = root;
	force_new_asid(vcpu);
}
1548
/*
 * Inject a page fault into the guest.  If the exit already carried a
 * page fault (a #PF occurred while delivering a #PF), escalate to a
 * double fault instead, as the hardware would.
 */
static void svm_inject_page_fault(struct kvm_vcpu *vcpu,
				  unsigned long addr,
				  uint32_t err_code)
{
	uint32_t exit_int_info = vcpu->svm->vmcb->control.exit_int_info;

	++kvm_stat.pf_guest;

	if (is_page_fault(exit_int_info)) {

		/* #PF while delivering #PF: inject #DF (error code 0). */
		vcpu->svm->vmcb->control.event_inj_err = 0;
		vcpu->svm->vmcb->control.event_inj = 	SVM_EVTINJ_VALID |
						SVM_EVTINJ_VALID_ERR |
						SVM_EVTINJ_TYPE_EXEPT |
						DF_VECTOR;
		return;
	}
	vcpu->cr2 = addr;
	vcpu->svm->vmcb->save.cr2 = addr;
	vcpu->svm->vmcb->control.event_inj = 	SVM_EVTINJ_VALID |
					SVM_EVTINJ_VALID_ERR |
					SVM_EVTINJ_TYPE_EXEPT |
					PF_VECTOR;
	vcpu->svm->vmcb->control.event_inj_err = err_code;
}
1574
1575
/* arch_ops callback: SVM has no BIOS disable bit to check here; never disabled. */
static int is_disabled(void)
{
	return 0;
}
1580
/* SVM implementation of the arch-neutral kvm_arch_ops vtable. */
static struct kvm_arch_ops svm_arch_ops = {
	.cpu_has_kvm_support = has_svm,
	.disabled_by_bios = is_disabled,
	.hardware_setup = svm_hardware_setup,
	.hardware_unsetup = svm_hardware_unsetup,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,

	.vcpu_create = svm_create_vcpu,
	.vcpu_free = svm_free_vcpu,

	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,

	.set_guest_debug = svm_guest_debug,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.is_long_mode = svm_is_long_mode,
	.get_cs_db_l_bits = svm_get_cs_db_l_bits,
	.set_cr0 = svm_set_cr0,
	.set_cr0_no_modeswitch = svm_set_cr0,
	.set_cr3 = svm_set_cr3,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.get_dr = svm_get_dr,
	.set_dr = svm_set_dr,
	.cache_regs = svm_cache_regs,
	.decache_regs = svm_decache_regs,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,

	.invlpg = svm_invlpg,
	.tlb_flush = svm_flush_tlb,
	.inject_page_fault = svm_inject_page_fault,

	.inject_gp = svm_inject_gp,

	.run = svm_vcpu_run,
	.skip_emulated_instruction = skip_emulated_instruction,
	.vcpu_setup = svm_vcpu_setup,
};
1629
/* Module entry point: register the SVM backend with the generic KVM core. */
static int __init svm_init(void)
{
	return kvm_init_arch(&svm_arch_ops, THIS_MODULE);
}
1634
/* Module exit point: unregister the backend from the generic KVM core. */
static void __exit svm_exit(void)
{
	kvm_exit_arch();
}

module_init(svm_init)
module_exit(svm_exit)
diff --git a/drivers/kvm/svm.h b/drivers/kvm/svm.h
new file mode 100644
index 000000000000..df731c3fb588
--- /dev/null
+++ b/drivers/kvm/svm.h
@@ -0,0 +1,315 @@
1#ifndef __SVM_H
2#define __SVM_H
3
/*
 * Intercept identifiers.  NOTE(review): the ordering appears to match
 * the hardware intercept bit layout of the VMCB control area — confirm
 * against the AMD64 APM vol.2 before reordering.
 */
enum {
	INTERCEPT_INTR,
	INTERCEPT_NMI,
	INTERCEPT_SMI,
	INTERCEPT_INIT,
	INTERCEPT_VINTR,
	INTERCEPT_SELECTIVE_CR0,
	INTERCEPT_STORE_IDTR,
	INTERCEPT_STORE_GDTR,
	INTERCEPT_STORE_LDTR,
	INTERCEPT_STORE_TR,
	INTERCEPT_LOAD_IDTR,
	INTERCEPT_LOAD_GDTR,
	INTERCEPT_LOAD_LDTR,
	INTERCEPT_LOAD_TR,
	INTERCEPT_RDTSC,
	INTERCEPT_RDPMC,
	INTERCEPT_PUSHF,
	INTERCEPT_POPF,
	INTERCEPT_CPUID,
	INTERCEPT_RSM,
	INTERCEPT_IRET,
	INTERCEPT_INTn,
	INTERCEPT_INVD,
	INTERCEPT_PAUSE,
	INTERCEPT_HLT,
	INTERCEPT_INVLPG,
	INTERCEPT_INVLPGA,
	INTERCEPT_IOIO_PROT,
	INTERCEPT_MSR_PROT,
	INTERCEPT_TASK_SWITCH,
	INTERCEPT_FERR_FREEZE,
	INTERCEPT_SHUTDOWN,
	INTERCEPT_VMRUN,
	INTERCEPT_VMMCALL,
	INTERCEPT_VMLOAD,
	INTERCEPT_VMSAVE,
	INTERCEPT_STGI,
	INTERCEPT_CLGI,
	INTERCEPT_SKINIT,
	INTERCEPT_RDTSCP,
	INTERCEPT_ICEBP,
	INTERCEPT_WBINVD,
};
48
49
/*
 * VMCB control area.  Packed: the field offsets must match the hardware
 * layout exactly — do not reorder or resize fields.
 */
struct __attribute__ ((__packed__)) vmcb_control_area {
	u16 intercept_cr_read;
	u16 intercept_cr_write;
	u16 intercept_dr_read;
	u16 intercept_dr_write;
	u32 intercept_exceptions;
	u64 intercept;
	u8 reserved_1[44];
	u64 iopm_base_pa;
	u64 msrpm_base_pa;
	u64 tsc_offset;		/* added to host TSC to form guest TSC */
	u32 asid;
	u8 tlb_ctl;
	u8 reserved_2[3];
	u32 int_ctl;		/* V_IRQ / priority / intr-masking bits */
	u32 int_vector;
	u32 int_state;
	u8 reserved_3[4];
	u32 exit_code;
	u32 exit_code_hi;
	u64 exit_info_1;
	u64 exit_info_2;
	u32 exit_int_info;	/* event that was being delivered at exit */
	u32 exit_int_info_err;
	u64 nested_ctl;
	u8 reserved_4[16];
	u32 event_inj;		/* event to inject on next VMRUN */
	u32 event_inj_err;
	u64 nested_cr3;
	u64 lbr_ctl;
	u8 reserved_5[832];
};
82
83
84#define TLB_CONTROL_DO_NOTHING 0
85#define TLB_CONTROL_FLUSH_ALL_ASID 1
86
87#define V_TPR_MASK 0x0f
88
89#define V_IRQ_SHIFT 8
90#define V_IRQ_MASK (1 << V_IRQ_SHIFT)
91
92#define V_INTR_PRIO_SHIFT 16
93#define V_INTR_PRIO_MASK (0x0f << V_INTR_PRIO_SHIFT)
94
95#define V_IGN_TPR_SHIFT 20
96#define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT)
97
98#define V_INTR_MASKING_SHIFT 24
99#define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)
100
101#define SVM_INTERRUPT_SHADOW_MASK 1
102
103#define SVM_IOIO_STR_SHIFT 2
104#define SVM_IOIO_REP_SHIFT 3
105#define SVM_IOIO_SIZE_SHIFT 4
106#define SVM_IOIO_ASIZE_SHIFT 7
107
108#define SVM_IOIO_TYPE_MASK 1
109#define SVM_IOIO_STR_MASK (1 << SVM_IOIO_STR_SHIFT)
110#define SVM_IOIO_REP_MASK (1 << SVM_IOIO_REP_SHIFT)
111#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT)
112#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT)
113
/* One segment register as stored in the VMCB save area (packed layout). */
struct __attribute__ ((__packed__)) vmcb_seg {
	u16 selector;
	u16 attrib;
	u32 limit;
	u64 base;
};
120
/*
 * VMCB guest state save area.  Packed: offsets are fixed by hardware —
 * the reserved_* padding preserves them; do not reorder fields.
 */
struct __attribute__ ((__packed__)) vmcb_save_area {
	struct vmcb_seg es;
	struct vmcb_seg cs;
	struct vmcb_seg ss;
	struct vmcb_seg ds;
	struct vmcb_seg fs;
	struct vmcb_seg gs;
	struct vmcb_seg gdtr;
	struct vmcb_seg ldtr;
	struct vmcb_seg idtr;
	struct vmcb_seg tr;
	u8 reserved_1[43];
	u8 cpl;
	u8 reserved_2[4];
	u64 efer;
	u8 reserved_3[112];
	u64 cr4;
	u64 cr3;
	u64 cr0;
	u64 dr7;
	u64 dr6;
	u64 rflags;
	u64 rip;
	u8 reserved_4[88];
	u64 rsp;
	u8 reserved_5[24];
	u64 rax;
	u64 star;
	u64 lstar;
	u64 cstar;
	u64 sfmask;
	u64 kernel_gs_base;
	u64 sysenter_cs;
	u64 sysenter_esp;
	u64 sysenter_eip;
	u64 cr2;
	u8 reserved_6[32];
	u64 g_pat;
	u64 dbgctl;
	u64 br_from;
	u64 br_to;
	u64 last_excp_from;
	u64 last_excp_to;
};
165
/* Complete VMCB: hardware control area followed by the guest save area. */
struct __attribute__ ((__packed__)) vmcb {
	struct vmcb_control_area control;
	struct vmcb_save_area save;
};
170
171#define SVM_CPUID_FEATURE_SHIFT 2
172#define SVM_CPUID_FUNC 0x8000000a
173
174#define MSR_EFER_SVME_MASK (1ULL << 12)
175#define MSR_VM_HSAVE_PA 0xc0010117ULL
176
177#define SVM_SELECTOR_S_SHIFT 4
178#define SVM_SELECTOR_DPL_SHIFT 5
179#define SVM_SELECTOR_P_SHIFT 7
180#define SVM_SELECTOR_AVL_SHIFT 8
181#define SVM_SELECTOR_L_SHIFT 9
182#define SVM_SELECTOR_DB_SHIFT 10
183#define SVM_SELECTOR_G_SHIFT 11
184
185#define SVM_SELECTOR_TYPE_MASK (0xf)
186#define SVM_SELECTOR_S_MASK (1 << SVM_SELECTOR_S_SHIFT)
187#define SVM_SELECTOR_DPL_MASK (3 << SVM_SELECTOR_DPL_SHIFT)
188#define SVM_SELECTOR_P_MASK (1 << SVM_SELECTOR_P_SHIFT)
189#define SVM_SELECTOR_AVL_MASK (1 << SVM_SELECTOR_AVL_SHIFT)
190#define SVM_SELECTOR_L_MASK (1 << SVM_SELECTOR_L_SHIFT)
191#define SVM_SELECTOR_DB_MASK (1 << SVM_SELECTOR_DB_SHIFT)
192#define SVM_SELECTOR_G_MASK (1 << SVM_SELECTOR_G_SHIFT)
193
194#define SVM_SELECTOR_WRITE_MASK (1 << 1)
195#define SVM_SELECTOR_READ_MASK SVM_SELECTOR_WRITE_MASK
196#define SVM_SELECTOR_CODE_MASK (1 << 3)
197
198#define INTERCEPT_CR0_MASK 1
199#define INTERCEPT_CR3_MASK (1 << 3)
200#define INTERCEPT_CR4_MASK (1 << 4)
201
202#define INTERCEPT_DR0_MASK 1
203#define INTERCEPT_DR1_MASK (1 << 1)
204#define INTERCEPT_DR2_MASK (1 << 2)
205#define INTERCEPT_DR3_MASK (1 << 3)
206#define INTERCEPT_DR4_MASK (1 << 4)
207#define INTERCEPT_DR5_MASK (1 << 5)
208#define INTERCEPT_DR6_MASK (1 << 6)
209#define INTERCEPT_DR7_MASK (1 << 7)
210
211#define SVM_EVTINJ_VEC_MASK 0xff
212
213#define SVM_EVTINJ_TYPE_SHIFT 8
214#define SVM_EVTINJ_TYPE_MASK (7 << SVM_EVTINJ_TYPE_SHIFT)
215
216#define SVM_EVTINJ_TYPE_INTR (0 << SVM_EVTINJ_TYPE_SHIFT)
217#define SVM_EVTINJ_TYPE_NMI (2 << SVM_EVTINJ_TYPE_SHIFT)
218#define SVM_EVTINJ_TYPE_EXEPT (3 << SVM_EVTINJ_TYPE_SHIFT)
219#define SVM_EVTINJ_TYPE_SOFT (4 << SVM_EVTINJ_TYPE_SHIFT)
220
221#define SVM_EVTINJ_VALID (1 << 31)
222#define SVM_EVTINJ_VALID_ERR (1 << 11)
223
224#define SVM_EXITINTINFO_VEC_MASK SVM_EVTINJ_VEC_MASK
225
226#define SVM_EXITINTINFO_TYPE_INTR SVM_EVTINJ_TYPE_INTR
227#define SVM_EXITINTINFO_TYPE_NMI SVM_EVTINJ_TYPE_NMI
228#define SVM_EXITINTINFO_TYPE_EXEPT SVM_EVTINJ_TYPE_EXEPT
229#define SVM_EXITINTINFO_TYPE_SOFT SVM_EVTINJ_TYPE_SOFT
230
231#define SVM_EXITINTINFO_VALID SVM_EVTINJ_VALID
232#define SVM_EXITINTINFO_VALID_ERR SVM_EVTINJ_VALID_ERR
233
234#define SVM_EXIT_READ_CR0 0x000
235#define SVM_EXIT_READ_CR3 0x003
236#define SVM_EXIT_READ_CR4 0x004
237#define SVM_EXIT_READ_CR8 0x008
238#define SVM_EXIT_WRITE_CR0 0x010
239#define SVM_EXIT_WRITE_CR3 0x013
240#define SVM_EXIT_WRITE_CR4 0x014
241#define SVM_EXIT_WRITE_CR8 0x018
242#define SVM_EXIT_READ_DR0 0x020
243#define SVM_EXIT_READ_DR1 0x021
244#define SVM_EXIT_READ_DR2 0x022
245#define SVM_EXIT_READ_DR3 0x023
246#define SVM_EXIT_READ_DR4 0x024
247#define SVM_EXIT_READ_DR5 0x025
248#define SVM_EXIT_READ_DR6 0x026
249#define SVM_EXIT_READ_DR7 0x027
250#define SVM_EXIT_WRITE_DR0 0x030
251#define SVM_EXIT_WRITE_DR1 0x031
252#define SVM_EXIT_WRITE_DR2 0x032
253#define SVM_EXIT_WRITE_DR3 0x033
254#define SVM_EXIT_WRITE_DR4 0x034
255#define SVM_EXIT_WRITE_DR5 0x035
256#define SVM_EXIT_WRITE_DR6 0x036
257#define SVM_EXIT_WRITE_DR7 0x037
258#define SVM_EXIT_EXCP_BASE 0x040
259#define SVM_EXIT_INTR 0x060
260#define SVM_EXIT_NMI 0x061
261#define SVM_EXIT_SMI 0x062
262#define SVM_EXIT_INIT 0x063
263#define SVM_EXIT_VINTR 0x064
264#define SVM_EXIT_CR0_SEL_WRITE 0x065
265#define SVM_EXIT_IDTR_READ 0x066
266#define SVM_EXIT_GDTR_READ 0x067
267#define SVM_EXIT_LDTR_READ 0x068
268#define SVM_EXIT_TR_READ 0x069
269#define SVM_EXIT_IDTR_WRITE 0x06a
270#define SVM_EXIT_GDTR_WRITE 0x06b
271#define SVM_EXIT_LDTR_WRITE 0x06c
272#define SVM_EXIT_TR_WRITE 0x06d
273#define SVM_EXIT_RDTSC 0x06e
274#define SVM_EXIT_RDPMC 0x06f
275#define SVM_EXIT_PUSHF 0x070
276#define SVM_EXIT_POPF 0x071
277#define SVM_EXIT_CPUID 0x072
278#define SVM_EXIT_RSM 0x073
279#define SVM_EXIT_IRET 0x074
280#define SVM_EXIT_SWINT 0x075
281#define SVM_EXIT_INVD 0x076
282#define SVM_EXIT_PAUSE 0x077
283#define SVM_EXIT_HLT 0x078
284#define SVM_EXIT_INVLPG 0x079
285#define SVM_EXIT_INVLPGA 0x07a
286#define SVM_EXIT_IOIO 0x07b
287#define SVM_EXIT_MSR 0x07c
288#define SVM_EXIT_TASK_SWITCH 0x07d
289#define SVM_EXIT_FERR_FREEZE 0x07e
290#define SVM_EXIT_SHUTDOWN 0x07f
291#define SVM_EXIT_VMRUN 0x080
292#define SVM_EXIT_VMMCALL 0x081
293#define SVM_EXIT_VMLOAD 0x082
294#define SVM_EXIT_VMSAVE 0x083
295#define SVM_EXIT_STGI 0x084
296#define SVM_EXIT_CLGI 0x085
297#define SVM_EXIT_SKINIT 0x086
298#define SVM_EXIT_RDTSCP 0x087
299#define SVM_EXIT_ICEBP 0x088
300#define SVM_EXIT_WBINVD 0x089
301#define SVM_EXIT_NPF 0x400
302
#define SVM_EXIT_ERR		(-1)	/* parenthesized: negative macro constant */
304
#define SVM_CR0_SELECTIVE_MASK ((1 << 3) | 1) /* TS and MP */
306
307#define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda"
308#define SVM_VMRUN ".byte 0x0f, 0x01, 0xd8"
309#define SVM_VMSAVE ".byte 0x0f, 0x01, 0xdb"
310#define SVM_CLGI ".byte 0x0f, 0x01, 0xdd"
311#define SVM_STGI ".byte 0x0f, 0x01, 0xdc"
312#define SVM_INVLPGA ".byte 0x0f, 0x01, 0xdf"
313
314#endif
315
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
new file mode 100644
index 000000000000..f0f0b1a781f8
--- /dev/null
+++ b/drivers/kvm/vmx.c
@@ -0,0 +1,2014 @@
1/*
2 * Kernel-based Virtual Machine driver for Linux
3 *
4 * This module enables machines with Intel VT-x extensions to run virtual
5 * machines without emulation or binary translation.
6 *
7 * Copyright (C) 2006 Qumranet, Inc.
8 *
9 * Authors:
10 * Avi Kivity <avi@qumranet.com>
11 * Yaniv Kamay <yaniv@qumranet.com>
12 *
13 * This work is licensed under the terms of the GNU GPL, version 2. See
14 * the COPYING file in the top-level directory.
15 *
16 */
17
18#include "kvm.h"
19#include "vmx.h"
20#include "kvm_vmx.h"
21#include <linux/module.h>
22#include <linux/mm.h>
23#include <linux/highmem.h>
24#include <asm/io.h>
25#include <asm/desc.h>
26
27#include "segment_descriptor.h"
28
29#define MSR_IA32_FEATURE_CONTROL 0x03a
30
31MODULE_AUTHOR("Qumranet");
32MODULE_LICENSE("GPL");
33
34static DEFINE_PER_CPU(struct vmcs *, vmxarea);
35static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
36
37#ifdef CONFIG_X86_64
38#define HOST_IS_64 1
39#else
40#define HOST_IS_64 0
41#endif
42
/* Geometry of the hardware VMCS region (size, allocation order, revision). */
static struct vmcs_descriptor {
	int size;
	int order;
	u32 revision_id;
} vmcs_descriptor;
48
/* Expands to the four VMCS field encodings describing one segment register. */
#define VMX_SEGMENT_FIELD(seg)					\
	[VCPU_SREG_##seg] = {                                   \
		.selector = GUEST_##seg##_SELECTOR,		\
		.base = GUEST_##seg##_BASE,		   	\
		.limit = GUEST_##seg##_LIMIT,		   	\
		.ar_bytes = GUEST_##seg##_AR_BYTES,	   	\
	}

/* Per-segment VMCS field encodings, indexed by VCPU_SREG_*. */
static struct kvm_vmx_segment_field {
	unsigned selector;
	unsigned base;
	unsigned limit;
	unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
	VMX_SEGMENT_FIELD(CS),
	VMX_SEGMENT_FIELD(DS),
	VMX_SEGMENT_FIELD(ES),
	VMX_SEGMENT_FIELD(FS),
	VMX_SEGMENT_FIELD(GS),
	VMX_SEGMENT_FIELD(SS),
	VMX_SEGMENT_FIELD(TR),
	VMX_SEGMENT_FIELD(LDTR),
};
72
/* MSRs switched between host and guest values around VM entry/exit. */
static const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
#endif
	MSR_EFER, MSR_K6_STAR,
};
#define NR_VMX_MSR (sizeof(vmx_msr_index) / sizeof(*vmx_msr_index))
80
81static inline int is_page_fault(u32 intr_info)
82{
83 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
84 INTR_INFO_VALID_MASK)) ==
85 (INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
86}
87
88static inline int is_external_interrupt(u32 intr_info)
89{
90 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
91 == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
92}
93
94static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
95{
96 int i;
97
98 for (i = 0; i < vcpu->nmsrs; ++i)
99 if (vcpu->guest_msrs[i].index == msr)
100 return &vcpu->guest_msrs[i];
101 return 0;
102}
103
/*
 * VMCLEAR the given VMCS so it may be loaded on another CPU.
 * 'setna' captures CF|ZF, which VMX uses to signal failure.
 */
static void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);
	u8 error;

	asm volatile (ASM_VMX_VMCLEAR_RAX "; setna %0"
		      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
		      : "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
		       vmcs, phys_addr);
}
116
/*
 * IPI callback: clear the vcpu's VMCS on the CPU where it was last
 * loaded, and drop the per-cpu current_vmcs cache if it points at it.
 */
static void __vcpu_clear(void *arg)
{
	struct kvm_vcpu *vcpu = arg;
	int cpu = smp_processor_id();

	if (vcpu->cpu == cpu)
		vmcs_clear(vcpu->vmcs);
	if (per_cpu(current_vmcs, cpu) == vcpu->vmcs)
		per_cpu(current_vmcs, cpu) = NULL;
}
127
/* VMREAD 'field' from the current VMCS; assumes the read succeeds. */
static unsigned long vmcs_readl(unsigned long field)
{
	unsigned long value;

	asm volatile (ASM_VMX_VMREAD_RDX_RAX
		      : "=a"(value) : "d"(field) : "cc");
	return value;
}
136
137static u16 vmcs_read16(unsigned long field)
138{
139 return vmcs_readl(field);
140}
141
142static u32 vmcs_read32(unsigned long field)
143{
144 return vmcs_readl(field);
145}
146
/*
 * Read a 64-bit VMCS field.  On 32-bit hosts the two halves live in
 * consecutive field encodings and are read separately.
 */
static u64 vmcs_read64(unsigned long field)
{
#ifdef CONFIG_X86_64
	return vmcs_readl(field);
#else
	return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
#endif
}
155
/* VMWRITE 'value' into 'field' of the current VMCS, logging failures. */
static void vmcs_writel(unsigned long field, unsigned long value)
{
	u8 error;

	asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0"
		       : "=q"(error) : "a"(value), "d"(field) : "cc" );
	if (error)
		printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
		       field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
}
166
167static void vmcs_write16(unsigned long field, u16 value)
168{
169 vmcs_writel(field, value);
170}
171
172static void vmcs_write32(unsigned long field, u32 value)
173{
174 vmcs_writel(field, value);
175}
176
/*
 * Write a 64-bit VMCS field.  On 32-bit hosts the halves are written to
 * consecutive field encodings; the empty asm is a compiler barrier that
 * keeps the two VMWRITEs in order.
 */
static void vmcs_write64(unsigned long field, u64 value)
{
#ifdef CONFIG_X86_64
	vmcs_writel(field, value);
#else
	vmcs_writel(field, value);
	asm volatile ("");
	vmcs_writel(field+1, value >> 32);
#endif
}
187
/*
 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
 * vcpu mutex is already taken.
 */
static struct kvm_vcpu *vmx_vcpu_load(struct kvm_vcpu *vcpu)
{
	u64 phys_addr = __pa(vcpu->vmcs);
	int cpu;

	/* Pin to this CPU until vmx_vcpu_put() -> put_cpu(). */
	cpu = get_cpu();

	/* If the VMCS was last active elsewhere, clear it there first. */
	if (vcpu->cpu != cpu) {
		smp_call_function(__vcpu_clear, vcpu, 0, 1);
		vcpu->launched = 0;
	}

	/* Make this vcpu's VMCS the current one via VMPTRLD. */
	if (per_cpu(current_vmcs, cpu) != vcpu->vmcs) {
		u8 error;

		per_cpu(current_vmcs, cpu) = vcpu->vmcs;
		asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0"
			      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
			      : "cc");
		if (error)
			printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
			       vcpu->vmcs, phys_addr);
	}

	if (vcpu->cpu != cpu) {
		struct descriptor_table dt;
		unsigned long sysenter_esp;

		vcpu->cpu = cpu;
		/*
		 * Linux uses per-cpu TSS and GDT, so set these when switching
		 * processors.
		 */
		vmcs_writel(HOST_TR_BASE, read_tr_base()); /* 22.2.4 */
		get_gdt(&dt);
		vmcs_writel(HOST_GDTR_BASE, dt.base);   /* 22.2.4 */

		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
	}
	return vcpu;
}
234
/* Counterpart to vmx_vcpu_load(): release the CPU pin taken by get_cpu(). */
static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
	put_cpu();
}
239
/* Return the guest's RFLAGS from the current VMCS. */
static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
	return vmcs_readl(GUEST_RFLAGS);
}
244
/* Store the guest's RFLAGS into the current VMCS. */
static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	vmcs_writel(GUEST_RFLAGS, rflags);
}
249
/*
 * Advance guest RIP past the instruction just emulated, using the
 * hardware-reported instruction length, and clear any one-instruction
 * interrupt shadow (STI / MOV-SS blocking).
 */
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	unsigned long rip;
	u32 interruptibility;

	rip = vmcs_readl(GUEST_RIP);
	rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
	vmcs_writel(GUEST_RIP, rip);

	/*
	 * We emulated an instruction, so temporary interrupt blocking
	 * should be removed, if set.
	 */
	interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	if (interruptibility & 3)
		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
			     interruptibility & ~3);
}
268
/* Queue a #GP with the given error code for injection on the next VM entry. */
static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
{
	printk(KERN_DEBUG "inject_general_protection: rip 0x%lx\n",
	       vmcs_readl(GUEST_RIP));
	vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
		     GP_VECTOR |
		     INTR_TYPE_EXCEPTION |
		     INTR_INFO_DELIEVER_CODE_MASK |
		     INTR_INFO_VALID_MASK);
}
280
281/*
282 * reads and returns guest's timestamp counter "register"
283 * guest_tsc = host_tsc + tsc_offset -- 21.3
284 */
285static u64 guest_read_tsc(void)
286{
287 u64 host_tsc, tsc_offset;
288
289 rdtscll(host_tsc);
290 tsc_offset = vmcs_read64(TSC_OFFSET);
291 return host_tsc + tsc_offset;
292}
293
294/*
295 * writes 'guest_tsc' into guest's timestamp counter "register"
296 * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
297 */
298static void guest_write_tsc(u64 guest_tsc)
299{
300 u64 host_tsc;
301
302 rdtscll(host_tsc);
303 vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
304}
305
/* Re-mark the host TSS descriptor available and reload TR (32-bit only). */
static void reload_tss(void)
{
#ifndef CONFIG_X86_64

	/*
	 * VT restores TR but not its size.  Useless.
	 */
	struct descriptor_table gdt;
	struct segment_descriptor *descs;

	get_gdt(&gdt);
	descs = (void *)gdt.base;
	descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
	load_TR_desc();
#endif
}
322
/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	u64 data;
	struct vmx_msr_entry *msr;

	if (!pdata) {
		printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
		return -EINVAL;
	}

	switch (msr_index) {
#ifdef CONFIG_X86_64
	case MSR_FS_BASE:
		data = vmcs_readl(GUEST_FS_BASE);
		break;
	case MSR_GS_BASE:
		data = vmcs_readl(GUEST_GS_BASE);
		break;
	case MSR_EFER:
		data = vcpu->shadow_efer;
		break;
#endif
	case MSR_IA32_TIME_STAMP_COUNTER:
		data = guest_read_tsc();
		break;
	case MSR_IA32_SYSENTER_CS:
		data = vmcs_read32(GUEST_SYSENTER_CS);
		break;
	case MSR_IA32_SYSENTER_EIP:
		data = vmcs_read32(GUEST_SYSENTER_EIP);
		break;
	case MSR_IA32_SYSENTER_ESP:
		data = vmcs_read32(GUEST_SYSENTER_ESP);
		break;
	/* Machine-check and microcode MSRs: report zero rather than #GP. */
	case MSR_IA32_MC0_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MC0_MISC:
	case MSR_IA32_MC0_MISC+4:
	case MSR_IA32_MC0_MISC+8:
	case MSR_IA32_MC0_MISC+12:
	case MSR_IA32_MC0_MISC+16:
	case MSR_IA32_UCODE_REV:
		/* MTRR registers */
	case 0xfe:
	case 0x200 ... 0x2ff:
		data = 0;
		break;
	case MSR_IA32_APICBASE:
		data = vcpu->apic_base;
		break;
	default:
		/* Fall back to the software-tracked guest MSR table. */
		msr = find_msr_entry(vcpu, msr_index);
		if (!msr) {
			printk(KERN_ERR "kvm: unhandled rdmsr: %x\n", msr_index);
			return 1;
		}
		data = msr->data;
		break;
	}

	*pdata = data;
	return 0;
}
392
393/*
394 * Writes msr value into into the appropriate "register".
395 * Returns 0 on success, non-0 otherwise.
396 * Assumes vcpu_load() was already called.
397 */
398static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
399{
400 struct vmx_msr_entry *msr;
401 switch (msr_index) {
402#ifdef CONFIG_X86_64
403 case MSR_FS_BASE:
404 vmcs_writel(GUEST_FS_BASE, data);
405 break;
406 case MSR_GS_BASE:
407 vmcs_writel(GUEST_GS_BASE, data);
408 break;
409#endif
410 case MSR_IA32_SYSENTER_CS:
411 vmcs_write32(GUEST_SYSENTER_CS, data);
412 break;
413 case MSR_IA32_SYSENTER_EIP:
414 vmcs_write32(GUEST_SYSENTER_EIP, data);
415 break;
416 case MSR_IA32_SYSENTER_ESP:
417 vmcs_write32(GUEST_SYSENTER_ESP, data);
418 break;
419#ifdef __x86_64
420 case MSR_EFER:
421 set_efer(vcpu, data);
422 break;
423 case MSR_IA32_MC0_STATUS:
424 printk(KERN_WARNING "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n"
425 , __FUNCTION__, data);
426 break;
427#endif
428 case MSR_IA32_TIME_STAMP_COUNTER: {
429 guest_write_tsc(data);
430 break;
431 }
432 case MSR_IA32_UCODE_REV:
433 case MSR_IA32_UCODE_WRITE:
434 case 0x200 ... 0x2ff: /* MTRRs */
435 break;
436 case MSR_IA32_APICBASE:
437 vcpu->apic_base = data;
438 break;
439 default:
440 msr = find_msr_entry(vcpu, msr_index);
441 if (!msr) {
442 printk(KERN_ERR "kvm: unhandled wrmsr: 0x%x\n", msr_index);
443 return 1;
444 }
445 msr->data = data;
446 break;
447 }
448
449 return 0;
450}
451
452/*
453 * Sync the rsp and rip registers into the vcpu structure. This allows
454 * registers to be accessed by indexing vcpu->regs.
455 */
456static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu)
457{
458 vcpu->regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
459 vcpu->rip = vmcs_readl(GUEST_RIP);
460}
461
462/*
463 * Syncs rsp and rip back into the vmcs. Should be called after possible
464 * modification.
465 */
466static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu)
467{
468 vmcs_writel(GUEST_RSP, vcpu->regs[VCPU_REGS_RSP]);
469 vmcs_writel(GUEST_RIP, vcpu->rip);
470}
471
/*
 * Program guest debugging from a userspace kvm_debug_guest request:
 * build a dr7 image from the requested breakpoints, intercept #DB
 * while debugging is enabled, and clear TF/RF in guest rflags when
 * single-stepping is switched off.  Always returns 0.
 */
static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
	unsigned long dr7 = 0x400;	/* dr7 reserved-bit-10 always set */
	u32 exception_bitmap;
	int old_singlestep;

	exception_bitmap = vmcs_read32(EXCEPTION_BITMAP);
	old_singlestep = vcpu->guest_debug.singlestep;

	vcpu->guest_debug.enabled = dbg->enabled;
	if (vcpu->guest_debug.enabled) {
		int i;

		dr7 |= 0x200;  /* exact */
		for (i = 0; i < 4; ++i) {
			if (!dbg->breakpoints[i].enabled)
				continue;
			vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address;
			dr7 |= 2 << (i*2);    /* global enable */
			dr7 |= 0 << (i*4+16); /* execution breakpoint (type 0) */
		}

		exception_bitmap |= (1u << 1);  /* Trap debug exceptions */

		vcpu->guest_debug.singlestep = dbg->singlestep;
	} else {
		exception_bitmap &= ~(1u << 1); /* Ignore debug exceptions */
		vcpu->guest_debug.singlestep = 0;
	}

	/* Single-step just turned off: remove TF/RF we forced into the
	 * guest's rflags (see kvm_guest_debug_pre()). */
	if (old_singlestep && !vcpu->guest_debug.singlestep) {
		unsigned long flags;

		flags = vmcs_readl(GUEST_RFLAGS);
		flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
		vmcs_writel(GUEST_RFLAGS, flags);
	}

	vmcs_write32(EXCEPTION_BITMAP, exception_bitmap);
	vmcs_writel(GUEST_DR7, dr7);

	return 0;
}
515
516static __init int cpu_has_kvm_support(void)
517{
518 unsigned long ecx = cpuid_ecx(1);
519 return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */
520}
521
/*
 * Returns non-zero if the BIOS locked the feature-control MSR with
 * VMX left disabled (lock bit 0 set, enable bit 2 clear), i.e. the
 * kernel can no longer turn VT on.
 */
static __init int vmx_disabled_by_bios(void)
{
	u64 msr;

	rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
	return (msr & 5) == 1; /* locked but not enabled */
}
529
/*
 * Per-cpu VT enable (run on each cpu via smp call): set and lock the
 * feature-control MSR if the BIOS left it open, set CR4.VMXE, then
 * execute VMXON with this cpu's vmxarea as the VMXON region.
 */
static __init void hardware_enable(void *garbage)
{
	int cpu = raw_smp_processor_id();
	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
	u64 old;

	rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
	if ((old & 5) != 5)
		/* enable and lock */
		wrmsrl(MSR_IA32_FEATURE_CONTROL, old | 5);
	write_cr4(read_cr4() | CR4_VMXE); /* FIXME: not cpu hotplug safe */
	asm volatile (ASM_VMX_VMXON_RAX : : "a"(&phys_addr), "m"(phys_addr)
		      : "memory", "cc");
}
544
/* Per-cpu VT disable: leave VMX root operation on this cpu. */
static void hardware_disable(void *garbage)
{
	asm volatile (ASM_VMX_VMXOFF : : : "cc");
}
549
550static __init void setup_vmcs_descriptor(void)
551{
552 u32 vmx_msr_low, vmx_msr_high;
553
554 rdmsr(MSR_IA32_VMX_BASIC_MSR, vmx_msr_low, vmx_msr_high);
555 vmcs_descriptor.size = vmx_msr_high & 0x1fff;
556 vmcs_descriptor.order = get_order(vmcs_descriptor.size);
557 vmcs_descriptor.revision_id = vmx_msr_low;
558};
559
560static struct vmcs *alloc_vmcs_cpu(int cpu)
561{
562 int node = cpu_to_node(cpu);
563 struct page *pages;
564 struct vmcs *vmcs;
565
566 pages = alloc_pages_node(node, GFP_KERNEL, vmcs_descriptor.order);
567 if (!pages)
568 return NULL;
569 vmcs = page_address(pages);
570 memset(vmcs, 0, vmcs_descriptor.size);
571 vmcs->revision_id = vmcs_descriptor.revision_id; /* vmcs revision id */
572 return vmcs;
573}
574
/* Allocate a VMCS region on the current cpu's local node. */
static struct vmcs *alloc_vmcs(void)
{
	return alloc_vmcs_cpu(smp_processor_id());
}
579
/* Free a VMCS region obtained from alloc_vmcs_cpu()/alloc_vmcs(). */
static void free_vmcs(struct vmcs *vmcs)
{
	free_pages((unsigned long)vmcs, vmcs_descriptor.order);
}
584
585static __exit void free_kvm_area(void)
586{
587 int cpu;
588
589 for_each_online_cpu(cpu)
590 free_vmcs(per_cpu(vmxarea, cpu));
591}
592
593extern struct vmcs *alloc_vmcs_cpu(int cpu);
594
595static __init int alloc_kvm_area(void)
596{
597 int cpu;
598
599 for_each_online_cpu(cpu) {
600 struct vmcs *vmcs;
601
602 vmcs = alloc_vmcs_cpu(cpu);
603 if (!vmcs) {
604 free_kvm_area();
605 return -ENOMEM;
606 }
607
608 per_cpu(vmxarea, cpu) = vmcs;
609 }
610 return 0;
611}
612
/*
 * Module-init hardware setup: size the VMCS from the capability MSR,
 * then allocate one VMCS per online cpu.  Returns 0 or -ENOMEM.
 */
static __init int hardware_setup(void)
{
	setup_vmcs_descriptor();
	return alloc_kvm_area();
}
618
/* Module-exit counterpart of hardware_setup(). */
static __exit void hardware_unsetup(void)
{
	free_kvm_area();
}
623
624static void update_exception_bitmap(struct kvm_vcpu *vcpu)
625{
626 if (vcpu->rmode.active)
627 vmcs_write32(EXCEPTION_BITMAP, ~0);
628 else
629 vmcs_write32(EXCEPTION_BITMAP, 1 << PF_VECTOR);
630}
631
632static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
633{
634 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
635
636 if (vmcs_readl(sf->base) == save->base) {
637 vmcs_write16(sf->selector, save->selector);
638 vmcs_writel(sf->base, save->base);
639 vmcs_write32(sf->limit, save->limit);
640 vmcs_write32(sf->ar_bytes, save->ar);
641 } else {
642 u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK)
643 << AR_DPL_SHIFT;
644 vmcs_write32(sf->ar_bytes, 0x93 | dpl);
645 }
646}
647
/*
 * Leave emulated real mode: restore the TR and data segments saved by
 * enter_rmode()/fix_rmode_seg(), clear VM and restore the saved IOPL
 * in rflags, and let the guest's own CR4.VME shine through again.
 */
static void enter_pmode(struct kvm_vcpu *vcpu)
{
	unsigned long flags;

	vcpu->rmode.active = 0;

	vmcs_writel(GUEST_TR_BASE, vcpu->rmode.tr.base);
	vmcs_write32(GUEST_TR_LIMIT, vcpu->rmode.tr.limit);
	vmcs_write32(GUEST_TR_AR_BYTES, vcpu->rmode.tr.ar);

	flags = vmcs_readl(GUEST_RFLAGS);
	flags &= ~(IOPL_MASK | X86_EFLAGS_VM);
	flags |= (vcpu->rmode.save_iopl << IOPL_SHIFT);
	vmcs_writel(GUEST_RFLAGS, flags);

	/* Take VME from the guest's shadow cr4 rather than the forced
	 * hardware value. */
	vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~CR4_VME_MASK) |
			(vmcs_readl(CR4_READ_SHADOW) & CR4_VME_MASK));

	update_exception_bitmap(vcpu);

	fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->rmode.es);
	fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->rmode.ds);
	fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->rmode.gs);
	fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->rmode.fs);

	vmcs_write16(GUEST_SS_SELECTOR, 0);
	vmcs_write32(GUEST_SS_AR_BYTES, 0x93);

	vmcs_write16(GUEST_CS_SELECTOR,
		     vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK);
	vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
}
680
681static int rmode_tss_base(struct kvm* kvm)
682{
683 gfn_t base_gfn = kvm->memslots[0].base_gfn + kvm->memslots[0].npages - 3;
684 return base_gfn << PAGE_SHIFT;
685}
686
687static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
688{
689 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
690
691 save->selector = vmcs_read16(sf->selector);
692 save->base = vmcs_readl(sf->base);
693 save->limit = vmcs_read32(sf->limit);
694 save->ar = vmcs_read32(sf->ar_bytes);
695 vmcs_write16(sf->selector, vmcs_readl(sf->base) >> 4);
696 vmcs_write32(sf->limit, 0xffff);
697 vmcs_write32(sf->ar_bytes, 0xf3);
698}
699
/*
 * Enter emulated real mode via vm86: save the TR state, point TR at
 * the private vm86 TSS, set VM and IOPL in rflags (saving the guest's
 * IOPL), force CR4.VME, and convert all segments to the
 * selector == base >> 4 form vm86 demands.
 */
static void enter_rmode(struct kvm_vcpu *vcpu)
{
	unsigned long flags;

	vcpu->rmode.active = 1;

	vcpu->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
	vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));

	vcpu->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
	vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);

	vcpu->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);	/* busy 32-bit TSS */

	flags = vmcs_readl(GUEST_RFLAGS);
	vcpu->rmode.save_iopl = (flags & IOPL_MASK) >> IOPL_SHIFT;

	/* IOPL 3 so the vm86 guest may use cli/sti etc. freely. */
	flags |= IOPL_MASK | X86_EFLAGS_VM;

	vmcs_writel(GUEST_RFLAGS, flags);
	vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | CR4_VME_MASK);
	update_exception_bitmap(vcpu);

	vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
	vmcs_write32(GUEST_SS_LIMIT, 0xffff);
	vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);

	vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
	vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);

	fix_rmode_seg(VCPU_SREG_ES, &vcpu->rmode.es);
	fix_rmode_seg(VCPU_SREG_DS, &vcpu->rmode.ds);
	fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs);
	fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);
}
736
737#ifdef CONFIG_X86_64
738
/*
 * The guest has activated long mode (EFER.LME set and paging turned
 * on): make sure TR is a busy 64-bit TSS as VM entry requires, record
 * LMA in the shadow and saved EFER, and switch on the IA-32e
 * VM-entry control.
 */
static void enter_lmode(struct kvm_vcpu *vcpu)
{
	u32 guest_tr_ar;

	guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
	if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
		printk(KERN_DEBUG "%s: tss fixup for long mode. \n",
		       __FUNCTION__);
		vmcs_write32(GUEST_TR_AR_BYTES,
			     (guest_tr_ar & ~AR_TYPE_MASK)
			     | AR_TYPE_BUSY_64_TSS);
	}

	vcpu->shadow_efer |= EFER_LMA;

	/* Keep the auto-loaded EFER image in sync with the mode switch. */
	find_msr_entry(vcpu, MSR_EFER)->data |= EFER_LMA | EFER_LME;
	vmcs_write32(VM_ENTRY_CONTROLS,
		     vmcs_read32(VM_ENTRY_CONTROLS)
		     | VM_ENTRY_CONTROLS_IA32E_MASK);
}
759
760static void exit_lmode(struct kvm_vcpu *vcpu)
761{
762 vcpu->shadow_efer &= ~EFER_LMA;
763
764 vmcs_write32(VM_ENTRY_CONTROLS,
765 vmcs_read32(VM_ENTRY_CONTROLS)
766 & ~VM_ENTRY_CONTROLS_IA32E_MASK);
767}
768
769#endif
770
/*
 * Load guest cr0, performing the mode transitions it implies:
 * real <-> protected as PE changes, and long-mode entry/exit as PG
 * changes while EFER.LME is set.  The hardware cr0 keeps KVM's
 * always-on bits; the guest's raw value goes to the read shadow.
 */
static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (vcpu->rmode.active && (cr0 & CR0_PE_MASK))
		enter_pmode(vcpu);

	if (!vcpu->rmode.active && !(cr0 & CR0_PE_MASK))
		enter_rmode(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->shadow_efer & EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK))
			enter_lmode(vcpu);
		if (is_paging(vcpu) && !(cr0 & CR0_PG_MASK))
			exit_lmode(vcpu);
	}
#endif

	vmcs_writel(CR0_READ_SHADOW, cr0);
	vmcs_writel(GUEST_CR0,
		    (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
	vcpu->cr0 = cr0;
}
793
794/*
795 * Used when restoring the VM to avoid corrupting segment registers
796 */
797static void vmx_set_cr0_no_modeswitch(struct kvm_vcpu *vcpu, unsigned long cr0)
798{
799 vcpu->rmode.active = ((cr0 & CR0_PE_MASK) == 0);
800 update_exception_bitmap(vcpu);
801 vmcs_writel(CR0_READ_SHADOW, cr0);
802 vmcs_writel(GUEST_CR0,
803 (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
804 vcpu->cr0 = cr0;
805}
806
/* Load the guest cr3 (page-table root) into the VMCS. */
static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	vmcs_writel(GUEST_CR3, cr3);
}
811
812static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
813{
814 vmcs_writel(CR4_READ_SHADOW, cr4);
815 vmcs_writel(GUEST_CR4, cr4 | (vcpu->rmode.active ?
816 KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON));
817 vcpu->cr4 = cr4;
818}
819
820#ifdef CONFIG_X86_64
821
822static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
823{
824 struct vmx_msr_entry *msr = find_msr_entry(vcpu, MSR_EFER);
825
826 vcpu->shadow_efer = efer;
827 if (efer & EFER_LMA) {
828 vmcs_write32(VM_ENTRY_CONTROLS,
829 vmcs_read32(VM_ENTRY_CONTROLS) |
830 VM_ENTRY_CONTROLS_IA32E_MASK);
831 msr->data = efer;
832
833 } else {
834 vmcs_write32(VM_ENTRY_CONTROLS,
835 vmcs_read32(VM_ENTRY_CONTROLS) &
836 ~VM_ENTRY_CONTROLS_IA32E_MASK);
837
838 msr->data = efer & ~EFER_LME;
839 }
840}
841
842#endif
843
/* Return the current base address of guest segment @seg. */
static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	return vmcs_readl(sf->base);
}
850
851static void vmx_get_segment(struct kvm_vcpu *vcpu,
852 struct kvm_segment *var, int seg)
853{
854 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
855 u32 ar;
856
857 var->base = vmcs_readl(sf->base);
858 var->limit = vmcs_read32(sf->limit);
859 var->selector = vmcs_read16(sf->selector);
860 ar = vmcs_read32(sf->ar_bytes);
861 if (ar & AR_UNUSABLE_MASK)
862 ar = 0;
863 var->type = ar & 15;
864 var->s = (ar >> 4) & 1;
865 var->dpl = (ar >> 5) & 3;
866 var->present = (ar >> 7) & 1;
867 var->avl = (ar >> 12) & 1;
868 var->l = (ar >> 13) & 1;
869 var->db = (ar >> 14) & 1;
870 var->g = (ar >> 15) & 1;
871 var->unusable = (ar >> 16) & 1;
872}
873
874static void vmx_set_segment(struct kvm_vcpu *vcpu,
875 struct kvm_segment *var, int seg)
876{
877 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
878 u32 ar;
879
880 vmcs_writel(sf->base, var->base);
881 vmcs_write32(sf->limit, var->limit);
882 vmcs_write16(sf->selector, var->selector);
883 if (var->unusable)
884 ar = 1 << 16;
885 else {
886 ar = var->type & 15;
887 ar |= (var->s & 1) << 4;
888 ar |= (var->dpl & 3) << 5;
889 ar |= (var->present & 1) << 7;
890 ar |= (var->avl & 1) << 12;
891 ar |= (var->l & 1) << 13;
892 ar |= (var->db & 1) << 14;
893 ar |= (var->g & 1) << 15;
894 }
895 if (ar == 0) /* a 0 value means unusable */
896 ar = AR_UNUSABLE_MASK;
897 vmcs_write32(sf->ar_bytes, ar);
898}
899
/*
 * Non-zero when the IA-32e VM-entry control is set, i.e. the next
 * entry resumes the guest in long mode.
 */
static int vmx_is_long_mode(struct kvm_vcpu *vcpu)
{
	return vmcs_read32(VM_ENTRY_CONTROLS) & VM_ENTRY_CONTROLS_IA32E_MASK;
}
904
905static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
906{
907 u32 ar = vmcs_read32(GUEST_CS_AR_BYTES);
908
909 *db = (ar >> 14) & 1;
910 *l = (ar >> 13) & 1;
911}
912
/* Read the guest IDT limit and base out of the VMCS. */
static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	dt->limit = vmcs_read32(GUEST_IDTR_LIMIT);
	dt->base = vmcs_readl(GUEST_IDTR_BASE);
}
918
/* Write the guest IDT limit and base into the VMCS. */
static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	vmcs_write32(GUEST_IDTR_LIMIT, dt->limit);
	vmcs_writel(GUEST_IDTR_BASE, dt->base);
}
924
/* Read the guest GDT limit and base out of the VMCS. */
static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	dt->limit = vmcs_read32(GUEST_GDTR_LIMIT);
	dt->base = vmcs_readl(GUEST_GDTR_BASE);
}
930
/* Write the guest GDT limit and base into the VMCS. */
static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	vmcs_write32(GUEST_GDTR_LIMIT, dt->limit);
	vmcs_writel(GUEST_GDTR_BASE, dt->base);
}
936
/*
 * Initialize the three guest pages that back the vm86 TSS: zero them,
 * place the I/O-map base offset at byte 0x66 of the first page, and
 * terminate the redirection/I/O bitmap with an all-ones byte at the
 * very end of the structure.  Returns 1 on success, 0 if the backing
 * pages cannot be resolved.
 */
static int init_rmode_tss(struct kvm* kvm)
{
	struct page *p1, *p2, *p3;
	gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
	char *page;

	p1 = _gfn_to_page(kvm, fn++);
	p2 = _gfn_to_page(kvm, fn++);
	p3 = _gfn_to_page(kvm, fn);

	if (!p1 || !p2 || !p3) {
		kvm_printf(kvm,"%s: gfn_to_page failed\n", __FUNCTION__);
		return 0;
	}

	page = kmap_atomic(p1, KM_USER0);
	memset(page, 0, PAGE_SIZE);
	/* Offset 0x66 is the TSS I/O-map base field. */
	*(u16*)(page + 0x66) = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
	kunmap_atomic(page, KM_USER0);

	page = kmap_atomic(p2, KM_USER0);
	memset(page, 0, PAGE_SIZE);
	kunmap_atomic(page, KM_USER0);

	page = kmap_atomic(p3, KM_USER0);
	memset(page, 0, PAGE_SIZE);
	/* Last byte of the TSS: bitmap terminator. */
	*(page + RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1) = ~0;
	kunmap_atomic(page, KM_USER0);

	return 1;
}
968
969static void vmcs_write32_fixedbits(u32 msr, u32 vmcs_field, u32 val)
970{
971 u32 msr_high, msr_low;
972
973 rdmsr(msr, msr_low, msr_high);
974
975 val &= msr_high;
976 val |= msr_low;
977 vmcs_write32(vmcs_field, val);
978}
979
980static void seg_setup(int seg)
981{
982 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
983
984 vmcs_write16(sf->selector, 0);
985 vmcs_writel(sf->base, 0);
986 vmcs_write32(sf->limit, 0xffff);
987 vmcs_write32(sf->ar_bytes, 0x93);
988}
989
990/*
991 * Sets up the vmcs for emulated real mode.
992 */
993static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
994{
995 u32 host_sysenter_cs;
996 u32 junk;
997 unsigned long a;
998 struct descriptor_table dt;
999 int i;
1000 int ret = 0;
1001 int nr_good_msrs;
1002 extern asmlinkage void kvm_vmx_return(void);
1003
1004 if (!init_rmode_tss(vcpu->kvm)) {
1005 ret = -ENOMEM;
1006 goto out;
1007 }
1008
1009 memset(vcpu->regs, 0, sizeof(vcpu->regs));
1010 vcpu->regs[VCPU_REGS_RDX] = get_rdx_init_val();
1011 vcpu->cr8 = 0;
1012 vcpu->apic_base = 0xfee00000 |
1013 /*for vcpu 0*/ MSR_IA32_APICBASE_BSP |
1014 MSR_IA32_APICBASE_ENABLE;
1015
1016 fx_init(vcpu);
1017
1018 /*
1019 * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
1020 * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4. Sigh.
1021 */
1022 vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
1023 vmcs_writel(GUEST_CS_BASE, 0x000f0000);
1024 vmcs_write32(GUEST_CS_LIMIT, 0xffff);
1025 vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
1026
1027 seg_setup(VCPU_SREG_DS);
1028 seg_setup(VCPU_SREG_ES);
1029 seg_setup(VCPU_SREG_FS);
1030 seg_setup(VCPU_SREG_GS);
1031 seg_setup(VCPU_SREG_SS);
1032
1033 vmcs_write16(GUEST_TR_SELECTOR, 0);
1034 vmcs_writel(GUEST_TR_BASE, 0);
1035 vmcs_write32(GUEST_TR_LIMIT, 0xffff);
1036 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
1037
1038 vmcs_write16(GUEST_LDTR_SELECTOR, 0);
1039 vmcs_writel(GUEST_LDTR_BASE, 0);
1040 vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
1041 vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
1042
1043 vmcs_write32(GUEST_SYSENTER_CS, 0);
1044 vmcs_writel(GUEST_SYSENTER_ESP, 0);
1045 vmcs_writel(GUEST_SYSENTER_EIP, 0);
1046
1047 vmcs_writel(GUEST_RFLAGS, 0x02);
1048 vmcs_writel(GUEST_RIP, 0xfff0);
1049 vmcs_writel(GUEST_RSP, 0);
1050
1051 vmcs_writel(GUEST_CR3, 0);
1052
1053 //todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0
1054 vmcs_writel(GUEST_DR7, 0x400);
1055
1056 vmcs_writel(GUEST_GDTR_BASE, 0);
1057 vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
1058
1059 vmcs_writel(GUEST_IDTR_BASE, 0);
1060 vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
1061
1062 vmcs_write32(GUEST_ACTIVITY_STATE, 0);
1063 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
1064 vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
1065
1066 /* I/O */
1067 vmcs_write64(IO_BITMAP_A, 0);
1068 vmcs_write64(IO_BITMAP_B, 0);
1069
1070 guest_write_tsc(0);
1071
1072 vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
1073
1074 /* Special registers */
1075 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
1076
1077 /* Control */
1078 vmcs_write32_fixedbits(MSR_IA32_VMX_PINBASED_CTLS_MSR,
1079 PIN_BASED_VM_EXEC_CONTROL,
1080 PIN_BASED_EXT_INTR_MASK /* 20.6.1 */
1081 | PIN_BASED_NMI_EXITING /* 20.6.1 */
1082 );
1083 vmcs_write32_fixedbits(MSR_IA32_VMX_PROCBASED_CTLS_MSR,
1084 CPU_BASED_VM_EXEC_CONTROL,
1085 CPU_BASED_HLT_EXITING /* 20.6.2 */
1086 | CPU_BASED_CR8_LOAD_EXITING /* 20.6.2 */
1087 | CPU_BASED_CR8_STORE_EXITING /* 20.6.2 */
1088 | CPU_BASED_UNCOND_IO_EXITING /* 20.6.2 */
1089 | CPU_BASED_INVDPG_EXITING
1090 | CPU_BASED_MOV_DR_EXITING
1091 | CPU_BASED_USE_TSC_OFFSETING /* 21.3 */
1092 );
1093
1094 vmcs_write32(EXCEPTION_BITMAP, 1 << PF_VECTOR);
1095 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
1096 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
1097 vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */
1098
1099 vmcs_writel(HOST_CR0, read_cr0()); /* 22.2.3 */
1100 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
1101 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
1102
1103 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
1104 vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
1105 vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
1106 vmcs_write16(HOST_FS_SELECTOR, read_fs()); /* 22.2.4 */
1107 vmcs_write16(HOST_GS_SELECTOR, read_gs()); /* 22.2.4 */
1108 vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
1109#ifdef CONFIG_X86_64
1110 rdmsrl(MSR_FS_BASE, a);
1111 vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
1112 rdmsrl(MSR_GS_BASE, a);
1113 vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
1114#else
1115 vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
1116 vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
1117#endif
1118
1119 vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
1120
1121 get_idt(&dt);
1122 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
1123
1124
1125 vmcs_writel(HOST_RIP, (unsigned long)kvm_vmx_return); /* 22.2.5 */
1126
1127 rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
1128 vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
1129 rdmsrl(MSR_IA32_SYSENTER_ESP, a);
1130 vmcs_writel(HOST_IA32_SYSENTER_ESP, a); /* 22.2.3 */
1131 rdmsrl(MSR_IA32_SYSENTER_EIP, a);
1132 vmcs_writel(HOST_IA32_SYSENTER_EIP, a); /* 22.2.3 */
1133
1134 ret = -ENOMEM;
1135 vcpu->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
1136 if (!vcpu->guest_msrs)
1137 goto out;
1138 vcpu->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
1139 if (!vcpu->host_msrs)
1140 goto out_free_guest_msrs;
1141
1142 for (i = 0; i < NR_VMX_MSR; ++i) {
1143 u32 index = vmx_msr_index[i];
1144 u32 data_low, data_high;
1145 u64 data;
1146 int j = vcpu->nmsrs;
1147
1148 if (rdmsr_safe(index, &data_low, &data_high) < 0)
1149 continue;
1150 data = data_low | ((u64)data_high << 32);
1151 vcpu->host_msrs[j].index = index;
1152 vcpu->host_msrs[j].reserved = 0;
1153 vcpu->host_msrs[j].data = data;
1154 vcpu->guest_msrs[j] = vcpu->host_msrs[j];
1155 ++vcpu->nmsrs;
1156 }
1157 printk(KERN_DEBUG "kvm: msrs: %d\n", vcpu->nmsrs);
1158
1159 nr_good_msrs = vcpu->nmsrs - NR_BAD_MSRS;
1160 vmcs_writel(VM_ENTRY_MSR_LOAD_ADDR,
1161 virt_to_phys(vcpu->guest_msrs + NR_BAD_MSRS));
1162 vmcs_writel(VM_EXIT_MSR_STORE_ADDR,
1163 virt_to_phys(vcpu->guest_msrs + NR_BAD_MSRS));
1164 vmcs_writel(VM_EXIT_MSR_LOAD_ADDR,
1165 virt_to_phys(vcpu->host_msrs + NR_BAD_MSRS));
1166 vmcs_write32_fixedbits(MSR_IA32_VMX_EXIT_CTLS_MSR, VM_EXIT_CONTROLS,
1167 (HOST_IS_64 << 9)); /* 22.2,1, 20.7.1 */
1168 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, nr_good_msrs); /* 22.2.2 */
1169 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, nr_good_msrs); /* 22.2.2 */
1170 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, nr_good_msrs); /* 22.2.2 */
1171
1172
1173 /* 22.2.1, 20.8.1 */
1174 vmcs_write32_fixedbits(MSR_IA32_VMX_ENTRY_CTLS_MSR,
1175 VM_ENTRY_CONTROLS, 0);
1176 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */
1177
1178#ifdef CONFIG_X86_64
1179 vmcs_writel(VIRTUAL_APIC_PAGE_ADDR, 0);
1180 vmcs_writel(TPR_THRESHOLD, 0);
1181#endif
1182
1183 vmcs_writel(CR0_GUEST_HOST_MASK, KVM_GUEST_CR0_MASK);
1184 vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
1185
1186 vcpu->cr0 = 0x60000010;
1187 vmx_set_cr0(vcpu, vcpu->cr0); // enter rmode
1188 vmx_set_cr4(vcpu, 0);
1189#ifdef CONFIG_X86_64
1190 vmx_set_efer(vcpu, 0);
1191#endif
1192
1193 return 0;
1194
1195out_free_guest_msrs:
1196 kfree(vcpu->guest_msrs);
1197out:
1198 return ret;
1199}
1200
/*
 * Emulate real-mode interrupt delivery for vm86: push flags, cs and
 * ip on the guest stack, mask IF/TF/AC, and vector through the IVT
 * entry for @irq (two 16-bit words: ip then cs, at irq * 4).
 */
static void inject_rmode_irq(struct kvm_vcpu *vcpu, int irq)
{
	u16 ent[2];
	u16 cs;
	u16 ip;
	unsigned long flags;
	unsigned long ss_base = vmcs_readl(GUEST_SS_BASE);
	u16 sp =  vmcs_readl(GUEST_RSP);
	u32 ss_limit = vmcs_read32(GUEST_SS_LIMIT);

	/* Need 6 bytes of stack; also guard against sp wrap-around. */
	if (sp > ss_limit || sp - 6 > sp) {
		vcpu_printf(vcpu, "%s: #SS, rsp 0x%lx ss 0x%lx limit 0x%x\n",
			    __FUNCTION__,
			    vmcs_readl(GUEST_RSP),
			    vmcs_readl(GUEST_SS_BASE),
			    vmcs_read32(GUEST_SS_LIMIT));
		return;
	}

	/* Fetch the IVT entry: ent[0] = ip, ent[1] = cs. */
	if (kvm_read_guest(vcpu, irq * sizeof(ent), sizeof(ent), &ent) !=
								sizeof(ent)) {
		vcpu_printf(vcpu, "%s: read guest err\n", __FUNCTION__);
		return;
	}

	flags =  vmcs_readl(GUEST_RFLAGS);
	cs =  vmcs_readl(GUEST_CS_BASE) >> 4;
	ip =  vmcs_readl(GUEST_RIP);


	/* Push flags, cs, ip (16 bits each) as a real-mode int would. */
	if (kvm_write_guest(vcpu, ss_base + sp - 2, 2, &flags) != 2 ||
	    kvm_write_guest(vcpu, ss_base + sp - 4, 2, &cs) != 2 ||
	    kvm_write_guest(vcpu, ss_base + sp - 6, 2, &ip) != 2) {
		vcpu_printf(vcpu, "%s: write guest err\n", __FUNCTION__);
		return;
	}

	vmcs_writel(GUEST_RFLAGS, flags &
		    ~( X86_EFLAGS_IF | X86_EFLAGS_AC | X86_EFLAGS_TF));
	vmcs_write16(GUEST_CS_SELECTOR, ent[1]) ;
	vmcs_writel(GUEST_CS_BASE, ent[1] << 4);
	vmcs_writel(GUEST_RIP, ent[0]);
	vmcs_writel(GUEST_RSP, (vmcs_readl(GUEST_RSP) & ~0xffff) | (sp - 6));
}
1245
1246static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
1247{
1248 int word_index = __ffs(vcpu->irq_summary);
1249 int bit_index = __ffs(vcpu->irq_pending[word_index]);
1250 int irq = word_index * BITS_PER_LONG + bit_index;
1251
1252 clear_bit(bit_index, &vcpu->irq_pending[word_index]);
1253 if (!vcpu->irq_pending[word_index])
1254 clear_bit(word_index, &vcpu->irq_summary);
1255
1256 if (vcpu->rmode.active) {
1257 inject_rmode_irq(vcpu, irq);
1258 return;
1259 }
1260 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
1261 irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
1262}
1263
1264static void kvm_try_inject_irq(struct kvm_vcpu *vcpu)
1265{
1266 if ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF)
1267 && (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0)
1268 /*
1269 * Interrupts enabled, and not blocked by sti or mov ss. Good.
1270 */
1271 kvm_do_inject_irq(vcpu);
1272 else
1273 /*
1274 * Interrupts blocked. Wait for unblock.
1275 */
1276 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
1277 vmcs_read32(CPU_BASED_VM_EXEC_CONTROL)
1278 | CPU_BASED_VIRTUAL_INTR_PENDING);
1279}
1280
/*
 * Called before entering the guest while guest-debug is active: load
 * the host debug registers with the requested breakpoints and, when
 * single-stepping, set TF in guest rflags (RF too — presumably to
 * avoid immediately re-faulting on the current instruction; confirm).
 */
static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
{
	struct kvm_guest_debug *dbg = &vcpu->guest_debug;

	set_debugreg(dbg->bp[0], 0);
	set_debugreg(dbg->bp[1], 1);
	set_debugreg(dbg->bp[2], 2);
	set_debugreg(dbg->bp[3], 3);

	if (dbg->singlestep) {
		unsigned long flags;

		flags = vmcs_readl(GUEST_RFLAGS);
		flags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
		vmcs_writel(GUEST_RFLAGS, flags);
	}
}
1298
1299static int handle_rmode_exception(struct kvm_vcpu *vcpu,
1300 int vec, u32 err_code)
1301{
1302 if (!vcpu->rmode.active)
1303 return 0;
1304
1305 if (vec == GP_VECTOR && err_code == 0)
1306 if (emulate_instruction(vcpu, NULL, 0, 0) == EMULATE_DONE)
1307 return 1;
1308 return 0;
1309}
1310
/*
 * VM-exit handler for guest exceptions and NMIs.  Re-queues an
 * external interrupt that was being vectored when the exception hit,
 * reflects NMIs to the host, services page faults through the shadow
 * mmu (falling back to the emulator, e.g. for mmio), and otherwise
 * reports the exception to userspace.  Returns 1 to resume the guest,
 * 0 to exit to userspace.
 */
static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 intr_info, error_code;
	unsigned long cr2, rip;
	u32 vect_info;
	enum emulation_result er;

	vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
	intr_info = vmcs_read32(VM_EXIT_INTR_INFO);

	if ((vect_info & VECTORING_INFO_VALID_MASK) &&
						!is_page_fault(intr_info)) {
		printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
		       "intr info 0x%x\n", __FUNCTION__, vect_info, intr_info);
	}

	/* The exception interrupted interrupt delivery: re-queue the irq
	 * so it is retried on the next entry. */
	if (is_external_interrupt(vect_info)) {
		int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
		set_bit(irq, vcpu->irq_pending);
		set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
	}

	if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) { /* nmi */
		asm ("int $2");	/* reflect the NMI to the host */
		return 1;
	}
	error_code = 0;
	rip = vmcs_readl(GUEST_RIP);
	if (intr_info & INTR_INFO_DELIEVER_CODE_MASK)
		error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
	if (is_page_fault(intr_info)) {
		cr2 = vmcs_readl(EXIT_QUALIFICATION);

		spin_lock(&vcpu->kvm->lock);
		if (!vcpu->mmu.page_fault(vcpu, cr2, error_code)) {
			spin_unlock(&vcpu->kvm->lock);
			return 1;
		}

		er = emulate_instruction(vcpu, kvm_run, cr2, error_code);
		spin_unlock(&vcpu->kvm->lock);

		switch (er) {
		case EMULATE_DONE:
			return 1;
		case EMULATE_DO_MMIO:
			++kvm_stat.mmio_exits;
			kvm_run->exit_reason = KVM_EXIT_MMIO;
			return 0;
		 case EMULATE_FAIL:
			vcpu_printf(vcpu, "%s: emulate fail\n", __FUNCTION__);
			break;
		default:
			BUG();
		}
	}

	if (vcpu->rmode.active &&
	    handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
								error_code))
		return 1;

	/* #DB (vector 1): hand control to the userspace debugger. */
	if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) == (INTR_TYPE_EXCEPTION | 1)) {
		kvm_run->exit_reason = KVM_EXIT_DEBUG;
		return 0;
	}
	kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
	kvm_run->ex.exception = intr_info & INTR_INFO_VECTOR_MASK;
	kvm_run->ex.error_code = error_code;
	return 0;
}
1382
/*
 * VM-exit for a host interrupt: just count it and resume the guest
 * (the interrupt itself is presumably serviced by the host as part of
 * the exit path).
 */
static int handle_external_interrupt(struct kvm_vcpu *vcpu,
				     struct kvm_run *kvm_run)
{
	++kvm_stat.irq_exits;
	return 1;
}
1389
1390
/*
 * For a rep string I/O instruction, derive the repeat count from rcx
 * masked to the effective address size: the mode's default size,
 * possibly flipped by a 0x67 (address-size) prefix found while
 * scanning the instruction bytes at rip.  Returns 1 with *count set
 * on success, 0 if only prefix bytes were seen in the bytes read.
 */
static int get_io_count(struct kvm_vcpu *vcpu, u64 *count)
{
	u64 inst;
	gva_t rip;
	int countr_size;
	int i, n;

	if ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_VM)) {
		countr_size = 2;	/* vm86: 16-bit addressing */
	} else {
		u32 cs_ar = vmcs_read32(GUEST_CS_AR_BYTES);

		countr_size = (cs_ar & AR_L_MASK) ? 8:
			      (cs_ar & AR_DB_MASK) ? 4: 2;
	}

	rip =  vmcs_readl(GUEST_RIP);
	if (countr_size != 8)
		rip += vmcs_readl(GUEST_CS_BASE);

	n = kvm_read_guest(vcpu, rip, sizeof(inst), &inst);

	/* Skip legacy prefixes until the opcode byte is reached. */
	for (i = 0; i < n; i++) {
		switch (((u8*)&inst)[i]) {
		case 0xf0:
		case 0xf2:
		case 0xf3:
		case 0x2e:
		case 0x36:
		case 0x3e:
		case 0x26:
		case 0x64:
		case 0x65:
		case 0x66:
			break;
		case 0x67:
			countr_size = (countr_size == 2) ? 4: (countr_size >> 1);
			/*
			 * NOTE(review): falls through to done after an
			 * address-size prefix instead of continuing the
			 * scan — verify this is intended.
			 */
		default:
			goto done;
		}
	}
	return 0;
done:
	countr_size *= 8;
	*count = vcpu->regs[VCPU_REGS_RCX] & (~0ULL >> (64 - countr_size));
	return 1;
}
1438
1439static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1440{
1441 u64 exit_qualification;
1442
1443 ++kvm_stat.io_exits;
1444 exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
1445 kvm_run->exit_reason = KVM_EXIT_IO;
1446 if (exit_qualification & 8)
1447 kvm_run->io.direction = KVM_EXIT_IO_IN;
1448 else
1449 kvm_run->io.direction = KVM_EXIT_IO_OUT;
1450 kvm_run->io.size = (exit_qualification & 7) + 1;
1451 kvm_run->io.string = (exit_qualification & 16) != 0;
1452 kvm_run->io.string_down
1453 = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0;
1454 kvm_run->io.rep = (exit_qualification & 32) != 0;
1455 kvm_run->io.port = exit_qualification >> 16;
1456 if (kvm_run->io.string) {
1457 if (!get_io_count(vcpu, &kvm_run->io.count))
1458 return 1;
1459 kvm_run->io.address = vmcs_readl(GUEST_LINEAR_ADDRESS);
1460 } else
1461 kvm_run->io.value = vcpu->regs[VCPU_REGS_RAX]; /* rax */
1462 return 0;
1463}
1464
1465static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1466{
1467 u64 address = vmcs_read64(EXIT_QUALIFICATION);
1468 int instruction_length = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
1469 spin_lock(&vcpu->kvm->lock);
1470 vcpu->mmu.inval_page(vcpu, address);
1471 spin_unlock(&vcpu->kvm->lock);
1472 vmcs_writel(GUEST_RIP, vmcs_readl(GUEST_RIP) + instruction_length);
1473 return 1;
1474}
1475
/*
 * VM-exit handler for control register accesses.  Decodes the exit
 * qualification (bits 0-3: cr number, 4-5: access type, 8-11: gp
 * register) and emulates mov to cr0/3/4/8, mov from cr3/8, and lmsw.
 * Returns 1 to resume the guest, 0 to report an unhandled access to
 * userspace.
 */
static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u64 exit_qualification;
	int cr;
	int reg;

	exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
	cr = exit_qualification & 15;
	reg = (exit_qualification >> 8) & 15;
	switch ((exit_qualification >> 4) & 3) {
	case 0: /* mov to cr */
		switch (cr) {
		case 0:
			vcpu_load_rsp_rip(vcpu);
			set_cr0(vcpu, vcpu->regs[reg]);
			skip_emulated_instruction(vcpu);
			return 1;
		case 3:
			vcpu_load_rsp_rip(vcpu);
			set_cr3(vcpu, vcpu->regs[reg]);
			skip_emulated_instruction(vcpu);
			return 1;
		case 4:
			vcpu_load_rsp_rip(vcpu);
			set_cr4(vcpu, vcpu->regs[reg]);
			skip_emulated_instruction(vcpu);
			return 1;
		case 8:
			vcpu_load_rsp_rip(vcpu);
			set_cr8(vcpu, vcpu->regs[reg]);
			skip_emulated_instruction(vcpu);
			return 1;
		};
		break;
	case 1: /*mov from cr*/
		switch (cr) {
		case 3:
			vcpu_load_rsp_rip(vcpu);
			vcpu->regs[reg] = vcpu->cr3;
			vcpu_put_rsp_rip(vcpu);
			skip_emulated_instruction(vcpu);
			return 1;
		case 8:
			/* CR8 reads exit only due to erratum AA15. */
			printk(KERN_DEBUG "handle_cr: read CR8 "
			       "cpu erratum AA15\n");
			vcpu_load_rsp_rip(vcpu);
			vcpu->regs[reg] = vcpu->cr8;
			vcpu_put_rsp_rip(vcpu);
			skip_emulated_instruction(vcpu);
			return 1;
		}
		break;
	case 3: /* lmsw */
		lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);

		skip_emulated_instruction(vcpu);
		return 1;
	default:
		break;
	}
	kvm_run->exit_reason = 0;
	printk(KERN_ERR "kvm: unhandled control register: op %d cr %d\n",
	       (int)(exit_qualification >> 4) & 3, cr);
	return 0;
}
1541
/*
 * Handle a VM exit caused by a debug-register access.  Reads are
 * satisfied with fixed benign values; writes are silently dropped.
 * Always returns 1 (never punts to userspace).
 */
static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u64 exit_qualification;
	unsigned long val;
	int dr, reg;

	/*
	 * FIXME: this code assumes the host is debugging the guest.
	 * need to deal with guest debugging itself too.
	 */
	exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
	dr = exit_qualification & 7;		/* DEBUG_REG_ACCESS_NUM */
	reg = (exit_qualification >> 8) & 15;	/* DEBUG_REG_ACCESS_REG */
	vcpu_load_rsp_rip(vcpu);
	if (exit_qualification & 16) {		/* TYPE_MOV_FROM_DR */
		/* mov from dr */
		switch (dr) {
		case 6:
			val = 0xffff0ff0;	/* DR6 architectural reset value */
			break;
		case 7:
			val = 0x400;		/* DR7 architectural reset value */
			break;
		default:
			val = 0;
		}
		vcpu->regs[reg] = val;
	} else {
		/* mov to dr -- intentionally ignored (see FIXME above) */
	}
	vcpu_put_rsp_rip(vcpu);
	skip_emulated_instruction(vcpu);
	return 1;
}
1576
/*
 * CPUID is not handled in the kernel; returning 0 with
 * KVM_EXIT_CPUID hands the exit to userspace.
 */
static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->exit_reason = KVM_EXIT_CPUID;
	return 0;
}
1582
1583static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1584{
1585 u32 ecx = vcpu->regs[VCPU_REGS_RCX];
1586 u64 data;
1587
1588 if (vmx_get_msr(vcpu, ecx, &data)) {
1589 vmx_inject_gp(vcpu, 0);
1590 return 1;
1591 }
1592
1593 /* FIXME: handling of bits 32:63 of rax, rdx */
1594 vcpu->regs[VCPU_REGS_RAX] = data & -1u;
1595 vcpu->regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
1596 skip_emulated_instruction(vcpu);
1597 return 1;
1598}
1599
1600static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1601{
1602 u32 ecx = vcpu->regs[VCPU_REGS_RCX];
1603 u64 data = (vcpu->regs[VCPU_REGS_RAX] & -1u)
1604 | ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
1605
1606 if (vmx_set_msr(vcpu, ecx, data) != 0) {
1607 vmx_inject_gp(vcpu, 0);
1608 return 1;
1609 }
1610
1611 skip_emulated_instruction(vcpu);
1612 return 1;
1613}
1614
1615static int handle_interrupt_window(struct kvm_vcpu *vcpu,
1616 struct kvm_run *kvm_run)
1617{
1618 /* Turn off interrupt window reporting. */
1619 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
1620 vmcs_read32(CPU_BASED_VM_EXEC_CONTROL)
1621 & ~CPU_BASED_VIRTUAL_INTR_PENDING);
1622 return 1;
1623}
1624
1625static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1626{
1627 skip_emulated_instruction(vcpu);
1628 if (vcpu->irq_summary && (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF))
1629 return 1;
1630
1631 kvm_run->exit_reason = KVM_EXIT_HLT;
1632 return 0;
1633}
1634
1635/*
1636 * The exit handlers return 1 if the exit was handled fully and guest execution
1637 * may resume. Otherwise they set the kvm_run parameter to indicate what needs
1638 * to be done to userspace and return 0.
1639 */
/*
 * Dispatch table indexed by basic VM-exit reason.  Unlisted reasons are
 * NULL entries; kvm_handle_exit() reports those as KVM_EXIT_UNKNOWN.
 */
static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
				      struct kvm_run *kvm_run) = {
	[EXIT_REASON_EXCEPTION_NMI]           = handle_exception,
	[EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
	[EXIT_REASON_IO_INSTRUCTION]          = handle_io,
	[EXIT_REASON_INVLPG]                  = handle_invlpg,
	[EXIT_REASON_CR_ACCESS]               = handle_cr,
	[EXIT_REASON_DR_ACCESS]               = handle_dr,
	[EXIT_REASON_CPUID]                   = handle_cpuid,
	[EXIT_REASON_MSR_READ]                = handle_rdmsr,
	[EXIT_REASON_MSR_WRITE]               = handle_wrmsr,
	[EXIT_REASON_PENDING_INTERRUPT]       = handle_interrupt_window,
	[EXIT_REASON_HLT]                     = handle_halt,
};
1654
1655static const int kvm_vmx_max_exit_handlers =
1656 sizeof(kvm_vmx_exit_handlers) / sizeof(*kvm_vmx_exit_handlers);
1657
1658/*
1659 * The guest has exited. See if we can fix it or if we need userspace
1660 * assistance.
1661 */
static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	u32 vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
	u32 exit_reason = vmcs_read32(VM_EXIT_REASON);

	/*
	 * A valid IDT-vectoring field means the exit interrupted event
	 * delivery; only exception/NMI exits are expected in that state.
	 */
	if ( (vectoring_info & VECTORING_INFO_VALID_MASK) &&
				exit_reason != EXIT_REASON_EXCEPTION_NMI )
		printk(KERN_WARNING "%s: unexpected, valid vectoring info and "
		       "exit reason is 0x%x\n", __FUNCTION__, exit_reason);
	kvm_run->instruction_length = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
	/* Dispatch through the handler table; NULL slots go to userspace. */
	if (exit_reason < kvm_vmx_max_exit_handlers
	    && kvm_vmx_exit_handlers[exit_reason])
		return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);
	else {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = exit_reason;
	}
	return 0;
}
1681
/*
 * Run the guest, looping over VM exits that can be handled entirely in
 * the kernel.  Returns 0 when an exit needs userspace attention (the
 * details are in *kvm_run), or -EINTR when a signal is pending.
 */
static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u8 fail;
	u16 fs_sel, gs_sel, ldt_sel;
	int fs_gs_ldt_reload_needed;

again:
	/*
	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
	 * allow segment selectors with cpl > 0 or ti == 1.
	 */
	fs_sel = read_fs();
	gs_sel = read_gs();
	ldt_sel = read_ldt();
	fs_gs_ldt_reload_needed = (fs_sel & 7) | (gs_sel & 7) | ldt_sel;
	if (!fs_gs_ldt_reload_needed) {
		vmcs_write16(HOST_FS_SELECTOR, fs_sel);
		vmcs_write16(HOST_GS_SELECTOR, gs_sel);
	} else {
		vmcs_write16(HOST_FS_SELECTOR, 0);
		vmcs_write16(HOST_GS_SELECTOR, 0);
	}

#ifdef CONFIG_X86_64
	vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
	vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
#else
	vmcs_writel(HOST_FS_BASE, segment_base(fs_sel));
	vmcs_writel(HOST_GS_BASE, segment_base(gs_sel));
#endif

	/* Inject a pending interrupt if the entry-interruption field is free. */
	if (vcpu->irq_summary &&
	    !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
		kvm_try_inject_irq(vcpu);

	if (vcpu->guest_debug.enabled)
		kvm_guest_debug_pre(vcpu);

	/* Swap in the guest FPU and MSR state for the duration of the run. */
	fx_save(vcpu->host_fx_image);
	fx_restore(vcpu->guest_fx_image);

	save_msrs(vcpu->host_msrs, vcpu->nmsrs);
	load_msrs(vcpu->guest_msrs, NR_BAD_MSRS);

	/*
	 * The world switch: save host GPRs on the stack, load the guest
	 * register image from vcpu->regs, VMLAUNCH/VMRESUME, then on exit
	 * save the guest registers back and restore the host's.
	 */
	asm (
		/* Store host registers */
		"pushf \n\t"
#ifdef CONFIG_X86_64
		"push %%rax; push %%rbx; push %%rdx;"
		"push %%rsi; push %%rdi; push %%rbp;"
		"push %%r8; push %%r9; push %%r10; push %%r11;"
		"push %%r12; push %%r13; push %%r14; push %%r15;"
		"push %%rcx \n\t"
		ASM_VMX_VMWRITE_RSP_RDX "\n\t"
#else
		"pusha; push %%ecx \n\t"
		ASM_VMX_VMWRITE_RSP_RDX "\n\t"
#endif
		/* Check if vmlaunch or vmresume is needed */
		"cmp $0, %1 \n\t"
		/* Load guest registers. Don't clobber flags. */
#ifdef CONFIG_X86_64
		"mov %c[cr2](%3), %%rax \n\t"
		"mov %%rax, %%cr2 \n\t"
		"mov %c[rax](%3), %%rax \n\t"
		"mov %c[rbx](%3), %%rbx \n\t"
		"mov %c[rdx](%3), %%rdx \n\t"
		"mov %c[rsi](%3), %%rsi \n\t"
		"mov %c[rdi](%3), %%rdi \n\t"
		"mov %c[rbp](%3), %%rbp \n\t"
		"mov %c[r8](%3), %%r8 \n\t"
		"mov %c[r9](%3), %%r9 \n\t"
		"mov %c[r10](%3), %%r10 \n\t"
		"mov %c[r11](%3), %%r11 \n\t"
		"mov %c[r12](%3), %%r12 \n\t"
		"mov %c[r13](%3), %%r13 \n\t"
		"mov %c[r14](%3), %%r14 \n\t"
		"mov %c[r15](%3), %%r15 \n\t"
		"mov %c[rcx](%3), %%rcx \n\t" /* kills %3 (rcx) */
#else
		"mov %c[cr2](%3), %%eax \n\t"
		"mov %%eax,   %%cr2 \n\t"
		"mov %c[rax](%3), %%eax \n\t"
		"mov %c[rbx](%3), %%ebx \n\t"
		"mov %c[rdx](%3), %%edx \n\t"
		"mov %c[rsi](%3), %%esi \n\t"
		"mov %c[rdi](%3), %%edi \n\t"
		"mov %c[rbp](%3), %%ebp \n\t"
		"mov %c[rcx](%3), %%ecx \n\t" /* kills %3 (ecx) */
#endif
		/* Enter guest mode */
		"jne launched \n\t"
		ASM_VMX_VMLAUNCH "\n\t"
		"jmp kvm_vmx_return \n\t"
		"launched: " ASM_VMX_VMRESUME "\n\t"
		".globl kvm_vmx_return \n\t"
		"kvm_vmx_return: "
		/* Save guest registers, load host registers, keep flags */
#ifdef CONFIG_X86_64
		"xchg %3, 0(%%rsp) \n\t"
		"mov %%rax, %c[rax](%3) \n\t"
		"mov %%rbx, %c[rbx](%3) \n\t"
		"pushq 0(%%rsp); popq %c[rcx](%3) \n\t"
		"mov %%rdx, %c[rdx](%3) \n\t"
		"mov %%rsi, %c[rsi](%3) \n\t"
		"mov %%rdi, %c[rdi](%3) \n\t"
		"mov %%rbp, %c[rbp](%3) \n\t"
		"mov %%r8, %c[r8](%3) \n\t"
		"mov %%r9, %c[r9](%3) \n\t"
		"mov %%r10, %c[r10](%3) \n\t"
		"mov %%r11, %c[r11](%3) \n\t"
		"mov %%r12, %c[r12](%3) \n\t"
		"mov %%r13, %c[r13](%3) \n\t"
		"mov %%r14, %c[r14](%3) \n\t"
		"mov %%r15, %c[r15](%3) \n\t"
		"mov %%cr2, %%rax   \n\t"
		"mov %%rax, %c[cr2](%3) \n\t"
		"mov 0(%%rsp), %3 \n\t"

		"pop  %%rcx; pop %%r15; pop %%r14; pop %%r13; pop %%r12;"
		"pop %%r11; pop %%r10; pop %%r9; pop %%r8;"
		"pop %%rbp; pop %%rdi; pop %%rsi;"
		"pop %%rdx; pop %%rbx; pop %%rax \n\t"
#else
		"xchg %3, 0(%%esp) \n\t"
		"mov %%eax, %c[rax](%3) \n\t"
		"mov %%ebx, %c[rbx](%3) \n\t"
		"pushl 0(%%esp); popl %c[rcx](%3) \n\t"
		"mov %%edx, %c[rdx](%3) \n\t"
		"mov %%esi, %c[rsi](%3) \n\t"
		"mov %%edi, %c[rdi](%3) \n\t"
		"mov %%ebp, %c[rbp](%3) \n\t"
		"mov %%cr2, %%eax  \n\t"
		"mov %%eax, %c[cr2](%3) \n\t"
		"mov 0(%%esp), %3 \n\t"

		"pop %%ecx; popa \n\t"
#endif
		"setbe %0 \n\t"
		"popf \n\t"
	      : "=g" (fail)
	      : "r"(vcpu->launched), "d"((unsigned long)HOST_RSP),
		"c"(vcpu),
		[rax]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RAX])),
		[rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])),
		[rcx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RCX])),
		[rdx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDX])),
		[rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
		[rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
		[rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP])),
#ifdef CONFIG_X86_64
		[r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])),
		[r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])),
		[r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
		[r11]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R11])),
		[r12]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R12])),
		[r13]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R13])),
		[r14]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R14])),
		[r15]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R15])),
#endif
		[cr2]"i"(offsetof(struct kvm_vcpu, cr2))
	      : "cc", "memory" );

	++kvm_stat.exits;

	/* Restore the host's MSR and FPU state. */
	save_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
	load_msrs(vcpu->host_msrs, NR_BAD_MSRS);

	fx_save(vcpu->guest_fx_image);
	fx_restore(vcpu->host_fx_image);

#ifndef CONFIG_X86_64
	asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
#endif

	kvm_run->exit_type = 0;
	if (fail) {
		/* 'fail' was set by the setbe above: entry itself failed. */
		kvm_run->exit_type = KVM_EXIT_TYPE_FAIL_ENTRY;
		kvm_run->exit_reason = vmcs_read32(VM_INSTRUCTION_ERROR);
	} else {
		if (fs_gs_ldt_reload_needed) {
			load_ldt(ldt_sel);
			load_fs(fs_sel);
			/*
			 * If we have to reload gs, we must take care to
			 * preserve our gs base.
			 */
			local_irq_disable();
			load_gs(gs_sel);
#ifdef CONFIG_X86_64
			wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
#endif
			local_irq_enable();

			reload_tss();
		}
		vcpu->launched = 1;
		kvm_run->exit_type = KVM_EXIT_TYPE_VM_EXIT;
		if (kvm_handle_exit(kvm_run, vcpu)) {
			/* Give scheduler a chance to reschedule. */
			if (signal_pending(current)) {
				++kvm_stat.signal_exits;
				return -EINTR;
			}
			kvm_resched(vcpu);
			goto again;
		}
	}
	return 0;
}
1892
1893static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
1894{
1895 vmcs_writel(GUEST_CR3, vmcs_readl(GUEST_CR3));
1896}
1897
/*
 * Inject a page fault into the guest.  If the IDT-vectoring info shows
 * we were already delivering a page fault when this one occurred,
 * escalate to a double fault rather than nesting #PF.
 */
static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
				  unsigned long addr,
				  u32 err_code)
{
	u32 vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);

	++kvm_stat.pf_guest;

	if (is_page_fault(vect_info)) {
		printk(KERN_DEBUG "inject_page_fault: "
		       "double fault 0x%lx @ 0x%lx\n",
		       addr, vmcs_readl(GUEST_RIP));
		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, 0);
		vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
			     DF_VECTOR |
			     INTR_TYPE_EXCEPTION |
			     INTR_INFO_DELIEVER_CODE_MASK |
			     INTR_INFO_VALID_MASK);
		return;
	}
	/* Normal case: record the faulting address in cr2 and inject #PF. */
	vcpu->cr2 = addr;
	vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, err_code);
	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
		     PF_VECTOR |
		     INTR_TYPE_EXCEPTION |
		     INTR_INFO_DELIEVER_CODE_MASK |
		     INTR_INFO_VALID_MASK);

}
1927
1928static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
1929{
1930 if (vcpu->vmcs) {
1931 on_each_cpu(__vcpu_clear, vcpu, 0, 1);
1932 free_vmcs(vcpu->vmcs);
1933 vcpu->vmcs = NULL;
1934 }
1935}
1936
/* Release all VMX-specific per-vcpu state (currently just the VMCS). */
static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
	vmx_free_vmcs(vcpu);
}
1941
1942static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
1943{
1944 struct vmcs *vmcs;
1945
1946 vmcs = alloc_vmcs();
1947 if (!vmcs)
1948 return -ENOMEM;
1949 vmcs_clear(vmcs);
1950 vcpu->vmcs = vmcs;
1951 vcpu->launched = 0;
1952 return 0;
1953}
1954
/* VMX implementation of the arch-neutral kvm operations vector. */
static struct kvm_arch_ops vmx_arch_ops = {
	/* Hardware probing and per-cpu enable/disable. */
	.cpu_has_kvm_support = cpu_has_kvm_support,
	.disabled_by_bios = vmx_disabled_by_bios,
	.hardware_setup = hardware_setup,
	.hardware_unsetup = hardware_unsetup,
	.hardware_enable = hardware_enable,
	.hardware_disable = hardware_disable,

	/* Vcpu lifecycle. */
	.vcpu_create = vmx_create_vcpu,
	.vcpu_free = vmx_free_vcpu,

	.vcpu_load = vmx_vcpu_load,
	.vcpu_put = vmx_vcpu_put,

	/* Guest state accessors. */
	.set_guest_debug = set_guest_debug,
	.get_msr = vmx_get_msr,
	.set_msr = vmx_set_msr,
	.get_segment_base = vmx_get_segment_base,
	.get_segment = vmx_get_segment,
	.set_segment = vmx_set_segment,
	.is_long_mode = vmx_is_long_mode,
	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
	.set_cr0 = vmx_set_cr0,
	.set_cr0_no_modeswitch = vmx_set_cr0_no_modeswitch,
	.set_cr3 = vmx_set_cr3,
	.set_cr4 = vmx_set_cr4,
#ifdef CONFIG_X86_64
	.set_efer = vmx_set_efer,
#endif
	.get_idt = vmx_get_idt,
	.set_idt = vmx_set_idt,
	.get_gdt = vmx_get_gdt,
	.set_gdt = vmx_set_gdt,
	.cache_regs = vcpu_load_rsp_rip,
	.decache_regs = vcpu_put_rsp_rip,
	.get_rflags = vmx_get_rflags,
	.set_rflags = vmx_set_rflags,

	.tlb_flush = vmx_flush_tlb,
	.inject_page_fault = vmx_inject_page_fault,

	.inject_gp = vmx_inject_gp,

	.run = vmx_vcpu_run,
	.skip_emulated_instruction = skip_emulated_instruction,
	.vcpu_setup = vmx_vcpu_setup,
};
2002
/* Module entry point: register the VMX ops with the generic kvm layer. */
static int __init vmx_init(void)
{
	return kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
}
2007
/* Module exit point: unregister from the generic kvm layer. */
static void __exit vmx_exit(void)
{
	kvm_exit_arch();
}
2012
/* Hook the init/exit functions into the module loader. */
module_init(vmx_init)
module_exit(vmx_exit)
diff --git a/drivers/kvm/vmx.h b/drivers/kvm/vmx.h
new file mode 100644
index 000000000000..797278341581
--- /dev/null
+++ b/drivers/kvm/vmx.h
@@ -0,0 +1,296 @@
1#ifndef VMX_H
2#define VMX_H
3
4/*
5 * vmx.h: VMX Architecture related definitions
6 * Copyright (c) 2004, Intel Corporation.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
19 * Place - Suite 330, Boston, MA 02111-1307 USA.
20 *
21 * A few random additions are:
22 * Copyright (C) 2006 Qumranet
23 * Avi Kivity <avi@qumranet.com>
24 * Yaniv Kamay <yaniv@qumranet.com>
25 *
26 */
27
/* Bits of the primary processor-based VM-execution controls
 * (CPU_BASED_VM_EXEC_CONTROL).  Note: "OFFSETING" spelling is kept
 * as-is since other code references these names. */
#define CPU_BASED_VIRTUAL_INTR_PENDING  0x00000004
#define CPU_BASED_USE_TSC_OFFSETING     0x00000008
#define CPU_BASED_HLT_EXITING           0x00000080
#define CPU_BASED_INVDPG_EXITING        0x00000200
#define CPU_BASED_MWAIT_EXITING         0x00000400
#define CPU_BASED_RDPMC_EXITING         0x00000800
#define CPU_BASED_RDTSC_EXITING         0x00001000
#define CPU_BASED_CR8_LOAD_EXITING      0x00080000
#define CPU_BASED_CR8_STORE_EXITING     0x00100000
#define CPU_BASED_TPR_SHADOW            0x00200000
#define CPU_BASED_MOV_DR_EXITING        0x00800000
#define CPU_BASED_UNCOND_IO_EXITING     0x01000000
#define CPU_BASED_ACTIVATE_IO_BITMAP    0x02000000
#define CPU_BASED_MSR_BITMAPS           0x10000000
#define CPU_BASED_MONITOR_EXITING       0x20000000
#define CPU_BASED_PAUSE_EXITING         0x40000000

/* Bits of the pin-based VM-execution controls. */
#define PIN_BASED_EXT_INTR_MASK 0x1
#define PIN_BASED_NMI_EXITING   0x8

/* Bits of the VM-exit controls. */
#define VM_EXIT_ACK_INTR_ON_EXIT        0x00008000
#define VM_EXIT_HOST_ADD_SPACE_SIZE     0x00000200
50
51
52/* VMCS Encodings */
enum vmcs_field {
	/* 16-bit fields (selectors) */
	GUEST_ES_SELECTOR               = 0x00000800,
	GUEST_CS_SELECTOR               = 0x00000802,
	GUEST_SS_SELECTOR               = 0x00000804,
	GUEST_DS_SELECTOR               = 0x00000806,
	GUEST_FS_SELECTOR               = 0x00000808,
	GUEST_GS_SELECTOR               = 0x0000080a,
	GUEST_LDTR_SELECTOR             = 0x0000080c,
	GUEST_TR_SELECTOR               = 0x0000080e,
	HOST_ES_SELECTOR                = 0x00000c00,
	HOST_CS_SELECTOR                = 0x00000c02,
	HOST_SS_SELECTOR                = 0x00000c04,
	HOST_DS_SELECTOR                = 0x00000c06,
	HOST_FS_SELECTOR                = 0x00000c08,
	HOST_GS_SELECTOR                = 0x00000c0a,
	HOST_TR_SELECTOR                = 0x00000c0c,
	/* 64-bit fields (each followed by its _HIGH half for 32-bit access) */
	IO_BITMAP_A                     = 0x00002000,
	IO_BITMAP_A_HIGH                = 0x00002001,
	IO_BITMAP_B                     = 0x00002002,
	IO_BITMAP_B_HIGH                = 0x00002003,
	MSR_BITMAP                      = 0x00002004,
	MSR_BITMAP_HIGH                 = 0x00002005,
	VM_EXIT_MSR_STORE_ADDR          = 0x00002006,
	VM_EXIT_MSR_STORE_ADDR_HIGH     = 0x00002007,
	VM_EXIT_MSR_LOAD_ADDR           = 0x00002008,
	VM_EXIT_MSR_LOAD_ADDR_HIGH      = 0x00002009,
	VM_ENTRY_MSR_LOAD_ADDR          = 0x0000200a,
	VM_ENTRY_MSR_LOAD_ADDR_HIGH     = 0x0000200b,
	TSC_OFFSET                      = 0x00002010,
	TSC_OFFSET_HIGH                 = 0x00002011,
	VIRTUAL_APIC_PAGE_ADDR          = 0x00002012,
	VIRTUAL_APIC_PAGE_ADDR_HIGH     = 0x00002013,
	VMCS_LINK_POINTER               = 0x00002800,
	VMCS_LINK_POINTER_HIGH          = 0x00002801,
	GUEST_IA32_DEBUGCTL             = 0x00002802,
	GUEST_IA32_DEBUGCTL_HIGH        = 0x00002803,
	/* 32-bit fields */
	PIN_BASED_VM_EXEC_CONTROL       = 0x00004000,
	CPU_BASED_VM_EXEC_CONTROL       = 0x00004002,
	EXCEPTION_BITMAP                = 0x00004004,
	PAGE_FAULT_ERROR_CODE_MASK      = 0x00004006,
	PAGE_FAULT_ERROR_CODE_MATCH     = 0x00004008,
	CR3_TARGET_COUNT                = 0x0000400a,
	VM_EXIT_CONTROLS                = 0x0000400c,
	VM_EXIT_MSR_STORE_COUNT         = 0x0000400e,
	VM_EXIT_MSR_LOAD_COUNT          = 0x00004010,
	VM_ENTRY_CONTROLS               = 0x00004012,
	VM_ENTRY_MSR_LOAD_COUNT         = 0x00004014,
	VM_ENTRY_INTR_INFO_FIELD        = 0x00004016,
	VM_ENTRY_EXCEPTION_ERROR_CODE   = 0x00004018,
	VM_ENTRY_INSTRUCTION_LEN        = 0x0000401a,
	TPR_THRESHOLD                   = 0x0000401c,
	SECONDARY_VM_EXEC_CONTROL       = 0x0000401e,
	VM_INSTRUCTION_ERROR            = 0x00004400,
	VM_EXIT_REASON                  = 0x00004402,
	VM_EXIT_INTR_INFO               = 0x00004404,
	VM_EXIT_INTR_ERROR_CODE         = 0x00004406,
	IDT_VECTORING_INFO_FIELD        = 0x00004408,
	IDT_VECTORING_ERROR_CODE        = 0x0000440a,
	VM_EXIT_INSTRUCTION_LEN         = 0x0000440c,
	VMX_INSTRUCTION_INFO            = 0x0000440e,
	GUEST_ES_LIMIT                  = 0x00004800,
	GUEST_CS_LIMIT                  = 0x00004802,
	GUEST_SS_LIMIT                  = 0x00004804,
	GUEST_DS_LIMIT                  = 0x00004806,
	GUEST_FS_LIMIT                  = 0x00004808,
	GUEST_GS_LIMIT                  = 0x0000480a,
	GUEST_LDTR_LIMIT                = 0x0000480c,
	GUEST_TR_LIMIT                  = 0x0000480e,
	GUEST_GDTR_LIMIT                = 0x00004810,
	GUEST_IDTR_LIMIT                = 0x00004812,
	GUEST_ES_AR_BYTES               = 0x00004814,
	GUEST_CS_AR_BYTES               = 0x00004816,
	GUEST_SS_AR_BYTES               = 0x00004818,
	GUEST_DS_AR_BYTES               = 0x0000481a,
	GUEST_FS_AR_BYTES               = 0x0000481c,
	GUEST_GS_AR_BYTES               = 0x0000481e,
	GUEST_LDTR_AR_BYTES             = 0x00004820,
	GUEST_TR_AR_BYTES               = 0x00004822,
	GUEST_INTERRUPTIBILITY_INFO     = 0x00004824,
	GUEST_ACTIVITY_STATE            = 0X00004826,
	GUEST_SYSENTER_CS               = 0x0000482A,
	HOST_IA32_SYSENTER_CS           = 0x00004c00,
	/* natural-width fields */
	CR0_GUEST_HOST_MASK             = 0x00006000,
	CR4_GUEST_HOST_MASK             = 0x00006002,
	CR0_READ_SHADOW                 = 0x00006004,
	CR4_READ_SHADOW                 = 0x00006006,
	CR3_TARGET_VALUE0               = 0x00006008,
	CR3_TARGET_VALUE1               = 0x0000600a,
	CR3_TARGET_VALUE2               = 0x0000600c,
	CR3_TARGET_VALUE3               = 0x0000600e,
	EXIT_QUALIFICATION              = 0x00006400,
	GUEST_LINEAR_ADDRESS            = 0x0000640a,
	GUEST_CR0                       = 0x00006800,
	GUEST_CR3                       = 0x00006802,
	GUEST_CR4                       = 0x00006804,
	GUEST_ES_BASE                   = 0x00006806,
	GUEST_CS_BASE                   = 0x00006808,
	GUEST_SS_BASE                   = 0x0000680a,
	GUEST_DS_BASE                   = 0x0000680c,
	GUEST_FS_BASE                   = 0x0000680e,
	GUEST_GS_BASE                   = 0x00006810,
	GUEST_LDTR_BASE                 = 0x00006812,
	GUEST_TR_BASE                   = 0x00006814,
	GUEST_GDTR_BASE                 = 0x00006816,
	GUEST_IDTR_BASE                 = 0x00006818,
	GUEST_DR7                       = 0x0000681a,
	GUEST_RSP                       = 0x0000681c,
	GUEST_RIP                       = 0x0000681e,
	GUEST_RFLAGS                    = 0x00006820,
	GUEST_PENDING_DBG_EXCEPTIONS    = 0x00006822,
	GUEST_SYSENTER_ESP              = 0x00006824,
	GUEST_SYSENTER_EIP              = 0x00006826,
	HOST_CR0                        = 0x00006c00,
	HOST_CR3                        = 0x00006c02,
	HOST_CR4                        = 0x00006c04,
	HOST_FS_BASE                    = 0x00006c06,
	HOST_GS_BASE                    = 0x00006c08,
	HOST_TR_BASE                    = 0x00006c0a,
	HOST_GDTR_BASE                  = 0x00006c0c,
	HOST_IDTR_BASE                  = 0x00006c0e,
	HOST_IA32_SYSENTER_ESP          = 0x00006c10,
	HOST_IA32_SYSENTER_EIP          = 0x00006c12,
	HOST_RSP                        = 0x00006c14,
	HOST_RIP                        = 0x00006c16,
};
178
/* Set in VM_EXIT_REASON when the VM entry itself failed. */
#define VMX_EXIT_REASONS_FAILED_VMENTRY         0x80000000

/* Basic VM-exit reason codes (the index into kvm_vmx_exit_handlers). */
#define EXIT_REASON_EXCEPTION_NMI       0
#define EXIT_REASON_EXTERNAL_INTERRUPT  1

#define EXIT_REASON_PENDING_INTERRUPT   7

#define EXIT_REASON_TASK_SWITCH         9
#define EXIT_REASON_CPUID               10
#define EXIT_REASON_HLT                 12
#define EXIT_REASON_INVLPG              14
#define EXIT_REASON_RDPMC               15
#define EXIT_REASON_RDTSC               16
#define EXIT_REASON_VMCALL              18
#define EXIT_REASON_VMCLEAR             19
#define EXIT_REASON_VMLAUNCH            20
#define EXIT_REASON_VMPTRLD             21
#define EXIT_REASON_VMPTRST             22
#define EXIT_REASON_VMREAD              23
#define EXIT_REASON_VMRESUME            24
#define EXIT_REASON_VMWRITE             25
#define EXIT_REASON_VMOFF               26
#define EXIT_REASON_VMON                27
#define EXIT_REASON_CR_ACCESS           28
#define EXIT_REASON_DR_ACCESS           29
#define EXIT_REASON_IO_INSTRUCTION      30
#define EXIT_REASON_MSR_READ            31
#define EXIT_REASON_MSR_WRITE           32
#define EXIT_REASON_MWAIT_INSTRUCTION   36

/*
 * Interruption-information format
 * (NOTE: "DELIEVER" misspelling kept; the names are used elsewhere.)
 */
#define INTR_INFO_VECTOR_MASK           0xff            /* 7:0 */
#define INTR_INFO_INTR_TYPE_MASK        0x700           /* 10:8 */
#define INTR_INFO_DELIEVER_CODE_MASK    0x800           /* 11 */
#define INTR_INFO_VALID_MASK            0x80000000      /* 31 */

#define VECTORING_INFO_VECTOR_MASK              INTR_INFO_VECTOR_MASK
#define VECTORING_INFO_TYPE_MASK                INTR_INFO_INTR_TYPE_MASK
#define VECTORING_INFO_DELIEVER_CODE_MASK       INTR_INFO_DELIEVER_CODE_MASK
#define VECTORING_INFO_VALID_MASK               INTR_INFO_VALID_MASK

#define INTR_TYPE_EXT_INTR              (0 << 8) /* external interrupt */
#define INTR_TYPE_EXCEPTION             (3 << 8) /* processor exception */

/*
 * Exit Qualifications for MOV for Control Register Access
 */
#define CONTROL_REG_ACCESS_NUM          0x7     /* 2:0, number of control register */
#define CONTROL_REG_ACCESS_TYPE         0x30    /* 5:4, access type */
#define CONTROL_REG_ACCESS_REG          0xf00   /* 11:8, general purpose register */
#define LMSW_SOURCE_DATA_SHIFT 16
#define LMSW_SOURCE_DATA  (0xFFFF << LMSW_SOURCE_DATA_SHIFT) /* 16:31 lmsw source */
#define REG_EAX                         (0 << 8)
#define REG_ECX                         (1 << 8)
#define REG_EDX                         (2 << 8)
#define REG_EBX                         (3 << 8)
#define REG_ESP                         (4 << 8)
#define REG_EBP                         (5 << 8)
#define REG_ESI                         (6 << 8)
#define REG_EDI                         (7 << 8)
#define REG_R8                         (8 << 8)
#define REG_R9                         (9 << 8)
#define REG_R10                        (10 << 8)
#define REG_R11                        (11 << 8)
#define REG_R12                        (12 << 8)
#define REG_R13                        (13 << 8)
#define REG_R14                        (14 << 8)
#define REG_R15                        (15 << 8)
249
/*
 * Exit Qualifications for MOV for Debug Register Access
 */
#define DEBUG_REG_ACCESS_NUM            0x7     /* 2:0, number of debug register */
#define DEBUG_REG_ACCESS_TYPE           0x10    /* 4, direction of access */
#define TYPE_MOV_TO_DR                  (0 << 4)
#define TYPE_MOV_FROM_DR                (1 << 4)
#define DEBUG_REG_ACCESS_REG            0xf00   /* 11:8, general purpose register */


/* segment AR */
#define SEGMENT_AR_L_MASK (1 << 13)

/* entry controls */
#define VM_ENTRY_CONTROLS_IA32E_MASK (1 << 9)

/* Segment type field within the access-rights (AR) bytes. */
#define AR_TYPE_ACCESSES_MASK 1
#define AR_TYPE_READABLE_MASK (1 << 1)
#define AR_TYPE_WRITEABLE_MASK (1 << 2)
#define AR_TYPE_CODE_MASK (1 << 3)
#define AR_TYPE_MASK 0x0f
#define AR_TYPE_BUSY_64_TSS 11
#define AR_TYPE_BUSY_32_TSS 11
#define AR_TYPE_BUSY_16_TSS 3
#define AR_TYPE_LDT 2

/* Other access-rights bits. */
#define AR_UNUSABLE_MASK (1 << 16)
#define AR_S_MASK (1 << 4)
#define AR_P_MASK (1 << 7)
#define AR_L_MASK (1 << 13)
#define AR_DB_MASK (1 << 14)
#define AR_G_MASK (1 << 15)
#define AR_DPL_SHIFT 5
#define AR_DPL(ar) (((ar) >> AR_DPL_SHIFT) & 3)

#define AR_RESERVD_MASK 0xfffe0f00

#define CR4_VMXE 0x2000

/* VMX capability/control MSR indexes. */
#define MSR_IA32_VMX_BASIC_MSR                  0x480
#define MSR_IA32_FEATURE_CONTROL                0x03a
#define MSR_IA32_VMX_PINBASED_CTLS_MSR          0x481
#define MSR_IA32_VMX_PROCBASED_CTLS_MSR         0x482
#define MSR_IA32_VMX_EXIT_CTLS_MSR              0x483
#define MSR_IA32_VMX_ENTRY_CTLS_MSR             0x484

#endif /* VMX_H */
diff --git a/drivers/kvm/x86_emulate.c b/drivers/kvm/x86_emulate.c
new file mode 100644
index 000000000000..1bff3e925fda
--- /dev/null
+++ b/drivers/kvm/x86_emulate.c
@@ -0,0 +1,1409 @@
1/******************************************************************************
2 * x86_emulate.c
3 *
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
5 *
6 * Copyright (c) 2005 Keir Fraser
7 *
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
10 *
11 * Copyright (C) 2006 Qumranet
12 *
13 * Avi Kivity <avi@qumranet.com>
14 * Yaniv Kamay <yaniv@qumranet.com>
15 *
16 * This work is licensed under the terms of the GNU GPL, version 2. See
17 * the COPYING file in the top-level directory.
18 *
19 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
20 */
21
22#ifndef __KERNEL__
23#include <stdio.h>
24#include <stdint.h>
25#include <public/xen.h>
26#define DPRINTF(_f, _a ...) printf( _f , ## _a )
27#else
28#include "kvm.h"
29#define DPRINTF(x...) do {} while (0)
30#endif
31#include "x86_emulate.h"
32#include <linux/module.h>
33
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/*
 * Decode-attribute byte layout: bit 0 = ByteOp, bits 2:1 = destination
 * type, bits 5:3 = source type, bit 6 = ModRM present, bit 7 = Mov.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp (1<<0) /* 8-bit operands. */
/* Destination operand type. */
#define ImplicitOps (1<<1) /* Implicit in opcode. No generic decode. */
#define DstReg (2<<1) /* Register operand. */
#define DstMem (3<<1) /* Memory operand. */
#define DstMask (3<<1)
/* Source operand type. */
#define SrcNone (0<<3) /* No source operand. */
#define SrcImplicit (0<<3) /* Source operand is implicit in the opcode. */
#define SrcReg (1<<3) /* Register operand. */
#define SrcMem (2<<3) /* Memory operand. */
#define SrcMem16 (3<<3) /* Memory operand (16-bit). */
#define SrcMem32 (4<<3) /* Memory operand (32-bit). */
#define SrcImm (5<<3) /* Immediate operand. */
#define SrcImmByte (6<<3) /* 8-bit sign-extended immediate operand. */
#define SrcMask (7<<3)
/* Generic ModRM decode. */
#define ModRM (1<<6)
/* Destination is only written; never read. */
#define Mov (1<<7)
64
/*
 * Decode attributes, indexed by the first (one-byte) opcode.  A zero
 * entry means the opcode is not handled here and the emulator bails
 * out (cannot_emulate).
 */
static u8 opcode_table[256] = {
	/* 0x00 - 0x07 */
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	0, 0, 0, 0,
	/* 0x08 - 0x0F */
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	0, 0, 0, 0,
	/* 0x10 - 0x17 */
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	0, 0, 0, 0,
	/* 0x18 - 0x1F */
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	0, 0, 0, 0,
	/* 0x20 - 0x27 */
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	0, 0, 0, 0,
	/* 0x28 - 0x2F */
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	0, 0, 0, 0,
	/* 0x30 - 0x37 */
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	0, 0, 0, 0,
	/* 0x38 - 0x3F */
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	0, 0, 0, 0,
	/* 0x40 - 0x4F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x50 - 0x5F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x60 - 0x6F */
	0, 0, 0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ ,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x70 - 0x7F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x80 - 0x87 */
	ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
	ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM,
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	/* 0x88 - 0x8F */
	ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov,
	ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	0, 0, 0, DstMem | SrcNone | ModRM | Mov,
	/* 0x90 - 0x9F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xA0 - 0xA7 */
	ByteOp | DstReg | SrcMem | Mov, DstReg | SrcMem | Mov,
	ByteOp | DstMem | SrcReg | Mov, DstMem | SrcReg | Mov,
	ByteOp | ImplicitOps | Mov, ImplicitOps | Mov,
	ByteOp | ImplicitOps, ImplicitOps,
	/* 0xA8 - 0xAF */
	0, 0, ByteOp | ImplicitOps | Mov, ImplicitOps | Mov,
	ByteOp | ImplicitOps | Mov, ImplicitOps | Mov,
	ByteOp | ImplicitOps, ImplicitOps,
	/* 0xB0 - 0xBF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xC0 - 0xC7 */
	ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM, 0, 0,
	0, 0, ByteOp | DstMem | SrcImm | ModRM | Mov,
	DstMem | SrcImm | ModRM | Mov,
	/* 0xC8 - 0xCF */
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xD0 - 0xD7 */
	ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
	ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
	0, 0, 0, 0,
	/* 0xD8 - 0xDF */
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xE0 - 0xEF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xF0 - 0xF7 */
	0, 0, 0, 0,
	0, 0, ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
	/* 0xF8 - 0xFF */
	0, 0, 0, 0,
	0, 0, ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM
};
150
/*
 * Decode attributes for two-byte opcodes (first byte 0x0f), indexed by
 * the second opcode byte.  Zero entries are not emulated.
 */
static u8 twobyte_table[256] = {
	/* 0x00 - 0x0F */
	0, SrcMem | ModRM | DstReg, 0, 0, 0, 0, ImplicitOps, 0,
	0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0,
	/* 0x10 - 0x1F */
	0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
	/* 0x20 - 0x2F */
	ModRM | ImplicitOps, ModRM, ModRM | ImplicitOps, ModRM, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x30 - 0x3F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x40 - 0x47 */
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	/* 0x48 - 0x4F */
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	/* 0x50 - 0x5F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x60 - 0x6F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x70 - 0x7F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x80 - 0x8F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x90 - 0x9F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xA0 - 0xA7 */
	0, 0, 0, DstMem | SrcReg | ModRM, 0, 0, 0, 0,
	/* 0xA8 - 0xAF */
	0, 0, 0, DstMem | SrcReg | ModRM, 0, 0, 0, 0,
	/* 0xB0 - 0xB7 */
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 0,
	DstMem | SrcReg | ModRM,
	0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem16 | ModRM | Mov,
	/* 0xB8 - 0xBF */
	0, 0, DstMem | SrcImmByte | ModRM, DstMem | SrcReg | ModRM,
	0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem16 | ModRM | Mov,
	/* 0xC0 - 0xCF */
	0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xD0 - 0xDF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xE0 - 0xEF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xF0 - 0xFF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
204
205/*
206 * Tell the emulator that of the Group 7 instructions (sgdt, lidt, etc.) we
207 * are interested only in invlpg and not in any of the rest.
208 *
209 * invlpg is a special instruction in that the data it references may not
210 * be mapped.
211 */
212void kvm_emulator_want_group7_invlpg(void)
213{
214 twobyte_table[1] &= ~SrcMem;
215}
216EXPORT_SYMBOL_GPL(kvm_emulator_want_group7_invlpg);
217
/* Type, address-of, and value of an instruction's operand. */
struct operand {
	/* OP_REG: shadow register slot; OP_MEM: guest memory address;
	 * OP_IMM: immediate decoded from the instruction stream. */
	enum { OP_REG, OP_MEM, OP_IMM } type;
	unsigned int bytes;	/* operand width: 1, 2, 4 or 8 bytes */
	/* val: current value; orig_val: pre-emulation value (writeback is
	 * skipped when they match, unless the op is flagged Mov); ptr:
	 * location of the operand (register slot, or guest address). */
	unsigned long val, orig_val, *ptr;
};
224
/* EFLAGS bit definitions. */
#define EFLG_OF (1<<11)		/* overflow */
#define EFLG_DF (1<<10)		/* direction (string ops) */
#define EFLG_SF (1<<7)		/* sign */
#define EFLG_ZF (1<<6)		/* zero */
#define EFLG_AF (1<<4)		/* auxiliary carry */
#define EFLG_PF (1<<2)		/* parity */
#define EFLG_CF (1<<0)		/* carry */
233
/*
 * Instruction emulation:
 * Most instructions are emulated directly via a fragment of inline assembly
 * code. This allows us to save/restore EFLAGS and thus very easily pick up
 * any modified flags.
 */

/*
 * Note: no #else clause — on architectures other than x86-64 and i386
 * these helper strings are simply not defined.
 */
#if defined(CONFIG_X86_64)
#define _LO32 "k"		/* force 32-bit operand */
#define _STK  "%%rsp"		/* stack pointer */
#elif defined(__i386__)
#define _LO32 ""		/* force 32-bit operand */
#define _STK  "%%esp"		/* stack pointer */
#endif

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
254
/* Before executing instruction: restore necessary bits in EFLAGS. */
/*
 * _sav, _msk and _tmp are asm operand numbers passed as strings — see the
 * "0","4","2" / "0","3","2" arguments at the call sites: _sav is the saved
 * flags lvalue, _msk the EFLAGS_MASK immediate, _tmp a scratch register.
 */
#define _PRE_EFLAGS(_sav, _msk, _tmp) \
	/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); */	\
	"push %"_sav"; "					\
	"movl %"_msk",%"_LO32 _tmp"; "				\
	"andl %"_LO32 _tmp",("_STK"); "				\
	"pushf; "						\
	"notl %"_LO32 _tmp"; "					\
	"andl %"_LO32 _tmp",("_STK"); "				\
	"pop %"_tmp"; "						\
	"orl %"_LO32 _tmp",("_STK"); "				\
	"popf; "						\
	/* _sav &= ~msk; */					\
	"movl %"_msk",%"_LO32 _tmp"; "				\
	"notl %"_LO32 _tmp"; "					\
	"andl %"_LO32 _tmp",%"_sav"; "

/* After executing instruction: write-back necessary bits in EFLAGS. */
#define _POST_EFLAGS(_sav, _msk, _tmp) \
	/* _sav |= EFLAGS & _msk; */		\
	"pushf; "				\
	"pop %"_tmp"; "				\
	"andl %"_msk",%"_LO32 _tmp"; "		\
	"orl %"_LO32 _tmp",%"_sav"; "
279
/* Raw emulation: instruction has two explicit operands. */
/*
 * Emits "<op>{w,l,q} src,dst" bracketed by _PRE/_POST_EFLAGS so arithmetic
 * flags are taken from and written back to _eflags.  The (_wx,_wy),
 * (_lx,_ly) and (_qx,_qy) pairs give the operand-width modifier and asm
 * constraint for the 2-, 4- and 8-byte cases; the 8-byte case is handled
 * by __emulate_2op_8byte (a no-op on i386).
 */
#define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								    \
		unsigned long _tmp;					    \
									    \
		switch ((_dst).bytes) {					    \
		case 2:							    \
			__asm__ __volatile__ (				    \
				_PRE_EFLAGS("0","4","2")		    \
				_op"w %"_wx"3,%1; "			    \
				_POST_EFLAGS("0","4","2")		    \
				: "=m" (_eflags), "=m" ((_dst).val),	    \
				  "=&r" (_tmp)				    \
				: _wy ((_src).val), "i" (EFLAGS_MASK) );    \
			break;						    \
		case 4:							    \
			__asm__ __volatile__ (				    \
				_PRE_EFLAGS("0","4","2")		    \
				_op"l %"_lx"3,%1; "			    \
				_POST_EFLAGS("0","4","2")		    \
				: "=m" (_eflags), "=m" ((_dst).val),	    \
				  "=&r" (_tmp)				    \
				: _ly ((_src).val), "i" (EFLAGS_MASK) );    \
			break;						    \
		case 8:							    \
			__emulate_2op_8byte(_op, _src, _dst,		    \
					    _eflags, _qx, _qy);		    \
			break;						    \
		}							    \
	} while (0)
310
/*
 * Like __emulate_2op_nobyte, but additionally handles 1-byte operands
 * using the (_bx,_by) width/constraint pair.
 */
#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								     \
		unsigned long _tmp;					     \
		switch ( (_dst).bytes )					     \
		{							     \
		case 1:							     \
			__asm__ __volatile__ (				     \
				_PRE_EFLAGS("0","4","2")		     \
				_op"b %"_bx"3,%1; "			     \
				_POST_EFLAGS("0","4","2")		     \
				: "=m" (_eflags), "=m" ((_dst).val),	     \
				  "=&r" (_tmp)				     \
				: _by ((_src).val), "i" (EFLAGS_MASK) );     \
			break;						     \
		default:						     \
			__emulate_2op_nobyte(_op, _src, _dst, _eflags,	     \
					     _wx, _wy, _lx, _ly, _qx, _qy);  \
			break;						     \
		}							     \
	} while (0)
331
/* Source operand is byte-sized and may be restricted to just %cl. */
#define emulate_2op_SrcB(_op, _src, _dst, _eflags)			\
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "c", "b", "c", "b", "c", "b", "c")

/* Source operand is byte, word, long or quad sized. */
#define emulate_2op_SrcV(_op, _src, _dst, _eflags)			\
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "q", "w", "r", _LO32, "r", "", "r")

/* Source operand is word, long or quad sized. */
#define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags)		\
	__emulate_2op_nobyte(_op, _src, _dst, _eflags,			\
			     "w", "r", _LO32, "r", "", "r")
346
/* Instruction has only one explicit operand (no source operand). */
/*
 * Emits "<op>{b,w,l,q} dst" with EFLAGS save/restore; the width is chosen
 * from (_dst).bytes, the 8-byte case deferring to __emulate_1op_8byte.
 */
#define emulate_1op(_op, _dst, _eflags) \
	do {								\
		unsigned long _tmp;					\
									\
		switch ( (_dst).bytes )					\
		{							\
		case 1:							\
			__asm__ __volatile__ (				\
				_PRE_EFLAGS("0","3","2")		\
				_op"b %1; "				\
				_POST_EFLAGS("0","3","2")		\
				: "=m" (_eflags), "=m" ((_dst).val),	\
				  "=&r" (_tmp)				\
				: "i" (EFLAGS_MASK) );			\
			break;						\
		case 2:							\
			__asm__ __volatile__ (				\
				_PRE_EFLAGS("0","3","2")		\
				_op"w %1; "				\
				_POST_EFLAGS("0","3","2")		\
				: "=m" (_eflags), "=m" ((_dst).val),	\
				  "=&r" (_tmp)				\
				: "i" (EFLAGS_MASK) );			\
			break;						\
		case 4:							\
			__asm__ __volatile__ (				\
				_PRE_EFLAGS("0","3","2")		\
				_op"l %1; "				\
				_POST_EFLAGS("0","3","2")		\
				: "=m" (_eflags), "=m" ((_dst).val),	\
				  "=&r" (_tmp)				\
				: "i" (EFLAGS_MASK) );			\
			break;						\
		case 8:							\
			__emulate_1op_8byte(_op, _dst, _eflags);	\
			break;						\
		}							\
	} while (0)
386
/* Emulate an instruction with quadword operands (x86/64 only). */
/*
 * Note: these expansions use the local '_tmp' declared by the calling
 * __emulate_2op*/emulate_1op macro.  On i386 they expand to nothing —
 * the 8-byte cases are then unreachable since op_bytes never reaches 8.
 * (NOTE(review): "never reaches 8 on i386" inferred from mode setup in
 * x86_emulate_memop — confirm against the full file.)
 */
#if defined(CONFIG_X86_64)
#define __emulate_2op_8byte(_op, _src, _dst, _eflags, _qx, _qy)		  \
	do {								  \
		__asm__ __volatile__ (					  \
			_PRE_EFLAGS("0","4","2")			  \
			_op"q %"_qx"3,%1; "				  \
			_POST_EFLAGS("0","4","2")			  \
			: "=m" (_eflags), "=m" ((_dst).val), "=&r" (_tmp) \
			: _qy ((_src).val), "i" (EFLAGS_MASK) );	  \
	} while (0)

#define __emulate_1op_8byte(_op, _dst, _eflags)				  \
	do {								  \
		__asm__ __volatile__ (					  \
			_PRE_EFLAGS("0","3","2")			  \
			_op"q %1; "					  \
			_POST_EFLAGS("0","3","2")			  \
			: "=m" (_eflags), "=m" ((_dst).val), "=&r" (_tmp) \
			: "i" (EFLAGS_MASK) );				  \
	} while (0)

#elif defined(__i386__)
#define __emulate_2op_8byte(_op, _src, _dst, _eflags, _qx, _qy)
#define __emulate_1op_8byte(_op, _dst, _eflags)
#endif /* __i386__ */
413
/* Fetch next part of the instruction being emulated. */
/*
 * Statement expression: reads _size bytes at cs_base + _eip via
 * ops->read_std, advances _eip, and yields the value cast to _type.
 * Relies on the enclosing function's 'rc', 'ops', 'ctxt' locals and its
 * 'done' label — a failed read jumps straight to 'done' with rc set.
 */
#define insn_fetch(_type, _size, _eip) \
({	unsigned long _x; \
	rc = ops->read_std((unsigned long)(_eip) + ctxt->cs_base, &_x, \
			   (_size), ctxt); \
	if ( rc != 0 ) \
		goto done; \
	(_eip) += (_size); \
	(_type)_x; \
})
424
/* Access/update address held in a register, based on addressing mode. */
/*
 * Both helpers use the enclosing function's 'ad_bytes' (address size in
 * bytes): the register is truncated to the addressing width unless that
 * width equals the machine word, and increments wrap within that width
 * while preserving the register's upper bits.
 */
#define register_address(base, reg) \
	((base) + ((ad_bytes == sizeof(unsigned long)) ? (reg) : \
		   ((reg) & ((1UL << (ad_bytes << 3)) - 1))))

#define register_address_increment(reg, inc) \
	do { \
		/* signed type ensures sign extension to long */ \
		int _inc = (inc); \
		if ( ad_bytes == sizeof(unsigned long) ) \
			(reg) += _inc; \
		else \
			(reg) = ((reg) & ~((1UL << (ad_bytes << 3)) - 1)) | \
				(((reg) + _inc) & ((1UL << (ad_bytes << 3)) - 1)); \
	} while (0)
440
441void *decode_register(u8 modrm_reg, unsigned long *regs,
442 int highbyte_regs)
443{
444 void *p;
445
446 p = &regs[modrm_reg];
447 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
448 p = (unsigned char *)&regs[modrm_reg & 3] + 1;
449 return p;
450}
451
452static int read_descriptor(struct x86_emulate_ctxt *ctxt,
453 struct x86_emulate_ops *ops,
454 void *ptr,
455 u16 *size, unsigned long *address, int op_bytes)
456{
457 int rc;
458
459 if (op_bytes == 2)
460 op_bytes = 3;
461 *address = 0;
462 rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2, ctxt);
463 if (rc)
464 return rc;
465 rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes, ctxt);
466 return rc;
467}
468
469int
470x86_emulate_memop(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
471{
472 u8 b, d, sib, twobyte = 0, rex_prefix = 0;
473 u8 modrm, modrm_mod = 0, modrm_reg = 0, modrm_rm = 0;
474 unsigned long *override_base = NULL;
475 unsigned int op_bytes, ad_bytes, lock_prefix = 0, rep_prefix = 0, i;
476 int rc = 0;
477 struct operand src, dst;
478 unsigned long cr2 = ctxt->cr2;
479 int mode = ctxt->mode;
480 unsigned long modrm_ea;
481 int use_modrm_ea, index_reg = 0, base_reg = 0, scale, rip_relative = 0;
482
483 /* Shadow copy of register state. Committed on successful emulation. */
484 unsigned long _regs[NR_VCPU_REGS];
485 unsigned long _eip = ctxt->vcpu->rip, _eflags = ctxt->eflags;
486 unsigned long modrm_val = 0;
487
488 memcpy(_regs, ctxt->vcpu->regs, sizeof _regs);
489
490 switch (mode) {
491 case X86EMUL_MODE_REAL:
492 case X86EMUL_MODE_PROT16:
493 op_bytes = ad_bytes = 2;
494 break;
495 case X86EMUL_MODE_PROT32:
496 op_bytes = ad_bytes = 4;
497 break;
498#ifdef CONFIG_X86_64
499 case X86EMUL_MODE_PROT64:
500 op_bytes = 4;
501 ad_bytes = 8;
502 break;
503#endif
504 default:
505 return -1;
506 }
507
508 /* Legacy prefixes. */
509 for (i = 0; i < 8; i++) {
510 switch (b = insn_fetch(u8, 1, _eip)) {
511 case 0x66: /* operand-size override */
512 op_bytes ^= 6; /* switch between 2/4 bytes */
513 break;
514 case 0x67: /* address-size override */
515 if (mode == X86EMUL_MODE_PROT64)
516 ad_bytes ^= 12; /* switch between 4/8 bytes */
517 else
518 ad_bytes ^= 6; /* switch between 2/4 bytes */
519 break;
520 case 0x2e: /* CS override */
521 override_base = &ctxt->cs_base;
522 break;
523 case 0x3e: /* DS override */
524 override_base = &ctxt->ds_base;
525 break;
526 case 0x26: /* ES override */
527 override_base = &ctxt->es_base;
528 break;
529 case 0x64: /* FS override */
530 override_base = &ctxt->fs_base;
531 break;
532 case 0x65: /* GS override */
533 override_base = &ctxt->gs_base;
534 break;
535 case 0x36: /* SS override */
536 override_base = &ctxt->ss_base;
537 break;
538 case 0xf0: /* LOCK */
539 lock_prefix = 1;
540 break;
541 case 0xf3: /* REP/REPE/REPZ */
542 rep_prefix = 1;
543 break;
544 case 0xf2: /* REPNE/REPNZ */
545 break;
546 default:
547 goto done_prefixes;
548 }
549 }
550
551done_prefixes:
552
553 /* REX prefix. */
554 if ((mode == X86EMUL_MODE_PROT64) && ((b & 0xf0) == 0x40)) {
555 rex_prefix = b;
556 if (b & 8)
557 op_bytes = 8; /* REX.W */
558 modrm_reg = (b & 4) << 1; /* REX.R */
559 index_reg = (b & 2) << 2; /* REX.X */
560 modrm_rm = base_reg = (b & 1) << 3; /* REG.B */
561 b = insn_fetch(u8, 1, _eip);
562 }
563
564 /* Opcode byte(s). */
565 d = opcode_table[b];
566 if (d == 0) {
567 /* Two-byte opcode? */
568 if (b == 0x0f) {
569 twobyte = 1;
570 b = insn_fetch(u8, 1, _eip);
571 d = twobyte_table[b];
572 }
573
574 /* Unrecognised? */
575 if (d == 0)
576 goto cannot_emulate;
577 }
578
579 /* ModRM and SIB bytes. */
580 if (d & ModRM) {
581 modrm = insn_fetch(u8, 1, _eip);
582 modrm_mod |= (modrm & 0xc0) >> 6;
583 modrm_reg |= (modrm & 0x38) >> 3;
584 modrm_rm |= (modrm & 0x07);
585 modrm_ea = 0;
586 use_modrm_ea = 1;
587
588 if (modrm_mod == 3) {
589 modrm_val = *(unsigned long *)
590 decode_register(modrm_rm, _regs, d & ByteOp);
591 goto modrm_done;
592 }
593
594 if (ad_bytes == 2) {
595 unsigned bx = _regs[VCPU_REGS_RBX];
596 unsigned bp = _regs[VCPU_REGS_RBP];
597 unsigned si = _regs[VCPU_REGS_RSI];
598 unsigned di = _regs[VCPU_REGS_RDI];
599
600 /* 16-bit ModR/M decode. */
601 switch (modrm_mod) {
602 case 0:
603 if (modrm_rm == 6)
604 modrm_ea += insn_fetch(u16, 2, _eip);
605 break;
606 case 1:
607 modrm_ea += insn_fetch(s8, 1, _eip);
608 break;
609 case 2:
610 modrm_ea += insn_fetch(u16, 2, _eip);
611 break;
612 }
613 switch (modrm_rm) {
614 case 0:
615 modrm_ea += bx + si;
616 break;
617 case 1:
618 modrm_ea += bx + di;
619 break;
620 case 2:
621 modrm_ea += bp + si;
622 break;
623 case 3:
624 modrm_ea += bp + di;
625 break;
626 case 4:
627 modrm_ea += si;
628 break;
629 case 5:
630 modrm_ea += di;
631 break;
632 case 6:
633 if (modrm_mod != 0)
634 modrm_ea += bp;
635 break;
636 case 7:
637 modrm_ea += bx;
638 break;
639 }
640 if (modrm_rm == 2 || modrm_rm == 3 ||
641 (modrm_rm == 6 && modrm_mod != 0))
642 if (!override_base)
643 override_base = &ctxt->ss_base;
644 modrm_ea = (u16)modrm_ea;
645 } else {
646 /* 32/64-bit ModR/M decode. */
647 switch (modrm_rm) {
648 case 4:
649 case 12:
650 sib = insn_fetch(u8, 1, _eip);
651 index_reg |= (sib >> 3) & 7;
652 base_reg |= sib & 7;
653 scale = sib >> 6;
654
655 switch (base_reg) {
656 case 5:
657 if (modrm_mod != 0)
658 modrm_ea += _regs[base_reg];
659 else
660 modrm_ea += insn_fetch(s32, 4, _eip);
661 break;
662 default:
663 modrm_ea += _regs[base_reg];
664 }
665 switch (index_reg) {
666 case 4:
667 break;
668 default:
669 modrm_ea += _regs[index_reg] << scale;
670
671 }
672 break;
673 case 5:
674 if (modrm_mod != 0)
675 modrm_ea += _regs[modrm_rm];
676 else if (mode == X86EMUL_MODE_PROT64)
677 rip_relative = 1;
678 break;
679 default:
680 modrm_ea += _regs[modrm_rm];
681 break;
682 }
683 switch (modrm_mod) {
684 case 0:
685 if (modrm_rm == 5)
686 modrm_ea += insn_fetch(s32, 4, _eip);
687 break;
688 case 1:
689 modrm_ea += insn_fetch(s8, 1, _eip);
690 break;
691 case 2:
692 modrm_ea += insn_fetch(s32, 4, _eip);
693 break;
694 }
695 }
696 if (!override_base)
697 override_base = &ctxt->ds_base;
698 if (mode == X86EMUL_MODE_PROT64 &&
699 override_base != &ctxt->fs_base &&
700 override_base != &ctxt->gs_base)
701 override_base = NULL;
702
703 if (override_base)
704 modrm_ea += *override_base;
705
706 if (rip_relative) {
707 modrm_ea += _eip;
708 switch (d & SrcMask) {
709 case SrcImmByte:
710 modrm_ea += 1;
711 break;
712 case SrcImm:
713 if (d & ByteOp)
714 modrm_ea += 1;
715 else
716 if (op_bytes == 8)
717 modrm_ea += 4;
718 else
719 modrm_ea += op_bytes;
720 }
721 }
722 if (ad_bytes != 8)
723 modrm_ea = (u32)modrm_ea;
724 cr2 = modrm_ea;
725 modrm_done:
726 ;
727 }
728
729 /* Decode and fetch the destination operand: register or memory. */
730 switch (d & DstMask) {
731 case ImplicitOps:
732 /* Special instructions do their own operand decoding. */
733 goto special_insn;
734 case DstReg:
735 dst.type = OP_REG;
736 if ((d & ByteOp)
737 && !(twobyte_table && (b == 0xb6 || b == 0xb7))) {
738 dst.ptr = decode_register(modrm_reg, _regs,
739 (rex_prefix == 0));
740 dst.val = *(u8 *) dst.ptr;
741 dst.bytes = 1;
742 } else {
743 dst.ptr = decode_register(modrm_reg, _regs, 0);
744 switch ((dst.bytes = op_bytes)) {
745 case 2:
746 dst.val = *(u16 *)dst.ptr;
747 break;
748 case 4:
749 dst.val = *(u32 *)dst.ptr;
750 break;
751 case 8:
752 dst.val = *(u64 *)dst.ptr;
753 break;
754 }
755 }
756 break;
757 case DstMem:
758 dst.type = OP_MEM;
759 dst.ptr = (unsigned long *)cr2;
760 dst.bytes = (d & ByteOp) ? 1 : op_bytes;
761 if (!(d & Mov) && /* optimisation - avoid slow emulated read */
762 ((rc = ops->read_emulated((unsigned long)dst.ptr,
763 &dst.val, dst.bytes, ctxt)) != 0))
764 goto done;
765 break;
766 }
767 dst.orig_val = dst.val;
768
769 /*
770 * Decode and fetch the source operand: register, memory
771 * or immediate.
772 */
773 switch (d & SrcMask) {
774 case SrcNone:
775 break;
776 case SrcReg:
777 src.type = OP_REG;
778 if (d & ByteOp) {
779 src.ptr = decode_register(modrm_reg, _regs,
780 (rex_prefix == 0));
781 src.val = src.orig_val = *(u8 *) src.ptr;
782 src.bytes = 1;
783 } else {
784 src.ptr = decode_register(modrm_reg, _regs, 0);
785 switch ((src.bytes = op_bytes)) {
786 case 2:
787 src.val = src.orig_val = *(u16 *) src.ptr;
788 break;
789 case 4:
790 src.val = src.orig_val = *(u32 *) src.ptr;
791 break;
792 case 8:
793 src.val = src.orig_val = *(u64 *) src.ptr;
794 break;
795 }
796 }
797 break;
798 case SrcMem16:
799 src.bytes = 2;
800 goto srcmem_common;
801 case SrcMem32:
802 src.bytes = 4;
803 goto srcmem_common;
804 case SrcMem:
805 src.bytes = (d & ByteOp) ? 1 : op_bytes;
806 srcmem_common:
807 src.type = OP_MEM;
808 src.ptr = (unsigned long *)cr2;
809 if ((rc = ops->read_emulated((unsigned long)src.ptr,
810 &src.val, src.bytes, ctxt)) != 0)
811 goto done;
812 src.orig_val = src.val;
813 break;
814 case SrcImm:
815 src.type = OP_IMM;
816 src.ptr = (unsigned long *)_eip;
817 src.bytes = (d & ByteOp) ? 1 : op_bytes;
818 if (src.bytes == 8)
819 src.bytes = 4;
820 /* NB. Immediates are sign-extended as necessary. */
821 switch (src.bytes) {
822 case 1:
823 src.val = insn_fetch(s8, 1, _eip);
824 break;
825 case 2:
826 src.val = insn_fetch(s16, 2, _eip);
827 break;
828 case 4:
829 src.val = insn_fetch(s32, 4, _eip);
830 break;
831 }
832 break;
833 case SrcImmByte:
834 src.type = OP_IMM;
835 src.ptr = (unsigned long *)_eip;
836 src.bytes = 1;
837 src.val = insn_fetch(s8, 1, _eip);
838 break;
839 }
840
841 if (twobyte)
842 goto twobyte_insn;
843
844 switch (b) {
845 case 0x00 ... 0x05:
846 add: /* add */
847 emulate_2op_SrcV("add", src, dst, _eflags);
848 break;
849 case 0x08 ... 0x0d:
850 or: /* or */
851 emulate_2op_SrcV("or", src, dst, _eflags);
852 break;
853 case 0x10 ... 0x15:
854 adc: /* adc */
855 emulate_2op_SrcV("adc", src, dst, _eflags);
856 break;
857 case 0x18 ... 0x1d:
858 sbb: /* sbb */
859 emulate_2op_SrcV("sbb", src, dst, _eflags);
860 break;
861 case 0x20 ... 0x25:
862 and: /* and */
863 emulate_2op_SrcV("and", src, dst, _eflags);
864 break;
865 case 0x28 ... 0x2d:
866 sub: /* sub */
867 emulate_2op_SrcV("sub", src, dst, _eflags);
868 break;
869 case 0x30 ... 0x35:
870 xor: /* xor */
871 emulate_2op_SrcV("xor", src, dst, _eflags);
872 break;
873 case 0x38 ... 0x3d:
874 cmp: /* cmp */
875 emulate_2op_SrcV("cmp", src, dst, _eflags);
876 break;
877 case 0x63: /* movsxd */
878 if (mode != X86EMUL_MODE_PROT64)
879 goto cannot_emulate;
880 dst.val = (s32) src.val;
881 break;
882 case 0x80 ... 0x83: /* Grp1 */
883 switch (modrm_reg) {
884 case 0:
885 goto add;
886 case 1:
887 goto or;
888 case 2:
889 goto adc;
890 case 3:
891 goto sbb;
892 case 4:
893 goto and;
894 case 5:
895 goto sub;
896 case 6:
897 goto xor;
898 case 7:
899 goto cmp;
900 }
901 break;
902 case 0x84 ... 0x85:
903 test: /* test */
904 emulate_2op_SrcV("test", src, dst, _eflags);
905 break;
906 case 0x86 ... 0x87: /* xchg */
907 /* Write back the register source. */
908 switch (dst.bytes) {
909 case 1:
910 *(u8 *) src.ptr = (u8) dst.val;
911 break;
912 case 2:
913 *(u16 *) src.ptr = (u16) dst.val;
914 break;
915 case 4:
916 *src.ptr = (u32) dst.val;
917 break; /* 64b reg: zero-extend */
918 case 8:
919 *src.ptr = dst.val;
920 break;
921 }
922 /*
923 * Write back the memory destination with implicit LOCK
924 * prefix.
925 */
926 dst.val = src.val;
927 lock_prefix = 1;
928 break;
929 case 0xa0 ... 0xa1: /* mov */
930 dst.ptr = (unsigned long *)&_regs[VCPU_REGS_RAX];
931 dst.val = src.val;
932 _eip += ad_bytes; /* skip src displacement */
933 break;
934 case 0xa2 ... 0xa3: /* mov */
935 dst.val = (unsigned long)_regs[VCPU_REGS_RAX];
936 _eip += ad_bytes; /* skip dst displacement */
937 break;
938 case 0x88 ... 0x8b: /* mov */
939 case 0xc6 ... 0xc7: /* mov (sole member of Grp11) */
940 dst.val = src.val;
941 break;
942 case 0x8f: /* pop (sole member of Grp1a) */
943 /* 64-bit mode: POP always pops a 64-bit operand. */
944 if (mode == X86EMUL_MODE_PROT64)
945 dst.bytes = 8;
946 if ((rc = ops->read_std(register_address(ctxt->ss_base,
947 _regs[VCPU_REGS_RSP]),
948 &dst.val, dst.bytes, ctxt)) != 0)
949 goto done;
950 register_address_increment(_regs[VCPU_REGS_RSP], dst.bytes);
951 break;
952 case 0xc0 ... 0xc1:
953 grp2: /* Grp2 */
954 switch (modrm_reg) {
955 case 0: /* rol */
956 emulate_2op_SrcB("rol", src, dst, _eflags);
957 break;
958 case 1: /* ror */
959 emulate_2op_SrcB("ror", src, dst, _eflags);
960 break;
961 case 2: /* rcl */
962 emulate_2op_SrcB("rcl", src, dst, _eflags);
963 break;
964 case 3: /* rcr */
965 emulate_2op_SrcB("rcr", src, dst, _eflags);
966 break;
967 case 4: /* sal/shl */
968 case 6: /* sal/shl */
969 emulate_2op_SrcB("sal", src, dst, _eflags);
970 break;
971 case 5: /* shr */
972 emulate_2op_SrcB("shr", src, dst, _eflags);
973 break;
974 case 7: /* sar */
975 emulate_2op_SrcB("sar", src, dst, _eflags);
976 break;
977 }
978 break;
979 case 0xd0 ... 0xd1: /* Grp2 */
980 src.val = 1;
981 goto grp2;
982 case 0xd2 ... 0xd3: /* Grp2 */
983 src.val = _regs[VCPU_REGS_RCX];
984 goto grp2;
985 case 0xf6 ... 0xf7: /* Grp3 */
986 switch (modrm_reg) {
987 case 0 ... 1: /* test */
988 /*
989 * Special case in Grp3: test has an immediate
990 * source operand.
991 */
992 src.type = OP_IMM;
993 src.ptr = (unsigned long *)_eip;
994 src.bytes = (d & ByteOp) ? 1 : op_bytes;
995 if (src.bytes == 8)
996 src.bytes = 4;
997 switch (src.bytes) {
998 case 1:
999 src.val = insn_fetch(s8, 1, _eip);
1000 break;
1001 case 2:
1002 src.val = insn_fetch(s16, 2, _eip);
1003 break;
1004 case 4:
1005 src.val = insn_fetch(s32, 4, _eip);
1006 break;
1007 }
1008 goto test;
1009 case 2: /* not */
1010 dst.val = ~dst.val;
1011 break;
1012 case 3: /* neg */
1013 emulate_1op("neg", dst, _eflags);
1014 break;
1015 default:
1016 goto cannot_emulate;
1017 }
1018 break;
1019 case 0xfe ... 0xff: /* Grp4/Grp5 */
1020 switch (modrm_reg) {
1021 case 0: /* inc */
1022 emulate_1op("inc", dst, _eflags);
1023 break;
1024 case 1: /* dec */
1025 emulate_1op("dec", dst, _eflags);
1026 break;
1027 case 6: /* push */
1028 /* 64-bit mode: PUSH always pushes a 64-bit operand. */
1029 if (mode == X86EMUL_MODE_PROT64) {
1030 dst.bytes = 8;
1031 if ((rc = ops->read_std((unsigned long)dst.ptr,
1032 &dst.val, 8,
1033 ctxt)) != 0)
1034 goto done;
1035 }
1036 register_address_increment(_regs[VCPU_REGS_RSP],
1037 -dst.bytes);
1038 if ((rc = ops->write_std(
1039 register_address(ctxt->ss_base,
1040 _regs[VCPU_REGS_RSP]),
1041 dst.val, dst.bytes, ctxt)) != 0)
1042 goto done;
1043 dst.val = dst.orig_val; /* skanky: disable writeback */
1044 break;
1045 default:
1046 goto cannot_emulate;
1047 }
1048 break;
1049 }
1050
1051writeback:
1052 if ((d & Mov) || (dst.orig_val != dst.val)) {
1053 switch (dst.type) {
1054 case OP_REG:
1055 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1056 switch (dst.bytes) {
1057 case 1:
1058 *(u8 *)dst.ptr = (u8)dst.val;
1059 break;
1060 case 2:
1061 *(u16 *)dst.ptr = (u16)dst.val;
1062 break;
1063 case 4:
1064 *dst.ptr = (u32)dst.val;
1065 break; /* 64b: zero-ext */
1066 case 8:
1067 *dst.ptr = dst.val;
1068 break;
1069 }
1070 break;
1071 case OP_MEM:
1072 if (lock_prefix)
1073 rc = ops->cmpxchg_emulated((unsigned long)dst.
1074 ptr, dst.orig_val,
1075 dst.val, dst.bytes,
1076 ctxt);
1077 else
1078 rc = ops->write_emulated((unsigned long)dst.ptr,
1079 dst.val, dst.bytes,
1080 ctxt);
1081 if (rc != 0)
1082 goto done;
1083 default:
1084 break;
1085 }
1086 }
1087
1088 /* Commit shadow register state. */
1089 memcpy(ctxt->vcpu->regs, _regs, sizeof _regs);
1090 ctxt->eflags = _eflags;
1091 ctxt->vcpu->rip = _eip;
1092
1093done:
1094 return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
1095
1096special_insn:
1097 if (twobyte)
1098 goto twobyte_special_insn;
1099 if (rep_prefix) {
1100 if (_regs[VCPU_REGS_RCX] == 0) {
1101 ctxt->vcpu->rip = _eip;
1102 goto done;
1103 }
1104 _regs[VCPU_REGS_RCX]--;
1105 _eip = ctxt->vcpu->rip;
1106 }
1107 switch (b) {
1108 case 0xa4 ... 0xa5: /* movs */
1109 dst.type = OP_MEM;
1110 dst.bytes = (d & ByteOp) ? 1 : op_bytes;
1111 dst.ptr = (unsigned long *)register_address(ctxt->es_base,
1112 _regs[VCPU_REGS_RDI]);
1113 if ((rc = ops->read_emulated(register_address(
1114 override_base ? *override_base : ctxt->ds_base,
1115 _regs[VCPU_REGS_RSI]), &dst.val, dst.bytes, ctxt)) != 0)
1116 goto done;
1117 register_address_increment(_regs[VCPU_REGS_RSI],
1118 (_eflags & EFLG_DF) ? -dst.bytes : dst.bytes);
1119 register_address_increment(_regs[VCPU_REGS_RDI],
1120 (_eflags & EFLG_DF) ? -dst.bytes : dst.bytes);
1121 break;
1122 case 0xa6 ... 0xa7: /* cmps */
1123 DPRINTF("Urk! I don't handle CMPS.\n");
1124 goto cannot_emulate;
1125 case 0xaa ... 0xab: /* stos */
1126 dst.type = OP_MEM;
1127 dst.bytes = (d & ByteOp) ? 1 : op_bytes;
1128 dst.ptr = (unsigned long *)cr2;
1129 dst.val = _regs[VCPU_REGS_RAX];
1130 register_address_increment(_regs[VCPU_REGS_RDI],
1131 (_eflags & EFLG_DF) ? -dst.bytes : dst.bytes);
1132 break;
1133 case 0xac ... 0xad: /* lods */
1134 dst.type = OP_REG;
1135 dst.bytes = (d & ByteOp) ? 1 : op_bytes;
1136 dst.ptr = (unsigned long *)&_regs[VCPU_REGS_RAX];
1137 if ((rc = ops->read_emulated(cr2, &dst.val, dst.bytes, ctxt)) != 0)
1138 goto done;
1139 register_address_increment(_regs[VCPU_REGS_RSI],
1140 (_eflags & EFLG_DF) ? -dst.bytes : dst.bytes);
1141 break;
1142 case 0xae ... 0xaf: /* scas */
1143 DPRINTF("Urk! I don't handle SCAS.\n");
1144 goto cannot_emulate;
1145 }
1146 goto writeback;
1147
1148twobyte_insn:
1149 switch (b) {
1150 case 0x01: /* lgdt, lidt, lmsw */
1151 switch (modrm_reg) {
1152 u16 size;
1153 unsigned long address;
1154
1155 case 2: /* lgdt */
1156 rc = read_descriptor(ctxt, ops, src.ptr,
1157 &size, &address, op_bytes);
1158 if (rc)
1159 goto done;
1160 realmode_lgdt(ctxt->vcpu, size, address);
1161 break;
1162 case 3: /* lidt */
1163 rc = read_descriptor(ctxt, ops, src.ptr,
1164 &size, &address, op_bytes);
1165 if (rc)
1166 goto done;
1167 realmode_lidt(ctxt->vcpu, size, address);
1168 break;
1169 case 4: /* smsw */
1170 if (modrm_mod != 3)
1171 goto cannot_emulate;
1172 *(u16 *)&_regs[modrm_rm]
1173 = realmode_get_cr(ctxt->vcpu, 0);
1174 break;
1175 case 6: /* lmsw */
1176 if (modrm_mod != 3)
1177 goto cannot_emulate;
1178 realmode_lmsw(ctxt->vcpu, (u16)modrm_val, &_eflags);
1179 break;
1180 case 7: /* invlpg*/
1181 emulate_invlpg(ctxt->vcpu, cr2);
1182 break;
1183 default:
1184 goto cannot_emulate;
1185 }
1186 break;
1187 case 0x21: /* mov from dr to reg */
1188 if (modrm_mod != 3)
1189 goto cannot_emulate;
1190 rc = emulator_get_dr(ctxt, modrm_reg, &_regs[modrm_rm]);
1191 break;
1192 case 0x23: /* mov from reg to dr */
1193 if (modrm_mod != 3)
1194 goto cannot_emulate;
1195 rc = emulator_set_dr(ctxt, modrm_reg, _regs[modrm_rm]);
1196 break;
1197 case 0x40 ... 0x4f: /* cmov */
1198 dst.val = dst.orig_val = src.val;
1199 d &= ~Mov; /* default to no move */
1200 /*
1201 * First, assume we're decoding an even cmov opcode
1202 * (lsb == 0).
1203 */
1204 switch ((b & 15) >> 1) {
1205 case 0: /* cmovo */
1206 d |= (_eflags & EFLG_OF) ? Mov : 0;
1207 break;
1208 case 1: /* cmovb/cmovc/cmovnae */
1209 d |= (_eflags & EFLG_CF) ? Mov : 0;
1210 break;
1211 case 2: /* cmovz/cmove */
1212 d |= (_eflags & EFLG_ZF) ? Mov : 0;
1213 break;
1214 case 3: /* cmovbe/cmovna */
1215 d |= (_eflags & (EFLG_CF | EFLG_ZF)) ? Mov : 0;
1216 break;
1217 case 4: /* cmovs */
1218 d |= (_eflags & EFLG_SF) ? Mov : 0;
1219 break;
1220 case 5: /* cmovp/cmovpe */
1221 d |= (_eflags & EFLG_PF) ? Mov : 0;
1222 break;
1223 case 7: /* cmovle/cmovng */
1224 d |= (_eflags & EFLG_ZF) ? Mov : 0;
1225 /* fall through */
1226 case 6: /* cmovl/cmovnge */
1227 d |= (!(_eflags & EFLG_SF) !=
1228 !(_eflags & EFLG_OF)) ? Mov : 0;
1229 break;
1230 }
1231 /* Odd cmov opcodes (lsb == 1) have inverted sense. */
1232 d ^= (b & 1) ? Mov : 0;
1233 break;
1234 case 0xb0 ... 0xb1: /* cmpxchg */
1235 /*
1236 * Save real source value, then compare EAX against
1237 * destination.
1238 */
1239 src.orig_val = src.val;
1240 src.val = _regs[VCPU_REGS_RAX];
1241 emulate_2op_SrcV("cmp", src, dst, _eflags);
1242 /* Always write back. The question is: where to? */
1243 d |= Mov;
1244 if (_eflags & EFLG_ZF) {
1245 /* Success: write back to memory. */
1246 dst.val = src.orig_val;
1247 } else {
1248 /* Failure: write the value we saw to EAX. */
1249 dst.type = OP_REG;
1250 dst.ptr = (unsigned long *)&_regs[VCPU_REGS_RAX];
1251 }
1252 break;
1253 case 0xa3:
1254 bt: /* bt */
1255 src.val &= (dst.bytes << 3) - 1; /* only subword offset */
1256 emulate_2op_SrcV_nobyte("bt", src, dst, _eflags);
1257 break;
1258 case 0xb3:
1259 btr: /* btr */
1260 src.val &= (dst.bytes << 3) - 1; /* only subword offset */
1261 emulate_2op_SrcV_nobyte("btr", src, dst, _eflags);
1262 break;
1263 case 0xab:
1264 bts: /* bts */
1265 src.val &= (dst.bytes << 3) - 1; /* only subword offset */
1266 emulate_2op_SrcV_nobyte("bts", src, dst, _eflags);
1267 break;
1268 case 0xb6 ... 0xb7: /* movzx */
1269 dst.bytes = op_bytes;
1270 dst.val = (d & ByteOp) ? (u8) src.val : (u16) src.val;
1271 break;
1272 case 0xbb:
1273 btc: /* btc */
1274 src.val &= (dst.bytes << 3) - 1; /* only subword offset */
1275 emulate_2op_SrcV_nobyte("btc", src, dst, _eflags);
1276 break;
1277 case 0xba: /* Grp8 */
1278 switch (modrm_reg & 3) {
1279 case 0:
1280 goto bt;
1281 case 1:
1282 goto bts;
1283 case 2:
1284 goto btr;
1285 case 3:
1286 goto btc;
1287 }
1288 break;
1289 case 0xbe ... 0xbf: /* movsx */
1290 dst.bytes = op_bytes;
1291 dst.val = (d & ByteOp) ? (s8) src.val : (s16) src.val;
1292 break;
1293 }
1294 goto writeback;
1295
1296twobyte_special_insn:
1297 /* Disable writeback. */
1298 dst.orig_val = dst.val;
1299 switch (b) {
1300 case 0x0d: /* GrpP (prefetch) */
1301 case 0x18: /* Grp16 (prefetch/nop) */
1302 break;
1303 case 0x06:
1304 emulate_clts(ctxt->vcpu);
1305 break;
1306 case 0x20: /* mov cr, reg */
1307 if (modrm_mod != 3)
1308 goto cannot_emulate;
1309 _regs[modrm_rm] = realmode_get_cr(ctxt->vcpu, modrm_reg);
1310 break;
1311 case 0x22: /* mov reg, cr */
1312 if (modrm_mod != 3)
1313 goto cannot_emulate;
1314 realmode_set_cr(ctxt->vcpu, modrm_reg, modrm_val, &_eflags);
1315 break;
1316 case 0xc7: /* Grp9 (cmpxchg8b) */
1317#if defined(__i386__)
1318 {
1319 unsigned long old_lo, old_hi;
1320 if (((rc = ops->read_emulated(cr2 + 0, &old_lo, 4,
1321 ctxt)) != 0)
1322 || ((rc = ops->read_emulated(cr2 + 4, &old_hi, 4,
1323 ctxt)) != 0))
1324 goto done;
1325 if ((old_lo != _regs[VCPU_REGS_RAX])
1326 || (old_hi != _regs[VCPU_REGS_RDI])) {
1327 _regs[VCPU_REGS_RAX] = old_lo;
1328 _regs[VCPU_REGS_RDX] = old_hi;
1329 _eflags &= ~EFLG_ZF;
1330 } else if (ops->cmpxchg8b_emulated == NULL) {
1331 rc = X86EMUL_UNHANDLEABLE;
1332 goto done;
1333 } else {
1334 if ((rc = ops->cmpxchg8b_emulated(cr2, old_lo,
1335 old_hi,
1336 _regs[VCPU_REGS_RBX],
1337 _regs[VCPU_REGS_RCX],
1338 ctxt)) != 0)
1339 goto done;
1340 _eflags |= EFLG_ZF;
1341 }
1342 break;
1343 }
1344#elif defined(CONFIG_X86_64)
1345 {
1346 unsigned long old, new;
1347 if ((rc = ops->read_emulated(cr2, &old, 8, ctxt)) != 0)
1348 goto done;
1349 if (((u32) (old >> 0) != (u32) _regs[VCPU_REGS_RAX]) ||
1350 ((u32) (old >> 32) != (u32) _regs[VCPU_REGS_RDX])) {
1351 _regs[VCPU_REGS_RAX] = (u32) (old >> 0);
1352 _regs[VCPU_REGS_RDX] = (u32) (old >> 32);
1353 _eflags &= ~EFLG_ZF;
1354 } else {
1355 new = (_regs[VCPU_REGS_RCX] << 32) | (u32) _regs[VCPU_REGS_RBX];
1356 if ((rc = ops->cmpxchg_emulated(cr2, old,
1357 new, 8, ctxt)) != 0)
1358 goto done;
1359 _eflags |= EFLG_ZF;
1360 }
1361 break;
1362 }
1363#endif
1364 }
1365 goto writeback;
1366
1367cannot_emulate:
1368 DPRINTF("Cannot emulate %02x\n", b);
1369 return -1;
1370}
1371
1372#ifdef __XEN__
1373
1374#include <asm/mm.h>
1375#include <asm/uaccess.h>
1376
1377int
1378x86_emulate_read_std(unsigned long addr,
1379 unsigned long *val,
1380 unsigned int bytes, struct x86_emulate_ctxt *ctxt)
1381{
1382 unsigned int rc;
1383
1384 *val = 0;
1385
1386 if ((rc = copy_from_user((void *)val, (void *)addr, bytes)) != 0) {
1387 propagate_page_fault(addr + bytes - rc, 0); /* read fault */
1388 return X86EMUL_PROPAGATE_FAULT;
1389 }
1390
1391 return X86EMUL_CONTINUE;
1392}
1393
1394int
1395x86_emulate_write_std(unsigned long addr,
1396 unsigned long val,
1397 unsigned int bytes, struct x86_emulate_ctxt *ctxt)
1398{
1399 unsigned int rc;
1400
1401 if ((rc = copy_to_user((void *)addr, (void *)&val, bytes)) != 0) {
1402 propagate_page_fault(addr + bytes - rc, PGERR_write_access);
1403 return X86EMUL_PROPAGATE_FAULT;
1404 }
1405
1406 return X86EMUL_CONTINUE;
1407}
1408
1409#endif
diff --git a/drivers/kvm/x86_emulate.h b/drivers/kvm/x86_emulate.h
new file mode 100644
index 000000000000..5d41bd55125e
--- /dev/null
+++ b/drivers/kvm/x86_emulate.h
@@ -0,0 +1,185 @@
1/******************************************************************************
2 * x86_emulate.h
3 *
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
5 *
6 * Copyright (c) 2005 Keir Fraser
7 *
8 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
9 */
10
11#ifndef __X86_EMULATE_H__
12#define __X86_EMULATE_H__
13
14struct x86_emulate_ctxt;
15
16/*
17 * x86_emulate_ops:
18 *
19 * These operations represent the instruction emulator's interface to memory.
20 * There are two categories of operation: those that act on ordinary memory
21 * regions (*_std), and those that act on memory regions known to require
22 * special treatment or emulation (*_emulated).
23 *
24 * The emulator assumes that an instruction accesses only one 'emulated memory'
25 * location, that this location is the given linear faulting address (cr2), and
26 * that this is one of the instruction's data operands. Instruction fetches and
27 * stack operations are assumed never to access emulated memory. The emulator
28 * automatically deduces which operand of a string-move operation is accessing
29 * emulated memory, and assumes that the other operand accesses normal memory.
30 *
31 * NOTES:
32 * 1. The emulator isn't very smart about emulated vs. standard memory.
33 * 'Emulated memory' access addresses should be checked for sanity.
34 * 'Normal memory' accesses may fault, and the caller must arrange to
35 * detect and handle reentrancy into the emulator via recursive faults.
36 * Accesses may be unaligned and may cross page boundaries.
37 * 2. If the access fails (cannot emulate, or a standard access faults) then
38 * it is up to the memop to propagate the fault to the guest VM via
39 * some out-of-band mechanism, unknown to the emulator. The memop signals
40 * failure by returning X86EMUL_PROPAGATE_FAULT to the emulator, which will
41 * then immediately bail.
42 * 3. Valid access sizes are 1, 2, 4 and 8 bytes. On x86/32 systems only
43 * cmpxchg8b_emulated need support 8-byte accesses.
44 * 4. The emulator cannot handle 64-bit mode emulation on an x86/32 system.
45 */
46/* Access completed successfully: continue emulation as normal. */
47#define X86EMUL_CONTINUE 0
48/* Access is unhandleable: bail from emulation and return error to caller. */
49#define X86EMUL_UNHANDLEABLE 1
50/* Terminate emulation but return success to the caller. */
51#define X86EMUL_PROPAGATE_FAULT 2 /* propagate a generated fault to guest */
52#define X86EMUL_RETRY_INSTR 2 /* retry the instruction for some reason */
53#define X86EMUL_CMPXCHG_FAILED 2 /* cmpxchg did not see expected value */
54struct x86_emulate_ops {
55 /*
56 * read_std: Read bytes of standard (non-emulated/special) memory.
57 * Used for instruction fetch, stack operations, and others.
58 * @addr: [IN ] Linear address from which to read.
59 * @val: [OUT] Value read from memory, zero-extended to 'u_long'.
60 * @bytes: [IN ] Number of bytes to read from memory.
61 */
62 int (*read_std)(unsigned long addr,
63 unsigned long *val,
64 unsigned int bytes, struct x86_emulate_ctxt * ctxt);
65
66 /*
67 * write_std: Write bytes of standard (non-emulated/special) memory.
68 * Used for stack operations, and others.
69 * @addr: [IN ] Linear address to which to write.
70 * @val: [IN ] Value to write to memory (low-order bytes used as
71 * required).
72 * @bytes: [IN ] Number of bytes to write to memory.
73 */
74 int (*write_std)(unsigned long addr,
75 unsigned long val,
76 unsigned int bytes, struct x86_emulate_ctxt * ctxt);
77
78 /*
79 * read_emulated: Read bytes from emulated/special memory area.
80 * @addr: [IN ] Linear address from which to read.
81 * @val: [OUT] Value read from memory, zero-extended to 'u_long'.
82 * @bytes: [IN ] Number of bytes to read from memory.
83 */
84 int (*read_emulated) (unsigned long addr,
85 unsigned long *val,
86 unsigned int bytes,
87 struct x86_emulate_ctxt * ctxt);
88
89 /*
90 * write_emulated: Read bytes from emulated/special memory area.
91 * @addr: [IN ] Linear address to which to write.
92 * @val: [IN ] Value to write to memory (low-order bytes used as
93 * required).
94 * @bytes: [IN ] Number of bytes to write to memory.
95 */
96 int (*write_emulated) (unsigned long addr,
97 unsigned long val,
98 unsigned int bytes,
99 struct x86_emulate_ctxt * ctxt);
100
101 /*
102 * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an
103 * emulated/special memory area.
104 * @addr: [IN ] Linear address to access.
105 * @old: [IN ] Value expected to be current at @addr.
106 * @new: [IN ] Value to write to @addr.
107 * @bytes: [IN ] Number of bytes to access using CMPXCHG.
108 */
109 int (*cmpxchg_emulated) (unsigned long addr,
110 unsigned long old,
111 unsigned long new,
112 unsigned int bytes,
113 struct x86_emulate_ctxt * ctxt);
114
115 /*
116 * cmpxchg8b_emulated: Emulate an atomic (LOCKed) CMPXCHG8B operation on an
117 * emulated/special memory area.
118 * @addr: [IN ] Linear address to access.
119 * @old: [IN ] Value expected to be current at @addr.
120 * @new: [IN ] Value to write to @addr.
121 * NOTES:
122 * 1. This function is only ever called when emulating a real CMPXCHG8B.
123 * 2. This function is *never* called on x86/64 systems.
124 * 2. Not defining this function (i.e., specifying NULL) is equivalent
125 * to defining a function that always returns X86EMUL_UNHANDLEABLE.
126 */
127 int (*cmpxchg8b_emulated) (unsigned long addr,
128 unsigned long old_lo,
129 unsigned long old_hi,
130 unsigned long new_lo,
131 unsigned long new_hi,
132 struct x86_emulate_ctxt * ctxt);
133};
134
135struct cpu_user_regs;
136
137struct x86_emulate_ctxt {
138 /* Register state before/after emulation. */
139 struct kvm_vcpu *vcpu;
140
141 /* Linear faulting address (if emulating a page-faulting instruction). */
142 unsigned long eflags;
143 unsigned long cr2;
144
145 /* Emulated execution mode, represented by an X86EMUL_MODE value. */
146 int mode;
147
148 unsigned long cs_base;
149 unsigned long ds_base;
150 unsigned long es_base;
151 unsigned long ss_base;
152 unsigned long gs_base;
153 unsigned long fs_base;
154};
155
156/* Execution mode, passed to the emulator. */
157#define X86EMUL_MODE_REAL 0 /* Real mode. */
158#define X86EMUL_MODE_PROT16 2 /* 16-bit protected mode. */
159#define X86EMUL_MODE_PROT32 4 /* 32-bit protected mode. */
160#define X86EMUL_MODE_PROT64 8 /* 64-bit (long) mode. */
161
162/* Host execution mode. */
163#if defined(__i386__)
164#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32
165#elif defined(CONFIG_X86_64)
166#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
167#endif
168
169/*
170 * x86_emulate_memop: Emulate an instruction that faulted attempting to
171 * read/write a 'special' memory area.
172 * Returns -1 on failure, 0 on success.
173 */
174int x86_emulate_memop(struct x86_emulate_ctxt *ctxt,
175 struct x86_emulate_ops *ops);
176
177/*
178 * Given the 'reg' portion of a ModRM byte, and a register block, return a
179 * pointer into the block that addresses the relevant register.
180 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
181 */
182void *decode_register(u8 modrm_reg, unsigned long *regs,
183 int highbyte_regs);
184
185#endif /* __X86_EMULATE_H__ */
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 176142c61492..7399ba791116 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -12,7 +12,7 @@ config NEW_LEDS
12 12
13config LEDS_CLASS 13config LEDS_CLASS
14 tristate "LED Class Support" 14 tristate "LED Class Support"
15 depends NEW_LEDS 15 depends on NEW_LEDS
16 help 16 help
17 This option enables the led sysfs class in /sys/class/leds. You'll 17 This option enables the led sysfs class in /sys/class/leds. You'll
18 need this to do anything useful with LEDs. If unsure, say N. 18 need this to do anything useful with LEDs. If unsure, say N.
@@ -21,28 +21,28 @@ comment "LED drivers"
21 21
22config LEDS_CORGI 22config LEDS_CORGI
23 tristate "LED Support for the Sharp SL-C7x0 series" 23 tristate "LED Support for the Sharp SL-C7x0 series"
24 depends LEDS_CLASS && PXA_SHARP_C7xx 24 depends on LEDS_CLASS && PXA_SHARP_C7xx
25 help 25 help
26 This option enables support for the LEDs on Sharp Zaurus 26 This option enables support for the LEDs on Sharp Zaurus
27 SL-C7x0 series (C700, C750, C760, C860). 27 SL-C7x0 series (C700, C750, C760, C860).
28 28
29config LEDS_LOCOMO 29config LEDS_LOCOMO
30 tristate "LED Support for Locomo device" 30 tristate "LED Support for Locomo device"
31 depends LEDS_CLASS && SHARP_LOCOMO 31 depends on LEDS_CLASS && SHARP_LOCOMO
32 help 32 help
33 This option enables support for the LEDs on Sharp Locomo. 33 This option enables support for the LEDs on Sharp Locomo.
34 Zaurus models SL-5500 and SL-5600. 34 Zaurus models SL-5500 and SL-5600.
35 35
36config LEDS_SPITZ 36config LEDS_SPITZ
37 tristate "LED Support for the Sharp SL-Cxx00 series" 37 tristate "LED Support for the Sharp SL-Cxx00 series"
38 depends LEDS_CLASS && PXA_SHARP_Cxx00 38 depends on LEDS_CLASS && PXA_SHARP_Cxx00
39 help 39 help
40 This option enables support for the LEDs on Sharp Zaurus 40 This option enables support for the LEDs on Sharp Zaurus
41 SL-Cxx00 series (C1000, C3000, C3100). 41 SL-Cxx00 series (C1000, C3000, C3100).
42 42
43config LEDS_IXP4XX 43config LEDS_IXP4XX
44 tristate "LED Support for GPIO connected LEDs on IXP4XX processors" 44 tristate "LED Support for GPIO connected LEDs on IXP4XX processors"
45 depends LEDS_CLASS && ARCH_IXP4XX 45 depends on LEDS_CLASS && ARCH_IXP4XX
46 help 46 help
47 This option enables support for the LEDs connected to GPIO 47 This option enables support for the LEDs connected to GPIO
48 outputs of the Intel IXP4XX processors. To be useful the 48 outputs of the Intel IXP4XX processors. To be useful the
@@ -51,7 +51,7 @@ config LEDS_IXP4XX
51 51
52config LEDS_TOSA 52config LEDS_TOSA
53 tristate "LED Support for the Sharp SL-6000 series" 53 tristate "LED Support for the Sharp SL-6000 series"
54 depends LEDS_CLASS && PXA_SHARPSL 54 depends on LEDS_CLASS && PXA_SHARPSL
55 help 55 help
56 This option enables support for the LEDs on Sharp Zaurus 56 This option enables support for the LEDs on Sharp Zaurus
57 SL-6000 series. 57 SL-6000 series.
@@ -65,7 +65,7 @@ config LEDS_S3C24XX
65 65
66config LEDS_AMS_DELTA 66config LEDS_AMS_DELTA
67 tristate "LED Support for the Amstrad Delta (E3)" 67 tristate "LED Support for the Amstrad Delta (E3)"
68 depends LEDS_CLASS && MACH_AMS_DELTA 68 depends on LEDS_CLASS && MACH_AMS_DELTA
69 help 69 help
70 This option enables support for the LEDs on Amstrad Delta (E3). 70 This option enables support for the LEDs on Amstrad Delta (E3).
71 71
@@ -86,7 +86,7 @@ comment "LED Triggers"
86 86
87config LEDS_TRIGGERS 87config LEDS_TRIGGERS
88 bool "LED Trigger support" 88 bool "LED Trigger support"
89 depends NEW_LEDS 89 depends on NEW_LEDS
90 help 90 help
91 This option enables trigger support for the leds class. 91 This option enables trigger support for the leds class.
92 These triggers allow kernel events to drive the LEDs and can 92 These triggers allow kernel events to drive the LEDs and can
@@ -94,21 +94,21 @@ config LEDS_TRIGGERS
94 94
95config LEDS_TRIGGER_TIMER 95config LEDS_TRIGGER_TIMER
96 tristate "LED Timer Trigger" 96 tristate "LED Timer Trigger"
97 depends LEDS_TRIGGERS 97 depends on LEDS_TRIGGERS
98 help 98 help
99 This allows LEDs to be controlled by a programmable timer 99 This allows LEDs to be controlled by a programmable timer
100 via sysfs. If unsure, say Y. 100 via sysfs. If unsure, say Y.
101 101
102config LEDS_TRIGGER_IDE_DISK 102config LEDS_TRIGGER_IDE_DISK
103 bool "LED IDE Disk Trigger" 103 bool "LED IDE Disk Trigger"
104 depends LEDS_TRIGGERS && BLK_DEV_IDEDISK 104 depends on LEDS_TRIGGERS && BLK_DEV_IDEDISK
105 help 105 help
106 This allows LEDs to be controlled by IDE disk activity. 106 This allows LEDs to be controlled by IDE disk activity.
107 If unsure, say Y. 107 If unsure, say Y.
108 108
109config LEDS_TRIGGER_HEARTBEAT 109config LEDS_TRIGGER_HEARTBEAT
110 tristate "LED Heartbeat Trigger" 110 tristate "LED Heartbeat Trigger"
111 depends LEDS_TRIGGERS 111 depends on LEDS_TRIGGERS
112 help 112 help
113 This allows LEDs to be controlled by a CPU load average. 113 This allows LEDs to be controlled by a CPU load average.
114 The flash frequency is a hyperbolic function of the 1-minute 114 The flash frequency is a hyperbolic function of the 1-minute
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index d43ea81d6df9..7cec6de5e2b0 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -828,7 +828,7 @@ static ssize_t adb_write(struct file *file, const char __user *buf,
828 if (!access_ok(VERIFY_READ, buf, count)) 828 if (!access_ok(VERIFY_READ, buf, count))
829 return -EFAULT; 829 return -EFAULT;
830 830
831 req = (struct adb_request *) kmalloc(sizeof(struct adb_request), 831 req = kmalloc(sizeof(struct adb_request),
832 GFP_KERNEL); 832 GFP_KERNEL);
833 if (req == NULL) 833 if (req == NULL)
834 return -ENOMEM; 834 return -ENOMEM;
diff --git a/drivers/macintosh/apm_emu.c b/drivers/macintosh/apm_emu.c
index 8862a83b8d84..4300c628f8af 100644
--- a/drivers/macintosh/apm_emu.c
+++ b/drivers/macintosh/apm_emu.c
@@ -321,7 +321,7 @@ static int do_open(struct inode * inode, struct file * filp)
321{ 321{
322 struct apm_user * as; 322 struct apm_user * as;
323 323
324 as = (struct apm_user *)kmalloc(sizeof(*as), GFP_KERNEL); 324 as = kmalloc(sizeof(*as), GFP_KERNEL);
325 if (as == NULL) { 325 if (as == NULL) {
326 printk(KERN_ERR "apm: cannot allocate struct of size %d bytes\n", 326 printk(KERN_ERR "apm: cannot allocate struct of size %d bytes\n",
327 sizeof(*as)); 327 sizeof(*as));
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 6dde27ab79a8..6f30459b9385 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -945,7 +945,7 @@ static struct smu_sdbp_header *smu_create_sdb_partition(int id)
945 */ 945 */
946 tlen = sizeof(struct property) + len + 18; 946 tlen = sizeof(struct property) + len + 18;
947 947
948 prop = kcalloc(tlen, 1, GFP_KERNEL); 948 prop = kzalloc(tlen, GFP_KERNEL);
949 if (prop == NULL) 949 if (prop == NULL)
950 return NULL; 950 return NULL;
951 hdr = (struct smu_sdbp_header *)(prop + 1); 951 hdr = (struct smu_sdbp_header *)(prop + 1);
diff --git a/drivers/macintosh/via-pmu68k.c b/drivers/macintosh/via-pmu68k.c
index d9986f3a3fbf..93e6ef9233f9 100644
--- a/drivers/macintosh/via-pmu68k.c
+++ b/drivers/macintosh/via-pmu68k.c
@@ -847,7 +847,7 @@ pbook_pci_save(void)
847 n_pbook_pci_saves = npci; 847 n_pbook_pci_saves = npci;
848 if (npci == 0) 848 if (npci == 0)
849 return; 849 return;
850 ps = (struct pci_save *) kmalloc(npci * sizeof(*ps), GFP_KERNEL); 850 ps = kmalloc(npci * sizeof(*ps), GFP_KERNEL);
851 pbook_pci_saves = ps; 851 pbook_pci_saves = ps;
852 if (ps == NULL) 852 if (ps == NULL)
853 return; 853 return;
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index c92c1521546d..4540ade6b6b5 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -215,6 +215,7 @@ config DM_CRYPT
215 tristate "Crypt target support" 215 tristate "Crypt target support"
216 depends on BLK_DEV_DM && EXPERIMENTAL 216 depends on BLK_DEV_DM && EXPERIMENTAL
217 select CRYPTO 217 select CRYPTO
218 select CRYPTO_CBC
218 ---help--- 219 ---help---
219 This device-mapper target allows you to create a device that 220 This device-mapper target allows you to create a device that
220 transparently encrypts the data on it. You'll need to activate 221 transparently encrypts the data on it. You'll need to activate
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index a7a5ab554338..4ebd0f2a75ec 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -173,7 +173,7 @@ static int make_request(request_queue_t *q, struct bio *bio)
173 conf_t *conf = (conf_t*)mddev->private; 173 conf_t *conf = (conf_t*)mddev->private;
174 int failit = 0; 174 int failit = 0;
175 175
176 if (bio->bi_rw & 1) { 176 if (bio_data_dir(bio) == WRITE) {
177 /* write request */ 177 /* write request */
178 if (atomic_read(&conf->counters[WriteAll])) { 178 if (atomic_read(&conf->counters[WriteAll])) {
179 /* special case - don't decrement, don't generic_make_request, 179 /* special case - don't decrement, don't generic_make_request,
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 53bd46dba0cb..21e2a7b08841 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3314,6 +3314,10 @@ static int do_md_stop(mddev_t * mddev, int mode)
3314 3314
3315 module_put(mddev->pers->owner); 3315 module_put(mddev->pers->owner);
3316 mddev->pers = NULL; 3316 mddev->pers = NULL;
3317
3318 set_capacity(disk, 0);
3319 mddev->changed = 1;
3320
3317 if (mddev->ro) 3321 if (mddev->ro)
3318 mddev->ro = 0; 3322 mddev->ro = 0;
3319 } 3323 }
@@ -3333,7 +3337,7 @@ static int do_md_stop(mddev_t * mddev, int mode)
3333 if (mode == 0) { 3337 if (mode == 0) {
3334 mdk_rdev_t *rdev; 3338 mdk_rdev_t *rdev;
3335 struct list_head *tmp; 3339 struct list_head *tmp;
3336 struct gendisk *disk; 3340
3337 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); 3341 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
3338 3342
3339 bitmap_destroy(mddev); 3343 bitmap_destroy(mddev);
@@ -3358,10 +3362,6 @@ static int do_md_stop(mddev_t * mddev, int mode)
3358 mddev->raid_disks = 0; 3362 mddev->raid_disks = 0;
3359 mddev->recovery_cp = 0; 3363 mddev->recovery_cp = 0;
3360 3364
3361 disk = mddev->gendisk;
3362 if (disk)
3363 set_capacity(disk, 0);
3364 mddev->changed = 1;
3365 } else if (mddev->pers) 3365 } else if (mddev->pers)
3366 printk(KERN_INFO "md: %s switched to read-only mode.\n", 3366 printk(KERN_INFO "md: %s switched to read-only mode.\n",
3367 mdname(mddev)); 3367 mdname(mddev));
@@ -3371,6 +3371,7 @@ out:
3371 return err; 3371 return err;
3372} 3372}
3373 3373
3374#ifndef MODULE
3374static void autorun_array(mddev_t *mddev) 3375static void autorun_array(mddev_t *mddev)
3375{ 3376{
3376 mdk_rdev_t *rdev; 3377 mdk_rdev_t *rdev;
@@ -3485,6 +3486,7 @@ static void autorun_devices(int part)
3485 } 3486 }
3486 printk(KERN_INFO "md: ... autorun DONE.\n"); 3487 printk(KERN_INFO "md: ... autorun DONE.\n");
3487} 3488}
3489#endif /* !MODULE */
3488 3490
3489static int get_version(void __user * arg) 3491static int get_version(void __user * arg)
3490{ 3492{
@@ -3722,6 +3724,7 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
3722 if (err) 3724 if (err)
3723 export_rdev(rdev); 3725 export_rdev(rdev);
3724 3726
3727 md_update_sb(mddev, 1);
3725 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3728 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3726 md_wakeup_thread(mddev->thread); 3729 md_wakeup_thread(mddev->thread);
3727 return err; 3730 return err;
@@ -5273,7 +5276,6 @@ void md_do_sync(mddev_t *mddev)
5273 mddev->pers->sync_request(mddev, max_sectors, &skipped, 1); 5276 mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
5274 5277
5275 if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) && 5278 if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
5276 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
5277 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && 5279 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
5278 mddev->curr_resync > 2) { 5280 mddev->curr_resync > 2) {
5279 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 5281 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
@@ -5297,6 +5299,7 @@ void md_do_sync(mddev_t *mddev)
5297 rdev->recovery_offset = mddev->curr_resync; 5299 rdev->recovery_offset = mddev->curr_resync;
5298 } 5300 }
5299 } 5301 }
5302 set_bit(MD_CHANGE_DEVS, &mddev->flags);
5300 5303
5301 skip: 5304 skip:
5302 mddev->curr_resync = 0; 5305 mddev->curr_resync = 0;
@@ -5593,7 +5596,7 @@ static void autostart_arrays(int part)
5593 autorun_devices(part); 5596 autorun_devices(part);
5594} 5597}
5595 5598
5596#endif 5599#endif /* !MODULE */
5597 5600
5598static __exit void md_exit(void) 5601static __exit void md_exit(void)
5599{ 5602{
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 656fae912fe3..b30f74be3982 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1736,7 +1736,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1736 /* take from bio_init */ 1736 /* take from bio_init */
1737 bio->bi_next = NULL; 1737 bio->bi_next = NULL;
1738 bio->bi_flags |= 1 << BIO_UPTODATE; 1738 bio->bi_flags |= 1 << BIO_UPTODATE;
1739 bio->bi_rw = 0; 1739 bio->bi_rw = READ;
1740 bio->bi_vcnt = 0; 1740 bio->bi_vcnt = 0;
1741 bio->bi_idx = 0; 1741 bio->bi_idx = 0;
1742 bio->bi_phys_segments = 0; 1742 bio->bi_phys_segments = 0;
@@ -1951,6 +1951,7 @@ static int run(mddev_t *mddev)
1951 !test_bit(In_sync, &disk->rdev->flags)) { 1951 !test_bit(In_sync, &disk->rdev->flags)) {
1952 disk->head_position = 0; 1952 disk->head_position = 0;
1953 mddev->degraded++; 1953 mddev->degraded++;
1954 conf->fullsync = 1;
1954 } 1955 }
1955 } 1956 }
1956 if (mddev->degraded == conf->raid_disks) { 1957 if (mddev->degraded == conf->raid_disks) {
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 7492d6033ac6..f0141910bb8d 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1785,7 +1785,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1785 biolist = bio; 1785 biolist = bio;
1786 bio->bi_private = r10_bio; 1786 bio->bi_private = r10_bio;
1787 bio->bi_end_io = end_sync_read; 1787 bio->bi_end_io = end_sync_read;
1788 bio->bi_rw = 0; 1788 bio->bi_rw = READ;
1789 bio->bi_sector = r10_bio->devs[j].addr + 1789 bio->bi_sector = r10_bio->devs[j].addr +
1790 conf->mirrors[d].rdev->data_offset; 1790 conf->mirrors[d].rdev->data_offset;
1791 bio->bi_bdev = conf->mirrors[d].rdev->bdev; 1791 bio->bi_bdev = conf->mirrors[d].rdev->bdev;
@@ -1801,7 +1801,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1801 biolist = bio; 1801 biolist = bio;
1802 bio->bi_private = r10_bio; 1802 bio->bi_private = r10_bio;
1803 bio->bi_end_io = end_sync_write; 1803 bio->bi_end_io = end_sync_write;
1804 bio->bi_rw = 1; 1804 bio->bi_rw = WRITE;
1805 bio->bi_sector = r10_bio->devs[k].addr + 1805 bio->bi_sector = r10_bio->devs[k].addr +
1806 conf->mirrors[i].rdev->data_offset; 1806 conf->mirrors[i].rdev->data_offset;
1807 bio->bi_bdev = conf->mirrors[i].rdev->bdev; 1807 bio->bi_bdev = conf->mirrors[i].rdev->bdev;
@@ -1870,7 +1870,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1870 biolist = bio; 1870 biolist = bio;
1871 bio->bi_private = r10_bio; 1871 bio->bi_private = r10_bio;
1872 bio->bi_end_io = end_sync_read; 1872 bio->bi_end_io = end_sync_read;
1873 bio->bi_rw = 0; 1873 bio->bi_rw = READ;
1874 bio->bi_sector = r10_bio->devs[i].addr + 1874 bio->bi_sector = r10_bio->devs[i].addr +
1875 conf->mirrors[d].rdev->data_offset; 1875 conf->mirrors[d].rdev->data_offset;
1876 bio->bi_bdev = conf->mirrors[d].rdev->bdev; 1876 bio->bi_bdev = conf->mirrors[d].rdev->bdev;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 52914d5cec76..be008f034ada 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -134,6 +134,8 @@ static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
134 if (!test_bit(STRIPE_EXPANDING, &sh->state)) { 134 if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
135 list_add_tail(&sh->lru, &conf->inactive_list); 135 list_add_tail(&sh->lru, &conf->inactive_list);
136 wake_up(&conf->wait_for_stripe); 136 wake_up(&conf->wait_for_stripe);
137 if (conf->retry_read_aligned)
138 md_wakeup_thread(conf->mddev->thread);
137 } 139 }
138 } 140 }
139 } 141 }
@@ -542,35 +544,7 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
542 } 544 }
543 545
544 if (uptodate) { 546 if (uptodate) {
545#if 0
546 struct bio *bio;
547 unsigned long flags;
548 spin_lock_irqsave(&conf->device_lock, flags);
549 /* we can return a buffer if we bypassed the cache or
550 * if the top buffer is not in highmem. If there are
551 * multiple buffers, leave the extra work to
552 * handle_stripe
553 */
554 buffer = sh->bh_read[i];
555 if (buffer &&
556 (!PageHighMem(buffer->b_page)
557 || buffer->b_page == bh->b_page )
558 ) {
559 sh->bh_read[i] = buffer->b_reqnext;
560 buffer->b_reqnext = NULL;
561 } else
562 buffer = NULL;
563 spin_unlock_irqrestore(&conf->device_lock, flags);
564 if (sh->bh_page[i]==bh->b_page)
565 set_buffer_uptodate(bh);
566 if (buffer) {
567 if (buffer->b_page != bh->b_page)
568 memcpy(buffer->b_data, bh->b_data, bh->b_size);
569 buffer->b_end_io(buffer, 1);
570 }
571#else
572 set_bit(R5_UPTODATE, &sh->dev[i].flags); 547 set_bit(R5_UPTODATE, &sh->dev[i].flags);
573#endif
574 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 548 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
575 rdev = conf->disks[i].rdev; 549 rdev = conf->disks[i].rdev;
576 printk(KERN_INFO "raid5:%s: read error corrected (%lu sectors at %llu on %s)\n", 550 printk(KERN_INFO "raid5:%s: read error corrected (%lu sectors at %llu on %s)\n",
@@ -616,14 +590,6 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
616 } 590 }
617 } 591 }
618 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); 592 rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
619#if 0
620 /* must restore b_page before unlocking buffer... */
621 if (sh->bh_page[i] != bh->b_page) {
622 bh->b_page = sh->bh_page[i];
623 bh->b_data = page_address(bh->b_page);
624 clear_buffer_uptodate(bh);
625 }
626#endif
627 clear_bit(R5_LOCKED, &sh->dev[i].flags); 593 clear_bit(R5_LOCKED, &sh->dev[i].flags);
628 set_bit(STRIPE_HANDLE, &sh->state); 594 set_bit(STRIPE_HANDLE, &sh->state);
629 release_stripe(sh); 595 release_stripe(sh);
@@ -821,7 +787,8 @@ static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
821static sector_t compute_blocknr(struct stripe_head *sh, int i) 787static sector_t compute_blocknr(struct stripe_head *sh, int i)
822{ 788{
823 raid5_conf_t *conf = sh->raid_conf; 789 raid5_conf_t *conf = sh->raid_conf;
824 int raid_disks = sh->disks, data_disks = raid_disks - 1; 790 int raid_disks = sh->disks;
791 int data_disks = raid_disks - conf->max_degraded;
825 sector_t new_sector = sh->sector, check; 792 sector_t new_sector = sh->sector, check;
826 int sectors_per_chunk = conf->chunk_size >> 9; 793 int sectors_per_chunk = conf->chunk_size >> 9;
827 sector_t stripe; 794 sector_t stripe;
@@ -857,7 +824,6 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i)
857 } 824 }
858 break; 825 break;
859 case 6: 826 case 6:
860 data_disks = raid_disks - 2;
861 if (i == raid6_next_disk(sh->pd_idx, raid_disks)) 827 if (i == raid6_next_disk(sh->pd_idx, raid_disks))
862 return 0; /* It is the Q disk */ 828 return 0; /* It is the Q disk */
863 switch (conf->algorithm) { 829 switch (conf->algorithm) {
@@ -1353,8 +1319,10 @@ static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks)
1353 int pd_idx, dd_idx; 1319 int pd_idx, dd_idx;
1354 int chunk_offset = sector_div(stripe, sectors_per_chunk); 1320 int chunk_offset = sector_div(stripe, sectors_per_chunk);
1355 1321
1356 raid5_compute_sector(stripe*(disks-1)*sectors_per_chunk 1322 raid5_compute_sector(stripe * (disks - conf->max_degraded)
1357 + chunk_offset, disks, disks-1, &dd_idx, &pd_idx, conf); 1323 *sectors_per_chunk + chunk_offset,
1324 disks, disks - conf->max_degraded,
1325 &dd_idx, &pd_idx, conf);
1358 return pd_idx; 1326 return pd_idx;
1359} 1327}
1360 1328
@@ -1615,15 +1583,6 @@ static void handle_stripe5(struct stripe_head *sh)
1615 } else if (test_bit(R5_Insync, &dev->flags)) { 1583 } else if (test_bit(R5_Insync, &dev->flags)) {
1616 set_bit(R5_LOCKED, &dev->flags); 1584 set_bit(R5_LOCKED, &dev->flags);
1617 set_bit(R5_Wantread, &dev->flags); 1585 set_bit(R5_Wantread, &dev->flags);
1618#if 0
1619 /* if I am just reading this block and we don't have
1620 a failed drive, or any pending writes then sidestep the cache */
1621 if (sh->bh_read[i] && !sh->bh_read[i]->b_reqnext &&
1622 ! syncing && !failed && !to_write) {
1623 sh->bh_cache[i]->b_page = sh->bh_read[i]->b_page;
1624 sh->bh_cache[i]->b_data = sh->bh_read[i]->b_data;
1625 }
1626#endif
1627 locked++; 1586 locked++;
1628 PRINTK("Reading block %d (sync=%d)\n", 1587 PRINTK("Reading block %d (sync=%d)\n",
1629 i, syncing); 1588 i, syncing);
@@ -1641,9 +1600,6 @@ static void handle_stripe5(struct stripe_head *sh)
1641 dev = &sh->dev[i]; 1600 dev = &sh->dev[i];
1642 if ((dev->towrite || i == sh->pd_idx) && 1601 if ((dev->towrite || i == sh->pd_idx) &&
1643 (!test_bit(R5_LOCKED, &dev->flags) 1602 (!test_bit(R5_LOCKED, &dev->flags)
1644#if 0
1645|| sh->bh_page[i]!=bh->b_page
1646#endif
1647 ) && 1603 ) &&
1648 !test_bit(R5_UPTODATE, &dev->flags)) { 1604 !test_bit(R5_UPTODATE, &dev->flags)) {
1649 if (test_bit(R5_Insync, &dev->flags) 1605 if (test_bit(R5_Insync, &dev->flags)
@@ -1655,9 +1611,6 @@ static void handle_stripe5(struct stripe_head *sh)
1655 /* Would I have to read this buffer for reconstruct_write */ 1611 /* Would I have to read this buffer for reconstruct_write */
1656 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx && 1612 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
1657 (!test_bit(R5_LOCKED, &dev->flags) 1613 (!test_bit(R5_LOCKED, &dev->flags)
1658#if 0
1659|| sh->bh_page[i] != bh->b_page
1660#endif
1661 ) && 1614 ) &&
1662 !test_bit(R5_UPTODATE, &dev->flags)) { 1615 !test_bit(R5_UPTODATE, &dev->flags)) {
1663 if (test_bit(R5_Insync, &dev->flags)) rcw++; 1616 if (test_bit(R5_Insync, &dev->flags)) rcw++;
@@ -1865,23 +1818,25 @@ static void handle_stripe5(struct stripe_head *sh)
1865 return_bi = bi->bi_next; 1818 return_bi = bi->bi_next;
1866 bi->bi_next = NULL; 1819 bi->bi_next = NULL;
1867 bi->bi_size = 0; 1820 bi->bi_size = 0;
1868 bi->bi_end_io(bi, bytes, 0); 1821 bi->bi_end_io(bi, bytes,
1822 test_bit(BIO_UPTODATE, &bi->bi_flags)
1823 ? 0 : -EIO);
1869 } 1824 }
1870 for (i=disks; i-- ;) { 1825 for (i=disks; i-- ;) {
1871 int rw; 1826 int rw;
1872 struct bio *bi; 1827 struct bio *bi;
1873 mdk_rdev_t *rdev; 1828 mdk_rdev_t *rdev;
1874 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) 1829 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
1875 rw = 1; 1830 rw = WRITE;
1876 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) 1831 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
1877 rw = 0; 1832 rw = READ;
1878 else 1833 else
1879 continue; 1834 continue;
1880 1835
1881 bi = &sh->dev[i].req; 1836 bi = &sh->dev[i].req;
1882 1837
1883 bi->bi_rw = rw; 1838 bi->bi_rw = rw;
1884 if (rw) 1839 if (rw == WRITE)
1885 bi->bi_end_io = raid5_end_write_request; 1840 bi->bi_end_io = raid5_end_write_request;
1886 else 1841 else
1887 bi->bi_end_io = raid5_end_read_request; 1842 bi->bi_end_io = raid5_end_read_request;
@@ -1917,7 +1872,7 @@ static void handle_stripe5(struct stripe_head *sh)
1917 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); 1872 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
1918 generic_make_request(bi); 1873 generic_make_request(bi);
1919 } else { 1874 } else {
1920 if (rw == 1) 1875 if (rw == WRITE)
1921 set_bit(STRIPE_DEGRADED, &sh->state); 1876 set_bit(STRIPE_DEGRADED, &sh->state);
1922 PRINTK("skip op %ld on disc %d for sector %llu\n", 1877 PRINTK("skip op %ld on disc %d for sector %llu\n",
1923 bi->bi_rw, i, (unsigned long long)sh->sector); 1878 bi->bi_rw, i, (unsigned long long)sh->sector);
@@ -2193,15 +2148,6 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
2193 } else if (test_bit(R5_Insync, &dev->flags)) { 2148 } else if (test_bit(R5_Insync, &dev->flags)) {
2194 set_bit(R5_LOCKED, &dev->flags); 2149 set_bit(R5_LOCKED, &dev->flags);
2195 set_bit(R5_Wantread, &dev->flags); 2150 set_bit(R5_Wantread, &dev->flags);
2196#if 0
2197 /* if I am just reading this block and we don't have
2198 a failed drive, or any pending writes then sidestep the cache */
2199 if (sh->bh_read[i] && !sh->bh_read[i]->b_reqnext &&
2200 ! syncing && !failed && !to_write) {
2201 sh->bh_cache[i]->b_page = sh->bh_read[i]->b_page;
2202 sh->bh_cache[i]->b_data = sh->bh_read[i]->b_data;
2203 }
2204#endif
2205 locked++; 2151 locked++;
2206 PRINTK("Reading block %d (sync=%d)\n", 2152 PRINTK("Reading block %d (sync=%d)\n",
2207 i, syncing); 2153 i, syncing);
@@ -2220,9 +2166,6 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
2220 if (!test_bit(R5_OVERWRITE, &dev->flags) 2166 if (!test_bit(R5_OVERWRITE, &dev->flags)
2221 && i != pd_idx && i != qd_idx 2167 && i != pd_idx && i != qd_idx
2222 && (!test_bit(R5_LOCKED, &dev->flags) 2168 && (!test_bit(R5_LOCKED, &dev->flags)
2223#if 0
2224 || sh->bh_page[i] != bh->b_page
2225#endif
2226 ) && 2169 ) &&
2227 !test_bit(R5_UPTODATE, &dev->flags)) { 2170 !test_bit(R5_UPTODATE, &dev->flags)) {
2228 if (test_bit(R5_Insync, &dev->flags)) rcw++; 2171 if (test_bit(R5_Insync, &dev->flags)) rcw++;
@@ -2418,23 +2361,25 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
2418 return_bi = bi->bi_next; 2361 return_bi = bi->bi_next;
2419 bi->bi_next = NULL; 2362 bi->bi_next = NULL;
2420 bi->bi_size = 0; 2363 bi->bi_size = 0;
2421 bi->bi_end_io(bi, bytes, 0); 2364 bi->bi_end_io(bi, bytes,
2365 test_bit(BIO_UPTODATE, &bi->bi_flags)
2366 ? 0 : -EIO);
2422 } 2367 }
2423 for (i=disks; i-- ;) { 2368 for (i=disks; i-- ;) {
2424 int rw; 2369 int rw;
2425 struct bio *bi; 2370 struct bio *bi;
2426 mdk_rdev_t *rdev; 2371 mdk_rdev_t *rdev;
2427 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) 2372 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
2428 rw = 1; 2373 rw = WRITE;
2429 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) 2374 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
2430 rw = 0; 2375 rw = READ;
2431 else 2376 else
2432 continue; 2377 continue;
2433 2378
2434 bi = &sh->dev[i].req; 2379 bi = &sh->dev[i].req;
2435 2380
2436 bi->bi_rw = rw; 2381 bi->bi_rw = rw;
2437 if (rw) 2382 if (rw == WRITE)
2438 bi->bi_end_io = raid5_end_write_request; 2383 bi->bi_end_io = raid5_end_write_request;
2439 else 2384 else
2440 bi->bi_end_io = raid5_end_read_request; 2385 bi->bi_end_io = raid5_end_read_request;
@@ -2470,7 +2415,7 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
2470 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); 2415 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
2471 generic_make_request(bi); 2416 generic_make_request(bi);
2472 } else { 2417 } else {
2473 if (rw == 1) 2418 if (rw == WRITE)
2474 set_bit(STRIPE_DEGRADED, &sh->state); 2419 set_bit(STRIPE_DEGRADED, &sh->state);
2475 PRINTK("skip op %ld on disc %d for sector %llu\n", 2420 PRINTK("skip op %ld on disc %d for sector %llu\n",
2476 bi->bi_rw, i, (unsigned long long)sh->sector); 2421 bi->bi_rw, i, (unsigned long long)sh->sector);
@@ -2611,6 +2556,180 @@ static int raid5_congested(void *data, int bits)
2611 return 0; 2556 return 0;
2612} 2557}
2613 2558
2559/* We want read requests to align with chunks where possible,
2560 * but write requests don't need to.
2561 */
2562static int raid5_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
2563{
2564 mddev_t *mddev = q->queuedata;
2565 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
2566 int max;
2567 unsigned int chunk_sectors = mddev->chunk_size >> 9;
2568 unsigned int bio_sectors = bio->bi_size >> 9;
2569
2570 if (bio_data_dir(bio) == WRITE)
2571 return biovec->bv_len; /* always allow writes to be mergeable */
2572
2573 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
2574 if (max < 0) max = 0;
2575 if (max <= biovec->bv_len && bio_sectors == 0)
2576 return biovec->bv_len;
2577 else
2578 return max;
2579}
2580
2581
2582static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
2583{
2584 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
2585 unsigned int chunk_sectors = mddev->chunk_size >> 9;
2586 unsigned int bio_sectors = bio->bi_size >> 9;
2587
2588 return chunk_sectors >=
2589 ((sector & (chunk_sectors - 1)) + bio_sectors);
2590}
2591
2592/*
2593 * add bio to the retry LIFO ( in O(1) ... we are in interrupt )
2594 * later sampled by raid5d.
2595 */
2596static void add_bio_to_retry(struct bio *bi,raid5_conf_t *conf)
2597{
2598 unsigned long flags;
2599
2600 spin_lock_irqsave(&conf->device_lock, flags);
2601
2602 bi->bi_next = conf->retry_read_aligned_list;
2603 conf->retry_read_aligned_list = bi;
2604
2605 spin_unlock_irqrestore(&conf->device_lock, flags);
2606 md_wakeup_thread(conf->mddev->thread);
2607}
2608
2609
2610static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
2611{
2612 struct bio *bi;
2613
2614 bi = conf->retry_read_aligned;
2615 if (bi) {
2616 conf->retry_read_aligned = NULL;
2617 return bi;
2618 }
2619 bi = conf->retry_read_aligned_list;
2620 if(bi) {
2621 conf->retry_read_aligned = bi->bi_next;
2622 bi->bi_next = NULL;
2623 bi->bi_phys_segments = 1; /* biased count of active stripes */
2624 bi->bi_hw_segments = 0; /* count of processed stripes */
2625 }
2626
2627 return bi;
2628}
2629
2630
2631/*
2632 * The "raid5_align_endio" should check if the read succeeded and if it
2633 * did, call bio_endio on the original bio (having bio_put the new bio
2634 * first).
2635 * If the read failed..
2636 */
2637static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
2638{
2639 struct bio* raid_bi = bi->bi_private;
2640 mddev_t *mddev;
2641 raid5_conf_t *conf;
2642 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
2643 mdk_rdev_t *rdev;
2644
2645 if (bi->bi_size)
2646 return 1;
2647 bio_put(bi);
2648
2649 mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
2650 conf = mddev_to_conf(mddev);
2651 rdev = (void*)raid_bi->bi_next;
2652 raid_bi->bi_next = NULL;
2653
2654 rdev_dec_pending(rdev, conf->mddev);
2655
2656 if (!error && uptodate) {
2657 bio_endio(raid_bi, bytes, 0);
2658 if (atomic_dec_and_test(&conf->active_aligned_reads))
2659 wake_up(&conf->wait_for_stripe);
2660 return 0;
2661 }
2662
2663
2664 PRINTK("raid5_align_endio : io error...handing IO for a retry\n");
2665
2666 add_bio_to_retry(raid_bi, conf);
2667 return 0;
2668}
2669
2670static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
2671{
2672 mddev_t *mddev = q->queuedata;
2673 raid5_conf_t *conf = mddev_to_conf(mddev);
2674 const unsigned int raid_disks = conf->raid_disks;
2675 const unsigned int data_disks = raid_disks - conf->max_degraded;
2676 unsigned int dd_idx, pd_idx;
2677 struct bio* align_bi;
2678 mdk_rdev_t *rdev;
2679
2680 if (!in_chunk_boundary(mddev, raid_bio)) {
2681 printk("chunk_aligned_read : non aligned\n");
2682 return 0;
2683 }
2684 /*
2685 * use bio_clone to make a copy of the bio
2686 */
2687 align_bi = bio_clone(raid_bio, GFP_NOIO);
2688 if (!align_bi)
2689 return 0;
2690 /*
2691 * set bi_end_io to a new function, and set bi_private to the
2692 * original bio.
2693 */
2694 align_bi->bi_end_io = raid5_align_endio;
2695 align_bi->bi_private = raid_bio;
2696 /*
2697 * compute position
2698 */
2699 align_bi->bi_sector = raid5_compute_sector(raid_bio->bi_sector,
2700 raid_disks,
2701 data_disks,
2702 &dd_idx,
2703 &pd_idx,
2704 conf);
2705
2706 rcu_read_lock();
2707 rdev = rcu_dereference(conf->disks[dd_idx].rdev);
2708 if (rdev && test_bit(In_sync, &rdev->flags)) {
2709 atomic_inc(&rdev->nr_pending);
2710 rcu_read_unlock();
2711 raid_bio->bi_next = (void*)rdev;
2712 align_bi->bi_bdev = rdev->bdev;
2713 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
2714 align_bi->bi_sector += rdev->data_offset;
2715
2716 spin_lock_irq(&conf->device_lock);
2717 wait_event_lock_irq(conf->wait_for_stripe,
2718 conf->quiesce == 0,
2719 conf->device_lock, /* nothing */);
2720 atomic_inc(&conf->active_aligned_reads);
2721 spin_unlock_irq(&conf->device_lock);
2722
2723 generic_make_request(align_bi);
2724 return 1;
2725 } else {
2726 rcu_read_unlock();
2727 bio_put(align_bi);
2728 return 0;
2729 }
2730}
2731
2732
2614static int make_request(request_queue_t *q, struct bio * bi) 2733static int make_request(request_queue_t *q, struct bio * bi)
2615{ 2734{
2616 mddev_t *mddev = q->queuedata; 2735 mddev_t *mddev = q->queuedata;
@@ -2632,6 +2751,11 @@ static int make_request(request_queue_t *q, struct bio * bi)
2632 disk_stat_inc(mddev->gendisk, ios[rw]); 2751 disk_stat_inc(mddev->gendisk, ios[rw]);
2633 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi)); 2752 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi));
2634 2753
2754 if (rw == READ &&
2755 mddev->reshape_position == MaxSector &&
2756 chunk_aligned_read(q,bi))
2757 return 0;
2758
2635 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 2759 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
2636 last_sector = bi->bi_sector + (bi->bi_size>>9); 2760 last_sector = bi->bi_sector + (bi->bi_size>>9);
2637 bi->bi_next = NULL; 2761 bi->bi_next = NULL;
@@ -2739,7 +2863,9 @@ static int make_request(request_queue_t *q, struct bio * bi)
2739 if ( rw == WRITE ) 2863 if ( rw == WRITE )
2740 md_write_end(mddev); 2864 md_write_end(mddev);
2741 bi->bi_size = 0; 2865 bi->bi_size = 0;
2742 bi->bi_end_io(bi, bytes, 0); 2866 bi->bi_end_io(bi, bytes,
2867 test_bit(BIO_UPTODATE, &bi->bi_flags)
2868 ? 0 : -EIO);
2743 } 2869 }
2744 return 0; 2870 return 0;
2745} 2871}
@@ -2950,6 +3076,74 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
2950 return STRIPE_SECTORS; 3076 return STRIPE_SECTORS;
2951} 3077}
2952 3078
3079static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
3080{
3081 /* We may not be able to submit a whole bio at once as there
3082 * may not be enough stripe_heads available.
3083 * We cannot pre-allocate enough stripe_heads as we may need
3084 * more than exist in the cache (if we allow ever large chunks).
3085 * So we do one stripe head at a time and record in
3086 * ->bi_hw_segments how many have been done.
3087 *
3088 * We *know* that this entire raid_bio is in one chunk, so
3089 * it will be only one 'dd_idx' and only need one call to raid5_compute_sector.
3090 */
3091 struct stripe_head *sh;
3092 int dd_idx, pd_idx;
3093 sector_t sector, logical_sector, last_sector;
3094 int scnt = 0;
3095 int remaining;
3096 int handled = 0;
3097
3098 logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
3099 sector = raid5_compute_sector( logical_sector,
3100 conf->raid_disks,
3101 conf->raid_disks - conf->max_degraded,
3102 &dd_idx,
3103 &pd_idx,
3104 conf);
3105 last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
3106
3107 for (; logical_sector < last_sector;
3108 logical_sector += STRIPE_SECTORS, scnt++) {
3109
3110 if (scnt < raid_bio->bi_hw_segments)
3111 /* already done this stripe */
3112 continue;
3113
3114 sh = get_active_stripe(conf, sector, conf->raid_disks, pd_idx, 1);
3115
3116 if (!sh) {
3117 /* failed to get a stripe - must wait */
3118 raid_bio->bi_hw_segments = scnt;
3119 conf->retry_read_aligned = raid_bio;
3120 return handled;
3121 }
3122
3123 set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
3124 add_stripe_bio(sh, raid_bio, dd_idx, 0);
3125 handle_stripe(sh, NULL);
3126 release_stripe(sh);
3127 handled++;
3128 }
3129 spin_lock_irq(&conf->device_lock);
3130 remaining = --raid_bio->bi_phys_segments;
3131 spin_unlock_irq(&conf->device_lock);
3132 if (remaining == 0) {
3133 int bytes = raid_bio->bi_size;
3134
3135 raid_bio->bi_size = 0;
3136 raid_bio->bi_end_io(raid_bio, bytes,
3137 test_bit(BIO_UPTODATE, &raid_bio->bi_flags)
3138 ? 0 : -EIO);
3139 }
3140 if (atomic_dec_and_test(&conf->active_aligned_reads))
3141 wake_up(&conf->wait_for_stripe);
3142 return handled;
3143}
3144
3145
3146
2953/* 3147/*
2954 * This is our raid5 kernel thread. 3148 * This is our raid5 kernel thread.
2955 * 3149 *
@@ -2971,6 +3165,7 @@ static void raid5d (mddev_t *mddev)
2971 spin_lock_irq(&conf->device_lock); 3165 spin_lock_irq(&conf->device_lock);
2972 while (1) { 3166 while (1) {
2973 struct list_head *first; 3167 struct list_head *first;
3168 struct bio *bio;
2974 3169
2975 if (conf->seq_flush != conf->seq_write) { 3170 if (conf->seq_flush != conf->seq_write) {
2976 int seq = conf->seq_flush; 3171 int seq = conf->seq_flush;
@@ -2987,6 +3182,16 @@ static void raid5d (mddev_t *mddev)
2987 !list_empty(&conf->delayed_list)) 3182 !list_empty(&conf->delayed_list))
2988 raid5_activate_delayed(conf); 3183 raid5_activate_delayed(conf);
2989 3184
3185 while ((bio = remove_bio_from_retry(conf))) {
3186 int ok;
3187 spin_unlock_irq(&conf->device_lock);
3188 ok = retry_aligned_read(conf, bio);
3189 spin_lock_irq(&conf->device_lock);
3190 if (!ok)
3191 break;
3192 handled++;
3193 }
3194
2990 if (list_empty(&conf->handle_list)) 3195 if (list_empty(&conf->handle_list))
2991 break; 3196 break;
2992 3197
@@ -3174,6 +3379,7 @@ static int run(mddev_t *mddev)
3174 INIT_LIST_HEAD(&conf->inactive_list); 3379 INIT_LIST_HEAD(&conf->inactive_list);
3175 atomic_set(&conf->active_stripes, 0); 3380 atomic_set(&conf->active_stripes, 0);
3176 atomic_set(&conf->preread_active_stripes, 0); 3381 atomic_set(&conf->preread_active_stripes, 0);
3382 atomic_set(&conf->active_aligned_reads, 0);
3177 3383
3178 PRINTK("raid5: run(%s) called.\n", mdname(mddev)); 3384 PRINTK("raid5: run(%s) called.\n", mdname(mddev));
3179 3385
@@ -3320,6 +3526,8 @@ static int run(mddev_t *mddev)
3320 mddev->array_size = mddev->size * (conf->previous_raid_disks - 3526 mddev->array_size = mddev->size * (conf->previous_raid_disks -
3321 conf->max_degraded); 3527 conf->max_degraded);
3322 3528
3529 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
3530
3323 return 0; 3531 return 0;
3324abort: 3532abort:
3325 if (conf) { 3533 if (conf) {
@@ -3694,7 +3902,8 @@ static void raid5_quiesce(mddev_t *mddev, int state)
3694 spin_lock_irq(&conf->device_lock); 3902 spin_lock_irq(&conf->device_lock);
3695 conf->quiesce = 1; 3903 conf->quiesce = 1;
3696 wait_event_lock_irq(conf->wait_for_stripe, 3904 wait_event_lock_irq(conf->wait_for_stripe,
3697 atomic_read(&conf->active_stripes) == 0, 3905 atomic_read(&conf->active_stripes) == 0 &&
3906 atomic_read(&conf->active_aligned_reads) == 0,
3698 conf->device_lock, /* nothing */); 3907 conf->device_lock, /* nothing */);
3699 spin_unlock_irq(&conf->device_lock); 3908 spin_unlock_irq(&conf->device_lock);
3700 break; 3909 break;
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 9f7e1fe8c97e..87410dbd3df4 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -67,6 +67,7 @@ source "drivers/media/common/Kconfig"
67 67
68config VIDEO_TUNER 68config VIDEO_TUNER
69 tristate 69 tristate
70 depends on I2C
70 71
71config VIDEO_BUF 72config VIDEO_BUF
72 tristate 73 tristate
@@ -82,6 +83,7 @@ config VIDEO_IR
82 83
83config VIDEO_TVEEPROM 84config VIDEO_TVEEPROM
84 tristate 85 tristate
86 depends on I2C
85 87
86config USB_DABUSB 88config USB_DABUSB
87 tristate "DABUSB driver" 89 tristate "DABUSB driver"
diff --git a/drivers/media/common/ir-keymaps.c b/drivers/media/common/ir-keymaps.c
index db753443587a..f51e02fe3655 100644
--- a/drivers/media/common/ir-keymaps.c
+++ b/drivers/media/common/ir-keymaps.c
@@ -1552,3 +1552,58 @@ IR_KEYTAB_TYPE ir_codes_norwood[IR_KEYTAB_SIZE] = {
1552}; 1552};
1553 1553
1554EXPORT_SYMBOL_GPL(ir_codes_norwood); 1554EXPORT_SYMBOL_GPL(ir_codes_norwood);
1555
1556/* From reading the following remotes:
1557 * Zenith Universal 7 / TV Mode 807 / VCR Mode 837
1558 * Hauppauge (from NOVA-CI-s box product)
1559 * This is a "middle of the road" approach, differences are noted
1560 */
1561IR_KEYTAB_TYPE ir_codes_budget_ci_old[IR_KEYTAB_SIZE] = {
1562 [ 0x00 ] = KEY_0,
1563 [ 0x01 ] = KEY_1,
1564 [ 0x02 ] = KEY_2,
1565 [ 0x03 ] = KEY_3,
1566 [ 0x04 ] = KEY_4,
1567 [ 0x05 ] = KEY_5,
1568 [ 0x06 ] = KEY_6,
1569 [ 0x07 ] = KEY_7,
1570 [ 0x08 ] = KEY_8,
1571 [ 0x09 ] = KEY_9,
1572 [ 0x0a ] = KEY_ENTER,
1573 [ 0x0b ] = KEY_RED,
1574 [ 0x0c ] = KEY_POWER, /* RADIO on Hauppauge */
1575 [ 0x0d ] = KEY_MUTE,
1576 [ 0x0f ] = KEY_A, /* TV on Hauppauge */
1577 [ 0x10 ] = KEY_VOLUMEUP,
1578 [ 0x11 ] = KEY_VOLUMEDOWN,
1579 [ 0x14 ] = KEY_B,
1580 [ 0x1c ] = KEY_UP,
1581 [ 0x1d ] = KEY_DOWN,
1582 [ 0x1e ] = KEY_OPTION, /* RESERVED on Hauppauge */
1583 [ 0x1f ] = KEY_BREAK,
1584 [ 0x20 ] = KEY_CHANNELUP,
1585 [ 0x21 ] = KEY_CHANNELDOWN,
1586 [ 0x22 ] = KEY_PREVIOUS, /* Prev. Ch on Zenith, SOURCE on Hauppauge */
1587 [ 0x24 ] = KEY_RESTART,
1588 [ 0x25 ] = KEY_OK,
1589 [ 0x26 ] = KEY_CYCLEWINDOWS, /* MINIMIZE on Hauppauge */
1590 [ 0x28 ] = KEY_ENTER, /* VCR mode on Zenith */
1591 [ 0x29 ] = KEY_PAUSE,
1592 [ 0x2b ] = KEY_RIGHT,
1593 [ 0x2c ] = KEY_LEFT,
1594 [ 0x2e ] = KEY_MENU, /* FULL SCREEN on Hauppauge */
1595 [ 0x30 ] = KEY_SLOW,
1596 [ 0x31 ] = KEY_PREVIOUS, /* VCR mode on Zenith */
1597 [ 0x32 ] = KEY_REWIND,
1598 [ 0x34 ] = KEY_FASTFORWARD,
1599 [ 0x35 ] = KEY_PLAY,
1600 [ 0x36 ] = KEY_STOP,
1601 [ 0x37 ] = KEY_RECORD,
1602 [ 0x38 ] = KEY_TUNER, /* TV/VCR on Zenith */
1603 [ 0x3a ] = KEY_C,
1604 [ 0x3c ] = KEY_EXIT,
1605 [ 0x3d ] = KEY_POWER2,
1606 [ 0x3e ] = KEY_TUNER,
1607};
1608
1609EXPORT_SYMBOL_GPL(ir_codes_budget_ci_old);
diff --git a/drivers/media/common/saa7146_i2c.c b/drivers/media/common/saa7146_i2c.c
index 5297a365c928..8c85efc26527 100644
--- a/drivers/media/common/saa7146_i2c.c
+++ b/drivers/media/common/saa7146_i2c.c
@@ -189,13 +189,21 @@ static int saa7146_i2c_writeout(struct saa7146_dev *dev, u32* dword, int short_d
189 saa7146_write(dev, I2C_TRANSFER, *dword); 189 saa7146_write(dev, I2C_TRANSFER, *dword);
190 190
191 dev->i2c_op = 1; 191 dev->i2c_op = 1;
192 SAA7146_ISR_CLEAR(dev, MASK_16|MASK_17);
192 SAA7146_IER_ENABLE(dev, MASK_16|MASK_17); 193 SAA7146_IER_ENABLE(dev, MASK_16|MASK_17);
193 saa7146_write(dev, MC2, (MASK_00 | MASK_16)); 194 saa7146_write(dev, MC2, (MASK_00 | MASK_16));
194 195
195 wait_event_interruptible(dev->i2c_wq, dev->i2c_op == 0); 196 timeout = HZ/100 + 1; /* 10ms */
196 if (signal_pending (current)) { 197 timeout = wait_event_interruptible_timeout(dev->i2c_wq, dev->i2c_op == 0, timeout);
197 /* a signal arrived */ 198 if (timeout == -ERESTARTSYS || dev->i2c_op) {
198 return -ERESTARTSYS; 199 SAA7146_IER_DISABLE(dev, MASK_16|MASK_17);
200 SAA7146_ISR_CLEAR(dev, MASK_16|MASK_17);
201 if (timeout == -ERESTARTSYS)
202 /* a signal arrived */
203 return -ERESTARTSYS;
204
205 printk(KERN_WARNING "saa7146_i2c_writeout: timed out waiting for end of xfer\n");
206 return -EIO;
199 } 207 }
200 status = saa7146_read(dev, I2C_STATUS); 208 status = saa7146_read(dev, I2C_STATUS);
201 } else { 209 } else {
diff --git a/drivers/media/dvb/b2c2/Kconfig b/drivers/media/dvb/b2c2/Kconfig
index a0dcd59da76e..79875958930e 100644
--- a/drivers/media/dvb/b2c2/Kconfig
+++ b/drivers/media/dvb/b2c2/Kconfig
@@ -9,6 +9,7 @@ config DVB_B2C2_FLEXCOP
9 select DVB_STV0297 if !DVB_FE_CUSTOMISE 9 select DVB_STV0297 if !DVB_FE_CUSTOMISE
10 select DVB_BCM3510 if !DVB_FE_CUSTOMISE 10 select DVB_BCM3510 if !DVB_FE_CUSTOMISE
11 select DVB_LGDT330X if !DVB_FE_CUSTOMISE 11 select DVB_LGDT330X if !DVB_FE_CUSTOMISE
12 select DVB_TUNER_LGH06XF if !DVB_FE_CUSTOMISE
12 help 13 help
13 Support for the digital TV receiver chip made by B2C2 Inc. included in 14 Support for the digital TV receiver chip made by B2C2 Inc. included in
14 Technisats PCI cards and USB boxes. 15 Technisats PCI cards and USB boxes.
diff --git a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
index b8ba87863457..c2b35e366242 100644
--- a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
+++ b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
@@ -14,7 +14,7 @@
14#include "stv0297.h" 14#include "stv0297.h"
15#include "mt312.h" 15#include "mt312.h"
16#include "lgdt330x.h" 16#include "lgdt330x.h"
17#include "lg_h06xf.h" 17#include "lgh06xf.h"
18#include "dvb-pll.h" 18#include "dvb-pll.h"
19 19
20/* lnb control */ 20/* lnb control */
@@ -303,12 +303,6 @@ static int flexcop_fe_request_firmware(struct dvb_frontend* fe, const struct fir
303 return request_firmware(fw, name, fc->dev); 303 return request_firmware(fw, name, fc->dev);
304} 304}
305 305
306static int lgdt3303_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters *params)
307{
308 struct flexcop_device *fc = fe->dvb->priv;
309 return lg_h06xf_pll_set(fe, &fc->i2c_adap, params);
310}
311
312static struct lgdt330x_config air2pc_atsc_hd5000_config = { 306static struct lgdt330x_config air2pc_atsc_hd5000_config = {
313 .demod_address = 0x59, 307 .demod_address = 0x59,
314 .demod_chip = LGDT3303, 308 .demod_chip = LGDT3303,
@@ -533,7 +527,7 @@ int flexcop_frontend_init(struct flexcop_device *fc)
533 /* try the air atsc 3nd generation (lgdt3303) */ 527 /* try the air atsc 3nd generation (lgdt3303) */
534 if ((fc->fe = dvb_attach(lgdt330x_attach, &air2pc_atsc_hd5000_config, &fc->i2c_adap)) != NULL) { 528 if ((fc->fe = dvb_attach(lgdt330x_attach, &air2pc_atsc_hd5000_config, &fc->i2c_adap)) != NULL) {
535 fc->dev_type = FC_AIR_ATSC3; 529 fc->dev_type = FC_AIR_ATSC3;
536 fc->fe->ops.tuner_ops.set_params = lgdt3303_tuner_set_params; 530 dvb_attach(lgh06xf_attach, fc->fe, &fc->i2c_adap);
537 info("found the lgdt3303 at i2c address: 0x%02x",air2pc_atsc_hd5000_config.demod_address); 531 info("found the lgdt3303 at i2c address: 0x%02x",air2pc_atsc_hd5000_config.demod_address);
538 } else 532 } else
539 /* try the air atsc 1nd generation (bcm3510)/panasonic ct10s */ 533 /* try the air atsc 1nd generation (bcm3510)/panasonic ct10s */
diff --git a/drivers/media/dvb/bt8xx/Kconfig b/drivers/media/dvb/bt8xx/Kconfig
index ae2ff5dc238d..dd66b60fbc98 100644
--- a/drivers/media/dvb/bt8xx/Kconfig
+++ b/drivers/media/dvb/bt8xx/Kconfig
@@ -1,13 +1,13 @@
1config DVB_BT8XX 1config DVB_BT8XX
2 tristate "BT8xx based PCI cards" 2 tristate "BT8xx based PCI cards"
3 depends on DVB_CORE && PCI && I2C && VIDEO_BT848 3 depends on DVB_CORE && PCI && I2C && VIDEO_BT848
4 select DVB_PLL
5 select DVB_MT352 if !DVB_FE_CUSTOMISE 4 select DVB_MT352 if !DVB_FE_CUSTOMISE
6 select DVB_SP887X if !DVB_FE_CUSTOMISE 5 select DVB_SP887X if !DVB_FE_CUSTOMISE
7 select DVB_NXT6000 if !DVB_FE_CUSTOMISE 6 select DVB_NXT6000 if !DVB_FE_CUSTOMISE
8 select DVB_CX24110 if !DVB_FE_CUSTOMISE 7 select DVB_CX24110 if !DVB_FE_CUSTOMISE
9 select DVB_OR51211 if !DVB_FE_CUSTOMISE 8 select DVB_OR51211 if !DVB_FE_CUSTOMISE
10 select DVB_LGDT330X if !DVB_FE_CUSTOMISE 9 select DVB_LGDT330X if !DVB_FE_CUSTOMISE
10 select DVB_TUNER_LGH06XF if !DVB_FE_CUSTOMISE
11 select DVB_ZL10353 if !DVB_FE_CUSTOMISE 11 select DVB_ZL10353 if !DVB_FE_CUSTOMISE
12 select FW_LOADER 12 select FW_LOADER
13 help 13 help
diff --git a/drivers/media/dvb/bt8xx/dst_ca.c b/drivers/media/dvb/bt8xx/dst_ca.c
index 240ad084fa78..50bc32a8bd55 100644
--- a/drivers/media/dvb/bt8xx/dst_ca.c
+++ b/drivers/media/dvb/bt8xx/dst_ca.c
@@ -480,7 +480,7 @@ static int ca_send_message(struct dst_state *state, struct ca_msg *p_ca_message,
480 struct ca_msg *hw_buffer; 480 struct ca_msg *hw_buffer;
481 int result = 0; 481 int result = 0;
482 482
483 if ((hw_buffer = (struct ca_msg *) kmalloc(sizeof (struct ca_msg), GFP_KERNEL)) == NULL) { 483 if ((hw_buffer = kmalloc(sizeof (struct ca_msg), GFP_KERNEL)) == NULL) {
484 dprintk(verbose, DST_CA_ERROR, 1, " Memory allocation failure"); 484 dprintk(verbose, DST_CA_ERROR, 1, " Memory allocation failure");
485 return -ENOMEM; 485 return -ENOMEM;
486 } 486 }
diff --git a/drivers/media/dvb/bt8xx/dvb-bt8xx.c b/drivers/media/dvb/bt8xx/dvb-bt8xx.c
index 14e69a736eda..3e35931af35d 100644
--- a/drivers/media/dvb/bt8xx/dvb-bt8xx.c
+++ b/drivers/media/dvb/bt8xx/dvb-bt8xx.c
@@ -34,7 +34,6 @@
34#include "dvb_frontend.h" 34#include "dvb_frontend.h"
35#include "dvb-bt8xx.h" 35#include "dvb-bt8xx.h"
36#include "bt878.h" 36#include "bt878.h"
37#include "dvb-pll.h"
38 37
39static int debug; 38static int debug;
40 39
@@ -568,12 +567,6 @@ static struct mt352_config digitv_alps_tded4_config = {
568 .demod_init = digitv_alps_tded4_demod_init, 567 .demod_init = digitv_alps_tded4_demod_init,
569}; 568};
570 569
571static int tdvs_tua6034_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters* params)
572{
573 struct dvb_bt8xx_card *card = (struct dvb_bt8xx_card *) fe->dvb->priv;
574 return lg_h06xf_pll_set(fe, card->i2c_adapter, params);
575}
576
577static struct lgdt330x_config tdvs_tua6034_config = { 570static struct lgdt330x_config tdvs_tua6034_config = {
578 .demod_address = 0x0e, 571 .demod_address = 0x0e,
579 .demod_chip = LGDT3303, 572 .demod_chip = LGDT3303,
@@ -616,7 +609,7 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
616 lgdt330x_reset(card); 609 lgdt330x_reset(card);
617 card->fe = dvb_attach(lgdt330x_attach, &tdvs_tua6034_config, card->i2c_adapter); 610 card->fe = dvb_attach(lgdt330x_attach, &tdvs_tua6034_config, card->i2c_adapter);
618 if (card->fe != NULL) { 611 if (card->fe != NULL) {
619 card->fe->ops.tuner_ops.set_params = tdvs_tua6034_tuner_set_params; 612 dvb_attach(lgh06xf_attach, card->fe, card->i2c_adapter);
620 dprintk ("dvb_bt8xx: lgdt330x detected\n"); 613 dprintk ("dvb_bt8xx: lgdt330x detected\n");
621 } 614 }
622 break; 615 break;
@@ -664,7 +657,7 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
664 657
665 case BTTV_BOARD_TWINHAN_DST: 658 case BTTV_BOARD_TWINHAN_DST:
666 /* DST is not a frontend driver !!! */ 659 /* DST is not a frontend driver !!! */
667 state = (struct dst_state *) kmalloc(sizeof (struct dst_state), GFP_KERNEL); 660 state = kmalloc(sizeof (struct dst_state), GFP_KERNEL);
668 if (!state) { 661 if (!state) {
669 printk("dvb_bt8xx: No memory\n"); 662 printk("dvb_bt8xx: No memory\n");
670 break; 663 break;
diff --git a/drivers/media/dvb/bt8xx/dvb-bt8xx.h b/drivers/media/dvb/bt8xx/dvb-bt8xx.h
index 4745a9017a19..e75f4173c059 100644
--- a/drivers/media/dvb/bt8xx/dvb-bt8xx.h
+++ b/drivers/media/dvb/bt8xx/dvb-bt8xx.h
@@ -37,7 +37,7 @@
37#include "cx24110.h" 37#include "cx24110.h"
38#include "or51211.h" 38#include "or51211.h"
39#include "lgdt330x.h" 39#include "lgdt330x.h"
40#include "lg_h06xf.h" 40#include "lgh06xf.h"
41#include "zl10353.h" 41#include "zl10353.h"
42 42
43struct dvb_bt8xx_card { 43struct dvb_bt8xx_card {
diff --git a/drivers/media/dvb/cinergyT2/cinergyT2.c b/drivers/media/dvb/cinergyT2/cinergyT2.c
index 9123147e376f..d64b96cb0c46 100644
--- a/drivers/media/dvb/cinergyT2/cinergyT2.c
+++ b/drivers/media/dvb/cinergyT2/cinergyT2.c
@@ -746,6 +746,7 @@ static void cinergyt2_query_rc (struct work_struct *work)
746 dprintk(1, "rc_input_event=%d Up\n", cinergyt2->rc_input_event); 746 dprintk(1, "rc_input_event=%d Up\n", cinergyt2->rc_input_event);
747 input_report_key(cinergyt2->rc_input_dev, 747 input_report_key(cinergyt2->rc_input_dev,
748 cinergyt2->rc_input_event, 0); 748 cinergyt2->rc_input_event, 0);
749 input_sync(cinergyt2->rc_input_dev);
749 cinergyt2->rc_input_event = KEY_MAX; 750 cinergyt2->rc_input_event = KEY_MAX;
750 } 751 }
751 cinergyt2->rc_last_code = ~0; 752 cinergyt2->rc_last_code = ~0;
@@ -783,6 +784,7 @@ static void cinergyt2_query_rc (struct work_struct *work)
783 dprintk(1, "rc_input_event=%d\n", cinergyt2->rc_input_event); 784 dprintk(1, "rc_input_event=%d\n", cinergyt2->rc_input_event);
784 input_report_key(cinergyt2->rc_input_dev, 785 input_report_key(cinergyt2->rc_input_dev,
785 cinergyt2->rc_input_event, 1); 786 cinergyt2->rc_input_event, 1);
787 input_sync(cinergyt2->rc_input_dev);
786 cinergyt2->rc_last_code = rc_events[n].value; 788 cinergyt2->rc_last_code = rc_events[n].value;
787 } 789 }
788 } 790 }
@@ -798,8 +800,9 @@ static int cinergyt2_register_rc(struct cinergyt2 *cinergyt2)
798{ 800{
799 struct input_dev *input_dev; 801 struct input_dev *input_dev;
800 int i; 802 int i;
803 int err;
801 804
802 cinergyt2->rc_input_dev = input_dev = input_allocate_device(); 805 input_dev = input_allocate_device();
803 if (!input_dev) 806 if (!input_dev)
804 return -ENOMEM; 807 return -ENOMEM;
805 808
@@ -817,7 +820,13 @@ static int cinergyt2_register_rc(struct cinergyt2 *cinergyt2)
817 input_dev->keycodesize = 0; 820 input_dev->keycodesize = 0;
818 input_dev->keycodemax = 0; 821 input_dev->keycodemax = 0;
819 822
820 input_register_device(cinergyt2->rc_input_dev); 823 err = input_register_device(input_dev);
824 if (err) {
825 input_free_device(input_dev);
826 return err;
827 }
828
829 cinergyt2->rc_input_dev = input_dev;
821 schedule_delayed_work(&cinergyt2->rc_query_work, HZ/2); 830 schedule_delayed_work(&cinergyt2->rc_query_work, HZ/2);
822 831
823 return 0; 832 return 0;
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index a263b3f3c21d..ad52143602cd 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -69,6 +69,8 @@ config DVB_USB_DIBUSB_MC
69config DVB_USB_DIB0700 69config DVB_USB_DIB0700
70 tristate "DiBcom DiB0700 USB DVB devices (see help for supported devices)" 70 tristate "DiBcom DiB0700 USB DVB devices (see help for supported devices)"
71 depends on DVB_USB 71 depends on DVB_USB
72 select DVB_DIB7000P
73 select DVB_DIB7000M
72 select DVB_DIB3000MC 74 select DVB_DIB3000MC
73 select DVB_TUNER_MT2060 if !DVB_FE_CUSTOMISE 75 select DVB_TUNER_MT2060 if !DVB_FE_CUSTOMISE
74 help 76 help
@@ -96,6 +98,7 @@ config DVB_USB_CXUSB
96 depends on DVB_USB 98 depends on DVB_USB
97 select DVB_CX22702 if !DVB_FE_CUSTOMISE 99 select DVB_CX22702 if !DVB_FE_CUSTOMISE
98 select DVB_LGDT330X if !DVB_FE_CUSTOMISE 100 select DVB_LGDT330X if !DVB_FE_CUSTOMISE
101 select DVB_TUNER_LGH06XF if !DVB_FE_CUSTOMISE
99 select DVB_MT352 if !DVB_FE_CUSTOMISE 102 select DVB_MT352 if !DVB_FE_CUSTOMISE
100 select DVB_ZL10353 if !DVB_FE_CUSTOMISE 103 select DVB_ZL10353 if !DVB_FE_CUSTOMISE
101 help 104 help
@@ -157,6 +160,17 @@ config DVB_USB_NOVA_T_USB2
157 help 160 help
158 Say Y here to support the Hauppauge WinTV-NOVA-T usb2 DVB-T USB2.0 receiver. 161 Say Y here to support the Hauppauge WinTV-NOVA-T usb2 DVB-T USB2.0 receiver.
159 162
163config DVB_USB_TTUSB2
164 tristate "Pinnacle 400e DVB-S USB2.0 support"
165 depends on DVB_USB
166 select DVB_TDA10086 if !DVB_FE_CUSTOMISE
167 select DVB_LNBP21 if !DVB_FE_CUSTOMISE
168 select DVB_TDA826X if !DVB_FE_CUSTOMISE
169 help
170 Say Y here to support the Pinnacle 400e DVB-S USB2.0 receiver. The
171 firmware protocol used by this module is similar to the one used by the
172 old ttusb-driver - that's why the module is called dvb-usb-ttusb2.ko.
173
160config DVB_USB_DTT200U 174config DVB_USB_DTT200U
161 tristate "WideView WT-200U and WT-220U (pen) DVB-T USB2.0 support (Yakumo/Hama/Typhoon/Yuan)" 175 tristate "WideView WT-200U and WT-220U (pen) DVB-T USB2.0 support (Yakumo/Hama/Typhoon/Yuan)"
162 depends on DVB_USB 176 depends on DVB_USB
diff --git a/drivers/media/dvb/dvb-usb/Makefile b/drivers/media/dvb/dvb-usb/Makefile
index e239107998e5..154d593bbb02 100644
--- a/drivers/media/dvb/dvb-usb/Makefile
+++ b/drivers/media/dvb/dvb-usb/Makefile
@@ -36,6 +36,9 @@ obj-$(CONFIG_DVB_USB_DIGITV) += dvb-usb-digitv.o
36dvb-usb-cxusb-objs = cxusb.o 36dvb-usb-cxusb-objs = cxusb.o
37obj-$(CONFIG_DVB_USB_CXUSB) += dvb-usb-cxusb.o 37obj-$(CONFIG_DVB_USB_CXUSB) += dvb-usb-cxusb.o
38 38
39dvb-usb-ttusb2-objs = ttusb2.o
40obj-$(CONFIG_DVB_USB_TTUSB2) += dvb-usb-ttusb2.o
41
39dvb-usb-dib0700-objs = dib0700_core.o dib0700_devices.o 42dvb-usb-dib0700-objs = dib0700_core.o dib0700_devices.o
40obj-$(CONFIG_DVB_USB_DIB0700) += dvb-usb-dib0700.o 43obj-$(CONFIG_DVB_USB_DIB0700) += dvb-usb-dib0700.o
41 44
diff --git a/drivers/media/dvb/dvb-usb/a800.c b/drivers/media/dvb/dvb-usb/a800.c
index 2ed3eb62d787..a6c5f19f680d 100644
--- a/drivers/media/dvb/dvb-usb/a800.c
+++ b/drivers/media/dvb/dvb-usb/a800.c
@@ -116,24 +116,24 @@ static struct dvb_usb_device_properties a800_properties = {
116 { 116 {
117 .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, 117 .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
118 .pid_filter_count = 32, 118 .pid_filter_count = 32,
119 .streaming_ctrl = dibusb2_0_streaming_ctrl, 119 .streaming_ctrl = dibusb2_0_streaming_ctrl,
120 .pid_filter = dibusb_pid_filter, 120 .pid_filter = dibusb_pid_filter,
121 .pid_filter_ctrl = dibusb_pid_filter_ctrl, 121 .pid_filter_ctrl = dibusb_pid_filter_ctrl,
122 122
123 .frontend_attach = dibusb_dib3000mc_frontend_attach, 123 .frontend_attach = dibusb_dib3000mc_frontend_attach,
124 .tuner_attach = dibusb_dib3000mc_tuner_attach, 124 .tuner_attach = dibusb_dib3000mc_tuner_attach,
125 125
126 /* parameter for the MPEG2-data transfer */ 126 /* parameter for the MPEG2-data transfer */
127 .stream = { 127 .stream = {
128 .type = USB_BULK, 128 .type = USB_BULK,
129 .count = 7, 129 .count = 7,
130 .endpoint = 0x06, 130 .endpoint = 0x06,
131 .u = { 131 .u = {
132 .bulk = { 132 .bulk = {
133 .buffersize = 4096, 133 .buffersize = 4096,
134 } 134 }
135 } 135 }
136 }, 136 },
137 137
138 .size_of_priv = sizeof(struct dibusb_state), 138 .size_of_priv = sizeof(struct dibusb_state),
139 }, 139 },
diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
index 43f39069ef34..15d12fce34df 100644
--- a/drivers/media/dvb/dvb-usb/cxusb.c
+++ b/drivers/media/dvb/dvb-usb/cxusb.c
@@ -14,12 +14,12 @@
14 * TODO: Use the cx25840-driver for the analogue part 14 * TODO: Use the cx25840-driver for the analogue part
15 * 15 *
16 * Copyright (C) 2005 Patrick Boettcher (patrick.boettcher@desy.de) 16 * Copyright (C) 2005 Patrick Boettcher (patrick.boettcher@desy.de)
17 * Copyright (C) 2005 Michael Krufky (mkrufky@m1k.net) 17 * Copyright (C) 2006 Michael Krufky (mkrufky@linuxtv.org)
18 * Copyright (C) 2006 Chris Pascoe (c.pascoe@itee.uq.edu.au) 18 * Copyright (C) 2006 Chris Pascoe (c.pascoe@itee.uq.edu.au)
19 * 19 *
20 * This program is free software; you can redistribute it and/or modify it 20 * This program is free software; you can redistribute it and/or modify it
21 * under the terms of the GNU General Public License as published by the Free 21 * under the terms of the GNU General Public License as published by the Free
22 * Software Foundation, version 2. 22 * Software Foundation, version 2.
23 * 23 *
24 * see Documentation/dvb/README.dvb-usb for more information 24 * see Documentation/dvb/README.dvb-usb for more information
25 */ 25 */
@@ -27,29 +27,29 @@
27 27
28#include "cx22702.h" 28#include "cx22702.h"
29#include "lgdt330x.h" 29#include "lgdt330x.h"
30#include "lg_h06xf.h" 30#include "lgh06xf.h"
31#include "mt352.h" 31#include "mt352.h"
32#include "mt352_priv.h" 32#include "mt352_priv.h"
33#include "zl10353.h" 33#include "zl10353.h"
34 34
35/* debug */ 35/* debug */
36int dvb_usb_cxusb_debug; 36int dvb_usb_cxusb_debug;
37module_param_named(debug,dvb_usb_cxusb_debug, int, 0644); 37module_param_named(debug, dvb_usb_cxusb_debug, int, 0644);
38MODULE_PARM_DESC(debug, "set debugging level (1=rc (or-able))." DVB_USB_DEBUG_STATUS); 38MODULE_PARM_DESC(debug, "set debugging level (1=rc (or-able))." DVB_USB_DEBUG_STATUS);
39 39
40static int cxusb_ctrl_msg(struct dvb_usb_device *d, 40static int cxusb_ctrl_msg(struct dvb_usb_device *d,
41 u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen) 41 u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen)
42{ 42{
43 int wo = (rbuf == NULL || rlen == 0); /* write-only */ 43 int wo = (rbuf == NULL || rlen == 0); /* write-only */
44 u8 sndbuf[1+wlen]; 44 u8 sndbuf[1+wlen];
45 memset(sndbuf,0,1+wlen); 45 memset(sndbuf, 0, 1+wlen);
46 46
47 sndbuf[0] = cmd; 47 sndbuf[0] = cmd;
48 memcpy(&sndbuf[1],wbuf,wlen); 48 memcpy(&sndbuf[1], wbuf, wlen);
49 if (wo) 49 if (wo)
50 dvb_usb_generic_write(d,sndbuf,1+wlen); 50 dvb_usb_generic_write(d, sndbuf, 1+wlen);
51 else 51 else
52 dvb_usb_generic_rw(d,sndbuf,1+wlen,rbuf,rlen,0); 52 dvb_usb_generic_rw(d, sndbuf, 1+wlen, rbuf, rlen, 0);
53 53
54 return 0; 54 return 0;
55} 55}
@@ -58,14 +58,14 @@ static int cxusb_ctrl_msg(struct dvb_usb_device *d,
58static void cxusb_gpio_tuner(struct dvb_usb_device *d, int onoff) 58static void cxusb_gpio_tuner(struct dvb_usb_device *d, int onoff)
59{ 59{
60 struct cxusb_state *st = d->priv; 60 struct cxusb_state *st = d->priv;
61 u8 o[2],i; 61 u8 o[2], i;
62 62
63 if (st->gpio_write_state[GPIO_TUNER] == onoff) 63 if (st->gpio_write_state[GPIO_TUNER] == onoff)
64 return; 64 return;
65 65
66 o[0] = GPIO_TUNER; 66 o[0] = GPIO_TUNER;
67 o[1] = onoff; 67 o[1] = onoff;
68 cxusb_ctrl_msg(d,CMD_GPIO_WRITE,o,2,&i,1); 68 cxusb_ctrl_msg(d, CMD_GPIO_WRITE, o, 2, &i, 1);
69 69
70 if (i != 0x01) 70 if (i != 0x01)
71 deb_info("gpio_write failed.\n"); 71 deb_info("gpio_write failed.\n");
@@ -74,7 +74,8 @@ static void cxusb_gpio_tuner(struct dvb_usb_device *d, int onoff)
74} 74}
75 75
76/* I2C */ 76/* I2C */
77static int cxusb_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num) 77static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
78 int num)
78{ 79{
79 struct dvb_usb_device *d = i2c_get_adapdata(adap); 80 struct dvb_usb_device *d = i2c_get_adapdata(adap);
80 int i; 81 int i;
@@ -89,12 +90,12 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num)
89 90
90 if (d->udev->descriptor.idVendor == USB_VID_MEDION) 91 if (d->udev->descriptor.idVendor == USB_VID_MEDION)
91 switch (msg[i].addr) { 92 switch (msg[i].addr) {
92 case 0x63: 93 case 0x63:
93 cxusb_gpio_tuner(d,0); 94 cxusb_gpio_tuner(d, 0);
94 break; 95 break;
95 default: 96 default:
96 cxusb_gpio_tuner(d,1); 97 cxusb_gpio_tuner(d, 1);
97 break; 98 break;
98 } 99 }
99 100
100 /* read request */ 101 /* read request */
@@ -103,26 +104,27 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num)
103 obuf[0] = msg[i].len; 104 obuf[0] = msg[i].len;
104 obuf[1] = msg[i+1].len; 105 obuf[1] = msg[i+1].len;
105 obuf[2] = msg[i].addr; 106 obuf[2] = msg[i].addr;
106 memcpy(&obuf[3],msg[i].buf,msg[i].len); 107 memcpy(&obuf[3], msg[i].buf, msg[i].len);
107 108
108 if (cxusb_ctrl_msg(d, CMD_I2C_READ, 109 if (cxusb_ctrl_msg(d, CMD_I2C_READ,
109 obuf, 3+msg[i].len, 110 obuf, 3+msg[i].len,
110 ibuf, 1+msg[i+1].len) < 0) 111 ibuf, 1+msg[i+1].len) < 0)
111 break; 112 break;
112 113
113 if (ibuf[0] != 0x08) 114 if (ibuf[0] != 0x08)
114 deb_i2c("i2c read may have failed\n"); 115 deb_i2c("i2c read may have failed\n");
115 116
116 memcpy(msg[i+1].buf,&ibuf[1],msg[i+1].len); 117 memcpy(msg[i+1].buf, &ibuf[1], msg[i+1].len);
117 118
118 i++; 119 i++;
119 } else { /* write */ 120 } else { /* write */
120 u8 obuf[2+msg[i].len], ibuf; 121 u8 obuf[2+msg[i].len], ibuf;
121 obuf[0] = msg[i].addr; 122 obuf[0] = msg[i].addr;
122 obuf[1] = msg[i].len; 123 obuf[1] = msg[i].len;
123 memcpy(&obuf[2],msg[i].buf,msg[i].len); 124 memcpy(&obuf[2], msg[i].buf, msg[i].len);
124 125
125 if (cxusb_ctrl_msg(d,CMD_I2C_WRITE, obuf, 2+msg[i].len, &ibuf,1) < 0) 126 if (cxusb_ctrl_msg(d, CMD_I2C_WRITE, obuf,
127 2+msg[i].len, &ibuf,1) < 0)
126 break; 128 break;
127 if (ibuf != 0x08) 129 if (ibuf != 0x08)
128 deb_i2c("i2c write may have failed\n"); 130 deb_i2c("i2c write may have failed\n");
@@ -324,16 +326,8 @@ static int cxusb_mt352_demod_init(struct dvb_frontend* fe)
324 return 0; 326 return 0;
325} 327}
326 328
327static int cxusb_lgh064f_tuner_set_params(struct dvb_frontend *fe,
328 struct dvb_frontend_parameters *fep)
329{
330 struct dvb_usb_adapter *adap = fe->dvb->priv;
331 return lg_h06xf_pll_set(fe, &adap->dev->i2c_adap, fep);
332}
333
334static struct cx22702_config cxusb_cx22702_config = { 329static struct cx22702_config cxusb_cx22702_config = {
335 .demod_address = 0x63, 330 .demod_address = 0x63,
336
337 .output_mode = CX22702_PARALLEL_OUTPUT, 331 .output_mode = CX22702_PARALLEL_OUTPUT,
338}; 332};
339 333
@@ -374,31 +368,27 @@ static int cxusb_fmd1216me_tuner_attach(struct dvb_usb_adapter *adap)
374 368
375static int cxusb_dee1601_tuner_attach(struct dvb_usb_adapter *adap) 369static int cxusb_dee1601_tuner_attach(struct dvb_usb_adapter *adap)
376{ 370{
377 adap->pll_addr = 0x61; 371 dvb_attach(dvb_pll_attach, adap->fe, 0x61,
378 adap->pll_desc = &dvb_pll_thomson_dtt7579; 372 NULL, &dvb_pll_thomson_dtt7579);
379 adap->fe->ops.tuner_ops.calc_regs = dvb_usb_tuner_calc_regs;
380 return 0; 373 return 0;
381} 374}
382 375
383static int cxusb_lgz201_tuner_attach(struct dvb_usb_adapter *adap) 376static int cxusb_lgz201_tuner_attach(struct dvb_usb_adapter *adap)
384{ 377{
385 adap->pll_addr = 0x61; 378 dvb_attach(dvb_pll_attach, adap->fe, 0x61, NULL, &dvb_pll_lg_z201);
386 adap->pll_desc = &dvb_pll_lg_z201;
387 adap->fe->ops.tuner_ops.calc_regs = dvb_usb_tuner_calc_regs;
388 return 0; 379 return 0;
389} 380}
390 381
391static int cxusb_dtt7579_tuner_attach(struct dvb_usb_adapter *adap) 382static int cxusb_dtt7579_tuner_attach(struct dvb_usb_adapter *adap)
392{ 383{
393 adap->pll_addr = 0x60; 384 dvb_attach(dvb_pll_attach, adap->fe, 0x60,
394 adap->pll_desc = &dvb_pll_thomson_dtt7579; 385 NULL, &dvb_pll_thomson_dtt7579);
395 adap->fe->ops.tuner_ops.calc_regs = dvb_usb_tuner_calc_regs;
396 return 0; 386 return 0;
397} 387}
398 388
399static int cxusb_lgdt3303_tuner_attach(struct dvb_usb_adapter *adap) 389static int cxusb_lgh064f_tuner_attach(struct dvb_usb_adapter *adap)
400{ 390{
401 adap->fe->ops.tuner_ops.set_params = cxusb_lgh064f_tuner_set_params; 391 dvb_attach(lgh06xf_attach, adap->fe, &adap->dev->i2c_adap);
402 return 0; 392 return 0;
403} 393}
404 394
@@ -410,7 +400,8 @@ static int cxusb_cx22702_frontend_attach(struct dvb_usb_adapter *adap)
410 400
411 cxusb_ctrl_msg(adap->dev, CMD_DIGITAL, NULL, 0, &b, 1); 401 cxusb_ctrl_msg(adap->dev, CMD_DIGITAL, NULL, 0, &b, 1);
412 402
413 if ((adap->fe = dvb_attach(cx22702_attach, &cxusb_cx22702_config, &adap->dev->i2c_adap)) != NULL) 403 if ((adap->fe = dvb_attach(cx22702_attach, &cxusb_cx22702_config,
404 &adap->dev->i2c_adap)) != NULL)
414 return 0; 405 return 0;
415 406
416 return -EIO; 407 return -EIO;
@@ -423,7 +414,8 @@ static int cxusb_lgdt3303_frontend_attach(struct dvb_usb_adapter *adap)
423 414
424 cxusb_ctrl_msg(adap->dev, CMD_DIGITAL, NULL, 0, NULL, 0); 415 cxusb_ctrl_msg(adap->dev, CMD_DIGITAL, NULL, 0, NULL, 0);
425 416
426 if ((adap->fe = dvb_attach(lgdt330x_attach, &cxusb_lgdt3303_config, &adap->dev->i2c_adap)) != NULL) 417 if ((adap->fe = dvb_attach(lgdt330x_attach, &cxusb_lgdt3303_config,
418 &adap->dev->i2c_adap)) != NULL)
427 return 0; 419 return 0;
428 420
429 return -EIO; 421 return -EIO;
@@ -437,7 +429,8 @@ static int cxusb_mt352_frontend_attach(struct dvb_usb_adapter *adap)
437 429
438 cxusb_ctrl_msg(adap->dev, CMD_DIGITAL, NULL, 0, NULL, 0); 430 cxusb_ctrl_msg(adap->dev, CMD_DIGITAL, NULL, 0, NULL, 0);
439 431
440 if ((adap->fe = dvb_attach(mt352_attach, &cxusb_mt352_config, &adap->dev->i2c_adap)) != NULL) 432 if ((adap->fe = dvb_attach(mt352_attach, &cxusb_mt352_config,
433 &adap->dev->i2c_adap)) != NULL)
441 return 0; 434 return 0;
442 435
443 return -EIO; 436 return -EIO;
@@ -450,8 +443,11 @@ static int cxusb_dee1601_frontend_attach(struct dvb_usb_adapter *adap)
450 443
451 cxusb_ctrl_msg(adap->dev, CMD_DIGITAL, NULL, 0, NULL, 0); 444 cxusb_ctrl_msg(adap->dev, CMD_DIGITAL, NULL, 0, NULL, 0);
452 445
453 if (((adap->fe = dvb_attach(mt352_attach, &cxusb_dee1601_config, &adap->dev->i2c_adap)) != NULL) || 446 if (((adap->fe = dvb_attach(mt352_attach, &cxusb_dee1601_config,
454 ((adap->fe = dvb_attach(zl10353_attach, &cxusb_zl10353_dee1601_config, &adap->dev->i2c_adap)) != NULL)) 447 &adap->dev->i2c_adap)) != NULL) ||
448 ((adap->fe = dvb_attach(zl10353_attach,
449 &cxusb_zl10353_dee1601_config,
450 &adap->dev->i2c_adap)) != NULL))
455 return 0; 451 return 0;
456 452
457 return -EIO; 453 return -EIO;
@@ -463,7 +459,8 @@ static int cxusb_dee1601_frontend_attach(struct dvb_usb_adapter *adap)
463 */ 459 */
464 460
465#define BLUEBIRD_01_ID_OFFSET 6638 461#define BLUEBIRD_01_ID_OFFSET 6638
466static int bluebird_patch_dvico_firmware_download(struct usb_device *udev, const struct firmware *fw) 462static int bluebird_patch_dvico_firmware_download(struct usb_device *udev,
463 const struct firmware *fw)
467{ 464{
468 if (fw->size < BLUEBIRD_01_ID_OFFSET + 4) 465 if (fw->size < BLUEBIRD_01_ID_OFFSET + 4)
469 return -EINVAL; 466 return -EINVAL;
@@ -471,10 +468,12 @@ static int bluebird_patch_dvico_firmware_download(struct usb_device *udev, const
471 if (fw->data[BLUEBIRD_01_ID_OFFSET] == (USB_VID_DVICO & 0xff) && 468 if (fw->data[BLUEBIRD_01_ID_OFFSET] == (USB_VID_DVICO & 0xff) &&
472 fw->data[BLUEBIRD_01_ID_OFFSET + 1] == USB_VID_DVICO >> 8) { 469 fw->data[BLUEBIRD_01_ID_OFFSET + 1] == USB_VID_DVICO >> 8) {
473 470
474 fw->data[BLUEBIRD_01_ID_OFFSET + 2] = udev->descriptor.idProduct + 1; 471 fw->data[BLUEBIRD_01_ID_OFFSET + 2] =
475 fw->data[BLUEBIRD_01_ID_OFFSET + 3] = udev->descriptor.idProduct >> 8; 472 udev->descriptor.idProduct + 1;
473 fw->data[BLUEBIRD_01_ID_OFFSET + 3] =
474 udev->descriptor.idProduct >> 8;
476 475
477 return usb_cypress_load_firmware(udev,fw,CYPRESS_FX2); 476 return usb_cypress_load_firmware(udev, fw, CYPRESS_FX2);
478 } 477 }
479 478
480 return -EINVAL; 479 return -EINVAL;
@@ -488,7 +487,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_lgz201_properties;
488static struct dvb_usb_device_properties cxusb_bluebird_dtt7579_properties; 487static struct dvb_usb_device_properties cxusb_bluebird_dtt7579_properties;
489 488
490static int cxusb_probe(struct usb_interface *intf, 489static int cxusb_probe(struct usb_interface *intf,
491 const struct usb_device_id *id) 490 const struct usb_device_id *id)
492{ 491{
493 if (dvb_usb_device_init(intf,&cxusb_medion_properties,THIS_MODULE,NULL) == 0 || 492 if (dvb_usb_device_init(intf,&cxusb_medion_properties,THIS_MODULE,NULL) == 0 ||
494 dvb_usb_device_init(intf,&cxusb_bluebird_lgh064f_properties,THIS_MODULE,NULL) == 0 || 493 dvb_usb_device_init(intf,&cxusb_bluebird_lgh064f_properties,THIS_MODULE,NULL) == 0 ||
@@ -502,20 +501,20 @@ static int cxusb_probe(struct usb_interface *intf,
502} 501}
503 502
504static struct usb_device_id cxusb_table [] = { 503static struct usb_device_id cxusb_table [] = {
505 { USB_DEVICE(USB_VID_MEDION, USB_PID_MEDION_MD95700) }, 504 { USB_DEVICE(USB_VID_MEDION, USB_PID_MEDION_MD95700) },
506 { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_LG064F_COLD) }, 505 { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_LG064F_COLD) },
507 { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_LG064F_WARM) }, 506 { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_LG064F_WARM) },
508 { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DUAL_1_COLD) }, 507 { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DUAL_1_COLD) },
509 { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DUAL_1_WARM) }, 508 { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DUAL_1_WARM) },
510 { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_LGZ201_COLD) }, 509 { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_LGZ201_COLD) },
511 { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_LGZ201_WARM) }, 510 { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_LGZ201_WARM) },
512 { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_TH7579_COLD) }, 511 { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_TH7579_COLD) },
513 { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_TH7579_WARM) }, 512 { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_TH7579_WARM) },
514 { USB_DEVICE(USB_VID_DVICO, USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_COLD) }, 513 { USB_DEVICE(USB_VID_DVICO, USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_COLD) },
515 { USB_DEVICE(USB_VID_DVICO, USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_WARM) }, 514 { USB_DEVICE(USB_VID_DVICO, USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_WARM) },
516 { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DUAL_2_COLD) }, 515 { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DUAL_2_COLD) },
517 { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DUAL_2_WARM) }, 516 { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DUAL_2_WARM) },
518 {} /* Terminating entry */ 517 {} /* Terminating entry */
519}; 518};
520MODULE_DEVICE_TABLE (usb, cxusb_table); 519MODULE_DEVICE_TABLE (usb, cxusb_table);
521 520
@@ -529,20 +528,20 @@ static struct dvb_usb_device_properties cxusb_medion_properties = {
529 .num_adapters = 1, 528 .num_adapters = 1,
530 .adapter = { 529 .adapter = {
531 { 530 {
532 .streaming_ctrl = cxusb_streaming_ctrl, 531 .streaming_ctrl = cxusb_streaming_ctrl,
533 .frontend_attach = cxusb_cx22702_frontend_attach, 532 .frontend_attach = cxusb_cx22702_frontend_attach,
534 .tuner_attach = cxusb_fmd1216me_tuner_attach, 533 .tuner_attach = cxusb_fmd1216me_tuner_attach,
535 /* parameter for the MPEG2-data transfer */ 534 /* parameter for the MPEG2-data transfer */
536 .stream = { 535 .stream = {
537 .type = USB_BULK, 536 .type = USB_BULK,
538 .count = 5, 537 .count = 5,
539 .endpoint = 0x02, 538 .endpoint = 0x02,
540 .u = { 539 .u = {
541 .bulk = { 540 .bulk = {
542 .buffersize = 8192, 541 .buffersize = 8192,
543 } 542 }
544 } 543 }
545 }, 544 },
546 545
547 }, 546 },
548 }, 547 },
@@ -575,21 +574,21 @@ static struct dvb_usb_device_properties cxusb_bluebird_lgh064f_properties = {
575 .num_adapters = 1, 574 .num_adapters = 1,
576 .adapter = { 575 .adapter = {
577 { 576 {
578 .streaming_ctrl = cxusb_streaming_ctrl, 577 .streaming_ctrl = cxusb_streaming_ctrl,
579 .frontend_attach = cxusb_lgdt3303_frontend_attach, 578 .frontend_attach = cxusb_lgdt3303_frontend_attach,
580 .tuner_attach = cxusb_lgdt3303_tuner_attach, 579 .tuner_attach = cxusb_lgh064f_tuner_attach,
581 580
582 /* parameter for the MPEG2-data transfer */ 581 /* parameter for the MPEG2-data transfer */
583 .stream = { 582 .stream = {
584 .type = USB_BULK, 583 .type = USB_BULK,
585 .count = 5, 584 .count = 5,
586 .endpoint = 0x02, 585 .endpoint = 0x02,
587 .u = { 586 .u = {
588 .bulk = { 587 .bulk = {
589 .buffersize = 8192, 588 .buffersize = 8192,
590 } 589 }
591 } 590 }
592 }, 591 },
593 }, 592 },
594 }, 593 },
595 594
@@ -627,20 +626,20 @@ static struct dvb_usb_device_properties cxusb_bluebird_dee1601_properties = {
627 .num_adapters = 1, 626 .num_adapters = 1,
628 .adapter = { 627 .adapter = {
629 { 628 {
630 .streaming_ctrl = cxusb_streaming_ctrl, 629 .streaming_ctrl = cxusb_streaming_ctrl,
631 .frontend_attach = cxusb_dee1601_frontend_attach, 630 .frontend_attach = cxusb_dee1601_frontend_attach,
632 .tuner_attach = cxusb_dee1601_tuner_attach, 631 .tuner_attach = cxusb_dee1601_tuner_attach,
633 /* parameter for the MPEG2-data transfer */ 632 /* parameter for the MPEG2-data transfer */
634 .stream = { 633 .stream = {
635 .type = USB_BULK, 634 .type = USB_BULK,
636 .count = 5, 635 .count = 5,
637 .endpoint = 0x04, 636 .endpoint = 0x04,
638 .u = { 637 .u = {
639 .bulk = { 638 .bulk = {
640 .buffersize = 8192, 639 .buffersize = 8192,
641 } 640 }
642 } 641 }
643 }, 642 },
644 }, 643 },
645 }, 644 },
646 645
@@ -686,21 +685,21 @@ static struct dvb_usb_device_properties cxusb_bluebird_lgz201_properties = {
686 .num_adapters = 2, 685 .num_adapters = 2,
687 .adapter = { 686 .adapter = {
688 { 687 {
689 .streaming_ctrl = cxusb_streaming_ctrl, 688 .streaming_ctrl = cxusb_streaming_ctrl,
690 .frontend_attach = cxusb_mt352_frontend_attach, 689 .frontend_attach = cxusb_mt352_frontend_attach,
691 .tuner_attach = cxusb_lgz201_tuner_attach, 690 .tuner_attach = cxusb_lgz201_tuner_attach,
692 691
693 /* parameter for the MPEG2-data transfer */ 692 /* parameter for the MPEG2-data transfer */
694 .stream = { 693 .stream = {
695 .type = USB_BULK, 694 .type = USB_BULK,
696 .count = 5, 695 .count = 5,
697 .endpoint = 0x04, 696 .endpoint = 0x04,
698 .u = { 697 .u = {
699 .bulk = { 698 .bulk = {
700 .buffersize = 8192, 699 .buffersize = 8192,
701 } 700 }
702 } 701 }
703 }, 702 },
704 }, 703 },
705 }, 704 },
706 .power_ctrl = cxusb_bluebird_power_ctrl, 705 .power_ctrl = cxusb_bluebird_power_ctrl,
@@ -736,21 +735,21 @@ static struct dvb_usb_device_properties cxusb_bluebird_dtt7579_properties = {
736 .num_adapters = 1, 735 .num_adapters = 1,
737 .adapter = { 736 .adapter = {
738 { 737 {
739 .streaming_ctrl = cxusb_streaming_ctrl, 738 .streaming_ctrl = cxusb_streaming_ctrl,
740 .frontend_attach = cxusb_mt352_frontend_attach, 739 .frontend_attach = cxusb_mt352_frontend_attach,
741 .tuner_attach = cxusb_dtt7579_tuner_attach, 740 .tuner_attach = cxusb_dtt7579_tuner_attach,
742 741
743 /* parameter for the MPEG2-data transfer */ 742 /* parameter for the MPEG2-data transfer */
744 .stream = { 743 .stream = {
745 .type = USB_BULK, 744 .type = USB_BULK,
746 .count = 5, 745 .count = 5,
747 .endpoint = 0x04, 746 .endpoint = 0x04,
748 .u = { 747 .u = {
749 .bulk = { 748 .bulk = {
750 .buffersize = 8192, 749 .buffersize = 8192,
751 } 750 }
752 } 751 }
753 }, 752 },
754 }, 753 },
755 }, 754 },
756 .power_ctrl = cxusb_bluebird_power_ctrl, 755 .power_ctrl = cxusb_bluebird_power_ctrl,
@@ -776,7 +775,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_dtt7579_properties = {
776static struct usb_driver cxusb_driver = { 775static struct usb_driver cxusb_driver = {
777 .name = "dvb_usb_cxusb", 776 .name = "dvb_usb_cxusb",
778 .probe = cxusb_probe, 777 .probe = cxusb_probe,
779 .disconnect = dvb_usb_device_exit, 778 .disconnect = dvb_usb_device_exit,
780 .id_table = cxusb_table, 779 .id_table = cxusb_table,
781}; 780};
782 781
@@ -802,7 +801,7 @@ module_init (cxusb_module_init);
802module_exit (cxusb_module_exit); 801module_exit (cxusb_module_exit);
803 802
804MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>"); 803MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
805MODULE_AUTHOR("Michael Krufky <mkrufky@m1k.net>"); 804MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
806MODULE_AUTHOR("Chris Pascoe <c.pascoe@itee.uq.edu.au>"); 805MODULE_AUTHOR("Chris Pascoe <c.pascoe@itee.uq.edu.au>");
807MODULE_DESCRIPTION("Driver for Conexant USB2.0 hybrid reference design"); 806MODULE_DESCRIPTION("Driver for Conexant USB2.0 hybrid reference design");
808MODULE_VERSION("1.0-alpha"); 807MODULE_VERSION("1.0-alpha");
diff --git a/drivers/media/dvb/dvb-usb/dib0700.h b/drivers/media/dvb/dvb-usb/dib0700.h
index ac84347f9d4c..cda3adea24fb 100644
--- a/drivers/media/dvb/dvb-usb/dib0700.h
+++ b/drivers/media/dvb/dvb-usb/dib0700.h
@@ -24,18 +24,23 @@ extern int dvb_usb_dib0700_debug;
24#define REQUEST_I2C_WRITE 0x3 24#define REQUEST_I2C_WRITE 0x3
25#define REQUEST_POLL_RC 0x4 25#define REQUEST_POLL_RC 0x4
26#define REQUEST_JUMPRAM 0x8 26#define REQUEST_JUMPRAM 0x8
27#define REQUEST_SET_CLOCK 0xB
27#define REQUEST_SET_GPIO 0xC 28#define REQUEST_SET_GPIO 0xC
28#define REQUEST_ENABLE_VIDEO 0xF 29#define REQUEST_ENABLE_VIDEO 0xF
29 // 1 Byte: 4MSB(1 = enable streaming, 0 = disable streaming) 4LSB(Video Mode: 0 = MPEG2 188Bytes, 1 = Analog) 30 // 1 Byte: 4MSB(1 = enable streaming, 0 = disable streaming) 4LSB(Video Mode: 0 = MPEG2 188Bytes, 1 = Analog)
30 // 2 Byte: MPEG2 mode: 4MSB(1 = Master Mode, 0 = Slave Mode) 4LSB(Channel 1 = bit0, Channel 2 = bit1) 31 // 2 Byte: MPEG2 mode: 4MSB(1 = Master Mode, 0 = Slave Mode) 4LSB(Channel 1 = bit0, Channel 2 = bit1)
31 // 2 Byte: Analog mode: 4MSB(0 = 625 lines, 1 = 525 lines) 4LSB( " " ) 32 // 2 Byte: Analog mode: 4MSB(0 = 625 lines, 1 = 525 lines) 4LSB( " " )
33#define REQUEST_GET_VERSION 0x15
32 34
33struct dib0700_state { 35struct dib0700_state {
34 u8 channel_state; 36 u8 channel_state;
35 u16 mt2060_if1[2]; 37 u16 mt2060_if1[2];
38
39 u8 is_dib7000pc;
36}; 40};
37 41
38extern int dib0700_set_gpio(struct dvb_usb_device *, enum dib07x0_gpios gpio, u8 gpio_dir, u8 gpio_val); 42extern int dib0700_set_gpio(struct dvb_usb_device *, enum dib07x0_gpios gpio, u8 gpio_dir, u8 gpio_val);
43extern int dib0700_ctrl_clock(struct dvb_usb_device *d, u32 clk_MHz, u8 clock_out_gp3);
39extern int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw); 44extern int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw);
40extern int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff); 45extern int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff);
41extern struct i2c_algorithm dib0700_i2c_algo; 46extern struct i2c_algorithm dib0700_i2c_algo;
diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
index dca6c6985661..6a4d150784a6 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
@@ -135,14 +135,46 @@ struct i2c_algorithm dib0700_i2c_algo = {
135int dib0700_identify_state(struct usb_device *udev, struct dvb_usb_device_properties *props, 135int dib0700_identify_state(struct usb_device *udev, struct dvb_usb_device_properties *props,
136 struct dvb_usb_device_description **desc, int *cold) 136 struct dvb_usb_device_description **desc, int *cold)
137{ 137{
138 u8 buf[3] = { REQUEST_SET_GPIO, 4, (GPIO_IN << 7) | (0 << 6) }; // GPIO4 is save - used for I2C 138 u8 b[16];
139 *cold = usb_control_msg(udev, usb_sndctrlpipe(udev,0), 139 s16 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev,0),
140 buf[0], USB_TYPE_VENDOR | USB_DIR_OUT, 0, 0, buf, 3, USB_CTRL_GET_TIMEOUT) != 3; 140 REQUEST_GET_VERSION, USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, b, 16, USB_CTRL_GET_TIMEOUT);
141
142 deb_info("FW GET_VERSION length: %d\n",ret);
143
144 *cold = ret <= 0;
141 145
142 deb_info("cold: %d\n", *cold); 146 deb_info("cold: %d\n", *cold);
143 return 0; 147 return 0;
144} 148}
145 149
150static int dib0700_set_clock(struct dvb_usb_device *d, u8 en_pll,
151 u8 pll_src, u8 pll_range, u8 clock_gpio3, u16 pll_prediv,
152 u16 pll_loopdiv, u16 free_div, u16 dsuScaler)
153{
154 u8 b[10];
155 b[0] = REQUEST_SET_CLOCK;
156 b[1] = (en_pll << 7) | (pll_src << 6) | (pll_range << 5) | (clock_gpio3 << 4);
157 b[2] = (pll_prediv >> 8) & 0xff; // MSB
158 b[3] = pll_prediv & 0xff; // LSB
159 b[4] = (pll_loopdiv >> 8) & 0xff; // MSB
160 b[5] = pll_loopdiv & 0xff; // LSB
161 b[6] = (free_div >> 8) & 0xff; // MSB
162 b[7] = free_div & 0xff; // LSB
163 b[8] = (dsuScaler >> 8) & 0xff; // MSB
164 b[9] = dsuScaler & 0xff; // LSB
165
166 return dib0700_ctrl_wr(d, b, 10);
167}
168
169int dib0700_ctrl_clock(struct dvb_usb_device *d, u32 clk_MHz, u8 clock_out_gp3)
170{
171 switch (clk_MHz) {
172 case 72: dib0700_set_clock(d, 1, 0, 1, clock_out_gp3, 2, 24, 0, 0x4c); break;
173 default: return -EINVAL;
174 }
175 return 0;
176}
177
146static int dib0700_jumpram(struct usb_device *udev, u32 address) 178static int dib0700_jumpram(struct usb_device *udev, u32 address)
147{ 179{
148 int ret, actlen; 180 int ret, actlen;
@@ -197,7 +229,7 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw
197 /* start the firmware */ 229 /* start the firmware */
198 if ((ret = dib0700_jumpram(udev, 0x70000000)) == 0) { 230 if ((ret = dib0700_jumpram(udev, 0x70000000)) == 0) {
199 info("firmware started successfully."); 231 info("firmware started successfully.");
200 msleep(100); 232 msleep(500);
201 } 233 }
202 } else 234 } else
203 ret = -EIO; 235 ret = -EIO;
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index e473bfed226b..2208757d9017 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -9,6 +9,8 @@
9#include "dib0700.h" 9#include "dib0700.h"
10 10
11#include "dib3000mc.h" 11#include "dib3000mc.h"
12#include "dib7000m.h"
13#include "dib7000p.h"
12#include "mt2060.h" 14#include "mt2060.h"
13 15
14static int force_lna_activation; 16static int force_lna_activation;
@@ -95,37 +97,189 @@ static int bristol_tuner_attach(struct dvb_usb_adapter *adap)
95} 97}
96 98
97/* STK7700P: Hauppauge Nova-T Stick, AVerMedia Volar */ 99/* STK7700P: Hauppauge Nova-T Stick, AVerMedia Volar */
98/* 100static struct dibx000_agc_config stk7700p_7000m_mt2060_agc_config = {
99static struct mt2060_config stk7000p_mt2060_config = { 101 BAND_UHF | BAND_VHF, // band_caps
100 0x60 102
103 /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=5, P_agc_inv_pwm1=0, P_agc_inv_pwm2=0,
104 * P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=2, P_agc_write=0 */
105 (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (2 << 1) | (0 << 0), // setup
106
107 712, // inv_gain
108 41, // time_stabiliz
109
110 0, // alpha_level
111 118, // thlock
112
113 0, // wbd_inv
114 4095, // wbd_ref
115 0, // wbd_sel
116 0, // wbd_alpha
117
118 42598, // agc1_max
119 17694, // agc1_min
120 45875, // agc2_max
121 2621, // agc2_min
122 0, // agc1_pt1
123 76, // agc1_pt2
124 139, // agc1_pt3
125 52, // agc1_slope1
126 59, // agc1_slope2
127 107, // agc2_pt1
128 172, // agc2_pt2
129 57, // agc2_slope1
130 70, // agc2_slope2
131
132 21, // alpha_mant
133 25, // alpha_exp
134 28, // beta_mant
135 48, // beta_exp
136
137 1, // perform_agc_softsplit
138 { 0, // split_min
139 107, // split_max
140 51800, // global_split_min
141 24700 // global_split_max
142 },
143};
144
145static struct dibx000_agc_config stk7700p_7000p_mt2060_agc_config = {
146 BAND_UHF | BAND_VHF,
147
148 /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=5, P_agc_inv_pwm1=0, P_agc_inv_pwm2=0,
149 * P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=2, P_agc_write=0 */
150 (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (2 << 1) | (0 << 0), // setup
151
152 712, // inv_gain
153 41, // time_stabiliz
154
155 0, // alpha_level
156 118, // thlock
157
158 0, // wbd_inv
159 4095, // wbd_ref
160 0, // wbd_sel
161 0, // wbd_alpha
162
163 42598, // agc1_max
164 16384, // agc1_min
165 42598, // agc2_max
166 0, // agc2_min
167
168 0, // agc1_pt1
169 137, // agc1_pt2
170 255, // agc1_pt3
171
172 0, // agc1_slope1
173 255, // agc1_slope2
174
175 0, // agc2_pt1
176 0, // agc2_pt2
177
178 0, // agc2_slope1
179 41, // agc2_slope2
180
181 15, // alpha_mant
182 25, // alpha_exp
183
184 28, // beta_mant
185 48, // beta_exp
186
187 0, // perform_agc_softsplit
188};
189
190static struct dibx000_bandwidth_config stk7700p_pll_config = {
191 60000, 30000, // internal, sampling
192 1, 8, 3, 1, 0, // pll_cfg: prediv, ratio, range, reset, bypass
193 0, 0, 1, 1, 0, // misc: refdiv, bypclk_div, IO_CLK_en_core, ADClkSrc, modulo
194 (3 << 14) | (1 << 12) | (524 << 0), // sad_cfg: refsel, sel, freq_15k
195 60258167, // ifreq
196 20452225, // timf
197};
198
199static struct dib7000m_config stk7700p_dib7000m_config = {
200 .dvbt_mode = 1,
201 .output_mpeg2_in_188_bytes = 1,
202 .quartz_direct = 1,
203
204 .agc_config_count = 1,
205 .agc = &stk7700p_7000m_mt2060_agc_config,
206 .bw = &stk7700p_pll_config,
207
208 .gpio_dir = DIB7000M_GPIO_DEFAULT_DIRECTIONS,
209 .gpio_val = DIB7000M_GPIO_DEFAULT_VALUES,
210 .gpio_pwm_pos = DIB7000M_GPIO_DEFAULT_PWM_POS,
211};
212
213static struct dib7000p_config stk7700p_dib7000p_config = {
214 .output_mpeg2_in_188_bytes = 1,
215
216 .agc = &stk7700p_7000p_mt2060_agc_config,
217 .bw = &stk7700p_pll_config,
218
219 .gpio_dir = DIB7000M_GPIO_DEFAULT_DIRECTIONS,
220 .gpio_val = DIB7000M_GPIO_DEFAULT_VALUES,
221 .gpio_pwm_pos = DIB7000M_GPIO_DEFAULT_PWM_POS,
101}; 222};
102*/
103 223
104static int stk7700p_frontend_attach(struct dvb_usb_adapter *adap) 224static int stk7700p_frontend_attach(struct dvb_usb_adapter *adap)
105{ 225{
226 struct dib0700_state *st = adap->dev->priv;
106 /* unless there is no real power management in DVB - we leave the device on GPIO6 */ 227 /* unless there is no real power management in DVB - we leave the device on GPIO6 */
107 dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0); msleep(10); 228
108 dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(10); 229 dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0);
109 dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(10); 230 dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0); msleep(50);
231
232 dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(10);
233 dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1);
234
110 dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); msleep(10); 235 dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); msleep(10);
236 dib0700_ctrl_clock(adap->dev, 72, 1);
237 dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(100);
238
239 dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1);
240
241 st->mt2060_if1[0] = 1220;
242
243 if (dib7000pc_detection(&adap->dev->i2c_adap)) {
244 adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 18, &stk7700p_dib7000p_config);
245 st->is_dib7000pc = 1;
246 } else
247 adap->fe = dvb_attach(dib7000m_attach, &adap->dev->i2c_adap, 18, &stk7700p_dib7000m_config);
111 248
112// adap->fe = dib7000m_attach(&adap->dev->i2c_adap, &stk7700p_dib7000m_config, 18); 249 return adap->fe == NULL ? -ENODEV : 0;
113 return 0;
114} 250}
115 251
252static struct mt2060_config stk7700p_mt2060_config = {
253 0x60
254};
255
116static int stk7700p_tuner_attach(struct dvb_usb_adapter *adap) 256static int stk7700p_tuner_attach(struct dvb_usb_adapter *adap)
117{ 257{
118// tun_i2c = dib7000m_get_tuner_i2c_master(adap->fe, 1); 258 struct dib0700_state *st = adap->dev->priv;
119// return mt2060_attach(adap->fe, tun_i2c, &stk3000p_mt2060_config, if1); 259 struct i2c_adapter *tun_i2c;
120 return 0; 260
261 if (st->is_dib7000pc)
262 tun_i2c = dib7000p_get_i2c_master(adap->fe, DIBX000_I2C_INTERFACE_TUNER, 1);
263 else
264 tun_i2c = dib7000m_get_i2c_master(adap->fe, DIBX000_I2C_INTERFACE_TUNER, 1);
265
266 return dvb_attach(mt2060_attach, adap->fe, tun_i2c, &stk7700p_mt2060_config,
267 st->mt2060_if1[0]) == NULL ? -ENODEV : 0;
121} 268}
122 269
123struct usb_device_id dib0700_usb_id_table[] = { 270struct usb_device_id dib0700_usb_id_table[] = {
124 { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK7700P) }, 271 { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK7700P) },
272 { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK7700P_PC) },
273
125 { USB_DEVICE(USB_VID_HAUPPAUGE, USB_PID_HAUPPAUGE_NOVA_T_500) }, 274 { USB_DEVICE(USB_VID_HAUPPAUGE, USB_PID_HAUPPAUGE_NOVA_T_500) },
126 { USB_DEVICE(USB_VID_HAUPPAUGE, USB_PID_HAUPPAUGE_NOVA_T_500_2) }, 275 { USB_DEVICE(USB_VID_HAUPPAUGE, USB_PID_HAUPPAUGE_NOVA_T_500_2) },
127 { USB_DEVICE(USB_VID_HAUPPAUGE, USB_PID_HAUPPAUGE_NOVA_T_STICK) }, 276 { USB_DEVICE(USB_VID_HAUPPAUGE, USB_PID_HAUPPAUGE_NOVA_T_STICK) },
128 { USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_VOLAR) }, 277 { USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_VOLAR) },
278 { USB_DEVICE(USB_VID_COMPRO, USB_PID_COMPRO_VIDEOMATE_U500) },
279 { USB_DEVICE(USB_VID_UNIWILL, USB_PID_UNIWILL_STK7700P) },
280 { USB_DEVICE(USB_VID_LEADTEK, USB_PID_WINFAST_DTV_DONGLE_STK7700P) },
281 { USB_DEVICE(USB_VID_HAUPPAUGE, USB_PID_HAUPPAUGE_NOVA_T_STICK_2) },
282 { USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_VOLAR_2) },
129 { } /* Terminating entry */ 283 { } /* Terminating entry */
130}; 284};
131MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table); 285MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table);
@@ -167,20 +321,32 @@ struct dvb_usb_device_properties dib0700_devices[] = {
167 }, 321 },
168 }, 322 },
169 323
170 .num_device_descs = 3, 324 .num_device_descs = 6,
171 .devices = { 325 .devices = {
172 { "DiBcom STK7700P reference design", 326 { "DiBcom STK7700P reference design",
173 { &dib0700_usb_id_table[0], NULL }, 327 { &dib0700_usb_id_table[0], &dib0700_usb_id_table[1] },
174 { NULL }, 328 { NULL },
175 }, 329 },
176 { "Hauppauge Nova-T Stick", 330 { "Hauppauge Nova-T Stick",
177 { &dib0700_usb_id_table[3], NULL }, 331 { &dib0700_usb_id_table[4], &dib0700_usb_id_table[9], NULL },
178 { NULL }, 332 { NULL },
179 }, 333 },
180 { "AVerMedia AVerTV DVB-T Volar", 334 { "AVerMedia AVerTV DVB-T Volar",
181 { &dib0700_usb_id_table[4], NULL }, 335 { &dib0700_usb_id_table[5], &dib0700_usb_id_table[10] },
182 { NULL }, 336 { NULL },
183 }, 337 },
338 { "Compro Videomate U500",
339 { &dib0700_usb_id_table[6], NULL },
340 { NULL },
341 },
342 { "Uniwill STK7700P based (Hama and others)",
343 { &dib0700_usb_id_table[7], NULL },
344 { NULL },
345 },
346 { "Leadtek Winfast DTV Dongle (STK7700P based)",
347 { &dib0700_usb_id_table[8], NULL },
348 { NULL },
349 }
184 } 350 }
185 }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, 351 }, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
186 352
@@ -202,7 +368,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
202 .num_device_descs = 1, 368 .num_device_descs = 1,
203 .devices = { 369 .devices = {
204 { "Hauppauge Nova-T 500 Dual DVB-T", 370 { "Hauppauge Nova-T 500 Dual DVB-T",
205 { &dib0700_usb_id_table[1], &dib0700_usb_id_table[2], NULL }, 371 { &dib0700_usb_id_table[2], &dib0700_usb_id_table[3], NULL },
206 { NULL }, 372 { NULL },
207 }, 373 },
208 } 374 }
diff --git a/drivers/media/dvb/dvb-usb/dibusb-mb.c b/drivers/media/dvb/dvb-usb/dibusb-mb.c
index 4fe363e48352..7a6ae8f482e0 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-mb.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-mb.c
@@ -163,23 +163,23 @@ static struct dvb_usb_device_properties dibusb1_1_properties = {
163 .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, 163 .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
164 .pid_filter_count = 16, 164 .pid_filter_count = 16,
165 165
166 .streaming_ctrl = dibusb_streaming_ctrl, 166 .streaming_ctrl = dibusb_streaming_ctrl,
167 .pid_filter = dibusb_pid_filter, 167 .pid_filter = dibusb_pid_filter,
168 .pid_filter_ctrl = dibusb_pid_filter_ctrl, 168 .pid_filter_ctrl = dibusb_pid_filter_ctrl,
169 .frontend_attach = dibusb_dib3000mb_frontend_attach, 169 .frontend_attach = dibusb_dib3000mb_frontend_attach,
170 .tuner_attach = dibusb_tuner_probe_and_attach, 170 .tuner_attach = dibusb_tuner_probe_and_attach,
171 171
172 /* parameter for the MPEG2-data transfer */ 172 /* parameter for the MPEG2-data transfer */
173 .stream = { 173 .stream = {
174 .type = USB_BULK, 174 .type = USB_BULK,
175 .count = 7, 175 .count = 7,
176 .endpoint = 0x02, 176 .endpoint = 0x02,
177 .u = { 177 .u = {
178 .bulk = { 178 .bulk = {
179 .buffersize = 4096, 179 .buffersize = 4096,
180 } 180 }
181 } 181 }
182 }, 182 },
183 .size_of_priv = sizeof(struct dibusb_state), 183 .size_of_priv = sizeof(struct dibusb_state),
184 } 184 }
185 }, 185 },
@@ -248,23 +248,23 @@ static struct dvb_usb_device_properties dibusb1_1_an2235_properties = {
248 .caps = DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF | DVB_USB_ADAP_HAS_PID_FILTER, 248 .caps = DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF | DVB_USB_ADAP_HAS_PID_FILTER,
249 .pid_filter_count = 16, 249 .pid_filter_count = 16,
250 250
251 .streaming_ctrl = dibusb_streaming_ctrl, 251 .streaming_ctrl = dibusb_streaming_ctrl,
252 .pid_filter = dibusb_pid_filter, 252 .pid_filter = dibusb_pid_filter,
253 .pid_filter_ctrl = dibusb_pid_filter_ctrl, 253 .pid_filter_ctrl = dibusb_pid_filter_ctrl,
254 .frontend_attach = dibusb_dib3000mb_frontend_attach, 254 .frontend_attach = dibusb_dib3000mb_frontend_attach,
255 .tuner_attach = dibusb_tuner_probe_and_attach, 255 .tuner_attach = dibusb_tuner_probe_and_attach,
256 256
257 /* parameter for the MPEG2-data transfer */ 257 /* parameter for the MPEG2-data transfer */
258 .stream = { 258 .stream = {
259 .type = USB_BULK, 259 .type = USB_BULK,
260 .count = 7, 260 .count = 7,
261 .endpoint = 0x02, 261 .endpoint = 0x02,
262 .u = { 262 .u = {
263 .bulk = { 263 .bulk = {
264 .buffersize = 4096, 264 .buffersize = 4096,
265 } 265 }
266 } 266 }
267 }, 267 },
268 .size_of_priv = sizeof(struct dibusb_state), 268 .size_of_priv = sizeof(struct dibusb_state),
269 }, 269 },
270 }, 270 },
@@ -312,22 +312,23 @@ static struct dvb_usb_device_properties dibusb2_0b_properties = {
312 .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, 312 .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
313 .pid_filter_count = 16, 313 .pid_filter_count = 16,
314 314
315 .streaming_ctrl = dibusb2_0_streaming_ctrl, 315 .streaming_ctrl = dibusb2_0_streaming_ctrl,
316 .pid_filter = dibusb_pid_filter, 316 .pid_filter = dibusb_pid_filter,
317 .pid_filter_ctrl = dibusb_pid_filter_ctrl, 317 .pid_filter_ctrl = dibusb_pid_filter_ctrl,
318 .frontend_attach = dibusb_dib3000mb_frontend_attach, 318 .frontend_attach = dibusb_dib3000mb_frontend_attach,
319 .tuner_attach = dibusb_thomson_tuner_attach, 319 .tuner_attach = dibusb_thomson_tuner_attach,
320 /* parameter for the MPEG2-data transfer */ 320
321 /* parameter for the MPEG2-data transfer */
321 .stream = { 322 .stream = {
322 .type = USB_BULK, 323 .type = USB_BULK,
323 .count = 7, 324 .count = 7,
324 .endpoint = 0x06, 325 .endpoint = 0x06,
325 .u = { 326 .u = {
326 .bulk = { 327 .bulk = {
327 .buffersize = 4096, 328 .buffersize = 4096,
328 } 329 }
329 } 330 }
330 }, 331 },
331 .size_of_priv = sizeof(struct dibusb_state), 332 .size_of_priv = sizeof(struct dibusb_state),
332 } 333 }
333 }, 334 },
@@ -369,22 +370,22 @@ static struct dvb_usb_device_properties artec_t1_usb2_properties = {
369 .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, 370 .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
370 .pid_filter_count = 16, 371 .pid_filter_count = 16,
371 372
372 .streaming_ctrl = dibusb2_0_streaming_ctrl, 373 .streaming_ctrl = dibusb2_0_streaming_ctrl,
373 .pid_filter = dibusb_pid_filter, 374 .pid_filter = dibusb_pid_filter,
374 .pid_filter_ctrl = dibusb_pid_filter_ctrl, 375 .pid_filter_ctrl = dibusb_pid_filter_ctrl,
375 .frontend_attach = dibusb_dib3000mb_frontend_attach, 376 .frontend_attach = dibusb_dib3000mb_frontend_attach,
376 .tuner_attach = dibusb_tuner_probe_and_attach, 377 .tuner_attach = dibusb_tuner_probe_and_attach,
377 /* parameter for the MPEG2-data transfer */ 378 /* parameter for the MPEG2-data transfer */
378 .stream = { 379 .stream = {
379 .type = USB_BULK, 380 .type = USB_BULK,
380 .count = 7, 381 .count = 7,
381 .endpoint = 0x06, 382 .endpoint = 0x06,
382 .u = { 383 .u = {
383 .bulk = { 384 .bulk = {
384 .buffersize = 4096, 385 .buffersize = 4096,
385 } 386 }
386 } 387 }
387 }, 388 },
388 .size_of_priv = sizeof(struct dibusb_state), 389 .size_of_priv = sizeof(struct dibusb_state),
389 } 390 }
390 }, 391 },
diff --git a/drivers/media/dvb/dvb-usb/dibusb-mc.c b/drivers/media/dvb/dvb-usb/dibusb-mc.c
index a0fd37efc04b..e7ea3e753d6d 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-mc.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-mc.c
@@ -54,23 +54,23 @@ static struct dvb_usb_device_properties dibusb_mc_properties = {
54 { 54 {
55 .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, 55 .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
56 .pid_filter_count = 32, 56 .pid_filter_count = 32,
57 .streaming_ctrl = dibusb2_0_streaming_ctrl, 57 .streaming_ctrl = dibusb2_0_streaming_ctrl,
58 .pid_filter = dibusb_pid_filter, 58 .pid_filter = dibusb_pid_filter,
59 .pid_filter_ctrl = dibusb_pid_filter_ctrl, 59 .pid_filter_ctrl = dibusb_pid_filter_ctrl,
60 .frontend_attach = dibusb_dib3000mc_frontend_attach, 60 .frontend_attach = dibusb_dib3000mc_frontend_attach,
61 .tuner_attach = dibusb_dib3000mc_tuner_attach, 61 .tuner_attach = dibusb_dib3000mc_tuner_attach,
62 62
63 /* parameter for the MPEG2-data transfer */ 63 /* parameter for the MPEG2-data transfer */
64 .stream = { 64 .stream = {
65 .type = USB_BULK, 65 .type = USB_BULK,
66 .count = 7, 66 .count = 7,
67 .endpoint = 0x06, 67 .endpoint = 0x06,
68 .u = { 68 .u = {
69 .bulk = { 69 .bulk = {
70 .buffersize = 4096, 70 .buffersize = 4096,
71 } 71 }
72 } 72 }
73 }, 73 },
74 .size_of_priv = sizeof(struct dibusb_state), 74 .size_of_priv = sizeof(struct dibusb_state),
75 } 75 }
76 }, 76 },
diff --git a/drivers/media/dvb/dvb-usb/digitv.c b/drivers/media/dvb/dvb-usb/digitv.c
index 8fb34375c1fb..4a198d4755b0 100644
--- a/drivers/media/dvb/dvb-usb/digitv.c
+++ b/drivers/media/dvb/dvb-usb/digitv.c
@@ -274,20 +274,20 @@ static struct dvb_usb_device_properties digitv_properties = {
274 .num_adapters = 1, 274 .num_adapters = 1,
275 .adapter = { 275 .adapter = {
276 { 276 {
277 .frontend_attach = digitv_frontend_attach, 277 .frontend_attach = digitv_frontend_attach,
278 .tuner_attach = digitv_tuner_attach, 278 .tuner_attach = digitv_tuner_attach,
279 279
280 /* parameter for the MPEG2-data transfer */ 280 /* parameter for the MPEG2-data transfer */
281 .stream = { 281 .stream = {
282 .type = USB_BULK, 282 .type = USB_BULK,
283 .count = 7, 283 .count = 7,
284 .endpoint = 0x02, 284 .endpoint = 0x02,
285 .u = { 285 .u = {
286 .bulk = { 286 .bulk = {
287 .buffersize = 4096, 287 .buffersize = 4096,
288 } 288 }
289 } 289 }
290 }, 290 },
291 } 291 }
292 }, 292 },
293 .identify_state = digitv_identify_state, 293 .identify_state = digitv_identify_state,
diff --git a/drivers/media/dvb/dvb-usb/dtt200u.c b/drivers/media/dvb/dvb-usb/dtt200u.c
index fa43a41d753b..7dbe14321019 100644
--- a/drivers/media/dvb/dvb-usb/dtt200u.c
+++ b/drivers/media/dvb/dvb-usb/dtt200u.c
@@ -268,20 +268,20 @@ static struct dvb_usb_device_properties wt220u_zl0353_properties = {
268 .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_NEED_PID_FILTERING, 268 .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_NEED_PID_FILTERING,
269 .pid_filter_count = 15, 269 .pid_filter_count = 15,
270 270
271 .streaming_ctrl = dtt200u_streaming_ctrl, 271 .streaming_ctrl = dtt200u_streaming_ctrl,
272 .pid_filter = dtt200u_pid_filter, 272 .pid_filter = dtt200u_pid_filter,
273 .frontend_attach = dtt200u_frontend_attach, 273 .frontend_attach = dtt200u_frontend_attach,
274 /* parameter for the MPEG2-data transfer */ 274 /* parameter for the MPEG2-data transfer */
275 .stream = { 275 .stream = {
276 .type = USB_BULK, 276 .type = USB_BULK,
277 .count = 7, 277 .count = 7,
278 .endpoint = 0x02, 278 .endpoint = 0x02,
279 .u = { 279 .u = {
280 .bulk = { 280 .bulk = {
281 .buffersize = 4096, 281 .buffersize = 4096,
282 } 282 }
283 } 283 }
284 }, 284 },
285 } 285 }
286 }, 286 },
287 .power_ctrl = dtt200u_power_ctrl, 287 .power_ctrl = dtt200u_power_ctrl,
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
index 4d6b069536ce..299382dcb81d 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
@@ -33,6 +33,7 @@
33#define USB_VID_VISIONPLUS 0x13d3 33#define USB_VID_VISIONPLUS 0x13d3
34#define USB_VID_TWINHAN 0x1822 34#define USB_VID_TWINHAN 0x1822
35#define USB_VID_ULTIMA_ELECTRONIC 0x05d8 35#define USB_VID_ULTIMA_ELECTRONIC 0x05d8
36#define USB_VID_UNIWILL 0x1584
36#define USB_VID_WIDEVIEW 0x14aa 37#define USB_VID_WIDEVIEW 0x14aa
37 38
38/* Product IDs */ 39/* Product IDs */
@@ -46,6 +47,7 @@
46#define USB_PID_COMPRO_DVBU2000_WARM 0xd001 47#define USB_PID_COMPRO_DVBU2000_WARM 0xd001
47#define USB_PID_COMPRO_DVBU2000_UNK_COLD 0x010c 48#define USB_PID_COMPRO_DVBU2000_UNK_COLD 0x010c
48#define USB_PID_COMPRO_DVBU2000_UNK_WARM 0x010d 49#define USB_PID_COMPRO_DVBU2000_UNK_WARM 0x010d
50#define USB_PID_COMPRO_VIDEOMATE_U500 0x1e78
49#define USB_PID_DIBCOM_HOOK_DEFAULT 0x0064 51#define USB_PID_DIBCOM_HOOK_DEFAULT 0x0064
50#define USB_PID_DIBCOM_HOOK_DEFAULT_REENUM 0x0065 52#define USB_PID_DIBCOM_HOOK_DEFAULT_REENUM 0x0065
51#define USB_PID_DIBCOM_MOD3000_COLD 0x0bb8 53#define USB_PID_DIBCOM_MOD3000_COLD 0x0bb8
@@ -53,7 +55,9 @@
53#define USB_PID_DIBCOM_MOD3001_COLD 0x0bc6 55#define USB_PID_DIBCOM_MOD3001_COLD 0x0bc6
54#define USB_PID_DIBCOM_MOD3001_WARM 0x0bc7 56#define USB_PID_DIBCOM_MOD3001_WARM 0x0bc7
55#define USB_PID_DIBCOM_STK7700P 0x1e14 57#define USB_PID_DIBCOM_STK7700P 0x1e14
58#define USB_PID_DIBCOM_STK7700P_PC 0x1e78
56#define USB_PID_DIBCOM_ANCHOR_2135_COLD 0x2131 59#define USB_PID_DIBCOM_ANCHOR_2135_COLD 0x2131
60#define USB_PID_UNIWILL_STK7700P 0x6003
57#define USB_PID_GRANDTEC_DVBT_USB_COLD 0x0fa0 61#define USB_PID_GRANDTEC_DVBT_USB_COLD 0x0fa0
58#define USB_PID_GRANDTEC_DVBT_USB_WARM 0x0fa1 62#define USB_PID_GRANDTEC_DVBT_USB_WARM 0x0fa1
59#define USB_PID_KWORLD_VSTREAM_COLD 0x17de 63#define USB_PID_KWORLD_VSTREAM_COLD 0x17de
@@ -97,7 +101,9 @@
97#define USB_PID_HAUPPAUGE_NOVA_T_500 0x9941 101#define USB_PID_HAUPPAUGE_NOVA_T_500 0x9941
98#define USB_PID_HAUPPAUGE_NOVA_T_500_2 0x9950 102#define USB_PID_HAUPPAUGE_NOVA_T_500_2 0x9950
99#define USB_PID_HAUPPAUGE_NOVA_T_STICK 0x7050 103#define USB_PID_HAUPPAUGE_NOVA_T_STICK 0x7050
100#define USB_PID_AVERMEDIA_VOLAR 0x1234 104#define USB_PID_HAUPPAUGE_NOVA_T_STICK_2 0x7060
105#define USB_PID_AVERMEDIA_VOLAR 0xa807
106#define USB_PID_AVERMEDIA_VOLAR_2 0xb808
101#define USB_PID_NEBULA_DIGITV 0x0201 107#define USB_PID_NEBULA_DIGITV 0x0201
102#define USB_PID_DVICO_BLUEBIRD_LGDT 0xd820 108#define USB_PID_DVICO_BLUEBIRD_LGDT 0xd820
103#define USB_PID_DVICO_BLUEBIRD_LG064F_COLD 0xd500 109#define USB_PID_DVICO_BLUEBIRD_LG064F_COLD 0xd500
@@ -110,8 +116,8 @@
110#define USB_PID_DVICO_BLUEBIRD_DUAL_1_WARM 0xdb51 116#define USB_PID_DVICO_BLUEBIRD_DUAL_1_WARM 0xdb51
111#define USB_PID_DVICO_BLUEBIRD_DUAL_2_COLD 0xdb58 117#define USB_PID_DVICO_BLUEBIRD_DUAL_2_COLD 0xdb58
112#define USB_PID_DVICO_BLUEBIRD_DUAL_2_WARM 0xdb59 118#define USB_PID_DVICO_BLUEBIRD_DUAL_2_WARM 0xdb59
113#define USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_COLD 0xdb54 119#define USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_COLD 0xdb54
114#define USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_WARM 0xdb55 120#define USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_WARM 0xdb55
115#define USB_PID_MEDION_MD95700 0x0932 121#define USB_PID_MEDION_MD95700 0x0932
116#define USB_PID_KYE_DVB_T_COLD 0x701e 122#define USB_PID_KYE_DVB_T_COLD 0x701e
117#define USB_PID_KYE_DVB_T_WARM 0x701f 123#define USB_PID_KYE_DVB_T_WARM 0x701f
@@ -125,7 +131,9 @@
125#define USB_PID_GRANDTEC_DVBT_USB2_WARM 0x0bc7 131#define USB_PID_GRANDTEC_DVBT_USB2_WARM 0x0bc7
126#define USB_PID_WINFAST_DTV_DONGLE_COLD 0x6025 132#define USB_PID_WINFAST_DTV_DONGLE_COLD 0x6025
127#define USB_PID_WINFAST_DTV_DONGLE_WARM 0x6026 133#define USB_PID_WINFAST_DTV_DONGLE_WARM 0x6026
134#define USB_PID_WINFAST_DTV_DONGLE_STK7700P 0x6f00
128#define USB_PID_GENPIX_8PSK_COLD 0x0200 135#define USB_PID_GENPIX_8PSK_COLD 0x0200
129#define USB_PID_GENPIX_8PSK_WARM 0x0201 136#define USB_PID_GENPIX_8PSK_WARM 0x0201
130 137
138
131#endif 139#endif
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
index 794e4471561c..19ff5978bc91 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
@@ -90,7 +90,9 @@ schedule:
90 90
91int dvb_usb_remote_init(struct dvb_usb_device *d) 91int dvb_usb_remote_init(struct dvb_usb_device *d)
92{ 92{
93 struct input_dev *input_dev;
93 int i; 94 int i;
95 int err;
94 96
95 if (d->props.rc_key_map == NULL || 97 if (d->props.rc_key_map == NULL ||
96 d->props.rc_query == NULL || 98 d->props.rc_query == NULL ||
@@ -100,23 +102,24 @@ int dvb_usb_remote_init(struct dvb_usb_device *d)
100 usb_make_path(d->udev, d->rc_phys, sizeof(d->rc_phys)); 102 usb_make_path(d->udev, d->rc_phys, sizeof(d->rc_phys));
101 strlcat(d->rc_phys, "/ir0", sizeof(d->rc_phys)); 103 strlcat(d->rc_phys, "/ir0", sizeof(d->rc_phys));
102 104
103 d->rc_input_dev = input_allocate_device(); 105 input_dev = input_allocate_device();
104 if (!d->rc_input_dev) 106 if (!input_dev)
105 return -ENOMEM; 107 return -ENOMEM;
106 108
107 d->rc_input_dev->evbit[0] = BIT(EV_KEY); 109 input_dev->evbit[0] = BIT(EV_KEY);
108 d->rc_input_dev->keycodesize = sizeof(unsigned char); 110 input_dev->keycodesize = sizeof(unsigned char);
109 d->rc_input_dev->keycodemax = KEY_MAX; 111 input_dev->keycodemax = KEY_MAX;
110 d->rc_input_dev->name = "IR-receiver inside an USB DVB receiver"; 112 input_dev->name = "IR-receiver inside an USB DVB receiver";
111 d->rc_input_dev->phys = d->rc_phys; 113 input_dev->phys = d->rc_phys;
112 usb_to_input_id(d->udev, &d->rc_input_dev->id); 114 usb_to_input_id(d->udev, &input_dev->id);
113 d->rc_input_dev->cdev.dev = &d->udev->dev; 115 input_dev->cdev.dev = &d->udev->dev;
114 116
115 /* set the bits for the keys */ 117 /* set the bits for the keys */
116 deb_rc("key map size: %d\n", d->props.rc_key_map_size); 118 deb_rc("key map size: %d\n", d->props.rc_key_map_size);
117 for (i = 0; i < d->props.rc_key_map_size; i++) { 119 for (i = 0; i < d->props.rc_key_map_size; i++) {
118 deb_rc("setting bit for event %d item %d\n",d->props.rc_key_map[i].event, i); 120 deb_rc("setting bit for event %d item %d\n",
119 set_bit(d->props.rc_key_map[i].event, d->rc_input_dev->keybit); 121 d->props.rc_key_map[i].event, i);
122 set_bit(d->props.rc_key_map[i].event, input_dev->keybit);
120 } 123 }
121 124
122 /* Start the remote-control polling. */ 125 /* Start the remote-control polling. */
@@ -124,10 +127,16 @@ int dvb_usb_remote_init(struct dvb_usb_device *d)
124 d->props.rc_interval = 100; /* default */ 127 d->props.rc_interval = 100; /* default */
125 128
126 /* setting these two values to non-zero, we have to manage key repeats */ 129 /* setting these two values to non-zero, we have to manage key repeats */
127 d->rc_input_dev->rep[REP_PERIOD] = d->props.rc_interval; 130 input_dev->rep[REP_PERIOD] = d->props.rc_interval;
128 d->rc_input_dev->rep[REP_DELAY] = d->props.rc_interval + 150; 131 input_dev->rep[REP_DELAY] = d->props.rc_interval + 150;
129 132
130 input_register_device(d->rc_input_dev); 133 err = input_register_device(input_dev);
134 if (err) {
135 input_free_device(input_dev);
136 return err;
137 }
138
139 d->rc_input_dev = input_dev;
131 140
132 INIT_DELAYED_WORK(&d->rc_query_work, dvb_usb_read_remote_control); 141 INIT_DELAYED_WORK(&d->rc_query_work, dvb_usb_read_remote_control);
133 142
diff --git a/drivers/media/dvb/dvb-usb/gp8psk.c b/drivers/media/dvb/dvb-usb/gp8psk.c
index 7375eb20166d..518d67fca5e8 100644
--- a/drivers/media/dvb/dvb-usb/gp8psk.c
+++ b/drivers/media/dvb/dvb-usb/gp8psk.c
@@ -194,19 +194,19 @@ static struct dvb_usb_device_properties gp8psk_properties = {
194 .num_adapters = 1, 194 .num_adapters = 1,
195 .adapter = { 195 .adapter = {
196 { 196 {
197 .streaming_ctrl = gp8psk_streaming_ctrl, 197 .streaming_ctrl = gp8psk_streaming_ctrl,
198 .frontend_attach = gp8psk_frontend_attach, 198 .frontend_attach = gp8psk_frontend_attach,
199 /* parameter for the MPEG2-data transfer */ 199 /* parameter for the MPEG2-data transfer */
200 .stream = { 200 .stream = {
201 .type = USB_BULK, 201 .type = USB_BULK,
202 .count = 7, 202 .count = 7,
203 .endpoint = 0x82, 203 .endpoint = 0x82,
204 .u = { 204 .u = {
205 .bulk = { 205 .bulk = {
206 .buffersize = 8192, 206 .buffersize = 8192,
207 } 207 }
208 } 208 }
209 }, 209 },
210 } 210 }
211 }, 211 },
212 .power_ctrl = gp8psk_power_ctrl, 212 .power_ctrl = gp8psk_power_ctrl,
diff --git a/drivers/media/dvb/dvb-usb/nova-t-usb2.c b/drivers/media/dvb/dvb-usb/nova-t-usb2.c
index a58874c790b2..d48622e76b1b 100644
--- a/drivers/media/dvb/dvb-usb/nova-t-usb2.c
+++ b/drivers/media/dvb/dvb-usb/nova-t-usb2.c
@@ -163,23 +163,23 @@ static struct dvb_usb_device_properties nova_t_properties = {
163 .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, 163 .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
164 .pid_filter_count = 32, 164 .pid_filter_count = 32,
165 165
166 .streaming_ctrl = dibusb2_0_streaming_ctrl, 166 .streaming_ctrl = dibusb2_0_streaming_ctrl,
167 .pid_filter = dibusb_pid_filter, 167 .pid_filter = dibusb_pid_filter,
168 .pid_filter_ctrl = dibusb_pid_filter_ctrl, 168 .pid_filter_ctrl = dibusb_pid_filter_ctrl,
169 .frontend_attach = dibusb_dib3000mc_frontend_attach, 169 .frontend_attach = dibusb_dib3000mc_frontend_attach,
170 .tuner_attach = dibusb_dib3000mc_tuner_attach, 170 .tuner_attach = dibusb_dib3000mc_tuner_attach,
171 171
172 /* parameter for the MPEG2-data transfer */ 172 /* parameter for the MPEG2-data transfer */
173 .stream = { 173 .stream = {
174 .type = USB_BULK, 174 .type = USB_BULK,
175 .count = 7, 175 .count = 7,
176 .endpoint = 0x06, 176 .endpoint = 0x06,
177 .u = { 177 .u = {
178 .bulk = { 178 .bulk = {
179 .buffersize = 4096, 179 .buffersize = 4096,
180 } 180 }
181 } 181 }
182 }, 182 },
183 183
184 .size_of_priv = sizeof(struct dibusb_state), 184 .size_of_priv = sizeof(struct dibusb_state),
185 } 185 }
diff --git a/drivers/media/dvb/dvb-usb/ttusb2.c b/drivers/media/dvb/dvb-usb/ttusb2.c
new file mode 100644
index 000000000000..95d29976ed78
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/ttusb2.c
@@ -0,0 +1,270 @@
1/* DVB USB compliant linux driver for Technotrend DVB USB boxes and clones
2 * (e.g. Pinnacle 400e DVB-S USB2.0).
3 *
4 * The Pinnacle 400e uses the same protocol as the Technotrend USB1.1 boxes.
5 *
6 * TDA8263 + TDA10086
7 *
8 * I2C addresses:
9 * 0x08 - LNBP21PD - LNB power supply
10 * 0x0e - TDA10086 - Demodulator
11 * 0x50 - FX2 eeprom
12 * 0x60 - TDA8263 - Tuner
13 * 0x78 ???
14 *
15 * Copyright (c) 2002 Holger Waechtler <holger@convergence.de>
16 * Copyright (c) 2003 Felix Domke <tmbinc@elitedvb.net>
17 * Copyright (C) 2005-6 Patrick Boettcher <pb@linuxtv.org>
18 *
19 * This program is free software; you can redistribute it and/or modify it
20 * under the terms of the GNU General Public License as published by the Free
21 * Software Foundation, version 2.
22 *
23 * see Documentation/dvb/README.dvb-usb for more information
24 */
25#define DVB_USB_LOG_PREFIX "ttusb2"
26#include "dvb-usb.h"
27
28#include "ttusb2.h"
29
30#include "tda826x.h"
31#include "tda10086.h"
32#include "lnbp21.h"
33
34/* debug */
35static int dvb_usb_ttusb2_debug;
36#define deb_info(args...) dprintk(dvb_usb_ttusb2_debug,0x01,args)
37module_param_named(debug,dvb_usb_ttusb2_debug, int, 0644);
38MODULE_PARM_DESC(debug, "set debugging level (1=info (or-able))." DVB_USB_DEBUG_STATUS);
39
40struct ttusb2_state {
41 u8 id;
42};
43
44static int ttusb2_msg(struct dvb_usb_device *d, u8 cmd,
45 u8 *wbuf, int wlen, u8 *rbuf, int rlen)
46{
47 struct ttusb2_state *st = d->priv;
48 u8 s[wlen+4],r[64] = { 0 };
49 int ret = 0;
50
51 memset(s,0,wlen+4);
52
53 s[0] = 0xaa;
54 s[1] = ++st->id;
55 s[2] = cmd;
56 s[3] = wlen;
57 memcpy(&s[4],wbuf,wlen);
58
59 ret = dvb_usb_generic_rw(d, s, wlen+4, r, 64, 0);
60
61 if (ret != 0 ||
62 r[0] != 0x55 ||
63 r[1] != s[1] ||
64 r[2] != cmd ||
65 (rlen > 0 && r[3] != rlen)) {
66 warn("there might have been an error during control message transfer. (rlen = %d, was %d)",rlen,r[3]);
67 return -EIO;
68 }
69
70 if (rlen > 0)
71 memcpy(rbuf, &r[4], rlen);
72
73 return 0;
74}
75
76static int ttusb2_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num)
77{
78 struct dvb_usb_device *d = i2c_get_adapdata(adap);
79 static u8 obuf[60], ibuf[60];
80 int i,read;
81
82 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
83 return -EAGAIN;
84
85 if (num > 2)
86 warn("more than 2 i2c messages at a time is not handled yet. TODO.");
87
88 for (i = 0; i < num; i++) {
89 read = i+1 < num && (msg[i+1].flags & I2C_M_RD);
90
91 obuf[0] = (msg[i].addr << 1) | read;
92 obuf[1] = msg[i].len;
93
94 /* read request */
95 if (read)
96 obuf[2] = msg[i+1].len;
97 else
98 obuf[2] = 0;
99
100 memcpy(&obuf[3],msg[i].buf,msg[i].len);
101
102 if (ttusb2_msg(d, CMD_I2C_XFER, obuf, msg[i].len+3, ibuf, obuf[2] + 3) < 0) {
103 err("i2c transfer failed.");
104 break;
105 }
106
107 if (read) {
108 memcpy(msg[i+1].buf,&ibuf[3],msg[i+1].len);
109 i++;
110 }
111 }
112
113 mutex_unlock(&d->i2c_mutex);
114 return i;
115}
116
117static u32 ttusb2_i2c_func(struct i2c_adapter *adapter)
118{
119 return I2C_FUNC_I2C;
120}
121
122static struct i2c_algorithm ttusb2_i2c_algo = {
123 .master_xfer = ttusb2_i2c_xfer,
124 .functionality = ttusb2_i2c_func,
125};
126
127/* Callbacks for DVB USB */
128static int ttusb2_identify_state (struct usb_device *udev, struct
129 dvb_usb_device_properties *props, struct dvb_usb_device_description **desc,
130 int *cold)
131{
132 *cold = udev->descriptor.iManufacturer == 0 && udev->descriptor.iProduct == 0;
133 return 0;
134}
135
136static int ttusb2_power_ctrl(struct dvb_usb_device *d, int onoff)
137{
138 u8 b = onoff;
139 ttusb2_msg(d, CMD_POWER, &b, 0, NULL, 0);
140 return ttusb2_msg(d, CMD_POWER, &b, 1, NULL, 0);
141}
142
143
144static struct tda10086_config tda10086_config = {
145 .demod_address = 0x0e,
146 .invert = 0,
147};
148
149static int ttusb2_frontend_attach(struct dvb_usb_adapter *adap)
150{
151 if (usb_set_interface(adap->dev->udev,0,3) < 0)
152 err("set interface to alts=3 failed");
153
154 if ((adap->fe = dvb_attach(tda10086_attach, &tda10086_config, &adap->dev->i2c_adap)) == NULL) {
155 deb_info("TDA10086 attach failed\n");
156 return -ENODEV;
157 }
158
159 return 0;
160}
161
162static int ttusb2_tuner_attach(struct dvb_usb_adapter *adap)
163{
164 if (dvb_attach(tda826x_attach, adap->fe, 0x60, &adap->dev->i2c_adap, 0) == NULL) {
165 deb_info("TDA8263 attach failed\n");
166 return -ENODEV;
167 }
168
169 if (dvb_attach(lnbp21_attach, adap->fe, &adap->dev->i2c_adap, 0, 0) == NULL) {
170 deb_info("LNBP21 attach failed\n");
171 return -ENODEV;
172 }
173 return 0;
174}
175
176/* DVB USB Driver stuff */
177static struct dvb_usb_device_properties ttusb2_properties;
178
179static int ttusb2_probe(struct usb_interface *intf,
180 const struct usb_device_id *id)
181{
182 return dvb_usb_device_init(intf,&ttusb2_properties,THIS_MODULE,NULL);
183}
184
185static struct usb_device_id ttusb2_table [] = {
186 { USB_DEVICE(USB_VID_PINNACLE, USB_PID_PCTV_400E) },
187 {} /* Terminating entry */
188};
189MODULE_DEVICE_TABLE (usb, ttusb2_table);
190
191static struct dvb_usb_device_properties ttusb2_properties = {
192 .caps = DVB_USB_IS_AN_I2C_ADAPTER,
193
194 .usb_ctrl = CYPRESS_FX2,
195 .firmware = "dvb-usb-pctv-400e-01.fw",
196
197 .size_of_priv = sizeof(struct ttusb2_state),
198
199 .num_adapters = 1,
200 .adapter = {
201 {
202 .streaming_ctrl = NULL, // ttusb2_streaming_ctrl,
203
204 .frontend_attach = ttusb2_frontend_attach,
205 .tuner_attach = ttusb2_tuner_attach,
206
207 /* parameter for the MPEG2-data transfer */
208 .stream = {
209 .type = USB_ISOC,
210 .count = 5,
211 .endpoint = 0x02,
212 .u = {
213 .isoc = {
214 .framesperurb = 4,
215 .framesize = 940,
216 .interval = 1,
217 }
218 }
219 }
220 }
221 },
222
223 .power_ctrl = ttusb2_power_ctrl,
224 .identify_state = ttusb2_identify_state,
225
226 .i2c_algo = &ttusb2_i2c_algo,
227
228 .generic_bulk_ctrl_endpoint = 0x01,
229
230 .num_device_descs = 1,
231 .devices = {
232 { "Pinnacle 400e DVB-S USB2.0",
233 { &ttusb2_table[0], NULL },
234 { NULL },
235 },
236 }
237};
238
239static struct usb_driver ttusb2_driver = {
240 .name = "dvb_usb_ttusb2",
241 .probe = ttusb2_probe,
242 .disconnect = dvb_usb_device_exit,
243 .id_table = ttusb2_table,
244};
245
246/* module stuff */
247static int __init ttusb2_module_init(void)
248{
249 int result;
250 if ((result = usb_register(&ttusb2_driver))) {
251 err("usb_register failed. Error number %d",result);
252 return result;
253 }
254
255 return 0;
256}
257
258static void __exit ttusb2_module_exit(void)
259{
260 /* deregister this driver from the USB subsystem */
261 usb_deregister(&ttusb2_driver);
262}
263
264module_init (ttusb2_module_init);
265module_exit (ttusb2_module_exit);
266
267MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
268MODULE_DESCRIPTION("Driver for Pinnacle PCTV 400e DVB-S USB2.0");
269MODULE_VERSION("1.0");
270MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/dvb-usb/ttusb2.h b/drivers/media/dvb/dvb-usb/ttusb2.h
new file mode 100644
index 000000000000..52a63af40896
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/ttusb2.h
@@ -0,0 +1,70 @@
1/* DVB USB compliant linux driver for Technotrend DVB USB boxes and clones
2 * (e.g. Pinnacle 400e DVB-S USB2.0).
3 *
4 * Copyright (c) 2002 Holger Waechtler <holger@convergence.de>
5 * Copyright (c) 2003 Felix Domke <tmbinc@elitedvb.net>
6 * Copyright (C) 2005-6 Patrick Boettcher <pb@linuxtv.de>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation, version 2.
11 *
12 * see Documentation/dvb/README.dvb-usb for more information
13 */
14#ifndef _DVB_USB_TTUSB2_H_
15#define _DVB_USB_TTUSB2_H_
16
17/* TTUSB protocol
18 *
19 * always to messages (out/in)
20 * out message:
21 * 0xaa <id> <cmdbyte> <datalen> <data...>
22 *
23 * in message (complete block is always 0x40 bytes long)
24 * 0x55 <id> <cmdbyte> <datalen> <data...>
25 *
26 * id is incremented for each transaction
27 */
28
29#define CMD_DSP_DOWNLOAD 0x13
30/* out data: <byte>[28]
31 * last block must be empty */
32
33#define CMD_DSP_BOOT 0x14
34/* out data: nothing */
35
36#define CMD_POWER 0x15
37/* out data: <on=1/off=0> */
38
39#define CMD_LNB 0x16
40/* out data: <power=1> <18V=0,13V=1> <tone> <??=1> <??=1> */
41
42#define CMD_GET_VERSION 0x17
43/* in data: <version_byte>[5] */
44
45#define CMD_DISEQC 0x18
46/* out data: <master=0xff/burst=??> <cmdlen> <cmdbytes>[cmdlen] */
47
48#define CMD_PID_ENABLE 0x22
49/* out data: <index> <type: ts=1/sec=2> <pid msb> <pid lsb> */
50
51#define CMD_PID_DISABLE 0x23
52/* out data: <index> */
53
54#define CMD_FILTER_ENABLE 0x24
55/* out data: <index> <pid_idx> <filter>[12] <mask>[12] */
56
57#define CMD_FILTER_DISABLE 0x25
58/* out data: <index> */
59
60#define CMD_GET_DSP_VERSION 0x26
61/* in data: <version_byte>[28] */
62
63#define CMD_I2C_XFER 0x31
64/* out data: <addr << 1> <sndlen> <rcvlen> <data>[sndlen]
65 * in data: <addr << 1> <sndlen> <rcvlen> <data>[rcvlen] */
66
67#define CMD_I2C_BITRATE 0x32
68/* out data: <default=0> */
69
70#endif
diff --git a/drivers/media/dvb/dvb-usb/umt-010.c b/drivers/media/dvb/dvb-usb/umt-010.c
index f9941ea88b3e..f77b48f76582 100644
--- a/drivers/media/dvb/dvb-usb/umt-010.c
+++ b/drivers/media/dvb/dvb-usb/umt-010.c
@@ -99,21 +99,21 @@ static struct dvb_usb_device_properties umt_properties = {
99 .num_adapters = 1, 99 .num_adapters = 1,
100 .adapter = { 100 .adapter = {
101 { 101 {
102 .streaming_ctrl = dibusb2_0_streaming_ctrl, 102 .streaming_ctrl = dibusb2_0_streaming_ctrl,
103 .frontend_attach = umt_mt352_frontend_attach, 103 .frontend_attach = umt_mt352_frontend_attach,
104 .tuner_attach = umt_tuner_attach, 104 .tuner_attach = umt_tuner_attach,
105 105
106 /* parameter for the MPEG2-data transfer */ 106 /* parameter for the MPEG2-data transfer */
107 .stream = { 107 .stream = {
108 .type = USB_BULK, 108 .type = USB_BULK,
109 .count = 20, 109 .count = 20,
110 .endpoint = 0x06, 110 .endpoint = 0x06,
111 .u = { 111 .u = {
112 .bulk = { 112 .bulk = {
113 .buffersize = 512, 113 .buffersize = 512,
114 } 114 }
115 } 115 }
116 }, 116 },
117 117
118 .size_of_priv = sizeof(struct dibusb_state), 118 .size_of_priv = sizeof(struct dibusb_state),
119 } 119 }
diff --git a/drivers/media/dvb/dvb-usb/vp702x.c b/drivers/media/dvb/dvb-usb/vp702x.c
index 02bd61aaac66..16533b31a82d 100644
--- a/drivers/media/dvb/dvb-usb/vp702x.c
+++ b/drivers/media/dvb/dvb-usb/vp702x.c
@@ -275,22 +275,22 @@ static struct dvb_usb_device_properties vp702x_properties = {
275 .caps = DVB_USB_ADAP_RECEIVES_204_BYTE_TS, 275 .caps = DVB_USB_ADAP_RECEIVES_204_BYTE_TS,
276 276
277 .streaming_ctrl = vp702x_streaming_ctrl, 277 .streaming_ctrl = vp702x_streaming_ctrl,
278 .frontend_attach = vp702x_frontend_attach, 278 .frontend_attach = vp702x_frontend_attach,
279 279
280 /* parameter for the MPEG2-data transfer */ 280 /* parameter for the MPEG2-data transfer */
281 .stream = { 281 .stream = {
282 .type = USB_BULK, 282 .type = USB_BULK,
283 .count = 10, 283 .count = 10,
284 .endpoint = 0x02, 284 .endpoint = 0x02,
285 .u = { 285 .u = {
286 .bulk = { 286 .bulk = {
287 .buffersize = 4096, 287 .buffersize = 4096,
288 } 288 }
289 } 289 }
290 }, 290 },
291 .size_of_priv = sizeof(struct vp702x_state), 291 .size_of_priv = sizeof(struct vp702x_state),
292 } 292 }
293 }, 293 },
294 .read_mac_address = vp702x_read_mac_addr, 294 .read_mac_address = vp702x_read_mac_addr,
295 295
296 .rc_key_map = vp702x_rc_keys, 296 .rc_key_map = vp702x_rc_keys,
diff --git a/drivers/media/dvb/dvb-usb/vp7045.c b/drivers/media/dvb/dvb-usb/vp7045.c
index b4cf002703a7..69a46b3607a2 100644
--- a/drivers/media/dvb/dvb-usb/vp7045.c
+++ b/drivers/media/dvb/dvb-usb/vp7045.c
@@ -125,7 +125,25 @@ static struct dvb_usb_rc_key vp7045_rc_keys[] = {
125 { 0x00, 0x00, KEY_TAB }, /* Tab */ 125 { 0x00, 0x00, KEY_TAB }, /* Tab */
126 { 0x00, 0x48, KEY_INFO }, /* Preview */ 126 { 0x00, 0x48, KEY_INFO }, /* Preview */
127 { 0x00, 0x04, KEY_LIST }, /* RecordList */ 127 { 0x00, 0x04, KEY_LIST }, /* RecordList */
128 { 0x00, 0x0f, KEY_TEXT } /* Teletext */ 128 { 0x00, 0x0f, KEY_TEXT }, /* Teletext */
129 { 0x00, 0x41, KEY_PREVIOUSSONG },
130 { 0x00, 0x42, KEY_NEXTSONG },
131 { 0x00, 0x4b, KEY_UP },
132 { 0x00, 0x51, KEY_DOWN },
133 { 0x00, 0x4e, KEY_LEFT },
134 { 0x00, 0x52, KEY_RIGHT },
135 { 0x00, 0x4f, KEY_ENTER },
136 { 0x00, 0x13, KEY_CANCEL },
137 { 0x00, 0x4a, KEY_CLEAR },
138 { 0x00, 0x54, KEY_PRINT }, /* Capture */
139 { 0x00, 0x43, KEY_SUBTITLE }, /* Subtitle/CC */
140 { 0x00, 0x08, KEY_VIDEO }, /* A/V */
141 { 0x00, 0x07, KEY_SLEEP }, /* Hibernate */
142 { 0x00, 0x45, KEY_ZOOM }, /* Zoom+ */
143 { 0x00, 0x18, KEY_RED},
144 { 0x00, 0x53, KEY_GREEN},
145 { 0x00, 0x5e, KEY_YELLOW},
146 { 0x00, 0x5f, KEY_BLUE}
129}; 147};
130 148
131static int vp7045_rc_query(struct dvb_usb_device *d, u32 *event, int *state) 149static int vp7045_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
@@ -217,18 +235,18 @@ static struct dvb_usb_device_properties vp7045_properties = {
217 .num_adapters = 1, 235 .num_adapters = 1,
218 .adapter = { 236 .adapter = {
219 { 237 {
220 .frontend_attach = vp7045_frontend_attach, 238 .frontend_attach = vp7045_frontend_attach,
221 /* parameter for the MPEG2-data transfer */ 239 /* parameter for the MPEG2-data transfer */
222 .stream = { 240 .stream = {
223 .type = USB_BULK, 241 .type = USB_BULK,
224 .count = 7, 242 .count = 7,
225 .endpoint = 0x02, 243 .endpoint = 0x02,
226 .u = { 244 .u = {
227 .bulk = { 245 .bulk = {
228 .buffersize = 4096, 246 .buffersize = 4096,
229 } 247 }
230 } 248 }
231 }, 249 },
232 } 250 }
233 }, 251 },
234 .power_ctrl = vp7045_power_ctrl, 252 .power_ctrl = vp7045_power_ctrl,
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index aebb8d6f26f8..af314bb1dcac 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -172,6 +172,22 @@ config DVB_DIB3000MC
172 A DVB-T tuner module. Designed for mobile usage. Say Y when you want 172 A DVB-T tuner module. Designed for mobile usage. Say Y when you want
173 to support this frontend. 173 to support this frontend.
174 174
175config DVB_DIB7000M
176 tristate "DiBcom 7000MA/MB/PA/PB/MC"
177 depends on DVB_CORE && I2C
178 default m if DVB_FE_CUSTOMISE
179 help
180 A DVB-T tuner module. Designed for mobile usage. Say Y when you want
181 to support this frontend.
182
183config DVB_DIB7000P
184 tristate "DiBcom 7000PC"
185 depends on DVB_CORE && I2C
186 default m if DVB_FE_CUSTOMISE
187 help
188 A DVB-T tuner module. Designed for mobile usage. Say Y when you want
189 to support this frontend.
190
175comment "DVB-C (cable) frontends" 191comment "DVB-C (cable) frontends"
176 depends on DVB_CORE 192 depends on DVB_CORE
177 193
@@ -281,6 +297,14 @@ config DVB_TUNER_MT2060
281 help 297 help
282 A driver for the silicon IF tuner MT2060 from Microtune. 298 A driver for the silicon IF tuner MT2060 from Microtune.
283 299
300config DVB_TUNER_LGH06XF
301 tristate "LG TDVS-H06xF ATSC tuner"
302 depends on DVB_CORE && I2C
303 select DVB_PLL
304 default m if DVB_FE_CUSTOMISE
305 help
306 A driver for the LG TDVS-H06xF ATSC tuner family.
307
284comment "Miscellaneous devices" 308comment "Miscellaneous devices"
285 depends on DVB_CORE 309 depends on DVB_CORE
286 310
diff --git a/drivers/media/dvb/frontends/Makefile b/drivers/media/dvb/frontends/Makefile
index dce9cf0c75c0..3fa6e5d32a9c 100644
--- a/drivers/media/dvb/frontends/Makefile
+++ b/drivers/media/dvb/frontends/Makefile
@@ -13,6 +13,8 @@ obj-$(CONFIG_DVB_TDA8083) += tda8083.o
13obj-$(CONFIG_DVB_L64781) += l64781.o 13obj-$(CONFIG_DVB_L64781) += l64781.o
14obj-$(CONFIG_DVB_DIB3000MB) += dib3000mb.o 14obj-$(CONFIG_DVB_DIB3000MB) += dib3000mb.o
15obj-$(CONFIG_DVB_DIB3000MC) += dib3000mc.o dibx000_common.o 15obj-$(CONFIG_DVB_DIB3000MC) += dib3000mc.o dibx000_common.o
16obj-$(CONFIG_DVB_DIB7000M) += dib7000m.o dibx000_common.o
17obj-$(CONFIG_DVB_DIB7000P) += dib7000p.o dibx000_common.o
16obj-$(CONFIG_DVB_MT312) += mt312.o 18obj-$(CONFIG_DVB_MT312) += mt312.o
17obj-$(CONFIG_DVB_VES1820) += ves1820.o 19obj-$(CONFIG_DVB_VES1820) += ves1820.o
18obj-$(CONFIG_DVB_VES1X93) += ves1x93.o 20obj-$(CONFIG_DVB_VES1X93) += ves1x93.o
@@ -37,3 +39,4 @@ obj-$(CONFIG_DVB_TDA10086) += tda10086.o
37obj-$(CONFIG_DVB_TDA826X) += tda826x.o 39obj-$(CONFIG_DVB_TDA826X) += tda826x.o
38obj-$(CONFIG_DVB_TUNER_MT2060) += mt2060.o 40obj-$(CONFIG_DVB_TUNER_MT2060) += mt2060.o
39obj-$(CONFIG_DVB_TUA6100) += tua6100.o 41obj-$(CONFIG_DVB_TUA6100) += tua6100.o
42obj-$(CONFIG_DVB_TUNER_LGH06XF) += lgh06xf.o
diff --git a/drivers/media/dvb/frontends/dib3000mc.c b/drivers/media/dvb/frontends/dib3000mc.c
index 3561a777568c..5da66178006c 100644
--- a/drivers/media/dvb/frontends/dib3000mc.c
+++ b/drivers/media/dvb/frontends/dib3000mc.c
@@ -511,16 +511,11 @@ static int dib3000mc_autosearch_start(struct dvb_frontend *demod, struct dibx000
511 511
512 512
513 /* a channel for autosearch */ 513 /* a channel for autosearch */
514 reg = 0;
515 if (chan->nfft == -1 && chan->guard == -1) reg = 7;
516 if (chan->nfft == -1 && chan->guard != -1) reg = 2;
517 if (chan->nfft != -1 && chan->guard == -1) reg = 3;
518
519 fchan.nfft = 1; fchan.guard = 0; fchan.nqam = 2; 514 fchan.nfft = 1; fchan.guard = 0; fchan.nqam = 2;
520 fchan.vit_alpha = 1; fchan.vit_code_rate_hp = 2; fchan.vit_code_rate_lp = 2; 515 fchan.vit_alpha = 1; fchan.vit_code_rate_hp = 2; fchan.vit_code_rate_lp = 2;
521 fchan.vit_hrch = 0; fchan.vit_select_hp = 1; 516 fchan.vit_hrch = 0; fchan.vit_select_hp = 1;
522 517
523 dib3000mc_set_channel_cfg(state, &fchan, reg); 518 dib3000mc_set_channel_cfg(state, &fchan, 7);
524 519
525 reg = dib3000mc_read_word(state, 0); 520 reg = dib3000mc_read_word(state, 0);
526 dib3000mc_write_word(state, 0, reg | (1 << 8)); 521 dib3000mc_write_word(state, 0, reg | (1 << 8));
diff --git a/drivers/media/dvb/frontends/dib7000m.c b/drivers/media/dvb/frontends/dib7000m.c
new file mode 100644
index 000000000000..f5d40aa3d27f
--- /dev/null
+++ b/drivers/media/dvb/frontends/dib7000m.c
@@ -0,0 +1,1191 @@
1/*
2 * Linux-DVB Driver for DiBcom's DiB7000M and
3 * first generation DiB7000P-demodulator-family.
4 *
5 * Copyright (C) 2005-6 DiBcom (http://www.dibcom.fr/)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation, version 2.
10 */
11#include <linux/kernel.h>
12#include <linux/i2c.h>
13
14#include "dvb_frontend.h"
15
16#include "dib7000m.h"
17
18static int debug;
19module_param(debug, int, 0644);
20MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
21
22#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiB7000M:"); printk(args); } } while (0)
23
24struct dib7000m_state {
25 struct dvb_frontend demod;
26 struct dib7000m_config cfg;
27
28 u8 i2c_addr;
29 struct i2c_adapter *i2c_adap;
30
31 struct dibx000_i2c_master i2c_master;
32
33/* offset is 1 in case of the 7000MC */
34 u8 reg_offs;
35
36 u16 wbd_ref;
37
38 u8 current_band;
39 fe_bandwidth_t current_bandwidth;
40 struct dibx000_agc_config *current_agc;
41 u32 timf;
42
43 u16 revision;
44};
45
46enum dib7000m_power_mode {
47 DIB7000M_POWER_ALL = 0,
48
49 DIB7000M_POWER_NO,
50 DIB7000M_POWER_INTERF_ANALOG_AGC,
51 DIB7000M_POWER_COR4_DINTLV_ICIRM_EQUAL_CFROD,
52 DIB7000M_POWER_COR4_CRY_ESRAM_MOUT_NUD,
53 DIB7000M_POWER_INTERFACE_ONLY,
54};
55
56static u16 dib7000m_read_word(struct dib7000m_state *state, u16 reg)
57{
58 u8 wb[2] = { (reg >> 8) | 0x80, reg & 0xff };
59 u8 rb[2];
60 struct i2c_msg msg[2] = {
61 { .addr = state->i2c_addr >> 1, .flags = 0, .buf = wb, .len = 2 },
62 { .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .buf = rb, .len = 2 },
63 };
64
65 if (i2c_transfer(state->i2c_adap, msg, 2) != 2)
66 dprintk("i2c read error on %d\n",reg);
67
68 return (rb[0] << 8) | rb[1];
69}
70
71static int dib7000m_write_word(struct dib7000m_state *state, u16 reg, u16 val)
72{
73 u8 b[4] = {
74 (reg >> 8) & 0xff, reg & 0xff,
75 (val >> 8) & 0xff, val & 0xff,
76 };
77 struct i2c_msg msg = {
78 .addr = state->i2c_addr >> 1, .flags = 0, .buf = b, .len = 4
79 };
80 return i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
81}
82static int dib7000m_set_output_mode(struct dib7000m_state *state, int mode)
83{
84 int ret = 0;
85 u16 outreg, fifo_threshold, smo_mode,
86 sram = 0x0005; /* by default SRAM output is disabled */
87
88 outreg = 0;
89 fifo_threshold = 1792;
90 smo_mode = (dib7000m_read_word(state, 294 + state->reg_offs) & 0x0010) | (1 << 1);
91
92 dprintk("-I- Setting output mode for demod %p to %d\n",
93 &state->demod, mode);
94
95 switch (mode) {
96 case OUTMODE_MPEG2_PAR_GATED_CLK: // STBs with parallel gated clock
97 outreg = (1 << 10); /* 0x0400 */
98 break;
99 case OUTMODE_MPEG2_PAR_CONT_CLK: // STBs with parallel continues clock
100 outreg = (1 << 10) | (1 << 6); /* 0x0440 */
101 break;
102 case OUTMODE_MPEG2_SERIAL: // STBs with serial input
103 outreg = (1 << 10) | (2 << 6) | (0 << 1); /* 0x0482 */
104 break;
105 case OUTMODE_DIVERSITY:
106 if (state->cfg.hostbus_diversity)
107 outreg = (1 << 10) | (4 << 6); /* 0x0500 */
108 else
109 sram |= 0x0c00;
110 break;
111 case OUTMODE_MPEG2_FIFO: // e.g. USB feeding
112 smo_mode |= (3 << 1);
113 fifo_threshold = 512;
114 outreg = (1 << 10) | (5 << 6);
115 break;
116 case OUTMODE_HIGH_Z: // disable
117 outreg = 0;
118 break;
119 default:
120 dprintk("Unhandled output_mode passed to be set for demod %p\n",&state->demod);
121 break;
122 }
123
124 if (state->cfg.output_mpeg2_in_188_bytes)
125 smo_mode |= (1 << 5) ;
126
127 ret |= dib7000m_write_word(state, 294 + state->reg_offs, smo_mode);
128 ret |= dib7000m_write_word(state, 295 + state->reg_offs, fifo_threshold); /* synchronous fread */
129 ret |= dib7000m_write_word(state, 1795, outreg);
130 ret |= dib7000m_write_word(state, 1805, sram);
131
132 return ret;
133}
134
135static int dib7000m_set_power_mode(struct dib7000m_state *state, enum dib7000m_power_mode mode)
136{
137 /* by default everything is going to be powered off */
138 u16 reg_903 = 0xffff, reg_904 = 0xffff, reg_905 = 0xffff, reg_906 = 0x3fff;
139
140 /* now, depending on the requested mode, we power on */
141 switch (mode) {
142 /* power up everything in the demod */
143 case DIB7000M_POWER_ALL:
144 reg_903 = 0x0000; reg_904 = 0x0000; reg_905 = 0x0000; reg_906 = 0x0000;
145 break;
146
147 /* just leave power on the control-interfaces: GPIO and (I2C or SDIO or SRAM) */
148 case DIB7000M_POWER_INTERFACE_ONLY: /* TODO power up either SDIO or I2C or SRAM */
149 reg_905 &= ~((1 << 7) | (1 << 6) | (1 << 5) | (1 << 2));
150 break;
151
152 case DIB7000M_POWER_INTERF_ANALOG_AGC:
153 reg_903 &= ~((1 << 15) | (1 << 14) | (1 << 11) | (1 << 10));
154 reg_905 &= ~((1 << 7) | (1 << 6) | (1 << 5) | (1 << 4) | (1 << 2));
155 reg_906 &= ~((1 << 0));
156 break;
157
158 case DIB7000M_POWER_COR4_DINTLV_ICIRM_EQUAL_CFROD:
159 reg_903 = 0x0000; reg_904 = 0x801f; reg_905 = 0x0000; reg_906 = 0x0000;
160 break;
161
162 case DIB7000M_POWER_COR4_CRY_ESRAM_MOUT_NUD:
163 reg_903 = 0x0000; reg_904 = 0x8000; reg_905 = 0x010b; reg_906 = 0x0000;
164 break;
165 case DIB7000M_POWER_NO:
166 break;
167 }
168
169 /* always power down unused parts */
170 if (!state->cfg.mobile_mode)
171 reg_904 |= (1 << 7) | (1 << 6) | (1 << 4) | (1 << 2) | (1 << 1);
172
173 /* P_sdio_select_clk = 0 on MC */
174 if (state->revision != 0x4000)
175 reg_906 <<= 1;
176
177 dib7000m_write_word(state, 903, reg_903);
178 dib7000m_write_word(state, 904, reg_904);
179 dib7000m_write_word(state, 905, reg_905);
180 dib7000m_write_word(state, 906, reg_906);
181
182 return 0;
183}
184
185static int dib7000m_set_adc_state(struct dib7000m_state *state, enum dibx000_adc_states no)
186{
187 int ret = 0;
188 u16 reg_913 = dib7000m_read_word(state, 913),
189 reg_914 = dib7000m_read_word(state, 914);
190
191 switch (no) {
192 case DIBX000_SLOW_ADC_ON:
193 reg_914 |= (1 << 1) | (1 << 0);
194 ret |= dib7000m_write_word(state, 914, reg_914);
195 reg_914 &= ~(1 << 1);
196 break;
197
198 case DIBX000_SLOW_ADC_OFF:
199 reg_914 |= (1 << 1) | (1 << 0);
200 break;
201
202 case DIBX000_ADC_ON:
203 if (state->revision == 0x4000) { // workaround for PA/MA
204 // power-up ADC
205 dib7000m_write_word(state, 913, 0);
206 dib7000m_write_word(state, 914, reg_914 & 0x3);
207 // power-down bandgag
208 dib7000m_write_word(state, 913, (1 << 15));
209 dib7000m_write_word(state, 914, reg_914 & 0x3);
210 }
211
212 reg_913 &= 0x0fff;
213 reg_914 &= 0x0003;
214 break;
215
216 case DIBX000_ADC_OFF: // leave the VBG voltage on
217 reg_913 |= (1 << 14) | (1 << 13) | (1 << 12);
218 reg_914 |= (1 << 5) | (1 << 4) | (1 << 3) | (1 << 2);
219 break;
220
221 case DIBX000_VBG_ENABLE:
222 reg_913 &= ~(1 << 15);
223 break;
224
225 case DIBX000_VBG_DISABLE:
226 reg_913 |= (1 << 15);
227 break;
228
229 default:
230 break;
231 }
232
233// dprintk("-D- 913: %x, 914: %x\n", reg_913, reg_914);
234
235 ret |= dib7000m_write_word(state, 913, reg_913);
236 ret |= dib7000m_write_word(state, 914, reg_914);
237
238 return ret;
239}
240
241static int dib7000m_set_bandwidth(struct dvb_frontend *demod, u8 bw_idx)
242{
243 struct dib7000m_state *state = demod->demodulator_priv;
244 u32 timf;
245
246 // store the current bandwidth for later use
247 state->current_bandwidth = bw_idx;
248
249 if (state->timf == 0) {
250 dprintk("-D- Using default timf\n");
251 timf = state->cfg.bw->timf;
252 } else {
253 dprintk("-D- Using updated timf\n");
254 timf = state->timf;
255 }
256
257 timf = timf * (BW_INDEX_TO_KHZ(bw_idx) / 100) / 80;
258
259 dib7000m_write_word(state, 23, (timf >> 16) & 0xffff);
260 dib7000m_write_word(state, 24, (timf ) & 0xffff);
261
262 return 0;
263}
264
265static int dib7000m_sad_calib(struct dib7000m_state *state)
266{
267
268/* internal */
269// dib7000m_write_word(state, 928, (3 << 14) | (1 << 12) | (524 << 0)); // sampling clock of the SAD is writting in set_bandwidth
270 dib7000m_write_word(state, 929, (0 << 1) | (0 << 0));
271 dib7000m_write_word(state, 930, 776); // 0.625*3.3 / 4096
272
273 /* do the calibration */
274 dib7000m_write_word(state, 929, (1 << 0));
275 dib7000m_write_word(state, 929, (0 << 0));
276
277 msleep(1);
278
279 return 0;
280}
281
282static void dib7000m_reset_pll_common(struct dib7000m_state *state, const struct dibx000_bandwidth_config *bw)
283{
284 dib7000m_write_word(state, 18, ((bw->internal*1000) >> 16) & 0xffff);
285 dib7000m_write_word(state, 19, (bw->internal*1000) & 0xffff);
286 dib7000m_write_word(state, 21, (bw->ifreq >> 16) & 0xffff);
287 dib7000m_write_word(state, 22, bw->ifreq & 0xffff);
288
289 dib7000m_write_word(state, 928, bw->sad_cfg);
290}
291
292static void dib7000m_reset_pll(struct dib7000m_state *state)
293{
294 const struct dibx000_bandwidth_config *bw = state->cfg.bw;
295 u16 reg_907,reg_910;
296
297 /* default */
298 reg_907 = (bw->pll_bypass << 15) | (bw->modulo << 7) |
299 (bw->ADClkSrc << 6) | (bw->IO_CLK_en_core << 5) | (bw->bypclk_div << 2) |
300 (bw->enable_refdiv << 1) | (0 << 0);
301 reg_910 = (((bw->pll_ratio >> 6) & 0x3) << 3) | (bw->pll_range << 1) | bw->pll_reset;
302
303 // for this oscillator frequency should be 30 MHz for the Master (default values in the board_parameters give that value)
304 // this is only working only for 30 MHz crystals
305 if (!state->cfg.quartz_direct) {
306 reg_910 |= (1 << 5); // forcing the predivider to 1
307
308 // if the previous front-end is baseband, its output frequency is 15 MHz (prev freq divided by 2)
309 if(state->cfg.input_clk_is_div_2)
310 reg_907 |= (16 << 9);
311 else // otherwise the previous front-end puts out its input (default 30MHz) - no extra division necessary
312 reg_907 |= (8 << 9);
313 } else {
314 reg_907 |= (bw->pll_ratio & 0x3f) << 9;
315 reg_910 |= (bw->pll_prediv << 5);
316 }
317
318 dib7000m_write_word(state, 910, reg_910); // pll cfg
319 dib7000m_write_word(state, 907, reg_907); // clk cfg0
320 dib7000m_write_word(state, 908, 0x0006); // clk_cfg1
321
322 dib7000m_reset_pll_common(state, bw);
323}
324
325static void dib7000mc_reset_pll(struct dib7000m_state *state)
326{
327 const struct dibx000_bandwidth_config *bw = state->cfg.bw;
328
329 // clk_cfg0
330 dib7000m_write_word(state, 907, (bw->pll_prediv << 8) | (bw->pll_ratio << 0));
331
332 // clk_cfg1
333 //dib7000m_write_word(state, 908, (1 << 14) | (3 << 12) |(0 << 11) |
334 dib7000m_write_word(state, 908, (0 << 14) | (3 << 12) |(0 << 11) |
335 (bw->IO_CLK_en_core << 10) | (bw->bypclk_div << 5) | (bw->enable_refdiv << 4) |
336 (bw->pll_bypass << 3) | (bw->pll_range << 1) | (bw->pll_reset << 0));
337
338 // smpl_cfg
339 dib7000m_write_word(state, 910, (1 << 12) | (2 << 10) | (bw->modulo << 8) | (bw->ADClkSrc << 7));
340
341 dib7000m_reset_pll_common(state, bw);
342}
343
344static int dib7000m_reset_gpio(struct dib7000m_state *st)
345{
346 /* reset the GPIOs */
347 dprintk("-D- gpio dir: %x: gpio val: %x, gpio pwm pos: %x\n",
348 st->cfg.gpio_dir, st->cfg.gpio_val,st->cfg.gpio_pwm_pos);
349
350 dib7000m_write_word(st, 773, st->cfg.gpio_dir);
351 dib7000m_write_word(st, 774, st->cfg.gpio_val);
352
353 /* TODO 782 is P_gpio_od */
354
355 dib7000m_write_word(st, 775, st->cfg.gpio_pwm_pos);
356
357 dib7000m_write_word(st, 780, st->cfg.pwm_freq_div);
358 return 0;
359}
360
361static int dib7000m_demod_reset(struct dib7000m_state *state)
362{
363 dib7000m_set_power_mode(state, DIB7000M_POWER_ALL);
364
365 /* always leave the VBG voltage on - it consumes almost nothing but takes a long time to start */
366 dib7000m_set_adc_state(state, DIBX000_VBG_ENABLE);
367
368 /* restart all parts */
369 dib7000m_write_word(state, 898, 0xffff);
370 dib7000m_write_word(state, 899, 0xffff);
371 dib7000m_write_word(state, 900, 0xff0f);
372 dib7000m_write_word(state, 901, 0xfffc);
373
374 dib7000m_write_word(state, 898, 0);
375 dib7000m_write_word(state, 899, 0);
376 dib7000m_write_word(state, 900, 0);
377 dib7000m_write_word(state, 901, 0);
378
379 if (state->revision == 0x4000)
380 dib7000m_reset_pll(state);
381 else
382 dib7000mc_reset_pll(state);
383
384 if (dib7000m_reset_gpio(state) != 0)
385 dprintk("-E- GPIO reset was not successful.\n");
386
387 if (dib7000m_set_output_mode(state, OUTMODE_HIGH_Z) != 0)
388 dprintk("-E- OUTPUT_MODE could not be resetted.\n");
389
390 /* unforce divstr regardless whether i2c enumeration was done or not */
391 dib7000m_write_word(state, 1794, dib7000m_read_word(state, 1794) & ~(1 << 1) );
392
393 dib7000m_set_bandwidth(&state->demod, BANDWIDTH_8_MHZ);
394
395 dib7000m_set_adc_state(state, DIBX000_SLOW_ADC_ON);
396 dib7000m_sad_calib(state);
397 dib7000m_set_adc_state(state, DIBX000_SLOW_ADC_OFF);
398
399 dib7000m_set_power_mode(state, DIB7000M_POWER_INTERFACE_ONLY);
400
401 return 0;
402}
403
404static void dib7000m_restart_agc(struct dib7000m_state *state)
405{
406 // P_restart_iqc & P_restart_agc
407 dib7000m_write_word(state, 898, 0x0c00);
408 dib7000m_write_word(state, 898, 0x0000);
409}
410
411static int dib7000m_agc_soft_split(struct dib7000m_state *state)
412{
413 u16 agc,split_offset;
414
415 if(!state->current_agc || !state->current_agc->perform_agc_softsplit || state->current_agc->split.max == 0)
416 return 0;
417
418 // n_agc_global
419 agc = dib7000m_read_word(state, 390);
420
421 if (agc > state->current_agc->split.min_thres)
422 split_offset = state->current_agc->split.min;
423 else if (agc < state->current_agc->split.max_thres)
424 split_offset = state->current_agc->split.max;
425 else
426 split_offset = state->current_agc->split.max *
427 (agc - state->current_agc->split.min_thres) /
428 (state->current_agc->split.max_thres - state->current_agc->split.min_thres);
429
430 dprintk("AGC split_offset: %d\n",split_offset);
431
432 // P_agc_force_split and P_agc_split_offset
433 return dib7000m_write_word(state, 103, (dib7000m_read_word(state, 103) & 0xff00) | split_offset);
434}
435
436static int dib7000m_update_lna(struct dib7000m_state *state)
437{
438 int i;
439 u16 dyn_gain;
440
441 // when there is no LNA to program return immediatly
442 if (state->cfg.update_lna == NULL)
443 return 0;
444
445 msleep(60);
446 for (i = 0; i < 20; i++) {
447 // read dyn_gain here (because it is demod-dependent and not tuner)
448 dyn_gain = dib7000m_read_word(state, 390);
449
450 dprintk("agc global: %d\n", dyn_gain);
451
452 if (state->cfg.update_lna(&state->demod,dyn_gain)) { // LNA has changed
453 dib7000m_restart_agc(state);
454 msleep(60);
455 } else
456 break;
457 }
458 return 0;
459}
460
461static void dib7000m_set_agc_config(struct dib7000m_state *state, u8 band)
462{
463 struct dibx000_agc_config *agc = NULL;
464 int i;
465 if (state->current_band == band)
466 return;
467 state->current_band = band;
468
469 for (i = 0; i < state->cfg.agc_config_count; i++)
470 if (state->cfg.agc[i].band_caps & band) {
471 agc = &state->cfg.agc[i];
472 break;
473 }
474
475 if (agc == NULL) {
476 dprintk("-E- No valid AGC configuration found for band 0x%02x\n",band);
477 return;
478 }
479
480 state->current_agc = agc;
481
482 /* AGC */
483 dib7000m_write_word(state, 72 , agc->setup);
484 dib7000m_write_word(state, 73 , agc->inv_gain);
485 dib7000m_write_word(state, 74 , agc->time_stabiliz);
486 dib7000m_write_word(state, 97 , (agc->alpha_level << 12) | agc->thlock);
487
488 // Demod AGC loop configuration
489 dib7000m_write_word(state, 98, (agc->alpha_mant << 5) | agc->alpha_exp);
490 dib7000m_write_word(state, 99, (agc->beta_mant << 6) | agc->beta_exp);
491
492 dprintk("-D- WBD: ref: %d, sel: %d, active: %d, alpha: %d\n",
493 state->wbd_ref != 0 ? state->wbd_ref : agc->wbd_ref, agc->wbd_sel, !agc->perform_agc_softsplit, agc->wbd_sel);
494
495 /* AGC continued */
496 if (state->wbd_ref != 0)
497 dib7000m_write_word(state, 102, state->wbd_ref);
498 else // use default
499 dib7000m_write_word(state, 102, agc->wbd_ref);
500
501 dib7000m_write_word(state, 103, (agc->wbd_alpha << 9) | (agc->perform_agc_softsplit << 8) );
502 dib7000m_write_word(state, 104, agc->agc1_max);
503 dib7000m_write_word(state, 105, agc->agc1_min);
504 dib7000m_write_word(state, 106, agc->agc2_max);
505 dib7000m_write_word(state, 107, agc->agc2_min);
506 dib7000m_write_word(state, 108, (agc->agc1_pt1 << 8) | agc->agc1_pt2 );
507 dib7000m_write_word(state, 109, (agc->agc1_slope1 << 8) | agc->agc1_slope2);
508 dib7000m_write_word(state, 110, (agc->agc2_pt1 << 8) | agc->agc2_pt2);
509 dib7000m_write_word(state, 111, (agc->agc2_slope1 << 8) | agc->agc2_slope2);
510
511 if (state->revision > 0x4000) { // settings for the MC
512 dib7000m_write_word(state, 71, agc->agc1_pt3);
513// dprintk("-D- 929: %x %d %d\n",
514// (dib7000m_read_word(state, 929) & 0xffe3) | (agc->wbd_inv << 4) | (agc->wbd_sel << 2), agc->wbd_inv, agc->wbd_sel);
515 dib7000m_write_word(state, 929, (dib7000m_read_word(state, 929) & 0xffe3) | (agc->wbd_inv << 4) | (agc->wbd_sel << 2));
516 } else {
517 // wrong default values
518 u16 b[9] = { 676, 696, 717, 737, 758, 778, 799, 819, 840 };
519 for (i = 0; i < 9; i++)
520 dib7000m_write_word(state, 88 + i, b[i]);
521 }
522}
523
524static void dib7000m_update_timf_freq(struct dib7000m_state *state)
525{
526 u32 timf = (dib7000m_read_word(state, 436) << 16) | dib7000m_read_word(state, 437);
527 state->timf = timf * 80 / (BW_INDEX_TO_KHZ(state->current_bandwidth) / 100);
528 dib7000m_write_word(state, 23, (u16) (timf >> 16));
529 dib7000m_write_word(state, 24, (u16) (timf & 0xffff));
530 dprintk("-D- Updated timf_frequency: %d (default: %d)\n",state->timf, state->cfg.bw->timf);
531}
532
/*
 * Program the demod for the OFDM channel described by @ch and run the AGC
 * startup sequence.  @seq is written into register 5 (shifted by 4); callers
 * pass 7 for autosearch and 0 for a normal tune.  Register writes below are
 * order-dependent - do not reorder.
 */
static void dib7000m_set_channel(struct dib7000m_state *state, struct dibx000_ofdm_channel *ch, u8 seq)
{
	u16 value, est[4];

	/* (re)program the AGC for the band this frequency belongs to */
	dib7000m_set_agc_config(state, BAND_OF_FREQUENCY(ch->RF_kHz));

	/* nfft, guard, qam, alpha */
	dib7000m_write_word(state, 0, (ch->nfft << 7) | (ch->guard << 5) | (ch->nqam << 3) | (ch->vit_alpha));
	dib7000m_write_word(state, 5, (seq << 4));

	/* P_dintl_native, P_dintlv_inv, P_vit_hrch, P_vit_code_rate, P_vit_select_hp */
	value = (ch->intlv_native << 6) | (ch->vit_hrch << 4) | (ch->vit_select_hp & 0x1);
	/* no hierarchy, or demodulating the HP stream: use the HP code rate */
	if (ch->vit_hrch == 0 || ch->vit_select_hp == 1)
		value |= (ch->vit_code_rate_hp << 1);
	else
		value |= (ch->vit_code_rate_lp << 1);
	dib7000m_write_word(state, 267 + state->reg_offs, value);

	/* offset loop parameters */

	/* P_timf_alpha = 6, P_corm_alpha=6, P_corm_thres=0x80 */
	dib7000m_write_word(state, 26, (6 << 12) | (6 << 8) | 0x80);

	/* P_ctrl_inh_cor=0, P_ctrl_alpha_cor=4, P_ctrl_inh_isi=1, P_ctrl_alpha_isi=3, P_ctrl_inh_cor4=1, P_ctrl_alpha_cor4=3 */
	dib7000m_write_word(state, 29, (0 << 14) | (4 << 10) | (1 << 9) | (3 << 5) | (1 << 4) | (0x3));

	/* P_ctrl_freeze_pha_shift=0, P_ctrl_pha_off_max=3 */
	dib7000m_write_word(state, 32, (0 << 4) | 0x3);

	/* P_ctrl_sfreq_inh=0, P_ctrl_sfreq_step=5 */
	dib7000m_write_word(state, 33, (0 << 4) | 0x5);

	/* P_dvsy_sync_wait - base value depends on the FFT size */
	switch (ch->nfft) {
		case 1: value = 256; break;
		case 2: value = 128; break;
		case 0:
		default: value = 64; break;
	}
	value *= ((1 << (ch->guard)) * 3 / 2); // add 50% SFN margin
	value <<= 4;

	/* deactive the possibility of diversity reception if extended interleave - not for 7000MC */
	/* P_dvsy_sync_mode = 0, P_dvsy_sync_enable=1, P_dvcb_comb_mode=2 */
	if (ch->intlv_native || state->revision > 0x4000)
		value |= (1 << 2) | (2 << 0);
	else
		value |= 0;
	dib7000m_write_word(state, 266 + state->reg_offs, value);

	/* channel estimation fine configuration - constants depend on the constellation */
	switch (ch->nqam) {
		case 2:
			est[0] = 0x0148; /* P_adp_regul_cnt 0.04 */
			est[1] = 0xfff0; /* P_adp_noise_cnt -0.002 */
			est[2] = 0x00a4; /* P_adp_regul_ext 0.02 */
			est[3] = 0xfff8; /* P_adp_noise_ext -0.001 */
			break;
		case 1:
			est[0] = 0x023d; /* P_adp_regul_cnt 0.07 */
			est[1] = 0xffdf; /* P_adp_noise_cnt -0.004 */
			est[2] = 0x00a4; /* P_adp_regul_ext 0.02 */
			est[3] = 0xfff0; /* P_adp_noise_ext -0.002 */
			break;
		default:
			est[0] = 0x099a; /* P_adp_regul_cnt 0.3 */
			est[1] = 0xffae; /* P_adp_noise_cnt -0.01 */
			est[2] = 0x0333; /* P_adp_regul_ext 0.1 */
			est[3] = 0xfff8; /* P_adp_noise_ext -0.002 */
			break;
	}
	for (value = 0; value < 4; value++)
		dib7000m_write_word(state, 214 + value + state->reg_offs, est[value]);

	// set power-up level: interf+analog+AGC
	dib7000m_set_power_mode(state, DIB7000M_POWER_INTERF_ANALOG_AGC);
	dib7000m_set_adc_state(state, DIBX000_ADC_ON);

	msleep(7);

	//AGC initialization - notify the board callback before starting the AGC
	if (state->cfg.agc_control)
		state->cfg.agc_control(&state->demod, 1);

	dib7000m_restart_agc(state);

	// wait AGC rough lock time
	msleep(5);

	dib7000m_update_lna(state);
	dib7000m_agc_soft_split(state);

	// wait AGC accurate lock time
	msleep(7);

	/* notify the board callback that AGC startup is done */
	if (state->cfg.agc_control)
		state->cfg.agc_control(&state->demod, 0);

	// set power-up level: autosearch
	dib7000m_set_power_mode(state, DIB7000M_POWER_COR4_DINTLV_ICIRM_EQUAL_CFROD);
}
634
635static int dib7000m_autosearch_start(struct dvb_frontend *demod, struct dibx000_ofdm_channel *ch)
636{
637 struct dib7000m_state *state = demod->demodulator_priv;
638 struct dibx000_ofdm_channel auto_ch;
639 int ret = 0;
640 u32 value;
641
642 INIT_OFDM_CHANNEL(&auto_ch);
643 auto_ch.RF_kHz = ch->RF_kHz;
644 auto_ch.Bw = ch->Bw;
645 auto_ch.nqam = 2;
646 auto_ch.guard = 0;
647 auto_ch.nfft = 1;
648 auto_ch.vit_alpha = 1;
649 auto_ch.vit_select_hp = 1;
650 auto_ch.vit_code_rate_hp = 2;
651 auto_ch.vit_code_rate_lp = 3;
652 auto_ch.vit_hrch = 0;
653 auto_ch.intlv_native = 1;
654
655 dib7000m_set_channel(state, &auto_ch, 7);
656
657 // always use the setting for 8MHz here lock_time for 7,6 MHz are longer
658 value = 30 * state->cfg.bw->internal;
659 ret |= dib7000m_write_word(state, 6, (u16) ((value >> 16) & 0xffff)); // lock0 wait time
660 ret |= dib7000m_write_word(state, 7, (u16) (value & 0xffff)); // lock0 wait time
661 value = 100 * state->cfg.bw->internal;
662 ret |= dib7000m_write_word(state, 8, (u16) ((value >> 16) & 0xffff)); // lock1 wait time
663 ret |= dib7000m_write_word(state, 9, (u16) (value & 0xffff)); // lock1 wait time
664 value = 500 * state->cfg.bw->internal;
665 ret |= dib7000m_write_word(state, 10, (u16) ((value >> 16) & 0xffff)); // lock2 wait time
666 ret |= dib7000m_write_word(state, 11, (u16) (value & 0xffff)); // lock2 wait time
667
668 // start search
669 value = dib7000m_read_word(state, 0);
670 ret |= dib7000m_write_word(state, 0, value | (1 << 9));
671
672 /* clear n_irq_pending */
673 if (state->revision == 0x4000)
674 dib7000m_write_word(state, 1793, 0);
675 else
676 dib7000m_read_word(state, 537);
677
678 ret |= dib7000m_write_word(state, 0, (u16) value);
679
680 return ret;
681}
682
683static int dib7000m_autosearch_irq(struct dib7000m_state *state, u16 reg)
684{
685 u16 irq_pending = dib7000m_read_word(state, reg);
686
687 if (irq_pending & 0x1) { // failed
688 dprintk("#\n");
689 return 1;
690 }
691
692 if (irq_pending & 0x2) { // succeeded
693 dprintk("!\n");
694 return 2;
695 }
696 return 0; // still pending
697}
698
699static int dib7000m_autosearch_is_irq(struct dvb_frontend *demod)
700{
701 struct dib7000m_state *state = demod->demodulator_priv;
702 if (state->revision == 0x4000)
703 return dib7000m_autosearch_irq(state, 1793);
704 else
705 return dib7000m_autosearch_irq(state, 537);
706}
707
/*
 * Final tune step: program the channel, restart the demod and set the
 * FFT-size dependent tracking-loop parameters.  Returns 0 on success, the
 * OR of failed register writes or -EINVAL when @ch is NULL.
 */
static int dib7000m_tune(struct dvb_frontend *demod, struct dibx000_ofdm_channel *ch)
{
	struct dib7000m_state *state = demod->demodulator_priv;
	int ret = 0;
	u16 value;

	// we are already tuned - just resuming from suspend
	if (ch != NULL)
		dib7000m_set_channel(state, ch, 0);
	else
		return -EINVAL;

	// restart demod
	ret |= dib7000m_write_word(state, 898, 0x4000);
	ret |= dib7000m_write_word(state, 898, 0x0000);
	msleep(45);

	ret |= dib7000m_set_power_mode(state, DIB7000M_POWER_COR4_CRY_ESRAM_MOUT_NUD);
	/* P_ctrl_inh_cor=0, P_ctrl_alpha_cor=4, P_ctrl_inh_isi=0, P_ctrl_alpha_isi=3, P_ctrl_inh_cor4=1, P_ctrl_alpha_cor4=3 */
	ret |= dib7000m_write_word(state, 29, (0 << 14) | (4 << 10) | (0 << 9) | (3 << 5) | (1 << 4) | (0x3));

	// never achieved a lock with that bandwidth so far - wait for timfreq to update
	if (state->timf == 0)
		msleep(200);

	//dump_reg(state);
	/* P_timf_alpha, P_corm_alpha=6, P_corm_thres=0x80 */
	value = (6 << 8) | 0x80;
	/* P_timf_alpha depends on the FFT size (0=2k, 1=8k, 2=4k) */
	switch (ch->nfft) {
		case 0: value |= (7 << 12); break;
		case 1: value |= (9 << 12); break;
		case 2: value |= (8 << 12); break;
	}
	ret |= dib7000m_write_word(state, 26, value);

	/* P_ctrl_freeze_pha_shift=0, P_ctrl_pha_off_max */
	value = (0 << 4);
	switch (ch->nfft) {
		case 0: value |= 0x6; break;
		case 1: value |= 0x8; break;
		case 2: value |= 0x7; break;
	}
	ret |= dib7000m_write_word(state, 32, value);

	/* P_ctrl_sfreq_inh=0, P_ctrl_sfreq_step */
	value = (0 << 4);
	switch (ch->nfft) {
		case 0: value |= 0x6; break;
		case 1: value |= 0x8; break;
		case 2: value |= 0x7; break;
	}
	ret |= dib7000m_write_word(state, 33, value);

	// we achieved a lock - it's time to update the osc freq
	if ((dib7000m_read_word(state, 535) >> 6) & 0x1)
		dib7000m_update_timf_freq(state);

	return ret;
}
767
/*
 * One-time demod initialization after power-up: power everything on, select
 * the output mode, and load default values into the demodulation, FEC and
 * autosearch registers.  @o is the register offset (0 for MA/PA/MB/PB,
 * 1 for HC/MC - see dib7000m_identify).  Returns the OR of all checked
 * register-write results (0 on success).
 */
static int dib7000m_init(struct dvb_frontend *demod)
{
	struct dib7000m_state *state = demod->demodulator_priv;
	int ret = 0;
	u8 o = state->reg_offs;

	dib7000m_set_power_mode(state, DIB7000M_POWER_ALL);

	if (dib7000m_set_adc_state(state, DIBX000_SLOW_ADC_ON) != 0)
		dprintk("-E- could not start Slow ADC\n");

	if (state->cfg.dvbt_mode)
		dib7000m_write_word(state, 1796, 0x0); // select DVB-T output

	/* mobile vs. fixed reception configuration */
	if (state->cfg.mobile_mode)
		ret |= dib7000m_write_word(state, 261 + o, 2);
	else
		ret |= dib7000m_write_word(state, 224 + o, 1);

	/* clear registers 173..180 */
	ret |= dib7000m_write_word(state, 173 + o, 0);
	ret |= dib7000m_write_word(state, 174 + o, 0);
	ret |= dib7000m_write_word(state, 175 + o, 0);
	ret |= dib7000m_write_word(state, 176 + o, 0);
	ret |= dib7000m_write_word(state, 177 + o, 0);
	ret |= dib7000m_write_word(state, 178 + o, 0);
	ret |= dib7000m_write_word(state, 179 + o, 0);
	ret |= dib7000m_write_word(state, 180 + o, 0);

	// P_corm_thres Lock algorithms configuration
	ret |= dib7000m_write_word(state, 26, 0x6680);

	// P_palf_alpha_regul, P_palf_filter_freeze, P_palf_filter_on
	ret |= dib7000m_write_word(state, 170 + o, 0x0410);
	// P_fft_nb_to_cut
	ret |= dib7000m_write_word(state, 182 + o, 8192);
	// P_pha3_thres
	ret |= dib7000m_write_word(state, 195 + o, 0x0ccd);
	// P_cti_use_cpe, P_cti_use_prog
	ret |= dib7000m_write_word(state, 196 + o, 0);
	// P_cspu_regul, P_cspu_win_cut
	ret |= dib7000m_write_word(state, 205 + o, 0x200f);
	// P_adp_regul_cnt
	ret |= dib7000m_write_word(state, 214 + o, 0x023d);
	// P_adp_noise_cnt
	ret |= dib7000m_write_word(state, 215 + o, 0x00a4);
	// P_adp_regul_ext
	ret |= dib7000m_write_word(state, 216 + o, 0x00a4);
	// P_adp_noise_ext
	ret |= dib7000m_write_word(state, 217 + o, 0x7ff0);
	// P_adp_fil
	ret |= dib7000m_write_word(state, 218 + o, 0x3ccc);

	// P_2d_byp_ti_num
	ret |= dib7000m_write_word(state, 226 + o, 0);

	// P_fec_*
	ret |= dib7000m_write_word(state, 281 + o, 0x0010);
	// P_smo_mode, P_smo_rs_discard, P_smo_fifo_flush, P_smo_pid_parse, P_smo_error_discard
	ret |= dib7000m_write_word(state, 294 + o,0x0062);

	// P_iqc_alpha_pha, P_iqc_alpha_amp, P_iqc_dcc_alpha, ...
	if(state->cfg.tuner_is_baseband)
		ret |= dib7000m_write_word(state, 36, 0x0755);
	else
		ret |= dib7000m_write_word(state, 36, 0x1f55);

	// auto search configuration
	ret |= dib7000m_write_word(state, 2, 0x0004);
	ret |= dib7000m_write_word(state, 3, 0x1000);
	ret |= dib7000m_write_word(state, 4, 0x0814);
	ret |= dib7000m_write_word(state, 6, 0x001b);
	ret |= dib7000m_write_word(state, 7, 0x7740);
	ret |= dib7000m_write_word(state, 8, 0x005b);
	ret |= dib7000m_write_word(state, 9, 0x8d80);
	ret |= dib7000m_write_word(state, 10, 0x01c9);
	ret |= dib7000m_write_word(state, 11, 0xc380);
	ret |= dib7000m_write_word(state, 12, 0x0000);
	ret |= dib7000m_write_word(state, 13, 0x0080);
	ret |= dib7000m_write_word(state, 14, 0x0000);
	ret |= dib7000m_write_word(state, 15, 0x0090);
	ret |= dib7000m_write_word(state, 16, 0x0001);
	ret |= dib7000m_write_word(state, 17, 0xd4c0);
	ret |= dib7000m_write_word(state, 263 + o,0x0001);

	// P_divclksel=3 P_divbitsel=1 (bit layout differs between revisions)
	if (state->revision == 0x4000)
		dib7000m_write_word(state, 909, (3 << 10) | (1 << 6));
	else
		dib7000m_write_word(state, 909, (3 << 4) | 1);

	// Tuner IO bank: max drive (14mA)
	ret |= dib7000m_write_word(state, 912 ,0x2c8a);

	ret |= dib7000m_write_word(state, 1817, 1);

	return ret;
}
865
866static int dib7000m_sleep(struct dvb_frontend *demod)
867{
868 struct dib7000m_state *st = demod->demodulator_priv;
869 dib7000m_set_output_mode(st, OUTMODE_HIGH_Z);
870 return dib7000m_set_power_mode(st, DIB7000M_POWER_INTERFACE_ONLY) |
871 dib7000m_set_adc_state(st, DIBX000_SLOW_ADC_OFF) |
872 dib7000m_set_adc_state(st, DIBX000_ADC_OFF);
873}
874
875static int dib7000m_identify(struct dib7000m_state *state)
876{
877 u16 value;
878 if ((value = dib7000m_read_word(state, 896)) != 0x01b3) {
879 dprintk("-E- DiB7000M: wrong Vendor ID (read=0x%x)\n",value);
880 return -EREMOTEIO;
881 }
882
883 state->revision = dib7000m_read_word(state, 897);
884 if (state->revision != 0x4000 &&
885 state->revision != 0x4001 &&
886 state->revision != 0x4002) {
887 dprintk("-E- DiB7000M: wrong Device ID (%x)\n",value);
888 return -EREMOTEIO;
889 }
890
891 /* protect this driver to be used with 7000PC */
892 if (state->revision == 0x4000 && dib7000m_read_word(state, 769) == 0x4000) {
893 dprintk("-E- DiB7000M: this driver does not work with DiB7000PC\n");
894 return -EREMOTEIO;
895 }
896
897 switch (state->revision) {
898 case 0x4000: dprintk("-I- found DiB7000MA/PA/MB/PB\n"); break;
899 case 0x4001: state->reg_offs = 1; dprintk("-I- found DiB7000HC\n"); break;
900 case 0x4002: state->reg_offs = 1; dprintk("-I- found DiB7000MC\n"); break;
901 }
902
903 return 0;
904}
905
906
907static int dib7000m_get_frontend(struct dvb_frontend* fe,
908 struct dvb_frontend_parameters *fep)
909{
910 struct dib7000m_state *state = fe->demodulator_priv;
911 u16 tps = dib7000m_read_word(state,480);
912
913 fep->inversion = INVERSION_AUTO;
914
915 fep->u.ofdm.bandwidth = state->current_bandwidth;
916
917 switch ((tps >> 8) & 0x3) {
918 case 0: fep->u.ofdm.transmission_mode = TRANSMISSION_MODE_2K; break;
919 case 1: fep->u.ofdm.transmission_mode = TRANSMISSION_MODE_8K; break;
920 /* case 2: fep->u.ofdm.transmission_mode = TRANSMISSION_MODE_4K; break; */
921 }
922
923 switch (tps & 0x3) {
924 case 0: fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_32; break;
925 case 1: fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_16; break;
926 case 2: fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_8; break;
927 case 3: fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_4; break;
928 }
929
930 switch ((tps >> 14) & 0x3) {
931 case 0: fep->u.ofdm.constellation = QPSK; break;
932 case 1: fep->u.ofdm.constellation = QAM_16; break;
933 case 2:
934 default: fep->u.ofdm.constellation = QAM_64; break;
935 }
936
937 /* as long as the frontend_param structure is fixed for hierarchical transmission I refuse to use it */
938 /* (tps >> 13) & 0x1 == hrch is used, (tps >> 10) & 0x7 == alpha */
939
940 fep->u.ofdm.hierarchy_information = HIERARCHY_NONE;
941 switch ((tps >> 5) & 0x7) {
942 case 1: fep->u.ofdm.code_rate_HP = FEC_1_2; break;
943 case 2: fep->u.ofdm.code_rate_HP = FEC_2_3; break;
944 case 3: fep->u.ofdm.code_rate_HP = FEC_3_4; break;
945 case 5: fep->u.ofdm.code_rate_HP = FEC_5_6; break;
946 case 7:
947 default: fep->u.ofdm.code_rate_HP = FEC_7_8; break;
948
949 }
950
951 switch ((tps >> 2) & 0x7) {
952 case 1: fep->u.ofdm.code_rate_LP = FEC_1_2; break;
953 case 2: fep->u.ofdm.code_rate_LP = FEC_2_3; break;
954 case 3: fep->u.ofdm.code_rate_LP = FEC_3_4; break;
955 case 5: fep->u.ofdm.code_rate_LP = FEC_5_6; break;
956 case 7:
957 default: fep->u.ofdm.code_rate_LP = FEC_7_8; break;
958 }
959
960 /* native interleaver: (dib7000m_read_word(state, 481) >> 5) & 0x1 */
961
962 return 0;
963}
964
965static int dib7000m_set_frontend(struct dvb_frontend* fe,
966 struct dvb_frontend_parameters *fep)
967{
968 struct dib7000m_state *state = fe->demodulator_priv;
969 struct dibx000_ofdm_channel ch;
970
971 INIT_OFDM_CHANNEL(&ch);
972 FEP2DIB(fep,&ch);
973
974 state->current_bandwidth = fep->u.ofdm.bandwidth;
975 dib7000m_set_bandwidth(fe, fep->u.ofdm.bandwidth);
976
977 if (fe->ops.tuner_ops.set_params)
978 fe->ops.tuner_ops.set_params(fe, fep);
979
980 if (fep->u.ofdm.transmission_mode == TRANSMISSION_MODE_AUTO ||
981 fep->u.ofdm.guard_interval == GUARD_INTERVAL_AUTO ||
982 fep->u.ofdm.constellation == QAM_AUTO ||
983 fep->u.ofdm.code_rate_HP == FEC_AUTO) {
984 int i = 800, found;
985
986 dib7000m_autosearch_start(fe, &ch);
987 do {
988 msleep(1);
989 found = dib7000m_autosearch_is_irq(fe);
990 } while (found == 0 && i--);
991
992 dprintk("autosearch returns: %d\n",found);
993 if (found == 0 || found == 1)
994 return 0; // no channel found
995
996 dib7000m_get_frontend(fe, fep);
997 FEP2DIB(fep, &ch);
998 }
999
1000 /* make this a config parameter */
1001 dib7000m_set_output_mode(state, OUTMODE_MPEG2_FIFO);
1002
1003 return dib7000m_tune(fe, &ch);
1004}
1005
1006static int dib7000m_read_status(struct dvb_frontend *fe, fe_status_t *stat)
1007{
1008 struct dib7000m_state *state = fe->demodulator_priv;
1009 u16 lock = dib7000m_read_word(state, 535);
1010
1011 *stat = 0;
1012
1013 if (lock & 0x8000)
1014 *stat |= FE_HAS_SIGNAL;
1015 if (lock & 0x3000)
1016 *stat |= FE_HAS_CARRIER;
1017 if (lock & 0x0100)
1018 *stat |= FE_HAS_VITERBI;
1019 if (lock & 0x0010)
1020 *stat |= FE_HAS_SYNC;
1021 if (lock & 0x0008)
1022 *stat |= FE_HAS_LOCK;
1023
1024 return 0;
1025}
1026
1027static int dib7000m_read_ber(struct dvb_frontend *fe, u32 *ber)
1028{
1029 struct dib7000m_state *state = fe->demodulator_priv;
1030 *ber = (dib7000m_read_word(state, 526) << 16) | dib7000m_read_word(state, 527);
1031 return 0;
1032}
1033
1034static int dib7000m_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
1035{
1036 struct dib7000m_state *state = fe->demodulator_priv;
1037 *unc = dib7000m_read_word(state, 534);
1038 return 0;
1039}
1040
1041static int dib7000m_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
1042{
1043 struct dib7000m_state *state = fe->demodulator_priv;
1044 u16 val = dib7000m_read_word(state, 390);
1045 *strength = 65535 - val;
1046 return 0;
1047}
1048
1049static int dib7000m_read_snr(struct dvb_frontend* fe, u16 *snr)
1050{
1051 *snr = 0x0000;
1052 return 0;
1053}
1054
1055static int dib7000m_fe_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings *tune)
1056{
1057 tune->min_delay_ms = 1000;
1058 return 0;
1059}
1060
1061static void dib7000m_release(struct dvb_frontend *demod)
1062{
1063 struct dib7000m_state *st = demod->demodulator_priv;
1064 dibx000_exit_i2c_master(&st->i2c_master);
1065 kfree(st);
1066}
1067
1068struct i2c_adapter * dib7000m_get_i2c_master(struct dvb_frontend *demod, enum dibx000_i2c_interface intf, int gating)
1069{
1070 struct dib7000m_state *st = demod->demodulator_priv;
1071 return dibx000_get_i2c_adapter(&st->i2c_master, intf, gating);
1072}
1073EXPORT_SYMBOL(dib7000m_get_i2c_master);
1074
/*
 * Enumerate @no_of_demods chips sharing one i2c bus: move each chip from the
 * power-on default address to its designated address (0x40 + index, shifted),
 * then release the temporary diversity/divstr settings in a second pass.
 * Returns 0 on success, -EIO if a chip cannot be identified (chips already
 * re-addressed in earlier iterations keep their new address in that case).
 */
int dib7000m_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 default_addr, struct dib7000m_config cfg[])
{
	struct dib7000m_state st = { .i2c_adap = i2c };
	int k = 0;
	u8 new_addr = 0;

	/* first pass, from the last chip to the first: re-address each chip */
	for (k = no_of_demods-1; k >= 0; k--) {
		st.cfg = cfg[k];

		/* designated i2c address */
		new_addr = (0x40 + k) << 1;
		st.i2c_addr = new_addr;
		if (dib7000m_identify(&st) != 0) {
			/* not answering there yet - try the power-on default address */
			st.i2c_addr = default_addr;
			if (dib7000m_identify(&st) != 0) {
				dprintk("DiB7000M #%d: not identified\n", k);
				return -EIO;
			}
		}

		/* start diversity to pull_down div_str - just for i2c-enumeration */
		dib7000m_set_output_mode(&st, OUTMODE_DIVERSITY);

		dib7000m_write_word(&st, 1796, 0x0); // select DVB-T output

		/* set new i2c address and force divstart */
		dib7000m_write_word(&st, 1794, (new_addr << 2) | 0x2);

		dprintk("IC %d initialized (to i2c_address 0x%x)\n", k, new_addr);
	}

	/* second pass: undo the enumeration-only settings on every chip */
	for (k = 0; k < no_of_demods; k++) {
		st.cfg = cfg[k];
		st.i2c_addr = (0x40 + k) << 1;

		// unforce divstr
		dib7000m_write_word(&st,1794, st.i2c_addr << 2);

		/* deactivate div - it was just for i2c-enumeration */
		dib7000m_set_output_mode(&st, OUTMODE_HIGH_Z);
	}

	return 0;
}
EXPORT_SYMBOL(dib7000m_i2c_enumeration);
1120
1121static struct dvb_frontend_ops dib7000m_ops;
1122struct dvb_frontend * dib7000m_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000m_config *cfg)
1123{
1124 struct dvb_frontend *demod;
1125 struct dib7000m_state *st;
1126 st = kzalloc(sizeof(struct dib7000m_state), GFP_KERNEL);
1127 if (st == NULL)
1128 return NULL;
1129
1130 memcpy(&st->cfg, cfg, sizeof(struct dib7000m_config));
1131 st->i2c_adap = i2c_adap;
1132 st->i2c_addr = i2c_addr;
1133
1134 demod = &st->demod;
1135 demod->demodulator_priv = st;
1136 memcpy(&st->demod.ops, &dib7000m_ops, sizeof(struct dvb_frontend_ops));
1137
1138 if (dib7000m_identify(st) != 0)
1139 goto error;
1140
1141 if (st->revision == 0x4000)
1142 dibx000_init_i2c_master(&st->i2c_master, DIB7000, st->i2c_adap, st->i2c_addr);
1143 else
1144 dibx000_init_i2c_master(&st->i2c_master, DIB7000MC, st->i2c_adap, st->i2c_addr);
1145
1146 dib7000m_demod_reset(st);
1147
1148 return demod;
1149
1150error:
1151 kfree(st);
1152 return NULL;
1153}
1154EXPORT_SYMBOL(dib7000m_attach);
1155
/* dvb-core operations table for the DiB7000M family. */
static struct dvb_frontend_ops dib7000m_ops = {
	.info = {
		.name = "DiBcom 7000MA/MB/PA/PB/MC",
		.type = FE_OFDM,
		.frequency_min = 44250000,
		.frequency_max = 867250000,
		.frequency_stepsize = 62500,
		.caps = FE_CAN_INVERSION_AUTO |
			FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
			FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
			FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
			FE_CAN_TRANSMISSION_MODE_AUTO |
			FE_CAN_GUARD_INTERVAL_AUTO |
			FE_CAN_RECOVER |
			FE_CAN_HIERARCHY_AUTO,
	},

	.release = dib7000m_release,

	.init = dib7000m_init,
	.sleep = dib7000m_sleep,

	.set_frontend = dib7000m_set_frontend,
	.get_tune_settings = dib7000m_fe_get_tune_settings,
	.get_frontend = dib7000m_get_frontend,

	.read_status = dib7000m_read_status,
	.read_ber = dib7000m_read_ber,
	.read_signal_strength = dib7000m_read_signal_strength,
	.read_snr = dib7000m_read_snr,
	.read_ucblocks = dib7000m_read_unc_blocks,
};
1188
1189MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
1190MODULE_DESCRIPTION("Driver for the DiBcom 7000MA/MB/PA/PB/MC COFDM demodulator");
1191MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/dib7000m.h b/drivers/media/dvb/frontends/dib7000m.h
new file mode 100644
index 000000000000..597e9cc2da62
--- /dev/null
+++ b/drivers/media/dvb/frontends/dib7000m.h
@@ -0,0 +1,51 @@
1#ifndef DIB7000M_H
2#define DIB7000M_H
3
4#include "dibx000_common.h"
5
/* Board-specific configuration for a DiB7000M demod. */
struct dib7000m_config {
	u8 dvbt_mode;			/* when set, init selects the DVB-T output (clears reg 1796) */
	u8 output_mpeg2_in_188_bytes;	/* NOTE(review): not used in the visible code - confirm semantics */
	u8 hostbus_diversity;		/* NOTE(review): not used in the visible code - confirm semantics */
	u8 tuner_is_baseband;		/* selects the IQC setup value written at init */
	u8 mobile_mode;			/* selects mobile vs. fixed reception register defaults */
	/* board callback: return non-zero when the LNA was switched so the
	 * driver restarts the AGC (see dib7000m_update_lna) */
	int (*update_lna) (struct dvb_frontend *, u16 agc_global);

	u8 agc_config_count;		/* number of entries in @agc */
	/* AGC configurations, searched by band_caps per-band */
	struct dibx000_agc_config *agc;

	struct dibx000_bandwidth_config *bw;	/* PLL/bandwidth timing parameters */

#define DIB7000M_GPIO_DEFAULT_DIRECTIONS 0xffff
	u16 gpio_dir;			/* GPIO directions; presumably applied during reset - confirm */
#define DIB7000M_GPIO_DEFAULT_VALUES 0x0000
	u16 gpio_val;			/* GPIO default values; presumably applied during reset - confirm */
#define DIB7000M_GPIO_PWM_POS0(v) ((v & 0xf) << 12)
#define DIB7000M_GPIO_PWM_POS1(v) ((v & 0xf) << 8 )
#define DIB7000M_GPIO_PWM_POS2(v) ((v & 0xf) << 4 )
#define DIB7000M_GPIO_PWM_POS3(v) (v & 0xf)
#define DIB7000M_GPIO_DEFAULT_PWM_POS 0xffff
	u16 gpio_pwm_pos;		/* packed PWM positions, one nibble per GPIO (macros above) */

	u16 pwm_freq_div;

	u8 quartz_direct;

	u8 input_clk_is_div_2;

	/* board callback invoked with 1 before AGC startup and 0 after it settled */
	int (*agc_control) (struct dvb_frontend *, u8 before);
};
38
39#define DEFAULT_DIB7000M_I2C_ADDRESS 18
40
41extern struct dvb_frontend * dib7000m_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000m_config *cfg);
42extern struct i2c_adapter * dib7000m_get_i2c_master(struct dvb_frontend *, enum dibx000_i2c_interface, int);
43
44/* TODO
45extern INT dib7000m_set_gpio(struct dibDemod *demod, UCHAR num, UCHAR dir, UCHAR val);
46extern INT dib7000m_enable_vbg_voltage(struct dibDemod *demod);
47extern void dib7000m_set_hostbus_diversity(struct dibDemod *demod, UCHAR onoff);
48extern USHORT dib7000m_get_current_agc_global(struct dibDemod *demod);
49*/
50
51#endif
diff --git a/drivers/media/dvb/frontends/dib7000p.c b/drivers/media/dvb/frontends/dib7000p.c
new file mode 100644
index 000000000000..0349a4b5da3f
--- /dev/null
+++ b/drivers/media/dvb/frontends/dib7000p.c
@@ -0,0 +1,1019 @@
1/*
2 * Linux-DVB Driver for DiBcom's second generation DiB7000P (PC).
3 *
4 * Copyright (C) 2005-6 DiBcom (http://www.dibcom.fr/)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation, version 2.
9 */
10#include <linux/kernel.h>
11#include <linux/i2c.h>
12
13#include "dvb_frontend.h"
14
15#include "dib7000p.h"
16
/* Module parameter: set non-zero (e.g. via modprobe debug=1) to enable debug logging. */
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");

/* Debug printk gated by the "debug" module parameter; prefixes every message. */
#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiB7000P:"); printk(args); } } while (0)
22
/* Per-demodulator driver state; one instance per attached chip. */
struct dib7000p_state {
    struct dvb_frontend demod;   /* embedded frontend; demodulator_priv points back here */
    struct dib7000p_config cfg;  /* copy of the board config passed to attach() */

    u8 i2c_addr;                 /* stored 8-bit style; shifted right by 1 for transfers */
    struct i2c_adapter *i2c_adap;

    struct dibx000_i2c_master i2c_master; /* gated I2C master (presumably for the tuner bus) */

    u16 wbd_ref;                 /* wide-band detector reference; 0 = use value from AGC config */

    u8 current_band;
    fe_bandwidth_t current_bandwidth;
    struct dibx000_agc_config *current_agc;
    u32 timf;                    /* measured timing frequency; 0 until first lock updates it */

    u16 gpio_dir;                /* shadow of GPIO direction register (1029) */
    u16 gpio_val;                /* shadow of GPIO value register (1030) */
};

/* Power states accepted by dib7000p_set_power_mode(). */
enum dib7000p_power_mode {
    DIB7000P_POWER_ALL = 0,          /* everything in the demod powered up */
    DIB7000P_POWER_INTERFACE_ONLY,   /* only control interfaces: GPIO and (I2C or SDIO) */
};
47
48static u16 dib7000p_read_word(struct dib7000p_state *state, u16 reg)
49{
50 u8 wb[2] = { reg >> 8, reg & 0xff };
51 u8 rb[2];
52 struct i2c_msg msg[2] = {
53 { .addr = state->i2c_addr >> 1, .flags = 0, .buf = wb, .len = 2 },
54 { .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .buf = rb, .len = 2 },
55 };
56
57 if (i2c_transfer(state->i2c_adap, msg, 2) != 2)
58 dprintk("i2c read error on %d\n",reg);
59
60 return (rb[0] << 8) | rb[1];
61}
62
63static int dib7000p_write_word(struct dib7000p_state *state, u16 reg, u16 val)
64{
65 u8 b[4] = {
66 (reg >> 8) & 0xff, reg & 0xff,
67 (val >> 8) & 0xff, val & 0xff,
68 };
69 struct i2c_msg msg = {
70 .addr = state->i2c_addr >> 1, .flags = 0, .buf = b, .len = 4
71 };
72 return i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
73}
74static int dib7000p_set_output_mode(struct dib7000p_state *state, int mode)
75{
76 int ret = 0;
77 u16 outreg, fifo_threshold, smo_mode;
78
79 outreg = 0;
80 fifo_threshold = 1792;
81 smo_mode = (dib7000p_read_word(state, 235) & 0x0010) | (1 << 1);
82
83 dprintk("-I- Setting output mode for demod %p to %d\n",
84 &state->demod, mode);
85
86 switch (mode) {
87 case OUTMODE_MPEG2_PAR_GATED_CLK: // STBs with parallel gated clock
88 outreg = (1 << 10); /* 0x0400 */
89 break;
90 case OUTMODE_MPEG2_PAR_CONT_CLK: // STBs with parallel continues clock
91 outreg = (1 << 10) | (1 << 6); /* 0x0440 */
92 break;
93 case OUTMODE_MPEG2_SERIAL: // STBs with serial input
94 outreg = (1 << 10) | (2 << 6) | (0 << 1); /* 0x0480 */
95 break;
96 case OUTMODE_DIVERSITY:
97 if (state->cfg.hostbus_diversity)
98 outreg = (1 << 10) | (4 << 6); /* 0x0500 */
99 else
100 outreg = (1 << 11);
101 break;
102 case OUTMODE_MPEG2_FIFO: // e.g. USB feeding
103 smo_mode |= (3 << 1);
104 fifo_threshold = 512;
105 outreg = (1 << 10) | (5 << 6);
106 break;
107 case OUTMODE_HIGH_Z: // disable
108 outreg = 0;
109 break;
110 default:
111 dprintk("Unhandled output_mode passed to be set for demod %p\n",&state->demod);
112 break;
113 }
114
115 if (state->cfg.output_mpeg2_in_188_bytes)
116 smo_mode |= (1 << 5) ;
117
118 ret |= dib7000p_write_word(state, 235, smo_mode);
119 ret |= dib7000p_write_word(state, 236, fifo_threshold); /* synchronous fread */
120 ret |= dib7000p_write_word(state, 1286, outreg); /* P_Div_active */
121
122 return ret;
123}
124
/*
 * Program the power-control registers (774-776, 899, 1280) for the given
 * mode. The defaults below power everything OFF; each case then clears
 * the bits for the parts that must stay powered.
 */
static int dib7000p_set_power_mode(struct dib7000p_state *state, enum dib7000p_power_mode mode)
{
    /* by default everything is powered off */
    u16 reg_774 = 0xffff, reg_775 = 0xffff, reg_776 = 0x0007, reg_899 = 0x0003,
        reg_1280 = (0xfe00) | (dib7000p_read_word(state, 1280) & 0x01ff);

    /* now, depending on the requested mode, we power on */
    switch (mode) {
        /* power up everything in the demod */
        case DIB7000P_POWER_ALL:
            reg_774 = 0x0000; reg_775 = 0x0000; reg_776 = 0x0; reg_899 = 0x0; reg_1280 &= 0x01ff;
            break;
        /* just leave power on the control-interfaces: GPIO and (I2C or SDIO) */
        case DIB7000P_POWER_INTERFACE_ONLY: /* TODO power up either SDIO or I2C */
            reg_1280 &= ~((1 << 14) | (1 << 13) | (1 << 12) | (1 << 10));
            break;
/* TODO following stuff is just converted from the dib7000-driver - check when is used what */
    }

    dib7000p_write_word(state, 774, reg_774);
    dib7000p_write_word(state, 775, reg_775);
    dib7000p_write_word(state, 776, reg_776);
    dib7000p_write_word(state, 899, reg_899);
    dib7000p_write_word(state, 1280, reg_1280);

    return 0;
}
152
/*
 * Change the ADC/VBG power state via registers 908/909. Note the
 * read-modify-write: both registers are always written back at the end,
 * so the per-case bit twiddling is order-sensitive (SLOW_ADC_ON writes
 * 909 once with both bits set, then clears bit 1 for the final write).
 */
static void dib7000p_set_adc_state(struct dib7000p_state *state, enum dibx000_adc_states no)
{
    u16 reg_908 = dib7000p_read_word(state, 908),
        reg_909 = dib7000p_read_word(state, 909);

    switch (no) {
        case DIBX000_SLOW_ADC_ON:
            reg_909 |= (1 << 1) | (1 << 0);
            dib7000p_write_word(state, 909, reg_909);
            reg_909 &= ~(1 << 1);
            break;

        case DIBX000_SLOW_ADC_OFF:
            reg_909 |=  (1 << 1) | (1 << 0);
            break;

        case DIBX000_ADC_ON:
            reg_908 &= 0x0fff;
            reg_909 &= 0x0003;
            break;

        case DIBX000_ADC_OFF: // leave the VBG voltage on
            reg_908 |= (1 << 14) | (1 << 13) | (1 << 12);
            reg_909 |= (1 << 5) | (1 << 4) | (1 << 3) | (1 << 2);
            break;

        case DIBX000_VBG_ENABLE:
            reg_908 &= ~(1 << 15);
            break;

        case DIBX000_VBG_DISABLE:
            reg_908 |= (1 << 15);
            break;

        default:
            break;
    }

//  dprintk("908: %x, 909: %x\n", reg_908, reg_909);

    dib7000p_write_word(state, 908, reg_908);
    dib7000p_write_word(state, 909, reg_909);
}
196
/*
 * Scale the timing frequency (timf) for the requested channel bandwidth
 * and program it into registers 23/24. Uses the board default timf until
 * a lock has produced a measured value (state->timf != 0).
 * BW_Idx is a fe_bandwidth_t index, not a kHz value.
 */
static int dib7000p_set_bandwidth(struct dvb_frontend *demod, u8 BW_Idx)
{
    struct dib7000p_state *state = demod->demodulator_priv;
    u32 timf;

    // store the current bandwidth for later use
    state->current_bandwidth = BW_Idx;

    if (state->timf == 0) {
        dprintk("-D- Using default timf\n");
        timf = state->cfg.bw->timf;
    } else {
        dprintk("-D- Using updated timf\n");
        timf = state->timf;
    }

    /* integer math: divide by 100 first to keep the intermediate in range */
    timf = timf * (BW_INDEX_TO_KHZ(BW_Idx) / 100) / 80;

    dprintk("timf: %d\n",timf);

    dib7000p_write_word(state, 23, (timf >> 16) & 0xffff);
    dib7000p_write_word(state, 24, (timf      ) & 0xffff);

    return 0;
}
222
/*
 * Run the SAD (slow ADC) calibration: set the reference (register 74),
 * then pulse the calibration-start bit in register 73.
 */
static int dib7000p_sad_calib(struct dib7000p_state *state)
{
/* internal */
//  dib7000p_write_word(state, 72, (3 << 14) | (1 << 12) | (524 << 0)); // sampling clock of the SAD is writting in set_bandwidth
    dib7000p_write_word(state, 73, (0 << 1) | (0 << 0));
    dib7000p_write_word(state, 74, 776); // 0.625*3.3 / 4096

    /* do the calibration */
    dib7000p_write_word(state, 73, (1 << 0));
    dib7000p_write_word(state, 73, (0 << 0));

    msleep(1);

    return 0;
}
238
/*
 * Program the PLL and clock configuration (registers 903/900), the
 * internal frequency (18/19, in Hz: bw->internal is kHz), the IF (21/22)
 * and the SAD configuration (72) from the board's bandwidth config.
 */
static void dib7000p_reset_pll(struct dib7000p_state *state)
{
    struct dibx000_bandwidth_config *bw = &state->cfg.bw[0];

    dib7000p_write_word(state, 903, (bw->pll_prediv << 5) | (((bw->pll_ratio >> 6) & 0x3) << 3) | (bw->pll_range << 1) | bw->pll_reset);
    dib7000p_write_word(state, 900, ((bw->pll_ratio & 0x3f) << 9) | (bw->pll_bypass << 15) | (bw->modulo << 7) | (bw->ADClkSrc << 6) |
        (bw->IO_CLK_en_core << 5) | (bw->bypclk_div << 2) | (bw->enable_refdiv << 1) | (0 << 0));

    dib7000p_write_word(state, 18, ((bw->internal*1000) >> 16) & 0xffff);
    dib7000p_write_word(state, 19, (bw->internal*1000 ) & 0xffff);
    dib7000p_write_word(state, 21, (bw->ifreq >> 16) & 0xffff);
    dib7000p_write_word(state, 22, (bw->ifreq      ) & 0xffff);

    dib7000p_write_word(state, 72, bw->sad_cfg);
}
254
/*
 * Restore the GPIO block (direction, values, PWM positions and PWM clock
 * divider) from the shadowed/configured values. Always returns 0.
 */
static int dib7000p_reset_gpio(struct dib7000p_state *st)
{
    /* reset the GPIOs */
    dprintk("-D- gpio dir: %x: gpio val: %x, gpio pwm pos: %x\n",st->gpio_dir, st->gpio_val,st->cfg.gpio_pwm_pos);

    dib7000p_write_word(st, 1029, st->gpio_dir);
    dib7000p_write_word(st, 1030, st->gpio_val);

    /* TODO 1031 is P_gpio_od */

    dib7000p_write_word(st, 1032, st->cfg.gpio_pwm_pos);

    dib7000p_write_word(st, 1037, st->cfg.pwm_freq_div);
    return 0;
}
270
/*
 * Full soft reset of the demodulator: power everything up, enable the
 * band-gap voltage, pulse the restart bits of all sub-blocks (keeping the
 * I2C/SDIO/GPIO control interfaces alive), re-program PLL and GPIOs, and
 * finally drop back to interface-only power. Always returns 0.
 */
static int dib7000p_demod_reset(struct dib7000p_state *state)
{
    dib7000p_set_power_mode(state, DIB7000P_POWER_ALL);

    dib7000p_set_adc_state(state, DIBX000_VBG_ENABLE);

    /* restart all parts */
    dib7000p_write_word(state,  770, 0xffff);
    dib7000p_write_word(state,  771, 0xffff);
    dib7000p_write_word(state,  772, 0x001f);
    dib7000p_write_word(state,  898, 0x0003);
    /* except i2c, sdio, gpio - control interfaces */
    dib7000p_write_word(state, 1280, 0x01fc - ((1 << 7) | (1 << 6) | (1 << 5)) );

    dib7000p_write_word(state,  770, 0);
    dib7000p_write_word(state,  771, 0);
    dib7000p_write_word(state,  772, 0);
    dib7000p_write_word(state,  898, 0);
    dib7000p_write_word(state, 1280, 0);

    /* default */
    dib7000p_reset_pll(state);

    if (dib7000p_reset_gpio(state) != 0)
        dprintk("-E- GPIO reset was not successful.\n");

    if (dib7000p_set_output_mode(state, OUTMODE_HIGH_Z) != 0)
        dprintk("-E- OUTPUT_MODE could not be resetted.\n");

    /* unforce divstr regardless whether i2c enumeration was done or not */
    dib7000p_write_word(state, 1285, dib7000p_read_word(state, 1285) & ~(1 << 1) );

    dib7000p_set_power_mode(state, DIB7000P_POWER_INTERFACE_ONLY);

    return 0;
}
307
/* Pulse the IQC and AGC restart bits (register 770) to re-run the AGC loop. */
static void dib7000p_restart_agc(struct dib7000p_state *state)
{
    // P_restart_iqc & P_restart_agc
    dib7000p_write_word(state, 770, 0x0c00);
    dib7000p_write_word(state, 770, 0x0000);
}
314
315static void dib7000p_update_lna(struct dib7000p_state *state)
316{
317 int i;
318 u16 dyn_gain;
319
320 // when there is no LNA to program return immediatly
321 if (state->cfg.update_lna == NULL)
322 return;
323
324 for (i = 0; i < 5; i++) {
325 // read dyn_gain here (because it is demod-dependent and not tuner)
326 dyn_gain = dib7000p_read_word(state, 394);
327
328 if (state->cfg.update_lna(&state->demod,dyn_gain)) { // LNA has changed
329 dib7000p_restart_agc(state);
330 msleep(5);
331 } else
332 break;
333 }
334}
335
/* Power up the PLL (register 903 bit 0) and select the high-frequency clock (register 900). */
static void dib7000p_pll_clk_cfg(struct dib7000p_state *state)
{
    u16 tmp = 0;
    tmp = dib7000p_read_word(state, 903);
    dib7000p_write_word(state, 903, (tmp | 0x1));   //pwr-up pll
    tmp = dib7000p_read_word(state, 900);
    dib7000p_write_word(state, 900, (tmp & 0x7fff) | (1 << 6)); //use High freq clock
}
344
/*
 * After a lock, read back the measured timing frequency (registers
 * 427/428), normalize it to the current bandwidth (inverse of the scaling
 * in dib7000p_set_bandwidth) and cache it in state->timf for future tunes.
 */
static void dib7000p_update_timf_freq(struct dib7000p_state *state)
{
    u32 timf = (dib7000p_read_word(state, 427) << 16) | dib7000p_read_word(state, 428);
    state->timf = timf * 80 / (BW_INDEX_TO_KHZ(state->current_bandwidth) / 100);
    dib7000p_write_word(state, 23, (u16) (timf >> 16));
    dib7000p_write_word(state, 24, (u16) (timf & 0xffff));
    dprintk("-D- Updated timf_frequency: %d (default: %d)\n",state->timf, state->cfg.bw->timf);
}
353
/*
 * Program the demodulator for one OFDM channel description (FFT size,
 * guard interval, constellation, code rates, channel-estimation
 * coefficients), then power up the analog path and run the AGC
 * start-up sequence. "seq" selects the autosearch sequence (0 = normal
 * tune, 7 = full autosearch; see dib7000p_autosearch_start).
 */
static void dib7000p_set_channel(struct dib7000p_state *state, struct dibx000_ofdm_channel *ch, u8 seq)
{
    u16 tmp, est[4]; // reg_26, reg_32, reg_33, reg_187, reg_188, reg_189, reg_190, reg_207, reg_208;

    /* nfft, guard, qam, alpha */
    dib7000p_write_word(state, 0, (ch->nfft << 7) | (ch->guard << 5) | (ch->nqam << 3) | (ch->vit_alpha));
    dib7000p_write_word(state, 5, (seq << 4) | 1); /* do not force tps, search list 0 */

    /* P_dintl_native, P_dintlv_inv, P_vit_hrch, P_vit_code_rate, P_vit_select_hp */
    tmp = (ch->intlv_native << 6) | (ch->vit_hrch << 4) | (ch->vit_select_hp & 0x1);
    if (ch->vit_hrch == 0 || ch->vit_select_hp == 1)
        tmp |= (ch->vit_code_rate_hp << 1);
    else
        tmp |= (ch->vit_code_rate_lp << 1);
    dib7000p_write_word(state, 208, tmp);

    /* P_dvsy_sync_wait */
    switch (ch->nfft) {
        case 1: tmp = 256; break;   /* 8K */
        case 2: tmp = 128; break;   /* 4K */
        case 0:
        default: tmp = 64; break;   /* 2K */
    }
    tmp *= ((1 << (ch->guard)) * 3 / 2); // add 50% SFN margin
    tmp <<= 4;

    /* deactive the possibility of diversity reception if extended interleave */
    /* P_dvsy_sync_mode = 0, P_dvsy_sync_enable=1, P_dvcb_comb_mode=2 */
    if (ch->intlv_native || ch->nfft == 1)
        tmp |= (1 << 2) | (2 << 0);
    dib7000p_write_word(state, 207, tmp);

    dib7000p_write_word(state, 26, 0x6680); // timf(6xxx)
    dib7000p_write_word(state, 29, 0x1273); // isi inh1273 on1073
    dib7000p_write_word(state, 32, 0x0003); // pha_off_max(xxx3)
    dib7000p_write_word(state, 33, 0x0005); // sfreq(xxx5)

    /* channel estimation fine configuration */
    switch (ch->nqam) {
        case 2:     /* QAM64 */
            est[0] = 0x0148; /* P_adp_regul_cnt 0.04 */
            est[1] = 0xfff0; /* P_adp_noise_cnt -0.002 */
            est[2] = 0x00a4; /* P_adp_regul_ext 0.02 */
            est[3] = 0xfff8; /* P_adp_noise_ext -0.001 */
            break;
        case 1:     /* QAM16 */
            est[0] = 0x023d; /* P_adp_regul_cnt 0.07 */
            est[1] = 0xffdf; /* P_adp_noise_cnt -0.004 */
            est[2] = 0x00a4; /* P_adp_regul_ext 0.02 */
            est[3] = 0xfff0; /* P_adp_noise_ext -0.002 */
            break;
        default:    /* QPSK */
            est[0] = 0x099a; /* P_adp_regul_cnt 0.3 */
            est[1] = 0xffae; /* P_adp_noise_cnt -0.01 */
            est[2] = 0x0333; /* P_adp_regul_ext 0.1 */
            est[3] = 0xfff8; /* P_adp_noise_ext -0.002 */
            break;
    }
    for (tmp = 0; tmp < 4; tmp++)
        dib7000p_write_word(state, 187 + tmp, est[tmp]);

    // set power-up level: interf+analog+AGC
    dib7000p_set_power_mode(state, DIB7000P_POWER_ALL);
    dib7000p_set_adc_state(state, DIBX000_ADC_ON);
    dib7000p_pll_clk_cfg(state);
    msleep(7);

    // AGC initialization
    if (state->cfg.agc_control)
        state->cfg.agc_control(&state->demod, 1);

    dib7000p_restart_agc(state);

    // wait AGC rough lock time
    msleep(5);

    dib7000p_update_lna(state);

    // wait AGC accurate lock time
    msleep(7);
    if (state->cfg.agc_control)
        state->cfg.agc_control(&state->demod, 0);
}
437
/*
 * Kick off a hardware TPS autosearch: tune with a neutral channel
 * description (all searched parameters at fixed starting values), program
 * the lock wait times, then pulse the autosearch-start bit in register 0.
 * Result is polled via dib7000p_autosearch_is_irq().
 */
static int dib7000p_autosearch_start(struct dvb_frontend *demod, struct dibx000_ofdm_channel *ch)
{
    struct dib7000p_state *state = demod->demodulator_priv;
    struct dibx000_ofdm_channel auto_ch;
    u32 value;

    INIT_OFDM_CHANNEL(&auto_ch);
    auto_ch.RF_kHz           = ch->RF_kHz;
    auto_ch.Bw               = ch->Bw;
    auto_ch.nqam             = 2;
    auto_ch.guard            = 0;
    auto_ch.nfft             = 1;
    auto_ch.vit_alpha        = 1;
    auto_ch.vit_select_hp    = 1;
    auto_ch.vit_code_rate_hp = 2;
    auto_ch.vit_code_rate_lp = 3;
    auto_ch.vit_hrch         = 0;
    auto_ch.intlv_native     = 1;

    dib7000p_set_channel(state, &auto_ch, 7);

    // always use the setting for 8MHz here lock_time for 7,6 MHz are longer
    value = 30 * state->cfg.bw->internal;
    dib7000p_write_word(state, 6,  (u16) ((value >> 16) & 0xffff)); // lock0 wait time
    dib7000p_write_word(state, 7,  (u16)  (value        & 0xffff)); // lock0 wait time
    value = 100 * state->cfg.bw->internal;
    dib7000p_write_word(state, 8,  (u16) ((value >> 16) & 0xffff)); // lock1 wait time
    dib7000p_write_word(state, 9,  (u16)  (value        & 0xffff)); // lock1 wait time
    value = 500 * state->cfg.bw->internal;
    dib7000p_write_word(state, 10, (u16) ((value >> 16) & 0xffff)); // lock2 wait time
    dib7000p_write_word(state, 11, (u16)  (value        & 0xffff)); // lock2 wait time

    /* pulse the autosearch-start bit, clearing any pending IRQ in between */
    value = dib7000p_read_word(state, 0);
    dib7000p_write_word(state, 0, (1 << 9) | value);
    dib7000p_read_word(state, 1284);
    dib7000p_write_word(state, 0, (u16) value);

    return 0;
}
477
478static int dib7000p_autosearch_is_irq(struct dvb_frontend *demod)
479{
480 struct dib7000p_state *state = demod->demodulator_priv;
481 u16 irq_pending = dib7000p_read_word(state, 1284);
482
483 if (irq_pending & 0x1) // failed
484 return 1;
485
486 if (irq_pending & 0x2) // succeeded
487 return 2;
488
489 return 0; // still pending
490}
491
/*
 * Tune to a fully-specified channel: program the channel, restart the
 * demod, set the FFT-size-dependent loop parameters, and - if a lock is
 * achieved - update the cached timing frequency. ch must not be NULL.
 */
static int dib7000p_tune(struct dvb_frontend *demod, struct dibx000_ofdm_channel *ch)
{
    struct dib7000p_state *state = demod->demodulator_priv;
    u16 tmp = 0;

    if (ch != NULL)
        dib7000p_set_channel(state, ch, 0);
    else
        return -EINVAL;

    // restart demod
    dib7000p_write_word(state, 770, 0x4000);
    dib7000p_write_word(state, 770, 0x0000);
    msleep(45);

    /* P_ctrl_inh_cor=0, P_ctrl_alpha_cor=4, P_ctrl_inh_isi=0, P_ctrl_alpha_isi=3, P_ctrl_inh_cor4=1, P_ctrl_alpha_cor4=3 */
    dib7000p_write_word(state, 29, (0 << 14) | (4 << 10) | (0 << 9) | (3 << 5) | (1 << 4) | (0x3));

    // never achieved a lock with that bandwidth so far - wait for osc-freq to update
    if (state->timf == 0)
        msleep(200);

    /* offset loop parameters */

    /* P_timf_alpha, P_corm_alpha=6, P_corm_thres=0x80 */
    tmp = (6 << 8) | 0x80;
    switch (ch->nfft) {
        case 0: tmp |= (7 << 12); break;
        case 1: tmp |= (9 << 12); break;
        case 2: tmp |= (8 << 12); break;
    }
    dib7000p_write_word(state, 26, tmp);  /* timf_a(6xxx) */

    /* P_ctrl_freeze_pha_shift=0, P_ctrl_pha_off_max */
    tmp = (0 << 4);
    switch (ch->nfft) {
        case 0: tmp |= 0x6; break;
        case 1: tmp |= 0x8; break;
        case 2: tmp |= 0x7; break;
    }
    dib7000p_write_word(state, 32, tmp);

    /* P_ctrl_sfreq_inh=0, P_ctrl_sfreq_step */
    tmp = (0 << 4);
    switch (ch->nfft) {
        case 0: tmp |= 0x6; break;
        case 1: tmp |= 0x8; break;
        case 2: tmp |= 0x7; break;
    }
    dib7000p_write_word(state, 33, tmp);

    /* register 509 bit 6 = lock indicator; on failure, restart the FEC once */
    tmp = dib7000p_read_word(state,509);
    if (!((tmp >> 6) & 0x1)) {
        /* restart the fec */
        tmp = dib7000p_read_word(state,771);
        dib7000p_write_word(state, 771, tmp | (1 << 1));
        dib7000p_write_word(state, 771, tmp);
        msleep(10);
        tmp = dib7000p_read_word(state,509);
    }

    // we achieved a lock - it's time to update the osc freq
    if ((tmp >> 6) & 0x1)
        dib7000p_update_timf_freq(state);

    return 0;
}
559
/*
 * Frontend init callback: bring the chip to a fully-powered default
 * configuration - AGC from the board config, power smoothing off, loop
 * and autosearch defaults, clock configuration - then run the SAD
 * calibration. Returns the OR of all write results (0 on success).
 */
static int dib7000p_init(struct dvb_frontend *demod)
{
    struct dibx000_agc_config *agc;
    struct dib7000p_state *state = demod->demodulator_priv;
    int ret = 0;

    // Demodulator default configuration
    agc = state->cfg.agc;

    dib7000p_set_power_mode(state, DIB7000P_POWER_ALL);
    dib7000p_set_adc_state(state, DIBX000_SLOW_ADC_ON);

    /* AGC */
    ret |= dib7000p_write_word(state, 75 ,  agc->setup );
    ret |= dib7000p_write_word(state, 76 ,  agc->inv_gain );
    ret |= dib7000p_write_word(state, 77 ,  agc->time_stabiliz );
    ret |= dib7000p_write_word(state, 100, (agc->alpha_level << 12) | agc->thlock);

    // Demod AGC loop configuration
    ret |= dib7000p_write_word(state, 101, (agc->alpha_mant << 5) | agc->alpha_exp);
    ret |= dib7000p_write_word(state, 102, (agc->beta_mant << 6)  | agc->beta_exp);

    /* AGC continued */
    dprintk("-D- WBD: ref: %d, sel: %d, active: %d, alpha: %d\n",
        state->wbd_ref != 0 ? state->wbd_ref : agc->wbd_ref, agc->wbd_sel, !agc->perform_agc_softsplit, agc->wbd_sel);

    /* a board-supplied WBD reference overrides the AGC-config default */
    if (state->wbd_ref != 0)
        ret |= dib7000p_write_word(state, 105, (agc->wbd_inv << 12) | state->wbd_ref);
    else
        ret |= dib7000p_write_word(state, 105, (agc->wbd_inv << 12) | agc->wbd_ref);

    ret |= dib7000p_write_word(state, 106, (agc->wbd_sel << 13) | (agc->wbd_alpha << 9) | (agc->perform_agc_softsplit << 8) );

    ret |= dib7000p_write_word(state, 107,  agc->agc1_max);
    ret |= dib7000p_write_word(state, 108,  agc->agc1_min);
    ret |= dib7000p_write_word(state, 109,  agc->agc2_max);
    ret |= dib7000p_write_word(state, 110,  agc->agc2_min);
    ret |= dib7000p_write_word(state, 111, (agc->agc1_pt1 << 8) | agc->agc1_pt2 );
    ret |= dib7000p_write_word(state, 112,  agc->agc1_pt3);
    ret |= dib7000p_write_word(state, 113, (agc->agc1_slope1 << 8) | agc->agc1_slope2);
    ret |= dib7000p_write_word(state, 114, (agc->agc2_pt1 << 8)   | agc->agc2_pt2);
    ret |= dib7000p_write_word(state, 115, (agc->agc2_slope1 << 8) | agc->agc2_slope2);

    /* disable power smoothing */
    ret |= dib7000p_write_word(state, 145, 0);
    ret |= dib7000p_write_word(state, 146, 0);
    ret |= dib7000p_write_word(state, 147, 0);
    ret |= dib7000p_write_word(state, 148, 0);
    ret |= dib7000p_write_word(state, 149, 0);
    ret |= dib7000p_write_word(state, 150, 0);
    ret |= dib7000p_write_word(state, 151, 0);
    ret |= dib7000p_write_word(state, 152, 0);

    // P_timf_alpha=6, P_corm_alpha=6, P_corm_thres=128 default: 6,4,26
    ret |= dib7000p_write_word(state, 26 ,0x6680);

    // P_palf_filter_on=1, P_palf_filter_freeze=0, P_palf_alpha_regul=16
    ret |= dib7000p_write_word(state, 142,0x0410);
    // P_fft_freq_dir=1, P_fft_nb_to_cut=0
    ret |= dib7000p_write_word(state, 154,1 << 13);
    // P_pha3_thres, default 0x3000
    ret |= dib7000p_write_word(state, 168,0x0ccd);
    // P_cti_use_cpe=0, P_cti_use_prog=0, P_cti_win_len=16, default: 0x0010
    //ret |= dib7000p_write_word(state, 169,0x0010);
    // P_cspu_regul=512, P_cspu_win_cut=15, default: 0x2005
    ret |= dib7000p_write_word(state, 183,0x200f);
    // P_adp_regul_cnt=573, default: 410
    ret |= dib7000p_write_word(state, 187,0x023d);
    // P_adp_noise_cnt=
    ret |= dib7000p_write_word(state, 188,0x00a4);
    // P_adp_regul_ext
    ret |= dib7000p_write_word(state, 189,0x00a4);
    // P_adp_noise_ext
    ret |= dib7000p_write_word(state, 190,0x7ff0);
    // P_adp_fil
    ret |= dib7000p_write_word(state, 191,0x3ccc);

    ret |= dib7000p_write_word(state, 222,0x0010);
    // P_smo_mode, P_smo_rs_discard, P_smo_fifo_flush, P_smo_pid_parse, P_smo_error_discard
    ret |= dib7000p_write_word(state, 235,0x0062);

    // P_iqc_alpha_pha, P_iqc_alpha_amp_dcc_alpha, ...
    if(state->cfg.tuner_is_baseband)
        ret |= dib7000p_write_word(state, 36,0x0755);
    else
        ret |= dib7000p_write_word(state, 36,0x1f55);

    // auto search configuration
    ret |= dib7000p_write_word(state, 2  ,0x0004);
    ret |= dib7000p_write_word(state, 3  ,0x1000);

    /* Equal Lock */
    ret |= dib7000p_write_word(state, 4   ,0x0814);

    ret |= dib7000p_write_word(state, 6  ,0x001b);
    ret |= dib7000p_write_word(state, 7  ,0x7740);
    ret |= dib7000p_write_word(state, 8  ,0x005b);
    ret |= dib7000p_write_word(state, 9  ,0x8d80);
    ret |= dib7000p_write_word(state, 10 ,0x01c9);
    ret |= dib7000p_write_word(state, 11 ,0xc380);
    ret |= dib7000p_write_word(state, 12 ,0x0000);
    ret |= dib7000p_write_word(state, 13 ,0x0080);
    ret |= dib7000p_write_word(state, 14 ,0x0000);
    ret |= dib7000p_write_word(state, 15 ,0x0090);
    ret |= dib7000p_write_word(state, 16 ,0x0001);
    ret |= dib7000p_write_word(state, 17 ,0xd4c0);

    // P_clk_cfg1
    ret |= dib7000p_write_word(state, 901, 0x0006);

    // P_divclksel=3 P_divbitsel=1
    ret |= dib7000p_write_word(state, 902, (3 << 10) | (1 << 6));

    // Tuner IO bank: max drive (14mA) + divout pads max drive
    ret |= dib7000p_write_word(state, 905, 0x2c8e);

    ret |= dib7000p_set_bandwidth(&state->demod, BANDWIDTH_8_MHZ);
    dib7000p_sad_calib(state);

    return ret;
}
681
682static int dib7000p_sleep(struct dvb_frontend *demod)
683{
684 struct dib7000p_state *state = demod->demodulator_priv;
685 return dib7000p_set_output_mode(state, OUTMODE_HIGH_Z) | dib7000p_set_power_mode(state, DIB7000P_POWER_INTERFACE_ONLY);
686}
687
688static int dib7000p_identify(struct dib7000p_state *st)
689{
690 u16 value;
691 dprintk("-I- DiB7000PC: checking demod on I2C address: %d (%x)\n",
692 st->i2c_addr, st->i2c_addr);
693
694 if ((value = dib7000p_read_word(st, 768)) != 0x01b3) {
695 dprintk("-E- DiB7000PC: wrong Vendor ID (read=0x%x)\n",value);
696 return -EREMOTEIO;
697 }
698
699 if ((value = dib7000p_read_word(st, 769)) != 0x4000) {
700 dprintk("-E- DiB7000PC: wrong Device ID (%x)\n",value);
701 return -EREMOTEIO;
702 }
703
704 return 0;
705}
706
707
/*
 * Frontend get_frontend callback: decode the received TPS bits
 * (register 463) into the dvb_frontend_parameters structure.
 */
static int dib7000p_get_frontend(struct dvb_frontend* fe,
                struct dvb_frontend_parameters *fep)
{
    struct dib7000p_state *state = fe->demodulator_priv;
    u16 tps = dib7000p_read_word(state,463);

    fep->inversion = INVERSION_AUTO;

    fep->u.ofdm.bandwidth = state->current_bandwidth;

    /* TPS bits 8-9: transmission mode */
    switch ((tps >> 8) & 0x3) {
        case 0: fep->u.ofdm.transmission_mode = TRANSMISSION_MODE_2K; break;
        case 1: fep->u.ofdm.transmission_mode = TRANSMISSION_MODE_8K; break;
        /* case 2: fep->u.ofdm.transmission_mode = TRANSMISSION_MODE_4K; break; */
    }

    /* TPS bits 0-1: guard interval */
    switch (tps & 0x3) {
        case 0: fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_32; break;
        case 1: fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_16; break;
        case 2: fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_8; break;
        case 3: fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_4; break;
    }

    /* TPS bits 14-15: constellation */
    switch ((tps >> 14) & 0x3) {
        case 0: fep->u.ofdm.constellation = QPSK; break;
        case 1: fep->u.ofdm.constellation = QAM_16; break;
        case 2:
        default: fep->u.ofdm.constellation = QAM_64; break;
    }

    /* as long as the frontend_param structure is fixed for hierarchical transmission I refuse to use it */
    /* (tps >> 13) & 0x1 == hrch is used, (tps >> 10) & 0x7 == alpha */

    fep->u.ofdm.hierarchy_information = HIERARCHY_NONE;
    /* TPS bits 5-7: HP code rate */
    switch ((tps >> 5) & 0x7) {
        case 1: fep->u.ofdm.code_rate_HP = FEC_1_2; break;
        case 2: fep->u.ofdm.code_rate_HP = FEC_2_3; break;
        case 3: fep->u.ofdm.code_rate_HP = FEC_3_4; break;
        case 5: fep->u.ofdm.code_rate_HP = FEC_5_6; break;
        case 7:
        default: fep->u.ofdm.code_rate_HP = FEC_7_8; break;

    }

    /* TPS bits 2-4: LP code rate */
    switch ((tps >> 2) & 0x7) {
        case 1: fep->u.ofdm.code_rate_LP = FEC_1_2; break;
        case 2: fep->u.ofdm.code_rate_LP = FEC_2_3; break;
        case 3: fep->u.ofdm.code_rate_LP = FEC_3_4; break;
        case 5: fep->u.ofdm.code_rate_LP = FEC_5_6; break;
        case 7:
        default: fep->u.ofdm.code_rate_LP = FEC_7_8; break;
    }

    /* native interleaver: (dib7000p_read_word(state, 464) >> 5) & 0x1 */

    return 0;
}
765
/*
 * Frontend set_frontend callback. If any parameter is AUTO, run the
 * hardware autosearch first (up to ~800 ms) and read back the detected
 * parameters; then enable the TS output and tune.
 * Returns 0 when the autosearch found nothing (without reporting an error
 * to the caller - the lock status will simply stay unset).
 */
static int dib7000p_set_frontend(struct dvb_frontend* fe,
                struct dvb_frontend_parameters *fep)
{
    struct dib7000p_state *state = fe->demodulator_priv;
    struct dibx000_ofdm_channel ch;

    INIT_OFDM_CHANNEL(&ch);
    FEP2DIB(fep,&ch);

    state->current_bandwidth = fep->u.ofdm.bandwidth;
    dib7000p_set_bandwidth(fe, fep->u.ofdm.bandwidth);

    if (fe->ops.tuner_ops.set_params)
        fe->ops.tuner_ops.set_params(fe, fep);

    if (fep->u.ofdm.transmission_mode == TRANSMISSION_MODE_AUTO ||
        fep->u.ofdm.guard_interval    == GUARD_INTERVAL_AUTO ||
        fep->u.ofdm.constellation     == QAM_AUTO ||
        fep->u.ofdm.code_rate_HP      == FEC_AUTO) {
        int i = 800, found;

        dib7000p_autosearch_start(fe, &ch);
        do {
            msleep(1);
            found = dib7000p_autosearch_is_irq(fe);
        } while (found == 0 && i--);

        dprintk("autosearch returns: %d\n",found);
        if (found == 0 || found == 1)
            return 0; // no channel found

        dib7000p_get_frontend(fe, fep);
        FEP2DIB(fep, &ch);
    }

    /* make this a config parameter */
    dib7000p_set_output_mode(state, OUTMODE_MPEG2_FIFO);

    return dib7000p_tune(fe, &ch);
}
806
807static int dib7000p_read_status(struct dvb_frontend *fe, fe_status_t *stat)
808{
809 struct dib7000p_state *state = fe->demodulator_priv;
810 u16 lock = dib7000p_read_word(state, 509);
811
812 *stat = 0;
813
814 if (lock & 0x8000)
815 *stat |= FE_HAS_SIGNAL;
816 if (lock & 0x3000)
817 *stat |= FE_HAS_CARRIER;
818 if (lock & 0x0100)
819 *stat |= FE_HAS_VITERBI;
820 if (lock & 0x0010)
821 *stat |= FE_HAS_SYNC;
822 if (lock & 0x0008)
823 *stat |= FE_HAS_LOCK;
824
825 return 0;
826}
827
828static int dib7000p_read_ber(struct dvb_frontend *fe, u32 *ber)
829{
830 struct dib7000p_state *state = fe->demodulator_priv;
831 *ber = (dib7000p_read_word(state, 500) << 16) | dib7000p_read_word(state, 501);
832 return 0;
833}
834
835static int dib7000p_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
836{
837 struct dib7000p_state *state = fe->demodulator_priv;
838 *unc = dib7000p_read_word(state, 506);
839 return 0;
840}
841
842static int dib7000p_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
843{
844 struct dib7000p_state *state = fe->demodulator_priv;
845 u16 val = dib7000p_read_word(state, 394);
846 *strength = 65535 - val;
847 return 0;
848}
849
850static int dib7000p_read_snr(struct dvb_frontend* fe, u16 *snr)
851{
852 *snr = 0x0000;
853 return 0;
854}
855
856static int dib7000p_fe_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings *tune)
857{
858 tune->min_delay_ms = 1000;
859 return 0;
860}
861
/*
 * Frontend release callback: tear down the gated I2C master and free the
 * state allocated in dib7000p_attach().
 */
static void dib7000p_release(struct dvb_frontend *demod)
{
    struct dib7000p_state *st = demod->demodulator_priv;
    dibx000_exit_i2c_master(&st->i2c_master);
    kfree(st);
}
868
869int dib7000pc_detection(struct i2c_adapter *i2c_adap)
870{
871 u8 tx[2], rx[2];
872 struct i2c_msg msg[2] = {
873 { .addr = 18 >> 1, .flags = 0, .buf = tx, .len = 2 },
874 { .addr = 18 >> 1, .flags = I2C_M_RD, .buf = rx, .len = 2 },
875 };
876
877 tx[0] = 0x03;
878 tx[1] = 0x00;
879
880 if (i2c_transfer(i2c_adap, msg, 2) == 2)
881 if (rx[0] == 0x01 && rx[1] == 0xb3) {
882 dprintk("-D- DiB7000PC detected\n");
883 return 1;
884 }
885
886 msg[0].addr = msg[1].addr = 0x40;
887
888 if (i2c_transfer(i2c_adap, msg, 2) == 2)
889 if (rx[0] == 0x01 && rx[1] == 0xb3) {
890 dprintk("-D- DiB7000PC detected\n");
891 return 1;
892 }
893
894 dprintk("-D- DiB7000PC not detected\n");
895 return 0;
896}
897EXPORT_SYMBOL(dib7000pc_detection);
898
/*
 * Expose one of the demod's gated I2C interfaces (used by board code to
 * reach the tuner behind the demodulator).
 */
struct i2c_adapter * dib7000p_get_i2c_master(struct dvb_frontend *demod, enum dibx000_i2c_interface intf, int gating)
{
    struct dib7000p_state *st = demod->demodulator_priv;
    return dibx000_get_i2c_adapter(&st->i2c_master, intf, gating);
}
EXPORT_SYMBOL(dib7000p_get_i2c_master);
905
/*
 * Assign distinct I2C addresses (0x40 + k) to several DiB7000P chips that
 * share a bus and initially answer on default_addr. Uses a throw-away
 * on-stack state so it works before any frontend is attached. Walks the
 * chips from last to first forcing divstr, then releases divstr again.
 * Returns 0 on success, -EIO when a chip cannot be identified.
 */
int dib7000p_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 default_addr, struct dib7000p_config cfg[])
{
    struct dib7000p_state st = { .i2c_adap = i2c };
    int k = 0;
    u8 new_addr = 0;

    for (k = no_of_demods-1; k >= 0; k--) {
        st.cfg = cfg[k];

        /* designated i2c address */
        new_addr = (0x40 + k) << 1;
        st.i2c_addr = new_addr;
        if (dib7000p_identify(&st) != 0) {
            /* chip not yet re-addressed - try the shared default address */
            st.i2c_addr = default_addr;
            if (dib7000p_identify(&st) != 0) {
                dprintk("DiB7000P #%d: not identified\n", k);
                return -EIO;
            }
        }

        /* start diversity to pull_down div_str - just for i2c-enumeration */
        dib7000p_set_output_mode(&st, OUTMODE_DIVERSITY);

        /* set new i2c address and force divstart */
        dib7000p_write_word(&st, 1285, (new_addr << 2) | 0x2);

        dprintk("IC %d initialized (to i2c_address 0x%x)\n", k, new_addr);
    }

    for (k = 0; k < no_of_demods; k++) {
        st.cfg = cfg[k];
        st.i2c_addr = (0x40 + k) << 1;

        // unforce divstr
        dib7000p_write_word(&st, 1285, st.i2c_addr << 2);

        /* deactivate div - it was just for i2c-enumeration */
        dib7000p_set_output_mode(&st, OUTMODE_HIGH_Z);
    }

    return 0;
}
EXPORT_SYMBOL(dib7000p_i2c_enumeration);
949
/* defined at the end of the file; forward-declared for the memcpy below */
static struct dvb_frontend_ops dib7000p_ops;

/*
 * Allocate and initialize a DiB7000P frontend on the given adapter and
 * address. Verifies the chip IDs, sets up the gated I2C master and resets
 * the demod. Returns the frontend on success or NULL on failure (the
 * allocated state is freed on the error path).
 */
struct dvb_frontend * dib7000p_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg)
{
    struct dvb_frontend *demod;
    struct dib7000p_state *st;
    st = kzalloc(sizeof(struct dib7000p_state), GFP_KERNEL);
    if (st == NULL)
        return NULL;

    memcpy(&st->cfg, cfg, sizeof(struct dib7000p_config));
    st->i2c_adap = i2c_adap;
    st->i2c_addr = i2c_addr;
    st->gpio_val = cfg->gpio_val;
    st->gpio_dir = cfg->gpio_dir;

    demod                   = &st->demod;
    demod->demodulator_priv = st;
    memcpy(&st->demod.ops, &dib7000p_ops, sizeof(struct dvb_frontend_ops));

    if (dib7000p_identify(st) != 0)
        goto error;

    dibx000_init_i2c_master(&st->i2c_master, DIB7000P, st->i2c_adap, st->i2c_addr);

    dib7000p_demod_reset(st);

    return demod;

error:
    kfree(st);
    return NULL;
}
EXPORT_SYMBOL(dib7000p_attach);
983
/* dvb-core callback table; copied into each frontend by dib7000p_attach(). */
static struct dvb_frontend_ops dib7000p_ops = {
    .info = {
        .name = "DiBcom 7000PC",
        .type = FE_OFDM,
        .frequency_min      = 44250000,
        .frequency_max      = 867250000,
        .frequency_stepsize = 62500,
        .caps = FE_CAN_INVERSION_AUTO |
            FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
            FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
            FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
            FE_CAN_TRANSMISSION_MODE_AUTO |
            FE_CAN_GUARD_INTERVAL_AUTO |
            FE_CAN_RECOVER |
            FE_CAN_HIERARCHY_AUTO,
    },

    .release              = dib7000p_release,

    .init                 = dib7000p_init,
    .sleep                = dib7000p_sleep,

    .set_frontend         = dib7000p_set_frontend,
    .get_tune_settings    = dib7000p_fe_get_tune_settings,
    .get_frontend         = dib7000p_get_frontend,

    .read_status          = dib7000p_read_status,
    .read_ber             = dib7000p_read_ber,
    .read_signal_strength = dib7000p_read_signal_strength,
    .read_snr             = dib7000p_read_snr,
    .read_ucblocks        = dib7000p_read_unc_blocks,
};
1016
/* Module metadata. */
MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
MODULE_DESCRIPTION("Driver for the DiBcom 7000PC COFDM demodulator");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/dib7000p.h b/drivers/media/dvb/frontends/dib7000p.h
new file mode 100644
index 000000000000..79465cf1aced
--- /dev/null
+++ b/drivers/media/dvb/frontends/dib7000p.h
@@ -0,0 +1,46 @@
1#ifndef DIB7000P_H
2#define DIB7000P_H
3
4#include "dibx000_common.h"
5
6struct dib7000p_config {
7 u8 output_mpeg2_in_188_bytes;
8 u8 hostbus_diversity;
9 u8 tuner_is_baseband;
10 int (*update_lna) (struct dvb_frontend *, u16 agc_global);
11
12 struct dibx000_agc_config *agc;
13 struct dibx000_bandwidth_config *bw;
14
15#define DIB7000P_GPIO_DEFAULT_DIRECTIONS 0xffff
16 u16 gpio_dir;
17#define DIB7000P_GPIO_DEFAULT_VALUES 0x0000
18 u16 gpio_val;
19#define DIB7000P_GPIO_PWM_POS0(v) ((v & 0xf) << 12)
20#define DIB7000P_GPIO_PWM_POS1(v) ((v & 0xf) << 8 )
21#define DIB7000P_GPIO_PWM_POS2(v) ((v & 0xf) << 4 )
22#define DIB7000P_GPIO_PWM_POS3(v) (v & 0xf)
23#define DIB7000P_GPIO_DEFAULT_PWM_POS 0xffff
24 u16 gpio_pwm_pos;
25
26 u16 pwm_freq_div;
27
28 u8 quartz_direct;
29
30 int (*agc_control) (struct dvb_frontend *, u8 before);
31};
32
33#define DEFAULT_DIB7000P_I2C_ADDRESS 18
34
35extern struct dvb_frontend * dib7000p_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg);
36extern struct i2c_adapter * dib7000p_get_i2c_master(struct dvb_frontend *, enum dibx000_i2c_interface, int);
37extern int dib7000pc_detection(struct i2c_adapter *i2c_adap);
38
39/* TODO
40extern INT dib7000p_set_gpio(struct dibDemod *demod, UCHAR num, UCHAR dir, UCHAR val);
41extern INT dib7000p_enable_vbg_voltage(struct dibDemod *demod);
42extern void dib7000p_set_hostbus_diversity(struct dibDemod *demod, UCHAR onoff);
43extern USHORT dib7000p_get_current_agc_global(struct dibDemod *demod);
44*/
45
46#endif
diff --git a/drivers/media/dvb/frontends/dibx000_common.h b/drivers/media/dvb/frontends/dibx000_common.h
index bb0c65f8aee8..a1df604366c3 100644
--- a/drivers/media/dvb/frontends/dibx000_common.h
+++ b/drivers/media/dvb/frontends/dibx000_common.h
@@ -32,6 +32,13 @@ extern void dibx000_exit_i2c_master(struct dibx000_i2c_master *mst);
32#define BAND_LBAND 0x01 32#define BAND_LBAND 0x01
33#define BAND_UHF 0x02 33#define BAND_UHF 0x02
34#define BAND_VHF 0x04 34#define BAND_VHF 0x04
35#define BAND_SBAND 0x08
36#define BAND_FM 0x10
37
38#define BAND_OF_FREQUENCY(freq_kHz) ( (freq_kHz) <= 115000 ? BAND_FM : \
39 (freq_kHz) <= 250000 ? BAND_VHF : \
40 (freq_kHz) <= 863000 ? BAND_UHF : \
41 (freq_kHz) <= 2000000 ? BAND_LBAND : BAND_SBAND )
35 42
36struct dibx000_agc_config { 43struct dibx000_agc_config {
37 /* defines the capabilities of this AGC-setting - using the BAND_-defines*/ 44 /* defines the capabilities of this AGC-setting - using the BAND_-defines*/
@@ -129,6 +136,7 @@ enum dibx000_adc_states {
129 136
130/* I hope I can get rid of the following kludge in the near future */ 137/* I hope I can get rid of the following kludge in the near future */
131struct dibx000_ofdm_channel { 138struct dibx000_ofdm_channel {
139 u32 RF_kHz;
132 u8 Bw; 140 u8 Bw;
133 s16 nfft; 141 s16 nfft;
134 s16 guard; 142 s16 guard;
@@ -138,9 +146,11 @@ struct dibx000_ofdm_channel {
138 s16 vit_alpha; 146 s16 vit_alpha;
139 s16 vit_code_rate_hp; 147 s16 vit_code_rate_hp;
140 s16 vit_code_rate_lp; 148 s16 vit_code_rate_lp;
149 u8 intlv_native;
141}; 150};
142 151
143#define FEP2DIB(fep,ch) \ 152#define FEP2DIB(fep,ch) \
153 (ch)->RF_kHz = (fep)->frequency / 1000; \
144 (ch)->Bw = (fep)->u.ofdm.bandwidth; \ 154 (ch)->Bw = (fep)->u.ofdm.bandwidth; \
145 (ch)->nfft = (fep)->u.ofdm.transmission_mode == TRANSMISSION_MODE_AUTO ? -1 : (fep)->u.ofdm.transmission_mode; \ 155 (ch)->nfft = (fep)->u.ofdm.transmission_mode == TRANSMISSION_MODE_AUTO ? -1 : (fep)->u.ofdm.transmission_mode; \
146 (ch)->guard = (fep)->u.ofdm.guard_interval == GUARD_INTERVAL_AUTO ? -1 : (fep)->u.ofdm.guard_interval; \ 156 (ch)->guard = (fep)->u.ofdm.guard_interval == GUARD_INTERVAL_AUTO ? -1 : (fep)->u.ofdm.guard_interval; \
@@ -149,7 +159,8 @@ struct dibx000_ofdm_channel {
149 (ch)->vit_select_hp = 1; \ 159 (ch)->vit_select_hp = 1; \
150 (ch)->vit_alpha = 1; \ 160 (ch)->vit_alpha = 1; \
151 (ch)->vit_code_rate_hp = (fep)->u.ofdm.code_rate_HP == FEC_AUTO ? -1 : (fep)->u.ofdm.code_rate_HP; \ 161 (ch)->vit_code_rate_hp = (fep)->u.ofdm.code_rate_HP == FEC_AUTO ? -1 : (fep)->u.ofdm.code_rate_HP; \
152 (ch)->vit_code_rate_lp = (fep)->u.ofdm.code_rate_LP == FEC_AUTO ? -1 : (fep)->u.ofdm.code_rate_LP; 162 (ch)->vit_code_rate_lp = (fep)->u.ofdm.code_rate_LP == FEC_AUTO ? -1 : (fep)->u.ofdm.code_rate_LP; \
163 (ch)->intlv_native = 1;
153 164
154#define INIT_OFDM_CHANNEL(ch) do {\ 165#define INIT_OFDM_CHANNEL(ch) do {\
155 (ch)->Bw = 0; \ 166 (ch)->Bw = 0; \
diff --git a/drivers/media/dvb/frontends/dvb-pll.c b/drivers/media/dvb/frontends/dvb-pll.c
index b7e7108ee5b3..62de760c844f 100644
--- a/drivers/media/dvb/frontends/dvb-pll.c
+++ b/drivers/media/dvb/frontends/dvb-pll.c
@@ -472,14 +472,14 @@ int dvb_pll_configure(struct dvb_pll_desc *desc, u8 *buf,
472 printk("pll: %s: div=%d | buf=0x%02x,0x%02x,0x%02x,0x%02x\n", 472 printk("pll: %s: div=%d | buf=0x%02x,0x%02x,0x%02x,0x%02x\n",
473 desc->name, div, buf[0], buf[1], buf[2], buf[3]); 473 desc->name, div, buf[0], buf[1], buf[2], buf[3]);
474 474
475 return 0; 475 // calculate the frequency we set it to
476 return (div * desc->entries[i].stepsize) - desc->entries[i].offset;
476} 477}
477EXPORT_SYMBOL(dvb_pll_configure); 478EXPORT_SYMBOL(dvb_pll_configure);
478 479
479static int dvb_pll_release(struct dvb_frontend *fe) 480static int dvb_pll_release(struct dvb_frontend *fe)
480{ 481{
481 if (fe->tuner_priv) 482 kfree(fe->tuner_priv);
482 kfree(fe->tuner_priv);
483 fe->tuner_priv = NULL; 483 fe->tuner_priv = NULL;
484 return 0; 484 return 0;
485} 485}
@@ -489,7 +489,8 @@ static int dvb_pll_sleep(struct dvb_frontend *fe)
489 struct dvb_pll_priv *priv = fe->tuner_priv; 489 struct dvb_pll_priv *priv = fe->tuner_priv;
490 u8 buf[4]; 490 u8 buf[4];
491 struct i2c_msg msg = 491 struct i2c_msg msg =
492 { .addr = priv->pll_i2c_address, .flags = 0, .buf = buf, .len = sizeof(buf) }; 492 { .addr = priv->pll_i2c_address, .flags = 0,
493 .buf = buf, .len = sizeof(buf) };
493 int i; 494 int i;
494 int result; 495 int result;
495 496
@@ -517,16 +518,16 @@ static int dvb_pll_sleep(struct dvb_frontend *fe)
517 return 0; 518 return 0;
518} 519}
519 520
520static int dvb_pll_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params) 521static int dvb_pll_set_params(struct dvb_frontend *fe,
522 struct dvb_frontend_parameters *params)
521{ 523{
522 struct dvb_pll_priv *priv = fe->tuner_priv; 524 struct dvb_pll_priv *priv = fe->tuner_priv;
523 u8 buf[4]; 525 u8 buf[4];
524 struct i2c_msg msg = 526 struct i2c_msg msg =
525 { .addr = priv->pll_i2c_address, .flags = 0, .buf = buf, .len = sizeof(buf) }; 527 { .addr = priv->pll_i2c_address, .flags = 0,
528 .buf = buf, .len = sizeof(buf) };
526 int result; 529 int result;
527 u32 div; 530 u32 bandwidth = 0, frequency = 0;
528 int i;
529 u32 bandwidth = 0;
530 531
531 if (priv->i2c == NULL) 532 if (priv->i2c == NULL)
532 return -EINVAL; 533 return -EINVAL;
@@ -536,8 +537,11 @@ static int dvb_pll_set_params(struct dvb_frontend *fe, struct dvb_frontend_param
536 bandwidth = params->u.ofdm.bandwidth; 537 bandwidth = params->u.ofdm.bandwidth;
537 } 538 }
538 539
539 if ((result = dvb_pll_configure(priv->pll_desc, buf, params->frequency, bandwidth)) != 0) 540 if ((result = dvb_pll_configure(priv->pll_desc, buf,
541 params->frequency, bandwidth)) < 0)
540 return result; 542 return result;
543 else
544 frequency = result;
541 545
542 if (fe->ops.i2c_gate_ctrl) 546 if (fe->ops.i2c_gate_ctrl)
543 fe->ops.i2c_gate_ctrl(fe, 1); 547 fe->ops.i2c_gate_ctrl(fe, 1);
@@ -545,26 +549,19 @@ static int dvb_pll_set_params(struct dvb_frontend *fe, struct dvb_frontend_param
545 return result; 549 return result;
546 } 550 }
547 551
548 // calculate the frequency we set it to 552 priv->frequency = frequency;
549 for (i = 0; i < priv->pll_desc->count; i++) {
550 if (params->frequency > priv->pll_desc->entries[i].limit)
551 continue;
552 break;
553 }
554 div = (params->frequency + priv->pll_desc->entries[i].offset) / priv->pll_desc->entries[i].stepsize;
555 priv->frequency = (div * priv->pll_desc->entries[i].stepsize) - priv->pll_desc->entries[i].offset;
556 priv->bandwidth = bandwidth; 553 priv->bandwidth = bandwidth;
557 554
558 return 0; 555 return 0;
559} 556}
560 557
561static int dvb_pll_calc_regs(struct dvb_frontend *fe, struct dvb_frontend_parameters *params, u8 *buf, int buf_len) 558static int dvb_pll_calc_regs(struct dvb_frontend *fe,
559 struct dvb_frontend_parameters *params,
560 u8 *buf, int buf_len)
562{ 561{
563 struct dvb_pll_priv *priv = fe->tuner_priv; 562 struct dvb_pll_priv *priv = fe->tuner_priv;
564 int result; 563 int result;
565 u32 div; 564 u32 bandwidth = 0, frequency = 0;
566 int i;
567 u32 bandwidth = 0;
568 565
569 if (buf_len < 5) 566 if (buf_len < 5)
570 return -EINVAL; 567 return -EINVAL;
@@ -574,18 +571,15 @@ static int dvb_pll_calc_regs(struct dvb_frontend *fe, struct dvb_frontend_parame
574 bandwidth = params->u.ofdm.bandwidth; 571 bandwidth = params->u.ofdm.bandwidth;
575 } 572 }
576 573
577 if ((result = dvb_pll_configure(priv->pll_desc, buf+1, params->frequency, bandwidth)) != 0) 574 if ((result = dvb_pll_configure(priv->pll_desc, buf+1,
575 params->frequency, bandwidth)) < 0)
578 return result; 576 return result;
577 else
578 frequency = result;
579
579 buf[0] = priv->pll_i2c_address; 580 buf[0] = priv->pll_i2c_address;
580 581
581 // calculate the frequency we set it to 582 priv->frequency = frequency;
582 for (i = 0; i < priv->pll_desc->count; i++) {
583 if (params->frequency > priv->pll_desc->entries[i].limit)
584 continue;
585 break;
586 }
587 div = (params->frequency + priv->pll_desc->entries[i].offset) / priv->pll_desc->entries[i].stepsize;
588 priv->frequency = (div * priv->pll_desc->entries[i].stepsize) - priv->pll_desc->entries[i].offset;
589 priv->bandwidth = bandwidth; 583 priv->bandwidth = bandwidth;
590 584
591 return 5; 585 return 5;
@@ -614,10 +608,13 @@ static struct dvb_tuner_ops dvb_pll_tuner_ops = {
614 .get_bandwidth = dvb_pll_get_bandwidth, 608 .get_bandwidth = dvb_pll_get_bandwidth,
615}; 609};
616 610
617struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr, struct i2c_adapter *i2c, struct dvb_pll_desc *desc) 611struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
612 struct i2c_adapter *i2c,
613 struct dvb_pll_desc *desc)
618{ 614{
619 u8 b1 [] = { 0 }; 615 u8 b1 [] = { 0 };
620 struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD, .buf = b1, .len = 1 }; 616 struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD,
617 .buf = b1, .len = 1 };
621 struct dvb_pll_priv *priv = NULL; 618 struct dvb_pll_priv *priv = NULL;
622 int ret; 619 int ret;
623 620
@@ -640,7 +637,9 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr, struc
640 priv->i2c = i2c; 637 priv->i2c = i2c;
641 priv->pll_desc = desc; 638 priv->pll_desc = desc;
642 639
643 memcpy(&fe->ops.tuner_ops, &dvb_pll_tuner_ops, sizeof(struct dvb_tuner_ops)); 640 memcpy(&fe->ops.tuner_ops, &dvb_pll_tuner_ops,
641 sizeof(struct dvb_tuner_ops));
642
644 strncpy(fe->ops.tuner_ops.info.name, desc->name, 128); 643 strncpy(fe->ops.tuner_ops.info.name, desc->name, 128);
645 fe->ops.tuner_ops.info.frequency_min = desc->min; 644 fe->ops.tuner_ops.info.frequency_min = desc->min;
646 fe->ops.tuner_ops.info.frequency_min = desc->max; 645 fe->ops.tuner_ops.info.frequency_min = desc->max;
diff --git a/drivers/media/dvb/frontends/dvb-pll.h b/drivers/media/dvb/frontends/dvb-pll.h
index ed5ac5a361ae..681186a5e5eb 100644
--- a/drivers/media/dvb/frontends/dvb-pll.h
+++ b/drivers/media/dvb/frontends/dvb-pll.h
@@ -48,7 +48,7 @@ extern struct dvb_pll_desc dvb_pll_philips_td1316;
48extern struct dvb_pll_desc dvb_pll_thomson_fe6600; 48extern struct dvb_pll_desc dvb_pll_thomson_fe6600;
49 49
50extern int dvb_pll_configure(struct dvb_pll_desc *desc, u8 *buf, 50extern int dvb_pll_configure(struct dvb_pll_desc *desc, u8 *buf,
51 u32 freq, int bandwidth); 51 u32 freq, int bandwidth);
52 52
53/** 53/**
54 * Attach a dvb-pll to the supplied frontend structure. 54 * Attach a dvb-pll to the supplied frontend structure.
@@ -59,6 +59,9 @@ extern int dvb_pll_configure(struct dvb_pll_desc *desc, u8 *buf,
59 * @param desc dvb_pll_desc to use. 59 * @param desc dvb_pll_desc to use.
60 * @return Frontend pointer on success, NULL on failure 60 * @return Frontend pointer on success, NULL on failure
61 */ 61 */
62extern struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr, struct i2c_adapter *i2c, struct dvb_pll_desc *desc); 62extern struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe,
63 int pll_addr,
64 struct i2c_adapter *i2c,
65 struct dvb_pll_desc *desc);
63 66
64#endif 67#endif
diff --git a/drivers/media/dvb/frontends/lg_h06xf.h b/drivers/media/dvb/frontends/lg_h06xf.h
deleted file mode 100644
index 754d51d11120..000000000000
--- a/drivers/media/dvb/frontends/lg_h06xf.h
+++ /dev/null
@@ -1,64 +0,0 @@
1/*
2 * lg_h06xf.h - ATSC Tuner support for LG TDVS-H06xF
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19#ifndef _LG_H06XF_H_
20#define _LG_H06XF_H_
21#include "dvb-pll.h"
22
23static int lg_h06xf_pll_set(struct dvb_frontend* fe, struct i2c_adapter* i2c_adap,
24 struct dvb_frontend_parameters* params)
25{
26 u8 buf[4];
27 struct i2c_msg msg = { .addr = 0x61, .flags = 0,
28 .buf = buf, .len = sizeof(buf) };
29 int err;
30
31 dvb_pll_configure(&dvb_pll_lg_tdvs_h06xf, buf, params->frequency, 0);
32 if (fe->ops.i2c_gate_ctrl)
33 fe->ops.i2c_gate_ctrl(fe, 1);
34 if ((err = i2c_transfer(i2c_adap, &msg, 1)) != 1) {
35 printk(KERN_WARNING "lg_h06xf: %s error "
36 "(addr %02x <- %02x, err = %i)\n",
37 __FUNCTION__, buf[0], buf[1], err);
38 if (err < 0)
39 return err;
40 else
41 return -EREMOTEIO;
42 }
43
44 /* Set the Auxiliary Byte. */
45 buf[0] = buf[2];
46 buf[0] &= ~0x20;
47 buf[0] |= 0x18;
48 buf[1] = 0x50;
49 msg.len = 2;
50 if (fe->ops.i2c_gate_ctrl)
51 fe->ops.i2c_gate_ctrl(fe, 1);
52 if ((err = i2c_transfer(i2c_adap, &msg, 1)) != 1) {
53 printk(KERN_WARNING "lg_h06xf: %s error "
54 "(addr %02x <- %02x, err = %i)\n",
55 __FUNCTION__, buf[0], buf[1], err);
56 if (err < 0)
57 return err;
58 else
59 return -EREMOTEIO;
60 }
61
62 return 0;
63}
64#endif
diff --git a/drivers/media/dvb/frontends/lgdt330x.c b/drivers/media/dvb/frontends/lgdt330x.c
index 9a354708bd20..68aad0f6519f 100644
--- a/drivers/media/dvb/frontends/lgdt330x.c
+++ b/drivers/media/dvb/frontends/lgdt330x.c
@@ -31,9 +31,6 @@
31 * Air2PC/AirStar 2 ATSC 3rd generation (HD5000) 31 * Air2PC/AirStar 2 ATSC 3rd generation (HD5000)
32 * pcHDTV HD5500 32 * pcHDTV HD5500
33 * 33 *
34 * TODO:
35 * signal strength always returns 0.
36 *
37 */ 34 */
38 35
39#include <linux/kernel.h> 36#include <linux/kernel.h>
@@ -46,9 +43,13 @@
46#include <asm/byteorder.h> 43#include <asm/byteorder.h>
47 44
48#include "dvb_frontend.h" 45#include "dvb_frontend.h"
46#include "dvb_math.h"
49#include "lgdt330x_priv.h" 47#include "lgdt330x_priv.h"
50#include "lgdt330x.h" 48#include "lgdt330x.h"
51 49
50/* Use Equalizer Mean Squared Error instead of Phaser Tracker MSE */
51/* #define USE_EQMSE */
52
52static int debug = 0; 53static int debug = 0;
53module_param(debug, int, 0644); 54module_param(debug, int, 0644);
54MODULE_PARM_DESC(debug,"Turn on/off lgdt330x frontend debugging (default:off)."); 55MODULE_PARM_DESC(debug,"Turn on/off lgdt330x frontend debugging (default:off).");
@@ -68,6 +69,7 @@ struct lgdt330x_state
68 69
69 /* Demodulator private data */ 70 /* Demodulator private data */
70 fe_modulation_t current_modulation; 71 fe_modulation_t current_modulation;
72 u32 snr; /* Result of last SNR calculation */
71 73
72 /* Tuner private data */ 74 /* Tuner private data */
73 u32 current_frequency; 75 u32 current_frequency;
@@ -302,10 +304,10 @@ static int lgdt330x_set_parameters(struct dvb_frontend* fe,
302 static u8 lgdt3303_8vsb_44_data[] = { 304 static u8 lgdt3303_8vsb_44_data[] = {
303 0x04, 0x00, 305 0x04, 0x00,
304 0x0d, 0x40, 306 0x0d, 0x40,
305 0x0e, 0x87, 307 0x0e, 0x87,
306 0x0f, 0x8e, 308 0x0f, 0x8e,
307 0x10, 0x01, 309 0x10, 0x01,
308 0x47, 0x8b }; 310 0x47, 0x8b };
309 311
310 /* 312 /*
311 * Array of byte pairs <address, value> 313 * Array of byte pairs <address, value>
@@ -435,9 +437,6 @@ static int lgdt3302_read_status(struct dvb_frontend* fe, fe_status_t* status)
435 /* Test signal does not exist flag */ 437 /* Test signal does not exist flag */
436 /* as well as the AGC lock flag. */ 438 /* as well as the AGC lock flag. */
437 *status |= FE_HAS_SIGNAL; 439 *status |= FE_HAS_SIGNAL;
438 } else {
439 /* Without a signal all other status bits are meaningless */
440 return 0;
441 } 440 }
442 441
443 /* 442 /*
@@ -500,9 +499,6 @@ static int lgdt3303_read_status(struct dvb_frontend* fe, fe_status_t* status)
500 /* Test input signal does not exist flag */ 499 /* Test input signal does not exist flag */
501 /* as well as the AGC lock flag. */ 500 /* as well as the AGC lock flag. */
502 *status |= FE_HAS_SIGNAL; 501 *status |= FE_HAS_SIGNAL;
503 } else {
504 /* Without a signal all other status bits are meaningless */
505 return 0;
506 } 502 }
507 503
508 /* Carrier Recovery Lock Status Register */ 504 /* Carrier Recovery Lock Status Register */
@@ -543,151 +539,150 @@ static int lgdt3303_read_status(struct dvb_frontend* fe, fe_status_t* status)
543 return 0; 539 return 0;
544} 540}
545 541
546static int lgdt330x_read_signal_strength(struct dvb_frontend* fe, u16* strength) 542/* Calculate SNR estimation (scaled by 2^24)
543
544 8-VSB SNR equations from LGDT3302 and LGDT3303 datasheets, QAM
545 equations from LGDT3303 datasheet. VSB is the same between the '02
546 and '03, so maybe QAM is too? Perhaps someone with a newer datasheet
547 that has QAM information could verify?
548
549 For 8-VSB: (two ways, take your pick)
550 LGDT3302:
551 SNR_EQ = 10 * log10(25 * 24^2 / EQ_MSE)
552 LGDT3303:
553 SNR_EQ = 10 * log10(25 * 32^2 / EQ_MSE)
554 LGDT3302 & LGDT3303:
555 SNR_PT = 10 * log10(25 * 32^2 / PT_MSE) (we use this one)
556 For 64-QAM:
557 SNR = 10 * log10( 688128 / MSEQAM)
558 For 256-QAM:
559 SNR = 10 * log10( 696320 / MSEQAM)
560
561 We re-write the snr equation as:
562 SNR * 2^24 = 10*(c - intlog10(MSE))
563 Where for 256-QAM, c = log10(696320) * 2^24, and so on. */
564
565static u32 calculate_snr(u32 mse, u32 c)
547{ 566{
548 /* not directly available. */ 567 if (mse == 0) /* No signal */
549 *strength = 0; 568 return 0;
550 return 0; 569
570 mse = intlog10(mse);
571 if (mse > c) {
572 /* Negative SNR, which is possible, but realisticly the
573 demod will lose lock before the signal gets this bad. The
574 API only allows for unsigned values, so just return 0 */
575 return 0;
576 }
577 return 10*(c - mse);
551} 578}
552 579
553static int lgdt3302_read_snr(struct dvb_frontend* fe, u16* snr) 580static int lgdt3302_read_snr(struct dvb_frontend* fe, u16* snr)
554{ 581{
555#ifdef SNR_IN_DB
556 /*
557 * Spec sheet shows formula for SNR_EQ = 10 log10(25 * 24**2 / noise)
558 * and SNR_PH = 10 log10(25 * 32**2 / noise) for equalizer and phase tracker
559 * respectively. The following tables are built on these formulas.
560 * The usual definition is SNR = 20 log10(signal/noise)
561 * If the specification is wrong the value retuned is 1/2 the actual SNR in db.
562 *
563 * This table is a an ordered list of noise values computed by the
564 * formula from the spec sheet such that the index into the table
565 * starting at 43 or 45 is the SNR value in db. There are duplicate noise
566 * value entries at the beginning because the SNR varies more than
567 * 1 db for a change of 1 digit in noise at very small values of noise.
568 *
569 * Examples from SNR_EQ table:
570 * noise SNR
571 * 0 43
572 * 1 42
573 * 2 39
574 * 3 37
575 * 4 36
576 * 5 35
577 * 6 34
578 * 7 33
579 * 8 33
580 * 9 32
581 * 10 32
582 * 11 31
583 * 12 31
584 * 13 30
585 */
586
587 static const u32 SNR_EQ[] =
588 { 1, 2, 2, 2, 3, 3, 4, 4, 5, 7,
589 9, 11, 13, 17, 21, 26, 33, 41, 52, 65,
590 81, 102, 129, 162, 204, 257, 323, 406, 511, 644,
591 810, 1020, 1284, 1616, 2035, 2561, 3224, 4059, 5110, 6433,
592 8098, 10195, 12835, 16158, 20341, 25608, 32238, 40585, 51094, 64323,
593 80978, 101945, 128341, 161571, 203406, 256073, 0x40000
594 };
595
596 static const u32 SNR_PH[] =
597 { 1, 2, 2, 2, 3, 3, 4, 5, 6, 8,
598 10, 12, 15, 19, 23, 29, 37, 46, 58, 73,
599 91, 115, 144, 182, 229, 288, 362, 456, 574, 722,
600 909, 1144, 1440, 1813, 2282, 2873, 3617, 4553, 5732, 7216,
601 9084, 11436, 14396, 18124, 22817, 28724, 36161, 45524, 57312, 72151,
602 90833, 114351, 143960, 181235, 228161, 0x080000
603 };
604
605 static u8 buf[5];/* read data buffer */
606 static u32 noise; /* noise value */
607 static u32 snr_db; /* index into SNR_EQ[] */
608 struct lgdt330x_state* state = (struct lgdt330x_state*) fe->demodulator_priv; 582 struct lgdt330x_state* state = (struct lgdt330x_state*) fe->demodulator_priv;
583 u8 buf[5]; /* read data buffer */
584 u32 noise; /* noise value */
585 u32 c; /* per-modulation SNR calculation constant */
609 586
610 /* read both equalizer and phase tracker noise data */ 587 switch(state->current_modulation) {
611 i2c_read_demod_bytes(state, EQPH_ERR0, buf, sizeof(buf)); 588 case VSB_8:
612 589 i2c_read_demod_bytes(state, LGDT3302_EQPH_ERR0, buf, 5);
613 if (state->current_modulation == VSB_8) { 590#ifdef USE_EQMSE
614 /* Equalizer Mean-Square Error Register for VSB */ 591 /* Use Equalizer Mean-Square Error Register */
592 /* SNR for ranges from -15.61 to +41.58 */
615 noise = ((buf[0] & 7) << 16) | (buf[1] << 8) | buf[2]; 593 noise = ((buf[0] & 7) << 16) | (buf[1] << 8) | buf[2];
616 594 c = 69765745; /* log10(25*24^2)*2^24 */
617 /*
618 * Look up noise value in table.
619 * A better search algorithm could be used...
620 * watch out there are duplicate entries.
621 */
622 for (snr_db = 0; snr_db < sizeof(SNR_EQ); snr_db++) {
623 if (noise < SNR_EQ[snr_db]) {
624 *snr = 43 - snr_db;
625 break;
626 }
627 }
628 } else {
629 /* Phase Tracker Mean-Square Error Register for QAM */
630 noise = ((buf[0] & 7<<3) << 13) | (buf[3] << 8) | buf[4];
631
632 /* Look up noise value in table. */
633 for (snr_db = 0; snr_db < sizeof(SNR_PH); snr_db++) {
634 if (noise < SNR_PH[snr_db]) {
635 *snr = 45 - snr_db;
636 break;
637 }
638 }
639 }
640#else 595#else
641 /* Return the raw noise value */ 596 /* Use Phase Tracker Mean-Square Error Register */
642 static u8 buf[5];/* read data buffer */ 597 /* SNR for ranges from -13.11 to +44.08 */
643 static u32 noise; /* noise value */
644 struct lgdt330x_state* state = (struct lgdt330x_state*) fe->demodulator_priv;
645
646 /* read both equalizer and pase tracker noise data */
647 i2c_read_demod_bytes(state, EQPH_ERR0, buf, sizeof(buf));
648
649 if (state->current_modulation == VSB_8) {
650 /* Phase Tracker Mean-Square Error Register for VSB */
651 noise = ((buf[0] & 7<<3) << 13) | (buf[3] << 8) | buf[4]; 598 noise = ((buf[0] & 7<<3) << 13) | (buf[3] << 8) | buf[4];
652 } else { 599 c = 73957994; /* log10(25*32^2)*2^24 */
653 600#endif
654 /* Carrier Recovery Mean-Square Error for QAM */ 601 break;
655 i2c_read_demod_bytes(state, 0x1a, buf, 2); 602 case QAM_64:
603 case QAM_256:
604 i2c_read_demod_bytes(state, CARRIER_MSEQAM1, buf, 2);
656 noise = ((buf[0] & 3) << 8) | buf[1]; 605 noise = ((buf[0] & 3) << 8) | buf[1];
606 c = state->current_modulation == QAM_64 ? 97939837 : 98026066;
607 /* log10(688128)*2^24 and log10(696320)*2^24 */
608 break;
609 default:
610 printk(KERN_ERR "lgdt330x: %s: Modulation set to unsupported value\n",
611 __FUNCTION__);
612 return -EREMOTEIO; /* return -EDRIVER_IS_GIBBERED; */
657 } 613 }
658 614
659 /* Small values for noise mean signal is better so invert noise */ 615 state->snr = calculate_snr(noise, c);
660 *snr = ~noise; 616 *snr = (state->snr) >> 16; /* Convert from 8.24 fixed-point to 8.8 */
661#endif
662 617
663 dprintk("%s: noise = 0x%05x, snr = %idb\n",__FUNCTION__, noise, *snr); 618 dprintk("%s: noise = 0x%08x, snr = %d.%02d dB\n", __FUNCTION__, noise,
619 state->snr >> 24, (((state->snr>>8) & 0xffff) * 100) >> 16);
664 620
665 return 0; 621 return 0;
666} 622}
667 623
668static int lgdt3303_read_snr(struct dvb_frontend* fe, u16* snr) 624static int lgdt3303_read_snr(struct dvb_frontend* fe, u16* snr)
669{ 625{
670 /* Return the raw noise value */
671 static u8 buf[5];/* read data buffer */
672 static u32 noise; /* noise value */
673 struct lgdt330x_state* state = (struct lgdt330x_state*) fe->demodulator_priv; 626 struct lgdt330x_state* state = (struct lgdt330x_state*) fe->demodulator_priv;
627 u8 buf[5]; /* read data buffer */
628 u32 noise; /* noise value */
629 u32 c; /* per-modulation SNR calculation constant */
674 630
675 if (state->current_modulation == VSB_8) { 631 switch(state->current_modulation) {
676 632 case VSB_8:
677 i2c_read_demod_bytes(state, 0x6e, buf, 5); 633 i2c_read_demod_bytes(state, LGDT3303_EQPH_ERR0, buf, 5);
678 /* Phase Tracker Mean-Square Error Register for VSB */ 634#ifdef USE_EQMSE
635 /* Use Equalizer Mean-Square Error Register */
636 /* SNR for ranges from -16.12 to +44.08 */
637 noise = ((buf[0] & 0x78) << 13) | (buf[1] << 8) | buf[2];
638 c = 73957994; /* log10(25*32^2)*2^24 */
639#else
640 /* Use Phase Tracker Mean-Square Error Register */
641 /* SNR for ranges from -13.11 to +44.08 */
679 noise = ((buf[0] & 7) << 16) | (buf[3] << 8) | buf[4]; 642 noise = ((buf[0] & 7) << 16) | (buf[3] << 8) | buf[4];
680 } else { 643 c = 73957994; /* log10(25*32^2)*2^24 */
681 644#endif
682 /* Carrier Recovery Mean-Square Error for QAM */ 645 break;
683 i2c_read_demod_bytes(state, 0x1a, buf, 2); 646 case QAM_64:
647 case QAM_256:
648 i2c_read_demod_bytes(state, CARRIER_MSEQAM1, buf, 2);
684 noise = (buf[0] << 8) | buf[1]; 649 noise = (buf[0] << 8) | buf[1];
650 c = state->current_modulation == QAM_64 ? 97939837 : 98026066;
651 /* log10(688128)*2^24 and log10(696320)*2^24 */
652 break;
653 default:
654 printk(KERN_ERR "lgdt330x: %s: Modulation set to unsupported value\n",
655 __FUNCTION__);
656 return -EREMOTEIO; /* return -EDRIVER_IS_GIBBERED; */
685 } 657 }
686 658
687 /* Small values for noise mean signal is better so invert noise */ 659 state->snr = calculate_snr(noise, c);
688 *snr = ~noise; 660 *snr = (state->snr) >> 16; /* Convert from 8.24 fixed-point to 8.8 */
661
662 dprintk("%s: noise = 0x%08x, snr = %d.%02d dB\n", __FUNCTION__, noise,
663 state->snr >> 24, (((state->snr >> 8) & 0xffff) * 100) >> 16);
664
665 return 0;
666}
667
668static int lgdt330x_read_signal_strength(struct dvb_frontend* fe, u16* strength)
669{
670 /* Calculate Strength from SNR up to 35dB */
671 /* Even though the SNR can go higher than 35dB, there is some comfort */
672 /* factor in having a range of strong signals that can show at 100% */
673 struct lgdt330x_state* state = (struct lgdt330x_state*) fe->demodulator_priv;
674 u16 snr;
675 int ret;
689 676
690 dprintk("%s: noise = 0x%05x, snr = %idb\n",__FUNCTION__, noise, *snr); 677 ret = fe->ops.read_snr(fe, &snr);
678 if (ret != 0)
679 return ret;
680 /* Rather than use the 8.8 value snr, use state->snr which is 8.24 */
681 /* scale the range 0 - 35*2^24 into 0 - 65535 */
682 if (state->snr >= 8960 * 0x10000)
683 *strength = 0xffff;
684 else
685 *strength = state->snr / 8960;
691 686
692 return 0; 687 return 0;
693} 688}
diff --git a/drivers/media/dvb/frontends/lgdt330x_priv.h b/drivers/media/dvb/frontends/lgdt330x_priv.h
index 59b7c5b9012d..38c76695abfe 100644
--- a/drivers/media/dvb/frontends/lgdt330x_priv.h
+++ b/drivers/media/dvb/frontends/lgdt330x_priv.h
@@ -51,14 +51,19 @@ enum I2C_REG {
51 AGC_RFIF_ACC2= 0x3b, 51 AGC_RFIF_ACC2= 0x3b,
52 AGC_STATUS= 0x3f, 52 AGC_STATUS= 0x3f,
53 SYNC_STATUS_VSB= 0x43, 53 SYNC_STATUS_VSB= 0x43,
54 EQPH_ERR0= 0x47,
55 EQ_ERR1= 0x48,
56 EQ_ERR2= 0x49,
57 PH_ERR1= 0x4a,
58 PH_ERR2= 0x4b,
59 DEMUX_CONTROL= 0x66, 54 DEMUX_CONTROL= 0x66,
55 LGDT3302_EQPH_ERR0= 0x47,
56 LGDT3302_EQ_ERR1= 0x48,
57 LGDT3302_EQ_ERR2= 0x49,
58 LGDT3302_PH_ERR1= 0x4a,
59 LGDT3302_PH_ERR2= 0x4b,
60 LGDT3302_PACKET_ERR_COUNTER1= 0x6a, 60 LGDT3302_PACKET_ERR_COUNTER1= 0x6a,
61 LGDT3302_PACKET_ERR_COUNTER2= 0x6b, 61 LGDT3302_PACKET_ERR_COUNTER2= 0x6b,
62 LGDT3303_EQPH_ERR0= 0x6e,
63 LGDT3303_EQ_ERR1= 0x6f,
64 LGDT3303_EQ_ERR2= 0x70,
65 LGDT3303_PH_ERR1= 0x71,
66 LGDT3303_PH_ERR2= 0x72,
62 LGDT3303_PACKET_ERR_COUNTER1= 0x8b, 67 LGDT3303_PACKET_ERR_COUNTER1= 0x8b,
63 LGDT3303_PACKET_ERR_COUNTER2= 0x8c, 68 LGDT3303_PACKET_ERR_COUNTER2= 0x8c,
64}; 69};
diff --git a/drivers/media/dvb/frontends/lgh06xf.c b/drivers/media/dvb/frontends/lgh06xf.c
new file mode 100644
index 000000000000..2202d0cc878b
--- /dev/null
+++ b/drivers/media/dvb/frontends/lgh06xf.c
@@ -0,0 +1,134 @@
1/*
2 * lgh06xf.c - ATSC Tuner support for LG TDVS-H06xF
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19#include "dvb-pll.h"
20#include "lgh06xf.h"
21
22#define LG_H06XF_PLL_I2C_ADDR 0x61
23
24struct lgh06xf_priv {
25 struct i2c_adapter *i2c;
26 u32 frequency;
27};
28
29static int lgh06xf_release(struct dvb_frontend *fe)
30{
31 kfree(fe->tuner_priv);
32 fe->tuner_priv = NULL;
33 return 0;
34}
35
36static int lgh06xf_set_params(struct dvb_frontend* fe,
37 struct dvb_frontend_parameters* params)
38{
39 struct lgh06xf_priv *priv = fe->tuner_priv;
40 u8 buf[4];
41 struct i2c_msg msg = { .addr = LG_H06XF_PLL_I2C_ADDR, .flags = 0,
42 .buf = buf, .len = sizeof(buf) };
43 u32 frequency;
44 int result;
45
46 if ((result = dvb_pll_configure(&dvb_pll_lg_tdvs_h06xf, buf,
47 params->frequency, 0)) < 0)
48 return result;
49 else
50 frequency = result;
51
52 if (fe->ops.i2c_gate_ctrl)
53 fe->ops.i2c_gate_ctrl(fe, 1);
54 if ((result = i2c_transfer(priv->i2c, &msg, 1)) != 1) {
55 printk(KERN_WARNING "lgh06xf: %s error "
56 "(addr %02x <- %02x, result = %i)\n",
57 __FUNCTION__, buf[0], buf[1], result);
58 if (result < 0)
59 return result;
60 else
61 return -EREMOTEIO;
62 }
63
64 /* Set the Auxiliary Byte. */
65 buf[0] = buf[2];
66 buf[0] &= ~0x20;
67 buf[0] |= 0x18;
68 buf[1] = 0x50;
69 msg.len = 2;
70 if (fe->ops.i2c_gate_ctrl)
71 fe->ops.i2c_gate_ctrl(fe, 1);
72 if ((result = i2c_transfer(priv->i2c, &msg, 1)) != 1) {
73 printk(KERN_WARNING "lgh06xf: %s error "
74 "(addr %02x <- %02x, result = %i)\n",
75 __FUNCTION__, buf[0], buf[1], result);
76 if (result < 0)
77 return result;
78 else
79 return -EREMOTEIO;
80 }
81
82 priv->frequency = frequency;
83
84 return 0;
85}
86
87static int lgh06xf_get_frequency(struct dvb_frontend *fe, u32 *frequency)
88{
89 struct lgh06xf_priv *priv = fe->tuner_priv;
90 *frequency = priv->frequency;
91 return 0;
92}
93
94static struct dvb_tuner_ops lgh06xf_tuner_ops = {
95 .release = lgh06xf_release,
96 .set_params = lgh06xf_set_params,
97 .get_frequency = lgh06xf_get_frequency,
98};
99
100struct dvb_frontend* lgh06xf_attach(struct dvb_frontend *fe,
101 struct i2c_adapter *i2c)
102{
103 struct lgh06xf_priv *priv = NULL;
104
105 priv = kzalloc(sizeof(struct lgh06xf_priv), GFP_KERNEL);
106 if (priv == NULL)
107 return NULL;
108
109 priv->i2c = i2c;
110
111 memcpy(&fe->ops.tuner_ops, &lgh06xf_tuner_ops,
112 sizeof(struct dvb_tuner_ops));
113
114 strlcpy(fe->ops.tuner_ops.info.name, dvb_pll_lg_tdvs_h06xf.name,
115 sizeof(fe->ops.tuner_ops.info.name));
116
117 fe->ops.tuner_ops.info.frequency_min = dvb_pll_lg_tdvs_h06xf.min;
118 fe->ops.tuner_ops.info.frequency_max = dvb_pll_lg_tdvs_h06xf.max;
119
120 fe->tuner_priv = priv;
121 return fe;
122}
123
124EXPORT_SYMBOL(lgh06xf_attach);
125
126MODULE_DESCRIPTION("LG TDVS-H06xF ATSC Tuner support");
127MODULE_AUTHOR("Michael Krufky");
128MODULE_LICENSE("GPL");
129
130/*
131 * Local variables:
132 * c-basic-offset: 8
133 * End:
134 */
diff --git a/drivers/media/dvb/frontends/lgh06xf.h b/drivers/media/dvb/frontends/lgh06xf.h
new file mode 100644
index 000000000000..510b4bedfb24
--- /dev/null
+++ b/drivers/media/dvb/frontends/lgh06xf.h
@@ -0,0 +1,35 @@
1/*
2 * lgh06xf.h - ATSC Tuner support for LG TDVS-H06xF
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19#ifndef _LGH06XF_H_
20#define _LGH06XF_H_
21#include "dvb_frontend.h"
22
23#if defined(CONFIG_DVB_TUNER_LGH06XF) || (defined(CONFIG_DVB_TUNER_LGH06XF_MODULE) && defined(MODULE))
24extern struct dvb_frontend* lgh06xf_attach(struct dvb_frontend* fe,
25 struct i2c_adapter *i2c);
26#else
27static inline struct dvb_frontend* lgh06xf_attach(struct dvb_frontend* fe,
28 struct i2c_adapter *i2c)
29{
30 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __FUNCTION__);
31 return NULL;
32}
33#endif /* CONFIG_DVB_TUNER_LGH06XF */
34
35#endif /* _LGH06XF_H_ */
diff --git a/drivers/media/dvb/frontends/or51132.c b/drivers/media/dvb/frontends/or51132.c
index d20ab30c1e83..5a3a6e53cda2 100644
--- a/drivers/media/dvb/frontends/or51132.c
+++ b/drivers/media/dvb/frontends/or51132.c
@@ -40,6 +40,7 @@
40#include <linux/slab.h> 40#include <linux/slab.h>
41#include <asm/byteorder.h> 41#include <asm/byteorder.h>
42 42
43#include "dvb_math.h"
43#include "dvb_frontend.h" 44#include "dvb_frontend.h"
44#include "dvb-pll.h" 45#include "dvb-pll.h"
45#include "or51132.h" 46#include "or51132.h"
@@ -62,6 +63,7 @@ struct or51132_state
62 63
63 /* Demodulator private data */ 64 /* Demodulator private data */
64 fe_modulation_t current_modulation; 65 fe_modulation_t current_modulation;
66 u32 snr; /* Result of last SNR calculation */
65 67
66 /* Tuner private data */ 68 /* Tuner private data */
67 u32 current_frequency; 69 u32 current_frequency;
@@ -465,124 +467,128 @@ static int or51132_read_status(struct dvb_frontend* fe, fe_status_t* status)
465 return 0; 467 return 0;
466} 468}
467 469
468/* log10-1 table at .5 increments from 1 to 100.5 */ 470/* Calculate SNR estimation (scaled by 2^24)
469static unsigned int i100x20log10[] = {
470 0, 352, 602, 795, 954, 1088, 1204, 1306, 1397, 1480,
471 1556, 1625, 1690, 1750, 1806, 1858, 1908, 1955, 2000, 2042,
472 2082, 2121, 2158, 2193, 2227, 2260, 2292, 2322, 2352, 2380,
473 2408, 2434, 2460, 2486, 2510, 2534, 2557, 2580, 2602, 2623,
474 2644, 2664, 2684, 2704, 2723, 2742, 2760, 2778, 2795, 2813,
475 2829, 2846, 2862, 2878, 2894, 2909, 2924, 2939, 2954, 2968,
476 2982, 2996, 3010, 3023, 3037, 3050, 3062, 3075, 3088, 3100,
477 3112, 3124, 3136, 3148, 3159, 3170, 3182, 3193, 3204, 3214,
478 3225, 3236, 3246, 3256, 3266, 3276, 3286, 3296, 3306, 3316,
479 3325, 3334, 3344, 3353, 3362, 3371, 3380, 3389, 3397, 3406,
480 3415, 3423, 3432, 3440, 3448, 3456, 3464, 3472, 3480, 3488,
481 3496, 3504, 3511, 3519, 3526, 3534, 3541, 3549, 3556, 3563,
482 3570, 3577, 3584, 3591, 3598, 3605, 3612, 3619, 3625, 3632,
483 3639, 3645, 3652, 3658, 3665, 3671, 3677, 3683, 3690, 3696,
484 3702, 3708, 3714, 3720, 3726, 3732, 3738, 3744, 3750, 3755,
485 3761, 3767, 3772, 3778, 3784, 3789, 3795, 3800, 3806, 3811,
486 3816, 3822, 3827, 3832, 3838, 3843, 3848, 3853, 3858, 3863,
487 3868, 3874, 3879, 3884, 3888, 3893, 3898, 3903, 3908, 3913,
488 3918, 3922, 3927, 3932, 3936, 3941, 3946, 3950, 3955, 3960,
489 3964, 3969, 3973, 3978, 3982, 3986, 3991, 3995, 4000, 4004,
490};
491 471
492static unsigned int denom[] = {1,1,100,1000,10000,100000,1000000,10000000,100000000}; 472 8-VSB SNR and QAM equations from Oren datasheets
493 473
494static unsigned int i20Log10(unsigned short val) 474 For 8-VSB:
495{ 475 SNR[dB] = 10 * log10(897152044.8282 / MSE^2 ) - K
496 unsigned int rntval = 100; 476
497 unsigned int tmp = val; 477 Where K = 0 if NTSC rejection filter is OFF; and
498 unsigned int exp = 1; 478 K = 3 if NTSC rejection filter is ON
479
480 For QAM64:
481 SNR[dB] = 10 * log10(897152044.8282 / MSE^2 )
499 482
500 while(tmp > 100) {tmp /= 100; exp++;} 483 For QAM256:
484 SNR[dB] = 10 * log10(907832426.314266 / MSE^2 )
501 485
502 val = (2 * val)/denom[exp]; 486 We re-write the snr equation as:
503 if (exp > 1) rntval = 2000*exp; 487 SNR * 2^24 = 10*(c - 2*intlog10(MSE))
488 Where for QAM256, c = log10(907832426.314266) * 2^24
489 and for 8-VSB and QAM64, c = log10(897152044.8282) * 2^24 */
504 490
505 rntval += i100x20log10[val]; 491static u32 calculate_snr(u32 mse, u32 c)
506 return rntval; 492{
493 if (mse == 0) /* No signal */
494 return 0;
495
496 mse = 2*intlog10(mse);
497 if (mse > c) {
498 /* Negative SNR, which is possible, but realistically the
499 demod will lose lock before the signal gets this bad. The
500 API only allows for unsigned values, so just return 0 */
501 return 0;
502 }
503 return 10*(c - mse);
507} 504}
508 505
509static int or51132_read_signal_strength(struct dvb_frontend* fe, u16* strength) 506static int or51132_read_snr(struct dvb_frontend* fe, u16* snr)
510{ 507{
511 struct or51132_state* state = fe->demodulator_priv; 508 struct or51132_state* state = fe->demodulator_priv;
512 unsigned char rec_buf[2]; 509 u8 rec_buf[2];
513 unsigned char snd_buf[2]; 510 u8 snd_buf[2];
514 u8 rcvr_stat; 511 u32 noise;
515 u16 snr_equ; 512 u32 c;
516 u32 signal_strength; 513 u32 usK;
517 int usK;
518 514
515 /* Register is same for VSB or QAM firmware */
519 snd_buf[0]=0x04; 516 snd_buf[0]=0x04;
520 snd_buf[1]=0x02; /* SNR after Equalizer */ 517 snd_buf[1]=0x02; /* SNR after Equalizer */
521 msleep(30); /* 30ms */ 518 msleep(30); /* 30ms */
522 if (i2c_writebytes(state,state->config->demod_address,snd_buf,2)) { 519 if (i2c_writebytes(state,state->config->demod_address,snd_buf,2)) {
523 printk(KERN_WARNING "or51132: read_status write error\n"); 520 printk(KERN_WARNING "or51132: snr write error\n");
524 return -1; 521 return -EREMOTEIO;
525 } 522 }
526 msleep(30); /* 30ms */ 523 msleep(30); /* 30ms */
527 if (i2c_readbytes(state,state->config->demod_address,rec_buf,2)) { 524 if (i2c_readbytes(state,state->config->demod_address,rec_buf,2)) {
528 printk(KERN_WARNING "or51132: read_status read error\n"); 525 printk(KERN_WARNING "or51132: snr read error\n");
529 return -1; 526 return -EREMOTEIO;
530 } 527 }
531 snr_equ = rec_buf[0] | (rec_buf[1] << 8); 528 noise = rec_buf[0] | (rec_buf[1] << 8);
532 dprintk("read_signal_strength snr_equ %x %x (%i)\n",rec_buf[0],rec_buf[1],snr_equ); 529 dprintk("read_snr noise %x %x (%i)\n",rec_buf[0],rec_buf[1],noise);
533 530
534 /* Receiver Status */ 531 /* Read status, contains modulation type for QAM_AUTO and
532 NTSC filter for VSB */
535 snd_buf[0]=0x04; 533 snd_buf[0]=0x04;
536 snd_buf[1]=0x00; 534 snd_buf[1]=0x00; /* Status register */
537 msleep(30); /* 30ms */ 535 msleep(30); /* 30ms */
538 if (i2c_writebytes(state,state->config->demod_address,snd_buf,2)) { 536 if (i2c_writebytes(state,state->config->demod_address,snd_buf,2)) {
539 printk(KERN_WARNING "or51132: read_signal_strength read_status write error\n"); 537 printk(KERN_WARNING "or51132: status write error\n");
540 return -1; 538 return -EREMOTEIO;
541 } 539 }
542 msleep(30); /* 30ms */ 540 msleep(30); /* 30ms */
543 if (i2c_readbytes(state,state->config->demod_address,rec_buf,2)) { 541 if (i2c_readbytes(state,state->config->demod_address,rec_buf,2)) {
544 printk(KERN_WARNING "or51132: read_signal_strength read_status read error\n"); 542 printk(KERN_WARNING "or51132: status read error\n");
545 return -1; 543 return -EREMOTEIO;
546 } 544 }
547 dprintk("read_signal_strength read_status %x %x\n",rec_buf[0],rec_buf[1]);
548 rcvr_stat = rec_buf[1];
549 usK = (rcvr_stat & 0x10) ? 3 : 0;
550 545
551 /* The value reported back from the frontend will be FFFF=100% 0000=0% */ 546 usK = 0;
552 signal_strength = (((8952 - i20Log10(snr_equ) - usK*100)/3+5)*65535)/1000; 547 switch (rec_buf[0]) {
553 if (signal_strength > 0xffff) 548 case 0x06:
554 *strength = 0xffff; 549 usK = (rec_buf[1] & 0x10) ? 0x03000000 : 0;
555 else 550 /* Fall through to QAM64 case */
556 *strength = signal_strength; 551 case 0x43:
557 dprintk("read_signal_strength %i\n",*strength); 552 c = 150204167;
553 break;
554 case 0x45:
555 c = 150290396;
556 break;
557 default:
558 printk(KERN_ERR "or51132: unknown status 0x%02x\n", rec_buf[0]);
559 return -EREMOTEIO;
560 }
561 dprintk("%s: modulation %02x, NTSC rej O%s\n", __FUNCTION__,
562 rec_buf[0], rec_buf[1]&0x10?"n":"ff");
563
564 /* Calculate SNR using noise, c, and NTSC rejection correction */
565 state->snr = calculate_snr(noise, c) - usK;
566 *snr = (state->snr) >> 16;
567
568 dprintk("%s: noise = 0x%08x, snr = %d.%02d dB\n", __FUNCTION__, noise,
569 state->snr >> 24, (((state->snr>>8) & 0xffff) * 100) >> 16);
558 570
559 return 0; 571 return 0;
560} 572}
561 573
562static int or51132_read_snr(struct dvb_frontend* fe, u16* snr) 574static int or51132_read_signal_strength(struct dvb_frontend* fe, u16* strength)
563{ 575{
564 struct or51132_state* state = fe->demodulator_priv; 576 /* Calculate Strength from SNR up to 35dB */
565 unsigned char rec_buf[2]; 577 /* Even though the SNR can go higher than 35dB, there is some comfort */
566 unsigned char snd_buf[2]; 578 /* factor in having a range of strong signals that can show at 100% */
567 u16 snr_equ; 579 struct or51132_state* state = (struct or51132_state*) fe->demodulator_priv;
568 580 u16 snr;
569 snd_buf[0]=0x04; 581 int ret;
570 snd_buf[1]=0x02; /* SNR after Equalizer */
571 msleep(30); /* 30ms */
572 if (i2c_writebytes(state,state->config->demod_address,snd_buf,2)) {
573 printk(KERN_WARNING "or51132: read_snr write error\n");
574 return -1;
575 }
576 msleep(30); /* 30ms */
577 if (i2c_readbytes(state,state->config->demod_address,rec_buf,2)) {
578 printk(KERN_WARNING "or51132: read_snr dvr read error\n");
579 return -1;
580 }
581 snr_equ = rec_buf[0] | (rec_buf[1] << 8);
582 dprintk("read_snr snr_equ %x %x (%i)\n",rec_buf[0],rec_buf[1],snr_equ);
583 582
584 *snr = 0xFFFF - snr_equ; 583 ret = fe->ops.read_snr(fe, &snr);
585 dprintk("read_snr %i\n",*snr); 584 if (ret != 0)
585 return ret;
586 /* Rather than use the 8.8 value snr, use state->snr which is 8.24 */
587 /* scale the range 0 - 35*2^24 into 0 - 65535 */
588 if (state->snr >= 8960 * 0x10000)
589 *strength = 0xffff;
590 else
591 *strength = state->snr / 8960;
586 592
587 return 0; 593 return 0;
588} 594}
diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
index 2bf124b53689..048d7cfe12d3 100644
--- a/drivers/media/dvb/frontends/or51211.c
+++ b/drivers/media/dvb/frontends/or51211.c
@@ -39,6 +39,7 @@
39#include <linux/slab.h> 39#include <linux/slab.h>
40#include <asm/byteorder.h> 40#include <asm/byteorder.h>
41 41
42#include "dvb_math.h"
42#include "dvb_frontend.h" 43#include "dvb_frontend.h"
43#include "or51211.h" 44#include "or51211.h"
44 45
@@ -63,6 +64,7 @@ struct or51211_state {
63 64
64 /* Demodulator private data */ 65 /* Demodulator private data */
65 u8 initialized:1; 66 u8 initialized:1;
67 u32 snr; /* Result of last SNR calculation */
66 68
67 /* Tuner private data */ 69 /* Tuner private data */
68 u32 current_frequency; 70 u32 current_frequency;
@@ -292,107 +294,81 @@ static int or51211_read_status(struct dvb_frontend* fe, fe_status_t* status)
292 return 0; 294 return 0;
293} 295}
294 296
295/* log10-1 table at .5 increments from 1 to 100.5 */ 297/* Calculate SNR estimation (scaled by 2^24)
296static unsigned int i100x20log10[] = {
297 0, 352, 602, 795, 954, 1088, 1204, 1306, 1397, 1480,
298 1556, 1625, 1690, 1750, 1806, 1858, 1908, 1955, 2000, 2042,
299 2082, 2121, 2158, 2193, 2227, 2260, 2292, 2322, 2352, 2380,
300 2408, 2434, 2460, 2486, 2510, 2534, 2557, 2580, 2602, 2623,
301 2644, 2664, 2684, 2704, 2723, 2742, 2760, 2778, 2795, 2813,
302 2829, 2846, 2862, 2878, 2894, 2909, 2924, 2939, 2954, 2968,
303 2982, 2996, 3010, 3023, 3037, 3050, 3062, 3075, 3088, 3100,
304 3112, 3124, 3136, 3148, 3159, 3170, 3182, 3193, 3204, 3214,
305 3225, 3236, 3246, 3256, 3266, 3276, 3286, 3296, 3306, 3316,
306 3325, 3334, 3344, 3353, 3362, 3371, 3380, 3389, 3397, 3406,
307 3415, 3423, 3432, 3440, 3448, 3456, 3464, 3472, 3480, 3488,
308 3496, 3504, 3511, 3519, 3526, 3534, 3541, 3549, 3556, 3563,
309 3570, 3577, 3584, 3591, 3598, 3605, 3612, 3619, 3625, 3632,
310 3639, 3645, 3652, 3658, 3665, 3671, 3677, 3683, 3690, 3696,
311 3702, 3708, 3714, 3720, 3726, 3732, 3738, 3744, 3750, 3755,
312 3761, 3767, 3772, 3778, 3784, 3789, 3795, 3800, 3806, 3811,
313 3816, 3822, 3827, 3832, 3838, 3843, 3848, 3853, 3858, 3863,
314 3868, 3874, 3879, 3884, 3888, 3893, 3898, 3903, 3908, 3913,
315 3918, 3922, 3927, 3932, 3936, 3941, 3946, 3950, 3955, 3960,
316 3964, 3969, 3973, 3978, 3982, 3986, 3991, 3995, 4000, 4004,
317};
318
319static unsigned int denom[] = {1,1,100,1000,10000,100000,1000000,10000000,100000000};
320 298
321static unsigned int i20Log10(unsigned short val) 299 8-VSB SNR equation from Oren datasheets
322{
323 unsigned int rntval = 100;
324 unsigned int tmp = val;
325 unsigned int exp = 1;
326 300
327 while(tmp > 100) {tmp /= 100; exp++;} 301 For 8-VSB:
302 SNR[dB] = 10 * log10(219037.9454 / MSE^2 )
328 303
329 val = (2 * val)/denom[exp]; 304 We re-write the snr equation as:
330 if (exp > 1) rntval = 2000*exp; 305 SNR * 2^24 = 10*(c - 2*intlog10(MSE))
306 Where for 8-VSB, c = log10(219037.9454) * 2^24 */
331 307
332 rntval += i100x20log10[val]; 308static u32 calculate_snr(u32 mse, u32 c)
333 return rntval; 309{
310 if (mse == 0) /* No signal */
311 return 0;
312
313 mse = 2*intlog10(mse);
314 if (mse > c) {
315 /* Negative SNR, which is possible, but realistically the
316 demod will lose lock before the signal gets this bad. The
317 API only allows for unsigned values, so just return 0 */
318 return 0;
319 }
320 return 10*(c - mse);
334} 321}
335 322
336static int or51211_read_signal_strength(struct dvb_frontend* fe, u16* strength) 323static int or51211_read_snr(struct dvb_frontend* fe, u16* snr)
337{ 324{
338 struct or51211_state* state = fe->demodulator_priv; 325 struct or51211_state* state = fe->demodulator_priv;
339 u8 rec_buf[2]; 326 u8 rec_buf[2];
340 u8 snd_buf[4]; 327 u8 snd_buf[3];
341 u8 snr_equ;
342 u32 signal_strength;
343 328
344 /* SNR after Equalizer */ 329 /* SNR after Equalizer */
345 snd_buf[0] = 0x04; 330 snd_buf[0] = 0x04;
346 snd_buf[1] = 0x00; 331 snd_buf[1] = 0x00;
347 snd_buf[2] = 0x04; 332 snd_buf[2] = 0x04;
348 snd_buf[3] = 0x00;
349 333
350 if (i2c_writebytes(state,state->config->demod_address,snd_buf,3)) { 334 if (i2c_writebytes(state,state->config->demod_address,snd_buf,3)) {
351 printk(KERN_WARNING "or51211: read_status write error\n"); 335 printk(KERN_WARNING "%s: error writing snr reg\n",
336 __FUNCTION__);
352 return -1; 337 return -1;
353 } 338 }
354 msleep(3);
355 if (i2c_readbytes(state,state->config->demod_address,rec_buf,2)) { 339 if (i2c_readbytes(state,state->config->demod_address,rec_buf,2)) {
356 printk(KERN_WARNING "or51211: read_status read error\n"); 340 printk(KERN_WARNING "%s: read_status read error\n",
341 __FUNCTION__);
357 return -1; 342 return -1;
358 } 343 }
359 snr_equ = rec_buf[0] & 0xff;
360 344
361 /* The value reported back from the frontend will be FFFF=100% 0000=0% */ 345 state->snr = calculate_snr(rec_buf[0], 89599047);
362 signal_strength = (((5334 - i20Log10(snr_equ))/3+5)*65535)/1000; 346 *snr = (state->snr) >> 16;
363 if (signal_strength > 0xffff) 347
364 *strength = 0xffff; 348 dprintk("%s: noise = 0x%02x, snr = %d.%02d dB\n", __FUNCTION__, rec_buf[0],
365 else 349 state->snr >> 24, (((state->snr>>8) & 0xffff) * 100) >> 16);
366 *strength = signal_strength;
367 dprintk("read_signal_strength %i\n",*strength);
368 350
369 return 0; 351 return 0;
370} 352}
371 353
372static int or51211_read_snr(struct dvb_frontend* fe, u16* snr) 354static int or51211_read_signal_strength(struct dvb_frontend* fe, u16* strength)
373{ 355{
374 struct or51211_state* state = fe->demodulator_priv; 356 /* Calculate Strength from SNR up to 35dB */
375 u8 rec_buf[2]; 357 /* Even though the SNR can go higher than 35dB, there is some comfort */
376 u8 snd_buf[4]; 358 /* factor in having a range of strong signals that can show at 100% */
377 359 struct or51211_state* state = (struct or51211_state*)fe->demodulator_priv;
378 /* SNR after Equalizer */ 360 u16 snr;
379 snd_buf[0] = 0x04; 361 int ret;
380 snd_buf[1] = 0x00; 362
381 snd_buf[2] = 0x04; 363 ret = fe->ops.read_snr(fe, &snr);
382 snd_buf[3] = 0x00; 364 if (ret != 0)
383 365 return ret;
384 if (i2c_writebytes(state,state->config->demod_address,snd_buf,3)) { 366 /* Rather than use the 8.8 value snr, use state->snr which is 8.24 */
385 printk(KERN_WARNING "or51211: read_status write error\n"); 367 /* scale the range 0 - 35*2^24 into 0 - 65535 */
386 return -1; 368 if (state->snr >= 8960 * 0x10000)
387 } 369 *strength = 0xffff;
388 msleep(3); 370 else
389 if (i2c_readbytes(state,state->config->demod_address,rec_buf,2)) { 371 *strength = state->snr / 8960;
390 printk(KERN_WARNING "or51211: read_status read error\n");
391 return -1;
392 }
393 *snr = rec_buf[0] & 0xff;
394
395 dprintk("read_snr %i\n",*snr);
396 372
397 return 0; 373 return 0;
398} 374}
diff --git a/drivers/media/dvb/frontends/tda1004x.c b/drivers/media/dvb/frontends/tda1004x.c
index 11e0dca9a2d7..00e4bcd9f1a4 100644
--- a/drivers/media/dvb/frontends/tda1004x.c
+++ b/drivers/media/dvb/frontends/tda1004x.c
@@ -648,18 +648,24 @@ static int tda10046_init(struct dvb_frontend* fe)
648 tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x0a); // AGC setup 648 tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x0a); // AGC setup
649 tda1004x_write_byteI(state, TDA10046H_CONF_POLARITY, 0x00); // set AGC polarities 649 tda1004x_write_byteI(state, TDA10046H_CONF_POLARITY, 0x00); // set AGC polarities
650 break; 650 break;
651 case TDA10046_AGC_TDA827X: 651 case TDA10046_AGC_TDA827X_GP11:
652 tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x02); // AGC setup 652 tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x02); // AGC setup
653 tda1004x_write_byteI(state, TDA10046H_AGC_THR, 0x70); // AGC Threshold 653 tda1004x_write_byteI(state, TDA10046H_AGC_THR, 0x70); // AGC Threshold
654 tda1004x_write_byteI(state, TDA10046H_AGC_RENORM, 0x08); // Gain Renormalize 654 tda1004x_write_byteI(state, TDA10046H_AGC_RENORM, 0x08); // Gain Renormalize
655 tda1004x_write_byteI(state, TDA10046H_CONF_POLARITY, 0x6a); // set AGC polarities 655 tda1004x_write_byteI(state, TDA10046H_CONF_POLARITY, 0x6a); // set AGC polarities
656 break; 656 break;
657 case TDA10046_AGC_TDA827X_GPL: 657 case TDA10046_AGC_TDA827X_GP00:
658 tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x02); // AGC setup 658 tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x02); // AGC setup
659 tda1004x_write_byteI(state, TDA10046H_AGC_THR, 0x70); // AGC Threshold 659 tda1004x_write_byteI(state, TDA10046H_AGC_THR, 0x70); // AGC Threshold
660 tda1004x_write_byteI(state, TDA10046H_AGC_RENORM, 0x08); // Gain Renormalize 660 tda1004x_write_byteI(state, TDA10046H_AGC_RENORM, 0x08); // Gain Renormalize
661 tda1004x_write_byteI(state, TDA10046H_CONF_POLARITY, 0x60); // set AGC polarities 661 tda1004x_write_byteI(state, TDA10046H_CONF_POLARITY, 0x60); // set AGC polarities
662 break; 662 break;
663 case TDA10046_AGC_TDA827X_GP01:
664 tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x02); // AGC setup
665 tda1004x_write_byteI(state, TDA10046H_AGC_THR, 0x70); // AGC Threshold
666 tda1004x_write_byteI(state, TDA10046H_AGC_RENORM, 0x08); // Gain Renormalize
667 tda1004x_write_byteI(state, TDA10046H_CONF_POLARITY, 0x62); // set AGC polarities
668 break;
663 } 669 }
664 tda1004x_write_byteI(state, TDA1004X_CONFADC2, 0x38); 670 tda1004x_write_byteI(state, TDA1004X_CONFADC2, 0x38);
665 tda1004x_write_byteI(state, TDA10046H_CONF_TRISTATE1, 0x61); // Turn both AGC outputs on 671 tda1004x_write_byteI(state, TDA10046H_CONF_TRISTATE1, 0x61); // Turn both AGC outputs on
diff --git a/drivers/media/dvb/frontends/tda1004x.h b/drivers/media/dvb/frontends/tda1004x.h
index 605ad2dfc09d..ec502d71b83c 100644
--- a/drivers/media/dvb/frontends/tda1004x.h
+++ b/drivers/media/dvb/frontends/tda1004x.h
@@ -35,8 +35,9 @@ enum tda10046_agc {
35 TDA10046_AGC_DEFAULT, /* original configuration */ 35 TDA10046_AGC_DEFAULT, /* original configuration */
36 TDA10046_AGC_IFO_AUTO_NEG, /* IF AGC only, automatic, negative */ 36 TDA10046_AGC_IFO_AUTO_NEG, /* IF AGC only, automatic, negative */
37 TDA10046_AGC_IFO_AUTO_POS, /* IF AGC only, automatic, positive */ 37 TDA10046_AGC_IFO_AUTO_POS, /* IF AGC only, automatic, positive */
38 TDA10046_AGC_TDA827X, /* IF AGC only, special setup for tda827x */ 38 TDA10046_AGC_TDA827X_GP11, /* IF AGC only, special setup for tda827x */
39 TDA10046_AGC_TDA827X_GPL, /* same as above, but GPIOs 0 */ 39 TDA10046_AGC_TDA827X_GP00, /* same as above, but GPIOs 0 */
40 TDA10046_AGC_TDA827X_GP01, /* same as above, but GPIO3=0 GPIO1=1*/
40}; 41};
41 42
42enum tda10046_if { 43enum tda10046_if {
diff --git a/drivers/media/dvb/frontends/tda8083.c b/drivers/media/dvb/frontends/tda8083.c
index 3aa45ebbac3d..67415c9db6f7 100644
--- a/drivers/media/dvb/frontends/tda8083.c
+++ b/drivers/media/dvb/frontends/tda8083.c
@@ -262,12 +262,29 @@ static int tda8083_read_status(struct dvb_frontend* fe, fe_status_t* status)
262 if (sync & 0x10) 262 if (sync & 0x10)
263 *status |= FE_HAS_SYNC; 263 *status |= FE_HAS_SYNC;
264 264
265 if (sync & 0x20) /* frontend can not lock */
266 *status |= FE_TIMEDOUT;
267
265 if ((sync & 0x1f) == 0x1f) 268 if ((sync & 0x1f) == 0x1f)
266 *status |= FE_HAS_LOCK; 269 *status |= FE_HAS_LOCK;
267 270
268 return 0; 271 return 0;
269} 272}
270 273
274static int tda8083_read_ber(struct dvb_frontend* fe, u32* ber)
275{
276 struct tda8083_state* state = fe->demodulator_priv;
277 int ret;
278 u8 buf[3];
279
280 if ((ret = tda8083_readregs(state, 0x0b, buf, sizeof(buf))))
281 return ret;
282
283 *ber = ((buf[0] & 0x1f) << 16) | (buf[1] << 8) | buf[2];
284
285 return 0;
286}
287
271static int tda8083_read_signal_strength(struct dvb_frontend* fe, u16* strength) 288static int tda8083_read_signal_strength(struct dvb_frontend* fe, u16* strength)
272{ 289{
273 struct tda8083_state* state = fe->demodulator_priv; 290 struct tda8083_state* state = fe->demodulator_priv;
@@ -288,6 +305,17 @@ static int tda8083_read_snr(struct dvb_frontend* fe, u16* snr)
288 return 0; 305 return 0;
289} 306}
290 307
308static int tda8083_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
309{
310 struct tda8083_state* state = fe->demodulator_priv;
311
312 *ucblocks = tda8083_readreg(state, 0x0f);
313 if (*ucblocks == 0xff)
314 *ucblocks = 0xffffffff;
315
316 return 0;
317}
318
291static int tda8083_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p) 319static int tda8083_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
292{ 320{
293 struct tda8083_state* state = fe->demodulator_priv; 321 struct tda8083_state* state = fe->demodulator_priv;
@@ -440,6 +468,8 @@ static struct dvb_frontend_ops tda8083_ops = {
440 .read_status = tda8083_read_status, 468 .read_status = tda8083_read_status,
441 .read_signal_strength = tda8083_read_signal_strength, 469 .read_signal_strength = tda8083_read_signal_strength,
442 .read_snr = tda8083_read_snr, 470 .read_snr = tda8083_read_snr,
471 .read_ber = tda8083_read_ber,
472 .read_ucblocks = tda8083_read_ucblocks,
443 473
444 .diseqc_send_master_cmd = tda8083_send_diseqc_msg, 474 .diseqc_send_master_cmd = tda8083_send_diseqc_msg,
445 .diseqc_send_burst = tda8083_diseqc_send_burst, 475 .diseqc_send_burst = tda8083_diseqc_send_burst,
diff --git a/drivers/media/dvb/frontends/tda826x.c b/drivers/media/dvb/frontends/tda826x.c
index 34815b0b97e4..79f971dc52b6 100644
--- a/drivers/media/dvb/frontends/tda826x.c
+++ b/drivers/media/dvb/frontends/tda826x.c
@@ -42,8 +42,7 @@ struct tda826x_priv {
42 42
43static int tda826x_release(struct dvb_frontend *fe) 43static int tda826x_release(struct dvb_frontend *fe)
44{ 44{
45 if (fe->tuner_priv) 45 kfree(fe->tuner_priv);
46 kfree(fe->tuner_priv);
47 fe->tuner_priv = NULL; 46 fe->tuner_priv = NULL;
48 return 0; 47 return 0;
49} 48}
@@ -133,18 +132,21 @@ struct dvb_frontend *tda826x_attach(struct dvb_frontend *fe, int addr, struct i2
133{ 132{
134 struct tda826x_priv *priv = NULL; 133 struct tda826x_priv *priv = NULL;
135 u8 b1 [] = { 0, 0 }; 134 u8 b1 [] = { 0, 0 };
136 struct i2c_msg msg = { .addr = addr, .flags = I2C_M_RD, .buf = b1, .len = 2 }; 135 struct i2c_msg msg[2] = {
136 { .addr = addr, .flags = 0, .buf = NULL, .len = 0 },
137 { .addr = addr, .flags = I2C_M_RD, .buf = b1, .len = 2 }
138 };
137 int ret; 139 int ret;
138 140
139 dprintk("%s:\n", __FUNCTION__); 141 dprintk("%s:\n", __FUNCTION__);
140 142
141 if (fe->ops.i2c_gate_ctrl) 143 if (fe->ops.i2c_gate_ctrl)
142 fe->ops.i2c_gate_ctrl(fe, 1); 144 fe->ops.i2c_gate_ctrl(fe, 1);
143 ret = i2c_transfer (i2c, &msg, 1); 145 ret = i2c_transfer (i2c, msg, 2);
144 if (fe->ops.i2c_gate_ctrl) 146 if (fe->ops.i2c_gate_ctrl)
145 fe->ops.i2c_gate_ctrl(fe, 0); 147 fe->ops.i2c_gate_ctrl(fe, 0);
146 148
147 if (ret != 1) 149 if (ret != 2)
148 return NULL; 150 return NULL;
149 if (!(b1[1] & 0x80)) 151 if (!(b1[1] & 0x80))
150 return NULL; 152 return NULL;
diff --git a/drivers/media/dvb/frontends/tua6100.c b/drivers/media/dvb/frontends/tua6100.c
index 88554393a9bf..6ba0029dcf2e 100644
--- a/drivers/media/dvb/frontends/tua6100.c
+++ b/drivers/media/dvb/frontends/tua6100.c
@@ -43,8 +43,7 @@ struct tua6100_priv {
43 43
44static int tua6100_release(struct dvb_frontend *fe) 44static int tua6100_release(struct dvb_frontend *fe)
45{ 45{
46 if (fe->tuner_priv) 46 kfree(fe->tuner_priv);
47 kfree(fe->tuner_priv);
48 fe->tuner_priv = NULL; 47 fe->tuner_priv = NULL;
49 return 0; 48 return 0;
50} 49}
diff --git a/drivers/media/dvb/pluto2/pluto2.c b/drivers/media/dvb/pluto2/pluto2.c
index 8e4ce101eb22..ffda71dfdd65 100644
--- a/drivers/media/dvb/pluto2/pluto2.c
+++ b/drivers/media/dvb/pluto2/pluto2.c
@@ -650,7 +650,7 @@ static int __devinit pluto2_probe(struct pci_dev *pdev,
650 /* dvb */ 650 /* dvb */
651 ret = dvb_register_adapter(&pluto->dvb_adapter, DRIVER_NAME, THIS_MODULE, &pdev->dev); 651 ret = dvb_register_adapter(&pluto->dvb_adapter, DRIVER_NAME, THIS_MODULE, &pdev->dev);
652 if (ret < 0) 652 if (ret < 0)
653 goto err_i2c_bit_del_bus; 653 goto err_i2c_del_adapter;
654 654
655 dvb_adapter = &pluto->dvb_adapter; 655 dvb_adapter = &pluto->dvb_adapter;
656 656
@@ -712,8 +712,8 @@ err_dvb_dmx_release:
712 dvb_dmx_release(dvbdemux); 712 dvb_dmx_release(dvbdemux);
713err_dvb_unregister_adapter: 713err_dvb_unregister_adapter:
714 dvb_unregister_adapter(dvb_adapter); 714 dvb_unregister_adapter(dvb_adapter);
715err_i2c_bit_del_bus: 715err_i2c_del_adapter:
716 i2c_bit_del_bus(&pluto->i2c_adap); 716 i2c_del_adapter(&pluto->i2c_adap);
717err_pluto_hw_exit: 717err_pluto_hw_exit:
718 pluto_hw_exit(pluto); 718 pluto_hw_exit(pluto);
719err_free_irq: 719err_free_irq:
@@ -748,7 +748,7 @@ static void __devexit pluto2_remove(struct pci_dev *pdev)
748 dvb_dmxdev_release(&pluto->dmxdev); 748 dvb_dmxdev_release(&pluto->dmxdev);
749 dvb_dmx_release(dvbdemux); 749 dvb_dmx_release(dvbdemux);
750 dvb_unregister_adapter(dvb_adapter); 750 dvb_unregister_adapter(dvb_adapter);
751 i2c_bit_del_bus(&pluto->i2c_adap); 751 i2c_del_adapter(&pluto->i2c_adap);
752 pluto_hw_exit(pluto); 752 pluto_hw_exit(pluto);
753 free_irq(pdev->irq, pluto); 753 free_irq(pdev->irq, pluto);
754 pci_iounmap(pdev, pluto->io_mem); 754 pci_iounmap(pdev, pluto->io_mem);
diff --git a/drivers/media/dvb/ttpci/Kconfig b/drivers/media/dvb/ttpci/Kconfig
index 95531a624991..eec7ccf41f8b 100644
--- a/drivers/media/dvb/ttpci/Kconfig
+++ b/drivers/media/dvb/ttpci/Kconfig
@@ -92,6 +92,7 @@ config DVB_BUDGET_CI
92 select DVB_STV0299 if !DVB_FE_CUSTOMISE 92 select DVB_STV0299 if !DVB_FE_CUSTOMISE
93 select DVB_TDA1004X if !DVB_FE_CUSTOMISE 93 select DVB_TDA1004X if !DVB_FE_CUSTOMISE
94 select DVB_LNBP21 if !DVB_FE_CUSTOMISE 94 select DVB_LNBP21 if !DVB_FE_CUSTOMISE
95 select VIDEO_IR
95 help 96 help
96 Support for simple SAA7146 based DVB cards 97 Support for simple SAA7146 based DVB cards
97 (so called Budget- or Nova-PCI cards) without onboard 98 (so called Budget- or Nova-PCI cards) without onboard
diff --git a/drivers/media/dvb/ttpci/av7110.c b/drivers/media/dvb/ttpci/av7110.c
index bba23bcd1b11..366c1371ee97 100644
--- a/drivers/media/dvb/ttpci/av7110.c
+++ b/drivers/media/dvb/ttpci/av7110.c
@@ -2828,7 +2828,7 @@ MODULE_DEVICE_TABLE(pci, pci_tbl);
2828 2828
2829static struct saa7146_extension av7110_extension = { 2829static struct saa7146_extension av7110_extension = {
2830 .name = "dvb", 2830 .name = "dvb",
2831 .flags = SAA7146_I2C_SHORT_DELAY, 2831 .flags = SAA7146_USE_I2C_IRQ,
2832 2832
2833 .module = THIS_MODULE, 2833 .module = THIS_MODULE,
2834 .pci_tbl = &pci_tbl[0], 2834 .pci_tbl = &pci_tbl[0],
diff --git a/drivers/media/dvb/ttpci/av7110_ir.c b/drivers/media/dvb/ttpci/av7110_ir.c
index d54bbcdde2cc..e4544ea2b89b 100644
--- a/drivers/media/dvb/ttpci/av7110_ir.c
+++ b/drivers/media/dvb/ttpci/av7110_ir.c
@@ -48,7 +48,8 @@ static void av7110_emit_keyup(unsigned long data)
48 if (!data || !test_bit(data, input_dev->key)) 48 if (!data || !test_bit(data, input_dev->key))
49 return; 49 return;
50 50
51 input_event(input_dev, EV_KEY, data, !!0); 51 input_report_key(input_dev, data, 0);
52 input_sync(input_dev);
52} 53}
53 54
54 55
@@ -115,14 +116,17 @@ static void av7110_emit_key(unsigned long parm)
115 del_timer(&keyup_timer); 116 del_timer(&keyup_timer);
116 if (keyup_timer.data != keycode || new_toggle != old_toggle) { 117 if (keyup_timer.data != keycode || new_toggle != old_toggle) {
117 delay_timer_finished = 0; 118 delay_timer_finished = 0;
118 input_event(input_dev, EV_KEY, keyup_timer.data, !!0); 119 input_event(input_dev, EV_KEY, keyup_timer.data, 0);
119 input_event(input_dev, EV_KEY, keycode, !0); 120 input_event(input_dev, EV_KEY, keycode, 1);
120 } else 121 input_sync(input_dev);
121 if (delay_timer_finished) 122 } else if (delay_timer_finished) {
122 input_event(input_dev, EV_KEY, keycode, 2); 123 input_event(input_dev, EV_KEY, keycode, 2);
124 input_sync(input_dev);
125 }
123 } else { 126 } else {
124 delay_timer_finished = 0; 127 delay_timer_finished = 0;
125 input_event(input_dev, EV_KEY, keycode, !0); 128 input_event(input_dev, EV_KEY, keycode, 1);
129 input_sync(input_dev);
126 } 130 }
127 131
128 keyup_timer.expires = jiffies + UP_TIMEOUT; 132 keyup_timer.expires = jiffies + UP_TIMEOUT;
@@ -211,6 +215,7 @@ static void ir_handler(struct av7110 *av7110, u32 ircom)
211int __devinit av7110_ir_init(struct av7110 *av7110) 215int __devinit av7110_ir_init(struct av7110 *av7110)
212{ 216{
213 static struct proc_dir_entry *e; 217 static struct proc_dir_entry *e;
218 int err;
214 219
215 if (av_cnt >= sizeof av_list/sizeof av_list[0]) 220 if (av_cnt >= sizeof av_list/sizeof av_list[0])
216 return -ENOSPC; 221 return -ENOSPC;
@@ -231,7 +236,11 @@ int __devinit av7110_ir_init(struct av7110 *av7110)
231 set_bit(EV_KEY, input_dev->evbit); 236 set_bit(EV_KEY, input_dev->evbit);
232 set_bit(EV_REP, input_dev->evbit); 237 set_bit(EV_REP, input_dev->evbit);
233 input_register_keys(); 238 input_register_keys();
234 input_register_device(input_dev); 239 err = input_register_device(input_dev);
240 if (err) {
241 input_free_device(input_dev);
242 return err;
243 }
235 input_dev->timer.function = input_repeat_key; 244 input_dev->timer.function = input_repeat_key;
236 245
237 e = create_proc_entry("av7110_ir", S_IFREG | S_IRUGO | S_IWUSR, NULL); 246 e = create_proc_entry("av7110_ir", S_IFREG | S_IRUGO | S_IWUSR, NULL);
diff --git a/drivers/media/dvb/ttpci/budget-av.c b/drivers/media/dvb/ttpci/budget-av.c
index 2235ff8b8a1d..89ab4b59155c 100644
--- a/drivers/media/dvb/ttpci/budget-av.c
+++ b/drivers/media/dvb/ttpci/budget-av.c
@@ -360,7 +360,7 @@ static int ciintf_init(struct budget_av *budget_av)
360 saa7146_setgpio(saa, 3, SAA7146_GPIO_OUTLO); 360 saa7146_setgpio(saa, 3, SAA7146_GPIO_OUTLO);
361 361
362 /* Enable DEBI pins */ 362 /* Enable DEBI pins */
363 saa7146_write(saa, MC1, saa7146_read(saa, MC1) | (0x800 << 16) | 0x800); 363 saa7146_write(saa, MC1, MASK_27 | MASK_11);
364 364
365 /* register CI interface */ 365 /* register CI interface */
366 budget_av->ca.owner = THIS_MODULE; 366 budget_av->ca.owner = THIS_MODULE;
@@ -386,7 +386,7 @@ static int ciintf_init(struct budget_av *budget_av)
386 return 0; 386 return 0;
387 387
388error: 388error:
389 saa7146_write(saa, MC1, saa7146_read(saa, MC1) | (0x800 << 16)); 389 saa7146_write(saa, MC1, MASK_27);
390 return result; 390 return result;
391} 391}
392 392
@@ -403,7 +403,7 @@ static void ciintf_deinit(struct budget_av *budget_av)
403 dvb_ca_en50221_release(&budget_av->ca); 403 dvb_ca_en50221_release(&budget_av->ca);
404 404
405 /* disable DEBI pins */ 405 /* disable DEBI pins */
406 saa7146_write(saa, MC1, saa7146_read(saa, MC1) | (0x800 << 16)); 406 saa7146_write(saa, MC1, MASK_27);
407} 407}
408 408
409 409
@@ -655,6 +655,10 @@ static struct tda10021_config philips_cu1216_config = {
655 .demod_address = 0x0c, 655 .demod_address = 0x0c,
656}; 656};
657 657
658static struct tda10021_config philips_cu1216_config_altaddress = {
659 .demod_address = 0x0d,
660};
661
658 662
659 663
660 664
@@ -831,7 +835,7 @@ static int philips_sd1878_tda8261_tuner_set_params(struct dvb_frontend *fe,
831 return -EINVAL; 835 return -EINVAL;
832 836
833 rc=dvb_pll_configure(&dvb_pll_philips_sd1878_tda8261, buf, 837 rc=dvb_pll_configure(&dvb_pll_philips_sd1878_tda8261, buf,
834 params->frequency, 0); 838 params->frequency, 0);
835 if(rc < 0) return rc; 839 if(rc < 0) return rc;
836 840
837 if (fe->ops.i2c_gate_ctrl) 841 if (fe->ops.i2c_gate_ctrl)
@@ -914,6 +918,7 @@ static u8 read_pwm(struct budget_av *budget_av)
914#define SUBID_DVBS_TV_STAR_CI 0x0016 918#define SUBID_DVBS_TV_STAR_CI 0x0016
915#define SUBID_DVBS_EASYWATCH_1 0x001a 919#define SUBID_DVBS_EASYWATCH_1 0x001a
916#define SUBID_DVBS_EASYWATCH 0x001e 920#define SUBID_DVBS_EASYWATCH 0x001e
921#define SUBID_DVBC_EASYWATCH 0x002a
917#define SUBID_DVBC_KNC1 0x0020 922#define SUBID_DVBC_KNC1 0x0020
918#define SUBID_DVBC_KNC1_PLUS 0x0021 923#define SUBID_DVBC_KNC1_PLUS 0x0021
919#define SUBID_DVBC_CINERGY1200 0x1156 924#define SUBID_DVBC_CINERGY1200 0x1156
@@ -947,11 +952,15 @@ static void frontend_init(struct budget_av *budget_av)
947 /* Enable / PowerON Frontend */ 952 /* Enable / PowerON Frontend */
948 saa7146_setgpio(saa, 0, SAA7146_GPIO_OUTLO); 953 saa7146_setgpio(saa, 0, SAA7146_GPIO_OUTLO);
949 954
955 /* Wait for PowerON */
956 msleep(100);
957
950 /* additional setup necessary for the PLUS cards */ 958 /* additional setup necessary for the PLUS cards */
951 switch (saa->pci->subsystem_device) { 959 switch (saa->pci->subsystem_device) {
952 case SUBID_DVBS_KNC1_PLUS: 960 case SUBID_DVBS_KNC1_PLUS:
953 case SUBID_DVBC_KNC1_PLUS: 961 case SUBID_DVBC_KNC1_PLUS:
954 case SUBID_DVBT_KNC1_PLUS: 962 case SUBID_DVBT_KNC1_PLUS:
963 case SUBID_DVBC_EASYWATCH:
955 saa7146_setgpio(saa, 3, SAA7146_GPIO_OUTHI); 964 saa7146_setgpio(saa, 3, SAA7146_GPIO_OUTHI);
956 break; 965 break;
957 } 966 }
@@ -1006,10 +1015,15 @@ static void frontend_init(struct budget_av *budget_av)
1006 case SUBID_DVBC_KNC1: 1015 case SUBID_DVBC_KNC1:
1007 case SUBID_DVBC_KNC1_PLUS: 1016 case SUBID_DVBC_KNC1_PLUS:
1008 case SUBID_DVBC_CINERGY1200: 1017 case SUBID_DVBC_CINERGY1200:
1018 case SUBID_DVBC_EASYWATCH:
1009 budget_av->reinitialise_demod = 1; 1019 budget_av->reinitialise_demod = 1;
1010 fe = dvb_attach(tda10021_attach, &philips_cu1216_config, 1020 fe = dvb_attach(tda10021_attach, &philips_cu1216_config,
1011 &budget_av->budget.i2c_adap, 1021 &budget_av->budget.i2c_adap,
1012 read_pwm(budget_av)); 1022 read_pwm(budget_av));
1023 if (fe == NULL)
1024 fe = dvb_attach(tda10021_attach, &philips_cu1216_config_altaddress,
1025 &budget_av->budget.i2c_adap,
1026 read_pwm(budget_av));
1013 if (fe) { 1027 if (fe) {
1014 budget_av->tda10021_poclkp = 1; 1028 budget_av->tda10021_poclkp = 1;
1015 budget_av->tda10021_set_frontend = fe->ops.set_frontend; 1029 budget_av->tda10021_set_frontend = fe->ops.set_frontend;
@@ -1242,6 +1256,7 @@ MAKE_BUDGET_INFO(knc1t, "KNC1 DVB-T", BUDGET_KNC1T);
1242MAKE_BUDGET_INFO(kncxs, "KNC TV STAR DVB-S", BUDGET_TVSTAR); 1256MAKE_BUDGET_INFO(kncxs, "KNC TV STAR DVB-S", BUDGET_TVSTAR);
1243MAKE_BUDGET_INFO(satewpls, "Satelco EasyWatch DVB-S light", BUDGET_TVSTAR); 1257MAKE_BUDGET_INFO(satewpls, "Satelco EasyWatch DVB-S light", BUDGET_TVSTAR);
1244MAKE_BUDGET_INFO(satewpls1, "Satelco EasyWatch DVB-S light", BUDGET_KNC1S); 1258MAKE_BUDGET_INFO(satewpls1, "Satelco EasyWatch DVB-S light", BUDGET_KNC1S);
1259MAKE_BUDGET_INFO(satewplc, "Satelco EasyWatch DVB-C", BUDGET_KNC1CP);
1245MAKE_BUDGET_INFO(knc1sp, "KNC1 DVB-S Plus", BUDGET_KNC1SP); 1260MAKE_BUDGET_INFO(knc1sp, "KNC1 DVB-S Plus", BUDGET_KNC1SP);
1246MAKE_BUDGET_INFO(knc1cp, "KNC1 DVB-C Plus", BUDGET_KNC1CP); 1261MAKE_BUDGET_INFO(knc1cp, "KNC1 DVB-C Plus", BUDGET_KNC1CP);
1247MAKE_BUDGET_INFO(knc1tp, "KNC1 DVB-T Plus", BUDGET_KNC1TP); 1262MAKE_BUDGET_INFO(knc1tp, "KNC1 DVB-T Plus", BUDGET_KNC1TP);
@@ -1260,6 +1275,7 @@ static struct pci_device_id pci_tbl[] = {
1260 MAKE_EXTENSION_PCI(kncxs, 0x1894, 0x0016), 1275 MAKE_EXTENSION_PCI(kncxs, 0x1894, 0x0016),
1261 MAKE_EXTENSION_PCI(satewpls, 0x1894, 0x001e), 1276 MAKE_EXTENSION_PCI(satewpls, 0x1894, 0x001e),
1262 MAKE_EXTENSION_PCI(satewpls1, 0x1894, 0x001a), 1277 MAKE_EXTENSION_PCI(satewpls1, 0x1894, 0x001a),
1278 MAKE_EXTENSION_PCI(satewplc, 0x1894, 0x002a),
1263 MAKE_EXTENSION_PCI(knc1c, 0x1894, 0x0020), 1279 MAKE_EXTENSION_PCI(knc1c, 0x1894, 0x0020),
1264 MAKE_EXTENSION_PCI(knc1cp, 0x1894, 0x0021), 1280 MAKE_EXTENSION_PCI(knc1cp, 0x1894, 0x0021),
1265 MAKE_EXTENSION_PCI(knc1t, 0x1894, 0x0030), 1281 MAKE_EXTENSION_PCI(knc1t, 0x1894, 0x0030),
@@ -1277,7 +1293,7 @@ MODULE_DEVICE_TABLE(pci, pci_tbl);
1277 1293
1278static struct saa7146_extension budget_extension = { 1294static struct saa7146_extension budget_extension = {
1279 .name = "budget_av", 1295 .name = "budget_av",
1280 .flags = SAA7146_I2C_SHORT_DELAY, 1296 .flags = SAA7146_USE_I2C_IRQ,
1281 1297
1282 .pci_tbl = pci_tbl, 1298 .pci_tbl = pci_tbl,
1283 1299
diff --git a/drivers/media/dvb/ttpci/budget-ci.c b/drivers/media/dvb/ttpci/budget-ci.c
index cd5ec489af1c..f2066b47baee 100644
--- a/drivers/media/dvb/ttpci/budget-ci.c
+++ b/drivers/media/dvb/ttpci/budget-ci.c
@@ -37,6 +37,7 @@
37#include <linux/interrupt.h> 37#include <linux/interrupt.h>
38#include <linux/input.h> 38#include <linux/input.h>
39#include <linux/spinlock.h> 39#include <linux/spinlock.h>
40#include <media/ir-common.h>
40 41
41#include "dvb_ca_en50221.h" 42#include "dvb_ca_en50221.h"
42#include "stv0299.h" 43#include "stv0299.h"
@@ -72,162 +73,218 @@
72#define SLOTSTATUS_READY 8 73#define SLOTSTATUS_READY 8
73#define SLOTSTATUS_OCCUPIED (SLOTSTATUS_PRESENT|SLOTSTATUS_RESET|SLOTSTATUS_READY) 74#define SLOTSTATUS_OCCUPIED (SLOTSTATUS_PRESENT|SLOTSTATUS_RESET|SLOTSTATUS_READY)
74 75
76/* Milliseconds during which key presses are regarded as key repeat and during
77 * which the debounce logic is active
78 */
79#define IR_REPEAT_TIMEOUT 350
80
81/* RC5 device wildcard */
82#define IR_DEVICE_ANY 255
83
84/* Some remotes sends multiple sequences per keypress (e.g. Zenith sends two),
85 * this setting allows the superflous sequences to be ignored
86 */
87static int debounce = 0;
88module_param(debounce, int, 0644);
89MODULE_PARM_DESC(debounce, "ignore repeated IR sequences (default: 0 = ignore no sequences)");
90
91static int rc5_device = -1;
92module_param(rc5_device, int, 0644);
93MODULE_PARM_DESC(rc5_device, "only IR commands to given RC5 device (device = 0 - 31, any device = 255, default: autodetect)");
94
95static int ir_debug = 0;
96module_param(ir_debug, int, 0644);
97MODULE_PARM_DESC(ir_debug, "enable debugging information for IR decoding");
98
99struct budget_ci_ir {
100 struct input_dev *dev;
101 struct tasklet_struct msp430_irq_tasklet;
102 char name[72]; /* 40 + 32 for (struct saa7146_dev).name */
103 char phys[32];
104 struct ir_input_state state;
105 int rc5_device;
106};
107
75struct budget_ci { 108struct budget_ci {
76 struct budget budget; 109 struct budget budget;
77 struct input_dev *input_dev;
78 struct tasklet_struct msp430_irq_tasklet;
79 struct tasklet_struct ciintf_irq_tasklet; 110 struct tasklet_struct ciintf_irq_tasklet;
80 int slot_status; 111 int slot_status;
81 int ci_irq; 112 int ci_irq;
82 struct dvb_ca_en50221 ca; 113 struct dvb_ca_en50221 ca;
83 char ir_dev_name[50]; 114 struct budget_ci_ir ir;
84 u8 tuner_pll_address; /* used for philips_tdm1316l configs */ 115 u8 tuner_pll_address; /* used for philips_tdm1316l configs */
85}; 116};
86 117
87/* from reading the following remotes: 118static void msp430_ir_keyup(unsigned long data)
88 Zenith Universal 7 / TV Mode 807 / VCR Mode 837
89 Hauppauge (from NOVA-CI-s box product)
90 i've taken a "middle of the road" approach and note the differences
91*/
92static u16 key_map[64] = {
93 /* 0x0X */
94 KEY_0, KEY_1, KEY_2, KEY_3, KEY_4, KEY_5, KEY_6, KEY_7, KEY_8,
95 KEY_9,
96 KEY_ENTER,
97 KEY_RED,
98 KEY_POWER, /* RADIO on Hauppauge */
99 KEY_MUTE,
100 0,
101 KEY_A, /* TV on Hauppauge */
102 /* 0x1X */
103 KEY_VOLUMEUP, KEY_VOLUMEDOWN,
104 0, 0,
105 KEY_B,
106 0, 0, 0, 0, 0, 0, 0,
107 KEY_UP, KEY_DOWN,
108 KEY_OPTION, /* RESERVED on Hauppauge */
109 KEY_BREAK,
110 /* 0x2X */
111 KEY_CHANNELUP, KEY_CHANNELDOWN,
112 KEY_PREVIOUS, /* Prev. Ch on Zenith, SOURCE on Hauppauge */
113 0, KEY_RESTART, KEY_OK,
114 KEY_CYCLEWINDOWS, /* MINIMIZE on Hauppauge */
115 0,
116 KEY_ENTER, /* VCR mode on Zenith */
117 KEY_PAUSE,
118 0,
119 KEY_RIGHT, KEY_LEFT,
120 0,
121 KEY_MENU, /* FULL SCREEN on Hauppauge */
122 0,
123 /* 0x3X */
124 KEY_SLOW,
125 KEY_PREVIOUS, /* VCR mode on Zenith */
126 KEY_REWIND,
127 0,
128 KEY_FASTFORWARD,
129 KEY_PLAY, KEY_STOP,
130 KEY_RECORD,
131 KEY_TUNER, /* TV/VCR on Zenith */
132 0,
133 KEY_C,
134 0,
135 KEY_EXIT,
136 KEY_POWER2,
137 KEY_TUNER, /* VCR mode on Zenith */
138 0,
139};
140
141static void msp430_ir_debounce(unsigned long data)
142{ 119{
143 struct input_dev *dev = (struct input_dev *) data; 120 struct budget_ci_ir *ir = (struct budget_ci_ir *) data;
144 121 ir_input_nokey(ir->dev, &ir->state);
145 if (dev->rep[0] == 0 || dev->rep[0] == ~0) {
146 input_event(dev, EV_KEY, key_map[dev->repeat_key], !!0);
147 return;
148 }
149
150 dev->rep[0] = 0;
151 dev->timer.expires = jiffies + HZ * 350 / 1000;
152 add_timer(&dev->timer);
153 input_event(dev, EV_KEY, key_map[dev->repeat_key], 2); /* REPEAT */
154} 122}
155 123
156static void msp430_ir_interrupt(unsigned long data) 124static void msp430_ir_interrupt(unsigned long data)
157{ 125{
158 struct budget_ci *budget_ci = (struct budget_ci *) data; 126 struct budget_ci *budget_ci = (struct budget_ci *) data;
159 struct input_dev *dev = budget_ci->input_dev; 127 struct input_dev *dev = budget_ci->ir.dev;
160 unsigned int code = 128 static int bounces = 0;
161 ttpci_budget_debiread(&budget_ci->budget, DEBINOSWAP, DEBIADDR_IR, 2, 1, 0) >> 8; 129 int device;
130 int toggle;
131 static int prev_toggle = -1;
132 static u32 ir_key;
133 u32 command = ttpci_budget_debiread(&budget_ci->budget, DEBINOSWAP, DEBIADDR_IR, 2, 1, 0) >> 8;
134
135 /*
136 * The msp430 chip can generate two different bytes, command and device
137 *
138 * type1: X1CCCCCC, C = command bits (0 - 63)
139 * type2: X0TDDDDD, D = device bits (0 - 31), T = RC5 toggle bit
140 *
141 * More than one command byte may be generated before the device byte
142 * Only when we have both, a correct keypress is generated
143 */
144
145 /* Is this a RC5 command byte? */
146 if (command & 0x40) {
147 if (ir_debug)
148 printk("budget_ci: received command byte 0x%02x\n", command);
149 ir_key = command & 0x3f;
150 return;
151 }
162 152
163 if (code & 0x40) { 153 /* It's a RC5 device byte */
164 code &= 0x3f; 154 if (ir_debug)
155 printk("budget_ci: received device byte 0x%02x\n", command);
156 device = command & 0x1f;
157 toggle = command & 0x20;
165 158
166 if (timer_pending(&dev->timer)) { 159 if (budget_ci->ir.rc5_device != IR_DEVICE_ANY && budget_ci->ir.rc5_device != device)
167 if (code == dev->repeat_key) { 160 return;
168 ++dev->rep[0];
169 return;
170 }
171 del_timer(&dev->timer);
172 input_event(dev, EV_KEY, key_map[dev->repeat_key], !!0);
173 }
174 161
175 if (!key_map[code]) { 162 /* Ignore repeated key sequences if requested */
176 printk("DVB (%s): no key for %02x!\n", __FUNCTION__, code); 163 if (toggle == prev_toggle && ir_key == dev->repeat_key &&
177 return; 164 bounces > 0 && timer_pending(&dev->timer)) {
178 } 165 if (ir_debug)
166 printk("budget_ci: debounce logic ignored IR command\n");
167 bounces--;
168 return;
169 }
170 prev_toggle = toggle;
179 171
180 /* initialize debounce and repeat */ 172 /* Are we still waiting for a keyup event? */
181 dev->repeat_key = code; 173 if (del_timer(&dev->timer))
182 /* Zenith remote _always_ sends 2 sequences */ 174 ir_input_nokey(dev, &budget_ci->ir.state);
183 dev->rep[0] = ~0; 175
184 /* 350 milliseconds */ 176 /* Generate keypress */
185 dev->timer.expires = jiffies + HZ * 350 / 1000; 177 if (ir_debug)
186 /* MAKE */ 178 printk("budget_ci: generating keypress 0x%02x\n", ir_key);
187 input_event(dev, EV_KEY, key_map[code], !0); 179 ir_input_keydown(dev, &budget_ci->ir.state, ir_key, (ir_key & (command << 8)));
188 add_timer(&dev->timer); 180
181 /* Do we want to delay the keyup event? */
182 if (debounce) {
183 bounces = debounce;
184 mod_timer(&dev->timer, jiffies + msecs_to_jiffies(IR_REPEAT_TIMEOUT));
185 } else {
186 ir_input_nokey(dev, &budget_ci->ir.state);
189 } 187 }
190} 188}
191 189
192static int msp430_ir_init(struct budget_ci *budget_ci) 190static int msp430_ir_init(struct budget_ci *budget_ci)
193{ 191{
194 struct saa7146_dev *saa = budget_ci->budget.dev; 192 struct saa7146_dev *saa = budget_ci->budget.dev;
195 struct input_dev *input_dev; 193 struct input_dev *input_dev = budget_ci->ir.dev;
196 int i; 194 int error;
195
196 budget_ci->ir.dev = input_dev = input_allocate_device();
197 if (!input_dev) {
198 printk(KERN_ERR "budget_ci: IR interface initialisation failed\n");
199 error = -ENOMEM;
200 goto out1;
201 }
202
203 snprintf(budget_ci->ir.name, sizeof(budget_ci->ir.name),
204 "Budget-CI dvb ir receiver %s", saa->name);
205 snprintf(budget_ci->ir.phys, sizeof(budget_ci->ir.phys),
206 "pci-%s/ir0", pci_name(saa->pci));
197 207
198 budget_ci->input_dev = input_dev = input_allocate_device(); 208 input_dev->name = budget_ci->ir.name;
199 if (!input_dev)
200 return -ENOMEM;
201 209
202 sprintf(budget_ci->ir_dev_name, "Budget-CI dvb ir receiver %s", saa->name); 210 input_dev->phys = budget_ci->ir.phys;
211 input_dev->id.bustype = BUS_PCI;
212 input_dev->id.version = 1;
213 if (saa->pci->subsystem_vendor) {
214 input_dev->id.vendor = saa->pci->subsystem_vendor;
215 input_dev->id.product = saa->pci->subsystem_device;
216 } else {
217 input_dev->id.vendor = saa->pci->vendor;
218 input_dev->id.product = saa->pci->device;
219 }
220 input_dev->cdev.dev = &saa->pci->dev;
203 221
204 input_dev->name = budget_ci->ir_dev_name; 222 /* Select keymap and address */
223 switch (budget_ci->budget.dev->pci->subsystem_device) {
224 case 0x100c:
225 case 0x100f:
226 case 0x1010:
227 case 0x1011:
228 case 0x1012:
229 case 0x1017:
230 /* The hauppauge keymap is a superset of these remotes */
231 ir_input_init(input_dev, &budget_ci->ir.state,
232 IR_TYPE_RC5, ir_codes_hauppauge_new);
233
234 if (rc5_device < 0)
235 budget_ci->ir.rc5_device = 0x1f;
236 else
237 budget_ci->ir.rc5_device = rc5_device;
238 break;
239 default:
240 /* unknown remote */
241 ir_input_init(input_dev, &budget_ci->ir.state,
242 IR_TYPE_RC5, ir_codes_budget_ci_old);
243
244 if (rc5_device < 0)
245 budget_ci->ir.rc5_device = IR_DEVICE_ANY;
246 else
247 budget_ci->ir.rc5_device = rc5_device;
248 break;
249 }
205 250
206 set_bit(EV_KEY, input_dev->evbit); 251 /* initialise the key-up debounce timeout handler */
207 for (i = 0; i < ARRAY_SIZE(key_map); i++) 252 input_dev->timer.function = msp430_ir_keyup;
208 if (key_map[i]) 253 input_dev->timer.data = (unsigned long) &budget_ci->ir;
209 set_bit(key_map[i], input_dev->keybit);
210 254
211 input_register_device(budget_ci->input_dev); 255 error = input_register_device(input_dev);
256 if (error) {
257 printk(KERN_ERR "budget_ci: could not init driver for IR device (code %d)\n", error);
258 goto out2;
259 }
212 260
213 input_dev->timer.function = msp430_ir_debounce; 261 tasklet_init(&budget_ci->ir.msp430_irq_tasklet, msp430_ir_interrupt,
262 (unsigned long) budget_ci);
214 263
215 saa7146_write(saa, IER, saa7146_read(saa, IER) | MASK_06); 264 SAA7146_IER_ENABLE(saa, MASK_06);
216 saa7146_setgpio(saa, 3, SAA7146_GPIO_IRQHI); 265 saa7146_setgpio(saa, 3, SAA7146_GPIO_IRQHI);
217 266
218 return 0; 267 return 0;
268
269out2:
270 input_free_device(input_dev);
271out1:
272 return error;
219} 273}
220 274
221static void msp430_ir_deinit(struct budget_ci *budget_ci) 275static void msp430_ir_deinit(struct budget_ci *budget_ci)
222{ 276{
223 struct saa7146_dev *saa = budget_ci->budget.dev; 277 struct saa7146_dev *saa = budget_ci->budget.dev;
224 struct input_dev *dev = budget_ci->input_dev; 278 struct input_dev *dev = budget_ci->ir.dev;
225 279
226 saa7146_write(saa, IER, saa7146_read(saa, IER) & ~MASK_06); 280 SAA7146_IER_DISABLE(saa, MASK_06);
227 saa7146_setgpio(saa, 3, SAA7146_GPIO_INPUT); 281 saa7146_setgpio(saa, 3, SAA7146_GPIO_INPUT);
282 tasklet_kill(&budget_ci->ir.msp430_irq_tasklet);
228 283
229 if (del_timer(&dev->timer)) 284 if (del_timer(&dev->timer)) {
230 input_event(dev, EV_KEY, key_map[dev->repeat_key], !!0); 285 ir_input_nokey(dev, &budget_ci->ir.state);
286 input_sync(dev);
287 }
231 288
232 input_unregister_device(dev); 289 input_unregister_device(dev);
233} 290}
@@ -428,7 +485,7 @@ static int ciintf_init(struct budget_ci *budget_ci)
428 memset(&budget_ci->ca, 0, sizeof(struct dvb_ca_en50221)); 485 memset(&budget_ci->ca, 0, sizeof(struct dvb_ca_en50221));
429 486
430 // enable DEBI pins 487 // enable DEBI pins
431 saa7146_write(saa, MC1, saa7146_read(saa, MC1) | (0x800 << 16) | 0x800); 488 saa7146_write(saa, MC1, MASK_27 | MASK_11);
432 489
433 // test if it is there 490 // test if it is there
434 ci_version = ttpci_budget_debiread(&budget_ci->budget, DEBICICTL, DEBIADDR_CIVERSION, 1, 1, 0); 491 ci_version = ttpci_budget_debiread(&budget_ci->budget, DEBICICTL, DEBIADDR_CIVERSION, 1, 1, 0);
@@ -480,7 +537,7 @@ static int ciintf_init(struct budget_ci *budget_ci)
480 } else { 537 } else {
481 saa7146_setgpio(saa, 0, SAA7146_GPIO_IRQHI); 538 saa7146_setgpio(saa, 0, SAA7146_GPIO_IRQHI);
482 } 539 }
483 saa7146_write(saa, IER, saa7146_read(saa, IER) | MASK_03); 540 SAA7146_IER_ENABLE(saa, MASK_03);
484 } 541 }
485 542
486 // enable interface 543 // enable interface
@@ -502,7 +559,7 @@ static int ciintf_init(struct budget_ci *budget_ci)
502 return 0; 559 return 0;
503 560
504error: 561error:
505 saa7146_write(saa, MC1, saa7146_read(saa, MC1) | (0x800 << 16)); 562 saa7146_write(saa, MC1, MASK_27);
506 return result; 563 return result;
507} 564}
508 565
@@ -512,7 +569,7 @@ static void ciintf_deinit(struct budget_ci *budget_ci)
512 569
513 // disable CI interrupts 570 // disable CI interrupts
514 if (budget_ci->ci_irq) { 571 if (budget_ci->ci_irq) {
515 saa7146_write(saa, IER, saa7146_read(saa, IER) & ~MASK_03); 572 SAA7146_IER_DISABLE(saa, MASK_03);
516 saa7146_setgpio(saa, 0, SAA7146_GPIO_INPUT); 573 saa7146_setgpio(saa, 0, SAA7146_GPIO_INPUT);
517 tasklet_kill(&budget_ci->ciintf_irq_tasklet); 574 tasklet_kill(&budget_ci->ciintf_irq_tasklet);
518 } 575 }
@@ -530,7 +587,7 @@ static void ciintf_deinit(struct budget_ci *budget_ci)
530 dvb_ca_en50221_release(&budget_ci->ca); 587 dvb_ca_en50221_release(&budget_ci->ca);
531 588
532 // disable DEBI pins 589 // disable DEBI pins
533 saa7146_write(saa, MC1, saa7146_read(saa, MC1) | (0x800 << 16)); 590 saa7146_write(saa, MC1, MASK_27);
534} 591}
535 592
536static void budget_ci_irq(struct saa7146_dev *dev, u32 * isr) 593static void budget_ci_irq(struct saa7146_dev *dev, u32 * isr)
@@ -540,7 +597,7 @@ static void budget_ci_irq(struct saa7146_dev *dev, u32 * isr)
540 dprintk(8, "dev: %p, budget_ci: %p\n", dev, budget_ci); 597 dprintk(8, "dev: %p, budget_ci: %p\n", dev, budget_ci);
541 598
542 if (*isr & MASK_06) 599 if (*isr & MASK_06)
543 tasklet_schedule(&budget_ci->msp430_irq_tasklet); 600 tasklet_schedule(&budget_ci->ir.msp430_irq_tasklet);
544 601
545 if (*isr & MASK_10) 602 if (*isr & MASK_10)
546 ttpci_budget_irq10_handler(dev, isr); 603 ttpci_budget_irq10_handler(dev, isr);
@@ -835,7 +892,7 @@ static int dvbc_philips_tdm1316l_tuner_set_params(struct dvb_frontend *fe, struc
835 band = 1; 892 band = 1;
836 } else if (tuner_frequency < 200000000) { 893 } else if (tuner_frequency < 200000000) {
837 cp = 6; 894 cp = 6;
838 band = 1; 895 band = 2;
839 } else if (tuner_frequency < 290000000) { 896 } else if (tuner_frequency < 290000000) {
840 cp = 3; 897 cp = 3;
841 band = 2; 898 band = 2;
@@ -1083,24 +1140,23 @@ static int budget_ci_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio
1083 struct budget_ci *budget_ci; 1140 struct budget_ci *budget_ci;
1084 int err; 1141 int err;
1085 1142
1086 if (!(budget_ci = kmalloc(sizeof(struct budget_ci), GFP_KERNEL))) 1143 budget_ci = kzalloc(sizeof(struct budget_ci), GFP_KERNEL);
1087 return -ENOMEM; 1144 if (!budget_ci) {
1145 err = -ENOMEM;
1146 goto out1;
1147 }
1088 1148
1089 dprintk(2, "budget_ci: %p\n", budget_ci); 1149 dprintk(2, "budget_ci: %p\n", budget_ci);
1090 1150
1091 budget_ci->budget.ci_present = 0;
1092
1093 dev->ext_priv = budget_ci; 1151 dev->ext_priv = budget_ci;
1094 1152
1095 if ((err = ttpci_budget_init(&budget_ci->budget, dev, info, THIS_MODULE))) { 1153 err = ttpci_budget_init(&budget_ci->budget, dev, info, THIS_MODULE);
1096 kfree(budget_ci); 1154 if (err)
1097 return err; 1155 goto out2;
1098 }
1099
1100 tasklet_init(&budget_ci->msp430_irq_tasklet, msp430_ir_interrupt,
1101 (unsigned long) budget_ci);
1102 1156
1103 msp430_ir_init(budget_ci); 1157 err = msp430_ir_init(budget_ci);
1158 if (err)
1159 goto out3;
1104 1160
1105 ciintf_init(budget_ci); 1161 ciintf_init(budget_ci);
1106 1162
@@ -1110,6 +1166,13 @@ static int budget_ci_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio
1110 ttpci_budget_init_hooks(&budget_ci->budget); 1166 ttpci_budget_init_hooks(&budget_ci->budget);
1111 1167
1112 return 0; 1168 return 0;
1169
1170out3:
1171 ttpci_budget_deinit(&budget_ci->budget);
1172out2:
1173 kfree(budget_ci);
1174out1:
1175 return err;
1113} 1176}
1114 1177
1115static int budget_ci_detach(struct saa7146_dev *dev) 1178static int budget_ci_detach(struct saa7146_dev *dev)
@@ -1120,16 +1183,13 @@ static int budget_ci_detach(struct saa7146_dev *dev)
1120 1183
1121 if (budget_ci->budget.ci_present) 1184 if (budget_ci->budget.ci_present)
1122 ciintf_deinit(budget_ci); 1185 ciintf_deinit(budget_ci);
1186 msp430_ir_deinit(budget_ci);
1123 if (budget_ci->budget.dvb_frontend) { 1187 if (budget_ci->budget.dvb_frontend) {
1124 dvb_unregister_frontend(budget_ci->budget.dvb_frontend); 1188 dvb_unregister_frontend(budget_ci->budget.dvb_frontend);
1125 dvb_frontend_detach(budget_ci->budget.dvb_frontend); 1189 dvb_frontend_detach(budget_ci->budget.dvb_frontend);
1126 } 1190 }
1127 err = ttpci_budget_deinit(&budget_ci->budget); 1191 err = ttpci_budget_deinit(&budget_ci->budget);
1128 1192
1129 tasklet_kill(&budget_ci->msp430_irq_tasklet);
1130
1131 msp430_ir_deinit(budget_ci);
1132
1133 // disable frontend and CI interface 1193 // disable frontend and CI interface
1134 saa7146_setgpio(saa, 2, SAA7146_GPIO_INPUT); 1194 saa7146_setgpio(saa, 2, SAA7146_GPIO_INPUT);
1135 1195
@@ -1162,7 +1222,7 @@ MODULE_DEVICE_TABLE(pci, pci_tbl);
1162 1222
1163static struct saa7146_extension budget_extension = { 1223static struct saa7146_extension budget_extension = {
1164 .name = "budget_ci dvb", 1224 .name = "budget_ci dvb",
1165 .flags = SAA7146_I2C_SHORT_DELAY, 1225 .flags = SAA7146_USE_I2C_IRQ,
1166 1226
1167 .module = THIS_MODULE, 1227 .module = THIS_MODULE,
1168 .pci_tbl = &pci_tbl[0], 1228 .pci_tbl = &pci_tbl[0],
diff --git a/drivers/media/dvb/ttpci/budget.c b/drivers/media/dvb/ttpci/budget.c
index 56f1c80defc6..9268a82bada6 100644
--- a/drivers/media/dvb/ttpci/budget.c
+++ b/drivers/media/dvb/ttpci/budget.c
@@ -555,7 +555,7 @@ MODULE_DEVICE_TABLE(pci, pci_tbl);
555 555
556static struct saa7146_extension budget_extension = { 556static struct saa7146_extension budget_extension = {
557 .name = "budget dvb", 557 .name = "budget dvb",
558 .flags = SAA7146_I2C_SHORT_DELAY, 558 .flags = SAA7146_USE_I2C_IRQ,
559 559
560 .module = THIS_MODULE, 560 .module = THIS_MODULE,
561 .pci_tbl = pci_tbl, 561 .pci_tbl = pci_tbl,
diff --git a/drivers/media/dvb/ttusb-dec/ttusb_dec.c b/drivers/media/dvb/ttusb-dec/ttusb_dec.c
index 10b121ada833..bd6e7baae2ec 100644
--- a/drivers/media/dvb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/dvb/ttusb-dec/ttusb_dec.c
@@ -238,6 +238,7 @@ static void ttusb_dec_handle_irq( struct urb *urb)
238 * for now lets report each signal as a key down and up*/ 238 * for now lets report each signal as a key down and up*/
239 dprintk("%s:rc signal:%d\n", __FUNCTION__, buffer[4]); 239 dprintk("%s:rc signal:%d\n", __FUNCTION__, buffer[4]);
240 input_report_key(dec->rc_input_dev, rc_keys[buffer[4] - 1], 1); 240 input_report_key(dec->rc_input_dev, rc_keys[buffer[4] - 1], 1);
241 input_sync(dec->rc_input_dev);
241 input_report_key(dec->rc_input_dev, rc_keys[buffer[4] - 1], 0); 242 input_report_key(dec->rc_input_dev, rc_keys[buffer[4] - 1], 0);
242 input_sync(dec->rc_input_dev); 243 input_sync(dec->rc_input_dev);
243 } 244 }
@@ -1187,11 +1188,12 @@ static int ttusb_init_rc( struct ttusb_dec *dec)
1187 struct input_dev *input_dev; 1188 struct input_dev *input_dev;
1188 u8 b[] = { 0x00, 0x01 }; 1189 u8 b[] = { 0x00, 0x01 };
1189 int i; 1190 int i;
1191 int err;
1190 1192
1191 usb_make_path(dec->udev, dec->rc_phys, sizeof(dec->rc_phys)); 1193 usb_make_path(dec->udev, dec->rc_phys, sizeof(dec->rc_phys));
1192 strlcpy(dec->rc_phys, "/input0", sizeof(dec->rc_phys)); 1194 strlcpy(dec->rc_phys, "/input0", sizeof(dec->rc_phys));
1193 1195
1194 dec->rc_input_dev = input_dev = input_allocate_device(); 1196 input_dev = input_allocate_device();
1195 if (!input_dev) 1197 if (!input_dev)
1196 return -ENOMEM; 1198 return -ENOMEM;
1197 1199
@@ -1205,8 +1207,13 @@ static int ttusb_init_rc( struct ttusb_dec *dec)
1205 for (i = 0; i < ARRAY_SIZE(rc_keys); i++) 1207 for (i = 0; i < ARRAY_SIZE(rc_keys); i++)
1206 set_bit(rc_keys[i], input_dev->keybit); 1208 set_bit(rc_keys[i], input_dev->keybit);
1207 1209
1208 input_register_device(input_dev); 1210 err = input_register_device(input_dev);
1211 if (err) {
1212 input_free_device(input_dev);
1213 return err;
1214 }
1209 1215
1216 dec->rc_input_dev = input_dev;
1210 if (usb_submit_urb(dec->irq_urb, GFP_KERNEL)) 1217 if (usb_submit_urb(dec->irq_urb, GFP_KERNEL))
1211 printk("%s: usb_submit_urb failed\n",__FUNCTION__); 1218 printk("%s: usb_submit_urb failed\n",__FUNCTION__);
1212 /* enable irq pipe */ 1219 /* enable irq pipe */
diff --git a/drivers/media/dvb/ttusb-dec/ttusbdecfe.c b/drivers/media/dvb/ttusb-dec/ttusbdecfe.c
index 42f39a89bc4d..a6fb1d6a7b5d 100644
--- a/drivers/media/dvb/ttusb-dec/ttusbdecfe.c
+++ b/drivers/media/dvb/ttusb-dec/ttusbdecfe.c
@@ -195,7 +195,7 @@ struct dvb_frontend* ttusbdecfe_dvbt_attach(const struct ttusbdecfe_config* conf
195 struct ttusbdecfe_state* state = NULL; 195 struct ttusbdecfe_state* state = NULL;
196 196
197 /* allocate memory for the internal state */ 197 /* allocate memory for the internal state */
198 state = (struct ttusbdecfe_state*) kmalloc(sizeof(struct ttusbdecfe_state), GFP_KERNEL); 198 state = kmalloc(sizeof(struct ttusbdecfe_state), GFP_KERNEL);
199 if (state == NULL) 199 if (state == NULL)
200 return NULL; 200 return NULL;
201 201
@@ -215,7 +215,7 @@ struct dvb_frontend* ttusbdecfe_dvbs_attach(const struct ttusbdecfe_config* conf
215 struct ttusbdecfe_state* state = NULL; 215 struct ttusbdecfe_state* state = NULL;
216 216
217 /* allocate memory for the internal state */ 217 /* allocate memory for the internal state */
218 state = (struct ttusbdecfe_state*) kmalloc(sizeof(struct ttusbdecfe_state), GFP_KERNEL); 218 state = kmalloc(sizeof(struct ttusbdecfe_state), GFP_KERNEL);
219 if (state == NULL) 219 if (state == NULL)
220 return NULL; 220 return NULL;
221 221
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index b8fde5cf4735..29a11c1db1b7 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -184,6 +184,14 @@ config VIDEO_KS0127
184 To compile this driver as a module, choose M here: the 184 To compile this driver as a module, choose M here: the
185 module will be called ks0127. 185 module will be called ks0127.
186 186
187config VIDEO_OV7670
188 tristate "OmniVision OV7670 sensor support"
189 depends on I2C && VIDEO_V4L2
190 ---help---
191 This is a Video4Linux2 sensor-level driver for the OmniVision
192 OV7670 VGA camera. It currently only works with the M88ALP01
193 controller.
194
187config VIDEO_SAA7110 195config VIDEO_SAA7110
188 tristate "Philips SAA7110 video decoder" 196 tristate "Philips SAA7110 video decoder"
189 depends on VIDEO_V4L1 && I2C 197 depends on VIDEO_V4L1 && I2C
@@ -567,18 +575,6 @@ config VIDEO_ZORAN_AVS6EYES
567 help 575 help
568 Support for the AverMedia 6 Eyes video surveillance card. 576 Support for the AverMedia 6 Eyes video surveillance card.
569 577
570config VIDEO_ZR36120
571 tristate "Zoran ZR36120/36125 Video For Linux"
572 depends on PCI && I2C && VIDEO_V4L1 && BROKEN
573 help
574 Support for ZR36120/ZR36125 based frame grabber/overlay boards.
575 This includes the Victor II, WaveWatcher, Video Wonder, Maxi-TV,
576 and Buster boards. Please read the material in
577 <file:Documentation/video4linux/zr36120.txt> for more information.
578
579 To compile this driver as a module, choose M here: the
580 module will be called zr36120.
581
582config VIDEO_MEYE 578config VIDEO_MEYE
583 tristate "Sony Vaio Picturebook Motion Eye Video For Linux" 579 tristate "Sony Vaio Picturebook Motion Eye Video For Linux"
584 depends on PCI && SONYPI && VIDEO_V4L1 580 depends on PCI && SONYPI && VIDEO_V4L1
@@ -670,6 +666,15 @@ config VIDEO_M32R_AR_M64278
670 To compile this driver as a module, choose M here: the 666 To compile this driver as a module, choose M here: the
671 module will be called arv. 667 module will be called arv.
672 668
669config VIDEO_CAFE_CCIC
670 tristate "Marvell 88ALP01 (Cafe) CMOS Camera Controller support"
671 depends on I2C && VIDEO_V4L2
672 select VIDEO_OV7670
673 ---help---
674 This is a video4linux2 driver for the Marvell 88ALP01 integrated
675 CMOS camera controller. This is the controller found on first-
676 generation OLPC systems.
677
673# 678#
674# USB Multimedia device configuration 679# USB Multimedia device configuration
675# 680#
@@ -681,6 +686,8 @@ source "drivers/media/video/pvrusb2/Kconfig"
681 686
682source "drivers/media/video/em28xx/Kconfig" 687source "drivers/media/video/em28xx/Kconfig"
683 688
689source "drivers/media/video/usbvision/Kconfig"
690
684source "drivers/media/video/usbvideo/Kconfig" 691source "drivers/media/video/usbvideo/Kconfig"
685 692
686source "drivers/media/video/et61x251/Kconfig" 693source "drivers/media/video/et61x251/Kconfig"
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index af57abce8a6e..9b1f3f06bb7c 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -2,7 +2,6 @@
2# Makefile for the video capture/playback device drivers. 2# Makefile for the video capture/playback device drivers.
3# 3#
4 4
5zoran-objs := zr36120.o zr36120_i2c.o zr36120_mem.o
6zr36067-objs := zoran_procfs.o zoran_device.o \ 5zr36067-objs := zoran_procfs.o zoran_device.o \
7 zoran_driver.o zoran_card.o 6 zoran_driver.o zoran_card.o
8tuner-objs := tuner-core.o tuner-types.o tuner-simple.o \ 7tuner-objs := tuner-core.o tuner-types.o tuner-simple.o \
@@ -23,7 +22,6 @@ obj-$(CONFIG_VIDEO_TDA7432) += tda7432.o
23obj-$(CONFIG_VIDEO_TDA9875) += tda9875.o 22obj-$(CONFIG_VIDEO_TDA9875) += tda9875.o
24obj-$(CONFIG_SOUND_TVMIXER) += tvmixer.o 23obj-$(CONFIG_SOUND_TVMIXER) += tvmixer.o
25 24
26obj-$(CONFIG_VIDEO_ZR36120) += zoran.o
27obj-$(CONFIG_VIDEO_SAA6588) += saa6588.o 25obj-$(CONFIG_VIDEO_SAA6588) += saa6588.o
28obj-$(CONFIG_VIDEO_SAA5246A) += saa5246a.o 26obj-$(CONFIG_VIDEO_SAA5246A) += saa5246a.o
29obj-$(CONFIG_VIDEO_SAA5249) += saa5249.o 27obj-$(CONFIG_VIDEO_SAA5249) += saa5249.o
@@ -64,6 +62,7 @@ obj-$(CONFIG_VIDEO_MEYE) += meye.o
64obj-$(CONFIG_VIDEO_SAA7134) += ir-kbd-i2c.o saa7134/ 62obj-$(CONFIG_VIDEO_SAA7134) += ir-kbd-i2c.o saa7134/
65obj-$(CONFIG_VIDEO_CX88) += cx88/ 63obj-$(CONFIG_VIDEO_CX88) += cx88/
66obj-$(CONFIG_VIDEO_EM28XX) += em28xx/ 64obj-$(CONFIG_VIDEO_EM28XX) += em28xx/
65obj-$(CONFIG_VIDEO_USBVISION) += usbvision/
67obj-$(CONFIG_VIDEO_TVP5150) += tvp5150.o 66obj-$(CONFIG_VIDEO_TVP5150) += tvp5150.o
68obj-$(CONFIG_VIDEO_PVRUSB2) += pvrusb2/ 67obj-$(CONFIG_VIDEO_PVRUSB2) += pvrusb2/
69obj-$(CONFIG_VIDEO_MSP3400) += msp3400.o 68obj-$(CONFIG_VIDEO_MSP3400) += msp3400.o
@@ -92,6 +91,9 @@ obj-$(CONFIG_VIDEO_UPD64031A) += upd64031a.o
92obj-$(CONFIG_VIDEO_UPD64083) += upd64083.o 91obj-$(CONFIG_VIDEO_UPD64083) += upd64083.o
93obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o 92obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o
94 93
94obj-$(CONFIG_VIDEO_CAFE_CCIC) += cafe_ccic.o
95obj-$(CONFIG_VIDEO_OV7670) += ov7670.o
96
95obj-$(CONFIG_USB_DABUSB) += dabusb.o 97obj-$(CONFIG_USB_DABUSB) += dabusb.o
96obj-$(CONFIG_USB_OV511) += ov511.o 98obj-$(CONFIG_USB_OV511) += ov511.o
97obj-$(CONFIG_USB_SE401) += se401.o 99obj-$(CONFIG_USB_SE401) += se401.o
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 6e1ddad9f0c1..3c8e4742dccc 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -1793,7 +1793,7 @@ static int bttv_common_ioctls(struct bttv *btv, unsigned int cmd, void *arg)
1793 memset(i,0,sizeof(*i)); 1793 memset(i,0,sizeof(*i));
1794 i->index = n; 1794 i->index = n;
1795 i->type = V4L2_INPUT_TYPE_CAMERA; 1795 i->type = V4L2_INPUT_TYPE_CAMERA;
1796 i->audioset = 0; 1796 i->audioset = 1;
1797 if (i->index == bttv_tvcards[btv->c.type].tuner) { 1797 if (i->index == bttv_tvcards[btv->c.type].tuner) {
1798 sprintf(i->name, "Television"); 1798 sprintf(i->name, "Television");
1799 i->type = V4L2_INPUT_TYPE_TUNER; 1799 i->type = V4L2_INPUT_TYPE_TUNER;
diff --git a/drivers/media/video/bt8xx/bttv-i2c.c b/drivers/media/video/bt8xx/bttv-i2c.c
index 70de6c96e201..62b873076e09 100644
--- a/drivers/media/video/bt8xx/bttv-i2c.c
+++ b/drivers/media/video/bt8xx/bttv-i2c.c
@@ -479,11 +479,7 @@ int __devexit fini_bttv_i2c(struct bttv *btv)
479 if (0 != btv->i2c_rc) 479 if (0 != btv->i2c_rc)
480 return 0; 480 return 0;
481 481
482 if (btv->use_i2c_hw) { 482 return i2c_del_adapter(&btv->c.i2c_adap);
483 return i2c_del_adapter(&btv->c.i2c_adap);
484 } else {
485 return i2c_bit_del_bus(&btv->c.i2c_adap);
486 }
487} 483}
488 484
489/* 485/*
diff --git a/drivers/media/video/bt8xx/bttv-input.c b/drivers/media/video/bt8xx/bttv-input.c
index 933d6db09acb..cbc012f71f52 100644
--- a/drivers/media/video/bt8xx/bttv-input.c
+++ b/drivers/media/video/bt8xx/bttv-input.c
@@ -259,24 +259,59 @@ static void bttv_rc5_timer_keyup(unsigned long data)
259 259
260/* ---------------------------------------------------------------------- */ 260/* ---------------------------------------------------------------------- */
261 261
262static void bttv_ir_start(struct bttv *btv, struct bttv_ir *ir)
263{
264 if (ir->polling) {
265 init_timer(&ir->timer);
266 ir->timer.function = bttv_input_timer;
267 ir->timer.data = (unsigned long)btv;
268 ir->timer.expires = jiffies + HZ;
269 add_timer(&ir->timer);
270 } else if (ir->rc5_gpio) {
271 /* set timer_end for code completion */
272 init_timer(&ir->timer_end);
273 ir->timer_end.function = bttv_rc5_timer_end;
274 ir->timer_end.data = (unsigned long)ir;
275
276 init_timer(&ir->timer_keyup);
277 ir->timer_keyup.function = bttv_rc5_timer_keyup;
278 ir->timer_keyup.data = (unsigned long)ir;
279 }
280}
281
282static void bttv_ir_stop(struct bttv *btv)
283{
284 if (btv->remote->polling) {
285 del_timer_sync(&btv->remote->timer);
286 flush_scheduled_work();
287 }
288
289 if (btv->remote->rc5_gpio) {
290 u32 gpio;
291
292 del_timer_sync(&btv->remote->timer_end);
293 flush_scheduled_work();
294
295 gpio = bttv_gpio_read(&btv->c);
296 bttv_gpio_write(&btv->c, gpio & ~(1 << 4));
297 }
298}
299
262int bttv_input_init(struct bttv *btv) 300int bttv_input_init(struct bttv *btv)
263{ 301{
264 struct bttv_ir *ir; 302 struct bttv_ir *ir;
265 IR_KEYTAB_TYPE *ir_codes = NULL; 303 IR_KEYTAB_TYPE *ir_codes = NULL;
266 struct input_dev *input_dev; 304 struct input_dev *input_dev;
267 int ir_type = IR_TYPE_OTHER; 305 int ir_type = IR_TYPE_OTHER;
306 int err = -ENOMEM;
268 307
269 if (!btv->has_remote) 308 if (!btv->has_remote)
270 return -ENODEV; 309 return -ENODEV;
271 310
272 ir = kzalloc(sizeof(*ir),GFP_KERNEL); 311 ir = kzalloc(sizeof(*ir),GFP_KERNEL);
273 input_dev = input_allocate_device(); 312 input_dev = input_allocate_device();
274 if (!ir || !input_dev) { 313 if (!ir || !input_dev)
275 kfree(ir); 314 goto err_out_free;
276 input_free_device(input_dev);
277 return -ENOMEM;
278 }
279 memset(ir,0,sizeof(*ir));
280 315
281 /* detect & configure */ 316 /* detect & configure */
282 switch (btv->c.type) { 317 switch (btv->c.type) {
@@ -348,10 +383,9 @@ int bttv_input_init(struct bttv *btv)
348 break; 383 break;
349 } 384 }
350 if (NULL == ir_codes) { 385 if (NULL == ir_codes) {
351 dprintk(KERN_INFO "Ooops: IR config error [card=%d]\n",btv->c.type); 386 dprintk(KERN_INFO "Ooops: IR config error [card=%d]\n", btv->c.type);
352 kfree(ir); 387 err = -ENODEV;
353 input_free_device(input_dev); 388 goto err_out_free;
354 return -ENODEV;
355 } 389 }
356 390
357 if (ir->rc5_gpio) { 391 if (ir->rc5_gpio) {
@@ -389,32 +423,26 @@ int bttv_input_init(struct bttv *btv)
389 input_dev->cdev.dev = &btv->c.pci->dev; 423 input_dev->cdev.dev = &btv->c.pci->dev;
390 424
391 btv->remote = ir; 425 btv->remote = ir;
392 if (ir->polling) { 426 bttv_ir_start(btv, ir);
393 init_timer(&ir->timer);
394 ir->timer.function = bttv_input_timer;
395 ir->timer.data = (unsigned long)btv;
396 ir->timer.expires = jiffies + HZ;
397 add_timer(&ir->timer);
398 } else if (ir->rc5_gpio) {
399 /* set timer_end for code completion */
400 init_timer(&ir->timer_end);
401 ir->timer_end.function = bttv_rc5_timer_end;
402 ir->timer_end.data = (unsigned long)ir;
403
404 init_timer(&ir->timer_keyup);
405 ir->timer_keyup.function = bttv_rc5_timer_keyup;
406 ir->timer_keyup.data = (unsigned long)ir;
407 }
408 427
409 /* all done */ 428 /* all done */
410 input_register_device(btv->remote->dev); 429 err = input_register_device(btv->remote->dev);
411 printk(DEVNAME ": %s detected at %s\n",ir->name,ir->phys); 430 if (err)
431 goto err_out_stop;
412 432
413 /* the remote isn't as bouncy as a keyboard */ 433 /* the remote isn't as bouncy as a keyboard */
414 ir->dev->rep[REP_DELAY] = repeat_delay; 434 ir->dev->rep[REP_DELAY] = repeat_delay;
415 ir->dev->rep[REP_PERIOD] = repeat_period; 435 ir->dev->rep[REP_PERIOD] = repeat_period;
416 436
417 return 0; 437 return 0;
438
439 err_out_stop:
440 bttv_ir_stop(btv);
441 btv->remote = NULL;
442 err_out_free:
443 input_free_device(input_dev);
444 kfree(ir);
445 return err;
418} 446}
419 447
420void bttv_input_fini(struct bttv *btv) 448void bttv_input_fini(struct bttv *btv)
@@ -422,22 +450,7 @@ void bttv_input_fini(struct bttv *btv)
422 if (btv->remote == NULL) 450 if (btv->remote == NULL)
423 return; 451 return;
424 452
425 if (btv->remote->polling) { 453 bttv_ir_stop(btv);
426 del_timer_sync(&btv->remote->timer);
427 flush_scheduled_work();
428 }
429
430
431 if (btv->remote->rc5_gpio) {
432 u32 gpio;
433
434 del_timer_sync(&btv->remote->timer_end);
435 flush_scheduled_work();
436
437 gpio = bttv_gpio_read(&btv->c);
438 bttv_gpio_write(&btv->c, gpio & ~(1 << 4));
439 }
440
441 input_unregister_device(btv->remote->dev); 454 input_unregister_device(btv->remote->dev);
442 kfree(btv->remote); 455 kfree(btv->remote);
443 btv->remote = NULL; 456 btv->remote = NULL;
diff --git a/drivers/media/video/cafe_ccic-regs.h b/drivers/media/video/cafe_ccic-regs.h
new file mode 100644
index 000000000000..b2c22a0d6643
--- /dev/null
+++ b/drivers/media/video/cafe_ccic-regs.h
@@ -0,0 +1,160 @@
1/*
2 * Register definitions for the m88alp01 camera interface. Offsets in bytes
3 * as given in the spec.
4 *
5 * Copyright 2006 One Laptop Per Child Association, Inc.
6 *
7 * Written by Jonathan Corbet, corbet@lwn.net.
8 *
9 * This file may be distributed under the terms of the GNU General
10 * Public License, version 2.
11 */
12#define REG_Y0BAR 0x00
13#define REG_Y1BAR 0x04
14#define REG_Y2BAR 0x08
15/* ... */
16
17#define REG_IMGPITCH 0x24 /* Image pitch register */
18#define IMGP_YP_SHFT 2 /* Y pitch params */
19#define IMGP_YP_MASK 0x00003ffc /* Y pitch field */
20#define IMGP_UVP_SHFT 18 /* UV pitch (planar) */
21#define IMGP_UVP_MASK 0x3ffc0000
22#define REG_IRQSTATRAW 0x28 /* RAW IRQ Status */
23#define IRQ_EOF0 0x00000001 /* End of frame 0 */
24#define IRQ_EOF1 0x00000002 /* End of frame 1 */
25#define IRQ_EOF2 0x00000004 /* End of frame 2 */
26#define IRQ_SOF0 0x00000008 /* Start of frame 0 */
27#define IRQ_SOF1 0x00000010 /* Start of frame 1 */
28#define IRQ_SOF2 0x00000020 /* Start of frame 2 */
29#define IRQ_OVERFLOW 0x00000040 /* FIFO overflow */
30#define IRQ_TWSIW 0x00010000 /* TWSI (smbus) write */
31#define IRQ_TWSIR 0x00020000 /* TWSI read */
32#define IRQ_TWSIE 0x00040000 /* TWSI error */
33#define TWSIIRQS (IRQ_TWSIW|IRQ_TWSIR|IRQ_TWSIE)
34#define FRAMEIRQS (IRQ_EOF0|IRQ_EOF1|IRQ_EOF2|IRQ_SOF0|IRQ_SOF1|IRQ_SOF2)
35#define ALLIRQS (TWSIIRQS|FRAMEIRQS|IRQ_OVERFLOW)
36#define REG_IRQMASK 0x2c /* IRQ mask - same bits as IRQSTAT */
37#define REG_IRQSTAT 0x30 /* IRQ status / clear */
38
39#define REG_IMGSIZE 0x34 /* Image size */
40#define IMGSZ_V_MASK 0x1fff0000
41#define IMGSZ_V_SHIFT 16
42#define IMGSZ_H_MASK 0x00003fff
43#define REG_IMGOFFSET 0x38 /* IMage offset */
44
45#define REG_CTRL0 0x3c /* Control 0 */
46#define C0_ENABLE 0x00000001 /* Makes the whole thing go */
47
48/* Mask for all the format bits */
49#define C0_DF_MASK 0x00fffffc /* Bits 2-23 */
50
51/* RGB ordering */
52#define C0_RGB4_RGBX 0x00000000
53#define C0_RGB4_XRGB 0x00000004
54#define C0_RGB4_BGRX 0x00000008
55#define C0_RGB4_XBGR 0x0000000c
56#define C0_RGB5_RGGB 0x00000000
57#define C0_RGB5_GRBG 0x00000004
58#define C0_RGB5_GBRG 0x00000008
59#define C0_RGB5_BGGR 0x0000000c
60
61/* Spec has two fields for DIN and DOUT, but they must match, so
62 combine them here. */
63#define C0_DF_YUV 0x00000000 /* Data is YUV */
64#define C0_DF_RGB 0x000000a0 /* ... RGB */
65#define C0_DF_BAYER 0x00000140 /* ... Bayer */
66/* 8-8-8 must be missing from the below - ask */
67#define C0_RGBF_565 0x00000000
68#define C0_RGBF_444 0x00000800
69#define C0_RGB_BGR 0x00001000 /* Blue comes first */
70#define C0_YUV_PLANAR 0x00000000 /* YUV 422 planar format */
71#define C0_YUV_PACKED 0x00008000 /* YUV 422 packed */
72#define C0_YUV_420PL 0x0000a000 /* YUV 420 planar */
73/* Think that 420 packed must be 111 - ask */
74#define C0_YUVE_YUYV 0x00000000 /* Y1CbY0Cr */
75#define C0_YUVE_YVYU 0x00010000 /* Y1CrY0Cb */
76#define C0_YUVE_VYUY 0x00020000 /* CrY1CbY0 */
77#define C0_YUVE_UYVY 0x00030000 /* CbY1CrY0 */
78#define C0_YUVE_XYUV 0x00000000 /* 420: .YUV */
79#define C0_YUVE_XYVU 0x00010000 /* 420: .YVU */
80#define C0_YUVE_XUVY 0x00020000 /* 420: .UVY */
81#define C0_YUVE_XVUY 0x00030000 /* 420: .VUY */
82/* Bayer bits 18,19 if needed */
83#define C0_HPOL_LOW 0x01000000 /* HSYNC polarity active low */
84#define C0_VPOL_LOW 0x02000000 /* VSYNC polarity active low */
85#define C0_VCLK_LOW 0x04000000 /* VCLK on falling edge */
86#define C0_DOWNSCALE 0x08000000 /* Enable downscaler */
87#define C0_SIFM_MASK 0xc0000000 /* SIF mode bits */
88#define C0_SIF_HVSYNC 0x00000000 /* Use H/VSYNC */
89#define CO_SOF_NOSYNC 0x40000000 /* Use inband active signaling */
90
91
92#define REG_CTRL1 0x40 /* Control 1 */
93#define C1_444ALPHA 0x00f00000 /* Alpha field in RGB444 */
94#define C1_ALPHA_SHFT 20
95#define C1_DMAB32 0x00000000 /* 32-byte DMA burst */
96#define C1_DMAB16 0x02000000 /* 16-byte DMA burst */
97#define C1_DMAB64 0x04000000 /* 64-byte DMA burst */
98#define C1_DMAB_MASK 0x06000000
99#define C1_TWOBUFS 0x08000000 /* Use only two DMA buffers */
100#define C1_PWRDWN 0x10000000 /* Power down */
101
102#define REG_CLKCTRL 0x88 /* Clock control */
103#define CLK_DIV_MASK 0x0000ffff /* Upper bits RW "reserved" */
104
105#define REG_GPR 0xb4 /* General purpose register. This
106 controls inputs to the power and reset
107 pins on the OV7670 used with OLPC;
108 other deployments could differ. */
109#define GPR_C1EN 0x00000020 /* Pad 1 (power down) enable */
110#define GPR_C0EN 0x00000010 /* Pad 0 (reset) enable */
111#define GPR_C1 0x00000002 /* Control 1 value */
112/*
113 * Control 0 is wired to reset on OLPC machines. For ov7x sensors,
114 * it is active low, for 0v6x, instead, it's active high. What
115 * fun.
116 */
117#define GPR_C0 0x00000001 /* Control 0 value */
118
119#define REG_TWSIC0 0xb8 /* TWSI (smbus) control 0 */
120#define TWSIC0_EN 0x00000001 /* TWSI enable */
121#define TWSIC0_MODE 0x00000002 /* 1 = 16-bit, 0 = 8-bit */
122#define TWSIC0_SID 0x000003fc /* Slave ID */
123#define TWSIC0_SID_SHIFT 2
124#define TWSIC0_CLKDIV 0x0007fc00 /* Clock divider */
125#define TWSIC0_MASKACK 0x00400000 /* Mask ack from sensor */
126#define TWSIC0_OVMAGIC 0x00800000 /* Make it work on OV sensors */
127
128#define REG_TWSIC1 0xbc /* TWSI control 1 */
129#define TWSIC1_DATA 0x0000ffff /* Data to/from camchip */
130#define TWSIC1_ADDR 0x00ff0000 /* Address (register) */
131#define TWSIC1_ADDR_SHIFT 16
132#define TWSIC1_READ 0x01000000 /* Set for read op */
133#define TWSIC1_WSTAT 0x02000000 /* Write status */
134#define TWSIC1_RVALID 0x04000000 /* Read data valid */
135#define TWSIC1_ERROR 0x08000000 /* Something screwed up */
136
137
138#define REG_UBAR 0xc4 /* Upper base address register */
139
140/*
141 * Here's the weird global control registers which are said to live
142 * way up here.
143 */
144#define REG_GL_CSR 0x3004 /* Control/status register */
145#define GCSR_SRS 0x00000001 /* SW Reset set */
146#define GCSR_SRC 0x00000002 /* SW Reset clear */
147#define GCSR_MRS 0x00000004 /* Master reset set */
148#define GCSR_MRC 0x00000008 /* HW Reset clear */
149#define GCSR_CCIC_EN 0x00004000 /* CCIC Clock enable */
150#define REG_GL_IMASK 0x300c /* Interrupt mask register */
151#define GIMSK_CCIC_EN 0x00000004 /* CCIC Interrupt enable */
152
153#define REG_LEN REG_GL_IMASK + 4
154
155
156/*
157 * Useful stuff that probably belongs somewhere global.
158 */
159#define VGA_WIDTH 640
160#define VGA_HEIGHT 480
diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c
new file mode 100644
index 000000000000..e347c7ebc984
--- /dev/null
+++ b/drivers/media/video/cafe_ccic.c
@@ -0,0 +1,2228 @@
1/*
2 * A driver for the CMOS camera controller in the Marvell 88ALP01 "cafe"
3 * multifunction chip. Currently works with the Omnivision OV7670
4 * sensor.
5 *
6 * Copyright 2006 One Laptop Per Child Association, Inc.
7 *
8 * Written by Jonathan Corbet, corbet@lwn.net.
9 *
10 * This file may be distributed under the terms of the GNU General
11 * Public License, version 2.
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/moduleparam.h>
17#include <linux/init.h>
18#include <linux/fs.h>
19#include <linux/pci.h>
20#include <linux/i2c.h>
21#include <linux/interrupt.h>
22#include <linux/spinlock.h>
23#include <linux/videodev2.h>
24#include <media/v4l2-common.h>
25#include <linux/device.h>
26#include <linux/wait.h>
27#include <linux/list.h>
28#include <linux/dma-mapping.h>
29#include <linux/delay.h>
30#include <linux/debugfs.h>
31#include <linux/jiffies.h>
32#include <linux/vmalloc.h>
33
34#include <asm/uaccess.h>
35#include <asm/io.h>
36
37#include "cafe_ccic-regs.h"
38
39#define CAFE_VERSION 0x000001
40
41
42/*
43 * Parameters.
44 */
45MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
46MODULE_DESCRIPTION("Marvell 88ALP01 CMOS Camera Controller driver");
47MODULE_LICENSE("GPL");
48MODULE_SUPPORTED_DEVICE("Video");
49
50/*
51 * Internal DMA buffer management. Since the controller cannot do S/G I/O,
52 * we must have physically contiguous buffers to bring frames into.
53 * These parameters control how many buffers we use, whether we
54 * allocate them at load time (better chance of success, but nails down
55 * memory) or when somebody tries to use the camera (riskier), and,
56 * for load-time allocation, how big they should be.
57 *
58 * The controller can cycle through three buffers. We could use
59 * more by flipping pointers around, but it probably makes little
60 * sense.
61 */
62
63#define MAX_DMA_BUFS 3
64static int alloc_bufs_at_load = 0;
65module_param(alloc_bufs_at_load, bool, 0444);
66MODULE_PARM_DESC(alloc_bufs_at_load,
67 "Non-zero value causes DMA buffers to be allocated at module "
68 "load time. This increases the chances of successfully getting "
69 "those buffers, but at the cost of nailing down the memory from "
70 "the outset.");
71
72static int n_dma_bufs = 3;
73module_param(n_dma_bufs, uint, 0644);
74MODULE_PARM_DESC(n_dma_bufs,
75 "The number of DMA buffers to allocate. Can be either two "
76 "(saves memory, makes timing tighter) or three.");
77
78static int dma_buf_size = VGA_WIDTH * VGA_HEIGHT * 2; /* Worst case */
79module_param(dma_buf_size, uint, 0444);
80MODULE_PARM_DESC(dma_buf_size,
81 "The size of the allocated DMA buffers. If actual operating "
82 "parameters require larger buffers, an attempt to reallocate "
83 "will be made.");
84
85static int min_buffers = 1;
86module_param(min_buffers, uint, 0644);
87MODULE_PARM_DESC(min_buffers,
88 "The minimum number of streaming I/O buffers we are willing "
89 "to work with.");
90
91static int max_buffers = 10;
92module_param(max_buffers, uint, 0644);
93MODULE_PARM_DESC(max_buffers,
94 "The maximum number of streaming I/O buffers an application "
95 "will be allowed to allocate. These buffers are big and live "
96 "in vmalloc space.");
97
98static int flip = 0;
99module_param(flip, bool, 0444);
100MODULE_PARM_DESC(flip,
101 "If set, the sensor will be instructed to flip the image "
102 "vertically.");
103
104
105enum cafe_state {
106 S_NOTREADY, /* Not yet initialized */
107 S_IDLE, /* Just hanging around */
108 S_FLAKED, /* Some sort of problem */
109 S_SINGLEREAD, /* In read() */
110 S_SPECREAD, /* Speculative read (for future read()) */
111 S_STREAMING /* Streaming data */
112};
113
114/*
115 * Tracking of streaming I/O buffers.
116 */
117struct cafe_sio_buffer {
118 struct list_head list;
119 struct v4l2_buffer v4lbuf;
120 char *buffer; /* Where it lives in kernel space */
121 int mapcount;
122 struct cafe_camera *cam;
123};
124
125/*
126 * A description of one of our devices.
127 * Locking: controlled by s_mutex. Certain fields, however, require
128 * the dev_lock spinlock; they are marked as such by comments.
129 * dev_lock is also required for access to device registers.
130 */
131struct cafe_camera
132{
133 enum cafe_state state;
134 unsigned long flags; /* Buffer status, mainly (dev_lock) */
135 int users; /* How many open FDs */
136 struct file *owner; /* Who has data access (v4l2) */
137
138 /*
139 * Subsystem structures.
140 */
141 struct pci_dev *pdev;
142 struct video_device v4ldev;
143 struct i2c_adapter i2c_adapter;
144 struct i2c_client *sensor;
145
146 unsigned char __iomem *regs;
147 struct list_head dev_list; /* link to other devices */
148
149 /* DMA buffers */
150 unsigned int nbufs; /* How many are alloc'd */
151 int next_buf; /* Next to consume (dev_lock) */
152 unsigned int dma_buf_size; /* allocated size */
153 void *dma_bufs[MAX_DMA_BUFS]; /* Internal buffer addresses */
154 dma_addr_t dma_handles[MAX_DMA_BUFS]; /* Buffer bus addresses */
155 unsigned int specframes; /* Unconsumed spec frames (dev_lock) */
156 unsigned int sequence; /* Frame sequence number */
157 unsigned int buf_seq[MAX_DMA_BUFS]; /* Sequence for individual buffers */
158
159 /* Streaming buffers */
160 unsigned int n_sbufs; /* How many we have */
161 struct cafe_sio_buffer *sb_bufs; /* The array of housekeeping structs */
162 struct list_head sb_avail; /* Available for data (we own) (dev_lock) */
163 struct list_head sb_full; /* With data (user space owns) (dev_lock) */
164 struct tasklet_struct s_tasklet;
165
166 /* Current operating parameters */
167 enum v4l2_chip_ident sensor_type; /* Currently ov7670 only */
168 struct v4l2_pix_format pix_format;
169
170 /* Locks */
171 struct mutex s_mutex; /* Access to this structure */
172 spinlock_t dev_lock; /* Access to device */
173
174 /* Misc */
175 wait_queue_head_t smbus_wait; /* Waiting on i2c events */
176 wait_queue_head_t iowait; /* Waiting on frame data */
177#ifdef CONFIG_VIDEO_ADV_DEBUG
178 struct dentry *dfs_regs;
179 struct dentry *dfs_cam_regs;
180#endif
181};
182
183/*
184 * Status flags. Always manipulated with bit operations.
185 */
186#define CF_BUF0_VALID 0 /* Buffers valid - first three */
187#define CF_BUF1_VALID 1
188#define CF_BUF2_VALID 2
189#define CF_DMA_ACTIVE 3 /* A frame is incoming */
190#define CF_CONFIG_NEEDED 4 /* Must configure hardware */
191
192
193
194/*
195 * Start over with DMA buffers - dev_lock needed.
196 */
197static void cafe_reset_buffers(struct cafe_camera *cam)
198{
199 int i;
200
201 cam->next_buf = -1;
202 for (i = 0; i < cam->nbufs; i++)
203 clear_bit(i, &cam->flags);
204 cam->specframes = 0;
205}
206
207static inline int cafe_needs_config(struct cafe_camera *cam)
208{
209 return test_bit(CF_CONFIG_NEEDED, &cam->flags);
210}
211
212static void cafe_set_config_needed(struct cafe_camera *cam, int needed)
213{
214 if (needed)
215 set_bit(CF_CONFIG_NEEDED, &cam->flags);
216 else
217 clear_bit(CF_CONFIG_NEEDED, &cam->flags);
218}
219
220
221
222
223/*
224 * Debugging and related.
225 */
226#define cam_err(cam, fmt, arg...) \
227 dev_err(&(cam)->pdev->dev, fmt, ##arg);
228#define cam_warn(cam, fmt, arg...) \
229 dev_warn(&(cam)->pdev->dev, fmt, ##arg);
230#define cam_dbg(cam, fmt, arg...) \
231 dev_dbg(&(cam)->pdev->dev, fmt, ##arg);
232
233
234/* ---------------------------------------------------------------------*/
235/*
236 * We keep a simple list of known devices to search at open time.
237 */
238static LIST_HEAD(cafe_dev_list);
239static DEFINE_MUTEX(cafe_dev_list_lock);
240
241static void cafe_add_dev(struct cafe_camera *cam)
242{
243 mutex_lock(&cafe_dev_list_lock);
244 list_add_tail(&cam->dev_list, &cafe_dev_list);
245 mutex_unlock(&cafe_dev_list_lock);
246}
247
248static void cafe_remove_dev(struct cafe_camera *cam)
249{
250 mutex_lock(&cafe_dev_list_lock);
251 list_del(&cam->dev_list);
252 mutex_unlock(&cafe_dev_list_lock);
253}
254
255static struct cafe_camera *cafe_find_dev(int minor)
256{
257 struct cafe_camera *cam;
258
259 mutex_lock(&cafe_dev_list_lock);
260 list_for_each_entry(cam, &cafe_dev_list, dev_list) {
261 if (cam->v4ldev.minor == minor)
262 goto done;
263 }
264 cam = NULL;
265 done:
266 mutex_unlock(&cafe_dev_list_lock);
267 return cam;
268}
269
270
271static struct cafe_camera *cafe_find_by_pdev(struct pci_dev *pdev)
272{
273 struct cafe_camera *cam;
274
275 mutex_lock(&cafe_dev_list_lock);
276 list_for_each_entry(cam, &cafe_dev_list, dev_list) {
277 if (cam->pdev == pdev)
278 goto done;
279 }
280 cam = NULL;
281 done:
282 mutex_unlock(&cafe_dev_list_lock);
283 return cam;
284}
285
286
287/* ------------------------------------------------------------------------ */
288/*
289 * Device register I/O
290 */
291static inline void cafe_reg_write(struct cafe_camera *cam, unsigned int reg,
292 unsigned int val)
293{
294 iowrite32(val, cam->regs + reg);
295}
296
297static inline unsigned int cafe_reg_read(struct cafe_camera *cam,
298 unsigned int reg)
299{
300 return ioread32(cam->regs + reg);
301}
302
303
304static inline void cafe_reg_write_mask(struct cafe_camera *cam, unsigned int reg,
305 unsigned int val, unsigned int mask)
306{
307 unsigned int v = cafe_reg_read(cam, reg);
308
309 v = (v & ~mask) | (val & mask);
310 cafe_reg_write(cam, reg, v);
311}
312
313static inline void cafe_reg_clear_bit(struct cafe_camera *cam,
314 unsigned int reg, unsigned int val)
315{
316 cafe_reg_write_mask(cam, reg, 0, val);
317}
318
319static inline void cafe_reg_set_bit(struct cafe_camera *cam,
320 unsigned int reg, unsigned int val)
321{
322 cafe_reg_write_mask(cam, reg, val, val);
323}
324
325
326
327/* -------------------------------------------------------------------- */
328/*
329 * The I2C/SMBUS interface to the camera itself starts here. The
330 * controller handles SMBUS itself, presenting a relatively simple register
331 * interface; all we have to do is to tell it where to route the data.
332 */
333#define CAFE_SMBUS_TIMEOUT (HZ) /* generous */
334
335static int cafe_smbus_write_done(struct cafe_camera *cam)
336{
337 unsigned long flags;
338 int c1;
339
340 /*
341 * We must delay after the interrupt, or the controller gets confused
342 * and never does give us good status. Fortunately, we don't do this
343 * often.
344 */
345 udelay(20);
346 spin_lock_irqsave(&cam->dev_lock, flags);
347 c1 = cafe_reg_read(cam, REG_TWSIC1);
348 spin_unlock_irqrestore(&cam->dev_lock, flags);
349 return (c1 & (TWSIC1_WSTAT|TWSIC1_ERROR)) != TWSIC1_WSTAT;
350}
351
352static int cafe_smbus_write_data(struct cafe_camera *cam,
353 u16 addr, u8 command, u8 value)
354{
355 unsigned int rval;
356 unsigned long flags;
357
358 spin_lock_irqsave(&cam->dev_lock, flags);
359 rval = TWSIC0_EN | ((addr << TWSIC0_SID_SHIFT) & TWSIC0_SID);
360 rval |= TWSIC0_OVMAGIC; /* Make OV sensors work */
361 /*
362 * Marvell sez set clkdiv to all 1's for now.
363 */
364 rval |= TWSIC0_CLKDIV;
365 cafe_reg_write(cam, REG_TWSIC0, rval);
366 (void) cafe_reg_read(cam, REG_TWSIC1); /* force write */
367 rval = value | ((command << TWSIC1_ADDR_SHIFT) & TWSIC1_ADDR);
368 cafe_reg_write(cam, REG_TWSIC1, rval);
369 spin_unlock_irqrestore(&cam->dev_lock, flags);
370 msleep(2); /* Required or things flake */
371
372 wait_event_timeout(cam->smbus_wait, cafe_smbus_write_done(cam),
373 CAFE_SMBUS_TIMEOUT);
374 spin_lock_irqsave(&cam->dev_lock, flags);
375 rval = cafe_reg_read(cam, REG_TWSIC1);
376 spin_unlock_irqrestore(&cam->dev_lock, flags);
377
378 if (rval & TWSIC1_WSTAT) {
379 cam_err(cam, "SMBUS write (%02x/%02x/%02x) timed out\n", addr,
380 command, value);
381 return -EIO;
382 }
383 if (rval & TWSIC1_ERROR) {
384 cam_err(cam, "SMBUS write (%02x/%02x/%02x) error\n", addr,
385 command, value);
386 return -EIO;
387 }
388 return 0;
389}
390
391
392
393static int cafe_smbus_read_done(struct cafe_camera *cam)
394{
395 unsigned long flags;
396 int c1;
397
398 /*
399 * We must delay after the interrupt, or the controller gets confused
400 * and never does give us good status. Fortunately, we don't do this
401 * often.
402 */
403 udelay(20);
404 spin_lock_irqsave(&cam->dev_lock, flags);
405 c1 = cafe_reg_read(cam, REG_TWSIC1);
406 spin_unlock_irqrestore(&cam->dev_lock, flags);
407 return c1 & (TWSIC1_RVALID|TWSIC1_ERROR);
408}
409
410
411
412static int cafe_smbus_read_data(struct cafe_camera *cam,
413 u16 addr, u8 command, u8 *value)
414{
415 unsigned int rval;
416 unsigned long flags;
417
418 spin_lock_irqsave(&cam->dev_lock, flags);
419 rval = TWSIC0_EN | ((addr << TWSIC0_SID_SHIFT) & TWSIC0_SID);
420 rval |= TWSIC0_OVMAGIC; /* Make OV sensors work */
421 /*
422 * Marvel sez set clkdiv to all 1's for now.
423 */
424 rval |= TWSIC0_CLKDIV;
425 cafe_reg_write(cam, REG_TWSIC0, rval);
426 (void) cafe_reg_read(cam, REG_TWSIC1); /* force write */
427 rval = TWSIC1_READ | ((command << TWSIC1_ADDR_SHIFT) & TWSIC1_ADDR);
428 cafe_reg_write(cam, REG_TWSIC1, rval);
429 spin_unlock_irqrestore(&cam->dev_lock, flags);
430
431 wait_event_timeout(cam->smbus_wait,
432 cafe_smbus_read_done(cam), CAFE_SMBUS_TIMEOUT);
433 spin_lock_irqsave(&cam->dev_lock, flags);
434 rval = cafe_reg_read(cam, REG_TWSIC1);
435 spin_unlock_irqrestore(&cam->dev_lock, flags);
436
437 if (rval & TWSIC1_ERROR) {
438 cam_err(cam, "SMBUS read (%02x/%02x) error\n", addr, command);
439 return -EIO;
440 }
441 if (! (rval & TWSIC1_RVALID)) {
442 cam_err(cam, "SMBUS read (%02x/%02x) timed out\n", addr,
443 command);
444 return -EIO;
445 }
446 *value = rval & 0xff;
447 return 0;
448}
449
450/*
451 * Perform a transfer over SMBUS. This thing is called under
452 * the i2c bus lock, so we shouldn't race with ourselves...
453 */
454static int cafe_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
455 unsigned short flags, char rw, u8 command,
456 int size, union i2c_smbus_data *data)
457{
458 struct cafe_camera *cam = i2c_get_adapdata(adapter);
459 int ret = -EINVAL;
460
461 /*
462 * Refuse to talk to anything but OV cam chips. We should
463 * never even see an attempt to do so, but one never knows.
464 */
465 if (cam->sensor && addr != cam->sensor->addr) {
466 cam_err(cam, "funky smbus addr %d\n", addr);
467 return -EINVAL;
468 }
469 /*
470 * This interface would appear to only do byte data ops. OK
471 * it can do word too, but the cam chip has no use for that.
472 */
473 if (size != I2C_SMBUS_BYTE_DATA) {
474 cam_err(cam, "funky xfer size %d\n", size);
475 return -EINVAL;
476 }
477
478 if (rw == I2C_SMBUS_WRITE)
479 ret = cafe_smbus_write_data(cam, addr, command, data->byte);
480 else if (rw == I2C_SMBUS_READ)
481 ret = cafe_smbus_read_data(cam, addr, command, &data->byte);
482 return ret;
483}
484
485
486static void cafe_smbus_enable_irq(struct cafe_camera *cam)
487{
488 unsigned long flags;
489
490 spin_lock_irqsave(&cam->dev_lock, flags);
491 cafe_reg_set_bit(cam, REG_IRQMASK, TWSIIRQS);
492 spin_unlock_irqrestore(&cam->dev_lock, flags);
493}
494
495static u32 cafe_smbus_func(struct i2c_adapter *adapter)
496{
497 return I2C_FUNC_SMBUS_READ_BYTE_DATA |
498 I2C_FUNC_SMBUS_WRITE_BYTE_DATA;
499}
500
501static struct i2c_algorithm cafe_smbus_algo = {
502 .smbus_xfer = cafe_smbus_xfer,
503 .functionality = cafe_smbus_func
504};
505
506/* Somebody is on the bus */
507static int cafe_cam_init(struct cafe_camera *cam);
508static void cafe_ctlr_stop_dma(struct cafe_camera *cam);
509static void cafe_ctlr_power_down(struct cafe_camera *cam);
510
511static int cafe_smbus_attach(struct i2c_client *client)
512{
513 struct cafe_camera *cam = i2c_get_adapdata(client->adapter);
514
515 /*
516 * Don't talk to chips we don't recognize.
517 */
518 if (client->driver->id == I2C_DRIVERID_OV7670) {
519 cam->sensor = client;
520 return cafe_cam_init(cam);
521 }
522 return -EINVAL;
523}
524
525static int cafe_smbus_detach(struct i2c_client *client)
526{
527 struct cafe_camera *cam = i2c_get_adapdata(client->adapter);
528
529 if (cam->sensor == client) {
530 cafe_ctlr_stop_dma(cam);
531 cafe_ctlr_power_down(cam);
532 cam_err(cam, "lost the sensor!\n");
533 cam->sensor = NULL; /* Bummer, no camera */
534 cam->state = S_NOTREADY;
535 }
536 return 0;
537}
538
539static int cafe_smbus_setup(struct cafe_camera *cam)
540{
541 struct i2c_adapter *adap = &cam->i2c_adapter;
542 int ret;
543
544 cafe_smbus_enable_irq(cam);
545 adap->id = I2C_HW_SMBUS_CAFE;
546 adap->class = I2C_CLASS_CAM_DIGITAL;
547 adap->owner = THIS_MODULE;
548 adap->client_register = cafe_smbus_attach;
549 adap->client_unregister = cafe_smbus_detach;
550 adap->algo = &cafe_smbus_algo;
551 strcpy(adap->name, "cafe_ccic");
552 i2c_set_adapdata(adap, cam);
553 ret = i2c_add_adapter(adap);
554 if (ret)
555 printk(KERN_ERR "Unable to register cafe i2c adapter\n");
556 return ret;
557}
558
559static void cafe_smbus_shutdown(struct cafe_camera *cam)
560{
561 i2c_del_adapter(&cam->i2c_adapter);
562}
563
564
565/* ------------------------------------------------------------------- */
566/*
567 * Deal with the controller.
568 */
569
570/*
571 * Do everything we think we need to have the interface operating
572 * according to the desired format.
573 */
/*
 * Program the controller's DMA base-address registers from
 * cam->dma_handles.  Caller is responsible for locking.
 */
static void cafe_ctlr_dma(struct cafe_camera *cam)
{
	/*
	 * Store the first two Y buffers (we aren't supporting
	 * planar formats for now, so no UV bufs).  Then either
	 * set the third if it exists, or tell the controller
	 * to just use two.
	 */
	cafe_reg_write(cam, REG_Y0BAR, cam->dma_handles[0]);
	cafe_reg_write(cam, REG_Y1BAR, cam->dma_handles[1]);
	if (cam->nbufs > 2) {
		cafe_reg_write(cam, REG_Y2BAR, cam->dma_handles[2]);
		cafe_reg_clear_bit(cam, REG_CTRL1, C1_TWOBUFS);
	}
	else
		cafe_reg_set_bit(cam, REG_CTRL1, C1_TWOBUFS);
	cafe_reg_write(cam, REG_UBAR, 0); /* 32 bits only for now */
}
592
/*
 * Program image geometry and pixel format into the controller from
 * cam->pix_format.  Caller is responsible for locking.
 */
static void cafe_ctlr_image(struct cafe_camera *cam)
{
	int imgsz;
	struct v4l2_pix_format *fmt = &cam->pix_format;

	/* Height in the upper field, line length in bytes in the lower. */
	imgsz = ((fmt->height << IMGSZ_V_SHIFT) & IMGSZ_V_MASK) |
		(fmt->bytesperline & IMGSZ_H_MASK);
	cafe_reg_write(cam, REG_IMGSIZE, imgsz);
	cafe_reg_write(cam, REG_IMGOFFSET, 0);
	/* YPITCH just drops the last two bits */
	cafe_reg_write_mask(cam, REG_IMGPITCH, fmt->bytesperline,
			IMGP_YP_MASK);
	/*
	 * Tell the controller about the image format we are using.
	 */
	switch (cam->pix_format.pixelformat) {
	case V4L2_PIX_FMT_YUYV:
		cafe_reg_write_mask(cam, REG_CTRL0,
				C0_DF_YUV|C0_YUV_PACKED|C0_YUVE_YUYV,
				C0_DF_MASK);
		break;

	case V4L2_PIX_FMT_RGB444:
		cafe_reg_write_mask(cam, REG_CTRL0,
				C0_DF_RGB|C0_RGBF_444|C0_RGB4_XRGB,
				C0_DF_MASK);
		/* Alpha value? */
		break;

	case V4L2_PIX_FMT_RGB565:
		cafe_reg_write_mask(cam, REG_CTRL0,
				C0_DF_RGB|C0_RGBF_565|C0_RGB5_BGGR,
				C0_DF_MASK);
		break;

	default:
		/* Leave the format register alone; just complain. */
		cam_err(cam, "Unknown format %x\n", cam->pix_format.pixelformat);
		break;
	}
	/*
	 * Make sure it knows we want to use hsync/vsync.
	 */
	cafe_reg_write_mask(cam, REG_CTRL0, C0_SIF_HVSYNC,
			C0_SIFM_MASK);
}
638
639
640/*
641 * Configure the controller for operation; caller holds the
642 * device mutex.
643 */
/*
 * Configure the controller for operation; caller holds the
 * device mutex.  DMA and image setup are done together under the
 * register spinlock, then the "needs config" flag is cleared.
 * Always succeeds (returns 0).
 */
static int cafe_ctlr_configure(struct cafe_camera *cam)
{
	unsigned long flags;

	spin_lock_irqsave(&cam->dev_lock, flags);
	cafe_ctlr_dma(cam);
	cafe_ctlr_image(cam);
	cafe_set_config_needed(cam, 0);
	spin_unlock_irqrestore(&cam->dev_lock, flags);
	return 0;
}
655
/* Enable frame interrupts.  Caller is responsible for locking. */
static void cafe_ctlr_irq_enable(struct cafe_camera *cam)
{
	/*
	 * Clear any pending interrupts, since we do not
	 * expect to have I/O active prior to enabling.
	 */
	cafe_reg_write(cam, REG_IRQSTAT, FRAMEIRQS);
	cafe_reg_set_bit(cam, REG_IRQMASK, FRAMEIRQS);
}
665
/* Mask off the frame interrupts again. */
static void cafe_ctlr_irq_disable(struct cafe_camera *cam)
{
	cafe_reg_clear_bit(cam, REG_IRQMASK, FRAMEIRQS);
}
670
671/*
672 * Make the controller start grabbing images. Everything must
673 * be set up before doing this.
674 */
/*
 * Make the controller start grabbing images.  Everything must
 * be set up before doing this.
 */
static void cafe_ctlr_start(struct cafe_camera *cam)
{
	/* set_bit performs a read, so no other barrier should be
	   needed here */
	cafe_reg_set_bit(cam, REG_CTRL0, C0_ENABLE);
}
681
/* Clear the enable bit; frame capture halts.  DMA may still finish. */
static void cafe_ctlr_stop(struct cafe_camera *cam)
{
	cafe_reg_clear_bit(cam, REG_CTRL0, C0_ENABLE);
}
686
/*
 * One-time controller bring-up: wake the global CAFE block, make sure
 * the camera unit is powered but not capturing, mask interrupts, and
 * set the sensor clock divider.
 */
static void cafe_ctlr_init(struct cafe_camera *cam)
{
	unsigned long flags;

	spin_lock_irqsave(&cam->dev_lock, flags);
	/*
	 * Added magic to bring up the hardware on the B-Test board
	 */
	cafe_reg_write(cam, 0x3038, 0x8);
	cafe_reg_write(cam, 0x315c, 0x80008);
	/*
	 * Go through the dance needed to wake the device up.
	 * Note that these registers are global and shared
	 * with the NAND and SD devices.  Interaction between the
	 * three still needs to be examined.
	 */
	cafe_reg_write(cam, REG_GL_CSR, GCSR_SRS|GCSR_MRS); /* Needed? */
	cafe_reg_write(cam, REG_GL_CSR, GCSR_SRC|GCSR_MRC);
	cafe_reg_write(cam, REG_GL_CSR, GCSR_SRC|GCSR_MRS);
	mdelay(5);	/* FIXME revisit this */
	cafe_reg_write(cam, REG_GL_CSR, GCSR_CCIC_EN|GCSR_SRC|GCSR_MRC);
	cafe_reg_set_bit(cam, REG_GL_IMASK, GIMSK_CCIC_EN);
	/*
	 * Make sure it's not powered down.
	 */
	cafe_reg_clear_bit(cam, REG_CTRL1, C1_PWRDWN);
	/*
	 * Turn off the enable bit.  It sure should be off anyway,
	 * but it's good to be sure.
	 */
	cafe_reg_clear_bit(cam, REG_CTRL0, C0_ENABLE);
	/*
	 * Mask all interrupts.
	 */
	cafe_reg_write(cam, REG_IRQMASK, 0);
	/*
	 * Clock the sensor appropriately.  Controller clock should
	 * be 48MHz, sensor "typical" value is half that.
	 */
	cafe_reg_write_mask(cam, REG_CLKCTRL, 2, CLK_DIV_MASK);
	spin_unlock_irqrestore(&cam->dev_lock, flags);
}
729
730
731/*
732 * Stop the controller, and don't return until we're really sure that no
733 * further DMA is going on.
734 */
/*
 * Stop the controller, and don't return until we're really sure that no
 * further DMA is going on.  May sleep; not callable from interrupt
 * context.
 */
static void cafe_ctlr_stop_dma(struct cafe_camera *cam)
{
	unsigned long flags;

	/*
	 * Theory: stop the camera controller (whether it is operating
	 * or not).  Delay briefly just in case we race with the SOF
	 * interrupt, then wait until no DMA is active.
	 */
	spin_lock_irqsave(&cam->dev_lock, flags);
	cafe_ctlr_stop(cam);
	spin_unlock_irqrestore(&cam->dev_lock, flags);
	mdelay(1);
	/* The IRQ handler clears CF_DMA_ACTIVE and wakes iowait. */
	wait_event_timeout(cam->iowait,
			!test_bit(CF_DMA_ACTIVE, &cam->flags), HZ);
	if (test_bit(CF_DMA_ACTIVE, &cam->flags))
		cam_err(cam, "Timeout waiting for DMA to end\n");
		/* This would be bad news - what now? */
	spin_lock_irqsave(&cam->dev_lock, flags);
	cam->state = S_IDLE;
	cafe_ctlr_irq_disable(cam);
	spin_unlock_irqrestore(&cam->dev_lock, flags);
}
758
759/*
760 * Power up and down.
761 */
/*
 * Power up and down.
 */
static void cafe_ctlr_power_up(struct cafe_camera *cam)
{
	unsigned long flags;

	spin_lock_irqsave(&cam->dev_lock, flags);
	cafe_reg_clear_bit(cam, REG_CTRL1, C1_PWRDWN);
	/*
	 * Put the sensor into operational mode (assumes OLPC-style
	 * wiring).  Control 0 is reset - set to 1 to operate.
	 * Control 1 is power down, set to 0 to operate.
	 */
	cafe_reg_write(cam, REG_GPR, GPR_C1EN|GPR_C0EN); /* pwr up, reset */
	mdelay(1); /* Marvell says 1ms will do it */
	cafe_reg_write(cam, REG_GPR, GPR_C1EN|GPR_C0EN|GPR_C0);
	mdelay(1); /* Enough? */
	spin_unlock_irqrestore(&cam->dev_lock, flags);
}
779
/* Assert sensor power-down (GPR_C1) and the controller's PWRDWN bit. */
static void cafe_ctlr_power_down(struct cafe_camera *cam)
{
	unsigned long flags;

	spin_lock_irqsave(&cam->dev_lock, flags);
	cafe_reg_write(cam, REG_GPR, GPR_C1EN|GPR_C0EN|GPR_C1);
	cafe_reg_set_bit(cam, REG_CTRL1, C1_PWRDWN);
	spin_unlock_irqrestore(&cam->dev_lock, flags);
}
789
790/* -------------------------------------------------------------------- */
791/*
792 * Communications with the sensor.
793 */
794
795static int __cafe_cam_cmd(struct cafe_camera *cam, int cmd, void *arg)
796{
797 struct i2c_client *sc = cam->sensor;
798 int ret;
799
800 if (sc == NULL || sc->driver == NULL || sc->driver->command == NULL)
801 return -EINVAL;
802 ret = sc->driver->command(sc, cmd, arg);
803 if (ret == -EPERM) /* Unsupported command */
804 return 0;
805 return ret;
806}
807
/* Ask the sensor to reset itself; the int argument must be zero. */
static int __cafe_cam_reset(struct cafe_camera *cam)
{
	int zero = 0;
	return __cafe_cam_cmd(cam, VIDIOC_INT_RESET, &zero);
}
813
814/*
815 * We have found the sensor on the i2c. Let's try to have a
816 * conversation.
817 */
818static int cafe_cam_init(struct cafe_camera *cam)
819{
820 int ret;
821
822 mutex_lock(&cam->s_mutex);
823 if (cam->state != S_NOTREADY)
824 cam_warn(cam, "Cam init with device in funky state %d",
825 cam->state);
826 ret = __cafe_cam_reset(cam);
827 if (ret)
828 goto out;
829 ret = __cafe_cam_cmd(cam, VIDIOC_INT_G_CHIP_IDENT, &cam->sensor_type);
830 if (ret)
831 goto out;
832// if (cam->sensor->addr != OV7xx0_SID) {
833 if (cam->sensor_type != V4L2_IDENT_OV7670) {
834 cam_err(cam, "Unsupported sensor type %d", cam->sensor->addr);
835 ret = -EINVAL;
836 goto out;
837 }
838/* Get/set parameters? */
839 ret = 0;
840 cam->state = S_IDLE;
841 out:
842 mutex_unlock(&cam->s_mutex);
843 return ret;
844}
845
846/*
847 * Configure the sensor to match the parameters we have. Caller should
848 * hold s_mutex
849 */
850static int cafe_cam_set_flip(struct cafe_camera *cam)
851{
852 struct v4l2_control ctrl;
853
854 memset(&ctrl, 0, sizeof(ctrl));
855 ctrl.id = V4L2_CID_VFLIP;
856 ctrl.value = flip;
857 return __cafe_cam_cmd(cam, VIDIOC_S_CTRL, &ctrl);
858}
859
860
/*
 * Program the sensor with our stored format.  Caller should hold
 * s_mutex; device must be idle.
 */
static int cafe_cam_configure(struct cafe_camera *cam)
{
	struct v4l2_format fmt;
	int ret, zero = 0;

	if (cam->state != S_IDLE)
		return -EINVAL;
	fmt.fmt.pix = cam->pix_format;
	ret = __cafe_cam_cmd(cam, VIDIOC_INT_INIT, &zero);
	if (ret == 0)
		ret = __cafe_cam_cmd(cam, VIDIOC_S_FMT, &fmt);
	/*
	 * OV7670 does weird things if flip is set *before* format...
	 * Note the flip result is *added* to ret, so it is attempted
	 * even if the format calls failed.
	 */
	ret += cafe_cam_set_flip(cam);
	return ret;
}
878
879/* -------------------------------------------------------------------- */
880/*
881 * DMA buffer management. These functions need s_mutex held.
882 */
883
884/* FIXME: this is inefficient as hell, since dma_alloc_coherent just
885 * does a get_free_pages() call, and we waste a good chunk of an orderN
886 * allocation. Should try to allocate the whole set in one chunk.
887 */
/* FIXME: this is inefficient as hell, since dma_alloc_coherent just
 * does a get_free_pages() call, and we waste a good chunk of an orderN
 * allocation.  Should try to allocate the whole set in one chunk.
 *
 * Allocate up to n_dma_bufs (capped at 3) coherent DMA buffers; the
 * controller needs at least two to operate.  "loadtime" selects the
 * module-parameter buffer size instead of the current image size.
 * Returns 0 on success, -ENOMEM when fewer than two buffers could
 * be allocated.
 */
static int cafe_alloc_dma_bufs(struct cafe_camera *cam, int loadtime)
{
	int i;

	cafe_set_config_needed(cam, 1);
	if (loadtime)
		cam->dma_buf_size = dma_buf_size;
	else
		cam->dma_buf_size = cam->pix_format.sizeimage;
	if (n_dma_bufs > 3)
		n_dma_bufs = 3;

	cam->nbufs = 0;
	for (i = 0; i < n_dma_bufs; i++) {
		cam->dma_bufs[i] = dma_alloc_coherent(&cam->pdev->dev,
				cam->dma_buf_size, cam->dma_handles + i,
				GFP_KERNEL);
		if (cam->dma_bufs[i] == NULL) {
			cam_warn(cam, "Failed to allocate DMA buffer\n");
			break;
		}
		/* For debug, remove eventually */
		memset(cam->dma_bufs[i], 0xcc, cam->dma_buf_size);
		(cam->nbufs)++;
	}

	switch (cam->nbufs) {
	case 1:
		/* One buffer is useless; free it and treat as zero. */
		dma_free_coherent(&cam->pdev->dev, cam->dma_buf_size,
				cam->dma_bufs[0], cam->dma_handles[0]);
		cam->nbufs = 0;
		/* fall through */
	case 0:
		cam_err(cam, "Insufficient DMA buffers, cannot operate\n");
		return -ENOMEM;

	case 2:
		if (n_dma_bufs > 2)
			cam_warn(cam, "Will limp along with only 2 buffers\n");
		break;
	}
	return 0;
}
930
931static void cafe_free_dma_bufs(struct cafe_camera *cam)
932{
933 int i;
934
935 for (i = 0; i < cam->nbufs; i++) {
936 dma_free_coherent(&cam->pdev->dev, cam->dma_buf_size,
937 cam->dma_bufs[i], cam->dma_handles[i]);
938 cam->dma_bufs[i] = NULL;
939 }
940 cam->nbufs = 0;
941}
942
943
944
945
946
947/* ----------------------------------------------------------------------- */
948/*
949 * Here starts the V4L2 interface code.
950 */
951
952/*
953 * Read an image from the device.
954 */
/*
 * Read an image from the device: copy the next full DMA buffer to
 * user space and rotate next_buf.  Returns the byte count delivered,
 * -EIO if no buffer is ready, or -EFAULT on a bad user pointer.
 */
static ssize_t cafe_deliver_buffer(struct cafe_camera *cam,
		char __user *buffer, size_t len, loff_t *pos)
{
	int bufno;
	unsigned long flags;

	spin_lock_irqsave(&cam->dev_lock, flags);
	if (cam->next_buf < 0) {
		cam_err(cam, "deliver_buffer: No next buffer\n");
		spin_unlock_irqrestore(&cam->dev_lock, flags);
		return -EIO;
	}
	bufno = cam->next_buf;
	/* Buffer-full flags live in cam->flags, one bit per buffer. */
	clear_bit(bufno, &cam->flags);
	if (++(cam->next_buf) >= cam->nbufs)
		cam->next_buf = 0;
	if (! test_bit(cam->next_buf, &cam->flags))
		cam->next_buf = -1;
	cam->specframes = 0;
	spin_unlock_irqrestore(&cam->dev_lock, flags);

	/* Copy outside the spinlock; clamp to one image. */
	if (len > cam->pix_format.sizeimage)
		len = cam->pix_format.sizeimage;
	if (copy_to_user(buffer, cam->dma_bufs[bufno], len))
		return -EFAULT;
	(*pos) += len;
	return len;
}
983
984/*
985 * Get everything ready, and start grabbing frames.
986 */
/*
 * Get everything ready, and start grabbing frames: make sure DMA
 * buffers exist, (re)configure sensor and controller if needed, then
 * enable interrupts and the capture engine.  Caller holds s_mutex.
 */
static int cafe_read_setup(struct cafe_camera *cam, enum cafe_state state)
{
	int ret;
	unsigned long flags;

	/*
	 * Configuration.  If we still don't have DMA buffers,
	 * make one last, desperate attempt.
	 */
	if (cam->nbufs == 0)
		if (cafe_alloc_dma_bufs(cam, 0))
			return -ENOMEM;

	if (cafe_needs_config(cam)) {
		/* Sensor errors are ignored here; only controller
		   configuration failures abort. */
		cafe_cam_configure(cam);
		ret = cafe_ctlr_configure(cam);
		if (ret)
			return ret;
	}

	/*
	 * Turn it loose.
	 */
	spin_lock_irqsave(&cam->dev_lock, flags);
	cafe_reset_buffers(cam);
	cafe_ctlr_irq_enable(cam);
	cam->state = state;
	cafe_ctlr_start(cam);
	spin_unlock_irqrestore(&cam->dev_lock, flags);
	return 0;
}
1018
1019
/*
 * read() entry point.  Serves a frame from speculative-read mode if
 * one is pending, otherwise starts a single-shot capture and waits
 * (up to one second) for a frame.  Only one file handle — the owner —
 * may grab data.
 */
static ssize_t cafe_v4l_read(struct file *filp,
		char __user *buffer, size_t len, loff_t *pos)
{
	struct cafe_camera *cam = filp->private_data;
	int ret;

	/*
	 * Perhaps we're in speculative read mode and already
	 * have data?
	 */
	mutex_lock(&cam->s_mutex);
	if (cam->state == S_SPECREAD) {
		if (cam->next_buf >= 0) {
			ret = cafe_deliver_buffer(cam, buffer, len, pos);
			if (ret != 0)	/* delivered data or an error */
				goto out_unlock;
		}
	} else if (cam->state == S_FLAKED || cam->state == S_NOTREADY) {
		ret = -EIO;
		goto out_unlock;
	} else if (cam->state != S_IDLE) {
		ret = -EBUSY;
		goto out_unlock;
	}

	/*
	 * v4l2: multiple processes can open the device, but only
	 * one gets to grab data from it.
	 */
	if (cam->owner && cam->owner != filp) {
		ret = -EBUSY;
		goto out_unlock;
	}
	cam->owner = filp;

	/*
	 * Do setup if need be.
	 */
	if (cam->state != S_SPECREAD) {
		ret = cafe_read_setup(cam, S_SINGLEREAD);
		if (ret)
			goto out_unlock;
	}
	/*
	 * Wait for something to happen.  This should probably
	 * be interruptible (FIXME).
	 */
	wait_event_timeout(cam->iowait, cam->next_buf >= 0, HZ);
	if (cam->next_buf < 0) {
		cam_err(cam, "read() operation timed out\n");
		cafe_ctlr_stop_dma(cam);
		ret = -EIO;
		goto out_unlock;
	}
	/*
	 * Give them their data and we should be done.
	 */
	ret = cafe_deliver_buffer(cam, buffer, len, pos);

  out_unlock:
	mutex_unlock(&cam->s_mutex);
	return ret;
}
1083
1084
1085
1086
1087
1088
1089
1090
1091/*
1092 * Streaming I/O support.
1093 */
1094
1095
1096
/*
 * VIDIOC_STREAMON: begin streaming capture.  Requires an idle device
 * and previously allocated streaming buffers (reqbufs).
 */
static int cafe_vidioc_streamon(struct file *filp, void *priv,
		enum v4l2_buf_type type)
{
	struct cafe_camera *cam = filp->private_data;
	int ret = -EINVAL;

	if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		goto out;
	mutex_lock(&cam->s_mutex);
	if (cam->state != S_IDLE || cam->n_sbufs == 0)
		goto out_unlock;

	cam->sequence = 0;
	ret = cafe_read_setup(cam, S_STREAMING);

  out_unlock:
	mutex_unlock(&cam->s_mutex);
  out:
	return ret;
}
1117
1118
/*
 * VIDIOC_STREAMOFF: stop streaming capture and wait for DMA to drain.
 * -EINVAL if we weren't streaming.
 */
static int cafe_vidioc_streamoff(struct file *filp, void *priv,
		enum v4l2_buf_type type)
{
	struct cafe_camera *cam = filp->private_data;
	int ret = -EINVAL;

	if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		goto out;
	mutex_lock(&cam->s_mutex);
	if (cam->state != S_STREAMING)
		goto out_unlock;

	cafe_ctlr_stop_dma(cam);
	ret = 0;

  out_unlock:
	mutex_unlock(&cam->s_mutex);
  out:
	return ret;
}
1139
1140
1141
/*
 * Initialize one streaming I/O buffer: allocate its vmalloc backing
 * store and fill in the v4l2_buffer the application will see.
 * Returns 0 or -ENOMEM.
 */
static int cafe_setup_siobuf(struct cafe_camera *cam, int index)
{
	struct cafe_sio_buffer *buf = cam->sb_bufs + index;

	INIT_LIST_HEAD(&buf->list);
	buf->v4lbuf.length = PAGE_ALIGN(cam->pix_format.sizeimage);
	buf->buffer = vmalloc_user(buf->v4lbuf.length);
	if (buf->buffer == NULL)
		return -ENOMEM;
	buf->mapcount = 0;
	buf->cam = cam;

	buf->v4lbuf.index = index;
	buf->v4lbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf->v4lbuf.field = V4L2_FIELD_NONE;
	buf->v4lbuf.memory = V4L2_MEMORY_MMAP;
	/*
	 * Offset: must be 32-bit even on a 64-bit system.  video-buf
	 * just uses the length times the index, but the spec warns
	 * against doing just that - vma merging problems.  So we
	 * leave a gap between each pair of buffers.
	 */
	buf->v4lbuf.m.offset = 2*index*buf->v4lbuf.length;
	return 0;
}
1167
1168static int cafe_free_sio_buffers(struct cafe_camera *cam)
1169{
1170 int i;
1171
1172 /*
1173 * If any buffers are mapped, we cannot free them at all.
1174 */
1175 for (i = 0; i < cam->n_sbufs; i++)
1176 if (cam->sb_bufs[i].mapcount > 0)
1177 return -EBUSY;
1178 /*
1179 * OK, let's do it.
1180 */
1181 for (i = 0; i < cam->n_sbufs; i++)
1182 vfree(cam->sb_bufs[i].buffer);
1183 cam->n_sbufs = 0;
1184 kfree(cam->sb_bufs);
1185 cam->sb_bufs = NULL;
1186 INIT_LIST_HEAD(&cam->sb_avail);
1187 INIT_LIST_HEAD(&cam->sb_full);
1188 return 0;
1189}
1190
1191
1192
/*
 * VIDIOC_REQBUFS: allocate (or free, for count==0) the streaming I/O
 * buffer set.  Only MMAP capture buffers are supported.  The count is
 * clamped to [min_buffers, max_buffers] and reported back via
 * req->count, which may be lower on partial allocation success.
 */
static int cafe_vidioc_reqbufs(struct file *filp, void *priv,
		struct v4l2_requestbuffers *req)
{
	struct cafe_camera *cam = filp->private_data;
	int ret;

	/*
	 * Make sure it's something we can do.  User pointers could be
	 * implemented without great pain, but that's not been done yet.
	 */
	if (req->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;
	if (req->memory != V4L2_MEMORY_MMAP)
		return -EINVAL;
	/*
	 * If they ask for zero buffers, they really want us to stop streaming
	 * (if it's happening) and free everything.  Should we check owner?
	 */
	mutex_lock(&cam->s_mutex);
	if (req->count == 0) {
		if (cam->state == S_STREAMING)
			cafe_ctlr_stop_dma(cam);
		ret = cafe_free_sio_buffers (cam);
		goto out;
	}
	/*
	 * Device needs to be idle and working.  We *could* try to do the
	 * right thing in S_SPECREAD by shutting things down, but it
	 * probably doesn't matter.
	 */
	if (cam->state != S_IDLE || (cam->owner && cam->owner != filp)) {
		ret = -EBUSY;
		goto out;
	}
	cam->owner = filp;

	if (req->count < min_buffers)
		req->count = min_buffers;
	else if (req->count > max_buffers)
		req->count = max_buffers;
	/* Drop any previously allocated set before building the new one. */
	if (cam->n_sbufs > 0) {
		ret = cafe_free_sio_buffers(cam);
		if (ret)
			goto out;
	}

	cam->sb_bufs = kzalloc(req->count*sizeof(struct cafe_sio_buffer),
			GFP_KERNEL);
	if (cam->sb_bufs == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	for (cam->n_sbufs = 0; cam->n_sbufs < req->count; (cam->n_sbufs++)) {
		ret = cafe_setup_siobuf(cam, cam->n_sbufs);
		if (ret)
			break;
	}

	if (cam->n_sbufs == 0)  /* no luck at all - ret already set */
		kfree(cam->sb_bufs);
	else
		ret = 0;
	req->count = cam->n_sbufs;  /* In case of partial success */

  out:
	mutex_unlock(&cam->s_mutex);
	return ret;
}
1261
1262
1263static int cafe_vidioc_querybuf(struct file *filp, void *priv,
1264 struct v4l2_buffer *buf)
1265{
1266 struct cafe_camera *cam = filp->private_data;
1267 int ret = -EINVAL;
1268
1269 mutex_lock(&cam->s_mutex);
1270 if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1271 goto out;
1272 if (buf->index < 0 || buf->index >= cam->n_sbufs)
1273 goto out;
1274 *buf = cam->sb_bufs[buf->index].v4lbuf;
1275 ret = 0;
1276 out:
1277 mutex_unlock(&cam->s_mutex);
1278 return ret;
1279}
1280
1281static int cafe_vidioc_qbuf(struct file *filp, void *priv,
1282 struct v4l2_buffer *buf)
1283{
1284 struct cafe_camera *cam = filp->private_data;
1285 struct cafe_sio_buffer *sbuf;
1286 int ret = -EINVAL;
1287 unsigned long flags;
1288
1289 mutex_lock(&cam->s_mutex);
1290 if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1291 goto out;
1292 if (buf->index < 0 || buf->index >= cam->n_sbufs)
1293 goto out;
1294 sbuf = cam->sb_bufs + buf->index;
1295 if (sbuf->v4lbuf.flags & V4L2_BUF_FLAG_QUEUED) {
1296 ret = 0; /* Already queued?? */
1297 goto out;
1298 }
1299 if (sbuf->v4lbuf.flags & V4L2_BUF_FLAG_DONE) {
1300 /* Spec doesn't say anything, seems appropriate tho */
1301 ret = -EBUSY;
1302 goto out;
1303 }
1304 sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_QUEUED;
1305 spin_lock_irqsave(&cam->dev_lock, flags);
1306 list_add(&sbuf->list, &cam->sb_avail);
1307 spin_unlock_irqrestore(&cam->dev_lock, flags);
1308 ret = 0;
1309 out:
1310 mutex_unlock(&cam->s_mutex);
1311 return ret;
1312}
1313
/*
 * VIDIOC_DQBUF: hand a completed buffer back to the application,
 * blocking (interruptibly) unless O_NONBLOCK is set.  The mutex is
 * dropped around the wait so the interrupt path can make progress.
 */
static int cafe_vidioc_dqbuf(struct file *filp, void *priv,
		struct v4l2_buffer *buf)
{
	struct cafe_camera *cam = filp->private_data;
	struct cafe_sio_buffer *sbuf;
	int ret = -EINVAL;
	unsigned long flags;

	mutex_lock(&cam->s_mutex);
	if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		goto out_unlock;
	if (cam->state != S_STREAMING)
		goto out_unlock;
	if (list_empty(&cam->sb_full) && filp->f_flags & O_NONBLOCK) {
		ret = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * NOTE(review): the wait condition only checks sb_full, while
	 * the loop also depends on cam->state; a waiter may not wake
	 * when streaming stops without a buffer completing — confirm
	 * the stop path wakes iowait.
	 */
	while (list_empty(&cam->sb_full) && cam->state == S_STREAMING) {
		mutex_unlock(&cam->s_mutex);
		if (wait_event_interruptible(cam->iowait,
						!list_empty(&cam->sb_full))) {
			ret = -ERESTARTSYS;
			goto out;
		}
		mutex_lock(&cam->s_mutex);
	}

	if (cam->state != S_STREAMING)
		ret = -EINTR;
	else {
		spin_lock_irqsave(&cam->dev_lock, flags);
		/* Should probably recheck !list_empty() here */
		sbuf = list_entry(cam->sb_full.next,
				struct cafe_sio_buffer, list);
		list_del_init(&sbuf->list);
		spin_unlock_irqrestore(&cam->dev_lock, flags);
		sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_DONE;
		*buf = sbuf->v4lbuf;
		ret = 0;
	}

  out_unlock:
	mutex_unlock(&cam->s_mutex);
  out:
	return ret;
}
1361
1362
1363
/* VMA open: count one more mapping of this streaming buffer. */
static void cafe_v4l_vm_open(struct vm_area_struct *vma)
{
	struct cafe_sio_buffer *sbuf = vma->vm_private_data;
	/*
	 * Locking: done under mmap_sem, so we don't need to
	 * go back to the camera lock here.
	 */
	sbuf->mapcount++;
}
1373
1374
/*
 * VMA close: drop a mapping reference; when the last one goes away,
 * clear the MAPPED flag so the buffer set can be freed again.
 */
static void cafe_v4l_vm_close(struct vm_area_struct *vma)
{
	struct cafe_sio_buffer *sbuf = vma->vm_private_data;

	mutex_lock(&sbuf->cam->s_mutex);
	sbuf->mapcount--;
	/* Docs say we should stop I/O too... */
	if (sbuf->mapcount == 0)
		sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_MAPPED;
	mutex_unlock(&sbuf->cam->s_mutex);
}
1386
/* VMA callbacks used for mmapped streaming buffers. */
static struct vm_operations_struct cafe_v4l_vm_ops = {
	.open = cafe_v4l_vm_open,
	.close = cafe_v4l_vm_close
};
1391
1392
/*
 * mmap() entry point: map one streaming buffer, identified by the
 * offset established in cafe_setup_siobuf(), into user space.
 * Requires a shared, writable mapping.
 */
static int cafe_v4l_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct cafe_camera *cam = filp->private_data;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	int ret = -EINVAL;
	int i;
	struct cafe_sio_buffer *sbuf = NULL;

	if (! (vma->vm_flags & VM_WRITE) || ! (vma->vm_flags & VM_SHARED))
		return -EINVAL;
	/*
	 * Find the buffer they are looking for.
	 */
	mutex_lock(&cam->s_mutex);
	for (i = 0; i < cam->n_sbufs; i++)
		if (cam->sb_bufs[i].v4lbuf.m.offset == offset) {
			sbuf = cam->sb_bufs + i;
			break;
		}
	if (sbuf == NULL)
		goto out;

	ret = remap_vmalloc_range(vma, sbuf->buffer, 0);
	if (ret)
		goto out;
	vma->vm_flags |= VM_DONTEXPAND;
	vma->vm_private_data = sbuf;
	vma->vm_ops = &cafe_v4l_vm_ops;
	sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_MAPPED;
	/* Account for the initial mapping ourselves. */
	cafe_v4l_vm_open(vma);
	ret = 0;
  out:
	mutex_unlock(&cam->s_mutex);
	return ret;
}
1428
1429
1430
/*
 * open() entry point.  The first opener powers up the controller and
 * resets the sensor; subsequent openers just bump the user count.
 */
static int cafe_v4l_open(struct inode *inode, struct file *filp)
{
	struct cafe_camera *cam;

	cam = cafe_find_dev(iminor(inode));
	if (cam == NULL)
		return -ENODEV;
	filp->private_data = cam;

	mutex_lock(&cam->s_mutex);
	if (cam->users == 0) {
		cafe_ctlr_power_up(cam);
		__cafe_cam_reset(cam);
		cafe_set_config_needed(cam, 1);
	/* FIXME make sure this is complete */
	}
	(cam->users)++;
	mutex_unlock(&cam->s_mutex);
	return 0;
}
1451
1452
/*
 * release() entry point.  The owning file handle stops capture and
 * frees streaming buffers; the last closer powers the controller
 * down and (optionally) releases the DMA buffers.
 */
static int cafe_v4l_release(struct inode *inode, struct file *filp)
{
	struct cafe_camera *cam = filp->private_data;

	mutex_lock(&cam->s_mutex);
	(cam->users)--;
	if (filp == cam->owner) {
		cafe_ctlr_stop_dma(cam);
		cafe_free_sio_buffers(cam);
		cam->owner = NULL;
	}
	if (cam->users == 0) {
		cafe_ctlr_power_down(cam);
		/* Keep DMA buffers around only if allocated at load time. */
		if (! alloc_bufs_at_load)
			cafe_free_dma_bufs(cam);
	}
	mutex_unlock(&cam->s_mutex);
	return 0;
}
1472
1473
1474
1475static unsigned int cafe_v4l_poll(struct file *filp,
1476 struct poll_table_struct *pt)
1477{
1478 struct cafe_camera *cam = filp->private_data;
1479
1480 poll_wait(filp, &cam->iowait, pt);
1481 if (cam->next_buf >= 0)
1482 return POLLIN | POLLRDNORM;
1483 return 0;
1484}
1485
1486
1487
/* VIDIOC_QUERYCTRL: forwarded to the sensor under s_mutex. */
static int cafe_vidioc_queryctrl(struct file *filp, void *priv,
		struct v4l2_queryctrl *qc)
{
	struct cafe_camera *cam = filp->private_data;
	int ret;

	mutex_lock(&cam->s_mutex);
	ret = __cafe_cam_cmd(cam, VIDIOC_QUERYCTRL, qc);
	mutex_unlock(&cam->s_mutex);
	return ret;
}
1499
1500
/* VIDIOC_G_CTRL: forwarded to the sensor under s_mutex. */
static int cafe_vidioc_g_ctrl(struct file *filp, void *priv,
		struct v4l2_control *ctrl)
{
	struct cafe_camera *cam = filp->private_data;
	int ret;

	mutex_lock(&cam->s_mutex);
	ret = __cafe_cam_cmd(cam, VIDIOC_G_CTRL, ctrl);
	mutex_unlock(&cam->s_mutex);
	return ret;
}
1512
1513
/* VIDIOC_S_CTRL: forwarded to the sensor under s_mutex. */
static int cafe_vidioc_s_ctrl(struct file *filp, void *priv,
		struct v4l2_control *ctrl)
{
	struct cafe_camera *cam = filp->private_data;
	int ret;

	mutex_lock(&cam->s_mutex);
	ret = __cafe_cam_cmd(cam, VIDIOC_S_CTRL, ctrl);
	mutex_unlock(&cam->s_mutex);
	return ret;
}
1525
1526
1527
1528
1529
1530static int cafe_vidioc_querycap(struct file *file, void *priv,
1531 struct v4l2_capability *cap)
1532{
1533 strcpy(cap->driver, "cafe_ccic");
1534 strcpy(cap->card, "cafe_ccic");
1535 cap->version = CAFE_VERSION;
1536 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
1537 V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
1538 return 0;
1539}
1540
1541
1542/*
1543 * The default format we use until somebody says otherwise.
1544 */
/* Default format: VGA packed YUYV, two bytes per pixel. */
static struct v4l2_pix_format cafe_def_pix_format = {
	.width		= VGA_WIDTH,
	.height		= VGA_HEIGHT,
	.pixelformat	= V4L2_PIX_FMT_YUYV,
	.field		= V4L2_FIELD_NONE,
	.bytesperline	= VGA_WIDTH*2,
	.sizeimage	= VGA_WIDTH*VGA_HEIGHT*2,
};
1553
/*
 * VIDIOC_ENUM_FMT: let the sensor enumerate the formats it supports.
 * Only the capture buffer type is valid.
 */
static int cafe_vidioc_enum_fmt_cap(struct file *filp,
		void *priv, struct v4l2_fmtdesc *fmt)
{
	struct cafe_camera *cam = priv;
	int ret;

	if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;
	mutex_lock(&cam->s_mutex);
	ret = __cafe_cam_cmd(cam, VIDIOC_ENUM_FMT, fmt);
	mutex_unlock(&cam->s_mutex);
	return ret;
}
1567
1568
/* VIDIOC_TRY_FMT: let the sensor validate/adjust the format. */
static int cafe_vidioc_try_fmt_cap (struct file *filp, void *priv,
		struct v4l2_format *fmt)
{
	struct cafe_camera *cam = priv;
	int ret;

	mutex_lock(&cam->s_mutex);
	ret = __cafe_cam_cmd(cam, VIDIOC_TRY_FMT, fmt);
	mutex_unlock(&cam->s_mutex);
	return ret;
}
1580
/*
 * VIDIOC_S_FMT: adopt a new capture format.  Refused while capture is
 * active or streaming buffers exist; validates via try_fmt first,
 * reallocates DMA buffers if the new image is larger, then programs
 * both sensor and controller.
 */
static int cafe_vidioc_s_fmt_cap(struct file *filp, void *priv,
		struct v4l2_format *fmt)
{
	struct cafe_camera *cam = priv;
	int ret;

	/*
	 * Can't do anything if the device is not idle
	 * Also can't if there are streaming buffers in place.
	 */
	if (cam->state != S_IDLE || cam->n_sbufs > 0)
		return -EBUSY;
	/*
	 * See if the formatting works in principle.
	 */
	ret = cafe_vidioc_try_fmt_cap(filp, priv, fmt);
	if (ret)
		return ret;
	/*
	 * Now we start to change things for real, so let's do it
	 * under lock.
	 */
	mutex_lock(&cam->s_mutex);
	cam->pix_format = fmt->fmt.pix;
	/*
	 * Make sure we have appropriate DMA buffers.
	 */
	ret = -ENOMEM;
	if (cam->nbufs > 0 && cam->dma_buf_size < cam->pix_format.sizeimage)
		cafe_free_dma_bufs(cam);
	if (cam->nbufs == 0) {
		if (cafe_alloc_dma_bufs(cam, 0))
			goto out;
	}
	/*
	 * It looks like this might work, so let's program the sensor.
	 */
	ret = cafe_cam_configure(cam);
	if (! ret)
		ret = cafe_ctlr_configure(cam);
  out:
	mutex_unlock(&cam->s_mutex);
	return ret;
}
1625
1626/*
1627 * Return our stored notion of how the camera is/should be configured.
1628 * The V4l2 spec wants us to be smarter, and actually get this from
1629 * the camera (and not mess with it at open time). Someday.
1630 */
1631static int cafe_vidioc_g_fmt_cap(struct file *filp, void *priv,
1632 struct v4l2_format *f)
1633{
1634 struct cafe_camera *cam = priv;
1635
1636 f->fmt.pix = cam->pix_format;
1637 return 0;
1638}
1639
1640/*
1641 * We only have one input - the sensor - so minimize the nonsense here.
1642 */
1643static int cafe_vidioc_enum_input(struct file *filp, void *priv,
1644 struct v4l2_input *input)
1645{
1646 if (input->index != 0)
1647 return -EINVAL;
1648
1649 input->type = V4L2_INPUT_TYPE_CAMERA;
1650 input->std = V4L2_STD_ALL; /* Not sure what should go here */
1651 strcpy(input->name, "Camera");
1652 return 0;
1653}
1654
/* The sensor is the only input, so the answer is always zero. */
static int cafe_vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
{
	*i = 0;
	return 0;
}
1660
1661static int cafe_vidioc_s_input(struct file *filp, void *priv, unsigned int i)
1662{
1663 if (i != 0)
1664 return -EINVAL;
1665 return 0;
1666}
1667
1668/* from vivi.c */
1669static int cafe_vidioc_s_std(struct file *filp, void *priv, v4l2_std_id *a)
1670{
1671 return 0;
1672}
1673
1674/*
1675 * G/S_PARM. Most of this is done by the sensor, but we are
1676 * the level which controls the number of read buffers.
1677 */
/*
 * G/S_PARM.  Most of this is done by the sensor, but we are
 * the level which controls the number of read buffers.
 */
static int cafe_vidioc_g_parm(struct file *filp, void *priv,
		struct v4l2_streamparm *parms)
{
	struct cafe_camera *cam = priv;
	int ret;

	mutex_lock(&cam->s_mutex);
	ret = __cafe_cam_cmd(cam, VIDIOC_G_PARM, parms);
	mutex_unlock(&cam->s_mutex);
	/* Overwrite readbuffers with our DMA buffer count. */
	parms->parm.capture.readbuffers = n_dma_bufs;
	return ret;
}
1690
/*
 * VIDIOC_S_PARM: forwarded to the sensor; the readbuffers count is
 * ours and is forced back to n_dma_bufs regardless of the request.
 */
static int cafe_vidioc_s_parm(struct file *filp, void *priv,
		struct v4l2_streamparm *parms)
{
	struct cafe_camera *cam = priv;
	int ret;

	mutex_lock(&cam->s_mutex);
	ret = __cafe_cam_cmd(cam, VIDIOC_S_PARM, parms);
	mutex_unlock(&cam->s_mutex);
	parms->parm.capture.readbuffers = n_dma_bufs;
	return ret;
}
1703
1704
/*
 * Final release callback from the video core: the video_device is
 * embedded in cafe_camera, so freeing the camera frees both.
 */
static void cafe_v4l_dev_release(struct video_device *vd)
{
	struct cafe_camera *cam = container_of(vd, struct cafe_camera, v4ldev);

	kfree(cam);
}
1711
1712
1713/*
1714 * This template device holds all of those v4l2 methods; we
1715 * clone it for specific real devices.
1716 */
1717
/* Device file operations; ioctls are dispatched by video_ioctl2. */
static struct file_operations cafe_v4l_fops = {
	.owner = THIS_MODULE,
	.open = cafe_v4l_open,
	.release = cafe_v4l_release,
	.read = cafe_v4l_read,
	.poll = cafe_v4l_poll,
	.mmap = cafe_v4l_mmap,
	.ioctl = video_ioctl2,
	.llseek = no_llseek,
};
1728
/*
 * This template device holds all of those v4l2 methods; we
 * clone it for specific real devices.
 */
static struct video_device cafe_v4l_template = {
	.name = "cafe",
	.type = VFL_TYPE_GRABBER,
	.type2 = VID_TYPE_CAPTURE,
	.minor = -1, /* Get one dynamically */
	.tvnorms = V4L2_STD_NTSC_M,
	.current_norm = V4L2_STD_NTSC_M,  /* make mplayer happy */

	.fops = &cafe_v4l_fops,
	.release = cafe_v4l_dev_release,

	.vidioc_querycap 	= cafe_vidioc_querycap,
	.vidioc_enum_fmt_cap	= cafe_vidioc_enum_fmt_cap,
	.vidioc_try_fmt_cap	= cafe_vidioc_try_fmt_cap,
	.vidioc_s_fmt_cap	= cafe_vidioc_s_fmt_cap,
	.vidioc_g_fmt_cap	= cafe_vidioc_g_fmt_cap,
	.vidioc_enum_input	= cafe_vidioc_enum_input,
	.vidioc_g_input		= cafe_vidioc_g_input,
	.vidioc_s_input		= cafe_vidioc_s_input,
	.vidioc_s_std		= cafe_vidioc_s_std,
	.vidioc_reqbufs		= cafe_vidioc_reqbufs,
	.vidioc_querybuf	= cafe_vidioc_querybuf,
	.vidioc_qbuf		= cafe_vidioc_qbuf,
	.vidioc_dqbuf		= cafe_vidioc_dqbuf,
	.vidioc_streamon	= cafe_vidioc_streamon,
	.vidioc_streamoff	= cafe_vidioc_streamoff,
	.vidioc_queryctrl	= cafe_vidioc_queryctrl,
	.vidioc_g_ctrl		= cafe_vidioc_g_ctrl,
	.vidioc_s_ctrl		= cafe_vidioc_s_ctrl,
	.vidioc_g_parm		= cafe_vidioc_g_parm,
	.vidioc_s_parm		= cafe_vidioc_s_parm,
};
1761
1762
1763
1764
1765
1766
1767
1768/* ---------------------------------------------------------------------- */
1769/*
1770 * Interrupt handler stuff
1771 */
1772
1773
1774
/*
 * Streaming-mode tasklet: copy each completed DMA buffer into an
 * available userspace streaming (sio) buffer and mark it done.
 * Runs in softirq context; all buffer bookkeeping is under dev_lock.
 */
static void cafe_frame_tasklet(unsigned long data)
{
	struct cafe_camera *cam = (struct cafe_camera *) data;
	int i;
	unsigned long flags;
	struct cafe_sio_buffer *sbuf;

	spin_lock_irqsave(&cam->dev_lock, flags);
	for (i = 0; i < cam->nbufs; i++) {
		int bufno = cam->next_buf;
		if (bufno < 0) { /* "will never happen" */
			cam_err(cam, "No valid bufs in tasklet!\n");
			break;
		}
		/* Advance next_buf circularly before we consume bufno */
		if (++(cam->next_buf) >= cam->nbufs)
			cam->next_buf = 0;
		/* Skip DMA buffers which do not hold a completed frame */
		if (! test_bit(bufno, &cam->flags))
			continue;
		if (list_empty(&cam->sb_avail))
			break; /* Leave it valid, hope for better later */
		clear_bit(bufno, &cam->flags);
		/*
		 * We could perhaps drop the spinlock during this
		 * big copy. Something to consider.
		 */
		sbuf = list_entry(cam->sb_avail.next,
				struct cafe_sio_buffer, list);
		memcpy(sbuf->buffer, cam->dma_bufs[bufno],
				cam->pix_format.sizeimage);
		sbuf->v4lbuf.bytesused = cam->pix_format.sizeimage;
		sbuf->v4lbuf.sequence = cam->buf_seq[bufno];
		sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_QUEUED;
		sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_DONE;
		list_move_tail(&sbuf->list, &cam->sb_full);
	}
	/* Wake anyone waiting in poll/dqbuf if we produced a frame */
	if (! list_empty(&cam->sb_full))
		wake_up(&cam->iowait);
	spin_unlock_irqrestore(&cam->dev_lock, flags);
}
1814
1815
1816
/*
 * Handle an end-of-frame interrupt for one DMA buffer: mark the
 * buffer full, stamp its sequence number, and dispatch on the
 * current capture state.  Called from cafe_frame_irq() with
 * dev_lock held (interrupt context).
 */
static void cafe_frame_complete(struct cafe_camera *cam, int frame)
{
	/*
	 * Basic frame housekeeping.
	 */
	if (test_bit(frame, &cam->flags) && printk_ratelimit())
		cam_err(cam, "Frame overrun on %d, frames lost\n", frame);
	set_bit(frame, &cam->flags);
	clear_bit(CF_DMA_ACTIVE, &cam->flags);
	if (cam->next_buf < 0)
		cam->next_buf = frame;
	cam->buf_seq[frame] = ++(cam->sequence);

	switch (cam->state) {
	/*
	 * If in single read mode, try going speculative.
	 */
	case S_SINGLEREAD:
		cam->state = S_SPECREAD;
		cam->specframes = 0;
		wake_up(&cam->iowait);
		break;

	/*
	 * If we are already doing speculative reads, and nobody is
	 * reading them, just stop.
	 */
	case S_SPECREAD:
		if (++(cam->specframes) >= cam->nbufs) {
			cafe_ctlr_stop(cam);
			cafe_ctlr_irq_disable(cam);
			cam->state = S_IDLE;
		}
		wake_up(&cam->iowait);
		break;
	/*
	 * For the streaming case, we defer the real work to the
	 * camera tasklet.
	 *
	 * FIXME: if the application is not consuming the buffers,
	 * we should eventually put things on hold and restart in
	 * vidioc_dqbuf().
	 */
	case S_STREAMING:
		tasklet_schedule(&cam->s_tasklet);
		break;

	default:
		cam_err(cam, "Frame interrupt in non-operational state\n");
		break;
	}
}
1869
1870
1871
1872
/*
 * Process frame-related interrupt bits: acknowledge them, complete
 * any finished frames, and note start-of-frame as active DMA.
 * Called from cafe_irq() with dev_lock held.
 */
static void cafe_frame_irq(struct cafe_camera *cam, unsigned int irqs)
{
	unsigned int frame;

	cafe_reg_write(cam, REG_IRQSTAT, FRAMEIRQS); /* Clear'em all */
	/*
	 * Handle any frame completions. There really should
	 * not be more than one of these, or we have fallen
	 * far behind.
	 */
	for (frame = 0; frame < cam->nbufs; frame++)
		if (irqs & (IRQ_EOF0 << frame))
			cafe_frame_complete(cam, frame);
	/*
	 * If a frame starts, note that we have DMA active. This
	 * code assumes that we won't get multiple frame interrupts
	 * at once; may want to rethink that.
	 */
	if (irqs & (IRQ_SOF0 | IRQ_SOF1 | IRQ_SOF2))
		set_bit(CF_DMA_ACTIVE, &cam->flags);
}
1894
1895
1896
/*
 * Shared interrupt handler.  Reads the IRQ status register and
 * dispatches frame interrupts and SMBUS (TWSI) completions; returns
 * IRQ_NONE if none of our bits are set (the line is shared).
 */
static irqreturn_t cafe_irq(int irq, void *data)
{
	struct cafe_camera *cam = data;
	unsigned int irqs;

	spin_lock(&cam->dev_lock);
	irqs = cafe_reg_read(cam, REG_IRQSTAT);
	if ((irqs & ALLIRQS) == 0) {
		spin_unlock(&cam->dev_lock);
		return IRQ_NONE;
	}
	if (irqs & FRAMEIRQS)
		cafe_frame_irq(cam, irqs);
	if (irqs & TWSIIRQS) {
		/* Ack the TWSI bits and wake the SMBUS waiter */
		cafe_reg_write(cam, REG_IRQSTAT, TWSIIRQS);
		wake_up(&cam->smbus_wait);
	}
	spin_unlock(&cam->dev_lock);
	return IRQ_HANDLED;
}
1917
1918
1919/* -------------------------------------------------------------------------- */
1920#ifdef CONFIG_VIDEO_ADV_DEBUG
/*
 * Debugfs stuff.
 */

/*
 * Shared scratch buffer for debugfs reads.  NOTE(review): not protected
 * by any lock, so concurrent readers could interleave output — presumably
 * acceptable for a debug interface; confirm.
 */
static char cafe_debug_buf[1024];
static struct dentry *cafe_dfs_root;

/*
 * Create the driver's debugfs directory.  On failure we just log it
 * and continue with cafe_dfs_root == NULL; the per-camera setup
 * checks for that and quietly does nothing.
 */
static void cafe_dfs_setup(void)
{
	cafe_dfs_root = debugfs_create_dir("cafe_ccic", NULL);
	if (IS_ERR(cafe_dfs_root)) {
		cafe_dfs_root = NULL; /* Never mind */
		printk(KERN_NOTICE "cafe_ccic unable to set up debugfs\n");
	}
}
1936
/* Remove the driver-level debugfs directory, if we created one. */
static void cafe_dfs_shutdown(void)
{
	if (cafe_dfs_root)
		debugfs_remove(cafe_dfs_root);
}
1942
/* Stash the cafe_camera pointer (stored in i_private) for the read ops. */
static int cafe_dfs_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
1948
/*
 * debugfs "regs" read: dump the interesting controller register
 * ranges into cafe_debug_buf and hand it to userspace.  The output
 * (at most a few hundred bytes) fits well within the 1024-byte buffer.
 */
static ssize_t cafe_dfs_read_regs(struct file *file,
		char __user *buf, size_t count, loff_t *ppos)
{
	struct cafe_camera *cam = file->private_data;
	char *s = cafe_debug_buf;
	int offset;

	for (offset = 0; offset < 0x44; offset += 4)
		s += sprintf(s, "%02x: %08x\n", offset,
				cafe_reg_read(cam, offset));
	for (offset = 0x88; offset <= 0x90; offset += 4)
		s += sprintf(s, "%02x: %08x\n", offset,
				cafe_reg_read(cam, offset));
	for (offset = 0xb4; offset <= 0xbc; offset += 4)
		s += sprintf(s, "%02x: %08x\n", offset,
				cafe_reg_read(cam, offset));
	for (offset = 0x3000; offset <= 0x300c; offset += 4)
		s += sprintf(s, "%04x: %08x\n", offset,
				cafe_reg_read(cam, offset));
	return simple_read_from_buffer(buf, count, ppos, cafe_debug_buf,
			s - cafe_debug_buf);
}
1971
/* File operations for the per-camera "regs-N" debugfs file. */
static struct file_operations cafe_dfs_reg_ops = {
	.owner = THIS_MODULE,
	.read = cafe_dfs_read_regs,
	.open = cafe_dfs_open
};
1977
/*
 * debugfs "cam" read: dump the sensor's register space over SMBUS.
 * NOTE(review): the return value of cafe_smbus_read_data() is ignored,
 * so a failed read prints whatever was left in v — presumably fine for
 * a debug interface; confirm.
 */
static ssize_t cafe_dfs_read_cam(struct file *file,
		char __user *buf, size_t count, loff_t *ppos)
{
	struct cafe_camera *cam = file->private_data;
	char *s = cafe_debug_buf;
	int offset;

	if (! cam->sensor)
		return -EINVAL;
	for (offset = 0x0; offset < 0x8a; offset++)
	{
		u8 v;

		cafe_smbus_read_data(cam, cam->sensor->addr, offset, &v);
		s += sprintf(s, "%02x: %02x\n", offset, v);
	}
	return simple_read_from_buffer(buf, count, ppos, cafe_debug_buf,
			s - cafe_debug_buf);
}
1997
/* File operations for the per-camera "cam-N" (sensor registers) debugfs file. */
static struct file_operations cafe_dfs_cam_ops = {
	.owner = THIS_MODULE,
	.read = cafe_dfs_read_cam,
	.open = cafe_dfs_open
};
2003
2004
2005
2006static void cafe_dfs_cam_setup(struct cafe_camera *cam)
2007{
2008 char fname[40];
2009
2010 if (!cafe_dfs_root)
2011 return;
2012 sprintf(fname, "regs-%d", cam->v4ldev.minor);
2013 cam->dfs_regs = debugfs_create_file(fname, 0444, cafe_dfs_root,
2014 cam, &cafe_dfs_reg_ops);
2015 sprintf(fname, "cam-%d", cam->v4ldev.minor);
2016 cam->dfs_cam_regs = debugfs_create_file(fname, 0444, cafe_dfs_root,
2017 cam, &cafe_dfs_cam_ops);
2018}
2019
2020
/*
 * Remove the per-camera debugfs files.  The IS_ERR checks guard
 * against debugfs_create_file() having returned an error pointer.
 */
static void cafe_dfs_cam_shutdown(struct cafe_camera *cam)
{
	if (! IS_ERR(cam->dfs_regs))
		debugfs_remove(cam->dfs_regs);
	if (! IS_ERR(cam->dfs_cam_regs))
		debugfs_remove(cam->dfs_cam_regs);
}
2028
#else

/* Debugfs disabled: stub the hooks out to nothing. */
#define cafe_dfs_setup()
#define cafe_dfs_shutdown()
#define cafe_dfs_cam_setup(cam)
#define cafe_dfs_cam_shutdown(cam)
#endif /* CONFIG_VIDEO_ADV_DEBUG */
2036
2037
2038
2039
2040/* ------------------------------------------------------------------------*/
2041/*
2042 * PCI interface stuff.
2043 */
2044
/*
 * PCI probe.  The CAFE chip exposes several PCI functions; we only
 * bind to the multimedia/video one.  Allocates and initializes the
 * cafe_camera, maps registers, hooks the (shared) IRQ, brings up the
 * controller, sets up SMBUS, and registers the v4l2 device.  Errors
 * unwind in reverse order through the out_* labels.
 */
static int cafe_pci_probe(struct pci_dev *pdev,
		const struct pci_device_id *id)
{
	int ret;
	u16 classword;
	struct cafe_camera *cam;
	/*
	 * Make sure we have a camera here - we'll get calls for
	 * the other cafe devices as well.
	 */
	pci_read_config_word(pdev, PCI_CLASS_DEVICE, &classword);
	if (classword != PCI_CLASS_MULTIMEDIA_VIDEO)
		return -ENODEV;
	/*
	 * Start putting together one of our big camera structures.
	 */
	ret = -ENOMEM;
	cam = kzalloc(sizeof(struct cafe_camera), GFP_KERNEL);
	if (cam == NULL)
		goto out;
	mutex_init(&cam->s_mutex);
	/* Hold s_mutex through setup so nobody sees a half-built camera */
	mutex_lock(&cam->s_mutex);
	spin_lock_init(&cam->dev_lock);
	cam->state = S_NOTREADY;
	cafe_set_config_needed(cam, 1);
	init_waitqueue_head(&cam->smbus_wait);
	init_waitqueue_head(&cam->iowait);
	cam->pdev = pdev;
	cam->pix_format = cafe_def_pix_format;
	INIT_LIST_HEAD(&cam->dev_list);
	INIT_LIST_HEAD(&cam->sb_avail);
	INIT_LIST_HEAD(&cam->sb_full);
	tasklet_init(&cam->s_tasklet, cafe_frame_tasklet, (unsigned long) cam);
	/*
	 * Get set up on the PCI bus.
	 */
	ret = pci_enable_device(pdev);
	if (ret)
		goto out_free;
	pci_set_master(pdev);

	ret = -EIO;
	cam->regs = pci_iomap(pdev, 0, 0);
	if (! cam->regs) {
		printk(KERN_ERR "Unable to ioremap cafe-ccic regs\n");
		goto out_free;
	}
	ret = request_irq(pdev->irq, cafe_irq, IRQF_SHARED, "cafe-ccic", cam);
	if (ret)
		goto out_iounmap;
	cafe_ctlr_init(cam);
	cafe_ctlr_power_up(cam);
	/*
	 * Set up I2C/SMBUS communications
	 */
	mutex_unlock(&cam->s_mutex); /* attach can deadlock */
	ret = cafe_smbus_setup(cam);
	if (ret)
		goto out_freeirq;
	/*
	 * Get the v4l2 setup done.
	 */
	mutex_lock(&cam->s_mutex);
	cam->v4ldev = cafe_v4l_template;
	cam->v4ldev.debug = 0;
//	cam->v4ldev.debug = V4L2_DEBUG_IOCTL_ARG;
	ret = video_register_device(&cam->v4ldev, VFL_TYPE_GRABBER, -1);
	if (ret)
		goto out_smbus;
	/*
	 * If so requested, try to get our DMA buffers now.
	 */
	if (alloc_bufs_at_load) {
		/* Allocation failure here is non-fatal; retried on first use */
		if (cafe_alloc_dma_bufs(cam, 1))
			cam_warn(cam, "Unable to alloc DMA buffers at load"
					" will try again later.");
	}

	cafe_dfs_cam_setup(cam);
	mutex_unlock(&cam->s_mutex);
	cafe_add_dev(cam);
	return 0;

  out_smbus:
	cafe_smbus_shutdown(cam);
  out_freeirq:
	cafe_ctlr_power_down(cam);
	free_irq(pdev->irq, cam);
  out_iounmap:
	pci_iounmap(pdev, cam->regs);
  out_free:
	kfree(cam);
  out:
	return ret;
}
2140
2141
2142/*
2143 * Shut down an initialized device
2144 */
/*
 * Shut down an initialized device
 */
static void cafe_shutdown(struct cafe_camera *cam)
{
/* FIXME: Make sure we take care of everything here */
	cafe_dfs_cam_shutdown(cam);
	if (cam->n_sbufs > 0)
		/* What if they are still mapped? Shouldn't be, but... */
		cafe_free_sio_buffers(cam);
	cafe_remove_dev(cam);
	cafe_ctlr_stop_dma(cam);
	cafe_ctlr_power_down(cam);
	cafe_smbus_shutdown(cam);
	cafe_free_dma_bufs(cam);
	free_irq(cam->pdev->irq, cam);
	pci_iounmap(cam->pdev, cam->regs);
	/* Unregistering drops the last v4l reference and eventually frees cam */
	video_unregister_device(&cam->v4ldev);
	/* kfree(cam); done in v4l_release () */
}
2162
2163
2164static void cafe_pci_remove(struct pci_dev *pdev)
2165{
2166 struct cafe_camera *cam = cafe_find_by_pdev(pdev);
2167
2168 if (cam == NULL) {
2169 cam_warn(cam, "pci_remove on unknown pdev %p\n", pdev);
2170 return;
2171 }
2172 mutex_lock(&cam->s_mutex);
2173 if (cam->users > 0)
2174 cam_warn(cam, "Removing a device with users!\n");
2175 cafe_shutdown(cam);
2176/* No unlock - it no longer exists */
2177}
2178
2179
2180
2181
/* PCI IDs we bind to; probe filters out the non-camera CAFE functions. */
static struct pci_device_id cafe_ids[] = {
	{ PCI_DEVICE(0x1148, 0x4340) }, /* Temporary ID on devel board */
	{ PCI_DEVICE(0x11ab, 0x4100) }, /* Eventual real ID */
	{ PCI_DEVICE(0x11ab, 0x4102) }, /* Really eventual real ID */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, cafe_ids);
2190
/* PCI driver glue. */
static struct pci_driver cafe_pci_driver = {
	.name = "cafe1000-ccic",
	.id_table = cafe_ids,
	.probe = cafe_pci_probe,
	.remove = cafe_pci_remove,
};
2197
2198
2199
2200
2201static int __init cafe_init(void)
2202{
2203 int ret;
2204
2205 printk(KERN_NOTICE "Marvell M88ALP01 'CAFE' Camera Controller version %d\n",
2206 CAFE_VERSION);
2207 cafe_dfs_setup();
2208 ret = pci_register_driver(&cafe_pci_driver);
2209 if (ret) {
2210 printk(KERN_ERR "Unable to register cafe_ccic driver\n");
2211 goto out;
2212 }
2213 request_module("ov7670"); /* FIXME want something more general */
2214 ret = 0;
2215
2216 out:
2217 return ret;
2218}
2219
2220
/* Module teardown: unregister the PCI driver, then remove debugfs. */
static void __exit cafe_exit(void)
{
	pci_unregister_driver(&cafe_pci_driver);
	cafe_dfs_shutdown();
}

module_init(cafe_init);
module_exit(cafe_exit);
diff --git a/drivers/media/video/cx88/Kconfig b/drivers/media/video/cx88/Kconfig
index 0f9d96963618..b2a66ba625f9 100644
--- a/drivers/media/video/cx88/Kconfig
+++ b/drivers/media/video/cx88/Kconfig
@@ -53,6 +53,7 @@ config VIDEO_CX88_DVB
53 select DVB_OR51132 if !DVB_FE_CUSTOMISE 53 select DVB_OR51132 if !DVB_FE_CUSTOMISE
54 select DVB_CX22702 if !DVB_FE_CUSTOMISE 54 select DVB_CX22702 if !DVB_FE_CUSTOMISE
55 select DVB_LGDT330X if !DVB_FE_CUSTOMISE 55 select DVB_LGDT330X if !DVB_FE_CUSTOMISE
56 select DVB_TUNER_LGH06XF if !DVB_FE_CUSTOMISE
56 select DVB_NXT200X if !DVB_FE_CUSTOMISE 57 select DVB_NXT200X if !DVB_FE_CUSTOMISE
57 select DVB_CX24123 if !DVB_FE_CUSTOMISE 58 select DVB_CX24123 if !DVB_FE_CUSTOMISE
58 select DVB_ISL6421 if !DVB_FE_CUSTOMISE 59 select DVB_ISL6421 if !DVB_FE_CUSTOMISE
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index 46738321adaf..0cf0360588e6 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -50,7 +50,6 @@ MODULE_PARM_DESC(debug,"enable debug messages [blackbird]");
50#define dprintk(level,fmt, arg...) if (debug >= level) \ 50#define dprintk(level,fmt, arg...) if (debug >= level) \
51 printk(KERN_DEBUG "%s/2-bb: " fmt, dev->core->name , ## arg) 51 printk(KERN_DEBUG "%s/2-bb: " fmt, dev->core->name , ## arg)
52 52
53static LIST_HEAD(cx8802_devlist);
54 53
55/* ------------------------------------------------------------------ */ 54/* ------------------------------------------------------------------ */
56 55
@@ -882,7 +881,7 @@ static int mpeg_do_ioctl(struct inode *inode, struct file *file,
882 BLACKBIRD_MPEG_CAPTURE, 881 BLACKBIRD_MPEG_CAPTURE,
883 BLACKBIRD_RAW_BITS_NONE); 882 BLACKBIRD_RAW_BITS_NONE);
884 883
885 cx88_do_ioctl(inode, file, 0, dev->core, cmd, arg, mpeg_do_ioctl); 884 cx88_do_ioctl(inode, file, 0, dev->core, cmd, arg, cx88_ioctl_hook);
886 885
887 blackbird_initialize_codec(dev); 886 blackbird_initialize_codec(dev);
888 cx88_set_scale(dev->core, dev->width, dev->height, 887 cx88_set_scale(dev->core, dev->width, dev->height,
@@ -914,11 +913,15 @@ static int mpeg_do_ioctl(struct inode *inode, struct file *file,
914 } 913 }
915 914
916 default: 915 default:
917 return cx88_do_ioctl(inode, file, 0, dev->core, cmd, arg, mpeg_do_ioctl); 916 return cx88_do_ioctl(inode, file, 0, dev->core, cmd, arg, cx88_ioctl_hook);
918 } 917 }
919 return 0; 918 return 0;
920} 919}
921 920
921int (*cx88_ioctl_hook)(struct inode *inode, struct file *file,
922 unsigned int cmd, void *arg);
923unsigned int (*cx88_ioctl_translator)(unsigned int cmd);
924
922static unsigned int mpeg_translate_ioctl(unsigned int cmd) 925static unsigned int mpeg_translate_ioctl(unsigned int cmd)
923{ 926{
924 return cmd; 927 return cmd;
@@ -927,33 +930,49 @@ static unsigned int mpeg_translate_ioctl(unsigned int cmd)
927static int mpeg_ioctl(struct inode *inode, struct file *file, 930static int mpeg_ioctl(struct inode *inode, struct file *file,
928 unsigned int cmd, unsigned long arg) 931 unsigned int cmd, unsigned long arg)
929{ 932{
930 cmd = mpeg_translate_ioctl( cmd ); 933 cmd = cx88_ioctl_translator( cmd );
931 return video_usercopy(inode, file, cmd, arg, mpeg_do_ioctl); 934 return video_usercopy(inode, file, cmd, arg, cx88_ioctl_hook);
932} 935}
933 936
934static int mpeg_open(struct inode *inode, struct file *file) 937static int mpeg_open(struct inode *inode, struct file *file)
935{ 938{
936 int minor = iminor(inode); 939 int minor = iminor(inode);
937 struct cx8802_dev *h,*dev = NULL; 940 struct cx8802_dev *dev = NULL;
938 struct cx8802_fh *fh; 941 struct cx8802_fh *fh;
939 struct list_head *list; 942 struct cx8802_driver *drv = NULL;
943 int err;
940 944
941 list_for_each(list,&cx8802_devlist) { 945 dev = cx8802_get_device(inode);
942 h = list_entry(list, struct cx8802_dev, devlist); 946
943 if (h->mpeg_dev->minor == minor) 947 dprintk( 1, "%s\n", __FUNCTION__);
944 dev = h; 948
945 } 949 if (dev == NULL)
946 if (NULL == dev)
947 return -ENODEV; 950 return -ENODEV;
948 951
949 if (blackbird_initialize_codec(dev) < 0) 952 /* Make sure we can acquire the hardware */
953 drv = cx8802_get_driver(dev, CX88_MPEG_BLACKBIRD);
954 if (drv) {
955 err = drv->request_acquire(drv);
956 if(err != 0) {
957 dprintk(1,"%s: Unable to acquire hardware, %d\n", __FUNCTION__, err);
958 return err;
959 }
960 }
961
962 if (blackbird_initialize_codec(dev) < 0) {
963 if (drv)
964 drv->request_release(drv);
950 return -EINVAL; 965 return -EINVAL;
966 }
951 dprintk(1,"open minor=%d\n",minor); 967 dprintk(1,"open minor=%d\n",minor);
952 968
953 /* allocate + initialize per filehandle data */ 969 /* allocate + initialize per filehandle data */
954 fh = kzalloc(sizeof(*fh),GFP_KERNEL); 970 fh = kzalloc(sizeof(*fh),GFP_KERNEL);
955 if (NULL == fh) 971 if (NULL == fh) {
972 if (drv)
973 drv->request_release(drv);
956 return -ENOMEM; 974 return -ENOMEM;
975 }
957 file->private_data = fh; 976 file->private_data = fh;
958 fh->dev = dev; 977 fh->dev = dev;
959 978
@@ -974,6 +993,8 @@ static int mpeg_open(struct inode *inode, struct file *file)
974static int mpeg_release(struct inode *inode, struct file *file) 993static int mpeg_release(struct inode *inode, struct file *file)
975{ 994{
976 struct cx8802_fh *fh = file->private_data; 995 struct cx8802_fh *fh = file->private_data;
996 struct cx8802_dev *dev = NULL;
997 struct cx8802_driver *drv = NULL;
977 998
978 /* blackbird_api_cmd(fh->dev, CX2341X_ENC_STOP_CAPTURE, 3, 0, BLACKBIRD_END_NOW, 0, 0x13); */ 999 /* blackbird_api_cmd(fh->dev, CX2341X_ENC_STOP_CAPTURE, 3, 0, BLACKBIRD_END_NOW, 0, 0x13); */
979 blackbird_api_cmd(fh->dev, CX2341X_ENC_STOP_CAPTURE, 3, 0, 1000 blackbird_api_cmd(fh->dev, CX2341X_ENC_STOP_CAPTURE, 3, 0,
@@ -992,6 +1013,16 @@ static int mpeg_release(struct inode *inode, struct file *file)
992 videobuf_mmap_free(&fh->mpegq); 1013 videobuf_mmap_free(&fh->mpegq);
993 file->private_data = NULL; 1014 file->private_data = NULL;
994 kfree(fh); 1015 kfree(fh);
1016
1017 /* Make sure we release the hardware */
1018 dev = cx8802_get_device(inode);
1019 if (dev == NULL)
1020 return -ENODEV;
1021
1022 drv = cx8802_get_driver(dev, CX88_MPEG_BLACKBIRD);
1023 if (drv)
1024 drv->request_release(drv);
1025
995 return 0; 1026 return 0;
996} 1027}
997 1028
@@ -1043,6 +1074,44 @@ static struct video_device cx8802_mpeg_template =
1043 1074
1044/* ------------------------------------------------------------------ */ 1075/* ------------------------------------------------------------------ */
1045 1076
1077/* The CX8802 MPEG API will call this when we can use the hardware */
1078static int cx8802_blackbird_advise_acquire(struct cx8802_driver *drv)
1079{
1080 struct cx88_core *core = drv->core;
1081 int err = 0;
1082
1083 switch (core->board) {
1084 case CX88_BOARD_HAUPPAUGE_HVR1300:
1085 /* By default, core setup will leave the cx22702 out of reset, on the bus.
1086 * We left the hardware on power up with the cx22702 active.
1087 * We're being given access to re-arrange the GPIOs.
1088 * Take the bus off the cx22702 and put the cx23416 on it.
1089 */
1090 cx_clear(MO_GP0_IO, 0x00000080); /* cx22702 in reset */
1091 cx_set(MO_GP0_IO, 0x00000004); /* Disable the cx22702 */
1092 break;
1093 default:
1094 err = -ENODEV;
1095 }
1096 return err;
1097}
1098
1099/* The CX8802 MPEG API will call this when we need to release the hardware */
1100static int cx8802_blackbird_advise_release(struct cx8802_driver *drv)
1101{
1102 struct cx88_core *core = drv->core;
1103 int err = 0;
1104
1105 switch (core->board) {
1106 case CX88_BOARD_HAUPPAUGE_HVR1300:
1107 /* Exit leaving the cx23416 on the bus */
1108 break;
1109 default:
1110 err = -ENODEV;
1111 }
1112 return err;
1113}
1114
1046static void blackbird_unregister_video(struct cx8802_dev *dev) 1115static void blackbird_unregister_video(struct cx8802_dev *dev)
1047{ 1116{
1048 if (dev->mpeg_dev) { 1117 if (dev->mpeg_dev) {
@@ -1073,28 +1142,23 @@ static int blackbird_register_video(struct cx8802_dev *dev)
1073 1142
1074/* ----------------------------------------------------------- */ 1143/* ----------------------------------------------------------- */
1075 1144
1076static int __devinit blackbird_probe(struct pci_dev *pci_dev, 1145static int cx8802_blackbird_probe(struct cx8802_driver *drv)
1077 const struct pci_device_id *pci_id)
1078{ 1146{
1079 struct cx8802_dev *dev; 1147 struct cx88_core *core = drv->core;
1080 struct cx88_core *core; 1148 struct cx8802_dev *dev = core->dvbdev;
1081 int err; 1149 int err;
1082 1150
1083 /* general setup */ 1151 dprintk( 1, "%s\n", __FUNCTION__);
1084 core = cx88_core_get(pci_dev); 1152 dprintk( 1, " ->being probed by Card=%d Name=%s, PCI %02x:%02x\n",
1085 if (NULL == core) 1153 core->board,
1086 return -EINVAL; 1154 core->name,
1155 core->pci_bus,
1156 core->pci_slot);
1087 1157
1088 err = -ENODEV; 1158 err = -ENODEV;
1089 if (!(cx88_boards[core->board].mpeg & CX88_MPEG_BLACKBIRD)) 1159 if (!(cx88_boards[core->board].mpeg & CX88_MPEG_BLACKBIRD))
1090 goto fail_core; 1160 goto fail_core;
1091 1161
1092 err = -ENOMEM;
1093 dev = kzalloc(sizeof(*dev),GFP_KERNEL);
1094 if (NULL == dev)
1095 goto fail_core;
1096 dev->pci = pci_dev;
1097 dev->core = core;
1098 dev->width = 720; 1162 dev->width = 720;
1099 dev->height = 576; 1163 dev->height = 576;
1100 cx2341x_fill_defaults(&dev->params); 1164 cx2341x_fill_defaults(&dev->params);
@@ -1106,64 +1170,36 @@ static int __devinit blackbird_probe(struct pci_dev *pci_dev,
1106 dev->height = 576; 1170 dev->height = 576;
1107 } 1171 }
1108 1172
1109 err = cx8802_init_common(dev);
1110 if (0 != err)
1111 goto fail_free;
1112
1113 /* blackbird stuff */ 1173 /* blackbird stuff */
1114 printk("%s/2: cx23416 based mpeg encoder (blackbird reference design)\n", 1174 printk("%s/2: cx23416 based mpeg encoder (blackbird reference design)\n",
1115 core->name); 1175 core->name);
1116 host_setup(dev->core); 1176 host_setup(dev->core);
1117 1177
1118 list_add_tail(&dev->devlist,&cx8802_devlist);
1119 blackbird_register_video(dev); 1178 blackbird_register_video(dev);
1120 1179
1121 /* initial device configuration: needed ? */ 1180 /* initial device configuration: needed ? */
1122 1181
1123 return 0; 1182 return 0;
1124 1183
1125 fail_free:
1126 kfree(dev);
1127 fail_core: 1184 fail_core:
1128 cx88_core_put(core,pci_dev);
1129 return err; 1185 return err;
1130} 1186}
1131 1187
1132static void __devexit blackbird_remove(struct pci_dev *pci_dev) 1188static int cx8802_blackbird_remove(struct cx8802_driver *drv)
1133{ 1189{
1134 struct cx8802_dev *dev = pci_get_drvdata(pci_dev);
1135
1136 /* blackbird */ 1190 /* blackbird */
1137 blackbird_unregister_video(dev); 1191 blackbird_unregister_video(drv->core->dvbdev);
1138 list_del(&dev->devlist);
1139 1192
1140 /* common */ 1193 return 0;
1141 cx8802_fini_common(dev);
1142 cx88_core_put(dev->core,dev->pci);
1143 kfree(dev);
1144} 1194}
1145 1195
1146static struct pci_device_id cx8802_pci_tbl[] = { 1196static struct cx8802_driver cx8802_blackbird_driver = {
1147 { 1197 .type_id = CX88_MPEG_BLACKBIRD,
1148 .vendor = 0x14f1, 1198 .hw_access = CX8802_DRVCTL_SHARED,
1149 .device = 0x8802, 1199 .probe = cx8802_blackbird_probe,
1150 .subvendor = PCI_ANY_ID, 1200 .remove = cx8802_blackbird_remove,
1151 .subdevice = PCI_ANY_ID, 1201 .advise_acquire = cx8802_blackbird_advise_acquire,
1152 },{ 1202 .advise_release = cx8802_blackbird_advise_release,
1153 /* --- end of list --- */
1154 }
1155};
1156MODULE_DEVICE_TABLE(pci, cx8802_pci_tbl);
1157
1158static struct pci_driver blackbird_pci_driver = {
1159 .name = "cx88-blackbird",
1160 .id_table = cx8802_pci_tbl,
1161 .probe = blackbird_probe,
1162 .remove = __devexit_p(blackbird_remove),
1163#ifdef CONFIG_PM
1164 .suspend = cx8802_suspend_common,
1165 .resume = cx8802_resume_common,
1166#endif
1167}; 1203};
1168 1204
1169static int blackbird_init(void) 1205static int blackbird_init(void)
@@ -1176,17 +1212,22 @@ static int blackbird_init(void)
1176 printk(KERN_INFO "cx2388x: snapshot date %04d-%02d-%02d\n", 1212 printk(KERN_INFO "cx2388x: snapshot date %04d-%02d-%02d\n",
1177 SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100); 1213 SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
1178#endif 1214#endif
1179 return pci_register_driver(&blackbird_pci_driver); 1215 cx88_ioctl_hook = mpeg_do_ioctl;
1216 cx88_ioctl_translator = mpeg_translate_ioctl;
1217 return cx8802_register_driver(&cx8802_blackbird_driver);
1180} 1218}
1181 1219
1182static void blackbird_fini(void) 1220static void blackbird_fini(void)
1183{ 1221{
1184 pci_unregister_driver(&blackbird_pci_driver); 1222 cx8802_unregister_driver(&cx8802_blackbird_driver);
1185} 1223}
1186 1224
1187module_init(blackbird_init); 1225module_init(blackbird_init);
1188module_exit(blackbird_fini); 1226module_exit(blackbird_fini);
1189 1227
1228EXPORT_SYMBOL(cx88_ioctl_hook);
1229EXPORT_SYMBOL(cx88_ioctl_translator);
1230
1190/* ----------------------------------------------------------- */ 1231/* ----------------------------------------------------------- */
1191/* 1232/*
1192 * Local variables: 1233 * Local variables:
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index f764a57c56be..c791708b1336 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -281,18 +281,22 @@ struct cx88_board cx88_boards[] = {
281 .type = CX88_VMUX_TELEVISION, 281 .type = CX88_VMUX_TELEVISION,
282 .vmux = 0, 282 .vmux = 0,
283 .gpio0 = 0x0000bde2, 283 .gpio0 = 0x0000bde2,
284 .extadc = 1,
284 },{ 285 },{
285 .type = CX88_VMUX_COMPOSITE1, 286 .type = CX88_VMUX_COMPOSITE1,
286 .vmux = 1, 287 .vmux = 1,
287 .gpio0 = 0x0000bde6, 288 .gpio0 = 0x0000bde6,
289 .extadc = 1,
288 },{ 290 },{
289 .type = CX88_VMUX_SVIDEO, 291 .type = CX88_VMUX_SVIDEO,
290 .vmux = 2, 292 .vmux = 2,
291 .gpio0 = 0x0000bde6, 293 .gpio0 = 0x0000bde6,
294 .extadc = 1,
292 }}, 295 }},
293 .radio = { 296 .radio = {
294 .type = CX88_RADIO, 297 .type = CX88_RADIO,
295 .gpio0 = 0x0000bd62, 298 .gpio0 = 0x0000bd62,
299 .extadc = 1,
296 }, 300 },
297 .mpeg = CX88_MPEG_BLACKBIRD, 301 .mpeg = CX88_MPEG_BLACKBIRD,
298 }, 302 },
@@ -353,6 +357,7 @@ struct cx88_board cx88_boards[] = {
353 .type = CX88_VMUX_SVIDEO, 357 .type = CX88_VMUX_SVIDEO,
354 .vmux = 2, 358 .vmux = 2,
355 .gpio0 = 0x0000fde6, // 0x0000fda6 L,R RCA audio in? 359 .gpio0 = 0x0000fde6, // 0x0000fda6 L,R RCA audio in?
360 .extadc = 1,
356 }}, 361 }},
357 .radio = { 362 .radio = {
358 .type = CX88_RADIO, 363 .type = CX88_RADIO,
@@ -523,6 +528,7 @@ struct cx88_board cx88_boards[] = {
523 .input = {{ 528 .input = {{
524 .type = CX88_VMUX_TELEVISION, 529 .type = CX88_VMUX_TELEVISION,
525 .vmux = 0, 530 .vmux = 0,
531 .extadc = 1,
526 }}, 532 }},
527 .mpeg = CX88_MPEG_BLACKBIRD, 533 .mpeg = CX88_MPEG_BLACKBIRD,
528 }, 534 },
@@ -646,18 +652,22 @@ struct cx88_board cx88_boards[] = {
646 .type = CX88_VMUX_TELEVISION, 652 .type = CX88_VMUX_TELEVISION,
647 .vmux = 0, 653 .vmux = 0,
648 .gpio0 = 0x00009d80, 654 .gpio0 = 0x00009d80,
655 .extadc = 1,
649 },{ 656 },{
650 .type = CX88_VMUX_COMPOSITE1, 657 .type = CX88_VMUX_COMPOSITE1,
651 .vmux = 1, 658 .vmux = 1,
652 .gpio0 = 0x00009d76, 659 .gpio0 = 0x00009d76,
660 .extadc = 1,
653 },{ 661 },{
654 .type = CX88_VMUX_SVIDEO, 662 .type = CX88_VMUX_SVIDEO,
655 .vmux = 2, 663 .vmux = 2,
656 .gpio0 = 0x00009d76, 664 .gpio0 = 0x00009d76,
665 .extadc = 1,
657 }}, 666 }},
658 .radio = { 667 .radio = {
659 .type = CX88_RADIO, 668 .type = CX88_RADIO,
660 .gpio0 = 0x00009d00, 669 .gpio0 = 0x00009d00,
670 .extadc = 1,
661 }, 671 },
662 .mpeg = CX88_MPEG_BLACKBIRD, 672 .mpeg = CX88_MPEG_BLACKBIRD,
663 }, 673 },
@@ -786,25 +796,29 @@ struct cx88_board cx88_boards[] = {
786 .tuner_addr = ADDR_UNSET, 796 .tuner_addr = ADDR_UNSET,
787 .radio_addr = ADDR_UNSET, 797 .radio_addr = ADDR_UNSET,
788 .tda9887_conf = TDA9887_PRESENT, 798 .tda9887_conf = TDA9887_PRESENT,
789 .mpeg = CX88_MPEG_BLACKBIRD,
790 .input = {{ 799 .input = {{
791 .type = CX88_VMUX_COMPOSITE1, 800 .type = CX88_VMUX_COMPOSITE1,
792 .vmux = 0, 801 .vmux = 0,
793 .gpio0 = 0x0000cd73, 802 .gpio0 = 0x0000cd73,
803 .extadc = 1,
794 },{ 804 },{
795 .type = CX88_VMUX_SVIDEO, 805 .type = CX88_VMUX_SVIDEO,
796 .vmux = 1, 806 .vmux = 1,
797 .gpio0 = 0x0000cd73, 807 .gpio0 = 0x0000cd73,
808 .extadc = 1,
798 },{ 809 },{
799 .type = CX88_VMUX_TELEVISION, 810 .type = CX88_VMUX_TELEVISION,
800 .vmux = 3, 811 .vmux = 3,
801 .gpio0 = 0x0000cdb3, 812 .gpio0 = 0x0000cdb3,
813 .extadc = 1,
802 }}, 814 }},
803 .radio = { 815 .radio = {
804 .type = CX88_RADIO, 816 .type = CX88_RADIO,
805 .vmux = 2, 817 .vmux = 2,
806 .gpio0 = 0x0000cdf3, 818 .gpio0 = 0x0000cdf3,
819 .extadc = 1,
807 }, 820 },
821 .mpeg = CX88_MPEG_BLACKBIRD,
808 }, 822 },
809 [CX88_BOARD_KWORLD_VSTREAM_EXPERT_DVD] = { 823 [CX88_BOARD_KWORLD_VSTREAM_EXPERT_DVD] = {
810 /* Alexander Wold <awold@bigfoot.com> */ 824 /* Alexander Wold <awold@bigfoot.com> */
@@ -1050,7 +1064,6 @@ struct cx88_board cx88_boards[] = {
1050 .mpeg = CX88_MPEG_DVB, 1064 .mpeg = CX88_MPEG_DVB,
1051 }, 1065 },
1052 [CX88_BOARD_KWORLD_HARDWARE_MPEG_TV_XPERT] = { 1066 [CX88_BOARD_KWORLD_HARDWARE_MPEG_TV_XPERT] = {
1053 /* FIXME: Audio not working for s-video / composite inputs. */
1054 .name = "KWorld HardwareMpegTV XPert", 1067 .name = "KWorld HardwareMpegTV XPert",
1055 .tuner_type = TUNER_PHILIPS_TDA8290, 1068 .tuner_type = TUNER_PHILIPS_TDA8290,
1056 .radio_type = UNSET, 1069 .radio_type = UNSET,
@@ -1065,10 +1078,12 @@ struct cx88_board cx88_boards[] = {
1065 .type = CX88_VMUX_COMPOSITE1, 1078 .type = CX88_VMUX_COMPOSITE1,
1066 .vmux = 1, 1079 .vmux = 1,
1067 .gpio0 = 0x3de6, 1080 .gpio0 = 0x3de6,
1081 .extadc = 1,
1068 },{ 1082 },{
1069 .type = CX88_VMUX_SVIDEO, 1083 .type = CX88_VMUX_SVIDEO,
1070 .vmux = 2, 1084 .vmux = 2,
1071 .gpio0 = 0x3de6, 1085 .gpio0 = 0x3de6,
1086 .extadc = 1,
1072 }}, 1087 }},
1073 .radio = { 1088 .radio = {
1074 .type = CX88_RADIO, 1089 .type = CX88_RADIO,
@@ -1252,35 +1267,35 @@ struct cx88_board cx88_boards[] = {
1252 .gpio0 = 0x070b, 1267 .gpio0 = 0x070b,
1253 }}, 1268 }},
1254 }, 1269 },
1255 [CX88_BOARD_TE_DTV_250_OEM_SWANN] = { 1270 [CX88_BOARD_TE_DTV_250_OEM_SWANN] = {
1256 .name = "Shenzhen Tungsten Ages Tech TE-DTV-250 / Swann OEM", 1271 .name = "Shenzhen Tungsten Ages Tech TE-DTV-250 / Swann OEM",
1257 .tuner_type = TUNER_LG_PAL_NEW_TAPC, 1272 .tuner_type = TUNER_LG_PAL_NEW_TAPC,
1258 .radio_type = UNSET, 1273 .radio_type = UNSET,
1259 .tuner_addr = ADDR_UNSET, 1274 .tuner_addr = ADDR_UNSET,
1260 .radio_addr = ADDR_UNSET, 1275 .radio_addr = ADDR_UNSET,
1261 .input = {{ 1276 .input = {{
1262 .type = CX88_VMUX_TELEVISION, 1277 .type = CX88_VMUX_TELEVISION,
1263 .vmux = 0, 1278 .vmux = 0,
1264 .gpio0 = 0x003fffff, 1279 .gpio0 = 0x003fffff,
1265 .gpio1 = 0x00e00000, 1280 .gpio1 = 0x00e00000,
1266 .gpio2 = 0x003fffff, 1281 .gpio2 = 0x003fffff,
1267 .gpio3 = 0x02000000, 1282 .gpio3 = 0x02000000,
1268 },{ 1283 },{
1269 .type = CX88_VMUX_COMPOSITE1, 1284 .type = CX88_VMUX_COMPOSITE1,
1270 .vmux = 1, 1285 .vmux = 1,
1271 .gpio0 = 0x003fffff, 1286 .gpio0 = 0x003fffff,
1272 .gpio1 = 0x00e00000, 1287 .gpio1 = 0x00e00000,
1273 .gpio2 = 0x003fffff, 1288 .gpio2 = 0x003fffff,
1274 .gpio3 = 0x02000000, 1289 .gpio3 = 0x02000000,
1275 },{ 1290 },{
1276 .type = CX88_VMUX_SVIDEO, 1291 .type = CX88_VMUX_SVIDEO,
1277 .vmux = 2, 1292 .vmux = 2,
1278 .gpio0 = 0x003fffff, 1293 .gpio0 = 0x003fffff,
1279 .gpio1 = 0x00e00000, 1294 .gpio1 = 0x00e00000,
1280 .gpio2 = 0x003fffff, 1295 .gpio2 = 0x003fffff,
1281 .gpio3 = 0x02000000, 1296 .gpio3 = 0x02000000,
1282 }}, 1297 }},
1283 }, 1298 },
1284 [CX88_BOARD_HAUPPAUGE_HVR1300] = { 1299 [CX88_BOARD_HAUPPAUGE_HVR1300] = {
1285 .name = "Hauppauge WinTV-HVR1300 DVB-T/Hybrid MPEG Encoder", 1300 .name = "Hauppauge WinTV-HVR1300 DVB-T/Hybrid MPEG Encoder",
1286 .tuner_type = TUNER_PHILIPS_FMD1216ME_MK3, 1301 .tuner_type = TUNER_PHILIPS_FMD1216ME_MK3,
@@ -1293,17 +1308,20 @@ struct cx88_board cx88_boards[] = {
1293 .type = CX88_VMUX_TELEVISION, 1308 .type = CX88_VMUX_TELEVISION,
1294 .vmux = 0, 1309 .vmux = 0,
1295 .gpio0 = 0xe780, 1310 .gpio0 = 0xe780,
1311 .extadc = 1,
1296 },{ 1312 },{
1297 .type = CX88_VMUX_COMPOSITE1, 1313 .type = CX88_VMUX_COMPOSITE1,
1298 .vmux = 1, 1314 .vmux = 1,
1299 .gpio0 = 0xe780, 1315 .gpio0 = 0xe780,
1316 .extadc = 1,
1300 },{ 1317 },{
1301 .type = CX88_VMUX_SVIDEO, 1318 .type = CX88_VMUX_SVIDEO,
1302 .vmux = 2, 1319 .vmux = 2,
1303 .gpio0 = 0xe780, 1320 .gpio0 = 0xe780,
1321 .extadc = 1,
1304 }}, 1322 }},
1305 /* fixme: Add radio support */ 1323 /* fixme: Add radio support */
1306 .mpeg = CX88_MPEG_DVB, 1324 .mpeg = CX88_MPEG_DVB | CX88_MPEG_BLACKBIRD,
1307 }, 1325 },
1308}; 1326};
1309const unsigned int cx88_bcount = ARRAY_SIZE(cx88_boards); 1327const unsigned int cx88_bcount = ARRAY_SIZE(cx88_boards);
@@ -1513,6 +1531,10 @@ struct cx88_subid cx88_subids[] = {
1513 },{ 1531 },{
1514 .subvendor = 0x17de, 1532 .subvendor = 0x17de,
1515 .subdevice = 0x0840, 1533 .subdevice = 0x0840,
1534 .card = CX88_BOARD_KWORLD_HARDWARE_MPEG_TV_XPERT,
1535 },{
1536 .subvendor = 0x1421,
1537 .subdevice = 0x0305,
1516 .card = CX88_BOARD_KWORLD_HARDWARE_MPEG_TV_XPERT, 1538 .card = CX88_BOARD_KWORLD_HARDWARE_MPEG_TV_XPERT,
1517 },{ 1539 },{
1518 .subvendor = 0x18ac, 1540 .subvendor = 0x18ac,
diff --git a/drivers/media/video/cx88/cx88-core.c b/drivers/media/video/cx88/cx88-core.c
index 4b655f2ef278..453af5e943ff 100644
--- a/drivers/media/video/cx88/cx88-core.c
+++ b/drivers/media/video/cx88/cx88-core.c
@@ -1153,7 +1153,7 @@ void cx88_core_put(struct cx88_core *core, struct pci_dev *pci)
1153 mutex_lock(&devlist); 1153 mutex_lock(&devlist);
1154 cx88_ir_fini(core); 1154 cx88_ir_fini(core);
1155 if (0 == core->i2c_rc) 1155 if (0 == core->i2c_rc)
1156 i2c_bit_del_bus(&core->i2c_adap); 1156 i2c_del_adapter(&core->i2c_adap);
1157 list_del(&core->devlist); 1157 list_del(&core->devlist);
1158 iounmap(core->lmmio); 1158 iounmap(core->lmmio);
1159 cx88_devcount--; 1159 cx88_devcount--;
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index 0ef13e7efa2e..8b203354fccd 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -42,7 +42,7 @@
42#include "cx22702.h" 42#include "cx22702.h"
43#include "or51132.h" 43#include "or51132.h"
44#include "lgdt330x.h" 44#include "lgdt330x.h"
45#include "lg_h06xf.h" 45#include "lgh06xf.h"
46#include "nxt200x.h" 46#include "nxt200x.h"
47#include "cx24123.h" 47#include "cx24123.h"
48#include "isl6421.h" 48#include "isl6421.h"
@@ -57,7 +57,7 @@ module_param(debug, int, 0644);
57MODULE_PARM_DESC(debug,"enable debug messages [dvb]"); 57MODULE_PARM_DESC(debug,"enable debug messages [dvb]");
58 58
59#define dprintk(level,fmt, arg...) if (debug >= level) \ 59#define dprintk(level,fmt, arg...) if (debug >= level) \
60 printk(KERN_DEBUG "%s/2-dvb: " fmt, dev->core->name , ## arg) 60 printk(KERN_DEBUG "%s/2-dvb: " fmt, core->name, ## arg)
61 61
62/* ------------------------------------------------------------------ */ 62/* ------------------------------------------------------------------ */
63 63
@@ -74,8 +74,8 @@ static int dvb_buf_setup(struct videobuf_queue *q,
74 return 0; 74 return 0;
75} 75}
76 76
77static int dvb_buf_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb, 77static int dvb_buf_prepare(struct videobuf_queue *q,
78 enum v4l2_field field) 78 struct videobuf_buffer *vb, enum v4l2_field field)
79{ 79{
80 struct cx8802_dev *dev = q->priv_data; 80 struct cx8802_dev *dev = q->priv_data;
81 return cx8802_buf_prepare(q, dev, (struct cx88_buffer*)vb,field); 81 return cx8802_buf_prepare(q, dev, (struct cx88_buffer*)vb,field);
@@ -87,7 +87,8 @@ static void dvb_buf_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
87 cx8802_buf_queue(dev, (struct cx88_buffer*)vb); 87 cx8802_buf_queue(dev, (struct cx88_buffer*)vb);
88} 88}
89 89
90static void dvb_buf_release(struct videobuf_queue *q, struct videobuf_buffer *vb) 90static void dvb_buf_release(struct videobuf_queue *q,
91 struct videobuf_buffer *vb)
91{ 92{
92 cx88_free_buffer(q, (struct cx88_buffer*)vb); 93 cx88_free_buffer(q, (struct cx88_buffer*)vb);
93} 94}
@@ -100,6 +101,26 @@ static struct videobuf_queue_ops dvb_qops = {
100}; 101};
101 102
102/* ------------------------------------------------------------------ */ 103/* ------------------------------------------------------------------ */
104
105static int cx88_dvb_bus_ctrl(struct dvb_frontend* fe, int acquire)
106{
107 struct cx8802_dev *dev= fe->dvb->priv;
108 struct cx8802_driver *drv = NULL;
109 int ret = 0;
110
111 drv = cx8802_get_driver(dev, CX88_MPEG_DVB);
112 if (drv) {
113 if (acquire)
114 ret = drv->request_acquire(drv);
115 else
116 ret = drv->request_release(drv);
117 }
118
119 return ret;
120}
121
122/* ------------------------------------------------------------------ */
123
103static int dvico_fusionhdtv_demod_init(struct dvb_frontend* fe) 124static int dvico_fusionhdtv_demod_init(struct dvb_frontend* fe)
104{ 125{
105 static u8 clock_config [] = { CLOCK_CTL, 0x38, 0x39 }; 126 static u8 clock_config [] = { CLOCK_CTL, 0x38, 0x39 };
@@ -268,35 +289,6 @@ static struct mt352_config dntv_live_dvbt_pro_config = {
268}; 289};
269#endif 290#endif
270 291
271static int dvico_hybrid_tuner_set_params(struct dvb_frontend *fe,
272 struct dvb_frontend_parameters *params)
273{
274 u8 pllbuf[4];
275 struct cx8802_dev *dev= fe->dvb->priv;
276 struct i2c_msg msg =
277 { .addr = dev->core->pll_addr, .flags = 0,
278 .buf = pllbuf, .len = 4 };
279 int err;
280
281 dvb_pll_configure(dev->core->pll_desc, pllbuf,
282 params->frequency,
283 params->u.ofdm.bandwidth);
284
285 if (fe->ops.i2c_gate_ctrl)
286 fe->ops.i2c_gate_ctrl(fe, 1);
287 if ((err = i2c_transfer(&dev->core->i2c_adap, &msg, 1)) != 1) {
288 printk(KERN_WARNING "cx88-dvb: %s error "
289 "(addr %02x <- %02x, err = %i)\n",
290 __FUNCTION__, pllbuf[0], pllbuf[1], err);
291 if (err < 0)
292 return err;
293 else
294 return -EREMOTEIO;
295 }
296
297 return 0;
298}
299
300static struct zl10353_config dvico_fusionhdtv_hybrid = { 292static struct zl10353_config dvico_fusionhdtv_hybrid = {
301 .demod_address = 0x0f, 293 .demod_address = 0x0f,
302 .no_tuner = 1, 294 .no_tuner = 1,
@@ -311,28 +303,12 @@ static struct cx22702_config connexant_refboard_config = {
311 .output_mode = CX22702_SERIAL_OUTPUT, 303 .output_mode = CX22702_SERIAL_OUTPUT,
312}; 304};
313 305
314static struct cx22702_config hauppauge_novat_config = { 306static struct cx22702_config hauppauge_hvr_config = {
315 .demod_address = 0x43,
316 .output_mode = CX22702_SERIAL_OUTPUT,
317};
318
319static struct cx22702_config hauppauge_hvr1100_config = {
320 .demod_address = 0x63, 307 .demod_address = 0x63,
321 .output_mode = CX22702_SERIAL_OUTPUT, 308 .output_mode = CX22702_SERIAL_OUTPUT,
322}; 309};
323 310
324static struct cx22702_config hauppauge_hvr1300_config = { 311static int or51132_set_ts_param(struct dvb_frontend* fe, int is_punctured)
325 .demod_address = 0x63,
326 .output_mode = CX22702_SERIAL_OUTPUT,
327};
328
329static struct cx22702_config hauppauge_hvr3000_config = {
330 .demod_address = 0x63,
331 .output_mode = CX22702_SERIAL_OUTPUT,
332};
333
334static int or51132_set_ts_param(struct dvb_frontend* fe,
335 int is_punctured)
336{ 312{
337 struct cx8802_dev *dev= fe->dvb->priv; 313 struct cx8802_dev *dev= fe->dvb->priv;
338 dev->ts_gen_cntrl = is_punctured ? 0x04 : 0x00; 314 dev->ts_gen_cntrl = is_punctured ? 0x04 : 0x00;
@@ -344,50 +320,6 @@ static struct or51132_config pchdtv_hd3000 = {
344 .set_ts_params = or51132_set_ts_param, 320 .set_ts_params = or51132_set_ts_param,
345}; 321};
346 322
347static int lgdt3302_tuner_set_params(struct dvb_frontend* fe,
348 struct dvb_frontend_parameters* params)
349{
350 /* FIXME make this routine use the tuner-simple code.
351 * It could probably be shared with a number of ATSC
352 * frontends. Many share the same tuner with analog TV. */
353
354 struct cx8802_dev *dev= fe->dvb->priv;
355 struct cx88_core *core = dev->core;
356 u8 buf[4];
357 struct i2c_msg msg =
358 { .addr = dev->core->pll_addr, .flags = 0, .buf = buf, .len = 4 };
359 int err;
360
361 dvb_pll_configure(core->pll_desc, buf, params->frequency, 0);
362 dprintk(1, "%s: tuner at 0x%02x bytes: 0x%02x 0x%02x 0x%02x 0x%02x\n",
363 __FUNCTION__, msg.addr, buf[0],buf[1],buf[2],buf[3]);
364
365 if (fe->ops.i2c_gate_ctrl)
366 fe->ops.i2c_gate_ctrl(fe, 1);
367 if ((err = i2c_transfer(&core->i2c_adap, &msg, 1)) != 1) {
368 printk(KERN_WARNING "cx88-dvb: %s error "
369 "(addr %02x <- %02x, err = %i)\n",
370 __FUNCTION__, buf[0], buf[1], err);
371 if (err < 0)
372 return err;
373 else
374 return -EREMOTEIO;
375 }
376 return 0;
377}
378
379static int lgdt3303_tuner_set_params(struct dvb_frontend* fe,
380 struct dvb_frontend_parameters* params)
381{
382 struct cx8802_dev *dev= fe->dvb->priv;
383 struct cx88_core *core = dev->core;
384
385 /* Put the analog decoder in standby to keep it quiet */
386 cx88_call_i2c_clients (dev->core, TUNER_SET_STANDBY, NULL);
387
388 return lg_h06xf_pll_set(fe, &core->i2c_adap, params);
389}
390
391static int lgdt330x_pll_rf_set(struct dvb_frontend* fe, int index) 323static int lgdt330x_pll_rf_set(struct dvb_frontend* fe, int index)
392{ 324{
393 struct cx8802_dev *dev= fe->dvb->priv; 325 struct cx8802_dev *dev= fe->dvb->priv;
@@ -432,8 +364,7 @@ static struct lgdt330x_config pchdtv_hd5500 = {
432 .set_ts_params = lgdt330x_set_ts_param, 364 .set_ts_params = lgdt330x_set_ts_param,
433}; 365};
434 366
435static int nxt200x_set_ts_param(struct dvb_frontend* fe, 367static int nxt200x_set_ts_param(struct dvb_frontend* fe, int is_punctured)
436 int is_punctured)
437{ 368{
438 struct cx8802_dev *dev= fe->dvb->priv; 369 struct cx8802_dev *dev= fe->dvb->priv;
439 dev->ts_gen_cntrl = is_punctured ? 0x04 : 0x00; 370 dev->ts_gen_cntrl = is_punctured ? 0x04 : 0x00;
@@ -469,11 +400,10 @@ static int kworld_dvbs_100_set_voltage(struct dvb_frontend* fe,
469 struct cx8802_dev *dev= fe->dvb->priv; 400 struct cx8802_dev *dev= fe->dvb->priv;
470 struct cx88_core *core = dev->core; 401 struct cx88_core *core = dev->core;
471 402
472 if (voltage == SEC_VOLTAGE_OFF) { 403 if (voltage == SEC_VOLTAGE_OFF)
473 cx_write(MO_GP0_IO, 0x000006fb); 404 cx_write(MO_GP0_IO, 0x000006fb);
474 } else { 405 else
475 cx_write(MO_GP0_IO, 0x000006f9); 406 cx_write(MO_GP0_IO, 0x000006f9);
476 }
477 407
478 if (core->prev_set_voltage) 408 if (core->prev_set_voltage)
479 return core->prev_set_voltage(fe, voltage); 409 return core->prev_set_voltage(fe, voltage);
@@ -522,7 +452,7 @@ static int dvb_register(struct cx8802_dev *dev)
522 switch (dev->core->board) { 452 switch (dev->core->board) {
523 case CX88_BOARD_HAUPPAUGE_DVB_T1: 453 case CX88_BOARD_HAUPPAUGE_DVB_T1:
524 dev->dvb.frontend = dvb_attach(cx22702_attach, 454 dev->dvb.frontend = dvb_attach(cx22702_attach,
525 &hauppauge_novat_config, 455 &connexant_refboard_config,
526 &dev->core->i2c_adap); 456 &dev->core->i2c_adap);
527 if (dev->dvb.frontend != NULL) { 457 if (dev->dvb.frontend != NULL) {
528 dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61, 458 dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
@@ -547,32 +477,11 @@ static int dvb_register(struct cx8802_dev *dev)
547 case CX88_BOARD_HAUPPAUGE_HVR1100: 477 case CX88_BOARD_HAUPPAUGE_HVR1100:
548 case CX88_BOARD_HAUPPAUGE_HVR1100LP: 478 case CX88_BOARD_HAUPPAUGE_HVR1100LP:
549 dev->dvb.frontend = dvb_attach(cx22702_attach, 479 dev->dvb.frontend = dvb_attach(cx22702_attach,
550 &hauppauge_hvr1100_config, 480 &hauppauge_hvr_config,
551 &dev->core->i2c_adap);
552 if (dev->dvb.frontend != NULL) {
553 dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
554 &dev->core->i2c_adap,
555 &dvb_pll_fmd1216me);
556 }
557 break;
558 case CX88_BOARD_HAUPPAUGE_HVR1300:
559 dev->dvb.frontend = dvb_attach(cx22702_attach,
560 &hauppauge_hvr1300_config,
561 &dev->core->i2c_adap);
562 if (dev->dvb.frontend != NULL) {
563 dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
564 &dev->core->i2c_adap,
565 &dvb_pll_fmd1216me);
566 }
567 break;
568 case CX88_BOARD_HAUPPAUGE_HVR3000:
569 dev->dvb.frontend = dvb_attach(cx22702_attach,
570 &hauppauge_hvr3000_config,
571 &dev->core->i2c_adap); 481 &dev->core->i2c_adap);
572 if (dev->dvb.frontend != NULL) { 482 if (dev->dvb.frontend != NULL) {
573 dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61, 483 dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
574 &dev->core->i2c_adap, 484 &dev->core->i2c_adap, &dvb_pll_fmd1216me);
575 &dvb_pll_fmd1216me);
576 } 485 }
577 break; 486 break;
578 case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PLUS: 487 case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PLUS:
@@ -647,18 +556,17 @@ static int dvb_register(struct cx8802_dev *dev)
647#endif 556#endif
648 break; 557 break;
649 case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_HYBRID: 558 case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_HYBRID:
650 dev->core->pll_addr = 0x61;
651 dev->core->pll_desc = &dvb_pll_thomson_fe6600;
652 dev->dvb.frontend = dvb_attach(zl10353_attach, 559 dev->dvb.frontend = dvb_attach(zl10353_attach,
653 &dvico_fusionhdtv_hybrid, 560 &dvico_fusionhdtv_hybrid,
654 &dev->core->i2c_adap); 561 &dev->core->i2c_adap);
655 if (dev->dvb.frontend != NULL) { 562 if (dev->dvb.frontend != NULL) {
656 dev->dvb.frontend->ops.tuner_ops.set_params = dvico_hybrid_tuner_set_params; 563 dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
564 &dev->core->i2c_adap,
565 &dvb_pll_thomson_fe6600);
657 } 566 }
658 break; 567 break;
659 case CX88_BOARD_PCHDTV_HD3000: 568 case CX88_BOARD_PCHDTV_HD3000:
660 dev->dvb.frontend = dvb_attach(or51132_attach, 569 dev->dvb.frontend = dvb_attach(or51132_attach, &pchdtv_hd3000,
661 &pchdtv_hd3000,
662 &dev->core->i2c_adap); 570 &dev->core->i2c_adap);
663 if (dev->dvb.frontend != NULL) { 571 if (dev->dvb.frontend != NULL) {
664 dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61, 572 dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
@@ -679,13 +587,13 @@ static int dvb_register(struct cx8802_dev *dev)
679 587
680 /* Select RF connector callback */ 588 /* Select RF connector callback */
681 fusionhdtv_3_gold.pll_rf_set = lgdt330x_pll_rf_set; 589 fusionhdtv_3_gold.pll_rf_set = lgdt330x_pll_rf_set;
682 dev->core->pll_addr = 0x61;
683 dev->core->pll_desc = &dvb_pll_microtune_4042;
684 dev->dvb.frontend = dvb_attach(lgdt330x_attach, 590 dev->dvb.frontend = dvb_attach(lgdt330x_attach,
685 &fusionhdtv_3_gold, 591 &fusionhdtv_3_gold,
686 &dev->core->i2c_adap); 592 &dev->core->i2c_adap);
687 if (dev->dvb.frontend != NULL) { 593 if (dev->dvb.frontend != NULL) {
688 dev->dvb.frontend->ops.tuner_ops.set_params = lgdt3302_tuner_set_params; 594 dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
595 &dev->core->i2c_adap,
596 &dvb_pll_microtune_4042);
689 } 597 }
690 } 598 }
691 break; 599 break;
@@ -699,13 +607,13 @@ static int dvb_register(struct cx8802_dev *dev)
699 mdelay(100); 607 mdelay(100);
700 cx_set(MO_GP0_IO, 9); 608 cx_set(MO_GP0_IO, 9);
701 mdelay(200); 609 mdelay(200);
702 dev->core->pll_addr = 0x61;
703 dev->core->pll_desc = &dvb_pll_thomson_dtt761x;
704 dev->dvb.frontend = dvb_attach(lgdt330x_attach, 610 dev->dvb.frontend = dvb_attach(lgdt330x_attach,
705 &fusionhdtv_3_gold, 611 &fusionhdtv_3_gold,
706 &dev->core->i2c_adap); 612 &dev->core->i2c_adap);
707 if (dev->dvb.frontend != NULL) { 613 if (dev->dvb.frontend != NULL) {
708 dev->dvb.frontend->ops.tuner_ops.set_params = lgdt3302_tuner_set_params; 614 dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
615 &dev->core->i2c_adap,
616 &dvb_pll_thomson_dtt761x);
709 } 617 }
710 } 618 }
711 break; 619 break;
@@ -723,7 +631,8 @@ static int dvb_register(struct cx8802_dev *dev)
723 &fusionhdtv_5_gold, 631 &fusionhdtv_5_gold,
724 &dev->core->i2c_adap); 632 &dev->core->i2c_adap);
725 if (dev->dvb.frontend != NULL) { 633 if (dev->dvb.frontend != NULL) {
726 dev->dvb.frontend->ops.tuner_ops.set_params = lgdt3303_tuner_set_params; 634 dvb_attach(lgh06xf_attach, dev->dvb.frontend,
635 &dev->core->i2c_adap);
727 } 636 }
728 } 637 }
729 break; 638 break;
@@ -741,7 +650,8 @@ static int dvb_register(struct cx8802_dev *dev)
741 &pchdtv_hd5500, 650 &pchdtv_hd5500,
742 &dev->core->i2c_adap); 651 &dev->core->i2c_adap);
743 if (dev->dvb.frontend != NULL) { 652 if (dev->dvb.frontend != NULL) {
744 dev->dvb.frontend->ops.tuner_ops.set_params = lgdt3303_tuner_set_params; 653 dvb_attach(lgh06xf_attach, dev->dvb.frontend,
654 &dev->core->i2c_adap);
745 } 655 }
746 } 656 }
747 break; 657 break;
@@ -782,6 +692,24 @@ static int dvb_register(struct cx8802_dev *dev)
782 dev->dvb.frontend->ops.set_voltage = geniatech_dvbs_set_voltage; 692 dev->dvb.frontend->ops.set_voltage = geniatech_dvbs_set_voltage;
783 } 693 }
784 break; 694 break;
695 case CX88_BOARD_HAUPPAUGE_HVR1300:
696 dev->dvb.frontend = dvb_attach(cx22702_attach,
697 &hauppauge_hvr_config,
698 &dev->core->i2c_adap);
699 if (dev->dvb.frontend != NULL) {
700 dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
701 &dev->core->i2c_adap, &dvb_pll_fmd1216me);
702 }
703 break;
704 case CX88_BOARD_HAUPPAUGE_HVR3000:
705 dev->dvb.frontend = dvb_attach(cx22702_attach,
706 &hauppauge_hvr_config,
707 &dev->core->i2c_adap);
708 if (dev->dvb.frontend != NULL) {
709 dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
710 &dev->core->i2c_adap, &dvb_pll_fmd1216me);
711 }
712 break;
785 default: 713 default:
786 printk("%s: The frontend of your DVB/ATSC card isn't supported yet\n", 714 printk("%s: The frontend of your DVB/ATSC card isn't supported yet\n",
787 dev->core->name); 715 dev->core->name);
@@ -796,6 +724,8 @@ static int dvb_register(struct cx8802_dev *dev)
796 dev->dvb.frontend->ops.info.frequency_min = dev->core->pll_desc->min; 724 dev->dvb.frontend->ops.info.frequency_min = dev->core->pll_desc->min;
797 dev->dvb.frontend->ops.info.frequency_max = dev->core->pll_desc->max; 725 dev->dvb.frontend->ops.info.frequency_max = dev->core->pll_desc->max;
798 } 726 }
727 /* Ensure all frontends negotiate bus access */
728 dev->dvb.frontend->ops.ts_bus_ctrl = cx88_dvb_bus_ctrl;
799 729
800 /* Put the analog decoder in standby to keep it quiet */ 730 /* Put the analog decoder in standby to keep it quiet */
801 cx88_call_i2c_clients (dev->core, TUNER_SET_STANDBY, NULL); 731 cx88_call_i2c_clients (dev->core, TUNER_SET_STANDBY, NULL);
@@ -806,37 +736,67 @@ static int dvb_register(struct cx8802_dev *dev)
806 736
807/* ----------------------------------------------------------- */ 737/* ----------------------------------------------------------- */
808 738
809static int __devinit dvb_probe(struct pci_dev *pci_dev, 739/* CX8802 MPEG -> mini driver - We have been given the hardware */
810 const struct pci_device_id *pci_id) 740static int cx8802_dvb_advise_acquire(struct cx8802_driver *drv)
811{ 741{
812 struct cx8802_dev *dev; 742 struct cx88_core *core = drv->core;
813 struct cx88_core *core; 743 int err = 0;
744 dprintk( 1, "%s\n", __FUNCTION__);
745
746 switch (core->board) {
747 case CX88_BOARD_HAUPPAUGE_HVR1300:
748 /* We arrive here with either the cx23416 or the cx22702
749 * on the bus. Take the bus from the cx23416 and enable the
750 * cx22702 demod
751 */
752 cx_set(MO_GP0_IO, 0x00000080); /* cx22702 out of reset and enable */
753 cx_clear(MO_GP0_IO, 0x00000004);
754 udelay(1000);
755 break;
756 default:
757 err = -ENODEV;
758 }
759 return err;
760}
761
762/* CX8802 MPEG -> mini driver - We no longer have the hardware */
763static int cx8802_dvb_advise_release(struct cx8802_driver *drv)
764{
765 struct cx88_core *core = drv->core;
766 int err = 0;
767 dprintk( 1, "%s\n", __FUNCTION__);
768
769 switch (core->board) {
770 case CX88_BOARD_HAUPPAUGE_HVR1300:
771 /* Do Nothing, leave the cx22702 on the bus. */
772 break;
773 default:
774 err = -ENODEV;
775 }
776 return err;
777}
778
779static int cx8802_dvb_probe(struct cx8802_driver *drv)
780{
781 struct cx88_core *core = drv->core;
782 struct cx8802_dev *dev = drv->core->dvbdev;
814 int err; 783 int err;
815 784
816 /* general setup */ 785 dprintk( 1, "%s\n", __FUNCTION__);
817 core = cx88_core_get(pci_dev); 786 dprintk( 1, " ->being probed by Card=%d Name=%s, PCI %02x:%02x\n",
818 if (NULL == core) 787 core->board,
819 return -EINVAL; 788 core->name,
789 core->pci_bus,
790 core->pci_slot);
820 791
821 err = -ENODEV; 792 err = -ENODEV;
822 if (!(cx88_boards[core->board].mpeg & CX88_MPEG_DVB)) 793 if (!(cx88_boards[core->board].mpeg & CX88_MPEG_DVB))
823 goto fail_core; 794 goto fail_core;
824 795
825 err = -ENOMEM;
826 dev = kzalloc(sizeof(*dev),GFP_KERNEL);
827 if (NULL == dev)
828 goto fail_core;
829 dev->pci = pci_dev;
830 dev->core = core;
831
832 err = cx8802_init_common(dev);
833 if (0 != err)
834 goto fail_free;
835
836#ifdef HAVE_VP3054_I2C 796#ifdef HAVE_VP3054_I2C
837 err = vp3054_i2c_probe(dev); 797 err = vp3054_i2c_probe(dev);
838 if (0 != err) 798 if (0 != err)
839 goto fail_free; 799 goto fail_core;
840#endif 800#endif
841 801
842 /* dvb stuff */ 802 /* dvb stuff */
@@ -848,28 +808,16 @@ static int __devinit dvb_probe(struct pci_dev *pci_dev,
848 sizeof(struct cx88_buffer), 808 sizeof(struct cx88_buffer),
849 dev); 809 dev);
850 err = dvb_register(dev); 810 err = dvb_register(dev);
851 if (0 != err) 811 if (err != 0)
852 goto fail_fini; 812 printk("%s dvb_register failed err = %d\n", __FUNCTION__, err);
853 813
854 /* Maintain a reference to cx88-video can query the 8802 device. */
855 core->dvbdev = dev;
856 return 0;
857
858 fail_fini:
859 cx8802_fini_common(dev);
860 fail_free:
861 kfree(dev);
862 fail_core: 814 fail_core:
863 cx88_core_put(core,pci_dev);
864 return err; 815 return err;
865} 816}
866 817
867static void __devexit dvb_remove(struct pci_dev *pci_dev) 818static int cx8802_dvb_remove(struct cx8802_driver *drv)
868{ 819{
869 struct cx8802_dev *dev = pci_get_drvdata(pci_dev); 820 struct cx8802_dev *dev = drv->core->dvbdev;
870
871 /* Destroy any 8802 reference. */
872 dev->core->dvbdev = NULL;
873 821
874 /* dvb */ 822 /* dvb */
875 videobuf_dvb_unregister(&dev->dvb); 823 videobuf_dvb_unregister(&dev->dvb);
@@ -878,33 +826,16 @@ static void __devexit dvb_remove(struct pci_dev *pci_dev)
878 vp3054_i2c_remove(dev); 826 vp3054_i2c_remove(dev);
879#endif 827#endif
880 828
881 /* common */ 829 return 0;
882 cx8802_fini_common(dev);
883 cx88_core_put(dev->core,dev->pci);
884 kfree(dev);
885} 830}
886 831
887static struct pci_device_id cx8802_pci_tbl[] = { 832static struct cx8802_driver cx8802_dvb_driver = {
888 { 833 .type_id = CX88_MPEG_DVB,
889 .vendor = 0x14f1, 834 .hw_access = CX8802_DRVCTL_SHARED,
890 .device = 0x8802, 835 .probe = cx8802_dvb_probe,
891 .subvendor = PCI_ANY_ID, 836 .remove = cx8802_dvb_remove,
892 .subdevice = PCI_ANY_ID, 837 .advise_acquire = cx8802_dvb_advise_acquire,
893 },{ 838 .advise_release = cx8802_dvb_advise_release,
894 /* --- end of list --- */
895 }
896};
897MODULE_DEVICE_TABLE(pci, cx8802_pci_tbl);
898
899static struct pci_driver dvb_pci_driver = {
900 .name = "cx88-dvb",
901 .id_table = cx8802_pci_tbl,
902 .probe = dvb_probe,
903 .remove = __devexit_p(dvb_remove),
904#ifdef CONFIG_PM
905 .suspend = cx8802_suspend_common,
906 .resume = cx8802_resume_common,
907#endif
908}; 839};
909 840
910static int dvb_init(void) 841static int dvb_init(void)
@@ -917,12 +848,12 @@ static int dvb_init(void)
917 printk(KERN_INFO "cx2388x: snapshot date %04d-%02d-%02d\n", 848 printk(KERN_INFO "cx2388x: snapshot date %04d-%02d-%02d\n",
918 SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100); 849 SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
919#endif 850#endif
920 return pci_register_driver(&dvb_pci_driver); 851 return cx8802_register_driver(&cx8802_dvb_driver);
921} 852}
922 853
923static void dvb_fini(void) 854static void dvb_fini(void)
924{ 855{
925 pci_unregister_driver(&dvb_pci_driver); 856 cx8802_unregister_driver(&cx8802_dvb_driver);
926} 857}
927 858
928module_init(dvb_init); 859module_init(dvb_init);
diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c
index e60a0a52e4b2..8136673fe9e8 100644
--- a/drivers/media/video/cx88/cx88-input.c
+++ b/drivers/media/video/cx88/cx88-input.c
@@ -155,6 +155,35 @@ static void cx88_ir_work(struct work_struct *work)
155 mod_timer(&ir->timer, timeout); 155 mod_timer(&ir->timer, timeout);
156} 156}
157 157
158static void cx88_ir_start(struct cx88_core *core, struct cx88_IR *ir)
159{
160 if (ir->polling) {
161 INIT_WORK(&ir->work, cx88_ir_work);
162 init_timer(&ir->timer);
163 ir->timer.function = ir_timer;
164 ir->timer.data = (unsigned long)ir;
165 schedule_work(&ir->work);
166 }
167 if (ir->sampling) {
168 core->pci_irqmask |= (1 << 18); /* IR_SMP_INT */
169 cx_write(MO_DDS_IO, 0xa80a80); /* 4 kHz sample rate */
170 cx_write(MO_DDSCFG_IO, 0x5); /* enable */
171 }
172}
173
174static void cx88_ir_stop(struct cx88_core *core, struct cx88_IR *ir)
175{
176 if (ir->sampling) {
177 cx_write(MO_DDSCFG_IO, 0x0);
178 core->pci_irqmask &= ~(1 << 18);
179 }
180
181 if (ir->polling) {
182 del_timer_sync(&ir->timer);
183 flush_scheduled_work();
184 }
185}
186
158/* ---------------------------------------------------------------------- */ 187/* ---------------------------------------------------------------------- */
159 188
160int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci) 189int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
@@ -163,14 +192,12 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
163 struct input_dev *input_dev; 192 struct input_dev *input_dev;
164 IR_KEYTAB_TYPE *ir_codes = NULL; 193 IR_KEYTAB_TYPE *ir_codes = NULL;
165 int ir_type = IR_TYPE_OTHER; 194 int ir_type = IR_TYPE_OTHER;
195 int err = -ENOMEM;
166 196
167 ir = kzalloc(sizeof(*ir), GFP_KERNEL); 197 ir = kzalloc(sizeof(*ir), GFP_KERNEL);
168 input_dev = input_allocate_device(); 198 input_dev = input_allocate_device();
169 if (!ir || !input_dev) { 199 if (!ir || !input_dev)
170 kfree(ir); 200 goto err_out_free;
171 input_free_device(input_dev);
172 return -ENOMEM;
173 }
174 201
175 ir->input = input_dev; 202 ir->input = input_dev;
176 203
@@ -280,9 +307,8 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
280 } 307 }
281 308
282 if (NULL == ir_codes) { 309 if (NULL == ir_codes) {
283 kfree(ir); 310 err = -ENODEV;
284 input_free_device(input_dev); 311 goto err_out_free;
285 return -ENODEV;
286 } 312 }
287 313
288 /* init input device */ 314 /* init input device */
@@ -307,23 +333,22 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
307 ir->core = core; 333 ir->core = core;
308 core->ir = ir; 334 core->ir = ir;
309 335
310 if (ir->polling) { 336 cx88_ir_start(core, ir);
311 INIT_WORK(&ir->work, cx88_ir_work);
312 init_timer(&ir->timer);
313 ir->timer.function = ir_timer;
314 ir->timer.data = (unsigned long)ir;
315 schedule_work(&ir->work);
316 }
317 if (ir->sampling) {
318 core->pci_irqmask |= (1 << 18); /* IR_SMP_INT */
319 cx_write(MO_DDS_IO, 0xa80a80); /* 4 kHz sample rate */
320 cx_write(MO_DDSCFG_IO, 0x5); /* enable */
321 }
322 337
323 /* all done */ 338 /* all done */
324 input_register_device(ir->input); 339 err = input_register_device(ir->input);
340 if (err)
341 goto err_out_stop;
325 342
326 return 0; 343 return 0;
344
345 err_out_stop:
346 cx88_ir_stop(core, ir);
347 core->ir = NULL;
348 err_out_free:
349 input_free_device(input_dev);
350 kfree(ir);
351 return err;
327} 352}
328 353
329int cx88_ir_fini(struct cx88_core *core) 354int cx88_ir_fini(struct cx88_core *core)
@@ -334,15 +359,7 @@ int cx88_ir_fini(struct cx88_core *core)
334 if (NULL == ir) 359 if (NULL == ir)
335 return 0; 360 return 0;
336 361
337 if (ir->sampling) { 362 cx88_ir_stop(core, ir);
338 cx_write(MO_DDSCFG_IO, 0x0);
339 core->pci_irqmask &= ~(1 << 18);
340 }
341 if (ir->polling) {
342 del_timer(&ir->timer);
343 flush_scheduled_work();
344 }
345
346 input_unregister_device(ir->input); 363 input_unregister_device(ir->input);
347 kfree(ir); 364 kfree(ir);
348 365
diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
index 6b23a4e6f66d..1fe1a833c7c7 100644
--- a/drivers/media/video/cx88/cx88-mpeg.c
+++ b/drivers/media/video/cx88/cx88-mpeg.c
@@ -44,8 +44,12 @@ module_param(debug,int,0644);
44MODULE_PARM_DESC(debug,"enable debug messages [mpeg]"); 44MODULE_PARM_DESC(debug,"enable debug messages [mpeg]");
45 45
46#define dprintk(level,fmt, arg...) if (debug >= level) \ 46#define dprintk(level,fmt, arg...) if (debug >= level) \
47 printk(KERN_DEBUG "%s/2: " fmt, dev->core->name , ## arg) 47 printk(KERN_DEBUG "%s/2-mpeg: " fmt, dev->core->name, ## arg)
48 48
49#define mpeg_dbg(level,fmt, arg...) if (debug >= level) \
50 printk(KERN_DEBUG "%s/2-mpeg: " fmt, core->name, ## arg)
51
52static LIST_HEAD(cx8802_devlist);
49/* ------------------------------------------------------------------ */ 53/* ------------------------------------------------------------------ */
50 54
51static int cx8802_start_dma(struct cx8802_dev *dev, 55static int cx8802_start_dma(struct cx8802_dev *dev,
@@ -65,17 +69,13 @@ static int cx8802_start_dma(struct cx8802_dev *dev,
65 69
66 /* FIXME: this needs a review. 70 /* FIXME: this needs a review.
67 * also: move to cx88-blackbird + cx88-dvb source files? */ 71 * also: move to cx88-blackbird + cx88-dvb source files? */
68 if (cx88_boards[core->board].mpeg == (CX88_MPEG_DVB | CX88_MPEG_BLACKBIRD) ) {
69 /* Report a warning until the mini driver patch is applied,
70 * else the following conditions will set the dma registers incorrectly.
71 * This will be removed in the next major patch and changes to the conditions
72 * will be made.
73 */
74 printk(KERN_INFO "%s() board->(CX88_MPEG_DVB | CX88_MPEG_BLACKBIRD) is invalid\n", __FUNCTION__);
75 return -EINVAL;
76 }
77 72
78 if (cx88_boards[core->board].mpeg & CX88_MPEG_DVB) { 73 dprintk( 1, "core->active_type_id = 0x%08x\n", core->active_type_id);
74
75 if ( (core->active_type_id == CX88_MPEG_DVB) &&
76 (cx88_boards[core->board].mpeg & CX88_MPEG_DVB) ) {
77
78 dprintk( 1, "cx8802_start_dma doing .dvb\n");
79 /* negedge driven & software reset */ 79 /* negedge driven & software reset */
80 cx_write(TS_GEN_CNTRL, 0x0040 | dev->ts_gen_cntrl); 80 cx_write(TS_GEN_CNTRL, 0x0040 | dev->ts_gen_cntrl);
81 udelay(100); 81 udelay(100);
@@ -93,15 +93,17 @@ static int cx8802_start_dma(struct cx8802_dev *dev,
93 cx_write(MO_PINMUX_IO, 0x88); /* Enable MPEG parallel IO and video signal pins */ 93 cx_write(MO_PINMUX_IO, 0x88); /* Enable MPEG parallel IO and video signal pins */
94 udelay(100); 94 udelay(100);
95 break; 95 break;
96 case CX88_BOARD_HAUPPAUGE_HVR1300:
97 break;
96 default: 98 default:
97 cx_write(TS_SOP_STAT, 0x00); 99 cx_write(TS_SOP_STAT, 0x00);
98 break; 100 break;
99 } 101 }
100 cx_write(TS_GEN_CNTRL, dev->ts_gen_cntrl); 102 cx_write(TS_GEN_CNTRL, dev->ts_gen_cntrl);
101 udelay(100); 103 udelay(100);
102 } 104 } else if ( (core->active_type_id == CX88_MPEG_BLACKBIRD) &&
103 105 (cx88_boards[core->board].mpeg & CX88_MPEG_BLACKBIRD) ) {
104 if (cx88_boards[core->board].mpeg & CX88_MPEG_BLACKBIRD) { 106 dprintk( 1, "cx8802_start_dma doing .blackbird\n");
105 cx_write(MO_PINMUX_IO, 0x88); /* enable MPEG parallel IO */ 107 cx_write(MO_PINMUX_IO, 0x88); /* enable MPEG parallel IO */
106 108
107 cx_write(TS_GEN_CNTRL, 0x46); /* punctured clock TS & posedge driven & software reset */ 109 cx_write(TS_GEN_CNTRL, 0x46); /* punctured clock TS & posedge driven & software reset */
@@ -112,6 +114,10 @@ static int cx8802_start_dma(struct cx8802_dev *dev,
112 114
113 cx_write(TS_GEN_CNTRL, 0x06); /* punctured clock TS & posedge driven */ 115 cx_write(TS_GEN_CNTRL, 0x06); /* punctured clock TS & posedge driven */
114 udelay(100); 116 udelay(100);
117 } else {
118 printk( "%s() Failed. Unsupported value in .mpeg (0x%08x)\n", __FUNCTION__,
119 cx88_boards[core->board].mpeg );
120 return -EINVAL;
115 } 121 }
116 122
117 /* reset counter */ 123 /* reset counter */
@@ -542,8 +548,315 @@ int cx8802_resume_common(struct pci_dev *pci_dev)
542 return 0; 548 return 0;
543} 549}
544 550
551struct cx8802_dev * cx8802_get_device(struct inode *inode)
552{
553 int minor = iminor(inode);
554 struct cx8802_dev *h = NULL;
555 struct list_head *list;
556
557 list_for_each(list,&cx8802_devlist) {
558 h = list_entry(list, struct cx8802_dev, devlist);
559 if (h->mpeg_dev->minor == minor)
560 return h;
561 }
562
563 return NULL;
564}
565
566struct cx8802_driver * cx8802_get_driver(struct cx8802_dev *dev, enum cx88_board_type btype)
567{
568 struct cx8802_dev *h = NULL;
569 struct cx8802_driver *d = NULL;
570 struct list_head *list;
571 struct list_head *list2;
572
573 list_for_each(list,&cx8802_devlist) {
574 h = list_entry(list, struct cx8802_dev, devlist);
575 if (h != dev)
576 continue;
577
578 list_for_each(list2, &h->drvlist.devlist) {
579 d = list_entry(list2, struct cx8802_driver, devlist);
580
581 /* only unregister the correct driver type */
582 if (d->type_id == btype) {
583 return d;
584 }
585 }
586 }
587
588 return NULL;
589}
590
591/* Driver asked for hardware access. */
592int cx8802_request_acquire(struct cx8802_driver *drv)
593{
594 struct cx88_core *core = drv->core;
595
596 /* Fail a request for hardware if the device is busy. */
597 if (core->active_type_id != CX88_BOARD_NONE)
598 return -EBUSY;
599
600 if (drv->advise_acquire)
601 {
602 core->active_type_id = drv->type_id;
603 drv->advise_acquire(drv);
604
605 mpeg_dbg(1,"%s() Post acquire GPIO=%x\n", __FUNCTION__, cx_read(MO_GP0_IO));
606 }
607
608 return 0;
609}
610
611/* Driver asked to release hardware. */
612int cx8802_request_release(struct cx8802_driver *drv)
613{
614 struct cx88_core *core = drv->core;
615
616 if (drv->advise_release)
617 {
618 drv->advise_release(drv);
619 core->active_type_id = CX88_BOARD_NONE;
620 mpeg_dbg(1,"%s() Post release GPIO=%x\n", __FUNCTION__, cx_read(MO_GP0_IO));
621 }
622
623 return 0;
624}
625
626static int cx8802_check_driver(struct cx8802_driver *drv)
627{
628 if (drv == NULL)
629 return -ENODEV;
630
631 if ((drv->type_id != CX88_MPEG_DVB) &&
632 (drv->type_id != CX88_MPEG_BLACKBIRD))
633 return -EINVAL;
634
635 if ((drv->hw_access != CX8802_DRVCTL_SHARED) &&
636 (drv->hw_access != CX8802_DRVCTL_EXCLUSIVE))
637 return -EINVAL;
638
639 if ((drv->probe == NULL) ||
640 (drv->remove == NULL) ||
641 (drv->advise_acquire == NULL) ||
642 (drv->advise_release == NULL))
643 return -EINVAL;
644
645 return 0;
646}
647
648int cx8802_register_driver(struct cx8802_driver *drv)
649{
650 struct cx8802_dev *h;
651 struct cx8802_driver *driver;
652 struct list_head *list;
653 int err = 0, i = 0;
654
655 printk(KERN_INFO "%s() ->registering driver type=%s access=%s\n", __FUNCTION__ ,
656 drv->type_id == CX88_MPEG_DVB ? "dvb" : "blackbird",
657 drv->hw_access == CX8802_DRVCTL_SHARED ? "shared" : "exclusive");
658
659 if ((err = cx8802_check_driver(drv)) != 0) {
660 printk(KERN_INFO "%s() cx8802_driver is invalid\n", __FUNCTION__ );
661 return err;
662 }
663
664 list_for_each(list,&cx8802_devlist) {
665 h = list_entry(list, struct cx8802_dev, devlist);
666
667 printk(KERN_INFO "CORE %s: subsystem: %04x:%04x, board: %s [card=%d]\n",
668 h->core->name,h->pci->subsystem_vendor,
669 h->pci->subsystem_device,cx88_boards[h->core->board].name,
670 h->core->board);
671
672 /* Bring up a new struct for each driver instance */
673 driver = kzalloc(sizeof(*drv),GFP_KERNEL);
674 if (driver == NULL)
675 return -ENOMEM;
676
677 /* Snapshot of the driver registration data */
678 drv->core = h->core;
679 drv->suspend = cx8802_suspend_common;
680 drv->resume = cx8802_resume_common;
681 drv->request_acquire = cx8802_request_acquire;
682 drv->request_release = cx8802_request_release;
683 memcpy(driver, drv, sizeof(*driver));
684
685 err = drv->probe(driver);
686 if (err == 0) {
687 i++;
688 mutex_lock(&drv->core->lock);
689 list_add_tail(&driver->devlist,&h->drvlist.devlist);
690 mutex_unlock(&drv->core->lock);
691 } else {
692 printk(KERN_ERR "%s() ->probe failed err = %d\n", __FUNCTION__, err);
693 }
694
695 }
696 if (i == 0)
697 err = -ENODEV;
698 else
699 err = 0;
700
701 return err;
702}
703
704int cx8802_unregister_driver(struct cx8802_driver *drv)
705{
706 struct cx8802_dev *h;
707 struct cx8802_driver *d;
708 struct list_head *list;
709 struct list_head *list2, *q;
710 int err = 0, i = 0;
711
712 printk(KERN_INFO "%s() ->unregistering driver type=%s\n", __FUNCTION__ ,
713 drv->type_id == CX88_MPEG_DVB ? "dvb" : "blackbird");
714
715 list_for_each(list,&cx8802_devlist) {
716 i++;
717 h = list_entry(list, struct cx8802_dev, devlist);
718
719 printk(KERN_INFO "CORE %s: subsystem: %04x:%04x, board: %s [card=%d]\n",
720 h->core->name,h->pci->subsystem_vendor,
721 h->pci->subsystem_device,cx88_boards[h->core->board].name,
722 h->core->board);
723
724 list_for_each_safe(list2, q, &h->drvlist.devlist) {
725 d = list_entry(list2, struct cx8802_driver, devlist);
726
727 /* only unregister the correct driver type */
728 if (d->type_id != drv->type_id)
729 continue;
730
731 err = d->remove(d);
732 if (err == 0) {
733 mutex_lock(&drv->core->lock);
734 list_del(list2);
735 mutex_unlock(&drv->core->lock);
736 } else
737 printk(KERN_ERR "%s() ->remove failed err = %d\n", __FUNCTION__, err);
738
739 }
740
741 }
742
743 return err;
744}
745
545/* ----------------------------------------------------------- */ 746/* ----------------------------------------------------------- */
747static int __devinit cx8802_probe(struct pci_dev *pci_dev,
748 const struct pci_device_id *pci_id)
749{
750 struct cx8802_dev *dev;
751 struct cx88_core *core;
752 int err;
753
754 /* general setup */
755 core = cx88_core_get(pci_dev);
756 if (NULL == core)
757 return -EINVAL;
546 758
759 printk("%s/2: cx2388x 8802 Driver Manager\n", core->name);
760
761 err = -ENODEV;
762 if (!cx88_boards[core->board].mpeg)
763 goto fail_core;
764
765 err = -ENOMEM;
766 dev = kzalloc(sizeof(*dev),GFP_KERNEL);
767 if (NULL == dev)
768 goto fail_core;
769 dev->pci = pci_dev;
770 dev->core = core;
771
772 err = cx8802_init_common(dev);
773 if (err != 0)
774 goto fail_free;
775
776 INIT_LIST_HEAD(&dev->drvlist.devlist);
777 list_add_tail(&dev->devlist,&cx8802_devlist);
778
779 /* Maintain a reference so cx88-video can query the 8802 device. */
780 core->dvbdev = dev;
781 return 0;
782
783 fail_free:
784 kfree(dev);
785 fail_core:
786 cx88_core_put(core,pci_dev);
787 return err;
788}
789
790static void __devexit cx8802_remove(struct pci_dev *pci_dev)
791{
792 struct cx8802_dev *dev;
793 struct cx8802_driver *h;
794 struct list_head *list;
795
796 dev = pci_get_drvdata(pci_dev);
797
798 dprintk( 1, "%s\n", __FUNCTION__);
799
800 list_for_each(list,&dev->drvlist.devlist) {
801 h = list_entry(list, struct cx8802_driver, devlist);
802 dprintk( 1, " ->driver\n");
803 if (h->remove == NULL) {
804 printk(KERN_ERR "%s .. skipping driver, no probe function\n", __FUNCTION__);
805 continue;
806 }
807 printk(KERN_INFO "%s .. Removing driver type %d\n", __FUNCTION__, h->type_id);
808 cx8802_unregister_driver(h);
809 list_del(&dev->drvlist.devlist);
810 }
811
812 /* Destroy any 8802 reference. */
813 dev->core->dvbdev = NULL;
814
815 /* common */
816 cx8802_fini_common(dev);
817 cx88_core_put(dev->core,dev->pci);
818 kfree(dev);
819}
820
821static struct pci_device_id cx8802_pci_tbl[] = {
822 {
823 .vendor = 0x14f1,
824 .device = 0x8802,
825 .subvendor = PCI_ANY_ID,
826 .subdevice = PCI_ANY_ID,
827 },{
828 /* --- end of list --- */
829 }
830};
831MODULE_DEVICE_TABLE(pci, cx8802_pci_tbl);
832
833static struct pci_driver cx8802_pci_driver = {
834 .name = "cx88-mpeg driver manager",
835 .id_table = cx8802_pci_tbl,
836 .probe = cx8802_probe,
837 .remove = __devexit_p(cx8802_remove),
838};
839
840static int cx8802_init(void)
841{
842 printk(KERN_INFO "cx2388x cx88-mpeg Driver Manager version %d.%d.%d loaded\n",
843 (CX88_VERSION_CODE >> 16) & 0xff,
844 (CX88_VERSION_CODE >> 8) & 0xff,
845 CX88_VERSION_CODE & 0xff);
846#ifdef SNAPSHOT
847 printk(KERN_INFO "cx2388x: snapshot date %04d-%02d-%02d\n",
848 SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
849#endif
850 return pci_register_driver(&cx8802_pci_driver);
851}
852
853static void cx8802_fini(void)
854{
855 pci_unregister_driver(&cx8802_pci_driver);
856}
857
858module_init(cx8802_init);
859module_exit(cx8802_fini);
547EXPORT_SYMBOL(cx8802_buf_prepare); 860EXPORT_SYMBOL(cx8802_buf_prepare);
548EXPORT_SYMBOL(cx8802_buf_queue); 861EXPORT_SYMBOL(cx8802_buf_queue);
549EXPORT_SYMBOL(cx8802_cancel_buffers); 862EXPORT_SYMBOL(cx8802_cancel_buffers);
@@ -551,9 +864,10 @@ EXPORT_SYMBOL(cx8802_cancel_buffers);
551EXPORT_SYMBOL(cx8802_init_common); 864EXPORT_SYMBOL(cx8802_init_common);
552EXPORT_SYMBOL(cx8802_fini_common); 865EXPORT_SYMBOL(cx8802_fini_common);
553 866
554EXPORT_SYMBOL(cx8802_suspend_common); 867EXPORT_SYMBOL(cx8802_register_driver);
555EXPORT_SYMBOL(cx8802_resume_common); 868EXPORT_SYMBOL(cx8802_unregister_driver);
556 869EXPORT_SYMBOL(cx8802_get_device);
870EXPORT_SYMBOL(cx8802_get_driver);
557/* ----------------------------------------------------------- */ 871/* ----------------------------------------------------------- */
558/* 872/*
559 * Local variables: 873 * Local variables:
diff --git a/drivers/media/video/cx88/cx88-tvaudio.c b/drivers/media/video/cx88/cx88-tvaudio.c
index 58ba9f773524..3482e0114d43 100644
--- a/drivers/media/video/cx88/cx88-tvaudio.c
+++ b/drivers/media/video/cx88/cx88-tvaudio.c
@@ -143,19 +143,6 @@ static void set_audio_finish(struct cx88_core *core, u32 ctl)
143 cx88_start_audio_dma(core); 143 cx88_start_audio_dma(core);
144 144
145 if (cx88_boards[core->board].mpeg & CX88_MPEG_BLACKBIRD) { 145 if (cx88_boards[core->board].mpeg & CX88_MPEG_BLACKBIRD) {
146 /* sets sound input from external adc */
147 switch (core->board) {
148 case CX88_BOARD_HAUPPAUGE_ROSLYN:
149 case CX88_BOARD_KWORLD_MCE200_DELUXE:
150 case CX88_BOARD_KWORLD_HARDWARE_MPEG_TV_XPERT:
151 case CX88_BOARD_PIXELVIEW_PLAYTV_P7000:
152 case CX88_BOARD_ASUS_PVR_416:
153 cx_clear(AUD_CTL, EN_I2SIN_ENABLE);
154 break;
155 default:
156 cx_set(AUD_CTL, EN_I2SIN_ENABLE);
157 }
158
159 cx_write(AUD_I2SINPUTCNTL, 4); 146 cx_write(AUD_I2SINPUTCNTL, 4);
160 cx_write(AUD_BAUDRATE, 1); 147 cx_write(AUD_BAUDRATE, 1);
161 /* 'pass-thru mode': this enables the i2s output to the mpeg encoder */ 148 /* 'pass-thru mode': this enables the i2s output to the mpeg encoder */
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index 90e298d074d1..8613378428fd 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -454,6 +454,14 @@ static int video_mux(struct cx88_core *core, unsigned int input)
454 cx_clear(MO_FILTER_ODD, 0x00002020); 454 cx_clear(MO_FILTER_ODD, 0x00002020);
455 break; 455 break;
456 } 456 }
457
458 if (cx88_boards[core->board].mpeg & CX88_MPEG_BLACKBIRD) {
459 /* sets sound input from external adc */
460 if (INPUT(input)->extadc)
461 cx_set(AUD_CTL, EN_I2SIN_ENABLE);
462 else
463 cx_clear(AUD_CTL, EN_I2SIN_ENABLE);
464 }
457 return 0; 465 return 0;
458} 466}
459 467
@@ -1490,6 +1498,30 @@ int cx88_do_ioctl(struct inode *inode, struct file *file, int radio,
1490 mutex_unlock(&core->lock); 1498 mutex_unlock(&core->lock);
1491 return 0; 1499 return 0;
1492 } 1500 }
1501#ifdef CONFIG_VIDEO_ADV_DEBUG
1502 /* ioctls to allow direct acces to the cx2388x registers */
1503 case VIDIOC_INT_G_REGISTER:
1504 {
1505 struct v4l2_register *reg = arg;
1506
1507 if (reg->i2c_id != 0)
1508 return -EINVAL;
1509 /* cx2388x has a 24-bit register space */
1510 reg->val = cx_read(reg->reg&0xffffff);
1511 return 0;
1512 }
1513 case VIDIOC_INT_S_REGISTER:
1514 {
1515 struct v4l2_register *reg = arg;
1516
1517 if (reg->i2c_id != 0)
1518 return -EINVAL;
1519 if (!capable(CAP_SYS_ADMIN))
1520 return -EPERM;
1521 cx_write(reg->reg&0xffffff, reg->val);
1522 return 0;
1523 }
1524#endif
1493 1525
1494 default: 1526 default:
1495 return v4l_compat_translate_ioctl(inode,file,cmd,arg, 1527 return v4l_compat_translate_ioctl(inode,file,cmd,arg,
diff --git a/drivers/media/video/cx88/cx88-vp3054-i2c.c b/drivers/media/video/cx88/cx88-vp3054-i2c.c
index 2b4f1970c7df..6068c9bf82cd 100644
--- a/drivers/media/video/cx88/cx88-vp3054-i2c.c
+++ b/drivers/media/video/cx88/cx88-vp3054-i2c.c
@@ -168,7 +168,7 @@ void vp3054_i2c_remove(struct cx8802_dev *dev)
168 dev->core->board != CX88_BOARD_DNTV_LIVE_DVB_T_PRO) 168 dev->core->board != CX88_BOARD_DNTV_LIVE_DVB_T_PRO)
169 return; 169 return;
170 170
171 i2c_bit_del_bus(&vp3054_i2c->adap); 171 i2c_del_adapter(&vp3054_i2c->adap);
172 kfree(vp3054_i2c); 172 kfree(vp3054_i2c);
173} 173}
174 174
diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
index 3bc91aad4fe5..7054e941f1d7 100644
--- a/drivers/media/video/cx88/cx88.h
+++ b/drivers/media/video/cx88/cx88.h
@@ -74,6 +74,11 @@ enum cx88_board_type {
74 CX88_MPEG_BLACKBIRD 74 CX88_MPEG_BLACKBIRD
75}; 75};
76 76
77enum cx8802_board_access {
78 CX8802_DRVCTL_SHARED = 1,
79 CX8802_DRVCTL_EXCLUSIVE = 2,
80};
81
77/* ----------------------------------------------------------- */ 82/* ----------------------------------------------------------- */
78/* tv norms */ 83/* tv norms */
79 84
@@ -220,6 +225,7 @@ struct cx88_input {
220 enum cx88_itype type; 225 enum cx88_itype type;
221 unsigned int vmux; 226 unsigned int vmux;
222 u32 gpio0, gpio1, gpio2, gpio3; 227 u32 gpio0, gpio1, gpio2, gpio3;
228 unsigned int extadc:1;
223}; 229};
224 230
225struct cx88_board { 231struct cx88_board {
@@ -330,6 +336,7 @@ struct cx88_core {
330 336
331 /* cx88-video needs to access cx8802 for hybrid tuner pll access. */ 337 /* cx88-video needs to access cx8802 for hybrid tuner pll access. */
332 struct cx8802_dev *dvbdev; 338 struct cx8802_dev *dvbdev;
339 enum cx88_board_type active_type_id;
333}; 340};
334 341
335struct cx8800_dev; 342struct cx8800_dev;
@@ -405,6 +412,31 @@ struct cx8802_suspend_state {
405 int disabled; 412 int disabled;
406}; 413};
407 414
415struct cx8802_driver {
416 struct cx88_core *core;
417 struct list_head devlist;
418
419 /* Type of driver and access required */
420 enum cx88_board_type type_id;
421 enum cx8802_board_access hw_access;
422
423 /* MPEG 8802 internal only */
424 int (*suspend)(struct pci_dev *pci_dev, pm_message_t state);
425 int (*resume)(struct pci_dev *pci_dev);
426
427 /* MPEG 8802 -> mini driver - Driver probe and configuration */
428 int (*probe)(struct cx8802_driver *drv);
429 int (*remove)(struct cx8802_driver *drv);
430
431 /* MPEG 8802 -> mini driver - Access for hardware control */
432 int (*advise_acquire)(struct cx8802_driver *drv);
433 int (*advise_release)(struct cx8802_driver *drv);
434
435 /* MPEG 8802 <- mini driver - Access for hardware control */
436 int (*request_acquire)(struct cx8802_driver *drv);
437 int (*request_release)(struct cx8802_driver *drv);
438};
439
408struct cx8802_dev { 440struct cx8802_dev {
409 struct cx88_core *core; 441 struct cx88_core *core;
410 spinlock_t slock; 442 spinlock_t slock;
@@ -439,6 +471,9 @@ struct cx8802_dev {
439 471
440 /* mpeg params */ 472 /* mpeg params */
441 struct cx2341x_mpeg_params params; 473 struct cx2341x_mpeg_params params;
474
475 /* List of attached drivers */
476 struct cx8802_driver drvlist;
442}; 477};
443 478
444/* ----------------------------------------------------------- */ 479/* ----------------------------------------------------------- */
@@ -571,6 +606,11 @@ void cx88_get_stereo(struct cx88_core *core, struct v4l2_tuner *t);
571void cx88_set_stereo(struct cx88_core *core, u32 mode, int manual); 606void cx88_set_stereo(struct cx88_core *core, u32 mode, int manual);
572int cx88_audio_thread(void *data); 607int cx88_audio_thread(void *data);
573 608
609int cx8802_register_driver(struct cx8802_driver *drv);
610int cx8802_unregister_driver(struct cx8802_driver *drv);
611struct cx8802_dev * cx8802_get_device(struct inode *inode);
612struct cx8802_driver * cx8802_get_driver(struct cx8802_dev *dev, enum cx88_board_type btype);
613
574/* ----------------------------------------------------------- */ 614/* ----------------------------------------------------------- */
575/* cx88-input.c */ 615/* cx88-input.c */
576 616
@@ -600,6 +640,13 @@ extern int cx88_do_ioctl(struct inode *inode, struct file *file, int radio,
600extern const u32 cx88_user_ctrls[]; 640extern const u32 cx88_user_ctrls[];
601extern int cx8800_ctrl_query(struct v4l2_queryctrl *qctrl); 641extern int cx8800_ctrl_query(struct v4l2_queryctrl *qctrl);
602 642
643/* ----------------------------------------------------------- */
644/* cx88-blackbird.c */
645/* used by cx88-ivtv ioctl emulation layer */
646extern int (*cx88_ioctl_hook)(struct inode *inode, struct file *file,
647 unsigned int cmd, void *arg);
648extern unsigned int (*cx88_ioctl_translator)(unsigned int cmd);
649
603/* 650/*
604 * Local variables: 651 * Local variables:
605 * c-basic-offset: 8 652 * c-basic-offset: 8
diff --git a/drivers/media/video/dabusb.c b/drivers/media/video/dabusb.c
index b1012e92ee04..917021fc2993 100644
--- a/drivers/media/video/dabusb.c
+++ b/drivers/media/video/dabusb.c
@@ -218,7 +218,7 @@ static int dabusb_alloc_buffers (pdabusb_t s)
218 pipesize, packets, transfer_buffer_length); 218 pipesize, packets, transfer_buffer_length);
219 219
220 while (buffers < (s->total_buffer_size << 10)) { 220 while (buffers < (s->total_buffer_size << 10)) {
221 b = (pbuff_t) kzalloc (sizeof (buff_t), GFP_KERNEL); 221 b = kzalloc(sizeof (buff_t), GFP_KERNEL);
222 if (!b) { 222 if (!b) {
223 err("kzalloc(sizeof(buff_t))==NULL"); 223 err("kzalloc(sizeof(buff_t))==NULL");
224 goto err; 224 goto err;
@@ -659,7 +659,7 @@ static int dabusb_ioctl (struct inode *inode, struct file *file, unsigned int cm
659 switch (cmd) { 659 switch (cmd) {
660 660
661 case IOCTL_DAB_BULK: 661 case IOCTL_DAB_BULK:
662 pbulk = (pbulk_transfer_t) kmalloc (sizeof (bulk_transfer_t), GFP_KERNEL); 662 pbulk = kmalloc(sizeof (bulk_transfer_t), GFP_KERNEL);
663 663
664 if (!pbulk) { 664 if (!pbulk) {
665 ret = -ENOMEM; 665 ret = -ENOMEM;
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index ab87e7bfe84f..59edf58204de 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -305,15 +305,14 @@ static int ir_attach(struct i2c_adapter *adap, int addr,
305 int ir_type; 305 int ir_type;
306 struct IR_i2c *ir; 306 struct IR_i2c *ir;
307 struct input_dev *input_dev; 307 struct input_dev *input_dev;
308 int err;
308 309
309 ir = kzalloc(sizeof(struct IR_i2c),GFP_KERNEL); 310 ir = kzalloc(sizeof(struct IR_i2c),GFP_KERNEL);
310 input_dev = input_allocate_device(); 311 input_dev = input_allocate_device();
311 if (!ir || !input_dev) { 312 if (!ir || !input_dev) {
312 input_free_device(input_dev); 313 err = -ENOMEM;
313 kfree(ir); 314 goto err_out_free;
314 return -ENOMEM;
315 } 315 }
316 memset(ir,0,sizeof(*ir));
317 316
318 ir->c = client_template; 317 ir->c = client_template;
319 ir->input = input_dev; 318 ir->input = input_dev;
@@ -355,32 +354,34 @@ static int ir_attach(struct i2c_adapter *adap, int addr,
355 break; 354 break;
356 case 0x7a: 355 case 0x7a:
357 case 0x47: 356 case 0x47:
357 case 0x71:
358 /* Handled by saa7134-input */ 358 /* Handled by saa7134-input */
359 name = "SAA713x remote"; 359 name = "SAA713x remote";
360 ir_type = IR_TYPE_OTHER; 360 ir_type = IR_TYPE_OTHER;
361 break; 361 break;
362 default: 362 default:
363 /* shouldn't happen */ 363 /* shouldn't happen */
364 printk(DEVNAME ": Huh? unknown i2c address (0x%02x)?\n",addr); 364 printk(DEVNAME ": Huh? unknown i2c address (0x%02x)?\n", addr);
365 kfree(ir); 365 err = -ENODEV;
366 return -1; 366 goto err_out_free;
367 } 367 }
368 368
369 /* Sets name */ 369 /* Sets name */
370 snprintf(ir->c.name, sizeof(ir->c.name), "i2c IR (%s)", name); 370 snprintf(ir->c.name, sizeof(ir->c.name), "i2c IR (%s)", name);
371 ir->ir_codes=ir_codes; 371 ir->ir_codes = ir_codes;
372 372
373 /* register i2c device 373 /* register i2c device
374 * At device register, IR codes may be changed to be 374 * At device register, IR codes may be changed to be
375 * board dependent. 375 * board dependent.
376 */ 376 */
377 i2c_attach_client(&ir->c); 377 err = i2c_attach_client(&ir->c);
378 if (err)
379 goto err_out_free;
378 380
379 /* If IR not supported or disabled, unregisters driver */ 381 /* If IR not supported or disabled, unregisters driver */
380 if (ir->get_key == NULL) { 382 if (ir->get_key == NULL) {
381 i2c_detach_client(&ir->c); 383 err = -ENODEV;
382 kfree(ir); 384 goto err_out_detach;
383 return -1;
384 } 385 }
385 386
386 /* Phys addr can only be set after attaching (for ir->c.dev.bus_id) */ 387 /* Phys addr can only be set after attaching (for ir->c.dev.bus_id) */
@@ -389,15 +390,17 @@ static int ir_attach(struct i2c_adapter *adap, int addr,
389 ir->c.dev.bus_id); 390 ir->c.dev.bus_id);
390 391
391 /* init + register input device */ 392 /* init + register input device */
392 ir_input_init(input_dev,&ir->ir,ir_type,ir->ir_codes); 393 ir_input_init(input_dev, &ir->ir, ir_type, ir->ir_codes);
393 input_dev->id.bustype = BUS_I2C; 394 input_dev->id.bustype = BUS_I2C;
394 input_dev->name = ir->c.name; 395 input_dev->name = ir->c.name;
395 input_dev->phys = ir->phys; 396 input_dev->phys = ir->phys;
396 397
397 /* register event device */ 398 err = input_register_device(ir->input);
398 input_register_device(ir->input); 399 if (err)
400 goto err_out_detach;
401
399 printk(DEVNAME ": %s detected at %s [%s]\n", 402 printk(DEVNAME ": %s detected at %s [%s]\n",
400 ir->input->name,ir->input->phys,adap->name); 403 ir->input->name, ir->input->phys, adap->name);
401 404
402 /* start polling via eventd */ 405 /* start polling via eventd */
403 INIT_WORK(&ir->work, ir_work); 406 INIT_WORK(&ir->work, ir_work);
@@ -407,6 +410,13 @@ static int ir_attach(struct i2c_adapter *adap, int addr,
407 schedule_work(&ir->work); 410 schedule_work(&ir->work);
408 411
409 return 0; 412 return 0;
413
414 err_out_detach:
415 i2c_detach_client(&ir->c);
416 err_out_free:
417 input_free_device(input_dev);
418 kfree(ir);
419 return err;
410} 420}
411 421
412static int ir_detach(struct i2c_client *client) 422static int ir_detach(struct i2c_client *client)
@@ -414,7 +424,7 @@ static int ir_detach(struct i2c_client *client)
414 struct IR_i2c *ir = i2c_get_clientdata(client); 424 struct IR_i2c *ir = i2c_get_clientdata(client);
415 425
416 /* kill outstanding polls */ 426 /* kill outstanding polls */
417 del_timer(&ir->timer); 427 del_timer_sync(&ir->timer);
418 flush_scheduled_work(); 428 flush_scheduled_work();
419 429
420 /* unregister devices */ 430 /* unregister devices */
@@ -439,7 +449,7 @@ static int ir_probe(struct i2c_adapter *adap)
439 */ 449 */
440 450
441 static const int probe_bttv[] = { 0x1a, 0x18, 0x4b, 0x64, 0x30, -1}; 451 static const int probe_bttv[] = { 0x1a, 0x18, 0x4b, 0x64, 0x30, -1};
442 static const int probe_saa7134[] = { 0x7a, 0x47, -1 }; 452 static const int probe_saa7134[] = { 0x7a, 0x47, 0x71, -1 };
443 static const int probe_em28XX[] = { 0x30, 0x47, -1 }; 453 static const int probe_em28XX[] = { 0x30, 0x47, -1 };
444 const int *probe = NULL; 454 const int *probe = NULL;
445 struct i2c_client c; 455 struct i2c_client c;
diff --git a/drivers/media/video/mxb.c b/drivers/media/video/mxb.c
index b0aea4002d11..152cc6b3e152 100644
--- a/drivers/media/video/mxb.c
+++ b/drivers/media/video/mxb.c
@@ -160,10 +160,6 @@ static int mxb_probe(struct saa7146_dev* dev)
160 printk("mxb: saa7111 i2c module not available.\n"); 160 printk("mxb: saa7111 i2c module not available.\n");
161 return -ENODEV; 161 return -ENODEV;
162 } 162 }
163 if ((result = request_module("tuner")) < 0) {
164 printk("mxb: tuner i2c module not available.\n");
165 return -ENODEV;
166 }
167 if ((result = request_module("tea6420")) < 0) { 163 if ((result = request_module("tea6420")) < 0) {
168 printk("mxb: tea6420 i2c module not available.\n"); 164 printk("mxb: tea6420 i2c module not available.\n");
169 return -ENODEV; 165 return -ENODEV;
@@ -176,6 +172,10 @@ static int mxb_probe(struct saa7146_dev* dev)
176 printk("mxb: tda9840 i2c module not available.\n"); 172 printk("mxb: tda9840 i2c module not available.\n");
177 return -ENODEV; 173 return -ENODEV;
178 } 174 }
175 if ((result = request_module("tuner")) < 0) {
176 printk("mxb: tuner i2c module not available.\n");
177 return -ENODEV;
178 }
179 179
180 mxb = kzalloc(sizeof(struct mxb), GFP_KERNEL); 180 mxb = kzalloc(sizeof(struct mxb), GFP_KERNEL);
181 if( NULL == mxb ) { 181 if( NULL == mxb ) {
diff --git a/drivers/media/video/ov7670.c b/drivers/media/video/ov7670.c
new file mode 100644
index 000000000000..5ed0adc4ca26
--- /dev/null
+++ b/drivers/media/video/ov7670.c
@@ -0,0 +1,1333 @@
1/*
2 * A V4L2 driver for OmniVision OV7670 cameras.
3 *
4 * Copyright 2006 One Laptop Per Child Association, Inc. Written
5 * by Jonathan Corbet with substantial inspiration from Mark
6 * McClelland's ovcamchip code.
7 *
8 * This file may be distributed under the terms of the GNU General
9 * Public License, version 2.
10 */
11#include <linux/init.h>
12#include <linux/module.h>
13#include <linux/moduleparam.h>
14#include <linux/slab.h>
15#include <linux/delay.h>
16#include <linux/videodev.h>
17#include <media/v4l2-common.h>
18#include <linux/i2c.h>
19
20
21MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
22MODULE_DESCRIPTION("A low-level driver for OmniVision ov7670 sensors");
23MODULE_LICENSE("GPL");
24
25/*
26 * Basic window sizes. These probably belong somewhere more globally
27 * useful.
28 */
29#define VGA_WIDTH 640
30#define VGA_HEIGHT 480
31#define QVGA_WIDTH 320
32#define QVGA_HEIGHT 240
33#define CIF_WIDTH 352
34#define CIF_HEIGHT 288
35#define QCIF_WIDTH 176
36#define QCIF_HEIGHT 144
37
38/*
39 * Our nominal (default) frame rate.
40 */
41#define OV7670_FRAME_RATE 30
42
43/*
44 * The 7670 sits on i2c with ID 0x42
45 */
46#define OV7670_I2C_ADDR 0x42
47
48/* Registers */
49#define REG_GAIN 0x00 /* Gain lower 8 bits (rest in vref) */
50#define REG_BLUE 0x01 /* blue gain */
51#define REG_RED 0x02 /* red gain */
52#define REG_VREF 0x03 /* Pieces of GAIN, VSTART, VSTOP */
53#define REG_COM1 0x04 /* Control 1 */
54#define COM1_CCIR656 0x40 /* CCIR656 enable */
55#define REG_BAVE 0x05 /* U/B Average level */
56#define REG_GbAVE 0x06 /* Y/Gb Average level */
57#define REG_AECHH 0x07 /* AEC MS 5 bits */
58#define REG_RAVE 0x08 /* V/R Average level */
59#define REG_COM2 0x09 /* Control 2 */
60#define COM2_SSLEEP 0x10 /* Soft sleep mode */
61#define REG_PID 0x0a /* Product ID MSB */
62#define REG_VER 0x0b /* Product ID LSB */
63#define REG_COM3 0x0c /* Control 3 */
64#define COM3_SWAP 0x40 /* Byte swap */
65#define COM3_SCALEEN 0x08 /* Enable scaling */
66#define COM3_DCWEN 0x04 /* Enable downsamp/crop/window */
67#define REG_COM4 0x0d /* Control 4 */
68#define REG_COM5 0x0e /* All "reserved" */
69#define REG_COM6 0x0f /* Control 6 */
70#define REG_AECH 0x10 /* More bits of AEC value */
71#define REG_CLKRC 0x11 /* Clocl control */
72#define CLK_EXT 0x40 /* Use external clock directly */
73#define CLK_SCALE 0x3f /* Mask for internal clock scale */
74#define REG_COM7 0x12 /* Control 7 */
75#define COM7_RESET 0x80 /* Register reset */
76#define COM7_FMT_MASK 0x38
77#define COM7_FMT_VGA 0x00
78#define COM7_FMT_CIF 0x20 /* CIF format */
79#define COM7_FMT_QVGA 0x10 /* QVGA format */
80#define COM7_FMT_QCIF 0x08 /* QCIF format */
81#define COM7_RGB 0x04 /* bits 0 and 2 - RGB format */
82#define COM7_YUV 0x00 /* YUV */
83#define COM7_BAYER 0x01 /* Bayer format */
84#define COM7_PBAYER 0x05 /* "Processed bayer" */
85#define REG_COM8 0x13 /* Control 8 */
86#define COM8_FASTAEC 0x80 /* Enable fast AGC/AEC */
87#define COM8_AECSTEP 0x40 /* Unlimited AEC step size */
88#define COM8_BFILT 0x20 /* Band filter enable */
89#define COM8_AGC 0x04 /* Auto gain enable */
90#define COM8_AWB 0x02 /* White balance enable */
91#define COM8_AEC 0x01 /* Auto exposure enable */
92#define REG_COM9 0x14 /* Control 9 - gain ceiling */
93#define REG_COM10 0x15 /* Control 10 */
94#define COM10_HSYNC 0x40 /* HSYNC instead of HREF */
95#define COM10_PCLK_HB 0x20 /* Suppress PCLK on horiz blank */
96#define COM10_HREF_REV 0x08 /* Reverse HREF */
97#define COM10_VS_LEAD 0x04 /* VSYNC on clock leading edge */
98#define COM10_VS_NEG 0x02 /* VSYNC negative */
99#define COM10_HS_NEG 0x01 /* HSYNC negative */
100#define REG_HSTART 0x17 /* Horiz start high bits */
101#define REG_HSTOP 0x18 /* Horiz stop high bits */
102#define REG_VSTART 0x19 /* Vert start high bits */
103#define REG_VSTOP 0x1a /* Vert stop high bits */
104#define REG_PSHFT 0x1b /* Pixel delay after HREF */
105#define REG_MIDH 0x1c /* Manuf. ID high */
106#define REG_MIDL 0x1d /* Manuf. ID low */
107#define REG_MVFP 0x1e /* Mirror / vflip */
108#define MVFP_MIRROR 0x20 /* Mirror image */
109#define MVFP_FLIP 0x10 /* Vertical flip */
110
111#define REG_AEW 0x24 /* AGC upper limit */
112#define REG_AEB 0x25 /* AGC lower limit */
113#define REG_VPT 0x26 /* AGC/AEC fast mode op region */
114#define REG_HSYST 0x30 /* HSYNC rising edge delay */
115#define REG_HSYEN 0x31 /* HSYNC falling edge delay */
116#define REG_HREF 0x32 /* HREF pieces */
117#define REG_TSLB 0x3a /* lots of stuff */
118#define TSLB_YLAST 0x04 /* UYVY or VYUY - see com13 */
119#define REG_COM11 0x3b /* Control 11 */
120#define COM11_NIGHT 0x80 /* NIght mode enable */
121#define COM11_NMFR 0x60 /* Two bit NM frame rate */
122#define COM11_HZAUTO 0x10 /* Auto detect 50/60 Hz */
123#define COM11_50HZ 0x08 /* Manual 50Hz select */
124#define COM11_EXP 0x02
125#define REG_COM12 0x3c /* Control 12 */
126#define COM12_HREF 0x80 /* HREF always */
127#define REG_COM13 0x3d /* Control 13 */
128#define COM13_GAMMA 0x80 /* Gamma enable */
129#define COM13_UVSAT 0x40 /* UV saturation auto adjustment */
130#define COM13_UVSWAP 0x01 /* V before U - w/TSLB */
131#define REG_COM14 0x3e /* Control 14 */
132#define COM14_DCWEN 0x10 /* DCW/PCLK-scale enable */
133#define REG_EDGE 0x3f /* Edge enhancement factor */
134#define REG_COM15 0x40 /* Control 15 */
135#define COM15_R10F0 0x00 /* Data range 10 to F0 */
136#define COM15_R01FE 0x80 /* 01 to FE */
137#define COM15_R00FF 0xc0 /* 00 to FF */
138#define COM15_RGB565 0x10 /* RGB565 output */
139#define COM15_RGB555 0x30 /* RGB555 output */
140#define REG_COM16 0x41 /* Control 16 */
141#define COM16_AWBGAIN 0x08 /* AWB gain enable */
142#define REG_COM17 0x42 /* Control 17 */
143#define COM17_AECWIN 0xc0 /* AEC window - must match COM4 */
144#define COM17_CBAR 0x08 /* DSP Color bar */
145
146/*
147 * This matrix defines how the colors are generated, must be
148 * tweaked to adjust hue and saturation.
149 *
150 * Order: v-red, v-green, v-blue, u-red, u-green, u-blue
151 *
152 * They are nine-bit signed quantities, with the sign bit
153 * stored in 0x58. Sign for v-red is bit 0, and up from there.
154 */
155#define REG_CMATRIX_BASE 0x4f
156#define CMATRIX_LEN 6
157#define REG_CMATRIX_SIGN 0x58
158
159
160#define REG_BRIGHT 0x55 /* Brightness */
161#define REG_CONTRAS 0x56 /* Contrast control */
162
163#define REG_GFIX 0x69 /* Fix gain control */
164
165#define REG_RGB444 0x8c /* RGB 444 control */
166#define R444_ENABLE 0x02 /* Turn on RGB444, overrides 5x5 */
167#define R444_RGBX 0x01 /* Empty nibble at end */
168
169#define REG_HAECC1 0x9f /* Hist AEC/AGC control 1 */
170#define REG_HAECC2 0xa0 /* Hist AEC/AGC control 2 */
171
172#define REG_BD50MAX 0xa5 /* 50hz banding step limit */
173#define REG_HAECC3 0xa6 /* Hist AEC/AGC control 3 */
174#define REG_HAECC4 0xa7 /* Hist AEC/AGC control 4 */
175#define REG_HAECC5 0xa8 /* Hist AEC/AGC control 5 */
176#define REG_HAECC6 0xa9 /* Hist AEC/AGC control 6 */
177#define REG_HAECC7 0xaa /* Hist AEC/AGC control 7 */
178#define REG_BD60MAX 0xab /* 60hz banding step limit */
179
180
/*
 * Information we maintain about a known sensor.
 */
struct ov7670_format_struct; /* coming later */
struct ov7670_info {
	struct ov7670_format_struct *fmt;  /* Current format */
	unsigned char sat;		/* Saturation value; 128 = unity (cmatrix is scaled by sat >> 7) */
	int hue;			/* Hue value, degrees; -180..180 enforced by ov7670_t_hue() */
};
190
191
192
193
/*
 * The default register settings, as obtained from OmniVision.  There
 * is really no making sense of most of these - lots of "reserved" values
 * and such.
 *
 * These settings give VGA YUYV.
 */

/* One register write: address + value.  A { 0xff, 0xff } pair terminates a list. */
struct regval_list {
	unsigned char reg_num;
	unsigned char value;
};
206
/* Power-on defaults for the sensor; written in full by ov7670_init(). */
static struct regval_list ov7670_default_regs[] = {
	{ REG_COM7, COM7_RESET },
/*
 * Clock scale: 3 = 15fps
 *              2 = 20fps
 *              1 = 30fps
 */
	{ REG_CLKRC, 0x1 },	/* OV: clock scale (30 fps) */
	{ REG_TSLB, 0x04 },	/* OV */
	{ REG_COM7, 0 },	/* VGA */
	/*
	 * Set the hardware window.  These values from OV don't entirely
	 * make sense - hstop is less than hstart.  But they work...
	 */
	{ REG_HSTART, 0x13 },	{ REG_HSTOP, 0x01 },
	{ REG_HREF, 0xb6 },	{ REG_VSTART, 0x02 },
	{ REG_VSTOP, 0x7a },	{ REG_VREF, 0x0a },

	{ REG_COM3, 0 },	{ REG_COM14, 0 },
	/* Mystery scaling numbers */
	{ 0x70, 0x3a },		{ 0x71, 0x35 },
	{ 0x72, 0x11 },		{ 0x73, 0xf0 },
	{ 0xa2, 0x02 },		{ REG_COM10, 0x0 },

	/* Gamma curve values */
	{ 0x7a, 0x20 },		{ 0x7b, 0x10 },
	{ 0x7c, 0x1e },		{ 0x7d, 0x35 },
	{ 0x7e, 0x5a },		{ 0x7f, 0x69 },
	{ 0x80, 0x76 },		{ 0x81, 0x80 },
	{ 0x82, 0x88 },		{ 0x83, 0x8f },
	{ 0x84, 0x96 },		{ 0x85, 0xa3 },
	{ 0x86, 0xaf },		{ 0x87, 0xc4 },
	{ 0x88, 0xd7 },		{ 0x89, 0xe8 },

	/* AGC and AEC parameters.  Note we start by disabling those features,
	   then turn them only after tweaking the values. */
	{ REG_COM8, COM8_FASTAEC | COM8_AECSTEP | COM8_BFILT },
	{ REG_GAIN, 0 },	{ REG_AECH, 0 },
	{ REG_COM4, 0x40 }, /* magic reserved bit */
	{ REG_COM9, 0x18 }, /* 4x gain + magic rsvd bit */
	{ REG_BD50MAX, 0x05 },	{ REG_BD60MAX, 0x07 },
	{ REG_AEW, 0x95 },	{ REG_AEB, 0x33 },
	{ REG_VPT, 0xe3 },	{ REG_HAECC1, 0x78 },
	{ REG_HAECC2, 0x68 },	{ 0xa1, 0x03 }, /* magic */
	{ REG_HAECC3, 0xd8 },	{ REG_HAECC4, 0xd8 },
	{ REG_HAECC5, 0xf0 },	{ REG_HAECC6, 0x90 },
	{ REG_HAECC7, 0x94 },
	/* Now turn AGC/AEC back on */
	{ REG_COM8, COM8_FASTAEC|COM8_AECSTEP|COM8_BFILT|COM8_AGC|COM8_AEC },

	/* Almost all of these are magic "reserved" values.  */
	{ REG_COM5, 0x61 },	{ REG_COM6, 0x4b },
	{ 0x16, 0x02 },		{ REG_MVFP, 0x07|MVFP_MIRROR },
	{ 0x21, 0x02 },		{ 0x22, 0x91 },
	{ 0x29, 0x07 },		{ 0x33, 0x0b },
	{ 0x35, 0x0b },		{ 0x37, 0x1d },
	{ 0x38, 0x71 },		{ 0x39, 0x2a },
	{ REG_COM12, 0x78 },	{ 0x4d, 0x40 },
	{ 0x4e, 0x20 },		{ REG_GFIX, 0 },
	{ 0x6b, 0x4a },		{ 0x74, 0x10 },
	{ 0x8d, 0x4f },		{ 0x8e, 0 },
	{ 0x8f, 0 },		{ 0x90, 0 },
	{ 0x91, 0 },		{ 0x96, 0 },
	{ 0x9a, 0 },		{ 0xb0, 0x84 },
	{ 0xb1, 0x0c },		{ 0xb2, 0x0e },
	{ 0xb3, 0x82 },		{ 0xb8, 0x0a },

	/* More reserved magic, some of which tweaks white balance */
	{ 0x43, 0x0a },		{ 0x44, 0xf0 },
	{ 0x45, 0x34 },		{ 0x46, 0x58 },
	{ 0x47, 0x28 },		{ 0x48, 0x3a },
	{ 0x59, 0x88 },		{ 0x5a, 0x88 },
	{ 0x5b, 0x44 },		{ 0x5c, 0x67 },
	{ 0x5d, 0x49 },		{ 0x5e, 0x0e },
	{ 0x6c, 0x0a },		{ 0x6d, 0x55 },
	{ 0x6e, 0x11 },		{ 0x6f, 0x9f }, /* "9e for advance AWB" */
	{ 0x6a, 0x40 },		{ REG_BLUE, 0x40 },
	{ REG_RED, 0x60 },
	/* ...and finally enable AWB as well */
	{ REG_COM8, COM8_FASTAEC|COM8_AECSTEP|COM8_BFILT|COM8_AGC|COM8_AEC|COM8_AWB },

	/* Matrix coefficients */
	{ 0x4f, 0x80 },		{ 0x50, 0x80 },
	{ 0x51, 0 },		{ 0x52, 0x22 },
	{ 0x53, 0x5e },		{ 0x54, 0x80 },
	{ 0x58, 0x9e },

	{ REG_COM16, COM16_AWBGAIN },	{ REG_EDGE, 0 },
	{ 0x75, 0x05 },		{ 0x76, 0xe1 },
	{ 0x4c, 0 },		{ 0x77, 0x01 },
	{ REG_COM13, 0xc3 },	{ 0x4b, 0x09 },
	{ 0xc9, 0x60 },		{ REG_COM16, 0x38 },
	{ 0x56, 0x40 },

	{ 0x34, 0x11 },		{ REG_COM11, COM11_EXP|COM11_HZAUTO },
	{ 0xa4, 0x88 },		{ 0x96, 0 },
	{ 0x97, 0x30 },		{ 0x98, 0x20 },
	{ 0x99, 0x30 },		{ 0x9a, 0x84 },
	{ 0x9b, 0x29 },		{ 0x9c, 0x03 },
	{ 0x9d, 0x4c },		{ 0x9e, 0x3f },
	{ 0x78, 0x04 },

	/* Extra-weird stuff.  Some sort of multiplexor register */
	{ 0x79, 0x01 },		{ 0xc8, 0xf0 },
	{ 0x79, 0x0f },		{ 0xc8, 0x00 },
	{ 0x79, 0x10 },		{ 0xc8, 0x7e },
	{ 0x79, 0x0a },		{ 0xc8, 0x80 },
	{ 0x79, 0x0b },		{ 0xc8, 0x01 },
	{ 0x79, 0x0c },		{ 0xc8, 0x0f },
	{ 0x79, 0x0d },		{ 0xc8, 0x20 },
	{ 0x79, 0x09 },		{ 0xc8, 0x80 },
	{ 0x79, 0x02 },		{ 0xc8, 0xc0 },
	{ 0x79, 0x03 },		{ 0xc8, 0x40 },
	{ 0x79, 0x05 },		{ 0xc8, 0x30 },
	{ 0x79, 0x26 },

	{ 0xff, 0xff },	/* END MARKER */
};
323
324
/*
 * Here we'll try to encapsulate the changes for just the output
 * video format.
 *
 * RGB565 and YUV422 come from OV; RGB444 is homebrewed.
 *
 * IMPORTANT RULE: the first entry must be for COM7, see ov7670_s_fmt for why.
 */


static struct regval_list ov7670_fmt_yuv422[] = {
	{ REG_COM7, 0x0 },  /* Selects YUV mode */
	{ REG_RGB444, 0 },	/* No RGB444 please */
	{ REG_COM1, 0 },
	{ REG_COM15, COM15_R00FF },
	{ REG_COM9, 0x18 }, /* 4x gain ceiling; 0x8 is reserved bit */
	{ 0x4f, 0x80 }, 	/* "matrix coefficient 1" */
	{ 0x50, 0x80 }, 	/* "matrix coefficient 2" */
	{ 0x51, 0    },		/* vb */
	{ 0x52, 0x22 }, 	/* "matrix coefficient 4" */
	{ 0x53, 0x5e }, 	/* "matrix coefficient 5" */
	{ 0x54, 0x80 }, 	/* "matrix coefficient 6" */
	{ REG_COM13, COM13_GAMMA|COM13_UVSAT },
	{ 0xff, 0xff },		/* END MARKER */
};
350
static struct regval_list ov7670_fmt_rgb565[] = {
	{ REG_COM7, COM7_RGB },	/* Selects RGB mode */
	{ REG_RGB444, 0 },	/* No RGB444 please */
	{ REG_COM1, 0x0 },
	{ REG_COM15, COM15_RGB565 },
	{ REG_COM9, 0x38 }, 	/* 16x gain ceiling; 0x8 is reserved bit */
	{ 0x4f, 0xb3 }, 	/* "matrix coefficient 1" */
	{ 0x50, 0xb3 }, 	/* "matrix coefficient 2" */
	{ 0x51, 0    },		/* vb */
	{ 0x52, 0x3d }, 	/* "matrix coefficient 4" */
	{ 0x53, 0xa7 }, 	/* "matrix coefficient 5" */
	{ 0x54, 0xe4 }, 	/* "matrix coefficient 6" */
	{ REG_COM13, COM13_GAMMA|COM13_UVSAT },
	{ 0xff, 0xff },		/* END MARKER */
};
366
static struct regval_list ov7670_fmt_rgb444[] = {
	{ REG_COM7, COM7_RGB },	/* Selects RGB mode */
	{ REG_RGB444, R444_ENABLE },	/* Enable xxxxrrrr ggggbbbb */
	{ REG_COM1, 0x40 },	/* Magic reserved bit */
	{ REG_COM15, COM15_R01FE|COM15_RGB565 },	/* Data range needed? */
	{ REG_COM9, 0x38 }, 	/* 16x gain ceiling; 0x8 is reserved bit */
	{ 0x4f, 0xb3 }, 	/* "matrix coefficient 1" */
	{ 0x50, 0xb3 }, 	/* "matrix coefficient 2" */
	{ 0x51, 0    },		/* vb */
	{ 0x52, 0x3d }, 	/* "matrix coefficient 4" */
	{ 0x53, 0xa7 }, 	/* "matrix coefficient 5" */
	{ 0x54, 0xe4 }, 	/* "matrix coefficient 6" */
	{ REG_COM13, COM13_GAMMA|COM13_UVSAT|0x2 },  /* Magic rsvd bit */
	{ 0xff, 0xff },		/* END MARKER */
};
382
383
384
385
/*
 * Low-level register I/O.
 */

/*
 * Fetch one register over SMBus.  On success the byte is stored in
 * *value and the (non-negative) raw SMBus result is returned; a
 * negative errno is returned on failure and *value is untouched.
 */
static int ov7670_read(struct i2c_client *c, unsigned char reg,
		unsigned char *value)
{
	int ret = i2c_smbus_read_byte_data(c, reg);

	if (ret >= 0)
		*value = (unsigned char) ret;
	return ret;
}
400
401
/* Write one register; returns 0 on success or a negative errno. */
static int ov7670_write(struct i2c_client *c, unsigned char reg,
		unsigned char value)
{
	return i2c_smbus_write_byte_data(c, reg, value);
}
407
408
409/*
410 * Write a list of register settings; ff/ff stops the process.
411 */
412static int ov7670_write_array(struct i2c_client *c, struct regval_list *vals)
413{
414 while (vals->reg_num != 0xff || vals->value != 0xff) {
415 int ret = ov7670_write(c, vals->reg_num, vals->value);
416 if (ret < 0)
417 return ret;
418 vals++;
419 }
420 return 0;
421}
422
423
/*
 * Stuff that knows about the sensor.
 */

/* Issue a soft reset via COM7; the short sleep gives the chip time to settle. */
static void ov7670_reset(struct i2c_client *client)
{
	ov7670_write(client, REG_COM7, COM7_RESET);
	msleep(1);
}
432
433
/* Program the full OmniVision default register set (VGA YUYV). */
static int ov7670_init(struct i2c_client *client)
{
	return ov7670_write_array(client, ov7670_default_regs);
}
438
439
440
441static int ov7670_detect(struct i2c_client *client)
442{
443 unsigned char v;
444 int ret;
445
446 ret = ov7670_init(client);
447 if (ret < 0)
448 return ret;
449 ret = ov7670_read(client, REG_MIDH, &v);
450 if (ret < 0)
451 return ret;
452 if (v != 0x7f) /* OV manuf. id. */
453 return -ENODEV;
454 ret = ov7670_read(client, REG_MIDL, &v);
455 if (ret < 0)
456 return ret;
457 if (v != 0xa2)
458 return -ENODEV;
459 /*
460 * OK, we know we have an OmniVision chip...but which one?
461 */
462 ret = ov7670_read(client, REG_PID, &v);
463 if (ret < 0)
464 return ret;
465 if (v != 0x76) /* PID + VER = 0x76 / 0x73 */
466 return -ENODEV;
467 ret = ov7670_read(client, REG_VER, &v);
468 if (ret < 0)
469 return ret;
470 if (v != 0x73) /* PID + VER = 0x76 / 0x73 */
471 return -ENODEV;
472 return 0;
473}
474
475
/*
 * Store information about the video data format.  The color matrix
 * is deeply tied into the format, so keep the relevant values here.
 * The magic matrix numbers come from OmniVision.
 */
static struct ov7670_format_struct {
	__u8 *desc;			/* Human-readable name for ENUM_FMT */
	__u32 pixelformat;		/* V4L2 fourcc */
	struct regval_list *regs;	/* Registers to program for this format */
	int cmatrix[CMATRIX_LEN];	/* Base color matrix at unity saturation */
} ov7670_formats[] = {
	{
		.desc		= "YUYV 4:2:2",
		.pixelformat	= V4L2_PIX_FMT_YUYV,
		.regs 		= ov7670_fmt_yuv422,
		.cmatrix	= { 128, -128, 0, -34, -94, 128 },
	},
	{
		.desc		= "RGB 444",
		.pixelformat	= V4L2_PIX_FMT_RGB444,
		.regs		= ov7670_fmt_rgb444,
		.cmatrix	= { 179, -179, 0, -61, -176, 228 },
	},
	{
		.desc		= "RGB 565",
		.pixelformat	= V4L2_PIX_FMT_RGB565,
		.regs		= ov7670_fmt_rgb565,
		.cmatrix	= { 179, -179, 0, -61, -176, 228 },
	},
};
#define N_OV7670_FMTS (sizeof(ov7670_formats)/sizeof(ov7670_formats[0]))
507
508/*
509 * All formats we support are 2 bytes/pixel.
510 */
511#define BYTES_PER_PIXEL 2
512
/*
 * Then there is the issue of window sizes.  Try to capture the info here.
 */

/*
 * QCIF mode is done (by OV) in a very strange way - it actually looks like
 * VGA with weird scaling options - they do *not* use the canned QCIF mode
 * which is allegedly provided by the sensor.  So here's the weird register
 * settings.
 */
static struct regval_list ov7670_qcif_regs[] = {
	{ REG_COM3, COM3_SCALEEN|COM3_DCWEN },
	{ REG_COM3, COM3_DCWEN },
	{ REG_COM14, COM14_DCWEN | 0x01},
	{ 0x73, 0xf1 },
	{ 0xa2, 0x52 },
	{ 0x7b, 0x1c },
	{ 0x7c, 0x28 },
	{ 0x7d, 0x3c },
	{ 0x7f, 0x69 },
	{ REG_COM9, 0x38 },
	{ 0xa1, 0x0b },
	{ 0x74, 0x19 },
	{ 0x9a, 0x80 },
	{ 0x43, 0x14 },
	{ REG_COM13, 0xc0 },
	{ 0xff, 0xff },		/* END MARKER */
};
541
/*
 * Supported capture window sizes, largest first (ov7670_try_fmt relies
 * on this ordering when rounding a requested size down).
 */
static struct ov7670_win_size {
	int	width;
	int	height;
	unsigned char com7_bit;	/* Size bits to OR into COM7 */
	int	hstart;		/* Start/stop values for the camera.  Note */
	int	hstop;		/* that they do not always make complete */
	int	vstart;		/* sense to humans, but evidently the sensor */
	int	vstop;		/* will do the right thing... */
	struct regval_list *regs; /* Regs to tweak */
/* h/vref stuff */
} ov7670_win_sizes[] = {
	/* VGA */
	{
		.width		= VGA_WIDTH,
		.height		= VGA_HEIGHT,
		.com7_bit	= COM7_FMT_VGA,
		.hstart		= 158,		/* These values from */
		.hstop		=  14,		/* Omnivision */
		.vstart		=  10,
		.vstop		= 490,
		.regs 		= NULL,
	},
	/* CIF */
	{
		.width		= CIF_WIDTH,
		.height		= CIF_HEIGHT,
		.com7_bit	= COM7_FMT_CIF,
		.hstart		= 170,		/* Empirically determined */
		.hstop		=  90,
		.vstart		=  14,
		.vstop		= 494,
		.regs 		= NULL,
	},
	/* QVGA */
	{
		.width		= QVGA_WIDTH,
		.height		= QVGA_HEIGHT,
		.com7_bit	= COM7_FMT_QVGA,
		.hstart		= 164,		/* Empirically determined */
		.hstop		=  20,
		.vstart		=  14,
		.vstop		= 494,
		.regs 		= NULL,
	},
	/* QCIF */
	{
		.width		= QCIF_WIDTH,
		.height		= QCIF_HEIGHT,
		.com7_bit	= COM7_FMT_VGA, /* see comment above */
		.hstart		= 456,		/* Empirically determined */
		.hstop		=  24,
		.vstart		=  14,
		.vstop		= 494,
		.regs 		= ov7670_qcif_regs,
	},
};

#define N_WIN_SIZES (sizeof(ov7670_win_sizes)/sizeof(ov7670_win_sizes[0]))
600
601
602/*
603 * Store a set of start/stop values into the camera.
604 */
605static int ov7670_set_hw(struct i2c_client *client, int hstart, int hstop,
606 int vstart, int vstop)
607{
608 int ret;
609 unsigned char v;
610/*
611 * Horizontal: 11 bits, top 8 live in hstart and hstop. Bottom 3 of
612 * hstart are in href[2:0], bottom 3 of hstop in href[5:3]. There is
613 * a mystery "edge offset" value in the top two bits of href.
614 */
615 ret = ov7670_write(client, REG_HSTART, (hstart >> 3) & 0xff);
616 ret += ov7670_write(client, REG_HSTOP, (hstop >> 3) & 0xff);
617 ret += ov7670_read(client, REG_HREF, &v);
618 v = (v & 0xc0) | ((hstop & 0x7) << 3) | (hstart & 0x7);
619 msleep(10);
620 ret += ov7670_write(client, REG_HREF, v);
621/*
622 * Vertical: similar arrangement, but only 10 bits.
623 */
624 ret += ov7670_write(client, REG_VSTART, (vstart >> 2) & 0xff);
625 ret += ov7670_write(client, REG_VSTOP, (vstop >> 2) & 0xff);
626 ret += ov7670_read(client, REG_VREF, &v);
627 v = (v & 0xf0) | ((vstop & 0x3) << 2) | (vstart & 0x3);
628 msleep(10);
629 ret += ov7670_write(client, REG_VREF, v);
630 return ret;
631}
632
633
634static int ov7670_enum_fmt(struct i2c_client *c, struct v4l2_fmtdesc *fmt)
635{
636 struct ov7670_format_struct *ofmt;
637
638 if (fmt->index >= N_OV7670_FMTS)
639 return -EINVAL;
640
641 ofmt = ov7670_formats + fmt->index;
642 fmt->flags = 0;
643 strcpy(fmt->description, ofmt->desc);
644 fmt->pixelformat = ofmt->pixelformat;
645 return 0;
646}
647
648
/*
 * Validate and adjust a requested format.  Picks the matching entry in
 * ov7670_formats[] and the largest window that fits inside the requested
 * size, then rewrites *fmt with what we will actually deliver.  The
 * chosen format/window are optionally returned through ret_fmt/ret_wsize
 * for ov7670_s_fmt's use.
 */
static int ov7670_try_fmt(struct i2c_client *c, struct v4l2_format *fmt,
		struct ov7670_format_struct **ret_fmt,
		struct ov7670_win_size **ret_wsize)
{
	int index;
	struct ov7670_win_size *wsize;
	struct v4l2_pix_format *pix = &fmt->fmt.pix;

	for (index = 0; index < N_OV7670_FMTS; index++)
		if (ov7670_formats[index].pixelformat == pix->pixelformat)
			break;
	if (index >= N_OV7670_FMTS)
		return -EINVAL;
	if (ret_fmt != NULL)
		*ret_fmt = ov7670_formats + index;
	/*
	 * Fields: the OV devices claim to be progressive.
	 */
	if (pix->field == V4L2_FIELD_ANY)
		pix->field = V4L2_FIELD_NONE;
	else if (pix->field != V4L2_FIELD_NONE)
		return -EINVAL;
	/*
	 * Round requested image size down to the nearest
	 * we support, but not below the smallest.
	 * (Relies on ov7670_win_sizes[] being ordered largest first.)
	 */
	for (wsize = ov7670_win_sizes; wsize < ov7670_win_sizes + N_WIN_SIZES;
	     wsize++)
		if (pix->width >= wsize->width && pix->height >= wsize->height)
			break;
	if (wsize >= ov7670_win_sizes + N_WIN_SIZES)
		wsize--;   /* Take the smallest one */
	if (ret_wsize != NULL)
		*ret_wsize = wsize;
	/*
	 * Note the size we'll actually handle.
	 */
	pix->width = wsize->width;
	pix->height = wsize->height;
	pix->bytesperline = pix->width*BYTES_PER_PIXEL;
	pix->sizeimage = pix->height*pix->bytesperline;
	return 0;
}
692
693/*
694 * Set a format.
695 */
696static int ov7670_s_fmt(struct i2c_client *c, struct v4l2_format *fmt)
697{
698 int ret;
699 struct ov7670_format_struct *ovfmt;
700 struct ov7670_win_size *wsize;
701 struct ov7670_info *info = i2c_get_clientdata(c);
702 unsigned char com7;
703
704 ret = ov7670_try_fmt(c, fmt, &ovfmt, &wsize);
705 if (ret)
706 return ret;
707 /*
708 * COM7 is a pain in the ass, it doesn't like to be read then
709 * quickly written afterward. But we have everything we need
710 * to set it absolutely here, as long as the format-specific
711 * register sets list it first.
712 */
713 com7 = ovfmt->regs[0].value;
714 com7 |= wsize->com7_bit;
715 ov7670_write(c, REG_COM7, com7);
716 /*
717 * Now write the rest of the array. Also store start/stops
718 */
719 ov7670_write_array(c, ovfmt->regs + 1);
720 ov7670_set_hw(c, wsize->hstart, wsize->hstop, wsize->vstart,
721 wsize->vstop);
722 ret = 0;
723 if (wsize->regs)
724 ret = ov7670_write_array(c, wsize->regs);
725 info->fmt = ovfmt;
726 return 0;
727}
728
/*
 * Implement G/S_PARM.  There is a "high quality" mode we could try
 * to do someday; for now, we just do the frame rate tweak.
 */

/*
 * Report the current frame rate: the base OV7670_FRAME_RATE divided by
 * the CLKRC prescaler, unless an external clock (CLK_EXT) is selected.
 */
static int ov7670_g_parm(struct i2c_client *c, struct v4l2_streamparm *parms)
{
	struct v4l2_captureparm *cp = &parms->parm.capture;
	unsigned char clkrc;
	int ret;

	if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;
	ret = ov7670_read(c, REG_CLKRC, &clkrc);
	if (ret < 0)
		return ret;
	memset(cp, 0, sizeof(struct v4l2_captureparm));
	cp->capability = V4L2_CAP_TIMEPERFRAME;
	cp->timeperframe.numerator = 1;
	cp->timeperframe.denominator = OV7670_FRAME_RATE;
	if ((clkrc & CLK_EXT) == 0 && (clkrc & CLK_SCALE) > 1)
		cp->timeperframe.denominator /= (clkrc & CLK_SCALE);
	return 0;
}
752
/*
 * Set the frame rate by programming the CLKRC prescaler.  The requested
 * time-per-frame is converted to a divisor, clamped to [1, CLK_SCALE],
 * and the actually-programmed rate is written back into *tpf.
 */
static int ov7670_s_parm(struct i2c_client *c, struct v4l2_streamparm *parms)
{
	struct v4l2_captureparm *cp = &parms->parm.capture;
	struct v4l2_fract *tpf = &cp->timeperframe;
	unsigned char clkrc;
	int ret, div;

	if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;
	if (cp->extendedmode != 0)
		return -EINVAL;
	/*
	 * CLKRC has a reserved bit, so let's preserve it.
	 */
	ret = ov7670_read(c, REG_CLKRC, &clkrc);
	if (ret < 0)
		return ret;
	if (tpf->numerator == 0 || tpf->denominator == 0)
		div = 1;  /* Reset to full rate */
	else
		div = (tpf->numerator*OV7670_FRAME_RATE)/tpf->denominator;
	if (div == 0)
		div = 1;
	else if (div > CLK_SCALE)
		div = CLK_SCALE;
	clkrc = (clkrc & 0x80) | div;	/* keep the reserved top bit */
	tpf->numerator = 1;
	tpf->denominator = OV7670_FRAME_RATE/div;
	return ov7670_write(c, REG_CLKRC, clkrc);
}
783
784
785
786/*
787 * Code for dealing with controls.
788 */
789
790
791
792
793
794static int ov7670_store_cmatrix(struct i2c_client *client,
795 int matrix[CMATRIX_LEN])
796{
797 int i, ret;
798 unsigned char signbits;
799
800 /*
801 * Weird crap seems to exist in the upper part of
802 * the sign bits register, so let's preserve it.
803 */
804 ret = ov7670_read(client, REG_CMATRIX_SIGN, &signbits);
805 signbits &= 0xc0;
806
807 for (i = 0; i < CMATRIX_LEN; i++) {
808 unsigned char raw;
809
810 if (matrix[i] < 0) {
811 signbits |= (1 << i);
812 if (matrix[i] < -255)
813 raw = 0xff;
814 else
815 raw = (-1 * matrix[i]) & 0xff;
816 }
817 else {
818 if (matrix[i] > 255)
819 raw = 0xff;
820 else
821 raw = matrix[i] & 0xff;
822 }
823 ret += ov7670_write(client, REG_CMATRIX_BASE + i, raw);
824 }
825 ret += ov7670_write(client, REG_CMATRIX_SIGN, signbits);
826 return ret;
827}
828
829
/*
 * Hue also requires messing with the color matrix.  It also requires
 * trig functions, which tend not to be well supported in the kernel.
 * So here is a simple table of sine values, 0-90 degrees, in steps
 * of five degrees.  Values are multiplied by 1000.
 *
 * The following naive approximate trig functions require an argument
 * carefully limited to -180 <= theta <= 180.
 */
#define SIN_STEP 5
static const int ov7670_sin_table[] = {
	   0,	 87,   173,   258,   342,   422,
	 499,	573,   642,   707,   766,   819,
	 866,	906,   939,   965,   984,   996,
	1000
};

/*
 * Approximate sin(theta)*1000 for theta in degrees, -180..180.
 * Above 90 degrees the table is reflected as 1000 - sin(theta - 90):
 * crude (only exact at the endpoints), but monotonic and cheap.
 */
static int ov7670_sine(int theta)
{
	int sign = 1;

	if (theta < 0) {
		theta = -theta;
		sign = -1;
	}
	if (theta > 90)
		return sign * (1000 - ov7670_sin_table[(theta - 90)/SIN_STEP]);
	return sign * ov7670_sin_table[theta/SIN_STEP];
}

/*
 * cos(theta)*1000 via the sine helper: shift by 90 degrees and wrap
 * the argument back into the -180..180 range ov7670_sine() requires.
 */
static int ov7670_cosine(int theta)
{
	int shifted = 90 - theta;

	if (shifted > 180)
		shifted -= 360;
	else if (shifted < -180)
		shifted += 360;
	return ov7670_sine(shifted);
}
874
875
876
877
878static void ov7670_calc_cmatrix(struct ov7670_info *info,
879 int matrix[CMATRIX_LEN])
880{
881 int i;
882 /*
883 * Apply the current saturation setting first.
884 */
885 for (i = 0; i < CMATRIX_LEN; i++)
886 matrix[i] = (info->fmt->cmatrix[i]*info->sat) >> 7;
887 /*
888 * Then, if need be, rotate the hue value.
889 */
890 if (info->hue != 0) {
891 int sinth, costh, tmpmatrix[CMATRIX_LEN];
892
893 memcpy(tmpmatrix, matrix, CMATRIX_LEN*sizeof(int));
894 sinth = ov7670_sine(info->hue);
895 costh = ov7670_cosine(info->hue);
896
897 matrix[0] = (matrix[3]*sinth + matrix[0]*costh)/1000;
898 matrix[1] = (matrix[4]*sinth + matrix[1]*costh)/1000;
899 matrix[2] = (matrix[5]*sinth + matrix[2]*costh)/1000;
900 matrix[3] = (matrix[3]*costh - matrix[0]*sinth)/1000;
901 matrix[4] = (matrix[4]*costh - matrix[1]*sinth)/1000;
902 matrix[5] = (matrix[5]*costh - matrix[2]*sinth)/1000;
903 }
904}
905
906
907
908static int ov7670_t_sat(struct i2c_client *client, int value)
909{
910 struct ov7670_info *info = i2c_get_clientdata(client);
911 int matrix[CMATRIX_LEN];
912 int ret;
913
914 info->sat = value;
915 ov7670_calc_cmatrix(info, matrix);
916 ret = ov7670_store_cmatrix(client, matrix);
917 return ret;
918}
919
/* Query the saturation control: report the cached value. */
static int ov7670_q_sat(struct i2c_client *client, __s32 *value)
{
	struct ov7670_info *info = i2c_get_clientdata(client);

	*value = info->sat;
	return 0;
}
927
928static int ov7670_t_hue(struct i2c_client *client, int value)
929{
930 struct ov7670_info *info = i2c_get_clientdata(client);
931 int matrix[CMATRIX_LEN];
932 int ret;
933
934 if (value < -180 || value > 180)
935 return -EINVAL;
936 info->hue = value;
937 ov7670_calc_cmatrix(info, matrix);
938 ret = ov7670_store_cmatrix(client, matrix);
939 return ret;
940}
941
942
/* Query the hue control: report the cached value. */
static int ov7670_q_hue(struct i2c_client *client, __s32 *value)
{
	struct ov7670_info *info = i2c_get_clientdata(client);

	*value = info->hue;
	return 0;
}
950
951
/*
 * Some weird registers seem to store values in a sign/magnitude format!
 * Convert sign/magnitude (bit 7 = sign, bits 0-6 = magnitude) into the
 * 0..255 scale the V4L2 controls use, centered at 128.
 */
static unsigned char ov7670_sm_to_abs(unsigned char v)
{
	return (v & 0x80) ? 128 - (v & 0x7f) : v + 128;
}
962
963
/* Inverse of ov7670_sm_to_abs(): 0..255 control scale to sign/magnitude. */
static unsigned char ov7670_abs_to_sm(unsigned char v)
{
	if (v > 127)
		return v & 0x7f;	/* positive: magnitude only */
	return (128 - v) | 0x80;	/* negative: set the sign bit */
}
971
972static int ov7670_t_brightness(struct i2c_client *client, int value)
973{
974 unsigned char com8, v;
975 int ret;
976
977 ov7670_read(client, REG_COM8, &com8);
978 com8 &= ~COM8_AEC;
979 ov7670_write(client, REG_COM8, com8);
980 v = ov7670_abs_to_sm(value);
981 ret = ov7670_write(client, REG_BRIGHT, v);
982 return ret;
983}
984
985static int ov7670_q_brightness(struct i2c_client *client, __s32 *value)
986{
987 unsigned char v;
988 int ret = ov7670_read(client, REG_BRIGHT, &v);
989
990 *value = ov7670_sm_to_abs(v);
991 return ret;
992}
993
/* Set contrast: the control value maps directly onto REG_CONTRAS. */
static int ov7670_t_contrast(struct i2c_client *client, int value)
{
	return ov7670_write(client, REG_CONTRAS, (unsigned char) value);
}
998
999static int ov7670_q_contrast(struct i2c_client *client, __s32 *value)
1000{
1001 unsigned char v;
1002 int ret = ov7670_read(client, REG_CONTRAS, &v);
1003
1004 *value = v;
1005 return ret;
1006}
1007
1008static int ov7670_q_hflip(struct i2c_client *client, __s32 *value)
1009{
1010 int ret;
1011 unsigned char v;
1012
1013 ret = ov7670_read(client, REG_MVFP, &v);
1014 *value = (v & MVFP_MIRROR) == MVFP_MIRROR;
1015 return ret;
1016}
1017
1018
/*
 * Set horizontal mirroring via the MVFP mirror bit (read-modify-write).
 * NOTE(review): on success, ret holds the positive raw byte from the
 * read plus the write status, so callers must treat any ret >= 0 as
 * success (ov7670_s_ctrl does).
 */
static int ov7670_t_hflip(struct i2c_client *client, int value)
{
	unsigned char v;
	int ret;

	ret = ov7670_read(client, REG_MVFP, &v);
	if (value)
		v |= MVFP_MIRROR;
	else
		v &= ~MVFP_MIRROR;
	msleep(10);  /* FIXME */
	ret += ov7670_write(client, REG_MVFP, v);
	return ret;
}
1033
1034
1035
1036static int ov7670_q_vflip(struct i2c_client *client, __s32 *value)
1037{
1038 int ret;
1039 unsigned char v;
1040
1041 ret = ov7670_read(client, REG_MVFP, &v);
1042 *value = (v & MVFP_FLIP) == MVFP_FLIP;
1043 return ret;
1044}
1045
1046
/*
 * Set vertical flipping via the MVFP flip bit (read-modify-write).
 * Same return-value quirk as ov7670_t_hflip(): ret >= 0 means success.
 */
static int ov7670_t_vflip(struct i2c_client *client, int value)
{
	unsigned char v;
	int ret;

	ret = ov7670_read(client, REG_MVFP, &v);
	if (value)
		v |= MVFP_FLIP;
	else
		v &= ~MVFP_FLIP;
	msleep(10);  /* FIXME */
	ret += ov7670_write(client, REG_MVFP, v);
	return ret;
}
1061
1062
/*
 * The V4L2 controls we support: each pairs a queryctrl description
 * with the get ("query") and set ("tweak") handlers above.
 */
static struct ov7670_control {
	struct v4l2_queryctrl qc;
	int (*query)(struct i2c_client *c, __s32 *value);
	int (*tweak)(struct i2c_client *c, int value);
} ov7670_controls[] =
{
	{
		.qc = {
			.id = V4L2_CID_BRIGHTNESS,
			.type = V4L2_CTRL_TYPE_INTEGER,
			.name = "Brightness",
			.minimum = 0,
			.maximum = 255,
			.step = 1,
			.default_value = 0x80,
			.flags = V4L2_CTRL_FLAG_SLIDER
		},
		.tweak = ov7670_t_brightness,
		.query = ov7670_q_brightness,
	},
	{
		.qc = {
			.id = V4L2_CID_CONTRAST,
			.type = V4L2_CTRL_TYPE_INTEGER,
			.name = "Contrast",
			.minimum = 0,
			.maximum = 127,
			.step = 1,
			.default_value = 0x40,   /* XXX ov7670 spec */
			.flags = V4L2_CTRL_FLAG_SLIDER
		},
		.tweak = ov7670_t_contrast,
		.query = ov7670_q_contrast,
	},
	{
		.qc = {
			.id = V4L2_CID_SATURATION,
			.type = V4L2_CTRL_TYPE_INTEGER,
			.name = "Saturation",
			.minimum = 0,
			.maximum = 256,
			.step = 1,
			.default_value = 0x80,	/* 128 = unity gain */
			.flags = V4L2_CTRL_FLAG_SLIDER
		},
		.tweak = ov7670_t_sat,
		.query = ov7670_q_sat,
	},
	{
		.qc = {
			.id = V4L2_CID_HUE,
			.type = V4L2_CTRL_TYPE_INTEGER,
			.name = "HUE",
			.minimum = -180,	/* degrees; see ov7670_t_hue */
			.maximum = 180,
			.step = 5,
			.default_value = 0,
			.flags = V4L2_CTRL_FLAG_SLIDER
		},
		.tweak = ov7670_t_hue,
		.query = ov7670_q_hue,
	},
	{
		.qc = {
			.id = V4L2_CID_VFLIP,
			.type = V4L2_CTRL_TYPE_BOOLEAN,
			.name = "Vertical flip",
			.minimum = 0,
			.maximum = 1,
			.step = 1,
			.default_value = 0,
		},
		.tweak = ov7670_t_vflip,
		.query = ov7670_q_vflip,
	},
	{
		.qc = {
			.id = V4L2_CID_HFLIP,
			.type = V4L2_CTRL_TYPE_BOOLEAN,
			.name = "Horizontal mirror",
			.minimum = 0,
			.maximum = 1,
			.step = 1,
			.default_value = 0,
		},
		.tweak = ov7670_t_hflip,
		.query = ov7670_q_hflip,
	},
};
#define N_CONTROLS (sizeof(ov7670_controls)/sizeof(ov7670_controls[0]))
1153
1154static struct ov7670_control *ov7670_find_control(__u32 id)
1155{
1156 int i;
1157
1158 for (i = 0; i < N_CONTROLS; i++)
1159 if (ov7670_controls[i].qc.id == id)
1160 return ov7670_controls + i;
1161 return NULL;
1162}
1163
1164
1165static int ov7670_queryctrl(struct i2c_client *client,
1166 struct v4l2_queryctrl *qc)
1167{
1168 struct ov7670_control *ctrl = ov7670_find_control(qc->id);
1169
1170 if (ctrl == NULL)
1171 return -EINVAL;
1172 *qc = ctrl->qc;
1173 return 0;
1174}
1175
1176static int ov7670_g_ctrl(struct i2c_client *client, struct v4l2_control *ctrl)
1177{
1178 struct ov7670_control *octrl = ov7670_find_control(ctrl->id);
1179 int ret;
1180
1181 if (octrl == NULL)
1182 return -EINVAL;
1183 ret = octrl->query(client, &ctrl->value);
1184 if (ret >= 0)
1185 return 0;
1186 return ret;
1187}
1188
1189static int ov7670_s_ctrl(struct i2c_client *client, struct v4l2_control *ctrl)
1190{
1191 struct ov7670_control *octrl = ov7670_find_control(ctrl->id);
1192 int ret;
1193
1194 if (octrl == NULL)
1195 return -EINVAL;
1196 ret = octrl->tweak(client, ctrl->value);
1197 if (ret >= 0)
1198 return 0;
1199 return ret;
1200}
1201
1202
1203
1204
1205
1206
1207/*
1208 * Basic i2c stuff.
1209 */
1210static struct i2c_driver ov7670_driver;
1211
1212static int ov7670_attach(struct i2c_adapter *adapter)
1213{
1214 int ret;
1215 struct i2c_client *client;
1216 struct ov7670_info *info;
1217
1218 /*
1219 * For now: only deal with adapters we recognize.
1220 */
1221 if (adapter->id != I2C_HW_SMBUS_CAFE)
1222 return -ENODEV;
1223
1224 client = kzalloc(sizeof (struct i2c_client), GFP_KERNEL);
1225 if (! client)
1226 return -ENOMEM;
1227 client->adapter = adapter;
1228 client->addr = OV7670_I2C_ADDR;
1229 client->driver = &ov7670_driver,
1230 strcpy(client->name, "OV7670");
1231 /*
1232 * Set up our info structure.
1233 */
1234 info = kzalloc(sizeof (struct ov7670_info), GFP_KERNEL);
1235 if (! info) {
1236 ret = -ENOMEM;
1237 goto out_free;
1238 }
1239 info->fmt = &ov7670_formats[0];
1240 info->sat = 128; /* Review this */
1241 i2c_set_clientdata(client, info);
1242
1243 /*
1244 * Make sure it's an ov7670
1245 */
1246 ret = ov7670_detect(client);
1247 if (ret)
1248 goto out_free_info;
1249 i2c_attach_client(client);
1250 return 0;
1251
1252 out_free_info:
1253 kfree(info);
1254 out_free:
1255 kfree(client);
1256 return ret;
1257}
1258
1259
/*
 * Undo ov7670_attach(): unregister the client and free both the info
 * structure and the client itself.
 */
static int ov7670_detach(struct i2c_client *client)
{
	i2c_detach_client(client);
	kfree(i2c_get_clientdata(client));
	kfree(client);
	return 0;
}
1267
1268
/*
 * i2c command dispatcher: map the internal/V4L2 ioctls onto the
 * handlers above.  Anything we don't implement gets -EINVAL.
 */
static int ov7670_command(struct i2c_client *client, unsigned int cmd,
		void *arg)
{
	switch (cmd) {
	case VIDIOC_INT_G_CHIP_IDENT:
		* (enum v4l2_chip_ident *) arg = V4L2_IDENT_OV7670;
		return 0;

	case VIDIOC_INT_RESET:
		ov7670_reset(client);
		return 0;

	case VIDIOC_INT_INIT:
		return ov7670_init(client);

	case VIDIOC_ENUM_FMT:
		return ov7670_enum_fmt(client, (struct v4l2_fmtdesc *) arg);
	case VIDIOC_TRY_FMT:
		return ov7670_try_fmt(client, (struct v4l2_format *) arg, NULL, NULL);
	case VIDIOC_S_FMT:
		return ov7670_s_fmt(client, (struct v4l2_format *) arg);
	case VIDIOC_QUERYCTRL:
		return ov7670_queryctrl(client, (struct v4l2_queryctrl *) arg);
	case VIDIOC_S_CTRL:
		return ov7670_s_ctrl(client, (struct v4l2_control *) arg);
	case VIDIOC_G_CTRL:
		return ov7670_g_ctrl(client, (struct v4l2_control *) arg);
	case VIDIOC_S_PARM:
		return ov7670_s_parm(client, (struct v4l2_streamparm *) arg);
	case VIDIOC_G_PARM:
		return ov7670_g_parm(client, (struct v4l2_streamparm *) arg);
	}
	return -EINVAL;
}
1303
1304
1305
/* i2c driver glue: adapter attach/detach plus the command dispatcher. */
static struct i2c_driver ov7670_driver = {
	.driver = {
		.name = "ov7670",
	},
	.id 		= I2C_DRIVERID_OV7670,
	.class 		= I2C_CLASS_CAM_DIGITAL,
	.attach_adapter = ov7670_attach,
	.detach_client	= ov7670_detach,
	.command	= ov7670_command,
};
1316
1317
/*
 * Module initialization
 */

/* Announce ourselves and register the i2c driver with the core. */
static int __init ov7670_mod_init(void)
{
	printk(KERN_NOTICE "OmniVision ov7670 sensor driver, at your service\n");
	return i2c_add_driver(&ov7670_driver);
}
1326
/* Unregister the i2c driver on module unload. */
static void __exit ov7670_mod_exit(void)
{
	i2c_del_driver(&ov7670_driver);
}

module_init(ov7670_mod_init);
module_exit(ov7670_mod_exit);
diff --git a/drivers/media/video/planb.c b/drivers/media/video/planb.c
index 368d6e219fa4..86d2884e16c6 100644
--- a/drivers/media/video/planb.c
+++ b/drivers/media/video/planb.c
@@ -138,7 +138,7 @@ static int grabbuf_alloc(struct planb *pb)
138 + MAX_LNUM 138 + MAX_LNUM
139#endif /* PLANB_GSCANLINE */ 139#endif /* PLANB_GSCANLINE */
140 ); 140 );
141 if ((pb->rawbuf = (unsigned char**) kmalloc (npage 141 if ((pb->rawbuf = kmalloc(npage
142 * sizeof(unsigned long), GFP_KERNEL)) == 0) 142 * sizeof(unsigned long), GFP_KERNEL)) == 0)
143 return -ENOMEM; 143 return -ENOMEM;
144 for (i = 0; i < npage; i++) { 144 for (i = 0; i < npage; i++) {
diff --git a/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c
index c80c26be6e4d..848fb233d808 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c
@@ -260,6 +260,22 @@ int pvr2_i2c_cx2584x_v4l_setup(struct pvr2_hdw *hdw,
260 sizeof(decoder_ops[0]))) - 1; 260 sizeof(decoder_ops[0]))) - 1;
261 hdw->decoder_ctrl = &ctxt->ctrl; 261 hdw->decoder_ctrl = &ctxt->ctrl;
262 cp->handler = &ctxt->handler; 262 cp->handler = &ctxt->handler;
263 {
264 /*
265 Mike Isely <isely@pobox.com> 19-Nov-2006 - This bit
266 of nuttiness for cx25840 causes that module to
267 correctly set up its video scaling. This is really
268 a problem in the cx25840 module itself, but we work
269 around it here. The problem has not been seen in
270 ivtv because there VBI is supported and set up. We
271 don't do VBI here (at least not yet) and thus we
272 never attempted to even set it up.
273 */
274 struct v4l2_format fmt;
275 memset(&fmt,0,sizeof(fmt));
276 fmt.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
277 pvr2_i2c_client_cmd(ctxt->client,VIDIOC_S_FMT,&fmt);
278 }
263 pvr2_trace(PVR2_TRACE_CHIPS,"i2c 0x%x cx2584x V4L2 handler set up", 279 pvr2_trace(PVR2_TRACE_CHIPS,"i2c 0x%x cx2584x V4L2 handler set up",
264 cp->client->addr); 280 cp->client->addr);
265 return !0; 281 return !0;
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index 1f787333d18c..d2004965187b 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -356,28 +356,6 @@ static int ctrl_freq_set(struct pvr2_ctrl *cptr,int m,int v)
356 return 0; 356 return 0;
357} 357}
358 358
359static int ctrl_hres_max_get(struct pvr2_ctrl *cptr,int *vp)
360{
361 /* If we're dealing with a 24xxx device, force the horizontal
362 maximum to be 720 no matter what, since we can't get the device
363 to work properly with any other value. Otherwise just return
364 the normal value. */
365 *vp = cptr->info->def.type_int.max_value;
366 if (cptr->hdw->hdw_type == PVR2_HDW_TYPE_24XXX) *vp = 720;
367 return 0;
368}
369
370static int ctrl_hres_min_get(struct pvr2_ctrl *cptr,int *vp)
371{
372 /* If we're dealing with a 24xxx device, force the horizontal
373 minimum to be 720 no matter what, since we can't get the device
374 to work properly with any other value. Otherwise just return
375 the normal value. */
376 *vp = cptr->info->def.type_int.min_value;
377 if (cptr->hdw->hdw_type == PVR2_HDW_TYPE_24XXX) *vp = 720;
378 return 0;
379}
380
381static int ctrl_vres_max_get(struct pvr2_ctrl *cptr,int *vp) 359static int ctrl_vres_max_get(struct pvr2_ctrl *cptr,int *vp)
382{ 360{
383 /* Actual maximum depends on the video standard in effect. */ 361 /* Actual maximum depends on the video standard in effect. */
@@ -758,10 +736,6 @@ static const struct pvr2_ctl_info control_defs[] = {
758 .default_value = 720, 736 .default_value = 720,
759 DEFREF(res_hor), 737 DEFREF(res_hor),
760 DEFINT(19,720), 738 DEFINT(19,720),
761 /* Hook in check for clamp on horizontal resolution in
762 order to avoid unsolved problem involving cx25840. */
763 .get_max_value = ctrl_hres_max_get,
764 .get_min_value = ctrl_hres_min_get,
765 },{ 739 },{
766 .desc = "Vertical capture resolution", 740 .desc = "Vertical capture resolution",
767 .name = "resolution_ver", 741 .name = "resolution_ver",
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
index 3b9012f8e380..f9bb41d8f4f3 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
@@ -185,6 +185,79 @@ static int pvr2_i2c_basic_op(struct pvr2_hdw *hdw,
185 } 185 }
186} 186}
187 187
188
189/* This is a special entry point for cases of I2C transaction attempts to
190 the IR receiver. The implementation here simulates the IR receiver by
191 issuing a command to the FX2 firmware and using that response to return
192 what the real I2C receiver would have returned. We use this for 24xxx
193 devices, where the IR receiver chip has been removed and replaced with
194 FX2 related logic. */
195static int i2c_24xxx_ir(struct pvr2_hdw *hdw,
196 u8 i2c_addr,u8 *wdata,u16 wlen,u8 *rdata,u16 rlen)
197{
198 u8 dat[4];
199 unsigned int stat;
200
201 if (!(rlen || wlen)) {
202 /* This is a probe attempt. Just let it succeed. */
203 return 0;
204 }
205
206 /* We don't understand this kind of transaction */
207 if ((wlen != 0) || (rlen == 0)) return -EIO;
208
209 if (rlen < 3) {
210 /* Mike Isely <isely@pobox.com> Appears to be a probe
211 attempt from lirc. Just fill in zeroes and return. If
212 we try instead to do the full transaction here, then bad
213 things seem to happen within the lirc driver module
214 (version 0.8.0-7 sources from Debian, when run under
215 vanilla 2.6.17.6 kernel) - and I don't have the patience
216 to chase it down. */
217 if (rlen > 0) rdata[0] = 0;
218 if (rlen > 1) rdata[1] = 0;
219 return 0;
220 }
221
222 /* Issue a command to the FX2 to read the IR receiver. */
223 LOCK_TAKE(hdw->ctl_lock); do {
224 hdw->cmd_buffer[0] = 0xec;
225 stat = pvr2_send_request(hdw,
226 hdw->cmd_buffer,1,
227 hdw->cmd_buffer,4);
228 dat[0] = hdw->cmd_buffer[0];
229 dat[1] = hdw->cmd_buffer[1];
230 dat[2] = hdw->cmd_buffer[2];
231 dat[3] = hdw->cmd_buffer[3];
232 } while (0); LOCK_GIVE(hdw->ctl_lock);
233
234 /* Give up if that operation failed. */
235 if (stat != 0) return stat;
236
237 /* Mangle the results into something that looks like the real IR
238 receiver. */
239 rdata[2] = 0xc1;
240 if (dat[0] != 1) {
241 /* No code received. */
242 rdata[0] = 0;
243 rdata[1] = 0;
244 } else {
245 u16 val;
246 /* Mash the FX2 firmware-provided IR code into something
247 that the normal i2c chip-level driver expects. */
248 val = dat[1];
249 val <<= 8;
250 val |= dat[2];
251 val >>= 1;
252 val &= ~0x0003;
253 val |= 0x8000;
254 rdata[0] = (val >> 8) & 0xffu;
255 rdata[1] = val & 0xffu;
256 }
257
258 return 0;
259}
260
188/* This is a special entry point that is entered if an I2C operation is 261/* This is a special entry point that is entered if an I2C operation is
189 attempted to a wm8775 chip on model 24xxx hardware. Autodetect of this 262 attempted to a wm8775 chip on model 24xxx hardware. Autodetect of this
190 part doesn't work, but we know it is really there. So let's look for 263 part doesn't work, but we know it is really there. So let's look for
@@ -887,17 +960,17 @@ void pvr2_i2c_core_init(struct pvr2_hdw *hdw)
887{ 960{
888 unsigned int idx; 961 unsigned int idx;
889 962
890 // The default action for all possible I2C addresses is just to do 963 /* The default action for all possible I2C addresses is just to do
891 // the transfer normally. 964 the transfer normally. */
892 for (idx = 0; idx < PVR2_I2C_FUNC_CNT; idx++) { 965 for (idx = 0; idx < PVR2_I2C_FUNC_CNT; idx++) {
893 hdw->i2c_func[idx] = pvr2_i2c_basic_op; 966 hdw->i2c_func[idx] = pvr2_i2c_basic_op;
894 } 967 }
895 968
896 // If however we're dealing with new hardware, insert some hacks in 969 /* However, deal with various special cases for 24xxx hardware. */
897 // the I2C transfer stack to let things work better.
898 if (hdw->hdw_type == PVR2_HDW_TYPE_24XXX) { 970 if (hdw->hdw_type == PVR2_HDW_TYPE_24XXX) {
899 hdw->i2c_func[0x1b] = i2c_hack_wm8775; 971 hdw->i2c_func[0x1b] = i2c_hack_wm8775;
900 hdw->i2c_func[0x44] = i2c_hack_cx25840; 972 hdw->i2c_func[0x44] = i2c_hack_cx25840;
973 hdw->i2c_func[0x18] = i2c_24xxx_ir;
901 } 974 }
902 975
903 // Configure the adapter and set up everything else related to it. 976 // Configure the adapter and set up everything else related to it.
diff --git a/drivers/media/video/saa7115.c b/drivers/media/video/saa7115.c
index f28398dd9d93..c2374ed7ba9f 100644
--- a/drivers/media/video/saa7115.c
+++ b/drivers/media/video/saa7115.c
@@ -851,7 +851,7 @@ static int saa711x_set_size(struct i2c_client *client, int width, int height)
851 851
852 /* On 60Hz, it is using a higher Vertical Output Size */ 852 /* On 60Hz, it is using a higher Vertical Output Size */
853 if (!is_50hz) 853 if (!is_50hz)
854 res+=(VRES_60HZ-480)>>1; 854 res += (VRES_60HZ - 480) >> 1;
855 855
856 /* height */ 856 /* height */
857 saa711x_write(client, R_CE_B_VERT_OUTPUT_WINDOW_LENGTH, 857 saa711x_write(client, R_CE_B_VERT_OUTPUT_WINDOW_LENGTH,
@@ -907,7 +907,7 @@ static int saa711x_set_size(struct i2c_client *client, int width, int height)
907 907
908 /* Activates task "B" */ 908 /* Activates task "B" */
909 saa711x_write(client, R_80_GLOBAL_CNTL_1, 909 saa711x_write(client, R_80_GLOBAL_CNTL_1,
910 saa711x_read(client,R_80_GLOBAL_CNTL_1)|0x20); 910 saa711x_read(client,R_80_GLOBAL_CNTL_1) | 0x20);
911 911
912 return 0; 912 return 0;
913} 913}
@@ -932,11 +932,11 @@ static void saa711x_set_v4lstd(struct i2c_client *client, v4l2_std_id std)
932 if (std & V4L2_STD_525_60) { 932 if (std & V4L2_STD_525_60) {
933 v4l_dbg(1, debug, client, "decoder set standard 60 Hz\n"); 933 v4l_dbg(1, debug, client, "decoder set standard 60 Hz\n");
934 saa711x_writeregs(client, saa7115_cfg_60hz_video); 934 saa711x_writeregs(client, saa7115_cfg_60hz_video);
935 saa711x_set_size(client,720,480); 935 saa711x_set_size(client, 720, 480);
936 } else { 936 } else {
937 v4l_dbg(1, debug, client, "decoder set standard 50 Hz\n"); 937 v4l_dbg(1, debug, client, "decoder set standard 50 Hz\n");
938 saa711x_writeregs(client, saa7115_cfg_50hz_video); 938 saa711x_writeregs(client, saa7115_cfg_50hz_video);
939 saa711x_set_size(client,720,576); 939 saa711x_set_size(client, 720, 576);
940 } 940 }
941 941
942 /* Register 0E - Bits D6-D4 on NO-AUTO mode 942 /* Register 0E - Bits D6-D4 on NO-AUTO mode
@@ -1464,13 +1464,13 @@ static int saa711x_attach(struct i2c_adapter *adapter, int address, int kind)
1464 client->driver = &i2c_driver_saa711x; 1464 client->driver = &i2c_driver_saa711x;
1465 snprintf(client->name, sizeof(client->name) - 1, "saa7115"); 1465 snprintf(client->name, sizeof(client->name) - 1, "saa7115");
1466 1466
1467 for (i=0;i<0x0f;i++) { 1467 for (i = 0; i < 0x0f; i++) {
1468 saa711x_write(client, 0, i); 1468 saa711x_write(client, 0, i);
1469 name[i] = (saa711x_read(client, 0) &0x0f) +'0'; 1469 name[i] = (saa711x_read(client, 0) & 0x0f) + '0';
1470 if (name[i]>'9') 1470 if (name[i] > '9')
1471 name[i]+='a'-'9'-1; 1471 name[i] += 'a' - '9' - 1;
1472 } 1472 }
1473 name[i]='\0'; 1473 name[i] = '\0';
1474 1474
1475 saa711x_write(client, 0, 5); 1475 saa711x_write(client, 0, 5);
1476 chip_id = saa711x_read(client, 0) & 0x0f; 1476 chip_id = saa711x_read(client, 0) & 0x0f;
diff --git a/drivers/media/video/saa7134/saa7134-alsa.c b/drivers/media/video/saa7134/saa7134-alsa.c
index 4abf5c03a740..ffb0f647a86d 100644
--- a/drivers/media/video/saa7134/saa7134-alsa.c
+++ b/drivers/media/video/saa7134/saa7134-alsa.c
@@ -1,10 +1,6 @@
1/* 1/*
2 * SAA713x ALSA support for V4L 2 * SAA713x ALSA support for V4L
3 * 3 *
4 *
5 * Caveats:
6 * - Volume doesn't work (it's always at max)
7 *
8 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation, version 2 6 * the Free Software Foundation, version 2
@@ -614,13 +610,18 @@ static int snd_card_saa7134_capture_open(struct snd_pcm_substream * substream)
614 snd_card_saa7134_pcm_t *pcm; 610 snd_card_saa7134_pcm_t *pcm;
615 snd_card_saa7134_t *saa7134 = snd_pcm_substream_chip(substream); 611 snd_card_saa7134_t *saa7134 = snd_pcm_substream_chip(substream);
616 struct saa7134_dev *dev = saa7134->dev; 612 struct saa7134_dev *dev = saa7134->dev;
617 int err; 613 int amux, err;
618 614
619 mutex_lock(&dev->dmasound.lock); 615 mutex_lock(&dev->dmasound.lock);
620 616
621 dev->dmasound.read_count = 0; 617 dev->dmasound.read_count = 0;
622 dev->dmasound.read_offset = 0; 618 dev->dmasound.read_offset = 0;
623 619
620 amux = dev->input->amux;
621 if ((amux < 1) || (amux > 3))
622 amux = 1;
623 dev->dmasound.input = amux - 1;
624
624 mutex_unlock(&dev->dmasound.lock); 625 mutex_unlock(&dev->dmasound.lock);
625 626
626 pcm = kzalloc(sizeof(*pcm), GFP_KERNEL); 627 pcm = kzalloc(sizeof(*pcm), GFP_KERNEL);
@@ -713,6 +714,8 @@ static int snd_saa7134_volume_put(struct snd_kcontrol * kcontrol,
713 struct snd_ctl_elem_value * ucontrol) 714 struct snd_ctl_elem_value * ucontrol)
714{ 715{
715 snd_card_saa7134_t *chip = snd_kcontrol_chip(kcontrol); 716 snd_card_saa7134_t *chip = snd_kcontrol_chip(kcontrol);
717 struct saa7134_dev *dev = chip->dev;
718
716 int change, addr = kcontrol->private_value; 719 int change, addr = kcontrol->private_value;
717 int left, right; 720 int left, right;
718 721
@@ -727,10 +730,52 @@ static int snd_saa7134_volume_put(struct snd_kcontrol * kcontrol,
727 if (right > 20) 730 if (right > 20)
728 right = 20; 731 right = 20;
729 spin_lock_irq(&chip->mixer_lock); 732 spin_lock_irq(&chip->mixer_lock);
730 change = chip->mixer_volume[addr][0] != left || 733 change = 0;
731 chip->mixer_volume[addr][1] != right; 734 if (chip->mixer_volume[addr][0] != left) {
732 chip->mixer_volume[addr][0] = left; 735 change = 1;
733 chip->mixer_volume[addr][1] = right; 736 right = left;
737 }
738 if (chip->mixer_volume[addr][1] != right) {
739 change = 1;
740 left = right;
741 }
742 if (change) {
743 switch (dev->pci->device) {
744 case PCI_DEVICE_ID_PHILIPS_SAA7134:
745 switch (addr) {
746 case MIXER_ADDR_TVTUNER:
747 left = 20;
748 break;
749 case MIXER_ADDR_LINE1:
750 saa_andorb(SAA7134_ANALOG_IO_SELECT, 0x10,
751 (left > 10) ? 0x00 : 0x10);
752 break;
753 case MIXER_ADDR_LINE2:
754 saa_andorb(SAA7134_ANALOG_IO_SELECT, 0x20,
755 (left > 10) ? 0x00 : 0x20);
756 break;
757 }
758 break;
759 case PCI_DEVICE_ID_PHILIPS_SAA7133:
760 case PCI_DEVICE_ID_PHILIPS_SAA7135:
761 switch (addr) {
762 case MIXER_ADDR_TVTUNER:
763 left = 20;
764 break;
765 case MIXER_ADDR_LINE1:
766 saa_andorb(0x0594, 0x10,
767 (left > 10) ? 0x00 : 0x10);
768 break;
769 case MIXER_ADDR_LINE2:
770 saa_andorb(0x0594, 0x20,
771 (left > 10) ? 0x00 : 0x20);
772 break;
773 }
774 break;
775 }
776 chip->mixer_volume[addr][0] = left;
777 chip->mixer_volume[addr][1] = right;
778 }
734 spin_unlock_irq(&chip->mixer_lock); 779 spin_unlock_irq(&chip->mixer_lock);
735 return change; 780 return change;
736} 781}
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index 51f0cfdcb680..4dead84aff46 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -2462,14 +2462,17 @@ struct saa7134_board saa7134_boards[] = {
2462 .vmux = 1, 2462 .vmux = 1,
2463 .amux = TV, 2463 .amux = TV,
2464 .tv = 1, 2464 .tv = 1,
2465 .gpio = 0x0000000,
2465 },{ 2466 },{
2466 .name = name_comp1, 2467 .name = name_comp1,
2467 .vmux = 3, 2468 .vmux = 3,
2468 .amux = LINE2, 2469 .amux = LINE2,
2470 .gpio = 0x0200000,
2469 },{ 2471 },{
2470 .name = name_svideo, 2472 .name = name_svideo,
2471 .vmux = 8, 2473 .vmux = 8,
2472 .amux = LINE2, 2474 .amux = LINE2,
2475 .gpio = 0x0200000,
2473 }}, 2476 }},
2474 .radio = { 2477 .radio = {
2475 .name = name_radio, 2478 .name = name_radio,
@@ -3022,6 +3025,158 @@ struct saa7134_board saa7134_boards[] = {
3022 .amux = LINE1, 3025 .amux = LINE1,
3023 }, 3026 },
3024 }, 3027 },
3028 [SAA7134_BOARD_PINNACLE_PCTV_310i] = {
3029 .name = "Pinnacle PCTV 310i",
3030 .audio_clock = 0x00187de7,
3031 .tuner_type = TUNER_PHILIPS_TDA8290,
3032 .radio_type = UNSET,
3033 .tuner_addr = ADDR_UNSET,
3034 .radio_addr = ADDR_UNSET,
3035 .mpeg = SAA7134_MPEG_DVB,
3036 .gpiomask = 0x000200000,
3037 .inputs = {{
3038 .name = name_tv,
3039 .vmux = 4,
3040 .amux = TV,
3041 .tv = 1,
3042 },{
3043 .name = name_comp1,
3044 .vmux = 1,
3045 .amux = LINE2,
3046 },{
3047 .name = name_comp2,
3048 .vmux = 0,
3049 .amux = LINE2,
3050 },{
3051 .name = name_svideo,
3052 .vmux = 8,
3053 .amux = LINE2,
3054 }},
3055 .radio = {
3056 .name = name_radio,
3057 .amux = TV,
3058 .gpio = 0x0200000,
3059 },
3060 },
3061 [SAA7134_BOARD_AVERMEDIA_STUDIO_507] = {
3062 /* Mikhail Fedotov <mo_fedotov@mail.ru> */
3063 .name = "Avermedia AVerTV Studio 507",
3064 .audio_clock = 0x00187de7,
3065 .tuner_type = TUNER_PHILIPS_FM1256_IH3,
3066 .radio_type = UNSET,
3067 .tuner_addr = ADDR_UNSET,
3068 .radio_addr = ADDR_UNSET,
3069 .tda9887_conf = TDA9887_PRESENT,
3070 .gpiomask = 0x03,
3071 .inputs = {{
3072 .name = name_tv,
3073 .vmux = 1,
3074 .amux = TV,
3075 .tv = 1,
3076 .gpio = 0x00,
3077 },{
3078 .name = name_comp1,
3079 .vmux = 0,
3080 .amux = LINE2,
3081 .gpio = 0x00,
3082 },{
3083 .name = name_comp2,
3084 .vmux = 3,
3085 .amux = LINE2,
3086 .gpio = 0x00,
3087 },{
3088 .name = name_svideo,
3089 .vmux = 8,
3090 .amux = LINE2,
3091 .gpio = 0x00,
3092 }},
3093 .radio = {
3094 .name = name_radio,
3095 .amux = LINE2,
3096 .gpio = 0x01,
3097 },
3098 .mute = {
3099 .name = name_mute,
3100 .amux = LINE1,
3101 .gpio = 0x00,
3102 },
3103 },
3104 [SAA7134_BOARD_VIDEOMATE_DVBT_200A] = {
3105 /* Francis Barber <fedora@barber-family.id.au> */
3106 .name = "Compro Videomate DVB-T200A",
3107 .audio_clock = 0x00187de7,
3108 .tuner_type = TUNER_ABSENT,
3109 .radio_type = UNSET,
3110 .tuner_addr = ADDR_UNSET,
3111 .radio_addr = ADDR_UNSET,
3112 .tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE,
3113 .mpeg = SAA7134_MPEG_DVB,
3114 .inputs = {{
3115 .name = name_tv,
3116 .vmux = 3,
3117 .amux = TV,
3118 .tv = 1,
3119 },{
3120 .name = name_comp1,
3121 .vmux = 1,
3122 .amux = LINE2,
3123 },{
3124 .name = name_svideo,
3125 .vmux = 8,
3126 .amux = LINE2,
3127 }},
3128 },
3129 [SAA7134_BOARD_HAUPPAUGE_HVR1110] = {
3130 /* Thomas Genty <tomlohave@gmail.com> */
3131 .name = "Hauppauge WinTV-HVR1110 DVB-T/Hybrid",
3132 .audio_clock = 0x00187de7,
3133 .tuner_type = TUNER_PHILIPS_TDA8290,
3134 .radio_type = UNSET,
3135 .tuner_addr = ADDR_UNSET,
3136 .radio_addr = ADDR_UNSET,
3137 .mpeg = SAA7134_MPEG_DVB,
3138 .inputs = {{
3139 .name = name_tv,
3140 .vmux = 1,
3141 .amux = TV,
3142 .tv = 1,
3143 },{
3144 .name = name_comp1,
3145 .vmux = 3,
3146 .amux = LINE2, /* FIXME: audio doesn't work on svideo/composite */
3147 },{
3148 .name = name_svideo,
3149 .vmux = 8,
3150 .amux = LINE2, /* FIXME: audio doesn't work on svideo/composite */
3151 }},
3152 .radio = {
3153 .name = name_radio,
3154 .amux = TV,
3155 },
3156 },
3157 [SAA7134_BOARD_CINERGY_HT_PCMCIA] = {
3158 .name = "Terratec Cinergy HT PCMCIA",
3159 .audio_clock = 0x00187de7,
3160 .tuner_type = TUNER_PHILIPS_TDA8290,
3161 .radio_type = UNSET,
3162 .tuner_addr = ADDR_UNSET,
3163 .radio_addr = ADDR_UNSET,
3164 .mpeg = SAA7134_MPEG_DVB,
3165 .inputs = {{
3166 .name = name_tv,
3167 .vmux = 1,
3168 .amux = TV,
3169 .tv = 1,
3170 },{
3171 .name = name_comp1,
3172 .vmux = 0,
3173 .amux = LINE1,
3174 },{
3175 .name = name_svideo,
3176 .vmux = 6,
3177 .amux = LINE1,
3178 }},
3179 },
3025}; 3180};
3026 3181
3027const unsigned int saa7134_bcount = ARRAY_SIZE(saa7134_boards); 3182const unsigned int saa7134_bcount = ARRAY_SIZE(saa7134_boards);
@@ -3631,6 +3786,36 @@ struct pci_device_id saa7134_pci_tbl[] = {
3631 .subdevice = 0x4860, 3786 .subdevice = 0x4860,
3632 .driver_data = SAA7134_BOARD_ASUS_EUROPA2_HYBRID, 3787 .driver_data = SAA7134_BOARD_ASUS_EUROPA2_HYBRID,
3633 },{ 3788 },{
3789 .vendor = PCI_VENDOR_ID_PHILIPS,
3790 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
3791 .subvendor = 0x11bd,
3792 .subdevice = 0x002f,
3793 .driver_data = SAA7134_BOARD_PINNACLE_PCTV_310i,
3794 },{
3795 .vendor = PCI_VENDOR_ID_PHILIPS,
3796 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
3797 .subvendor = 0x1461, /* Avermedia Technologies Inc */
3798 .subdevice = 0x9715,
3799 .driver_data = SAA7134_BOARD_AVERMEDIA_STUDIO_507,
3800 },{
3801 .vendor = PCI_VENDOR_ID_PHILIPS,
3802 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
3803 .subvendor = 0x1043,
3804 .subdevice = 0x4876,
3805 .driver_data = SAA7134_BOARD_ASUSTeK_P7131_DUAL,
3806 },{
3807 .vendor = PCI_VENDOR_ID_PHILIPS,
3808 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
3809 .subvendor = 0x0070,
3810 .subdevice = 0x6701,
3811 .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1110,
3812 },{
3813 .vendor = PCI_VENDOR_ID_PHILIPS,
3814 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
3815 .subvendor = 0x153b,
3816 .subdevice = 0x1172,
3817 .driver_data = SAA7134_BOARD_CINERGY_HT_PCMCIA,
3818 },{
3634 /* --- boards without eeprom + subsystem ID --- */ 3819 /* --- boards without eeprom + subsystem ID --- */
3635 .vendor = PCI_VENDOR_ID_PHILIPS, 3820 .vendor = PCI_VENDOR_ID_PHILIPS,
3636 .device = PCI_DEVICE_ID_PHILIPS_SAA7134, 3821 .device = PCI_DEVICE_ID_PHILIPS_SAA7134,
@@ -3717,6 +3902,7 @@ int saa7134_board_init1(struct saa7134_dev *dev)
3717 case SAA7134_BOARD_AVERMEDIA_305: 3902 case SAA7134_BOARD_AVERMEDIA_305:
3718 case SAA7134_BOARD_AVERMEDIA_STUDIO_307: 3903 case SAA7134_BOARD_AVERMEDIA_STUDIO_307:
3719 case SAA7134_BOARD_AVERMEDIA_307: 3904 case SAA7134_BOARD_AVERMEDIA_307:
3905 case SAA7134_BOARD_AVERMEDIA_STUDIO_507:
3720 case SAA7134_BOARD_AVERMEDIA_GO_007_FM: 3906 case SAA7134_BOARD_AVERMEDIA_GO_007_FM:
3721 case SAA7134_BOARD_AVERMEDIA_777: 3907 case SAA7134_BOARD_AVERMEDIA_777:
3722/* case SAA7134_BOARD_SABRENT_SBTTVFM: */ /* not finished yet */ 3908/* case SAA7134_BOARD_SABRENT_SBTTVFM: */ /* not finished yet */
@@ -3725,6 +3911,7 @@ int saa7134_board_init1(struct saa7134_dev *dev)
3725 case SAA7134_BOARD_VIDEOMATE_TV_GOLD_PLUSII: 3911 case SAA7134_BOARD_VIDEOMATE_TV_GOLD_PLUSII:
3726 case SAA7134_BOARD_VIDEOMATE_DVBT_300: 3912 case SAA7134_BOARD_VIDEOMATE_DVBT_300:
3727 case SAA7134_BOARD_VIDEOMATE_DVBT_200: 3913 case SAA7134_BOARD_VIDEOMATE_DVBT_200:
3914 case SAA7134_BOARD_VIDEOMATE_DVBT_200A:
3728 case SAA7134_BOARD_MANLI_MTV001: 3915 case SAA7134_BOARD_MANLI_MTV001:
3729 case SAA7134_BOARD_MANLI_MTV002: 3916 case SAA7134_BOARD_MANLI_MTV002:
3730 case SAA7134_BOARD_BEHOLD_409FM: 3917 case SAA7134_BOARD_BEHOLD_409FM:
@@ -3793,7 +3980,9 @@ int saa7134_board_init1(struct saa7134_dev *dev)
3793 break; 3980 break;
3794 /* i2c remotes */ 3981 /* i2c remotes */
3795 case SAA7134_BOARD_PINNACLE_PCTV_110i: 3982 case SAA7134_BOARD_PINNACLE_PCTV_110i:
3983 case SAA7134_BOARD_PINNACLE_PCTV_310i:
3796 case SAA7134_BOARD_UPMOST_PURPLE_TV: 3984 case SAA7134_BOARD_UPMOST_PURPLE_TV:
3985 case SAA7134_BOARD_HAUPPAUGE_HVR1110:
3797 dev->has_remote = SAA7134_REMOTE_I2C; 3986 dev->has_remote = SAA7134_REMOTE_I2C;
3798 break; 3987 break;
3799 case SAA7134_BOARD_AVERMEDIA_A169_B: 3988 case SAA7134_BOARD_AVERMEDIA_A169_B:
@@ -3924,9 +4113,11 @@ int saa7134_board_init2(struct saa7134_dev *dev)
3924 } 4113 }
3925 break; 4114 break;
3926 case SAA7134_BOARD_PHILIPS_TIGER: 4115 case SAA7134_BOARD_PHILIPS_TIGER:
4116 case SAA7134_BOARD_PINNACLE_PCTV_310i:
3927 case SAA7134_BOARD_TEVION_DVBT_220RF: 4117 case SAA7134_BOARD_TEVION_DVBT_220RF:
3928 case SAA7134_BOARD_ASUSTeK_P7131_DUAL: 4118 case SAA7134_BOARD_ASUSTeK_P7131_DUAL:
3929 case SAA7134_BOARD_MEDION_MD8800_QUADRO: 4119 case SAA7134_BOARD_MEDION_MD8800_QUADRO:
4120 case SAA7134_BOARD_HAUPPAUGE_HVR1110:
3930 /* this is a hybrid board, initialize to analog mode 4121 /* this is a hybrid board, initialize to analog mode
3931 * and configure firmware eeprom address 4122 * and configure firmware eeprom address
3932 */ 4123 */
@@ -3952,6 +4143,14 @@ int saa7134_board_init2(struct saa7134_dev *dev)
3952 i2c_transfer(&dev->i2c_adap, &msg, 1); 4143 i2c_transfer(&dev->i2c_adap, &msg, 1);
3953 } 4144 }
3954 break; 4145 break;
4146 case SAA7134_BOARD_CINERGY_HT_PCMCIA:
4147 /* make the tda10046 find its eeprom */
4148 {
4149 u8 data[] = { 0x3c, 0x33, 0x60};
4150 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)};
4151 i2c_transfer(&dev->i2c_adap, &msg, 1);
4152 }
4153 break;
3955 case SAA7134_BOARD_KWORLD_ATSC110: 4154 case SAA7134_BOARD_KWORLD_ATSC110:
3956 { 4155 {
3957 /* enable tuner */ 4156 /* enable tuner */
@@ -3964,6 +4163,29 @@ int saa7134_board_init2(struct saa7134_dev *dev)
3964 dev->name, i); 4163 dev->name, i);
3965 } 4164 }
3966 break; 4165 break;
4166 case SAA7134_BOARD_VIDEOMATE_DVBT_200:
4167 case SAA7134_BOARD_VIDEOMATE_DVBT_200A:
4168 /* The T200 and the T200A share the same pci id. Consequently,
4169 * we are going to query eeprom to try to find out which one we
4170 * are actually looking at. */
4171
4172 /* Don't do this if the board was specifically selected with an
4173 * insmod option or if we have the default configuration T200*/
4174 if(!dev->autodetected || (dev->eedata[0x41] == 0xd0))
4175 break;
4176 if(dev->eedata[0x41] == 0x02) {
4177 /* Reconfigure board as T200A */
4178 dev->board = SAA7134_BOARD_VIDEOMATE_DVBT_200A;
4179 dev->tuner_type = saa7134_boards[dev->board].tuner_type;
4180 dev->tda9887_conf = saa7134_boards[dev->board].tda9887_conf;
4181 printk(KERN_INFO "%s: Reconfigured board as %s\n",
4182 dev->name, saa7134_boards[dev->board].name);
4183 } else {
4184 printk(KERN_WARNING "%s: Unexpected tuner type info: %x in eeprom\n",
4185 dev->name, dev->eedata[0x41]);
4186 break;
4187 }
4188 break;
3967 } 4189 }
3968 return 0; 4190 return 0;
3969} 4191}
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index 5c9e63dfbea6..ed038fff3b4f 100644
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -889,15 +889,16 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
889 must_configure_manually(); 889 must_configure_manually();
890 dev->board = SAA7134_BOARD_UNKNOWN; 890 dev->board = SAA7134_BOARD_UNKNOWN;
891 } 891 }
892 dev->autodetected = card[dev->nr] != dev->board;
892 dev->tuner_type = saa7134_boards[dev->board].tuner_type; 893 dev->tuner_type = saa7134_boards[dev->board].tuner_type;
893 dev->tda9887_conf = saa7134_boards[dev->board].tda9887_conf; 894 dev->tda9887_conf = saa7134_boards[dev->board].tda9887_conf;
894 if (UNSET != tuner[dev->nr]) 895 if (UNSET != tuner[dev->nr])
895 dev->tuner_type = tuner[dev->nr]; 896 dev->tuner_type = tuner[dev->nr];
896 printk(KERN_INFO "%s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n", 897 printk(KERN_INFO "%s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
897 dev->name,pci_dev->subsystem_vendor, 898 dev->name,pci_dev->subsystem_vendor,
898 pci_dev->subsystem_device,saa7134_boards[dev->board].name, 899 pci_dev->subsystem_device,saa7134_boards[dev->board].name,
899 dev->board, card[dev->nr] == dev->board ? 900 dev->board, dev->autodetected ?
900 "insmod option" : "autodetected"); 901 "autodetected" : "insmod option");
901 902
902 /* get mmio */ 903 /* get mmio */
903 if (!request_mem_region(pci_resource_start(pci_dev,0), 904 if (!request_mem_region(pci_resource_start(pci_dev,0),
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index 6b61d9b2fcb5..fa8339879095 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -50,6 +50,10 @@ static unsigned int antenna_pwr = 0;
50module_param(antenna_pwr, int, 0444); 50module_param(antenna_pwr, int, 0444);
51MODULE_PARM_DESC(antenna_pwr,"enable antenna power (Pinnacle 300i)"); 51MODULE_PARM_DESC(antenna_pwr,"enable antenna power (Pinnacle 300i)");
52 52
53static int use_frontent = 0;
54module_param(use_frontent, int, 0644);
55MODULE_PARM_DESC(use_frontent,"for cards with multiple frontends (0: terrestrial, 1: satellite)");
56
53/* ------------------------------------------------------------------ */ 57/* ------------------------------------------------------------------ */
54static int pinnacle_antenna_pwr(struct saa7134_dev *dev, int on) 58static int pinnacle_antenna_pwr(struct saa7134_dev *dev, int on)
55{ 59{
@@ -293,7 +297,7 @@ static int philips_tu1216_tuner_60_set_params(struct dvb_frontend *fe, struct dv
293 return philips_tda6651_pll_set(0x60, fe, params); 297 return philips_tda6651_pll_set(0x60, fe, params);
294} 298}
295 299
296static int philips_tu1216_request_firmware(struct dvb_frontend *fe, 300static int philips_tda1004x_request_firmware(struct dvb_frontend *fe,
297 const struct firmware **fw, char *name) 301 const struct firmware **fw, char *name)
298{ 302{
299 struct saa7134_dev *dev = fe->dvb->priv; 303 struct saa7134_dev *dev = fe->dvb->priv;
@@ -308,7 +312,7 @@ static struct tda1004x_config philips_tu1216_60_config = {
308 .xtal_freq = TDA10046_XTAL_4M, 312 .xtal_freq = TDA10046_XTAL_4M,
309 .agc_config = TDA10046_AGC_DEFAULT, 313 .agc_config = TDA10046_AGC_DEFAULT,
310 .if_freq = TDA10046_FREQ_3617, 314 .if_freq = TDA10046_FREQ_3617,
311 .request_firmware = philips_tu1216_request_firmware, 315 .request_firmware = philips_tda1004x_request_firmware,
312}; 316};
313 317
314/* ------------------------------------------------------------------ */ 318/* ------------------------------------------------------------------ */
@@ -331,12 +335,12 @@ static struct tda1004x_config philips_tu1216_61_config = {
331 .xtal_freq = TDA10046_XTAL_4M, 335 .xtal_freq = TDA10046_XTAL_4M,
332 .agc_config = TDA10046_AGC_DEFAULT, 336 .agc_config = TDA10046_AGC_DEFAULT,
333 .if_freq = TDA10046_FREQ_3617, 337 .if_freq = TDA10046_FREQ_3617,
334 .request_firmware = philips_tu1216_request_firmware, 338 .request_firmware = philips_tda1004x_request_firmware,
335}; 339};
336 340
337/* ------------------------------------------------------------------ */ 341/* ------------------------------------------------------------------ */
338 342
339static int philips_europa_tuner_init(struct dvb_frontend *fe) 343static int philips_td1316_tuner_init(struct dvb_frontend *fe)
340{ 344{
341 struct saa7134_dev *dev = fe->dvb->priv; 345 struct saa7134_dev *dev = fe->dvb->priv;
342 static u8 msg[] = { 0x0b, 0xf5, 0x86, 0xab }; 346 static u8 msg[] = { 0x0b, 0xf5, 0x86, 0xab };
@@ -347,18 +351,8 @@ static int philips_europa_tuner_init(struct dvb_frontend *fe)
347 fe->ops.i2c_gate_ctrl(fe, 1); 351 fe->ops.i2c_gate_ctrl(fe, 1);
348 if (i2c_transfer(&dev->i2c_adap, &init_msg, 1) != 1) 352 if (i2c_transfer(&dev->i2c_adap, &init_msg, 1) != 1)
349 return -EIO; 353 return -EIO;
350 msleep(1);
351
352 /* switch the board to dvb mode */
353 init_msg.addr = 0x43;
354 init_msg.len = 0x02;
355 msg[0] = 0x00;
356 msg[1] = 0x40;
357 if (fe->ops.i2c_gate_ctrl) 354 if (fe->ops.i2c_gate_ctrl)
358 fe->ops.i2c_gate_ctrl(fe, 1); 355 fe->ops.i2c_gate_ctrl(fe, 0);
359 if (i2c_transfer(&dev->i2c_adap, &init_msg, 1) != 1)
360 return -EIO;
361
362 return 0; 356 return 0;
363} 357}
364 358
@@ -367,6 +361,22 @@ static int philips_td1316_tuner_set_params(struct dvb_frontend *fe, struct dvb_f
367 return philips_tda6651_pll_set(0x61, fe, params); 361 return philips_tda6651_pll_set(0x61, fe, params);
368} 362}
369 363
364static int philips_europa_tuner_init(struct dvb_frontend *fe)
365{
366 struct saa7134_dev *dev = fe->dvb->priv;
367 static u8 msg[] = { 0x00, 0x40};
368 struct i2c_msg init_msg = {.addr = 0x43,.flags = 0,.buf = msg,.len = sizeof(msg) };
369
370
371 if (philips_td1316_tuner_init(fe))
372 return -EIO;
373 msleep(1);
374 if (i2c_transfer(&dev->i2c_adap, &init_msg, 1) != 1)
375 return -EIO;
376
377 return 0;
378}
379
370static int philips_europa_tuner_sleep(struct dvb_frontend *fe) 380static int philips_europa_tuner_sleep(struct dvb_frontend *fe)
371{ 381{
372 struct saa7134_dev *dev = fe->dvb->priv; 382 struct saa7134_dev *dev = fe->dvb->priv;
@@ -671,7 +681,7 @@ static struct tda1004x_config tda827x_lifeview_config = {
671 .invert = 1, 681 .invert = 1,
672 .invert_oclk = 0, 682 .invert_oclk = 0,
673 .xtal_freq = TDA10046_XTAL_16M, 683 .xtal_freq = TDA10046_XTAL_16M,
674 .agc_config = TDA10046_AGC_TDA827X, 684 .agc_config = TDA10046_AGC_TDA827X_GP11,
675 .if_freq = TDA10046_FREQ_045, 685 .if_freq = TDA10046_FREQ_045,
676 .request_firmware = NULL, 686 .request_firmware = NULL,
677}; 687};
@@ -812,32 +822,40 @@ static int philips_tda827xa_tuner_sleep(u8 addr, struct dvb_frontend *fe)
812 if (fe->ops.i2c_gate_ctrl) 822 if (fe->ops.i2c_gate_ctrl)
813 fe->ops.i2c_gate_ctrl(fe, 1); 823 fe->ops.i2c_gate_ctrl(fe, 1);
814 i2c_transfer(&dev->i2c_adap, &tuner_msg, 1); 824 i2c_transfer(&dev->i2c_adap, &tuner_msg, 1);
825 if (fe->ops.i2c_gate_ctrl)
826 fe->ops.i2c_gate_ctrl(fe, 0);
815 return 0; 827 return 0;
816} 828}
817 829
818/* ------------------------------------------------------------------ */ 830/* ------------------------------------------------------------------ */
819 831
820static int philips_tiger_tuner_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params) 832static int tda8290_i2c_gate_ctrl(struct dvb_frontend* fe, int enable)
821{ 833{
822 int ret;
823 struct saa7134_dev *dev = fe->dvb->priv; 834 struct saa7134_dev *dev = fe->dvb->priv;
824 static u8 tda8290_close[] = { 0x21, 0xc0}; 835 static u8 tda8290_close[] = { 0x21, 0xc0};
825 static u8 tda8290_open[] = { 0x21, 0x80}; 836 static u8 tda8290_open[] = { 0x21, 0x80};
826 struct i2c_msg tda8290_msg = {.addr = 0x4b,.flags = 0, .len = 2}; 837 struct i2c_msg tda8290_msg = {.addr = 0x4b,.flags = 0, .len = 2};
827 838 if (enable) {
828 /* close tda8290 i2c bridge */ 839 tda8290_msg.buf = tda8290_close;
829 tda8290_msg.buf = tda8290_close; 840 } else {
830 ret = i2c_transfer(&dev->i2c_adap, &tda8290_msg, 1); 841 tda8290_msg.buf = tda8290_open;
831 if (ret != 1) 842 }
843 if (i2c_transfer(&dev->i2c_adap, &tda8290_msg, 1) != 1)
832 return -EIO; 844 return -EIO;
833 msleep(20); 845 msleep(20);
846 return 0;
847}
848
849/* ------------------------------------------------------------------ */
850
851static int philips_tiger_tuner_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
852{
853 int ret;
854
834 ret = philips_tda827xa_pll_set(0x61, fe, params); 855 ret = philips_tda827xa_pll_set(0x61, fe, params);
835 if (ret != 0) 856 if (ret != 0)
836 return ret; 857 return ret;
837 /* open tda8290 i2c bridge */ 858 return 0;
838 tda8290_msg.buf = tda8290_open;
839 i2c_transfer(&dev->i2c_adap, &tda8290_msg, 1);
840 return ret;
841} 859}
842 860
843static int philips_tiger_tuner_init(struct dvb_frontend *fe) 861static int philips_tiger_tuner_init(struct dvb_frontend *fe)
@@ -867,13 +885,80 @@ static struct tda1004x_config philips_tiger_config = {
867 .invert = 1, 885 .invert = 1,
868 .invert_oclk = 0, 886 .invert_oclk = 0,
869 .xtal_freq = TDA10046_XTAL_16M, 887 .xtal_freq = TDA10046_XTAL_16M,
870 .agc_config = TDA10046_AGC_TDA827X, 888 .agc_config = TDA10046_AGC_TDA827X_GP11,
889 .if_freq = TDA10046_FREQ_045,
890 .request_firmware = NULL,
891};
892/* ------------------------------------------------------------------ */
893
894static int cinergy_ht_tuner_init(struct dvb_frontend *fe)
895{
896 struct saa7134_dev *dev = fe->dvb->priv;
897 static u8 data[] = { 0x3c, 0x33, 0x62};
898 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)};
899
900 if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1)
901 return -EIO;
902 return 0;
903}
904
905static int cinergy_ht_tuner_sleep(struct dvb_frontend *fe)
906{
907 struct saa7134_dev *dev = fe->dvb->priv;
908 static u8 data[] = { 0x3c, 0x33, 0x60};
909 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)};
910
911 i2c_transfer(&dev->i2c_adap, &msg, 1);
912 philips_tda827xa_tuner_sleep( 0x61, fe);
913 return 0;
914}
915
916static struct tda1004x_config cinergy_ht_config = {
917 .demod_address = 0x08,
918 .invert = 1,
919 .invert_oclk = 0,
920 .xtal_freq = TDA10046_XTAL_16M,
921 .agc_config = TDA10046_AGC_TDA827X_GP01,
871 .if_freq = TDA10046_FREQ_045, 922 .if_freq = TDA10046_FREQ_045,
872 .request_firmware = NULL, 923 .request_firmware = NULL,
873}; 924};
874 925
875/* ------------------------------------------------------------------ */ 926/* ------------------------------------------------------------------ */
876 927
928static struct tda1004x_config pinnacle_pctv_310i_config = {
929 .demod_address = 0x08,
930 .invert = 1,
931 .invert_oclk = 0,
932 .xtal_freq = TDA10046_XTAL_16M,
933 .agc_config = TDA10046_AGC_TDA827X_GP11,
934 .if_freq = TDA10046_FREQ_045,
935 .request_firmware = philips_tda1004x_request_firmware,
936};
937
938/* ------------------------------------------------------------------ */
939
940static struct tda1004x_config hauppauge_hvr_1110_config = {
941 .demod_address = 0x08,
942 .invert = 1,
943 .invert_oclk = 0,
944 .xtal_freq = TDA10046_XTAL_16M,
945 .agc_config = TDA10046_AGC_TDA827X_GP11,
946 .if_freq = TDA10046_FREQ_045,
947 .request_firmware = philips_tda1004x_request_firmware,
948};
949
950/* ------------------------------------------------------------------ */
951
952static struct tda1004x_config asus_p7131_dual_config = {
953 .demod_address = 0x08,
954 .invert = 1,
955 .invert_oclk = 0,
956 .xtal_freq = TDA10046_XTAL_16M,
957 .agc_config = TDA10046_AGC_TDA827X_GP11,
958 .if_freq = TDA10046_FREQ_045,
959 .request_firmware = philips_tda1004x_request_firmware,
960};
961
877static int asus_p7131_dual_tuner_init(struct dvb_frontend *fe) 962static int asus_p7131_dual_tuner_init(struct dvb_frontend *fe)
878{ 963{
879 struct saa7134_dev *dev = fe->dvb->priv; 964 struct saa7134_dev *dev = fe->dvb->priv;
@@ -921,7 +1006,7 @@ static struct tda1004x_config lifeview_trio_config = {
921 .invert = 1, 1006 .invert = 1,
922 .invert_oclk = 0, 1007 .invert_oclk = 0,
923 .xtal_freq = TDA10046_XTAL_16M, 1008 .xtal_freq = TDA10046_XTAL_16M,
924 .agc_config = TDA10046_AGC_TDA827X_GPL, 1009 .agc_config = TDA10046_AGC_TDA827X_GP00,
925 .if_freq = TDA10046_FREQ_045, 1010 .if_freq = TDA10046_FREQ_045,
926 .request_firmware = NULL, 1011 .request_firmware = NULL,
927}; 1012};
@@ -958,7 +1043,7 @@ static struct tda1004x_config ads_tech_duo_config = {
958 .invert = 1, 1043 .invert = 1,
959 .invert_oclk = 0, 1044 .invert_oclk = 0,
960 .xtal_freq = TDA10046_XTAL_16M, 1045 .xtal_freq = TDA10046_XTAL_16M,
961 .agc_config = TDA10046_AGC_TDA827X_GPL, 1046 .agc_config = TDA10046_AGC_TDA827X_GP00,
962 .if_freq = TDA10046_FREQ_045, 1047 .if_freq = TDA10046_FREQ_045,
963 .request_firmware = NULL, 1048 .request_firmware = NULL,
964}; 1049};
@@ -983,7 +1068,7 @@ static struct tda1004x_config tevion_dvbt220rf_config = {
983 .invert = 1, 1068 .invert = 1,
984 .invert_oclk = 0, 1069 .invert_oclk = 0,
985 .xtal_freq = TDA10046_XTAL_16M, 1070 .xtal_freq = TDA10046_XTAL_16M,
986 .agc_config = TDA10046_AGC_TDA827X, 1071 .agc_config = TDA10046_AGC_TDA827X_GP11,
987 .if_freq = TDA10046_FREQ_045, 1072 .if_freq = TDA10046_FREQ_045,
988 .request_firmware = NULL, 1073 .request_firmware = NULL,
989}; 1074};
@@ -1028,7 +1113,7 @@ static struct tda1004x_config md8800_dvbt_config = {
1028 .invert = 1, 1113 .invert = 1,
1029 .invert_oclk = 0, 1114 .invert_oclk = 0,
1030 .xtal_freq = TDA10046_XTAL_16M, 1115 .xtal_freq = TDA10046_XTAL_16M,
1031 .agc_config = TDA10046_AGC_TDA827X, 1116 .agc_config = TDA10046_AGC_TDA827X_GP11,
1032 .if_freq = TDA10046_FREQ_045, 1117 .if_freq = TDA10046_FREQ_045,
1033 .request_firmware = NULL, 1118 .request_firmware = NULL,
1034}; 1119};
@@ -1168,6 +1253,29 @@ static int dvb_init(struct saa7134_dev *dev)
1168 &philips_tiger_config, 1253 &philips_tiger_config,
1169 &dev->i2c_adap); 1254 &dev->i2c_adap);
1170 if (dev->dvb.frontend) { 1255 if (dev->dvb.frontend) {
1256 dev->dvb.frontend->ops.i2c_gate_ctrl = tda8290_i2c_gate_ctrl;
1257 dev->dvb.frontend->ops.tuner_ops.init = philips_tiger_tuner_init;
1258 dev->dvb.frontend->ops.tuner_ops.sleep = philips_tiger_tuner_sleep;
1259 dev->dvb.frontend->ops.tuner_ops.set_params = philips_tiger_tuner_set_params;
1260 }
1261 break;
1262 case SAA7134_BOARD_PINNACLE_PCTV_310i:
1263 dev->dvb.frontend = dvb_attach(tda10046_attach,
1264 &pinnacle_pctv_310i_config,
1265 &dev->i2c_adap);
1266 if (dev->dvb.frontend) {
1267 dev->dvb.frontend->ops.i2c_gate_ctrl = tda8290_i2c_gate_ctrl;
1268 dev->dvb.frontend->ops.tuner_ops.init = philips_tiger_tuner_init;
1269 dev->dvb.frontend->ops.tuner_ops.sleep = philips_tiger_tuner_sleep;
1270 dev->dvb.frontend->ops.tuner_ops.set_params = philips_tiger_tuner_set_params;
1271 }
1272 break;
1273 case SAA7134_BOARD_HAUPPAUGE_HVR1110:
1274 dev->dvb.frontend = dvb_attach(tda10046_attach,
1275 &hauppauge_hvr_1110_config,
1276 &dev->i2c_adap);
1277 if (dev->dvb.frontend) {
1278 dev->dvb.frontend->ops.i2c_gate_ctrl = tda8290_i2c_gate_ctrl;
1171 dev->dvb.frontend->ops.tuner_ops.init = philips_tiger_tuner_init; 1279 dev->dvb.frontend->ops.tuner_ops.init = philips_tiger_tuner_init;
1172 dev->dvb.frontend->ops.tuner_ops.sleep = philips_tiger_tuner_sleep; 1280 dev->dvb.frontend->ops.tuner_ops.sleep = philips_tiger_tuner_sleep;
1173 dev->dvb.frontend->ops.tuner_ops.set_params = philips_tiger_tuner_set_params; 1281 dev->dvb.frontend->ops.tuner_ops.set_params = philips_tiger_tuner_set_params;
@@ -1175,9 +1283,10 @@ static int dvb_init(struct saa7134_dev *dev)
1175 break; 1283 break;
1176 case SAA7134_BOARD_ASUSTeK_P7131_DUAL: 1284 case SAA7134_BOARD_ASUSTeK_P7131_DUAL:
1177 dev->dvb.frontend = dvb_attach(tda10046_attach, 1285 dev->dvb.frontend = dvb_attach(tda10046_attach,
1178 &philips_tiger_config, 1286 &asus_p7131_dual_config,
1179 &dev->i2c_adap); 1287 &dev->i2c_adap);
1180 if (dev->dvb.frontend) { 1288 if (dev->dvb.frontend) {
1289 dev->dvb.frontend->ops.i2c_gate_ctrl = tda8290_i2c_gate_ctrl;
1181 dev->dvb.frontend->ops.tuner_ops.init = asus_p7131_dual_tuner_init; 1290 dev->dvb.frontend->ops.tuner_ops.init = asus_p7131_dual_tuner_init;
1182 dev->dvb.frontend->ops.tuner_ops.sleep = asus_p7131_dual_tuner_sleep; 1291 dev->dvb.frontend->ops.tuner_ops.sleep = asus_p7131_dual_tuner_sleep;
1183 dev->dvb.frontend->ops.tuner_ops.set_params = philips_tiger_tuner_set_params; 1292 dev->dvb.frontend->ops.tuner_ops.set_params = philips_tiger_tuner_set_params;
@@ -1194,12 +1303,27 @@ static int dvb_init(struct saa7134_dev *dev)
1194 } 1303 }
1195 break; 1304 break;
1196 case SAA7134_BOARD_FLYDVB_TRIO: 1305 case SAA7134_BOARD_FLYDVB_TRIO:
1197 dev->dvb.frontend = dvb_attach(tda10046_attach, 1306 if(! use_frontent) { //terrestrial
1198 &lifeview_trio_config, 1307 dev->dvb.frontend = dvb_attach(tda10046_attach,
1199 &dev->i2c_adap); 1308 &lifeview_trio_config,
1200 if (dev->dvb.frontend) { 1309 &dev->i2c_adap);
1201 dev->dvb.frontend->ops.tuner_ops.sleep = lifeview_trio_tuner_sleep; 1310 if (dev->dvb.frontend) {
1202 dev->dvb.frontend->ops.tuner_ops.set_params = lifeview_trio_tuner_set_params; 1311 dev->dvb.frontend->ops.tuner_ops.sleep = lifeview_trio_tuner_sleep;
1312 dev->dvb.frontend->ops.tuner_ops.set_params =
1313 lifeview_trio_tuner_set_params;
1314 }
1315 } else { //satellite
1316 dev->dvb.frontend = dvb_attach(tda10086_attach, &flydvbs, &dev->i2c_adap);
1317 if (dev->dvb.frontend) {
1318 if (dvb_attach(tda826x_attach, dev->dvb.frontend, 0x63,
1319 &dev->i2c_adap, 0) == NULL) {
1320 printk("%s: Lifeview Trio, No tda826x found!\n", __FUNCTION__);
1321 }
1322 if (dvb_attach(isl6421_attach, dev->dvb.frontend, &dev->i2c_adap,
1323 0x08, 0, 0) == NULL) {
1324 printk("%s: Lifeview Trio, No ISL6421 found!\n", __FUNCTION__);
1325 }
1326 }
1203 } 1327 }
1204 break; 1328 break;
1205 case SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331: 1329 case SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331:
@@ -1281,7 +1405,27 @@ static int dvb_init(struct saa7134_dev *dev)
1281 dev->dvb.frontend->ops.tuner_ops.set_params = philips_fmd1216_tuner_set_params; 1405 dev->dvb.frontend->ops.tuner_ops.set_params = philips_fmd1216_tuner_set_params;
1282 } 1406 }
1283 break; 1407 break;
1408 case SAA7134_BOARD_VIDEOMATE_DVBT_200A:
1409 dev->dvb.frontend = dvb_attach(tda10046_attach,
1410 &philips_europa_config,
1411 &dev->i2c_adap);
1412 if (dev->dvb.frontend) {
1413 dev->dvb.frontend->ops.tuner_ops.init = philips_td1316_tuner_init;
1414 dev->dvb.frontend->ops.tuner_ops.set_params = philips_td1316_tuner_set_params;
1415 }
1416 break;
1417 case SAA7134_BOARD_CINERGY_HT_PCMCIA:
1418 dev->dvb.frontend = dvb_attach(tda10046_attach,
1419 &cinergy_ht_config,
1420 &dev->i2c_adap);
1421 if (dev->dvb.frontend) {
1422 dev->dvb.frontend->ops.i2c_gate_ctrl = tda8290_i2c_gate_ctrl;
1423 dev->dvb.frontend->ops.tuner_ops.init = cinergy_ht_tuner_init;
1424 dev->dvb.frontend->ops.tuner_ops.sleep = cinergy_ht_tuner_sleep;
1425 dev->dvb.frontend->ops.tuner_ops.set_params = philips_tiger_tuner_set_params;
1284 1426
1427 }
1428 break;
1285 default: 1429 default:
1286 printk("%s: Huh? unknown DVB card?\n",dev->name); 1430 printk("%s: Huh? unknown DVB card?\n",dev->name);
1287 break; 1431 break;
diff --git a/drivers/media/video/saa7134/saa7134-i2c.c b/drivers/media/video/saa7134/saa7134-i2c.c
index 6162550c4136..6f9fe86fed98 100644
--- a/drivers/media/video/saa7134/saa7134-i2c.c
+++ b/drivers/media/video/saa7134/saa7134-i2c.c
@@ -341,6 +341,7 @@ static int attach_inform(struct i2c_client *client)
341 switch (client->addr) { 341 switch (client->addr) {
342 case 0x7a: 342 case 0x7a:
343 case 0x47: 343 case 0x47:
344 case 0x71:
344 { 345 {
345 struct IR_i2c *ir = i2c_get_clientdata(client); 346 struct IR_i2c *ir = i2c_get_clientdata(client);
346 d1printk("%s i2c IR detected (%s).\n", 347 d1printk("%s i2c IR detected (%s).\n",
diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c
index dee83552e681..60b38defd9bc 100644
--- a/drivers/media/video/saa7134/saa7134-input.c
+++ b/drivers/media/video/saa7134/saa7134-input.c
@@ -112,6 +112,27 @@ static int get_key_purpletv(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
112 return 1; 112 return 1;
113} 113}
114 114
115static int get_key_hvr1110(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
116{
117 unsigned char buf[5], cod4, code3, code4;
118
119 /* poll IR chip */
120 if (5 != i2c_master_recv(&ir->c,buf,5))
121 return -EIO;
122
123 cod4 = buf[4];
124 code4 = (cod4 >> 2);
125 code3 = buf[3];
126 if (code3 == 0)
127 /* no key pressed */
128 return 0;
129
130 /* return key */
131 *ir_key = code4;
132 *ir_raw = code4;
133 return 1;
134}
135
115void saa7134_input_irq(struct saa7134_dev *dev) 136void saa7134_input_irq(struct saa7134_dev *dev)
116{ 137{
117 struct saa7134_ir *ir = dev->remote; 138 struct saa7134_ir *ir = dev->remote;
@@ -131,6 +152,23 @@ static void saa7134_input_timer(unsigned long data)
131 mod_timer(&ir->timer, timeout); 152 mod_timer(&ir->timer, timeout);
132} 153}
133 154
155static void saa7134_ir_start(struct saa7134_dev *dev, struct saa7134_ir *ir)
156{
157 if (ir->polling) {
158 init_timer(&ir->timer);
159 ir->timer.function = saa7134_input_timer;
160 ir->timer.data = (unsigned long)dev;
161 ir->timer.expires = jiffies + HZ;
162 add_timer(&ir->timer);
163 }
164}
165
166static void saa7134_ir_stop(struct saa7134_dev *dev)
167{
168 if (dev->remote->polling)
169 del_timer_sync(&dev->remote->timer);
170}
171
134int saa7134_input_init1(struct saa7134_dev *dev) 172int saa7134_input_init1(struct saa7134_dev *dev)
135{ 173{
136 struct saa7134_ir *ir; 174 struct saa7134_ir *ir;
@@ -141,6 +179,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
141 u32 mask_keyup = 0; 179 u32 mask_keyup = 0;
142 int polling = 0; 180 int polling = 0;
143 int ir_type = IR_TYPE_OTHER; 181 int ir_type = IR_TYPE_OTHER;
182 int err;
144 183
145 if (dev->has_remote != SAA7134_REMOTE_GPIO) 184 if (dev->has_remote != SAA7134_REMOTE_GPIO)
146 return -ENODEV; 185 return -ENODEV;
@@ -184,6 +223,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
184 case SAA7134_BOARD_AVERMEDIA_307: 223 case SAA7134_BOARD_AVERMEDIA_307:
185 case SAA7134_BOARD_AVERMEDIA_STUDIO_305: 224 case SAA7134_BOARD_AVERMEDIA_STUDIO_305:
186 case SAA7134_BOARD_AVERMEDIA_STUDIO_307: 225 case SAA7134_BOARD_AVERMEDIA_STUDIO_307:
226 case SAA7134_BOARD_AVERMEDIA_STUDIO_507:
187 case SAA7134_BOARD_AVERMEDIA_GO_007_FM: 227 case SAA7134_BOARD_AVERMEDIA_GO_007_FM:
188 ir_codes = ir_codes_avermedia; 228 ir_codes = ir_codes_avermedia;
189 mask_keycode = 0x0007C8; 229 mask_keycode = 0x0007C8;
@@ -266,9 +306,8 @@ int saa7134_input_init1(struct saa7134_dev *dev)
266 ir = kzalloc(sizeof(*ir), GFP_KERNEL); 306 ir = kzalloc(sizeof(*ir), GFP_KERNEL);
267 input_dev = input_allocate_device(); 307 input_dev = input_allocate_device();
268 if (!ir || !input_dev) { 308 if (!ir || !input_dev) {
269 kfree(ir); 309 err = -ENOMEM;
270 input_free_device(input_dev); 310 goto err_out_free;
271 return -ENOMEM;
272 } 311 }
273 312
274 ir->dev = input_dev; 313 ir->dev = input_dev;
@@ -299,18 +338,22 @@ int saa7134_input_init1(struct saa7134_dev *dev)
299 } 338 }
300 input_dev->cdev.dev = &dev->pci->dev; 339 input_dev->cdev.dev = &dev->pci->dev;
301 340
302 /* all done */
303 dev->remote = ir; 341 dev->remote = ir;
304 if (ir->polling) { 342 saa7134_ir_start(dev, ir);
305 init_timer(&ir->timer); 343
306 ir->timer.function = saa7134_input_timer; 344 err = input_register_device(ir->dev);
307 ir->timer.data = (unsigned long)dev; 345 if (err)
308 ir->timer.expires = jiffies + HZ; 346 goto err_out_stop;
309 add_timer(&ir->timer);
310 }
311 347
312 input_register_device(ir->dev);
313 return 0; 348 return 0;
349
350 err_out_stop:
351 saa7134_ir_stop(dev);
352 dev->remote = NULL;
353 err_out_free:
354 input_free_device(input_dev);
355 kfree(ir);
356 return err;
314} 357}
315 358
316void saa7134_input_fini(struct saa7134_dev *dev) 359void saa7134_input_fini(struct saa7134_dev *dev)
@@ -318,8 +361,7 @@ void saa7134_input_fini(struct saa7134_dev *dev)
318 if (NULL == dev->remote) 361 if (NULL == dev->remote)
319 return; 362 return;
320 363
321 if (dev->remote->polling) 364 saa7134_ir_stop(dev);
322 del_timer_sync(&dev->remote->timer);
323 input_unregister_device(dev->remote->dev); 365 input_unregister_device(dev->remote->dev);
324 kfree(dev->remote); 366 kfree(dev->remote);
325 dev->remote = NULL; 367 dev->remote = NULL;
@@ -335,6 +377,7 @@ void saa7134_set_i2c_ir(struct saa7134_dev *dev, struct IR_i2c *ir)
335 377
336 switch (dev->board) { 378 switch (dev->board) {
337 case SAA7134_BOARD_PINNACLE_PCTV_110i: 379 case SAA7134_BOARD_PINNACLE_PCTV_110i:
380 case SAA7134_BOARD_PINNACLE_PCTV_310i:
338 snprintf(ir->c.name, sizeof(ir->c.name), "Pinnacle PCTV"); 381 snprintf(ir->c.name, sizeof(ir->c.name), "Pinnacle PCTV");
339 if (pinnacle_remote == 0) { 382 if (pinnacle_remote == 0) {
340 ir->get_key = get_key_pinnacle_color; 383 ir->get_key = get_key_pinnacle_color;
@@ -349,6 +392,11 @@ void saa7134_set_i2c_ir(struct saa7134_dev *dev, struct IR_i2c *ir)
349 ir->get_key = get_key_purpletv; 392 ir->get_key = get_key_purpletv;
350 ir->ir_codes = ir_codes_purpletv; 393 ir->ir_codes = ir_codes_purpletv;
351 break; 394 break;
395 case SAA7134_BOARD_HAUPPAUGE_HVR1110:
396 snprintf(ir->c.name, sizeof(ir->c.name), "HVR 1110");
397 ir->get_key = get_key_hvr1110;
398 ir->ir_codes = ir_codes_hauppauge_new;
399 break;
352 default: 400 default:
353 dprintk("Shouldn't get here: Unknown board %x for I2C IR?\n",dev->board); 401 dprintk("Shouldn't get here: Unknown board %x for I2C IR?\n",dev->board);
354 break; 402 break;
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
index 7cf96b430250..e88ad7b40c47 100644
--- a/drivers/media/video/saa7134/saa7134.h
+++ b/drivers/media/video/saa7134/saa7134.h
@@ -227,6 +227,11 @@ struct saa7134_format {
227#define SAA7134_BOARD_PROTEUS_2309 98 227#define SAA7134_BOARD_PROTEUS_2309 98
228#define SAA7134_BOARD_AVERMEDIA_A16AR 99 228#define SAA7134_BOARD_AVERMEDIA_A16AR 99
229#define SAA7134_BOARD_ASUS_EUROPA2_HYBRID 100 229#define SAA7134_BOARD_ASUS_EUROPA2_HYBRID 100
230#define SAA7134_BOARD_PINNACLE_PCTV_310i 101
231#define SAA7134_BOARD_AVERMEDIA_STUDIO_507 102
232#define SAA7134_BOARD_VIDEOMATE_DVBT_200A 103
233#define SAA7134_BOARD_HAUPPAUGE_HVR1110 104
234#define SAA7134_BOARD_CINERGY_HT_PCMCIA 105
230 235
231#define SAA7134_MAXBOARDS 8 236#define SAA7134_MAXBOARDS 8
232#define SAA7134_INPUT_MAX 8 237#define SAA7134_INPUT_MAX 8
@@ -446,6 +451,9 @@ struct saa7134_dev {
446 struct v4l2_prio_state prio; 451 struct v4l2_prio_state prio;
447#endif 452#endif
448 453
454 /* insmod option/autodetected */
455 int autodetected;
456
449 /* various device info */ 457 /* various device info */
450 unsigned int resources; 458 unsigned int resources;
451 struct video_device *video_dev; 459 struct video_device *video_dev;
diff --git a/drivers/media/video/stv680.c b/drivers/media/video/stv680.c
index 6d1ef1e2e8ef..a1ec3aca3f91 100644
--- a/drivers/media/video/stv680.c
+++ b/drivers/media/video/stv680.c
@@ -687,7 +687,7 @@ static int stv680_start_stream (struct usb_stv *stv680)
687 stv680->sbuf[i].data = kmalloc (stv680->rawbufsize, GFP_KERNEL); 687 stv680->sbuf[i].data = kmalloc (stv680->rawbufsize, GFP_KERNEL);
688 if (stv680->sbuf[i].data == NULL) { 688 if (stv680->sbuf[i].data == NULL) {
689 PDEBUG (0, "STV(e): Could not kmalloc raw data buffer %i", i); 689 PDEBUG (0, "STV(e): Could not kmalloc raw data buffer %i", i);
690 return -1; 690 goto nomem_err;
691 } 691 }
692 } 692 }
693 693
@@ -698,7 +698,7 @@ static int stv680_start_stream (struct usb_stv *stv680)
698 stv680->scratch[i].data = kmalloc (stv680->rawbufsize, GFP_KERNEL); 698 stv680->scratch[i].data = kmalloc (stv680->rawbufsize, GFP_KERNEL);
699 if (stv680->scratch[i].data == NULL) { 699 if (stv680->scratch[i].data == NULL) {
700 PDEBUG (0, "STV(e): Could not kmalloc raw scratch buffer %i", i); 700 PDEBUG (0, "STV(e): Could not kmalloc raw scratch buffer %i", i);
701 return -1; 701 goto nomem_err;
702 } 702 }
703 stv680->scratch[i].state = BUFFER_UNUSED; 703 stv680->scratch[i].state = BUFFER_UNUSED;
704 } 704 }
@@ -706,7 +706,7 @@ static int stv680_start_stream (struct usb_stv *stv680)
706 for (i = 0; i < STV680_NUMSBUF; i++) { 706 for (i = 0; i < STV680_NUMSBUF; i++) {
707 urb = usb_alloc_urb (0, GFP_KERNEL); 707 urb = usb_alloc_urb (0, GFP_KERNEL);
708 if (!urb) 708 if (!urb)
709 return -ENOMEM; 709 goto nomem_err;
710 710
711 /* sbuf is urb->transfer_buffer, later gets memcpyed to scratch */ 711 /* sbuf is urb->transfer_buffer, later gets memcpyed to scratch */
712 usb_fill_bulk_urb (urb, stv680->udev, 712 usb_fill_bulk_urb (urb, stv680->udev,
@@ -721,6 +721,21 @@ static int stv680_start_stream (struct usb_stv *stv680)
721 721
722 stv680->framecount = 0; 722 stv680->framecount = 0;
723 return 0; 723 return 0;
724
725 nomem_err:
726 for (i = 0; i < STV680_NUMSCRATCH; i++) {
727 kfree(stv680->scratch[i].data);
728 stv680->scratch[i].data = NULL;
729 }
730 for (i = 0; i < STV680_NUMSBUF; i++) {
731 usb_kill_urb(stv680->urb[i]);
732 usb_free_urb(stv680->urb[i]);
733 stv680->urb[i] = NULL;
734 kfree(stv680->sbuf[i].data);
735 stv680->sbuf[i].data = NULL;
736 }
737 return -ENOMEM;
738
724} 739}
725 740
726static int stv680_stop_stream (struct usb_stv *stv680) 741static int stv680_stop_stream (struct usb_stv *stv680)
diff --git a/drivers/media/video/tda9887.c b/drivers/media/video/tda9887.c
index 87ffb0e84a7a..fde576f1101c 100644
--- a/drivers/media/video/tda9887.c
+++ b/drivers/media/video/tda9887.c
@@ -482,6 +482,12 @@ static int tda9887_set_config(struct tuner *t, char *buf)
482 buf[1] &= ~cQSS; 482 buf[1] &= ~cQSS;
483 if (t->tda9887_config & TDA9887_GATING_18) 483 if (t->tda9887_config & TDA9887_GATING_18)
484 buf[3] &= ~cGating_36; 484 buf[3] &= ~cGating_36;
485
486 if (t->tda9887_config & TDA9887_GAIN_NORMAL) {
487 radio_stereo.e &= ~cTunerGainLow;
488 radio_mono.e &= ~cTunerGainLow;
489 }
490
485 return 0; 491 return 0;
486} 492}
487 493
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index 40590bae5ff7..705daaa2a4ff 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -443,6 +443,10 @@ static int tuner_attach(struct i2c_adapter *adap, int addr, int kind)
443 printk("%02x ",buffer[i]); 443 printk("%02x ",buffer[i]);
444 printk("\n"); 444 printk("\n");
445 } 445 }
446 /* HACK: This test were added to avoid tuner to probe tda9840 and tea6415c on the MXB card */
447 if (adap->id == I2C_HW_SAA7146 && addr < 0x4a)
448 return -ENODEV;
449
446 /* autodetection code based on the i2c addr */ 450 /* autodetection code based on the i2c addr */
447 if (!no_autodetect) { 451 if (!no_autodetect) {
448 switch (addr) { 452 switch (addr) {
diff --git a/drivers/media/video/tuner-simple.c b/drivers/media/video/tuner-simple.c
index 63db4e97ae6c..1b9b0742f753 100644
--- a/drivers/media/video/tuner-simple.c
+++ b/drivers/media/video/tuner-simple.c
@@ -108,6 +108,7 @@ static int tuner_stereo(struct i2c_client *c)
108 case TUNER_PHILIPS_FM1216ME_MK3: 108 case TUNER_PHILIPS_FM1216ME_MK3:
109 case TUNER_PHILIPS_FM1236_MK3: 109 case TUNER_PHILIPS_FM1236_MK3:
110 case TUNER_PHILIPS_FM1256_IH3: 110 case TUNER_PHILIPS_FM1256_IH3:
111 case TUNER_LG_NTSC_TAPE:
111 stereo = ((status & TUNER_SIGNAL) == TUNER_STEREO_MK3); 112 stereo = ((status & TUNER_SIGNAL) == TUNER_STEREO_MK3);
112 break; 113 break;
113 default: 114 default:
@@ -421,6 +422,7 @@ static void default_set_radio_freq(struct i2c_client *c, unsigned int freq)
421 case TUNER_PHILIPS_FM1216ME_MK3: 422 case TUNER_PHILIPS_FM1216ME_MK3:
422 case TUNER_PHILIPS_FM1236_MK3: 423 case TUNER_PHILIPS_FM1236_MK3:
423 case TUNER_PHILIPS_FMD1216ME_MK3: 424 case TUNER_PHILIPS_FMD1216ME_MK3:
425 case TUNER_LG_NTSC_TAPE:
424 buffer[3] = 0x19; 426 buffer[3] = 0x19;
425 break; 427 break;
426 case TUNER_TNF_5335MF: 428 case TUNER_TNF_5335MF:
@@ -465,6 +467,8 @@ static void default_set_radio_freq(struct i2c_client *c, unsigned int freq)
465 config |= TDA9887_INTERCARRIER; 467 config |= TDA9887_INTERCARRIER;
466/* if (params->port1_set_for_fm_mono) 468/* if (params->port1_set_for_fm_mono)
467 config &= ~TDA9887_PORT1_ACTIVE;*/ 469 config &= ~TDA9887_PORT1_ACTIVE;*/
470 if (params->fm_gain_normal)
471 config |= TDA9887_GAIN_NORMAL;
468 i2c_clients_command(c->adapter, TDA9887_SET_CONFIG, &config); 472 i2c_clients_command(c->adapter, TDA9887_SET_CONFIG, &config);
469 } 473 }
470 if (4 != (rc = i2c_master_send(c,buffer,4))) 474 if (4 != (rc = i2c_master_send(c,buffer,4)))
diff --git a/drivers/media/video/tuner-types.c b/drivers/media/video/tuner-types.c
index 781682373b61..74c3e6f96f1a 100644
--- a/drivers/media/video/tuner-types.c
+++ b/drivers/media/video/tuner-types.c
@@ -651,6 +651,7 @@ static struct tuner_params tuner_microtune_4049_fm5_params[] = {
651 .has_tda9887 = 1, 651 .has_tda9887 = 1,
652 .port1_invert_for_secam_lc = 1, 652 .port1_invert_for_secam_lc = 1,
653 .default_pll_gating_18 = 1, 653 .default_pll_gating_18 = 1,
654 .fm_gain_normal=1,
654 }, 655 },
655}; 656};
656 657
@@ -672,16 +673,6 @@ static struct tuner_params tuner_panasonic_vp27_params[] = {
672 }, 673 },
673}; 674};
674 675
675/* ------------ TUNER_LG_NTSC_TAPE - LGINNOTEK NTSC ------------ */
676
677static struct tuner_params tuner_lg_ntsc_tape_params[] = {
678 {
679 .type = TUNER_PARAM_TYPE_NTSC,
680 .ranges = tuner_fm1236_mk3_ntsc_ranges,
681 .count = ARRAY_SIZE(tuner_fm1236_mk3_ntsc_ranges),
682 },
683};
684
685/* ------------ TUNER_TNF_8831BGFF - Philips PAL ------------ */ 676/* ------------ TUNER_TNF_8831BGFF - Philips PAL ------------ */
686 677
687static struct tuner_range tuner_tnf_8831bgff_pal_ranges[] = { 678static struct tuner_range tuner_tnf_8831bgff_pal_ranges[] = {
@@ -1331,8 +1322,8 @@ struct tunertype tuners[] = {
1331 }, 1322 },
1332 [TUNER_LG_NTSC_TAPE] = { /* LGINNOTEK NTSC */ 1323 [TUNER_LG_NTSC_TAPE] = { /* LGINNOTEK NTSC */
1333 .name = "LG NTSC (TAPE series)", 1324 .name = "LG NTSC (TAPE series)",
1334 .params = tuner_lg_ntsc_tape_params, 1325 .params = tuner_fm1236_mk3_params,
1335 .count = ARRAY_SIZE(tuner_lg_ntsc_tape_params), 1326 .count = ARRAY_SIZE(tuner_fm1236_mk3_params),
1336 }, 1327 },
1337 [TUNER_TNF_8831BGFF] = { /* Philips PAL */ 1328 [TUNER_TNF_8831BGFF] = { /* Philips PAL */
1338 .name = "Tenna TNF 8831 BGFF)", 1329 .name = "Tenna TNF 8831 BGFF)",
diff --git a/drivers/media/video/tveeprom.c b/drivers/media/video/tveeprom.c
index 6b9ef731b83a..2624e3f7dd29 100644
--- a/drivers/media/video/tveeprom.c
+++ b/drivers/media/video/tveeprom.c
@@ -430,7 +430,7 @@ void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee,
430 tvee->has_radio = eeprom_data[i+len-1]; 430 tvee->has_radio = eeprom_data[i+len-1];
431 /* old style tag, don't know how to detect 431 /* old style tag, don't know how to detect
432 IR presence, mark as unknown. */ 432 IR presence, mark as unknown. */
433 tvee->has_ir = 2; 433 tvee->has_ir = -1;
434 tvee->model = 434 tvee->model =
435 eeprom_data[i+8] + 435 eeprom_data[i+8] +
436 (eeprom_data[i+9] << 8); 436 (eeprom_data[i+9] << 8);
@@ -653,13 +653,14 @@ void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee,
653 STRM(decoderIC, tvee->decoder_processor), 653 STRM(decoderIC, tvee->decoder_processor),
654 tvee->decoder_processor); 654 tvee->decoder_processor);
655 } 655 }
656 if (tvee->has_ir == 2) 656 if (tvee->has_ir == -1)
657 tveeprom_info("has %sradio\n", 657 tveeprom_info("has %sradio\n",
658 tvee->has_radio ? "" : "no "); 658 tvee->has_radio ? "" : "no ");
659 else 659 else
660 tveeprom_info("has %sradio, has %sIR remote\n", 660 tveeprom_info("has %sradio, has %sIR receiver, has %sIR transmitter\n",
661 tvee->has_radio ? "" : "no ", 661 tvee->has_radio ? "" : "no ",
662 tvee->has_ir ? "" : "no "); 662 (tvee->has_ir & 1) ? "" : "no ",
663 (tvee->has_ir & 2) ? "" : "no ");
663} 664}
664EXPORT_SYMBOL(tveeprom_hauppauge_analog); 665EXPORT_SYMBOL(tveeprom_hauppauge_analog);
665 666
diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
index bbf2beeeb449..ec0ff2247f06 100644
--- a/drivers/media/video/usbvideo/quickcam_messenger.c
+++ b/drivers/media/video/usbvideo/quickcam_messenger.c
@@ -86,6 +86,7 @@ MODULE_DEVICE_TABLE(usb, qcm_table);
86static void qcm_register_input(struct qcm *cam, struct usb_device *dev) 86static void qcm_register_input(struct qcm *cam, struct usb_device *dev)
87{ 87{
88 struct input_dev *input_dev; 88 struct input_dev *input_dev;
89 int error;
89 90
90 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname)); 91 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
91 strncat(cam->input_physname, "/input0", sizeof(cam->input_physname)); 92 strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
@@ -106,7 +107,13 @@ static void qcm_register_input(struct qcm *cam, struct usb_device *dev)
106 107
107 input_dev->private = cam; 108 input_dev->private = cam;
108 109
109 input_register_device(cam->input); 110 error = input_register_device(cam->input);
111 if (error) {
112 warn("Failed to register camera's input device, err: %d\n",
113 error);
114 input_free_device(cam->input);
115 cam->input = NULL;
116 }
110} 117}
111 118
112static void qcm_unregister_input(struct qcm *cam) 119static void qcm_unregister_input(struct qcm *cam)
diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
index d8b88024bc2f..b560c9d7c516 100644
--- a/drivers/media/video/usbvideo/usbvideo.c
+++ b/drivers/media/video/usbvideo/usbvideo.c
@@ -690,7 +690,7 @@ int usbvideo_register(
690 } 690 }
691 691
692 base_size = num_cams * sizeof(struct uvd) + sizeof(struct usbvideo); 692 base_size = num_cams * sizeof(struct uvd) + sizeof(struct usbvideo);
693 cams = (struct usbvideo *) kzalloc(base_size, GFP_KERNEL); 693 cams = kzalloc(base_size, GFP_KERNEL);
694 if (cams == NULL) { 694 if (cams == NULL) {
695 err("Failed to allocate %d. bytes for usbvideo struct", base_size); 695 err("Failed to allocate %d. bytes for usbvideo struct", base_size);
696 return -ENOMEM; 696 return -ENOMEM;
diff --git a/drivers/media/video/usbvision/Kconfig b/drivers/media/video/usbvision/Kconfig
new file mode 100644
index 000000000000..fc24ef05b3f3
--- /dev/null
+++ b/drivers/media/video/usbvision/Kconfig
@@ -0,0 +1,12 @@
1config VIDEO_USBVISION
2 tristate "USB video devices based on Nogatech NT1003/1004/1005"
3 depends on I2C && VIDEO_V4L2
4 select VIDEO_TUNER
5 select VIDEO_SAA711X if VIDEO_HELPER_CHIPS_AUTO
6 ---help---
7 There are more than 50 different USB video devices based on
8 NT1003/1004/1005 USB Bridges. This driver enables using those
9 devices.
10
11 To compile this driver as a module, choose M here: the
12 module will be called usbvision.
diff --git a/drivers/media/video/usbvision/Makefile b/drivers/media/video/usbvision/Makefile
new file mode 100644
index 000000000000..9ac92a80c645
--- /dev/null
+++ b/drivers/media/video/usbvision/Makefile
@@ -0,0 +1,5 @@
1usbvision-objs := usbvision-core.o usbvision-video.o usbvision-i2c.o usbvision-cards.o
2
3obj-$(CONFIG_VIDEO_USBVISION) += usbvision.o
4
5EXTRA_CFLAGS += -Idrivers/media/video
diff --git a/drivers/media/video/usbvision/usbvision-cards.c b/drivers/media/video/usbvision/usbvision-cards.c
new file mode 100644
index 000000000000..134eb9865df6
--- /dev/null
+++ b/drivers/media/video/usbvision/usbvision-cards.c
@@ -0,0 +1,157 @@
1/*
2 * USBVISION.H
3 * usbvision header file
4 *
5 * Copyright (c) 1999-2005 Joerg Heckenbach <joerg@heckenbach-aw.de>
6 *
7 * This module is part of usbvision driver project.
8 * Updates to driver completed by Dwaine P. Garden
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25
26#include <linux/list.h>
27#include <linux/i2c.h>
28#include <media/v4l2-dev.h>
29#include <media/tuner.h>
30#include "usbvision.h"
31
32/* Supported Devices: A table for usbvision.c*/
33struct usbvision_device_data_st usbvision_device_data[] = {
34 {0xFFF0, 0xFFF0, -1, CODEC_SAA7111, 3, V4L2_STD_NTSC, 1, 1, 1, 1, TUNER_PHILIPS_NTSC_M, -1, -1, -1, -1, -1, "Custom Dummy USBVision Device"},
35 {0x0A6F, 0x0400, -1, CODEC_SAA7113, 4, V4L2_STD_NTSC, 1, 0, 1, 0, 0, -1, -1, -1, -1, -1, "Xanboo"},
36 {0x050D, 0x0208, -1, CODEC_SAA7113, 2, V4L2_STD_PAL, 1, 0, 1, 0, 0, -1, -1, 0, 3, 7, "Belkin USBView II"},
37 {0x0571, 0x0002, 0, CODEC_SAA7111, 2, V4L2_STD_PAL, 0, 0, 1, 0, 0, -1, -1, -1, -1, 7, "echoFX InterView Lite"},
38 {0x0573, 0x0003, -1, CODEC_SAA7111, 2, V4L2_STD_NTSC, 1, 0, 1, 0, 0, -1, -1, -1, -1, -1, "USBGear USBG-V1 resp. HAMA USB"},
39 {0x0573, 0x0400, -1, CODEC_SAA7113, 4, V4L2_STD_NTSC, 0, 0, 1, 0, 0, -1, -1, 0, 3, 7, "D-Link V100"},
40 {0x0573, 0x2000, -1, CODEC_SAA7111, 2, V4L2_STD_NTSC, 1, 0, 1, 0, 0, -1, -1, -1, -1, -1, "X10 USB Camera"},
41 {0x0573, 0x2d00, -1, CODEC_SAA7111, 2, V4L2_STD_PAL, 1, 0, 1, 0, 0, -1, -1, -1, 3, 7, "Osprey 50"},
42 {0x0573, 0x2d01, -1, CODEC_SAA7113, 2, V4L2_STD_NTSC, 0, 0, 1, 0, 0, -1, -1, 0, 3, 7, "Hauppauge USB-Live Model 600"},
43 {0x0573, 0x2101, -1, CODEC_SAA7113, 2, V4L2_STD_PAL, 2, 0, 1, 0, 0, -1, -1, 0, 3, 7, "Zoran Co. PMD (Nogatech) AV-grabber Manhattan"},
44 {0x0573, 0x4100, -1, CODEC_SAA7111, 3, V4L2_STD_NTSC, 1, 1, 1, 1, TUNER_PHILIPS_NTSC_M, -1, -1, -1, 20, -1, "Nogatech USB-TV (NTSC) FM"},
45 {0x0573, 0x4110, -1, CODEC_SAA7111, 3, V4L2_STD_NTSC, 1, 1, 1, 1, TUNER_PHILIPS_NTSC_M, -1, -1, -1, 20, -1, "PNY USB-TV (NTSC) FM"},
46 {0x0573, 0x4450, 0, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 1, 1, 1, TUNER_PHILIPS_PAL, -1, -1, 0, 3, 7, "PixelView PlayTv-USB PRO (PAL) FM"},
47 {0x0573, 0x4550, 0, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 1, 1, 1, TUNER_PHILIPS_PAL, -1, -1, 0, 3, 7, "ZTV ZT-721 2.4GHz USB A/V Receiver"},
48 {0x0573, 0x4d00, -1, CODEC_SAA7111, 3, V4L2_STD_NTSC, 1, 0, 1, 1, TUNER_PHILIPS_NTSC_M, -1, -1, -1, 20, -1, "Hauppauge WinTv-USB USA"},
49 {0x0573, 0x4d01, -1, CODEC_SAA7111, 3, V4L2_STD_NTSC, 1, 0, 1, 1, TUNER_PHILIPS_NTSC_M, -1, -1, -1, -1, -1, "Hauppauge WinTv-USB"},
50 {0x0573, 0x4d02, -1, CODEC_SAA7111, 3, V4L2_STD_NTSC, 1, 0, 1, 1, TUNER_PHILIPS_NTSC_M, -1, -1, -1, -1, -1, "Hauppauge WinTv-USB (NTSC)"},
51 {0x0573, 0x4d03, -1, CODEC_SAA7111, 3, V4L2_STD_SECAM, 1, 0, 1, 1, TUNER_PHILIPS_SECAM, -1, -1, -1, -1, -1, "Hauppauge WinTv-USB (SECAM) "},
52 {0x0573, 0x4d10, -1, CODEC_SAA7111, 3, V4L2_STD_NTSC, 1, 1, 1, 1, TUNER_PHILIPS_NTSC_M, -1, -1, -1, -1, -1, "Hauppauge WinTv-USB (NTSC) FM"},
53 {0x0573, 0x4d11, -1, CODEC_SAA7111, 3, V4L2_STD_PAL, 1, 1, 1, 1, TUNER_PHILIPS_PAL, -1, -1, -1, -1, -1, "Hauppauge WinTv-USB (PAL) FM"},
54 {0x0573, 0x4d12, -1, CODEC_SAA7111, 3, V4L2_STD_PAL, 1, 1, 1, 1, TUNER_PHILIPS_PAL, -1, -1, -1, -1, -1, "Hauppauge WinTv-USB (PAL) FM"},
55 {0x0573, 0x4d2a, 0, CODEC_SAA7113, 3, V4L2_STD_NTSC, 1, 1, 1, 1, TUNER_MICROTUNE_4049FM5, -1, -1, 0, 3, 7, "Hauppauge WinTv USB (NTSC) FM Model 602 40201 Rev B285"},
56 {0x0573, 0x4d2b, 0, CODEC_SAA7113, 3, V4L2_STD_NTSC, 1, 1, 1, 1, TUNER_MICROTUNE_4049FM5, -1, -1, 0, 3, 7, "Hauppauge WinTv USB (NTSC) FM Model 602 40201 Rev B282"},
57 {0x0573, 0x4d2c, 0, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 0, 1, 1, TUNER_PHILIPS_FM1216ME_MK3, -1, -1, 0, 3, 7, "Hauppauge WinTv USB (PAL/SECAM) 40209 Rev E1A5"},
58 {0x0573, 0x4d20, 0, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 1, 1, 1, TUNER_PHILIPS_PAL, -1, -1, 0, 3, 7, "Hauppauge WinTv-USB II (PAL) FM Model 40201 Rev B226"},
59 {0x0573, 0x4d21, 0, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 0, 1, 1, TUNER_PHILIPS_PAL, -1, -1, 0, 3, 7, "Hauppauge WinTv-USB II (PAL)"},
60 {0x0573, 0x4d22, 0, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 0, 1, 1, TUNER_PHILIPS_PAL, -1, -1, 0, 3, 7, "Hauppauge WinTv-USB II (PAL) MODEL 566"},
61 {0x0573, 0x4d23, -1, CODEC_SAA7113, 3, V4L2_STD_SECAM, 1, 0, 1, 1, TUNER_PHILIPS_SECAM, -1, -1, 0, 3, 7, "Hauppauge WinTv-USB (SECAM) 4D23"},
62 {0x0573, 0x4d25, -1, CODEC_SAA7113, 3, V4L2_STD_SECAM, 1, 0, 1, 1, TUNER_PHILIPS_SECAM, -1, -1, 0, 3, 7, "Hauppauge WinTv-USB (SECAM) Model 40209 Rev B234"},
63 {0x0573, 0x4d26, -1, CODEC_SAA7113, 3, V4L2_STD_SECAM, 1, 0, 1, 1, TUNER_PHILIPS_SECAM, -1, -1, 0, 3, 7, "Hauppauge WinTv-USB (SECAM) Model 40209 Rev B243"},
64 {0x0573, 0x4d27, -1, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 0, 1, 1, TUNER_ALPS_TSBE1_PAL, -1, -1, 0, 3, 7, "Hauppauge WinTv-USB Model 40204 Rev B281"},
65 {0x0573, 0x4d28, -1, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 0, 1, 1, TUNER_ALPS_TSBE1_PAL, -1, -1, 0, 3, 7, "Hauppauge WinTv-USB Model 40204 Rev B283"},
66 {0x0573, 0x4d29, -1, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 0, 1, 1, TUNER_PHILIPS_PAL, -1, -1, 0, 3, 7, "Hauppauge WinTv-USB Model 40205 Rev B298"},
67 {0x0573, 0x4d30, -1, CODEC_SAA7113, 3, V4L2_STD_NTSC, 1, 1, 1, 1, TUNER_PHILIPS_NTSC_M, -1, -1, 0, 3, 7, "Hauppauge WinTv-USB FM Model 40211 Rev B123"},
68 {0x0573, 0x4d31, 0, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 1, 1, 1, TUNER_PHILIPS_PAL, -1, -1, 0, 3, 7, "Hauppauge WinTv-USB III (PAL) FM Model 568"},
69 {0x0573, 0x4d32, 0, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 1, 1, 1, TUNER_PHILIPS_PAL, -1, -1, 0, 3, 7, "Hauppauge WinTv-USB III (PAL) FM Model 573"},
70 {0x0573, 0x4d35, 0, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 1, 1, 1, TUNER_MICROTUNE_4049FM5, -1, -1, 0, 3, 7, "Hauppauge WinTv-USB III (PAL) FM Model 40219 Rev B252"},
71 {0x0573, 0x4d37, 0, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 1, 1, 1, TUNER_PHILIPS_FM1216ME_MK3, -1, -1, 0, 3, 7, "Hauppauge WinTV USB device Model 40219 Rev E189"},
72 {0x0768, 0x0006, -1, CODEC_SAA7113, 3, V4L2_STD_NTSC, 1, 1, 1, 1, TUNER_PHILIPS_NTSC_M, -1, -1, 5, 5, -1, "Camtel Technology USB TV Genie Pro FM Model TVB330"},
73 {0x07d0, 0x0001, -1, CODEC_SAA7113, 2, V4L2_STD_PAL, 0, 0, 1, 0, 0, -1, -1, 0, 3, 7, "Digital Video Creator I"},
74 {0x07d0, 0x0002, -1, CODEC_SAA7111, 2, V4L2_STD_NTSC, 0, 0, 1, 0, 0, -1, -1, 82, 20, 7, "Global Village GV-007 (NTSC)"},
75 {0x07d0, 0x0003, 0, CODEC_SAA7113, 2, V4L2_STD_NTSC, 0, 0, 1, 0, 0, -1, -1, 0, 3, 7, "Dazzle Fusion Model DVC-50 Rev 1 (NTSC)"},
76 {0x07d0, 0x0004, 0, CODEC_SAA7113, 2, V4L2_STD_PAL, 0, 0, 1, 0, 0, -1, -1, 0, 3, 7, "Dazzle Fusion Model DVC-80 Rev 1 (PAL)"},
77 {0x07d0, 0x0005, 0, CODEC_SAA7113, 2, V4L2_STD_SECAM, 0, 0, 1, 0, 0, -1, -1, 0, 3, 7, "Dazzle Fusion Model DVC-90 Rev 1 (SECAM)"},
78 {0x2304, 0x010d, -1, CODEC_SAA7111, 3, V4L2_STD_PAL, 1, 0, 0, 1, TUNER_TEMIC_4066FY5_PAL_I, -1, -1, -1, -1, -1, "Pinnacle Studio PCTV USB (PAL)"},
79 {0x2304, 0x0109, -1, CODEC_SAA7111, 3, V4L2_STD_SECAM, 1, 0, 1, 1, TUNER_PHILIPS_SECAM, -1, -1, -1, -1, -1, "Pinnacle Studio PCTV USB (SECAM)"},
80 {0x2304, 0x0110, -1, CODEC_SAA7111, 3, V4L2_STD_PAL, 1, 1, 1, 1, TUNER_PHILIPS_PAL, -1, -1,128, 23, -1, "Pinnacle Studio PCTV USB (PAL) FM"},
81 {0x2304, 0x0111, -1, CODEC_SAA7111, 3, V4L2_STD_PAL, 1, 0, 1, 1, TUNER_PHILIPS_PAL, -1, -1, -1, -1, -1, "Miro PCTV USB"},
82 {0x2304, 0x0112, -1, CODEC_SAA7111, 3, V4L2_STD_NTSC, 1, 1, 1, 1, TUNER_PHILIPS_NTSC_M, -1, -1, -1, -1, -1, "Pinnacle Studio PCTV USB (NTSC) FM"},
83 {0x2304, 0x0210, -1, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 1, 1, 1, TUNER_TEMIC_4009FR5_PAL, -1, -1, 0, 3, 7, "Pinnacle Studio PCTV USB (PAL) FM"},
84 {0x2304, 0x0212, -1, CODEC_SAA7111, 3, V4L2_STD_NTSC, 1, 1, 1, 1, TUNER_TEMIC_4039FR5_NTSC, -1, -1, 0, 3, 7, "Pinnacle Studio PCTV USB (NTSC) FM"},
85 {0x2304, 0x0214, -1, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 1, 1, 1, TUNER_TEMIC_4009FR5_PAL, -1, -1, 0, 3, 7, "Pinnacle Studio PCTV USB (PAL) FM"},
86 {0x2304, 0x0300, -1, CODEC_SAA7113, 2, V4L2_STD_NTSC, 1, 0, 1, 0, 0, -1, -1, 0, 3, 7, "Pinnacle Studio Linx Video input cable (NTSC)"},
87 {0x2304, 0x0301, -1, CODEC_SAA7113, 2, V4L2_STD_PAL, 1, 0, 1, 0, 0, -1, -1, 0, 3, 7, "Pinnacle Studio Linx Video input cable (PAL)"},
88 {0x2304, 0x0419, -1, CODEC_SAA7113, 3, V4L2_STD_PAL, 1, 1, 1, 1, TUNER_TEMIC_4009FR5_PAL, -1, -1, 0, 3, 7, "Pinnacle PCTV Bungee USB (PAL) FM"},
89 {0x2400, 0x4200, -1, CODEC_SAA7111, 3, VIDEO_MODE_NTSC, 1, 0, 1, 1, TUNER_PHILIPS_NTSC_M, -1, -1, -1, -1, -1, "Hauppauge WinTv-USB"},
90 {} /* Terminating entry */
91};
92
93/* Supported Devices */
94
95struct usb_device_id usbvision_table [] = {
96 { USB_DEVICE(0xFFF0, 0xFFF0) }, /* Custom Dummy USBVision Device */
97 { USB_DEVICE(0x0A6F, 0x0400) }, /* Xanboo */
98 { USB_DEVICE(0x050d, 0x0208) }, /* Belkin USBView II */
99 { USB_DEVICE(0x0571, 0x0002) }, /* echoFX InterView Lite */
100 { USB_DEVICE(0x0573, 0x0003) }, /* USBGear USBG-V1 */
101 { USB_DEVICE(0x0573, 0x0400) }, /* D-Link V100 */
102 { USB_DEVICE(0x0573, 0x2000) }, /* X10 USB Camera */
103 { USB_DEVICE(0x0573, 0x2d00) }, /* Osprey 50 */
104 { USB_DEVICE(0x0573, 0x2d01) }, /* Hauppauge USB-Live Model 600 */
105 { USB_DEVICE(0x0573, 0x2101) }, /* Zoran Co. PMD (Nogatech) AV-grabber Manhattan */
106 { USB_DEVICE(0x0573, 0x4100) }, /* Nogatech USB-TV FM (NTSC) */
107 { USB_DEVICE(0x0573, 0x4110) }, /* PNY USB-TV (NTSC) FM */
108 { USB_DEVICE(0x0573, 0x4450) }, /* PixelView PlayTv-USB PRO (PAL) FM */
109 { USB_DEVICE(0x0573, 0x4550) }, /* ZTV ZT-721 2.4GHz USB A/V Receiver */
110 { USB_DEVICE(0x0573, 0x4d00) }, /* Hauppauge WinTv-USB USA */
111 { USB_DEVICE(0x0573, 0x4d01) }, /* Hauppauge WinTv-USB */
112 { USB_DEVICE(0x0573, 0x4d02) }, /* Hauppauge WinTv-USB UK */
113 { USB_DEVICE(0x0573, 0x4d03) }, /* Hauppauge WinTv-USB France */
114 { USB_DEVICE(0x0573, 0x4d10) }, /* Hauppauge WinTv-USB with FM USA radio */
115 { USB_DEVICE(0x0573, 0x4d11) }, /* Hauppauge WinTv-USB (PAL) with FM radio */
116 { USB_DEVICE(0x0573, 0x4d12) }, /* Hauppauge WinTv-USB UK with FM Radio */
117 { USB_DEVICE(0x0573, 0x4d2a) }, /* Hauppague WinTv USB Model 602 40201 Rev B285 */
118 { USB_DEVICE(0x0573, 0x4d2b) }, /* Hauppague WinTv USB Model 602 40201 Rev B282 */
119 { USB_DEVICE(0x0573, 0x4d2c) }, /* Hauppague WinTv USB Model 40209 Rev. E1A5 PAL*/
120 { USB_DEVICE(0x0573, 0x4d20) }, /* Hauppauge WinTv-USB II (PAL) FM Model 40201 Rev B226 */
121 { USB_DEVICE(0x0573, 0x4d21) }, /* Hauppauge WinTv-USB II (PAL) with FM radio*/
122 { USB_DEVICE(0x0573, 0x4d22) }, /* Hauppauge WinTv-USB II (PAL) Model 566 */
123 { USB_DEVICE(0x0573, 0x4d23) }, /* Hauppauge WinTv-USB France 4D23*/
124 { USB_DEVICE(0x0573, 0x4d25) }, /* Hauppauge WinTv-USB Model 40209 rev B234 */
125 { USB_DEVICE(0x0573, 0x4d26) }, /* Hauppauge WinTv-USB Model 40209 Rev B243 */
126 { USB_DEVICE(0x0573, 0x4d27) }, /* Hauppauge WinTv-USB Model 40204 Rev B281 */
127 { USB_DEVICE(0x0573, 0x4d28) }, /* Hauppauge WinTv-USB Model 40204 Rev B283 */
128 { USB_DEVICE(0x0573, 0x4d29) }, /* Hauppauge WinTv-USB Model 40205 Rev B298 */
129 { USB_DEVICE(0x0573, 0x4d30) }, /* Hauppauge WinTv-USB FM Model 40211 Rev B123 */
130 { USB_DEVICE(0x0573, 0x4d31) }, /* Hauppauge WinTv-USB III (PAL) with FM radio Model 568 */
131 { USB_DEVICE(0x0573, 0x4d32) }, /* Hauppauge WinTv-USB III (PAL) FM Model 573 */
132 { USB_DEVICE(0x0573, 0x4d35) }, /* Hauppauge WinTv-USB III (SECAM) FM Model 40219 Rev B252 */
133 { USB_DEVICE(0x0573, 0x4d37) }, /* Hauppauge WinTv-USB Model 40219 Rev E189 */
134 { USB_DEVICE(0x0768, 0x0006) }, /* Camtel Technology USB TV Genie Pro FM Model TVB330 */
135 { USB_DEVICE(0x07d0, 0x0001) }, /* Digital Video Creator I */
136 { USB_DEVICE(0x07d0, 0x0002) }, /* Global Village GV-007 (NTSC) */
137 { USB_DEVICE(0x07d0, 0x0003) }, /* Dazzle Fusion Model DVC-50 Rev 1 (NTSC) */
138 { USB_DEVICE(0x07d0, 0x0004) }, /* Dazzle Fusion Model DVC-80 Rev 1 (PAL) */
139 { USB_DEVICE(0x07d0, 0x0005) }, /* Dazzle Fusion Model DVC-90 Rev 1 (SECAM) */
140 { USB_DEVICE(0x2304, 0x010d) }, /* Pinnacle Studio PCTV USB (PAL) */
141 { USB_DEVICE(0x2304, 0x0109) }, /* Pinnacle Studio PCTV USB (SECAM) */
142 { USB_DEVICE(0x2304, 0x0110) }, /* Pinnacle Studio PCTV USB (PAL) */
143 { USB_DEVICE(0x2304, 0x0111) }, /* Miro PCTV USB */
144 { USB_DEVICE(0x2304, 0x0112) }, /* Pinnacle Studio PCTV USB (NTSC) with FM radio */
145 { USB_DEVICE(0x2304, 0x0210) }, /* Pinnacle Studio PCTV USB (PAL) with FM radio */
146 { USB_DEVICE(0x2304, 0x0212) }, /* Pinnacle Studio PCTV USB (NTSC) with FM radio */
147 { USB_DEVICE(0x2304, 0x0214) }, /* Pinnacle Studio PCTV USB (PAL) with FM radio */
148 { USB_DEVICE(0x2304, 0x0300) }, /* Pinnacle Studio Linx Video input cable (NTSC) */
149 { USB_DEVICE(0x2304, 0x0301) }, /* Pinnacle Studio Linx Video input cable (PAL) */
150 { USB_DEVICE(0x2304, 0x0419) }, /* Pinnacle PCTV Bungee USB (PAL) FM */
151
152 { USB_DEVICE(0x2400, 0x4200) }, /* Hauppauge WinTv-USB2 Model 42012 */
153
154 { } /* Terminating entry */
155};
156
157MODULE_DEVICE_TABLE (usb, usbvision_table);
diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
new file mode 100644
index 000000000000..797b97baf9ed
--- /dev/null
+++ b/drivers/media/video/usbvision/usbvision-core.c
@@ -0,0 +1,2554 @@
1/*
2 * usbvision-core.c - driver for NT100x USB video capture devices
3 *
4 *
5 * Copyright (c) 1999-2005 Joerg Heckenbach <joerg@heckenbach-aw.de>
6 * Dwaine Garden <dwainegarden@rogers.com>
7 *
8 * This module is part of usbvision driver project.
9 * Updates to driver completed by Dwaine P. Garden
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26#include <linux/kernel.h>
27#include <linux/sched.h>
28#include <linux/list.h>
29#include <linux/timer.h>
30#include <linux/slab.h>
31#include <linux/mm.h>
32#include <linux/utsname.h>
33#include <linux/highmem.h>
34#include <linux/smp_lock.h>
35#include <linux/videodev.h>
36#include <linux/vmalloc.h>
37#include <linux/module.h>
38#include <linux/init.h>
39#include <linux/spinlock.h>
40#include <asm/io.h>
41#include <linux/videodev2.h>
42#include <linux/video_decoder.h>
43#include <linux/i2c.h>
44
45#include <media/saa7115.h>
46#include <media/v4l2-common.h>
47#include <media/tuner.h>
48#include <media/audiochip.h>
49
50#include <linux/moduleparam.h>
51#include <linux/workqueue.h>
52
53#ifdef CONFIG_KMOD
54#include <linux/kmod.h>
55#endif
56
57#include "usbvision.h"
58
59static unsigned int core_debug = 0;
60module_param(core_debug,int,0644);
61MODULE_PARM_DESC(core_debug,"enable debug messages [core]");
62
63static unsigned int force_testpattern = 0;
64module_param(force_testpattern,int,0644);
65MODULE_PARM_DESC(force_testpattern,"enable test pattern display [core]");
66
67static int adjustCompression = 1; // Set the compression to be adaptive
68module_param(adjustCompression, int, 0444);
69MODULE_PARM_DESC(adjustCompression, " Set the ADPCM compression for the device. Default: 1 (On)");
70
71static int SwitchSVideoInput = 0; // To help people with Black and White output with using s-video input. Some cables and input device are wired differently.
72module_param(SwitchSVideoInput, int, 0444);
73MODULE_PARM_DESC(SwitchSVideoInput, " Set the S-Video input. Some cables and input device are wired differently. Default: 0 (Off)");
74
75#define ENABLE_HEXDUMP 0 /* Enable if you need it */
76
77
78#ifdef USBVISION_DEBUG
79 #define PDEBUG(level, fmt, args...) \
80 if (core_debug & (level)) info("[%s:%d] " fmt, __PRETTY_FUNCTION__, __LINE__ , ## args)
81#else
82 #define PDEBUG(level, fmt, args...) do {} while(0)
83#endif
84
85#define DBG_HEADER 1<<0
86#define DBG_IRQ 1<<1
87#define DBG_ISOC 1<<2
88#define DBG_PARSE 1<<3
89#define DBG_SCRATCH 1<<4
90#define DBG_FUNC 1<<5
91
92static const int max_imgwidth = MAX_FRAME_WIDTH;
93static const int max_imgheight = MAX_FRAME_HEIGHT;
94static const int min_imgwidth = MIN_FRAME_WIDTH;
95static const int min_imgheight = MIN_FRAME_HEIGHT;
96
97/* The value of 'scratch_buf_size' affects quality of the picture
98 * in many ways. Shorter buffers may cause loss of data when client
99 * is too slow. Larger buffers are memory-consuming and take longer
100 * to work with. This setting can be adjusted, but the default value
101 * should be OK for most desktop users.
102 */
103#define DEFAULT_SCRATCH_BUF_SIZE (0x20000) // 128kB memory scratch buffer
104static const int scratch_buf_size = DEFAULT_SCRATCH_BUF_SIZE;
105
106// Function prototypes
107static int usbvision_request_intra (struct usb_usbvision *usbvision);
108static int usbvision_unrequest_intra (struct usb_usbvision *usbvision);
109static int usbvision_adjust_compression (struct usb_usbvision *usbvision);
110static int usbvision_measure_bandwidth (struct usb_usbvision *usbvision);
111
112/*******************************/
113/* Memory management functions */
114/*******************************/
115
116/*
117 * Here we want the physical address of the memory.
118 * This is used when initializing the contents of the area.
119 */
120
121void *usbvision_rvmalloc(unsigned long size)
122{
123 void *mem;
124 unsigned long adr;
125
126 size = PAGE_ALIGN(size);
127 mem = vmalloc_32(size);
128 if (!mem)
129 return NULL;
130
131 memset(mem, 0, size); /* Clear the ram out, no junk to the user */
132 adr = (unsigned long) mem;
133 while (size > 0) {
134 SetPageReserved(vmalloc_to_page((void *)adr));
135 adr += PAGE_SIZE;
136 size -= PAGE_SIZE;
137 }
138
139 return mem;
140}
141
142void usbvision_rvfree(void *mem, unsigned long size)
143{
144 unsigned long adr;
145
146 if (!mem)
147 return;
148
149 size = PAGE_ALIGN(size);
150
151 adr = (unsigned long) mem;
152 while ((long) size > 0) {
153 ClearPageReserved(vmalloc_to_page((void *)adr));
154 adr += PAGE_SIZE;
155 size -= PAGE_SIZE;
156 }
157
158 vfree(mem);
159}
160
161
162
163#if ENABLE_HEXDUMP
164static void usbvision_hexdump(const unsigned char *data, int len)
165{
166 char tmp[80];
167 int i, k;
168
169 for (i = k = 0; len > 0; i++, len--) {
170 if (i > 0 && (i % 16 == 0)) {
171 printk("%s\n", tmp);
172 k = 0;
173 }
174 k += sprintf(&tmp[k], "%02x ", data[i]);
175 }
176 if (k > 0)
177 printk("%s\n", tmp);
178}
179#endif
180
181/********************************
182 * scratch ring buffer handling
183 ********************************/
184int scratch_len(struct usb_usbvision *usbvision) /*This returns the amount of data actually in the buffer */
185{
186 int len = usbvision->scratch_write_ptr - usbvision->scratch_read_ptr;
187 if (len < 0) {
188 len += scratch_buf_size;
189 }
190 PDEBUG(DBG_SCRATCH, "scratch_len() = %d\n", len);
191
192 return len;
193}
194
195
196/* This returns the free space left in the buffer */
197int scratch_free(struct usb_usbvision *usbvision)
198{
199 int free = usbvision->scratch_read_ptr - usbvision->scratch_write_ptr;
200 if (free <= 0) {
201 free += scratch_buf_size;
202 }
203 if (free) {
204 free -= 1; /* at least one byte in the buffer must */
205 /* left blank, otherwise there is no chance to differ between full and empty */
206 }
207 PDEBUG(DBG_SCRATCH, "return %d\n", free);
208
209 return free;
210}
211
212
213/* This puts data into the buffer */
214int scratch_put(struct usb_usbvision *usbvision, unsigned char *data, int len)
215{
216 int len_part;
217
218 if (usbvision->scratch_write_ptr + len < scratch_buf_size) {
219 memcpy(usbvision->scratch + usbvision->scratch_write_ptr, data, len);
220 usbvision->scratch_write_ptr += len;
221 }
222 else {
223 len_part = scratch_buf_size - usbvision->scratch_write_ptr;
224 memcpy(usbvision->scratch + usbvision->scratch_write_ptr, data, len_part);
225 if (len == len_part) {
226 usbvision->scratch_write_ptr = 0; /* just set write_ptr to zero */
227 }
228 else {
229 memcpy(usbvision->scratch, data + len_part, len - len_part);
230 usbvision->scratch_write_ptr = len - len_part;
231 }
232 }
233
234 PDEBUG(DBG_SCRATCH, "len=%d, new write_ptr=%d\n", len, usbvision->scratch_write_ptr);
235
236 return len;
237}
238
239/* This marks the write_ptr as position of new frame header */
240void scratch_mark_header(struct usb_usbvision *usbvision)
241{
242 PDEBUG(DBG_SCRATCH, "header at write_ptr=%d\n", usbvision->scratch_headermarker_write_ptr);
243
244 usbvision->scratch_headermarker[usbvision->scratch_headermarker_write_ptr] =
245 usbvision->scratch_write_ptr;
246 usbvision->scratch_headermarker_write_ptr += 1;
247 usbvision->scratch_headermarker_write_ptr %= USBVISION_NUM_HEADERMARKER;
248}
249
250/* This gets data from the buffer at the given "ptr" position */
251int scratch_get_extra(struct usb_usbvision *usbvision, unsigned char *data, int *ptr, int len)
252{
253 int len_part;
254 if (*ptr + len < scratch_buf_size) {
255 memcpy(data, usbvision->scratch + *ptr, len);
256 *ptr += len;
257 }
258 else {
259 len_part = scratch_buf_size - *ptr;
260 memcpy(data, usbvision->scratch + *ptr, len_part);
261 if (len == len_part) {
262 *ptr = 0; /* just set the y_ptr to zero */
263 }
264 else {
265 memcpy(data + len_part, usbvision->scratch, len - len_part);
266 *ptr = len - len_part;
267 }
268 }
269
270 PDEBUG(DBG_SCRATCH, "len=%d, new ptr=%d\n", len, *ptr);
271
272 return len;
273}
274
275
276/* This sets the scratch extra read pointer */
277void scratch_set_extra_ptr(struct usb_usbvision *usbvision, int *ptr, int len)
278{
279 *ptr = (usbvision->scratch_read_ptr + len)%scratch_buf_size;
280
281 PDEBUG(DBG_SCRATCH, "ptr=%d\n", *ptr);
282}
283
284
285/*This increments the scratch extra read pointer */
286void scratch_inc_extra_ptr(int *ptr, int len)
287{
288 *ptr = (*ptr + len) % scratch_buf_size;
289
290 PDEBUG(DBG_SCRATCH, "ptr=%d\n", *ptr);
291}
292
293
294/* This gets data from the buffer */
295int scratch_get(struct usb_usbvision *usbvision, unsigned char *data, int len)
296{
297 int len_part;
298 if (usbvision->scratch_read_ptr + len < scratch_buf_size) {
299 memcpy(data, usbvision->scratch + usbvision->scratch_read_ptr, len);
300 usbvision->scratch_read_ptr += len;
301 }
302 else {
303 len_part = scratch_buf_size - usbvision->scratch_read_ptr;
304 memcpy(data, usbvision->scratch + usbvision->scratch_read_ptr, len_part);
305 if (len == len_part) {
306 usbvision->scratch_read_ptr = 0; /* just set the read_ptr to zero */
307 }
308 else {
309 memcpy(data + len_part, usbvision->scratch, len - len_part);
310 usbvision->scratch_read_ptr = len - len_part;
311 }
312 }
313
314 PDEBUG(DBG_SCRATCH, "len=%d, new read_ptr=%d\n", len, usbvision->scratch_read_ptr);
315
316 return len;
317}
318
319
320/* This sets read pointer to next header and returns it */
321int scratch_get_header(struct usb_usbvision *usbvision,struct usbvision_frame_header *header)
322{
323 int errCode = 0;
324
325 PDEBUG(DBG_SCRATCH, "from read_ptr=%d", usbvision->scratch_headermarker_read_ptr);
326
327 while (usbvision->scratch_headermarker_write_ptr -
328 usbvision->scratch_headermarker_read_ptr != 0) {
329 usbvision->scratch_read_ptr =
330 usbvision->scratch_headermarker[usbvision->scratch_headermarker_read_ptr];
331 usbvision->scratch_headermarker_read_ptr += 1;
332 usbvision->scratch_headermarker_read_ptr %= USBVISION_NUM_HEADERMARKER;
333 scratch_get(usbvision, (unsigned char *)header, USBVISION_HEADER_LENGTH);
334 if ((header->magic_1 == USBVISION_MAGIC_1)
335 && (header->magic_2 == USBVISION_MAGIC_2)
336 && (header->headerLength == USBVISION_HEADER_LENGTH)) {
337 errCode = USBVISION_HEADER_LENGTH;
338 header->frameWidth = header->frameWidthLo + (header->frameWidthHi << 8);
339 header->frameHeight = header->frameHeightLo + (header->frameHeightHi << 8);
340 break;
341 }
342 }
343
344 return errCode;
345}
346
347
348/*This removes len bytes of old data from the buffer */
349void scratch_rm_old(struct usb_usbvision *usbvision, int len)
350{
351
352 usbvision->scratch_read_ptr += len;
353 usbvision->scratch_read_ptr %= scratch_buf_size;
354 PDEBUG(DBG_SCRATCH, "read_ptr is now %d\n", usbvision->scratch_read_ptr);
355}
356
357
358/*This resets the buffer - kills all data in it too */
359void scratch_reset(struct usb_usbvision *usbvision)
360{
361 PDEBUG(DBG_SCRATCH, "\n");
362
363 usbvision->scratch_read_ptr = 0;
364 usbvision->scratch_write_ptr = 0;
365 usbvision->scratch_headermarker_read_ptr = 0;
366 usbvision->scratch_headermarker_write_ptr = 0;
367 usbvision->isocstate = IsocState_NoFrame;
368}
369
370int usbvision_scratch_alloc(struct usb_usbvision *usbvision)
371{
372 usbvision->scratch = vmalloc(scratch_buf_size);
373 scratch_reset(usbvision);
374 if(usbvision->scratch == NULL) {
375 err("%s: unable to allocate %d bytes for scratch",
376 __FUNCTION__, scratch_buf_size);
377 return -ENOMEM;
378 }
379 return 0;
380}
381
382void usbvision_scratch_free(struct usb_usbvision *usbvision)
383{
384 if (usbvision->scratch != NULL) {
385 vfree(usbvision->scratch);
386 usbvision->scratch = NULL;
387 }
388}
389
390/*
391 * usbvision_testpattern()
392 *
393 * Procedure forms a test pattern (yellow grid on blue background).
394 *
395 * Parameters:
396 * fullframe: if TRUE then entire frame is filled, otherwise the procedure
397 * continues from the current scanline.
398 * pmode 0: fill the frame with solid blue color (like on VCR or TV)
399 * 1: Draw a colored grid
400 *
401 */
402void usbvision_testpattern(struct usb_usbvision *usbvision, int fullframe,
403 int pmode)
404{
405 static const char proc[] = "usbvision_testpattern";
406 struct usbvision_frame *frame;
407 unsigned char *f;
408 int num_cell = 0;
409 int scan_length = 0;
410 static int num_pass = 0;
411
412 if (usbvision == NULL) {
413 printk(KERN_ERR "%s: usbvision == NULL\n", proc);
414 return;
415 }
416 if (usbvision->curFrame == NULL) {
417 printk(KERN_ERR "%s: usbvision->curFrame is NULL.\n", proc);
418 return;
419 }
420
421 /* Grab the current frame */
422 frame = usbvision->curFrame;
423
424 /* Optionally start at the beginning */
425 if (fullframe) {
426 frame->curline = 0;
427 frame->scanlength = 0;
428 }
429
430 /* Form every scan line */
431 for (; frame->curline < frame->frmheight; frame->curline++) {
432 int i;
433
434 f = frame->data + (usbvision->curwidth * 3 * frame->curline);
435 for (i = 0; i < usbvision->curwidth; i++) {
436 unsigned char cb = 0x80;
437 unsigned char cg = 0;
438 unsigned char cr = 0;
439
440 if (pmode == 1) {
441 if (frame->curline % 32 == 0)
442 cb = 0, cg = cr = 0xFF;
443 else if (i % 32 == 0) {
444 if (frame->curline % 32 == 1)
445 num_cell++;
446 cb = 0, cg = cr = 0xFF;
447 } else {
448 cb =
449 ((num_cell * 7) +
450 num_pass) & 0xFF;
451 cg =
452 ((num_cell * 5) +
453 num_pass * 2) & 0xFF;
454 cr =
455 ((num_cell * 3) +
456 num_pass * 3) & 0xFF;
457 }
458 } else {
459 /* Just the blue screen */
460 }
461
462 *f++ = cb;
463 *f++ = cg;
464 *f++ = cr;
465 scan_length += 3;
466 }
467 }
468
469 frame->grabstate = FrameState_Done;
470 frame->scanlength += scan_length;
471 ++num_pass;
472
473}
474
475/*
476 * usbvision_decompress_alloc()
477 *
478 * allocates intermediate buffer for decompression
479 */
480int usbvision_decompress_alloc(struct usb_usbvision *usbvision)
481{
482 int IFB_size = MAX_FRAME_WIDTH * MAX_FRAME_HEIGHT * 3 / 2;
483 usbvision->IntraFrameBuffer = vmalloc(IFB_size);
484 if (usbvision->IntraFrameBuffer == NULL) {
485 err("%s: unable to allocate %d for compr. frame buffer", __FUNCTION__, IFB_size);
486 return -ENOMEM;
487 }
488 return 0;
489}
490
491/*
492 * usbvision_decompress_free()
493 *
494 * frees intermediate buffer for decompression
495 */
496void usbvision_decompress_free(struct usb_usbvision *usbvision)
497{
498 if (usbvision->IntraFrameBuffer != NULL) {
499 vfree(usbvision->IntraFrameBuffer);
500 usbvision->IntraFrameBuffer = NULL;
501 }
502}
503
504/************************************************************
505 * Here comes the data parsing stuff that is run as interrupt
506 ************************************************************/
507/*
508 * usbvision_find_header()
509 *
510 * Locate one of supported header markers in the scratch buffer.
511 */
static enum ParseState usbvision_find_header(struct usb_usbvision *usbvision)
{
	struct usbvision_frame *frame;
	int foundHeader = 0;

	frame = usbvision->curFrame;

	/* Consume complete isoc headers from the scratch buffer until a usable one is found */
	while (scratch_get_header(usbvision, &frame->isocHeader) == USBVISION_HEADER_LENGTH) {
		// found header in scratch
		PDEBUG(DBG_HEADER, "found header: 0x%02x%02x %d %d %d %d %#x 0x%02x %u %u",
				frame->isocHeader.magic_2,
				frame->isocHeader.magic_1,
				frame->isocHeader.headerLength,
				frame->isocHeader.frameNum,
				frame->isocHeader.framePhase,
				frame->isocHeader.frameLatency,
				frame->isocHeader.dataFormat,
				frame->isocHeader.formatParam,
				frame->isocHeader.frameWidth,
				frame->isocHeader.frameHeight);

		if (usbvision->requestIntra) {
			/* An intra frame was requested: only accept a header whose
			 * formatParam bit 7 marks it as intra coded; skip the rest */
			if (frame->isocHeader.formatParam & 0x80) {
				foundHeader = 1;
				usbvision->lastIsocFrameNum = -1; // do not check for lost frames this time
				usbvision_unrequest_intra(usbvision);
				break;
			}
		}
		else {
			foundHeader = 1;
			break;
		}
	}

	if (foundHeader) {
		/* The header carries the native frame size; apply the software stretch factors */
		frame->frmwidth = frame->isocHeader.frameWidth * usbvision->stretch_width;
		frame->frmheight = frame->isocHeader.frameHeight * usbvision->stretch_height;
		frame->v4l2_linesize = (frame->frmwidth * frame->v4l2_format.depth)>> 3;
	}
	else { // no header found
		PDEBUG(DBG_HEADER, "skipping scratch data, no header");
		scratch_reset(usbvision);
		return ParseState_EndParse;
	}

	// found header
	if (frame->isocHeader.dataFormat==ISOC_MODE_COMPRESS) {
		//check isocHeader.frameNum for lost frames
		/* frame numbers wrap at 32; any gap means a frame was dropped on USB
		 * and the delta-compressed stream must be resynced with an intra frame */
		if (usbvision->lastIsocFrameNum >= 0) {
			if (((usbvision->lastIsocFrameNum + 1) % 32) != frame->isocHeader.frameNum) {
				// unexpected frame drop: need to request new intra frame
				PDEBUG(DBG_HEADER, "Lost frame before %d on USB", frame->isocHeader.frameNum);
				usbvision_request_intra(usbvision);
				return ParseState_NextFrame;
			}
		}
		usbvision->lastIsocFrameNum = frame->isocHeader.frameNum;
	}
	usbvision->header_count++;
	frame->scanstate = ScanState_Lines;
	frame->curline = 0;

	if (force_testpattern) {
		usbvision_testpattern(usbvision, 1, 1);
		return ParseState_NextFrame;
	}
	return ParseState_Continue;
}
581
/*
 * usbvision_parse_lines_422()
 *
 * Parse one interleaved YUV 4:2:2 line (pairs of pixels sharing U/V) from
 * the scratch buffer into the current frame, converting to the frame's
 * V4L2 pixel format on the fly, and add the written bytes to *pcopylen.
 */
static enum ParseState usbvision_parse_lines_422(struct usb_usbvision *usbvision,
					   long *pcopylen)
{
	volatile struct usbvision_frame *frame;
	unsigned char *f;
	int len;
	int i;
	unsigned char yuyv[4]={180, 128, 10, 128}; // YUV components
	unsigned char rv, gv, bv; // RGB components
	int clipmask_index, bytes_per_pixel;
	int stretch_bytes, clipmask_add;

	frame = usbvision->curFrame;
	f = frame->data + (frame->v4l2_linesize * frame->curline);

	/* Make sure there's enough data for the entire line */
	len = (frame->isocHeader.frameWidth * 2)+5;
	if (scratch_len(usbvision) < len) {
		PDEBUG(DBG_PARSE, "out of data in line %d, need %u.\n", frame->curline, len);
		return ParseState_Out;
	}

	if ((frame->curline + 1) >= frame->frmheight) {
		return ParseState_NextFrame;
	}

	/* horizontal stretching is done by skipping output bytes per pixel pair */
	bytes_per_pixel = frame->v4l2_format.bytes_per_pixel;
	stretch_bytes = (usbvision->stretch_width - 1) * bytes_per_pixel;
	clipmask_index = frame->curline * MAX_FRAME_WIDTH;
	clipmask_add = usbvision->stretch_width;

	/* each iteration consumes 4 source bytes = two pixels (Y0 U Y1 V) */
	for (i = 0; i < frame->frmwidth; i+=(2 * usbvision->stretch_width)) {

		scratch_get(usbvision, &yuyv[0], 4);

		/* first pixel of the pair: yuyv[0]=Y0, yuyv[1]=V, yuyv[3]=U */
		if (frame->v4l2_format.format == V4L2_PIX_FMT_YUYV) {
			*f++ = yuyv[0]; // Y
			*f++ = yuyv[3]; // U
		}
		else {

			YUV_TO_RGB_BY_THE_BOOK(yuyv[0], yuyv[1], yuyv[3], rv, gv, bv);
			switch (frame->v4l2_format.format) {
				case V4L2_PIX_FMT_RGB565:
					*f++ = (0x1F & (bv >> 3)) | (0xE0 & (gv << 3));
					*f++ = (0x07 & (gv >> 5)) | (0xF8 & rv);
					break;
				case V4L2_PIX_FMT_RGB24:
					*f++ = bv;
					*f++ = gv;
					*f++ = rv;
					break;
				case V4L2_PIX_FMT_RGB32:
					*f++ = bv;
					*f++ = gv;
					*f++ = rv;
					f++;
					break;
				case V4L2_PIX_FMT_RGB555:
					*f++ = (0x1F & (bv >> 3)) | (0xE0 & (gv << 2));
					*f++ = (0x03 & (gv >> 6)) | (0x7C & (rv >> 1));
					break;
			}
		}
		clipmask_index += clipmask_add;
		f += stretch_bytes;

		/* second pixel of the pair: yuyv[2]=Y1, same chroma bytes */
		if (frame->v4l2_format.format == V4L2_PIX_FMT_YUYV) {
			*f++ = yuyv[2]; // Y
			*f++ = yuyv[1]; // V
		}
		else {

			YUV_TO_RGB_BY_THE_BOOK(yuyv[2], yuyv[1], yuyv[3], rv, gv, bv);
			switch (frame->v4l2_format.format) {
				case V4L2_PIX_FMT_RGB565:
					*f++ = (0x1F & (bv >> 3)) | (0xE0 & (gv << 3));
					*f++ = (0x07 & (gv >> 5)) | (0xF8 & rv);
					break;
				case V4L2_PIX_FMT_RGB24:
					*f++ = bv;
					*f++ = gv;
					*f++ = rv;
					break;
				case V4L2_PIX_FMT_RGB32:
					*f++ = bv;
					*f++ = gv;
					*f++ = rv;
					f++;
					break;
				case V4L2_PIX_FMT_RGB555:
					*f++ = (0x1F & (bv >> 3)) | (0xE0 & (gv << 2));
					*f++ = (0x03 & (gv >> 6)) | (0x7C & (rv >> 1));
					break;
			}
		}
		clipmask_index += clipmask_add;
		f += stretch_bytes;
	}

	frame->curline += usbvision->stretch_height;
	*pcopylen += frame->v4l2_linesize * usbvision->stretch_height;

	if (frame->curline >= frame->frmheight) {
		return ParseState_NextFrame;
	}
	else {
		return ParseState_Continue;
	}
}
692
693/* The decompression routine */
694static int usbvision_decompress(struct usb_usbvision *usbvision,unsigned char *Compressed,
695 unsigned char *Decompressed, int *StartPos,
696 int *BlockTypeStartPos, int Len)
697{
698 int RestPixel, Idx, MaxPos, Pos, ExtraPos, BlockLen, BlockTypePos, BlockTypeLen;
699 unsigned char BlockByte, BlockCode, BlockType, BlockTypeByte, Integrator;
700
701 Integrator = 0;
702 Pos = *StartPos;
703 BlockTypePos = *BlockTypeStartPos;
704 MaxPos = 396; //Pos + Len;
705 ExtraPos = Pos;
706 BlockLen = 0;
707 BlockByte = 0;
708 BlockCode = 0;
709 BlockType = 0;
710 BlockTypeByte = 0;
711 BlockTypeLen = 0;
712 RestPixel = Len;
713
714 for (Idx = 0; Idx < Len; Idx++) {
715
716 if (BlockLen == 0) {
717 if (BlockTypeLen==0) {
718 BlockTypeByte = Compressed[BlockTypePos];
719 BlockTypePos++;
720 BlockTypeLen = 4;
721 }
722 BlockType = (BlockTypeByte & 0xC0) >> 6;
723
724 //statistic:
725 usbvision->ComprBlockTypes[BlockType]++;
726
727 Pos = ExtraPos;
728 if (BlockType == 0) {
729 if(RestPixel >= 24) {
730 Idx += 23;
731 RestPixel -= 24;
732 Integrator = Decompressed[Idx];
733 } else {
734 Idx += RestPixel - 1;
735 RestPixel = 0;
736 }
737 } else {
738 BlockCode = Compressed[Pos];
739 Pos++;
740 if (RestPixel >= 24) {
741 BlockLen = 24;
742 } else {
743 BlockLen = RestPixel;
744 }
745 RestPixel -= BlockLen;
746 ExtraPos = Pos + (BlockLen / 4);
747 }
748 BlockTypeByte <<= 2;
749 BlockTypeLen -= 1;
750 }
751 if (BlockLen > 0) {
752 if ((BlockLen%4) == 0) {
753 BlockByte = Compressed[Pos];
754 Pos++;
755 }
756 if (BlockType == 1) { //inter Block
757 Integrator = Decompressed[Idx];
758 }
759 switch (BlockByte & 0xC0) {
760 case 0x03<<6:
761 Integrator += Compressed[ExtraPos];
762 ExtraPos++;
763 break;
764 case 0x02<<6:
765 Integrator += BlockCode;
766 break;
767 case 0x00:
768 Integrator -= BlockCode;
769 break;
770 }
771 Decompressed[Idx] = Integrator;
772 BlockByte <<= 2;
773 BlockLen -= 1;
774 }
775 }
776 *StartPos = ExtraPos;
777 *BlockTypeStartPos = BlockTypePos;
778 return Idx;
779}
780
781
782/*
783 * usbvision_parse_compress()
784 *
785 * Parse compressed frame from the scratch buffer, put
786 * decoded RGB value into the current frame buffer and add the written
787 * number of bytes (RGB) to the *pcopylen.
788 *
789 */
790static enum ParseState usbvision_parse_compress(struct usb_usbvision *usbvision,
791 long *pcopylen)
792{
793#define USBVISION_STRIP_MAGIC 0x5A
794#define USBVISION_STRIP_LEN_MAX 400
795#define USBVISION_STRIP_HEADER_LEN 3
796
797 struct usbvision_frame *frame;
798 unsigned char *f,*u = NULL ,*v = NULL;
799 unsigned char StripData[USBVISION_STRIP_LEN_MAX];
800 unsigned char StripHeader[USBVISION_STRIP_HEADER_LEN];
801 int Idx, IdxEnd, StripLen, StripPtr, StartBlockPos, BlockPos, BlockTypePos;
802 int clipmask_index, bytes_per_pixel, rc;
803 int imageSize;
804 unsigned char rv, gv, bv;
805 static unsigned char *Y, *U, *V;
806
807 frame = usbvision->curFrame;
808 imageSize = frame->frmwidth * frame->frmheight;
809 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
810 (frame->v4l2_format.format == V4L2_PIX_FMT_YVU420) ) { // this is a planar format
811 //... v4l2_linesize not used here.
812 f = frame->data + (frame->width * frame->curline);
813 } else
814 f = frame->data + (frame->v4l2_linesize * frame->curline);
815
816 if (frame->v4l2_format.format == V4L2_PIX_FMT_YUYV){ //initialise u and v pointers
817 // get base of u and b planes add halfoffset
818
819 u = frame->data
820 + imageSize
821 + (frame->frmwidth >>1) * frame->curline ;
822 v = u + (imageSize >>1 );
823
824 } else if (frame->v4l2_format.format == V4L2_PIX_FMT_YVU420){
825
826 v = frame->data + imageSize + ((frame->curline* (frame->width))>>2) ;
827 u = v + (imageSize >>2) ;
828 }
829
830 if (frame->curline == 0) {
831 usbvision_adjust_compression(usbvision);
832 }
833
834 if (scratch_len(usbvision) < USBVISION_STRIP_HEADER_LEN) {
835 return ParseState_Out;
836 }
837
838 //get strip header without changing the scratch_read_ptr
839 scratch_set_extra_ptr(usbvision, &StripPtr, 0);
840 scratch_get_extra(usbvision, &StripHeader[0], &StripPtr,
841 USBVISION_STRIP_HEADER_LEN);
842
843 if (StripHeader[0] != USBVISION_STRIP_MAGIC) {
844 // wrong strip magic
845 usbvision->stripMagicErrors++;
846 return ParseState_NextFrame;
847 }
848
849 if (frame->curline != (int)StripHeader[2]) {
850 //line number missmatch error
851 usbvision->stripLineNumberErrors++;
852 }
853
854 StripLen = 2 * (unsigned int)StripHeader[1];
855 if (StripLen > USBVISION_STRIP_LEN_MAX) {
856 // strip overrun
857 // I think this never happens
858 usbvision_request_intra(usbvision);
859 }
860
861 if (scratch_len(usbvision) < StripLen) {
862 //there is not enough data for the strip
863 return ParseState_Out;
864 }
865
866 if (usbvision->IntraFrameBuffer) {
867 Y = usbvision->IntraFrameBuffer + frame->frmwidth * frame->curline;
868 U = usbvision->IntraFrameBuffer + imageSize + (frame->frmwidth / 2) * (frame->curline / 2);
869 V = usbvision->IntraFrameBuffer + imageSize / 4 * 5 + (frame->frmwidth / 2) * (frame->curline / 2);
870 }
871 else {
872 return ParseState_NextFrame;
873 }
874
875 bytes_per_pixel = frame->v4l2_format.bytes_per_pixel;
876 clipmask_index = frame->curline * MAX_FRAME_WIDTH;
877
878 scratch_get(usbvision, StripData, StripLen);
879
880 IdxEnd = frame->frmwidth;
881 BlockTypePos = USBVISION_STRIP_HEADER_LEN;
882 StartBlockPos = BlockTypePos + (IdxEnd - 1) / 96 + (IdxEnd / 2 - 1) / 96 + 2;
883 BlockPos = StartBlockPos;
884
885 usbvision->BlockPos = BlockPos;
886
887 if ((rc = usbvision_decompress(usbvision, StripData, Y, &BlockPos, &BlockTypePos, IdxEnd)) != IdxEnd) {
888 //return ParseState_Continue;
889 }
890 if (StripLen > usbvision->maxStripLen) {
891 usbvision->maxStripLen = StripLen;
892 }
893
894 if (frame->curline%2) {
895 if ((rc = usbvision_decompress(usbvision, StripData, V, &BlockPos, &BlockTypePos, IdxEnd/2)) != IdxEnd/2) {
896 //return ParseState_Continue;
897 }
898 }
899 else {
900 if ((rc = usbvision_decompress(usbvision, StripData, U, &BlockPos, &BlockTypePos, IdxEnd/2)) != IdxEnd/2) {
901 //return ParseState_Continue;
902 }
903 }
904
905 if (BlockPos > usbvision->comprBlockPos) {
906 usbvision->comprBlockPos = BlockPos;
907 }
908 if (BlockPos > StripLen) {
909 usbvision->stripLenErrors++;
910 }
911
912 for (Idx = 0; Idx < IdxEnd; Idx++) {
913 if(frame->v4l2_format.format == V4L2_PIX_FMT_YUYV) {
914 *f++ = Y[Idx];
915 *f++ = Idx & 0x01 ? U[Idx/2] : V[Idx/2];
916 }
917 else if(frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) {
918 *f++ = Y[Idx];
919 if ( Idx & 0x01)
920 *u++ = U[Idx>>1] ;
921 else
922 *v++ = V[Idx>>1];
923 }
924 else if (frame->v4l2_format.format == V4L2_PIX_FMT_YVU420) {
925 *f++ = Y [Idx];
926 if ( !(( Idx & 0x01 ) | ( frame->curline & 0x01 )) ){
927
928/* only need do this for 1 in 4 pixels */
929/* intraframe buffer is YUV420 format */
930
931 *u++ = U[Idx >>1];
932 *v++ = V[Idx >>1];
933 }
934
935 }
936 else {
937 YUV_TO_RGB_BY_THE_BOOK(Y[Idx], U[Idx/2], V[Idx/2], rv, gv, bv);
938 switch (frame->v4l2_format.format) {
939 case V4L2_PIX_FMT_GREY:
940 *f++ = Y[Idx];
941 break;
942 case V4L2_PIX_FMT_RGB555:
943 *f++ = (0x1F & (bv >> 3)) | (0xE0 & (gv << 2));
944 *f++ = (0x03 & (gv >> 6)) | (0x7C & (rv >> 1));
945 break;
946 case V4L2_PIX_FMT_RGB565:
947 *f++ = (0x1F & (bv >> 3)) | (0xE0 & (gv << 3));
948 *f++ = (0x07 & (gv >> 5)) | (0xF8 & rv);
949 break;
950 case V4L2_PIX_FMT_RGB24:
951 *f++ = bv;
952 *f++ = gv;
953 *f++ = rv;
954 break;
955 case V4L2_PIX_FMT_RGB32:
956 *f++ = bv;
957 *f++ = gv;
958 *f++ = rv;
959 f++;
960 break;
961 }
962 }
963 clipmask_index++;
964 }
965 /* Deal with non-integer no. of bytes for YUV420P */
966 if (frame->v4l2_format.format != V4L2_PIX_FMT_YVU420 )
967 *pcopylen += frame->v4l2_linesize;
968 else
969 *pcopylen += frame->curline & 0x01 ? frame->v4l2_linesize : frame->v4l2_linesize << 1;
970
971 frame->curline += 1;
972
973 if (frame->curline >= frame->frmheight) {
974 return ParseState_NextFrame;
975 }
976 else {
977 return ParseState_Continue;
978 }
979
980}
981
982
983/*
984 * usbvision_parse_lines_420()
985 *
986 * Parse two lines from the scratch buffer, put
987 * decoded RGB value into the current frame buffer and add the written
988 * number of bytes (RGB) to the *pcopylen.
989 *
990 */
static enum ParseState usbvision_parse_lines_420(struct usb_usbvision *usbvision,
					   long *pcopylen)
{
	struct usbvision_frame *frame;
	unsigned char *f_even = NULL, *f_odd = NULL;
	unsigned int pixel_per_line, block;
	int pixel, block_split;
	int y_ptr, u_ptr, v_ptr, y_odd_offset;
	/* the chip delivers data in interleaved blocks: 128 Y bytes then 64 UV bytes */
	const int y_block_size = 128;
	const int uv_block_size = 64;
	const int sub_block_size = 32;
	const int y_step[] = { 0, 0, 0, 2 }, y_step_size = 4;
	const int uv_step[]= { 0, 0, 0, 4 }, uv_step_size = 4;
	unsigned char y[2], u, v; /* YUV components */
	int y_, u_, v_, vb, uvg, ur;
	int r_, g_, b_; /* RGB components */
	unsigned char g;
	int clipmask_even_index, clipmask_odd_index, bytes_per_pixel;
	int clipmask_add, stretch_bytes;

	/* this routine decodes two output lines (even + odd) per call,
	 * since one U/V pair is shared by a 2x2 pixel quad */
	frame  = usbvision->curFrame;
	f_even = frame->data + (frame->v4l2_linesize * frame->curline);
	f_odd  = f_even + frame->v4l2_linesize * usbvision->stretch_height;

	/* Make sure there's enough data for the entire line */
	/* In this mode usbvision transfer 3 bytes for every 2 pixels */
	/* I need two lines to decode the color */
	bytes_per_pixel = frame->v4l2_format.bytes_per_pixel;
	stretch_bytes = (usbvision->stretch_width - 1) * bytes_per_pixel;
	clipmask_even_index = frame->curline * MAX_FRAME_WIDTH;
	clipmask_odd_index  = clipmask_even_index + MAX_FRAME_WIDTH;
	clipmask_add = usbvision->stretch_width;
	pixel_per_line = frame->isocHeader.frameWidth;

	if (scratch_len(usbvision) < (int)pixel_per_line * 3) {
		//printk(KERN_DEBUG "out of data, need %d\n", len);
		return ParseState_Out;
	}

	if ((frame->curline + 1) >= frame->frmheight) {
		return ParseState_NextFrame;
	}

	block_split = (pixel_per_line%y_block_size) ? 1 : 0;	//are some blocks splitted into different lines?

	/* offset of the odd line's Y data within the interleaved stream */
	y_odd_offset = (pixel_per_line / y_block_size) * (y_block_size + uv_block_size)
			+ block_split * uv_block_size;

	/* extra pointers let us read Y(odd)/U/V without consuming the scratch buffer */
	scratch_set_extra_ptr(usbvision, &y_ptr, y_odd_offset);
	scratch_set_extra_ptr(usbvision, &u_ptr, y_block_size);
	scratch_set_extra_ptr(usbvision, &v_ptr, y_odd_offset
			+ (4 - block_split) * sub_block_size);

	for (block = 0; block < (pixel_per_line / sub_block_size);
	     block++) {


		for (pixel = 0; pixel < sub_block_size; pixel +=2) {
			scratch_get(usbvision, &y[0], 2);
			scratch_get_extra(usbvision, &u, &u_ptr, 1);
			scratch_get_extra(usbvision, &v, &v_ptr, 1);

			//I don't use the YUV_TO_RGB macro for better performance
			/* chroma terms are shared by all four pixels of the quad */
			v_ = v - 128;
			u_ = u - 128;
			vb =              132252 * v_;
			uvg= -53281 * u_ - 25625 * v_;
			ur = 104595 * u_;

			if(frame->v4l2_format.format == V4L2_PIX_FMT_YUYV) {
				*f_even++ = y[0];
				*f_even++ = v;
			}
			else {
				y_ = 76284 * (y[0] - 16);

				b_ = (y_ + vb) >> 16;
				g_ = (y_ + uvg)>> 16;
				r_ = (y_ + ur) >> 16;

				switch (frame->v4l2_format.format) {
					case V4L2_PIX_FMT_RGB565:
						g = LIMIT_RGB(g_);
						*f_even++ = (0x1F & (LIMIT_RGB(b_) >> 3)) | (0xE0 & (g << 3));
						*f_even++ = (0x07 & (          g   >> 5)) | (0xF8 &  LIMIT_RGB(r_));
						break;
					case V4L2_PIX_FMT_RGB24:
						*f_even++ = LIMIT_RGB(b_);
						*f_even++ = LIMIT_RGB(g_);
						*f_even++ = LIMIT_RGB(r_);
						break;
					case V4L2_PIX_FMT_RGB32:
						*f_even++ = LIMIT_RGB(b_);
						*f_even++ = LIMIT_RGB(g_);
						*f_even++ = LIMIT_RGB(r_);
						f_even++;
						break;
					case V4L2_PIX_FMT_RGB555:
						g = LIMIT_RGB(g_);
						*f_even++ = (0x1F & (LIMIT_RGB(b_) >> 3)) | (0xE0 & (g << 2));
						*f_even++ = (0x03 & (          g   >> 6)) |
							    (0x7C & (LIMIT_RGB(r_) >> 1));
						break;
				}
			}
			clipmask_even_index += clipmask_add;
			f_even += stretch_bytes;

			if(frame->v4l2_format.format == V4L2_PIX_FMT_YUYV) {
				*f_even++ = y[1];
				*f_even++ = u;
			}
			else {
				y_ = 76284 * (y[1] - 16);

				b_ = (y_ + vb) >> 16;
				g_ = (y_ + uvg)>> 16;
				r_ = (y_ + ur) >> 16;

				switch (frame->v4l2_format.format) {
					case V4L2_PIX_FMT_RGB565:
						g = LIMIT_RGB(g_);
						*f_even++ = (0x1F & (LIMIT_RGB(b_) >> 3)) | (0xE0 & (g << 3));
						*f_even++ = (0x07 & (          g   >> 5)) | (0xF8 &  LIMIT_RGB(r_));
						break;
					case V4L2_PIX_FMT_RGB24:
						*f_even++ = LIMIT_RGB(b_);
						*f_even++ = LIMIT_RGB(g_);
						*f_even++ = LIMIT_RGB(r_);
						break;
					case V4L2_PIX_FMT_RGB32:
						*f_even++ = LIMIT_RGB(b_);
						*f_even++ = LIMIT_RGB(g_);
						*f_even++ = LIMIT_RGB(r_);
						f_even++;
						break;
					case V4L2_PIX_FMT_RGB555:
						g = LIMIT_RGB(g_);
						*f_even++ = (0x1F & (LIMIT_RGB(b_) >> 3)) | (0xE0 & (g << 2));
						*f_even++ = (0x03 & (          g   >> 6)) |
							    (0x7C & (LIMIT_RGB(r_) >> 1));
						break;
				}
			}
			clipmask_even_index += clipmask_add;
			f_even += stretch_bytes;

			/* the same two steps again for the odd line, reusing u/v */
			scratch_get_extra(usbvision, &y[0], &y_ptr, 2);

			if(frame->v4l2_format.format == V4L2_PIX_FMT_YUYV) {
				*f_odd++ = y[0];
				*f_odd++ = v;
			}
			else {
				y_ = 76284 * (y[0] - 16);

				b_ = (y_ + vb) >> 16;
				g_ = (y_ + uvg)>> 16;
				r_ = (y_ + ur) >> 16;

				switch (frame->v4l2_format.format) {
					case V4L2_PIX_FMT_RGB565:
						g = LIMIT_RGB(g_);
						*f_odd++ = (0x1F & (LIMIT_RGB(b_) >> 3)) | (0xE0 & (g << 3));
						*f_odd++ = (0x07 & (          g   >> 5)) | (0xF8 &  LIMIT_RGB(r_));
						break;
					case V4L2_PIX_FMT_RGB24:
						*f_odd++ = LIMIT_RGB(b_);
						*f_odd++ = LIMIT_RGB(g_);
						*f_odd++ = LIMIT_RGB(r_);
						break;
					case V4L2_PIX_FMT_RGB32:
						*f_odd++ = LIMIT_RGB(b_);
						*f_odd++ = LIMIT_RGB(g_);
						*f_odd++ = LIMIT_RGB(r_);
						f_odd++;
						break;
					case V4L2_PIX_FMT_RGB555:
						g = LIMIT_RGB(g_);
						*f_odd++ = (0x1F & (LIMIT_RGB(b_) >> 3)) | (0xE0 & (g << 2));
						*f_odd++ = (0x03 & (          g   >> 6)) |
							   (0x7C & (LIMIT_RGB(r_) >> 1));
						break;
				}
			}
			clipmask_odd_index += clipmask_add;
			f_odd += stretch_bytes;

			if(frame->v4l2_format.format == V4L2_PIX_FMT_YUYV) {
				*f_odd++ = y[1];
				*f_odd++ = u;
			}
			else {
				y_ = 76284 * (y[1] - 16);

				b_ = (y_ + vb) >> 16;
				g_ = (y_ + uvg)>> 16;
				r_ = (y_ + ur) >> 16;

				switch (frame->v4l2_format.format) {
					case V4L2_PIX_FMT_RGB565:
						g = LIMIT_RGB(g_);
						*f_odd++ = (0x1F & (LIMIT_RGB(b_) >> 3)) | (0xE0 & (g << 3));
						*f_odd++ = (0x07 & (          g   >> 5)) | (0xF8 &  LIMIT_RGB(r_));
						break;
					case V4L2_PIX_FMT_RGB24:
						*f_odd++ = LIMIT_RGB(b_);
						*f_odd++ = LIMIT_RGB(g_);
						*f_odd++ = LIMIT_RGB(r_);
						break;
					case V4L2_PIX_FMT_RGB32:
						*f_odd++ = LIMIT_RGB(b_);
						*f_odd++ = LIMIT_RGB(g_);
						*f_odd++ = LIMIT_RGB(r_);
						f_odd++;
						break;
					case V4L2_PIX_FMT_RGB555:
						g = LIMIT_RGB(g_);
						*f_odd++ = (0x1F & (LIMIT_RGB(b_) >> 3)) | (0xE0 & (g << 2));
						*f_odd++ = (0x03 & (          g   >> 6)) |
							   (0x7C & (LIMIT_RGB(r_) >> 1));
						break;
				}
			}
			clipmask_odd_index += clipmask_add;
			f_odd += stretch_bytes;
		}

		/* advance past the interleaved blocks consumed by this sub-block */
		scratch_rm_old(usbvision,y_step[block % y_step_size] * sub_block_size);
		scratch_inc_extra_ptr(&y_ptr, y_step[(block + 2 * block_split) % y_step_size]
				* sub_block_size);
		scratch_inc_extra_ptr(&u_ptr, uv_step[block % uv_step_size]
				* sub_block_size);
		scratch_inc_extra_ptr(&v_ptr, uv_step[(block + 2 * block_split) % uv_step_size]
				* sub_block_size);
	}

	scratch_rm_old(usbvision, pixel_per_line * 3 / 2
			+ block_split * sub_block_size);

	frame->curline += 2 * usbvision->stretch_height;
	*pcopylen += frame->v4l2_linesize * 2 * usbvision->stretch_height;

	if (frame->curline >= frame->frmheight)
		return ParseState_NextFrame;
	else
		return ParseState_Continue;
}
1239
1240/*
1241 * usbvision_parse_data()
1242 *
1243 * Generic routine to parse the scratch buffer. It employs either
1244 * usbvision_find_header() or usbvision_parse_lines() to do most
1245 * of work.
1246 *
1247 */
1248static void usbvision_parse_data(struct usb_usbvision *usbvision)
1249{
1250 struct usbvision_frame *frame;
1251 enum ParseState newstate;
1252 long copylen = 0;
1253 unsigned long lock_flags;
1254
1255 frame = usbvision->curFrame;
1256
1257 PDEBUG(DBG_PARSE, "parsing len=%d\n", scratch_len(usbvision));
1258
1259 while (1) {
1260
1261 newstate = ParseState_Out;
1262 if (scratch_len(usbvision)) {
1263 if (frame->scanstate == ScanState_Scanning) {
1264 newstate = usbvision_find_header(usbvision);
1265 }
1266 else if (frame->scanstate == ScanState_Lines) {
1267 if (usbvision->isocMode == ISOC_MODE_YUV420) {
1268 newstate = usbvision_parse_lines_420(usbvision, &copylen);
1269 }
1270 else if (usbvision->isocMode == ISOC_MODE_YUV422) {
1271 newstate = usbvision_parse_lines_422(usbvision, &copylen);
1272 }
1273 else if (usbvision->isocMode == ISOC_MODE_COMPRESS) {
1274 newstate = usbvision_parse_compress(usbvision, &copylen);
1275 }
1276
1277 }
1278 }
1279 if (newstate == ParseState_Continue) {
1280 continue;
1281 }
1282 else if ((newstate == ParseState_NextFrame) || (newstate == ParseState_Out)) {
1283 break;
1284 }
1285 else {
1286 return; /* ParseState_EndParse */
1287 }
1288 }
1289
1290 if (newstate == ParseState_NextFrame) {
1291 frame->grabstate = FrameState_Done;
1292 do_gettimeofday(&(frame->timestamp));
1293 frame->sequence = usbvision->frame_num;
1294
1295 spin_lock_irqsave(&usbvision->queue_lock, lock_flags);
1296 list_move_tail(&(frame->frame), &usbvision->outqueue);
1297 usbvision->curFrame = NULL;
1298 spin_unlock_irqrestore(&usbvision->queue_lock, lock_flags);
1299
1300 usbvision->frame_num++;
1301
1302 /* This will cause the process to request another frame. */
1303 if (waitqueue_active(&usbvision->wait_frame)) {
1304 PDEBUG(DBG_PARSE, "Wake up !");
1305 wake_up_interruptible(&usbvision->wait_frame);
1306 }
1307 }
1308 else
1309 frame->grabstate = FrameState_Grabbing;
1310
1311
1312 /* Update the frame's uncompressed length. */
1313 frame->scanlength += copylen;
1314}
1315
1316
1317/*
1318 * Make all of the blocks of data contiguous
1319 */
/* Copy every valid isoc packet of the URB into the scratch buffer,
 * dropping errored/empty/oversized packets. Returns the total number
 * of payload bytes copied. */
static int usbvision_compress_isochronous(struct usb_usbvision *usbvision,
					  struct urb *urb)
{
	unsigned char *packet_data;
	int i, totlen = 0;

	for (i = 0; i < urb->number_of_packets; i++) {
		int packet_len = urb->iso_frame_desc[i].actual_length;
		int packet_stat = urb->iso_frame_desc[i].status;

		packet_data = urb->transfer_buffer + urb->iso_frame_desc[i].offset;

		/* Detect and ignore errored packets */
		if (packet_stat) { // packet_stat != 0 ?????????????
			PDEBUG(DBG_ISOC, "data error: [%d] len=%d, status=%X", i, packet_len, packet_stat);
			usbvision->isocErrCount++;
			continue;
		}

		/* Detect and ignore empty packets */
		if (packet_len < 0) {
			PDEBUG(DBG_ISOC, "error packet [%d]", i);
			usbvision->isocSkipCount++;
			continue;
		}
		else if (packet_len == 0) {	/* Frame end ????? */
			/* a zero-length packet is treated as an end-of-frame marker */
			PDEBUG(DBG_ISOC, "null packet [%d]", i);
			usbvision->isocstate=IsocState_NoFrame;
			usbvision->isocSkipCount++;
			continue;
		}
		else if (packet_len > usbvision->isocPacketSize) {
			PDEBUG(DBG_ISOC, "packet[%d] > isocPacketSize", i);
			usbvision->isocSkipCount++;
			continue;
		}

		PDEBUG(DBG_ISOC, "packet ok [%d] len=%d", i, packet_len);

		if (usbvision->isocstate==IsocState_NoFrame) { //new frame begins
			/* first data packet after a frame boundary: remember where
			 * the header starts and update bandwidth statistics */
			usbvision->isocstate=IsocState_InFrame;
			scratch_mark_header(usbvision);
			usbvision_measure_bandwidth(usbvision);
			PDEBUG(DBG_ISOC, "packet with header");
		}

		/*
		 * If usbvision continues to feed us with data but there is no
		 * consumption (if, for example, V4L client fell asleep) we
		 * may overflow the buffer. We have to move old data over to
		 * free room for new data. This is bad for old data. If we
		 * just drop new data then it's bad for new data... choose
		 * your favorite evil here.
		 */
		if (scratch_free(usbvision) < packet_len) {

			usbvision->scratch_ovf_count++;
			PDEBUG(DBG_ISOC, "scratch buf overflow! scr_len: %d, n: %d",
			       scratch_len(usbvision), packet_len);
			scratch_rm_old(usbvision, packet_len - scratch_free(usbvision));
		}

		/* Now we know that there is enough room in scratch buffer */
		scratch_put(usbvision, packet_data, packet_len);
		totlen += packet_len;
		usbvision->isocDataCount += packet_len;
		usbvision->isocPacketCount++;
	}
#if ENABLE_HEXDUMP
	if (totlen > 0) {
		static int foo = 0;
		if (foo < 1) {
			printk(KERN_DEBUG "+%d.\n", usbvision->scratchlen);
			usbvision_hexdump(data0, (totlen > 64) ? 64 : totlen);
			++foo;
		}
	}
#endif
 	return totlen;
}
1400
/* Completion handler for the isochronous video URBs (interrupt context).
 * Moves the received data into the scratch buffer, parses it into the
 * current frame if streaming, and resubmits the URB. */
static void usbvision_isocIrq(struct urb *urb)
{
	int errCode = 0;
	int len;
	struct usb_usbvision *usbvision = urb->context;
	int i;
	unsigned long startTime = jiffies;
	struct usbvision_frame **f;

	/* We don't want to do anything if we are about to be removed! */
	if (!USBVISION_IS_OPERATIONAL(usbvision))
		return;

	f = &usbvision->curFrame;

	/* Manage streaming interruption */
	if (usbvision->streaming == Stream_Interrupt) {
		usbvision->streaming = Stream_Idle;
		if ((*f)) {
			(*f)->grabstate = FrameState_Ready;
			(*f)->scanstate = ScanState_Scanning;
		}
		PDEBUG(DBG_IRQ, "stream interrupted");
		wake_up_interruptible(&usbvision->wait_stream);
	}

	/* Copy the data received into our scratch buffer */
	len = usbvision_compress_isochronous(usbvision, urb);

	usbvision->isocUrbCount++;
	usbvision->urb_length = len;

	if (usbvision->streaming == Stream_On) {

		/* If we collected enough data let's parse! */
		if (scratch_len(usbvision) > USBVISION_HEADER_LENGTH) { /* 12 == header_length */
			/*If we don't have a frame we're current working on, complain */
			if(!list_empty(&(usbvision->inqueue))) {
				if (!(*f)) {
					/* pick the next frame the client queued for capture */
					(*f) = list_entry(usbvision->inqueue.next,struct usbvision_frame, frame);
				}
				usbvision_parse_data(usbvision);
			}
			else {
				PDEBUG(DBG_IRQ, "received data, but no one needs it");
				scratch_reset(usbvision);
			}
		}
	}
	else {
		PDEBUG(DBG_IRQ, "received data, but no one needs it");
		scratch_reset(usbvision);
	}

	usbvision->timeInIrq += jiffies - startTime;

	/* Clear per-packet status/length and resubmit this URB for the next transfer */
	for (i = 0; i < USBVISION_URB_FRAMES; i++) {
		urb->iso_frame_desc[i].status = 0;
		urb->iso_frame_desc[i].actual_length = 0;
	}

	urb->status = 0;
	urb->dev = usbvision->dev;
	errCode = usb_submit_urb (urb, GFP_ATOMIC);

	/* Disable this warning. By design of the driver. */
	// if(errCode) {
	//	err("%s: usb_submit_urb failed: error %d", __FUNCTION__, errCode);
	// }

	return;
}
1473
1474/*************************************/
1475/* Low level usbvision access functions */
1476/*************************************/
1477
1478/*
1479 * usbvision_read_reg()
1480 *
1481 * return < 0 -> Error
1482 * >= 0 -> Data
1483 */
1484
1485int usbvision_read_reg(struct usb_usbvision *usbvision, unsigned char reg)
1486{
1487 int errCode = 0;
1488 unsigned char buffer[1];
1489
1490 if (!USBVISION_IS_OPERATIONAL(usbvision))
1491 return -1;
1492
1493 errCode = usb_control_msg(usbvision->dev, usb_rcvctrlpipe(usbvision->dev, 1),
1494 USBVISION_OP_CODE,
1495 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT,
1496 0, (__u16) reg, buffer, 1, HZ);
1497
1498 if (errCode < 0) {
1499 err("%s: failed: error %d", __FUNCTION__, errCode);
1500 return errCode;
1501 }
1502 return buffer[0];
1503}
1504
1505/*
1506 * usbvision_write_reg()
1507 *
1508 * return 1 -> Reg written
1509 * 0 -> usbvision is not yet ready
1510 * -1 -> Something went wrong
1511 */
1512
1513int usbvision_write_reg(struct usb_usbvision *usbvision, unsigned char reg,
1514 unsigned char value)
1515{
1516 int errCode = 0;
1517
1518 if (!USBVISION_IS_OPERATIONAL(usbvision))
1519 return 0;
1520
1521 errCode = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1),
1522 USBVISION_OP_CODE,
1523 USB_DIR_OUT | USB_TYPE_VENDOR |
1524 USB_RECIP_ENDPOINT, 0, (__u16) reg, &value, 1, HZ);
1525
1526 if (errCode < 0) {
1527 err("%s: failed: error %d", __FUNCTION__, errCode);
1528 }
1529 return errCode;
1530}
1531
1532
1533static void usbvision_ctrlUrb_complete(struct urb *urb)
1534{
1535 struct usb_usbvision *usbvision = (struct usb_usbvision *)urb->context;
1536
1537 PDEBUG(DBG_IRQ, "");
1538 usbvision->ctrlUrbBusy = 0;
1539 if (waitqueue_active(&usbvision->ctrlUrb_wq)) {
1540 wake_up_interruptible(&usbvision->ctrlUrb_wq);
1541 }
1542}
1543
1544
/* Write up to 8 bytes to chip registers from interrupt context using the
 * asynchronous control URB (usb_control_msg() would sleep here).
 * Returns 0 on successful submission, -EBUSY if the URB is in flight,
 * -EFAULT if len > 8, or the usb_submit_urb() error. */
static int usbvision_write_reg_irq(struct usb_usbvision *usbvision,int address,
				unsigned char *data, int len)
{
	int errCode = 0;

	PDEBUG(DBG_IRQ, "");
	if (len > 8) {
		return -EFAULT;
	}
	/* NOTE(review): the busy test-and-set below is not atomic (the
	 * semaphore calls are commented out) -- presumably tolerated because
	 * callers run from the same IRQ path; confirm before relying on it. */
//	down(&usbvision->ctrlUrbLock);
	if (usbvision->ctrlUrbBusy) {
//		up(&usbvision->ctrlUrbLock);
		return -EBUSY;
	}
	usbvision->ctrlUrbBusy = 1;
//	up(&usbvision->ctrlUrbLock);

	/* Build the vendor OUT setup packet addressing register 'address' */
	usbvision->ctrlUrbSetup.bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT;
	usbvision->ctrlUrbSetup.bRequest     = USBVISION_OP_CODE;
	usbvision->ctrlUrbSetup.wValue       = 0;
	usbvision->ctrlUrbSetup.wIndex       = cpu_to_le16(address);
	usbvision->ctrlUrbSetup.wLength      = cpu_to_le16(len);
	usb_fill_control_urb (usbvision->ctrlUrb, usbvision->dev,
							usb_sndctrlpipe(usbvision->dev, 1),
							(unsigned char *)&usbvision->ctrlUrbSetup,
							(void *)usbvision->ctrlUrbBuffer, len,
							usbvision_ctrlUrb_complete,
							(void *)usbvision);

	memcpy(usbvision->ctrlUrbBuffer, data, len);

	errCode = usb_submit_urb(usbvision->ctrlUrb, GFP_ATOMIC);
	if (errCode < 0) {
		// error in usb_submit_urb()
		usbvision->ctrlUrbBusy = 0;
	}
	PDEBUG(DBG_IRQ, "submit %d byte: error %d", len, errCode);
	return errCode;
}
1584
1585
1586static int usbvision_init_compression(struct usb_usbvision *usbvision)
1587{
1588 int errCode = 0;
1589
1590 usbvision->lastIsocFrameNum = -1;
1591 usbvision->isocDataCount = 0;
1592 usbvision->isocPacketCount = 0;
1593 usbvision->isocSkipCount = 0;
1594 usbvision->comprLevel = 50;
1595 usbvision->lastComprLevel = -1;
1596 usbvision->isocUrbCount = 0;
1597 usbvision->requestIntra = 1;
1598 usbvision->isocMeasureBandwidthCount = 0;
1599
1600 return errCode;
1601}
1602
1603/* this function measures the used bandwidth since last call
1604 * return: 0 : no error
1605 * sets usedBandwidth to 1-100 : 1-100% of full bandwidth resp. to isocPacketSize
1606 */
1607static int usbvision_measure_bandwidth (struct usb_usbvision *usbvision)
1608{
1609 int errCode = 0;
1610
1611 if (usbvision->isocMeasureBandwidthCount < 2) { // this gives an average bandwidth of 3 frames
1612 usbvision->isocMeasureBandwidthCount++;
1613 return errCode;
1614 }
1615 if ((usbvision->isocPacketSize > 0) && (usbvision->isocPacketCount > 0)) {
1616 usbvision->usedBandwidth = usbvision->isocDataCount /
1617 (usbvision->isocPacketCount + usbvision->isocSkipCount) *
1618 100 / usbvision->isocPacketSize;
1619 }
1620 usbvision->isocMeasureBandwidthCount = 0;
1621 usbvision->isocDataCount = 0;
1622 usbvision->isocPacketCount = 0;
1623 usbvision->isocSkipCount = 0;
1624 return errCode;
1625}
1626
/* Adapt the chip's compression thresholds to the measured USB bandwidth:
 * raise the compression level when usage is above ~90%, lower it below.
 * Writes six threshold registers (bridge-dependent layout) via the
 * interrupt-safe register write path. */
static int usbvision_adjust_compression (struct usb_usbvision *usbvision)
{
	int errCode = 0;
	unsigned char buffer[6];

	PDEBUG(DBG_IRQ, "");
	if ((adjustCompression) && (usbvision->usedBandwidth > 0)) {
		usbvision->comprLevel += (usbvision->usedBandwidth - 90) / 2;
		RESTRICT_TO_RANGE(usbvision->comprLevel, 0, 100);
		if (usbvision->comprLevel != usbvision->lastComprLevel) {
			int distorsion;
			if (usbvision->bridgeType == BRIDGE_NT1004 || usbvision->bridgeType == BRIDGE_NT1005) {
				buffer[0] = (unsigned char)(4 + 16 * usbvision->comprLevel / 100);	// PCM Threshold 1
				buffer[1] = (unsigned char)(4 + 8 * usbvision->comprLevel / 100);	// PCM Threshold 2
				distorsion = 7 + 248 * usbvision->comprLevel / 100;
				buffer[2] = (unsigned char)(distorsion & 0xFF);				// Average distortion Threshold (inter)
				buffer[3] = (unsigned char)(distorsion & 0xFF);				// Average distortion Threshold (intra)
				distorsion = 1 + 42 * usbvision->comprLevel / 100;
				buffer[4] = (unsigned char)(distorsion & 0xFF);				// Maximum distortion Threshold (inter)
				buffer[5] = (unsigned char)(distorsion & 0xFF);				// Maximum distortion Threshold (intra)
			}
			else { //BRIDGE_NT1003
				buffer[0] = (unsigned char)(4 + 16 * usbvision->comprLevel / 100);	// PCM threshold 1
				buffer[1] = (unsigned char)(4 + 8 * usbvision->comprLevel / 100);	// PCM threshold 2
				distorsion = 2 + 253 * usbvision->comprLevel / 100;
				buffer[2] = (unsigned char)(distorsion & 0xFF);				// distortion threshold bit0-7
				buffer[3] = 0; //(unsigned char)((distorsion >> 8) & 0x0F);		// distortion threshold bit 8-11
				distorsion = 0 + 43 * usbvision->comprLevel / 100;
				buffer[4] = (unsigned char)(distorsion & 0xFF);				// maximum distortion bit0-7
				buffer[5] = 0; //(unsigned char)((distorsion >> 8) & 0x01);		// maximum distortion bit 8
			}
			errCode = usbvision_write_reg_irq(usbvision, USBVISION_PCM_THR1, buffer, 6);
			if (errCode == 0){
				PDEBUG(DBG_IRQ, "new compr params %#02x %#02x %#02x %#02x %#02x %#02x", buffer[0],
						buffer[1], buffer[2], buffer[3], buffer[4], buffer[5]);
				usbvision->lastComprLevel = usbvision->comprLevel;
			}
		}
	}
	return errCode;
}
1668
1669static int usbvision_request_intra (struct usb_usbvision *usbvision)
1670{
1671 int errCode = 0;
1672 unsigned char buffer[1];
1673
1674 PDEBUG(DBG_IRQ, "");
1675 usbvision->requestIntra = 1;
1676 buffer[0] = 1;
1677 usbvision_write_reg_irq(usbvision, USBVISION_FORCE_INTRA, buffer, 1);
1678 return errCode;
1679}
1680
1681static int usbvision_unrequest_intra (struct usb_usbvision *usbvision)
1682{
1683 int errCode = 0;
1684 unsigned char buffer[1];
1685
1686 PDEBUG(DBG_IRQ, "");
1687 usbvision->requestIntra = 0;
1688 buffer[0] = 0;
1689 usbvision_write_reg_irq(usbvision, USBVISION_FORCE_INTRA, buffer, 1);
1690 return errCode;
1691}
1692
1693/*******************************
1694 * usbvision utility functions
1695 *******************************/
1696
1697int usbvision_power_off(struct usb_usbvision *usbvision)
1698{
1699 int errCode = 0;
1700
1701 PDEBUG(DBG_FUNC, "");
1702
1703 errCode = usbvision_write_reg(usbvision, USBVISION_PWR_REG, USBVISION_SSPND_EN);
1704 if (errCode == 1) {
1705 usbvision->power = 0;
1706 }
1707 PDEBUG(DBG_FUNC, "%s: errCode %d", (errCode!=1)?"ERROR":"power is off", errCode);
1708 return errCode;
1709}
1710
1711/*
1712 * usbvision_set_video_format()
1713 *
1714 */
1715static int usbvision_set_video_format(struct usb_usbvision *usbvision, int format)
1716{
1717 static const char proc[] = "usbvision_set_video_format";
1718 int rc;
1719 unsigned char value[2];
1720
1721 if (!USBVISION_IS_OPERATIONAL(usbvision))
1722 return 0;
1723
1724 PDEBUG(DBG_FUNC, "isocMode %#02x", format);
1725
1726 if ((format != ISOC_MODE_YUV422)
1727 && (format != ISOC_MODE_YUV420)
1728 && (format != ISOC_MODE_COMPRESS)) {
1729 printk(KERN_ERR "usbvision: unknown video format %02x, using default YUV420",
1730 format);
1731 format = ISOC_MODE_YUV420;
1732 }
1733 value[0] = 0x0A; //TODO: See the effect of the filter
1734 value[1] = format;
1735 rc = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1),
1736 USBVISION_OP_CODE,
1737 USB_DIR_OUT | USB_TYPE_VENDOR |
1738 USB_RECIP_ENDPOINT, 0,
1739 (__u16) USBVISION_FILT_CONT, value, 2, HZ);
1740
1741 if (rc < 0) {
1742 printk(KERN_ERR "%s: ERROR=%d. USBVISION stopped - "
1743 "reconnect or reload driver.\n", proc, rc);
1744 }
1745 usbvision->isocMode = format;
1746 return rc;
1747}
1748
1749/*
1750 * usbvision_set_output()
1751 *
1752 */
1753
1754int usbvision_set_output(struct usb_usbvision *usbvision, int width,
1755 int height)
1756{
1757 int errCode = 0;
1758 int UsbWidth, UsbHeight;
1759 unsigned int frameRate=0, frameDrop=0;
1760 unsigned char value[4];
1761
1762 if (!USBVISION_IS_OPERATIONAL(usbvision)) {
1763 return 0;
1764 }
1765
1766 if (width > MAX_USB_WIDTH) {
1767 UsbWidth = width / 2;
1768 usbvision->stretch_width = 2;
1769 }
1770 else {
1771 UsbWidth = width;
1772 usbvision->stretch_width = 1;
1773 }
1774
1775 if (height > MAX_USB_HEIGHT) {
1776 UsbHeight = height / 2;
1777 usbvision->stretch_height = 2;
1778 }
1779 else {
1780 UsbHeight = height;
1781 usbvision->stretch_height = 1;
1782 }
1783
1784 RESTRICT_TO_RANGE(UsbWidth, MIN_FRAME_WIDTH, MAX_USB_WIDTH);
1785 UsbWidth &= ~(MIN_FRAME_WIDTH-1);
1786 RESTRICT_TO_RANGE(UsbHeight, MIN_FRAME_HEIGHT, MAX_USB_HEIGHT);
1787 UsbHeight &= ~(1);
1788
1789 PDEBUG(DBG_FUNC, "usb %dx%d; screen %dx%d; stretch %dx%d",
1790 UsbWidth, UsbHeight, width, height,
1791 usbvision->stretch_width, usbvision->stretch_height);
1792
1793 /* I'll not rewrite the same values */
1794 if ((UsbWidth != usbvision->curwidth) || (UsbHeight != usbvision->curheight)) {
1795 value[0] = UsbWidth & 0xff; //LSB
1796 value[1] = (UsbWidth >> 8) & 0x03; //MSB
1797 value[2] = UsbHeight & 0xff; //LSB
1798 value[3] = (UsbHeight >> 8) & 0x03; //MSB
1799
1800 errCode = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1),
1801 USBVISION_OP_CODE,
1802 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT,
1803 0, (__u16) USBVISION_LXSIZE_O, value, 4, HZ);
1804
1805 if (errCode < 0) {
1806 err("%s failed: error %d", __FUNCTION__, errCode);
1807 return errCode;
1808 }
1809 usbvision->curwidth = usbvision->stretch_width * UsbWidth;
1810 usbvision->curheight = usbvision->stretch_height * UsbHeight;
1811 }
1812
1813 if (usbvision->isocMode == ISOC_MODE_YUV422) {
1814 frameRate = (usbvision->isocPacketSize * 1000) / (UsbWidth * UsbHeight * 2);
1815 }
1816 else if (usbvision->isocMode == ISOC_MODE_YUV420) {
1817 frameRate = (usbvision->isocPacketSize * 1000) / ((UsbWidth * UsbHeight * 12) / 8);
1818 }
1819 else {
1820 frameRate = FRAMERATE_MAX;
1821 }
1822
1823 if (usbvision->tvnorm->id & V4L2_STD_625_50) {
1824 frameDrop = frameRate * 32 / 25 - 1;
1825 }
1826 else if (usbvision->tvnorm->id & V4L2_STD_525_60) {
1827 frameDrop = frameRate * 32 / 30 - 1;
1828 }
1829
1830 RESTRICT_TO_RANGE(frameDrop, FRAMERATE_MIN, FRAMERATE_MAX);
1831
1832 PDEBUG(DBG_FUNC, "frameRate %d fps, frameDrop %d", frameRate, frameDrop);
1833
1834 frameDrop = FRAMERATE_MAX; // We can allow the maximum here, because dropping is controlled
1835
1836 /* frameDrop = 7; => framePhase = 1, 5, 9, 13, 17, 21, 25, 0, 4, 8, ...
1837 => frameSkip = 4;
1838 => frameRate = (7 + 1) * 25 / 32 = 200 / 32 = 6.25;
1839
1840 frameDrop = 9; => framePhase = 1, 5, 8, 11, 14, 17, 21, 24, 27, 1, 4, 8, ...
1841 => frameSkip = 4, 3, 3, 3, 3, 4, 3, 3, 3, 3, 4, ...
1842 => frameRate = (9 + 1) * 25 / 32 = 250 / 32 = 7.8125;
1843 */
1844 errCode = usbvision_write_reg(usbvision, USBVISION_FRM_RATE, frameDrop);
1845 return errCode;
1846}
1847
1848
1849/*
1850 * usbvision_frames_alloc
1851 * allocate the maximum frames this driver can manage
1852 */
1853int usbvision_frames_alloc(struct usb_usbvision *usbvision)
1854{
1855 int i;
1856
1857 /* Allocate memory for the frame buffers */
1858 usbvision->max_frame_size = MAX_FRAME_SIZE;
1859 usbvision->fbuf_size = USBVISION_NUMFRAMES * usbvision->max_frame_size;
1860 usbvision->fbuf = usbvision_rvmalloc(usbvision->fbuf_size);
1861
1862 if(usbvision->fbuf == NULL) {
1863 err("%s: unable to allocate %d bytes for fbuf ",
1864 __FUNCTION__, usbvision->fbuf_size);
1865 return -ENOMEM;
1866 }
1867 spin_lock_init(&usbvision->queue_lock);
1868 init_waitqueue_head(&usbvision->wait_frame);
1869 init_waitqueue_head(&usbvision->wait_stream);
1870
1871 /* Allocate all buffers */
1872 for (i = 0; i < USBVISION_NUMFRAMES; i++) {
1873 usbvision->frame[i].index = i;
1874 usbvision->frame[i].grabstate = FrameState_Unused;
1875 usbvision->frame[i].data = usbvision->fbuf +
1876 i * usbvision->max_frame_size;
1877 /*
1878 * Set default sizes for read operation.
1879 */
1880 usbvision->stretch_width = 1;
1881 usbvision->stretch_height = 1;
1882 usbvision->frame[i].width = usbvision->curwidth;
1883 usbvision->frame[i].height = usbvision->curheight;
1884 usbvision->frame[i].bytes_read = 0;
1885 }
1886 return 0;
1887}
1888
1889/*
1890 * usbvision_frames_free
1891 * frees memory allocated for the frames
1892 */
1893void usbvision_frames_free(struct usb_usbvision *usbvision)
1894{
1895 /* Have to free all that memory */
1896 if (usbvision->fbuf != NULL) {
1897 usbvision_rvfree(usbvision->fbuf, usbvision->fbuf_size);
1898 usbvision->fbuf = NULL;
1899 }
1900}
1901/*
1902 * usbvision_empty_framequeues()
1903 * prepare queues for incoming and outgoing frames
1904 */
1905void usbvision_empty_framequeues(struct usb_usbvision *usbvision)
1906{
1907 u32 i;
1908
1909 INIT_LIST_HEAD(&(usbvision->inqueue));
1910 INIT_LIST_HEAD(&(usbvision->outqueue));
1911
1912 for (i = 0; i < USBVISION_NUMFRAMES; i++) {
1913 usbvision->frame[i].grabstate = FrameState_Unused;
1914 usbvision->frame[i].bytes_read = 0;
1915 }
1916}
1917
1918/*
1919 * usbvision_stream_interrupt()
1920 * stops streaming
1921 */
1922int usbvision_stream_interrupt(struct usb_usbvision *usbvision)
1923{
1924 int ret = 0;
1925
1926 /* stop reading from the device */
1927
1928 usbvision->streaming = Stream_Interrupt;
1929 ret = wait_event_timeout(usbvision->wait_stream,
1930 (usbvision->streaming == Stream_Idle),
1931 msecs_to_jiffies(USBVISION_NUMSBUF*USBVISION_URB_FRAMES));
1932 return ret;
1933}
1934
1935/*
1936 * usbvision_set_compress_params()
1937 *
1938 */
1939
1940static int usbvision_set_compress_params(struct usb_usbvision *usbvision)
1941{
1942 static const char proc[] = "usbvision_set_compresion_params: ";
1943 int rc;
1944 unsigned char value[6];
1945
1946 value[0] = 0x0F; // Intra-Compression cycle
1947 value[1] = 0x01; // Reg.45 one line per strip
1948 value[2] = 0x00; // Reg.46 Force intra mode on all new frames
1949 value[3] = 0x00; // Reg.47 FORCE_UP <- 0 normal operation (not force)
1950 value[4] = 0xA2; // Reg.48 BUF_THR I'm not sure if this does something in not compressed mode.
1951 value[5] = 0x00; // Reg.49 DVI_YUV This has nothing to do with compression
1952
1953 //catched values for NT1004
1954 // value[0] = 0xFF; // Never apply intra mode automatically
1955 // value[1] = 0xF1; // Use full frame height for virtual strip width; One line per strip
1956 // value[2] = 0x01; // Force intra mode on all new frames
1957 // value[3] = 0x00; // Strip size 400 Bytes; do not force up
1958 // value[4] = 0xA2; //
1959 if (!USBVISION_IS_OPERATIONAL(usbvision))
1960 return 0;
1961
1962 rc = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1),
1963 USBVISION_OP_CODE,
1964 USB_DIR_OUT | USB_TYPE_VENDOR |
1965 USB_RECIP_ENDPOINT, 0,
1966 (__u16) USBVISION_INTRA_CYC, value, 5, HZ);
1967
1968 if (rc < 0) {
1969 printk(KERN_ERR "%sERROR=%d. USBVISION stopped - "
1970 "reconnect or reload driver.\n", proc, rc);
1971 return rc;
1972 }
1973
1974 if (usbvision->bridgeType == BRIDGE_NT1004) {
1975 value[0] = 20; // PCM Threshold 1
1976 value[1] = 12; // PCM Threshold 2
1977 value[2] = 255; // Distorsion Threshold inter
1978 value[3] = 255; // Distorsion Threshold intra
1979 value[4] = 43; // Max Distorsion inter
1980 value[5] = 43; // Max Distorsion intra
1981 }
1982 else {
1983 value[0] = 20; // PCM Threshold 1
1984 value[1] = 12; // PCM Threshold 2
1985 value[2] = 255; // Distorsion Threshold d7-d0
1986 value[3] = 0; // Distorsion Threshold d11-d8
1987 value[4] = 43; // Max Distorsion d7-d0
1988 value[5] = 0; // Max Distorsion d8
1989 }
1990
1991 if (!USBVISION_IS_OPERATIONAL(usbvision))
1992 return 0;
1993
1994 rc = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1),
1995 USBVISION_OP_CODE,
1996 USB_DIR_OUT | USB_TYPE_VENDOR |
1997 USB_RECIP_ENDPOINT, 0,
1998 (__u16) USBVISION_PCM_THR1, value, 6, HZ);
1999
2000 if (rc < 0) {
2001 printk(KERN_ERR "%sERROR=%d. USBVISION stopped - "
2002 "reconnect or reload driver.\n", proc, rc);
2003 return rc;
2004 }
2005
2006
2007 return rc;
2008}
2009
2010
2011/*
2012 * usbvision_set_input()
2013 *
2014 * Set the input (saa711x, ...) size x y and other misc input params
2015 * I've no idea if this parameters are right
2016 *
2017 */
2018int usbvision_set_input(struct usb_usbvision *usbvision)
2019{
2020 static const char proc[] = "usbvision_set_input: ";
2021 int rc;
2022 unsigned char value[8];
2023 unsigned char dvi_yuv_value;
2024
2025 if (!USBVISION_IS_OPERATIONAL(usbvision))
2026 return 0;
2027
2028 /* Set input format expected from decoder*/
2029 if (usbvision_device_data[usbvision->DevModel].Vin_Reg1 >= 0) {
2030 value[0] = usbvision_device_data[usbvision->DevModel].Vin_Reg1 & 0xff;
2031 } else if(usbvision_device_data[usbvision->DevModel].Codec == CODEC_SAA7113) {
2032 /* SAA7113 uses 8 bit output */
2033 value[0] = USBVISION_8_422_SYNC;
2034 } else {
2035 /* I'm sure only about d2-d0 [010] 16 bit 4:2:2 usin sync pulses
2036 * as that is how saa7111 is configured */
2037 value[0] = USBVISION_16_422_SYNC;
2038 /* | USBVISION_VSNC_POL | USBVISION_VCLK_POL);*/
2039 }
2040
2041 rc = usbvision_write_reg(usbvision, USBVISION_VIN_REG1, value[0]);
2042 if (rc < 0) {
2043 printk(KERN_ERR "%sERROR=%d. USBVISION stopped - "
2044 "reconnect or reload driver.\n", proc, rc);
2045 return rc;
2046 }
2047
2048
2049 if (usbvision->tvnorm->id & V4L2_STD_PAL) {
2050 value[0] = 0xC0;
2051 value[1] = 0x02; //0x02C0 -> 704 Input video line length
2052 value[2] = 0x20;
2053 value[3] = 0x01; //0x0120 -> 288 Input video n. of lines
2054 value[4] = 0x60;
2055 value[5] = 0x00; //0x0060 -> 96 Input video h offset
2056 value[6] = 0x16;
2057 value[7] = 0x00; //0x0016 -> 22 Input video v offset
2058 } else if (usbvision->tvnorm->id & V4L2_STD_SECAM) {
2059 value[0] = 0xC0;
2060 value[1] = 0x02; //0x02C0 -> 704 Input video line length
2061 value[2] = 0x20;
2062 value[3] = 0x01; //0x0120 -> 288 Input video n. of lines
2063 value[4] = 0x01;
2064 value[5] = 0x00; //0x0001 -> 01 Input video h offset
2065 value[6] = 0x01;
2066 value[7] = 0x00; //0x0001 -> 01 Input video v offset
2067 } else { /* V4L2_STD_NTSC */
2068 value[0] = 0xD0;
2069 value[1] = 0x02; //0x02D0 -> 720 Input video line length
2070 value[2] = 0xF0;
2071 value[3] = 0x00; //0x00F0 -> 240 Input video number of lines
2072 value[4] = 0x50;
2073 value[5] = 0x00; //0x0050 -> 80 Input video h offset
2074 value[6] = 0x10;
2075 value[7] = 0x00; //0x0010 -> 16 Input video v offset
2076 }
2077
2078 if (usbvision_device_data[usbvision->DevModel].X_Offset >= 0) {
2079 value[4]=usbvision_device_data[usbvision->DevModel].X_Offset & 0xff;
2080 value[5]=(usbvision_device_data[usbvision->DevModel].X_Offset & 0x0300) >> 8;
2081 }
2082
2083 if (usbvision_device_data[usbvision->DevModel].Y_Offset >= 0) {
2084 value[6]=usbvision_device_data[usbvision->DevModel].Y_Offset & 0xff;
2085 value[7]=(usbvision_device_data[usbvision->DevModel].Y_Offset & 0x0300) >> 8;
2086 }
2087
2088 rc = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1),
2089 USBVISION_OP_CODE, /* USBVISION specific code */
2090 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT, 0,
2091 (__u16) USBVISION_LXSIZE_I, value, 8, HZ);
2092 if (rc < 0) {
2093 printk(KERN_ERR "%sERROR=%d. USBVISION stopped - "
2094 "reconnect or reload driver.\n", proc, rc);
2095 return rc;
2096 }
2097
2098
2099 dvi_yuv_value = 0x00; /* U comes after V, Ya comes after U/V, Yb comes after Yb */
2100
2101 if(usbvision_device_data[usbvision->DevModel].Dvi_yuv >= 0){
2102 dvi_yuv_value = usbvision_device_data[usbvision->DevModel].Dvi_yuv & 0xff;
2103 }
2104 else if(usbvision_device_data[usbvision->DevModel].Codec == CODEC_SAA7113) {
2105 /* This changes as the fine sync control changes. Further investigation necessary */
2106 dvi_yuv_value = 0x06;
2107 }
2108
2109 return (usbvision_write_reg(usbvision, USBVISION_DVI_YUV, dvi_yuv_value));
2110}
2111
2112
2113/*
2114 * usbvision_set_dram_settings()
2115 *
2116 * Set the buffer address needed by the usbvision dram to operate
2117 * This values has been taken with usbsnoop.
2118 *
2119 */
2120
2121static int usbvision_set_dram_settings(struct usb_usbvision *usbvision)
2122{
2123 int rc;
2124 unsigned char value[8];
2125
2126 if (usbvision->isocMode == ISOC_MODE_COMPRESS) {
2127 value[0] = 0x42;
2128 value[1] = 0x71;
2129 value[2] = 0xff;
2130 value[3] = 0x00;
2131 value[4] = 0x98;
2132 value[5] = 0xe0;
2133 value[6] = 0x71;
2134 value[7] = 0xff;
2135 // UR: 0x0E200-0x3FFFF = 204288 Words (1 Word = 2 Byte)
2136 // FDL: 0x00000-0x0E099 = 57498 Words
2137 // VDW: 0x0E3FF-0x3FFFF
2138 }
2139 else {
2140 value[0] = 0x42;
2141 value[1] = 0x00;
2142 value[2] = 0xff;
2143 value[3] = 0x00;
2144 value[4] = 0x00;
2145 value[5] = 0x00;
2146 value[6] = 0x00;
2147 value[7] = 0xff;
2148 }
2149 /* These are the values of the address of the video buffer,
2150 * they have to be loaded into the USBVISION_DRM_PRM1-8
2151 *
2152 * Start address of video output buffer for read: drm_prm1-2 -> 0x00000
2153 * End address of video output buffer for read: drm_prm1-3 -> 0x1ffff
2154 * Start address of video frame delay buffer: drm_prm1-4 -> 0x20000
2155 * Only used in compressed mode
2156 * End address of video frame delay buffer: drm_prm1-5-6 -> 0x3ffff
2157 * Only used in compressed mode
2158 * Start address of video output buffer for write: drm_prm1-7 -> 0x00000
2159 * End address of video output buffer for write: drm_prm1-8 -> 0x1ffff
2160 */
2161
2162 if (!USBVISION_IS_OPERATIONAL(usbvision))
2163 return 0;
2164
2165 rc = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1),
2166 USBVISION_OP_CODE, /* USBVISION specific code */
2167 USB_DIR_OUT | USB_TYPE_VENDOR |
2168 USB_RECIP_ENDPOINT, 0,
2169 (__u16) USBVISION_DRM_PRM1, value, 8, HZ);
2170
2171 if (rc < 0) {
2172 err("%sERROR=%d", __FUNCTION__, rc);
2173 return rc;
2174 }
2175
2176 /* Restart the video buffer logic */
2177 if ((rc = usbvision_write_reg(usbvision, USBVISION_DRM_CONT, USBVISION_RES_UR |
2178 USBVISION_RES_FDL | USBVISION_RES_VDW)) < 0)
2179 return rc;
2180 rc = usbvision_write_reg(usbvision, USBVISION_DRM_CONT, 0x00);
2181
2182 return rc;
2183}
2184
2185/*
2186 * ()
2187 *
2188 * Power on the device, enables suspend-resume logic
2189 * & reset the isoc End-Point
2190 *
2191 */
2192
2193int usbvision_power_on(struct usb_usbvision *usbvision)
2194{
2195 int errCode = 0;
2196
2197 PDEBUG(DBG_FUNC, "");
2198
2199 usbvision_write_reg(usbvision, USBVISION_PWR_REG, USBVISION_SSPND_EN);
2200 usbvision_write_reg(usbvision, USBVISION_PWR_REG,
2201 USBVISION_SSPND_EN | USBVISION_RES2);
2202 usbvision_write_reg(usbvision, USBVISION_PWR_REG,
2203 USBVISION_SSPND_EN | USBVISION_PWR_VID);
2204 errCode = usbvision_write_reg(usbvision, USBVISION_PWR_REG,
2205 USBVISION_SSPND_EN | USBVISION_PWR_VID | USBVISION_RES2);
2206 if (errCode == 1) {
2207 usbvision->power = 1;
2208 }
2209 PDEBUG(DBG_FUNC, "%s: errCode %d", (errCode<0)?"ERROR":"power is on", errCode);
2210 return errCode;
2211}
2212
2213
2214/*
2215 * usbvision timer stuff
2216 */
2217
2218// to call usbvision_power_off from task queue
2219static void call_usbvision_power_off(struct work_struct *work)
2220{
2221 struct usb_usbvision *usbvision = container_of(work, struct usb_usbvision, powerOffWork);
2222
2223 PDEBUG(DBG_FUNC, "");
2224 down_interruptible(&usbvision->lock);
2225 if(usbvision->user == 0) {
2226 usbvision_i2c_usb_del_bus(&usbvision->i2c_adap);
2227
2228 usbvision_power_off(usbvision);
2229 usbvision->initialized = 0;
2230 }
2231 up(&usbvision->lock);
2232}
2233
/* Timer callback: defers the real power-off to the shared workqueue,
 * since this runs in (soft)irq context where the work done by
 * call_usbvision_power_off() (semaphore, USB I/O) cannot happen. */
static void usbvision_powerOffTimer(unsigned long data)
{
	struct usb_usbvision *usbvision = (void *) data;

	PDEBUG(DBG_FUNC, "");
	del_timer(&usbvision->powerOffTimer);
	INIT_WORK(&usbvision->powerOffWork, call_usbvision_power_off);
	(void) schedule_work(&usbvision->powerOffWork);

}
2244
2245void usbvision_init_powerOffTimer(struct usb_usbvision *usbvision)
2246{
2247 init_timer(&usbvision->powerOffTimer);
2248 usbvision->powerOffTimer.data = (long) usbvision;
2249 usbvision->powerOffTimer.function = usbvision_powerOffTimer;
2250}
2251
2252void usbvision_set_powerOffTimer(struct usb_usbvision *usbvision)
2253{
2254 mod_timer(&usbvision->powerOffTimer, jiffies + USBVISION_POWEROFF_TIME);
2255}
2256
2257void usbvision_reset_powerOffTimer(struct usb_usbvision *usbvision)
2258{
2259 if (timer_pending(&usbvision->powerOffTimer)) {
2260 del_timer(&usbvision->powerOffTimer);
2261 }
2262}
2263
2264/*
2265 * usbvision_begin_streaming()
2266 * Sure you have to put bit 7 to 0, if not incoming frames are droped, but no
2267 * idea about the rest
2268 */
2269int usbvision_begin_streaming(struct usb_usbvision *usbvision)
2270{
2271 int errCode = 0;
2272
2273 if (usbvision->isocMode == ISOC_MODE_COMPRESS) {
2274 usbvision_init_compression(usbvision);
2275 }
2276 errCode = usbvision_write_reg(usbvision, USBVISION_VIN_REG2, USBVISION_NOHVALID |
2277 usbvision->Vin_Reg2_Preset);
2278 return errCode;
2279}
2280
2281/*
2282 * usbvision_restart_isoc()
2283 * Not sure yet if touching here PWR_REG make loose the config
2284 */
2285
2286int usbvision_restart_isoc(struct usb_usbvision *usbvision)
2287{
2288 int ret;
2289
2290 if (
2291 (ret =
2292 usbvision_write_reg(usbvision, USBVISION_PWR_REG,
2293 USBVISION_SSPND_EN | USBVISION_PWR_VID)) < 0)
2294 return ret;
2295 if (
2296 (ret =
2297 usbvision_write_reg(usbvision, USBVISION_PWR_REG,
2298 USBVISION_SSPND_EN | USBVISION_PWR_VID |
2299 USBVISION_RES2)) < 0)
2300 return ret;
2301 if (
2302 (ret =
2303 usbvision_write_reg(usbvision, USBVISION_VIN_REG2,
2304 USBVISION_KEEP_BLANK | USBVISION_NOHVALID |
2305 usbvision->Vin_Reg2_Preset)) < 0) return ret;
2306
2307 /* TODO: schedule timeout */
2308 while ((usbvision_read_reg(usbvision, USBVISION_STATUS_REG) && 0x01) != 1);
2309
2310 return 0;
2311}
2312
2313int usbvision_audio_off(struct usb_usbvision *usbvision)
2314{
2315 if (usbvision_write_reg(usbvision, USBVISION_IOPIN_REG, USBVISION_AUDIO_MUTE) < 0) {
2316 printk(KERN_ERR "usbvision_audio_off: can't wirte reg\n");
2317 return -1;
2318 }
2319 usbvision->AudioMute = 0;
2320 usbvision->AudioChannel = USBVISION_AUDIO_MUTE;
2321 return 0;
2322}
2323
2324int usbvision_set_audio(struct usb_usbvision *usbvision, int AudioChannel)
2325{
2326 if (!usbvision->AudioMute) {
2327 if (usbvision_write_reg(usbvision, USBVISION_IOPIN_REG, AudioChannel) < 0) {
2328 printk(KERN_ERR "usbvision_set_audio: can't write iopin register for audio switching\n");
2329 return -1;
2330 }
2331 }
2332 usbvision->AudioChannel = AudioChannel;
2333 return 0;
2334}
2335
/*
 * usbvision_setup()
 *
 * One-stop device initialization: programs the video format, the DRAM
 * layout, the compression parameters, the input window and the output
 * size, then restarts isochronous streaming.  The individual return
 * values are deliberately not checked; the caller learns about overall
 * device health from USBVISION_IS_OPERATIONAL().
 */
int usbvision_setup(struct usb_usbvision *usbvision,int format)
{
	usbvision_set_video_format(usbvision, format);
	usbvision_set_dram_settings(usbvision);
	usbvision_set_compress_params(usbvision);
	usbvision_set_input(usbvision);
	usbvision_set_output(usbvision, MAX_USB_WIDTH, MAX_USB_HEIGHT);
	usbvision_restart_isoc(usbvision);

	/* PCM stuff (original note: "cosas del PCM") */
	return USBVISION_IS_OPERATIONAL(usbvision);
}
2348
2349
2350int usbvision_sbuf_alloc(struct usb_usbvision *usbvision)
2351{
2352 int i, errCode = 0;
2353 const int sb_size = USBVISION_URB_FRAMES * USBVISION_MAX_ISOC_PACKET_SIZE;
2354
2355 /* Clean pointers so we know if we allocated something */
2356 for (i = 0; i < USBVISION_NUMSBUF; i++)
2357 usbvision->sbuf[i].data = NULL;
2358
2359 for (i = 0; i < USBVISION_NUMSBUF; i++) {
2360 usbvision->sbuf[i].data = kzalloc(sb_size, GFP_KERNEL);
2361 if (usbvision->sbuf[i].data == NULL) {
2362 err("%s: unable to allocate %d bytes for sbuf", __FUNCTION__, sb_size);
2363 errCode = -ENOMEM;
2364 break;
2365 }
2366 }
2367 return errCode;
2368}
2369
2370
2371void usbvision_sbuf_free(struct usb_usbvision *usbvision)
2372{
2373 int i;
2374
2375 for (i = 0; i < USBVISION_NUMSBUF; i++) {
2376 if (usbvision->sbuf[i].data != NULL) {
2377 kfree(usbvision->sbuf[i].data);
2378 usbvision->sbuf[i].data = NULL;
2379 }
2380 }
2381}
2382
2383/*
2384 * usbvision_init_isoc()
2385 *
2386 */
2387int usbvision_init_isoc(struct usb_usbvision *usbvision)
2388{
2389 struct usb_device *dev = usbvision->dev;
2390 int bufIdx, errCode, regValue;
2391
2392 if (!USBVISION_IS_OPERATIONAL(usbvision))
2393 return -EFAULT;
2394
2395 usbvision->curFrame = NULL;
2396 scratch_reset(usbvision);
2397
2398 /* Alternate interface 1 is is the biggest frame size */
2399 errCode = usb_set_interface(dev, usbvision->iface, usbvision->ifaceAltActive);
2400 if (errCode < 0) {
2401 usbvision->last_error = errCode;
2402 return -EBUSY;
2403 }
2404
2405 regValue = (16 - usbvision_read_reg(usbvision, USBVISION_ALTER_REG)) & 0x0F;
2406 usbvision->isocPacketSize = (regValue == 0) ? 0 : (regValue * 64) - 1;
2407 PDEBUG(DBG_ISOC, "ISO Packet Length:%d", usbvision->isocPacketSize);
2408
2409 usbvision->usb_bandwidth = regValue >> 1;
2410 PDEBUG(DBG_ISOC, "USB Bandwidth Usage: %dMbit/Sec", usbvision->usb_bandwidth);
2411
2412
2413
2414 /* We double buffer the Iso lists */
2415
2416 for (bufIdx = 0; bufIdx < USBVISION_NUMSBUF; bufIdx++) {
2417 int j, k;
2418 struct urb *urb;
2419
2420 urb = usb_alloc_urb(USBVISION_URB_FRAMES, GFP_KERNEL);
2421 if (urb == NULL) {
2422 err("%s: usb_alloc_urb() failed", __FUNCTION__);
2423 return -ENOMEM;
2424 }
2425 usbvision->sbuf[bufIdx].urb = urb;
2426 urb->dev = dev;
2427 urb->context = usbvision;
2428 urb->pipe = usb_rcvisocpipe(dev, usbvision->video_endp);
2429 urb->transfer_flags = URB_ISO_ASAP;
2430 urb->interval = 1;
2431 urb->transfer_buffer = usbvision->sbuf[bufIdx].data;
2432 urb->complete = usbvision_isocIrq;
2433 urb->number_of_packets = USBVISION_URB_FRAMES;
2434 urb->transfer_buffer_length =
2435 usbvision->isocPacketSize * USBVISION_URB_FRAMES;
2436 for (j = k = 0; j < USBVISION_URB_FRAMES; j++,
2437 k += usbvision->isocPacketSize) {
2438 urb->iso_frame_desc[j].offset = k;
2439 urb->iso_frame_desc[j].length = usbvision->isocPacketSize;
2440 }
2441 }
2442
2443
2444 /* Submit all URBs */
2445 for (bufIdx = 0; bufIdx < USBVISION_NUMSBUF; bufIdx++) {
2446 errCode = usb_submit_urb(usbvision->sbuf[bufIdx].urb, GFP_KERNEL);
2447 if (errCode) {
2448 err("%s: usb_submit_urb(%d) failed: error %d", __FUNCTION__, bufIdx, errCode);
2449 }
2450 }
2451
2452 usbvision->streaming = Stream_Idle;
2453 PDEBUG(DBG_ISOC, "%s: streaming=1 usbvision->video_endp=$%02x", __FUNCTION__, usbvision->video_endp);
2454 return 0;
2455}
2456
2457/*
2458 * usbvision_stop_isoc()
2459 *
2460 * This procedure stops streaming and deallocates URBs. Then it
2461 * activates zero-bandwidth alt. setting of the video interface.
2462 *
2463 */
2464void usbvision_stop_isoc(struct usb_usbvision *usbvision)
2465{
2466 int bufIdx, errCode, regValue;
2467
2468 if ((usbvision->streaming == Stream_Off) || (usbvision->dev == NULL))
2469 return;
2470
2471 /* Unschedule all of the iso td's */
2472 for (bufIdx = 0; bufIdx < USBVISION_NUMSBUF; bufIdx++) {
2473 usb_kill_urb(usbvision->sbuf[bufIdx].urb);
2474 usb_free_urb(usbvision->sbuf[bufIdx].urb);
2475 usbvision->sbuf[bufIdx].urb = NULL;
2476 }
2477
2478
2479 PDEBUG(DBG_ISOC, "%s: streaming=Stream_Off\n", __FUNCTION__);
2480 usbvision->streaming = Stream_Off;
2481
2482 if (!usbvision->remove_pending) {
2483
2484 /* Set packet size to 0 */
2485 errCode = usb_set_interface(usbvision->dev, usbvision->iface,
2486 usbvision->ifaceAltInactive);
2487 if (errCode < 0) {
2488 err("%s: usb_set_interface() failed: error %d", __FUNCTION__, errCode);
2489 usbvision->last_error = errCode;
2490 }
2491 regValue = (16 - usbvision_read_reg(usbvision, USBVISION_ALTER_REG)) & 0x0F;
2492 usbvision->isocPacketSize = (regValue == 0) ? 0 : (regValue * 64) - 1;
2493 PDEBUG(DBG_ISOC, "ISO Packet Length:%d", usbvision->isocPacketSize);
2494
2495 usbvision->usb_bandwidth = regValue >> 1;
2496 PDEBUG(DBG_ISOC, "USB Bandwidth Usage: %dMbit/Sec", usbvision->usb_bandwidth);
2497 }
2498}
2499
/*
 * usbvision_muxsel()
 *
 * Routes the requested video input (and its matching audio channel)
 * through the decoder chip via the i2c clients.  The decoder input
 * number for each channel depends on the codec model (SAA7113/SAA7111)
 * and, for SAA7113, on the SwitchSVideoInput module parameter.
 * Always returns 0.
 */
int usbvision_muxsel(struct usb_usbvision *usbvision, int channel)
{
	int mode[4];
	int audio[]= {1, 0, 0, 0};
	struct v4l2_routing route;
	//channel 0 is TV with audiochannel 1 (tuner mono)
	//channel 1 is Composite with audio channel 0 (line in)
	//channel 2 is S-Video with audio channel 0 (line in)
	//channel 3 is additional video inputs to the device with audio channel 0 (line in)

	/* NOTE(review): the upper clamp is video_inputs, not
	 * video_inputs - 1 — looks like an off-by-one; verify against how
	 * video_inputs is defined. */
	RESTRICT_TO_RANGE(channel, 0, usbvision->video_inputs);
	usbvision->ctl_input = channel;
	route.input = SAA7115_COMPOSITE1;
	call_i2c_clients(usbvision, VIDIOC_INT_S_VIDEO_ROUTING,&route);
	call_i2c_clients(usbvision, VIDIOC_S_INPUT, &usbvision->ctl_input);

	// set the new channel
	// Regular USB TV Tuners -> channel: 0 = Television, 1 = Composite, 2 = S-Video
	// Four video input devices -> channel: 0 = Chan White, 1 = Chan Green, 2 = Chan Yellow, 3 = Chan Red

	switch (usbvision_device_data[usbvision->DevModel].Codec) {
		case CODEC_SAA7113:
			if (SwitchSVideoInput) { // To handle problems with S-Video Input for some devices.  Use SwitchSVideoInput parameter when loading the module.
				mode[2] = 1;
			}
			else {
				mode[2] = 7;
			}
			if (usbvision_device_data[usbvision->DevModel].VideoChannels == 4) {
				mode[0] = 0; mode[1] = 2; mode[3] = 3;  // Special for four input devices
			}
			else {
				mode[0] = 0; mode[1] = 2; //modes for regular saa7113 devices
			}
			break;
		case CODEC_SAA7111:
			mode[0] = 0; mode[1] = 1; mode[2] = 7; //modes for saa7111
			break;
		default:
			mode[0] = 0; mode[1] = 1; mode[2] = 7; //default modes
	}
	route.input = mode[channel];
	call_i2c_clients(usbvision, VIDIOC_INT_S_VIDEO_ROUTING,&route);
	usbvision->channel = channel;
	usbvision_set_audio(usbvision, audio[channel]);
	return 0;
}
2547
2548/*
2549 * Overrides for Emacs so that we follow Linus's tabbing style.
2550 * ---------------------------------------------------------------------------
2551 * Local variables:
2552 * c-basic-offset: 8
2553 * End:
2554 */
diff --git a/drivers/media/video/usbvision/usbvision-i2c.c b/drivers/media/video/usbvision/usbvision-i2c.c
new file mode 100644
index 000000000000..0f3fba7ea6fe
--- /dev/null
+++ b/drivers/media/video/usbvision/usbvision-i2c.c
@@ -0,0 +1,571 @@
1/*
2 * I2C_ALGO_USB.C
3 * i2c algorithm for USB-I2C Bridges
4 *
5 * Copyright (c) 1999-2005 Joerg Heckenbach <joerg@heckenbach-aw.de>
6 * Dwaine Garden <dwainegarden@rogers.com>
7 *
8 * This module is part of usbvision driver project.
9 * Updates to driver completed by Dwaine P. Garden
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/delay.h>
30#include <linux/slab.h>
31#include <linux/version.h>
32#include <linux/utsname.h>
33#include <linux/init.h>
34#include <asm/uaccess.h>
35#include <linux/ioport.h>
36#include <linux/errno.h>
37#include <linux/sched.h>
38#include <linux/usb.h>
39#include <linux/i2c.h>
40#include "usbvision.h"
41
42#define DBG_I2C 1<<0
43#define DBG_ALGO 1<<1
44
45static int i2c_debug = 0;
46
47module_param (i2c_debug, int, 0644); // debug_i2c_usb mode of the device driver
48MODULE_PARM_DESC(i2c_debug, "enable debug messages [i2c]");
49
50#define PDEBUG(level, fmt, args...) \
51 if (i2c_debug & (level)) info("[%s:%d] " fmt, __PRETTY_FUNCTION__, __LINE__ , ## args)
52
53static int usbvision_i2c_write(void *data, unsigned char addr, char *buf,
54 short len);
55static int usbvision_i2c_read(void *data, unsigned char addr, char *buf,
56 short len);
57
58static inline int try_write_address(struct i2c_adapter *i2c_adap,
59 unsigned char addr, int retries)
60{
61 struct i2c_algo_usb_data *adap = i2c_adap->algo_data;
62 void *data;
63 int i, ret = -1;
64 char buf[4];
65
66 data = i2c_get_adapdata(i2c_adap);
67 buf[0] = 0x00;
68 for (i = 0; i <= retries; i++) {
69 ret = (usbvision_i2c_write(data, addr, buf, 1));
70 if (ret == 1)
71 break; /* success! */
72 udelay(5 /*adap->udelay */ );
73 if (i == retries) /* no success */
74 break;
75 udelay(adap->udelay);
76 }
77 if (i) {
78 PDEBUG(DBG_ALGO,"Needed %d retries for address %#2x", i, addr);
79 PDEBUG(DBG_ALGO,"Maybe there's no device at this address");
80 }
81 return ret;
82}
83
84static inline int try_read_address(struct i2c_adapter *i2c_adap,
85 unsigned char addr, int retries)
86{
87 struct i2c_algo_usb_data *adap = i2c_adap->algo_data;
88 void *data;
89 int i, ret = -1;
90 char buf[4];
91
92 data = i2c_get_adapdata(i2c_adap);
93 for (i = 0; i <= retries; i++) {
94 ret = (usbvision_i2c_read(data, addr, buf, 1));
95 if (ret == 1)
96 break; /* success! */
97 udelay(5 /*adap->udelay */ );
98 if (i == retries) /* no success */
99 break;
100 udelay(adap->udelay);
101 }
102 if (i) {
103 PDEBUG(DBG_ALGO,"Needed %d retries for address %#2x", i, addr);
104 PDEBUG(DBG_ALGO,"Maybe there's no device at this address");
105 }
106 return ret;
107}
108
/*
 * Build the on-wire slave address byte for @msg and verify that a device
 * responds at it (address phase of the transfer).
 *
 * For 10-bit addressing the extended address code 0xf0 | high bits is
 * written first, then (for reads) re-probed with the read bit set.
 * For 7-bit addressing the address is shifted left one and the R/W bit
 * placed in bit 0 (optionally inverted by I2C_M_REV_DIR_ADDR).
 *
 * The resolved address byte is stored in add[0]. Returns 0 on success,
 * -EREMOTEIO if no device ACKed.
 */
static inline int usb_find_address(struct i2c_adapter *i2c_adap,
				   struct i2c_msg *msg, int retries,
				   unsigned char *add)
{
	unsigned short flags = msg->flags;

	unsigned char addr;
	int ret;
	if ((flags & I2C_M_TEN)) {
		/* a ten bit address */
		addr = 0xf0 | ((msg->addr >> 7) & 0x03);
		/* try extended address code... */
		ret = try_write_address(i2c_adap, addr, retries);
		if (ret != 1) {
			err("died at extended address code, while writing");
			return -EREMOTEIO;
		}
		add[0] = addr;
		if (flags & I2C_M_RD) {
			/* okay, now switch into reading mode */
			addr |= 0x01;
			ret = try_read_address(i2c_adap, addr, retries);
			if (ret != 1) {
				err("died at extended address code, while reading");
				return -EREMOTEIO;
			}
		}

	} else {	/* normal 7bit address */
		addr = (msg->addr << 1);
		if (flags & I2C_M_RD)
			addr |= 1;
		if (flags & I2C_M_REV_DIR_ADDR)
			addr ^= 1;	/* protocol-mangling: invert R/W bit */

		add[0] = addr;
		if (flags & I2C_M_RD)
			ret = try_read_address(i2c_adap, addr, retries);
		else
			ret = try_write_address(i2c_adap, addr, retries);

		if (ret != 1) {
			return -EREMOTEIO;
		}
	}
	return 0;
}
156
157static int
158usb_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[], int num)
159{
160 struct i2c_msg *pmsg;
161 void *data;
162 int i, ret;
163 unsigned char addr;
164
165 data = i2c_get_adapdata(i2c_adap);
166
167 for (i = 0; i < num; i++) {
168 pmsg = &msgs[i];
169 ret = usb_find_address(i2c_adap, pmsg, i2c_adap->retries, &addr);
170 if (ret != 0) {
171 PDEBUG(DBG_ALGO,"got NAK from device, message #%d", i);
172 return (ret < 0) ? ret : -EREMOTEIO;
173 }
174
175 if (pmsg->flags & I2C_M_RD) {
176 /* read bytes into buffer */
177 ret = (usbvision_i2c_read(data, addr, pmsg->buf, pmsg->len));
178 if (ret < pmsg->len) {
179 return (ret < 0) ? ret : -EREMOTEIO;
180 }
181 } else {
182 /* write bytes from buffer */
183 ret = (usbvision_i2c_write(data, addr, pmsg->buf, pmsg->len));
184 if (ret < pmsg->len) {
185 return (ret < 0) ? ret : -EREMOTEIO;
186 }
187 }
188 }
189 return num;
190}
191
/* algo_control hook: no adapter-specific ioctls are supported. */
static int algo_control(struct i2c_adapter *adapter, unsigned int cmd, unsigned long arg)
{
	return 0;
}
196
/* Report the i2c functionality this bridge supports. */
static u32 usb_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR | I2C_FUNC_PROTOCOL_MANGLING;
}
201
202
203/* -----exported algorithm data: ------------------------------------- */
204
/* i2c algorithm operations for the USB-I2C bridge (no SMBus fast path). */
static struct i2c_algorithm i2c_usb_algo = {
	.master_xfer = usb_xfer,
	.smbus_xfer = NULL,
	.algo_control = algo_control,
	.functionality = usb_func,
};
211
212
213/*
214 * registering functions to load algorithms at runtime
215 */
216int usbvision_i2c_usb_add_bus(struct i2c_adapter *adap)
217{
218 PDEBUG(DBG_I2C, "I2C debugging is enabled [i2c]");
219 PDEBUG(DBG_ALGO, "ALGO debugging is enabled [i2c]");
220
221 /* register new adapter to i2c module... */
222
223 adap->algo = &i2c_usb_algo;
224
225 adap->timeout = 100; /* default values, should */
226 adap->retries = 3; /* be replaced by defines */
227
228 i2c_add_adapter(adap);
229
230 PDEBUG(DBG_ALGO,"i2c bus for %s registered", adap->name);
231
232 return 0;
233}
234
235
236int usbvision_i2c_usb_del_bus(struct i2c_adapter *adap)
237{
238
239 i2c_del_adapter(adap);
240
241 PDEBUG(DBG_ALGO,"i2c bus for %s unregistered", adap->name);
242
243 return 0;
244}
245
246
247/* ----------------------------------------------------------------------- */
248/* usbvision specific I2C functions */
249/* ----------------------------------------------------------------------- */
250static struct i2c_adapter i2c_adap_template;
251static struct i2c_algo_usb_data i2c_algo_template;
252static struct i2c_client i2c_client_template;
253
/*
 * Set up the per-device i2c adapter/algorithm/client from the static
 * templates, program the chip's serial mode register, request the
 * decoder/tuner modules the board needs, and register the i2c bus.
 * Returns 0 on success, -EBUSY if the serial mode register cannot be
 * written, or the result of usbvision_i2c_usb_add_bus().
 */
int usbvision_init_i2c(struct usb_usbvision *usbvision)
{
	/* start from the file-scope templates so each device instance
	 * gets independent copies it may modify */
	memcpy(&usbvision->i2c_adap, &i2c_adap_template,
	       sizeof(struct i2c_adapter));
	memcpy(&usbvision->i2c_algo, &i2c_algo_template,
	       sizeof(struct i2c_algo_usb_data));
	memcpy(&usbvision->i2c_client, &i2c_client_template,
	       sizeof(struct i2c_client));

	/* make the adapter name unique per video minor */
	sprintf(usbvision->i2c_adap.name + strlen(usbvision->i2c_adap.name),
		" #%d", usbvision->vdev->minor & 0x1f);
	PDEBUG(DBG_I2C,"Adaptername: %s", usbvision->i2c_adap.name);

	i2c_set_adapdata(&usbvision->i2c_adap, usbvision);
	i2c_set_clientdata(&usbvision->i2c_client, usbvision);
	i2c_set_algo_usb_data(&usbvision->i2c_algo, usbvision);

	usbvision->i2c_adap.algo_data = &usbvision->i2c_algo;
	usbvision->i2c_client.adapter = &usbvision->i2c_adap;

	/* must succeed before any i2c traffic is possible */
	if (usbvision_write_reg(usbvision, USBVISION_SER_MODE, USBVISION_IIC_LRNACK) < 0) {
		printk(KERN_ERR "usbvision_init_i2c: can't write reg\n");
		return -EBUSY;
	}

#ifdef CONFIG_MODULES
	/* Request the load of the i2c modules we need */
	switch (usbvision_device_data[usbvision->DevModel].Codec) {
	case CODEC_SAA7113:
		request_module("saa7115");
		break;
	case CODEC_SAA7111:
		request_module("saa7115");	/* saa7115 module also drives the 7111 */
		break;
	}
	if (usbvision_device_data[usbvision->DevModel].Tuner == 1) {
		request_module("tuner");
	}
#endif

	return usbvision_i2c_usb_add_bus(&usbvision->i2c_adap);
}
296
/*
 * Broadcast command @cmd with argument @arg to every i2c client on this
 * device's bus. Must not be called before usbvision_init_i2c() has set
 * up algo_data (hence the BUG_ON).
 */
void call_i2c_clients(struct usb_usbvision *usbvision, unsigned int cmd,
		      void *arg)
{
	BUG_ON(NULL == usbvision->i2c_adap.algo_data);
	i2c_clients_command(&usbvision->i2c_adap, cmd, arg);
}
303
/*
 * client_register callback: invoked by the i2c core whenever a chip
 * driver attaches to our bus. Dispatch on the 8-bit (shifted) address
 * to configure known chips; anything unrecognized is assumed to be the
 * tuner. Always returns 0.
 */
static int attach_inform(struct i2c_client *client)
{
	struct usb_usbvision *usbvision;

	usbvision = (struct usb_usbvision *)i2c_get_adapdata(client->adapter);

	switch (client->addr << 1) {
	case 0x43:	/* TDA9887 demodulator variants */
	case 0x4b:
		{
			struct tuner_setup tun_setup;

			tun_setup.mode_mask = T_ANALOG_TV | T_RADIO;
			tun_setup.type = TUNER_TDA9887;
			tun_setup.addr = client->addr;

			call_i2c_clients(usbvision, TUNER_SET_TYPE_ADDR, &tun_setup);

			break;
		}
	case 0x42:
		PDEBUG(DBG_I2C,"attach_inform: saa7114 detected.");
		break;
	case 0x4a:
		PDEBUG(DBG_I2C,"attach_inform: saa7113 detected.");
		break;
	case 0xa0:
		PDEBUG(DBG_I2C,"attach_inform: eeprom detected.");
		break;

	default:
		{
			struct tuner_setup tun_setup;

			PDEBUG(DBG_I2C,"attach inform: detected I2C address %x", client->addr << 1);
			/* remember the address; configure only if the board
			 * declares a tuner with a known type */
			usbvision->tuner_addr = client->addr;

			if ((usbvision->have_tuner) && (usbvision->tuner_type != -1)) {
				tun_setup.mode_mask = T_ANALOG_TV | T_RADIO;
				tun_setup.type = usbvision->tuner_type;
				tun_setup.addr = usbvision->tuner_addr;
				call_i2c_clients(usbvision, TUNER_SET_TYPE_ADDR, &tun_setup);
			}
		}
		break;
	}
	return 0;
}
352
353static int detach_inform(struct i2c_client *client)
354{
355 struct usb_usbvision *usbvision;
356
357 usbvision = (struct usb_usbvision *)i2c_get_adapdata(client->adapter);
358
359 PDEBUG(DBG_I2C,"usbvision[%d] detaches %s", usbvision->nr, client->name);
360 return 0;
361}
362
/*
 * Read up to 4 bytes from i2c device @addr through the NT100x serial
 * interface registers. Retries the whole cycle up to 5 times when the
 * device NAKs. Returns @len on success, a negative errno on register
 * access failure, or -1 when retries are exhausted.
 */
static int
usbvision_i2c_read_max4(struct usb_usbvision *usbvision, unsigned char addr,
			char *buf, short len)
{
	int rc, retries;

	for (retries = 5;;) {
		/* latch target slave address */
		rc = usbvision_write_reg(usbvision, USBVISION_SER_ADRS, addr);
		if (rc < 0)
			return rc;

		/* Initiate byte read cycle */
		/* USBVISION_SER_CONT <- d0-d2 n. of bytes to r/w */
		/* d3 0=Wr 1=Rd */
		rc = usbvision_write_reg(usbvision, USBVISION_SER_CONT,
					 (len & 0x07) | 0x18);
		if (rc < 0)
			return rc;

		/* Test for Busy and ACK */
		do {
			/* USBVISION_SER_CONT -> d4 == 0 busy */
			rc = usbvision_read_reg(usbvision, USBVISION_SER_CONT);
		} while (rc > 0 && ((rc & 0x10) != 0));	/* Retry while busy */
		if (rc < 0)
			return rc;

		/* USBVISION_SER_CONT -> d5 == 1 Not ack */
		if ((rc & 0x20) == 0)	/* Ack? */
			break;

		/* I2C abort */
		rc = usbvision_write_reg(usbvision, USBVISION_SER_CONT, 0x00);
		if (rc < 0)
			return rc;

		if (--retries < 0)
			return -1;
	}

	/* copy out the data registers; cases deliberately fall through
	 * so a read of N bytes collects DAT1..DATN */
	switch (len) {
	case 4:
		buf[3] = usbvision_read_reg(usbvision, USBVISION_SER_DAT4);
		/* fallthrough */
	case 3:
		buf[2] = usbvision_read_reg(usbvision, USBVISION_SER_DAT3);
		/* fallthrough */
	case 2:
		buf[1] = usbvision_read_reg(usbvision, USBVISION_SER_DAT2);
		/* fallthrough */
	case 1:
		buf[0] = usbvision_read_reg(usbvision, USBVISION_SER_DAT1);
		break;
	default:
		printk(KERN_ERR
		       "usbvision_i2c_read_max4: buffer length > 4\n");
	}

	if (i2c_debug & DBG_I2C) {
		int idx;
		for (idx = 0; idx < len; idx++) {
			PDEBUG(DBG_I2C,"read %x from address %x", (unsigned char)buf[idx], addr);
		}
	}
	return len;
}
426
427
/*
 * Write up to 4 bytes to i2c device @addr. The address, the serial
 * control byte and the data are sent in one vendor control message
 * starting at USBVISION_SER_ADRS, then the write cycle is triggered via
 * USBVISION_SER_CONT. Retries up to 5 times on NAK. Returns @len on
 * success, a negative errno on USB/register failure, or -1 when
 * retries are exhausted.
 */
static int usbvision_i2c_write_max4(struct usb_usbvision *usbvision,
				    unsigned char addr, const char *buf,
				    short len)
{
	int rc, retries;
	int i;
	unsigned char value[6];	/* addr + control + up to 4 data bytes */
	unsigned char ser_cont;

	/* d0-d2: byte count, d3 clear: write, d4: start */
	ser_cont = (len & 0x07) | 0x10;

	value[0] = addr;
	value[1] = ser_cont;
	for (i = 0; i < len; i++)
		value[i + 2] = buf[i];

	for (retries = 5;;) {
		/* push addr/control/data into consecutive chip registers */
		rc = usb_control_msg(usbvision->dev,
				     usb_sndctrlpipe(usbvision->dev, 1),
				     USBVISION_OP_CODE,
				     USB_DIR_OUT | USB_TYPE_VENDOR |
				     USB_RECIP_ENDPOINT, 0,
				     (__u16) USBVISION_SER_ADRS, value,
				     len + 2, HZ);

		if (rc < 0)
			return rc;

		/* kick off the write cycle */
		rc = usbvision_write_reg(usbvision, USBVISION_SER_CONT,
					 (len & 0x07) | 0x10);
		if (rc < 0)
			return rc;

		/* Test for Busy and ACK */
		do {
			rc = usbvision_read_reg(usbvision, USBVISION_SER_CONT);
		} while (rc > 0 && ((rc & 0x10) != 0));	/* Retry while busy */
		if (rc < 0)
			return rc;

		if ((rc & 0x20) == 0)	/* Ack? */
			break;

		/* I2C abort */
		usbvision_write_reg(usbvision, USBVISION_SER_CONT, 0x00);

		if (--retries < 0)
			return -1;

	}

	if (i2c_debug & DBG_I2C) {
		int idx;
		for (idx = 0; idx < len; idx++) {
			PDEBUG(DBG_I2C,"wrote %x at address %x", (unsigned char)buf[idx], addr);
		}
	}
	return len;
}
487
488static int usbvision_i2c_write(void *data, unsigned char addr, char *buf,
489 short len)
490{
491 char *bufPtr = buf;
492 int retval;
493 int wrcount = 0;
494 int count;
495 int maxLen = 4;
496 struct usb_usbvision *usbvision = (struct usb_usbvision *) data;
497
498 while (len > 0) {
499 count = (len > maxLen) ? maxLen : len;
500 retval = usbvision_i2c_write_max4(usbvision, addr, bufPtr, count);
501 if (retval > 0) {
502 len -= count;
503 bufPtr += count;
504 wrcount += count;
505 } else
506 return (retval < 0) ? retval : -EFAULT;
507 }
508 return wrcount;
509}
510
511static int usbvision_i2c_read(void *data, unsigned char addr, char *buf,
512 short len)
513{
514 char temp[4];
515 int retval, i;
516 int rdcount = 0;
517 int count;
518 struct usb_usbvision *usbvision = (struct usb_usbvision *) data;
519
520 while (len > 0) {
521 count = (len > 3) ? 4 : len;
522 retval = usbvision_i2c_read_max4(usbvision, addr, temp, count);
523 if (retval > 0) {
524 for (i = 0; i < len; i++)
525 buf[rdcount + i] = temp[i];
526 len -= count;
527 rdcount += count;
528 } else
529 return (retval < 0) ? retval : -EFAULT;
530 }
531 return rdcount;
532}
533
/* Templates copied per-device by usbvision_init_i2c(). */

/* Algorithm data: transfer callbacks and timing defaults. */
static struct i2c_algo_usb_data i2c_algo_template = {
	.data = NULL,
	.inb = usbvision_i2c_read,
	.outb = usbvision_i2c_write,
	.udelay = 10,
	.mdelay = 10,
	.timeout = 100,
};

/* Adapter template; name gets a " #N" minor suffix appended later. */
static struct i2c_adapter i2c_adap_template = {
	.owner = THIS_MODULE,
	.name = "usbvision",
	.id = I2C_HW_B_BT848,	/* FIXME */
	.algo = NULL,
	.algo_data = NULL,
	.client_register = attach_inform,
	.client_unregister = detach_inform,
#ifdef I2C_ADAP_CLASS_TV_ANALOG
	.class = I2C_ADAP_CLASS_TV_ANALOG,
#else
	.class = I2C_CLASS_TV_ANALOG,
#endif
};

static struct i2c_client i2c_client_template = {
	.name = "usbvision internal",
};
561
562EXPORT_SYMBOL(usbvision_i2c_usb_add_bus);
563EXPORT_SYMBOL(usbvision_i2c_usb_del_bus);
564
565/*
566 * Overrides for Emacs so that we follow Linus's tabbing style.
567 * ---------------------------------------------------------------------------
568 * Local variables:
569 * c-basic-offset: 8
570 * End:
571 */
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
new file mode 100644
index 000000000000..864446c012eb
--- /dev/null
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -0,0 +1,2051 @@
1/*
2 * USB USBVISION Video device driver 0.9.9
3 *
4 *
5 *
6 * Copyright (c) 1999-2005 Joerg Heckenbach <joerg@heckenbach-aw.de>
7 *
8 * This module is part of usbvision driver project.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 * Let's call the version 0.... until compression decoding is completely
25 * implemented.
26 *
27 * This driver is written by Jose Ignacio Gijon and Joerg Heckenbach.
28 * It was based on USB CPiA driver written by Peter Pregler,
29 * Scott J. Bertin and Johannes Erdfelt
30 * Ideas are taken from bttv driver by Ralph Metzler, Marcus Metzler &
31 * Gerd Knorr and zoran 36120/36125 driver by Pauline Middelink
32 * Updates to driver completed by Dwaine P. Garden
33 *
34 *
35 * TODO:
36 * - use submit_urb for all setup packets
37 * - Fix memory settings for nt1004. It is 4 times as big as the
38 * nt1003 memory.
39 * - Add audio on endpoint 3 for nt1004 chip. Seems impossible, needs a codec interface. Which one?
40 * - Clean up the driver.
41 * - optimization for performance.
42 * - Add Videotext capability (VBI). Working on it.....
43 * - Check audio for other devices
44 *
45 */
46
47#include <linux/version.h>
48#include <linux/kernel.h>
49#include <linux/sched.h>
50#include <linux/list.h>
51#include <linux/timer.h>
52#include <linux/slab.h>
53#include <linux/mm.h>
54#include <linux/utsname.h>
55#include <linux/highmem.h>
56#include <linux/smp_lock.h>
57#include <linux/videodev.h>
58#include <linux/vmalloc.h>
59#include <linux/module.h>
60#include <linux/init.h>
61#include <linux/spinlock.h>
62#include <asm/io.h>
63#include <linux/videodev2.h>
64#include <linux/video_decoder.h>
65#include <linux/i2c.h>
66
67#include <media/saa7115.h>
68#include <media/v4l2-common.h>
69#include <media/tuner.h>
70#include <media/audiochip.h>
71
72#include <linux/moduleparam.h>
73#include <linux/workqueue.h>
74
75#ifdef CONFIG_KMOD
76#include <linux/kmod.h>
77#endif
78
79#include "usbvision.h"
80
81#define DRIVER_AUTHOR "Joerg Heckenbach <joerg@heckenbach-aw.de>, Dwaine Garden <DwaineGarden@rogers.com>"
82#define DRIVER_NAME "usbvision"
83#define DRIVER_ALIAS "USBVision"
84#define DRIVER_DESC "USBVision USB Video Device Driver for Linux"
85#define DRIVER_LICENSE "GPL"
86#define USBVISION_DRIVER_VERSION_MAJOR 0
87#define USBVISION_DRIVER_VERSION_MINOR 9
88#define USBVISION_DRIVER_VERSION_PATCHLEVEL 9
89#define USBVISION_DRIVER_VERSION KERNEL_VERSION(USBVISION_DRIVER_VERSION_MAJOR,USBVISION_DRIVER_VERSION_MINOR,USBVISION_DRIVER_VERSION_PATCHLEVEL)
90#define USBVISION_VERSION_STRING __stringify(USBVISION_DRIVER_VERSION_MAJOR) "." __stringify(USBVISION_DRIVER_VERSION_MINOR) "." __stringify(USBVISION_DRIVER_VERSION_PATCHLEVEL)
91
92#define ENABLE_HEXDUMP 0 /* Enable if you need it */
93
94
95#ifdef USBVISION_DEBUG
96 #define PDEBUG(level, fmt, args...) \
97 if (video_debug & (level)) info("[%s:%d] " fmt, __PRETTY_FUNCTION__, __LINE__ , ## args)
98#else
99 #define PDEBUG(level, fmt, args...) do {} while(0)
100#endif
101
102#define DBG_IOCTL 1<<0
103#define DBG_IO 1<<1
104#define DBG_PROBE 1<<2
105#define DBG_MMAP 1<<3
106
107//String operations
108#define rmspace(str) while(*str==' ') str++;
109#define goto2next(str) while(*str!=' ') str++; while(*str==' ') str++;
110
111
112static int usbvision_nr = 0; // sequential number of usbvision device
113
114static struct usbvision_v4l2_format_st usbvision_v4l2_format[] = {
115 { 1, 1, 8, V4L2_PIX_FMT_GREY , "GREY" },
116 { 1, 2, 16, V4L2_PIX_FMT_RGB565 , "RGB565" },
117 { 1, 3, 24, V4L2_PIX_FMT_RGB24 , "RGB24" },
118 { 1, 4, 32, V4L2_PIX_FMT_RGB32 , "RGB32" },
119 { 1, 2, 16, V4L2_PIX_FMT_RGB555 , "RGB555" },
120 { 1, 2, 16, V4L2_PIX_FMT_YUYV , "YUV422" },
121 { 1, 2, 12, V4L2_PIX_FMT_YVU420 , "YUV420P" }, // 1.5 !
122 { 1, 2, 16, V4L2_PIX_FMT_YUV422P , "YUV422P" }
123};
124
125/* supported tv norms */
126static struct usbvision_tvnorm tvnorms[] = {
127 {
128 .name = "PAL",
129 .id = V4L2_STD_PAL,
130 }, {
131 .name = "NTSC",
132 .id = V4L2_STD_NTSC,
133 }, {
134 .name = "SECAM",
135 .id = V4L2_STD_SECAM,
136 }, {
137 .name = "PAL-M",
138 .id = V4L2_STD_PAL_M,
139 }
140};
141
142#define TVNORMS ARRAY_SIZE(tvnorms)
143
144// Function prototypes
145static void usbvision_release(struct usb_usbvision *usbvision);
146
147// Default initalization of device driver parameters
148static int isocMode = ISOC_MODE_COMPRESS; // Set the default format for ISOC endpoint
149static int video_debug = 0; // Set the default Debug Mode of the device driver
150static int PowerOnAtOpen = 1; // Set the default device to power on at startup
151static int video_nr = -1; // Sequential Number of Video Device
152static int radio_nr = -1; // Sequential Number of Radio Device
153static int vbi_nr = -1; // Sequential Number of VBI Device
154static char *CustomDevice=NULL; // Set as nothing....
155
156// Grab parameters for the device driver
157
158#if defined(module_param) // Showing parameters under SYSFS
159module_param(isocMode, int, 0444);
160module_param(video_debug, int, 0444);
161module_param(PowerOnAtOpen, int, 0444);
162module_param(video_nr, int, 0444);
163module_param(radio_nr, int, 0444);
164module_param(vbi_nr, int, 0444);
165module_param(CustomDevice, charp, 0444);
166#else // Old Style
167MODULE_PARAM(isocMode, "i");
168MODULE_PARM(video_debug, "i"); // Grab the Debug Mode of the device driver
169MODULE_PARM(adjustCompression, "i"); // Grab the compression to be adaptive
170MODULE_PARM(PowerOnAtOpen, "i"); // Grab the device to power on at startup
171MODULE_PARM(SwitchSVideoInput, "i"); // To help people with Black and White output with using s-video input. Some cables and input device are wired differently.
172MODULE_PARM(video_nr, "i"); // video_nr option allows to specify a certain /dev/videoX device (like /dev/video0 or /dev/video1 ...)
173MODULE_PARM(radio_nr, "i"); // radio_nr option allows to specify a certain /dev/radioX device (like /dev/radio0 or /dev/radio1 ...)
174MODULE_PARM(vbi_nr, "i"); // vbi_nr option allows to specify a certain /dev/vbiX device (like /dev/vbi0 or /dev/vbi1 ...)
175MODULE_PARM(CustomDevice, "s"); // .... CustomDevice
176#endif
177
178MODULE_PARM_DESC(isocMode, " Set the default format for ISOC endpoint. Default: 0x60 (Compression On)");
179MODULE_PARM_DESC(video_debug, " Set the default Debug Mode of the device driver. Default: 0 (Off)");
180MODULE_PARM_DESC(PowerOnAtOpen, " Set the default device to power on when device is opened. Default: 1 (On)");
181MODULE_PARM_DESC(video_nr, "Set video device number (/dev/videoX). Default: -1 (autodetect)");
182MODULE_PARM_DESC(radio_nr, "Set radio device number (/dev/radioX). Default: -1 (autodetect)");
183MODULE_PARM_DESC(vbi_nr, "Set vbi device number (/dev/vbiX). Default: -1 (autodetect)");
184MODULE_PARM_DESC(CustomDevice, " Define the fine tuning parameters for the device. Default: null");
185
186
187// Misc stuff
188MODULE_AUTHOR(DRIVER_AUTHOR);
189MODULE_DESCRIPTION(DRIVER_DESC);
190MODULE_LICENSE(DRIVER_LICENSE);
191MODULE_VERSION(USBVISION_VERSION_STRING);
192MODULE_ALIAS(DRIVER_ALIAS);
193
194
195/****************************************************************************************/
196/* SYSFS Code - Copied from the stv680.c usb module. */
197/* Device information is located at /sys/class/video4linux/video0 */
198/* Device parameters information is located at /sys/module/usbvision */
199/* Device USB Information is located at /sys/bus/usb/drivers/USBVision Video Grabber */
200/****************************************************************************************/
201
202
#define YES_NO(x) ((x) ? "Yes" : "No")

/* Map a class_device back to the driver's per-device structure. */
static inline struct usb_usbvision *cd_to_usbvision(struct class_device *cd)
{
	struct video_device *vdev = to_video_device(cd);
	return video_get_drvdata(vdev);
}

/* /sys attribute: driver version string. */
static ssize_t show_version(struct class_device *cd, char *buf)
{
	return sprintf(buf, "%s\n", USBVISION_VERSION_STRING);
}
static CLASS_DEVICE_ATTR(version, S_IRUGO, show_version, NULL);

/* /sys attribute: board model name from the device table. */
static ssize_t show_model(struct class_device *class_dev, char *buf)
{
	struct video_device *vdev = to_video_device(class_dev);
	struct usb_usbvision *usbvision = video_get_drvdata(vdev);
	return sprintf(buf, "%s\n", usbvision_device_data[usbvision->DevModel].ModelString);
}
static CLASS_DEVICE_ATTR(model, S_IRUGO, show_model, NULL);

/* /sys attribute: current hue (control value scaled down by 8 bits). */
static ssize_t show_hue(struct class_device *class_dev, char *buf)
{
	struct video_device *vdev = to_video_device(class_dev);
	struct usb_usbvision *usbvision = video_get_drvdata(vdev);
	struct v4l2_control ctrl;
	ctrl.id = V4L2_CID_HUE;
	ctrl.value = 0;
	call_i2c_clients(usbvision, VIDIOC_G_CTRL, &ctrl);
	return sprintf(buf, "%d\n", ctrl.value >> 8);
}
static CLASS_DEVICE_ATTR(hue, S_IRUGO, show_hue, NULL);

/* /sys attribute: current contrast (scaled down by 8 bits). */
static ssize_t show_contrast(struct class_device *class_dev, char *buf)
{
	struct video_device *vdev = to_video_device(class_dev);
	struct usb_usbvision *usbvision = video_get_drvdata(vdev);
	struct v4l2_control ctrl;
	ctrl.id = V4L2_CID_CONTRAST;
	ctrl.value = 0;
	call_i2c_clients(usbvision, VIDIOC_G_CTRL, &ctrl);
	return sprintf(buf, "%d\n", ctrl.value >> 8);
}
static CLASS_DEVICE_ATTR(contrast, S_IRUGO, show_contrast, NULL);

/* /sys attribute: current brightness (scaled down by 8 bits). */
static ssize_t show_brightness(struct class_device *class_dev, char *buf)
{
	struct video_device *vdev = to_video_device(class_dev);
	struct usb_usbvision *usbvision = video_get_drvdata(vdev);
	struct v4l2_control ctrl;
	ctrl.id = V4L2_CID_BRIGHTNESS;
	ctrl.value = 0;
	call_i2c_clients(usbvision, VIDIOC_G_CTRL, &ctrl);
	return sprintf(buf, "%d\n", ctrl.value >> 8);
}
static CLASS_DEVICE_ATTR(brightness, S_IRUGO, show_brightness, NULL);

/* /sys attribute: current saturation (scaled down by 8 bits). */
static ssize_t show_saturation(struct class_device *class_dev, char *buf)
{
	struct video_device *vdev = to_video_device(class_dev);
	struct usb_usbvision *usbvision = video_get_drvdata(vdev);
	struct v4l2_control ctrl;
	ctrl.id = V4L2_CID_SATURATION;
	ctrl.value = 0;
	call_i2c_clients(usbvision, VIDIOC_G_CTRL, &ctrl);
	return sprintf(buf, "%d\n", ctrl.value >> 8);
}
static CLASS_DEVICE_ATTR(saturation, S_IRUGO, show_saturation, NULL);

/* /sys attribute: "Yes" while isoc streaming is active. */
static ssize_t show_streaming(struct class_device *class_dev, char *buf)
{
	struct video_device *vdev = to_video_device(class_dev);
	struct usb_usbvision *usbvision = video_get_drvdata(vdev);
	return sprintf(buf, "%s\n", YES_NO(usbvision->streaming==Stream_On?1:0));
}
static CLASS_DEVICE_ATTR(streaming, S_IRUGO, show_streaming, NULL);

/* /sys attribute: "Yes" when the compressed isoc mode is in use. */
static ssize_t show_compression(struct class_device *class_dev, char *buf)
{
	struct video_device *vdev = to_video_device(class_dev);
	struct usb_usbvision *usbvision = video_get_drvdata(vdev);
	return sprintf(buf, "%s\n", YES_NO(usbvision->isocMode==ISOC_MODE_COMPRESS));
}
static CLASS_DEVICE_ATTR(compression, S_IRUGO, show_compression, NULL);

/* /sys attribute: numeric bridge chip type. */
static ssize_t show_device_bridge(struct class_device *class_dev, char *buf)
{
	struct video_device *vdev = to_video_device(class_dev);
	struct usb_usbvision *usbvision = video_get_drvdata(vdev);
	return sprintf(buf, "%d\n", usbvision->bridgeType);
}
static CLASS_DEVICE_ATTR(bridge, S_IRUGO, show_device_bridge, NULL);
296
297static void usbvision_create_sysfs(struct video_device *vdev)
298{
299 int res;
300 if (vdev) {
301 res=video_device_create_file(vdev, &class_device_attr_version);
302 res=video_device_create_file(vdev, &class_device_attr_model);
303 res=video_device_create_file(vdev, &class_device_attr_hue);
304 res=video_device_create_file(vdev, &class_device_attr_contrast);
305 res=video_device_create_file(vdev, &class_device_attr_brightness);
306 res=video_device_create_file(vdev, &class_device_attr_saturation);
307 res=video_device_create_file(vdev, &class_device_attr_streaming);
308 res=video_device_create_file(vdev, &class_device_attr_compression);
309 res=video_device_create_file(vdev, &class_device_attr_bridge);
310 }
311}
312
313static void usbvision_remove_sysfs(struct video_device *vdev)
314{
315 if (vdev) {
316 video_device_remove_file(vdev, &class_device_attr_version);
317 video_device_remove_file(vdev, &class_device_attr_model);
318 video_device_remove_file(vdev, &class_device_attr_hue);
319 video_device_remove_file(vdev, &class_device_attr_contrast);
320 video_device_remove_file(vdev, &class_device_attr_brightness);
321 video_device_remove_file(vdev, &class_device_attr_saturation);
322 video_device_remove_file(vdev, &class_device_attr_streaming);
323 video_device_remove_file(vdev, &class_device_attr_compression);
324 video_device_remove_file(vdev, &class_device_attr_bridge);
325 }
326}
327
328
329/*
330 * usbvision_open()
331 *
332 * This is part of Video 4 Linux API. The driver can be opened by one
333 * client only (checks internal counter 'usbvision->user'). The procedure
334 * then allocates buffers needed for video processing.
335 *
336 */
static int usbvision_v4l2_open(struct inode *inode, struct file *file)
{
	struct video_device *dev = video_devdata(file);
	struct usb_usbvision *usbvision = (struct usb_usbvision *) video_get_drvdata(dev);
	int errCode = 0;

	PDEBUG(DBG_IO, "open");

	/* cancel any pending delayed power-off from a recent close() */
	usbvision_reset_powerOffTimer(usbvision);

	if (usbvision->user)
		errCode = -EBUSY;	/* single-open device */
	else {
		/* Allocate memory for the frame buffers */
		errCode = usbvision_frames_alloc(usbvision);
		if(!errCode) {
			/* Allocate memory for the scratch ring buffer */
			errCode = usbvision_scratch_alloc(usbvision);
			if(!errCode) {
				/* Allocate memory for the USB S buffers */
				errCode = usbvision_sbuf_alloc(usbvision);
				if ((!errCode) && (usbvision->isocMode==ISOC_MODE_COMPRESS)) {
					/* Allocate intermediate decompression buffers only if needed */
					errCode = usbvision_decompress_alloc(usbvision);
				}
			}
		}
		if (errCode) {
			/* Deallocate all buffers if trouble */
			usbvision_frames_free(usbvision);
			usbvision_scratch_free(usbvision);
			usbvision_sbuf_free(usbvision);
			usbvision_decompress_free(usbvision);
		}
	}

	/* If so far no errors then we shall start the camera */
	if (!errCode) {
		down(&usbvision->lock);
		if (usbvision->power == 0) {
			usbvision_power_on(usbvision);
			usbvision_init_i2c(usbvision);
		}

		/* Send init sequence only once, it's large! */
		if (!usbvision->initialized) {
			int setup_ok = 0;
			setup_ok = usbvision_setup(usbvision,isocMode);
			if (setup_ok)
				usbvision->initialized = 1;
			else
				errCode = -EBUSY;
		}

		if (!errCode) {
			usbvision_begin_streaming(usbvision);
			errCode = usbvision_init_isoc(usbvision);
			/* device needs to be initialized before isoc transfer */
			usbvision_muxsel(usbvision,0);
			usbvision->user++;
		}
		else {
			if (PowerOnAtOpen) {
				/* undo the power-on done above */
				usbvision_i2c_usb_del_bus(&usbvision->i2c_adap);
				usbvision_power_off(usbvision);
				usbvision->initialized = 0;
			}
		}
		up(&usbvision->lock);
	}

	/* NOTE(review): empty error branch — looks like a leftover
	 * placeholder; nothing extra is done on failure here */
	if (errCode) {
	}

	/* prepare queues */
	usbvision_empty_framequeues(usbvision);

	PDEBUG(DBG_IO, "success");
	return errCode;
}
418
419/*
420 * usbvision_v4l2_close()
421 *
422 * This is part of Video 4 Linux API. The procedure
423 * stops streaming and deallocates all buffers that were earlier
424 * allocated in usbvision_v4l2_open().
425 *
426 */
static int usbvision_v4l2_close(struct inode *inode, struct file *file)
{
	struct video_device *dev = video_devdata(file);
	struct usb_usbvision *usbvision = (struct usb_usbvision *) video_get_drvdata(dev);

	PDEBUG(DBG_IO, "close");
	down(&usbvision->lock);

	/* stop the hardware side first, then release buffers */
	usbvision_audio_off(usbvision);
	usbvision_restart_isoc(usbvision);
	usbvision_stop_isoc(usbvision);

	usbvision_decompress_free(usbvision);
	usbvision_rvfree(usbvision->fbuf, usbvision->fbuf_size);
	usbvision_scratch_free(usbvision);
	usbvision_sbuf_free(usbvision);

	usbvision->user--;

	if (PowerOnAtOpen) {
		/* power off in a little while to avoid off/on every close/open short sequences */
		usbvision_set_powerOffTimer(usbvision);
		usbvision->initialized = 0;
	}

	up(&usbvision->lock);

	/* if the USB device vanished while we were open, finish the
	 * deferred teardown now */
	if (usbvision->remove_pending) {
		info("%s: Final disconnect", __FUNCTION__);
		usbvision_release(usbvision);
	}

	PDEBUG(DBG_IO, "success");


	return 0;
}
464
465
466/*
467 * usbvision_ioctl()
468 *
469 * This is part of Video 4 Linux API. The procedure handles ioctl() calls.
470 *
471 */
472static int usbvision_v4l2_do_ioctl(struct inode *inode, struct file *file,
473 unsigned int cmd, void *arg)
474{
475 struct video_device *dev = video_devdata(file);
476 struct usb_usbvision *usbvision = (struct usb_usbvision *) video_get_drvdata(dev);
477
478 if (!USBVISION_IS_OPERATIONAL(usbvision))
479 return -EFAULT;
480
481 switch (cmd) {
482
483#ifdef CONFIG_VIDEO_ADV_DEBUG
484 /* ioctls to allow direct acces to the NT100x registers */
485 case VIDIOC_INT_G_REGISTER:
486 {
487 struct v4l2_register *reg = arg;
488 int errCode;
489
490 if (reg->i2c_id != 0)
491 return -EINVAL;
492 /* NT100x has a 8-bit register space */
493 errCode = usbvision_read_reg(usbvision, reg->reg&0xff);
494 if (errCode < 0) {
495 err("%s: VIDIOC_INT_G_REGISTER failed: error %d", __FUNCTION__, errCode);
496 }
497 else {
498 reg->val=(unsigned char)errCode;
499 PDEBUG(DBG_IOCTL, "VIDIOC_INT_G_REGISTER reg=0x%02X, value=0x%02X",
500 (unsigned int)reg->reg, reg->val);
501 errCode = 0; // No error
502 }
503 return errCode;
504 }
505 case VIDIOC_INT_S_REGISTER:
506 {
507 struct v4l2_register *reg = arg;
508 int errCode;
509
510 if (reg->i2c_id != 0)
511 return -EINVAL;
512 if (!capable(CAP_SYS_ADMIN))
513 return -EPERM;
514 errCode = usbvision_write_reg(usbvision, reg->reg&0xff, reg->val);
515 if (errCode < 0) {
516 err("%s: VIDIOC_INT_S_REGISTER failed: error %d", __FUNCTION__, errCode);
517 }
518 else {
519 PDEBUG(DBG_IOCTL, "VIDIOC_INT_S_REGISTER reg=0x%02X, value=0x%02X",
520 (unsigned int)reg->reg, reg->val);
521 errCode = 0;
522 }
523 return 0;
524 }
525#endif
526 case VIDIOC_QUERYCAP:
527 {
528 struct v4l2_capability *vc=arg;
529
530 memset(vc, 0, sizeof(*vc));
531 strlcpy(vc->driver, "USBVision", sizeof(vc->driver));
532 strlcpy(vc->card, usbvision_device_data[usbvision->DevModel].ModelString,
533 sizeof(vc->card));
534 strlcpy(vc->bus_info, usbvision->dev->dev.bus_id,
535 sizeof(vc->bus_info));
536 vc->version = USBVISION_DRIVER_VERSION;
537 vc->capabilities = V4L2_CAP_VIDEO_CAPTURE |
538 V4L2_CAP_AUDIO |
539 V4L2_CAP_READWRITE |
540 V4L2_CAP_STREAMING |
541 (usbvision->have_tuner ? V4L2_CAP_TUNER : 0);
542 PDEBUG(DBG_IOCTL, "VIDIOC_QUERYCAP");
543 return 0;
544 }
545 case VIDIOC_ENUMINPUT:
546 {
547 struct v4l2_input *vi = arg;
548 int chan;
549
550 if ((vi->index >= usbvision->video_inputs) || (vi->index < 0) )
551 return -EINVAL;
552 if (usbvision->have_tuner) {
553 chan = vi->index;
554 }
555 else {
556 chan = vi->index + 1; //skip Television string
557 }
558 switch(chan) {
559 case 0:
560 if (usbvision_device_data[usbvision->DevModel].VideoChannels == 4) {
561 strcpy(vi->name, "White Video Input");
562 }
563 else {
564 strcpy(vi->name, "Television");
565 vi->type = V4L2_INPUT_TYPE_TUNER;
566 vi->audioset = 1;
567 vi->tuner = chan;
568 vi->std = V4L2_STD_PAL | V4L2_STD_NTSC | V4L2_STD_SECAM;
569 }
570 break;
571 case 1:
572 vi->type = V4L2_INPUT_TYPE_CAMERA;
573 if (usbvision_device_data[usbvision->DevModel].VideoChannels == 4) {
574 strcpy(vi->name, "Green Video Input");
575 }
576 else {
577 strcpy(vi->name, "Composite Video Input");
578 }
579 vi->std = V4L2_STD_PAL;
580 break;
581 case 2:
582 vi->type = V4L2_INPUT_TYPE_CAMERA;
583 if (usbvision_device_data[usbvision->DevModel].VideoChannels == 4) {
584 strcpy(vi->name, "Yellow Video Input");
585 }
586 else {
587 strcpy(vi->name, "S-Video Input");
588 }
589 vi->std = V4L2_STD_PAL;
590 break;
591 case 3:
592 vi->type = V4L2_INPUT_TYPE_CAMERA;
593 strcpy(vi->name, "Red Video Input");
594 vi->std = V4L2_STD_PAL;
595 break;
596 }
597 PDEBUG(DBG_IOCTL, "VIDIOC_ENUMINPUT name=%s:%d tuners=%d type=%d norm=%x",
598 vi->name, vi->index, vi->tuner,vi->type,(int)vi->std);
599 return 0;
600 }
601 case VIDIOC_ENUMSTD:
602 {
603 struct v4l2_standard *e = arg;
604 unsigned int i;
605 int ret;
606
607 i = e->index;
608 if (i >= TVNORMS)
609 return -EINVAL;
610 ret = v4l2_video_std_construct(e, tvnorms[e->index].id,
611 tvnorms[e->index].name);
612 e->index = i;
613 if (ret < 0)
614 return ret;
615 return 0;
616 }
617 case VIDIOC_G_INPUT:
618 {
619 int *input = arg;
620 *input = usbvision->ctl_input;
621 return 0;
622 }
623 case VIDIOC_S_INPUT:
624 {
625 int *input = arg;
626 if ((*input >= usbvision->video_inputs) || (*input < 0) )
627 return -EINVAL;
628 usbvision->ctl_input = *input;
629
630 down(&usbvision->lock);
631 usbvision_muxsel(usbvision, usbvision->ctl_input);
632 usbvision_set_input(usbvision);
633 usbvision_set_output(usbvision, usbvision->curwidth, usbvision->curheight);
634 up(&usbvision->lock);
635 return 0;
636 }
637 case VIDIOC_G_STD:
638 {
639 v4l2_std_id *id = arg;
640
641 *id = usbvision->tvnorm->id;
642
643 PDEBUG(DBG_IOCTL, "VIDIOC_G_STD std_id=%s", usbvision->tvnorm->name);
644 return 0;
645 }
646 case VIDIOC_S_STD:
647 {
648 v4l2_std_id *id = arg;
649 unsigned int i;
650
651 for (i = 0; i < TVNORMS; i++)
652 if (*id == tvnorms[i].id)
653 break;
654 if (i == TVNORMS)
655 for (i = 0; i < TVNORMS; i++)
656 if (*id & tvnorms[i].id)
657 break;
658 if (i == TVNORMS)
659 return -EINVAL;
660
661 down(&usbvision->lock);
662 usbvision->tvnorm = &tvnorms[i];
663
664 call_i2c_clients(usbvision, VIDIOC_S_STD,
665 &usbvision->tvnorm->id);
666
667 up(&usbvision->lock);
668
669 PDEBUG(DBG_IOCTL, "VIDIOC_S_STD std_id=%s", usbvision->tvnorm->name);
670 return 0;
671 }
672 case VIDIOC_G_TUNER:
673 {
674 struct v4l2_tuner *vt = arg;
675
676 if (!usbvision->have_tuner || vt->index) // Only tuner 0
677 return -EINVAL;
678 strcpy(vt->name, "Television");
679 /* Let clients fill in the remainder of this struct */
680 call_i2c_clients(usbvision,VIDIOC_G_TUNER,vt);
681
682 PDEBUG(DBG_IOCTL, "VIDIOC_G_TUNER signal=%x, afc=%x",vt->signal,vt->afc);
683 return 0;
684 }
685 case VIDIOC_S_TUNER:
686 {
687 struct v4l2_tuner *vt = arg;
688
689 // Only no or one tuner for now
690 if (!usbvision->have_tuner || vt->index)
691 return -EINVAL;
692 /* let clients handle this */
693 call_i2c_clients(usbvision,VIDIOC_S_TUNER,vt);
694
695 PDEBUG(DBG_IOCTL, "VIDIOC_S_TUNER");
696 return 0;
697 }
698 case VIDIOC_G_FREQUENCY:
699 {
700 struct v4l2_frequency *freq = arg;
701
702 freq->tuner = 0; // Only one tuner
703 freq->type = V4L2_TUNER_ANALOG_TV;
704 freq->frequency = usbvision->freq;
705 PDEBUG(DBG_IOCTL, "VIDIOC_G_FREQUENCY freq=0x%X", (unsigned)freq->frequency);
706 return 0;
707 }
708 case VIDIOC_S_FREQUENCY:
709 {
710 struct v4l2_frequency *freq = arg;
711
712 // Only no or one tuner for now
713 if (!usbvision->have_tuner || freq->tuner)
714 return -EINVAL;
715
716 usbvision->freq = freq->frequency;
717 call_i2c_clients(usbvision, cmd, freq);
718 PDEBUG(DBG_IOCTL, "VIDIOC_S_FREQUENCY freq=0x%X", (unsigned)freq->frequency);
719 return 0;
720 }
721 case VIDIOC_G_AUDIO:
722 {
723 struct v4l2_audio *v = arg;
724 memset(v,0, sizeof(v));
725 strcpy(v->name, "TV");
726 PDEBUG(DBG_IOCTL, "VIDIOC_G_AUDIO");
727 return 0;
728 }
729 case VIDIOC_S_AUDIO:
730 {
731 struct v4l2_audio *v = arg;
732 if(v->index) {
733 return -EINVAL;
734 }
735 PDEBUG(DBG_IOCTL, "VIDIOC_S_AUDIO");
736 return 0;
737 }
738 case VIDIOC_QUERYCTRL:
739 {
740 struct v4l2_queryctrl *ctrl = arg;
741 int id=ctrl->id;
742
743 memset(ctrl,0,sizeof(*ctrl));
744 ctrl->id=id;
745
746 call_i2c_clients(usbvision, cmd, arg);
747
748 if (ctrl->type)
749 return 0;
750 else
751 return -EINVAL;
752
753 PDEBUG(DBG_IOCTL,"VIDIOC_QUERYCTRL id=%x value=%x",ctrl->id,ctrl->type);
754 }
755 case VIDIOC_G_CTRL:
756 {
757 struct v4l2_control *ctrl = arg;
758 PDEBUG(DBG_IOCTL,"VIDIOC_G_CTRL id=%x value=%x",ctrl->id,ctrl->value);
759 call_i2c_clients(usbvision, VIDIOC_G_CTRL, ctrl);
760 return 0;
761 }
762 case VIDIOC_S_CTRL:
763 {
764 struct v4l2_control *ctrl = arg;
765
766 PDEBUG(DBG_IOCTL, "VIDIOC_S_CTRL id=%x value=%x",ctrl->id,ctrl->value);
767 call_i2c_clients(usbvision, VIDIOC_S_CTRL, ctrl);
768 return 0;
769 }
770 case VIDIOC_REQBUFS:
771 {
772 struct v4l2_requestbuffers *vr = arg;
773 int ret;
774
775 RESTRICT_TO_RANGE(vr->count,1,USBVISION_NUMFRAMES);
776
777 // Check input validity : the user must do a VIDEO CAPTURE and MMAP method.
778 if((vr->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) ||
779 (vr->memory != V4L2_MEMORY_MMAP))
780 return -EINVAL;
781
782 if(usbvision->streaming == Stream_On) {
783 if ((ret = usbvision_stream_interrupt(usbvision)))
784 return ret;
785 }
786
787 usbvision_empty_framequeues(usbvision);
788
789 usbvision->curFrame = NULL;
790
791 PDEBUG(DBG_IOCTL, "VIDIOC_REQBUFS count=%d",vr->count);
792 return 0;
793 }
794 case VIDIOC_QUERYBUF:
795 {
796 struct v4l2_buffer *vb = arg;
797 struct usbvision_frame *frame;
798
799 // FIXME : must control that buffers are mapped (VIDIOC_REQBUFS has been called)
800
801 if(vb->type != V4L2_CAP_VIDEO_CAPTURE) {
802 return -EINVAL;
803 }
804 if(vb->index>=USBVISION_NUMFRAMES) {
805 return -EINVAL;
806 }
807 // Updating the corresponding frame state
808 vb->flags = 0;
809 frame = &usbvision->frame[vb->index];
810 if(frame->grabstate >= FrameState_Ready)
811 vb->flags |= V4L2_BUF_FLAG_QUEUED;
812 if(frame->grabstate >= FrameState_Done)
813 vb->flags |= V4L2_BUF_FLAG_DONE;
814 if(frame->grabstate == FrameState_Unused)
815 vb->flags |= V4L2_BUF_FLAG_MAPPED;
816 vb->memory = V4L2_MEMORY_MMAP;
817
818 vb->m.offset = vb->index*usbvision->max_frame_size;
819
820 vb->memory = V4L2_MEMORY_MMAP;
821 vb->field = V4L2_FIELD_NONE;
822 vb->length = usbvision->curwidth*usbvision->curheight*usbvision->palette.bytes_per_pixel;
823 vb->timestamp = usbvision->frame[vb->index].timestamp;
824 vb->sequence = usbvision->frame[vb->index].sequence;
825 return 0;
826 }
827 case VIDIOC_QBUF:
828 {
829 struct v4l2_buffer *vb = arg;
830 struct usbvision_frame *frame;
831 unsigned long lock_flags;
832
833 // FIXME : works only on VIDEO_CAPTURE MODE, MMAP.
834 if(vb->type != V4L2_CAP_VIDEO_CAPTURE) {
835 return -EINVAL;
836 }
837 if(vb->index>=USBVISION_NUMFRAMES) {
838 return -EINVAL;
839 }
840
841 frame = &usbvision->frame[vb->index];
842
843 if (frame->grabstate != FrameState_Unused) {
844 return -EAGAIN;
845 }
846
847 /* Mark it as ready and enqueue frame */
848 frame->grabstate = FrameState_Ready;
849 frame->scanstate = ScanState_Scanning;
850 frame->scanlength = 0; /* Accumulated in usbvision_parse_data() */
851
852 vb->flags &= ~V4L2_BUF_FLAG_DONE;
853
854 /* set v4l2_format index */
855 frame->v4l2_format = usbvision->palette;
856
857 spin_lock_irqsave(&usbvision->queue_lock, lock_flags);
858 list_add_tail(&usbvision->frame[vb->index].frame, &usbvision->inqueue);
859 spin_unlock_irqrestore(&usbvision->queue_lock, lock_flags);
860
861 PDEBUG(DBG_IOCTL, "VIDIOC_QBUF frame #%d",vb->index);
862 return 0;
863 }
864 case VIDIOC_DQBUF:
865 {
866 struct v4l2_buffer *vb = arg;
867 int ret;
868 struct usbvision_frame *f;
869 unsigned long lock_flags;
870
871 if (vb->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
872 return -EINVAL;
873
874 if (list_empty(&(usbvision->outqueue))) {
875 if (usbvision->streaming == Stream_Idle)
876 return -EINVAL;
877 ret = wait_event_interruptible
878 (usbvision->wait_frame,
879 !list_empty(&(usbvision->outqueue)));
880 if (ret)
881 return ret;
882 }
883
884 spin_lock_irqsave(&usbvision->queue_lock, lock_flags);
885 f = list_entry(usbvision->outqueue.next,
886 struct usbvision_frame, frame);
887 list_del(usbvision->outqueue.next);
888 spin_unlock_irqrestore(&usbvision->queue_lock, lock_flags);
889
890 f->grabstate = FrameState_Unused;
891
892 vb->memory = V4L2_MEMORY_MMAP;
893 vb->flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | V4L2_BUF_FLAG_DONE;
894 vb->index = f->index;
895 vb->sequence = f->sequence;
896 vb->timestamp = f->timestamp;
897 vb->field = V4L2_FIELD_NONE;
898 vb->bytesused = f->scanlength;
899
900 return 0;
901 }
902 case VIDIOC_STREAMON:
903 {
904 int b=V4L2_BUF_TYPE_VIDEO_CAPTURE;
905
906 usbvision->streaming = Stream_On;
907
908 call_i2c_clients(usbvision,VIDIOC_STREAMON , &b);
909
910 PDEBUG(DBG_IOCTL, "VIDIOC_STREAMON");
911
912 return 0;
913 }
914 case VIDIOC_STREAMOFF:
915 {
916 int *type = arg;
917 int b=V4L2_BUF_TYPE_VIDEO_CAPTURE;
918
919 if (*type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
920 return -EINVAL;
921
922 if(usbvision->streaming == Stream_On) {
923 usbvision_stream_interrupt(usbvision);
924 // Stop all video streamings
925 call_i2c_clients(usbvision,VIDIOC_STREAMOFF , &b);
926 }
927 usbvision_empty_framequeues(usbvision);
928
929 PDEBUG(DBG_IOCTL, "VIDIOC_STREAMOFF");
930 return 0;
931 }
932 case VIDIOC_ENUM_FMT:
933 {
934 struct v4l2_fmtdesc *vfd = arg;
935
936 if(vfd->index>=USBVISION_SUPPORTED_PALETTES-1) {
937 return -EINVAL;
938 }
939 vfd->flags = 0;
940 vfd->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
941 strcpy(vfd->description,usbvision_v4l2_format[vfd->index].desc);
942 vfd->pixelformat = usbvision_v4l2_format[vfd->index].format;
943 memset(vfd->reserved, 0, sizeof(vfd->reserved));
944 return 0;
945 }
946 case VIDIOC_G_FMT:
947 {
948 struct v4l2_format *vf = arg;
949
950 switch (vf->type) {
951 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
952 {
953 vf->fmt.pix.width = usbvision->curwidth;
954 vf->fmt.pix.height = usbvision->curheight;
955 vf->fmt.pix.pixelformat = usbvision->palette.format;
956 vf->fmt.pix.bytesperline = usbvision->curwidth*usbvision->palette.bytes_per_pixel;
957 vf->fmt.pix.sizeimage = vf->fmt.pix.bytesperline*usbvision->curheight;
958 vf->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
959 vf->fmt.pix.field = V4L2_FIELD_NONE; /* Always progressive image */
960 PDEBUG(DBG_IOCTL, "VIDIOC_G_FMT w=%d, h=%d, format=%s",
961 vf->fmt.pix.width, vf->fmt.pix.height,usbvision->palette.desc);
962 return 0;
963 }
964 default:
965 PDEBUG(DBG_IOCTL, "VIDIOC_G_FMT invalid type %d",vf->type);
966 return -EINVAL;
967 }
968 return 0;
969 }
970 case VIDIOC_TRY_FMT:
971 case VIDIOC_S_FMT:
972 {
973 struct v4l2_format *vf = arg;
974 int formatIdx,ret;
975
976 switch(vf->type) {
977 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
978 {
979 /* Find requested format in available ones */
980 for(formatIdx=0;formatIdx<USBVISION_SUPPORTED_PALETTES;formatIdx++) {
981 if(vf->fmt.pix.pixelformat == usbvision_v4l2_format[formatIdx].format) {
982 usbvision->palette = usbvision_v4l2_format[formatIdx];
983 break;
984 }
985 }
986 /* robustness */
987 if(formatIdx == USBVISION_SUPPORTED_PALETTES) {
988 return -EINVAL;
989 }
990 RESTRICT_TO_RANGE(vf->fmt.pix.width, MIN_FRAME_WIDTH, MAX_FRAME_WIDTH);
991 RESTRICT_TO_RANGE(vf->fmt.pix.height, MIN_FRAME_HEIGHT, MAX_FRAME_HEIGHT);
992
993 vf->fmt.pix.bytesperline = vf->fmt.pix.width*usbvision->palette.bytes_per_pixel;
994 vf->fmt.pix.sizeimage = vf->fmt.pix.bytesperline*vf->fmt.pix.height;
995
996 if(cmd == VIDIOC_TRY_FMT) {
997 PDEBUG(DBG_IOCTL, "VIDIOC_TRY_FMT grabdisplay w=%d, h=%d, format=%s",
998 vf->fmt.pix.width, vf->fmt.pix.height,usbvision->palette.desc);
999 return 0;
1000 }
1001
1002 /* stop io in case it is already in progress */
1003 if(usbvision->streaming == Stream_On) {
1004 if ((ret = usbvision_stream_interrupt(usbvision)))
1005 return ret;
1006 }
1007 usbvision_empty_framequeues(usbvision);
1008
1009 usbvision->curFrame = NULL;
1010
1011 // by now we are committed to the new data...
1012 down(&usbvision->lock);
1013 usbvision_set_output(usbvision, vf->fmt.pix.width, vf->fmt.pix.height);
1014 up(&usbvision->lock);
1015
1016 PDEBUG(DBG_IOCTL, "VIDIOC_S_FMT grabdisplay w=%d, h=%d, format=%s",
1017 vf->fmt.pix.width, vf->fmt.pix.height,usbvision->palette.desc);
1018 return 0;
1019 }
1020 default:
1021 return -EINVAL;
1022 }
1023 }
1024 default:
1025 return -ENOIOCTLCMD;
1026 }
1027 return 0;
1028}
1029
/* ioctl entry point: video_usercopy() copies the argument in/out of user
 * space and dispatches to usbvision_v4l2_do_ioctl(). */
static int usbvision_v4l2_ioctl(struct inode *inode, struct file *file,
		       unsigned int cmd, unsigned long arg)
{
	return video_usercopy(inode, file, cmd, arg, usbvision_v4l2_do_ioctl);
}
1035
1036
1037static ssize_t usbvision_v4l2_read(struct file *file, char *buf,
1038 size_t count, loff_t *ppos)
1039{
1040 struct video_device *dev = video_devdata(file);
1041 struct usb_usbvision *usbvision = (struct usb_usbvision *) video_get_drvdata(dev);
1042 int noblock = file->f_flags & O_NONBLOCK;
1043 unsigned long lock_flags;
1044
1045 int frmx = -1;
1046 int ret,i;
1047 struct usbvision_frame *frame;
1048
1049 PDEBUG(DBG_IO, "%s: %ld bytes, noblock=%d", __FUNCTION__, (unsigned long)count, noblock);
1050
1051 if (!USBVISION_IS_OPERATIONAL(usbvision) || (buf == NULL))
1052 return -EFAULT;
1053
1054 /* no stream is running, make it running ! */
1055 usbvision->streaming = Stream_On;
1056 call_i2c_clients(usbvision,VIDIOC_STREAMON , NULL);
1057
1058 /* First, enqueue as many frames as possible (like a user of VIDIOC_QBUF would do) */
1059 for(i=0;i<USBVISION_NUMFRAMES;i++) {
1060 frame = &usbvision->frame[i];
1061 if(frame->grabstate == FrameState_Unused) {
1062 /* Mark it as ready and enqueue frame */
1063 frame->grabstate = FrameState_Ready;
1064 frame->scanstate = ScanState_Scanning;
1065 frame->scanlength = 0; /* Accumulated in usbvision_parse_data() */
1066
1067 /* set v4l2_format index */
1068 frame->v4l2_format = usbvision->palette;
1069
1070 spin_lock_irqsave(&usbvision->queue_lock, lock_flags);
1071 list_add_tail(&frame->frame, &usbvision->inqueue);
1072 spin_unlock_irqrestore(&usbvision->queue_lock, lock_flags);
1073 }
1074 }
1075
1076 /* Then try to steal a frame (like a VIDIOC_DQBUF would do) */
1077 if (list_empty(&(usbvision->outqueue))) {
1078 if(noblock)
1079 return -EAGAIN;
1080
1081 ret = wait_event_interruptible
1082 (usbvision->wait_frame,
1083 !list_empty(&(usbvision->outqueue)));
1084 if (ret)
1085 return ret;
1086 }
1087
1088 spin_lock_irqsave(&usbvision->queue_lock, lock_flags);
1089 frame = list_entry(usbvision->outqueue.next,
1090 struct usbvision_frame, frame);
1091 list_del(usbvision->outqueue.next);
1092 spin_unlock_irqrestore(&usbvision->queue_lock, lock_flags);
1093
1094 /* An error returns an empty frame */
1095 if (frame->grabstate == FrameState_Error) {
1096 frame->bytes_read = 0;
1097 return 0;
1098 }
1099
1100 PDEBUG(DBG_IO, "%s: frmx=%d, bytes_read=%ld, scanlength=%ld", __FUNCTION__,
1101 frame->index, frame->bytes_read, frame->scanlength);
1102
1103 /* copy bytes to user space; we allow for partials reads */
1104 if ((count + frame->bytes_read) > (unsigned long)frame->scanlength)
1105 count = frame->scanlength - frame->bytes_read;
1106
1107 if (copy_to_user(buf, frame->data + frame->bytes_read, count)) {
1108 return -EFAULT;
1109 }
1110
1111 frame->bytes_read += count;
1112 PDEBUG(DBG_IO, "%s: {copy} count used=%ld, new bytes_read=%ld", __FUNCTION__,
1113 (unsigned long)count, frame->bytes_read);
1114
1115 // For now, forget the frame if it has not been read in one shot.
1116/* if (frame->bytes_read >= frame->scanlength) {// All data has been read */
1117 frame->bytes_read = 0;
1118
1119 /* Mark it as available to be used again. */
1120 usbvision->frame[frmx].grabstate = FrameState_Unused;
1121/* } */
1122
1123 return count;
1124}
1125
/* mmap() one frame buffer into the caller's address space.  The requested
 * offset (vm_pgoff) selects which of the USBVISION_NUMFRAMES buffers is
 * mapped; the mapping size must exactly match one frame. */
static int usbvision_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start,
		start = vma->vm_start;
	void *pos;
	u32 i;

	struct video_device *dev = video_devdata(file);
	struct usb_usbvision *usbvision = (struct usb_usbvision *) video_get_drvdata(dev);

	down(&usbvision->lock);

	if (!USBVISION_IS_OPERATIONAL(usbvision)) {
		up(&usbvision->lock);
		return -EFAULT;
	}

	/* Mapping must be writable and cover exactly one frame. */
	if (!(vma->vm_flags & VM_WRITE) ||
	    size != PAGE_ALIGN(usbvision->curwidth*usbvision->curheight*usbvision->palette.bytes_per_pixel)) {
		up(&usbvision->lock);
		return -EINVAL;
	}

	/* Translate the page offset back into a frame index. */
	for (i = 0; i < USBVISION_NUMFRAMES; i++) {
		if (((usbvision->max_frame_size*i) >> PAGE_SHIFT) == vma->vm_pgoff)
			break;
	}
	if (i == USBVISION_NUMFRAMES) {
		PDEBUG(DBG_MMAP, "mmap: user supplied mapping address is out of range");
		up(&usbvision->lock);
		return -EINVAL;
	}

	/* VM_IO is eventually going to replace PageReserved altogether */
	vma->vm_flags |= VM_IO;
	vma->vm_flags |= VM_RESERVED;	/* avoid to swap out this VMA */

	/* Insert the vmalloc'ed frame buffer page by page. */
	pos = usbvision->frame[i].data;
	while (size > 0) {

		if (vm_insert_page(vma, start, vmalloc_to_page(pos))) {
			PDEBUG(DBG_MMAP, "mmap: vm_insert_page failed");
			up(&usbvision->lock);
			return -EAGAIN;
		}
		start += PAGE_SIZE;
		pos += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	up(&usbvision->lock);
	return 0;
}
1179
1180
1181/*
1182 * Here comes the stuff for radio on usbvision based devices
1183 *
1184 */
/* open() on the radio node: powers the device up if needed, switches the
 * tuner into radio mode and tunes to a default station.  Only one user
 * may hold the device open at a time. */
static int usbvision_radio_open(struct inode *inode, struct file *file)
{
	struct video_device *dev = video_devdata(file);
	struct usb_usbvision *usbvision = (struct usb_usbvision *) video_get_drvdata(dev);
	struct v4l2_frequency freq;
	int errCode = 0;

	PDEBUG(DBG_IO, "%s:", __FUNCTION__);

	down(&usbvision->lock);

	if (usbvision->user) {
		err("%s: Someone tried to open an already opened USBVision Radio!", __FUNCTION__);
		errCode = -EBUSY;
	}
	else {
		if(PowerOnAtOpen) {
			/* cancel a pending delayed power-off from a recent close() */
			usbvision_reset_powerOffTimer(usbvision);
			if (usbvision->power == 0) {
				usbvision_power_on(usbvision);
				usbvision_init_i2c(usbvision);
			}
		}

		// If so far no errors then we shall start the radio
		usbvision->radio = 1;
		call_i2c_clients(usbvision,AUDC_SET_RADIO,&usbvision->tuner_type);
		freq.frequency = 1517;	//SWR3 @ 94.8MHz
		call_i2c_clients(usbvision, VIDIOC_S_FREQUENCY, &freq);
		usbvision_set_audio(usbvision, USBVISION_AUDIO_RADIO);
		usbvision->user++;
	}

	/* NOTE(review): errCode is only ever -EBUSY here, i.e. another user
	 * holds the device - powering it off in that case looks like it
	 * would disturb that user; confirm intended behavior. */
	if (errCode) {
		if (PowerOnAtOpen) {
			usbvision_i2c_usb_del_bus(&usbvision->i2c_adap);
			usbvision_power_off(usbvision);
			usbvision->initialized = 0;
		}
	}
	up(&usbvision->lock);
	return errCode;
}
1228
1229
/* close() on the radio node: mutes audio, drops the user count and
 * schedules a delayed power-off; frees the driver state if the USB
 * device was disconnected while open. */
static int usbvision_radio_close(struct inode *inode, struct file *file)
{
	struct video_device *dev = video_devdata(file);
	struct usb_usbvision *usbvision = (struct usb_usbvision *) video_get_drvdata(dev);
	int errCode = 0;

	PDEBUG(DBG_IO, "");

	down(&usbvision->lock);

	usbvision_audio_off(usbvision);
	usbvision->radio=0;
	usbvision->user--;

	if (PowerOnAtOpen) {
		/* deferred power-off avoids an off/on cycle on quick reopen */
		usbvision_set_powerOffTimer(usbvision);
		usbvision->initialized = 0;
	}

	up(&usbvision->lock);

	if (usbvision->remove_pending) {
		info("%s: Final disconnect", __FUNCTION__);
		usbvision_release(usbvision);
	}


	PDEBUG(DBG_IO, "success");

	return errCode;
}
1261
/* Handle a single V4L2 ioctl on the radio node.  Called through
 * video_usercopy(), so @arg is a kernel-space copy of the user argument.
 * Most tuner/control work is delegated to the attached i2c clients. */
static int usbvision_do_radio_ioctl(struct inode *inode, struct file *file,
				 unsigned int cmd, void *arg)
{
	struct video_device *dev = video_devdata(file);
	struct usb_usbvision *usbvision = (struct usb_usbvision *) video_get_drvdata(dev);

	if (!USBVISION_IS_OPERATIONAL(usbvision))
		return -EIO;

	switch (cmd) {
		case VIDIOC_QUERYCAP:
		{
			struct v4l2_capability *vc=arg;

			memset(vc, 0, sizeof(*vc));
			strlcpy(vc->driver, "USBVision", sizeof(vc->driver));
			strlcpy(vc->card, usbvision_device_data[usbvision->DevModel].ModelString,
				sizeof(vc->card));
			strlcpy(vc->bus_info, usbvision->dev->dev.bus_id,
				sizeof(vc->bus_info));
			vc->version = USBVISION_DRIVER_VERSION;
			vc->capabilities = (usbvision->have_tuner ? V4L2_CAP_TUNER : 0);
			PDEBUG(DBG_IO, "VIDIOC_QUERYCAP");
			return 0;
		}
		case VIDIOC_QUERYCTRL:
		{
			struct v4l2_queryctrl *ctrl = arg;
			int id=ctrl->id;

			memset(ctrl,0,sizeof(*ctrl));
			ctrl->id=id;

			call_i2c_clients(usbvision, cmd, arg);
			PDEBUG(DBG_IO,"VIDIOC_QUERYCTRL id=%x value=%x",ctrl->id,ctrl->type);

			/* a client that recognized the control filled in ->type */
			if (ctrl->type)
				return 0;
			else
				return -EINVAL;

		}
		case VIDIOC_G_CTRL:
		{
			struct v4l2_control *ctrl = arg;

			call_i2c_clients(usbvision, VIDIOC_G_CTRL, ctrl);
			PDEBUG(DBG_IO,"VIDIOC_G_CTRL id=%x value=%x",ctrl->id,ctrl->value);
			return 0;
		}
		case VIDIOC_S_CTRL:
		{
			struct v4l2_control *ctrl = arg;

			call_i2c_clients(usbvision, VIDIOC_S_CTRL, ctrl);
			PDEBUG(DBG_IO, "VIDIOC_S_CTRL id=%x value=%x",ctrl->id,ctrl->value);
			return 0;
		}
		case VIDIOC_G_TUNER:
		{
			struct v4l2_tuner *t = arg;

			/* only tuner 0 exists */
			if (t->index > 0)
				return -EINVAL;

			memset(t,0,sizeof(*t));
			strcpy(t->name, "Radio");
			t->type = V4L2_TUNER_RADIO;

			/* Let clients fill in the remainder of this struct */
			call_i2c_clients(usbvision,VIDIOC_G_TUNER,t);
			PDEBUG(DBG_IO, "VIDIOC_G_TUNER signal=%x, afc=%x",t->signal,t->afc);
			return 0;
		}
		case VIDIOC_S_TUNER:
		{
			struct v4l2_tuner *vt = arg;

			// Only no or one tuner for now
			if (!usbvision->have_tuner || vt->index)
				return -EINVAL;
			/* let clients handle this */
			call_i2c_clients(usbvision,VIDIOC_S_TUNER,vt);

			PDEBUG(DBG_IO, "VIDIOC_S_TUNER");
			return 0;
		}
		case VIDIOC_G_AUDIO:
		{
			struct v4l2_audio *a = arg;

			memset(a,0,sizeof(*a));
			strcpy(a->name,"Radio");
			PDEBUG(DBG_IO, "VIDIOC_G_AUDIO");
			return 0;
		}
		case VIDIOC_S_AUDIO:
		case VIDIOC_S_INPUT:
		case VIDIOC_S_STD:
			/* accepted but meaningless for the radio node */
			return 0;

		case VIDIOC_G_FREQUENCY:
		{
			struct v4l2_frequency *f = arg;

			memset(f,0,sizeof(*f));

			f->type = V4L2_TUNER_RADIO;
			f->frequency = usbvision->freq;
			call_i2c_clients(usbvision, cmd, f);
			PDEBUG(DBG_IO, "VIDIOC_G_FREQUENCY freq=0x%X", (unsigned)f->frequency);

			return 0;
		}
		case VIDIOC_S_FREQUENCY:
		{
			struct v4l2_frequency *f = arg;

			if (f->tuner != 0)
				return -EINVAL;
			usbvision->freq = f->frequency;
			call_i2c_clients(usbvision, cmd, f);
			PDEBUG(DBG_IO, "VIDIOC_S_FREQUENCY freq=0x%X", (unsigned)f->frequency);

			return 0;
		}
		default:
		{
			PDEBUG(DBG_IO, "%s: Unknown command %x", __FUNCTION__, cmd);
			return -ENOIOCTLCMD;
		}
	}
	return 0;
}
1396
1397
/* ioctl entry point for the radio node; video_usercopy() handles the
 * user-space copying around usbvision_do_radio_ioctl(). */
static int usbvision_radio_ioctl(struct inode *inode, struct file *file,
		       unsigned int cmd, unsigned long arg)
{
	return video_usercopy(inode, file, cmd, arg, usbvision_do_radio_ioctl);
}
1403
1404
1405/*
1406 * Here comes the stuff for vbi on usbvision based devices
1407 *
1408 */
/* VBI support is not implemented yet; open always fails. */
static int usbvision_vbi_open(struct inode *inode, struct file *file)
{
	/* TODO */
	return -EINVAL;

}
1415
/* VBI support is not implemented yet; close always fails. */
static int usbvision_vbi_close(struct inode *inode, struct file *file)
{
	/* TODO */
	return -EINVAL;
}
1421
/* VBI support is not implemented yet; every ioctl fails. */
static int usbvision_do_vbi_ioctl(struct inode *inode, struct file *file,
				 unsigned int cmd, void *arg)
{
	/* TODO */
	return -EINVAL;
}
1428
/* ioctl entry point for the (stub) vbi node. */
static int usbvision_vbi_ioctl(struct inode *inode, struct file *file,
		       unsigned int cmd, unsigned long arg)
{
	return video_usercopy(inode, file, cmd, arg, usbvision_do_vbi_ioctl);
}
1434
1435
1436//
1437// Video registration stuff
1438//
1439
1440// Video template
/* file_operations for the video capture node (/dev/videoN) */
static struct file_operations usbvision_fops = {
	.owner             = THIS_MODULE,
	.open		= usbvision_v4l2_open,
	.release	= usbvision_v4l2_close,
	.read		= usbvision_v4l2_read,
	.mmap		= usbvision_v4l2_mmap,
	.ioctl		= usbvision_v4l2_ioctl,
	.llseek		= no_llseek,
};
/* video_device template cloned by usbvision_vdev_init() for each probe;
 * minor == -1 means "not yet registered" */
static struct video_device usbvision_video_template = {
	.owner             = THIS_MODULE,
	.type		= VID_TYPE_TUNER | VID_TYPE_CAPTURE,
	.hardware	= VID_HARDWARE_USBVISION,
	.fops		= &usbvision_fops,
	.name           = "usbvision-video",
	.release	= video_device_release,
	.minor		= -1,
};
1459
1460
1461// Radio template
/* file_operations for the radio node (/dev/radioN); no read/mmap - the
 * radio is controlled purely via ioctl */
static struct file_operations usbvision_radio_fops = {
	.owner             = THIS_MODULE,
	.open		= usbvision_radio_open,
	.release	= usbvision_radio_close,
	.ioctl		= usbvision_radio_ioctl,
	.llseek		= no_llseek,
};
1469
/* video_device template for the radio node */
static struct video_device usbvision_radio_template=
{
	.owner             = THIS_MODULE,
	.type		= VID_TYPE_TUNER,
	.hardware	= VID_HARDWARE_USBVISION,
	.fops		= &usbvision_radio_fops,
	.release	= video_device_release,
	.name           = "usbvision-radio",
	.minor		= -1,
};
1480
1481
1482// vbi template
/* file_operations for the (stub) vbi node; all handlers return -EINVAL */
static struct file_operations usbvision_vbi_fops = {
	.owner             = THIS_MODULE,
	.open		= usbvision_vbi_open,
	.release	= usbvision_vbi_close,
	.ioctl		= usbvision_vbi_ioctl,
	.llseek		= no_llseek,
};
1490
/* video_device template for the (stub) vbi node */
static struct video_device usbvision_vbi_template=
{
	.owner             = THIS_MODULE,
	.type		= VID_TYPE_TUNER,
	.hardware	= VID_HARDWARE_USBVISION,
	.fops		= &usbvision_vbi_fops,
	.release	= video_device_release,
	.name           = "usbvision-vbi",
	.minor		= -1,
};
1501
1502
1503static struct video_device *usbvision_vdev_init(struct usb_usbvision *usbvision,
1504 struct video_device *vdev_template,
1505 char *name)
1506{
1507 struct usb_device *usb_dev = usbvision->dev;
1508 struct video_device *vdev;
1509
1510 if (usb_dev == NULL) {
1511 err("%s: usbvision->dev is not set", __FUNCTION__);
1512 return NULL;
1513 }
1514
1515 vdev = video_device_alloc();
1516 if (NULL == vdev) {
1517 return NULL;
1518 }
1519 *vdev = *vdev_template;
1520// vdev->minor = -1;
1521 vdev->dev = &usb_dev->dev;
1522 snprintf(vdev->name, sizeof(vdev->name), "%s", name);
1523 video_set_drvdata(vdev, usbvision);
1524 return vdev;
1525}
1526
1527// unregister video4linux devices
1528static void usbvision_unregister_video(struct usb_usbvision *usbvision)
1529{
1530 // vbi Device:
1531 if (usbvision->vbi) {
1532 PDEBUG(DBG_PROBE, "unregister /dev/vbi%d [v4l2]", usbvision->vbi->minor & 0x1f);
1533 if (usbvision->vbi->minor != -1) {
1534 video_unregister_device(usbvision->vbi);
1535 }
1536 else {
1537 video_device_release(usbvision->vbi);
1538 }
1539 usbvision->vbi = NULL;
1540 }
1541
1542 // Radio Device:
1543 if (usbvision->rdev) {
1544 PDEBUG(DBG_PROBE, "unregister /dev/radio%d [v4l2]", usbvision->rdev->minor & 0x1f);
1545 if (usbvision->rdev->minor != -1) {
1546 video_unregister_device(usbvision->rdev);
1547 }
1548 else {
1549 video_device_release(usbvision->rdev);
1550 }
1551 usbvision->rdev = NULL;
1552 }
1553
1554 // Video Device:
1555 if (usbvision->vdev) {
1556 PDEBUG(DBG_PROBE, "unregister /dev/video%d [v4l2]", usbvision->vdev->minor & 0x1f);
1557 if (usbvision->vdev->minor != -1) {
1558 video_unregister_device(usbvision->vdev);
1559 }
1560 else {
1561 video_device_release(usbvision->vdev);
1562 }
1563 usbvision->vdev = NULL;
1564 }
1565}
1566
1567// register video4linux devices
1568static int __devinit usbvision_register_video(struct usb_usbvision *usbvision)
1569{
1570 // Video Device:
1571 usbvision->vdev = usbvision_vdev_init(usbvision, &usbvision_video_template, "USBVision Video");
1572 if (usbvision->vdev == NULL) {
1573 goto err_exit;
1574 }
1575 if (video_register_device(usbvision->vdev, VFL_TYPE_GRABBER, video_nr)<0) {
1576 goto err_exit;
1577 }
1578 info("USBVision[%d]: registered USBVision Video device /dev/video%d [v4l2]", usbvision->nr,usbvision->vdev->minor & 0x1f);
1579
1580 // Radio Device:
1581 if (usbvision_device_data[usbvision->DevModel].Radio) {
1582 // usbvision has radio
1583 usbvision->rdev = usbvision_vdev_init(usbvision, &usbvision_radio_template, "USBVision Radio");
1584 if (usbvision->rdev == NULL) {
1585 goto err_exit;
1586 }
1587 if (video_register_device(usbvision->rdev, VFL_TYPE_RADIO, radio_nr)<0) {
1588 goto err_exit;
1589 }
1590 info("USBVision[%d]: registered USBVision Radio device /dev/radio%d [v4l2]", usbvision->nr, usbvision->rdev->minor & 0x1f);
1591 }
1592 // vbi Device:
1593 if (usbvision_device_data[usbvision->DevModel].vbi) {
1594 usbvision->vbi = usbvision_vdev_init(usbvision, &usbvision_vbi_template, "USBVision VBI");
1595 if (usbvision->vdev == NULL) {
1596 goto err_exit;
1597 }
1598 if (video_register_device(usbvision->vbi, VFL_TYPE_VBI, vbi_nr)<0) {
1599 goto err_exit;
1600 }
1601 info("USBVision[%d]: registered USBVision VBI device /dev/vbi%d [v4l2] (Not Working Yet!)", usbvision->nr,usbvision->vbi->minor & 0x1f);
1602 }
1603 // all done
1604 return 0;
1605
1606 err_exit:
1607 err("USBVision[%d]: video_register_device() failed", usbvision->nr);
1608 usbvision_unregister_video(usbvision);
1609 return -1;
1610}
1611
1612/*
1613 * usbvision_alloc()
1614 *
1615 * This code allocates the struct usb_usbvision. It is filled with default values.
1616 *
1617 * Returns NULL on error, a pointer to usb_usbvision else.
1618 *
1619 */
1620static struct usb_usbvision *usbvision_alloc(struct usb_device *dev)
1621{
1622 struct usb_usbvision *usbvision;
1623
1624 if ((usbvision = kzalloc(sizeof(struct usb_usbvision), GFP_KERNEL)) == NULL) {
1625 goto err_exit;
1626 }
1627
1628 usbvision->dev = dev;
1629
1630 init_MUTEX(&usbvision->lock); /* to 1 == available */
1631
1632 // prepare control urb for control messages during interrupts
1633 usbvision->ctrlUrb = usb_alloc_urb(USBVISION_URB_FRAMES, GFP_KERNEL);
1634 if (usbvision->ctrlUrb == NULL) {
1635 goto err_exit;
1636 }
1637 init_waitqueue_head(&usbvision->ctrlUrb_wq);
1638 init_MUTEX(&usbvision->ctrlUrbLock); /* to 1 == available */
1639
1640 usbvision_init_powerOffTimer(usbvision);
1641
1642 return usbvision;
1643
1644err_exit:
1645 if (usbvision && usbvision->ctrlUrb) {
1646 usb_free_urb(usbvision->ctrlUrb);
1647 }
1648 if (usbvision) {
1649 kfree(usbvision);
1650 }
1651 return NULL;
1652}
1653
1654/*
1655 * usbvision_release()
1656 *
1657 * This code does final release of struct usb_usbvision. This happens
1658 * after the device is disconnected -and- all clients closed their files.
1659 *
1660 */
1661static void usbvision_release(struct usb_usbvision *usbvision)
1662{
1663 PDEBUG(DBG_PROBE, "");
1664
1665 down(&usbvision->lock);
1666
1667 usbvision_reset_powerOffTimer(usbvision);
1668
1669 usbvision->initialized = 0;
1670
1671 up(&usbvision->lock);
1672
1673 usbvision_remove_sysfs(usbvision->vdev);
1674 usbvision_unregister_video(usbvision);
1675
1676 if (usbvision->ctrlUrb) {
1677 usb_free_urb(usbvision->ctrlUrb);
1678 }
1679
1680 kfree(usbvision);
1681
1682 PDEBUG(DBG_PROBE, "success");
1683}
1684
1685
1686/******************************** usb interface *****************************************/
1687
1688static void usbvision_configure_video(struct usb_usbvision *usbvision)
1689{
1690 int model,i;
1691
1692 if (usbvision == NULL)
1693 return;
1694
1695 model = usbvision->DevModel;
1696 usbvision->palette = usbvision_v4l2_format[2]; // V4L2_PIX_FMT_RGB24;
1697
1698 if (usbvision_device_data[usbvision->DevModel].Vin_Reg2 >= 0) {
1699 usbvision->Vin_Reg2_Preset = usbvision_device_data[usbvision->DevModel].Vin_Reg2 & 0xff;
1700 } else {
1701 usbvision->Vin_Reg2_Preset = 0;
1702 }
1703
1704 for (i = 0; i < TVNORMS; i++)
1705 if (usbvision_device_data[model].VideoNorm == tvnorms[i].mode)
1706 break;
1707 if (i == TVNORMS)
1708 i = 0;
1709 usbvision->tvnorm = &tvnorms[i]; /* set default norm */
1710
1711 usbvision->video_inputs = usbvision_device_data[model].VideoChannels;
1712 usbvision->ctl_input = 0;
1713
1714 /* This should be here to make i2c clients to be able to register */
1715 usbvision_audio_off(usbvision); //first switch off audio
1716 if (!PowerOnAtOpen) {
1717 usbvision_power_on(usbvision); //and then power up the noisy tuner
1718 usbvision_init_i2c(usbvision);
1719 }
1720}
1721
1722/*
1723 * usbvision_probe()
1724 *
1725 * This procedure queries device descriptor and accepts the interface
1726 * if it looks like USBVISION video device
1727 *
1728 */
1729static int __devinit usbvision_probe(struct usb_interface *intf, const struct usb_device_id *devid)
1730{
1731 struct usb_device *dev = interface_to_usbdev(intf);
1732 __u8 ifnum = intf->altsetting->desc.bInterfaceNumber;
1733 const struct usb_host_interface *interface;
1734 struct usb_usbvision *usbvision = NULL;
1735 const struct usb_endpoint_descriptor *endpoint;
1736 int model;
1737
1738 PDEBUG(DBG_PROBE, "VID=%#04x, PID=%#04x, ifnum=%u",
1739 dev->descriptor.idVendor, dev->descriptor.idProduct, ifnum);
1740 /* Is it an USBVISION video dev? */
1741 model = 0;
1742 for(model = 0; usbvision_device_data[model].idVendor; model++) {
1743 if (le16_to_cpu(dev->descriptor.idVendor) != usbvision_device_data[model].idVendor) {
1744 continue;
1745 }
1746 if (le16_to_cpu(dev->descriptor.idProduct) != usbvision_device_data[model].idProduct) {
1747 continue;
1748 }
1749
1750 info("%s: %s found", __FUNCTION__, usbvision_device_data[model].ModelString);
1751 break;
1752 }
1753
1754 if (usbvision_device_data[model].idVendor == 0) {
1755 return -ENODEV; //no matching device
1756 }
1757 if (usbvision_device_data[model].Interface >= 0) {
1758 interface = &dev->actconfig->interface[usbvision_device_data[model].Interface]->altsetting[0];
1759 }
1760 else {
1761 interface = &dev->actconfig->interface[ifnum]->altsetting[0];
1762 }
1763 endpoint = &interface->endpoint[1].desc;
1764 if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_ISOC) {
1765 err("%s: interface %d. has non-ISO endpoint!", __FUNCTION__, ifnum);
1766 err("%s: Endpoint attribures %d", __FUNCTION__, endpoint->bmAttributes);
1767 return -ENODEV;
1768 }
1769 if ((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT) {
1770 err("%s: interface %d. has ISO OUT endpoint!", __FUNCTION__, ifnum);
1771 return -ENODEV;
1772 }
1773
1774 usb_get_dev(dev);
1775
1776 if ((usbvision = usbvision_alloc(dev)) == NULL) {
1777 err("%s: couldn't allocate USBVision struct", __FUNCTION__);
1778 return -ENOMEM;
1779 }
1780 if (dev->descriptor.bNumConfigurations > 1) {
1781 usbvision->bridgeType = BRIDGE_NT1004;
1782 }
1783 else if (usbvision_device_data[model].ModelString == "Dazzle Fusion Model DVC-90 Rev 1 (SECAM)") {
1784 usbvision->bridgeType = BRIDGE_NT1005;
1785 }
1786 else {
1787 usbvision->bridgeType = BRIDGE_NT1003;
1788 }
1789 PDEBUG(DBG_PROBE, "bridgeType %d", usbvision->bridgeType);
1790
1791 down(&usbvision->lock);
1792
1793 usbvision->nr = usbvision_nr++;
1794
1795 usbvision->have_tuner = usbvision_device_data[model].Tuner;
1796 if (usbvision->have_tuner) {
1797 usbvision->tuner_type = usbvision_device_data[model].TunerType;
1798 }
1799
1800 usbvision->tuner_addr = ADDR_UNSET;
1801
1802 usbvision->DevModel = model;
1803 usbvision->remove_pending = 0;
1804 usbvision->iface = ifnum;
1805 usbvision->ifaceAltInactive = 0;
1806 usbvision->ifaceAltActive = 1;
1807 usbvision->video_endp = endpoint->bEndpointAddress;
1808 usbvision->isocPacketSize = 0;
1809 usbvision->usb_bandwidth = 0;
1810 usbvision->user = 0;
1811 usbvision->streaming = Stream_Off;
1812 usbvision_register_video(usbvision);
1813 usbvision_configure_video(usbvision);
1814 up(&usbvision->lock);
1815
1816
1817 usb_set_intfdata (intf, usbvision);
1818 usbvision_create_sysfs(usbvision->vdev);
1819
1820 PDEBUG(DBG_PROBE, "success");
1821 return 0;
1822}
1823
1824
1825/*
1826 * usbvision_disconnect()
1827 *
1828 * This procedure stops all driver activity, deallocates interface-private
1829 * structure (pointed by 'ptr') and after that driver should be removable
1830 * with no ill consequences.
1831 *
1832 */
1833static void __devexit usbvision_disconnect(struct usb_interface *intf)
1834{
1835 struct usb_usbvision *usbvision = usb_get_intfdata(intf);
1836
1837 PDEBUG(DBG_PROBE, "");
1838
1839 if (usbvision == NULL) {
1840 err("%s: usb_get_intfdata() failed", __FUNCTION__);
1841 return;
1842 }
1843 usb_set_intfdata (intf, NULL);
1844
1845 down(&usbvision->lock);
1846
1847 // At this time we ask to cancel outstanding URBs
1848 usbvision_stop_isoc(usbvision);
1849
1850 if (usbvision->power) {
1851 usbvision_i2c_usb_del_bus(&usbvision->i2c_adap);
1852 usbvision_power_off(usbvision);
1853 }
1854 usbvision->remove_pending = 1; // Now all ISO data will be ignored
1855
1856 usb_put_dev(usbvision->dev);
1857 usbvision->dev = NULL; // USB device is no more
1858
1859 up(&usbvision->lock);
1860
1861 if (usbvision->user) {
1862 info("%s: In use, disconnect pending", __FUNCTION__);
1863 wake_up_interruptible(&usbvision->wait_frame);
1864 wake_up_interruptible(&usbvision->wait_stream);
1865 }
1866 else {
1867 usbvision_release(usbvision);
1868 }
1869
1870 PDEBUG(DBG_PROBE, "success");
1871
1872}
1873
/* USB driver glue; usbvision_table (the match table) is declared in
 * usbvision-cards.c and may have slot 0 patched by customdevice_process() */
static struct usb_driver usbvision_driver = {
	.name = "usbvision",
	.id_table = usbvision_table,
	.probe = usbvision_probe,
	.disconnect = usbvision_disconnect
};
1880
1881/*
1882 * customdevice_process()
1883 *
1884 * This procedure preprocesses CustomDevice parameter if any
1885 *
1886 */
1887void customdevice_process(void)
1888{
1889 usbvision_device_data[0]=usbvision_device_data[1];
1890 usbvision_table[0]=usbvision_table[1];
1891
1892 if(CustomDevice)
1893 {
1894 char *parse=CustomDevice;
1895
1896 PDEBUG(DBG_PROBE, "CustomDevide=%s", CustomDevice);
1897
1898 /*format is CustomDevice="0x0573 0x4D31 0 7113 3 PAL 1 1 1 5 -1 -1 -1 -1 -1"
1899 usbvision_device_data[0].idVendor;
1900 usbvision_device_data[0].idProduct;
1901 usbvision_device_data[0].Interface;
1902 usbvision_device_data[0].Codec;
1903 usbvision_device_data[0].VideoChannels;
1904 usbvision_device_data[0].VideoNorm;
1905 usbvision_device_data[0].AudioChannels;
1906 usbvision_device_data[0].Radio;
1907 usbvision_device_data[0].Tuner;
1908 usbvision_device_data[0].TunerType;
1909 usbvision_device_data[0].Vin_Reg1;
1910 usbvision_device_data[0].Vin_Reg2;
1911 usbvision_device_data[0].X_Offset;
1912 usbvision_device_data[0].Y_Offset;
1913 usbvision_device_data[0].Dvi_yuv;
1914 usbvision_device_data[0].ModelString;
1915 */
1916
1917 rmspace(parse);
1918 usbvision_device_data[0].ModelString="USBVISION Custom Device";
1919
1920 parse+=2;
1921 sscanf(parse,"%x",&usbvision_device_data[0].idVendor);
1922 goto2next(parse);
1923 PDEBUG(DBG_PROBE, "idVendor=0x%.4X", usbvision_device_data[0].idVendor);
1924 parse+=2;
1925 sscanf(parse,"%x",&usbvision_device_data[0].idProduct);
1926 goto2next(parse);
1927 PDEBUG(DBG_PROBE, "idProduct=0x%.4X", usbvision_device_data[0].idProduct);
1928 sscanf(parse,"%d",&usbvision_device_data[0].Interface);
1929 goto2next(parse);
1930 PDEBUG(DBG_PROBE, "Interface=%d", usbvision_device_data[0].Interface);
1931 sscanf(parse,"%d",&usbvision_device_data[0].Codec);
1932 goto2next(parse);
1933 PDEBUG(DBG_PROBE, "Codec=%d", usbvision_device_data[0].Codec);
1934 sscanf(parse,"%d",&usbvision_device_data[0].VideoChannels);
1935 goto2next(parse);
1936 PDEBUG(DBG_PROBE, "VideoChannels=%d", usbvision_device_data[0].VideoChannels);
1937
1938 switch(*parse)
1939 {
1940 case 'P':
1941 PDEBUG(DBG_PROBE, "VideoNorm=PAL");
1942 usbvision_device_data[0].VideoNorm=VIDEO_MODE_PAL;
1943 break;
1944
1945 case 'S':
1946 PDEBUG(DBG_PROBE, "VideoNorm=SECAM");
1947 usbvision_device_data[0].VideoNorm=VIDEO_MODE_SECAM;
1948 break;
1949
1950 case 'N':
1951 PDEBUG(DBG_PROBE, "VideoNorm=NTSC");
1952 usbvision_device_data[0].VideoNorm=VIDEO_MODE_NTSC;
1953 break;
1954
1955 default:
1956 PDEBUG(DBG_PROBE, "VideoNorm=PAL (by default)");
1957 usbvision_device_data[0].VideoNorm=VIDEO_MODE_PAL;
1958 break;
1959 }
1960 goto2next(parse);
1961
1962 sscanf(parse,"%d",&usbvision_device_data[0].AudioChannels);
1963 goto2next(parse);
1964 PDEBUG(DBG_PROBE, "AudioChannels=%d", usbvision_device_data[0].AudioChannels);
1965 sscanf(parse,"%d",&usbvision_device_data[0].Radio);
1966 goto2next(parse);
1967 PDEBUG(DBG_PROBE, "Radio=%d", usbvision_device_data[0].Radio);
1968 sscanf(parse,"%d",&usbvision_device_data[0].Tuner);
1969 goto2next(parse);
1970 PDEBUG(DBG_PROBE, "Tuner=%d", usbvision_device_data[0].Tuner);
1971 sscanf(parse,"%d",&usbvision_device_data[0].TunerType);
1972 goto2next(parse);
1973 PDEBUG(DBG_PROBE, "TunerType=%d", usbvision_device_data[0].TunerType);
1974 sscanf(parse,"%d",&usbvision_device_data[0].Vin_Reg1);
1975 goto2next(parse);
1976 PDEBUG(DBG_PROBE, "Vin_Reg1=%d", usbvision_device_data[0].Vin_Reg1);
1977 sscanf(parse,"%d",&usbvision_device_data[0].Vin_Reg2);
1978 goto2next(parse);
1979 PDEBUG(DBG_PROBE, "Vin_Reg2=%d", usbvision_device_data[0].Vin_Reg2);
1980 sscanf(parse,"%d",&usbvision_device_data[0].X_Offset);
1981 goto2next(parse);
1982 PDEBUG(DBG_PROBE, "X_Offset=%d", usbvision_device_data[0].X_Offset);
1983 sscanf(parse,"%d",&usbvision_device_data[0].Y_Offset);
1984 goto2next(parse);
1985 PDEBUG(DBG_PROBE, "Y_Offset=%d", usbvision_device_data[0].Y_Offset);
1986 sscanf(parse,"%d",&usbvision_device_data[0].Dvi_yuv);
1987 PDEBUG(DBG_PROBE, "Dvi_yuv=%d", usbvision_device_data[0].Dvi_yuv);
1988
1989 //add to usbvision_table also
1990 usbvision_table[0].match_flags=USB_DEVICE_ID_MATCH_DEVICE;
1991 usbvision_table[0].idVendor=usbvision_device_data[0].idVendor;
1992 usbvision_table[0].idProduct=usbvision_device_data[0].idProduct;
1993
1994 }
1995}
1996
1997
1998
1999/*
2000 * usbvision_init()
2001 *
2002 * This code is run to initialize the driver.
2003 *
2004 */
2005static int __init usbvision_init(void)
2006{
2007 int errCode;
2008
2009 PDEBUG(DBG_PROBE, "");
2010
2011 PDEBUG(DBG_IOCTL, "IOCTL debugging is enabled [video]");
2012 PDEBUG(DBG_IO, "IO debugging is enabled [video]");
2013 PDEBUG(DBG_PROBE, "PROBE debugging is enabled [video]");
2014 PDEBUG(DBG_MMAP, "MMAP debugging is enabled [video]");
2015
2016 /* disable planar mode support unless compression enabled */
2017 if (isocMode != ISOC_MODE_COMPRESS ) {
2018 // FIXME : not the right way to set supported flag
2019 usbvision_v4l2_format[6].supported = 0; // V4L2_PIX_FMT_YVU420
2020 usbvision_v4l2_format[7].supported = 0; // V4L2_PIX_FMT_YUV422P
2021 }
2022
2023 customdevice_process();
2024
2025 errCode = usb_register(&usbvision_driver);
2026
2027 if (errCode == 0) {
2028 info(DRIVER_DESC " : " USBVISION_VERSION_STRING);
2029 PDEBUG(DBG_PROBE, "success");
2030 }
2031 return errCode;
2032}
2033
/* Module unload: deregister from the USB core; per-device teardown runs
 * through usbvision_disconnect() for every still-bound interface. */
static void __exit usbvision_exit(void)
{
	PDEBUG(DBG_PROBE, "");

	usb_deregister(&usbvision_driver);
	PDEBUG(DBG_PROBE, "success");
}
2041
2042module_init(usbvision_init);
2043module_exit(usbvision_exit);
2044
2045/*
2046 * Overrides for Emacs so that we follow Linus's tabbing style.
2047 * ---------------------------------------------------------------------------
2048 * Local variables:
2049 * c-basic-offset: 8
2050 * End:
2051 */
diff --git a/drivers/media/video/usbvision/usbvision.h b/drivers/media/video/usbvision/usbvision.h
new file mode 100644
index 000000000000..0e7e3d653cac
--- /dev/null
+++ b/drivers/media/video/usbvision/usbvision.h
@@ -0,0 +1,558 @@
1/*
2 * USBVISION.H
3 * usbvision header file
4 *
5 * Copyright (c) 1999-2005 Joerg Heckenbach <joerg@heckenbach-aw.de>
6 * Dwaine Garden <dwainegarden@rogers.com>
7 *
8 *
9 * Report problems to v4l MailingList : http://www.redhat.com/mailman/listinfo/video4linux-list
10 *
11 * This module is part of usbvision driver project.
12 * Updates to driver completed by Dwaine P. Garden
13 * v4l2 conversion by Thierry Merle <thierry.merle@free.fr>
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License, or
18 * (at your option) any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
28 */
29
30
31#ifndef __LINUX_USBVISION_H
32#define __LINUX_USBVISION_H
33
34#include <linux/list.h>
35#include <linux/usb.h>
36#include <media/v4l2-common.h>
37#include <media/tuner.h>
38#include <linux/videodev2.h>
39
40#define USBVISION_DEBUG /* Turn on debug messages */
41
42#ifndef VID_HARDWARE_USBVISION
43 #define VID_HARDWARE_USBVISION 34 /* USBVision Video Grabber */
44#endif
45
46#define USBVISION_PWR_REG 0x00
47 #define USBVISION_SSPND_EN (1 << 1)
48 #define USBVISION_RES2 (1 << 2)
49 #define USBVISION_PWR_VID (1 << 5)
50 #define USBVISION_E2_EN (1 << 7)
51#define USBVISION_CONFIG_REG 0x01
52#define USBVISION_ADRS_REG 0x02
53#define USBVISION_ALTER_REG 0x03
54#define USBVISION_FORCE_ALTER_REG 0x04
55#define USBVISION_STATUS_REG 0x05
56#define USBVISION_IOPIN_REG 0x06
57 #define USBVISION_IO_1 (1 << 0)
58 #define USBVISION_IO_2 (1 << 1)
59 #define USBVISION_AUDIO_IN 0
60 #define USBVISION_AUDIO_TV 1
61 #define USBVISION_AUDIO_RADIO 2
62 #define USBVISION_AUDIO_MUTE 3
63#define USBVISION_SER_MODE 0x07
64#define USBVISION_SER_ADRS 0x08
65#define USBVISION_SER_CONT 0x09
66#define USBVISION_SER_DAT1 0x0A
67#define USBVISION_SER_DAT2 0x0B
68#define USBVISION_SER_DAT3 0x0C
69#define USBVISION_SER_DAT4 0x0D
70#define USBVISION_EE_DATA 0x0E
71#define USBVISION_EE_LSBAD 0x0F
72#define USBVISION_EE_CONT 0x10
73#define USBVISION_DRM_CONT 0x12
74 #define USBVISION_REF (1 << 0)
75 #define USBVISION_RES_UR (1 << 2)
76 #define USBVISION_RES_FDL (1 << 3)
77 #define USBVISION_RES_VDW (1 << 4)
78#define USBVISION_DRM_PRM1 0x13
79#define USBVISION_DRM_PRM2 0x14
80#define USBVISION_DRM_PRM3 0x15
81#define USBVISION_DRM_PRM4 0x16
82#define USBVISION_DRM_PRM5 0x17
83#define USBVISION_DRM_PRM6 0x18
84#define USBVISION_DRM_PRM7 0x19
85#define USBVISION_DRM_PRM8 0x1A
86#define USBVISION_VIN_REG1 0x1B
87 #define USBVISION_8_422_SYNC 0x01
88 #define USBVISION_16_422_SYNC 0x02
89 #define USBVISION_VSNC_POL (1 << 3)
90 #define USBVISION_HSNC_POL (1 << 4)
91 #define USBVISION_FID_POL (1 << 5)
92 #define USBVISION_HVALID_PO (1 << 6)
93 #define USBVISION_VCLK_POL (1 << 7)
94#define USBVISION_VIN_REG2 0x1C
95 #define USBVISION_AUTO_FID (1 << 0)
96 #define USBVISION_NONE_INTER (1 << 1)
97 #define USBVISION_NOHVALID (1 << 2)
98 #define USBVISION_UV_ID (1 << 3)
99 #define USBVISION_FIX_2C (1 << 4)
100 #define USBVISION_SEND_FID (1 << 5)
101 #define USBVISION_KEEP_BLANK (1 << 7)
102#define USBVISION_LXSIZE_I 0x1D
103#define USBVISION_MXSIZE_I 0x1E
104#define USBVISION_LYSIZE_I 0x1F
105#define USBVISION_MYSIZE_I 0x20
106#define USBVISION_LX_OFFST 0x21
107#define USBVISION_MX_OFFST 0x22
108#define USBVISION_LY_OFFST 0x23
109#define USBVISION_MY_OFFST 0x24
110#define USBVISION_FRM_RATE 0x25
111#define USBVISION_LXSIZE_O 0x26
112#define USBVISION_MXSIZE_O 0x27
113#define USBVISION_LYSIZE_O 0x28
114#define USBVISION_MYSIZE_O 0x29
115#define USBVISION_FILT_CONT 0x2A
116#define USBVISION_VO_MODE 0x2B
117#define USBVISION_INTRA_CYC 0x2C
118#define USBVISION_STRIP_SZ 0x2D
119#define USBVISION_FORCE_INTRA 0x2E
120#define USBVISION_FORCE_UP 0x2F
121#define USBVISION_BUF_THR 0x30
122#define USBVISION_DVI_YUV 0x31
123#define USBVISION_AUDIO_CONT 0x32
124#define USBVISION_AUD_PK_LEN 0x33
125#define USBVISION_BLK_PK_LEN 0x34
126#define USBVISION_PCM_THR1 0x38
127#define USBVISION_PCM_THR2 0x39
128#define USBVISION_DIST_THR_L 0x3A
129#define USBVISION_DIST_THR_H 0x3B
130#define USBVISION_MAX_DIST_L 0x3C
131#define USBVISION_MAX_DIST_H 0x3D
132#define USBVISION_OP_CODE 0x33
133
134#define MAX_BYTES_PER_PIXEL 4
135
136#define MIN_FRAME_WIDTH 64
137#define MAX_USB_WIDTH 320 //384
138#define MAX_FRAME_WIDTH 320 //384 /*streching sometimes causes crashes*/
139
140#define MIN_FRAME_HEIGHT 48
141#define MAX_USB_HEIGHT 240 //288
142#define MAX_FRAME_HEIGHT 240 //288 /*Streching sometimes causes crashes*/
143
144#define MAX_FRAME_SIZE (MAX_FRAME_WIDTH * MAX_FRAME_HEIGHT * MAX_BYTES_PER_PIXEL)
145#define USBVISION_CLIPMASK_SIZE (MAX_FRAME_WIDTH * MAX_FRAME_HEIGHT / 8) //bytesize of clipmask
146
147#define USBVISION_URB_FRAMES 32
148#define USBVISION_MAX_ISOC_PACKET_SIZE 959 // NT1003 Specs Document says 1023
149
150#define USBVISION_NUM_HEADERMARKER 20
151#define USBVISION_NUMFRAMES 3 /* Maximum number of frames an application can get */
152#define USBVISION_NUMSBUF 2 /* Dimensioning the USB S buffering */
153
#define USBVISION_POWEROFF_TIME		(3 * HZ)	/* 3 seconds; parenthesized so the macro expands safely inside larger expressions (e.g. division) */
155
156
157#define FRAMERATE_MIN 0
158#define FRAMERATE_MAX 31
159
160enum {
161 ISOC_MODE_YUV422 = 0x03,
162 ISOC_MODE_YUV420 = 0x14,
163 ISOC_MODE_COMPRESS = 0x60,
164};
165
/* This macro restricts an int variable to an inclusive range [mi, ma].
 * Wrapped in do { } while (0) so it behaves as a single statement and is
 * safe in unbraced if/else arms (a bare { } block followed by ';' is not). */
#define RESTRICT_TO_RANGE(v,mi,ma) \
	do { if ((v) < (mi)) (v) = (mi); else if ((v) > (ma)) (v) = (ma); } while (0)
168
169/*
170 * We use macros to do YUV -> RGB conversion because this is
171 * very important for speed and totally unimportant for size.
172 *
173 * YUV -> RGB Conversion
174 * ---------------------
175 *
176 * B = 1.164*(Y-16) + 2.018*(V-128)
177 * G = 1.164*(Y-16) - 0.813*(U-128) - 0.391*(V-128)
178 * R = 1.164*(Y-16) + 1.596*(U-128)
179 *
180 * If you fancy integer arithmetics (as you should), hear this:
181 *
182 * 65536*B = 76284*(Y-16) + 132252*(V-128)
183 * 65536*G = 76284*(Y-16) - 53281*(U-128) - 25625*(V-128)
184 * 65536*R = 76284*(Y-16) + 104595*(U-128)
185 *
186 * Make sure the output values are within [0..255] range.
187 */
#define LIMIT_RGB(x) (((x) < 0) ? 0 : (((x) > 255) ? 255 : (x)))
/* Integer YUV -> RGB per the formulas above; writes clamped [0..255] values
 * into the lvalues mr/mg/mb. Wrapped in do { } while (0) so the multi-
 * statement body acts as one statement (safe in unbraced if/else arms). */
#define YUV_TO_RGB_BY_THE_BOOK(my,mu,mv,mr,mg,mb) do { \
    int mm_y, mm_yc, mm_u, mm_v, mm_r, mm_g, mm_b; \
    mm_y = (my) - 16; \
    mm_u = (mu) - 128; \
    mm_v = (mv) - 128; \
    mm_yc= mm_y * 76284; \
    mm_b = (mm_yc		+ 132252*mm_v	) >> 16; \
    mm_g = (mm_yc -	53281*mm_u -	25625*mm_v	) >> 16; \
    mm_r = (mm_yc + 104595*mm_u			) >> 16; \
    mb = LIMIT_RGB(mm_b); \
    mg = LIMIT_RGB(mm_g); \
    mr = LIMIT_RGB(mm_r); \
} while (0)
202
/* Debugging aid: print a marker and sleep ~3s so a human can watch the log.
 * Wrapped in do { } while (0) so the multi-statement body is one statement. */
#define USBVISION_SAY_AND_WAIT(what) do { \
	wait_queue_head_t wq; \
	init_waitqueue_head(&wq); \
	printk(KERN_INFO "Say: %s\n", what); \
	interruptible_sleep_on_timeout (&wq, HZ*3); \
} while (0)
210
211/*
212 * This macro checks if usbvision is still operational. The 'usbvision'
213 * pointer must be valid, usbvision->dev must be valid, we are not
214 * removing the device and the device has not erred on us.
215 */
216#define USBVISION_IS_OPERATIONAL(udevice) (\
217 (udevice != NULL) && \
218 ((udevice)->dev != NULL) && \
219 ((udevice)->last_error == 0) && \
220 (!(udevice)->remove_pending))
221
/* I2C structures */
/*
 * Glue between the generic i2c algorithm layer and the usbvision USB
 * transport. inb/outb presumably transfer 'len' bytes to/from the chip
 * at i2c address 'addr' over the USB control pipe -- verify against
 * usbvision-i2c.c.
 */
struct i2c_algo_usb_data {
	void *data;		/* private data for lowlevel routines */
	int (*inb) (void *data, unsigned char addr, char *buf, short len);
	int (*outb) (void *data, unsigned char addr, char *buf, short len);

	/* local settings */
	int udelay;		/* NOTE(review): units/use not visible here -- confirm in usbvision-i2c.c */
	int mdelay;
	int timeout;
};
233
234#define I2C_USB_ADAP_MAX 16
235
236/* ----------------------------------------------------------------- */
237/* usbvision video structures */
238/* ----------------------------------------------------------------- */
239enum ScanState {
240 ScanState_Scanning, /* Scanning for header */
241 ScanState_Lines /* Parsing lines */
242};
243
244/* Completion states of the data parser */
245enum ParseState {
246 ParseState_Continue, /* Just parse next item */
247 ParseState_NextFrame, /* Frame done, send it to V4L */
248 ParseState_Out, /* Not enough data for frame */
249 ParseState_EndParse /* End parsing */
250};
251
252enum FrameState {
253 FrameState_Unused, /* Unused (no MCAPTURE) */
254 FrameState_Ready, /* Ready to start grabbing */
255 FrameState_Grabbing, /* In the process of being grabbed into */
256 FrameState_Done, /* Finished grabbing, but not been synced yet */
257 FrameState_DoneHold, /* Are syncing or reading */
258 FrameState_Error, /* Something bad happened while processing */
259};
260
261/* stream states */
262enum StreamState {
263 Stream_Off, /* Driver streaming is completely OFF */
264 Stream_Idle, /* Driver streaming is ready to be put ON by the application */
265 Stream_Interrupt, /* Driver streaming must be interrupted */
266 Stream_On, /* Driver streaming is put ON by the application */
267};
268
269enum IsocState {
270 IsocState_InFrame, /* Isoc packet is member of frame */
271 IsocState_NoFrame, /* Isoc packet is not member of any frame */
272};
273
274struct usb_device;
275
276struct usbvision_sbuf {
277 char *data;
278 struct urb *urb;
279};
280
281#define USBVISION_MAGIC_1 0x55
282#define USBVISION_MAGIC_2 0xAA
283#define USBVISION_HEADER_LENGTH 0x0c
284#define USBVISION_SAA7111_ADDR 0x48
285#define USBVISION_SAA7113_ADDR 0x4a
286#define USBVISION_IIC_LRACK 0x20
287#define USBVISION_IIC_LRNACK 0x30
288#define USBVISION_FRAME_FORMAT_PARAM_INTRA (1<<7)
289
290struct usbvision_v4l2_format_st {
291 int supported;
292 int bytes_per_pixel;
293 int depth;
294 int format;
295 char *desc;
296};
297#define USBVISION_SUPPORTED_PALETTES ARRAY_SIZE(usbvision_v4l2_format)
298
299struct usbvision_frame_header {
300 unsigned char magic_1; /* 0 magic */
301 unsigned char magic_2; /* 1 magic */
302 unsigned char headerLength; /* 2 */
303 unsigned char frameNum; /* 3 */
304 unsigned char framePhase; /* 4 */
305 unsigned char frameLatency; /* 5 */
306 unsigned char dataFormat; /* 6 */
307 unsigned char formatParam; /* 7 */
308 unsigned char frameWidthLo; /* 8 */
309 unsigned char frameWidthHi; /* 9 */
310 unsigned char frameHeightLo; /* 10 */
311 unsigned char frameHeightHi; /* 11 */
312 __u16 frameWidth; /* 8 - 9 after endian correction*/
313 __u16 frameHeight; /* 10 - 11 after endian correction*/
314};
315
316/* tvnorms */
317struct usbvision_tvnorm {
318 char *name;
319 v4l2_std_id id;
320 /* mode for saa7113h */
321 int mode;
322};
323
324struct usbvision_frame {
325 char *data; /* Frame buffer */
326 struct usbvision_frame_header isocHeader; /* Header from stream */
327
328 int width; /* Width application is expecting */
329 int height; /* Height */
330 int index; /* Frame index */
331 int frmwidth; /* Width the frame actually is */
332 int frmheight; /* Height */
333
334 volatile int grabstate; /* State of grabbing */
335 int scanstate; /* State of scanning */
336
337 struct list_head frame;
338
339 int curline; /* Line of frame we're working on */
340
341 long scanlength; /* uncompressed, raw data length of frame */
342 long bytes_read; /* amount of scanlength that has been read from data */
343 struct usbvision_v4l2_format_st v4l2_format; /* format the user needs*/
344 int v4l2_linesize; /* bytes for one videoline*/
345 struct timeval timestamp;
346 int sequence; // How many video frames we send to user
347};
348
349#define CODEC_SAA7113 7113
350#define CODEC_SAA7111 7111
351#define BRIDGE_NT1003 1003
352#define BRIDGE_NT1004 1004
353#define BRIDGE_NT1005 1005
354
355struct usbvision_device_data_st {
356 int idVendor;
357 int idProduct;
358 int Interface; /* to handle special interface number like BELKIN and Hauppauge WinTV-USB II */
359 int Codec;
360 int VideoChannels;
361 __u64 VideoNorm;
362 int AudioChannels;
363 int Radio;
364 int vbi;
365 int Tuner;
366 int TunerType;
367 int Vin_Reg1;
368 int Vin_Reg2;
369 int X_Offset;
370 int Y_Offset;
371 int Dvi_yuv;
372 char *ModelString;
373};
374
375/* Declared on usbvision-cards.c */
376extern struct usbvision_device_data_st usbvision_device_data[];
377extern struct usb_device_id usbvision_table[];
378
379struct usb_usbvision {
380 struct video_device *vdev; /* Video Device */
381 struct video_device *rdev; /* Radio Device */
382 struct video_device *vbi; /* VBI Device */
383
384 /* i2c Declaration Section*/
385 struct i2c_adapter i2c_adap;
386 struct i2c_algo_usb_data i2c_algo;
387 struct i2c_client i2c_client;
388
389 struct urb *ctrlUrb;
390 unsigned char ctrlUrbBuffer[8];
391 int ctrlUrbBusy;
392 struct usb_ctrlrequest ctrlUrbSetup;
393 wait_queue_head_t ctrlUrb_wq; // Processes waiting
394 struct semaphore ctrlUrbLock;
395
396 /* configuration part */
397 int have_tuner;
398 int tuner_type;
399 int tuner_addr;
400 int bridgeType; // NT1003, NT1004, NT1005
401 int channel;
402 int radio;
403 int video_inputs; // # of inputs
404 unsigned long freq;
405 int AudioMute;
406 int AudioChannel;
407 int isocMode; // format of video data for the usb isoc-transfer
408 unsigned int nr; // Number of the device
409
410 /* Device structure */
411 struct usb_device *dev;
412 unsigned char iface; /* Video interface number */
413 unsigned char ifaceAltActive, ifaceAltInactive; /* Alt settings */
414 unsigned char Vin_Reg2_Preset;
415 struct semaphore lock;
416 struct timer_list powerOffTimer;
417 struct work_struct powerOffWork;
418 int power; /* is the device powered on? */
419 int user; /* user count for exclusive use */
420 int initialized; /* Had we already sent init sequence? */
421 int DevModel; /* What type of USBVISION device we got? */
422 enum StreamState streaming; /* Are we streaming Isochronous? */
423 int last_error; /* What calamity struck us? */
424 int curwidth; /* width of the frame the device is currently set to*/
425 int curheight; /* height of the frame the device is currently set to*/
426 int stretch_width; /* stretch-factor for frame width (from usb to screen)*/
427 int stretch_height; /* stretch-factor for frame height (from usb to screen)*/
428 char *fbuf; /* Videodev buffer area for mmap*/
429 int max_frame_size; /* Bytes in one video frame */
430 int fbuf_size; /* Videodev buffer size */
431 spinlock_t queue_lock; /* spinlock for protecting mods on inqueue and outqueue */
432 struct list_head inqueue, outqueue; /* queued frame list and ready to dequeue frame list */
433 wait_queue_head_t wait_frame; /* Processes waiting */
434 wait_queue_head_t wait_stream; /* Processes waiting */
435 struct usbvision_frame *curFrame; // pointer to current frame, set by usbvision_find_header
436 struct usbvision_frame frame[USBVISION_NUMFRAMES]; // frame buffer
437 struct usbvision_sbuf sbuf[USBVISION_NUMSBUF]; // S buffering
438 volatile int remove_pending; /* If set then about to exit */
439
440 /* Scratch space from the Isochronous Pipe.*/
441 unsigned char *scratch;
442 int scratch_read_ptr;
443 int scratch_write_ptr;
444 int scratch_headermarker[USBVISION_NUM_HEADERMARKER];
445 int scratch_headermarker_read_ptr;
446 int scratch_headermarker_write_ptr;
447 enum IsocState isocstate;
448 struct usbvision_v4l2_format_st palette;
449
450 struct v4l2_capability vcap; /* Video capabilities */
451 unsigned int ctl_input; /* selected input */
452 struct usbvision_tvnorm *tvnorm; /* selected tv norm */
453 unsigned char video_endp; /* 0x82 for USBVISION devices based */
454
455 // Decompression stuff:
456 unsigned char *IntraFrameBuffer; /* Buffer for reference frame */
457 int BlockPos; //for test only
458 int requestIntra; // 0 = normal; 1 = intra frame is requested;
459 int lastIsocFrameNum; // check for lost isoc frames
460 int isocPacketSize; // need to calculate usedBandwidth
461 int usedBandwidth; // used bandwidth 0-100%, need to set comprLevel
462 int comprLevel; // How strong (100) or weak (0) is compression
463 int lastComprLevel; // How strong (100) or weak (0) was compression
464 int usb_bandwidth; /* Mbit/s */
465
466 /* Statistics that can be overlayed on the screen */
467 unsigned long isocUrbCount; // How many URBs we received so far
468 unsigned long urb_length; /* Length of last URB */
469 unsigned long isocDataCount; /* How many bytes we received */
470 unsigned long header_count; /* How many frame headers we found */
471 unsigned long scratch_ovf_count; /* How many times we overflowed scratch */
472 unsigned long isocSkipCount; /* How many empty ISO packets received */
473 unsigned long isocErrCount; /* How many bad ISO packets received */
474 unsigned long isocPacketCount; // How many packets we totally got
475 unsigned long timeInIrq; // How long do we need for interrupt
476 int isocMeasureBandwidthCount;
477 int frame_num; // How many video frames we send to user
478 int maxStripLen; // How big is the biggest strip
479 int comprBlockPos;
480 int stripLenErrors; // How many times was BlockPos greater than StripLen
481 int stripMagicErrors;
482 int stripLineNumberErrors;
483 int ComprBlockTypes[4];
484};
485
486
487/* --------------------------------------------------------------- */
488/* defined in usbvision-i2c.c */
489/* i2c-algo-usb declaration */
490/* --------------------------------------------------------------- */
491
492int usbvision_i2c_usb_add_bus(struct i2c_adapter *);
493int usbvision_i2c_usb_del_bus(struct i2c_adapter *);
494
495static inline void *i2c_get_algo_usb_data (struct i2c_algo_usb_data *dev)
496{
497 return dev->data;
498}
499
500static inline void i2c_set_algo_usb_data (struct i2c_algo_usb_data *dev, void *data)
501{
502 dev->data = data;
503}
504
505
506/* ----------------------------------------------------------------------- */
507/* usbvision specific I2C functions */
508/* ----------------------------------------------------------------------- */
509int usbvision_init_i2c(struct usb_usbvision *usbvision);
510void call_i2c_clients(struct usb_usbvision *usbvision, unsigned int cmd,void *arg);
511
512/* defined in usbvision-core.c */
513void *usbvision_rvmalloc(unsigned long size);
514void usbvision_rvfree(void *mem, unsigned long size);
515int usbvision_read_reg(struct usb_usbvision *usbvision, unsigned char reg);
516int usbvision_write_reg(struct usb_usbvision *usbvision, unsigned char reg,
517 unsigned char value);
518
519int usbvision_frames_alloc(struct usb_usbvision *usbvision);
520void usbvision_frames_free(struct usb_usbvision *usbvision);
521int usbvision_scratch_alloc(struct usb_usbvision *usbvision);
522void usbvision_scratch_free(struct usb_usbvision *usbvision);
523int usbvision_sbuf_alloc(struct usb_usbvision *usbvision);
524void usbvision_sbuf_free(struct usb_usbvision *usbvision);
525int usbvision_decompress_alloc(struct usb_usbvision *usbvision);
526void usbvision_decompress_free(struct usb_usbvision *usbvision);
527
528int usbvision_setup(struct usb_usbvision *usbvision,int format);
529int usbvision_init_isoc(struct usb_usbvision *usbvision);
530int usbvision_restart_isoc(struct usb_usbvision *usbvision);
531void usbvision_stop_isoc(struct usb_usbvision *usbvision);
532
533int usbvision_set_audio(struct usb_usbvision *usbvision, int AudioChannel);
534int usbvision_audio_off(struct usb_usbvision *usbvision);
535
536int usbvision_begin_streaming(struct usb_usbvision *usbvision);
537void usbvision_empty_framequeues(struct usb_usbvision *dev);
538int usbvision_stream_interrupt(struct usb_usbvision *dev);
539
540int usbvision_muxsel(struct usb_usbvision *usbvision, int channel);
541int usbvision_set_input(struct usb_usbvision *usbvision);
542int usbvision_set_output(struct usb_usbvision *usbvision, int width, int height);
543
544void usbvision_init_powerOffTimer(struct usb_usbvision *usbvision);
545void usbvision_set_powerOffTimer(struct usb_usbvision *usbvision);
546void usbvision_reset_powerOffTimer(struct usb_usbvision *usbvision);
547int usbvision_power_off(struct usb_usbvision *usbvision);
548int usbvision_power_on(struct usb_usbvision *usbvision);
549
550#endif /* __LINUX_USBVISION_H */
551
552/*
553 * Overrides for Emacs so that we follow Linus's tabbing style.
554 * ---------------------------------------------------------------------------
555 * Local variables:
556 * c-basic-offset: 8
557 * End:
558 */
diff --git a/drivers/media/video/v4l1-compat.c b/drivers/media/video/v4l1-compat.c
index 1d899e2db394..8a13e595304e 100644
--- a/drivers/media/video/v4l1-compat.c
+++ b/drivers/media/video/v4l1-compat.c
@@ -350,6 +350,7 @@ v4l_compat_translate_ioctl(struct inode *inode,
350 struct video_buffer *buffer = arg; 350 struct video_buffer *buffer = arg;
351 351
352 memset(buffer, 0, sizeof(*buffer)); 352 memset(buffer, 0, sizeof(*buffer));
353 memset(&fbuf2, 0, sizeof(fbuf2));
353 354
354 err = drv(inode, file, VIDIOC_G_FBUF, &fbuf2); 355 err = drv(inode, file, VIDIOC_G_FBUF, &fbuf2);
355 if (err < 0) { 356 if (err < 0) {
@@ -616,6 +617,7 @@ v4l_compat_translate_ioctl(struct inode *inode,
616 case VIDIOCSPICT: /* set tone controls & partial capture format */ 617 case VIDIOCSPICT: /* set tone controls & partial capture format */
617 { 618 {
618 struct video_picture *pict = arg; 619 struct video_picture *pict = arg;
620 memset(&fbuf2, 0, sizeof(fbuf2));
619 621
620 set_v4l_control(inode, file, 622 set_v4l_control(inode, file,
621 V4L2_CID_BRIGHTNESS, pict->brightness, drv); 623 V4L2_CID_BRIGHTNESS, pict->brightness, drv);
@@ -708,12 +710,22 @@ v4l_compat_translate_ioctl(struct inode *inode,
708 } 710 }
709 case VIDIOCSTUNER: /* select a tuner input */ 711 case VIDIOCSTUNER: /* select a tuner input */
710 { 712 {
711 err = 0; 713 struct video_tuner *tun = arg;
714 struct v4l2_tuner t;
715 memset(&t,0,sizeof(t));
716
717 t.index=tun->tuner;
718
719 err = drv(inode, file, VIDIOC_S_INPUT, &t);
720 if (err < 0)
721 dprintk("VIDIOCSTUNER / VIDIOC_S_INPUT: %d\n",err);
722
712 break; 723 break;
713 } 724 }
714 case VIDIOCGFREQ: /* get frequency */ 725 case VIDIOCGFREQ: /* get frequency */
715 { 726 {
716 unsigned long *freq = arg; 727 unsigned long *freq = arg;
728 memset(&freq2,0,sizeof(freq2));
717 729
718 freq2.tuner = 0; 730 freq2.tuner = 0;
719 err = drv(inode, file, VIDIOC_G_FREQUENCY, &freq2); 731 err = drv(inode, file, VIDIOC_G_FREQUENCY, &freq2);
@@ -726,8 +738,8 @@ v4l_compat_translate_ioctl(struct inode *inode,
726 case VIDIOCSFREQ: /* set frequency */ 738 case VIDIOCSFREQ: /* set frequency */
727 { 739 {
728 unsigned long *freq = arg; 740 unsigned long *freq = arg;
741 memset(&freq2,0,sizeof(freq2));
729 742
730 freq2.tuner = 0;
731 drv(inode, file, VIDIOC_G_FREQUENCY, &freq2); 743 drv(inode, file, VIDIOC_G_FREQUENCY, &freq2);
732 freq2.frequency = *freq; 744 freq2.frequency = *freq;
733 err = drv(inode, file, VIDIOC_S_FREQUENCY, &freq2); 745 err = drv(inode, file, VIDIOC_S_FREQUENCY, &freq2);
@@ -738,6 +750,7 @@ v4l_compat_translate_ioctl(struct inode *inode,
738 case VIDIOCGAUDIO: /* get audio properties/controls */ 750 case VIDIOCGAUDIO: /* get audio properties/controls */
739 { 751 {
740 struct video_audio *aud = arg; 752 struct video_audio *aud = arg;
753 memset(&aud2,0,sizeof(aud2));
741 754
742 err = drv(inode, file, VIDIOC_G_AUDIO, &aud2); 755 err = drv(inode, file, VIDIOC_G_AUDIO, &aud2);
743 if (err < 0) { 756 if (err < 0) {
@@ -898,6 +911,7 @@ v4l_compat_translate_ioctl(struct inode *inode,
898 { 911 {
899 int *i = arg; 912 int *i = arg;
900 913
914 memset(&buf2,0,sizeof(buf2));
901 buf2.index = *i; 915 buf2.index = *i;
902 buf2.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 916 buf2.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
903 err = drv(inode, file, VIDIOC_QUERYBUF, &buf2); 917 err = drv(inode, file, VIDIOC_QUERYBUF, &buf2);
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index 78d28b03ec93..752c82c37f55 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -87,6 +87,78 @@ MODULE_LICENSE("GPL");
87 */ 87 */
88 88
89 89
90char *v4l2_norm_to_name(v4l2_std_id id)
91{
92 char *name;
93
94 switch (id) {
95 case V4L2_STD_PAL:
96 name="PAL"; break;
97 case V4L2_STD_PAL_BG:
98 name="PAL-BG"; break;
99 case V4L2_STD_PAL_DK:
100 name="PAL-DK"; break;
101 case V4L2_STD_PAL_B:
102 name="PAL-B"; break;
103 case V4L2_STD_PAL_B1:
104 name="PAL-B1"; break;
105 case V4L2_STD_PAL_G:
106 name="PAL-G"; break;
107 case V4L2_STD_PAL_H:
108 name="PAL-H"; break;
109 case V4L2_STD_PAL_I:
110 name="PAL-I"; break;
111 case V4L2_STD_PAL_D:
112 name="PAL-D"; break;
113 case V4L2_STD_PAL_D1:
114 name="PAL-D1"; break;
115 case V4L2_STD_PAL_K:
116 name="PAL-K"; break;
117 case V4L2_STD_PAL_M:
118 name="PAL-M"; break;
119 case V4L2_STD_PAL_N:
120 name="PAL-N"; break;
121 case V4L2_STD_PAL_Nc:
122 name="PAL-Nc"; break;
123 case V4L2_STD_PAL_60:
124 name="PAL-60"; break;
125 case V4L2_STD_NTSC:
126 name="NTSC"; break;
127 case V4L2_STD_NTSC_M:
128 name="NTSC-M"; break;
129 case V4L2_STD_NTSC_M_JP:
130 name="NTSC-M-JP"; break;
131 case V4L2_STD_NTSC_443:
132 name="NTSC-443"; break;
133 case V4L2_STD_NTSC_M_KR:
134 name="NTSC-M-KR"; break;
135 case V4L2_STD_SECAM:
136 name="SECAM"; break;
137 case V4L2_STD_SECAM_DK:
138 name="SECAM-DK"; break;
139 case V4L2_STD_SECAM_B:
140 name="SECAM-B"; break;
141 case V4L2_STD_SECAM_D:
142 name="SECAM-D"; break;
143 case V4L2_STD_SECAM_G:
144 name="SECAM-G"; break;
145 case V4L2_STD_SECAM_H:
146 name="SECAM-H"; break;
147 case V4L2_STD_SECAM_K:
148 name="SECAM-K"; break;
149 case V4L2_STD_SECAM_K1:
150 name="SECAM-K1"; break;
151 case V4L2_STD_SECAM_L:
152 name="SECAM-L"; break;
153 case V4L2_STD_SECAM_LC:
154 name="SECAM-LC"; break;
155 default:
156 name="Unknown"; break;
157 }
158
159 return name;
160}
161
90/* Fill in the fields of a v4l2_standard structure according to the 162/* Fill in the fields of a v4l2_standard structure according to the
91 'id' and 'transmission' parameters. Returns negative on error. */ 163 'id' and 'transmission' parameters. Returns negative on error. */
92int v4l2_video_std_construct(struct v4l2_standard *vs, 164int v4l2_video_std_construct(struct v4l2_standard *vs,
@@ -184,11 +256,13 @@ char *v4l2_field_names[] = {
184}; 256};
185 257
186char *v4l2_type_names[] = { 258char *v4l2_type_names[] = {
187 [V4L2_BUF_TYPE_VIDEO_CAPTURE] = "video-cap", 259 [V4L2_BUF_TYPE_VIDEO_CAPTURE] = "video-cap",
188 [V4L2_BUF_TYPE_VIDEO_OVERLAY] = "video-over", 260 [V4L2_BUF_TYPE_VIDEO_OVERLAY] = "video-over",
189 [V4L2_BUF_TYPE_VIDEO_OUTPUT] = "video-out", 261 [V4L2_BUF_TYPE_VIDEO_OUTPUT] = "video-out",
190 [V4L2_BUF_TYPE_VBI_CAPTURE] = "vbi-cap", 262 [V4L2_BUF_TYPE_VBI_CAPTURE] = "vbi-cap",
191 [V4L2_BUF_TYPE_VBI_OUTPUT] = "vbi-out", 263 [V4L2_BUF_TYPE_VBI_OUTPUT] = "vbi-out",
264 [V4L2_BUF_TYPE_SLICED_VBI_CAPTURE] = "sliced-vbi-cap",
265 [V4L2_BUF_TYPE_SLICED_VBI_OUTPUT] = "slicec-vbi-out",
192}; 266};
193 267
194static char *v4l2_memory_names[] = { 268static char *v4l2_memory_names[] = {
@@ -1451,6 +1525,7 @@ u32 v4l2_ctrl_next(const u32 * const * ctrl_classes, u32 id)
1451 1525
1452/* ----------------------------------------------------------------- */ 1526/* ----------------------------------------------------------------- */
1453 1527
1528EXPORT_SYMBOL(v4l2_norm_to_name);
1454EXPORT_SYMBOL(v4l2_video_std_construct); 1529EXPORT_SYMBOL(v4l2_video_std_construct);
1455 1530
1456EXPORT_SYMBOL(v4l2_prio_init); 1531EXPORT_SYMBOL(v4l2_prio_init);
diff --git a/drivers/media/video/videocodec.c b/drivers/media/video/videocodec.c
index 2ae3fb250630..290e64135650 100644
--- a/drivers/media/video/videocodec.c
+++ b/drivers/media/video/videocodec.c
@@ -346,7 +346,7 @@ videocodec_build_table (void)
346 size); 346 size);
347 347
348 kfree(videocodec_buf); 348 kfree(videocodec_buf);
349 videocodec_buf = (char *) kmalloc(size, GFP_KERNEL); 349 videocodec_buf = kmalloc(size, GFP_KERNEL);
350 350
351 i = 0; 351 i = 0;
352 i += scnprintf(videocodec_buf + i, size - 1, 352 i += scnprintf(videocodec_buf + i, size - 1,
diff --git a/drivers/media/video/videodev.c b/drivers/media/video/videodev.c
index 41ec0c4b35a2..6a0e8ca72948 100644
--- a/drivers/media/video/videodev.c
+++ b/drivers/media/video/videodev.c
@@ -342,7 +342,7 @@ static void dbgbuf(unsigned int cmd, struct video_device *vfd,
342 342
343 dbgarg (cmd, "%02ld:%02d:%02d.%08ld index=%d, type=%s, " 343 dbgarg (cmd, "%02ld:%02d:%02d.%08ld index=%d, type=%s, "
344 "bytesused=%d, flags=0x%08d, " 344 "bytesused=%d, flags=0x%08d, "
345 "field=%0d, sequence=%d, memory=%s, offset/userptr=0x%08lx\n", 345 "field=%0d, sequence=%d, memory=%s, offset/userptr=0x%08lx, length=%d\n",
346 (p->timestamp.tv_sec/3600), 346 (p->timestamp.tv_sec/3600),
347 (int)(p->timestamp.tv_sec/60)%60, 347 (int)(p->timestamp.tv_sec/60)%60,
348 (int)(p->timestamp.tv_sec%60), 348 (int)(p->timestamp.tv_sec%60),
@@ -352,7 +352,7 @@ static void dbgbuf(unsigned int cmd, struct video_device *vfd,
352 p->bytesused,p->flags, 352 p->bytesused,p->flags,
353 p->field,p->sequence, 353 p->field,p->sequence,
354 prt_names(p->memory,v4l2_memory_names), 354 prt_names(p->memory,v4l2_memory_names),
355 p->m.userptr); 355 p->m.userptr, p->length);
356 dbgarg2 ("timecode= %02d:%02d:%02d type=%d, " 356 dbgarg2 ("timecode= %02d:%02d:%02d type=%d, "
357 "flags=0x%08d, frames=%d, userbits=0x%08x\n", 357 "flags=0x%08d, frames=%d, userbits=0x%08x\n",
358 tc->hours,tc->minutes,tc->seconds, 358 tc->hours,tc->minutes,tc->seconds,
@@ -369,9 +369,13 @@ static inline void dbgrect(struct video_device *vfd, char *s,
369static inline void v4l_print_pix_fmt (struct video_device *vfd, 369static inline void v4l_print_pix_fmt (struct video_device *vfd,
370 struct v4l2_pix_format *fmt) 370 struct v4l2_pix_format *fmt)
371{ 371{
372 dbgarg2 ("width=%d, height=%d, format=0x%08x, field=%s, " 372 dbgarg2 ("width=%d, height=%d, format=%c%c%c%c, field=%s, "
373 "bytesperline=%d sizeimage=%d, colorspace=%d\n", 373 "bytesperline=%d sizeimage=%d, colorspace=%d\n",
374 fmt->width,fmt->height,fmt->pixelformat, 374 fmt->width,fmt->height,
375 (fmt->pixelformat & 0xff),
376 (fmt->pixelformat >> 8) & 0xff,
377 (fmt->pixelformat >> 16) & 0xff,
378 (fmt->pixelformat >> 24) & 0xff,
375 prt_names(fmt->field,v4l2_field_names_FIXME), 379 prt_names(fmt->field,v4l2_field_names_FIXME),
376 fmt->bytesperline,fmt->sizeimage,fmt->colorspace); 380 fmt->bytesperline,fmt->sizeimage,fmt->colorspace);
377}; 381};
@@ -428,6 +432,10 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
428 v4l_print_ioctl(vfd->name, cmd); 432 v4l_print_ioctl(vfd->name, cmd);
429 } 433 }
430 434
435 if (_IOC_TYPE(cmd)=='v')
436 return v4l_compat_translate_ioctl(inode,file,cmd,arg,
437 __video_do_ioctl);
438
431 switch(cmd) { 439 switch(cmd) {
432 /* --- capabilities ------------------------------------------ */ 440 /* --- capabilities ------------------------------------------ */
433 case VIDIOC_QUERYCAP: 441 case VIDIOC_QUERYCAP:
@@ -526,12 +534,13 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
526 } 534 }
527 if (!ret) 535 if (!ret)
528 dbgarg (cmd, "index=%d, type=%d, flags=%d, " 536 dbgarg (cmd, "index=%d, type=%d, flags=%d, "
529 "description=%s," 537 "pixelformat=%c%c%c%c, description='%s'\n",
530 " pixelformat=0x%8x\n",
531 f->index, f->type, f->flags, 538 f->index, f->type, f->flags,
532 f->description, 539 (f->pixelformat & 0xff),
533 f->pixelformat); 540 (f->pixelformat >> 8) & 0xff,
534 541 (f->pixelformat >> 16) & 0xff,
542 (f->pixelformat >> 24) & 0xff,
543 f->description);
535 break; 544 break;
536 } 545 }
537 case VIDIOC_G_FMT: 546 case VIDIOC_G_FMT:
@@ -829,20 +838,85 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
829 case VIDIOC_ENUMSTD: 838 case VIDIOC_ENUMSTD:
830 { 839 {
831 struct v4l2_standard *p = arg; 840 struct v4l2_standard *p = arg;
832 unsigned int index = p->index; 841 v4l2_std_id id = vfd->tvnorms,curr_id=0;
842 unsigned int index = p->index,i;
833 843
834 if (!vfd->tvnormsize) { 844 if (index<0) {
835 printk (KERN_WARNING "%s: no TV norms defined!\n", 845 ret=-EINVAL;
836 vfd->name);
837 break; 846 break;
838 } 847 }
839 848
840 if (index<0 || index >= vfd->tvnormsize) { 849 /* Return norm array on a canonical way */
841 ret=-EINVAL; 850 for (i=0;i<= index && id; i++) {
842 break; 851 if ( (id & V4L2_STD_PAL) == V4L2_STD_PAL) {
852 curr_id = V4L2_STD_PAL;
853 } else if ( (id & V4L2_STD_PAL_BG) == V4L2_STD_PAL_BG) {
854 curr_id = V4L2_STD_PAL_BG;
855 } else if ( (id & V4L2_STD_PAL_DK) == V4L2_STD_PAL_DK) {
856 curr_id = V4L2_STD_PAL_DK;
857 } else if ( (id & V4L2_STD_PAL_B) == V4L2_STD_PAL_B) {
858 curr_id = V4L2_STD_PAL_B;
859 } else if ( (id & V4L2_STD_PAL_B1) == V4L2_STD_PAL_B1) {
860 curr_id = V4L2_STD_PAL_B1;
861 } else if ( (id & V4L2_STD_PAL_G) == V4L2_STD_PAL_G) {
862 curr_id = V4L2_STD_PAL_G;
863 } else if ( (id & V4L2_STD_PAL_H) == V4L2_STD_PAL_H) {
864 curr_id = V4L2_STD_PAL_H;
865 } else if ( (id & V4L2_STD_PAL_I) == V4L2_STD_PAL_I) {
866 curr_id = V4L2_STD_PAL_I;
867 } else if ( (id & V4L2_STD_PAL_D) == V4L2_STD_PAL_D) {
868 curr_id = V4L2_STD_PAL_D;
869 } else if ( (id & V4L2_STD_PAL_D1) == V4L2_STD_PAL_D1) {
870 curr_id = V4L2_STD_PAL_D1;
871 } else if ( (id & V4L2_STD_PAL_K) == V4L2_STD_PAL_K) {
872 curr_id = V4L2_STD_PAL_K;
873 } else if ( (id & V4L2_STD_PAL_M) == V4L2_STD_PAL_M) {
874 curr_id = V4L2_STD_PAL_M;
875 } else if ( (id & V4L2_STD_PAL_N) == V4L2_STD_PAL_N) {
876 curr_id = V4L2_STD_PAL_N;
877 } else if ( (id & V4L2_STD_PAL_Nc) == V4L2_STD_PAL_Nc) {
878 curr_id = V4L2_STD_PAL_Nc;
879 } else if ( (id & V4L2_STD_PAL_60) == V4L2_STD_PAL_60) {
880 curr_id = V4L2_STD_PAL_60;
881 } else if ( (id & V4L2_STD_NTSC) == V4L2_STD_NTSC) {
882 curr_id = V4L2_STD_NTSC;
883 } else if ( (id & V4L2_STD_NTSC_M) == V4L2_STD_NTSC_M) {
884 curr_id = V4L2_STD_NTSC_M;
885 } else if ( (id & V4L2_STD_NTSC_M_JP) == V4L2_STD_NTSC_M_JP) {
886 curr_id = V4L2_STD_NTSC_M_JP;
887 } else if ( (id & V4L2_STD_NTSC_443) == V4L2_STD_NTSC_443) {
888 curr_id = V4L2_STD_NTSC_443;
889 } else if ( (id & V4L2_STD_NTSC_M_KR) == V4L2_STD_NTSC_M_KR) {
890 curr_id = V4L2_STD_NTSC_M_KR;
891 } else if ( (id & V4L2_STD_SECAM) == V4L2_STD_SECAM) {
892 curr_id = V4L2_STD_SECAM;
893 } else if ( (id & V4L2_STD_SECAM_DK) == V4L2_STD_SECAM_DK) {
894 curr_id = V4L2_STD_SECAM_DK;
895 } else if ( (id & V4L2_STD_SECAM_B) == V4L2_STD_SECAM_B) {
896 curr_id = V4L2_STD_SECAM_B;
897 } else if ( (id & V4L2_STD_SECAM_D) == V4L2_STD_SECAM_D) {
898 curr_id = V4L2_STD_SECAM_D;
899 } else if ( (id & V4L2_STD_SECAM_G) == V4L2_STD_SECAM_G) {
900 curr_id = V4L2_STD_SECAM_G;
901 } else if ( (id & V4L2_STD_SECAM_H) == V4L2_STD_SECAM_H) {
902 curr_id = V4L2_STD_SECAM_H;
903 } else if ( (id & V4L2_STD_SECAM_K) == V4L2_STD_SECAM_K) {
904 curr_id = V4L2_STD_SECAM_K;
905 } else if ( (id & V4L2_STD_SECAM_K1) == V4L2_STD_SECAM_K1) {
906 curr_id = V4L2_STD_SECAM_K1;
907 } else if ( (id & V4L2_STD_SECAM_L) == V4L2_STD_SECAM_L) {
908 curr_id = V4L2_STD_SECAM_L;
909 } else if ( (id & V4L2_STD_SECAM_LC) == V4L2_STD_SECAM_LC) {
910 curr_id = V4L2_STD_SECAM_LC;
911 } else {
912 break;
913 }
914 id &= ~curr_id;
843 } 915 }
844 v4l2_video_std_construct(p, vfd->tvnorms[p->index].id, 916 if (i<=index)
845 vfd->tvnorms[p->index].name); 917 return -EINVAL;
918
919 v4l2_video_std_construct(p, curr_id,v4l2_norm_to_name(curr_id));
846 p->index = index; 920 p->index = index;
847 921
848 dbgarg (cmd, "index=%d, id=%Ld, name=%s, fps=%d/%d, " 922 dbgarg (cmd, "index=%d, id=%Ld, name=%s, fps=%d/%d, "
@@ -868,39 +942,23 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
868 } 942 }
869 case VIDIOC_S_STD: 943 case VIDIOC_S_STD:
870 { 944 {
871 v4l2_std_id *id = arg; 945 v4l2_std_id *id = arg,norm;
872 unsigned int i;
873
874 if (!vfd->tvnormsize) {
875 printk (KERN_WARNING "%s: no TV norms defined!\n",
876 vfd->name);
877 break;
878 }
879 946
880 dbgarg (cmd, "value=%Lu\n", (long long unsigned) *id); 947 dbgarg (cmd, "value=%Lu\n", (long long unsigned) *id);
881 948
882 /* First search for exact match */ 949 norm = (*id) & vfd->tvnorms;
883 for (i = 0; i < vfd->tvnormsize; i++) 950 if ( vfd->tvnorms && !norm) /* Check if std is supported */
884 if (*id == vfd->tvnorms[i].id)
885 break;
886 /* Then for a generic video std that contains desired std */
887 if (i == vfd->tvnormsize)
888 for (i = 0; i < vfd->tvnormsize; i++)
889 if (*id & vfd->tvnorms[i].id)
890 break;
891 if (i == vfd->tvnormsize) {
892 break; 951 break;
893 }
894 952
895 /* Calls the specific handler */ 953 /* Calls the specific handler */
896 if (vfd->vidioc_s_std) 954 if (vfd->vidioc_s_std)
897 ret=vfd->vidioc_s_std(file, fh, i); 955 ret=vfd->vidioc_s_std(file, fh, &norm);
898 else 956 else
899 ret=-EINVAL; 957 ret=-EINVAL;
900 958
901 /* Updates standard information */ 959 /* Updates standard information */
902 if (!ret) 960 if (ret>=0)
903 vfd->current_norm=*id; 961 vfd->current_norm=norm;
904 962
905 break; 963 break;
906 } 964 }
@@ -1088,9 +1146,13 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
1088 case VIDIOC_G_AUDIO: 1146 case VIDIOC_G_AUDIO:
1089 { 1147 {
1090 struct v4l2_audio *p=arg; 1148 struct v4l2_audio *p=arg;
1149 __u32 index=p->index;
1091 1150
1092 if (!vfd->vidioc_g_audio) 1151 if (!vfd->vidioc_g_audio)
1093 break; 1152 break;
1153
1154 memset(p,0,sizeof(*p));
1155 p->index=index;
1094 dbgarg(cmd, "Get for index=%d\n", p->index); 1156 dbgarg(cmd, "Get for index=%d\n", p->index);
1095 ret=vfd->vidioc_g_audio(file, fh, p); 1157 ret=vfd->vidioc_g_audio(file, fh, p);
1096 if (!ret) 1158 if (!ret)
@@ -1288,25 +1350,12 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
1288 ret=vfd->vidioc_g_parm(file, fh, p); 1350 ret=vfd->vidioc_g_parm(file, fh, p);
1289 } else { 1351 } else {
1290 struct v4l2_standard s; 1352 struct v4l2_standard s;
1291 int i;
1292
1293 if (!vfd->tvnormsize) {
1294 printk (KERN_WARNING "%s: no TV norms defined!\n",
1295 vfd->name);
1296 break;
1297 }
1298 1353
1299 if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 1354 if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1300 return -EINVAL; 1355 return -EINVAL;
1301 1356
1302 for (i = 0; i < vfd->tvnormsize; i++)
1303 if (vfd->tvnorms[i].id == vfd->current_norm)
1304 break;
1305 if (i >= vfd->tvnormsize)
1306 return -EINVAL;
1307
1308 v4l2_video_std_construct(&s, vfd->current_norm, 1357 v4l2_video_std_construct(&s, vfd->current_norm,
1309 vfd->tvnorms[i].name); 1358 v4l2_norm_to_name(vfd->current_norm));
1310 1359
1311 memset(p,0,sizeof(*p)); 1360 memset(p,0,sizeof(*p));
1312 1361
@@ -1329,8 +1378,14 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
1329 case VIDIOC_G_TUNER: 1378 case VIDIOC_G_TUNER:
1330 { 1379 {
1331 struct v4l2_tuner *p=arg; 1380 struct v4l2_tuner *p=arg;
1381 __u32 index=p->index;
1382
1332 if (!vfd->vidioc_g_tuner) 1383 if (!vfd->vidioc_g_tuner)
1333 break; 1384 break;
1385
1386 memset(p,0,sizeof(*p));
1387 p->index=index;
1388
1334 ret=vfd->vidioc_g_tuner(file, fh, p); 1389 ret=vfd->vidioc_g_tuner(file, fh, p);
1335 if (!ret) 1390 if (!ret)
1336 dbgarg (cmd, "index=%d, name=%s, type=%d, " 1391 dbgarg (cmd, "index=%d, name=%s, type=%d, "
@@ -1363,6 +1418,9 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
1363 struct v4l2_frequency *p=arg; 1418 struct v4l2_frequency *p=arg;
1364 if (!vfd->vidioc_g_frequency) 1419 if (!vfd->vidioc_g_frequency)
1365 break; 1420 break;
1421
1422 memset(p,0,sizeof(*p));
1423
1366 ret=vfd->vidioc_g_frequency(file, fh, p); 1424 ret=vfd->vidioc_g_frequency(file, fh, p);
1367 if (!ret) 1425 if (!ret)
1368 dbgarg (cmd, "tuner=%d, type=%d, frequency=%d\n", 1426 dbgarg (cmd, "tuner=%d, type=%d, frequency=%d\n",
@@ -1396,12 +1454,7 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
1396 ret=vfd->vidioc_log_status(file, fh); 1454 ret=vfd->vidioc_log_status(file, fh);
1397 break; 1455 break;
1398 } 1456 }
1399 1457 } /* switch */
1400 /* --- Others --------------------------------------------- */
1401
1402 default:
1403 ret=v4l_compat_translate_ioctl(inode,file,cmd,arg,__video_do_ioctl);
1404 }
1405 1458
1406 if (vfd->debug & V4L2_DEBUG_IOCTL_ARG) { 1459 if (vfd->debug & V4L2_DEBUG_IOCTL_ARG) {
1407 if (ret<0) { 1460 if (ret<0) {
diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c
index 6b6dff4d236a..a373c142e742 100644
--- a/drivers/media/video/vino.c
+++ b/drivers/media/video/vino.c
@@ -782,7 +782,7 @@ static int vino_i2c_add_bus(void)
782 782
783static int vino_i2c_del_bus(void) 783static int vino_i2c_del_bus(void)
784{ 784{
785 return i2c_sgi_del_bus(&vino_i2c_adapter); 785 return i2c_del_adapter(&vino_i2c_adapter);
786} 786}
787 787
788static int i2c_camera_command(unsigned int cmd, void *arg) 788static int i2c_camera_command(unsigned int cmd, void *arg)
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index 9986de5cb3d6..474ddb779643 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -1044,16 +1044,8 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
1044 return (0); 1044 return (0);
1045} 1045}
1046 1046
1047static struct v4l2_tvnorm tvnorms[] = { 1047static int vidioc_s_std (struct file *file, void *priv, v4l2_std_id *i)
1048 {
1049 .name = "NTSC-M",
1050 .id = V4L2_STD_NTSC_M,
1051 }
1052};
1053
1054static int vidioc_s_std (struct file *file, void *priv, v4l2_std_id a)
1055{ 1048{
1056
1057 return 0; 1049 return 0;
1058} 1050}
1059 1051
@@ -1333,8 +1325,8 @@ static struct video_device vivi = {
1333#ifdef CONFIG_VIDEO_V4L1_COMPAT 1325#ifdef CONFIG_VIDEO_V4L1_COMPAT
1334 .vidiocgmbuf = vidiocgmbuf, 1326 .vidiocgmbuf = vidiocgmbuf,
1335#endif 1327#endif
1336 .tvnorms = tvnorms, 1328 .tvnorms = V4L2_STD_NTSC_M,
1337 .tvnormsize = ARRAY_SIZE(tvnorms), 1329 .current_norm = V4L2_STD_NTSC_M,
1338}; 1330};
1339/* ----------------------------------------------------------------- 1331/* -----------------------------------------------------------------
1340 Initialization and module stuff 1332 Initialization and module stuff
@@ -1361,8 +1353,6 @@ static int __init vivi_init(void)
1361 dev->vidq.timeout.data = (unsigned long)dev; 1353 dev->vidq.timeout.data = (unsigned long)dev;
1362 init_timer(&dev->vidq.timeout); 1354 init_timer(&dev->vidq.timeout);
1363 1355
1364 vivi.current_norm = tvnorms[0].id;
1365
1366 ret = video_register_device(&vivi, VFL_TYPE_GRABBER, video_nr); 1356 ret = video_register_device(&vivi, VFL_TYPE_GRABBER, video_nr);
1367 printk(KERN_INFO "Video Technology Magazine Virtual Video Capture Board (Load status: %d)\n", ret); 1357 printk(KERN_INFO "Video Technology Magazine Virtual Video Capture Board (Load status: %d)\n", ret);
1368 return ret; 1358 return ret;
diff --git a/drivers/media/video/zoran_card.c b/drivers/media/video/zoran_card.c
index 653822ce391c..4d1eb2fba34a 100644
--- a/drivers/media/video/zoran_card.c
+++ b/drivers/media/video/zoran_card.c
@@ -849,7 +849,7 @@ zoran_register_i2c (struct zoran *zr)
849static void 849static void
850zoran_unregister_i2c (struct zoran *zr) 850zoran_unregister_i2c (struct zoran *zr)
851{ 851{
852 i2c_bit_del_bus((&zr->i2c_adapter)); 852 i2c_del_adapter(&zr->i2c_adapter);
853} 853}
854 854
855/* Check a zoran_params struct for correctness, insert default params */ 855/* Check a zoran_params struct for correctness, insert default params */
diff --git a/drivers/media/video/zr36120.c b/drivers/media/video/zr36120.c
deleted file mode 100644
index 0cbf564388a6..000000000000
--- a/drivers/media/video/zr36120.c
+++ /dev/null
@@ -1,2079 +0,0 @@
1/*
2 zr36120.c - Zoran 36120/36125 based framegrabbers
3
4 Copyright (C) 1998-1999 Pauline Middelink <middelin@polyware.nl>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#include <linux/module.h>
22#include <linux/delay.h>
23#include <linux/init.h>
24#include <linux/errno.h>
25#include <linux/fs.h>
26#include <linux/kernel.h>
27#include <linux/major.h>
28#include <linux/slab.h>
29#include <linux/vmalloc.h>
30#include <linux/mm.h>
31#include <linux/pci.h>
32#include <linux/signal.h>
33#include <linux/wait.h>
34#include <asm/io.h>
35#include <asm/pgtable.h>
36#include <asm/page.h>
37#include <linux/sched.h>
38#include <linux/video_decoder.h>
39
40#include <asm/uaccess.h>
41
42#include "tuner.h"
43#include "zr36120.h"
44#include "zr36120_mem.h"
45
46/* mark an required function argument unused - lintism */
47#define UNUSED(x) (void)(x)
48
49/* sensible default */
50#ifndef CARDTYPE
51#define CARDTYPE 0
52#endif
53
54/* Anybody who uses more than four? */
55#define ZORAN_MAX 4
56
57static unsigned int triton1=0; /* triton1 chipset? */
58static unsigned int cardtype[ZORAN_MAX]={ [ 0 ... ZORAN_MAX-1 ] = CARDTYPE };
59static int video_nr = -1;
60static int vbi_nr = -1;
61
62static struct pci_device_id zr36120_pci_tbl[] = {
63 { PCI_VENDOR_ID_ZORAN,PCI_DEVICE_ID_ZORAN_36120,
64 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
65 { 0 }
66};
67MODULE_DEVICE_TABLE(pci, zr36120_pci_tbl);
68
69MODULE_AUTHOR("Pauline Middelink <middelin@polyware.nl>");
70MODULE_DESCRIPTION("Zoran ZR36120 based framegrabber");
71MODULE_LICENSE("GPL");
72
73module_param(triton1, uint, 0);
74module_param_array(cardtype, uint, NULL, 0);
75module_param(video_nr, int, 0);
76module_param(vbi_nr, int, 0);
77
78static int zoran_cards;
79static struct zoran zorans[ZORAN_MAX];
80
81/*
82 * the meaning of each element can be found in zr36120.h
83 * Determining the value of gpdir/gpval can be tricky. The
84 * best way is to run the card under the original software
85 * and read the values from the general purpose registers
86 * 0x28 and 0x2C. How you do that is left as an exercise
87 * to the impatient reader :)
88 */
89#define T 1 /* to separate the bools from the ints */
90#define F 0
91static struct tvcard tvcards[] = {
92 /* reported working by <middelin@polyware.nl> */
93/*0*/ { "Trust Victor II",
94 2, 0, T, T, T, T, 0x7F, 0x80, { 1, SVHS(6) }, { 0 } },
95 /* reported working by <Michael.Paxton@aihw.gov.au> */
96/*1*/ { "Aitech WaveWatcher TV-PCI",
97 3, 0, T, F, T, T, 0x7F, 0x80, { 1, TUNER(3), SVHS(6) }, { 0 } },
98 /* reported working by ? */
99/*2*/ { "Genius Video Wonder PCI Video Capture Card",
100 2, 0, T, T, T, T, 0x7F, 0x80, { 1, SVHS(6) }, { 0 } },
101 /* reported working by <Pascal.Gabriel@wanadoo.fr> */
102/*3*/ { "Guillemot Maxi-TV PCI",
103 2, 0, T, T, T, T, 0x7F, 0x80, { 1, SVHS(6) }, { 0 } },
104 /* reported working by "Craig Whitmore <lennon@igrin.co.nz> */
105/*4*/ { "Quadrant Buster",
106 3, 3, T, F, T, T, 0x7F, 0x80, { SVHS(1), TUNER(2), 3 }, { 1, 2, 3 } },
107 /* a debug entry which has all inputs mapped */
108/*5*/ { "ZR36120 based framegrabber (all inputs enabled)",
109 6, 0, T, T, T, T, 0x7F, 0x80, { 1, 2, 3, 4, 5, 6 }, { 0 } }
110};
111#undef T
112#undef F
113#define NRTVCARDS (sizeof(tvcards)/sizeof(tvcards[0]))
114
115#ifdef __sparc__
116#define ENDIANESS 0
117#else
118#define ENDIANESS ZORAN_VFEC_LE
119#endif
120
121static struct { const char name[8]; uint mode; uint bpp; } palette2fmt[] = {
122/* n/a */ { "n/a", 0, 0 },
123/* GREY */ { "GRAY", 0, 0 },
124/* HI240 */ { "HI240", 0, 0 },
125/* RGB565 */ { "RGB565", ZORAN_VFEC_RGB_RGB565|ENDIANESS, 2 },
126/* RGB24 */ { "RGB24", ZORAN_VFEC_RGB_RGB888|ENDIANESS|ZORAN_VFEC_PACK24, 3 },
127/* RGB32 */ { "RGB32", ZORAN_VFEC_RGB_RGB888|ENDIANESS, 4 },
128/* RGB555 */ { "RGB555", ZORAN_VFEC_RGB_RGB555|ENDIANESS, 2 },
129/* YUV422 */ { "YUV422", ZORAN_VFEC_RGB_YUV422|ENDIANESS, 2 },
130/* YUYV */ { "YUYV", 0, 0 },
131/* UYVY */ { "UYVY", 0, 0 },
132/* YUV420 */ { "YUV420", 0, 0 },
133/* YUV411 */ { "YUV411", 0, 0 },
134/* RAW */ { "RAW", 0, 0 },
135/* YUV422P */ { "YUV422P", 0, 0 },
136/* YUV411P */ { "YUV411P", 0, 0 }};
137#define NRPALETTES (sizeof(palette2fmt)/sizeof(palette2fmt[0]))
138#undef ENDIANESS
139
140/* ----------------------------------------------------------------------- */
141/* ZORAN chipset detector */
142/* shamelessly stolen from bttv.c */
143/* Reason for beeing here: we need to detect if we are running on a */
144/* Triton based chipset, and if so, enable a certain bit */
145/* ----------------------------------------------------------------------- */
146static
147void __init handle_chipset(void)
148{
149 /* Just in case some nut set this to something dangerous */
150 if (triton1)
151 triton1 = ZORAN_VDC_TRICOM;
152
153 if (pci_pci_problems & PCIPCI_TRITON) {
154 printk(KERN_INFO "zoran: Host bridge 82437FX Triton PIIX\n");
155 triton1 = ZORAN_VDC_TRICOM;
156 }
157}
158
159/* ----------------------------------------------------------------------- */
160/* ZORAN functions */
161/* ----------------------------------------------------------------------- */
162
163static void zoran_set_geo(struct zoran* ztv, struct vidinfo* i);
164
165#if 0 /* unused */
166static
167void zoran_dump(struct zoran *ztv)
168{
169 char str[256];
170 char *p=str; /* shut up, gcc! */
171 int i;
172
173 for (i=0; i<0x60; i+=4) {
174 if ((i % 16) == 0) {
175 if (i) printk("%s\n",str);
176 p = str;
177 p+= sprintf(str, KERN_DEBUG " %04x: ",i);
178 }
179 p += sprintf(p, "%08x ",zrread(i));
180 }
181}
182#endif /* unused */
183
184static
185void reap_states(struct zoran* ztv)
186{
187 /* count frames */
188 ztv->fieldnr++;
189
190 /*
191 * Are we busy at all?
192 * This depends on if there is a workqueue AND the
193 * videotransfer is enabled on the chip...
194 */
195 if (ztv->workqueue && (zrread(ZORAN_VDC) & ZORAN_VDC_VIDEN))
196 {
197 struct vidinfo* newitem;
198
199 /* did we get a complete frame? */
200 if (zrread(ZORAN_VSTR) & ZORAN_VSTR_GRAB)
201 return;
202
203DEBUG(printk(CARD_DEBUG "completed %s at %p\n",CARD,ztv->workqueue->kindof==FBUFFER_GRAB?"grab":"read",ztv->workqueue));
204
205 /* we are done with this buffer, tell everyone */
206 ztv->workqueue->status = FBUFFER_DONE;
207 ztv->workqueue->fieldnr = ztv->fieldnr;
208 /* not good, here for BTTV_FIELDNR reasons */
209 ztv->lastfieldnr = ztv->fieldnr;
210
211 switch (ztv->workqueue->kindof) {
212 case FBUFFER_GRAB:
213 wake_up_interruptible(&ztv->grabq);
214 break;
215 case FBUFFER_VBI:
216 wake_up_interruptible(&ztv->vbiq);
217 break;
218 default:
219 printk(CARD_INFO "somebody killed the workqueue (kindof=%d)!\n",CARD,ztv->workqueue->kindof);
220 }
221
222 /* item completed, skip to next item in queue */
223 write_lock(&ztv->lock);
224 newitem = ztv->workqueue->next;
225 ztv->workqueue->next = 0; /* mark completed */
226 ztv->workqueue = newitem;
227 write_unlock(&ztv->lock);
228 }
229
230 /*
231 * ok, so it seems we have nothing in progress right now.
232 * Lets see if we can find some work.
233 */
234 if (ztv->workqueue)
235 {
236 struct vidinfo* newitem;
237again:
238
239DEBUG(printk(CARD_DEBUG "starting %s at %p\n",CARD,ztv->workqueue->kindof==FBUFFER_GRAB?"grab":"read",ztv->workqueue));
240
241 /* loadup the frame settings */
242 read_lock(&ztv->lock);
243 zoran_set_geo(ztv,ztv->workqueue);
244 read_unlock(&ztv->lock);
245
246 switch (ztv->workqueue->kindof) {
247 case FBUFFER_GRAB:
248 case FBUFFER_VBI:
249 zrand(~ZORAN_OCR_OVLEN, ZORAN_OCR);
250 zror(ZORAN_VSTR_SNAPSHOT,ZORAN_VSTR);
251 zror(ZORAN_VDC_VIDEN,ZORAN_VDC);
252
253 /* start single-shot grab */
254 zror(ZORAN_VSTR_GRAB, ZORAN_VSTR);
255 break;
256 default:
257 printk(CARD_INFO "what is this doing on the queue? (kindof=%d)\n",CARD,ztv->workqueue->kindof);
258 write_lock(&ztv->lock);
259 newitem = ztv->workqueue->next;
260 ztv->workqueue->next = 0;
261 ztv->workqueue = newitem;
262 write_unlock(&ztv->lock);
263 if (newitem)
264 goto again; /* yeah, sure.. */
265 }
266 /* bye for now */
267 return;
268 }
269DEBUG(printk(CARD_DEBUG "nothing in queue\n",CARD));
270
271 /*
272 * What? Even the workqueue is empty? Am i really here
273 * for nothing? Did i come all that way to... do nothing?
274 */
275
276 /* do we need to overlay? */
277 if (test_bit(STATE_OVERLAY, &ztv->state))
278 {
279 /* are we already overlaying? */
280 if (!(zrread(ZORAN_OCR) & ZORAN_OCR_OVLEN) ||
281 !(zrread(ZORAN_VDC) & ZORAN_VDC_VIDEN))
282 {
283DEBUG(printk(CARD_DEBUG "starting overlay\n",CARD));
284
285 read_lock(&ztv->lock);
286 zoran_set_geo(ztv,&ztv->overinfo);
287 read_unlock(&ztv->lock);
288
289 zror(ZORAN_OCR_OVLEN, ZORAN_OCR);
290 zrand(~ZORAN_VSTR_SNAPSHOT,ZORAN_VSTR);
291 zror(ZORAN_VDC_VIDEN,ZORAN_VDC);
292 }
293
294 /*
295 * leave overlaying on, but turn interrupts off.
296 */
297 zrand(~ZORAN_ICR_EN,ZORAN_ICR);
298 return;
299 }
300
301 /* do we have any VBI idle time processing? */
302 if (test_bit(STATE_VBI, &ztv->state))
303 {
304 struct vidinfo* item;
305 struct vidinfo* lastitem;
306
307 /* protect the workqueue */
308 write_lock(&ztv->lock);
309 lastitem = ztv->workqueue;
310 if (lastitem)
311 while (lastitem->next) lastitem = lastitem->next;
312 for (item=ztv->readinfo; item!=ztv->readinfo+ZORAN_VBI_BUFFERS; item++)
313 if (item->next == 0 && item->status == FBUFFER_FREE)
314 {
315DEBUG(printk(CARD_DEBUG "%p added to queue\n",CARD,item));
316 item->status = FBUFFER_BUSY;
317 if (!lastitem)
318 ztv->workqueue = item;
319 else
320 lastitem->next = item;
321 lastitem = item;
322 }
323 write_unlock(&ztv->lock);
324 if (ztv->workqueue)
325 goto again; /* hey, _i_ graduated :) */
326 }
327
328 /*
329 * Then we must be realy IDLE
330 */
331DEBUG(printk(CARD_DEBUG "turning off\n",CARD));
332 /* nothing further to do, disable DMA and further IRQs */
333 zrand(~ZORAN_VDC_VIDEN,ZORAN_VDC);
334 zrand(~ZORAN_ICR_EN,ZORAN_ICR);
335}
336
337static
338void zoran_irq(int irq, void *dev_id)
339{
340 u32 stat,estat;
341 int count = 0;
342 struct zoran *ztv = dev_id;
343
344 UNUSED(irq);
345 for (;;) {
346 /* get/clear interrupt status bits */
347 stat=zrread(ZORAN_ISR);
348 estat=stat & zrread(ZORAN_ICR);
349 if (!estat)
350 return;
351 zrwrite(estat,ZORAN_ISR);
352 IDEBUG(printk(CARD_DEBUG "estat %08x\n",CARD,estat));
353 IDEBUG(printk(CARD_DEBUG " stat %08x\n",CARD,stat));
354
355 if (estat & ZORAN_ISR_CODE)
356 {
357 IDEBUG(printk(CARD_DEBUG "CodReplIRQ\n",CARD));
358 }
359 if (estat & ZORAN_ISR_GIRQ0)
360 {
361 IDEBUG(printk(CARD_DEBUG "GIRQ0\n",CARD));
362 if (!ztv->card->usegirq1)
363 reap_states(ztv);
364 }
365 if (estat & ZORAN_ISR_GIRQ1)
366 {
367 IDEBUG(printk(CARD_DEBUG "GIRQ1\n",CARD));
368 if (ztv->card->usegirq1)
369 reap_states(ztv);
370 }
371
372 count++;
373 if (count > 10)
374 printk(CARD_ERR "irq loop %d (%x)\n",CARD,count,estat);
375 if (count > 20)
376 {
377 zrwrite(0, ZORAN_ICR);
378 printk(CARD_ERR "IRQ lockup, cleared int mask\n",CARD);
379 }
380 }
381}
382
383static
384int zoran_muxsel(struct zoran* ztv, int channel, int norm)
385{
386 int rv;
387
388 /* set the new video norm */
389 rv = i2c_control_device(&(ztv->i2c), I2C_DRIVERID_VIDEODECODER, DECODER_SET_NORM, &norm);
390 if (rv)
391 return rv;
392 ztv->norm = norm;
393
394 /* map the given channel to the cards decoder's channel */
395 channel = ztv->card->video_mux[channel] & CHANNEL_MASK;
396
397 /* set the new channel */
398 rv = i2c_control_device(&(ztv->i2c), I2C_DRIVERID_VIDEODECODER, DECODER_SET_INPUT, &channel);
399 return rv;
400}
401
402/* Tell the interrupt handler what to to. */
403static
404void zoran_cap(struct zoran* ztv, int on)
405{
406DEBUG(printk(CARD_DEBUG "zoran_cap(%d) state=%x\n",CARD,on,ztv->state));
407
408 if (on) {
409 ztv->running = 1;
410
411 /*
412 * turn interrupts (back) on. The DMA will be enabled
413 * inside the irq handler when it detects a restart.
414 */
415 zror(ZORAN_ICR_EN,ZORAN_ICR);
416 }
417 else {
418 /*
419 * turn both interrupts and DMA off
420 */
421 zrand(~ZORAN_VDC_VIDEN,ZORAN_VDC);
422 zrand(~ZORAN_ICR_EN,ZORAN_ICR);
423
424 ztv->running = 0;
425 }
426}
427
428static ulong dmask[] = {
429 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFC, 0xFFFFFFF8,
430 0xFFFFFFF0, 0xFFFFFFE0, 0xFFFFFFC0, 0xFFFFFF80,
431 0xFFFFFF00, 0xFFFFFE00, 0xFFFFFC00, 0xFFFFF800,
432 0xFFFFF000, 0xFFFFE000, 0xFFFFC000, 0xFFFF8000,
433 0xFFFF0000, 0xFFFE0000, 0xFFFC0000, 0xFFF80000,
434 0xFFF00000, 0xFFE00000, 0xFFC00000, 0xFF800000,
435 0xFF000000, 0xFE000000, 0xFC000000, 0xF8000000,
436 0xF0000000, 0xE0000000, 0xC0000000, 0x80000000
437};
438
439static
440void zoran_built_overlay(struct zoran* ztv, int count, struct video_clip *vcp)
441{
442 ulong* mtop;
443 int ystep = (ztv->vidXshift + ztv->vidWidth+31)/32; /* next DWORD */
444 int i;
445
446DEBUG(printk(KERN_DEBUG " overlay at %p, ystep=%d, clips=%d\n",ztv->overinfo.overlay,ystep,count));
447
448 for (i=0; i<count; i++) {
449 struct video_clip *vp = vcp+i;
450 UNUSED(vp);
451DEBUG(printk(KERN_DEBUG " %d: clip(%d,%d,%d,%d)\n", i,vp->x,vp->y,vp->width,vp->height));
452 }
453
454 /*
455 * activate the visible portion of the screen
456 * Note we take some shortcuts here, because we
457 * know the width can never be < 32. (I.e. a DWORD)
458 * We also assume the overlay starts somewhere in
459 * the FIRST dword.
460 */
461 {
462 int start = ztv->vidXshift;
463 ulong firstd = dmask[start];
464 ulong lastd = ~dmask[(start + ztv->overinfo.w) & 31];
465 mtop = ztv->overinfo.overlay;
466 for (i=0; i<ztv->overinfo.h; i++) {
467 int w = ztv->vidWidth;
468 ulong* line = mtop;
469 if (start & 31) {
470 *line++ = firstd;
471 w -= 32-(start&31);
472 }
473 memset(line, ~0, w/8);
474 if (w & 31)
475 line[w/32] = lastd;
476 mtop += ystep;
477 }
478 }
479
480 /* process clipping regions */
481 for (i=0; i<count; i++) {
482 int h;
483 if (vcp->x < 0 || (uint)vcp->x > ztv->overinfo.w ||
484 vcp->y < 0 || vcp->y > ztv->overinfo.h ||
485 vcp->width < 0 || (uint)(vcp->x+vcp->width) > ztv->overinfo.w ||
486 vcp->height < 0 || (vcp->y+vcp->height) > ztv->overinfo.h)
487 {
488 DEBUG(printk(CARD_DEBUG "invalid clipzone (%d,%d,%d,%d) not in (0,0,%d,%d), adapting\n",CARD,vcp->x,vcp->y,vcp->width,vcp->height,ztv->overinfo.w,ztv->overinfo.h));
489 if (vcp->x < 0) vcp->x = 0;
490 if ((uint)vcp->x > ztv->overinfo.w) vcp->x = ztv->overinfo.w;
491 if (vcp->y < 0) vcp->y = 0;
492 if (vcp->y > ztv->overinfo.h) vcp->y = ztv->overinfo.h;
493 if (vcp->width < 0) vcp->width = 0;
494 if ((uint)(vcp->x+vcp->width) > ztv->overinfo.w) vcp->width = ztv->overinfo.w - vcp->x;
495 if (vcp->height < 0) vcp->height = 0;
496 if (vcp->y+vcp->height > ztv->overinfo.h) vcp->height = ztv->overinfo.h - vcp->y;
497// continue;
498 }
499
500 mtop = &ztv->overinfo.overlay[vcp->y*ystep];
501 for (h=0; h<=vcp->height; h++) {
502 int w;
503 int x = ztv->vidXshift + vcp->x;
504 for (w=0; w<=vcp->width; w++) {
505 clear_bit(x&31, &mtop[x/32]);
506 x++;
507 }
508 mtop += ystep;
509 }
510 ++vcp;
511 }
512
513 mtop = ztv->overinfo.overlay;
514 zrwrite(virt_to_bus(mtop), ZORAN_MTOP);
515 zrwrite(virt_to_bus(mtop+ystep), ZORAN_MBOT);
516 zraor((ztv->vidInterlace*ystep)<<0,~ZORAN_OCR_MASKSTRIDE,ZORAN_OCR);
517}
518
519struct tvnorm
520{
521 u16 Wt, Wa, Ht, Ha, HStart, VStart;
522};
523
524static struct tvnorm tvnorms[] = {
525 /* PAL-BDGHI */
526/* { 864, 720, 625, 576, 131, 21 },*/
527/*00*/ { 864, 768, 625, 576, 81, 17 },
528 /* NTSC */
529/*01*/ { 858, 720, 525, 480, 121, 10 },
530 /* SECAM */
531/*02*/ { 864, 720, 625, 576, 131, 21 },
532 /* BW50 */
533/*03*/ { 864, 720, 625, 576, 131, 21 },
534 /* BW60 */
535/*04*/ { 858, 720, 525, 480, 121, 10 }
536};
537#define TVNORMS (sizeof(tvnorms)/sizeof(tvnorm))
538
539/*
540 * Program the chip for a setup as described in the vidinfo struct.
541 *
542 * Side-effects: calculates vidXshift, vidInterlace,
543 * vidHeight, vidWidth which are used in a later stage
544 * to calculate the overlay mask
545 *
546 * This is an internal function, as such it does not check the
547 * validity of the struct members... Spectaculair crashes will
548 * follow /very/ quick when you're wrong and the chip right :)
549 */
550static
551void zoran_set_geo(struct zoran* ztv, struct vidinfo* i)
552{
553 ulong top, bot;
554 int stride;
555 int winWidth, winHeight;
556 int maxWidth, maxHeight, maxXOffset, maxYOffset;
557 long vfec;
558
559DEBUG(printk(CARD_DEBUG "set_geo(rect=(%d,%d,%d,%d), norm=%d, format=%d, bpp=%d, bpl=%d, busadr=%lx, overlay=%p)\n",CARD,i->x,i->y,i->w,i->h,ztv->norm,i->format,i->bpp,i->bpl,i->busadr,i->overlay));
560
561 /*
562 * make sure the DMA transfers are inhibited during our
563 * reprogramming of the chip
564 */
565 zrand(~ZORAN_VDC_VIDEN,ZORAN_VDC);
566
567 maxWidth = tvnorms[ztv->norm].Wa;
568 maxHeight = tvnorms[ztv->norm].Ha/2;
569 maxXOffset = tvnorms[ztv->norm].HStart;
570 maxYOffset = tvnorms[ztv->norm].VStart;
571
572 /* setup vfec register (keep ExtFl,TopField and VCLKPol settings) */
573 vfec = (zrread(ZORAN_VFEC) & (ZORAN_VFEC_EXTFL|ZORAN_VFEC_TOPFIELD|ZORAN_VFEC_VCLKPOL)) |
574 (palette2fmt[i->format].mode & (ZORAN_VFEC_RGB|ZORAN_VFEC_ERRDIF|ZORAN_VFEC_LE|ZORAN_VFEC_PACK24));
575
576 /*
577 * Set top, bottom ptrs. Since these must be DWORD aligned,
578 * possible adjust the x and the width of the window.
579 * so the endposition stay the same. The vidXshift will make
580 * sure we are not writing pixels before the requested x.
581 */
582 ztv->vidXshift = 0;
583 winWidth = i->w;
584 if (winWidth < 0)
585 winWidth = -winWidth;
586 top = i->busadr + i->x*i->bpp + i->y*i->bpl;
587 if (top & 3) {
588 ztv->vidXshift = (top & 3) / i->bpp;
589 winWidth += ztv->vidXshift;
590 DEBUG(printk(KERN_DEBUG " window-x shifted %d pixels left\n",ztv->vidXshift));
591 top &= ~3;
592 }
593
594 /*
595 * bottom points to next frame but in interleaved mode we want
596 * to 'mix' the 2 frames to one capture, so 'bot' points to one
597 * (physical) line below the top line.
598 */
599 bot = top + i->bpl;
600 zrwrite(top,ZORAN_VTOP);
601 zrwrite(bot,ZORAN_VBOT);
602
603 /*
604 * Make sure the winWidth is DWORD aligned too,
605 * thereby automaticly making sure the stride to the
606 * next line is DWORD aligned too (as required by spec).
607 */
608 if ((winWidth*i->bpp) & 3) {
609DEBUG(printk(KERN_DEBUG " window-width enlarged by %d pixels\n",(winWidth*i->bpp) & 3));
610 winWidth += (winWidth*i->bpp) & 3;
611 }
612
613 /* determine the DispMode and stride */
614 if (i->h >= 0 && i->h <= maxHeight) {
615 /* single frame grab suffices for this height. */
616 vfec |= ZORAN_VFEC_DISPMOD;
617 ztv->vidInterlace = 0;
618 stride = i->bpl - (winWidth*i->bpp);
619 winHeight = i->h;
620 }
621 else {
622 /* interleaving needed for this height */
623 ztv->vidInterlace = 1;
624 stride = i->bpl*2 - (winWidth*i->bpp);
625 winHeight = i->h/2;
626 }
627 if (winHeight < 0) /* can happen for VBI! */
628 winHeight = -winHeight;
629
630 /* safety net, sometimes bpl is too short??? */
631 if (stride<0) {
632DEBUG(printk(CARD_DEBUG "WARNING stride = %d\n",CARD,stride));
633 stride = 0;
634 }
635
636 zraor((winHeight<<12)|(winWidth<<0),~(ZORAN_VDC_VIDWINHT|ZORAN_VDC_VIDWINWID), ZORAN_VDC);
637 zraor(stride<<16,~ZORAN_VSTR_DISPSTRIDE,ZORAN_VSTR);
638
639 /* remember vidWidth, vidHeight for overlay calculations */
640 ztv->vidWidth = winWidth;
641 ztv->vidHeight = winHeight;
642DEBUG(printk(KERN_DEBUG " top=%08lx, bottom=%08lx\n",top,bot));
643DEBUG(printk(KERN_DEBUG " winWidth=%d, winHeight=%d\n",winWidth,winHeight));
644DEBUG(printk(KERN_DEBUG " maxWidth=%d, maxHeight=%d\n",maxWidth,maxHeight));
645DEBUG(printk(KERN_DEBUG " stride=%d\n",stride));
646
647 /*
648 * determine horizontal scales and crops
649 */
650 if (i->w < 0) {
651 int Hstart = 1;
652 int Hend = Hstart + winWidth;
653DEBUG(printk(KERN_DEBUG " Y: scale=0, start=%d, end=%d\n", Hstart, Hend));
654 zraor((Hstart<<10)|(Hend<<0),~(ZORAN_VFEH_HSTART|ZORAN_VFEH_HEND),ZORAN_VFEH);
655 }
656 else {
657 int Wa = maxWidth;
658 int X = (winWidth*64+Wa-1)/Wa;
659 int We = winWidth*64/X;
660 int HorDcm = 64-X;
661 int hcrop1 = 2*(Wa-We)/4;
662 /*
663 * BUGFIX: Juha Nurmela <junki@qn-lpr2-165.quicknet.inet.fi>
664 * found the solution to the color phase shift.
665 * See ChangeLog for the full explanation)
666 */
667 int Hstart = (maxXOffset + hcrop1) | 1;
668 int Hend = Hstart + We - 1;
669
670DEBUG(printk(KERN_DEBUG " X: scale=%d, start=%d, end=%d\n", HorDcm, Hstart, Hend));
671
672 zraor((Hstart<<10)|(Hend<<0),~(ZORAN_VFEH_HSTART|ZORAN_VFEH_HEND),ZORAN_VFEH);
673 vfec |= HorDcm<<14;
674
675 if (HorDcm<16)
676 vfec |= ZORAN_VFEC_HFILTER_1; /* no filter */
677 else if (HorDcm<32)
678 vfec |= ZORAN_VFEC_HFILTER_3; /* 3 tap filter */
679 else if (HorDcm<48)
680 vfec |= ZORAN_VFEC_HFILTER_4; /* 4 tap filter */
681 else vfec |= ZORAN_VFEC_HFILTER_5; /* 5 tap filter */
682 }
683
684 /*
685 * Determine vertical scales and crops
686 *
687 * when height is negative, we want to read starting at line 0
688 * One day someone might need access to these lines...
689 */
690 if (i->h < 0) {
691 int Vstart = 0;
692 int Vend = Vstart + winHeight;
693DEBUG(printk(KERN_DEBUG " Y: scale=0, start=%d, end=%d\n", Vstart, Vend));
694 zraor((Vstart<<10)|(Vend<<0),~(ZORAN_VFEV_VSTART|ZORAN_VFEV_VEND),ZORAN_VFEV);
695 }
696 else {
697 int Ha = maxHeight;
698 int Y = (winHeight*64+Ha-1)/Ha;
699 int He = winHeight*64/Y;
700 int VerDcm = 64-Y;
701 int vcrop1 = 2*(Ha-He)/4;
702 int Vstart = maxYOffset + vcrop1;
703 int Vend = Vstart + He - 1;
704
705DEBUG(printk(KERN_DEBUG " Y: scale=%d, start=%d, end=%d\n", VerDcm, Vstart, Vend));
706 zraor((Vstart<<10)|(Vend<<0),~(ZORAN_VFEV_VSTART|ZORAN_VFEV_VEND),ZORAN_VFEV);
707 vfec |= VerDcm<<8;
708 }
709
710DEBUG(printk(KERN_DEBUG " F: format=%d(=%s)\n",i->format,palette2fmt[i->format].name));
711
712 /* setup the requested format */
713 zrwrite(vfec, ZORAN_VFEC);
714}
715
716static
717void zoran_common_open(struct zoran* ztv, int flags)
718{
719 UNUSED(flags);
720
721 /* already opened? */
722 if (ztv->users++ != 0)
723 return;
724
725 /* unmute audio */
726 /* /what/ audio? */
727
728 ztv->state = 0;
729
730 /* setup the encoder to the initial values */
731 ztv->picture.colour=254<<7;
732 ztv->picture.brightness=128<<8;
733 ztv->picture.hue=128<<8;
734 ztv->picture.contrast=216<<7;
735 i2c_control_device(&ztv->i2c, I2C_DRIVERID_VIDEODECODER, DECODER_SET_PICTURE, &ztv->picture);
736
737 /* default to the composite input since my camera is there */
738 zoran_muxsel(ztv, 0, VIDEO_MODE_PAL);
739}
740
741static
742void zoran_common_close(struct zoran* ztv)
743{
744 if (--ztv->users != 0)
745 return;
746
747 /* mute audio */
748 /* /what/ audio? */
749
750 /* stop the chip */
751 zoran_cap(ztv, 0);
752}
753
754/*
755 * Open a zoran card. Right now the flags are just a hack
756 */
757static int zoran_open(struct video_device *dev, int flags)
758{
759 struct zoran *ztv = (struct zoran*)dev;
760 struct vidinfo* item;
761 char* pos;
762
763 DEBUG(printk(CARD_DEBUG "open(dev,%d)\n",CARD,flags));
764
765 /*********************************************
766 * We really should be doing lazy allocing...
767 *********************************************/
768 /* allocate a frame buffer */
769 if (!ztv->fbuffer)
770 ztv->fbuffer = bmalloc(ZORAN_MAX_FBUFSIZE);
771 if (!ztv->fbuffer) {
772 /* could not get a buffer, bail out */
773 return -ENOBUFS;
774 }
775 /* at this time we _always_ have a framebuffer */
776 memset(ztv->fbuffer,0,ZORAN_MAX_FBUFSIZE);
777
778 if (!ztv->overinfo.overlay)
779 ztv->overinfo.overlay = kmalloc(1024*1024/8, GFP_KERNEL);
780 if (!ztv->overinfo.overlay) {
781 /* could not get an overlay buffer, bail out */
782 bfree(ztv->fbuffer, ZORAN_MAX_FBUFSIZE);
783 return -ENOBUFS;
784 }
785 /* at this time we _always_ have a overlay */
786
787 /* clear buffer status, and give them a DMAable address */
788 pos = ztv->fbuffer;
789 for (item=ztv->grabinfo; item!=ztv->grabinfo+ZORAN_MAX_FBUFFERS; item++)
790 {
791 item->status = FBUFFER_FREE;
792 item->memadr = pos;
793 item->busadr = virt_to_bus(pos);
794 pos += ZORAN_MAX_FBUFFER;
795 }
796
797 /* do the common part of all open's */
798 zoran_common_open(ztv, flags);
799
800 return 0;
801}
802
803static
804void zoran_close(struct video_device* dev)
805{
806 struct zoran *ztv = (struct zoran*)dev;
807
808 DEBUG(printk(CARD_DEBUG "close(dev)\n",CARD));
809
810 /* driver specific closure */
811 clear_bit(STATE_OVERLAY, &ztv->state);
812
813 zoran_common_close(ztv);
814
815 /*
816 * This is sucky but right now I can't find a good way to
817 * be sure its safe to free the buffer. We wait 5-6 fields
818 * which is more than sufficient to be sure.
819 */
820 msleep(100); /* Wait 1/10th of a second */
821
822 /* free the allocated framebuffer */
823 bfree(ztv->fbuffer, ZORAN_MAX_FBUFSIZE);
824 ztv->fbuffer = 0;
825 kfree(ztv->overinfo.overlay);
826 ztv->overinfo.overlay = 0;
827
828}
829
830/*
831 * This read function could be used reentrant in a SMP situation.
832 *
833 * This is made possible by the spinlock which is kept till we
834 * found and marked a buffer for our own use. The lock must
835 * be released as soon as possible to prevent lock contention.
836 */
837static
838long zoran_read(struct video_device* dev, char* buf, unsigned long count, int nonblock)
839{
840 struct zoran *ztv = (struct zoran*)dev;
841 unsigned long max;
842 struct vidinfo* unused = 0;
843 struct vidinfo* done = 0;
844
845 DEBUG(printk(CARD_DEBUG "zoran_read(%p,%ld,%d)\n",CARD,buf,count,nonblock));
846
847 /* find ourself a free or completed buffer */
848 for (;;) {
849 struct vidinfo* item;
850
851 write_lock_irq(&ztv->lock);
852 for (item=ztv->grabinfo; item!=ztv->grabinfo+ZORAN_MAX_FBUFFERS; item++)
853 {
854 if (!unused && item->status == FBUFFER_FREE)
855 unused = item;
856 if (!done && item->status == FBUFFER_DONE)
857 done = item;
858 }
859 if (done || unused)
860 break;
861
862 /* no more free buffers, wait for them. */
863 write_unlock_irq(&ztv->lock);
864 if (nonblock)
865 return -EWOULDBLOCK;
866 interruptible_sleep_on(&ztv->grabq);
867 if (signal_pending(current))
868 return -EINTR;
869 }
870
871 /* Do we have 'ready' data? */
872 if (!done) {
873 /* no? than this will take a while... */
874 if (nonblock) {
875 write_unlock_irq(&ztv->lock);
876 return -EWOULDBLOCK;
877 }
878
879 /* mark the unused buffer as wanted */
880 unused->status = FBUFFER_BUSY;
881 unused->w = 320;
882 unused->h = 240;
883 unused->format = VIDEO_PALETTE_RGB24;
884 unused->bpp = palette2fmt[unused->format].bpp;
885 unused->bpl = unused->w * unused->bpp;
886 unused->next = 0;
887 { /* add to tail of queue */
888 struct vidinfo* oldframe = ztv->workqueue;
889 if (!oldframe) ztv->workqueue = unused;
890 else {
891 while (oldframe->next) oldframe = oldframe->next;
892 oldframe->next = unused;
893 }
894 }
895 write_unlock_irq(&ztv->lock);
896
897 /* tell the state machine we want it filled /NOW/ */
898 zoran_cap(ztv, 1);
899
900 /* wait till this buffer gets grabbed */
901 wait_event_interruptible(ztv->grabq,
902 (unused->status != FBUFFER_BUSY));
903 /* see if a signal did it */
904 if (signal_pending(current))
905 return -EINTR;
906 done = unused;
907 }
908 else
909 write_unlock_irq(&ztv->lock);
910
911 /* Yes! we got data! */
912 max = done->bpl * done->h;
913 if (count > max)
914 count = max;
915 if (copy_to_user((void*)buf, done->memadr, count))
916 count = -EFAULT;
917
918 /* keep the engine running */
919 done->status = FBUFFER_FREE;
920// zoran_cap(ztv,1);
921
922 /* tell listeners this buffer became free */
923 wake_up_interruptible(&ztv->grabq);
924
925 /* goodbye */
926 DEBUG(printk(CARD_DEBUG "zoran_read() returns %lu\n",CARD,count));
927 return count;
928}
929
930static
931long zoran_write(struct video_device* dev, const char* buf, unsigned long count, int nonblock)
932{
933 struct zoran *ztv = (struct zoran *)dev;
934 UNUSED(ztv); UNUSED(dev); UNUSED(buf); UNUSED(count); UNUSED(nonblock);
935 DEBUG(printk(CARD_DEBUG "zoran_write\n",CARD));
936 return -EINVAL;
937}
938
939static
940unsigned int zoran_poll(struct video_device *dev, struct file *file, poll_table *wait)
941{
942 struct zoran *ztv = (struct zoran *)dev;
943 struct vidinfo* item;
944 unsigned int mask = 0;
945
946 poll_wait(file, &ztv->grabq, wait);
947
948 for (item=ztv->grabinfo; item!=ztv->grabinfo+ZORAN_MAX_FBUFFERS; item++)
949 if (item->status == FBUFFER_DONE)
950 {
951 mask |= (POLLIN | POLLRDNORM);
952 break;
953 }
954
955 DEBUG(printk(CARD_DEBUG "zoran_poll()=%x\n",CARD,mask));
956
957 return mask;
958}
959
960/* append a new clipregion to the vector of video_clips */
961static
962void new_clip(struct video_window* vw, struct video_clip* vcp, int x, int y, int w, int h)
963{
964 vcp[vw->clipcount].x = x;
965 vcp[vw->clipcount].y = y;
966 vcp[vw->clipcount].width = w;
967 vcp[vw->clipcount].height = h;
968 vw->clipcount++;
969}
970
971static
972int zoran_ioctl(struct video_device* dev, unsigned int cmd, void *arg)
973{
974 struct zoran* ztv = (struct zoran*)dev;
975
976 switch (cmd) {
977 case VIDIOCGCAP:
978 {
979 struct video_capability c;
980 DEBUG(printk(CARD_DEBUG "VIDIOCGCAP\n",CARD));
981
982 strcpy(c.name,ztv->video_dev.name);
983 c.type = VID_TYPE_CAPTURE|
984 VID_TYPE_OVERLAY|
985 VID_TYPE_CLIPPING|
986 VID_TYPE_FRAMERAM|
987 VID_TYPE_SCALES;
988 if (ztv->have_tuner)
989 c.type |= VID_TYPE_TUNER;
990 if (pci_problems & (PCIPCI_FAIL|PCIAGP_FAIL))
991 c.type &= ~VID_TYPE_OVERLAY;
992 if (ztv->have_decoder) {
993 c.channels = ztv->card->video_inputs;
994 c.audios = ztv->card->audio_inputs;
995 } else
996 /* no decoder -> no channels */
997 c.channels = c.audios = 0;
998 c.maxwidth = 768;
999 c.maxheight = 576;
1000 c.minwidth = 32;
1001 c.minheight = 32;
1002 if (copy_to_user(arg,&c,sizeof(c)))
1003 return -EFAULT;
1004 break;
1005 }
1006
1007 case VIDIOCGCHAN:
1008 {
1009 struct video_channel v;
1010 int mux;
1011 if (copy_from_user(&v, arg,sizeof(v)))
1012 return -EFAULT;
1013 DEBUG(printk(CARD_DEBUG "VIDIOCGCHAN(%d)\n",CARD,v.channel));
1014 v.flags=VIDEO_VC_AUDIO
1015#ifdef VIDEO_VC_NORM
1016 |VIDEO_VC_NORM
1017#endif
1018 ;
1019 v.tuners=0;
1020 v.type=VIDEO_TYPE_CAMERA;
1021#ifdef I_EXPECT_POSSIBLE_NORMS_IN_THE_API
1022 v.norm=VIDEO_MODE_PAL|
1023 VIDEO_MODE_NTSC|
1024 VIDEO_MODE_SECAM;
1025#else
1026 v.norm=VIDEO_MODE_PAL;
1027#endif
1028 /* too many inputs? no decoder -> no channels */
1029 if (!ztv->have_decoder || v.channel < 0 || v.channel >= ztv->card->video_inputs)
1030 return -EINVAL;
1031
1032 /* now determine the name of the channel */
1033 mux = ztv->card->video_mux[v.channel];
1034 if (mux & IS_TUNER) {
1035 /* lets assume only one tuner, yes? */
1036 strcpy(v.name,"Television");
1037 v.type = VIDEO_TYPE_TV;
1038 if (ztv->have_tuner) {
1039 v.flags |= VIDEO_VC_TUNER;
1040 v.tuners = 1;
1041 }
1042 }
1043 else if (mux & IS_SVHS)
1044 sprintf(v.name,"S-Video-%d",v.channel);
1045 else
1046 sprintf(v.name,"CVBS-%d",v.channel);
1047
1048 if (copy_to_user(arg,&v,sizeof(v)))
1049 return -EFAULT;
1050 break;
1051 }
1052 case VIDIOCSCHAN:
1053 { /* set video channel */
1054 struct video_channel v;
1055 if (copy_from_user(&v, arg,sizeof(v)))
1056 return -EFAULT;
1057 DEBUG(printk(CARD_DEBUG "VIDIOCSCHAN(%d,%d)\n",CARD,v.channel,v.norm));
1058
1059 /* too many inputs? no decoder -> no channels */
1060 if (!ztv->have_decoder || v.channel >= ztv->card->video_inputs || v.channel < 0)
1061 return -EINVAL;
1062
1063 if (v.norm != VIDEO_MODE_PAL &&
1064 v.norm != VIDEO_MODE_NTSC &&
1065 v.norm != VIDEO_MODE_SECAM &&
1066 v.norm != VIDEO_MODE_AUTO)
1067 return -EOPNOTSUPP;
1068
1069 /* make it happen, nr1! */
1070 return zoran_muxsel(ztv,v.channel,v.norm);
1071 }
1072
1073 case VIDIOCGTUNER:
1074 {
1075 struct video_tuner v;
1076 if (copy_from_user(&v, arg,sizeof(v)))
1077 return -EFAULT;
1078 DEBUG(printk(CARD_DEBUG "VIDIOCGTUNER(%d)\n",CARD,v.tuner));
1079
1080 /* Only no or one tuner for now */
1081 if (!ztv->have_tuner || v.tuner)
1082 return -EINVAL;
1083
1084 strcpy(v.name,"Television");
1085 v.rangelow = 0;
1086 v.rangehigh = ~0;
1087 v.flags = VIDEO_TUNER_PAL|VIDEO_TUNER_NTSC|VIDEO_TUNER_SECAM;
1088 v.mode = ztv->norm;
1089 v.signal = 0xFFFF; /* unknown */
1090
1091 if (copy_to_user(arg,&v,sizeof(v)))
1092 return -EFAULT;
1093 break;
1094 }
1095 case VIDIOCSTUNER:
1096 {
1097 struct video_tuner v;
1098 if (copy_from_user(&v, arg, sizeof(v)))
1099 return -EFAULT;
1100 DEBUG(printk(CARD_DEBUG "VIDIOCSTUNER(%d,%d)\n",CARD,v.tuner,v.mode));
1101
1102 /* Only no or one tuner for now */
1103 if (!ztv->have_tuner || v.tuner)
1104 return -EINVAL;
1105
1106 /* and it only has certain valid modes */
1107 if( v.mode != VIDEO_MODE_PAL &&
1108 v.mode != VIDEO_MODE_NTSC &&
1109 v.mode != VIDEO_MODE_SECAM)
1110 return -EOPNOTSUPP;
1111
1112 /* engage! */
1113 return zoran_muxsel(ztv,v.tuner,v.mode);
1114 }
1115
1116 case VIDIOCGPICT:
1117 {
1118 struct video_picture p = ztv->picture;
1119 DEBUG(printk(CARD_DEBUG "VIDIOCGPICT\n",CARD));
1120 p.depth = ztv->depth;
1121 switch (p.depth) {
1122 case 8: p.palette=VIDEO_PALETTE_YUV422;
1123 break;
1124 case 15: p.palette=VIDEO_PALETTE_RGB555;
1125 break;
1126 case 16: p.palette=VIDEO_PALETTE_RGB565;
1127 break;
1128 case 24: p.palette=VIDEO_PALETTE_RGB24;
1129 break;
1130 case 32: p.palette=VIDEO_PALETTE_RGB32;
1131 break;
1132 }
1133 if (copy_to_user(arg, &p, sizeof(p)))
1134 return -EFAULT;
1135 break;
1136 }
1137 case VIDIOCSPICT:
1138 {
1139 struct video_picture p;
1140 if (copy_from_user(&p, arg,sizeof(p)))
1141 return -EFAULT;
1142 DEBUG(printk(CARD_DEBUG "VIDIOCSPICT(%d,%d,%d,%d,%d,%d,%d)\n",CARD,p.brightness,p.hue,p.colour,p.contrast,p.whiteness,p.depth,p.palette));
1143
1144 /* depth must match with framebuffer */
1145 if (p.depth != ztv->depth)
1146 return -EINVAL;
1147
1148 /* check if palette matches this bpp */
1149 if (p.palette>NRPALETTES ||
1150 palette2fmt[p.palette].bpp != ztv->overinfo.bpp)
1151 return -EINVAL;
1152
1153 write_lock_irq(&ztv->lock);
1154 ztv->overinfo.format = p.palette;
1155 ztv->picture = p;
1156 write_unlock_irq(&ztv->lock);
1157
1158 /* tell the decoder */
1159 i2c_control_device(&ztv->i2c, I2C_DRIVERID_VIDEODECODER, DECODER_SET_PICTURE, &p);
1160 break;
1161 }
1162
1163 case VIDIOCGWIN:
1164 {
1165 struct video_window vw;
1166 DEBUG(printk(CARD_DEBUG "VIDIOCGWIN\n",CARD));
1167 read_lock(&ztv->lock);
1168 vw.x = ztv->overinfo.x;
1169 vw.y = ztv->overinfo.y;
1170 vw.width = ztv->overinfo.w;
1171 vw.height = ztv->overinfo.h;
1172 vw.chromakey= 0;
1173 vw.flags = 0;
1174 if (ztv->vidInterlace)
1175 vw.flags|=VIDEO_WINDOW_INTERLACE;
1176 read_unlock(&ztv->lock);
1177 if (copy_to_user(arg,&vw,sizeof(vw)))
1178 return -EFAULT;
1179 break;
1180 }
1181 case VIDIOCSWIN:
1182 {
1183 struct video_window vw;
1184 struct video_clip *vcp;
1185 int on;
1186 if (copy_from_user(&vw,arg,sizeof(vw)))
1187 return -EFAULT;
1188 DEBUG(printk(CARD_DEBUG "VIDIOCSWIN(%d,%d,%d,%d,%x,%d)\n",CARD,vw.x,vw.y,vw.width,vw.height,vw.flags,vw.clipcount));
1189
1190 if (vw.flags)
1191 return -EINVAL;
1192
1193 if (vw.clipcount <0 || vw.clipcount>256)
1194 return -EDOM; /* Too many! */
1195
1196 /*
1197 * Do any clips.
1198 */
1199 vcp = vmalloc(sizeof(struct video_clip)*(vw.clipcount+4));
1200 if (vcp==NULL)
1201 return -ENOMEM;
1202 if (vw.clipcount && copy_from_user(vcp,vw.clips,sizeof(struct video_clip)*vw.clipcount)) {
1203 vfree(vcp);
1204 return -EFAULT;
1205 }
1206
1207 on = ztv->running;
1208 if (on)
1209 zoran_cap(ztv, 0);
1210
1211 /*
1212 * strange, it seems xawtv sometimes calls us with 0
1213 * width and/or height. Ignore these values
1214 */
1215 if (vw.x == 0)
1216 vw.x = ztv->overinfo.x;
1217 if (vw.y == 0)
1218 vw.y = ztv->overinfo.y;
1219
1220 /* by now we are committed to the new data... */
1221 write_lock_irq(&ztv->lock);
1222 ztv->overinfo.x = vw.x;
1223 ztv->overinfo.y = vw.y;
1224 ztv->overinfo.w = vw.width;
1225 ztv->overinfo.h = vw.height;
1226 write_unlock_irq(&ztv->lock);
1227
1228 /*
1229 * Impose display clips
1230 */
1231 if (vw.x+vw.width > ztv->swidth)
1232 new_clip(&vw, vcp, ztv->swidth-vw.x, 0, vw.width-1, vw.height-1);
1233 if (vw.y+vw.height > ztv->sheight)
1234 new_clip(&vw, vcp, 0, ztv->sheight-vw.y, vw.width-1, vw.height-1);
1235
1236 /* built the requested clipping zones */
1237 zoran_set_geo(ztv, &ztv->overinfo);
1238 zoran_built_overlay(ztv, vw.clipcount, vcp);
1239 vfree(vcp);
1240
1241 /* if we were on, restart the video engine */
1242 if (on)
1243 zoran_cap(ztv, 1);
1244 break;
1245 }
1246
1247 case VIDIOCCAPTURE:
1248 {
1249 int v;
1250 if (get_user(v, (int *)arg))
1251 return -EFAULT;
1252 DEBUG(printk(CARD_DEBUG "VIDIOCCAPTURE(%d)\n",CARD,v));
1253
1254 if (v==0) {
1255 clear_bit(STATE_OVERLAY, &ztv->state);
1256 zoran_cap(ztv, 1);
1257 }
1258 else {
1259 /* is VIDIOCSFBUF, VIDIOCSWIN done? */
1260 if (ztv->overinfo.busadr==0 || ztv->overinfo.w==0 || ztv->overinfo.h==0)
1261 return -EINVAL;
1262
1263 set_bit(STATE_OVERLAY, &ztv->state);
1264 zoran_cap(ztv, 1);
1265 }
1266 break;
1267 }
1268
1269 case VIDIOCGFBUF:
1270 {
1271 struct video_buffer v;
1272 DEBUG(printk(CARD_DEBUG "VIDIOCGFBUF\n",CARD));
1273 read_lock(&ztv->lock);
1274 v.base = (void *)ztv->overinfo.busadr;
1275 v.height = ztv->sheight;
1276 v.width = ztv->swidth;
1277 v.depth = ztv->depth;
1278 v.bytesperline = ztv->overinfo.bpl;
1279 read_unlock(&ztv->lock);
1280 if(copy_to_user(arg, &v,sizeof(v)))
1281 return -EFAULT;
1282 break;
1283 }
1284 case VIDIOCSFBUF:
1285 {
1286 struct video_buffer v;
1287 if(!capable(CAP_SYS_ADMIN))
1288 return -EPERM;
1289 if (pcipci_problems & (PCIPCI_FAIL|PCIAGP_FAIL))
1290 return -ENXIO;
1291 if (copy_from_user(&v, arg,sizeof(v)))
1292 return -EFAULT;
1293 DEBUG(printk(CARD_DEBUG "VIDIOCSFBUF(%p,%d,%d,%d,%d)\n",CARD,v.base, v.width,v.height,v.depth,v.bytesperline));
1294
1295 if (v.depth!=15 && v.depth!=16 && v.depth!=24 && v.depth!=32)
1296 return -EINVAL;
1297 if (v.bytesperline<1)
1298 return -EINVAL;
1299 if (ztv->running)
1300 return -EBUSY;
1301 write_lock_irq(&ztv->lock);
1302 ztv->overinfo.busadr = (ulong)v.base;
1303 ztv->sheight = v.height;
1304 ztv->swidth = v.width;
1305 ztv->depth = v.depth; /* bits per pixel */
1306 ztv->overinfo.bpp = ((v.depth+1)&0x38)/8;/* bytes per pixel */
1307 ztv->overinfo.bpl = v.bytesperline; /* bytes per line */
1308 write_unlock_irq(&ztv->lock);
1309 break;
1310 }
1311
1312 case VIDIOCKEY:
1313 {
1314 /* Will be handled higher up .. */
1315 break;
1316 }
1317
1318 case VIDIOCSYNC:
1319 {
1320 int i;
1321 if (get_user(i, (int *) arg))
1322 return -EFAULT;
1323 DEBUG(printk(CARD_DEBUG "VIDEOCSYNC(%d)\n",CARD,i));
1324 if (i<0 || i>ZORAN_MAX_FBUFFERS)
1325 return -EINVAL;
1326 switch (ztv->grabinfo[i].status) {
1327 case FBUFFER_FREE:
1328 return -EINVAL;
1329 case FBUFFER_BUSY:
1330 /* wait till this buffer gets grabbed */
1331 wait_event_interruptible(ztv->grabq,
1332 (ztv->grabinfo[i].status != FBUFFER_BUSY));
1333 /* see if a signal did it */
1334 if (signal_pending(current))
1335 return -EINTR;
1336 /* don't fall through; a DONE buffer is not UNUSED */
1337 break;
1338 case FBUFFER_DONE:
1339 ztv->grabinfo[i].status = FBUFFER_FREE;
1340 /* tell ppl we have a spare buffer */
1341 wake_up_interruptible(&ztv->grabq);
1342 break;
1343 }
1344 DEBUG(printk(CARD_DEBUG "VIDEOCSYNC(%d) returns\n",CARD,i));
1345 break;
1346 }
1347
1348 case VIDIOCMCAPTURE:
1349 {
1350 struct video_mmap vm;
1351 struct vidinfo* frame;
1352 if (copy_from_user(&vm,arg,sizeof(vm)))
1353 return -EFAULT;
1354 DEBUG(printk(CARD_DEBUG "VIDIOCMCAPTURE(%d,(%d,%d),%d)\n",CARD,vm.frame,vm.width,vm.height,vm.format));
1355 if (vm.frame<0 || vm.frame>ZORAN_MAX_FBUFFERS ||
1356 vm.width<32 || vm.width>768 ||
1357 vm.height<32 || vm.height>576 ||
1358 vm.format>NRPALETTES ||
1359 palette2fmt[vm.format].mode == 0)
1360 return -EINVAL;
1361
1362 /* we are allowed to take over UNUSED and DONE buffers */
1363 frame = &ztv->grabinfo[vm.frame];
1364 if (frame->status == FBUFFER_BUSY)
1365 return -EBUSY;
1366
1367 /* setup the other parameters if they are given */
1368 write_lock_irq(&ztv->lock);
1369 frame->w = vm.width;
1370 frame->h = vm.height;
1371 frame->format = vm.format;
1372 frame->bpp = palette2fmt[frame->format].bpp;
1373 frame->bpl = frame->w*frame->bpp;
1374 frame->status = FBUFFER_BUSY;
1375 frame->next = 0;
1376 { /* add to tail of queue */
1377 struct vidinfo* oldframe = ztv->workqueue;
1378 if (!oldframe) ztv->workqueue = frame;
1379 else {
1380 while (oldframe->next) oldframe = oldframe->next;
1381 oldframe->next = frame;
1382 }
1383 }
1384 write_unlock_irq(&ztv->lock);
1385 zoran_cap(ztv, 1);
1386 break;
1387 }
1388
1389 case VIDIOCGMBUF:
1390 {
1391 struct video_mbuf mb;
1392 int i;
1393 DEBUG(printk(CARD_DEBUG "VIDIOCGMBUF\n",CARD));
1394 mb.size = ZORAN_MAX_FBUFSIZE;
1395 mb.frames = ZORAN_MAX_FBUFFERS;
1396 for (i=0; i<ZORAN_MAX_FBUFFERS; i++)
1397 mb.offsets[i] = i*ZORAN_MAX_FBUFFER;
1398 if(copy_to_user(arg, &mb,sizeof(mb)))
1399 return -EFAULT;
1400 break;
1401 }
1402
1403 case VIDIOCGUNIT:
1404 {
1405 struct video_unit vu;
1406 DEBUG(printk(CARD_DEBUG "VIDIOCGUNIT\n",CARD));
1407 vu.video = ztv->video_dev.minor;
1408 vu.vbi = ztv->vbi_dev.minor;
1409 vu.radio = VIDEO_NO_UNIT;
1410 vu.audio = VIDEO_NO_UNIT;
1411 vu.teletext = VIDEO_NO_UNIT;
1412 if(copy_to_user(arg, &vu,sizeof(vu)))
1413 return -EFAULT;
1414 break;
1415 }
1416
1417 case VIDIOCGFREQ:
1418 {
1419 unsigned long v = ztv->tuner_freq;
1420 if (copy_to_user(arg,&v,sizeof(v)))
1421 return -EFAULT;
1422 DEBUG(printk(CARD_DEBUG "VIDIOCGFREQ\n",CARD));
1423 break;
1424 }
1425 case VIDIOCSFREQ:
1426 {
1427 unsigned long v;
1428 if (copy_from_user(&v, arg, sizeof(v)))
1429 return -EFAULT;
1430 DEBUG(printk(CARD_DEBUG "VIDIOCSFREQ\n",CARD));
1431
1432 if (ztv->have_tuner) {
1433 int fixme = v;
1434 if (i2c_control_device(&(ztv->i2c), I2C_DRIVERID_TUNER, TUNER_SET_TVFREQ, &fixme) < 0)
1435 return -EAGAIN;
1436 }
1437 ztv->tuner_freq = v;
1438 break;
1439 }
1440
1441 /* Why isn't this in the API?
1442 * And why doesn't it take a buffer number?
1443 case BTTV_FIELDNR:
1444 {
1445 unsigned long v = ztv->lastfieldnr;
1446 if (copy_to_user(arg,&v,sizeof(v)))
1447 return -EFAULT;
1448 DEBUG(printk(CARD_DEBUG "BTTV_FIELDNR\n",CARD));
1449 break;
1450 }
1451 */
1452
1453 default:
1454 return -ENOIOCTLCMD;
1455 }
1456 return 0;
1457}
1458
1459static
1460int zoran_mmap(struct vm_area_struct *vma, struct video_device* dev, const char* adr, unsigned long size)
1461{
1462 struct zoran* ztv = (struct zoran*)dev;
1463 unsigned long start = (unsigned long)adr;
1464 unsigned long pos;
1465
1466 DEBUG(printk(CARD_DEBUG "zoran_mmap(0x%p,%ld)\n",CARD,adr,size));
1467
1468 /* sanity checks */
1469 if (size > ZORAN_MAX_FBUFSIZE || !ztv->fbuffer)
1470 return -EINVAL;
1471
1472 /* start mapping the whole shabang to user memory */
1473 pos = (unsigned long)ztv->fbuffer;
1474 while (size>0) {
1475 unsigned long pfn = virt_to_phys((void*)pos) >> PAGE_SHIFT;
1476 if (remap_pfn_range(vma, start, pfn, PAGE_SIZE, PAGE_SHARED))
1477 return -EAGAIN;
1478 start += PAGE_SIZE;
1479 pos += PAGE_SIZE;
1480 size -= PAGE_SIZE;
1481 }
1482 return 0;
1483}
1484
/* V4L template for the video capture device node; init_zoran() copies
 * this into ztv->video_dev and fills in the name before registering. */
static struct video_device zr36120_template=
{
	.owner		= THIS_MODULE,
	.name		= "UNSET",	/* replaced with "zoran-<n>" at init time */
	.type		= VID_TYPE_TUNER|VID_TYPE_CAPTURE|VID_TYPE_OVERLAY,
	.hardware	= VID_HARDWARE_ZR36120,
	.open		= zoran_open,
	.close		= zoran_close,
	.read		= zoran_read,
	.write		= zoran_write,
	.poll		= zoran_poll,
	.ioctl		= zoran_ioctl,
	.compat_ioctl	= v4l_compat_ioctl32,
	.mmap		= zoran_mmap,
	.minor		= -1,		/* let the V4L core assign a minor */
};
1501
/*
 * Open handler for the VBI device node.
 *
 * Allocates the VBI DMA buffers (when not already present), performs
 * the common open bookkeeping and starts a read-ahead capture so data
 * is already flowing when the first read() arrives.
 * Returns 0 on success or -ENOBUFS when a buffer cannot be allocated.
 */
static
int vbi_open(struct video_device *dev, int flags)
{
	struct zoran *ztv = dev->priv;
	struct vidinfo* item;

	DEBUG(printk(CARD_DEBUG "vbi_open(dev,%d)\n",CARD,flags));

	/*
	 * During VBI device open, we continuously grab VBI-like
	 * data in the vbi buffer when we have nothing to do.
	 * Only when there is an explicit request for VBI data
	 * (read call) we /force/ a read.
	 */

	/* allocate buffers */
	for (item=ztv->readinfo; item!=ztv->readinfo+ZORAN_VBI_BUFFERS; item++)
	{
		item->status = FBUFFER_FREE;

		/* alloc only if there is no buffer from a previous open */
		if (!item->memadr) {
			item->memadr = bmalloc(ZORAN_VBI_BUFSIZE);
			if (!item->memadr) {
				/* could not get a buffer, bail out:
				 * walk back and release the earlier buffers.
				 * NOTE(review): this also frees buffers that
				 * pre-existed this open — confirm intended. */
				while (item != ztv->readinfo) {
					item--;
					bfree(item->memadr, ZORAN_VBI_BUFSIZE);
					item->memadr = 0;
					item->busadr = 0;
				}
				return -ENOBUFS;
			}
		}

		/* determine the DMAable address for the capture engine */
		item->busadr = virt_to_bus(item->memadr);
	}

	/* do the common part of all open's */
	zoran_common_open(ztv, flags);

	set_bit(STATE_VBI, &ztv->state);
	/* start read-ahead */
	zoran_cap(ztv, 1);

	return 0;
}
1550
1551static
1552void vbi_close(struct video_device *dev)
1553{
1554 struct zoran *ztv = dev->priv;
1555 struct vidinfo* item;
1556
1557 DEBUG(printk(CARD_DEBUG "vbi_close(dev)\n",CARD));
1558
1559 /* driver specific closure */
1560 clear_bit(STATE_VBI, &ztv->state);
1561
1562 zoran_common_close(ztv);
1563
1564 /*
1565 * This is sucky but right now I can't find a good way to
1566 * be sure its safe to free the buffer. We wait 5-6 fields
1567 * which is more than sufficient to be sure.
1568 */
1569 msleep(100); /* Wait 1/10th of a second */
1570
1571 for (item=ztv->readinfo; item!=ztv->readinfo+ZORAN_VBI_BUFFERS; item++)
1572 {
1573 if (item->memadr)
1574 bfree(item->memadr, ZORAN_VBI_BUFSIZE);
1575 item->memadr = 0;
1576 }
1577
1578}
1579
1580/*
1581 * This read function could be used reentrant in a SMP situation.
1582 *
1583 * This is made possible by the spinlock which is kept till we
1584 * found and marked a buffer for our own use. The lock must
1585 * be released as soon as possible to prevent lock contention.
1586 */
1587static
1588long vbi_read(struct video_device* dev, char* buf, unsigned long count, int nonblock)
1589{
1590 struct zoran *ztv = dev->priv;
1591 unsigned long max;
1592 struct vidinfo* unused = 0;
1593 struct vidinfo* done = 0;
1594
1595 DEBUG(printk(CARD_DEBUG "vbi_read(0x%p,%ld,%d)\n",CARD,buf,count,nonblock));
1596
1597 /* find ourself a free or completed buffer */
1598 for (;;) {
1599 struct vidinfo* item;
1600
1601 write_lock_irq(&ztv->lock);
1602 for (item=ztv->readinfo; item!=ztv->readinfo+ZORAN_VBI_BUFFERS; item++) {
1603 if (!unused && item->status == FBUFFER_FREE)
1604 unused = item;
1605 if (!done && item->status == FBUFFER_DONE)
1606 done = item;
1607 }
1608 if (done || unused)
1609 break;
1610
1611 /* no more free buffers, wait for them. */
1612 write_unlock_irq(&ztv->lock);
1613 if (nonblock)
1614 return -EWOULDBLOCK;
1615 interruptible_sleep_on(&ztv->vbiq);
1616 if (signal_pending(current))
1617 return -EINTR;
1618 }
1619
1620 /* Do we have 'ready' data? */
1621 if (!done) {
1622 /* no? than this will take a while... */
1623 if (nonblock) {
1624 write_unlock_irq(&ztv->lock);
1625 return -EWOULDBLOCK;
1626 }
1627
1628 /* mark the unused buffer as wanted */
1629 unused->status = FBUFFER_BUSY;
1630 unused->next = 0;
1631 { /* add to tail of queue */
1632 struct vidinfo* oldframe = ztv->workqueue;
1633 if (!oldframe) ztv->workqueue = unused;
1634 else {
1635 while (oldframe->next) oldframe = oldframe->next;
1636 oldframe->next = unused;
1637 }
1638 }
1639 write_unlock_irq(&ztv->lock);
1640
1641 /* tell the state machine we want it filled /NOW/ */
1642 zoran_cap(ztv, 1);
1643
1644 /* wait till this buffer gets grabbed */
1645 wait_event_interruptible(ztv->vbiq,
1646 (unused->status != FBUFFER_BUSY));
1647 /* see if a signal did it */
1648 if (signal_pending(current))
1649 return -EINTR;
1650 done = unused;
1651 }
1652 else
1653 write_unlock_irq(&ztv->lock);
1654
1655 /* Yes! we got data! */
1656 max = done->bpl * -done->h;
1657 if (count > max)
1658 count = max;
1659
1660 /* check if the user gave us enough room to write the data */
1661 if (!access_ok(VERIFY_WRITE, buf, count)) {
1662 count = -EFAULT;
1663 goto out;
1664 }
1665
1666 /*
1667 * Now transform/strip the data from YUV to Y-only
1668 * NB. Assume the Y is in the LSB of the YUV data.
1669 */
1670 {
1671 unsigned char* optr = buf;
1672 unsigned char* eptr = buf+count;
1673
1674 /* are we beeing accessed from an old driver? */
1675 if (count == 2*19*2048) {
1676 /*
1677 * Extreme HACK, old VBI programs expect 2048 points
1678 * of data, and we only got 864 orso. Double each
1679 * datapoint and clear the rest of the line.
1680 * This way we have appear to have a
1681 * sample_frequency of 29.5 Mc.
1682 */
1683 int x,y;
1684 unsigned char* iptr = done->memadr+1;
1685 for (y=done->h; optr<eptr && y<0; y++)
1686 {
1687 /* copy to doubled data to userland */
1688 for (x=0; optr+1<eptr && x<-done->w; x++)
1689 {
1690 unsigned char a = iptr[x*2];
1691 __put_user(a, optr++);
1692 __put_user(a, optr++);
1693 }
1694 /* and clear the rest of the line */
1695 for (x*=2; optr<eptr && x<done->bpl; x++)
1696 __put_user(0, optr++);
1697 /* next line */
1698 iptr += done->bpl;
1699 }
1700 }
1701 else {
1702 /*
1703 * Other (probably newer) programs asked
1704 * us what geometry we are using, and are
1705 * reading the correct size.
1706 */
1707 int x,y;
1708 unsigned char* iptr = done->memadr+1;
1709 for (y=done->h; optr<eptr && y<0; y++)
1710 {
1711 /* copy to doubled data to userland */
1712 for (x=0; optr<eptr && x<-done->w; x++)
1713 __put_user(iptr[x*2], optr++);
1714 /* and clear the rest of the line */
1715 for (;optr<eptr && x<done->bpl; x++)
1716 __put_user(0, optr++);
1717 /* next line */
1718 iptr += done->bpl;
1719 }
1720 }
1721
1722 /* API compliance:
1723 * place the framenumber (half fieldnr) in the last long
1724 */
1725 __put_user(done->fieldnr/2, ((ulong*)eptr)[-1]);
1726 }
1727
1728 /* keep the engine running */
1729 done->status = FBUFFER_FREE;
1730 zoran_cap(ztv, 1);
1731
1732 /* tell listeners this buffer just became free */
1733 wake_up_interruptible(&ztv->vbiq);
1734
1735 /* goodbye */
1736out:
1737 DEBUG(printk(CARD_DEBUG "vbi_read() returns %lu\n",CARD,count));
1738 return count;
1739}
1740
1741static
1742unsigned int vbi_poll(struct video_device *dev, struct file *file, poll_table *wait)
1743{
1744 struct zoran *ztv = dev->priv;
1745 struct vidinfo* item;
1746 unsigned int mask = 0;
1747
1748 poll_wait(file, &ztv->vbiq, wait);
1749
1750 for (item=ztv->readinfo; item!=ztv->readinfo+ZORAN_VBI_BUFFERS; item++)
1751 if (item->status == FBUFFER_DONE)
1752 {
1753 mask |= (POLLIN | POLLRDNORM);
1754 break;
1755 }
1756
1757 DEBUG(printk(CARD_DEBUG "vbi_poll()=%x\n",CARD,mask));
1758
1759 return mask;
1760}
1761
1762static
1763int vbi_ioctl(struct video_device *dev, unsigned int cmd, void *arg)
1764{
1765 struct zoran* ztv = dev->priv;
1766
1767 switch (cmd) {
1768 case VIDIOCGVBIFMT:
1769 {
1770 struct vbi_format f;
1771 DEBUG(printk(CARD_DEBUG "VIDIOCGVBIINFO\n",CARD));
1772 f.sampling_rate = 14750000UL;
1773 f.samples_per_line = -ztv->readinfo[0].w;
1774 f.sample_format = VIDEO_PALETTE_RAW;
1775 f.start[0] = f.start[1] = ztv->readinfo[0].y;
1776 f.start[1] += 312;
1777 f.count[0] = f.count[1] = -ztv->readinfo[0].h;
1778 f.flags = VBI_INTERLACED;
1779 if (copy_to_user(arg,&f,sizeof(f)))
1780 return -EFAULT;
1781 break;
1782 }
1783 case VIDIOCSVBIFMT:
1784 {
1785 struct vbi_format f;
1786 int i;
1787 if (copy_from_user(&f, arg,sizeof(f)))
1788 return -EFAULT;
1789 DEBUG(printk(CARD_DEBUG "VIDIOCSVBIINFO(%d,%d,%d,%d,%d,%d,%d,%x)\n",CARD,f.sampling_rate,f.samples_per_line,f.sample_format,f.start[0],f.start[1],f.count[0],f.count[1],f.flags));
1790
1791 /* lots of parameters are fixed... (PAL) */
1792 if (f.sampling_rate != 14750000UL ||
1793 f.samples_per_line > 864 ||
1794 f.sample_format != VIDEO_PALETTE_RAW ||
1795 f.start[0] < 0 ||
1796 f.start[0] != f.start[1]-312 ||
1797 f.count[0] != f.count[1] ||
1798 f.start[0]+f.count[0] >= 288 ||
1799 f.flags != VBI_INTERLACED)
1800 return -EINVAL;
1801
1802 write_lock_irq(&ztv->lock);
1803 ztv->readinfo[0].y = f.start[0];
1804 ztv->readinfo[0].w = -f.samples_per_line;
1805 ztv->readinfo[0].h = -f.count[0];
1806 ztv->readinfo[0].bpl = f.samples_per_line*ztv->readinfo[0].bpp;
1807 for (i=1; i<ZORAN_VBI_BUFFERS; i++)
1808 ztv->readinfo[i] = ztv->readinfo[i];
1809 write_unlock_irq(&ztv->lock);
1810 break;
1811 }
1812 default:
1813 return -ENOIOCTLCMD;
1814 }
1815 return 0;
1816}
1817
/* V4L template for the VBI device node; init_zoran() copies this into
 * ztv->vbi_dev and fills in the name before registering. */
static struct video_device vbi_template=
{
	.owner		= THIS_MODULE,
	.name		= "UNSET",	/* replaced with "zoran-<n>" at init time */
	.type		= VID_TYPE_CAPTURE|VID_TYPE_TELETEXT,
	.hardware	= VID_HARDWARE_ZR36120,
	.open		= vbi_open,
	.close		= vbi_close,
	.read		= vbi_read,
	.write		= zoran_write,	/* shared with the video node */
	.poll		= vbi_poll,
	.ioctl		= vbi_ioctl,
	.minor		= -1,		/* let the V4L core assign a minor */
};
1832
1833/*
1834 * Scan for a Zoran chip, request the irq and map the io memory
1835 */
1836static
1837int __init find_zoran(void)
1838{
1839 int result;
1840 struct zoran *ztv;
1841 struct pci_dev *dev = NULL;
1842 unsigned char revision;
1843 int zoran_num = 0;
1844
1845 while ((dev = pci_get_device(PCI_VENDOR_ID_ZORAN,PCI_DEVICE_ID_ZORAN_36120, dev)))
1846 {
1847 /* Ok, a ZR36120/ZR36125 found! */
1848 ztv = &zorans[zoran_num];
1849 ztv->dev = dev;
1850
1851 if (pci_enable_device(dev))
1852 continue;
1853
1854 pci_read_config_byte(dev, PCI_CLASS_REVISION, &revision);
1855 printk(KERN_INFO "zoran: Zoran %x (rev %d) ",
1856 dev->device, revision);
1857 printk("bus: %d, devfn: %d, irq: %d, ",
1858 dev->bus->number, dev->devfn, dev->irq);
1859 printk("memory: 0x%08lx.\n", ztv->zoran_adr);
1860
1861 ztv->zoran_mem = ioremap(ztv->zoran_adr, 0x1000);
1862 DEBUG(printk(KERN_DEBUG "zoran: mapped-memory at 0x%p\n",ztv->zoran_mem));
1863
1864 result = request_irq(dev->irq, zoran_irq,
1865 IRQF_SHARED|IRQF_DISABLED,"zoran", ztv);
1866 if (result==-EINVAL)
1867 {
1868 iounmap(ztv->zoran_mem);
1869 printk(KERN_ERR "zoran: Bad irq number or handler\n");
1870 continue;
1871 }
1872 if (result==-EBUSY)
1873 printk(KERN_ERR "zoran: IRQ %d busy, change your PnP config in BIOS\n",dev->irq);
1874 if (result < 0) {
1875 iounmap(ztv->zoran_mem);
1876 continue;
1877 }
1878 /* Enable bus-mastering */
1879 pci_set_master(dev);
1880 /* Keep a reference */
1881 pci_dev_get(dev);
1882 zoran_num++;
1883 }
1884 if(zoran_num)
1885 printk(KERN_INFO "zoran: %d Zoran card(s) found.\n",zoran_num);
1886 return zoran_num;
1887}
1888
/*
 * Initialize one probed card: reset the chip, set up the software
 * state (overlay, grab and VBI buffer descriptors), program the base
 * registers and register the video/VBI device nodes plus the i2c bus.
 * Returns 0 on success, -1 on failure (caller unwinds earlier cards).
 */
static
int __init init_zoran(int card)
{
	struct zoran *ztv = &zorans[card];
	int i;

	/* is the given cardtype valid? */
	if (cardtype[card]>=NRTVCARDS) {
		printk(KERN_INFO "invalid cardtype(%d) detected\n",cardtype[card]);
		return -1;
	}

	/* reset the zoran: pulse the softreset bit low, then high */
	zrand(~ZORAN_PCI_SOFTRESET,ZORAN_PCI);
	udelay(10);
	zror(ZORAN_PCI_SOFTRESET,ZORAN_PCI);
	udelay(10);

	/* zoran chip specific details */
	ztv->card = tvcards+cardtype[card];	/* point to the selected card */
	ztv->norm = 0;				/* PAL */
	ztv->tuner_freq = 0;

	/* videocard details (default framebuffer geometry) */
	ztv->swidth = 800;
	ztv->sheight = 600;
	ztv->depth = 16;

	/* State details: overlay buffer descriptor defaults */
	ztv->fbuffer = 0;
	ztv->overinfo.kindof = FBUFFER_OVERLAY;
	ztv->overinfo.status = FBUFFER_FREE;
	ztv->overinfo.x = 0;
	ztv->overinfo.y = 0;
	ztv->overinfo.w = 768; /* 640 */
	ztv->overinfo.h = 576; /* 480 */
	ztv->overinfo.format = VIDEO_PALETTE_RGB565;
	ztv->overinfo.bpp = palette2fmt[ztv->overinfo.format].bpp;
	ztv->overinfo.bpl = ztv->overinfo.bpp*ztv->swidth;
	ztv->overinfo.busadr = 0;
	ztv->overinfo.memadr = 0;
	ztv->overinfo.overlay = 0;
	/* grab buffers inherit the overlay defaults */
	for (i=0; i<ZORAN_MAX_FBUFFERS; i++) {
		ztv->grabinfo[i] = ztv->overinfo;
		ztv->grabinfo[i].kindof = FBUFFER_GRAB;
	}
	init_waitqueue_head(&ztv->grabq);

	/* VBI details; w/h are stored negated (the VBI code consumes
	 * them as -w/-h, see vbi_read/vbi_ioctl) */
	ztv->readinfo[0] = ztv->overinfo;
	ztv->readinfo[0].kindof = FBUFFER_VBI;
	ztv->readinfo[0].w = -864;
	ztv->readinfo[0].h = -38;
	ztv->readinfo[0].format = VIDEO_PALETTE_YUV422;
	ztv->readinfo[0].bpp = palette2fmt[ztv->readinfo[0].format].bpp;
	ztv->readinfo[0].bpl = 1024*ztv->readinfo[0].bpp;
	for (i=1; i<ZORAN_VBI_BUFFERS; i++)
		ztv->readinfo[i] = ztv->readinfo[0];
	init_waitqueue_head(&ztv->vbiq);

	/* maintenance data */
	ztv->have_decoder = 0;
	ztv->have_tuner = 0;
	ztv->tuner_type = 0;
	ztv->running = 0;
	ztv->users = 0;
	rwlock_init(&ztv->lock);
	ztv->workqueue = 0;
	ztv->fieldnr = 0;
	ztv->lastfieldnr = 0;

	/* chipset workaround, presumably for Triton bridges — confirm */
	if (triton1)
		zrand(~ZORAN_VDC_TRICOM, ZORAN_VDC);

	/* external FL determines TOP frame */
	zror(ZORAN_VFEC_EXTFL, ZORAN_VFEC);

	/* set HSpol */
	if (ztv->card->hsync_pos)
		zrwrite(ZORAN_VFEH_HSPOL, ZORAN_VFEH);
	/* set VSpol */
	if (ztv->card->vsync_pos)
		zrwrite(ZORAN_VFEV_VSPOL, ZORAN_VFEV);

	/* Set the proper General Purpose register bits */
	/* implicit: no softreset, 0 waitstates */
	zrwrite(ZORAN_PCI_SOFTRESET|(ztv->card->gpdir<<0),ZORAN_PCI);
	/* implicit: 3 duration and recovery PCI clocks on guest 0-3 */
	zrwrite(ztv->card->gpval<<24,ZORAN_GUEST);

	/* clear interrupt status */
	zrwrite(~0, ZORAN_ISR);

	/*
	 * i2c template: per-card copy with a unique bus name
	 */
	ztv->i2c = zoran_i2c_bus_template;
	sprintf(ztv->i2c.name,"zoran-%d",card);
	ztv->i2c.data = ztv;

	/*
	 * Now add the template and register the device unit
	 */
	ztv->video_dev = zr36120_template;
	strcpy(ztv->video_dev.name, ztv->i2c.name);
	ztv->video_dev.priv = ztv;
	if (video_register_device(&ztv->video_dev, VFL_TYPE_GRABBER, video_nr) < 0)
		return -1;

	ztv->vbi_dev = vbi_template;
	strcpy(ztv->vbi_dev.name, ztv->i2c.name);
	ztv->vbi_dev.priv = ztv;
	if (video_register_device(&ztv->vbi_dev, VFL_TYPE_VBI, vbi_nr) < 0) {
		/* undo the video node registration on failure */
		video_unregister_device(&ztv->video_dev);
		return -1;
	}
	i2c_register_bus(&ztv->i2c);

	/* set interrupt mask - the PIN enable will be set later */
	zrwrite(ZORAN_ICR_GIRQ0|ZORAN_ICR_GIRQ1|ZORAN_ICR_CODE, ZORAN_ICR);

	printk(KERN_INFO "%s: installed %s\n",ztv->i2c.name,ztv->card->name);
	return 0;
}
2013
2014static
2015void release_zoran(int max)
2016{
2017 struct zoran *ztv;
2018 int i;
2019
2020 for (i=0;i<max; i++)
2021 {
2022 ztv = &zorans[i];
2023
2024 /* turn off all capturing, DMA and IRQs */
2025 /* reset the zoran */
2026 zrand(~ZORAN_PCI_SOFTRESET,ZORAN_PCI);
2027 udelay(10);
2028 zror(ZORAN_PCI_SOFTRESET,ZORAN_PCI);
2029 udelay(10);
2030
2031 /* first disable interrupts before unmapping the memory! */
2032 zrwrite(0, ZORAN_ICR);
2033 zrwrite(0xffffffffUL,ZORAN_ISR);
2034
2035 /* free it */
2036 free_irq(ztv->dev->irq,ztv);
2037
2038 /* unregister i2c_bus */
2039 i2c_unregister_bus((&ztv->i2c));
2040
2041 /* unmap and free memory */
2042 if (ztv->zoran_mem)
2043 iounmap(ztv->zoran_mem);
2044
2045 /* Drop PCI device */
2046 pci_dev_put(ztv->dev);
2047
2048 video_unregister_device(&ztv->video_dev);
2049 video_unregister_device(&ztv->vbi_dev);
2050 }
2051}
2052
/* Module unload: tear down every card that was successfully set up. */
void __exit zr36120_exit(void)
{
	release_zoran(zoran_cards);
}
2057
2058int __init zr36120_init(void)
2059{
2060 int card;
2061
2062 handle_chipset();
2063 zoran_cards = find_zoran();
2064 if (zoran_cards <= 0)
2065 return -EIO;
2066
2067 /* initialize Zorans */
2068 for (card=0; card<zoran_cards; card++) {
2069 if (init_zoran(card) < 0) {
2070 /* only release the zorans we have registered */
2071 release_zoran(card);
2072 return -EIO;
2073 }
2074 }
2075 return 0;
2076}
2077
2078module_init(zr36120_init);
2079module_exit(zr36120_exit);
diff --git a/drivers/media/video/zr36120.h b/drivers/media/video/zr36120.h
deleted file mode 100644
index a71e485b0f98..000000000000
--- a/drivers/media/video/zr36120.h
+++ /dev/null
@@ -1,279 +0,0 @@
1/*
2 zr36120.h - Zoran 36120/36125 based framegrabbers
3
4 Copyright (C) 1998-1999 Pauline Middelink (middelin@polyware.nl)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#ifndef _ZR36120_H
22#define _ZR36120_H
23
24#ifdef __KERNEL__
25
26#include <linux/types.h>
27#include <linux/wait.h>
28
29#include <linux/i2c-old.h>
30#include <linux/videodev.h>
31
32#include <asm/io.h>
33
34/*
35 * Debug macro's, place an x behind the ) for actual debug-compilation
36 * E.g. #define DEBUG(x...) x
37 */
38#define DEBUG(x...) /* Debug driver */
39#define IDEBUG(x...) /* Debug interrupt handler */
40#define PDEBUG 0 /* Debug PCI writes */
41
42/* defined in zr36120_i2c */
43extern struct i2c_bus zoran_i2c_bus_template;
44
45#define ZORAN_MAX_FBUFFERS 2
46#define ZORAN_MAX_FBUFFER (768*576*2)
47#define ZORAN_MAX_FBUFSIZE (ZORAN_MAX_FBUFFERS*ZORAN_MAX_FBUFFER)
48
49#define ZORAN_VBI_BUFFERS 2
50#define ZORAN_VBI_BUFSIZE (22*1024*2)
51
52struct tvcard {
53 char* name; /* name of the cardtype */
54 int video_inputs; /* number of channels defined in video_mux */
55 int audio_inputs; /* number of channels defined in audio_mux */
56 __u32 swapi2c:1, /* need to swap i2c wires SDA/SCL? */
57 usegirq1:1, /* VSYNC at GIRQ1 instead of GIRQ0? */
58 vsync_pos:1, /* positive VSYNC signal? */
59 hsync_pos:1, /* positive HSYNC signal? */
60 gpdir:8, /* General Purpose Direction register */
61 gpval:8; /* General Purpose Value register */
62 int video_mux[6]; /* mapping channel number to physical input */
63#define IS_TUNER 0x80
64#define IS_SVHS 0x40
65#define CHANNEL_MASK 0x3F
66 int audio_mux[6]; /* mapping channel number to physical input */
67};
68#define TUNER(x) ((x)|IS_TUNER)
69#define SVHS(x) ((x)|IS_SVHS)
70
71struct vidinfo {
72 struct vidinfo* next; /* next active buffer */
73 uint kindof;
74#define FBUFFER_OVERLAY 0
75#define FBUFFER_GRAB 1
76#define FBUFFER_VBI 2
77 uint status;
78#define FBUFFER_FREE 0
79#define FBUFFER_BUSY 1
80#define FBUFFER_DONE 2
81 ulong fieldnr; /* # of field, not framer! */
82 uint x,y;
83 int w,h; /* w,h can be negative! */
84 uint format; /* index in palette2fmt[] */
85 uint bpp; /* lookup from palette2fmt[] */
86 uint bpl; /* calc: width * bpp */
87 ulong busadr; /* bus addr for DMA engine */
88 char* memadr; /* kernel addr for making copies */
89 ulong* overlay; /* kernel addr of overlay mask */
90};
91
92struct zoran
93{
94 struct video_device video_dev;
95#define CARD_DEBUG KERN_DEBUG "%s(%lu): "
96#define CARD_INFO KERN_INFO "%s(%lu): "
97#define CARD_ERR KERN_ERR "%s(%lu): "
98#define CARD ztv->video_dev.name,ztv->fieldnr
99
100 /* zoran chip specific details */
101 struct i2c_bus i2c; /* i2c registration data */
102 struct pci_dev* dev; /* ptr to PCI device */
103 ulong zoran_adr; /* bus address of IO memory */
104 char* zoran_mem; /* kernel address of IO memory */
105 struct tvcard* card; /* the cardtype */
106 uint norm; /* 0=PAL, 1=NTSC, 2=SECAM */
107 uint tuner_freq; /* Current freq in kHz */
108 struct video_picture picture; /* Current picture params */
109
110 /* videocard details */
111 uint swidth; /* screen width */
112 uint sheight; /* screen height */
113 uint depth; /* depth in bits */
114
115 /* State details */
116 char* fbuffer; /* framebuffers for mmap */
117 struct vidinfo overinfo; /* overlay data */
118 struct vidinfo grabinfo[ZORAN_MAX_FBUFFERS]; /* grabbing data*/
119 wait_queue_head_t grabq; /* grabbers queue */
120
121 /* VBI details */
122 struct video_device vbi_dev;
123 struct vidinfo readinfo[2]; /* VBI data - flip buffers */
124 wait_queue_head_t vbiq; /* vbi queue */
125
126 /* maintenance data */
127 int have_decoder; /* did we detect a mux? */
128 int have_tuner; /* did we detect a tuner? */
129 int users; /* howmany video/vbi open? */
130 int tuner_type; /* tuner type, when found */
131 int running; /* are we rolling? */
132 rwlock_t lock;
133 long state; /* what is requested of us? */
134#define STATE_OVERLAY 0
135#define STATE_VBI 1
136 struct vidinfo* workqueue; /* buffers to grab, head is active */
137 ulong fieldnr; /* #field, ticked every VSYNC */
138 ulong lastfieldnr; /* #field, ticked every GRAB */
139
140 int vidInterlace; /* calculated */
141 int vidXshift; /* calculated */
142 uint vidWidth; /* calculated */
143 uint vidHeight; /* calculated */
144};
145
146#define zrwrite(dat,adr) writel((dat),(char *) (ztv->zoran_mem+(adr)))
147#define zrread(adr) readl(ztv->zoran_mem+(adr))
148
149#if PDEBUG == 0
150#define zrand(dat,adr) zrwrite((dat) & zrread(adr), adr)
151#define zror(dat,adr) zrwrite((dat) | zrread(adr), adr)
152#define zraor(dat,mask,adr) zrwrite( ((dat)&~(mask)) | ((mask)&zrread(adr)), adr)
153#else
154#define zrand(dat, adr) \
155do { \
156 ulong data = (dat) & zrread((adr)); \
157 zrwrite(data, (adr)); \
158 if (0 != (~(dat) & zrread((adr)))) \
159 printk(KERN_DEBUG "zoran: zrand at %d(%d) detected set bits(%x)\n", __LINE__, (adr), (dat)); \
160} while(0)
161
162#define zror(dat, adr) \
163do { \
164 ulong data = (dat) | zrread((adr)); \
165 zrwrite(data, (adr)); \
166 if ((dat) != ((dat) & zrread(adr))) \
167 printk(KERN_DEBUG "zoran: zror at %d(%d) detected unset bits(%x)\n", __LINE__, (adr), (dat)); \
168} while(0)
169
170#define zraor(dat, mask, adr) \
171do { \
172 ulong data; \
173 if ((dat) & (mask)) \
174 printk(KERN_DEBUG "zoran: zraor at %d(%d) detected bits(%x:%x)\n", __LINE__, (adr), (dat), (mask)); \
175 data = ((dat)&~(mask)) | ((mask) & zrread((adr))); \
176 zrwrite(data,(adr)); \
177 if ( (dat) != (~(mask) & zrread((adr))) ) \
178 printk(KERN_DEBUG "zoran: zraor at %d(%d) could not set all bits(%x:%x)\n", __LINE__, (adr), (dat), (mask)); \
179} while(0)
180#endif
181
182#endif
183
184/* zoran PCI address space */
185#define ZORAN_VFEH 0x000 /* Video Front End Horizontal Conf. */
186#define ZORAN_VFEH_HSPOL (1<<30)
187#define ZORAN_VFEH_HSTART (0x3FF<<10)
188#define ZORAN_VFEH_HEND (0x3FF<<0)
189
190#define ZORAN_VFEV 0x004 /* Video Front End Vertical Conf. */
191#define ZORAN_VFEV_VSPOL (1<<30)
192#define ZORAN_VFEV_VSTART (0x3FF<<10)
193#define ZORAN_VFEV_VEND (0x3FF<<0)
194
195#define ZORAN_VFEC 0x008 /* Video Front End Scaler and Pixel */
196#define ZORAN_VFEC_EXTFL (1<<26)
197#define ZORAN_VFEC_TOPFIELD (1<<25)
198#define ZORAN_VFEC_VCLKPOL (1<<24)
199#define ZORAN_VFEC_HFILTER (7<<21)
200#define ZORAN_VFEC_HFILTER_1 (0<<21) /* no lumi, 3-tap chromo */
201#define ZORAN_VFEC_HFILTER_2 (1<<21) /* 3-tap lumi, 3-tap chromo */
202#define ZORAN_VFEC_HFILTER_3 (2<<21) /* 4-tap lumi, 4-tap chromo */
203#define ZORAN_VFEC_HFILTER_4 (3<<21) /* 5-tap lumi, 4-tap chromo */
204#define ZORAN_VFEC_HFILTER_5 (4<<21) /* 4-tap lumi, 4-tap chromo */
205#define ZORAN_VFEC_DUPFLD (1<<20)
206#define ZORAN_VFEC_HORDCM (63<<14)
207#define ZORAN_VFEC_VERDCM (63<<8)
208#define ZORAN_VFEC_DISPMOD (1<<6)
209#define ZORAN_VFEC_RGB (3<<3)
210#define ZORAN_VFEC_RGB_YUV422 (0<<3)
211#define ZORAN_VFEC_RGB_RGB888 (1<<3)
212#define ZORAN_VFEC_RGB_RGB565 (2<<3)
213#define ZORAN_VFEC_RGB_RGB555 (3<<3)
214#define ZORAN_VFEC_ERRDIF (1<<2)
215#define ZORAN_VFEC_PACK24 (1<<1)
216#define ZORAN_VFEC_LE (1<<0)
217
218#define ZORAN_VTOP 0x00C /* Video Display "Top" */
219
220#define ZORAN_VBOT 0x010 /* Video Display "Bottom" */
221
222#define ZORAN_VSTR 0x014 /* Video Display Stride */
223#define ZORAN_VSTR_DISPSTRIDE (0xFFFF<<16)
224#define ZORAN_VSTR_VIDOVF (1<<8)
225#define ZORAN_VSTR_SNAPSHOT (1<<1)
226#define ZORAN_VSTR_GRAB (1<<0)
227
228#define ZORAN_VDC 0x018 /* Video Display Conf. */
229#define ZORAN_VDC_VIDEN (1<<31)
230#define ZORAN_VDC_MINPIX (0x1F<<25)
231#define ZORAN_VDC_TRICOM (1<<24)
232#define ZORAN_VDC_VIDWINHT (0x3FF<<12)
233#define ZORAN_VDC_VIDWINWID (0x3FF<<0)
234
235#define ZORAN_MTOP 0x01C /* Masking Map "Top" */
236
237#define ZORAN_MBOT 0x020 /* Masking Map "Bottom" */
238
239#define ZORAN_OCR 0x024 /* Overlay Control */
240#define ZORAN_OCR_OVLEN (1<<15)
241#define ZORAN_OCR_MASKSTRIDE (0xFF<<0)
242
243#define ZORAN_PCI 0x028 /* System, PCI and GPP Control */
244#define ZORAN_PCI_SOFTRESET (1<<24)
245#define ZORAN_PCI_WAITSTATE (3<<16)
246#define ZORAN_PCI_GENPURDIR (0xFF<<0)
247
248#define ZORAN_GUEST 0x02C /* GuestBus Control */
249
250#define ZORAN_CSOURCE 0x030 /* Code Source Address */
251
252#define ZORAN_CTRANS 0x034 /* Code Transfer Control */
253
254#define ZORAN_CMEM 0x038 /* Code Memory Pointer */
255
256#define ZORAN_ISR 0x03C /* Interrupt Status Register */
257#define ZORAN_ISR_CODE (1<<28)
258#define ZORAN_ISR_GIRQ0 (1<<29)
259#define ZORAN_ISR_GIRQ1 (1<<30)
260
261#define ZORAN_ICR 0x040 /* Interrupt Control Register */
262#define ZORAN_ICR_EN (1<<24)
263#define ZORAN_ICR_CODE (1<<28)
264#define ZORAN_ICR_GIRQ0 (1<<29)
265#define ZORAN_ICR_GIRQ1 (1<<30)
266
267#define ZORAN_I2C 0x044 /* I2C-Bus */
268#define ZORAN_I2C_SCL (1<<1)
269#define ZORAN_I2C_SDA (1<<0)
270
271#define ZORAN_POST 0x48 /* PostOffice */
272#define ZORAN_POST_PEN (1<<25)
273#define ZORAN_POST_TIME (1<<24)
274#define ZORAN_POST_DIR (1<<23)
275#define ZORAN_POST_GUESTID (3<<20)
276#define ZORAN_POST_GUEST (7<<16)
277#define ZORAN_POST_DATA (0xFF<<0)
278
279#endif
diff --git a/drivers/media/video/zr36120_i2c.c b/drivers/media/video/zr36120_i2c.c
deleted file mode 100644
index 21fde43a6aed..000000000000
--- a/drivers/media/video/zr36120_i2c.c
+++ /dev/null
@@ -1,132 +0,0 @@
1/*
2 zr36120_i2c.c - Zoran 36120/36125 based framegrabbers
3
4 Copyright (C) 1998-1999 Pauline Middelink <middelin@polyware.nl>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#include <linux/types.h>
22#include <linux/delay.h>
23#include <asm/io.h>
24
25#include <linux/video_decoder.h>
26#include <asm/uaccess.h>
27
28#include "tuner.h"
29#include "zr36120.h"
30
31/* ----------------------------------------------------------------------- */
32/* I2C functions */
33/* ----------------------------------------------------------------------- */
34
35/* software I2C functions */
36
37#define I2C_DELAY 10
38
39static void i2c_setlines(struct i2c_bus *bus,int ctrl,int data)
40{
41 struct zoran *ztv = (struct zoran*)bus->data;
42 unsigned int b = 0;
43 if (data) b |= ztv->card->swapi2c ? ZORAN_I2C_SCL : ZORAN_I2C_SDA;
44 if (ctrl) b |= ztv->card->swapi2c ? ZORAN_I2C_SDA : ZORAN_I2C_SCL;
45 zrwrite(b, ZORAN_I2C);
46 udelay(I2C_DELAY);
47}
48
49static int i2c_getdataline(struct i2c_bus *bus)
50{
51 struct zoran *ztv = (struct zoran*)bus->data;
52 if (ztv->card->swapi2c)
53 return zrread(ZORAN_I2C) & ZORAN_I2C_SCL;
54 return zrread(ZORAN_I2C) & ZORAN_I2C_SDA;
55}
56
57static
58void attach_inform(struct i2c_bus *bus, int id)
59{
60 struct zoran *ztv = (struct zoran*)bus->data;
61 struct video_decoder_capability dc;
62 int rv;
63
64 switch (id) {
65 case I2C_DRIVERID_VIDEODECODER:
66 DEBUG(printk(CARD_INFO "decoder attached\n",CARD));
67
68 /* fetch the capabilities of the decoder */
69 rv = i2c_control_device(&ztv->i2c, I2C_DRIVERID_VIDEODECODER, DECODER_GET_CAPABILITIES, &dc);
70 if (rv) {
71 DEBUG(printk(CARD_DEBUG "decoder is not V4L aware!\n",CARD));
72 break;
73 }
74 DEBUG(printk(CARD_DEBUG "capabilities %d %d %d\n",CARD,dc.flags,dc.inputs,dc.outputs));
75
76 /* Test if the decoder can de VBI transfers */
77 if (dc.flags & 16 /*VIDEO_DECODER_VBI*/)
78 ztv->have_decoder = 2;
79 else
80 ztv->have_decoder = 1;
81 break;
82 case I2C_DRIVERID_TUNER:
83 ztv->have_tuner = 1;
84 DEBUG(printk(CARD_INFO "tuner attached\n",CARD));
85 if (ztv->tuner_type >= 0)
86 {
87 if (i2c_control_device(&ztv->i2c,I2C_DRIVERID_TUNER,TUNER_SET_TYPE,&ztv->tuner_type)<0)
88 DEBUG(printk(CARD_INFO "attach_inform; tuner won't be set to type %d\n",CARD,ztv->tuner_type));
89 }
90 break;
91 default:
92 DEBUG(printk(CARD_INFO "attach_inform; unknown device id=%d\n",CARD,id));
93 break;
94 }
95}
96
97static
98void detach_inform(struct i2c_bus *bus, int id)
99{
100 struct zoran *ztv = (struct zoran*)bus->data;
101
102 switch (id) {
103 case I2C_DRIVERID_VIDEODECODER:
104 ztv->have_decoder = 0;
105 DEBUG(printk(CARD_INFO "decoder detached\n",CARD));
106 break;
107 case I2C_DRIVERID_TUNER:
108 ztv->have_tuner = 0;
109 DEBUG(printk(CARD_INFO "tuner detached\n",CARD));
110 break;
111 default:
112 DEBUG(printk(CARD_INFO "detach_inform; unknown device id=%d\n",CARD,id));
113 break;
114 }
115}
116
117struct i2c_bus zoran_i2c_bus_template =
118{
119 "ZR36120",
120 I2C_BUSID_ZORAN,
121 NULL,
122
123 SPIN_LOCK_UNLOCKED,
124
125 attach_inform,
126 detach_inform,
127
128 i2c_setlines,
129 i2c_getdataline,
130 NULL,
131 NULL
132};
diff --git a/drivers/media/video/zr36120_mem.c b/drivers/media/video/zr36120_mem.c
deleted file mode 100644
index 416eaa93b8a4..000000000000
--- a/drivers/media/video/zr36120_mem.c
+++ /dev/null
@@ -1,78 +0,0 @@
1/*
2 zr36120_mem.c - Zoran 36120/36125 based framegrabbers
3
4 Copyright (C) 1998-1999 Pauline Middelink <middelin@polyware.nl>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#include <linux/mm.h>
22#include <linux/pci.h>
23#include <linux/slab.h>
24#include <linux/module.h>
25#include <asm/io.h>
26#ifdef CONFIG_BIGPHYS_AREA
27#include <linux/bigphysarea.h>
28#endif
29
30#include "zr36120.h"
31#include "zr36120_mem.h"
32
33/*******************************/
34/* Memory management functions */
35/*******************************/
36
37void* bmalloc(unsigned long size)
38{
39 void* mem;
40#ifdef CONFIG_BIGPHYS_AREA
41 mem = bigphysarea_alloc_pages(size/PAGE_SIZE, 1, GFP_KERNEL);
42#else
43 /*
44 * The following function got a lot of memory at boottime,
45 * so we know its always there...
46 */
47 mem = (void*)__get_free_pages(GFP_USER|GFP_DMA,get_order(size));
48#endif
49 if (mem) {
50 unsigned long adr = (unsigned long)mem;
51 while (size > 0) {
52 SetPageReserved(virt_to_page(phys_to_virt(adr)));
53 adr += PAGE_SIZE;
54 size -= PAGE_SIZE;
55 }
56 }
57 return mem;
58}
59
60void bfree(void* mem, unsigned long size)
61{
62 if (mem) {
63 unsigned long adr = (unsigned long)mem;
64 unsigned long siz = size;
65 while (siz > 0) {
66 ClearPageReserved(virt_to_page(phys_to_virt(adr)));
67 adr += PAGE_SIZE;
68 siz -= PAGE_SIZE;
69 }
70#ifdef CONFIG_BIGPHYS_AREA
71 bigphysarea_free_pages(mem);
72#else
73 free_pages((unsigned long)mem,get_order(size));
74#endif
75 }
76}
77
78MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/zr36120_mem.h b/drivers/media/video/zr36120_mem.h
deleted file mode 100644
index aad117acc91d..000000000000
--- a/drivers/media/video/zr36120_mem.h
+++ /dev/null
@@ -1,3 +0,0 @@
1/* either kmalloc() or bigphysarea() alloced memory - continuous */
2void* bmalloc(unsigned long size);
3void bfree(void* mem, unsigned long size);
diff --git a/drivers/message/i2o/core.h b/drivers/message/i2o/core.h
index dc388a3ff5e0..cbe384fb848c 100644
--- a/drivers/message/i2o/core.h
+++ b/drivers/message/i2o/core.h
@@ -18,7 +18,7 @@ extern struct i2o_driver i2o_exec_driver;
18extern int i2o_exec_lct_get(struct i2o_controller *); 18extern int i2o_exec_lct_get(struct i2o_controller *);
19 19
20extern int __init i2o_exec_init(void); 20extern int __init i2o_exec_init(void);
21extern void __exit i2o_exec_exit(void); 21extern void i2o_exec_exit(void);
22 22
23/* driver */ 23/* driver */
24extern struct bus_type i2o_bus_type; 24extern struct bus_type i2o_bus_type;
@@ -26,7 +26,7 @@ extern struct bus_type i2o_bus_type;
26extern int i2o_driver_dispatch(struct i2o_controller *, u32); 26extern int i2o_driver_dispatch(struct i2o_controller *, u32);
27 27
28extern int __init i2o_driver_init(void); 28extern int __init i2o_driver_init(void);
29extern void __exit i2o_driver_exit(void); 29extern void i2o_driver_exit(void);
30 30
31/* PCI */ 31/* PCI */
32extern int __init i2o_pci_init(void); 32extern int __init i2o_pci_init(void);
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c
index 9104b65ff70f..d3235f213c89 100644
--- a/drivers/message/i2o/driver.c
+++ b/drivers/message/i2o/driver.c
@@ -362,7 +362,7 @@ int __init i2o_driver_init(void)
362 * 362 *
363 * Unregisters the I2O bus and frees driver array. 363 * Unregisters the I2O bus and frees driver array.
364 */ 364 */
365void __exit i2o_driver_exit(void) 365void i2o_driver_exit(void)
366{ 366{
367 bus_unregister(&i2o_bus_type); 367 bus_unregister(&i2o_bus_type);
368 kfree(i2o_drivers); 368 kfree(i2o_drivers);
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index 902753b2c661..a539d3b61e76 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -595,7 +595,7 @@ int __init i2o_exec_init(void)
595 * 595 *
596 * Unregisters the Exec OSM from the I2O core. 596 * Unregisters the Exec OSM from the I2O core.
597 */ 597 */
598void __exit i2o_exec_exit(void) 598void i2o_exec_exit(void)
599{ 599{
600 i2o_driver_unregister(&i2o_exec_driver); 600 i2o_driver_unregister(&i2o_exec_driver);
601}; 601};
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 1de30d711671..e33d446e7493 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -186,7 +186,7 @@ static int i2o_cfg_parms(unsigned long arg, unsigned int type)
186 if (!dev) 186 if (!dev)
187 return -ENXIO; 187 return -ENXIO;
188 188
189 ops = (u8 *) kmalloc(kcmd.oplen, GFP_KERNEL); 189 ops = kmalloc(kcmd.oplen, GFP_KERNEL);
190 if (!ops) 190 if (!ops)
191 return -ENOMEM; 191 return -ENOMEM;
192 192
@@ -199,7 +199,7 @@ static int i2o_cfg_parms(unsigned long arg, unsigned int type)
199 * It's possible to have a _very_ large table 199 * It's possible to have a _very_ large table
200 * and that the user asks for all of it at once... 200 * and that the user asks for all of it at once...
201 */ 201 */
202 res = (u8 *) kmalloc(65536, GFP_KERNEL); 202 res = kmalloc(65536, GFP_KERNEL);
203 if (!res) { 203 if (!res) {
204 kfree(ops); 204 kfree(ops);
205 return -ENOMEM; 205 return -ENOMEM;
diff --git a/drivers/mmc/at91_mci.c b/drivers/mmc/at91_mci.c
index 4633dbc9a90f..08a33c33f6ed 100644
--- a/drivers/mmc/at91_mci.c
+++ b/drivers/mmc/at91_mci.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/mmc/at91_mci.c - ATMEL AT91RM9200 MCI Driver 2 * linux/drivers/mmc/at91_mci.c - ATMEL AT91 MCI Driver
3 * 3 *
4 * Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved 4 * Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved
5 * 5 *
@@ -11,7 +11,7 @@
11 */ 11 */
12 12
13/* 13/*
14 This is the AT91RM9200 MCI driver that has been tested with both MMC cards 14 This is the AT91 MCI driver that has been tested with both MMC cards
15 and SD-cards. Boards that support write protect are now supported. 15 and SD-cards. Boards that support write protect are now supported.
16 The CCAT91SBC001 board does not support SD cards. 16 The CCAT91SBC001 board does not support SD cards.
17 17
@@ -38,8 +38,8 @@
38 controller to manage the transfers. 38 controller to manage the transfers.
39 39
40 A read is done from the controller directly to the scatterlist passed in from the request. 40 A read is done from the controller directly to the scatterlist passed in from the request.
41 Due to a bug in the controller, when a read is completed, all the words are byte 41 Due to a bug in the AT91RM9200 controller, when a read is completed, all the words are byte
42 swapped in the scatterlist buffers. 42 swapped in the scatterlist buffers. AT91SAM926x are not affected by this bug.
43 43
44 The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY 44 The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY
45 45
@@ -72,6 +72,7 @@
72#include <asm/irq.h> 72#include <asm/irq.h>
73#include <asm/mach/mmc.h> 73#include <asm/mach/mmc.h>
74#include <asm/arch/board.h> 74#include <asm/arch/board.h>
75#include <asm/arch/cpu.h>
75#include <asm/arch/gpio.h> 76#include <asm/arch/gpio.h>
76#include <asm/arch/at91_mci.h> 77#include <asm/arch/at91_mci.h>
77#include <asm/arch/at91_pdc.h> 78#include <asm/arch/at91_pdc.h>
@@ -80,34 +81,18 @@
80 81
81#undef SUPPORT_4WIRE 82#undef SUPPORT_4WIRE
82 83
83static struct clk *mci_clk; 84#define FL_SENT_COMMAND (1 << 0)
85#define FL_SENT_STOP (1 << 1)
84 86
85#define FL_SENT_COMMAND (1 << 0) 87#define AT91_MCI_ERRORS (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE \
86#define FL_SENT_STOP (1 << 1) 88 | AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE \
89 | AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)
87 90
91#define at91_mci_read(host, reg) __raw_readl((host)->baseaddr + (reg))
92#define at91_mci_write(host, reg, val) __raw_writel((val), (host)->baseaddr + (reg))
88 93
89 94
90/* 95/*
91 * Read from a MCI register.
92 */
93static inline unsigned long at91_mci_read(unsigned int reg)
94{
95 void __iomem *mci_base = (void __iomem *)AT91_VA_BASE_MCI;
96
97 return __raw_readl(mci_base + reg);
98}
99
100/*
101 * Write to a MCI register.
102 */
103static inline void at91_mci_write(unsigned int reg, unsigned long value)
104{
105 void __iomem *mci_base = (void __iomem *)AT91_VA_BASE_MCI;
106
107 __raw_writel(value, mci_base + reg);
108}
109
110/*
111 * Low level type for this driver 96 * Low level type for this driver
112 */ 97 */
113struct at91mci_host 98struct at91mci_host
@@ -116,9 +101,14 @@ struct at91mci_host
116 struct mmc_command *cmd; 101 struct mmc_command *cmd;
117 struct mmc_request *request; 102 struct mmc_request *request;
118 103
104 void __iomem *baseaddr;
105 int irq;
106
119 struct at91_mmc_data *board; 107 struct at91_mmc_data *board;
120 int present; 108 int present;
121 109
110 struct clk *mci_clk;
111
122 /* 112 /*
123 * Flag indicating when the command has been sent. This is used to 113 * Flag indicating when the command has been sent. This is used to
124 * work out whether or not to send the stop 114 * work out whether or not to send the stop
@@ -158,7 +148,6 @@ static inline void at91mci_sg_to_dma(struct at91mci_host *host, struct mmc_data
158 for (i = 0; i < len; i++) { 148 for (i = 0; i < len; i++) {
159 struct scatterlist *sg; 149 struct scatterlist *sg;
160 int amount; 150 int amount;
161 int index;
162 unsigned int *sgbuffer; 151 unsigned int *sgbuffer;
163 152
164 sg = &data->sg[i]; 153 sg = &data->sg[i];
@@ -166,10 +155,15 @@ static inline void at91mci_sg_to_dma(struct at91mci_host *host, struct mmc_data
166 sgbuffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset; 155 sgbuffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
167 amount = min(size, sg->length); 156 amount = min(size, sg->length);
168 size -= amount; 157 size -= amount;
169 amount /= 4;
170 158
171 for (index = 0; index < amount; index++) 159 if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */
172 *dmabuf++ = swab32(sgbuffer[index]); 160 int index;
161
162 for (index = 0; index < (amount / 4); index++)
163 *dmabuf++ = swab32(sgbuffer[index]);
164 }
165 else
166 memcpy(dmabuf, sgbuffer, amount);
173 167
174 kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ); 168 kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);
175 169
@@ -217,13 +211,13 @@ static void at91mci_pre_dma_read(struct at91mci_host *host)
217 211
218 /* Check to see if this needs filling */ 212 /* Check to see if this needs filling */
219 if (i == 0) { 213 if (i == 0) {
220 if (at91_mci_read(AT91_PDC_RCR) != 0) { 214 if (at91_mci_read(host, AT91_PDC_RCR) != 0) {
221 pr_debug("Transfer active in current\n"); 215 pr_debug("Transfer active in current\n");
222 continue; 216 continue;
223 } 217 }
224 } 218 }
225 else { 219 else {
226 if (at91_mci_read(AT91_PDC_RNCR) != 0) { 220 if (at91_mci_read(host, AT91_PDC_RNCR) != 0) {
227 pr_debug("Transfer active in next\n"); 221 pr_debug("Transfer active in next\n");
228 continue; 222 continue;
229 } 223 }
@@ -240,12 +234,12 @@ static void at91mci_pre_dma_read(struct at91mci_host *host)
240 pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length); 234 pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);
241 235
242 if (i == 0) { 236 if (i == 0) {
243 at91_mci_write(AT91_PDC_RPR, sg->dma_address); 237 at91_mci_write(host, AT91_PDC_RPR, sg->dma_address);
244 at91_mci_write(AT91_PDC_RCR, sg->length / 4); 238 at91_mci_write(host, AT91_PDC_RCR, sg->length / 4);
245 } 239 }
246 else { 240 else {
247 at91_mci_write(AT91_PDC_RNPR, sg->dma_address); 241 at91_mci_write(host, AT91_PDC_RNPR, sg->dma_address);
248 at91_mci_write(AT91_PDC_RNCR, sg->length / 4); 242 at91_mci_write(host, AT91_PDC_RNCR, sg->length / 4);
249 } 243 }
250 } 244 }
251 245
@@ -276,8 +270,6 @@ static void at91mci_post_dma_read(struct at91mci_host *host)
276 270
277 while (host->in_use_index < host->transfer_index) { 271 while (host->in_use_index < host->transfer_index) {
278 unsigned int *buffer; 272 unsigned int *buffer;
279 int index;
280 int len;
281 273
282 struct scatterlist *sg; 274 struct scatterlist *sg;
283 275
@@ -295,11 +287,13 @@ static void at91mci_post_dma_read(struct at91mci_host *host)
295 287
296 data->bytes_xfered += sg->length; 288 data->bytes_xfered += sg->length;
297 289
298 len = sg->length / 4; 290 if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */
291 int index;
299 292
300 for (index = 0; index < len; index++) { 293 for (index = 0; index < (sg->length / 4); index++)
301 buffer[index] = swab32(buffer[index]); 294 buffer[index] = swab32(buffer[index]);
302 } 295 }
296
303 kunmap_atomic(buffer, KM_BIO_SRC_IRQ); 297 kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
304 flush_dcache_page(sg->page); 298 flush_dcache_page(sg->page);
305 } 299 }
@@ -308,8 +302,8 @@ static void at91mci_post_dma_read(struct at91mci_host *host)
308 if (host->transfer_index < data->sg_len) 302 if (host->transfer_index < data->sg_len)
309 at91mci_pre_dma_read(host); 303 at91mci_pre_dma_read(host);
310 else { 304 else {
311 at91_mci_write(AT91_MCI_IER, AT91_MCI_RXBUFF); 305 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF);
312 at91_mci_write(AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS); 306 at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS);
313 } 307 }
314 308
315 pr_debug("post dma read done\n"); 309 pr_debug("post dma read done\n");
@@ -326,11 +320,11 @@ static void at91_mci_handle_transmitted(struct at91mci_host *host)
326 pr_debug("Handling the transmit\n"); 320 pr_debug("Handling the transmit\n");
327 321
328 /* Disable the transfer */ 322 /* Disable the transfer */
329 at91_mci_write(AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS); 323 at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS);
330 324
331 /* Now wait for cmd ready */ 325 /* Now wait for cmd ready */
332 at91_mci_write(AT91_MCI_IDR, AT91_MCI_TXBUFE); 326 at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE);
333 at91_mci_write(AT91_MCI_IER, AT91_MCI_NOTBUSY); 327 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
334 328
335 cmd = host->cmd; 329 cmd = host->cmd;
336 if (!cmd) return; 330 if (!cmd) return;
@@ -344,21 +338,23 @@ static void at91_mci_handle_transmitted(struct at91mci_host *host)
344/* 338/*
345 * Enable the controller 339 * Enable the controller
346 */ 340 */
347static void at91_mci_enable(void) 341static void at91_mci_enable(struct at91mci_host *host)
348{ 342{
349 at91_mci_write(AT91_MCI_CR, AT91_MCI_MCIEN); 343 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
350 at91_mci_write(AT91_MCI_IDR, 0xFFFFFFFF); 344 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
351 at91_mci_write(AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC); 345 at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC);
352 at91_mci_write(AT91_MCI_MR, 0x834A); 346 at91_mci_write(host, AT91_MCI_MR, AT91_MCI_PDCMODE | 0x34a);
353 at91_mci_write(AT91_MCI_SDCR, 0x0); 347
348 /* use Slot A or B (only one at same time) */
349 at91_mci_write(host, AT91_MCI_SDCR, host->board->slot_b);
354} 350}
355 351
356/* 352/*
357 * Disable the controller 353 * Disable the controller
358 */ 354 */
359static void at91_mci_disable(void) 355static void at91_mci_disable(struct at91mci_host *host)
360{ 356{
361 at91_mci_write(AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST); 357 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
362} 358}
363 359
364/* 360/*
@@ -378,13 +374,13 @@ static unsigned int at91_mci_send_command(struct at91mci_host *host, struct mmc_
378 374
379 /* Not sure if this is needed */ 375 /* Not sure if this is needed */
380#if 0 376#if 0
381 if ((at91_mci_read(AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) { 377 if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
382 pr_debug("Clearing timeout\n"); 378 pr_debug("Clearing timeout\n");
383 at91_mci_write(AT91_MCI_ARGR, 0); 379 at91_mci_write(host, AT91_MCI_ARGR, 0);
384 at91_mci_write(AT91_MCI_CMDR, AT91_MCI_OPDCMD); 380 at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD);
385 while (!(at91_mci_read(AT91_MCI_SR) & AT91_MCI_CMDRDY)) { 381 while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
386 /* spin */ 382 /* spin */
387 pr_debug("Clearing: SR = %08X\n", at91_mci_read(AT91_MCI_SR)); 383 pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR));
388 } 384 }
389 } 385 }
390#endif 386#endif
@@ -431,32 +427,32 @@ static unsigned int at91_mci_send_command(struct at91mci_host *host, struct mmc_
431 /* 427 /*
432 * Set the arguments and send the command 428 * Set the arguments and send the command
433 */ 429 */
434 pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08lX)\n", 430 pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08X)\n",
435 cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(AT91_MCI_MR)); 431 cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR));
436 432
437 if (!data) { 433 if (!data) {
438 at91_mci_write(AT91_PDC_PTCR, AT91_PDC_TXTDIS | AT91_PDC_RXTDIS); 434 at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_TXTDIS | AT91_PDC_RXTDIS);
439 at91_mci_write(AT91_PDC_RPR, 0); 435 at91_mci_write(host, AT91_PDC_RPR, 0);
440 at91_mci_write(AT91_PDC_RCR, 0); 436 at91_mci_write(host, AT91_PDC_RCR, 0);
441 at91_mci_write(AT91_PDC_RNPR, 0); 437 at91_mci_write(host, AT91_PDC_RNPR, 0);
442 at91_mci_write(AT91_PDC_RNCR, 0); 438 at91_mci_write(host, AT91_PDC_RNCR, 0);
443 at91_mci_write(AT91_PDC_TPR, 0); 439 at91_mci_write(host, AT91_PDC_TPR, 0);
444 at91_mci_write(AT91_PDC_TCR, 0); 440 at91_mci_write(host, AT91_PDC_TCR, 0);
445 at91_mci_write(AT91_PDC_TNPR, 0); 441 at91_mci_write(host, AT91_PDC_TNPR, 0);
446 at91_mci_write(AT91_PDC_TNCR, 0); 442 at91_mci_write(host, AT91_PDC_TNCR, 0);
447 443
448 at91_mci_write(AT91_MCI_ARGR, cmd->arg); 444 at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
449 at91_mci_write(AT91_MCI_CMDR, cmdr); 445 at91_mci_write(host, AT91_MCI_CMDR, cmdr);
450 return AT91_MCI_CMDRDY; 446 return AT91_MCI_CMDRDY;
451 } 447 }
452 448
453 mr = at91_mci_read(AT91_MCI_MR) & 0x7fff; /* zero block length and PDC mode */ 449 mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff; /* zero block length and PDC mode */
454 at91_mci_write(AT91_MCI_MR, mr | (block_length << 16) | AT91_MCI_PDCMODE); 450 at91_mci_write(host, AT91_MCI_MR, mr | (block_length << 16) | AT91_MCI_PDCMODE);
455 451
456 /* 452 /*
457 * Disable the PDC controller 453 * Disable the PDC controller
458 */ 454 */
459 at91_mci_write(AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS); 455 at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS);
460 456
461 if (cmdr & AT91_MCI_TRCMD_START) { 457 if (cmdr & AT91_MCI_TRCMD_START) {
462 data->bytes_xfered = 0; 458 data->bytes_xfered = 0;
@@ -485,8 +481,8 @@ static unsigned int at91_mci_send_command(struct at91mci_host *host, struct mmc_
485 481
486 pr_debug("Transmitting %d bytes\n", host->total_length); 482 pr_debug("Transmitting %d bytes\n", host->total_length);
487 483
488 at91_mci_write(AT91_PDC_TPR, host->physical_address); 484 at91_mci_write(host, AT91_PDC_TPR, host->physical_address);
489 at91_mci_write(AT91_PDC_TCR, host->total_length / 4); 485 at91_mci_write(host, AT91_PDC_TCR, host->total_length / 4);
490 ier = AT91_MCI_TXBUFE; 486 ier = AT91_MCI_TXBUFE;
491 } 487 }
492 } 488 }
@@ -496,14 +492,14 @@ static unsigned int at91_mci_send_command(struct at91mci_host *host, struct mmc_
496 * the data sheet says 492 * the data sheet says
497 */ 493 */
498 494
499 at91_mci_write(AT91_MCI_ARGR, cmd->arg); 495 at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
500 at91_mci_write(AT91_MCI_CMDR, cmdr); 496 at91_mci_write(host, AT91_MCI_CMDR, cmdr);
501 497
502 if (cmdr & AT91_MCI_TRCMD_START) { 498 if (cmdr & AT91_MCI_TRCMD_START) {
503 if (cmdr & AT91_MCI_TRDIR) 499 if (cmdr & AT91_MCI_TRDIR)
504 at91_mci_write(AT91_PDC_PTCR, AT91_PDC_RXTEN); 500 at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_RXTEN);
505 else 501 else
506 at91_mci_write(AT91_PDC_PTCR, AT91_PDC_TXTEN); 502 at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_TXTEN);
507 } 503 }
508 return ier; 504 return ier;
509} 505}
@@ -520,7 +516,7 @@ static void at91mci_process_command(struct at91mci_host *host, struct mmc_comman
520 pr_debug("setting ier to %08X\n", ier); 516 pr_debug("setting ier to %08X\n", ier);
521 517
522 /* Stop on errors or the required value */ 518 /* Stop on errors or the required value */
523 at91_mci_write(AT91_MCI_IER, 0xffff0000 | ier); 519 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier);
524} 520}
525 521
526/* 522/*
@@ -548,19 +544,19 @@ static void at91mci_completed_command(struct at91mci_host *host)
548 struct mmc_command *cmd = host->cmd; 544 struct mmc_command *cmd = host->cmd;
549 unsigned int status; 545 unsigned int status;
550 546
551 at91_mci_write(AT91_MCI_IDR, 0xffffffff); 547 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
552 548
553 cmd->resp[0] = at91_mci_read(AT91_MCI_RSPR(0)); 549 cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0));
554 cmd->resp[1] = at91_mci_read(AT91_MCI_RSPR(1)); 550 cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1));
555 cmd->resp[2] = at91_mci_read(AT91_MCI_RSPR(2)); 551 cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2));
556 cmd->resp[3] = at91_mci_read(AT91_MCI_RSPR(3)); 552 cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3));
557 553
558 if (host->buffer) { 554 if (host->buffer) {
559 dma_free_coherent(NULL, host->total_length, host->buffer, host->physical_address); 555 dma_free_coherent(NULL, host->total_length, host->buffer, host->physical_address);
560 host->buffer = NULL; 556 host->buffer = NULL;
561 } 557 }
562 558
563 status = at91_mci_read(AT91_MCI_SR); 559 status = at91_mci_read(host, AT91_MCI_SR);
564 560
565 pr_debug("Status = %08X [%08X %08X %08X %08X]\n", 561 pr_debug("Status = %08X [%08X %08X %08X %08X]\n",
566 status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]); 562 status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
@@ -611,18 +607,18 @@ static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
611{ 607{
612 int clkdiv; 608 int clkdiv;
613 struct at91mci_host *host = mmc_priv(mmc); 609 struct at91mci_host *host = mmc_priv(mmc);
614 unsigned long at91_master_clock = clk_get_rate(mci_clk); 610 unsigned long at91_master_clock = clk_get_rate(host->mci_clk);
615 611
616 host->bus_mode = ios->bus_mode; 612 host->bus_mode = ios->bus_mode;
617 613
618 if (ios->clock == 0) { 614 if (ios->clock == 0) {
619 /* Disable the MCI controller */ 615 /* Disable the MCI controller */
620 at91_mci_write(AT91_MCI_CR, AT91_MCI_MCIDIS); 616 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS);
621 clkdiv = 0; 617 clkdiv = 0;
622 } 618 }
623 else { 619 else {
624 /* Enable the MCI controller */ 620 /* Enable the MCI controller */
625 at91_mci_write(AT91_MCI_CR, AT91_MCI_MCIEN); 621 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
626 622
627 if ((at91_master_clock % (ios->clock * 2)) == 0) 623 if ((at91_master_clock % (ios->clock * 2)) == 0)
628 clkdiv = ((at91_master_clock / ios->clock) / 2) - 1; 624 clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
@@ -634,25 +630,25 @@ static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
634 } 630 }
635 if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) { 631 if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) {
636 pr_debug("MMC: Setting controller bus width to 4\n"); 632 pr_debug("MMC: Setting controller bus width to 4\n");
637 at91_mci_write(AT91_MCI_SDCR, at91_mci_read(AT91_MCI_SDCR) | AT91_MCI_SDCBUS); 633 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS);
638 } 634 }
639 else { 635 else {
640 pr_debug("MMC: Setting controller bus width to 1\n"); 636 pr_debug("MMC: Setting controller bus width to 1\n");
641 at91_mci_write(AT91_MCI_SDCR, at91_mci_read(AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS); 637 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
642 } 638 }
643 639
644 /* Set the clock divider */ 640 /* Set the clock divider */
645 at91_mci_write(AT91_MCI_MR, (at91_mci_read(AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv); 641 at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);
646 642
647 /* maybe switch power to the card */ 643 /* maybe switch power to the card */
648 if (host->board->vcc_pin) { 644 if (host->board->vcc_pin) {
649 switch (ios->power_mode) { 645 switch (ios->power_mode) {
650 case MMC_POWER_OFF: 646 case MMC_POWER_OFF:
651 at91_set_gpio_output(host->board->vcc_pin, 0); 647 at91_set_gpio_value(host->board->vcc_pin, 0);
652 break; 648 break;
653 case MMC_POWER_UP: 649 case MMC_POWER_UP:
654 case MMC_POWER_ON: 650 case MMC_POWER_ON:
655 at91_set_gpio_output(host->board->vcc_pin, 1); 651 at91_set_gpio_value(host->board->vcc_pin, 1);
656 break; 652 break;
657 } 653 }
658 } 654 }
@@ -665,39 +661,40 @@ static irqreturn_t at91_mci_irq(int irq, void *devid)
665{ 661{
666 struct at91mci_host *host = devid; 662 struct at91mci_host *host = devid;
667 int completed = 0; 663 int completed = 0;
664 unsigned int int_status, int_mask;
668 665
669 unsigned int int_status; 666 int_status = at91_mci_read(host, AT91_MCI_SR);
667 int_mask = at91_mci_read(host, AT91_MCI_IMR);
668
669 pr_debug("MCI irq: status = %08X, %08X, %08X\n", int_status, int_mask,
670 int_status & int_mask);
671
672 int_status = int_status & int_mask;
670 673
671 int_status = at91_mci_read(AT91_MCI_SR); 674 if (int_status & AT91_MCI_ERRORS) {
672 pr_debug("MCI irq: status = %08X, %08lX, %08lX\n", int_status, at91_mci_read(AT91_MCI_IMR),
673 int_status & at91_mci_read(AT91_MCI_IMR));
674
675 if ((int_status & at91_mci_read(AT91_MCI_IMR)) & 0xffff0000)
676 completed = 1; 675 completed = 1;
676
677 if (int_status & AT91_MCI_UNRE)
678 pr_debug("MMC: Underrun error\n");
679 if (int_status & AT91_MCI_OVRE)
680 pr_debug("MMC: Overrun error\n");
681 if (int_status & AT91_MCI_DTOE)
682 pr_debug("MMC: Data timeout\n");
683 if (int_status & AT91_MCI_DCRCE)
684 pr_debug("MMC: CRC error in data\n");
685 if (int_status & AT91_MCI_RTOE)
686 pr_debug("MMC: Response timeout\n");
687 if (int_status & AT91_MCI_RENDE)
688 pr_debug("MMC: Response end bit error\n");
689 if (int_status & AT91_MCI_RCRCE)
690 pr_debug("MMC: Response CRC error\n");
691 if (int_status & AT91_MCI_RDIRE)
692 pr_debug("MMC: Response direction error\n");
693 if (int_status & AT91_MCI_RINDE)
694 pr_debug("MMC: Response index error\n");
695 } else {
696 /* Only continue processing if no errors */
677 697
678 int_status &= at91_mci_read(AT91_MCI_IMR);
679
680 if (int_status & AT91_MCI_UNRE)
681 pr_debug("MMC: Underrun error\n");
682 if (int_status & AT91_MCI_OVRE)
683 pr_debug("MMC: Overrun error\n");
684 if (int_status & AT91_MCI_DTOE)
685 pr_debug("MMC: Data timeout\n");
686 if (int_status & AT91_MCI_DCRCE)
687 pr_debug("MMC: CRC error in data\n");
688 if (int_status & AT91_MCI_RTOE)
689 pr_debug("MMC: Response timeout\n");
690 if (int_status & AT91_MCI_RENDE)
691 pr_debug("MMC: Response end bit error\n");
692 if (int_status & AT91_MCI_RCRCE)
693 pr_debug("MMC: Response CRC error\n");
694 if (int_status & AT91_MCI_RDIRE)
695 pr_debug("MMC: Response direction error\n");
696 if (int_status & AT91_MCI_RINDE)
697 pr_debug("MMC: Response index error\n");
698
699 /* Only continue processing if no errors */
700 if (!completed) {
701 if (int_status & AT91_MCI_TXBUFE) { 698 if (int_status & AT91_MCI_TXBUFE) {
702 pr_debug("TX buffer empty\n"); 699 pr_debug("TX buffer empty\n");
703 at91_mci_handle_transmitted(host); 700 at91_mci_handle_transmitted(host);
@@ -705,12 +702,11 @@ static irqreturn_t at91_mci_irq(int irq, void *devid)
705 702
706 if (int_status & AT91_MCI_RXBUFF) { 703 if (int_status & AT91_MCI_RXBUFF) {
707 pr_debug("RX buffer full\n"); 704 pr_debug("RX buffer full\n");
708 at91_mci_write(AT91_MCI_IER, AT91_MCI_CMDRDY); 705 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_CMDRDY);
709 } 706 }
710 707
711 if (int_status & AT91_MCI_ENDTX) { 708 if (int_status & AT91_MCI_ENDTX)
712 pr_debug("Transmit has ended\n"); 709 pr_debug("Transmit has ended\n");
713 }
714 710
715 if (int_status & AT91_MCI_ENDRX) { 711 if (int_status & AT91_MCI_ENDRX) {
716 pr_debug("Receive has ended\n"); 712 pr_debug("Receive has ended\n");
@@ -719,37 +715,33 @@ static irqreturn_t at91_mci_irq(int irq, void *devid)
719 715
720 if (int_status & AT91_MCI_NOTBUSY) { 716 if (int_status & AT91_MCI_NOTBUSY) {
721 pr_debug("Card is ready\n"); 717 pr_debug("Card is ready\n");
722 at91_mci_write(AT91_MCI_IER, AT91_MCI_CMDRDY); 718 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_CMDRDY);
723 } 719 }
724 720
725 if (int_status & AT91_MCI_DTIP) { 721 if (int_status & AT91_MCI_DTIP)
726 pr_debug("Data transfer in progress\n"); 722 pr_debug("Data transfer in progress\n");
727 }
728 723
729 if (int_status & AT91_MCI_BLKE) { 724 if (int_status & AT91_MCI_BLKE)
730 pr_debug("Block transfer has ended\n"); 725 pr_debug("Block transfer has ended\n");
731 }
732 726
733 if (int_status & AT91_MCI_TXRDY) { 727 if (int_status & AT91_MCI_TXRDY)
734 pr_debug("Ready to transmit\n"); 728 pr_debug("Ready to transmit\n");
735 }
736 729
737 if (int_status & AT91_MCI_RXRDY) { 730 if (int_status & AT91_MCI_RXRDY)
738 pr_debug("Ready to receive\n"); 731 pr_debug("Ready to receive\n");
739 }
740 732
741 if (int_status & AT91_MCI_CMDRDY) { 733 if (int_status & AT91_MCI_CMDRDY) {
742 pr_debug("Command ready\n"); 734 pr_debug("Command ready\n");
743 completed = 1; 735 completed = 1;
744 } 736 }
745 } 737 }
746 at91_mci_write(AT91_MCI_IDR, int_status);
747 738
748 if (completed) { 739 if (completed) {
749 pr_debug("Completed command\n"); 740 pr_debug("Completed command\n");
750 at91_mci_write(AT91_MCI_IDR, 0xffffffff); 741 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
751 at91mci_completed_command(host); 742 at91mci_completed_command(host);
752 } 743 } else
744 at91_mci_write(host, AT91_MCI_IDR, int_status);
753 745
754 return IRQ_HANDLED; 746 return IRQ_HANDLED;
755} 747}
@@ -769,7 +761,7 @@ static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
769 present ? "insert" : "remove"); 761 present ? "insert" : "remove");
770 if (!present) { 762 if (!present) {
771 pr_debug("****** Resetting SD-card bus width ******\n"); 763 pr_debug("****** Resetting SD-card bus width ******\n");
772 at91_mci_write(AT91_MCI_SDCR, 0); 764 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
773 } 765 }
774 mmc_detect_change(host->mmc, msecs_to_jiffies(100)); 766 mmc_detect_change(host->mmc, msecs_to_jiffies(100));
775 } 767 }
@@ -806,15 +798,22 @@ static int at91_mci_probe(struct platform_device *pdev)
806{ 798{
807 struct mmc_host *mmc; 799 struct mmc_host *mmc;
808 struct at91mci_host *host; 800 struct at91mci_host *host;
801 struct resource *res;
809 int ret; 802 int ret;
810 803
811 pr_debug("Probe MCI devices\n"); 804 pr_debug("Probe MCI devices\n");
812 at91_mci_disable(); 805
813 at91_mci_enable(); 806 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
807 if (!res)
808 return -ENXIO;
809
810 if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME))
811 return -EBUSY;
814 812
815 mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev); 813 mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
816 if (!mmc) { 814 if (!mmc) {
817 pr_debug("Failed to allocate mmc host\n"); 815 pr_debug("Failed to allocate mmc host\n");
816 release_mem_region(res->start, res->end - res->start + 1);
818 return -ENOMEM; 817 return -ENOMEM;
819 } 818 }
820 819
@@ -833,30 +832,51 @@ static int at91_mci_probe(struct platform_device *pdev)
833#ifdef SUPPORT_4WIRE 832#ifdef SUPPORT_4WIRE
834 mmc->caps |= MMC_CAP_4_BIT_DATA; 833 mmc->caps |= MMC_CAP_4_BIT_DATA;
835#else 834#else
836 printk("MMC: 4 wire bus mode not supported by this driver - using 1 wire\n"); 835 printk("AT91 MMC: 4 wire bus mode not supported by this driver - using 1 wire\n");
837#endif 836#endif
838 } 837 }
839 838
840 /* 839 /*
841 * Get Clock 840 * Get Clock
842 */ 841 */
843 mci_clk = clk_get(&pdev->dev, "mci_clk"); 842 host->mci_clk = clk_get(&pdev->dev, "mci_clk");
844 if (IS_ERR(mci_clk)) { 843 if (IS_ERR(host->mci_clk)) {
845 printk(KERN_ERR "AT91 MMC: no clock defined.\n"); 844 printk(KERN_ERR "AT91 MMC: no clock defined.\n");
846 mmc_free_host(mmc); 845 mmc_free_host(mmc);
846 release_mem_region(res->start, res->end - res->start + 1);
847 return -ENODEV; 847 return -ENODEV;
848 } 848 }
849 clk_enable(mci_clk); /* Enable the peripheral clock */ 849
850 /*
851 * Map I/O region
852 */
853 host->baseaddr = ioremap(res->start, res->end - res->start + 1);
854 if (!host->baseaddr) {
855 clk_put(host->mci_clk);
856 mmc_free_host(mmc);
857 release_mem_region(res->start, res->end - res->start + 1);
858 return -ENOMEM;
859 }
860
861 /*
862 * Reset hardware
863 */
864 clk_enable(host->mci_clk); /* Enable the peripheral clock */
865 at91_mci_disable(host);
866 at91_mci_enable(host);
850 867
851 /* 868 /*
852 * Allocate the MCI interrupt 869 * Allocate the MCI interrupt
853 */ 870 */
854 ret = request_irq(AT91RM9200_ID_MCI, at91_mci_irq, IRQF_SHARED, DRIVER_NAME, host); 871 host->irq = platform_get_irq(pdev, 0);
872 ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED, DRIVER_NAME, host);
855 if (ret) { 873 if (ret) {
856 printk(KERN_ERR "Failed to request MCI interrupt\n"); 874 printk(KERN_ERR "AT91 MMC: Failed to request MCI interrupt\n");
857 clk_disable(mci_clk); 875 clk_disable(host->mci_clk);
858 clk_put(mci_clk); 876 clk_put(host->mci_clk);
859 mmc_free_host(mmc); 877 mmc_free_host(mmc);
878 iounmap(host->baseaddr);
879 release_mem_region(res->start, res->end - res->start + 1);
860 return ret; 880 return ret;
861 } 881 }
862 882
@@ -879,10 +899,10 @@ static int at91_mci_probe(struct platform_device *pdev)
879 ret = request_irq(host->board->det_pin, at91_mmc_det_irq, 899 ret = request_irq(host->board->det_pin, at91_mmc_det_irq,
880 0, DRIVER_NAME, host); 900 0, DRIVER_NAME, host);
881 if (ret) 901 if (ret)
882 printk(KERN_ERR "couldn't allocate MMC detect irq\n"); 902 printk(KERN_ERR "AT91 MMC: Couldn't allocate MMC detect irq\n");
883 } 903 }
884 904
885 pr_debug(KERN_INFO "Added MCI driver\n"); 905 pr_debug("Added MCI driver\n");
886 906
887 return 0; 907 return 0;
888} 908}
@@ -894,6 +914,7 @@ static int at91_mci_remove(struct platform_device *pdev)
894{ 914{
895 struct mmc_host *mmc = platform_get_drvdata(pdev); 915 struct mmc_host *mmc = platform_get_drvdata(pdev);
896 struct at91mci_host *host; 916 struct at91mci_host *host;
917 struct resource *res;
897 918
898 if (!mmc) 919 if (!mmc)
899 return -1; 920 return -1;
@@ -905,16 +926,19 @@ static int at91_mci_remove(struct platform_device *pdev)
905 cancel_delayed_work(&host->mmc->detect); 926 cancel_delayed_work(&host->mmc->detect);
906 } 927 }
907 928
929 at91_mci_disable(host);
908 mmc_remove_host(mmc); 930 mmc_remove_host(mmc);
909 at91_mci_disable(); 931 free_irq(host->irq, host);
910 free_irq(AT91RM9200_ID_MCI, host);
911 mmc_free_host(mmc);
912 932
913 clk_disable(mci_clk); /* Disable the peripheral clock */ 933 clk_disable(host->mci_clk); /* Disable the peripheral clock */
914 clk_put(mci_clk); 934 clk_put(host->mci_clk);
915 935
916 platform_set_drvdata(pdev, NULL); 936 iounmap(host->baseaddr);
937 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
938 release_mem_region(res->start, res->end - res->start + 1);
917 939
940 mmc_free_host(mmc);
941 platform_set_drvdata(pdev, NULL);
918 pr_debug("MCI Removed\n"); 942 pr_debug("MCI Removed\n");
919 943
920 return 0; 944 return 0;
diff --git a/drivers/mmc/mmc_queue.c b/drivers/mmc/mmc_queue.c
index a17423a4ed8f..3e35a43819fb 100644
--- a/drivers/mmc/mmc_queue.c
+++ b/drivers/mmc/mmc_queue.c
@@ -78,8 +78,10 @@ static int mmc_queue_thread(void *d)
78 spin_unlock_irq(q->queue_lock); 78 spin_unlock_irq(q->queue_lock);
79 79
80 if (!req) { 80 if (!req) {
81 if (kthread_should_stop()) 81 if (kthread_should_stop()) {
82 set_current_state(TASK_RUNNING);
82 break; 83 break;
84 }
83 up(&mq->thread_sem); 85 up(&mq->thread_sem);
84 schedule(); 86 schedule();
85 down(&mq->thread_sem); 87 down(&mq->thread_sem);
diff --git a/drivers/mmc/sdhci.c b/drivers/mmc/sdhci.c
index cd98117632d3..c2d13d7e9911 100644
--- a/drivers/mmc/sdhci.c
+++ b/drivers/mmc/sdhci.c
@@ -1170,8 +1170,8 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1170 } 1170 }
1171 1171
1172 if (pci_resource_len(pdev, first_bar + slot) != 0x100) { 1172 if (pci_resource_len(pdev, first_bar + slot) != 0x100) {
1173 printk(KERN_ERR DRIVER_NAME ": Invalid iomem size. Aborting.\n"); 1173 printk(KERN_ERR DRIVER_NAME ": Invalid iomem size. "
1174 return -ENODEV; 1174 "You may experience problems.\n");
1175 } 1175 }
1176 1176
1177 if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) { 1177 if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 5db716045927..0a7e86859bf1 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -459,7 +459,7 @@ add_dataflash(struct spi_device *spi, char *name,
459 struct mtd_info *device; 459 struct mtd_info *device;
460 struct flash_platform_data *pdata = spi->dev.platform_data; 460 struct flash_platform_data *pdata = spi->dev.platform_data;
461 461
462 priv = (struct dataflash *) kzalloc(sizeof *priv, GFP_KERNEL); 462 priv = kzalloc(sizeof *priv, GFP_KERNEL);
463 if (!priv) 463 if (!priv)
464 return -ENOMEM; 464 return -ENOMEM;
465 465
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c
index fa4362fb4dd8..0f3baa5d9c2a 100644
--- a/drivers/mtd/rfd_ftl.c
+++ b/drivers/mtd/rfd_ftl.c
@@ -768,7 +768,7 @@ static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
768 if (mtd->type != MTD_NORFLASH) 768 if (mtd->type != MTD_NORFLASH)
769 return; 769 return;
770 770
771 part = kcalloc(1, sizeof(struct partition), GFP_KERNEL); 771 part = kzalloc(sizeof(struct partition), GFP_KERNEL);
772 if (!part) 772 if (!part)
773 return; 773 return;
774 774
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 931028f672de..35ad5cff18e6 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -2131,14 +2131,15 @@ static int rtl8139_poll(struct net_device *dev, int *budget)
2131 } 2131 }
2132 2132
2133 if (done) { 2133 if (done) {
2134 unsigned long flags;
2134 /* 2135 /*
2135 * Order is important since data can get interrupted 2136 * Order is important since data can get interrupted
2136 * again when we think we are done. 2137 * again when we think we are done.
2137 */ 2138 */
2138 local_irq_disable(); 2139 local_irq_save(flags);
2139 RTL_W16_F(IntrMask, rtl8139_intr_mask); 2140 RTL_W16_F(IntrMask, rtl8139_intr_mask);
2140 __netif_rx_complete(dev); 2141 __netif_rx_complete(dev);
2141 local_irq_enable(); 2142 local_irq_restore(flags);
2142 } 2143 }
2143 spin_unlock(&tp->rx_lock); 2144 spin_unlock(&tp->rx_lock);
2144 2145
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 9de0eed6755b..8aa8dd02b910 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2384,6 +2384,14 @@ config CHELSIO_T1_1G
2384 Enables support for Chelsio's gigabit Ethernet PCI cards. If you 2384 Enables support for Chelsio's gigabit Ethernet PCI cards. If you
2385 are using only 10G cards say 'N' here. 2385 are using only 10G cards say 'N' here.
2386 2386
2387config CHELSIO_T1_NAPI
2388 bool "Use Rx Polling (NAPI)"
2389 depends on CHELSIO_T1
2390 default y
2391 help
2392 NAPI is a driver API designed to reduce CPU and interrupt load
2393 when the driver is receiving lots of packets from the card.
2394
2387config EHEA 2395config EHEA
2388 tristate "eHEA Ethernet support" 2396 tristate "eHEA Ethernet support"
2389 depends on IBMEBUS 2397 depends on IBMEBUS
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index b98592a8bac8..f22e46dfd770 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -186,7 +186,7 @@ static int ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
186 */ 186 */
187static int ipddp_create(struct ipddp_route *new_rt) 187static int ipddp_create(struct ipddp_route *new_rt)
188{ 188{
189 struct ipddp_route *rt =(struct ipddp_route*) kmalloc(sizeof(*rt), GFP_KERNEL); 189 struct ipddp_route *rt = kmalloc(sizeof(*rt), GFP_KERNEL);
190 190
191 if (rt == NULL) 191 if (rt == NULL)
192 return -ENOMEM; 192 return -ENOMEM;
diff --git a/drivers/net/bsd_comp.c b/drivers/net/bsd_comp.c
index bae1de1e7802..7845eaf6f29f 100644
--- a/drivers/net/bsd_comp.c
+++ b/drivers/net/bsd_comp.c
@@ -395,7 +395,7 @@ static void *bsd_alloc (unsigned char *options, int opt_len, int decomp)
395 * Allocate the main control structure for this instance. 395 * Allocate the main control structure for this instance.
396 */ 396 */
397 maxmaxcode = MAXCODE(bits); 397 maxmaxcode = MAXCODE(bits);
398 db = (struct bsd_db *) kmalloc (sizeof (struct bsd_db), 398 db = kmalloc(sizeof (struct bsd_db),
399 GFP_KERNEL); 399 GFP_KERNEL);
400 if (!db) 400 if (!db)
401 { 401 {
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
index de48eadddbc4..fd5d821f3f2a 100644
--- a/drivers/net/chelsio/cxgb2.c
+++ b/drivers/net/chelsio/cxgb2.c
@@ -220,9 +220,8 @@ static int cxgb_up(struct adapter *adapter)
220 220
221 t1_interrupts_clear(adapter); 221 t1_interrupts_clear(adapter);
222 222
223 adapter->params.has_msi = !disable_msi && pci_enable_msi(adapter->pdev) == 0; 223 adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
224 err = request_irq(adapter->pdev->irq, 224 err = request_irq(adapter->pdev->irq, t1_interrupt,
225 t1_select_intr_handler(adapter),
226 adapter->params.has_msi ? 0 : IRQF_SHARED, 225 adapter->params.has_msi ? 0 : IRQF_SHARED,
227 adapter->name, adapter); 226 adapter->name, adapter);
228 if (err) { 227 if (err) {
@@ -764,18 +763,7 @@ static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
764{ 763{
765 struct adapter *adapter = dev->priv; 764 struct adapter *adapter = dev->priv;
766 765
767 /* 766 adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
768 * If RX coalescing is requested we use NAPI, otherwise interrupts.
769 * This choice can be made only when all ports and the TOE are off.
770 */
771 if (adapter->open_device_map == 0)
772 adapter->params.sge.polling = c->use_adaptive_rx_coalesce;
773
774 if (adapter->params.sge.polling) {
775 adapter->params.sge.rx_coalesce_usecs = 0;
776 } else {
777 adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
778 }
779 adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce; 767 adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
780 adapter->params.sge.sample_interval_usecs = c->rate_sample_interval; 768 adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
781 t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge); 769 t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
@@ -944,7 +932,7 @@ static void t1_netpoll(struct net_device *dev)
944 struct adapter *adapter = dev->priv; 932 struct adapter *adapter = dev->priv;
945 933
946 local_irq_save(flags); 934 local_irq_save(flags);
947 t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter); 935 t1_interrupt(adapter->pdev->irq, adapter);
948 local_irq_restore(flags); 936 local_irq_restore(flags);
949} 937}
950#endif 938#endif
@@ -1165,7 +1153,10 @@ static int __devinit init_one(struct pci_dev *pdev,
1165#ifdef CONFIG_NET_POLL_CONTROLLER 1153#ifdef CONFIG_NET_POLL_CONTROLLER
1166 netdev->poll_controller = t1_netpoll; 1154 netdev->poll_controller = t1_netpoll;
1167#endif 1155#endif
1156#ifdef CONFIG_CHELSIO_T1_NAPI
1168 netdev->weight = 64; 1157 netdev->weight = 64;
1158 netdev->poll = t1_poll;
1159#endif
1169 1160
1170 SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops); 1161 SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
1171 } 1162 }
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 0ca8d876e16f..659cb2252e44 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1413,16 +1413,20 @@ static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
1413 1413
1414 if (unlikely(adapter->vlan_grp && p->vlan_valid)) { 1414 if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
1415 st->vlan_xtract++; 1415 st->vlan_xtract++;
1416 if (adapter->params.sge.polling) 1416#ifdef CONFIG_CHELSIO_T1_NAPI
1417 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, 1417 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
1418 ntohs(p->vlan)); 1418 ntohs(p->vlan));
1419 else 1419#else
1420 vlan_hwaccel_rx(skb, adapter->vlan_grp, 1420 vlan_hwaccel_rx(skb, adapter->vlan_grp,
1421 ntohs(p->vlan)); 1421 ntohs(p->vlan));
1422 } else if (adapter->params.sge.polling) 1422#endif
1423 } else {
1424#ifdef CONFIG_CHELSIO_T1_NAPI
1423 netif_receive_skb(skb); 1425 netif_receive_skb(skb);
1424 else 1426#else
1425 netif_rx(skb); 1427 netif_rx(skb);
1428#endif
1429 }
1426 return 0; 1430 return 0;
1427} 1431}
1428 1432
@@ -1572,6 +1576,7 @@ static int process_responses(struct adapter *adapter, int budget)
1572 return budget; 1576 return budget;
1573} 1577}
1574 1578
1579#ifdef CONFIG_CHELSIO_T1_NAPI
1575/* 1580/*
1576 * A simpler version of process_responses() that handles only pure (i.e., 1581 * A simpler version of process_responses() that handles only pure (i.e.,
1577 * non data-carrying) responses. Such respones are too light-weight to justify 1582 * non data-carrying) responses. Such respones are too light-weight to justify
@@ -1619,92 +1624,76 @@ static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
1619 * or protection from interrupts as data interrupts are off at this point and 1624 * or protection from interrupts as data interrupts are off at this point and
1620 * other adapter interrupts do not interfere. 1625 * other adapter interrupts do not interfere.
1621 */ 1626 */
1622static int t1_poll(struct net_device *dev, int *budget) 1627int t1_poll(struct net_device *dev, int *budget)
1623{ 1628{
1624 struct adapter *adapter = dev->priv; 1629 struct adapter *adapter = dev->priv;
1625 int effective_budget = min(*budget, dev->quota); 1630 int effective_budget = min(*budget, dev->quota);
1626
1627 int work_done = process_responses(adapter, effective_budget); 1631 int work_done = process_responses(adapter, effective_budget);
1632
1628 *budget -= work_done; 1633 *budget -= work_done;
1629 dev->quota -= work_done; 1634 dev->quota -= work_done;
1630 1635
1631 if (work_done >= effective_budget) 1636 if (work_done >= effective_budget)
1632 return 1; 1637 return 1;
1633 1638
1639 spin_lock_irq(&adapter->async_lock);
1634 __netif_rx_complete(dev); 1640 __netif_rx_complete(dev);
1635
1636 /*
1637 * Because we don't atomically flush the following write it is
1638 * possible that in very rare cases it can reach the device in a way
1639 * that races with a new response being written plus an error interrupt
1640 * causing the NAPI interrupt handler below to return unhandled status
1641 * to the OS. To protect against this would require flushing the write
1642 * and doing both the write and the flush with interrupts off. Way too
1643 * expensive and unjustifiable given the rarity of the race.
1644 */
1645 writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); 1641 writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
1646 return 0; 1642 writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
1647} 1643 adapter->regs + A_PL_ENABLE);
1644 spin_unlock_irq(&adapter->async_lock);
1648 1645
1649/* 1646 return 0;
1650 * Returns true if the device is already scheduled for polling.
1651 */
1652static inline int napi_is_scheduled(struct net_device *dev)
1653{
1654 return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
1655} 1647}
1656 1648
1657/* 1649/*
1658 * NAPI version of the main interrupt handler. 1650 * NAPI version of the main interrupt handler.
1659 */ 1651 */
1660static irqreturn_t t1_interrupt_napi(int irq, void *data) 1652irqreturn_t t1_interrupt(int irq, void *data)
1661{ 1653{
1662 int handled;
1663 struct adapter *adapter = data; 1654 struct adapter *adapter = data;
1655 struct net_device *dev = adapter->sge->netdev;
1664 struct sge *sge = adapter->sge; 1656 struct sge *sge = adapter->sge;
1665 struct respQ *q = &adapter->sge->respQ; 1657 u32 cause;
1658 int handled = 0;
1666 1659
1667 /* 1660 cause = readl(adapter->regs + A_PL_CAUSE);
1668 * Clear the SGE_DATA interrupt first thing. Normally the NAPI 1661 if (cause == 0 || cause == ~0)
1669 * handler has control of the response queue and the interrupt handler 1662 return IRQ_NONE;
1670 * can look at the queue reliably only once it knows NAPI is off.
1671 * We can't wait that long to clear the SGE_DATA interrupt because we
1672 * could race with t1_poll rearming the SGE interrupt, so we need to
1673 * clear the interrupt speculatively and really early on.
1674 */
1675 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
1676 1663
1677 spin_lock(&adapter->async_lock); 1664 spin_lock(&adapter->async_lock);
1678 if (!napi_is_scheduled(sge->netdev)) { 1665 if (cause & F_PL_INTR_SGE_DATA) {
1666 struct respQ *q = &adapter->sge->respQ;
1679 struct respQ_e *e = &q->entries[q->cidx]; 1667 struct respQ_e *e = &q->entries[q->cidx];
1680 1668
1681 if (e->GenerationBit == q->genbit) { 1669 handled = 1;
1682 if (e->DataValid || 1670 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
1683 process_pure_responses(adapter, e)) { 1671
1684 if (likely(__netif_rx_schedule_prep(sge->netdev))) 1672 if (e->GenerationBit == q->genbit &&
1685 __netif_rx_schedule(sge->netdev); 1673 __netif_rx_schedule_prep(dev)) {
1686 else if (net_ratelimit()) 1674 if (e->DataValid || process_pure_responses(adapter, e)) {
1687 printk(KERN_INFO 1675 /* mask off data IRQ */
1688 "NAPI schedule failure!\n"); 1676 writel(adapter->slow_intr_mask,
1689 } else 1677 adapter->regs + A_PL_ENABLE);
1690 writel(q->cidx, adapter->regs + A_SG_SLEEPING); 1678 __netif_rx_schedule(sge->netdev);
1691 1679 goto unlock;
1692 handled = 1; 1680 }
1693 goto unlock; 1681 /* no data, no NAPI needed */
1694 } else 1682 netif_poll_enable(dev);
1695 writel(q->cidx, adapter->regs + A_SG_SLEEPING); 1683
1696 } else if (readl(adapter->regs + A_PL_CAUSE) & F_PL_INTR_SGE_DATA) { 1684 }
1697 printk(KERN_ERR "data interrupt while NAPI running\n"); 1685 writel(q->cidx, adapter->regs + A_SG_SLEEPING);
1698 } 1686 } else
1699 1687 handled = t1_slow_intr_handler(adapter);
1700 handled = t1_slow_intr_handler(adapter); 1688
1701 if (!handled) 1689 if (!handled)
1702 sge->stats.unhandled_irqs++; 1690 sge->stats.unhandled_irqs++;
1703 unlock: 1691unlock:
1704 spin_unlock(&adapter->async_lock); 1692 spin_unlock(&adapter->async_lock);
1705 return IRQ_RETVAL(handled != 0); 1693 return IRQ_RETVAL(handled != 0);
1706} 1694}
1707 1695
1696#else
1708/* 1697/*
1709 * Main interrupt handler, optimized assuming that we took a 'DATA' 1698 * Main interrupt handler, optimized assuming that we took a 'DATA'
1710 * interrupt. 1699 * interrupt.
@@ -1720,7 +1709,7 @@ static irqreturn_t t1_interrupt_napi(int irq, void *data)
1720 * 5. If we took an interrupt, but no valid respQ descriptors was found we 1709 * 5. If we took an interrupt, but no valid respQ descriptors was found we
1721 * let the slow_intr_handler run and do error handling. 1710 * let the slow_intr_handler run and do error handling.
1722 */ 1711 */
1723static irqreturn_t t1_interrupt(int irq, void *cookie) 1712irqreturn_t t1_interrupt(int irq, void *cookie)
1724{ 1713{
1725 int work_done; 1714 int work_done;
1726 struct respQ_e *e; 1715 struct respQ_e *e;
@@ -1752,11 +1741,7 @@ static irqreturn_t t1_interrupt(int irq, void *cookie)
1752 spin_unlock(&adapter->async_lock); 1741 spin_unlock(&adapter->async_lock);
1753 return IRQ_RETVAL(work_done != 0); 1742 return IRQ_RETVAL(work_done != 0);
1754} 1743}
1755 1744#endif
1756irq_handler_t t1_select_intr_handler(adapter_t *adapter)
1757{
1758 return adapter->params.sge.polling ? t1_interrupt_napi : t1_interrupt;
1759}
1760 1745
1761/* 1746/*
1762 * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it. 1747 * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
@@ -2033,7 +2018,6 @@ static void sge_tx_reclaim_cb(unsigned long data)
2033 */ 2018 */
2034int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p) 2019int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
2035{ 2020{
2036 sge->netdev->poll = t1_poll;
2037 sge->fixed_intrtimer = p->rx_coalesce_usecs * 2021 sge->fixed_intrtimer = p->rx_coalesce_usecs *
2038 core_ticks_per_usec(sge->adapter); 2022 core_ticks_per_usec(sge->adapter);
2039 writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER); 2023 writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
@@ -2234,7 +2218,6 @@ struct sge * __devinit t1_sge_create(struct adapter *adapter,
2234 2218
2235 p->coalesce_enable = 0; 2219 p->coalesce_enable = 0;
2236 p->sample_interval_usecs = 0; 2220 p->sample_interval_usecs = 0;
2237 p->polling = 0;
2238 2221
2239 return sge; 2222 return sge;
2240nomem_port: 2223nomem_port:
diff --git a/drivers/net/chelsio/sge.h b/drivers/net/chelsio/sge.h
index 7ceb0117d039..d132a0ef2a22 100644
--- a/drivers/net/chelsio/sge.h
+++ b/drivers/net/chelsio/sge.h
@@ -76,7 +76,9 @@ struct sge *t1_sge_create(struct adapter *, struct sge_params *);
76int t1_sge_configure(struct sge *, struct sge_params *); 76int t1_sge_configure(struct sge *, struct sge_params *);
77int t1_sge_set_coalesce_params(struct sge *, struct sge_params *); 77int t1_sge_set_coalesce_params(struct sge *, struct sge_params *);
78void t1_sge_destroy(struct sge *); 78void t1_sge_destroy(struct sge *);
79irq_handler_t t1_select_intr_handler(adapter_t *adapter); 79irqreturn_t t1_interrupt(int irq, void *cookie);
80int t1_poll(struct net_device *, int *);
81
80int t1_start_xmit(struct sk_buff *skb, struct net_device *dev); 82int t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
81void t1_set_vlan_accel(struct adapter *adapter, int on_off); 83void t1_set_vlan_accel(struct adapter *adapter, int on_off);
82void t1_sge_start(struct sge *); 84void t1_sge_start(struct sge *);
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 03bf164f9e8d..c2ae2a24629b 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1930,9 +1930,8 @@ static int e100_rx_alloc_list(struct nic *nic)
1930 nic->rx_to_use = nic->rx_to_clean = NULL; 1930 nic->rx_to_use = nic->rx_to_clean = NULL;
1931 nic->ru_running = RU_UNINITIALIZED; 1931 nic->ru_running = RU_UNINITIALIZED;
1932 1932
1933 if(!(nic->rxs = kmalloc(sizeof(struct rx) * count, GFP_ATOMIC))) 1933 if(!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
1934 return -ENOMEM; 1934 return -ENOMEM;
1935 memset(nic->rxs, 0, sizeof(struct rx) * count);
1936 1935
1937 for(rx = nic->rxs, i = 0; i < count; rx++, i++) { 1936 for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
1938 rx->next = (i + 1 < count) ? rx + 1 : nic->rxs; 1937 rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 8a83db0fb3b7..153b6dc80af4 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -1177,7 +1177,7 @@ static void baycom_probe(struct net_device *dev)
1177 dev->mtu = AX25_DEF_PACLEN; /* eth_mtu is the default */ 1177 dev->mtu = AX25_DEF_PACLEN; /* eth_mtu is the default */
1178 dev->addr_len = AX25_ADDR_LEN; /* sizeof an ax.25 address */ 1178 dev->addr_len = AX25_ADDR_LEN; /* sizeof an ax.25 address */
1179 memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); 1179 memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
1180 memcpy(dev->dev_addr, &ax25_nocall, AX25_ADDR_LEN); 1180 memcpy(dev->dev_addr, &null_ax25_address, AX25_ADDR_LEN);
1181 dev->tx_queue_len = 16; 1181 dev->tx_queue_len = 16;
1182 1182
1183 /* New style flags */ 1183 /* New style flags */
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 16620bd97fbf..11af0ae7510e 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1603,7 +1603,7 @@ toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
1603 irda_qos_bits_to_value (&self->qos); 1603 irda_qos_bits_to_value (&self->qos);
1604 1604
1605 /* Allocate twice the size to guarantee alignment */ 1605 /* Allocate twice the size to guarantee alignment */
1606 self->ringbuf = (void *) kmalloc (OBOE_RING_LEN << 1, GFP_KERNEL); 1606 self->ringbuf = kmalloc(OBOE_RING_LEN << 1, GFP_KERNEL);
1607 if (!self->ringbuf) 1607 if (!self->ringbuf)
1608 { 1608 {
1609 printk (KERN_ERR DRIVER_NAME ": can't allocate DMA buffers\n"); 1609 printk (KERN_ERR DRIVER_NAME ": can't allocate DMA buffers\n");
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 6e95645e7245..3ca1082ec776 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1747,7 +1747,7 @@ static int irda_usb_probe(struct usb_interface *intf,
1747 /* Don't change this buffer size and allocation without doing 1747 /* Don't change this buffer size and allocation without doing
1748 * some heavy and complete testing. Don't ask why :-( 1748 * some heavy and complete testing. Don't ask why :-(
1749 * Jean II */ 1749 * Jean II */
1750 self->speed_buff = (char *) kmalloc(IRDA_USB_SPEED_MTU, GFP_KERNEL); 1750 self->speed_buff = kmalloc(IRDA_USB_SPEED_MTU, GFP_KERNEL);
1751 if (self->speed_buff == NULL) 1751 if (self->speed_buff == NULL)
1752 goto err_out_3; 1752 goto err_out_3;
1753 1753
diff --git a/drivers/net/irda/irport.c b/drivers/net/irda/irport.c
index 654a68b490ae..3098960dc2a1 100644
--- a/drivers/net/irda/irport.c
+++ b/drivers/net/irda/irport.c
@@ -164,7 +164,7 @@ irport_open(int i, unsigned int iobase, unsigned int irq)
164 164
165 /* Allocate memory if needed */ 165 /* Allocate memory if needed */
166 if (self->tx_buff.truesize > 0) { 166 if (self->tx_buff.truesize > 0) {
167 self->tx_buff.head = (__u8 *) kmalloc(self->tx_buff.truesize, 167 self->tx_buff.head = kmalloc(self->tx_buff.truesize,
168 GFP_KERNEL); 168 GFP_KERNEL);
169 if (self->tx_buff.head == NULL) { 169 if (self->tx_buff.head == NULL) {
170 IRDA_ERROR("%s(), can't allocate memory for " 170 IRDA_ERROR("%s(), can't allocate memory for "
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
index b833016f1825..177c502f7385 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/lp486e.c
@@ -884,7 +884,7 @@ static int i596_start_xmit (struct sk_buff *skb, struct net_device *dev) {
884 884
885 dev->trans_start = jiffies; 885 dev->trans_start = jiffies;
886 886
887 tx_cmd = (struct tx_cmd *) kmalloc ((sizeof (struct tx_cmd) + sizeof (struct i596_tbd)), GFP_ATOMIC); 887 tx_cmd = kmalloc((sizeof (struct tx_cmd) + sizeof (struct i596_tbd)), GFP_ATOMIC);
888 if (tx_cmd == NULL) { 888 if (tx_cmd == NULL) {
889 printk(KERN_WARNING "%s: i596_xmit Memory squeeze, dropping packet.\n", dev->name); 889 printk(KERN_WARNING "%s: i596_xmit Memory squeeze, dropping packet.\n", dev->name);
890 lp->stats.tx_dropped++; 890 lp->stats.tx_dropped++;
@@ -1266,7 +1266,7 @@ static void set_multicast_list(struct net_device *dev) {
1266 if (dev->mc_count > 0) { 1266 if (dev->mc_count > 0) {
1267 struct dev_mc_list *dmi; 1267 struct dev_mc_list *dmi;
1268 char *cp; 1268 char *cp;
1269 cmd = (struct i596_cmd *)kmalloc(sizeof(struct i596_cmd)+2+dev->mc_count*6, GFP_ATOMIC); 1269 cmd = kmalloc(sizeof(struct i596_cmd)+2+dev->mc_count*6, GFP_ATOMIC);
1270 if (cmd == NULL) { 1270 if (cmd == NULL) {
1271 printk (KERN_ERR "%s: set_multicast Memory squeeze.\n", dev->name); 1271 printk (KERN_ERR "%s: set_multicast Memory squeeze.\n", dev->name);
1272 return; 1272 return;
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index bd0ce98c939c..25b559b5d5ed 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -264,12 +264,12 @@ static void macb_update_stats(struct macb *bp)
264 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); 264 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
265 265
266 for(; p < end; p++, reg++) 266 for(; p < end; p++, reg++)
267 *p += readl(reg); 267 *p += __raw_readl(reg);
268} 268}
269 269
270static void macb_periodic_task(void *arg) 270static void macb_periodic_task(struct work_struct *work)
271{ 271{
272 struct macb *bp = arg; 272 struct macb *bp = container_of(work, struct macb, periodic_task.work);
273 273
274 macb_update_stats(bp); 274 macb_update_stats(bp);
275 macb_check_media(bp, 1, 0); 275 macb_check_media(bp, 1, 0);
@@ -1088,7 +1088,7 @@ static int __devinit macb_probe(struct platform_device *pdev)
1088 1088
1089 dev->base_addr = regs->start; 1089 dev->base_addr = regs->start;
1090 1090
1091 INIT_WORK(&bp->periodic_task, macb_periodic_task, bp); 1091 INIT_DELAYED_WORK(&bp->periodic_task, macb_periodic_task);
1092 mutex_init(&bp->mdio_mutex); 1092 mutex_init(&bp->mdio_mutex);
1093 init_completion(&bp->mdio_complete); 1093 init_completion(&bp->mdio_complete);
1094 1094
diff --git a/drivers/net/macb.h b/drivers/net/macb.h
index 8c253db69881..27bf0ae0f0bb 100644
--- a/drivers/net/macb.h
+++ b/drivers/net/macb.h
@@ -250,9 +250,9 @@
250 250
251/* Register access macros */ 251/* Register access macros */
252#define macb_readl(port,reg) \ 252#define macb_readl(port,reg) \
253 readl((port)->regs + MACB_##reg) 253 __raw_readl((port)->regs + MACB_##reg)
254#define macb_writel(port,reg,value) \ 254#define macb_writel(port,reg,value) \
255 writel((value), (port)->regs + MACB_##reg) 255 __raw_writel((value), (port)->regs + MACB_##reg)
256 256
257struct dma_desc { 257struct dma_desc {
258 u32 addr; 258 u32 addr;
@@ -377,7 +377,7 @@ struct macb {
377 377
378 unsigned int rx_pending, tx_pending; 378 unsigned int rx_pending, tx_pending;
379 379
380 struct work_struct periodic_task; 380 struct delayed_work periodic_task;
381 381
382 struct mutex mdio_mutex; 382 struct mutex mdio_mutex;
383 struct completion mdio_complete; 383 struct completion mdio_complete;
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 81f127a78afa..94ac168be593 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -71,7 +71,7 @@
71#include "myri10ge_mcp.h" 71#include "myri10ge_mcp.h"
72#include "myri10ge_mcp_gen_header.h" 72#include "myri10ge_mcp_gen_header.h"
73 73
74#define MYRI10GE_VERSION_STR "1.0.0" 74#define MYRI10GE_VERSION_STR "1.1.0"
75 75
76MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); 76MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
77MODULE_AUTHOR("Maintainer: help@myri.com"); 77MODULE_AUTHOR("Maintainer: help@myri.com");
@@ -92,8 +92,13 @@ MODULE_LICENSE("Dual BSD/GPL");
92#define MYRI10GE_NO_CONFIRM_DATA htonl(0xffffffff) 92#define MYRI10GE_NO_CONFIRM_DATA htonl(0xffffffff)
93#define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff 93#define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff
94 94
95#define MYRI10GE_ALLOC_ORDER 0
96#define MYRI10GE_ALLOC_SIZE ((1 << MYRI10GE_ALLOC_ORDER) * PAGE_SIZE)
97#define MYRI10GE_MAX_FRAGS_PER_FRAME (MYRI10GE_MAX_ETHER_MTU/MYRI10GE_ALLOC_SIZE + 1)
98
95struct myri10ge_rx_buffer_state { 99struct myri10ge_rx_buffer_state {
96 struct sk_buff *skb; 100 struct page *page;
101 int page_offset;
97 DECLARE_PCI_UNMAP_ADDR(bus) 102 DECLARE_PCI_UNMAP_ADDR(bus)
98 DECLARE_PCI_UNMAP_LEN(len) 103 DECLARE_PCI_UNMAP_LEN(len)
99}; 104};
@@ -116,9 +121,14 @@ struct myri10ge_rx_buf {
116 u8 __iomem *wc_fifo; /* w/c rx dma addr fifo address */ 121 u8 __iomem *wc_fifo; /* w/c rx dma addr fifo address */
117 struct mcp_kreq_ether_recv *shadow; /* host shadow of recv ring */ 122 struct mcp_kreq_ether_recv *shadow; /* host shadow of recv ring */
118 struct myri10ge_rx_buffer_state *info; 123 struct myri10ge_rx_buffer_state *info;
124 struct page *page;
125 dma_addr_t bus;
126 int page_offset;
119 int cnt; 127 int cnt;
128 int fill_cnt;
120 int alloc_fail; 129 int alloc_fail;
121 int mask; /* number of rx slots -1 */ 130 int mask; /* number of rx slots -1 */
131 int watchdog_needed;
122}; 132};
123 133
124struct myri10ge_tx_buf { 134struct myri10ge_tx_buf {
@@ -150,6 +160,7 @@ struct myri10ge_priv {
150 struct myri10ge_rx_buf rx_big; 160 struct myri10ge_rx_buf rx_big;
151 struct myri10ge_rx_done rx_done; 161 struct myri10ge_rx_done rx_done;
152 int small_bytes; 162 int small_bytes;
163 int big_bytes;
153 struct net_device *dev; 164 struct net_device *dev;
154 struct net_device_stats stats; 165 struct net_device_stats stats;
155 u8 __iomem *sram; 166 u8 __iomem *sram;
@@ -238,11 +249,6 @@ module_param(myri10ge_force_firmware, int, S_IRUGO);
238MODULE_PARM_DESC(myri10ge_force_firmware, 249MODULE_PARM_DESC(myri10ge_force_firmware,
239 "Force firmware to assume aligned completions\n"); 250 "Force firmware to assume aligned completions\n");
240 251
241static int myri10ge_skb_cross_4k = 0;
242module_param(myri10ge_skb_cross_4k, int, S_IRUGO | S_IWUSR);
243MODULE_PARM_DESC(myri10ge_skb_cross_4k,
244 "Can a small skb cross a 4KB boundary?\n");
245
246static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN; 252static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
247module_param(myri10ge_initial_mtu, int, S_IRUGO); 253module_param(myri10ge_initial_mtu, int, S_IRUGO);
248MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU\n"); 254MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU\n");
@@ -266,6 +272,10 @@ static int myri10ge_debug = -1; /* defaults above */
266module_param(myri10ge_debug, int, 0); 272module_param(myri10ge_debug, int, 0);
267MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)"); 273MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");
268 274
275static int myri10ge_fill_thresh = 256;
276module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR);
277MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed\n");
278
269#define MYRI10GE_FW_OFFSET 1024*1024 279#define MYRI10GE_FW_OFFSET 1024*1024
270#define MYRI10GE_HIGHPART_TO_U32(X) \ 280#define MYRI10GE_HIGHPART_TO_U32(X) \
271(sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0) 281(sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0)
@@ -273,9 +283,9 @@ MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");
273 283
274#define myri10ge_pio_copy(to,from,size) __iowrite64_copy(to,from,size/8) 284#define myri10ge_pio_copy(to,from,size) __iowrite64_copy(to,from,size/8)
275 285
276static inline void put_be32(__be32 val, __be32 __iomem *p) 286static inline void put_be32(__be32 val, __be32 __iomem * p)
277{ 287{
278 __raw_writel((__force __u32)val, (__force void __iomem *)p); 288 __raw_writel((__force __u32) val, (__force void __iomem *)p);
279} 289}
280 290
281static int 291static int
@@ -804,194 +814,179 @@ myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst,
804 mb(); 814 mb();
805} 815}
806 816
807/* 817static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, __wsum hw_csum)
808 * Set of routines to get a new receive buffer. Any buffer which
809 * crosses a 4KB boundary must start on a 4KB boundary due to PCIe
810 * wdma restrictions. We also try to align any smaller allocation to
811 * at least a 16 byte boundary for efficiency. We assume the linux
812 * memory allocator works by powers of 2, and will not return memory
813 * smaller than 2KB which crosses a 4KB boundary. If it does, we fall
814 * back to allocating 2x as much space as required.
815 *
816 * We intend to replace large (>4KB) skb allocations by using
817 * pages directly and building a fraglist in the near future.
818 */
819
820static inline struct sk_buff *myri10ge_alloc_big(struct net_device *dev,
821 int bytes)
822{
823 struct sk_buff *skb;
824 unsigned long data, roundup;
825
826 skb = netdev_alloc_skb(dev, bytes + 4096 + MXGEFW_PAD);
827 if (skb == NULL)
828 return NULL;
829
830 /* Correct skb->truesize so that socket buffer
831 * accounting is not confused the rounding we must
832 * do to satisfy alignment constraints.
833 */
834 skb->truesize -= 4096;
835
836 data = (unsigned long)(skb->data);
837 roundup = (-data) & (4095);
838 skb_reserve(skb, roundup);
839 return skb;
840}
841
842/* Allocate 2x as much space as required and use whichever portion
843 * does not cross a 4KB boundary */
844static inline struct sk_buff *myri10ge_alloc_small_safe(struct net_device *dev,
845 unsigned int bytes)
846{ 818{
847 struct sk_buff *skb; 819 struct vlan_hdr *vh = (struct vlan_hdr *)(skb->data);
848 unsigned long data, boundary;
849
850 skb = netdev_alloc_skb(dev, 2 * (bytes + MXGEFW_PAD) - 1);
851 if (unlikely(skb == NULL))
852 return NULL;
853
854 /* Correct skb->truesize so that socket buffer
855 * accounting is not confused the rounding we must
856 * do to satisfy alignment constraints.
857 */
858 skb->truesize -= bytes + MXGEFW_PAD;
859
860 data = (unsigned long)(skb->data);
861 boundary = (data + 4095UL) & ~4095UL;
862 if ((boundary - data) >= (bytes + MXGEFW_PAD))
863 return skb;
864 820
865 skb_reserve(skb, boundary - data); 821 if ((skb->protocol == htons(ETH_P_8021Q)) &&
866 return skb; 822 (vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) ||
823 vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) {
824 skb->csum = hw_csum;
825 skb->ip_summed = CHECKSUM_COMPLETE;
826 }
867} 827}
868 828
869/* Allocate just enough space, and verify that the allocated 829static inline void
870 * space does not cross a 4KB boundary */ 830myri10ge_rx_skb_build(struct sk_buff *skb, u8 * va,
871static inline struct sk_buff *myri10ge_alloc_small(struct net_device *dev, 831 struct skb_frag_struct *rx_frags, int len, int hlen)
872 int bytes)
873{ 832{
874 struct sk_buff *skb; 833 struct skb_frag_struct *skb_frags;
875 unsigned long roundup, data, end; 834
876 835 skb->len = skb->data_len = len;
877 skb = netdev_alloc_skb(dev, bytes + 16 + MXGEFW_PAD); 836 skb->truesize = len + sizeof(struct sk_buff);
878 if (unlikely(skb == NULL)) 837 /* attach the page(s) */
879 return NULL; 838
880 839 skb_frags = skb_shinfo(skb)->frags;
881 /* Round allocated buffer to 16 byte boundary */ 840 while (len > 0) {
882 data = (unsigned long)(skb->data); 841 memcpy(skb_frags, rx_frags, sizeof(*skb_frags));
883 roundup = (-data) & 15UL; 842 len -= rx_frags->size;
884 skb_reserve(skb, roundup); 843 skb_frags++;
885 /* Verify that the data buffer does not cross a page boundary */ 844 rx_frags++;
886 data = (unsigned long)(skb->data); 845 skb_shinfo(skb)->nr_frags++;
887 end = data + bytes + MXGEFW_PAD - 1; 846 }
888 if (unlikely(((end >> 12) != (data >> 12)) && (data & 4095UL))) { 847
889 printk(KERN_NOTICE 848 /* pskb_may_pull is not available in irq context, but
890 "myri10ge_alloc_small: small skb crossed 4KB boundary\n"); 849 * skb_pull() (for ether_pad and eth_type_trans()) requires
891 myri10ge_skb_cross_4k = 1; 850 * the beginning of the packet in skb_headlen(), move it
892 dev_kfree_skb_any(skb); 851 * manually */
893 skb = myri10ge_alloc_small_safe(dev, bytes); 852 memcpy(skb->data, va, hlen);
894 } 853 skb_shinfo(skb)->frags[0].page_offset += hlen;
895 return skb; 854 skb_shinfo(skb)->frags[0].size -= hlen;
855 skb->data_len -= hlen;
856 skb->tail += hlen;
857 skb_pull(skb, MXGEFW_PAD);
896} 858}
897 859
898static inline int 860static void
899myri10ge_getbuf(struct myri10ge_rx_buf *rx, struct myri10ge_priv *mgp, 861myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
900 int bytes, int idx) 862 int bytes, int watchdog)
901{ 863{
902 struct net_device *dev = mgp->dev; 864 struct page *page;
903 struct pci_dev *pdev = mgp->pdev; 865 int idx;
904 struct sk_buff *skb;
905 dma_addr_t bus;
906 int len, retval = 0;
907 866
908 bytes += VLAN_HLEN; /* account for 802.1q vlan tag */ 867 if (unlikely(rx->watchdog_needed && !watchdog))
868 return;
909 869
910 if ((bytes + MXGEFW_PAD) > (4096 - 16) /* linux overhead */ ) 870 /* try to refill entire ring */
911 skb = myri10ge_alloc_big(dev, bytes); 871 while (rx->fill_cnt != (rx->cnt + rx->mask + 1)) {
912 else if (myri10ge_skb_cross_4k) 872 idx = rx->fill_cnt & rx->mask;
913 skb = myri10ge_alloc_small_safe(dev, bytes);
914 else
915 skb = myri10ge_alloc_small(dev, bytes);
916 873
917 if (unlikely(skb == NULL)) { 874 if ((bytes < MYRI10GE_ALLOC_SIZE / 2) &&
918 rx->alloc_fail++; 875 (rx->page_offset + bytes <= MYRI10GE_ALLOC_SIZE)) {
919 retval = -ENOBUFS; 876 /* we can use part of previous page */
920 goto done; 877 get_page(rx->page);
921 } 878 } else {
922 879 /* we need a new page */
923 /* set len so that it only covers the area we 880 page =
924 * need mapped for DMA */ 881 alloc_pages(GFP_ATOMIC | __GFP_COMP,
925 len = bytes + MXGEFW_PAD; 882 MYRI10GE_ALLOC_ORDER);
926 883 if (unlikely(page == NULL)) {
927 bus = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE); 884 if (rx->fill_cnt - rx->cnt < 16)
928 rx->info[idx].skb = skb; 885 rx->watchdog_needed = 1;
929 pci_unmap_addr_set(&rx->info[idx], bus, bus); 886 return;
930 pci_unmap_len_set(&rx->info[idx], len, len); 887 }
931 rx->shadow[idx].addr_low = htonl(MYRI10GE_LOWPART_TO_U32(bus)); 888 rx->page = page;
932 rx->shadow[idx].addr_high = htonl(MYRI10GE_HIGHPART_TO_U32(bus)); 889 rx->page_offset = 0;
933 890 rx->bus = pci_map_page(mgp->pdev, page, 0,
934done: 891 MYRI10GE_ALLOC_SIZE,
935 /* copy 8 descriptors (64-bytes) to the mcp at a time */ 892 PCI_DMA_FROMDEVICE);
936 if ((idx & 7) == 7) { 893 }
937 if (rx->wc_fifo == NULL) 894 rx->info[idx].page = rx->page;
938 myri10ge_submit_8rx(&rx->lanai[idx - 7], 895 rx->info[idx].page_offset = rx->page_offset;
939 &rx->shadow[idx - 7]); 896 /* note that this is the address of the start of the
940 else { 897 * page */
941 mb(); 898 pci_unmap_addr_set(&rx->info[idx], bus, rx->bus);
942 myri10ge_pio_copy(rx->wc_fifo, 899 rx->shadow[idx].addr_low =
943 &rx->shadow[idx - 7], 64); 900 htonl(MYRI10GE_LOWPART_TO_U32(rx->bus) + rx->page_offset);
901 rx->shadow[idx].addr_high =
902 htonl(MYRI10GE_HIGHPART_TO_U32(rx->bus));
903
904 /* start next packet on a cacheline boundary */
905 rx->page_offset += SKB_DATA_ALIGN(bytes);
906 rx->fill_cnt++;
907
908 /* copy 8 descriptors to the firmware at a time */
909 if ((idx & 7) == 7) {
910 if (rx->wc_fifo == NULL)
911 myri10ge_submit_8rx(&rx->lanai[idx - 7],
912 &rx->shadow[idx - 7]);
913 else {
914 mb();
915 myri10ge_pio_copy(rx->wc_fifo,
916 &rx->shadow[idx - 7], 64);
917 }
944 } 918 }
945 } 919 }
946 return retval;
947} 920}
948 921
949static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, __wsum hw_csum) 922static inline void
923myri10ge_unmap_rx_page(struct pci_dev *pdev,
924 struct myri10ge_rx_buffer_state *info, int bytes)
950{ 925{
951 struct vlan_hdr *vh = (struct vlan_hdr *)(skb->data); 926 /* unmap the recvd page if we're the only or last user of it */
952 927 if (bytes >= MYRI10GE_ALLOC_SIZE / 2 ||
953 if ((skb->protocol == htons(ETH_P_8021Q)) && 928 (info->page_offset + 2 * bytes) > MYRI10GE_ALLOC_SIZE) {
954 (vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) || 929 pci_unmap_page(pdev, (pci_unmap_addr(info, bus)
955 vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) { 930 & ~(MYRI10GE_ALLOC_SIZE - 1)),
956 skb->csum = hw_csum; 931 MYRI10GE_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
957 skb->ip_summed = CHECKSUM_COMPLETE;
958 } 932 }
959} 933}
960 934
961static inline unsigned long 935#define MYRI10GE_HLEN 64 /* The number of bytes to copy from a
936 * page into an skb */
937
938static inline int
962myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, 939myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
963 int bytes, int len, __wsum csum) 940 int bytes, int len, __wsum csum)
964{ 941{
965 dma_addr_t bus;
966 struct sk_buff *skb; 942 struct sk_buff *skb;
967 int idx, unmap_len; 943 struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME];
944 int i, idx, hlen, remainder;
945 struct pci_dev *pdev = mgp->pdev;
946 struct net_device *dev = mgp->dev;
947 u8 *va;
968 948
949 len += MXGEFW_PAD;
969 idx = rx->cnt & rx->mask; 950 idx = rx->cnt & rx->mask;
970 rx->cnt++; 951 va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
952 prefetch(va);
953 /* Fill skb_frag_struct(s) with data from our receive */
954 for (i = 0, remainder = len; remainder > 0; i++) {
955 myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
956 rx_frags[i].page = rx->info[idx].page;
957 rx_frags[i].page_offset = rx->info[idx].page_offset;
958 if (remainder < MYRI10GE_ALLOC_SIZE)
959 rx_frags[i].size = remainder;
960 else
961 rx_frags[i].size = MYRI10GE_ALLOC_SIZE;
962 rx->cnt++;
963 idx = rx->cnt & rx->mask;
964 remainder -= MYRI10GE_ALLOC_SIZE;
965 }
966
967 hlen = MYRI10GE_HLEN > len ? len : MYRI10GE_HLEN;
971 968
972 /* save a pointer to the received skb */ 969 /* allocate an skb to attach the page(s) to. */
973 skb = rx->info[idx].skb;
974 bus = pci_unmap_addr(&rx->info[idx], bus);
975 unmap_len = pci_unmap_len(&rx->info[idx], len);
976 970
977 /* try to replace the received skb */ 971 skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16);
978 if (myri10ge_getbuf(rx, mgp, bytes, idx)) { 972 if (unlikely(skb == NULL)) {
979 /* drop the frame -- the old skbuf is re-cycled */ 973 mgp->stats.rx_dropped++;
980 mgp->stats.rx_dropped += 1; 974 do {
975 i--;
976 put_page(rx_frags[i].page);
977 } while (i != 0);
981 return 0; 978 return 0;
982 } 979 }
983 980
984 /* unmap the recvd skb */ 981 /* Attach the pages to the skb, and trim off any padding */
985 pci_unmap_single(mgp->pdev, bus, unmap_len, PCI_DMA_FROMDEVICE); 982 myri10ge_rx_skb_build(skb, va, rx_frags, len, hlen);
986 983 if (skb_shinfo(skb)->frags[0].size <= 0) {
987 /* mcp implicitly skips 1st bytes so that packet is properly 984 put_page(skb_shinfo(skb)->frags[0].page);
988 * aligned */ 985 skb_shinfo(skb)->nr_frags = 0;
989 skb_reserve(skb, MXGEFW_PAD); 986 }
990 987 skb->protocol = eth_type_trans(skb, dev);
991 /* set the length of the frame */ 988 skb->dev = dev;
992 skb_put(skb, len);
993 989
994 skb->protocol = eth_type_trans(skb, mgp->dev);
995 if (mgp->csum_flag) { 990 if (mgp->csum_flag) {
996 if ((skb->protocol == htons(ETH_P_IP)) || 991 if ((skb->protocol == htons(ETH_P_IP)) ||
997 (skb->protocol == htons(ETH_P_IPV6))) { 992 (skb->protocol == htons(ETH_P_IPV6))) {
@@ -1000,9 +995,8 @@ myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
1000 } else 995 } else
1001 myri10ge_vlan_ip_csum(skb, csum); 996 myri10ge_vlan_ip_csum(skb, csum);
1002 } 997 }
1003
1004 netif_receive_skb(skb); 998 netif_receive_skb(skb);
1005 mgp->dev->last_rx = jiffies; 999 dev->last_rx = jiffies;
1006 return 1; 1000 return 1;
1007} 1001}
1008 1002
@@ -1079,7 +1073,7 @@ static inline void myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int *limit)
1079 length, checksum); 1073 length, checksum);
1080 else 1074 else
1081 rx_ok = myri10ge_rx_done(mgp, &mgp->rx_big, 1075 rx_ok = myri10ge_rx_done(mgp, &mgp->rx_big,
1082 mgp->dev->mtu + ETH_HLEN, 1076 mgp->big_bytes,
1083 length, checksum); 1077 length, checksum);
1084 rx_packets += rx_ok; 1078 rx_packets += rx_ok;
1085 rx_bytes += rx_ok * (unsigned long)length; 1079 rx_bytes += rx_ok * (unsigned long)length;
@@ -1094,6 +1088,14 @@ static inline void myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int *limit)
1094 rx_done->cnt = cnt; 1088 rx_done->cnt = cnt;
1095 mgp->stats.rx_packets += rx_packets; 1089 mgp->stats.rx_packets += rx_packets;
1096 mgp->stats.rx_bytes += rx_bytes; 1090 mgp->stats.rx_bytes += rx_bytes;
1091
1092 /* restock receive rings if needed */
1093 if (mgp->rx_small.fill_cnt - mgp->rx_small.cnt < myri10ge_fill_thresh)
1094 myri10ge_alloc_rx_pages(mgp, &mgp->rx_small,
1095 mgp->small_bytes + MXGEFW_PAD, 0);
1096 if (mgp->rx_big.fill_cnt - mgp->rx_big.cnt < myri10ge_fill_thresh)
1097 myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 0);
1098
1097} 1099}
1098 1100
1099static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp) 1101static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
@@ -1484,56 +1486,48 @@ static int myri10ge_allocate_rings(struct net_device *dev)
1484 goto abort_with_rx_small_info; 1486 goto abort_with_rx_small_info;
1485 1487
1486 /* Fill the receive rings */ 1488 /* Fill the receive rings */
1489 mgp->rx_big.cnt = 0;
1490 mgp->rx_small.cnt = 0;
1491 mgp->rx_big.fill_cnt = 0;
1492 mgp->rx_small.fill_cnt = 0;
1493 mgp->rx_small.page_offset = MYRI10GE_ALLOC_SIZE;
1494 mgp->rx_big.page_offset = MYRI10GE_ALLOC_SIZE;
1495 mgp->rx_small.watchdog_needed = 0;
1496 mgp->rx_big.watchdog_needed = 0;
1497 myri10ge_alloc_rx_pages(mgp, &mgp->rx_small,
1498 mgp->small_bytes + MXGEFW_PAD, 0);
1487 1499
1488 for (i = 0; i <= mgp->rx_small.mask; i++) { 1500 if (mgp->rx_small.fill_cnt < mgp->rx_small.mask + 1) {
1489 status = myri10ge_getbuf(&mgp->rx_small, mgp, 1501 printk(KERN_ERR "myri10ge: %s: alloced only %d small bufs\n",
1490 mgp->small_bytes, i); 1502 dev->name, mgp->rx_small.fill_cnt);
1491 if (status) { 1503 goto abort_with_rx_small_ring;
1492 printk(KERN_ERR
1493 "myri10ge: %s: alloced only %d small bufs\n",
1494 dev->name, i);
1495 goto abort_with_rx_small_ring;
1496 }
1497 } 1504 }
1498 1505
1499 for (i = 0; i <= mgp->rx_big.mask; i++) { 1506 myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 0);
1500 status = 1507 if (mgp->rx_big.fill_cnt < mgp->rx_big.mask + 1) {
1501 myri10ge_getbuf(&mgp->rx_big, mgp, dev->mtu + ETH_HLEN, i); 1508 printk(KERN_ERR "myri10ge: %s: alloced only %d big bufs\n",
1502 if (status) { 1509 dev->name, mgp->rx_big.fill_cnt);
1503 printk(KERN_ERR 1510 goto abort_with_rx_big_ring;
1504 "myri10ge: %s: alloced only %d big bufs\n",
1505 dev->name, i);
1506 goto abort_with_rx_big_ring;
1507 }
1508 } 1511 }
1509 1512
1510 return 0; 1513 return 0;
1511 1514
1512abort_with_rx_big_ring: 1515abort_with_rx_big_ring:
1513 for (i = 0; i <= mgp->rx_big.mask; i++) { 1516 for (i = mgp->rx_big.cnt; i < mgp->rx_big.fill_cnt; i++) {
1514 if (mgp->rx_big.info[i].skb != NULL) 1517 int idx = i & mgp->rx_big.mask;
1515 dev_kfree_skb_any(mgp->rx_big.info[i].skb); 1518 myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_big.info[idx],
1516 if (pci_unmap_len(&mgp->rx_big.info[i], len)) 1519 mgp->big_bytes);
1517 pci_unmap_single(mgp->pdev, 1520 put_page(mgp->rx_big.info[idx].page);
1518 pci_unmap_addr(&mgp->rx_big.info[i],
1519 bus),
1520 pci_unmap_len(&mgp->rx_big.info[i],
1521 len),
1522 PCI_DMA_FROMDEVICE);
1523 } 1521 }
1524 1522
1525abort_with_rx_small_ring: 1523abort_with_rx_small_ring:
1526 for (i = 0; i <= mgp->rx_small.mask; i++) { 1524 for (i = mgp->rx_small.cnt; i < mgp->rx_small.fill_cnt; i++) {
1527 if (mgp->rx_small.info[i].skb != NULL) 1525 int idx = i & mgp->rx_small.mask;
1528 dev_kfree_skb_any(mgp->rx_small.info[i].skb); 1526 myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_small.info[idx],
1529 if (pci_unmap_len(&mgp->rx_small.info[i], len)) 1527 mgp->small_bytes + MXGEFW_PAD);
1530 pci_unmap_single(mgp->pdev, 1528 put_page(mgp->rx_small.info[idx].page);
1531 pci_unmap_addr(&mgp->rx_small.info[i],
1532 bus),
1533 pci_unmap_len(&mgp->rx_small.info[i],
1534 len),
1535 PCI_DMA_FROMDEVICE);
1536 } 1529 }
1530
1537 kfree(mgp->rx_big.info); 1531 kfree(mgp->rx_big.info);
1538 1532
1539abort_with_rx_small_info: 1533abort_with_rx_small_info:
@@ -1566,30 +1560,24 @@ static void myri10ge_free_rings(struct net_device *dev)
1566 1560
1567 mgp = netdev_priv(dev); 1561 mgp = netdev_priv(dev);
1568 1562
1569 for (i = 0; i <= mgp->rx_big.mask; i++) { 1563 for (i = mgp->rx_big.cnt; i < mgp->rx_big.fill_cnt; i++) {
1570 if (mgp->rx_big.info[i].skb != NULL) 1564 idx = i & mgp->rx_big.mask;
1571 dev_kfree_skb_any(mgp->rx_big.info[i].skb); 1565 if (i == mgp->rx_big.fill_cnt - 1)
1572 if (pci_unmap_len(&mgp->rx_big.info[i], len)) 1566 mgp->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE;
1573 pci_unmap_single(mgp->pdev, 1567 myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_big.info[idx],
1574 pci_unmap_addr(&mgp->rx_big.info[i], 1568 mgp->big_bytes);
1575 bus), 1569 put_page(mgp->rx_big.info[idx].page);
1576 pci_unmap_len(&mgp->rx_big.info[i],
1577 len),
1578 PCI_DMA_FROMDEVICE);
1579 }
1580
1581 for (i = 0; i <= mgp->rx_small.mask; i++) {
1582 if (mgp->rx_small.info[i].skb != NULL)
1583 dev_kfree_skb_any(mgp->rx_small.info[i].skb);
1584 if (pci_unmap_len(&mgp->rx_small.info[i], len))
1585 pci_unmap_single(mgp->pdev,
1586 pci_unmap_addr(&mgp->rx_small.info[i],
1587 bus),
1588 pci_unmap_len(&mgp->rx_small.info[i],
1589 len),
1590 PCI_DMA_FROMDEVICE);
1591 } 1570 }
1592 1571
1572 for (i = mgp->rx_small.cnt; i < mgp->rx_small.fill_cnt; i++) {
1573 idx = i & mgp->rx_small.mask;
1574 if (i == mgp->rx_small.fill_cnt - 1)
1575 mgp->rx_small.info[idx].page_offset =
1576 MYRI10GE_ALLOC_SIZE;
1577 myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_small.info[idx],
1578 mgp->small_bytes + MXGEFW_PAD);
1579 put_page(mgp->rx_small.info[idx].page);
1580 }
1593 tx = &mgp->tx; 1581 tx = &mgp->tx;
1594 while (tx->done != tx->req) { 1582 while (tx->done != tx->req) {
1595 idx = tx->done & tx->mask; 1583 idx = tx->done & tx->mask;
@@ -1657,19 +1645,18 @@ static int myri10ge_open(struct net_device *dev)
1657 */ 1645 */
1658 1646
1659 if (dev->mtu <= ETH_DATA_LEN) 1647 if (dev->mtu <= ETH_DATA_LEN)
1660 mgp->small_bytes = 128; /* enough for a TCP header */ 1648 /* enough for a TCP header */
1649 mgp->small_bytes = (128 > SMP_CACHE_BYTES)
1650 ? (128 - MXGEFW_PAD)
1651 : (SMP_CACHE_BYTES - MXGEFW_PAD);
1661 else 1652 else
1662 mgp->small_bytes = ETH_FRAME_LEN; /* enough for an ETH_DATA_LEN frame */ 1653 /* enough for a vlan encapsulated ETH_DATA_LEN frame */
1654 mgp->small_bytes = VLAN_ETH_FRAME_LEN;
1663 1655
1664 /* Override the small buffer size? */ 1656 /* Override the small buffer size? */
1665 if (myri10ge_small_bytes > 0) 1657 if (myri10ge_small_bytes > 0)
1666 mgp->small_bytes = myri10ge_small_bytes; 1658 mgp->small_bytes = myri10ge_small_bytes;
1667 1659
1668 /* If the user sets an obscenely small MTU, adjust the small
1669 * bytes down to nearly nothing */
1670 if (mgp->small_bytes >= (dev->mtu + ETH_HLEN))
1671 mgp->small_bytes = 64;
1672
1673 /* get the lanai pointers to the send and receive rings */ 1660 /* get the lanai pointers to the send and receive rings */
1674 1661
1675 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, &cmd, 0); 1662 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, &cmd, 0);
@@ -1705,17 +1692,23 @@ static int myri10ge_open(struct net_device *dev)
1705 mgp->rx_big.wc_fifo = NULL; 1692 mgp->rx_big.wc_fifo = NULL;
1706 } 1693 }
1707 1694
1708 status = myri10ge_allocate_rings(dev);
1709 if (status != 0)
1710 goto abort_with_nothing;
1711
1712 /* Firmware needs the big buff size as a power of 2. Lie and 1695 /* Firmware needs the big buff size as a power of 2. Lie and
1713 * tell him the buffer is larger, because we only use 1 1696 * tell him the buffer is larger, because we only use 1
1714 * buffer/pkt, and the mtu will prevent overruns. 1697 * buffer/pkt, and the mtu will prevent overruns.
1715 */ 1698 */
1716 big_pow2 = dev->mtu + ETH_HLEN + MXGEFW_PAD; 1699 big_pow2 = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD;
1717 while ((big_pow2 & (big_pow2 - 1)) != 0) 1700 if (big_pow2 < MYRI10GE_ALLOC_SIZE / 2) {
1718 big_pow2++; 1701 while ((big_pow2 & (big_pow2 - 1)) != 0)
1702 big_pow2++;
1703 mgp->big_bytes = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD;
1704 } else {
1705 big_pow2 = MYRI10GE_ALLOC_SIZE;
1706 mgp->big_bytes = big_pow2;
1707 }
1708
1709 status = myri10ge_allocate_rings(dev);
1710 if (status != 0)
1711 goto abort_with_nothing;
1719 1712
1720 /* now give firmware buffers sizes, and MTU */ 1713 /* now give firmware buffers sizes, and MTU */
1721 cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN; 1714 cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN;
@@ -2206,7 +2199,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
2206 struct myri10ge_cmd cmd; 2199 struct myri10ge_cmd cmd;
2207 struct myri10ge_priv *mgp; 2200 struct myri10ge_priv *mgp;
2208 struct dev_mc_list *mc_list; 2201 struct dev_mc_list *mc_list;
2209 __be32 data[2] = {0, 0}; 2202 __be32 data[2] = { 0, 0 };
2210 int err; 2203 int err;
2211 2204
2212 mgp = netdev_priv(dev); 2205 mgp = netdev_priv(dev);
@@ -2625,7 +2618,7 @@ static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp)
2625static void myri10ge_watchdog(struct work_struct *work) 2618static void myri10ge_watchdog(struct work_struct *work)
2626{ 2619{
2627 struct myri10ge_priv *mgp = 2620 struct myri10ge_priv *mgp =
2628 container_of(work, struct myri10ge_priv, watchdog_work); 2621 container_of(work, struct myri10ge_priv, watchdog_work);
2629 u32 reboot; 2622 u32 reboot;
2630 int status; 2623 int status;
2631 u16 cmd, vendor; 2624 u16 cmd, vendor;
@@ -2698,6 +2691,21 @@ static void myri10ge_watchdog_timer(unsigned long arg)
2698 struct myri10ge_priv *mgp; 2691 struct myri10ge_priv *mgp;
2699 2692
2700 mgp = (struct myri10ge_priv *)arg; 2693 mgp = (struct myri10ge_priv *)arg;
2694
2695 if (mgp->rx_small.watchdog_needed) {
2696 myri10ge_alloc_rx_pages(mgp, &mgp->rx_small,
2697 mgp->small_bytes + MXGEFW_PAD, 1);
2698 if (mgp->rx_small.fill_cnt - mgp->rx_small.cnt >=
2699 myri10ge_fill_thresh)
2700 mgp->rx_small.watchdog_needed = 0;
2701 }
2702 if (mgp->rx_big.watchdog_needed) {
2703 myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 1);
2704 if (mgp->rx_big.fill_cnt - mgp->rx_big.cnt >=
2705 myri10ge_fill_thresh)
2706 mgp->rx_big.watchdog_needed = 0;
2707 }
2708
2701 if (mgp->tx.req != mgp->tx.done && 2709 if (mgp->tx.req != mgp->tx.done &&
2702 mgp->tx.done == mgp->watchdog_tx_done && 2710 mgp->tx.done == mgp->watchdog_tx_done &&
2703 mgp->watchdog_tx_req != mgp->watchdog_tx_done) 2711 mgp->watchdog_tx_req != mgp->watchdog_tx_done)
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index b01fc70a57db..a4d7529ef415 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -50,7 +50,7 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
50 struct phy_device *dev; 50 struct phy_device *dev;
51 /* We allocate the device, and initialize the 51 /* We allocate the device, and initialize the
52 * default values */ 52 * default values */
53 dev = kcalloc(1, sizeof(*dev), GFP_KERNEL); 53 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
54 54
55 if (NULL == dev) 55 if (NULL == dev)
56 return (struct phy_device*) PTR_ERR((void*)-ENOMEM); 56 return (struct phy_device*) PTR_ERR((void*)-ENOMEM);
diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c
index f54c55242f4a..72c8d6628f58 100644
--- a/drivers/net/ppp_deflate.c
+++ b/drivers/net/ppp_deflate.c
@@ -121,7 +121,7 @@ static void *z_comp_alloc(unsigned char *options, int opt_len)
121 if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE) 121 if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE)
122 return NULL; 122 return NULL;
123 123
124 state = (struct ppp_deflate_state *) kmalloc(sizeof(*state), 124 state = kmalloc(sizeof(*state),
125 GFP_KERNEL); 125 GFP_KERNEL);
126 if (state == NULL) 126 if (state == NULL)
127 return NULL; 127 return NULL;
@@ -341,7 +341,7 @@ static void *z_decomp_alloc(unsigned char *options, int opt_len)
341 if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE) 341 if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE)
342 return NULL; 342 return NULL;
343 343
344 state = (struct ppp_deflate_state *) kmalloc(sizeof(*state), GFP_KERNEL); 344 state = kmalloc(sizeof(*state), GFP_KERNEL);
345 if (state == NULL) 345 if (state == NULL)
346 return NULL; 346 return NULL;
347 347
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c
index f3655fd772f5..d5bdd2574659 100644
--- a/drivers/net/ppp_mppe.c
+++ b/drivers/net/ppp_mppe.c
@@ -200,7 +200,7 @@ static void *mppe_alloc(unsigned char *options, int optlen)
200 || options[0] != CI_MPPE || options[1] != CILEN_MPPE) 200 || options[0] != CI_MPPE || options[1] != CILEN_MPPE)
201 goto out; 201 goto out;
202 202
203 state = (struct ppp_mppe_state *) kmalloc(sizeof(*state), GFP_KERNEL); 203 state = kmalloc(sizeof(*state), GFP_KERNEL);
204 if (state == NULL) 204 if (state == NULL)
205 goto out; 205 goto out;
206 206
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index b60f0451f6cd..8a39376f87dc 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -749,7 +749,7 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
749 struct skge_element *e; 749 struct skge_element *e;
750 int i; 750 int i;
751 751
752 ring->start = kcalloc(sizeof(*e), ring->count, GFP_KERNEL); 752 ring->start = kcalloc(ring->count, sizeof(*e), GFP_KERNEL);
753 if (!ring->start) 753 if (!ring->start)
754 return -ENOMEM; 754 return -ENOMEM;
755 755
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index 39c2152a07f4..a0806d262fc6 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -229,10 +229,10 @@ static int sl_realloc_bufs(struct slip *sl, int mtu)
229 if (len < 576 * 2) 229 if (len < 576 * 2)
230 len = 576 * 2; 230 len = 576 * 2;
231 231
232 xbuff = (unsigned char *) kmalloc (len + 4, GFP_ATOMIC); 232 xbuff = kmalloc(len + 4, GFP_ATOMIC);
233 rbuff = (unsigned char *) kmalloc (len + 4, GFP_ATOMIC); 233 rbuff = kmalloc(len + 4, GFP_ATOMIC);
234#ifdef SL_INCLUDE_CSLIP 234#ifdef SL_INCLUDE_CSLIP
235 cbuff = (unsigned char *) kmalloc (len + 4, GFP_ATOMIC); 235 cbuff = kmalloc(len + 4, GFP_ATOMIC);
236#endif 236#endif
237 237
238 238
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 9367c574477a..d2767e6584a9 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -362,96 +362,6 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r,
362 362
363#define SMC_IRQ_FLAGS (0) 363#define SMC_IRQ_FLAGS (0)
364 364
365#elif defined(CONFIG_ARCH_VERSATILE)
366
367#define SMC_CAN_USE_8BIT 1
368#define SMC_CAN_USE_16BIT 1
369#define SMC_CAN_USE_32BIT 1
370#define SMC_NOWAIT 1
371
372#define SMC_inb(a, r) readb((a) + (r))
373#define SMC_inw(a, r) readw((a) + (r))
374#define SMC_inl(a, r) readl((a) + (r))
375#define SMC_outb(v, a, r) writeb(v, (a) + (r))
376#define SMC_outw(v, a, r) writew(v, (a) + (r))
377#define SMC_outl(v, a, r) writel(v, (a) + (r))
378#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
379#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
380
381#define SMC_IRQ_FLAGS (0)
382
383#elif defined(CONFIG_ARCH_VERSATILE)
384
385#define SMC_CAN_USE_8BIT 1
386#define SMC_CAN_USE_16BIT 1
387#define SMC_CAN_USE_32BIT 1
388#define SMC_NOWAIT 1
389
390#define SMC_inb(a, r) readb((a) + (r))
391#define SMC_inw(a, r) readw((a) + (r))
392#define SMC_inl(a, r) readl((a) + (r))
393#define SMC_outb(v, a, r) writeb(v, (a) + (r))
394#define SMC_outw(v, a, r) writew(v, (a) + (r))
395#define SMC_outl(v, a, r) writel(v, (a) + (r))
396#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
397#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
398
399#define SMC_IRQ_FLAGS (0)
400
401#elif defined(CONFIG_ARCH_VERSATILE)
402
403#define SMC_CAN_USE_8BIT 1
404#define SMC_CAN_USE_16BIT 1
405#define SMC_CAN_USE_32BIT 1
406#define SMC_NOWAIT 1
407
408#define SMC_inb(a, r) readb((a) + (r))
409#define SMC_inw(a, r) readw((a) + (r))
410#define SMC_inl(a, r) readl((a) + (r))
411#define SMC_outb(v, a, r) writeb(v, (a) + (r))
412#define SMC_outw(v, a, r) writew(v, (a) + (r))
413#define SMC_outl(v, a, r) writel(v, (a) + (r))
414#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
415#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
416
417#define SMC_IRQ_FLAGS (0)
418
419#elif defined(CONFIG_ARCH_VERSATILE)
420
421#define SMC_CAN_USE_8BIT 1
422#define SMC_CAN_USE_16BIT 1
423#define SMC_CAN_USE_32BIT 1
424#define SMC_NOWAIT 1
425
426#define SMC_inb(a, r) readb((a) + (r))
427#define SMC_inw(a, r) readw((a) + (r))
428#define SMC_inl(a, r) readl((a) + (r))
429#define SMC_outb(v, a, r) writeb(v, (a) + (r))
430#define SMC_outw(v, a, r) writew(v, (a) + (r))
431#define SMC_outl(v, a, r) writel(v, (a) + (r))
432#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
433#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
434
435#define SMC_IRQ_FLAGS (0)
436
437#elif defined(CONFIG_ARCH_VERSATILE)
438
439#define SMC_CAN_USE_8BIT 1
440#define SMC_CAN_USE_16BIT 1
441#define SMC_CAN_USE_32BIT 1
442#define SMC_NOWAIT 1
443
444#define SMC_inb(a, r) readb((a) + (r))
445#define SMC_inw(a, r) readw((a) + (r))
446#define SMC_inl(a, r) readl((a) + (r))
447#define SMC_outb(v, a, r) writeb(v, (a) + (r))
448#define SMC_outw(v, a, r) writew(v, (a) + (r))
449#define SMC_outl(v, a, r) writel(v, (a) + (r))
450#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
451#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
452
453#define SMC_IRQ_FLAGS (0)
454
455#else 365#else
456 366
457#define SMC_CAN_USE_8BIT 1 367#define SMC_CAN_USE_8BIT 1
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 1f05511fa390..8243150f5b05 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -194,9 +194,9 @@ static void enqueue(struct list_head *node, struct list_head *lh)
194{ 194{
195 unsigned long flags; 195 unsigned long flags;
196 196
197 spin_lock_irqsave(ugeth_lock, flags); 197 spin_lock_irqsave(&ugeth_lock, flags);
198 list_add_tail(node, lh); 198 list_add_tail(node, lh);
199 spin_unlock_irqrestore(ugeth_lock, flags); 199 spin_unlock_irqrestore(&ugeth_lock, flags);
200} 200}
201#endif /* CONFIG_UGETH_FILTERING */ 201#endif /* CONFIG_UGETH_FILTERING */
202 202
@@ -204,14 +204,14 @@ static struct list_head *dequeue(struct list_head *lh)
204{ 204{
205 unsigned long flags; 205 unsigned long flags;
206 206
207 spin_lock_irqsave(ugeth_lock, flags); 207 spin_lock_irqsave(&ugeth_lock, flags);
208 if (!list_empty(lh)) { 208 if (!list_empty(lh)) {
209 struct list_head *node = lh->next; 209 struct list_head *node = lh->next;
210 list_del(node); 210 list_del(node);
211 spin_unlock_irqrestore(ugeth_lock, flags); 211 spin_unlock_irqrestore(&ugeth_lock, flags);
212 return node; 212 return node;
213 } else { 213 } else {
214 spin_unlock_irqrestore(ugeth_lock, flags); 214 spin_unlock_irqrestore(&ugeth_lock, flags);
215 return NULL; 215 return NULL;
216 } 216 }
217} 217}
@@ -1852,6 +1852,8 @@ static int init_phy(struct net_device *dev)
1852 mii_info->mdio_read = &read_phy_reg; 1852 mii_info->mdio_read = &read_phy_reg;
1853 mii_info->mdio_write = &write_phy_reg; 1853 mii_info->mdio_write = &write_phy_reg;
1854 1854
1855 spin_lock_init(&mii_info->mdio_lock);
1856
1855 ugeth->mii_info = mii_info; 1857 ugeth->mii_info = mii_info;
1856 1858
1857 spin_lock_irq(&ugeth->lock); 1859 spin_lock_irq(&ugeth->lock);
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index d5ab9cf13257..21f76f51c95e 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -382,7 +382,7 @@ config SDLA
382 382
383# Wan router core. 383# Wan router core.
384config WAN_ROUTER_DRIVERS 384config WAN_ROUTER_DRIVERS
385 bool "WAN router drivers" 385 tristate "WAN router drivers"
386 depends on WAN && WAN_ROUTER 386 depends on WAN && WAN_ROUTER
387 ---help--- 387 ---help---
388 Connect LAN to WAN via Linux box. 388 Connect LAN to WAN via Linux box.
@@ -393,7 +393,8 @@ config WAN_ROUTER_DRIVERS
393 <file:Documentation/networking/wan-router.txt>. 393 <file:Documentation/networking/wan-router.txt>.
394 394
395 Note that the answer to this question won't directly affect the 395 Note that the answer to this question won't directly affect the
396 kernel: saying N will just cause the configurator to skip all 396 kernel except for how subordinate drivers may be built:
397 saying N will just cause the configurator to skip all
397 the questions about WAN router drivers. 398 the questions about WAN router drivers.
398 399
399 If unsure, say N. 400 If unsure, say N.
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index a4f735723c41..a02c5fb40567 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -231,7 +231,7 @@ static struct sv11_device *sv11_init(int iobase, int irq)
231 return NULL; 231 return NULL;
232 } 232 }
233 233
234 sv=(struct sv11_device *)kmalloc(sizeof(struct sv11_device), GFP_KERNEL); 234 sv = kmalloc(sizeof(struct sv11_device), GFP_KERNEL);
235 if(!sv) 235 if(!sv)
236 goto fail3; 236 goto fail3;
237 237
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index 36d1c3ff7078..62184dee377c 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -3455,7 +3455,7 @@ cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3455 if ((err = pci_enable_device(pdev)) < 0) 3455 if ((err = pci_enable_device(pdev)) < 0)
3456 return err; 3456 return err;
3457 3457
3458 card = (pc300_t *) kmalloc(sizeof(pc300_t), GFP_KERNEL); 3458 card = kmalloc(sizeof(pc300_t), GFP_KERNEL);
3459 if (card == NULL) { 3459 if (card == NULL) {
3460 printk("PC300 found at RAM 0x%016llx, " 3460 printk("PC300 found at RAM 0x%016llx, "
3461 "but could not allocate card structure.\n", 3461 "but could not allocate card structure.\n",
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c
index b2a23aed4428..5873c346e7e9 100644
--- a/drivers/net/wan/pc300_tty.c
+++ b/drivers/net/wan/pc300_tty.c
@@ -784,7 +784,7 @@ void cpc_tty_receive(pc300dev_t *pc300dev)
784 continue; 784 continue;
785 } 785 }
786 786
787 new = (st_cpc_rx_buf *)kmalloc(rx_len + sizeof(st_cpc_rx_buf), GFP_ATOMIC); 787 new = kmalloc(rx_len + sizeof(st_cpc_rx_buf), GFP_ATOMIC);
788 if (new == 0) { 788 if (new == 0) {
789 cpc_tty_rx_disc_frame(pc300chan); 789 cpc_tty_rx_disc_frame(pc300chan);
790 continue; 790 continue;
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 9c3ccc669143..1c9edd97accd 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -123,8 +123,8 @@ static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
123 unsigned char *xbuff, *rbuff; 123 unsigned char *xbuff, *rbuff;
124 int len = 2* newmtu; 124 int len = 2* newmtu;
125 125
126 xbuff = (unsigned char *) kmalloc (len + 4, GFP_ATOMIC); 126 xbuff = kmalloc(len + 4, GFP_ATOMIC);
127 rbuff = (unsigned char *) kmalloc (len + 4, GFP_ATOMIC); 127 rbuff = kmalloc(len + 4, GFP_ATOMIC);
128 128
129 if (xbuff == NULL || rbuff == NULL) 129 if (xbuff == NULL || rbuff == NULL)
130 { 130 {
@@ -465,11 +465,11 @@ static int x25_asy_open(struct net_device *dev)
465 465
466 len = dev->mtu * 2; 466 len = dev->mtu * 2;
467 467
468 sl->rbuff = (unsigned char *) kmalloc(len + 4, GFP_KERNEL); 468 sl->rbuff = kmalloc(len + 4, GFP_KERNEL);
469 if (sl->rbuff == NULL) { 469 if (sl->rbuff == NULL) {
470 goto norbuff; 470 goto norbuff;
471 } 471 }
472 sl->xbuff = (unsigned char *) kmalloc(len + 4, GFP_KERNEL); 472 sl->xbuff = kmalloc(len + 4, GFP_KERNEL);
473 if (sl->xbuff == NULL) { 473 if (sl->xbuff == NULL) {
474 goto noxbuff; 474 goto noxbuff;
475 } 475 }
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index 974a8e5bec8b..efb8cf3bd8ad 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -1253,7 +1253,7 @@ static char * ap_auth_make_challenge(struct ap_data *ap)
1253 return NULL; 1253 return NULL;
1254 } 1254 }
1255 1255
1256 tmpbuf = (char *) kmalloc(WLAN_AUTH_CHALLENGE_LEN, GFP_ATOMIC); 1256 tmpbuf = kmalloc(WLAN_AUTH_CHALLENGE_LEN, GFP_ATOMIC);
1257 if (tmpbuf == NULL) { 1257 if (tmpbuf == NULL) {
1258 PDEBUG(DEBUG_AP, "AP: kmalloc failed for challenge\n"); 1258 PDEBUG(DEBUG_AP, "AP: kmalloc failed for challenge\n");
1259 return NULL; 1259 return NULL;
diff --git a/drivers/net/wireless/hostap/hostap_download.c b/drivers/net/wireless/hostap/hostap_download.c
index 24fc387bba67..c7678e67697d 100644
--- a/drivers/net/wireless/hostap/hostap_download.c
+++ b/drivers/net/wireless/hostap/hostap_download.c
@@ -201,7 +201,7 @@ static u8 * prism2_read_pda(struct net_device *dev)
201 0x7f0002 /* Intel PRO/Wireless 2011B (PCI) */, 201 0x7f0002 /* Intel PRO/Wireless 2011B (PCI) */,
202 }; 202 };
203 203
204 buf = (u8 *) kmalloc(PRISM2_PDA_SIZE, GFP_KERNEL); 204 buf = kmalloc(PRISM2_PDA_SIZE, GFP_KERNEL);
205 if (buf == NULL) 205 if (buf == NULL)
206 return NULL; 206 return NULL;
207 207
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index a394a23b9a20..3079378fb8cd 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -2252,7 +2252,7 @@ static int hostap_tx_compl_read(local_info_t *local, int error,
2252 if (txdesc->sw_support) { 2252 if (txdesc->sw_support) {
2253 len = le16_to_cpu(txdesc->data_len); 2253 len = le16_to_cpu(txdesc->data_len);
2254 if (len < PRISM2_DATA_MAXLEN) { 2254 if (len < PRISM2_DATA_MAXLEN) {
2255 *payload = (char *) kmalloc(len, GFP_ATOMIC); 2255 *payload = kmalloc(len, GFP_ATOMIC);
2256 if (*payload == NULL || 2256 if (*payload == NULL ||
2257 hfa384x_from_bap(dev, BAP0, *payload, len)) { 2257 hfa384x_from_bap(dev, BAP0, *payload, len)) {
2258 PDEBUG(DEBUG_EXTRA, "%s: could not read TX " 2258 PDEBUG(DEBUG_EXTRA, "%s: could not read TX "
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 3b7b8063ff1c..cb08bc5db2bd 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -3829,7 +3829,7 @@ static int prism2_ioctl_priv_hostapd(local_info_t *local, struct iw_point *p)
3829 p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer) 3829 p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
3830 return -EINVAL; 3830 return -EINVAL;
3831 3831
3832 param = (struct prism2_hostapd_param *) kmalloc(p->length, GFP_KERNEL); 3832 param = kmalloc(p->length, GFP_KERNEL);
3833 if (param == NULL) 3833 if (param == NULL)
3834 return -ENOMEM; 3834 return -ENOMEM;
3835 3835
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 0796be9d9e77..04c19cefa1da 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -250,7 +250,7 @@ u16 hostap_tx_callback_register(local_info_t *local,
250 unsigned long flags; 250 unsigned long flags;
251 struct hostap_tx_callback_info *entry; 251 struct hostap_tx_callback_info *entry;
252 252
253 entry = (struct hostap_tx_callback_info *) kmalloc(sizeof(*entry), 253 entry = kmalloc(sizeof(*entry),
254 GFP_ATOMIC); 254 GFP_ATOMIC);
255 if (entry == NULL) 255 if (entry == NULL)
256 return 0; 256 return 0;
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index dd9ba4aad7bb..0e94fbbf7a94 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -2246,7 +2246,7 @@ static int ipw2100_snapshot_alloc(struct ipw2100_priv *priv)
2246 if (priv->snapshot[0]) 2246 if (priv->snapshot[0])
2247 return 1; 2247 return 1;
2248 for (i = 0; i < 0x30; i++) { 2248 for (i = 0; i < 0x30; i++) {
2249 priv->snapshot[i] = (u8 *) kmalloc(0x1000, GFP_ATOMIC); 2249 priv->snapshot[i] = kmalloc(0x1000, GFP_ATOMIC);
2250 if (!priv->snapshot[i]) { 2250 if (!priv->snapshot[i]) {
2251 IPW_DEBUG_INFO("%s: Error allocating snapshot " 2251 IPW_DEBUG_INFO("%s: Error allocating snapshot "
2252 "buffer %d\n", priv->net_dev->name, i); 2252 "buffer %d\n", priv->net_dev->name, i);
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 96606ed10076..838d510213c6 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -2775,7 +2775,7 @@ prism54_hostapd(struct net_device *ndev, struct iw_point *p)
2775 p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer) 2775 p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
2776 return -EINVAL; 2776 return -EINVAL;
2777 2777
2778 param = (struct prism2_hostapd_param *) kmalloc(p->length, GFP_KERNEL); 2778 param = kmalloc(p->length, GFP_KERNEL);
2779 if (param == NULL) 2779 if (param == NULL)
2780 return -ENOMEM; 2780 return -ENOMEM;
2781 2781
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index 233d906c08f0..5eb81638e846 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -603,7 +603,7 @@ static wavepoint_history *wl_new_wavepoint(unsigned short nwid, unsigned char se
603 if(lp->wavepoint_table.num_wavepoints==MAX_WAVEPOINTS) 603 if(lp->wavepoint_table.num_wavepoints==MAX_WAVEPOINTS)
604 return NULL; 604 return NULL;
605 605
606 new_wavepoint=(wavepoint_history *) kmalloc(sizeof(wavepoint_history),GFP_ATOMIC); 606 new_wavepoint = kmalloc(sizeof(wavepoint_history),GFP_ATOMIC);
607 if(new_wavepoint==NULL) 607 if(new_wavepoint==NULL)
608 return NULL; 608 return NULL;
609 609
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 77e11ddad836..78ea72fb8f0c 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -101,7 +101,7 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
101 101
102 /* Allocate a single memory block for values and addresses. */ 102 /* Allocate a single memory block for values and addresses. */
103 count16 = 2*count; 103 count16 = 2*count;
104 a16 = (zd_addr_t *)kmalloc(count16 * (sizeof(zd_addr_t) + sizeof(u16)), 104 a16 = kmalloc(count16 * (sizeof(zd_addr_t) + sizeof(u16)),
105 GFP_NOFS); 105 GFP_NOFS);
106 if (!a16) { 106 if (!a16) {
107 dev_dbg_f(zd_chip_dev(chip), 107 dev_dbg_f(zd_chip_dev(chip),
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 12bab64a62a1..6fb3f7979f21 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -874,7 +874,7 @@ void *iosapic_register(unsigned long hpa)
874 return NULL; 874 return NULL;
875 } 875 }
876 876
877 isi = (struct iosapic_info *)kzalloc(sizeof(struct iosapic_info), GFP_KERNEL); 877 isi = kzalloc(sizeof(struct iosapic_info), GFP_KERNEL);
878 if (!isi) { 878 if (!isi) {
879 BUG(); 879 BUG();
880 return NULL; 880 return NULL;
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index c7fa28a28b9f..36c6a1bfe558 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -82,9 +82,6 @@ config PARPORT_PC_PCMCIA
82 Say Y here if you need PCMCIA support for your PC-style parallel 82 Say Y here if you need PCMCIA support for your PC-style parallel
83 ports. If unsure, say N. 83 ports. If unsure, say N.
84 84
85config PARPORT_NOT_PC
86 bool
87
88config PARPORT_IP32 85config PARPORT_IP32
89 tristate "SGI IP32 builtin port (EXPERIMENTAL)" 86 tristate "SGI IP32 builtin port (EXPERIMENTAL)"
90 depends on SGI_IP32 && PARPORT && EXPERIMENTAL 87 depends on SGI_IP32 && PARPORT && EXPERIMENTAL
@@ -158,5 +155,8 @@ config PARPORT_1284
158 transfer modes. Also say Y if you want device ID information to 155 transfer modes. Also say Y if you want device ID information to
159 appear in /proc/sys/dev/parport/*/autoprobe*. It is safe to say N. 156 appear in /proc/sys/dev/parport/*/autoprobe*. It is safe to say N.
160 157
158config PARPORT_NOT_PC
159 bool
160
161endmenu 161endmenu
162 162
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index 6e780db9454d..adce4204d87d 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -76,7 +76,8 @@ config HOTPLUG_PCI_IBM
76 76
77config HOTPLUG_PCI_ACPI 77config HOTPLUG_PCI_ACPI
78 tristate "ACPI PCI Hotplug driver" 78 tristate "ACPI PCI Hotplug driver"
79 depends on (!ACPI_DOCK && ACPI && HOTPLUG_PCI) || (ACPI_DOCK && HOTPLUG_PCI) 79 depends on HOTPLUG_PCI
80 depends on (!ACPI_DOCK && ACPI) || (ACPI_DOCK)
80 help 81 help
81 Say Y here if you have a system that supports PCI Hotplug using 82 Say Y here if you have a system that supports PCI Hotplug using
82 ACPI. 83 ACPI.
diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
index 298a6cfd8406..ae5e974c45a7 100644
--- a/drivers/pci/hotplug/cpqphp_nvram.c
+++ b/drivers/pci/hotplug/cpqphp_nvram.c
@@ -520,7 +520,7 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
520 return 2; 520 return 2;
521 521
522 while (nummem--) { 522 while (nummem--) {
523 mem_node = (struct pci_resource*) kmalloc(sizeof(struct pci_resource), GFP_KERNEL); 523 mem_node = kmalloc(sizeof(struct pci_resource), GFP_KERNEL);
524 524
525 if (!mem_node) 525 if (!mem_node)
526 break; 526 break;
@@ -548,7 +548,7 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
548 } 548 }
549 549
550 while (numpmem--) { 550 while (numpmem--) {
551 p_mem_node = (struct pci_resource*) kmalloc(sizeof(struct pci_resource), GFP_KERNEL); 551 p_mem_node = kmalloc(sizeof(struct pci_resource), GFP_KERNEL);
552 552
553 if (!p_mem_node) 553 if (!p_mem_node)
554 break; 554 break;
@@ -576,7 +576,7 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
576 } 576 }
577 577
578 while (numio--) { 578 while (numio--) {
579 io_node = (struct pci_resource*) kmalloc(sizeof(struct pci_resource), GFP_KERNEL); 579 io_node = kmalloc(sizeof(struct pci_resource), GFP_KERNEL);
580 580
581 if (!io_node) 581 if (!io_node)
582 break; 582 break;
@@ -604,7 +604,7 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
604 } 604 }
605 605
606 while (numbus--) { 606 while (numbus--) {
607 bus_node = (struct pci_resource*) kmalloc(sizeof(struct pci_resource), GFP_KERNEL); 607 bus_node = kmalloc(sizeof(struct pci_resource), GFP_KERNEL);
608 608
609 if (!bus_node) 609 if (!bus_node)
610 break; 610 break;
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 6d3f580f2666..25d3aadfddbf 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -1320,7 +1320,7 @@ int pcie_init(struct controller * ctrl, struct pcie_device *dev)
1320 DBG_ENTER_ROUTINE 1320 DBG_ENTER_ROUTINE
1321 1321
1322 spin_lock_init(&list_lock); 1322 spin_lock_init(&list_lock);
1323 php_ctlr = (struct php_ctlr_state_s *) kmalloc(sizeof(struct php_ctlr_state_s), GFP_KERNEL); 1323 php_ctlr = kmalloc(sizeof(struct php_ctlr_state_s), GFP_KERNEL);
1324 1324
1325 if (!php_ctlr) { /* allocate controller state data */ 1325 if (!php_ctlr) { /* allocate controller state data */
1326 err("%s: HPC controller memory allocation error!\n", __FUNCTION__); 1326 err("%s: HPC controller memory allocation error!\n", __FUNCTION__);
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 55866b6b26fa..6f5fabbd14e5 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -148,7 +148,7 @@ static struct aer_rpc* aer_alloc_rpc(struct pcie_device *dev)
148{ 148{
149 struct aer_rpc *rpc; 149 struct aer_rpc *rpc;
150 150
151 if (!(rpc = (struct aer_rpc *)kmalloc(sizeof(struct aer_rpc), 151 if (!(rpc = kmalloc(sizeof(struct aer_rpc),
152 GFP_KERNEL))) 152 GFP_KERNEL)))
153 return NULL; 153 return NULL;
154 154
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
index 52d4a38b3667..3334f22a86c0 100644
--- a/drivers/pcmcia/at91_cf.c
+++ b/drivers/pcmcia/at91_cf.c
@@ -230,7 +230,7 @@ static int __init at91_cf_probe(struct platform_device *pdev)
230 if (!io) 230 if (!io)
231 return -ENODEV; 231 return -ENODEV;
232 232
233 cf = kcalloc(1, sizeof *cf, GFP_KERNEL); 233 cf = kzalloc(sizeof *cf, GFP_KERNEL);
234 if (!cf) 234 if (!cf)
235 return -ENOMEM; 235 return -ENOMEM;
236 236
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index 06bf7f48836e..e65a6b8188f6 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -220,7 +220,7 @@ static int __devinit omap_cf_probe(struct device *dev)
220 if (irq < 0) 220 if (irq < 0)
221 return -EINVAL; 221 return -EINVAL;
222 222
223 cf = kcalloc(1, sizeof *cf, GFP_KERNEL); 223 cf = kzalloc(sizeof *cf, GFP_KERNEL);
224 if (!cf) 224 if (!cf)
225 return -ENOMEM; 225 return -ENOMEM;
226 init_timer(&cf->timer); 226 init_timer(&cf->timer);
diff --git a/drivers/pnp/isapnp/core.c b/drivers/pnp/isapnp/core.c
index 3ac5b123215a..a0b158704ca1 100644
--- a/drivers/pnp/isapnp/core.c
+++ b/drivers/pnp/isapnp/core.c
@@ -395,7 +395,7 @@ static void isapnp_parse_id(struct pnp_dev * dev, unsigned short vendor, unsigne
395 struct pnp_id * id; 395 struct pnp_id * id;
396 if (!dev) 396 if (!dev)
397 return; 397 return;
398 id = kcalloc(1, sizeof(struct pnp_id), GFP_KERNEL); 398 id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL);
399 if (!id) 399 if (!id)
400 return; 400 return;
401 sprintf(id->id, "%c%c%c%x%x%x%x", 401 sprintf(id->id, "%c%c%c%x%x%x%x",
@@ -419,7 +419,7 @@ static struct pnp_dev * __init isapnp_parse_device(struct pnp_card *card, int si
419 struct pnp_dev *dev; 419 struct pnp_dev *dev;
420 420
421 isapnp_peek(tmp, size); 421 isapnp_peek(tmp, size);
422 dev = kcalloc(1, sizeof(struct pnp_dev), GFP_KERNEL); 422 dev = kzalloc(sizeof(struct pnp_dev), GFP_KERNEL);
423 if (!dev) 423 if (!dev)
424 return NULL; 424 return NULL;
425 dev->number = number; 425 dev->number = number;
@@ -450,7 +450,7 @@ static void __init isapnp_parse_irq_resource(struct pnp_option *option,
450 unsigned long bits; 450 unsigned long bits;
451 451
452 isapnp_peek(tmp, size); 452 isapnp_peek(tmp, size);
453 irq = kcalloc(1, sizeof(struct pnp_irq), GFP_KERNEL); 453 irq = kzalloc(sizeof(struct pnp_irq), GFP_KERNEL);
454 if (!irq) 454 if (!irq)
455 return; 455 return;
456 bits = (tmp[1] << 8) | tmp[0]; 456 bits = (tmp[1] << 8) | tmp[0];
@@ -474,7 +474,7 @@ static void __init isapnp_parse_dma_resource(struct pnp_option *option,
474 struct pnp_dma *dma; 474 struct pnp_dma *dma;
475 475
476 isapnp_peek(tmp, size); 476 isapnp_peek(tmp, size);
477 dma = kcalloc(1, sizeof(struct pnp_dma), GFP_KERNEL); 477 dma = kzalloc(sizeof(struct pnp_dma), GFP_KERNEL);
478 if (!dma) 478 if (!dma)
479 return; 479 return;
480 dma->map = tmp[0]; 480 dma->map = tmp[0];
@@ -494,7 +494,7 @@ static void __init isapnp_parse_port_resource(struct pnp_option *option,
494 struct pnp_port *port; 494 struct pnp_port *port;
495 495
496 isapnp_peek(tmp, size); 496 isapnp_peek(tmp, size);
497 port = kcalloc(1, sizeof(struct pnp_port), GFP_KERNEL); 497 port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL);
498 if (!port) 498 if (!port)
499 return; 499 return;
500 port->min = (tmp[2] << 8) | tmp[1]; 500 port->min = (tmp[2] << 8) | tmp[1];
@@ -517,7 +517,7 @@ static void __init isapnp_parse_fixed_port_resource(struct pnp_option *option,
517 struct pnp_port *port; 517 struct pnp_port *port;
518 518
519 isapnp_peek(tmp, size); 519 isapnp_peek(tmp, size);
520 port = kcalloc(1, sizeof(struct pnp_port), GFP_KERNEL); 520 port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL);
521 if (!port) 521 if (!port)
522 return; 522 return;
523 port->min = port->max = (tmp[1] << 8) | tmp[0]; 523 port->min = port->max = (tmp[1] << 8) | tmp[0];
@@ -539,7 +539,7 @@ static void __init isapnp_parse_mem_resource(struct pnp_option *option,
539 struct pnp_mem *mem; 539 struct pnp_mem *mem;
540 540
541 isapnp_peek(tmp, size); 541 isapnp_peek(tmp, size);
542 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL); 542 mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL);
543 if (!mem) 543 if (!mem)
544 return; 544 return;
545 mem->min = ((tmp[2] << 8) | tmp[1]) << 8; 545 mem->min = ((tmp[2] << 8) | tmp[1]) << 8;
@@ -562,7 +562,7 @@ static void __init isapnp_parse_mem32_resource(struct pnp_option *option,
562 struct pnp_mem *mem; 562 struct pnp_mem *mem;
563 563
564 isapnp_peek(tmp, size); 564 isapnp_peek(tmp, size);
565 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL); 565 mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL);
566 if (!mem) 566 if (!mem)
567 return; 567 return;
568 mem->min = (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1]; 568 mem->min = (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1];
@@ -584,7 +584,7 @@ static void __init isapnp_parse_fixed_mem32_resource(struct pnp_option *option,
584 struct pnp_mem *mem; 584 struct pnp_mem *mem;
585 585
586 isapnp_peek(tmp, size); 586 isapnp_peek(tmp, size);
587 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL); 587 mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL);
588 if (!mem) 588 if (!mem)
589 return; 589 return;
590 mem->min = mem->max = (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1]; 590 mem->min = mem->max = (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1];
@@ -829,7 +829,7 @@ static unsigned char __init isapnp_checksum(unsigned char *data)
829 829
830static void isapnp_parse_card_id(struct pnp_card * card, unsigned short vendor, unsigned short device) 830static void isapnp_parse_card_id(struct pnp_card * card, unsigned short vendor, unsigned short device)
831{ 831{
832 struct pnp_id * id = kcalloc(1, sizeof(struct pnp_id), GFP_KERNEL); 832 struct pnp_id * id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL);
833 if (!id) 833 if (!id)
834 return; 834 return;
835 sprintf(id->id, "%c%c%c%x%x%x%x", 835 sprintf(id->id, "%c%c%c%x%x%x%x",
@@ -865,7 +865,7 @@ static int __init isapnp_build_device_list(void)
865 header[4], header[5], header[6], header[7], header[8]); 865 header[4], header[5], header[6], header[7], header[8]);
866 printk(KERN_DEBUG "checksum = 0x%x\n", checksum); 866 printk(KERN_DEBUG "checksum = 0x%x\n", checksum);
867#endif 867#endif
868 if ((card = kcalloc(1, sizeof(struct pnp_card), GFP_KERNEL)) == NULL) 868 if ((card = kzalloc(sizeof(struct pnp_card), GFP_KERNEL)) == NULL)
869 continue; 869 continue;
870 870
871 card->number = csn; 871 card->number = csn;
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 6cf34a63c790..62eda5d59024 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -139,7 +139,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
139 return 0; 139 return 0;
140 140
141 pnp_dbg("ACPI device : hid %s", acpi_device_hid(device)); 141 pnp_dbg("ACPI device : hid %s", acpi_device_hid(device));
142 dev = kcalloc(1, sizeof(struct pnp_dev), GFP_KERNEL); 142 dev = kzalloc(sizeof(struct pnp_dev), GFP_KERNEL);
143 if (!dev) { 143 if (!dev) {
144 pnp_err("Out of memory"); 144 pnp_err("Out of memory");
145 return -ENOMEM; 145 return -ENOMEM;
@@ -169,7 +169,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
169 dev->number = num; 169 dev->number = num;
170 170
171 /* set the initial values for the PnP device */ 171 /* set the initial values for the PnP device */
172 dev_id = kcalloc(1, sizeof(struct pnp_id), GFP_KERNEL); 172 dev_id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL);
173 if (!dev_id) 173 if (!dev_id)
174 goto err; 174 goto err;
175 pnpidacpi_to_pnpid(acpi_device_hid(device), dev_id->id); 175 pnpidacpi_to_pnpid(acpi_device_hid(device), dev_id->id);
@@ -201,7 +201,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
201 for (i = 0; i < cid_list->count; i++) { 201 for (i = 0; i < cid_list->count; i++) {
202 if (!ispnpidacpi(cid_list->id[i].value)) 202 if (!ispnpidacpi(cid_list->id[i].value))
203 continue; 203 continue;
204 dev_id = kcalloc(1, sizeof(struct pnp_id), GFP_KERNEL); 204 dev_id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL);
205 if (!dev_id) 205 if (!dev_id)
206 continue; 206 continue;
207 207
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 379048fdf05d..7a535542fe92 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -298,7 +298,7 @@ static void pnpacpi_parse_dma_option(struct pnp_option *option, struct acpi_reso
298 298
299 if (p->channel_count == 0) 299 if (p->channel_count == 0)
300 return; 300 return;
301 dma = kcalloc(1, sizeof(struct pnp_dma), GFP_KERNEL); 301 dma = kzalloc(sizeof(struct pnp_dma), GFP_KERNEL);
302 if (!dma) 302 if (!dma)
303 return; 303 return;
304 304
@@ -354,7 +354,7 @@ static void pnpacpi_parse_irq_option(struct pnp_option *option,
354 354
355 if (p->interrupt_count == 0) 355 if (p->interrupt_count == 0)
356 return; 356 return;
357 irq = kcalloc(1, sizeof(struct pnp_irq), GFP_KERNEL); 357 irq = kzalloc(sizeof(struct pnp_irq), GFP_KERNEL);
358 if (!irq) 358 if (!irq)
359 return; 359 return;
360 360
@@ -375,7 +375,7 @@ static void pnpacpi_parse_ext_irq_option(struct pnp_option *option,
375 375
376 if (p->interrupt_count == 0) 376 if (p->interrupt_count == 0)
377 return; 377 return;
378 irq = kcalloc(1, sizeof(struct pnp_irq), GFP_KERNEL); 378 irq = kzalloc(sizeof(struct pnp_irq), GFP_KERNEL);
379 if (!irq) 379 if (!irq)
380 return; 380 return;
381 381
@@ -396,7 +396,7 @@ pnpacpi_parse_port_option(struct pnp_option *option,
396 396
397 if (io->address_length == 0) 397 if (io->address_length == 0)
398 return; 398 return;
399 port = kcalloc(1, sizeof(struct pnp_port), GFP_KERNEL); 399 port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL);
400 if (!port) 400 if (!port)
401 return; 401 return;
402 port->min = io->minimum; 402 port->min = io->minimum;
@@ -417,7 +417,7 @@ pnpacpi_parse_fixed_port_option(struct pnp_option *option,
417 417
418 if (io->address_length == 0) 418 if (io->address_length == 0)
419 return; 419 return;
420 port = kcalloc(1, sizeof(struct pnp_port), GFP_KERNEL); 420 port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL);
421 if (!port) 421 if (!port)
422 return; 422 return;
423 port->min = port->max = io->address; 423 port->min = port->max = io->address;
@@ -436,7 +436,7 @@ pnpacpi_parse_mem24_option(struct pnp_option *option,
436 436
437 if (p->address_length == 0) 437 if (p->address_length == 0)
438 return; 438 return;
439 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL); 439 mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL);
440 if (!mem) 440 if (!mem)
441 return; 441 return;
442 mem->min = p->minimum; 442 mem->min = p->minimum;
@@ -459,7 +459,7 @@ pnpacpi_parse_mem32_option(struct pnp_option *option,
459 459
460 if (p->address_length == 0) 460 if (p->address_length == 0)
461 return; 461 return;
462 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL); 462 mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL);
463 if (!mem) 463 if (!mem)
464 return; 464 return;
465 mem->min = p->minimum; 465 mem->min = p->minimum;
@@ -482,7 +482,7 @@ pnpacpi_parse_fixed_mem32_option(struct pnp_option *option,
482 482
483 if (p->address_length == 0) 483 if (p->address_length == 0)
484 return; 484 return;
485 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL); 485 mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL);
486 if (!mem) 486 if (!mem)
487 return; 487 return;
488 mem->min = mem->max = p->address; 488 mem->min = mem->max = p->address;
@@ -514,7 +514,7 @@ pnpacpi_parse_address_option(struct pnp_option *option, struct acpi_resource *r)
514 return; 514 return;
515 515
516 if (p->resource_type == ACPI_MEMORY_RANGE) { 516 if (p->resource_type == ACPI_MEMORY_RANGE) {
517 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL); 517 mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL);
518 if (!mem) 518 if (!mem)
519 return; 519 return;
520 mem->min = mem->max = p->minimum; 520 mem->min = mem->max = p->minimum;
@@ -524,7 +524,7 @@ pnpacpi_parse_address_option(struct pnp_option *option, struct acpi_resource *r)
524 ACPI_READ_WRITE_MEMORY) ? IORESOURCE_MEM_WRITEABLE : 0; 524 ACPI_READ_WRITE_MEMORY) ? IORESOURCE_MEM_WRITEABLE : 0;
525 pnp_register_mem_resource(option, mem); 525 pnp_register_mem_resource(option, mem);
526 } else if (p->resource_type == ACPI_IO_RANGE) { 526 } else if (p->resource_type == ACPI_IO_RANGE) {
527 port = kcalloc(1, sizeof(struct pnp_port), GFP_KERNEL); 527 port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL);
528 if (!port) 528 if (!port)
529 return; 529 return;
530 port->min = port->max = p->minimum; 530 port->min = port->max = p->minimum;
@@ -721,7 +721,7 @@ int pnpacpi_build_resource_template(acpi_handle handle,
721 if (!res_cnt) 721 if (!res_cnt)
722 return -EINVAL; 722 return -EINVAL;
723 buffer->length = sizeof(struct acpi_resource) * (res_cnt + 1) + 1; 723 buffer->length = sizeof(struct acpi_resource) * (res_cnt + 1) + 1;
724 buffer->pointer = kcalloc(1, buffer->length - 1, GFP_KERNEL); 724 buffer->pointer = kzalloc(buffer->length - 1, GFP_KERNEL);
725 if (!buffer->pointer) 725 if (!buffer->pointer)
726 return -ENOMEM; 726 return -ENOMEM;
727 pnp_dbg("Res cnt %d", res_cnt); 727 pnp_dbg("Res cnt %d", res_cnt);
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index 33adeba1a31f..95738dbd5d45 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -109,10 +109,10 @@ static int pnp_dock_event(int dock, struct pnp_docking_station_info *info)
109 if (!current->fs->root) { 109 if (!current->fs->root) {
110 return -EAGAIN; 110 return -EAGAIN;
111 } 111 }
112 if (!(envp = (char **) kcalloc (20, sizeof (char *), GFP_KERNEL))) { 112 if (!(envp = kcalloc(20, sizeof (char *), GFP_KERNEL))) {
113 return -ENOMEM; 113 return -ENOMEM;
114 } 114 }
115 if (!(buf = kcalloc (1, 256, GFP_KERNEL))) { 115 if (!(buf = kzalloc(256, GFP_KERNEL))) {
116 kfree (envp); 116 kfree (envp);
117 return -ENOMEM; 117 return -ENOMEM;
118 } 118 }
@@ -220,7 +220,7 @@ static int pnpbios_get_resources(struct pnp_dev * dev, struct pnp_resource_table
220 if(!pnpbios_is_dynamic(dev)) 220 if(!pnpbios_is_dynamic(dev))
221 return -EPERM; 221 return -EPERM;
222 222
223 node = kcalloc(1, node_info.max_node_size, GFP_KERNEL); 223 node = kzalloc(node_info.max_node_size, GFP_KERNEL);
224 if (!node) 224 if (!node)
225 return -1; 225 return -1;
226 if (pnp_bios_get_dev_node(&nodenum, (char )PNPMODE_DYNAMIC, node)) { 226 if (pnp_bios_get_dev_node(&nodenum, (char )PNPMODE_DYNAMIC, node)) {
@@ -243,7 +243,7 @@ static int pnpbios_set_resources(struct pnp_dev * dev, struct pnp_resource_table
243 if (!pnpbios_is_dynamic(dev)) 243 if (!pnpbios_is_dynamic(dev))
244 return -EPERM; 244 return -EPERM;
245 245
246 node = kcalloc(1, node_info.max_node_size, GFP_KERNEL); 246 node = kzalloc(node_info.max_node_size, GFP_KERNEL);
247 if (!node) 247 if (!node)
248 return -1; 248 return -1;
249 if (pnp_bios_get_dev_node(&nodenum, (char )PNPMODE_DYNAMIC, node)) { 249 if (pnp_bios_get_dev_node(&nodenum, (char )PNPMODE_DYNAMIC, node)) {
@@ -294,7 +294,7 @@ static int pnpbios_disable_resources(struct pnp_dev *dev)
294 if(dev->flags & PNPBIOS_NO_DISABLE || !pnpbios_is_dynamic(dev)) 294 if(dev->flags & PNPBIOS_NO_DISABLE || !pnpbios_is_dynamic(dev))
295 return -EPERM; 295 return -EPERM;
296 296
297 node = kcalloc(1, node_info.max_node_size, GFP_KERNEL); 297 node = kzalloc(node_info.max_node_size, GFP_KERNEL);
298 if (!node) 298 if (!node)
299 return -ENOMEM; 299 return -ENOMEM;
300 300
@@ -336,7 +336,7 @@ static int insert_device(struct pnp_dev *dev, struct pnp_bios_node * node)
336 } 336 }
337 337
338 /* set the initial values for the PnP device */ 338 /* set the initial values for the PnP device */
339 dev_id = kcalloc(1, sizeof(struct pnp_id), GFP_KERNEL); 339 dev_id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL);
340 if (!dev_id) 340 if (!dev_id)
341 return -1; 341 return -1;
342 pnpid32_to_pnpid(node->eisa_id,id); 342 pnpid32_to_pnpid(node->eisa_id,id);
@@ -374,7 +374,7 @@ static void __init build_devlist(void)
374 struct pnp_bios_node *node; 374 struct pnp_bios_node *node;
375 struct pnp_dev *dev; 375 struct pnp_dev *dev;
376 376
377 node = kcalloc(1, node_info.max_node_size, GFP_KERNEL); 377 node = kzalloc(node_info.max_node_size, GFP_KERNEL);
378 if (!node) 378 if (!node)
379 return; 379 return;
380 380
@@ -391,7 +391,7 @@ static void __init build_devlist(void)
391 break; 391 break;
392 } 392 }
393 nodes_got++; 393 nodes_got++;
394 dev = kcalloc(1, sizeof (struct pnp_dev), GFP_KERNEL); 394 dev = kzalloc(sizeof (struct pnp_dev), GFP_KERNEL);
395 if (!dev) 395 if (!dev)
396 break; 396 break;
397 if(insert_device(dev,node)<0) 397 if(insert_device(dev,node)<0)
diff --git a/drivers/pnp/pnpbios/proc.c b/drivers/pnp/pnpbios/proc.c
index 5a3dfc97f5e9..8027073f7919 100644
--- a/drivers/pnp/pnpbios/proc.c
+++ b/drivers/pnp/pnpbios/proc.c
@@ -87,7 +87,7 @@ static int proc_read_escd(char *buf, char **start, off_t pos,
87 return -EFBIG; 87 return -EFBIG;
88 } 88 }
89 89
90 tmpbuf = kcalloc(1, escd.escd_size, GFP_KERNEL); 90 tmpbuf = kzalloc(escd.escd_size, GFP_KERNEL);
91 if (!tmpbuf) return -ENOMEM; 91 if (!tmpbuf) return -ENOMEM;
92 92
93 if (pnp_bios_read_escd(tmpbuf, escd.nv_storage_base)) { 93 if (pnp_bios_read_escd(tmpbuf, escd.nv_storage_base)) {
@@ -133,7 +133,7 @@ static int proc_read_devices(char *buf, char **start, off_t pos,
133 if (pos >= 0xff) 133 if (pos >= 0xff)
134 return 0; 134 return 0;
135 135
136 node = kcalloc(1, node_info.max_node_size, GFP_KERNEL); 136 node = kzalloc(node_info.max_node_size, GFP_KERNEL);
137 if (!node) return -ENOMEM; 137 if (!node) return -ENOMEM;
138 138
139 for (nodenum=pos; nodenum<0xff; ) { 139 for (nodenum=pos; nodenum<0xff; ) {
@@ -168,7 +168,7 @@ static int proc_read_node(char *buf, char **start, off_t pos,
168 u8 nodenum = (long)data; 168 u8 nodenum = (long)data;
169 int len; 169 int len;
170 170
171 node = kcalloc(1, node_info.max_node_size, GFP_KERNEL); 171 node = kzalloc(node_info.max_node_size, GFP_KERNEL);
172 if (!node) return -ENOMEM; 172 if (!node) return -ENOMEM;
173 if (pnp_bios_get_dev_node(&nodenum, boot, node)) { 173 if (pnp_bios_get_dev_node(&nodenum, boot, node)) {
174 kfree(node); 174 kfree(node);
@@ -188,7 +188,7 @@ static int proc_write_node(struct file *file, const char __user *buf,
188 u8 nodenum = (long)data; 188 u8 nodenum = (long)data;
189 int ret = count; 189 int ret = count;
190 190
191 node = kcalloc(1, node_info.max_node_size, GFP_KERNEL); 191 node = kzalloc(node_info.max_node_size, GFP_KERNEL);
192 if (!node) 192 if (!node)
193 return -ENOMEM; 193 return -ENOMEM;
194 if (pnp_bios_get_dev_node(&nodenum, boot, node)) { 194 if (pnp_bios_get_dev_node(&nodenum, boot, node)) {
diff --git a/drivers/pnp/pnpbios/rsparser.c b/drivers/pnp/pnpbios/rsparser.c
index ef508a4de557..95b79685a9d1 100644
--- a/drivers/pnp/pnpbios/rsparser.c
+++ b/drivers/pnp/pnpbios/rsparser.c
@@ -248,7 +248,7 @@ static void
248pnpbios_parse_mem_option(unsigned char *p, int size, struct pnp_option *option) 248pnpbios_parse_mem_option(unsigned char *p, int size, struct pnp_option *option)
249{ 249{
250 struct pnp_mem * mem; 250 struct pnp_mem * mem;
251 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL); 251 mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL);
252 if (!mem) 252 if (!mem)
253 return; 253 return;
254 mem->min = ((p[5] << 8) | p[4]) << 8; 254 mem->min = ((p[5] << 8) | p[4]) << 8;
@@ -264,7 +264,7 @@ static void
264pnpbios_parse_mem32_option(unsigned char *p, int size, struct pnp_option *option) 264pnpbios_parse_mem32_option(unsigned char *p, int size, struct pnp_option *option)
265{ 265{
266 struct pnp_mem * mem; 266 struct pnp_mem * mem;
267 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL); 267 mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL);
268 if (!mem) 268 if (!mem)
269 return; 269 return;
270 mem->min = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4]; 270 mem->min = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4];
@@ -280,7 +280,7 @@ static void
280pnpbios_parse_fixed_mem32_option(unsigned char *p, int size, struct pnp_option *option) 280pnpbios_parse_fixed_mem32_option(unsigned char *p, int size, struct pnp_option *option)
281{ 281{
282 struct pnp_mem * mem; 282 struct pnp_mem * mem;
283 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL); 283 mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL);
284 if (!mem) 284 if (!mem)
285 return; 285 return;
286 mem->min = mem->max = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4]; 286 mem->min = mem->max = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4];
@@ -297,7 +297,7 @@ pnpbios_parse_irq_option(unsigned char *p, int size, struct pnp_option *option)
297 struct pnp_irq * irq; 297 struct pnp_irq * irq;
298 unsigned long bits; 298 unsigned long bits;
299 299
300 irq = kcalloc(1, sizeof(struct pnp_irq), GFP_KERNEL); 300 irq = kzalloc(sizeof(struct pnp_irq), GFP_KERNEL);
301 if (!irq) 301 if (!irq)
302 return; 302 return;
303 bits = (p[2] << 8) | p[1]; 303 bits = (p[2] << 8) | p[1];
@@ -314,7 +314,7 @@ static void
314pnpbios_parse_dma_option(unsigned char *p, int size, struct pnp_option *option) 314pnpbios_parse_dma_option(unsigned char *p, int size, struct pnp_option *option)
315{ 315{
316 struct pnp_dma * dma; 316 struct pnp_dma * dma;
317 dma = kcalloc(1, sizeof(struct pnp_dma), GFP_KERNEL); 317 dma = kzalloc(sizeof(struct pnp_dma), GFP_KERNEL);
318 if (!dma) 318 if (!dma)
319 return; 319 return;
320 dma->map = p[1]; 320 dma->map = p[1];
@@ -327,7 +327,7 @@ static void
327pnpbios_parse_port_option(unsigned char *p, int size, struct pnp_option *option) 327pnpbios_parse_port_option(unsigned char *p, int size, struct pnp_option *option)
328{ 328{
329 struct pnp_port * port; 329 struct pnp_port * port;
330 port = kcalloc(1, sizeof(struct pnp_port), GFP_KERNEL); 330 port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL);
331 if (!port) 331 if (!port)
332 return; 332 return;
333 port->min = (p[3] << 8) | p[2]; 333 port->min = (p[3] << 8) | p[2];
@@ -343,7 +343,7 @@ static void
343pnpbios_parse_fixed_port_option(unsigned char *p, int size, struct pnp_option *option) 343pnpbios_parse_fixed_port_option(unsigned char *p, int size, struct pnp_option *option)
344{ 344{
345 struct pnp_port * port; 345 struct pnp_port * port;
346 port = kcalloc(1, sizeof(struct pnp_port), GFP_KERNEL); 346 port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL);
347 if (!port) 347 if (!port)
348 return; 348 return;
349 port->min = port->max = (p[2] << 8) | p[1]; 349 port->min = port->max = (p[2] << 8) | p[1];
@@ -527,7 +527,7 @@ pnpbios_parse_compatible_ids(unsigned char *p, unsigned char *end, struct pnp_de
527 case SMALL_TAG_COMPATDEVID: /* compatible ID */ 527 case SMALL_TAG_COMPATDEVID: /* compatible ID */
528 if (len != 4) 528 if (len != 4)
529 goto len_err; 529 goto len_err;
530 dev_id = kcalloc(1, sizeof (struct pnp_id), GFP_KERNEL); 530 dev_id = kzalloc(sizeof (struct pnp_id), GFP_KERNEL);
531 if (!dev_id) 531 if (!dev_id)
532 return NULL; 532 return NULL;
533 memset(dev_id, 0, sizeof(struct pnp_id)); 533 memset(dev_id, 0, sizeof(struct pnp_id));
diff --git a/drivers/ps3/Makefile b/drivers/ps3/Makefile
index b52d547b7a78..8433eb7562cb 100644
--- a/drivers/ps3/Makefile
+++ b/drivers/ps3/Makefile
@@ -1 +1,2 @@
1obj-y += system-bus.o 1obj-y += system-bus.o
2obj-$(CONFIG_PS3_VUART) += vuart.o
diff --git a/drivers/ps3/vuart.c b/drivers/ps3/vuart.c
new file mode 100644
index 000000000000..6974f65bcda5
--- /dev/null
+++ b/drivers/ps3/vuart.c
@@ -0,0 +1,965 @@
1/*
2 * PS3 virtual uart
3 *
4 * Copyright (C) 2006 Sony Computer Entertainment Inc.
5 * Copyright 2006 Sony Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/interrupt.h>
24#include <asm/ps3.h>
25
26#include <asm/lv1call.h>
27#include <asm/bitops.h>
28
29#include "vuart.h"
30
31MODULE_AUTHOR("Sony Corporation");
32MODULE_LICENSE("GPL v2");
33MODULE_DESCRIPTION("ps3 vuart");
34
35/**
36 * vuart - An inter-partition data link service.
37 * port 0: PS3 AV Settings.
38 * port 2: PS3 System Manager.
39 *
40 * The vuart provides a bi-directional byte stream data link between logical
41 * partitions. Its primary role is as a communications link between the guest
42 * OS and the system policy module. The current HV does not support any
43 * connections other than those listed.
44 */
45
46enum {PORT_COUNT = 3,};
47
48enum vuart_param {
49 PARAM_TX_TRIGGER = 0,
50 PARAM_RX_TRIGGER = 1,
51 PARAM_INTERRUPT_MASK = 2,
52 PARAM_RX_BUF_SIZE = 3, /* read only */
53 PARAM_RX_BYTES = 4, /* read only */
54 PARAM_TX_BUF_SIZE = 5, /* read only */
55 PARAM_TX_BYTES = 6, /* read only */
56 PARAM_INTERRUPT_STATUS = 7, /* read only */
57};
58
59enum vuart_interrupt_bit {
60 INTERRUPT_BIT_TX = 0,
61 INTERRUPT_BIT_RX = 1,
62 INTERRUPT_BIT_DISCONNECT = 2,
63};
64
65enum vuart_interrupt_mask {
66 INTERRUPT_MASK_TX = 1,
67 INTERRUPT_MASK_RX = 2,
68 INTERRUPT_MASK_DISCONNECT = 4,
69};
70
71/**
72 * struct ports_bmp - bitmap indicating ports needing service.
73 *
74 * A 256 bit read only bitmap indicating ports needing service. Do not write
75 * to these bits. Must not cross a page boundary.
76 */
77
78struct ports_bmp {
79 u64 status;
80 u64 unused[3];
81} __attribute__ ((aligned (32)));
82
83/* redefine dev_dbg to do a syntax check */
84
85#if !defined(DEBUG)
86#undef dev_dbg
87static inline int __attribute__ ((format (printf, 2, 3))) dev_dbg(
88 const struct device *_dev, const char *fmt, ...) {return 0;}
89#endif
90
91#define dump_ports_bmp(_b) _dump_ports_bmp(_b, __func__, __LINE__)
92static void __attribute__ ((unused)) _dump_ports_bmp(
93 const struct ports_bmp* bmp, const char* func, int line)
94{
95 pr_debug("%s:%d: ports_bmp: %016lxh\n", func, line, bmp->status);
96}
97
98static int ps3_vuart_match_id_to_port(enum ps3_match_id match_id,
99 unsigned int *port_number)
100{
101 switch(match_id) {
102 case PS3_MATCH_ID_AV_SETTINGS:
103 *port_number = 0;
104 return 0;
105 case PS3_MATCH_ID_SYSTEM_MANAGER:
106 *port_number = 2;
107 return 0;
108 default:
109 WARN_ON(1);
110 *port_number = UINT_MAX;
111 return -EINVAL;
112 };
113}
114
115#define dump_port_params(_b) _dump_port_params(_b, __func__, __LINE__)
116static void __attribute__ ((unused)) _dump_port_params(unsigned int port_number,
117 const char* func, int line)
118{
119#if defined(DEBUG)
120 static const char *strings[] = {
121 "tx_trigger ",
122 "rx_trigger ",
123 "interrupt_mask ",
124 "rx_buf_size ",
125 "rx_bytes ",
126 "tx_buf_size ",
127 "tx_bytes ",
128 "interrupt_status",
129 };
130 int result;
131 unsigned int i;
132 u64 value;
133
134 for (i = 0; i < ARRAY_SIZE(strings); i++) {
135 result = lv1_get_virtual_uart_param(port_number, i, &value);
136
137 if (result) {
138 pr_debug("%s:%d: port_%u: %s failed: %s\n", func, line,
139 port_number, strings[i], ps3_result(result));
140 continue;
141 }
142 pr_debug("%s:%d: port_%u: %s = %lxh\n",
143 func, line, port_number, strings[i], value);
144 }
145#endif
146}
147
148struct vuart_triggers {
149 unsigned long rx;
150 unsigned long tx;
151};
152
153int ps3_vuart_get_triggers(struct ps3_vuart_port_device *dev,
154 struct vuart_triggers *trig)
155{
156 int result;
157 unsigned long size;
158 unsigned long val;
159
160 result = lv1_get_virtual_uart_param(dev->port_number,
161 PARAM_TX_TRIGGER, &trig->tx);
162
163 if (result) {
164 dev_dbg(&dev->core, "%s:%d: tx_trigger failed: %s\n",
165 __func__, __LINE__, ps3_result(result));
166 return result;
167 }
168
169 result = lv1_get_virtual_uart_param(dev->port_number,
170 PARAM_RX_BUF_SIZE, &size);
171
172 if (result) {
173 dev_dbg(&dev->core, "%s:%d: tx_buf_size failed: %s\n",
174 __func__, __LINE__, ps3_result(result));
175 return result;
176 }
177
178 result = lv1_get_virtual_uart_param(dev->port_number,
179 PARAM_RX_TRIGGER, &val);
180
181 if (result) {
182 dev_dbg(&dev->core, "%s:%d: rx_trigger failed: %s\n",
183 __func__, __LINE__, ps3_result(result));
184 return result;
185 }
186
187 trig->rx = size - val;
188
189 dev_dbg(&dev->core, "%s:%d: tx %lxh, rx %lxh\n", __func__, __LINE__,
190 trig->tx, trig->rx);
191
192 return result;
193}
194
195int ps3_vuart_set_triggers(struct ps3_vuart_port_device *dev, unsigned int tx,
196 unsigned int rx)
197{
198 int result;
199 unsigned long size;
200
201 result = lv1_set_virtual_uart_param(dev->port_number,
202 PARAM_TX_TRIGGER, tx);
203
204 if (result) {
205 dev_dbg(&dev->core, "%s:%d: tx_trigger failed: %s\n",
206 __func__, __LINE__, ps3_result(result));
207 return result;
208 }
209
210 result = lv1_get_virtual_uart_param(dev->port_number,
211 PARAM_RX_BUF_SIZE, &size);
212
213 if (result) {
214 dev_dbg(&dev->core, "%s:%d: tx_buf_size failed: %s\n",
215 __func__, __LINE__, ps3_result(result));
216 return result;
217 }
218
219 result = lv1_set_virtual_uart_param(dev->port_number,
220 PARAM_RX_TRIGGER, size - rx);
221
222 if (result) {
223 dev_dbg(&dev->core, "%s:%d: rx_trigger failed: %s\n",
224 __func__, __LINE__, ps3_result(result));
225 return result;
226 }
227
228 dev_dbg(&dev->core, "%s:%d: tx %xh, rx %xh\n", __func__, __LINE__,
229 tx, rx);
230
231 return result;
232}
233
234static int ps3_vuart_get_rx_bytes_waiting(struct ps3_vuart_port_device *dev,
235 unsigned long *bytes_waiting)
236{
237 int result = lv1_get_virtual_uart_param(dev->port_number,
238 PARAM_RX_BYTES, bytes_waiting);
239
240 if (result)
241 dev_dbg(&dev->core, "%s:%d: rx_bytes failed: %s\n",
242 __func__, __LINE__, ps3_result(result));
243
244 dev_dbg(&dev->core, "%s:%d: %lxh\n", __func__, __LINE__,
245 *bytes_waiting);
246 return result;
247}
248
249static int ps3_vuart_set_interrupt_mask(struct ps3_vuart_port_device *dev,
250 unsigned long mask)
251{
252 int result;
253
254 dev_dbg(&dev->core, "%s:%d: %lxh\n", __func__, __LINE__, mask);
255
256 dev->interrupt_mask = mask;
257
258 result = lv1_set_virtual_uart_param(dev->port_number,
259 PARAM_INTERRUPT_MASK, dev->interrupt_mask);
260
261 if (result)
262 dev_dbg(&dev->core, "%s:%d: interrupt_mask failed: %s\n",
263 __func__, __LINE__, ps3_result(result));
264
265 return result;
266}
267
268static int ps3_vuart_get_interrupt_mask(struct ps3_vuart_port_device *dev,
269 unsigned long *status)
270{
271 int result = lv1_get_virtual_uart_param(dev->port_number,
272 PARAM_INTERRUPT_STATUS, status);
273
274 if (result)
275 dev_dbg(&dev->core, "%s:%d: interrupt_status failed: %s\n",
276 __func__, __LINE__, ps3_result(result));
277
278 dev_dbg(&dev->core, "%s:%d: m %lxh, s %lxh, m&s %lxh\n",
279 __func__, __LINE__, dev->interrupt_mask, *status,
280 dev->interrupt_mask & *status);
281
282 return result;
283}
284
285int ps3_vuart_enable_interrupt_tx(struct ps3_vuart_port_device *dev)
286{
287 return (dev->interrupt_mask & INTERRUPT_MASK_TX) ? 0
288 : ps3_vuart_set_interrupt_mask(dev, dev->interrupt_mask
289 | INTERRUPT_MASK_TX);
290}
291
292int ps3_vuart_enable_interrupt_rx(struct ps3_vuart_port_device *dev)
293{
294 return (dev->interrupt_mask & INTERRUPT_MASK_RX) ? 0
295 : ps3_vuart_set_interrupt_mask(dev, dev->interrupt_mask
296 | INTERRUPT_MASK_RX);
297}
298
299int ps3_vuart_enable_interrupt_disconnect(struct ps3_vuart_port_device *dev)
300{
301 return (dev->interrupt_mask & INTERRUPT_MASK_DISCONNECT) ? 0
302 : ps3_vuart_set_interrupt_mask(dev, dev->interrupt_mask
303 | INTERRUPT_MASK_DISCONNECT);
304}
305
306int ps3_vuart_disable_interrupt_tx(struct ps3_vuart_port_device *dev)
307{
308 return (dev->interrupt_mask & INTERRUPT_MASK_TX)
309 ? ps3_vuart_set_interrupt_mask(dev, dev->interrupt_mask
310 & ~INTERRUPT_MASK_TX) : 0;
311}
312
313int ps3_vuart_disable_interrupt_rx(struct ps3_vuart_port_device *dev)
314{
315 return (dev->interrupt_mask & INTERRUPT_MASK_RX)
316 ? ps3_vuart_set_interrupt_mask(dev, dev->interrupt_mask
317 & ~INTERRUPT_MASK_RX) : 0;
318}
319
320int ps3_vuart_disable_interrupt_disconnect(struct ps3_vuart_port_device *dev)
321{
322 return (dev->interrupt_mask & INTERRUPT_MASK_DISCONNECT)
323 ? ps3_vuart_set_interrupt_mask(dev, dev->interrupt_mask
324 & ~INTERRUPT_MASK_DISCONNECT) : 0;
325}
326
/**
 * ps3_vuart_raw_write - Low level write helper.
 *
 * Do not call ps3_vuart_raw_write directly, use ps3_vuart_write.
 *
 * Hands the lpar address of @buf to the hypervisor.  A short write is
 * not an error: *bytes_written reports how much the port accepted, and
 * the caller (ps3_vuart_write / ps3_vuart_handle_interrupt_tx) queues
 * the remainder.  Updates the per-port write statistics on success.
 */

static int ps3_vuart_raw_write(struct ps3_vuart_port_device *dev,
	const void* buf, unsigned int bytes, unsigned long *bytes_written)
{
	int result;

	dev_dbg(&dev->core, "%s:%d: %xh\n", __func__, __LINE__, bytes);

	/* The hypervisor wants an lpar address, so translate the kernel
	 * virtual address of @buf. */
	result = lv1_write_virtual_uart(dev->port_number,
		ps3_mm_phys_to_lpar(__pa(buf)), bytes, bytes_written);

	if (result) {
		dev_dbg(&dev->core, "%s:%d: lv1_write_virtual_uart failed: "
			"%s\n", __func__, __LINE__, ps3_result(result));
		return result;
	}

	dev->stats.bytes_written += *bytes_written;

	dev_dbg(&dev->core, "%s:%d: wrote %lxh/%xh=>%lxh\n", __func__,
		__LINE__, *bytes_written, bytes, dev->stats.bytes_written);

	return result;
}
356
/**
 * ps3_vuart_raw_read - Low level read helper.
 *
 * Do not call ps3_vuart_raw_read directly, use ps3_vuart_read.
 *
 * Reads up to @bytes from the port into @buf via the hypervisor;
 * *bytes_read reports the count actually transferred (written only on
 * success).  Updates the per-port read statistics on success.
 */

static int ps3_vuart_raw_read(struct ps3_vuart_port_device *dev, void* buf,
	unsigned int bytes, unsigned long *bytes_read)
{
	int result;

	dev_dbg(&dev->core, "%s:%d: %xh\n", __func__, __LINE__, bytes);

	/* The hypervisor wants an lpar address, so translate the kernel
	 * virtual address of @buf. */
	result = lv1_read_virtual_uart(dev->port_number,
		ps3_mm_phys_to_lpar(__pa(buf)), bytes, bytes_read);

	if (result) {
		dev_dbg(&dev->core, "%s:%d: lv1_read_virtual_uart failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		return result;
	}

	dev->stats.bytes_read += *bytes_read;

	dev_dbg(&dev->core, "%s:%d: read %lxh/%xh=>%lxh\n", __func__, __LINE__,
		*bytes_read, bytes, dev->stats.bytes_read);

	return result;
}
386
/**
 * struct list_buffer - An element for a port device fifo buffer list.
 * @link: linkage within a port's tx_list or rx_list.
 * @head: first unconsumed byte within @data.
 * @tail: one past the last valid byte within @data.
 * @dbg_number: monotonically increasing id, used only in debug traces.
 * @data: flexible array member holding the buffered bytes.
 */

struct list_buffer {
	struct list_head link;
	const unsigned char *head;
	const unsigned char *tail;
	unsigned long dbg_number;
	unsigned char data[];
};
398
/**
 * ps3_vuart_write - the entry point for writing data to a port
 *
 * If the port is idle on entry as much of the incoming data is written to
 * the port as the port will accept. Otherwise a list buffer is created
 * and any remaining incoming data is copied to that buffer. The buffer is
 * then enqueued for transmission via the transmit interrupt.
 */

int ps3_vuart_write(struct ps3_vuart_port_device *dev, const void* buf,
	unsigned int bytes)
{
	static unsigned long dbg_number;	/* trace-only buffer id */
	int result;
	unsigned long flags;
	struct list_buffer *lb;

	dev_dbg(&dev->core, "%s:%d: %u(%xh) bytes\n", __func__, __LINE__,
		bytes, bytes);

	spin_lock_irqsave(&dev->tx_list.lock, flags);

	if (list_empty(&dev->tx_list.head)) {
		unsigned long bytes_written;

		/* Nothing queued ahead of us: push straight to the port
		 * while still holding the list lock so the tx interrupt
		 * handler cannot interleave. */
		result = ps3_vuart_raw_write(dev, buf, bytes, &bytes_written);

		spin_unlock_irqrestore(&dev->tx_list.lock, flags);

		if (result) {
			dev_dbg(&dev->core,
				"%s:%d: ps3_vuart_raw_write failed\n",
				__func__, __LINE__);
			return result;
		}

		if (bytes_written == bytes) {
			dev_dbg(&dev->core, "%s:%d: wrote %xh bytes\n",
				__func__, __LINE__, bytes);
			return 0;
		}

		/* Partial write: only the remainder is queued below. */
		bytes -= bytes_written;
		buf += bytes_written;
	} else
		spin_unlock_irqrestore(&dev->tx_list.lock, flags);

	/* GFP_KERNEL: this path may sleep, unlike the interrupt-context
	 * allocation in ps3_vuart_handle_interrupt_rx. */
	lb = kmalloc(sizeof(struct list_buffer) + bytes, GFP_KERNEL);

	if (!lb) {
		return -ENOMEM;
	}

	memcpy(lb->data, buf, bytes);
	lb->head = lb->data;
	lb->tail = lb->data + bytes;
	lb->dbg_number = ++dbg_number;

	/* Enqueue the remainder and ensure the transmit interrupt is on so
	 * ps3_vuart_handle_interrupt_tx() will drain it. */
	spin_lock_irqsave(&dev->tx_list.lock, flags);
	list_add_tail(&lb->link, &dev->tx_list.head);
	ps3_vuart_enable_interrupt_tx(dev);
	spin_unlock_irqrestore(&dev->tx_list.lock, flags);

	dev_dbg(&dev->core, "%s:%d: queued buf_%lu, %xh bytes\n",
		__func__, __LINE__, lb->dbg_number, bytes);

	return 0;
}
467
468/**
469 * ps3_vuart_read - the entry point for reading data from a port
470 *
471 * If enough bytes to satisfy the request are held in the buffer list those
472 * bytes are dequeued and copied to the caller's buffer. Emptied list buffers
473 * are retiered. If the request cannot be statified by bytes held in the list
474 * buffers -EAGAIN is returned.
475 */
476
477int ps3_vuart_read(struct ps3_vuart_port_device *dev, void* buf,
478 unsigned int bytes)
479{
480 unsigned long flags;
481 struct list_buffer *lb, *n;
482 unsigned long bytes_read;
483
484 dev_dbg(&dev->core, "%s:%d: %u(%xh) bytes\n", __func__, __LINE__,
485 bytes, bytes);
486
487 spin_lock_irqsave(&dev->rx_list.lock, flags);
488
489 if (dev->rx_list.bytes_held < bytes) {
490 spin_unlock_irqrestore(&dev->rx_list.lock, flags);
491 dev_dbg(&dev->core, "%s:%d: starved for %lxh bytes\n",
492 __func__, __LINE__, bytes - dev->rx_list.bytes_held);
493 return -EAGAIN;
494 }
495
496 list_for_each_entry_safe(lb, n, &dev->rx_list.head, link) {
497 bytes_read = min((unsigned int)(lb->tail - lb->head), bytes);
498
499 memcpy(buf, lb->head, bytes_read);
500 buf += bytes_read;
501 bytes -= bytes_read;
502 dev->rx_list.bytes_held -= bytes_read;
503
504 if (bytes_read < lb->tail - lb->head) {
505 lb->head += bytes_read;
506 spin_unlock_irqrestore(&dev->rx_list.lock, flags);
507
508 dev_dbg(&dev->core,
509 "%s:%d: dequeued buf_%lu, %lxh bytes\n",
510 __func__, __LINE__, lb->dbg_number, bytes_read);
511 return 0;
512 }
513
514 dev_dbg(&dev->core, "%s:%d free buf_%lu\n", __func__, __LINE__,
515 lb->dbg_number);
516
517 list_del(&lb->link);
518 kfree(lb);
519 }
520 spin_unlock_irqrestore(&dev->rx_list.lock, flags);
521
522 dev_dbg(&dev->core, "%s:%d: dequeued buf_%lu, %xh bytes\n",
523 __func__, __LINE__, lb->dbg_number, bytes);
524
525 return 0;
526}
527
/**
 * ps3_vuart_handle_interrupt_tx - third stage transmit interrupt handler
 *
 * Services the transmit interrupt for the port. Writes as much data from the
 * buffer list as the port will accept. Retires any emptied list buffers and
 * adjusts the final list buffer state for a partial write.
 */

static int ps3_vuart_handle_interrupt_tx(struct ps3_vuart_port_device *dev)
{
	int result = 0;
	unsigned long flags;
	struct list_buffer *lb, *n;
	unsigned long bytes_total = 0;	/* bytes pushed to the port, for tracing */

	dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);

	spin_lock_irqsave(&dev->tx_list.lock, flags);

	list_for_each_entry_safe(lb, n, &dev->tx_list.head, link) {

		unsigned long bytes_written;

		result = ps3_vuart_raw_write(dev, lb->head, lb->tail - lb->head,
			&bytes_written);

		if (result) {
			dev_dbg(&dev->core,
				"%s:%d: ps3_vuart_raw_write failed\n",
				__func__, __LINE__);
			break;
		}

		bytes_total += bytes_written;

		if (bytes_written < lb->tail - lb->head) {
			/* Port accepted only part of this buffer: keep the
			 * remainder queued and leave the tx interrupt
			 * enabled so we run again when space frees up. */
			lb->head += bytes_written;
			dev_dbg(&dev->core,
				"%s:%d cleared buf_%lu, %lxh bytes\n",
				__func__, __LINE__, lb->dbg_number,
				bytes_written);
			goto port_full;
		}

		dev_dbg(&dev->core, "%s:%d free buf_%lu\n", __func__, __LINE__,
			lb->dbg_number);

		list_del(&lb->link);
		kfree(lb);
	}

	/* List drained (or write failed): nothing left to send, so mask
	 * the tx interrupt off.  ps3_vuart_write re-enables it when it
	 * queues new data. */
	ps3_vuart_disable_interrupt_tx(dev);
port_full:
	spin_unlock_irqrestore(&dev->tx_list.lock, flags);
	dev_dbg(&dev->core, "%s:%d wrote %lxh bytes total\n",
		__func__, __LINE__, bytes_total);
	return result;
}
586
587/**
588 * ps3_vuart_handle_interrupt_rx - third stage receive interrupt handler
589 *
590 * Services the receive interrupt for the port. Creates a list buffer and
591 * copies all waiting port data to that buffer and enqueues the buffer in the
592 * buffer list. Buffer list data is dequeued via ps3_vuart_read.
593 */
594
595static int ps3_vuart_handle_interrupt_rx(struct ps3_vuart_port_device *dev)
596{
597 static unsigned long dbg_number;
598 int result = 0;
599 unsigned long flags;
600 struct list_buffer *lb;
601 unsigned long bytes;
602
603 dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
604
605 result = ps3_vuart_get_rx_bytes_waiting(dev, &bytes);
606
607 if (result)
608 return -EIO;
609
610 BUG_ON(!bytes);
611
612 /* add some extra space for recently arrived data */
613
614 bytes += 128;
615
616 lb = kmalloc(sizeof(struct list_buffer) + bytes, GFP_ATOMIC);
617
618 if (!lb)
619 return -ENOMEM;
620
621 ps3_vuart_raw_read(dev, lb->data, bytes, &bytes);
622
623 lb->head = lb->data;
624 lb->tail = lb->data + bytes;
625 lb->dbg_number = ++dbg_number;
626
627 spin_lock_irqsave(&dev->rx_list.lock, flags);
628 list_add_tail(&lb->link, &dev->rx_list.head);
629 dev->rx_list.bytes_held += bytes;
630 spin_unlock_irqrestore(&dev->rx_list.lock, flags);
631
632 dev_dbg(&dev->core, "%s:%d: queued buf_%lu, %lxh bytes\n",
633 __func__, __LINE__, lb->dbg_number, bytes);
634
635 return 0;
636}
637
638static int ps3_vuart_handle_interrupt_disconnect(
639 struct ps3_vuart_port_device *dev)
640{
641 dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
642 BUG_ON("no support");
643 return -1;
644}
645
/**
 * ps3_vuart_handle_port_interrupt - second stage interrupt handler
 *
 * Services any pending interrupt types for the port. Passes control to the
 * third stage type specific interrupt handler. Returns control to the first
 * stage handler after one iteration.
 */

static int ps3_vuart_handle_port_interrupt(struct ps3_vuart_port_device *dev)
{
	int result;
	unsigned long status;

	/* NOTE(review): reading the status word appears to also clear the
	 * pending interrupts -- ps3_vuart_probe uses the same call to
	 * "clear stale pending interrupts"; confirm against the lv1 spec. */
	result = ps3_vuart_get_interrupt_mask(dev, &status);

	if (result)
		return result;

	dev_dbg(&dev->core, "%s:%d: status: %lxh\n", __func__, __LINE__,
		status);

	/* For each pending type: bump the stats counter, run the third
	 * stage handler, and on failure mask that type off so a broken
	 * source cannot wedge the port in an interrupt storm. */
	if (status & INTERRUPT_MASK_DISCONNECT) {
		dev->stats.disconnect_interrupts++;
		result = ps3_vuart_handle_interrupt_disconnect(dev);
		if (result)
			ps3_vuart_disable_interrupt_disconnect(dev);
	}

	if (status & INTERRUPT_MASK_TX) {
		dev->stats.tx_interrupts++;
		result = ps3_vuart_handle_interrupt_tx(dev);
		if (result)
			ps3_vuart_disable_interrupt_tx(dev);
	}

	if (status & INTERRUPT_MASK_RX) {
		dev->stats.rx_interrupts++;
		result = ps3_vuart_handle_interrupt_rx(dev);
		if (result)
			ps3_vuart_disable_interrupt_rx(dev);
	}

	/* Handler failures are contained above (the offending interrupt
	 * type is masked); always report success to the first stage. */
	return 0;
}
690
/**
 * struct vuart_private - State shared by all vuart port devices.
 * @in_use: count of probed ports; the irq is allocated with the first
 *          and freed with the last (see ps3_vuart_probe/remove).
 * @virq: the virtual irq shared by all ports.
 * @devices: table of probed devices, indexed by port number.
 * @bmp: interrupt status bitmap; declared const here and cast away in
 *       ps3_vuart_probe when handed to ps3_alloc_vuart_irq -- presumably
 *       written by the hypervisor side, TODO confirm.
 */

struct vuart_private {
	unsigned int in_use;
	unsigned int virq;
	struct ps3_vuart_port_device *devices[PORT_COUNT];
	const struct ports_bmp bmp;
};
697
/**
 * ps3_vuart_irq_handler - first stage interrupt handler
 *
 * Loops finding any interrupting port and its associated instance data.
 * Passes control to the second stage port specific interrupt handler. Loops
 * until all outstanding interrupts are serviced.
 */

static irqreturn_t ps3_vuart_irq_handler(int irq, void *_private)
{
	struct vuart_private *private;

	BUG_ON(!_private);
	private = (struct vuart_private *)_private;

	while (1) {
		unsigned int port;

		dump_ports_bmp(&private->bmp);

		/* Map the highest set bit of the status bitmap to a port
		 * number.  NOTE(review): the termination test relies on
		 * __ilog2(0) == -1 so that port == BITS_PER_LONG when no
		 * bits are set -- confirm against the arch __ilog2. */
		port = (BITS_PER_LONG - 1) - __ilog2(private->bmp.status);

		if (port == BITS_PER_LONG)
			break;

		BUG_ON(port >= PORT_COUNT);
		BUG_ON(!private->devices[port]);

		ps3_vuart_handle_port_interrupt(private->devices[port]);
	}

	return IRQ_HANDLED;
}
731
732static int ps3_vuart_match(struct device *_dev, struct device_driver *_drv)
733{
734 int result;
735 struct ps3_vuart_port_driver *drv = to_ps3_vuart_port_driver(_drv);
736 struct ps3_vuart_port_device *dev = to_ps3_vuart_port_device(_dev);
737
738 result = dev->match_id == drv->match_id;
739
740 dev_info(&dev->core, "%s:%d: dev=%u(%s), drv=%u(%s): %s\n", __func__,
741 __LINE__, dev->match_id, dev->core.bus_id, drv->match_id,
742 drv->core.name, (result ? "match" : "miss"));
743
744 return result;
745}
746
747static struct vuart_private vuart_private;
748
749static int ps3_vuart_probe(struct device *_dev)
750{
751 int result;
752 unsigned long tmp;
753 struct ps3_vuart_port_device *dev = to_ps3_vuart_port_device(_dev);
754 struct ps3_vuart_port_driver *drv =
755 to_ps3_vuart_port_driver(_dev->driver);
756
757 dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
758
759 BUG_ON(!drv);
760
761 result = ps3_vuart_match_id_to_port(dev->match_id, &dev->port_number);
762
763 if (result) {
764 dev_dbg(&dev->core, "%s:%d: unknown match_id (%d)\n",
765 __func__, __LINE__, dev->match_id);
766 result = -EINVAL;
767 goto fail_match;
768 }
769
770 if (vuart_private.devices[dev->port_number]) {
771 dev_dbg(&dev->core, "%s:%d: port busy (%d)\n", __func__,
772 __LINE__, dev->port_number);
773 result = -EBUSY;
774 goto fail_match;
775 }
776
777 vuart_private.devices[dev->port_number] = dev;
778
779 INIT_LIST_HEAD(&dev->tx_list.head);
780 spin_lock_init(&dev->tx_list.lock);
781 INIT_LIST_HEAD(&dev->rx_list.head);
782 spin_lock_init(&dev->rx_list.lock);
783
784 vuart_private.in_use++;
785 if (vuart_private.in_use == 1) {
786 result = ps3_alloc_vuart_irq((void*)&vuart_private.bmp.status,
787 &vuart_private.virq);
788
789 if (result) {
790 dev_dbg(&dev->core,
791 "%s:%d: ps3_alloc_vuart_irq failed (%d)\n",
792 __func__, __LINE__, result);
793 result = -EPERM;
794 goto fail_alloc_irq;
795 }
796
797 result = request_irq(vuart_private.virq, ps3_vuart_irq_handler,
798 IRQF_DISABLED, "vuart", &vuart_private);
799
800 if (result) {
801 dev_info(&dev->core, "%s:%d: request_irq failed (%d)\n",
802 __func__, __LINE__, result);
803 goto fail_request_irq;
804 }
805 }
806
807 ps3_vuart_set_interrupt_mask(dev, INTERRUPT_MASK_RX);
808
809 /* clear stale pending interrupts */
810 ps3_vuart_get_interrupt_mask(dev, &tmp);
811
812 ps3_vuart_set_triggers(dev, 1, 1);
813
814 if (drv->probe)
815 result = drv->probe(dev);
816 else {
817 result = 0;
818 dev_info(&dev->core, "%s:%d: no probe method\n", __func__,
819 __LINE__);
820 }
821
822 if (result) {
823 dev_dbg(&dev->core, "%s:%d: drv->probe failed\n",
824 __func__, __LINE__);
825 goto fail_probe;
826 }
827
828 return result;
829
830fail_probe:
831fail_request_irq:
832 vuart_private.in_use--;
833 if (!vuart_private.in_use) {
834 ps3_free_vuart_irq(vuart_private.virq);
835 vuart_private.virq = NO_IRQ;
836 }
837fail_alloc_irq:
838fail_match:
839 dev_dbg(&dev->core, "%s:%d failed\n", __func__, __LINE__);
840 return result;
841}
842
843static int ps3_vuart_remove(struct device *_dev)
844{
845 struct ps3_vuart_port_device *dev = to_ps3_vuart_port_device(_dev);
846 struct ps3_vuart_port_driver *drv =
847 to_ps3_vuart_port_driver(_dev->driver);
848
849 dev_dbg(&dev->core, "%s:%d: %s\n", __func__, __LINE__,
850 dev->core.bus_id);
851
852 BUG_ON(vuart_private.in_use < 1);
853
854 if (drv->remove)
855 drv->remove(dev);
856 else
857 dev_dbg(&dev->core, "%s:%d: %s no remove method\n", __func__,
858 __LINE__, dev->core.bus_id);
859
860 vuart_private.in_use--;
861
862 if (!vuart_private.in_use) {
863 free_irq(vuart_private.virq, &vuart_private);
864 ps3_free_vuart_irq(vuart_private.virq);
865 vuart_private.virq = NO_IRQ;
866 }
867 return 0;
868}
869
/**
 * ps3_vuart - The vuart instance.
 *
 * The vuart is managed as a bus that port devices connect to.  Devices
 * are bound to drivers via ps3_vuart_match (match_id comparison), then
 * ps3_vuart_probe/ps3_vuart_remove manage the shared irq and port state.
 */

struct bus_type ps3_vuart = {
        .name = "ps3_vuart",
	.match = ps3_vuart_match,
	.probe = ps3_vuart_probe,
	.remove = ps3_vuart_remove,
};
882
/* Register the vuart bus type.  Runs at core_initcall time so port
 * devices and drivers registered later have a bus to attach to; a
 * registration failure here is unrecoverable, hence the BUG_ON. */
int __init ps3_vuart_init(void)
{
	int result;

	pr_debug("%s:%d:\n", __func__, __LINE__);
	result = bus_register(&ps3_vuart);
	BUG_ON(result);
	return result;
}
892
/* Unregister the vuart bus type on module exit. */
void __exit ps3_vuart_exit(void)
{
	pr_debug("%s:%d:\n", __func__, __LINE__);
	bus_unregister(&ps3_vuart);
}
898
899core_initcall(ps3_vuart_init);
900module_exit(ps3_vuart_exit);
901
/**
 * ps3_vuart_port_release_device - Remove a vuart port device.
 *
 * Release callback installed on dev->core by
 * ps3_vuart_port_device_register; frees the device structure when its
 * last reference is dropped.
 */

static void ps3_vuart_port_release_device(struct device *_dev)
{
	struct ps3_vuart_port_device *dev = to_ps3_vuart_port_device(_dev);
#if defined(DEBUG)
	/* Poison the structure so a use-after-free is caught quickly. */
	memset(dev, 0xad, sizeof(struct ps3_vuart_port_device));
#endif
	kfree(dev);
}
914
915/**
916 * ps3_vuart_port_device_register - Add a vuart port device.
917 */
918
919int ps3_vuart_port_device_register(struct ps3_vuart_port_device *dev)
920{
921 int result;
922 static unsigned int dev_count = 1;
923
924 dev->core.parent = NULL;
925 dev->core.bus = &ps3_vuart;
926 dev->core.release = ps3_vuart_port_release_device;
927
928 snprintf(dev->core.bus_id, sizeof(dev->core.bus_id), "vuart_%02x",
929 dev_count++);
930
931 dev_dbg(&dev->core, "%s:%d register\n", __func__, __LINE__);
932
933 result = device_register(&dev->core);
934
935 return result;
936}
937
938EXPORT_SYMBOL_GPL(ps3_vuart_port_device_register);
939
940/**
941 * ps3_vuart_port_driver_register - Add a vuart port device driver.
942 */
943
944int ps3_vuart_port_driver_register(struct ps3_vuart_port_driver *drv)
945{
946 int result;
947
948 pr_debug("%s:%d: (%s)\n", __func__, __LINE__, drv->core.name);
949 drv->core.bus = &ps3_vuart;
950 result = driver_register(&drv->core);
951 return result;
952}
953
954EXPORT_SYMBOL_GPL(ps3_vuart_port_driver_register);
955
/**
 * ps3_vuart_port_driver_unregister - Remove a vuart port device driver.
 *
 * Counterpart to ps3_vuart_port_driver_register().
 */

void ps3_vuart_port_driver_unregister(struct ps3_vuart_port_driver *drv)
{
	driver_unregister(&drv->core);
}

EXPORT_SYMBOL_GPL(ps3_vuart_port_driver_unregister);
diff --git a/drivers/ps3/vuart.h b/drivers/ps3/vuart.h
new file mode 100644
index 000000000000..28fd89f0c8aa
--- /dev/null
+++ b/drivers/ps3/vuart.h
@@ -0,0 +1,94 @@
1/*
2 * PS3 virtual uart
3 *
4 * Copyright (C) 2006 Sony Computer Entertainment Inc.
5 * Copyright 2006 Sony Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#if !defined(_PS3_VUART_H)
22#define _PS3_VUART_H
23
24struct ps3_vuart_stats {
25 unsigned long bytes_written;
26 unsigned long bytes_read;
27 unsigned long tx_interrupts;
28 unsigned long rx_interrupts;
29 unsigned long disconnect_interrupts;
30};
31
32/**
33 * struct ps3_vuart_port_device - a device on a vuart port
34 */
35
36struct ps3_vuart_port_device {
37 enum ps3_match_id match_id;
38 struct device core;
39
40 /* private driver variables */
41 unsigned int port_number;
42 unsigned long interrupt_mask;
43 struct {
44 spinlock_t lock;
45 struct list_head head;
46 } tx_list;
47 struct {
48 unsigned long bytes_held;
49 spinlock_t lock;
50 struct list_head head;
51 } rx_list;
52 struct ps3_vuart_stats stats;
53};
54
55/**
56 * struct ps3_vuart_port_driver - a driver for a device on a vuart port
57 */
58
59struct ps3_vuart_port_driver {
60 enum ps3_match_id match_id;
61 struct device_driver core;
62 int (*probe)(struct ps3_vuart_port_device *);
63 int (*remove)(struct ps3_vuart_port_device *);
64 int (*tx_event)(struct ps3_vuart_port_device *dev);
65 int (*rx_event)(struct ps3_vuart_port_device *dev);
66 int (*disconnect_event)(struct ps3_vuart_port_device *dev);
67 /* int (*suspend)(struct ps3_vuart_port_device *, pm_message_t); */
68 /* int (*resume)(struct ps3_vuart_port_device *); */
69};
70
71int ps3_vuart_port_device_register(struct ps3_vuart_port_device *dev);
72int ps3_vuart_port_driver_register(struct ps3_vuart_port_driver *drv);
73void ps3_vuart_port_driver_unregister(struct ps3_vuart_port_driver *drv);
74int ps3_vuart_write(struct ps3_vuart_port_device *dev,
75 const void* buf, unsigned int bytes);
76int ps3_vuart_read(struct ps3_vuart_port_device *dev, void* buf,
77 unsigned int bytes);
78static inline struct ps3_vuart_port_driver *to_ps3_vuart_port_driver(
79 struct device_driver *_drv)
80{
81 return container_of(_drv, struct ps3_vuart_port_driver, core);
82}
83static inline struct ps3_vuart_port_device *to_ps3_vuart_port_device(
84 struct device *_dev)
85{
86 return container_of(_dev, struct ps3_vuart_port_device, core);
87}
88
89int ps3_vuart_write(struct ps3_vuart_port_device *dev, const void* buf,
90 unsigned int bytes);
91int ps3_vuart_read(struct ps3_vuart_port_device *dev, void* buf,
92 unsigned int bytes);
93
94#endif
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 2a63ab2b47f4..09660e2ab051 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -288,7 +288,7 @@ config RTC_DRV_PL031
288 To compile this driver as a module, choose M here: the 288 To compile this driver as a module, choose M here: the
289 module will be called rtc-pl031. 289 module will be called rtc-pl031.
290 290
291config RTC_DRV_AT91 291config RTC_DRV_AT91RM9200
292 tristate "AT91RM9200" 292 tristate "AT91RM9200"
293 depends on RTC_CLASS && ARCH_AT91RM9200 293 depends on RTC_CLASS && ARCH_AT91RM9200
294 help 294 help
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index bd4c45d333f0..e6beedacc966 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -35,5 +35,5 @@ obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o
35obj-$(CONFIG_RTC_DRV_PL031) += rtc-pl031.o 35obj-$(CONFIG_RTC_DRV_PL031) += rtc-pl031.o
36obj-$(CONFIG_RTC_DRV_MAX6902) += rtc-max6902.o 36obj-$(CONFIG_RTC_DRV_MAX6902) += rtc-max6902.o
37obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o 37obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o
38obj-$(CONFIG_RTC_DRV_AT91) += rtc-at91.o 38obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o
39obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o 39obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o
diff --git a/drivers/rtc/rtc-at91.c b/drivers/rtc/rtc-at91rm9200.c
index 5c8addcaf1fb..4f654c901c64 100644
--- a/drivers/rtc/rtc-at91.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -137,6 +137,9 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
137 tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year); 137 tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
138 tm->tm_year = at91_alarm_year - 1900; 138 tm->tm_year = at91_alarm_year - 1900;
139 139
140 alrm->enabled = (at91_sys_read(AT91_RTC_IMR) & AT91_RTC_ALARM)
141 ? 1 : 0;
142
140 pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __FUNCTION__, 143 pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __FUNCTION__,
141 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday, 144 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
142 tm->tm_hour, tm->tm_min, tm->tm_sec); 145 tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -223,8 +226,6 @@ static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
223{ 226{
224 unsigned long imr = at91_sys_read(AT91_RTC_IMR); 227 unsigned long imr = at91_sys_read(AT91_RTC_IMR);
225 228
226 seq_printf(seq, "alarm_IRQ\t: %s\n",
227 (imr & AT91_RTC_ALARM) ? "yes" : "no");
228 seq_printf(seq, "update_IRQ\t: %s\n", 229 seq_printf(seq, "update_IRQ\t: %s\n",
229 (imr & AT91_RTC_ACKUPD) ? "yes" : "no"); 230 (imr & AT91_RTC_ACKUPD) ? "yes" : "no");
230 seq_printf(seq, "periodic_IRQ\t: %s\n", 231 seq_printf(seq, "periodic_IRQ\t: %s\n",
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 828b329e08e0..94d3df62a5fa 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -435,7 +435,7 @@ static int rtc_dev_add_device(struct class_device *class_dev,
435 goto err_cdev_del; 435 goto err_cdev_del;
436 } 436 }
437 437
438 dev_info(class_dev->dev, "rtc intf: dev (%d:%d)\n", 438 dev_dbg(class_dev->dev, "rtc intf: dev (%d:%d)\n",
439 MAJOR(rtc->rtc_dev->devt), 439 MAJOR(rtc->rtc_dev->devt),
440 MINOR(rtc->rtc_dev->devt)); 440 MINOR(rtc->rtc_dev->devt));
441 441
diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c
index dfef1637bfb8..205fa28593b7 100644
--- a/drivers/rtc/rtc-ds1672.c
+++ b/drivers/rtc/rtc-ds1672.c
@@ -199,7 +199,7 @@ static int ds1672_probe(struct i2c_adapter *adapter, int address, int kind)
199 struct i2c_client *client; 199 struct i2c_client *client;
200 struct rtc_device *rtc; 200 struct rtc_device *rtc;
201 201
202 dev_dbg(&adapter->dev, "%s\n", __FUNCTION__); 202 dev_dbg(adapter->class_dev.dev, "%s\n", __FUNCTION__);
203 203
204 if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) { 204 if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
205 err = -ENODEV; 205 err = -ENODEV;
diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c
index ba795a4db1e9..7bbc26a34bd2 100644
--- a/drivers/rtc/rtc-lib.c
+++ b/drivers/rtc/rtc-lib.c
@@ -117,4 +117,85 @@ int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time)
117} 117}
118EXPORT_SYMBOL(rtc_tm_to_time); 118EXPORT_SYMBOL(rtc_tm_to_time);
119 119
120
121/* Merge the valid (i.e. non-negative) fields of alarm into the current
122 * time. If the valid alarm fields are earlier than the equivalent
123 * fields in the time, carry one into the least significant invalid
124 * field, so that the alarm expiry is in the future. It assumes that the
125 * least significant invalid field is more significant than the most
126 * significant valid field, and that the seconds field is valid.
127 *
128 * This is used by alarms that take relative (rather than absolute)
129 * times, and/or have a simple binary second counter instead of
130 * day/hour/minute/sec registers.
131 */
132void rtc_merge_alarm(struct rtc_time *now, struct rtc_time *alarm)
133{
134 int *alarmp = &alarm->tm_sec;
135 int *timep = &now->tm_sec;
136 int carry_into, i;
137
138 /* Ignore everything past the 6th element (tm_year). */
139 for (i = 5; i > 0; i--) {
140 if (alarmp[i] < 0)
141 alarmp[i] = timep[i];
142 else
143 break;
144 }
145
146 /* No carry needed if all fields are valid. */
147 if (i == 5)
148 return;
149
150 for (carry_into = i + 1; i >= 0; i--) {
151 if (alarmp[i] < timep[i])
152 break;
153
154 if (alarmp[i] > timep[i])
155 return;
156 }
157
158 switch (carry_into) {
159 case 1:
160 alarm->tm_min++;
161
162 if (alarm->tm_min < 60)
163 return;
164
165 alarm->tm_min = 0;
166 /* fall-through */
167
168 case 2:
169 alarm->tm_hour++;
170
171 if (alarm->tm_hour < 60)
172 return;
173
174 alarm->tm_hour = 0;
175 /* fall-through */
176
177 case 3:
178 alarm->tm_mday++;
179
180 if (alarm->tm_mday <= rtc_days_in_month[alarm->tm_mon])
181 return;
182
183 alarm->tm_mday = 1;
184 /* fall-through */
185
186 case 4:
187 alarm->tm_mon++;
188
189 if (alarm->tm_mon <= 12)
190 return;
191
192 alarm->tm_mon = 1;
193 /* fall-through */
194
195 case 5:
196 alarm->tm_year++;
197 }
198}
199EXPORT_SYMBOL(rtc_merge_alarm);
200
120MODULE_LICENSE("GPL"); 201MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index eac5fb1fc02f..d59880d44fba 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -279,9 +279,8 @@ static int omap_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
279 local_irq_enable(); 279 local_irq_enable();
280 280
281 bcd2tm(&alm->time); 281 bcd2tm(&alm->time);
282 alm->pending = !!(rtc_read(OMAP_RTC_INTERRUPTS_REG) 282 alm->enabled = !!(rtc_read(OMAP_RTC_INTERRUPTS_REG)
283 & OMAP_RTC_INTERRUPTS_IT_ALARM); 283 & OMAP_RTC_INTERRUPTS_IT_ALARM);
284 alm->enabled = alm->pending && device_may_wakeup(dev);
285 284
286 return 0; 285 return 0;
287} 286}
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index a760cf69af90..4b72b8ef5d66 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -192,7 +192,7 @@ static int pcf8563_validate_client(struct i2c_client *client)
192 xfer = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); 192 xfer = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
193 193
194 if (xfer != ARRAY_SIZE(msgs)) { 194 if (xfer != ARRAY_SIZE(msgs)) {
195 dev_err(&client->adapter->dev, 195 dev_err(&client->dev,
196 "%s: could not read register 0x%02X\n", 196 "%s: could not read register 0x%02X\n",
197 __FUNCTION__, pattern[i].reg); 197 __FUNCTION__, pattern[i].reg);
198 198
@@ -203,7 +203,7 @@ static int pcf8563_validate_client(struct i2c_client *client)
203 203
204 if (value > pattern[i].max || 204 if (value > pattern[i].max ||
205 value < pattern[i].min) { 205 value < pattern[i].min) {
206 dev_dbg(&client->adapter->dev, 206 dev_dbg(&client->dev,
207 "%s: pattern=%d, reg=%x, mask=0x%02x, min=%d, " 207 "%s: pattern=%d, reg=%x, mask=0x%02x, min=%d, "
208 "max=%d, value=%d, raw=0x%02X\n", 208 "max=%d, value=%d, raw=0x%02X\n",
209 __FUNCTION__, i, pattern[i].reg, pattern[i].mask, 209 __FUNCTION__, i, pattern[i].reg, pattern[i].mask,
@@ -253,7 +253,7 @@ static int pcf8563_probe(struct i2c_adapter *adapter, int address, int kind)
253 253
254 int err = 0; 254 int err = 0;
255 255
256 dev_dbg(&adapter->dev, "%s\n", __FUNCTION__); 256 dev_dbg(adapter->class_dev.dev, "%s\n", __FUNCTION__);
257 257
258 if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) { 258 if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
259 err = -ENODEV; 259 err = -ENODEV;
diff --git a/drivers/rtc/rtc-proc.c b/drivers/rtc/rtc-proc.c
index d51d8f20e634..c272afd62173 100644
--- a/drivers/rtc/rtc-proc.c
+++ b/drivers/rtc/rtc-proc.c
@@ -65,7 +65,7 @@ static int rtc_proc_show(struct seq_file *seq, void *offset)
65 seq_printf(seq, "%02d\n", alrm.time.tm_mday); 65 seq_printf(seq, "%02d\n", alrm.time.tm_mday);
66 else 66 else
67 seq_printf(seq, "**\n"); 67 seq_printf(seq, "**\n");
68 seq_printf(seq, "alrm_wakeup\t: %s\n", 68 seq_printf(seq, "alarm_IRQ\t: %s\n",
69 alrm.enabled ? "yes" : "no"); 69 alrm.enabled ? "yes" : "no");
70 seq_printf(seq, "alrm_pending\t: %s\n", 70 seq_printf(seq, "alrm_pending\t: %s\n",
71 alrm.pending ? "yes" : "no"); 71 alrm.pending ? "yes" : "no");
@@ -120,7 +120,7 @@ static int rtc_proc_add_device(struct class_device *class_dev,
120 ent->owner = rtc->owner; 120 ent->owner = rtc->owner;
121 ent->data = class_dev; 121 ent->data = class_dev;
122 122
123 dev_info(class_dev->dev, "rtc intf: proc\n"); 123 dev_dbg(class_dev->dev, "rtc intf: proc\n");
124 } 124 }
125 else 125 else
126 rtc_dev = NULL; 126 rtc_dev = NULL;
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index e2c7698fdba3..1460f6b769f2 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -200,7 +200,7 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
200 struct i2c_client *client; 200 struct i2c_client *client;
201 struct rs5c372 *rs5c372; 201 struct rs5c372 *rs5c372;
202 202
203 dev_dbg(&adapter->dev, "%s\n", __FUNCTION__); 203 dev_dbg(adapter->class_dev.dev, "%s\n", __FUNCTION__);
204 204
205 if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) { 205 if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
206 err = -ENODEV; 206 err = -ENODEV;
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index e301dea57bb3..f406a2b55aea 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -191,6 +191,8 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
191 191
192 alm_en = readb(base + S3C2410_RTCALM); 192 alm_en = readb(base + S3C2410_RTCALM);
193 193
194 alrm->enabled = (alm_en & S3C2410_RTCALM_ALMEN) ? 1 : 0;
195
194 pr_debug("read alarm %02x %02x.%02x.%02x %02x/%02x/%02x\n", 196 pr_debug("read alarm %02x %02x.%02x.%02x %02x/%02x/%02x\n",
195 alm_en, 197 alm_en,
196 alm_tm->tm_year, alm_tm->tm_mon, alm_tm->tm_mday, 198 alm_tm->tm_year, alm_tm->tm_mon, alm_tm->tm_mday,
@@ -331,12 +333,8 @@ static int s3c_rtc_ioctl(struct device *dev,
331 333
332static int s3c_rtc_proc(struct device *dev, struct seq_file *seq) 334static int s3c_rtc_proc(struct device *dev, struct seq_file *seq)
333{ 335{
334 unsigned int rtcalm = readb(s3c_rtc_base + S3C2410_RTCALM);
335 unsigned int ticnt = readb(s3c_rtc_base + S3C2410_TICNT); 336 unsigned int ticnt = readb(s3c_rtc_base + S3C2410_TICNT);
336 337
337 seq_printf(seq, "alarm_IRQ\t: %s\n",
338 (rtcalm & S3C2410_RTCALM_ALMEN) ? "yes" : "no" );
339
340 seq_printf(seq, "periodic_IRQ\t: %s\n", 338 seq_printf(seq, "periodic_IRQ\t: %s\n",
341 (ticnt & S3C2410_TICNT_ENABLE) ? "yes" : "no" ); 339 (ticnt & S3C2410_TICNT_ENABLE) ? "yes" : "no" );
342 340
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index bd4d7d174ef4..9c8ead43a59c 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -289,9 +289,7 @@ static int sa1100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
289 289
290static int sa1100_rtc_proc(struct device *dev, struct seq_file *seq) 290static int sa1100_rtc_proc(struct device *dev, struct seq_file *seq)
291{ 291{
292 seq_printf(seq, "trim/divider\t: 0x%08lx\n", RTTR); 292 seq_printf(seq, "trim/divider\t: 0x%08x\n", (u32) RTTR);
293 seq_printf(seq, "alarm_IRQ\t: %s\n",
294 (RTSR & RTSR_ALE) ? "yes" : "no" );
295 seq_printf(seq, "update_IRQ\t: %s\n", 293 seq_printf(seq, "update_IRQ\t: %s\n",
296 (RTSR & RTSR_HZE) ? "yes" : "no"); 294 (RTSR & RTSR_HZE) ? "yes" : "no");
297 seq_printf(seq, "periodic_IRQ\t: %s\n", 295 seq_printf(seq, "periodic_IRQ\t: %s\n",
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 143302a8e79c..72ba1a70f35f 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -2,6 +2,7 @@
2 * SuperH On-Chip RTC Support 2 * SuperH On-Chip RTC Support
3 * 3 *
4 * Copyright (C) 2006 Paul Mundt 4 * Copyright (C) 2006 Paul Mundt
5 * Copyright (C) 2006 Jamie Lenehan
5 * 6 *
6 * Based on the old arch/sh/kernel/cpu/rtc.c by: 7 * Based on the old arch/sh/kernel/cpu/rtc.c by:
7 * 8 *
@@ -21,7 +22,10 @@
21#include <linux/seq_file.h> 22#include <linux/seq_file.h>
22#include <linux/interrupt.h> 23#include <linux/interrupt.h>
23#include <linux/spinlock.h> 24#include <linux/spinlock.h>
24#include <asm/io.h> 25#include <linux/io.h>
26
27#define DRV_NAME "sh-rtc"
28#define DRV_VERSION "0.1.2"
25 29
26#ifdef CONFIG_CPU_SH3 30#ifdef CONFIG_CPU_SH3
27#define rtc_reg_size sizeof(u16) 31#define rtc_reg_size sizeof(u16)
@@ -33,22 +37,26 @@
33 37
34#define RTC_REG(r) ((r) * rtc_reg_size) 38#define RTC_REG(r) ((r) * rtc_reg_size)
35 39
36#define R64CNT RTC_REG(0) 40#define R64CNT RTC_REG(0)
37#define RSECCNT RTC_REG(1) 41
38#define RMINCNT RTC_REG(2) 42#define RSECCNT RTC_REG(1) /* RTC sec */
39#define RHRCNT RTC_REG(3) 43#define RMINCNT RTC_REG(2) /* RTC min */
40#define RWKCNT RTC_REG(4) 44#define RHRCNT RTC_REG(3) /* RTC hour */
41#define RDAYCNT RTC_REG(5) 45#define RWKCNT RTC_REG(4) /* RTC week */
42#define RMONCNT RTC_REG(6) 46#define RDAYCNT RTC_REG(5) /* RTC day */
43#define RYRCNT RTC_REG(7) 47#define RMONCNT RTC_REG(6) /* RTC month */
44#define RSECAR RTC_REG(8) 48#define RYRCNT RTC_REG(7) /* RTC year */
45#define RMINAR RTC_REG(9) 49#define RSECAR RTC_REG(8) /* ALARM sec */
46#define RHRAR RTC_REG(10) 50#define RMINAR RTC_REG(9) /* ALARM min */
47#define RWKAR RTC_REG(11) 51#define RHRAR RTC_REG(10) /* ALARM hour */
48#define RDAYAR RTC_REG(12) 52#define RWKAR RTC_REG(11) /* ALARM week */
49#define RMONAR RTC_REG(13) 53#define RDAYAR RTC_REG(12) /* ALARM day */
50#define RCR1 RTC_REG(14) 54#define RMONAR RTC_REG(13) /* ALARM month */
51#define RCR2 RTC_REG(15) 55#define RCR1 RTC_REG(14) /* Control */
56#define RCR2 RTC_REG(15) /* Control */
57
58/* ALARM Bits - or with BCD encoded value */
59#define AR_ENB 0x80 /* Enable for alarm cmp */
52 60
53/* RCR1 Bits */ 61/* RCR1 Bits */
54#define RCR1_CF 0x80 /* Carry Flag */ 62#define RCR1_CF 0x80 /* Carry Flag */
@@ -71,22 +79,28 @@ struct sh_rtc {
71 unsigned int alarm_irq, periodic_irq, carry_irq; 79 unsigned int alarm_irq, periodic_irq, carry_irq;
72 struct rtc_device *rtc_dev; 80 struct rtc_device *rtc_dev;
73 spinlock_t lock; 81 spinlock_t lock;
82 int rearm_aie;
74}; 83};
75 84
76static irqreturn_t sh_rtc_interrupt(int irq, void *id) 85static irqreturn_t sh_rtc_interrupt(int irq, void *dev_id)
77{ 86{
78 struct platform_device *pdev = id; 87 struct platform_device *pdev = to_platform_device(dev_id);
79 struct sh_rtc *rtc = platform_get_drvdata(pdev); 88 struct sh_rtc *rtc = platform_get_drvdata(pdev);
80 unsigned int tmp, events = 0; 89 unsigned int tmp, events = 0;
81 90
82 spin_lock(&rtc->lock); 91 spin_lock(&rtc->lock);
83 92
84 tmp = readb(rtc->regbase + RCR1); 93 tmp = readb(rtc->regbase + RCR1);
94 tmp &= ~RCR1_CF;
85 95
86 if (tmp & RCR1_AF) 96 if (rtc->rearm_aie) {
87 events |= RTC_AF | RTC_IRQF; 97 if (tmp & RCR1_AF)
88 98 tmp &= ~RCR1_AF; /* try to clear AF again */
89 tmp &= ~(RCR1_CF | RCR1_AF); 99 else {
100 tmp |= RCR1_AIE; /* AF has cleared, rearm IRQ */
101 rtc->rearm_aie = 0;
102 }
103 }
90 104
91 writeb(tmp, rtc->regbase + RCR1); 105 writeb(tmp, rtc->regbase + RCR1);
92 106
@@ -97,9 +111,45 @@ static irqreturn_t sh_rtc_interrupt(int irq, void *id)
97 return IRQ_HANDLED; 111 return IRQ_HANDLED;
98} 112}
99 113
100static irqreturn_t sh_rtc_periodic(int irq, void *id) 114static irqreturn_t sh_rtc_alarm(int irq, void *dev_id)
115{
116 struct platform_device *pdev = to_platform_device(dev_id);
117 struct sh_rtc *rtc = platform_get_drvdata(pdev);
118 unsigned int tmp, events = 0;
119
120 spin_lock(&rtc->lock);
121
122 tmp = readb(rtc->regbase + RCR1);
123
124 /*
125 * If AF is set then the alarm has triggered. If we clear AF while
126 * the alarm time still matches the RTC time then AF will
127 * immediately be set again, and if AIE is enabled then the alarm
128 * interrupt will immediately be retrigger. So we clear AIE here
129 * and use rtc->rearm_aie so that the carry interrupt will keep
130 * trying to clear AF and once it stays cleared it'll re-enable
131 * AIE.
132 */
133 if (tmp & RCR1_AF) {
134 events |= RTC_AF | RTC_IRQF;
135
136 tmp &= ~(RCR1_AF|RCR1_AIE);
137
138 writeb(tmp, rtc->regbase + RCR1);
139
140 rtc->rearm_aie = 1;
141
142 rtc_update_irq(&rtc->rtc_dev->class_dev, 1, events);
143 }
144
145 spin_unlock(&rtc->lock);
146 return IRQ_HANDLED;
147}
148
149static irqreturn_t sh_rtc_periodic(int irq, void *dev_id)
101{ 150{
102 struct sh_rtc *rtc = dev_get_drvdata(id); 151 struct platform_device *pdev = to_platform_device(dev_id);
152 struct sh_rtc *rtc = platform_get_drvdata(pdev);
103 153
104 spin_lock(&rtc->lock); 154 spin_lock(&rtc->lock);
105 155
@@ -139,10 +189,11 @@ static inline void sh_rtc_setaie(struct device *dev, unsigned int enable)
139 189
140 tmp = readb(rtc->regbase + RCR1); 190 tmp = readb(rtc->regbase + RCR1);
141 191
142 if (enable) 192 if (!enable) {
143 tmp |= RCR1_AIE;
144 else
145 tmp &= ~RCR1_AIE; 193 tmp &= ~RCR1_AIE;
194 rtc->rearm_aie = 0;
195 } else if (rtc->rearm_aie == 0)
196 tmp |= RCR1_AIE;
146 197
147 writeb(tmp, rtc->regbase + RCR1); 198 writeb(tmp, rtc->regbase + RCR1);
148 199
@@ -177,7 +228,7 @@ static int sh_rtc_open(struct device *dev)
177 goto err_bad_carry; 228 goto err_bad_carry;
178 } 229 }
179 230
180 ret = request_irq(rtc->alarm_irq, sh_rtc_interrupt, IRQF_DISABLED, 231 ret = request_irq(rtc->alarm_irq, sh_rtc_alarm, IRQF_DISABLED,
181 "sh-rtc alarm", dev); 232 "sh-rtc alarm", dev);
182 if (unlikely(ret)) { 233 if (unlikely(ret)) {
183 dev_err(dev, "request alarm IRQ failed with %d, IRQ %d\n", 234 dev_err(dev, "request alarm IRQ failed with %d, IRQ %d\n",
@@ -200,6 +251,7 @@ static void sh_rtc_release(struct device *dev)
200 struct sh_rtc *rtc = dev_get_drvdata(dev); 251 struct sh_rtc *rtc = dev_get_drvdata(dev);
201 252
202 sh_rtc_setpie(dev, 0); 253 sh_rtc_setpie(dev, 0);
254 sh_rtc_setaie(dev, 0);
203 255
204 free_irq(rtc->periodic_irq, dev); 256 free_irq(rtc->periodic_irq, dev);
205 free_irq(rtc->carry_irq, dev); 257 free_irq(rtc->carry_irq, dev);
@@ -267,7 +319,7 @@ static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm)
267 tm->tm_hour = BCD2BIN(readb(rtc->regbase + RHRCNT)); 319 tm->tm_hour = BCD2BIN(readb(rtc->regbase + RHRCNT));
268 tm->tm_wday = BCD2BIN(readb(rtc->regbase + RWKCNT)); 320 tm->tm_wday = BCD2BIN(readb(rtc->regbase + RWKCNT));
269 tm->tm_mday = BCD2BIN(readb(rtc->regbase + RDAYCNT)); 321 tm->tm_mday = BCD2BIN(readb(rtc->regbase + RDAYCNT));
270 tm->tm_mon = BCD2BIN(readb(rtc->regbase + RMONCNT)); 322 tm->tm_mon = BCD2BIN(readb(rtc->regbase + RMONCNT)) - 1;
271 323
272#if defined(CONFIG_CPU_SH4) 324#if defined(CONFIG_CPU_SH4)
273 yr = readw(rtc->regbase + RYRCNT); 325 yr = readw(rtc->regbase + RYRCNT);
@@ -295,7 +347,7 @@ static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm)
295 "mday=%d, mon=%d, year=%d, wday=%d\n", 347 "mday=%d, mon=%d, year=%d, wday=%d\n",
296 __FUNCTION__, 348 __FUNCTION__,
297 tm->tm_sec, tm->tm_min, tm->tm_hour, 349 tm->tm_sec, tm->tm_min, tm->tm_hour,
298 tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday); 350 tm->tm_mday, tm->tm_mon + 1, tm->tm_year, tm->tm_wday);
299 351
300 if (rtc_valid_tm(tm) < 0) 352 if (rtc_valid_tm(tm) < 0)
301 dev_err(dev, "invalid date\n"); 353 dev_err(dev, "invalid date\n");
@@ -322,7 +374,7 @@ static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm)
322 writeb(BIN2BCD(tm->tm_hour), rtc->regbase + RHRCNT); 374 writeb(BIN2BCD(tm->tm_hour), rtc->regbase + RHRCNT);
323 writeb(BIN2BCD(tm->tm_wday), rtc->regbase + RWKCNT); 375 writeb(BIN2BCD(tm->tm_wday), rtc->regbase + RWKCNT);
324 writeb(BIN2BCD(tm->tm_mday), rtc->regbase + RDAYCNT); 376 writeb(BIN2BCD(tm->tm_mday), rtc->regbase + RDAYCNT);
325 writeb(BIN2BCD(tm->tm_mon), rtc->regbase + RMONCNT); 377 writeb(BIN2BCD(tm->tm_mon + 1), rtc->regbase + RMONCNT);
326 378
327#ifdef CONFIG_CPU_SH3 379#ifdef CONFIG_CPU_SH3
328 year = tm->tm_year % 100; 380 year = tm->tm_year % 100;
@@ -344,12 +396,136 @@ static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm)
344 return 0; 396 return 0;
345} 397}
346 398
399static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off)
400{
401 unsigned int byte;
402 int value = 0xff; /* return 0xff for ignored values */
403
404 byte = readb(rtc->regbase + reg_off);
405 if (byte & AR_ENB) {
406 byte &= ~AR_ENB; /* strip the enable bit */
407 value = BCD2BIN(byte);
408 }
409
410 return value;
411}
412
413static int sh_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
414{
415 struct platform_device *pdev = to_platform_device(dev);
416 struct sh_rtc *rtc = platform_get_drvdata(pdev);
417 struct rtc_time* tm = &wkalrm->time;
418
419 spin_lock_irq(&rtc->lock);
420
421 tm->tm_sec = sh_rtc_read_alarm_value(rtc, RSECAR);
422 tm->tm_min = sh_rtc_read_alarm_value(rtc, RMINAR);
423 tm->tm_hour = sh_rtc_read_alarm_value(rtc, RHRAR);
424 tm->tm_wday = sh_rtc_read_alarm_value(rtc, RWKAR);
425 tm->tm_mday = sh_rtc_read_alarm_value(rtc, RDAYAR);
426 tm->tm_mon = sh_rtc_read_alarm_value(rtc, RMONAR);
427 if (tm->tm_mon > 0)
428 tm->tm_mon -= 1; /* RTC is 1-12, tm_mon is 0-11 */
429 tm->tm_year = 0xffff;
430
431 spin_unlock_irq(&rtc->lock);
432
433 return 0;
434}
435
436static inline void sh_rtc_write_alarm_value(struct sh_rtc *rtc,
437 int value, int reg_off)
438{
439 /* < 0 for a value that is ignored */
440 if (value < 0)
441 writeb(0, rtc->regbase + reg_off);
442 else
443 writeb(BIN2BCD(value) | AR_ENB, rtc->regbase + reg_off);
444}
445
446static int sh_rtc_check_alarm(struct rtc_time* tm)
447{
448 /*
449 * The original rtc says anything > 0xc0 is "don't care" or "match
450 * all" - most users use 0xff but rtc-dev uses -1 for the same thing.
451 * The original rtc doesn't support years - some things use -1 and
452 * some 0xffff. We use -1 to make out tests easier.
453 */
454 if (tm->tm_year == 0xffff)
455 tm->tm_year = -1;
456 if (tm->tm_mon >= 0xff)
457 tm->tm_mon = -1;
458 if (tm->tm_mday >= 0xff)
459 tm->tm_mday = -1;
460 if (tm->tm_wday >= 0xff)
461 tm->tm_wday = -1;
462 if (tm->tm_hour >= 0xff)
463 tm->tm_hour = -1;
464 if (tm->tm_min >= 0xff)
465 tm->tm_min = -1;
466 if (tm->tm_sec >= 0xff)
467 tm->tm_sec = -1;
468
469 if (tm->tm_year > 9999 ||
470 tm->tm_mon >= 12 ||
471 tm->tm_mday == 0 || tm->tm_mday >= 32 ||
472 tm->tm_wday >= 7 ||
473 tm->tm_hour >= 24 ||
474 tm->tm_min >= 60 ||
475 tm->tm_sec >= 60)
476 return -EINVAL;
477
478 return 0;
479}
480
481static int sh_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
482{
483 struct platform_device *pdev = to_platform_device(dev);
484 struct sh_rtc *rtc = platform_get_drvdata(pdev);
485 unsigned int rcr1;
486 struct rtc_time *tm = &wkalrm->time;
487 int mon, err;
488
489 err = sh_rtc_check_alarm(tm);
490 if (unlikely(err < 0))
491 return err;
492
493 spin_lock_irq(&rtc->lock);
494
495 /* disable alarm interrupt and clear flag */
496 rcr1 = readb(rtc->regbase + RCR1);
497 rcr1 &= ~RCR1_AF;
498 writeb(rcr1 & ~RCR1_AIE, rtc->regbase + RCR1);
499
500 rtc->rearm_aie = 0;
501
502 /* set alarm time */
503 sh_rtc_write_alarm_value(rtc, tm->tm_sec, RSECAR);
504 sh_rtc_write_alarm_value(rtc, tm->tm_min, RMINAR);
505 sh_rtc_write_alarm_value(rtc, tm->tm_hour, RHRAR);
506 sh_rtc_write_alarm_value(rtc, tm->tm_wday, RWKAR);
507 sh_rtc_write_alarm_value(rtc, tm->tm_mday, RDAYAR);
508 mon = tm->tm_mon;
509 if (mon >= 0)
510 mon += 1;
511 sh_rtc_write_alarm_value(rtc, mon, RMONAR);
512
513 /* Restore interrupt activation status */
514 writeb(rcr1, rtc->regbase + RCR1);
515
516 spin_unlock_irq(&rtc->lock);
517
518 return 0;
519}
520
347static struct rtc_class_ops sh_rtc_ops = { 521static struct rtc_class_ops sh_rtc_ops = {
348 .open = sh_rtc_open, 522 .open = sh_rtc_open,
349 .release = sh_rtc_release, 523 .release = sh_rtc_release,
350 .ioctl = sh_rtc_ioctl, 524 .ioctl = sh_rtc_ioctl,
351 .read_time = sh_rtc_read_time, 525 .read_time = sh_rtc_read_time,
352 .set_time = sh_rtc_set_time, 526 .set_time = sh_rtc_set_time,
527 .read_alarm = sh_rtc_read_alarm,
528 .set_alarm = sh_rtc_set_alarm,
353 .proc = sh_rtc_proc, 529 .proc = sh_rtc_proc,
354}; 530};
355 531
@@ -442,7 +618,7 @@ static int __devexit sh_rtc_remove(struct platform_device *pdev)
442} 618}
443static struct platform_driver sh_rtc_platform_driver = { 619static struct platform_driver sh_rtc_platform_driver = {
444 .driver = { 620 .driver = {
445 .name = "sh-rtc", 621 .name = DRV_NAME,
446 .owner = THIS_MODULE, 622 .owner = THIS_MODULE,
447 }, 623 },
448 .probe = sh_rtc_probe, 624 .probe = sh_rtc_probe,
@@ -463,5 +639,6 @@ module_init(sh_rtc_init);
463module_exit(sh_rtc_exit); 639module_exit(sh_rtc_exit);
464 640
465MODULE_DESCRIPTION("SuperH on-chip RTC driver"); 641MODULE_DESCRIPTION("SuperH on-chip RTC driver");
466MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>"); 642MODULE_VERSION(DRV_VERSION);
643MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, Jamie Lenehan <lenehan@twibble.org>");
467MODULE_LICENSE("GPL"); 644MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-sysfs.c b/drivers/rtc/rtc-sysfs.c
index 625637b84d33..9418a59fb368 100644
--- a/drivers/rtc/rtc-sysfs.c
+++ b/drivers/rtc/rtc-sysfs.c
@@ -83,7 +83,7 @@ static int __devinit rtc_sysfs_add_device(struct class_device *class_dev,
83{ 83{
84 int err; 84 int err;
85 85
86 dev_info(class_dev->dev, "rtc intf: sysfs\n"); 86 dev_dbg(class_dev->dev, "rtc intf: sysfs\n");
87 87
88 err = sysfs_create_group(&class_dev->kobj, &rtc_attr_group); 88 err = sysfs_create_group(&class_dev->kobj, &rtc_attr_group);
89 if (err) 89 if (err)
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c
index 9a67487d086b..019ae255b0c8 100644
--- a/drivers/rtc/rtc-x1205.c
+++ b/drivers/rtc/rtc-x1205.c
@@ -372,7 +372,7 @@ static int x1205_validate_client(struct i2c_client *client)
372 }; 372 };
373 373
374 if ((xfer = i2c_transfer(client->adapter, msgs, 2)) != 2) { 374 if ((xfer = i2c_transfer(client->adapter, msgs, 2)) != 2) {
375 dev_err(&client->adapter->dev, 375 dev_err(&client->dev,
376 "%s: could not read register %x\n", 376 "%s: could not read register %x\n",
377 __FUNCTION__, probe_zero_pattern[i]); 377 __FUNCTION__, probe_zero_pattern[i]);
378 378
@@ -380,7 +380,7 @@ static int x1205_validate_client(struct i2c_client *client)
380 } 380 }
381 381
382 if ((buf & probe_zero_pattern[i+1]) != 0) { 382 if ((buf & probe_zero_pattern[i+1]) != 0) {
383 dev_err(&client->adapter->dev, 383 dev_err(&client->dev,
384 "%s: register=%02x, zero pattern=%d, value=%x\n", 384 "%s: register=%02x, zero pattern=%d, value=%x\n",
385 __FUNCTION__, probe_zero_pattern[i], i, buf); 385 __FUNCTION__, probe_zero_pattern[i], i, buf);
386 386
@@ -400,7 +400,7 @@ static int x1205_validate_client(struct i2c_client *client)
400 }; 400 };
401 401
402 if ((xfer = i2c_transfer(client->adapter, msgs, 2)) != 2) { 402 if ((xfer = i2c_transfer(client->adapter, msgs, 2)) != 2) {
403 dev_err(&client->adapter->dev, 403 dev_err(&client->dev,
404 "%s: could not read register %x\n", 404 "%s: could not read register %x\n",
405 __FUNCTION__, probe_limits_pattern[i].reg); 405 __FUNCTION__, probe_limits_pattern[i].reg);
406 406
@@ -411,7 +411,7 @@ static int x1205_validate_client(struct i2c_client *client)
411 411
412 if (value > probe_limits_pattern[i].max || 412 if (value > probe_limits_pattern[i].max ||
413 value < probe_limits_pattern[i].min) { 413 value < probe_limits_pattern[i].min) {
414 dev_dbg(&client->adapter->dev, 414 dev_dbg(&client->dev,
415 "%s: register=%x, lim pattern=%d, value=%d\n", 415 "%s: register=%x, lim pattern=%d, value=%d\n",
416 __FUNCTION__, probe_limits_pattern[i].reg, 416 __FUNCTION__, probe_limits_pattern[i].reg,
417 i, value); 417 i, value);
@@ -506,7 +506,7 @@ static int x1205_probe(struct i2c_adapter *adapter, int address, int kind)
506 struct i2c_client *client; 506 struct i2c_client *client;
507 struct rtc_device *rtc; 507 struct rtc_device *rtc;
508 508
509 dev_dbg(&adapter->dev, "%s\n", __FUNCTION__); 509 dev_dbg(adapter->class_dev.dev, "%s\n", __FUNCTION__);
510 510
511 if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) { 511 if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
512 err = -ENODEV; 512 err = -ENODEV;
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index c9321b920e90..25b5d7a66417 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -688,7 +688,7 @@ raw3215_probe (struct ccw_device *cdev)
688 raw->cdev = cdev; 688 raw->cdev = cdev;
689 raw->inbuf = (char *) raw + sizeof(struct raw3215_info); 689 raw->inbuf = (char *) raw + sizeof(struct raw3215_info);
690 memset(raw, 0, sizeof(struct raw3215_info)); 690 memset(raw, 0, sizeof(struct raw3215_info));
691 raw->buffer = (char *) kmalloc(RAW3215_BUFFER_SIZE, 691 raw->buffer = kmalloc(RAW3215_BUFFER_SIZE,
692 GFP_KERNEL|GFP_DMA); 692 GFP_KERNEL|GFP_DMA);
693 if (raw->buffer == NULL) { 693 if (raw->buffer == NULL) {
694 spin_lock(&raw3215_device_lock); 694 spin_lock(&raw3215_device_lock);
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index e3491a5f5219..3e86fd1756e5 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -377,7 +377,7 @@ do_kdsk_ioctl(struct kbd_data *kbd, struct kbentry __user *user_kbe,
377 if (!(key_map = kbd->key_maps[tmp.kb_table])) { 377 if (!(key_map = kbd->key_maps[tmp.kb_table])) {
378 int j; 378 int j;
379 379
380 key_map = (ushort *) kmalloc(sizeof(plain_map), 380 key_map = kmalloc(sizeof(plain_map),
381 GFP_KERNEL); 381 GFP_KERNEL);
382 if (!key_map) 382 if (!key_map)
383 return -ENOMEM; 383 return -ENOMEM;
diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c
index 732dfbdb85c4..f7c10d954ec6 100644
--- a/drivers/s390/char/sclp_cpi.c
+++ b/drivers/s390/char/sclp_cpi.c
@@ -127,7 +127,7 @@ cpi_prepare_req(void)
127 struct cpi_sccb *sccb; 127 struct cpi_sccb *sccb;
128 struct cpi_evbuf *evb; 128 struct cpi_evbuf *evb;
129 129
130 req = (struct sclp_req *) kmalloc(sizeof(struct sclp_req), GFP_KERNEL); 130 req = kmalloc(sizeof(struct sclp_req), GFP_KERNEL);
131 if (req == NULL) 131 if (req == NULL)
132 return ERR_PTR(-ENOMEM); 132 return ERR_PTR(-ENOMEM);
133 sccb = (struct cpi_sccb *) __get_free_page(GFP_KERNEL | GFP_DMA); 133 sccb = (struct cpi_sccb *) __get_free_page(GFP_KERNEL | GFP_DMA);
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index a62b00083d0c..5bb13a9d0898 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -295,7 +295,7 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
295 struct completion work; 295 struct completion work;
296 int rc; 296 int rc;
297 297
298 ap_msg.message = (void *) kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL); 298 ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
299 if (!ap_msg.message) 299 if (!ap_msg.message)
300 return -ENOMEM; 300 return -ENOMEM;
301 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 301 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
@@ -337,7 +337,7 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
337 struct completion work; 337 struct completion work;
338 int rc; 338 int rc;
339 339
340 ap_msg.message = (void *) kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL); 340 ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
341 if (!ap_msg.message) 341 if (!ap_msg.message)
342 return -ENOMEM; 342 return -ENOMEM;
343 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 343 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index b6a4ecdc8025..32e37014345c 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -279,7 +279,7 @@ static long zcrypt_pcica_modexpo(struct zcrypt_device *zdev,
279 struct completion work; 279 struct completion work;
280 int rc; 280 int rc;
281 281
282 ap_msg.message = (void *) kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL); 282 ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
283 if (!ap_msg.message) 283 if (!ap_msg.message)
284 return -ENOMEM; 284 return -ENOMEM;
285 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 285 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
@@ -321,7 +321,7 @@ static long zcrypt_pcica_modexpo_crt(struct zcrypt_device *zdev,
321 struct completion work; 321 struct completion work;
322 int rc; 322 int rc;
323 323
324 ap_msg.message = (void *) kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL); 324 ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
325 if (!ap_msg.message) 325 if (!ap_msg.message)
326 return -ENOMEM; 326 return -ENOMEM;
327 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 327 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 2da8b9381407..b7153c1e15cd 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -717,7 +717,7 @@ long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev, struct ica_xcRB *xcRB)
717 }; 717 };
718 int rc; 718 int rc;
719 719
720 ap_msg.message = (void *) kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL); 720 ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
721 if (!ap_msg.message) 721 if (!ap_msg.message)
722 return -ENOMEM; 722 return -ENOMEM;
723 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 723 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
index 3257c22dd79c..03cc263fe0da 100644
--- a/drivers/s390/net/ctcmain.c
+++ b/drivers/s390/net/ctcmain.c
@@ -1646,7 +1646,7 @@ add_channel(struct ccw_device *cdev, enum channel_types type)
1646 return -1; 1646 return -1;
1647 } 1647 }
1648 memset(ch, 0, sizeof (struct channel)); 1648 memset(ch, 0, sizeof (struct channel));
1649 if ((ch->ccw = (struct ccw1 *) kmalloc(8*sizeof(struct ccw1), 1649 if ((ch->ccw = kmalloc(8*sizeof(struct ccw1),
1650 GFP_KERNEL | GFP_DMA)) == NULL) { 1650 GFP_KERNEL | GFP_DMA)) == NULL) {
1651 kfree(ch); 1651 kfree(ch);
1652 ctc_pr_warn("ctc: Out of memory in add_channel\n"); 1652 ctc_pr_warn("ctc: Out of memory in add_channel\n");
@@ -1693,7 +1693,7 @@ add_channel(struct ccw_device *cdev, enum channel_types type)
1693 return -1; 1693 return -1;
1694 } 1694 }
1695 fsm_newstate(ch->fsm, CH_STATE_IDLE); 1695 fsm_newstate(ch->fsm, CH_STATE_IDLE);
1696 if ((ch->irb = (struct irb *) kmalloc(sizeof (struct irb), 1696 if ((ch->irb = kmalloc(sizeof (struct irb),
1697 GFP_KERNEL)) == NULL) { 1697 GFP_KERNEL)) == NULL) {
1698 ctc_pr_warn("ctc: Out of memory in add_channel\n"); 1698 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1699 kfree_fsm(ch->fsm); 1699 kfree_fsm(ch->fsm);
@@ -2535,7 +2535,7 @@ ctc_print_statistics(struct ctc_priv *priv)
2535 DBF_TEXT(trace, 4, __FUNCTION__); 2535 DBF_TEXT(trace, 4, __FUNCTION__);
2536 if (!priv) 2536 if (!priv)
2537 return; 2537 return;
2538 sbuf = (char *)kmalloc(2048, GFP_KERNEL); 2538 sbuf = kmalloc(2048, GFP_KERNEL);
2539 if (sbuf == NULL) 2539 if (sbuf == NULL)
2540 return; 2540 return;
2541 p = sbuf; 2541 p = sbuf;
diff --git a/drivers/s390/net/iucv.c b/drivers/s390/net/iucv.c
index 1476ce2b437c..229aeb5fc399 100644
--- a/drivers/s390/net/iucv.c
+++ b/drivers/s390/net/iucv.c
@@ -772,7 +772,7 @@ iucv_register_program (__u8 pgmname[16],
772 } 772 }
773 773
774 /* Allocate handler entry */ 774 /* Allocate handler entry */
775 new_handler = (handler *)kmalloc(sizeof(handler), GFP_ATOMIC); 775 new_handler = kmalloc(sizeof(handler), GFP_ATOMIC);
776 if (new_handler == NULL) { 776 if (new_handler == NULL) {
777 printk(KERN_WARNING "%s: storage allocation for new handler " 777 printk(KERN_WARNING "%s: storage allocation for new handler "
778 "failed.\n", __FUNCTION__); 778 "failed.\n", __FUNCTION__);
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 5d39b2df0cc4..85093b71f9fa 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -237,7 +237,7 @@ zfcp_device_setup(char *devstr)
237 return 0; 237 return 0;
238 238
239 len = strlen(devstr) + 1; 239 len = strlen(devstr) + 1;
240 str = (char *) kmalloc(len, GFP_KERNEL); 240 str = kmalloc(len, GFP_KERNEL);
241 if (!str) 241 if (!str)
242 goto err_out; 242 goto err_out;
243 memcpy(str, devstr, len); 243 memcpy(str, devstr, len);
diff --git a/drivers/sbus/char/vfc_dev.c b/drivers/sbus/char/vfc_dev.c
index 2722af5d3404..386e7de0b7e3 100644
--- a/drivers/sbus/char/vfc_dev.c
+++ b/drivers/sbus/char/vfc_dev.c
@@ -659,7 +659,7 @@ static int vfc_probe(void)
659 if (!cards) 659 if (!cards)
660 return -ENODEV; 660 return -ENODEV;
661 661
662 vfc_dev_lst = (struct vfc_dev **)kmalloc(sizeof(struct vfc_dev *) * 662 vfc_dev_lst = kmalloc(sizeof(struct vfc_dev *) *
663 (cards+1), 663 (cards+1),
664 GFP_KERNEL); 664 GFP_KERNEL);
665 if (vfc_dev_lst == NULL) 665 if (vfc_dev_lst == NULL)
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index ac108f9e2674..426cd6f49f5d 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -288,7 +288,7 @@ int aac_get_containers(struct aac_dev *dev)
288 288
289 if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS) 289 if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
290 maximum_num_containers = MAXIMUM_NUM_CONTAINERS; 290 maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
291 fsa_dev_ptr = (struct fsa_dev_info *) kmalloc( 291 fsa_dev_ptr = kmalloc(
292 sizeof(*fsa_dev_ptr) * maximum_num_containers, GFP_KERNEL); 292 sizeof(*fsa_dev_ptr) * maximum_num_containers, GFP_KERNEL);
293 if (!fsa_dev_ptr) { 293 if (!fsa_dev_ptr) {
294 aac_fib_free(fibptr); 294 aac_fib_free(fibptr);
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index d5cf8b91a0e7..6d305b2f854e 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -386,7 +386,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
386 * Ok now init the communication subsystem 386 * Ok now init the communication subsystem
387 */ 387 */
388 388
389 dev->queues = (struct aac_queue_block *) kmalloc(sizeof(struct aac_queue_block), GFP_KERNEL); 389 dev->queues = kmalloc(sizeof(struct aac_queue_block), GFP_KERNEL);
390 if (dev->queues == NULL) { 390 if (dev->queues == NULL) {
391 printk(KERN_ERR "Error could not allocate comm region.\n"); 391 printk(KERN_ERR "Error could not allocate comm region.\n");
392 return NULL; 392 return NULL;
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index d7a61a6bdaae..1d239f6c0103 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -699,7 +699,7 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
699#endif 699#endif
700 int i; 700 int i;
701 ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather */ 701 ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather */
702 SCpnt->host_scribble = (unsigned char *) kmalloc(512, GFP_KERNEL | GFP_DMA); 702 SCpnt->host_scribble = kmalloc(512, GFP_KERNEL | GFP_DMA);
703 sgpnt = (struct scatterlist *) SCpnt->request_buffer; 703 sgpnt = (struct scatterlist *) SCpnt->request_buffer;
704 cptr = (struct chain *) SCpnt->host_scribble; 704 cptr = (struct chain *) SCpnt->host_scribble;
705 if (cptr == NULL) { 705 if (cptr == NULL) {
diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c
index 46eed10b25d9..7d1fec620948 100644
--- a/drivers/scsi/aic7xxx_old.c
+++ b/drivers/scsi/aic7xxx_old.c
@@ -2565,7 +2565,7 @@ aic7xxx_allocate_scb(struct aic7xxx_host *p)
2565 } 2565 }
2566 } 2566 }
2567 scb_count = min( (i-1), p->scb_data->maxscbs - p->scb_data->numscbs); 2567 scb_count = min( (i-1), p->scb_data->maxscbs - p->scb_data->numscbs);
2568 scb_ap = (struct aic7xxx_scb *)kmalloc(sizeof (struct aic7xxx_scb) * scb_count 2568 scb_ap = kmalloc(sizeof (struct aic7xxx_scb) * scb_count
2569 + sizeof(struct aic7xxx_scb_dma), GFP_ATOMIC); 2569 + sizeof(struct aic7xxx_scb_dma), GFP_ATOMIC);
2570 if (scb_ap == NULL) 2570 if (scb_ap == NULL)
2571 return(0); 2571 return(0);
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index e95b367d09ed..a965ed3548d5 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -4319,7 +4319,7 @@ static int __devinit adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
4319 4319
4320 dprintkdbg(DBG_1, "Allocate %i pages for SG tables\n", pages); 4320 dprintkdbg(DBG_1, "Allocate %i pages for SG tables\n", pages);
4321 while (pages--) { 4321 while (pages--) {
4322 ptr = (struct SGentry *)kmalloc(PAGE_SIZE, GFP_KERNEL); 4322 ptr = kmalloc(PAGE_SIZE, GFP_KERNEL);
4323 if (!ptr) { 4323 if (!ptr) {
4324 adapter_sg_tables_free(acb); 4324 adapter_sg_tables_free(acb);
4325 return 1; 4325 return 1;
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 60b1b434eba7..365db537a28d 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -297,7 +297,7 @@ static void adpt_inquiry(adpt_hba* pHba)
297 s32 rcode; 297 s32 rcode;
298 298
299 memset(msg, 0, sizeof(msg)); 299 memset(msg, 0, sizeof(msg));
300 buf = (u8*)kmalloc(80,GFP_KERNEL|ADDR32); 300 buf = kmalloc(80,GFP_KERNEL|ADDR32);
301 if(!buf){ 301 if(!buf){
302 printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name); 302 printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
303 return; 303 return;
@@ -1311,7 +1311,7 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
1311 schedule_timeout_uninterruptible(1); 1311 schedule_timeout_uninterruptible(1);
1312 } while (m == EMPTY_QUEUE); 1312 } while (m == EMPTY_QUEUE);
1313 1313
1314 status = (u8*)kmalloc(4, GFP_KERNEL|ADDR32); 1314 status = kmalloc(4, GFP_KERNEL|ADDR32);
1315 if(status == NULL) { 1315 if(status == NULL) {
1316 adpt_send_nop(pHba, m); 1316 adpt_send_nop(pHba, m);
1317 printk(KERN_ERR"IOP reset failed - no free memory.\n"); 1317 printk(KERN_ERR"IOP reset failed - no free memory.\n");
@@ -1444,7 +1444,7 @@ static int adpt_i2o_parse_lct(adpt_hba* pHba)
1444 } 1444 }
1445 continue; 1445 continue;
1446 } 1446 }
1447 d = (struct i2o_device *)kmalloc(sizeof(struct i2o_device), GFP_KERNEL); 1447 d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
1448 if(d==NULL) 1448 if(d==NULL)
1449 { 1449 {
1450 printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name); 1450 printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
@@ -2425,7 +2425,7 @@ static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2425 pDev = pDev->next_lun; 2425 pDev = pDev->next_lun;
2426 } 2426 }
2427 if(!pDev ) { // Something new add it 2427 if(!pDev ) { // Something new add it
2428 d = (struct i2o_device *)kmalloc(sizeof(struct i2o_device), GFP_KERNEL); 2428 d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
2429 if(d==NULL) 2429 if(d==NULL)
2430 { 2430 {
2431 printk(KERN_CRIT "Out of memory for I2O device data.\n"); 2431 printk(KERN_CRIT "Out of memory for I2O device data.\n");
@@ -2728,7 +2728,7 @@ static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2728 2728
2729 kfree(pHba->reply_pool); 2729 kfree(pHba->reply_pool);
2730 2730
2731 pHba->reply_pool = (u32*)kmalloc(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4, GFP_KERNEL|ADDR32); 2731 pHba->reply_pool = kmalloc(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4, GFP_KERNEL|ADDR32);
2732 if(!pHba->reply_pool){ 2732 if(!pHba->reply_pool){
2733 printk(KERN_ERR"%s: Could not allocate reply pool\n",pHba->name); 2733 printk(KERN_ERR"%s: Could not allocate reply pool\n",pHba->name);
2734 return -1; 2734 return -1;
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index f160357e37a6..d561663fb4e4 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -2828,7 +2828,7 @@ static int i91u_detect(struct scsi_host_template * tpnt)
2828 2828
2829 for (; tul_num_scb >= MAX_TARGETS + 3; tul_num_scb--) { 2829 for (; tul_num_scb >= MAX_TARGETS + 3; tul_num_scb--) {
2830 i = tul_num_ch * tul_num_scb * sizeof(SCB); 2830 i = tul_num_ch * tul_num_scb * sizeof(SCB);
2831 if ((tul_scb = (SCB *) kmalloc(i, GFP_ATOMIC | GFP_DMA)) != NULL) 2831 if ((tul_scb = kmalloc(i, GFP_ATOMIC | GFP_DMA)) != NULL)
2832 break; 2832 break;
2833 } 2833 }
2834 if (tul_scb == NULL) { 2834 if (tul_scb == NULL) {
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 824fe080d1dc..7d2311067903 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -5777,7 +5777,7 @@ static int osst_probe(struct device *dev)
5777 dev_num = i; 5777 dev_num = i;
5778 5778
5779 /* allocate a struct osst_tape for this device */ 5779 /* allocate a struct osst_tape for this device */
5780 tpnt = (struct osst_tape *)kmalloc(sizeof(struct osst_tape), GFP_ATOMIC); 5780 tpnt = kmalloc(sizeof(struct osst_tape), GFP_ATOMIC);
5781 if (tpnt == NULL) { 5781 if (tpnt == NULL) {
5782 write_unlock(&os_scsi_tapes_lock); 5782 write_unlock(&os_scsi_tapes_lock);
5783 printk(KERN_ERR "osst :E: Can't allocate device descriptor, device not attached.\n"); 5783 printk(KERN_ERR "osst :E: Can't allocate device descriptor, device not attached.\n");
diff --git a/drivers/scsi/pluto.c b/drivers/scsi/pluto.c
index aa60a5f1fbc3..3b2e1a53e6e2 100644
--- a/drivers/scsi/pluto.c
+++ b/drivers/scsi/pluto.c
@@ -117,7 +117,7 @@ int __init pluto_detect(struct scsi_host_template *tpnt)
117#endif 117#endif
118 return 0; 118 return 0;
119 } 119 }
120 fcs = (struct ctrl_inquiry *) kmalloc (sizeof (struct ctrl_inquiry) * fcscount, GFP_DMA); 120 fcs = kmalloc(sizeof (struct ctrl_inquiry) * fcscount, GFP_DMA);
121 if (!fcs) { 121 if (!fcs) {
122 printk ("PLUTO: Not enough memory to probe\n"); 122 printk ("PLUTO: Not enough memory to probe\n");
123 return 0; 123 return 0;
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index d1268cb46837..0578ba42718b 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -546,7 +546,7 @@ int sr_is_xa(Scsi_CD *cd)
546 if (!xa_test) 546 if (!xa_test)
547 return 0; 547 return 0;
548 548
549 raw_sector = (unsigned char *) kmalloc(2048, GFP_KERNEL | SR_GFP_DMA(cd)); 549 raw_sector = kmalloc(2048, GFP_KERNEL | SR_GFP_DMA(cd));
550 if (!raw_sector) 550 if (!raw_sector)
551 return -ENOMEM; 551 return -ENOMEM;
552 if (0 == sr_read_sector(cd, cd->ms_offset + 16, 552 if (0 == sr_read_sector(cd, cd->ms_offset + 16,
diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c
index a3e9d0f2eb5b..4eb3da996b36 100644
--- a/drivers/scsi/sr_vendor.c
+++ b/drivers/scsi/sr_vendor.c
@@ -117,7 +117,7 @@ int sr_set_blocklength(Scsi_CD *cd, int blocklength)
117 density = (blocklength > 2048) ? 0x81 : 0x83; 117 density = (blocklength > 2048) ? 0x81 : 0x83;
118#endif 118#endif
119 119
120 buffer = (unsigned char *) kmalloc(512, GFP_KERNEL | GFP_DMA); 120 buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
121 if (!buffer) 121 if (!buffer)
122 return -ENOMEM; 122 return -ENOMEM;
123 123
@@ -164,7 +164,7 @@ int sr_cd_check(struct cdrom_device_info *cdi)
164 if (cd->cdi.mask & CDC_MULTI_SESSION) 164 if (cd->cdi.mask & CDC_MULTI_SESSION)
165 return 0; 165 return 0;
166 166
167 buffer = (unsigned char *) kmalloc(512, GFP_KERNEL | GFP_DMA); 167 buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
168 if (!buffer) 168 if (!buffer)
169 return -ENOMEM; 169 return -ENOMEM;
170 170
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 940fa1e6f994..21cd4c7f5289 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -5545,7 +5545,7 @@ int sym_hcb_attach(struct Scsi_Host *shost, struct sym_fw *fw, struct sym_nvram
5545 /* 5545 /*
5546 * Allocate the array of lists of CCBs hashed by DSA. 5546 * Allocate the array of lists of CCBs hashed by DSA.
5547 */ 5547 */
5548 np->ccbh = kcalloc(sizeof(struct sym_ccb **), CCB_HASH_SIZE, GFP_KERNEL); 5548 np->ccbh = kcalloc(CCB_HASH_SIZE, sizeof(struct sym_ccb **), GFP_KERNEL);
5549 if (!np->ccbh) 5549 if (!np->ccbh)
5550 goto attach_failed; 5550 goto attach_failed;
5551 5551
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index fc12d5df10e2..2978c09860ee 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -151,32 +151,6 @@ config SERIAL_8250_MANY_PORTS
151 say N here to save some memory. You can also say Y if you have an 151 say N here to save some memory. You can also say Y if you have an
152 "intelligent" multiport card such as Cyclades, Digiboards, etc. 152 "intelligent" multiport card such as Cyclades, Digiboards, etc.
153 153
154config SERIAL_8250_SHARE_IRQ
155 bool "Support for sharing serial interrupts"
156 depends on SERIAL_8250_EXTENDED
157 help
158 Some serial boards have hardware support which allows multiple dumb
159 serial ports on the same board to share a single IRQ. To enable
160 support for this in the serial driver, say Y here.
161
162config SERIAL_8250_DETECT_IRQ
163 bool "Autodetect IRQ on standard ports (unsafe)"
164 depends on SERIAL_8250_EXTENDED
165 help
166 Say Y here if you want the kernel to try to guess which IRQ
167 to use for your serial port.
168
169 This is considered unsafe; it is far better to configure the IRQ in
170 a boot script using the setserial command.
171
172 If unsure, say N.
173
174config SERIAL_8250_RSA
175 bool "Support RSA serial ports"
176 depends on SERIAL_8250_EXTENDED
177 help
178 ::: To be written :::
179
180# 154#
181# Multi-port serial cards 155# Multi-port serial cards
182# 156#
@@ -199,7 +173,6 @@ config SERIAL_8250_ACCENT
199 To compile this driver as a module, choose M here: the module 173 To compile this driver as a module, choose M here: the module
200 will be called 8250_accent. 174 will be called 8250_accent.
201 175
202
203config SERIAL_8250_BOCA 176config SERIAL_8250_BOCA
204 tristate "Support Boca cards" 177 tristate "Support Boca cards"
205 depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS 178 depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
@@ -230,6 +203,32 @@ config SERIAL_8250_HUB6
230 To compile this driver as a module, choose M here: the module 203 To compile this driver as a module, choose M here: the module
231 will be called 8250_hub6. 204 will be called 8250_hub6.
232 205
206config SERIAL_8250_SHARE_IRQ
207 bool "Support for sharing serial interrupts"
208 depends on SERIAL_8250_EXTENDED
209 help
210 Some serial boards have hardware support which allows multiple dumb
211 serial ports on the same board to share a single IRQ. To enable
212 support for this in the serial driver, say Y here.
213
214config SERIAL_8250_DETECT_IRQ
215 bool "Autodetect IRQ on standard ports (unsafe)"
216 depends on SERIAL_8250_EXTENDED
217 help
218 Say Y here if you want the kernel to try to guess which IRQ
219 to use for your serial port.
220
221 This is considered unsafe; it is far better to configure the IRQ in
222 a boot script using the setserial command.
223
224 If unsure, say N.
225
226config SERIAL_8250_RSA
227 bool "Support RSA serial ports"
228 depends on SERIAL_8250_EXTENDED
229 help
230 ::: To be written :::
231
233config SERIAL_8250_MCA 232config SERIAL_8250_MCA
234 tristate "Support 8250-type ports on MCA buses" 233 tristate "Support 8250-type ports on MCA buses"
235 depends on SERIAL_8250 != n && MCA 234 depends on SERIAL_8250 != n && MCA
@@ -664,7 +663,7 @@ config V850E_UART
664 663
665config V850E_UARTB 664config V850E_UARTB
666 bool 665 bool
667 depends V850E_UART && V850E_ME2 666 depends on V850E_UART && V850E_ME2
668 default y 667 default y
669 668
670config V850E_UART_CONSOLE 669config V850E_UART_CONSOLE
@@ -910,7 +909,7 @@ config SERIAL_M32R_PLDSIO
910 909
911config SERIAL_TXX9 910config SERIAL_TXX9
912 bool "TMPTX39XX/49XX SIO support" 911 bool "TMPTX39XX/49XX SIO support"
913 depends HAS_TXX9_SERIAL 912 depends on HAS_TXX9_SERIAL
914 select SERIAL_CORE 913 select SERIAL_CORE
915 default y 914 default y
916 915
diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c
index 7d623003e65e..71e6a24d8c28 100644
--- a/drivers/serial/icom.c
+++ b/drivers/serial/icom.c
@@ -1510,7 +1510,7 @@ static int __devinit icom_probe(struct pci_dev *dev,
1510 } 1510 }
1511 1511
1512 if ( (retval = pci_request_regions(dev, "icom"))) { 1512 if ( (retval = pci_request_regions(dev, "icom"))) {
1513 dev_err(&dev->dev, "pci_request_region FAILED\n"); 1513 dev_err(&dev->dev, "pci_request_regions FAILED\n");
1514 pci_disable_device(dev); 1514 pci_disable_device(dev);
1515 return retval; 1515 return retval;
1516 } 1516 }
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 9031b57f12dd..c53b69610a51 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -319,6 +319,28 @@ static void sci_init_pins_scif(struct uart_port *port, unsigned int cflag)
319 319
320 sci_out(port, SCFCR, fcr_val); 320 sci_out(port, SCFCR, fcr_val);
321} 321}
322#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
323static void sci_init_pins_scif(struct uart_port *port, unsigned int cflag)
324{
325 unsigned int fcr_val = 0;
326
327 if (cflag & CRTSCTS) {
328 fcr_val |= SCFCR_MCE;
329
330 ctrl_outw(0x0000, PORT_PSCR);
331 } else {
332 unsigned short data;
333
334 data = ctrl_inw(PORT_PSCR);
335 data &= 0x033f;
336 data |= 0x0400;
337 ctrl_outw(data, PORT_PSCR);
338
339 ctrl_outw(ctrl_inw(SCSPTR0) & 0x17, SCSPTR0);
340 }
341
342 sci_out(port, SCFCR, fcr_val);
343}
322#else 344#else
323/* For SH7750 */ 345/* For SH7750 */
324static void sci_init_pins_scif(struct uart_port *port, unsigned int cflag) 346static void sci_init_pins_scif(struct uart_port *port, unsigned int cflag)
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h
index e4557cc4f74b..77f7d6351ab1 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/serial/sh-sci.h
@@ -90,6 +90,13 @@
90# define SCSPTR3 0xffe30010 /* 16 bit SCIF */ 90# define SCSPTR3 0xffe30010 /* 16 bit SCIF */
91# define SCSCR_INIT(port) 0x32 /* TIE=0,RIE=0,TE=1,RE=1,REIE=0,CKE=1 */ 91# define SCSCR_INIT(port) 0x32 /* TIE=0,RIE=0,TE=1,RE=1,REIE=0,CKE=1 */
92# define SCIF_ONLY 92# define SCIF_ONLY
93#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
94# define SCPDR0 0xA405013E /* 16 bit SCIF0 PSDR */
95# define SCSPTR0 SCPDR0
96# define SCIF_ORER 0x0001 /* overrun error bit */
97# define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
98# define SCIF_ONLY
99# define PORT_PSCR 0xA405011E
93#elif defined(CONFIG_CPU_SUBTYPE_SH4_202) 100#elif defined(CONFIG_CPU_SUBTYPE_SH4_202)
94# define SCSPTR2 0xffe80020 /* 16 bit SCIF */ 101# define SCSPTR2 0xffe80020 /* 16 bit SCIF */
95# define SCIF_ORER 0x0001 /* overrun error bit */ 102# define SCIF_ORER 0x0001 /* overrun error bit */
@@ -495,6 +502,7 @@ static inline int sci_rxd_in(struct uart_port *port)
495 return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ 502 return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
496 if (port->mapbase == 0xfe620000) 503 if (port->mapbase == 0xfe620000)
497 return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */ 504 return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
505 return 1;
498} 506}
499#elif defined(CONFIG_CPU_SUBTYPE_SH7300) 507#elif defined(CONFIG_CPU_SUBTYPE_SH7300)
500static inline int sci_rxd_in(struct uart_port *port) 508static inline int sci_rxd_in(struct uart_port *port)
@@ -521,6 +529,13 @@ static inline int sci_rxd_in(struct uart_port *port)
521 return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */ 529 return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
522 return 1; 530 return 1;
523} 531}
532#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
533static inline int sci_rxd_in(struct uart_port *port)
534{
535 if (port->mapbase == 0xffe00000)
536 return ctrl_inb(SCPDR0) & 0x0001 ? 1 : 0; /* SCIF0 */
537 return 1;
538}
524#elif defined(CONFIG_CPU_SUBTYPE_ST40STB1) 539#elif defined(CONFIG_CPU_SUBTYPE_ST40STB1)
525static inline int sci_rxd_in(struct uart_port *port) 540static inline int sci_rxd_in(struct uart_port *port)
526{ 541{
@@ -550,6 +565,7 @@ static inline int sci_rxd_in(struct uart_port *port)
550 return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ 565 return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
551 if (port->mapbase == 0xff925000) 566 if (port->mapbase == 0xff925000)
552 return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */ 567 return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
568 return 1;
553} 569}
554#elif defined(CONFIG_CPU_SUBTYPE_SH7780) 570#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
555static inline int sci_rxd_in(struct uart_port *port) 571static inline int sci_rxd_in(struct uart_port *port)
@@ -558,6 +574,7 @@ static inline int sci_rxd_in(struct uart_port *port)
558 return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */ 574 return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
559 if (port->mapbase == 0xffe10000) 575 if (port->mapbase == 0xffe10000)
560 return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ 576 return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
577 return 1;
561} 578}
562#elif defined(CONFIG_CPU_SUBTYPE_SH7206) 579#elif defined(CONFIG_CPU_SUBTYPE_SH7206)
563static inline int sci_rxd_in(struct uart_port *port) 580static inline int sci_rxd_in(struct uart_port *port)
@@ -570,6 +587,7 @@ static inline int sci_rxd_in(struct uart_port *port)
570 return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */ 587 return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
571 if (port->mapbase == 0xfffe9800) 588 if (port->mapbase == 0xfffe9800)
572 return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */ 589 return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
590 return 1;
573} 591}
574#elif defined(CONFIG_CPU_SUBTYPE_SH7619) 592#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
575static inline int sci_rxd_in(struct uart_port *port) 593static inline int sci_rxd_in(struct uart_port *port)
@@ -580,6 +598,7 @@ static inline int sci_rxd_in(struct uart_port *port)
580 return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ 598 return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
581 if (port->mapbase == 0xf8420000) 599 if (port->mapbase == 0xf8420000)
582 return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */ 600 return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
601 return 1;
583} 602}
584#endif 603#endif
585 604
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 494d9b856488..6ed3f1da9296 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -49,6 +49,14 @@ MODULE_LICENSE("GPL");
49#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK) 49#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
50#define IS_DMA_ALIGNED(x) (((u32)(x)&0x07)==0) 50#define IS_DMA_ALIGNED(x) (((u32)(x)&0x07)==0)
51 51
52/* for testing SSCR1 changes that require SSP restart, basically
53 * everything except the service and interrupt enables */
54#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_EBCEI | SSCR1_SCFR \
55 | SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
56 | SSCR1_RWOT | SSCR1_TRAIL | SSCR1_PINTE \
57 | SSCR1_STRF | SSCR1_EFWR |SSCR1_RFT \
58 | SSCR1_TFT | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
59
52#define DEFINE_SSP_REG(reg, off) \ 60#define DEFINE_SSP_REG(reg, off) \
53static inline u32 read_##reg(void *p) { return __raw_readl(p + (off)); } \ 61static inline u32 read_##reg(void *p) { return __raw_readl(p + (off)); } \
54static inline void write_##reg(u32 v, void *p) { __raw_writel(v, p + (off)); } 62static inline void write_##reg(u32 v, void *p) { __raw_writel(v, p + (off)); }
@@ -123,8 +131,8 @@ struct driver_data {
123 u8 n_bytes; 131 u8 n_bytes;
124 u32 dma_width; 132 u32 dma_width;
125 int cs_change; 133 int cs_change;
126 void (*write)(struct driver_data *drv_data); 134 int (*write)(struct driver_data *drv_data);
127 void (*read)(struct driver_data *drv_data); 135 int (*read)(struct driver_data *drv_data);
128 irqreturn_t (*transfer_handler)(struct driver_data *drv_data); 136 irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
129 void (*cs_control)(u32 command); 137 void (*cs_control)(u32 command);
130}; 138};
@@ -132,7 +140,6 @@ struct driver_data {
132struct chip_data { 140struct chip_data {
133 u32 cr0; 141 u32 cr0;
134 u32 cr1; 142 u32 cr1;
135 u32 to;
136 u32 psp; 143 u32 psp;
137 u32 timeout; 144 u32 timeout;
138 u8 n_bytes; 145 u8 n_bytes;
@@ -143,8 +150,8 @@ struct chip_data {
143 u8 enable_dma; 150 u8 enable_dma;
144 u8 bits_per_word; 151 u8 bits_per_word;
145 u32 speed_hz; 152 u32 speed_hz;
146 void (*write)(struct driver_data *drv_data); 153 int (*write)(struct driver_data *drv_data);
147 void (*read)(struct driver_data *drv_data); 154 int (*read)(struct driver_data *drv_data);
148 void (*cs_control)(u32 command); 155 void (*cs_control)(u32 command);
149}; 156};
150 157
@@ -166,114 +173,118 @@ static int flush(struct driver_data *drv_data)
166 return limit; 173 return limit;
167} 174}
168 175
169static void restore_state(struct driver_data *drv_data)
170{
171 void *reg = drv_data->ioaddr;
172
173 /* Clear status and disable clock */
174 write_SSSR(drv_data->clear_sr, reg);
175 write_SSCR0(drv_data->cur_chip->cr0 & ~SSCR0_SSE, reg);
176
177 /* Load the registers */
178 write_SSCR1(drv_data->cur_chip->cr1, reg);
179 write_SSCR0(drv_data->cur_chip->cr0, reg);
180 if (drv_data->ssp_type != PXA25x_SSP) {
181 write_SSTO(0, reg);
182 write_SSPSP(drv_data->cur_chip->psp, reg);
183 }
184}
185
186static void null_cs_control(u32 command) 176static void null_cs_control(u32 command)
187{ 177{
188} 178}
189 179
190static void null_writer(struct driver_data *drv_data) 180static int null_writer(struct driver_data *drv_data)
191{ 181{
192 void *reg = drv_data->ioaddr; 182 void *reg = drv_data->ioaddr;
193 u8 n_bytes = drv_data->n_bytes; 183 u8 n_bytes = drv_data->n_bytes;
194 184
195 while ((read_SSSR(reg) & SSSR_TNF) 185 if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00)
196 && (drv_data->tx < drv_data->tx_end)) { 186 || (drv_data->tx == drv_data->tx_end))
197 write_SSDR(0, reg); 187 return 0;
198 drv_data->tx += n_bytes; 188
199 } 189 write_SSDR(0, reg);
190 drv_data->tx += n_bytes;
191
192 return 1;
200} 193}
201 194
202static void null_reader(struct driver_data *drv_data) 195static int null_reader(struct driver_data *drv_data)
203{ 196{
204 void *reg = drv_data->ioaddr; 197 void *reg = drv_data->ioaddr;
205 u8 n_bytes = drv_data->n_bytes; 198 u8 n_bytes = drv_data->n_bytes;
206 199
207 while ((read_SSSR(reg) & SSSR_RNE) 200 while ((read_SSSR(reg) & SSSR_RNE)
208 && (drv_data->rx < drv_data->rx_end)) { 201 && (drv_data->rx < drv_data->rx_end)) {
209 read_SSDR(reg); 202 read_SSDR(reg);
210 drv_data->rx += n_bytes; 203 drv_data->rx += n_bytes;
211 } 204 }
205
206 return drv_data->rx == drv_data->rx_end;
212} 207}
213 208
214static void u8_writer(struct driver_data *drv_data) 209static int u8_writer(struct driver_data *drv_data)
215{ 210{
216 void *reg = drv_data->ioaddr; 211 void *reg = drv_data->ioaddr;
217 212
218 while ((read_SSSR(reg) & SSSR_TNF) 213 if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00)
219 && (drv_data->tx < drv_data->tx_end)) { 214 || (drv_data->tx == drv_data->tx_end))
220 write_SSDR(*(u8 *)(drv_data->tx), reg); 215 return 0;
221 ++drv_data->tx; 216
222 } 217 write_SSDR(*(u8 *)(drv_data->tx), reg);
218 ++drv_data->tx;
219
220 return 1;
223} 221}
224 222
225static void u8_reader(struct driver_data *drv_data) 223static int u8_reader(struct driver_data *drv_data)
226{ 224{
227 void *reg = drv_data->ioaddr; 225 void *reg = drv_data->ioaddr;
228 226
229 while ((read_SSSR(reg) & SSSR_RNE) 227 while ((read_SSSR(reg) & SSSR_RNE)
230 && (drv_data->rx < drv_data->rx_end)) { 228 && (drv_data->rx < drv_data->rx_end)) {
231 *(u8 *)(drv_data->rx) = read_SSDR(reg); 229 *(u8 *)(drv_data->rx) = read_SSDR(reg);
232 ++drv_data->rx; 230 ++drv_data->rx;
233 } 231 }
232
233 return drv_data->rx == drv_data->rx_end;
234} 234}
235 235
236static void u16_writer(struct driver_data *drv_data) 236static int u16_writer(struct driver_data *drv_data)
237{ 237{
238 void *reg = drv_data->ioaddr; 238 void *reg = drv_data->ioaddr;
239 239
240 while ((read_SSSR(reg) & SSSR_TNF) 240 if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00)
241 && (drv_data->tx < drv_data->tx_end)) { 241 || (drv_data->tx == drv_data->tx_end))
242 write_SSDR(*(u16 *)(drv_data->tx), reg); 242 return 0;
243 drv_data->tx += 2; 243
244 } 244 write_SSDR(*(u16 *)(drv_data->tx), reg);
245 drv_data->tx += 2;
246
247 return 1;
245} 248}
246 249
247static void u16_reader(struct driver_data *drv_data) 250static int u16_reader(struct driver_data *drv_data)
248{ 251{
249 void *reg = drv_data->ioaddr; 252 void *reg = drv_data->ioaddr;
250 253
251 while ((read_SSSR(reg) & SSSR_RNE) 254 while ((read_SSSR(reg) & SSSR_RNE)
252 && (drv_data->rx < drv_data->rx_end)) { 255 && (drv_data->rx < drv_data->rx_end)) {
253 *(u16 *)(drv_data->rx) = read_SSDR(reg); 256 *(u16 *)(drv_data->rx) = read_SSDR(reg);
254 drv_data->rx += 2; 257 drv_data->rx += 2;
255 } 258 }
259
260 return drv_data->rx == drv_data->rx_end;
256} 261}
257static void u32_writer(struct driver_data *drv_data) 262
263static int u32_writer(struct driver_data *drv_data)
258{ 264{
259 void *reg = drv_data->ioaddr; 265 void *reg = drv_data->ioaddr;
260 266
261 while ((read_SSSR(reg) & SSSR_TNF) 267 if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00)
262 && (drv_data->tx < drv_data->tx_end)) { 268 || (drv_data->tx == drv_data->tx_end))
263 write_SSDR(*(u32 *)(drv_data->tx), reg); 269 return 0;
264 drv_data->tx += 4; 270
265 } 271 write_SSDR(*(u32 *)(drv_data->tx), reg);
272 drv_data->tx += 4;
273
274 return 1;
266} 275}
267 276
268static void u32_reader(struct driver_data *drv_data) 277static int u32_reader(struct driver_data *drv_data)
269{ 278{
270 void *reg = drv_data->ioaddr; 279 void *reg = drv_data->ioaddr;
271 280
272 while ((read_SSSR(reg) & SSSR_RNE) 281 while ((read_SSSR(reg) & SSSR_RNE)
273 && (drv_data->rx < drv_data->rx_end)) { 282 && (drv_data->rx < drv_data->rx_end)) {
274 *(u32 *)(drv_data->rx) = read_SSDR(reg); 283 *(u32 *)(drv_data->rx) = read_SSDR(reg);
275 drv_data->rx += 4; 284 drv_data->rx += 4;
276 } 285 }
286
287 return drv_data->rx == drv_data->rx_end;
277} 288}
278 289
279static void *next_transfer(struct driver_data *drv_data) 290static void *next_transfer(struct driver_data *drv_data)
@@ -409,166 +420,134 @@ static int wait_dma_channel_stop(int channel)
409 return limit; 420 return limit;
410} 421}
411 422
412static void dma_handler(int channel, void *data) 423void dma_error_stop(struct driver_data *drv_data, const char *msg)
413{ 424{
414 struct driver_data *drv_data = data;
415 struct spi_message *msg = drv_data->cur_msg;
416 void *reg = drv_data->ioaddr; 425 void *reg = drv_data->ioaddr;
417 u32 irq_status = DCSR(channel) & DMA_INT_MASK;
418 u32 trailing_sssr = 0;
419 426
420 if (irq_status & DCSR_BUSERR) { 427 /* Stop and reset */
428 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
429 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
430 write_SSSR(drv_data->clear_sr, reg);
431 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
432 if (drv_data->ssp_type != PXA25x_SSP)
433 write_SSTO(0, reg);
434 flush(drv_data);
435 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
421 436
422 /* Disable interrupts, clear status and reset DMA */ 437 unmap_dma_buffers(drv_data);
423 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
424 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
425 if (drv_data->ssp_type != PXA25x_SSP)
426 write_SSTO(0, reg);
427 write_SSSR(drv_data->clear_sr, reg);
428 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
429 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
430 438
431 if (flush(drv_data) == 0) 439 dev_err(&drv_data->pdev->dev, "%s\n", msg);
432 dev_err(&drv_data->pdev->dev,
433 "dma_handler: flush fail\n");
434 440
435 unmap_dma_buffers(drv_data); 441 drv_data->cur_msg->state = ERROR_STATE;
442 tasklet_schedule(&drv_data->pump_transfers);
443}
444
445static void dma_transfer_complete(struct driver_data *drv_data)
446{
447 void *reg = drv_data->ioaddr;
448 struct spi_message *msg = drv_data->cur_msg;
449
450 /* Clear and disable interrupts on SSP and DMA channels*/
451 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
452 write_SSSR(drv_data->clear_sr, reg);
453 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
454 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
455
456 if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
457 dev_err(&drv_data->pdev->dev,
458 "dma_handler: dma rx channel stop failed\n");
459
460 if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
461 dev_err(&drv_data->pdev->dev,
462 "dma_transfer: ssp rx stall failed\n");
463
464 unmap_dma_buffers(drv_data);
465
466 /* update the buffer pointer for the amount completed in dma */
467 drv_data->rx += drv_data->len -
468 (DCMD(drv_data->rx_channel) & DCMD_LENGTH);
469
470 /* read trailing data from fifo, it does not matter how many
471 * bytes are in the fifo just read until buffer is full
472 * or fifo is empty, which ever occurs first */
473 drv_data->read(drv_data);
474
475 /* return count of what was actually read */
476 msg->actual_length += drv_data->len -
477 (drv_data->rx_end - drv_data->rx);
478
479 /* Release chip select if requested, transfer delays are
480 * handled in pump_transfers */
481 if (drv_data->cs_change)
482 drv_data->cs_control(PXA2XX_CS_DEASSERT);
483
484 /* Move to next transfer */
485 msg->state = next_transfer(drv_data);
486
487 /* Schedule transfer tasklet */
488 tasklet_schedule(&drv_data->pump_transfers);
489}
490
491static void dma_handler(int channel, void *data)
492{
493 struct driver_data *drv_data = data;
494 u32 irq_status = DCSR(channel) & DMA_INT_MASK;
495
496 if (irq_status & DCSR_BUSERR) {
436 497
437 if (channel == drv_data->tx_channel) 498 if (channel == drv_data->tx_channel)
438 dev_err(&drv_data->pdev->dev, 499 dma_error_stop(drv_data,
439 "dma_handler: bad bus address on " 500 "dma_handler: "
440 "tx channel %d, source %x target = %x\n", 501 "bad bus address on tx channel");
441 channel, DSADR(channel), DTADR(channel));
442 else 502 else
443 dev_err(&drv_data->pdev->dev, 503 dma_error_stop(drv_data,
444 "dma_handler: bad bus address on " 504 "dma_handler: "
445 "rx channel %d, source %x target = %x\n", 505 "bad bus address on rx channel");
446 channel, DSADR(channel), DTADR(channel)); 506 return;
447
448 msg->state = ERROR_STATE;
449 tasklet_schedule(&drv_data->pump_transfers);
450 } 507 }
451 508
452 /* PXA255x_SSP has no timeout interrupt, wait for tailing bytes */ 509 /* PXA255x_SSP has no timeout interrupt, wait for tailing bytes */
453 if ((drv_data->ssp_type == PXA25x_SSP) 510 if ((channel == drv_data->tx_channel)
454 && (channel == drv_data->tx_channel) 511 && (irq_status & DCSR_ENDINTR)
455 && (irq_status & DCSR_ENDINTR)) { 512 && (drv_data->ssp_type == PXA25x_SSP)) {
456 513
457 /* Wait for rx to stall */ 514 /* Wait for rx to stall */
458 if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) 515 if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
459 dev_err(&drv_data->pdev->dev, 516 dev_err(&drv_data->pdev->dev,
460 "dma_handler: ssp rx stall failed\n"); 517 "dma_handler: ssp rx stall failed\n");
461 518
462 /* Clear and disable interrupts on SSP and DMA channels*/ 519 /* finish this transfer, start the next */
463 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); 520 dma_transfer_complete(drv_data);
464 write_SSSR(drv_data->clear_sr, reg);
465 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
466 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
467 if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
468 dev_err(&drv_data->pdev->dev,
469 "dma_handler: dma rx channel stop failed\n");
470
471 unmap_dma_buffers(drv_data);
472
473 /* Read trailing bytes */
474 /* Calculate number of trailing bytes, read them */
475 trailing_sssr = read_SSSR(reg);
476 if ((trailing_sssr & 0xf008) != 0xf000) {
477 drv_data->rx = drv_data->rx_end -
478 (((trailing_sssr >> 12) & 0x0f) + 1);
479 drv_data->read(drv_data);
480 }
481 msg->actual_length += drv_data->len;
482
483 /* Release chip select if requested, transfer delays are
484 * handled in pump_transfers */
485 if (drv_data->cs_change)
486 drv_data->cs_control(PXA2XX_CS_DEASSERT);
487
488 /* Move to next transfer */
489 msg->state = next_transfer(drv_data);
490
491 /* Schedule transfer tasklet */
492 tasklet_schedule(&drv_data->pump_transfers);
493 } 521 }
494} 522}
495 523
496static irqreturn_t dma_transfer(struct driver_data *drv_data) 524static irqreturn_t dma_transfer(struct driver_data *drv_data)
497{ 525{
498 u32 irq_status; 526 u32 irq_status;
499 u32 trailing_sssr = 0;
500 struct spi_message *msg = drv_data->cur_msg;
501 void *reg = drv_data->ioaddr; 527 void *reg = drv_data->ioaddr;
502 528
503 irq_status = read_SSSR(reg) & drv_data->mask_sr; 529 irq_status = read_SSSR(reg) & drv_data->mask_sr;
504 if (irq_status & SSSR_ROR) { 530 if (irq_status & SSSR_ROR) {
505 /* Clear and disable interrupts on SSP and DMA channels*/ 531 dma_error_stop(drv_data, "dma_transfer: fifo overrun");
506 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
507 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
508 if (drv_data->ssp_type != PXA25x_SSP)
509 write_SSTO(0, reg);
510 write_SSSR(drv_data->clear_sr, reg);
511 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
512 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
513 unmap_dma_buffers(drv_data);
514
515 if (flush(drv_data) == 0)
516 dev_err(&drv_data->pdev->dev,
517 "dma_transfer: flush fail\n");
518
519 dev_warn(&drv_data->pdev->dev, "dma_transfer: fifo overun\n");
520
521 drv_data->cur_msg->state = ERROR_STATE;
522 tasklet_schedule(&drv_data->pump_transfers);
523
524 return IRQ_HANDLED; 532 return IRQ_HANDLED;
525 } 533 }
526 534
527 /* Check for false positive timeout */ 535 /* Check for false positive timeout */
528 if ((irq_status & SSSR_TINT) && DCSR(drv_data->tx_channel) & DCSR_RUN) { 536 if ((irq_status & SSSR_TINT)
537 && (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
529 write_SSSR(SSSR_TINT, reg); 538 write_SSSR(SSSR_TINT, reg);
530 return IRQ_HANDLED; 539 return IRQ_HANDLED;
531 } 540 }
532 541
533 if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) { 542 if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {
534 543
535 /* Clear and disable interrupts on SSP and DMA channels*/ 544 /* Clear and disable timeout interrupt, do the rest in
536 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); 545 * dma_transfer_complete */
537 if (drv_data->ssp_type != PXA25x_SSP) 546 if (drv_data->ssp_type != PXA25x_SSP)
538 write_SSTO(0, reg); 547 write_SSTO(0, reg);
539 write_SSSR(drv_data->clear_sr, reg);
540 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
541 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
542 548
543 if (wait_dma_channel_stop(drv_data->rx_channel) == 0) 549 /* finish this transfer, start the next */
544 dev_err(&drv_data->pdev->dev, 550 dma_transfer_complete(drv_data);
545 "dma_transfer: dma rx channel stop failed\n");
546
547 if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
548 dev_err(&drv_data->pdev->dev,
549 "dma_transfer: ssp rx stall failed\n");
550
551 unmap_dma_buffers(drv_data);
552
553 /* Calculate number of trailing bytes, read them */
554 trailing_sssr = read_SSSR(reg);
555 if ((trailing_sssr & 0xf008) != 0xf000) {
556 drv_data->rx = drv_data->rx_end -
557 (((trailing_sssr >> 12) & 0x0f) + 1);
558 drv_data->read(drv_data);
559 }
560 msg->actual_length += drv_data->len;
561
562 /* Release chip select if requested, transfer delays are
563 * handled in pump_transfers */
564 if (drv_data->cs_change)
565 drv_data->cs_control(PXA2XX_CS_DEASSERT);
566
567 /* Move to next transfer */
568 msg->state = next_transfer(drv_data);
569
570 /* Schedule transfer tasklet */
571 tasklet_schedule(&drv_data->pump_transfers);
572 551
573 return IRQ_HANDLED; 552 return IRQ_HANDLED;
574 } 553 }
@@ -577,89 +556,103 @@ static irqreturn_t dma_transfer(struct driver_data *drv_data)
577 return IRQ_NONE; 556 return IRQ_NONE;
578} 557}
579 558
580static irqreturn_t interrupt_transfer(struct driver_data *drv_data) 559static void int_error_stop(struct driver_data *drv_data, const char* msg)
581{ 560{
582 struct spi_message *msg = drv_data->cur_msg;
583 void *reg = drv_data->ioaddr; 561 void *reg = drv_data->ioaddr;
584 unsigned long limit = loops_per_jiffy << 1;
585 u32 irq_status;
586 u32 irq_mask = (read_SSCR1(reg) & SSCR1_TIE) ?
587 drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;
588
589 while ((irq_status = read_SSSR(reg) & irq_mask)) {
590
591 if (irq_status & SSSR_ROR) {
592 562
593 /* Clear and disable interrupts */ 563 /* Stop and reset SSP */
594 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); 564 write_SSSR(drv_data->clear_sr, reg);
595 write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg); 565 write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
596 if (drv_data->ssp_type != PXA25x_SSP) 566 if (drv_data->ssp_type != PXA25x_SSP)
597 write_SSTO(0, reg); 567 write_SSTO(0, reg);
598 write_SSSR(drv_data->clear_sr, reg); 568 flush(drv_data);
569 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
599 570
600 if (flush(drv_data) == 0) 571 dev_err(&drv_data->pdev->dev, "%s\n", msg);
601 dev_err(&drv_data->pdev->dev,
602 "interrupt_transfer: flush fail\n");
603 572
604 /* Stop the SSP */ 573 drv_data->cur_msg->state = ERROR_STATE;
574 tasklet_schedule(&drv_data->pump_transfers);
575}
605 576
606 dev_warn(&drv_data->pdev->dev, 577static void int_transfer_complete(struct driver_data *drv_data)
607 "interrupt_transfer: fifo overun\n"); 578{
579 void *reg = drv_data->ioaddr;
608 580
609 msg->state = ERROR_STATE; 581 /* Stop SSP */
610 tasklet_schedule(&drv_data->pump_transfers); 582 write_SSSR(drv_data->clear_sr, reg);
583 write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
584 if (drv_data->ssp_type != PXA25x_SSP)
585 write_SSTO(0, reg);
611 586
612 return IRQ_HANDLED; 587 /* Update total byte transfered return count actual bytes read */
613 } 588 drv_data->cur_msg->actual_length += drv_data->len -
589 (drv_data->rx_end - drv_data->rx);
614 590
615 /* Look for false positive timeout */ 591 /* Release chip select if requested, transfer delays are
616 if ((irq_status & SSSR_TINT) 592 * handled in pump_transfers */
617 && (drv_data->rx < drv_data->rx_end)) 593 if (drv_data->cs_change)
618 write_SSSR(SSSR_TINT, reg); 594 drv_data->cs_control(PXA2XX_CS_DEASSERT);
619 595
620 /* Pump data */ 596 /* Move to next transfer */
621 drv_data->read(drv_data); 597 drv_data->cur_msg->state = next_transfer(drv_data);
622 drv_data->write(drv_data);
623 598
624 if (drv_data->tx == drv_data->tx_end) { 599 /* Schedule transfer tasklet */
625 /* Disable tx interrupt */ 600 tasklet_schedule(&drv_data->pump_transfers);
626 write_SSCR1(read_SSCR1(reg) & ~SSCR1_TIE, reg); 601}
627 irq_mask = drv_data->mask_sr & ~SSSR_TFS;
628 602
629 /* PXA25x_SSP has no timeout, read trailing bytes */ 603static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
630 if (drv_data->ssp_type == PXA25x_SSP) { 604{
631 while ((read_SSSR(reg) & SSSR_BSY) && limit--) 605 void *reg = drv_data->ioaddr;
632 drv_data->read(drv_data);
633 606
634 if (limit == 0) 607 u32 irq_mask = (read_SSCR1(reg) & SSCR1_TIE) ?
635 dev_err(&drv_data->pdev->dev, 608 drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;
636 "interrupt_transfer: "
637 "trailing byte read failed\n");
638 }
639 }
640 609
641 if ((irq_status & SSSR_TINT) 610 u32 irq_status = read_SSSR(reg) & irq_mask;
642 || (drv_data->rx == drv_data->rx_end)) {
643 611
644 /* Clear timeout */ 612 if (irq_status & SSSR_ROR) {
645 write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg); 613 int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
646 if (drv_data->ssp_type != PXA25x_SSP) 614 return IRQ_HANDLED;
647 write_SSTO(0, reg); 615 }
648 write_SSSR(drv_data->clear_sr, reg);
649 616
650 /* Update total byte transfered */ 617 if (irq_status & SSSR_TINT) {
651 msg->actual_length += drv_data->len; 618 write_SSSR(SSSR_TINT, reg);
619 if (drv_data->read(drv_data)) {
620 int_transfer_complete(drv_data);
621 return IRQ_HANDLED;
622 }
623 }
652 624
653 /* Release chip select if requested, transfer delays are 625 /* Drain rx fifo, Fill tx fifo and prevent overruns */
654 * handled in pump_transfers */ 626 do {
655 if (drv_data->cs_change) 627 if (drv_data->read(drv_data)) {
656 drv_data->cs_control(PXA2XX_CS_DEASSERT); 628 int_transfer_complete(drv_data);
629 return IRQ_HANDLED;
630 }
631 } while (drv_data->write(drv_data));
657 632
658 /* Move to next transfer */ 633 if (drv_data->read(drv_data)) {
659 msg->state = next_transfer(drv_data); 634 int_transfer_complete(drv_data);
635 return IRQ_HANDLED;
636 }
660 637
661 /* Schedule transfer tasklet */ 638 if (drv_data->tx == drv_data->tx_end) {
662 tasklet_schedule(&drv_data->pump_transfers); 639 write_SSCR1(read_SSCR1(reg) & ~SSCR1_TIE, reg);
640 /* PXA25x_SSP has no timeout, read trailing bytes */
641 if (drv_data->ssp_type == PXA25x_SSP) {
642 if (!wait_ssp_rx_stall(reg))
643 {
644 int_error_stop(drv_data, "interrupt_transfer: "
645 "rx stall failed");
646 return IRQ_HANDLED;
647 }
648 if (!drv_data->read(drv_data))
649 {
650 int_error_stop(drv_data,
651 "interrupt_transfer: "
652 "trailing byte read failed");
653 return IRQ_HANDLED;
654 }
655 int_transfer_complete(drv_data);
663 } 656 }
664 } 657 }
665 658
@@ -681,7 +674,7 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
681 write_SSSR(drv_data->clear_sr, reg); 674 write_SSSR(drv_data->clear_sr, reg);
682 675
683 dev_err(&drv_data->pdev->dev, "bad message state " 676 dev_err(&drv_data->pdev->dev, "bad message state "
684 "in interrupt handler"); 677 "in interrupt handler\n");
685 678
686 /* Never fail */ 679 /* Never fail */
687 return IRQ_HANDLED; 680 return IRQ_HANDLED;
@@ -690,6 +683,102 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
690 return drv_data->transfer_handler(drv_data); 683 return drv_data->transfer_handler(drv_data);
691} 684}
692 685
686int set_dma_burst_and_threshold(struct chip_data *chip, struct spi_device *spi,
687 u8 bits_per_word, u32 *burst_code,
688 u32 *threshold)
689{
690 struct pxa2xx_spi_chip *chip_info =
691 (struct pxa2xx_spi_chip *)spi->controller_data;
692 int bytes_per_word;
693 int burst_bytes;
694 int thresh_words;
695 int req_burst_size;
696 int retval = 0;
697
698 /* Set the threshold (in registers) to equal the same amount of data
699 * as represented by burst size (in bytes). The computation below
700 * is (burst_size rounded up to nearest 8 byte, word or long word)
701 * divided by (bytes/register); the tx threshold is the inverse of
702 * the rx, so that there will always be enough data in the rx fifo
703 * to satisfy a burst, and there will always be enough space in the
704 * tx fifo to accept a burst (a tx burst will overwrite the fifo if
705 * there is not enough space), there must always remain enough empty
706 * space in the rx fifo for any data loaded to the tx fifo.
707 * Whenever burst_size (in bytes) equals bits/word, the fifo threshold
708 * will be 8, or half the fifo;
709 * The threshold can only be set to 2, 4 or 8, but not 16, because
710 * to burst 16 to the tx fifo, the fifo would have to be empty;
711 * however, the minimum fifo trigger level is 1, and the tx will
712 * request service when the fifo is at this level, with only 15 spaces.
713 */
714
715 /* find bytes/word */
716 if (bits_per_word <= 8)
717 bytes_per_word = 1;
718 else if (bits_per_word <= 16)
719 bytes_per_word = 2;
720 else
721 bytes_per_word = 4;
722
723 /* use struct pxa2xx_spi_chip->dma_burst_size if available */
724 if (chip_info)
725 req_burst_size = chip_info->dma_burst_size;
726 else {
727 switch (chip->dma_burst_size) {
728 default:
729 /* if the default burst size is not set,
730 * do it now */
731 chip->dma_burst_size = DCMD_BURST8;
732 case DCMD_BURST8:
733 req_burst_size = 8;
734 break;
735 case DCMD_BURST16:
736 req_burst_size = 16;
737 break;
738 case DCMD_BURST32:
739 req_burst_size = 32;
740 break;
741 }
742 }
743 if (req_burst_size <= 8) {
744 *burst_code = DCMD_BURST8;
745 burst_bytes = 8;
746 } else if (req_burst_size <= 16) {
747 if (bytes_per_word == 1) {
748 /* don't burst more than 1/2 the fifo */
749 *burst_code = DCMD_BURST8;
750 burst_bytes = 8;
751 retval = 1;
752 } else {
753 *burst_code = DCMD_BURST16;
754 burst_bytes = 16;
755 }
756 } else {
757 if (bytes_per_word == 1) {
758 /* don't burst more than 1/2 the fifo */
759 *burst_code = DCMD_BURST8;
760 burst_bytes = 8;
761 retval = 1;
762 } else if (bytes_per_word == 2) {
763 /* don't burst more than 1/2 the fifo */
764 *burst_code = DCMD_BURST16;
765 burst_bytes = 16;
766 retval = 1;
767 } else {
768 *burst_code = DCMD_BURST32;
769 burst_bytes = 32;
770 }
771 }
772
773 thresh_words = burst_bytes / bytes_per_word;
774
775 /* thresh_words will be between 2 and 8 */
776 *threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT)
777 | (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT);
778
779 return retval;
780}
781
693static void pump_transfers(unsigned long data) 782static void pump_transfers(unsigned long data)
694{ 783{
695 struct driver_data *drv_data = (struct driver_data *)data; 784 struct driver_data *drv_data = (struct driver_data *)data;
@@ -702,6 +791,9 @@ static void pump_transfers(unsigned long data)
702 u8 bits = 0; 791 u8 bits = 0;
703 u32 speed = 0; 792 u32 speed = 0;
704 u32 cr0; 793 u32 cr0;
794 u32 cr1;
795 u32 dma_thresh = drv_data->cur_chip->dma_threshold;
796 u32 dma_burst = drv_data->cur_chip->dma_burst_size;
705 797
706 /* Get current state information */ 798 /* Get current state information */
707 message = drv_data->cur_msg; 799 message = drv_data->cur_msg;
@@ -731,6 +823,16 @@ static void pump_transfers(unsigned long data)
731 udelay(previous->delay_usecs); 823 udelay(previous->delay_usecs);
732 } 824 }
733 825
826 /* Check transfer length */
827 if (transfer->len > 8191)
828 {
829 dev_warn(&drv_data->pdev->dev, "pump_transfers: transfer "
830 "length greater than 8191\n");
831 message->status = -EINVAL;
832 giveback(drv_data);
833 return;
834 }
835
734 /* Setup the transfer state based on the type of transfer */ 836 /* Setup the transfer state based on the type of transfer */
735 if (flush(drv_data) == 0) { 837 if (flush(drv_data) == 0) {
736 dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n"); 838 dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
@@ -747,17 +849,15 @@ static void pump_transfers(unsigned long data)
747 drv_data->rx_end = drv_data->rx + transfer->len; 849 drv_data->rx_end = drv_data->rx + transfer->len;
748 drv_data->rx_dma = transfer->rx_dma; 850 drv_data->rx_dma = transfer->rx_dma;
749 drv_data->tx_dma = transfer->tx_dma; 851 drv_data->tx_dma = transfer->tx_dma;
750 drv_data->len = transfer->len; 852 drv_data->len = transfer->len & DCMD_LENGTH;
751 drv_data->write = drv_data->tx ? chip->write : null_writer; 853 drv_data->write = drv_data->tx ? chip->write : null_writer;
752 drv_data->read = drv_data->rx ? chip->read : null_reader; 854 drv_data->read = drv_data->rx ? chip->read : null_reader;
753 drv_data->cs_change = transfer->cs_change; 855 drv_data->cs_change = transfer->cs_change;
754 856
755 /* Change speed and bit per word on a per transfer */ 857 /* Change speed and bit per word on a per transfer */
858 cr0 = chip->cr0;
756 if (transfer->speed_hz || transfer->bits_per_word) { 859 if (transfer->speed_hz || transfer->bits_per_word) {
757 860
758 /* Disable clock */
759 write_SSCR0(chip->cr0 & ~SSCR0_SSE, reg);
760 cr0 = chip->cr0;
761 bits = chip->bits_per_word; 861 bits = chip->bits_per_word;
762 speed = chip->speed_hz; 862 speed = chip->speed_hz;
763 863
@@ -796,15 +896,24 @@ static void pump_transfers(unsigned long data)
796 drv_data->write = drv_data->write != null_writer ? 896 drv_data->write = drv_data->write != null_writer ?
797 u32_writer : null_writer; 897 u32_writer : null_writer;
798 } 898 }
899 /* if bits/word is changed in dma mode, then must check the
900 * thresholds and burst also */
901 if (chip->enable_dma) {
902 if (set_dma_burst_and_threshold(chip, message->spi,
903 bits, &dma_burst,
904 &dma_thresh))
905 if (printk_ratelimit())
906 dev_warn(&message->spi->dev,
907 "pump_transfer: "
908 "DMA burst size reduced to "
909 "match bits_per_word\n");
910 }
799 911
800 cr0 = clk_div 912 cr0 = clk_div
801 | SSCR0_Motorola 913 | SSCR0_Motorola
802 | SSCR0_DataSize(bits > 16 ? bits - 16 : bits) 914 | SSCR0_DataSize(bits > 16 ? bits - 16 : bits)
803 | SSCR0_SSE 915 | SSCR0_SSE
804 | (bits > 16 ? SSCR0_EDSS : 0); 916 | (bits > 16 ? SSCR0_EDSS : 0);
805
806 /* Start it back up */
807 write_SSCR0(cr0, reg);
808 } 917 }
809 918
810 message->state = RUNNING_STATE; 919 message->state = RUNNING_STATE;
@@ -823,13 +932,13 @@ static void pump_transfers(unsigned long data)
823 /* No target address increment */ 932 /* No target address increment */
824 DCMD(drv_data->rx_channel) = DCMD_FLOWSRC 933 DCMD(drv_data->rx_channel) = DCMD_FLOWSRC
825 | drv_data->dma_width 934 | drv_data->dma_width
826 | chip->dma_burst_size 935 | dma_burst
827 | drv_data->len; 936 | drv_data->len;
828 else 937 else
829 DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR 938 DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR
830 | DCMD_FLOWSRC 939 | DCMD_FLOWSRC
831 | drv_data->dma_width 940 | drv_data->dma_width
832 | chip->dma_burst_size 941 | dma_burst
833 | drv_data->len; 942 | drv_data->len;
834 943
835 /* Setup tx DMA Channel */ 944 /* Setup tx DMA Channel */
@@ -840,13 +949,13 @@ static void pump_transfers(unsigned long data)
840 /* No source address increment */ 949 /* No source address increment */
841 DCMD(drv_data->tx_channel) = DCMD_FLOWTRG 950 DCMD(drv_data->tx_channel) = DCMD_FLOWTRG
842 | drv_data->dma_width 951 | drv_data->dma_width
843 | chip->dma_burst_size 952 | dma_burst
844 | drv_data->len; 953 | drv_data->len;
845 else 954 else
846 DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR 955 DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR
847 | DCMD_FLOWTRG 956 | DCMD_FLOWTRG
848 | drv_data->dma_width 957 | drv_data->dma_width
849 | chip->dma_burst_size 958 | dma_burst
850 | drv_data->len; 959 | drv_data->len;
851 960
852 /* Enable dma end irqs on SSP to detect end of transfer */ 961 /* Enable dma end irqs on SSP to detect end of transfer */
@@ -856,16 +965,11 @@ static void pump_transfers(unsigned long data)
856 /* Fix me, need to handle cs polarity */ 965 /* Fix me, need to handle cs polarity */
857 drv_data->cs_control(PXA2XX_CS_ASSERT); 966 drv_data->cs_control(PXA2XX_CS_ASSERT);
858 967
859 /* Go baby, go */ 968 /* Clear status and start DMA engine */
969 cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
860 write_SSSR(drv_data->clear_sr, reg); 970 write_SSSR(drv_data->clear_sr, reg);
861 DCSR(drv_data->rx_channel) |= DCSR_RUN; 971 DCSR(drv_data->rx_channel) |= DCSR_RUN;
862 DCSR(drv_data->tx_channel) |= DCSR_RUN; 972 DCSR(drv_data->tx_channel) |= DCSR_RUN;
863 if (drv_data->ssp_type != PXA25x_SSP)
864 write_SSTO(chip->timeout, reg);
865 write_SSCR1(chip->cr1
866 | chip->dma_threshold
867 | drv_data->dma_cr1,
868 reg);
869 } else { 973 } else {
870 /* Ensure we have the correct interrupt handler */ 974 /* Ensure we have the correct interrupt handler */
871 drv_data->transfer_handler = interrupt_transfer; 975 drv_data->transfer_handler = interrupt_transfer;
@@ -873,14 +977,25 @@ static void pump_transfers(unsigned long data)
873 /* Fix me, need to handle cs polarity */ 977 /* Fix me, need to handle cs polarity */
874 drv_data->cs_control(PXA2XX_CS_ASSERT); 978 drv_data->cs_control(PXA2XX_CS_ASSERT);
875 979
876 /* Go baby, go */ 980 /* Clear status */
981 cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1;
877 write_SSSR(drv_data->clear_sr, reg); 982 write_SSSR(drv_data->clear_sr, reg);
983 }
984
985 /* see if we need to reload the config registers */
986 if ((read_SSCR0(reg) != cr0)
987 || (read_SSCR1(reg) & SSCR1_CHANGE_MASK) !=
988 (cr1 & SSCR1_CHANGE_MASK)) {
989
990 write_SSCR0(cr0 & ~SSCR0_SSE, reg);
878 if (drv_data->ssp_type != PXA25x_SSP) 991 if (drv_data->ssp_type != PXA25x_SSP)
879 write_SSTO(chip->timeout, reg); 992 write_SSTO(chip->timeout, reg);
880 write_SSCR1(chip->cr1 993 write_SSCR1(cr1, reg);
881 | chip->threshold 994 write_SSCR0(cr0, reg);
882 | drv_data->int_cr1, 995 } else {
883 reg); 996 if (drv_data->ssp_type != PXA25x_SSP)
997 write_SSTO(chip->timeout, reg);
998 write_SSCR1(cr1, reg);
884 } 999 }
885} 1000}
886 1001
@@ -915,9 +1030,9 @@ static void pump_messages(struct work_struct *work)
915 struct spi_transfer, 1030 struct spi_transfer,
916 transfer_list); 1031 transfer_list);
917 1032
918 /* Setup the SSP using the per chip configuration */ 1033 /* prepare to setup the SSP, in pump_transfers, using the per
1034 * chip configuration */
919 drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi); 1035 drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
920 restore_state(drv_data);
921 1036
922 /* Mark as busy and launch transfers */ 1037 /* Mark as busy and launch transfers */
923 tasklet_schedule(&drv_data->pump_transfers); 1038 tasklet_schedule(&drv_data->pump_transfers);
@@ -963,63 +1078,77 @@ static int setup(struct spi_device *spi)
963 spi->bits_per_word = 8; 1078 spi->bits_per_word = 8;
964 1079
965 if (drv_data->ssp_type != PXA25x_SSP 1080 if (drv_data->ssp_type != PXA25x_SSP
966 && (spi->bits_per_word < 4 || spi->bits_per_word > 32)) 1081 && (spi->bits_per_word < 4 || spi->bits_per_word > 32)) {
1082 dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d "
1083 "b/w not 4-32 for type non-PXA25x_SSP\n",
1084 drv_data->ssp_type, spi->bits_per_word);
967 return -EINVAL; 1085 return -EINVAL;
968 else if (spi->bits_per_word < 4 || spi->bits_per_word > 16) 1086 }
1087 else if (drv_data->ssp_type == PXA25x_SSP
1088 && (spi->bits_per_word < 4
1089 || spi->bits_per_word > 16)) {
1090 dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d "
1091 "b/w not 4-16 for type PXA25x_SSP\n",
1092 drv_data->ssp_type, spi->bits_per_word);
969 return -EINVAL; 1093 return -EINVAL;
1094 }
970 1095
971 /* Only alloc (or use chip_info) on first setup */ 1096 /* Only alloc on first setup */
972 chip = spi_get_ctldata(spi); 1097 chip = spi_get_ctldata(spi);
973 if (chip == NULL) { 1098 if (!chip) {
974 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); 1099 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
975 if (!chip) 1100 if (!chip) {
1101 dev_err(&spi->dev,
1102 "failed setup: can't allocate chip data\n");
976 return -ENOMEM; 1103 return -ENOMEM;
1104 }
977 1105
978 chip->cs_control = null_cs_control; 1106 chip->cs_control = null_cs_control;
979 chip->enable_dma = 0; 1107 chip->enable_dma = 0;
980 chip->timeout = SSP_TIMEOUT(1000); 1108 chip->timeout = 1000;
981 chip->threshold = SSCR1_RxTresh(1) | SSCR1_TxTresh(1); 1109 chip->threshold = SSCR1_RxTresh(1) | SSCR1_TxTresh(1);
982 chip->dma_burst_size = drv_data->master_info->enable_dma ? 1110 chip->dma_burst_size = drv_data->master_info->enable_dma ?
983 DCMD_BURST8 : 0; 1111 DCMD_BURST8 : 0;
984
985 chip_info = spi->controller_data;
986 } 1112 }
987 1113
1114 /* protocol drivers may change the chip settings, so...
1115 * if chip_info exists, use it */
1116 chip_info = spi->controller_data;
1117
988 /* chip_info isn't always needed */ 1118 /* chip_info isn't always needed */
1119 chip->cr1 = 0;
989 if (chip_info) { 1120 if (chip_info) {
990 if (chip_info->cs_control) 1121 if (chip_info->cs_control)
991 chip->cs_control = chip_info->cs_control; 1122 chip->cs_control = chip_info->cs_control;
992 1123
993 chip->timeout = SSP_TIMEOUT(chip_info->timeout_microsecs); 1124 chip->timeout = chip_info->timeout;
994 1125
995 chip->threshold = SSCR1_RxTresh(chip_info->rx_threshold) 1126 chip->threshold = (SSCR1_RxTresh(chip_info->rx_threshold) &
996 | SSCR1_TxTresh(chip_info->tx_threshold); 1127 SSCR1_RFT) |
1128 (SSCR1_TxTresh(chip_info->tx_threshold) &
1129 SSCR1_TFT);
997 1130
998 chip->enable_dma = chip_info->dma_burst_size != 0 1131 chip->enable_dma = chip_info->dma_burst_size != 0
999 && drv_data->master_info->enable_dma; 1132 && drv_data->master_info->enable_dma;
1000 chip->dma_threshold = 0; 1133 chip->dma_threshold = 0;
1001 1134
1002 if (chip->enable_dma) {
1003 if (chip_info->dma_burst_size <= 8) {
1004 chip->dma_threshold = SSCR1_RxTresh(8)
1005 | SSCR1_TxTresh(8);
1006 chip->dma_burst_size = DCMD_BURST8;
1007 } else if (chip_info->dma_burst_size <= 16) {
1008 chip->dma_threshold = SSCR1_RxTresh(16)
1009 | SSCR1_TxTresh(16);
1010 chip->dma_burst_size = DCMD_BURST16;
1011 } else {
1012 chip->dma_threshold = SSCR1_RxTresh(32)
1013 | SSCR1_TxTresh(32);
1014 chip->dma_burst_size = DCMD_BURST32;
1015 }
1016 }
1017
1018
1019 if (chip_info->enable_loopback) 1135 if (chip_info->enable_loopback)
1020 chip->cr1 = SSCR1_LBM; 1136 chip->cr1 = SSCR1_LBM;
1021 } 1137 }
1022 1138
1139 /* set dma burst and threshold outside of chip_info path so that if
1140 * chip_info goes away after setting chip->enable_dma, the
1141 * burst and threshold can still respond to changes in bits_per_word */
1142 if (chip->enable_dma) {
1143 /* set up legal burst and threshold for dma */
1144 if (set_dma_burst_and_threshold(chip, spi, spi->bits_per_word,
1145 &chip->dma_burst_size,
1146 &chip->dma_threshold)) {
1147 dev_warn(&spi->dev, "in setup: DMA burst size reduced "
1148 "to match bits_per_word\n");
1149 }
1150 }
1151
1023 if (drv_data->ioaddr == SSP1_VIRT) 1152 if (drv_data->ioaddr == SSP1_VIRT)
1024 clk_div = SSP1_SerClkDiv(spi->max_speed_hz); 1153 clk_div = SSP1_SerClkDiv(spi->max_speed_hz);
1025 else if (drv_data->ioaddr == SSP2_VIRT) 1154 else if (drv_data->ioaddr == SSP2_VIRT)
@@ -1027,7 +1156,11 @@ static int setup(struct spi_device *spi)
1027 else if (drv_data->ioaddr == SSP3_VIRT) 1156 else if (drv_data->ioaddr == SSP3_VIRT)
1028 clk_div = SSP3_SerClkDiv(spi->max_speed_hz); 1157 clk_div = SSP3_SerClkDiv(spi->max_speed_hz);
1029 else 1158 else
1159 {
1160 dev_err(&spi->dev, "failed setup: unknown IO address=0x%p\n",
1161 drv_data->ioaddr);
1030 return -ENODEV; 1162 return -ENODEV;
1163 }
1031 chip->speed_hz = spi->max_speed_hz; 1164 chip->speed_hz = spi->max_speed_hz;
1032 1165
1033 chip->cr0 = clk_div 1166 chip->cr0 = clk_div
@@ -1071,7 +1204,6 @@ static int setup(struct spi_device *spi)
1071 chip->write = u32_writer; 1204 chip->write = u32_writer;
1072 } else { 1205 } else {
1073 dev_err(&spi->dev, "invalid wordsize\n"); 1206 dev_err(&spi->dev, "invalid wordsize\n");
1074 kfree(chip);
1075 return -ENODEV; 1207 return -ENODEV;
1076 } 1208 }
1077 chip->bits_per_word = spi->bits_per_word; 1209 chip->bits_per_word = spi->bits_per_word;
@@ -1162,6 +1294,12 @@ static int destroy_queue(struct driver_data *drv_data)
1162 int status; 1294 int status;
1163 1295
1164 status = stop_queue(drv_data); 1296 status = stop_queue(drv_data);
1297 /* we are unloading the module or failing to load (only two calls
1298 * to this routine), and neither call can handle a return value.
1299 * However, destroy_workqueue calls flush_workqueue, and that will
1300 * block until all work is done. If the reason that stop_queue
1301 * timed out is that the work will never finish, then it does no
1302 * good to call destroy_workqueue, so return anyway. */
1165 if (status != 0) 1303 if (status != 0)
1166 return status; 1304 return status;
1167 1305
@@ -1360,7 +1498,16 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
1360 /* Remove the queue */ 1498 /* Remove the queue */
1361 status = destroy_queue(drv_data); 1499 status = destroy_queue(drv_data);
1362 if (status != 0) 1500 if (status != 0)
1363 return status; 1501 /* the kernel does not check the return status of this
1502 * this routine (mod->exit, within the kernel). Therefore
1503 * nothing is gained by returning from here, the module is
1504 * going away regardless, and we should not leave any more
1505 * resources allocated than necessary. We cannot free the
1506 * message memory in drv_data->queue, but we can release the
1507 * resources below. I think the kernel should honor -EBUSY
1508 * returns but... */
1509 dev_err(&pdev->dev, "pxa2xx_spi_remove: workqueue will not "
1510 "complete, message memory not freed\n");
1364 1511
1365 /* Disable the SSP at the peripheral and SOC level */ 1512 /* Disable the SSP at the peripheral and SOC level */
1366 write_SSCR0(0, drv_data->ioaddr); 1513 write_SSCR0(0, drv_data->ioaddr);
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 72f3db99ff94..3e0abbb49fe1 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -598,7 +598,7 @@ at91_ep_alloc_request(struct usb_ep *_ep, unsigned int gfp_flags)
598{ 598{
599 struct at91_request *req; 599 struct at91_request *req;
600 600
601 req = kcalloc(1, sizeof (struct at91_request), gfp_flags); 601 req = kzalloc(sizeof (struct at91_request), gfp_flags);
602 if (!req) 602 if (!req)
603 return NULL; 603 return NULL;
604 604
diff --git a/drivers/usb/gadget/serial.c b/drivers/usb/gadget/serial.c
index 5516c59ed5ec..2d12bf9f19d6 100644
--- a/drivers/usb/gadget/serial.c
+++ b/drivers/usb/gadget/serial.c
@@ -2195,7 +2195,7 @@ static struct gs_buf *gs_buf_alloc(unsigned int size, gfp_t kmalloc_flags)
2195 if (size == 0) 2195 if (size == 0)
2196 return NULL; 2196 return NULL;
2197 2197
2198 gb = (struct gs_buf *)kmalloc(sizeof(struct gs_buf), kmalloc_flags); 2198 gb = kmalloc(sizeof(struct gs_buf), kmalloc_flags);
2199 if (gb == NULL) 2199 if (gb == NULL)
2200 return NULL; 2200 return NULL;
2201 2201
diff --git a/drivers/usb/host/hc_crisv10.c b/drivers/usb/host/hc_crisv10.c
index 9325e46a68c0..282d82efc0b0 100644
--- a/drivers/usb/host/hc_crisv10.c
+++ b/drivers/usb/host/hc_crisv10.c
@@ -365,7 +365,7 @@ static inline struct urb *urb_list_first(int epid)
365/* Adds an urb_entry last in the list for this epid. */ 365/* Adds an urb_entry last in the list for this epid. */
366static inline void urb_list_add(struct urb *urb, int epid) 366static inline void urb_list_add(struct urb *urb, int epid)
367{ 367{
368 urb_entry_t *urb_entry = (urb_entry_t *)kmalloc(sizeof(urb_entry_t), KMALLOC_FLAG); 368 urb_entry_t *urb_entry = kmalloc(sizeof(urb_entry_t), KMALLOC_FLAG);
369 assert(urb_entry); 369 assert(urb_entry);
370 370
371 urb_entry->urb = urb; 371 urb_entry->urb = urb;
diff --git a/drivers/usb/misc/auerswald.c b/drivers/usb/misc/auerswald.c
index c703f73e1655..6c7f3efb1d40 100644
--- a/drivers/usb/misc/auerswald.c
+++ b/drivers/usb/misc/auerswald.c
@@ -766,7 +766,7 @@ static int auerbuf_setup (pauerbufctl_t bcp, unsigned int numElements, unsigned
766 bep->bufp = kmalloc (bufsize, GFP_KERNEL); 766 bep->bufp = kmalloc (bufsize, GFP_KERNEL);
767 if (!bep->bufp) 767 if (!bep->bufp)
768 goto bl_fail; 768 goto bl_fail;
769 bep->dr = (struct usb_ctrlrequest *) kmalloc (sizeof (struct usb_ctrlrequest), GFP_KERNEL); 769 bep->dr = kmalloc(sizeof (struct usb_ctrlrequest), GFP_KERNEL);
770 if (!bep->dr) 770 if (!bep->dr)
771 goto bl_fail; 771 goto bl_fail;
772 bep->urbp = usb_alloc_urb (0, GFP_KERNEL); 772 bep->urbp = usb_alloc_urb (0, GFP_KERNEL);
@@ -1969,7 +1969,7 @@ static int auerswald_probe (struct usb_interface *intf,
1969 info("device is a %s", cp->dev_desc); 1969 info("device is a %s", cp->dev_desc);
1970 1970
1971 /* get the maximum allowed control transfer length */ 1971 /* get the maximum allowed control transfer length */
1972 pbuf = (__le16 *) kmalloc (2, GFP_KERNEL); /* use an allocated buffer because of urb target */ 1972 pbuf = kmalloc(2, GFP_KERNEL); /* use an allocated buffer because of urb target */
1973 if (!pbuf) { 1973 if (!pbuf) {
1974 err( "out of memory"); 1974 err( "out of memory");
1975 goto pfail; 1975 goto pfail;
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 7e8a0acd52ee..70250252ae2a 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -705,7 +705,7 @@ static int uss720_probe(struct usb_interface *intf,
705 /* 705 /*
706 * Allocate parport interface 706 * Allocate parport interface
707 */ 707 */
708 if (!(priv = kcalloc(sizeof(struct parport_uss720_private), 1, GFP_KERNEL))) { 708 if (!(priv = kzalloc(sizeof(struct parport_uss720_private), GFP_KERNEL))) {
709 usb_put_dev(usbdev); 709 usb_put_dev(usbdev);
710 return -ENOMEM; 710 return -ENOMEM;
711 } 711 }
diff --git a/drivers/usb/net/rndis_host.c b/drivers/usb/net/rndis_host.c
index 99f26b3e502f..ea5f44de3de2 100644
--- a/drivers/usb/net/rndis_host.c
+++ b/drivers/usb/net/rndis_host.c
@@ -469,7 +469,7 @@ static void rndis_unbind(struct usbnet *dev, struct usb_interface *intf)
469 struct rndis_halt *halt; 469 struct rndis_halt *halt;
470 470
471 /* try to clear any rndis state/activity (no i/o from stack!) */ 471 /* try to clear any rndis state/activity (no i/o from stack!) */
472 halt = kcalloc(1, sizeof *halt, GFP_KERNEL); 472 halt = kzalloc(sizeof *halt, GFP_KERNEL);
473 if (halt) { 473 if (halt) {
474 halt->msg_type = RNDIS_MSG_HALT; 474 halt->msg_type = RNDIS_MSG_HALT;
475 halt->msg_len = ccpu2(sizeof *halt); 475 halt->msg_len = ccpu2(sizeof *halt);
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index a1fdb85b8c0a..45cdf9bc43b2 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -1493,7 +1493,7 @@ static struct cypress_buf *cypress_buf_alloc(unsigned int size)
1493 if (size == 0) 1493 if (size == 0)
1494 return NULL; 1494 return NULL;
1495 1495
1496 cb = (struct cypress_buf *)kmalloc(sizeof(struct cypress_buf), GFP_KERNEL); 1496 cb = kmalloc(sizeof(struct cypress_buf), GFP_KERNEL);
1497 if (cb == NULL) 1497 if (cb == NULL)
1498 return NULL; 1498 return NULL;
1499 1499
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 9d9ea874639c..efd9ce3f931f 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -1681,7 +1681,7 @@ dbg( "digi_startup: TOP" );
1681 for( i=0; i<serial->type->num_ports+1; i++ ) { 1681 for( i=0; i<serial->type->num_ports+1; i++ ) {
1682 1682
1683 /* allocate port private structure */ 1683 /* allocate port private structure */
1684 priv = (struct digi_port *)kmalloc( sizeof(struct digi_port), 1684 priv = kmalloc( sizeof(struct digi_port),
1685 GFP_KERNEL ); 1685 GFP_KERNEL );
1686 if( priv == (struct digi_port *)0 ) { 1686 if( priv == (struct digi_port *)0 ) {
1687 while( --i >= 0 ) 1687 while( --i >= 0 )
@@ -1714,7 +1714,7 @@ dbg( "digi_startup: TOP" );
1714 } 1714 }
1715 1715
1716 /* allocate serial private structure */ 1716 /* allocate serial private structure */
1717 serial_priv = (struct digi_serial *)kmalloc( sizeof(struct digi_serial), 1717 serial_priv = kmalloc( sizeof(struct digi_serial),
1718 GFP_KERNEL ); 1718 GFP_KERNEL );
1719 if( serial_priv == (struct digi_serial *)0 ) { 1719 if( serial_priv == (struct digi_serial *)0 ) {
1720 for( i=0; i<serial->type->num_ports+1; i++ ) 1720 for( i=0; i<serial->type->num_ports+1; i++ )
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 2da2684e0809..980285c0233a 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -2811,7 +2811,7 @@ static struct edge_buf *edge_buf_alloc(unsigned int size)
2811 if (size == 0) 2811 if (size == 0)
2812 return NULL; 2812 return NULL;
2813 2813
2814 eb = (struct edge_buf *)kmalloc(sizeof(struct edge_buf), GFP_KERNEL); 2814 eb = kmalloc(sizeof(struct edge_buf), GFP_KERNEL);
2815 if (eb == NULL) 2815 if (eb == NULL)
2816 return NULL; 2816 return NULL;
2817 2817
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index d72cf8bc7f76..42f757a5b876 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -595,7 +595,7 @@ static int ipaq_open(struct usb_serial_port *port, struct file *filp)
595 595
596 bytes_in = 0; 596 bytes_in = 0;
597 bytes_out = 0; 597 bytes_out = 0;
598 priv = (struct ipaq_private *)kmalloc(sizeof(struct ipaq_private), GFP_KERNEL); 598 priv = kmalloc(sizeof(struct ipaq_private), GFP_KERNEL);
599 if (priv == NULL) { 599 if (priv == NULL) {
600 err("%s - Out of memory", __FUNCTION__); 600 err("%s - Out of memory", __FUNCTION__);
601 return -ENOMEM; 601 return -ENOMEM;
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index e284d6c0fd35..62bea0c923bd 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -269,7 +269,7 @@ static int kobil_open (struct usb_serial_port *port, struct file *filp)
269 } 269 }
270 270
271 // allocate memory for write_urb transfer buffer 271 // allocate memory for write_urb transfer buffer
272 port->write_urb->transfer_buffer = (unsigned char *) kmalloc(write_urb_transfer_buffer_length, GFP_KERNEL); 272 port->write_urb->transfer_buffer = kmalloc(write_urb_transfer_buffer_length, GFP_KERNEL);
273 if (! port->write_urb->transfer_buffer) { 273 if (! port->write_urb->transfer_buffer) {
274 kfree(transfer_buffer); 274 kfree(transfer_buffer);
275 usb_free_urb(port->write_urb); 275 usb_free_urb(port->write_urb);
@@ -696,7 +696,7 @@ static int kobil_ioctl(struct usb_serial_port *port, struct file *file,
696 return 0; 696 return 0;
697 697
698 case TCFLSH: // 0x540B 698 case TCFLSH: // 0x540B
699 transfer_buffer = (unsigned char *) kmalloc(transfer_buffer_length, GFP_KERNEL); 699 transfer_buffer = kmalloc(transfer_buffer_length, GFP_KERNEL);
700 if (! transfer_buffer) { 700 if (! transfer_buffer) {
701 return -ENOBUFS; 701 return -ENOBUFS;
702 } 702 }
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index d124d780e42e..5dc2ac9afa90 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -159,7 +159,7 @@ static struct pl2303_buf *pl2303_buf_alloc(unsigned int size)
159 if (size == 0) 159 if (size == 0)
160 return NULL; 160 return NULL;
161 161
162 pb = (struct pl2303_buf *)kmalloc(sizeof(struct pl2303_buf), GFP_KERNEL); 162 pb = kmalloc(sizeof(struct pl2303_buf), GFP_KERNEL);
163 if (pb == NULL) 163 if (pb == NULL)
164 return NULL; 164 return NULL;
165 165
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index f42eb9ea6405..83189005c6fb 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1710,7 +1710,7 @@ static struct circ_buf *ti_buf_alloc(void)
1710{ 1710{
1711 struct circ_buf *cb; 1711 struct circ_buf *cb;
1712 1712
1713 cb = (struct circ_buf *)kmalloc(sizeof(struct circ_buf), GFP_KERNEL); 1713 cb = kmalloc(sizeof(struct circ_buf), GFP_KERNEL);
1714 if (cb == NULL) 1714 if (cb == NULL)
1715 return NULL; 1715 return NULL;
1716 1716
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index dc45e58e2b8c..5483d8564c1b 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -416,7 +416,7 @@ static int whiteheat_attach (struct usb_serial *serial)
416 for (i = 0; i < serial->num_ports; i++) { 416 for (i = 0; i < serial->num_ports; i++) {
417 port = serial->port[i]; 417 port = serial->port[i];
418 418
419 info = (struct whiteheat_private *)kmalloc(sizeof(struct whiteheat_private), GFP_KERNEL); 419 info = kmalloc(sizeof(struct whiteheat_private), GFP_KERNEL);
420 if (info == NULL) { 420 if (info == NULL) {
421 err("%s: Out of memory for port structures\n", serial->type->description); 421 err("%s: Out of memory for port structures\n", serial->type->description);
422 goto no_private; 422 goto no_private;
@@ -487,7 +487,7 @@ static int whiteheat_attach (struct usb_serial *serial)
487 usb_set_serial_port_data(port, info); 487 usb_set_serial_port_data(port, info);
488 } 488 }
489 489
490 command_info = (struct whiteheat_command_private *)kmalloc(sizeof(struct whiteheat_command_private), GFP_KERNEL); 490 command_info = kmalloc(sizeof(struct whiteheat_command_private), GFP_KERNEL);
491 if (command_info == NULL) { 491 if (command_info == NULL) {
492 err("%s: Out of memory for port structures\n", serial->type->description); 492 err("%s: Out of memory for port structures\n", serial->type->description);
493 goto no_command_private; 493 goto no_command_private;
diff --git a/drivers/usb/storage/sddr09.c b/drivers/usb/storage/sddr09.c
index fb8bacaae27c..e3528eca29a5 100644
--- a/drivers/usb/storage/sddr09.c
+++ b/drivers/usb/storage/sddr09.c
@@ -646,7 +646,7 @@ sddr09_read_sg_test_only(struct us_data *us) {
646 return result; 646 return result;
647 } 647 }
648 648
649 buf = (unsigned char *) kmalloc(bulklen, GFP_NOIO); 649 buf = kmalloc(bulklen, GFP_NOIO);
650 if (!buf) 650 if (!buf)
651 return -ENOMEM; 651 return -ENOMEM;
652 652
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index ab1daecfeac6..4e83f01e894e 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1615,6 +1615,16 @@ config FB_PNX4008_DUM_RGB
1615 ---help--- 1615 ---help---
1616 Say Y here to enable support for PNX4008 RGB Framebuffer 1616 Say Y here to enable support for PNX4008 RGB Framebuffer
1617 1617
1618config FB_IBM_GXT4500
1619 tristate "Framebuffer support for IBM GXT4500P adaptor"
1620 depends on PPC
1621 select FB_CFB_FILLRECT
1622 select FB_CFB_COPYAREA
1623 select FB_CFB_IMAGEBLIT
1624 ---help---
1625 Say Y here to enable support for the IBM GXT4500P display
1626 adaptor, found on some IBM System P (pSeries) machines.
1627
1618config FB_VIRTUAL 1628config FB_VIRTUAL
1619 tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)" 1629 tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)"
1620 depends on FB 1630 depends on FB
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index a6980e9a2481..309a26dd164a 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -99,6 +99,7 @@ obj-$(CONFIG_FB_IMX) += imxfb.o
99obj-$(CONFIG_FB_S3C2410) += s3c2410fb.o 99obj-$(CONFIG_FB_S3C2410) += s3c2410fb.o
100obj-$(CONFIG_FB_PNX4008_DUM) += pnx4008/ 100obj-$(CONFIG_FB_PNX4008_DUM) += pnx4008/
101obj-$(CONFIG_FB_PNX4008_DUM_RGB) += pnx4008/ 101obj-$(CONFIG_FB_PNX4008_DUM_RGB) += pnx4008/
102obj-$(CONFIG_FB_IBM_GXT4500) += gxt4500.o
102 103
103# Platform or fallback drivers go here 104# Platform or fallback drivers go here
104obj-$(CONFIG_FB_VESA) += vesafb.o 105obj-$(CONFIG_FB_VESA) += vesafb.o
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index 6761b68c35e9..6c9dc2e69c82 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -447,7 +447,7 @@ static int clcdfb_probe(struct amba_device *dev, void *id)
447 goto out; 447 goto out;
448 } 448 }
449 449
450 fb = (struct clcd_fb *) kmalloc(sizeof(struct clcd_fb), GFP_KERNEL); 450 fb = kmalloc(sizeof(struct clcd_fb), GFP_KERNEL);
451 if (!fb) { 451 if (!fb) {
452 printk(KERN_INFO "CLCD: could not allocate new clcd_fb struct\n"); 452 printk(KERN_INFO "CLCD: could not allocate new clcd_fb struct\n");
453 ret = -ENOMEM; 453 ret = -ENOMEM;
diff --git a/drivers/video/amifb.c b/drivers/video/amifb.c
index 88a47845c4f7..1a849b870bcc 100644
--- a/drivers/video/amifb.c
+++ b/drivers/video/amifb.c
@@ -2906,14 +2906,6 @@ static int ami_decode_var(struct fb_var_screeninfo *var,
2906 par->crsr.spot_x = par->crsr.spot_y = 0; 2906 par->crsr.spot_x = par->crsr.spot_y = 0;
2907 par->crsr.height = par->crsr.width = 0; 2907 par->crsr.height = par->crsr.width = 0;
2908 2908
2909#if 0 /* fbmon not done. uncomment for 2.5.x -brad */
2910 if (!fbmon_valid_timings(pixclock[clk_shift], htotal, vtotal,
2911 &fb_info)) {
2912 DPRINTK("mode doesn't fit for monitor\n");
2913 return -EINVAL;
2914 }
2915#endif
2916
2917 return 0; 2909 return 0;
2918} 2910}
2919 2911
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 176f9b85cdbe..09684d7a7ce9 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -1488,10 +1488,6 @@ static int atyfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
1488 else 1488 else
1489 info->var.accel_flags = 0; 1489 info->var.accel_flags = 0;
1490 1490
1491#if 0 /* fbmon is not done. uncomment for 2.5.x -brad */
1492 if (!fbmon_valid_timings(pixclock, htotal, vtotal, info))
1493 return -EINVAL;
1494#endif
1495 aty_crtc_to_var(&crtc, var); 1491 aty_crtc_to_var(&crtc, var);
1496 var->pixclock = par->pll_ops->pll_to_var(info, &pll); 1492 var->pixclock = par->pll_ops->pll_to_var(info, &pll);
1497 return 0; 1493 return 0;
diff --git a/drivers/video/aty/radeon_i2c.c b/drivers/video/aty/radeon_i2c.c
index 869725a13c21..e7c5b219ad1b 100644
--- a/drivers/video/aty/radeon_i2c.c
+++ b/drivers/video/aty/radeon_i2c.c
@@ -120,19 +120,19 @@ void radeon_create_i2c_busses(struct radeonfb_info *rinfo)
120void radeon_delete_i2c_busses(struct radeonfb_info *rinfo) 120void radeon_delete_i2c_busses(struct radeonfb_info *rinfo)
121{ 121{
122 if (rinfo->i2c[0].rinfo) 122 if (rinfo->i2c[0].rinfo)
123 i2c_bit_del_bus(&rinfo->i2c[0].adapter); 123 i2c_del_adapter(&rinfo->i2c[0].adapter);
124 rinfo->i2c[0].rinfo = NULL; 124 rinfo->i2c[0].rinfo = NULL;
125 125
126 if (rinfo->i2c[1].rinfo) 126 if (rinfo->i2c[1].rinfo)
127 i2c_bit_del_bus(&rinfo->i2c[1].adapter); 127 i2c_del_adapter(&rinfo->i2c[1].adapter);
128 rinfo->i2c[1].rinfo = NULL; 128 rinfo->i2c[1].rinfo = NULL;
129 129
130 if (rinfo->i2c[2].rinfo) 130 if (rinfo->i2c[2].rinfo)
131 i2c_bit_del_bus(&rinfo->i2c[2].adapter); 131 i2c_del_adapter(&rinfo->i2c[2].adapter);
132 rinfo->i2c[2].rinfo = NULL; 132 rinfo->i2c[2].rinfo = NULL;
133 133
134 if (rinfo->i2c[3].rinfo) 134 if (rinfo->i2c[3].rinfo)
135 i2c_bit_del_bus(&rinfo->i2c[3].adapter); 135 i2c_del_adapter(&rinfo->i2c[3].adapter);
136 rinfo->i2c[3].rinfo = NULL; 136 rinfo->i2c[3].rinfo = NULL;
137} 137}
138 138
diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
new file mode 100644
index 000000000000..3adf6ab0768f
--- /dev/null
+++ b/drivers/video/gxt4500.c
@@ -0,0 +1,741 @@
1/*
2 * Frame buffer device for IBM GXT4500P display adaptor
3 *
4 * Copyright (C) 2006 Paul Mackerras, IBM Corp. <paulus@samba.org>
5 */
6
7#include <linux/kernel.h>
8#include <linux/module.h>
9#include <linux/fb.h>
10#include <linux/console.h>
11#include <linux/pci.h>
12#include <linux/pci_ids.h>
13#include <linux/delay.h>
14
15#define PCI_DEVICE_ID_IBM_GXT4500P 0x21c
16
17/* GXT4500P registers */
18
19/* Registers in PCI config space */
20#define CFG_ENDIAN0 0x40
21
22/* Misc control/status registers */
23#define STATUS 0x1000
24#define CTRL_REG0 0x1004
25#define CR0_HALT_DMA 0x4
26#define CR0_RASTER_RESET 0x8
27#define CR0_GEOM_RESET 0x10
28#define CR0_MEM_CTRLER_RESET 0x20
29
30/* Framebuffer control registers */
31#define FB_AB_CTRL 0x1100
32#define FB_CD_CTRL 0x1104
33#define FB_WID_CTRL 0x1108
34#define FB_Z_CTRL 0x110c
35#define FB_VGA_CTRL 0x1110
36#define REFRESH_AB_CTRL 0x1114
37#define REFRESH_CD_CTRL 0x1118
38#define FB_OVL_CTRL 0x111c
39#define FB_CTRL_TYPE 0x80000000
40#define FB_CTRL_WIDTH_MASK 0x007f0000
41#define FB_CTRL_WIDTH_SHIFT 16
42#define FB_CTRL_START_SEG_MASK 0x00003fff
43
44#define REFRESH_START 0x1098
45#define REFRESH_SIZE 0x109c
46
47/* "Direct" framebuffer access registers */
48#define DFA_FB_A 0x11e0
49#define DFA_FB_B 0x11e4
50#define DFA_FB_C 0x11e8
51#define DFA_FB_D 0x11ec
52#define DFA_FB_ENABLE 0x80000000
53#define DFA_FB_BASE_MASK 0x03f00000
54#define DFA_FB_STRIDE_1k 0x00000000
55#define DFA_FB_STRIDE_2k 0x00000010
56#define DFA_FB_STRIDE_4k 0x00000020
57#define DFA_PIX_8BIT 0x00000000
58#define DFA_PIX_16BIT_565 0x00000001
59#define DFA_PIX_16BIT_1555 0x00000002
60#define DFA_PIX_24BIT 0x00000004
61#define DFA_PIX_32BIT 0x00000005
62
63/* maps DFA_PIX_* to pixel size in bytes */
64static const unsigned char pixsize[] = {
65 1, 2, 2, 2, 4, 4
66};
67
68/* Display timing generator registers */
69#define DTG_CONTROL 0x1900
70#define DTG_CTL_SCREEN_REFRESH 2
71#define DTG_CTL_ENABLE 1
72#define DTG_HORIZ_EXTENT 0x1904
73#define DTG_HORIZ_DISPLAY 0x1908
74#define DTG_HSYNC_START 0x190c
75#define DTG_HSYNC_END 0x1910
76#define DTG_HSYNC_END_COMP 0x1914
77#define DTG_VERT_EXTENT 0x1918
78#define DTG_VERT_DISPLAY 0x191c
79#define DTG_VSYNC_START 0x1920
80#define DTG_VSYNC_END 0x1924
81#define DTG_VERT_SHORT 0x1928
82
83/* PLL/RAMDAC registers */
84#define DISP_CTL 0x402c
85#define DISP_CTL_OFF 2
86#define SYNC_CTL 0x4034
87#define SYNC_CTL_SYNC_ON_RGB 1
88#define SYNC_CTL_SYNC_OFF 2
89#define SYNC_CTL_HSYNC_INV 8
90#define SYNC_CTL_VSYNC_INV 0x10
91#define SYNC_CTL_HSYNC_OFF 0x20
92#define SYNC_CTL_VSYNC_OFF 0x40
93
94#define PLL_M 0x4040
95#define PLL_N 0x4044
96#define PLL_POSTDIV 0x4048
97
98/* Hardware cursor */
99#define CURSOR_X 0x4078
100#define CURSOR_Y 0x407c
101#define CURSOR_HOTSPOT 0x4080
102#define CURSOR_MODE 0x4084
103#define CURSOR_MODE_OFF 0
104#define CURSOR_MODE_4BPP 1
105#define CURSOR_PIXMAP 0x5000
106#define CURSOR_CMAP 0x7400
107
108/* Window attribute table */
109#define WAT_FMT 0x4100
110#define WAT_FMT_24BIT 0
111#define WAT_FMT_16BIT_565 1
112#define WAT_FMT_16BIT_1555 2
113#define WAT_FMT_32BIT 3 /* 0 vs. 3 is a guess */
114#define WAT_FMT_8BIT_332 9
115#define WAT_FMT_8BIT 0xa
116#define WAT_FMT_NO_CMAP 4 /* ORd in to other values */
117#define WAT_CMAP_OFFSET 0x4104 /* 4-bit value gets << 6 */
118#define WAT_CTRL 0x4108
119#define WAT_CTRL_SEL_B 1 /* select B buffer if 1 */
120#define WAT_CTRL_NO_INC 2
121#define WAT_GAMMA_CTRL 0x410c
122#define WAT_GAMMA_DISABLE 1 /* disables gamma cmap */
123#define WAT_OVL_CTRL 0x430c /* controls overlay */
124
125/* Indexed by DFA_PIX_* values */
126static const unsigned char watfmt[] = {
127 WAT_FMT_8BIT, WAT_FMT_16BIT_565, WAT_FMT_16BIT_1555, 0,
128 WAT_FMT_24BIT, WAT_FMT_32BIT
129};
130
131/* Colormap array; 1k entries of 4 bytes each */
132#define CMAP 0x6000
133
134#define readreg(par, reg) readl((par)->regs + (reg))
135#define writereg(par, reg, val) writel((val), (par)->regs + (reg))
136
137struct gxt4500_par {
138 void __iomem *regs;
139
140 int pixfmt; /* pixel format, see DFA_PIX_* values */
141
142 /* PLL parameters */
143 int pll_m; /* ref clock divisor */
144 int pll_n; /* VCO divisor */
145 int pll_pd1; /* first post-divisor */
146 int pll_pd2; /* second post-divisor */
147
148 u32 pseudo_palette[16]; /* used in color blits */
149};
150
151/* mode requested by user */
152static char *mode_option;
153
154/* default mode: 1280x1024 @ 60 Hz, 8 bpp */
155static const struct fb_videomode defaultmode __devinitdata = {
156 .refresh = 60,
157 .xres = 1280,
158 .yres = 1024,
159 .pixclock = 9295,
160 .left_margin = 248,
161 .right_margin = 48,
162 .upper_margin = 38,
163 .lower_margin = 1,
164 .hsync_len = 112,
165 .vsync_len = 3,
166 .vmode = FB_VMODE_NONINTERLACED
167};
168
169/*
170 * The refclk and VCO dividers appear to use a linear feedback shift
171 * register, which gets reloaded when it reaches a terminal value, at
172 * which point the divider output is toggled. Thus one can obtain
173 * whatever divisor is required by putting the appropriate value into
174 * the reload register. For a divisor of N, one puts the value from
175 * the LFSR sequence that comes N-1 places before the terminal value
176 * into the reload register.
177 */
178
179static const unsigned char mdivtab[] = {
180/* 1 */ 0x3f, 0x00, 0x20, 0x10, 0x28, 0x14, 0x2a, 0x15, 0x0a,
181/* 10 */ 0x25, 0x32, 0x19, 0x0c, 0x26, 0x13, 0x09, 0x04, 0x22, 0x11,
182/* 20 */ 0x08, 0x24, 0x12, 0x29, 0x34, 0x1a, 0x2d, 0x36, 0x1b, 0x0d,
183/* 30 */ 0x06, 0x23, 0x31, 0x38, 0x1c, 0x2e, 0x17, 0x0b, 0x05, 0x02,
184/* 40 */ 0x21, 0x30, 0x18, 0x2c, 0x16, 0x2b, 0x35, 0x3a, 0x1d, 0x0e,
185/* 50 */ 0x27, 0x33, 0x39, 0x3c, 0x1e, 0x2f, 0x37, 0x3b, 0x3d, 0x3e,
186/* 60 */ 0x1f, 0x0f, 0x07, 0x03, 0x01,
187};
188
189static const unsigned char ndivtab[] = {
190/* 2 */ 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0x78, 0xbc, 0x5e,
191/* 10 */ 0x2f, 0x17, 0x0b, 0x85, 0xc2, 0xe1, 0x70, 0x38, 0x9c, 0x4e,
192/* 20 */ 0xa7, 0xd3, 0xe9, 0xf4, 0xfa, 0xfd, 0xfe, 0x7f, 0xbf, 0xdf,
193/* 30 */ 0xef, 0x77, 0x3b, 0x1d, 0x8e, 0xc7, 0xe3, 0x71, 0xb8, 0xdc,
194/* 40 */ 0x6e, 0xb7, 0x5b, 0x2d, 0x16, 0x8b, 0xc5, 0xe2, 0xf1, 0xf8,
195/* 50 */ 0xfc, 0x7e, 0x3f, 0x9f, 0xcf, 0x67, 0xb3, 0xd9, 0x6c, 0xb6,
196/* 60 */ 0xdb, 0x6d, 0x36, 0x9b, 0x4d, 0x26, 0x13, 0x89, 0xc4, 0x62,
197/* 70 */ 0xb1, 0xd8, 0xec, 0xf6, 0xfb, 0x7d, 0xbe, 0x5f, 0xaf, 0x57,
198/* 80 */ 0x2b, 0x95, 0x4a, 0x25, 0x92, 0x49, 0xa4, 0x52, 0x29, 0x94,
199/* 90 */ 0xca, 0x65, 0xb2, 0x59, 0x2c, 0x96, 0xcb, 0xe5, 0xf2, 0x79,
200/* 100 */ 0x3c, 0x1e, 0x0f, 0x07, 0x83, 0x41, 0x20, 0x90, 0x48, 0x24,
201/* 110 */ 0x12, 0x09, 0x84, 0x42, 0xa1, 0x50, 0x28, 0x14, 0x8a, 0x45,
202/* 120 */ 0xa2, 0xd1, 0xe8, 0x74, 0xba, 0xdd, 0xee, 0xf7, 0x7b, 0x3d,
203/* 130 */ 0x9e, 0x4f, 0x27, 0x93, 0xc9, 0xe4, 0x72, 0x39, 0x1c, 0x0e,
204/* 140 */ 0x87, 0xc3, 0x61, 0x30, 0x18, 0x8c, 0xc6, 0x63, 0x31, 0x98,
205/* 150 */ 0xcc, 0xe6, 0x73, 0xb9, 0x5c, 0x2e, 0x97, 0x4b, 0xa5, 0xd2,
206/* 160 */ 0x69, 0xb4, 0xda, 0xed, 0x76, 0xbb, 0x5d, 0xae, 0xd7, 0x6b,
207/* 170 */ 0xb5, 0x5a, 0xad, 0x56, 0xab, 0xd5, 0x6a, 0x35, 0x1a, 0x8d,
208/* 180 */ 0x46, 0x23, 0x11, 0x88, 0x44, 0x22, 0x91, 0xc8, 0x64, 0x32,
209/* 190 */ 0x19, 0x0c, 0x86, 0x43, 0x21, 0x10, 0x08, 0x04, 0x02, 0x81,
210/* 200 */ 0x40, 0xa0, 0xd0, 0x68, 0x34, 0x9a, 0xcd, 0x66, 0x33, 0x99,
211/* 210 */ 0x4c, 0xa6, 0x53, 0xa9, 0xd4, 0xea, 0x75, 0x3a, 0x9d, 0xce,
212/* 220 */ 0xe7, 0xf3, 0xf9, 0x7c, 0x3e, 0x1f, 0x8f, 0x47, 0xa3, 0x51,
213/* 230 */ 0xa8, 0x54, 0xaa, 0x55, 0x2a, 0x15, 0x0a, 0x05, 0x82, 0xc1,
214/* 240 */ 0x60, 0xb0, 0x58, 0xac, 0xd6, 0xeb, 0xf5, 0x7a, 0xbd, 0xde,
215/* 250 */ 0x6f, 0x37, 0x1b, 0x0d, 0x06, 0x03, 0x01,
216};
217
218#define REF_PERIOD_PS 9259 /* period of reference clock in ps */
219
220static int calc_pll(int period_ps, struct gxt4500_par *par)
221{
222 int m, n, pdiv1, pdiv2, postdiv;
223 int pll_period, best_error, t;
224
225 /* only deal with range 1MHz - 400MHz */
226 if (period_ps < 2500 || period_ps > 1000000)
227 return -1;
228
229 best_error = 1000000;
230 for (pdiv1 = 1; pdiv1 <= 8; ++pdiv1) {
231 for (pdiv2 = 1; pdiv2 <= pdiv1; ++pdiv2) {
232 postdiv = pdiv1 * pdiv2;
233 pll_period = (period_ps + postdiv - 1) / postdiv;
234 /* keep pll in range 500..1250 MHz */
235 if (pll_period < 800 || pll_period > 2000)
236 continue;
237 for (m = 3; m <= 40; ++m) {
238 n = REF_PERIOD_PS * m * postdiv / period_ps;
239 if (n < 5 || n > 256)
240 continue;
241 t = REF_PERIOD_PS * m * postdiv / n;
242 t -= period_ps;
243 if (t >= 0 && t < best_error) {
244 par->pll_m = m;
245 par->pll_n = n;
246 par->pll_pd1 = pdiv1;
247 par->pll_pd2 = pdiv2;
248 best_error = t;
249 }
250 }
251 }
252 }
253 if (best_error == 1000000)
254 return -1;
255 return 0;
256}
257
258static int calc_pixclock(struct gxt4500_par *par)
259{
260 return REF_PERIOD_PS * par->pll_m * par->pll_pd1 * par->pll_pd2
261 / par->pll_n;
262}
263
264static int gxt4500_var_to_par(struct fb_var_screeninfo *var,
265 struct gxt4500_par *par)
266{
267 if (var->xres + var->xoffset > var->xres_virtual ||
268 var->yres + var->yoffset > var->yres_virtual ||
269 var->xres_virtual > 4096)
270 return -EINVAL;
271 if ((var->vmode & FB_VMODE_MASK) != FB_VMODE_NONINTERLACED)
272 return -EINVAL;
273
274 if (calc_pll(var->pixclock, par) < 0)
275 return -EINVAL;
276
277 switch (var->bits_per_pixel) {
278 case 32:
279 if (var->transp.length)
280 par->pixfmt = DFA_PIX_32BIT;
281 else
282 par->pixfmt = DFA_PIX_24BIT;
283 break;
284 case 24:
285 par->pixfmt = DFA_PIX_24BIT;
286 break;
287 case 16:
288 if (var->green.length == 5)
289 par->pixfmt = DFA_PIX_16BIT_1555;
290 else
291 par->pixfmt = DFA_PIX_16BIT_565;
292 break;
293 case 8:
294 par->pixfmt = DFA_PIX_8BIT;
295 break;
296 default:
297 return -EINVAL;
298 }
299
300 return 0;
301}
302
303static const struct fb_bitfield eightbits = {0, 8};
304static const struct fb_bitfield nobits = {0, 0};
305
306static void gxt4500_unpack_pixfmt(struct fb_var_screeninfo *var,
307 int pixfmt)
308{
309 var->bits_per_pixel = pixsize[pixfmt] * 8;
310 var->red = eightbits;
311 var->green = eightbits;
312 var->blue = eightbits;
313 var->transp = nobits;
314
315 switch (pixfmt) {
316 case DFA_PIX_16BIT_565:
317 var->red.length = 5;
318 var->green.length = 6;
319 var->blue.length = 5;
320 break;
321 case DFA_PIX_16BIT_1555:
322 var->red.length = 5;
323 var->green.length = 5;
324 var->blue.length = 5;
325 var->transp.length = 1;
326 break;
327 case DFA_PIX_32BIT:
328 var->transp.length = 8;
329 break;
330 }
331 if (pixfmt != DFA_PIX_8BIT) {
332 var->green.offset = var->red.length;
333 var->blue.offset = var->green.offset + var->green.length;
334 if (var->transp.length)
335 var->transp.offset =
336 var->blue.offset + var->blue.length;
337 }
338}
339
340static int gxt4500_check_var(struct fb_var_screeninfo *var,
341 struct fb_info *info)
342{
343 struct gxt4500_par par;
344 int err;
345
346 par = *(struct gxt4500_par *)info->par;
347 err = gxt4500_var_to_par(var, &par);
348 if (!err) {
349 var->pixclock = calc_pixclock(&par);
350 gxt4500_unpack_pixfmt(var, par.pixfmt);
351 }
352 return err;
353}
354
355static int gxt4500_set_par(struct fb_info *info)
356{
357 struct gxt4500_par *par = info->par;
358 struct fb_var_screeninfo *var = &info->var;
359 int err;
360 u32 ctrlreg;
361 unsigned int dfa_ctl, pixfmt, stride;
362 unsigned int wid_tiles, i;
363 unsigned int prefetch_pix, htot;
364 struct gxt4500_par save_par;
365
366 save_par = *par;
367 err = gxt4500_var_to_par(var, par);
368 if (err) {
369 *par = save_par;
370 return err;
371 }
372
373 /* turn off DTG for now */
374 ctrlreg = readreg(par, DTG_CONTROL);
375 ctrlreg &= ~(DTG_CTL_ENABLE | DTG_CTL_SCREEN_REFRESH);
376 writereg(par, DTG_CONTROL, ctrlreg);
377
378 /* set PLL registers */
379 writereg(par, PLL_M, mdivtab[par->pll_m - 1]);
380 writereg(par, PLL_N, ndivtab[par->pll_n - 2]);
381 writereg(par, PLL_POSTDIV,
382 ((8 - par->pll_pd1) << 3) | (8 - par->pll_pd2));
383 msleep(20);
384
385 /* turn off hardware cursor */
386 writereg(par, CURSOR_MODE, CURSOR_MODE_OFF);
387
388 /* reset raster engine */
389 writereg(par, CTRL_REG0, CR0_RASTER_RESET | (CR0_RASTER_RESET << 16));
390 udelay(10);
391 writereg(par, CTRL_REG0, CR0_RASTER_RESET << 16);
392
393 /* set display timing generator registers */
394 htot = var->xres + var->left_margin + var->right_margin +
395 var->hsync_len;
396 writereg(par, DTG_HORIZ_EXTENT, htot - 1);
397 writereg(par, DTG_HORIZ_DISPLAY, var->xres - 1);
398 writereg(par, DTG_HSYNC_START, var->xres + var->right_margin - 1);
399 writereg(par, DTG_HSYNC_END,
400 var->xres + var->right_margin + var->hsync_len - 1);
401 writereg(par, DTG_HSYNC_END_COMP,
402 var->xres + var->right_margin + var->hsync_len - 1);
403 writereg(par, DTG_VERT_EXTENT,
404 var->yres + var->upper_margin + var->lower_margin +
405 var->vsync_len - 1);
406 writereg(par, DTG_VERT_DISPLAY, var->yres - 1);
407 writereg(par, DTG_VSYNC_START, var->yres + var->lower_margin - 1);
408 writereg(par, DTG_VSYNC_END,
409 var->yres + var->lower_margin + var->vsync_len - 1);
410 prefetch_pix = 3300000 / var->pixclock;
411 if (prefetch_pix >= htot)
412 prefetch_pix = htot - 1;
413 writereg(par, DTG_VERT_SHORT, htot - prefetch_pix - 1);
414 ctrlreg |= DTG_CTL_ENABLE | DTG_CTL_SCREEN_REFRESH;
415 writereg(par, DTG_CONTROL, ctrlreg);
416
417 /* calculate stride in DFA aperture */
418 if (var->xres_virtual > 2048) {
419 stride = 4096;
420 dfa_ctl = DFA_FB_STRIDE_4k;
421 } else if (var->xres_virtual > 1024) {
422 stride = 2048;
423 dfa_ctl = DFA_FB_STRIDE_2k;
424 } else {
425 stride = 1024;
426 dfa_ctl = DFA_FB_STRIDE_1k;
427 }
428
429 /* Set up framebuffer definition */
430 wid_tiles = (var->xres_virtual + 63) >> 6;
431
432 /* XXX add proper FB allocation here someday */
433 writereg(par, FB_AB_CTRL, FB_CTRL_TYPE | (wid_tiles << 16) | 0);
434 writereg(par, REFRESH_AB_CTRL, FB_CTRL_TYPE | (wid_tiles << 16) | 0);
435 writereg(par, FB_CD_CTRL, FB_CTRL_TYPE | (wid_tiles << 16) | 0);
436 writereg(par, REFRESH_CD_CTRL, FB_CTRL_TYPE | (wid_tiles << 16) | 0);
437 writereg(par, REFRESH_START, (var->xoffset << 16) | var->yoffset);
438 writereg(par, REFRESH_SIZE, (var->xres << 16) | var->yres);
439
440 /* Set up framebuffer access by CPU */
441
442 pixfmt = par->pixfmt;
443 dfa_ctl |= DFA_FB_ENABLE | pixfmt;
444 writereg(par, DFA_FB_A, dfa_ctl);
445
446 /*
447 * Set up window attribute table.
448 * We set all WAT entries the same so it doesn't matter what the
449 * window ID (WID) plane contains.
450 */
451 for (i = 0; i < 32; ++i) {
452 writereg(par, WAT_FMT + (i << 4), watfmt[pixfmt]);
453 writereg(par, WAT_CMAP_OFFSET + (i << 4), 0);
454 writereg(par, WAT_CTRL + (i << 4), 0);
455 writereg(par, WAT_GAMMA_CTRL + (i << 4), WAT_GAMMA_DISABLE);
456 }
457
458 /* Set sync polarity etc. */
459 ctrlreg = readreg(par, SYNC_CTL) &
460 ~(SYNC_CTL_SYNC_ON_RGB | SYNC_CTL_HSYNC_INV |
461 SYNC_CTL_VSYNC_INV);
462 if (var->sync & FB_SYNC_ON_GREEN)
463 ctrlreg |= SYNC_CTL_SYNC_ON_RGB;
464 if (!(var->sync & FB_SYNC_HOR_HIGH_ACT))
465 ctrlreg |= SYNC_CTL_HSYNC_INV;
466 if (!(var->sync & FB_SYNC_VERT_HIGH_ACT))
467 ctrlreg |= SYNC_CTL_VSYNC_INV;
468 writereg(par, SYNC_CTL, ctrlreg);
469
470 info->fix.line_length = stride * pixsize[pixfmt];
471 info->fix.visual = (pixfmt == DFA_PIX_8BIT)? FB_VISUAL_PSEUDOCOLOR:
472 FB_VISUAL_DIRECTCOLOR;
473
474 return 0;
475}
476
477static int gxt4500_setcolreg(unsigned int reg, unsigned int red,
478 unsigned int green, unsigned int blue,
479 unsigned int transp, struct fb_info *info)
480{
481 u32 cmap_entry;
482 struct gxt4500_par *par = info->par;
483
484 if (reg > 1023)
485 return 1;
486 cmap_entry = ((transp & 0xff00) << 16) | ((blue & 0xff00) << 8) |
487 (green & 0xff00) | (red >> 8);
488 writereg(par, CMAP + reg * 4, cmap_entry);
489
490 if (reg < 16 && par->pixfmt != DFA_PIX_8BIT) {
491 u32 *pal = info->pseudo_palette;
492 u32 val = reg;
493 switch (par->pixfmt) {
494 case DFA_PIX_16BIT_565:
495 val |= (reg << 11) | (reg << 6);
496 break;
497 case DFA_PIX_16BIT_1555:
498 val |= (reg << 10) | (reg << 5);
499 break;
500 case DFA_PIX_32BIT:
501 val |= (reg << 24);
502 /* fall through */
503 case DFA_PIX_24BIT:
504 val |= (reg << 16) | (reg << 8);
505 break;
506 }
507 pal[reg] = val;
508 }
509
510 return 0;
511}
512
513static int gxt4500_pan_display(struct fb_var_screeninfo *var,
514 struct fb_info *info)
515{
516 struct gxt4500_par *par = info->par;
517
518 if (var->xoffset & 7)
519 return -EINVAL;
520 if (var->xoffset + var->xres > var->xres_virtual ||
521 var->yoffset + var->yres > var->yres_virtual)
522 return -EINVAL;
523
524 writereg(par, REFRESH_START, (var->xoffset << 16) | var->yoffset);
525 return 0;
526}
527
528static int gxt4500_blank(int blank, struct fb_info *info)
529{
530 struct gxt4500_par *par = info->par;
531 int ctrl, dctl;
532
533 ctrl = readreg(par, SYNC_CTL);
534 ctrl &= ~(SYNC_CTL_SYNC_OFF | SYNC_CTL_HSYNC_OFF | SYNC_CTL_VSYNC_OFF);
535 dctl = readreg(par, DISP_CTL);
536 dctl |= DISP_CTL_OFF;
537 switch (blank) {
538 case FB_BLANK_UNBLANK:
539 dctl &= ~DISP_CTL_OFF;
540 break;
541 case FB_BLANK_POWERDOWN:
542 ctrl |= SYNC_CTL_SYNC_OFF;
543 break;
544 case FB_BLANK_HSYNC_SUSPEND:
545 ctrl |= SYNC_CTL_HSYNC_OFF;
546 break;
547 case FB_BLANK_VSYNC_SUSPEND:
548 ctrl |= SYNC_CTL_VSYNC_OFF;
549 break;
550 default: ;
551 }
552 writereg(par, SYNC_CTL, ctrl);
553 writereg(par, DISP_CTL, dctl);
554
555 return 0;
556}
557
558static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
559 .id = "IBM GXT4500P",
560 .type = FB_TYPE_PACKED_PIXELS,
561 .visual = FB_VISUAL_PSEUDOCOLOR,
562 .xpanstep = 8,
563 .ypanstep = 1,
564 .mmio_len = 0x20000,
565};
566
567static struct fb_ops gxt4500_ops = {
568 .owner = THIS_MODULE,
569 .fb_check_var = gxt4500_check_var,
570 .fb_set_par = gxt4500_set_par,
571 .fb_setcolreg = gxt4500_setcolreg,
572 .fb_pan_display = gxt4500_pan_display,
573 .fb_blank = gxt4500_blank,
574 .fb_fillrect = cfb_fillrect,
575 .fb_copyarea = cfb_copyarea,
576 .fb_imageblit = cfb_imageblit,
577};
578
579/* PCI functions */
580static int __devinit gxt4500_probe(struct pci_dev *pdev,
581 const struct pci_device_id *ent)
582{
583 int err;
584 unsigned long reg_phys, fb_phys;
585 struct gxt4500_par *par;
586 struct fb_info *info;
587 struct fb_var_screeninfo var;
588
589 err = pci_enable_device(pdev);
590 if (err) {
591 dev_err(&pdev->dev, "gxt4500: cannot enable PCI device: %d\n",
592 err);
593 return err;
594 }
595
596 reg_phys = pci_resource_start(pdev, 0);
597 if (!request_mem_region(reg_phys, pci_resource_len(pdev, 0),
598 "gxt4500 regs")) {
599 dev_err(&pdev->dev, "gxt4500: cannot get registers\n");
600 goto err_nodev;
601 }
602
603 fb_phys = pci_resource_start(pdev, 1);
604 if (!request_mem_region(fb_phys, pci_resource_len(pdev, 1),
605 "gxt4500 FB")) {
606 dev_err(&pdev->dev, "gxt4500: cannot get framebuffer\n");
607 goto err_free_regs;
608 }
609
610 info = framebuffer_alloc(sizeof(struct gxt4500_par), &pdev->dev);
611 if (!info) {
612 dev_err(&pdev->dev, "gxt4500: cannot alloc FB info record");
613 goto err_free_fb;
614 }
615 par = info->par;
616 info->fix = gxt4500_fix;
617 info->pseudo_palette = par->pseudo_palette;
618
619 info->fix.mmio_start = reg_phys;
620 par->regs = ioremap(reg_phys, pci_resource_len(pdev, 0));
621 if (!par->regs) {
622 dev_err(&pdev->dev, "gxt4500: cannot map registers\n");
623 goto err_free_all;
624 }
625
626 info->fix.smem_start = fb_phys;
627 info->fix.smem_len = pci_resource_len(pdev, 1);
628 info->screen_base = ioremap(fb_phys, pci_resource_len(pdev, 1));
629 if (!info->screen_base) {
630 dev_err(&pdev->dev, "gxt4500: cannot map framebuffer\n");
631 goto err_unmap_regs;
632 }
633
634 pci_set_drvdata(pdev, info);
635
636 /* Set byte-swapping for DFA aperture for all pixel sizes */
637 pci_write_config_dword(pdev, CFG_ENDIAN0, 0x333300);
638
639 info->fbops = &gxt4500_ops;
640 info->flags = FBINFO_FLAG_DEFAULT;
641
642 err = fb_alloc_cmap(&info->cmap, 256, 0);
643 if (err) {
644 dev_err(&pdev->dev, "gxt4500: cannot allocate cmap\n");
645 goto err_unmap_all;
646 }
647
648 gxt4500_blank(FB_BLANK_UNBLANK, info);
649
650 if (!fb_find_mode(&var, info, mode_option, NULL, 0, &defaultmode, 8)) {
651 dev_err(&pdev->dev, "gxt4500: cannot find valid video mode\n");
652 goto err_free_cmap;
653 }
654 info->var = var;
655 if (gxt4500_set_par(info)) {
656 printk(KERN_ERR "gxt4500: cannot set video mode\n");
657 goto err_free_cmap;
658 }
659
660 if (register_framebuffer(info) < 0) {
661 dev_err(&pdev->dev, "gxt4500: cannot register framebuffer\n");
662 goto err_free_cmap;
663 }
664 printk(KERN_INFO "fb%d: %s frame buffer device\n",
665 info->node, info->fix.id);
666
667 return 0;
668
669 err_free_cmap:
670 fb_dealloc_cmap(&info->cmap);
671 err_unmap_all:
672 iounmap(info->screen_base);
673 err_unmap_regs:
674 iounmap(par->regs);
675 err_free_all:
676 framebuffer_release(info);
677 err_free_fb:
678 release_mem_region(fb_phys, pci_resource_len(pdev, 1));
679 err_free_regs:
680 release_mem_region(reg_phys, pci_resource_len(pdev, 0));
681 err_nodev:
682 return -ENODEV;
683}
684
685static void __devexit gxt4500_remove(struct pci_dev *pdev)
686{
687 struct fb_info *info = pci_get_drvdata(pdev);
688 struct gxt4500_par *par;
689
690 if (!info)
691 return;
692 par = info->par;
693 unregister_framebuffer(info);
694 fb_dealloc_cmap(&info->cmap);
695 iounmap(par->regs);
696 iounmap(info->screen_base);
697 release_mem_region(pci_resource_start(pdev, 0),
698 pci_resource_len(pdev, 0));
699 release_mem_region(pci_resource_start(pdev, 1),
700 pci_resource_len(pdev, 1));
701 framebuffer_release(info);
702}
703
704/* supported chipsets */
705static const struct pci_device_id gxt4500_pci_tbl[] = {
706 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_GXT4500P,
707 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
708 { 0 }
709};
710
711MODULE_DEVICE_TABLE(pci, gxt4500_pci_tbl);
712
713static struct pci_driver gxt4500_driver = {
714 .name = "gxt4500",
715 .id_table = gxt4500_pci_tbl,
716 .probe = gxt4500_probe,
717 .remove = __devexit_p(gxt4500_remove),
718};
719
720static int __devinit gxt4500_init(void)
721{
722#ifndef MODULE
723 if (fb_get_options("gxt4500", &mode_option))
724 return -ENODEV;
725#endif
726
727 return pci_register_driver(&gxt4500_driver);
728}
729module_init(gxt4500_init);
730
731static void __exit gxt4500_exit(void)
732{
733 pci_unregister_driver(&gxt4500_driver);
734}
735module_exit(gxt4500_exit);
736
737MODULE_AUTHOR("Paul Mackerras <paulus@samba.org>");
738MODULE_DESCRIPTION("FBDev driver for IBM GXT4500P");
739MODULE_LICENSE("GPL");
740module_param(mode_option, charp, 0);
741MODULE_PARM_DESC(mode_option, "Specify resolution as \"<xres>x<yres>[-<bpp>][@<refresh>]\"");
diff --git a/drivers/video/i810/i810-i2c.c b/drivers/video/i810/i810-i2c.c
index b952e4504abe..961f4d404467 100644
--- a/drivers/video/i810/i810-i2c.c
+++ b/drivers/video/i810/i810-i2c.c
@@ -137,15 +137,15 @@ void i810_create_i2c_busses(struct i810fb_par *par)
137void i810_delete_i2c_busses(struct i810fb_par *par) 137void i810_delete_i2c_busses(struct i810fb_par *par)
138{ 138{
139 if (par->chan[0].par) 139 if (par->chan[0].par)
140 i2c_bit_del_bus(&par->chan[0].adapter); 140 i2c_del_adapter(&par->chan[0].adapter);
141 par->chan[0].par = NULL; 141 par->chan[0].par = NULL;
142 142
143 if (par->chan[1].par) 143 if (par->chan[1].par)
144 i2c_bit_del_bus(&par->chan[1].adapter); 144 i2c_del_adapter(&par->chan[1].adapter);
145 par->chan[1].par = NULL; 145 par->chan[1].par = NULL;
146 146
147 if (par->chan[2].par) 147 if (par->chan[2].par)
148 i2c_bit_del_bus(&par->chan[2].adapter); 148 i2c_del_adapter(&par->chan[2].adapter);
149 par->chan[2].par = NULL; 149 par->chan[2].par = NULL;
150} 150}
151 151
diff --git a/drivers/video/intelfb/intelfb_i2c.c b/drivers/video/intelfb/intelfb_i2c.c
index 5686e2164e39..33bc41f50540 100644
--- a/drivers/video/intelfb/intelfb_i2c.c
+++ b/drivers/video/intelfb/intelfb_i2c.c
@@ -188,11 +188,11 @@ void intelfb_delete_i2c_busses(struct intelfb_info *dinfo)
188 188
189 for (i = 0; i < MAX_OUTPUTS; i++) { 189 for (i = 0; i < MAX_OUTPUTS; i++) {
190 if (dinfo->output[i].i2c_bus.dinfo) { 190 if (dinfo->output[i].i2c_bus.dinfo) {
191 i2c_bit_del_bus(&dinfo->output[i].i2c_bus.adapter); 191 i2c_del_adapter(&dinfo->output[i].i2c_bus.adapter);
192 dinfo->output[i].i2c_bus.dinfo = NULL; 192 dinfo->output[i].i2c_bus.dinfo = NULL;
193 } 193 }
194 if (dinfo->output[i].ddc_bus.dinfo) { 194 if (dinfo->output[i].ddc_bus.dinfo) {
195 i2c_bit_del_bus(&dinfo->output[i].ddc_bus.adapter); 195 i2c_del_adapter(&dinfo->output[i].ddc_bus.adapter);
196 dinfo->output[i].ddc_bus.dinfo = NULL; 196 dinfo->output[i].ddc_bus.dinfo = NULL;
197 } 197 }
198 } 198 }
diff --git a/drivers/video/matrox/i2c-matroxfb.c b/drivers/video/matrox/i2c-matroxfb.c
index 795c1a99a680..fe28848e7b52 100644
--- a/drivers/video/matrox/i2c-matroxfb.c
+++ b/drivers/video/matrox/i2c-matroxfb.c
@@ -124,7 +124,7 @@ static int i2c_bus_reg(struct i2c_bit_adapter* b, struct matrox_fb_info* minfo,
124 124
125static void i2c_bit_bus_del(struct i2c_bit_adapter* b) { 125static void i2c_bit_bus_del(struct i2c_bit_adapter* b) {
126 if (b->initialized) { 126 if (b->initialized) {
127 i2c_bit_del_bus(&b->adapter); 127 i2c_del_adapter(&b->adapter);
128 b->initialized = 0; 128 b->initialized = 0;
129 } 129 }
130} 130}
@@ -146,7 +146,7 @@ static void* i2c_matroxfb_probe(struct matrox_fb_info* minfo) {
146 unsigned long flags; 146 unsigned long flags;
147 struct matroxfb_dh_maven_info* m2info; 147 struct matroxfb_dh_maven_info* m2info;
148 148
149 m2info = (struct matroxfb_dh_maven_info*)kmalloc(sizeof(*m2info), GFP_KERNEL); 149 m2info = kmalloc(sizeof(*m2info), GFP_KERNEL);
150 if (!m2info) 150 if (!m2info)
151 return NULL; 151 return NULL;
152 152
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index e9b4115fcad0..cb2aa402ddfd 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -2028,7 +2028,7 @@ static int matroxfb_probe(struct pci_dev* pdev, const struct pci_device_id* dumm
2028 } 2028 }
2029 2029
2030#ifdef CONFIG_FB_MATROX_MULTIHEAD 2030#ifdef CONFIG_FB_MATROX_MULTIHEAD
2031 minfo = (struct matrox_fb_info*)kmalloc(sizeof(*minfo), GFP_KERNEL); 2031 minfo = kmalloc(sizeof(*minfo), GFP_KERNEL);
2032 if (!minfo) 2032 if (!minfo)
2033 return -1; 2033 return -1;
2034#else 2034#else
diff --git a/drivers/video/matrox/matroxfb_crtc2.c b/drivers/video/matrox/matroxfb_crtc2.c
index 27eb4bb4f89f..2c9801090fae 100644
--- a/drivers/video/matrox/matroxfb_crtc2.c
+++ b/drivers/video/matrox/matroxfb_crtc2.c
@@ -694,7 +694,7 @@ static void* matroxfb_crtc2_probe(struct matrox_fb_info* minfo) {
694 /* hardware is CRTC2 incapable... */ 694 /* hardware is CRTC2 incapable... */
695 if (!ACCESS_FBINFO(devflags.crtc2)) 695 if (!ACCESS_FBINFO(devflags.crtc2))
696 return NULL; 696 return NULL;
697 m2info = (struct matroxfb_dh_fb_info*)kmalloc(sizeof(*m2info), GFP_KERNEL); 697 m2info = kmalloc(sizeof(*m2info), GFP_KERNEL);
698 if (!m2info) { 698 if (!m2info) {
699 printk(KERN_ERR "matroxfb_crtc2: Not enough memory for CRTC2 control structs\n"); 699 printk(KERN_ERR "matroxfb_crtc2: Not enough memory for CRTC2 control structs\n");
700 return NULL; 700 return NULL;
diff --git a/drivers/video/nvidia/nv_i2c.c b/drivers/video/nvidia/nv_i2c.c
index 442e85328341..8454adf2d178 100644
--- a/drivers/video/nvidia/nv_i2c.c
+++ b/drivers/video/nvidia/nv_i2c.c
@@ -147,15 +147,15 @@ void nvidia_create_i2c_busses(struct nvidia_par *par)
147void nvidia_delete_i2c_busses(struct nvidia_par *par) 147void nvidia_delete_i2c_busses(struct nvidia_par *par)
148{ 148{
149 if (par->chan[0].par) 149 if (par->chan[0].par)
150 i2c_bit_del_bus(&par->chan[0].adapter); 150 i2c_del_adapter(&par->chan[0].adapter);
151 par->chan[0].par = NULL; 151 par->chan[0].par = NULL;
152 152
153 if (par->chan[1].par) 153 if (par->chan[1].par)
154 i2c_bit_del_bus(&par->chan[1].adapter); 154 i2c_del_adapter(&par->chan[1].adapter);
155 par->chan[1].par = NULL; 155 par->chan[1].par = NULL;
156 156
157 if (par->chan[2].par) 157 if (par->chan[2].par)
158 i2c_bit_del_bus(&par->chan[2].adapter); 158 i2c_del_adapter(&par->chan[2].adapter);
159 par->chan[2].par = NULL; 159 par->chan[2].par = NULL;
160 160
161} 161}
diff --git a/drivers/video/riva/rivafb-i2c.c b/drivers/video/riva/rivafb-i2c.c
index c15b259af644..01b85e3b0ae1 100644
--- a/drivers/video/riva/rivafb-i2c.c
+++ b/drivers/video/riva/rivafb-i2c.c
@@ -144,15 +144,15 @@ void riva_create_i2c_busses(struct riva_par *par)
144void riva_delete_i2c_busses(struct riva_par *par) 144void riva_delete_i2c_busses(struct riva_par *par)
145{ 145{
146 if (par->chan[0].par) 146 if (par->chan[0].par)
147 i2c_bit_del_bus(&par->chan[0].adapter); 147 i2c_del_adapter(&par->chan[0].adapter);
148 par->chan[0].par = NULL; 148 par->chan[0].par = NULL;
149 149
150 if (par->chan[1].par) 150 if (par->chan[1].par)
151 i2c_bit_del_bus(&par->chan[1].adapter); 151 i2c_del_adapter(&par->chan[1].adapter);
152 par->chan[1].par = NULL; 152 par->chan[1].par = NULL;
153 153
154 if (par->chan[2].par) 154 if (par->chan[2].par)
155 i2c_bit_del_bus(&par->chan[2].adapter); 155 i2c_del_adapter(&par->chan[2].adapter);
156 par->chan[2].par = NULL; 156 par->chan[2].par = NULL;
157} 157}
158 158
diff --git a/drivers/video/savage/savagefb-i2c.c b/drivers/video/savage/savagefb-i2c.c
index cef5bf591cdf..1411f3b6a009 100644
--- a/drivers/video/savage/savagefb-i2c.c
+++ b/drivers/video/savage/savagefb-i2c.c
@@ -208,7 +208,7 @@ void savagefb_delete_i2c_busses(struct fb_info *info)
208 struct savagefb_par *par = info->par; 208 struct savagefb_par *par = info->par;
209 209
210 if (par->chan.par) 210 if (par->chan.par)
211 i2c_bit_del_bus(&par->chan.adapter); 211 i2c_del_adapter(&par->chan.adapter);
212 212
213 par->chan.par = NULL; 213 par->chan.par = NULL;
214} 214}
diff --git a/drivers/video/sstfb.c b/drivers/video/sstfb.c
index 711cb11d6eb3..59cd1e750f30 100644
--- a/drivers/video/sstfb.c
+++ b/drivers/video/sstfb.c
@@ -21,6 +21,11 @@
21 * Remove never finished and bogus 24/32bit support 21 * Remove never finished and bogus 24/32bit support
22 * Clean up macro abuse 22 * Clean up macro abuse
23 * Minor tidying for format. 23 * Minor tidying for format.
24 * 12/2006 Helge Deller <deller@gmx.de>
25 * add /sys/class/graphics/fbX/vgapass sysfs-interface
26 * add module option "mode_option" to set initial screen mode
27 * use fbdev default videomode database
28 * remove debug functions from ioctl
24 */ 29 */
25 30
26/* 31/*
@@ -65,19 +70,10 @@
65 * 70 *
66 * sstfb specific ioctls: 71 * sstfb specific ioctls:
67 * toggle vga (0x46db) : toggle vga_pass_through 72 * toggle vga (0x46db) : toggle vga_pass_through
68 * fill fb (0x46dc) : fills fb
69 * test disp (0x46de) : draws a test image
70 */ 73 */
71 74
72#undef SST_DEBUG 75#undef SST_DEBUG
73 76
74/*
75 Default video mode .
76 0 800x600@60 took from glide
77 1 640x480@75 took from glide
78 2 1024x768@76 std fb.mode
79 3 640x480@60 glide default */
80#define DEFAULT_MODE 3
81 77
82/* 78/*
83 * Includes 79 * Includes
@@ -92,20 +88,24 @@
92#include <linux/init.h> 88#include <linux/init.h>
93#include <linux/slab.h> 89#include <linux/slab.h>
94#include <asm/io.h> 90#include <asm/io.h>
95#include <asm/ioctl.h>
96#include <asm/uaccess.h> 91#include <asm/uaccess.h>
97#include <video/sstfb.h> 92#include <video/sstfb.h>
98 93
99 94
100/* initialized by setup */ 95/* initialized by setup */
101 96
102static int vgapass; /* enable Vga passthrough cable */ 97static int vgapass; /* enable VGA passthrough cable */
103static int mem; /* mem size in MB, 0 = autodetect */ 98static int mem; /* mem size in MB, 0 = autodetect */
104static int clipping = 1; /* use clipping (slower, safer) */ 99static int clipping = 1; /* use clipping (slower, safer) */
105static int gfxclk; /* force FBI freq in Mhz . Dangerous */ 100static int gfxclk; /* force FBI freq in Mhz . Dangerous */
106static int slowpci; /* slow PCI settings */ 101static int slowpci; /* slow PCI settings */
107 102
108static char *mode_option __devinitdata; 103/*
104 Possible default video modes: 800x600@60, 640x480@75, 1024x768@76, 640x480@60
105*/
106#define DEFAULT_VIDEO_MODE "640x480@60"
107
108static char *mode_option __devinitdata = DEFAULT_VIDEO_MODE;
109 109
110enum { 110enum {
111 ID_VOODOO1 = 0, 111 ID_VOODOO1 = 0,
@@ -119,48 +119,11 @@ static struct sst_spec voodoo_spec[] __devinitdata = {
119 { .name = "Voodoo2", .default_gfx_clock = 75000, .max_gfxclk = 85 }, 119 { .name = "Voodoo2", .default_gfx_clock = 75000, .max_gfxclk = 85 },
120}; 120};
121 121
122static struct fb_var_screeninfo sstfb_default =
123#if ( DEFAULT_MODE == 0 )
124 { /* 800x600@60, 16 bpp .borowed from glide/sst1/include/sst1init.h */
125 800, 600, 800, 600, 0, 0, 16, 0,
126 {11, 5, 0}, {5, 6, 0}, {0, 5, 0}, {0, 0, 0},
127 0, 0, -1, -1, 0,
128 25000, 86, 41, 23, 1, 127, 4,
129 0, FB_VMODE_NONINTERLACED };
130#elif ( DEFAULT_MODE == 1 )
131 {/* 640x480@75, 16 bpp .borowed from glide/sst1/include/sst1init.h */
132 640, 480, 640, 480, 0, 0, 16, 0,
133 {11, 5, 0}, {5, 6, 0}, {0, 5, 0}, {0, 0, 0},
134 0, 0, -1, -1, 0,
135 31746, 118, 17, 16, 1, 63, 3,
136 0, FB_VMODE_NONINTERLACED };
137#elif ( DEFAULT_MODE == 2 )
138 { /* 1024x768@76 took from my /etc/fb.modes */
139 1024, 768, 1024, 768,0, 0, 16,0,
140 {11, 5, 0}, {5, 6, 0}, {0, 5, 0}, {0, 0, 0},
141 0, 0, -1, -1, 0,
142 11764, 208, 8, 36, 16, 120, 3 ,
143 0, FB_VMODE_NONINTERLACED };
144#elif ( DEFAULT_MODE == 3 )
145 { /* 640x480@60 , 16bpp glide default ?*/
146 640, 480, 640, 480, 0, 0, 16, 0,
147 {11, 5, 0}, {5, 6, 0}, {0, 5, 0}, {0, 0, 0},
148 0, 0, -1, -1, 0,
149 39721 , 38, 26 , 25 ,18 , 96 ,2,
150 0, FB_VMODE_NONINTERLACED };
151#elif
152 #error "Invalid DEFAULT_MODE value !"
153#endif
154
155 122
156/* 123/*
157 * debug functions 124 * debug functions
158 */ 125 */
159 126
160static void sstfb_drawdebugimage(struct fb_info *info);
161static int sstfb_dump_regs(struct fb_info *info);
162
163
164#if (SST_DEBUG_REG > 0) 127#if (SST_DEBUG_REG > 0)
165static void sst_dbg_print_read_reg(u32 reg, u32 val) { 128static void sst_dbg_print_read_reg(u32 reg, u32 val) {
166 const char *regname; 129 const char *regname;
@@ -726,51 +689,77 @@ static int sstfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
726 return 0; 689 return 0;
727} 690}
728 691
729static int sstfb_ioctl(struct fb_info *info, u_int cmd, u_long arg) 692static void sstfb_setvgapass( struct fb_info *info, int enable )
730{ 693{
731 struct sstfb_par *par = info->par; 694 struct sstfb_par *par = info->par;
732 struct pci_dev *sst_dev = par->dev; 695 struct pci_dev *sst_dev = par->dev;
733 u32 fbiinit0, tmp, val; 696 u32 fbiinit0, tmp;
734 u_long p; 697
698 enable = enable ? 1:0;
699 if (par->vgapass == enable)
700 return;
701 par->vgapass = enable;
702
703 pci_read_config_dword(sst_dev, PCI_INIT_ENABLE, &tmp);
704 pci_write_config_dword(sst_dev, PCI_INIT_ENABLE,
705 tmp | PCI_EN_INIT_WR );
706 fbiinit0 = sst_read (FBIINIT0);
707 if (par->vgapass) {
708 sst_write(FBIINIT0, fbiinit0 & ~DIS_VGA_PASSTHROUGH);
709 printk(KERN_INFO "fb%d: Enabling VGA pass-through\n", info->node );
710 } else {
711 sst_write(FBIINIT0, fbiinit0 | DIS_VGA_PASSTHROUGH);
712 printk(KERN_INFO "fb%d: Disabling VGA pass-through\n", info->node );
713 }
714 pci_write_config_dword(sst_dev, PCI_INIT_ENABLE, tmp);
715}
716
717static ssize_t store_vgapass(struct device *device, struct device_attribute *attr,
718 const char *buf, size_t count)
719{
720 struct fb_info *info = dev_get_drvdata(device);
721 char ** last = NULL;
722 int val;
723
724 val = simple_strtoul(buf, last, 0);
725 sstfb_setvgapass(info, val);
726
727 return count;
728}
729
730static ssize_t show_vgapass(struct device *device, struct device_attribute *attr,
731 char *buf)
732{
733 struct fb_info *info = dev_get_drvdata(device);
734 struct sstfb_par *par = info->par;
735 return snprintf(buf, PAGE_SIZE, "%d\n", par->vgapass);
736}
737
738static struct device_attribute device_attrs[] = {
739 __ATTR(vgapass, S_IRUGO|S_IWUSR, show_vgapass, store_vgapass)
740 };
741
742static int sstfb_ioctl(struct fb_info *info, unsigned int cmd,
743 unsigned long arg)
744{
745 struct sstfb_par *par;
746 u32 val;
735 747
736 switch (cmd) { 748 switch (cmd) {
737 749 /* set/get VGA pass_through mode */
738 /* dump current FBIINIT values to system log */ 750 case SSTFB_SET_VGAPASS:
739 case _IO('F', 0xdb): /* 0x46db */
740 return sstfb_dump_regs(info);
741
742 /* fills lfb with #arg pixels */
743 case _IOW('F', 0xdc, u32): /* 0x46dc */
744 if (copy_from_user(&val, (void __user *)arg, sizeof(val))) 751 if (copy_from_user(&val, (void __user *)arg, sizeof(val)))
745 return -EFAULT; 752 return -EFAULT;
746 if (val > info->fix.smem_len) 753 sstfb_setvgapass(info, val);
747 val = info->fix.smem_len;
748 for (p = 0 ; p < val; p += 2)
749 writew(p >> 6, info->screen_base + p);
750 return 0; 754 return 0;
751 755 case SSTFB_GET_VGAPASS:
752 /* change VGA pass_through mode */ 756 par = info->par;
753 case _IOW('F', 0xdd, u32): /* 0x46dd */ 757 val = par->vgapass;
754 if (copy_from_user(&val, (void __user *)arg, sizeof(val))) 758 if (copy_to_user((void __user *)arg, &val, sizeof(val)))
755 return -EFAULT; 759 return -EFAULT;
756 pci_read_config_dword(sst_dev, PCI_INIT_ENABLE, &tmp);
757 pci_write_config_dword(sst_dev, PCI_INIT_ENABLE,
758 tmp | PCI_EN_INIT_WR );
759 fbiinit0 = sst_read (FBIINIT0);
760 if (val)
761 sst_write(FBIINIT0, fbiinit0 & ~EN_VGA_PASSTHROUGH);
762 else
763 sst_write(FBIINIT0, fbiinit0 | EN_VGA_PASSTHROUGH);
764 pci_write_config_dword(sst_dev, PCI_INIT_ENABLE, tmp);
765 return 0;
766
767 /* draw test image */
768 case _IO('F', 0xde): /* 0x46de */
769 f_dprintk("test color display at %d bpp\n",
770 info->var.bits_per_pixel);
771 sstfb_drawdebugimage(info);
772 return 0; 760 return 0;
773 } 761 }
762
774 return -EINVAL; 763 return -EINVAL;
775} 764}
776 765
@@ -804,6 +793,7 @@ static void sstfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
804/* 793/*
805 * FillRect 2D command (solidfill or invert (via ROP_XOR)) - Voodoo2 only 794 * FillRect 2D command (solidfill or invert (via ROP_XOR)) - Voodoo2 only
806 */ 795 */
796#if 0
807static void sstfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) 797static void sstfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
808{ 798{
809 struct sstfb_par *par = info->par; 799 struct sstfb_par *par = info->par;
@@ -825,6 +815,7 @@ static void sstfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
825 | (BLT_16BPP_FMT << 3) /* | BIT(14) */ | BIT(15) | BIT(16) ); 815 | (BLT_16BPP_FMT << 3) /* | BIT(14) */ | BIT(15) | BIT(16) );
826 sst_wait_idle(); 816 sst_wait_idle();
827} 817}
818#endif
828 819
829 820
830 821
@@ -1156,6 +1147,7 @@ static int __devinit sst_init(struct fb_info *info, struct sstfb_par *par)
1156 struct pll_timing gfx_timings; 1147 struct pll_timing gfx_timings;
1157 struct sst_spec *spec; 1148 struct sst_spec *spec;
1158 int Fout; 1149 int Fout;
1150 int gfx_clock;
1159 1151
1160 spec = &voodoo_spec[par->type]; 1152 spec = &voodoo_spec[par->type];
1161 f_ddprintk(" fbiinit0 fbiinit1 fbiinit2 fbiinit3 fbiinit4 " 1153 f_ddprintk(" fbiinit0 fbiinit1 fbiinit2 fbiinit3 fbiinit4 "
@@ -1196,15 +1188,15 @@ static int __devinit sst_init(struct fb_info *info, struct sstfb_par *par)
1196 } 1188 }
1197 1189
1198 /* set graphic clock */ 1190 /* set graphic clock */
1199 par->gfx_clock = spec->default_gfx_clock; 1191 gfx_clock = spec->default_gfx_clock;
1200 if ((gfxclk >10 ) && (gfxclk < spec->max_gfxclk)) { 1192 if ((gfxclk >10 ) && (gfxclk < spec->max_gfxclk)) {
1201 printk(KERN_INFO "sstfb: Using supplied graphic freq : %dMHz\n", gfxclk); 1193 printk(KERN_INFO "sstfb: Using supplied graphic freq : %dMHz\n", gfxclk);
1202 par->gfx_clock = gfxclk *1000; 1194 gfx_clock = gfxclk *1000;
1203 } else if (gfxclk) { 1195 } else if (gfxclk) {
1204 printk(KERN_WARNING "sstfb: %dMhz is way out of spec! Using default\n", gfxclk); 1196 printk(KERN_WARNING "sstfb: %dMhz is way out of spec! Using default\n", gfxclk);
1205 } 1197 }
1206 1198
1207 sst_calc_pll(par->gfx_clock, &Fout, &gfx_timings); 1199 sst_calc_pll(gfx_clock, &Fout, &gfx_timings);
1208 par->dac_sw.set_pll(info, &gfx_timings, GFX_CLOCK); 1200 par->dac_sw.set_pll(info, &gfx_timings, GFX_CLOCK);
1209 1201
1210 /* disable fbiinit remap */ 1202 /* disable fbiinit remap */
@@ -1215,10 +1207,11 @@ static int __devinit sst_init(struct fb_info *info, struct sstfb_par *par)
1215 fbiinit0 = FBIINIT0_DEFAULT; 1207 fbiinit0 = FBIINIT0_DEFAULT;
1216 fbiinit1 = FBIINIT1_DEFAULT; 1208 fbiinit1 = FBIINIT1_DEFAULT;
1217 fbiinit4 = FBIINIT4_DEFAULT; 1209 fbiinit4 = FBIINIT4_DEFAULT;
1218 if (vgapass) 1210 par->vgapass = vgapass;
1219 fbiinit0 &= ~EN_VGA_PASSTHROUGH; 1211 if (par->vgapass)
1212 fbiinit0 &= ~DIS_VGA_PASSTHROUGH;
1220 else 1213 else
1221 fbiinit0 |= EN_VGA_PASSTHROUGH; 1214 fbiinit0 |= DIS_VGA_PASSTHROUGH;
1222 if (slowpci) { 1215 if (slowpci) {
1223 fbiinit1 |= SLOW_PCI_WRITES; 1216 fbiinit1 |= SLOW_PCI_WRITES;
1224 fbiinit4 |= SLOW_PCI_READS; 1217 fbiinit4 |= SLOW_PCI_READS;
@@ -1267,7 +1260,7 @@ static void __devexit sst_shutdown(struct fb_info *info)
1267 /* TODO maybe shutdown the dac, vrefresh and so on... */ 1260 /* TODO maybe shutdown the dac, vrefresh and so on... */
1268 pci_write_config_dword(dev, PCI_INIT_ENABLE, 1261 pci_write_config_dword(dev, PCI_INIT_ENABLE,
1269 PCI_EN_INIT_WR); 1262 PCI_EN_INIT_WR);
1270 sst_unset_bits(FBIINIT0, FBI_RESET | FIFO_RESET | EN_VGA_PASSTHROUGH); 1263 sst_unset_bits(FBIINIT0, FBI_RESET | FIFO_RESET | DIS_VGA_PASSTHROUGH);
1271 pci_write_config_dword(dev, PCI_VCLK_DISABLE,0); 1264 pci_write_config_dword(dev, PCI_VCLK_DISABLE,0);
1272 /* maybe keep fbiinit* and PCI_INIT_enable in the fb_info struct 1265 /* maybe keep fbiinit* and PCI_INIT_enable in the fb_info struct
1273 * from start ? */ 1266 * from start ? */
@@ -1278,8 +1271,7 @@ static void __devexit sst_shutdown(struct fb_info *info)
1278/* 1271/*
1279 * Interface to the world 1272 * Interface to the world
1280 */ 1273 */
1281#ifndef MODULE 1274static int __devinit sstfb_setup(char *options)
1282static int __init sstfb_setup(char *options)
1283{ 1275{
1284 char *this_opt; 1276 char *this_opt;
1285 1277
@@ -1312,7 +1304,7 @@ static int __init sstfb_setup(char *options)
1312 } 1304 }
1313 return 0; 1305 return 0;
1314} 1306}
1315#endif 1307
1316 1308
1317static struct fb_ops sstfb_ops = { 1309static struct fb_ops sstfb_ops = {
1318 .owner = THIS_MODULE, 1310 .owner = THIS_MODULE,
@@ -1416,15 +1408,10 @@ static int __devinit sstfb_probe(struct pci_dev *pdev,
1416 */ 1408 */
1417 fix->line_length = 2048; /* default value, for 24 or 32bit: 4096 */ 1409 fix->line_length = 2048; /* default value, for 24 or 32bit: 4096 */
1418 1410
1419 if ( mode_option && 1411 fb_find_mode(&info->var, info, mode_option, NULL, 0, NULL, 16);
1420 fb_find_mode(&info->var, info, mode_option, NULL, 0, NULL, 16)) {
1421 printk(KERN_ERR "sstfb: can't set supplied video mode. Using default\n");
1422 info->var = sstfb_default;
1423 } else
1424 info->var = sstfb_default;
1425 1412
1426 if (sstfb_check_var(&info->var, info)) { 1413 if (sstfb_check_var(&info->var, info)) {
1427 printk(KERN_ERR "sstfb: invalid default video mode.\n"); 1414 printk(KERN_ERR "sstfb: invalid video mode.\n");
1428 goto fail; 1415 goto fail;
1429 } 1416 }
1430 1417
@@ -1442,10 +1429,11 @@ static int __devinit sstfb_probe(struct pci_dev *pdev,
1442 goto fail; 1429 goto fail;
1443 } 1430 }
1444 1431
1445 if (1) /* set to 0 to see an initial bitmap instead */ 1432 sstfb_clear_screen(info);
1446 sstfb_clear_screen(info); 1433
1447 else 1434 if (device_create_file(info->dev, &device_attrs[0]))
1448 sstfb_drawdebugimage(info); 1435 printk(KERN_WARNING "sstfb: can't create sysfs entry.\n");
1436
1449 1437
1450 printk(KERN_INFO "fb%d: %s frame buffer device at 0x%p\n", 1438 printk(KERN_INFO "fb%d: %s frame buffer device at 0x%p\n",
1451 info->node, fix->id, info->screen_base); 1439 info->node, fix->id, info->screen_base);
@@ -1453,6 +1441,7 @@ static int __devinit sstfb_probe(struct pci_dev *pdev,
1453 return 0; 1441 return 0;
1454 1442
1455fail: 1443fail:
1444 fb_dealloc_cmap(&info->cmap);
1456 iounmap(info->screen_base); 1445 iounmap(info->screen_base);
1457fail_fb_remap: 1446fail_fb_remap:
1458 iounmap(par->mmio_vbase); 1447 iounmap(par->mmio_vbase);
@@ -1473,21 +1462,23 @@ static void __devexit sstfb_remove(struct pci_dev *pdev)
1473 info = pci_get_drvdata(pdev); 1462 info = pci_get_drvdata(pdev);
1474 par = info->par; 1463 par = info->par;
1475 1464
1465 device_remove_file(info->dev, &device_attrs[0]);
1476 sst_shutdown(info); 1466 sst_shutdown(info);
1477 unregister_framebuffer(info);
1478 iounmap(info->screen_base); 1467 iounmap(info->screen_base);
1479 iounmap(par->mmio_vbase); 1468 iounmap(par->mmio_vbase);
1480 release_mem_region(info->fix.smem_start, 0x400000); 1469 release_mem_region(info->fix.smem_start, 0x400000);
1481 release_mem_region(info->fix.mmio_start, info->fix.mmio_len); 1470 release_mem_region(info->fix.mmio_start, info->fix.mmio_len);
1471 fb_dealloc_cmap(&info->cmap);
1472 unregister_framebuffer(info);
1482 framebuffer_release(info); 1473 framebuffer_release(info);
1483} 1474}
1484 1475
1485 1476
1486static struct pci_device_id sstfb_id_tbl[] = { 1477static const struct pci_device_id sstfb_id_tbl[] = {
1487 { PCI_VENDOR_ID_3DFX, PCI_DEVICE_ID_3DFX_VOODOO, 1478 { PCI_DEVICE(PCI_VENDOR_ID_3DFX, PCI_DEVICE_ID_3DFX_VOODOO ),
1488 PCI_ANY_ID, PCI_ANY_ID, 0, 0, ID_VOODOO1 }, 1479 .driver_data = ID_VOODOO1, },
1489 { PCI_VENDOR_ID_3DFX, PCI_DEVICE_ID_3DFX_VOODOO2, 1480 { PCI_DEVICE(PCI_VENDOR_ID_3DFX, PCI_DEVICE_ID_3DFX_VOODOO2),
1490 PCI_ANY_ID, PCI_ANY_ID, 0, 0, ID_VOODOO2 }, 1481 .driver_data = ID_VOODOO2, },
1491 { 0 }, 1482 { 0 },
1492}; 1483};
1493 1484
@@ -1501,142 +1492,23 @@ static struct pci_driver sstfb_driver = {
1501 1492
1502static int __devinit sstfb_init(void) 1493static int __devinit sstfb_init(void)
1503{ 1494{
1504#ifndef MODULE
1505 char *option = NULL; 1495 char *option = NULL;
1506 1496
1507 if (fb_get_options("sstfb", &option)) 1497 if (fb_get_options("sstfb", &option))
1508 return -ENODEV; 1498 return -ENODEV;
1509 sstfb_setup(option); 1499 sstfb_setup(option);
1510#endif 1500
1511 return pci_register_driver(&sstfb_driver); 1501 return pci_register_driver(&sstfb_driver);
1512} 1502}
1513 1503
1514#ifdef MODULE
1515static void __devexit sstfb_exit(void) 1504static void __devexit sstfb_exit(void)
1516{ 1505{
1517 pci_unregister_driver(&sstfb_driver); 1506 pci_unregister_driver(&sstfb_driver);
1518} 1507}
1519#endif
1520 1508
1521 1509
1522/*
1523 * testing and debugging functions
1524 */
1525
1526static int sstfb_dump_regs(struct fb_info *info)
1527{
1528#ifdef SST_DEBUG
1529 static struct { u32 reg ; const char *reg_name;} pci_regs[] = {
1530 { PCI_INIT_ENABLE, "initenable"},
1531 { PCI_VCLK_ENABLE, "enable vclk"},
1532 { PCI_VCLK_DISABLE, "disable vclk"},
1533 };
1534
1535 static struct { u32 reg ; const char *reg_name;} sst_regs[] = {
1536 {FBIINIT0,"fbiinit0"},
1537 {FBIINIT1,"fbiinit1"},
1538 {FBIINIT2,"fbiinit2"},
1539 {FBIINIT3,"fbiinit3"},
1540 {FBIINIT4,"fbiinit4"},
1541 {FBIINIT5,"fbiinit5"},
1542 {FBIINIT6,"fbiinit6"},
1543 {FBIINIT7,"fbiinit7"},
1544 {LFBMODE,"lfbmode"},
1545 {FBZMODE,"fbzmode"},
1546 };
1547
1548 const int pci_s = ARRAY_SIZE(pci_regs);
1549 const int sst_s = ARRAY_SIZE(sst_regs);
1550 struct sstfb_par *par = info->par;
1551 struct pci_dev *dev = par->dev;
1552 u32 pci_res[pci_s];
1553 u32 sst_res[sst_s];
1554 int i;
1555
1556 for (i=0; i<pci_s; i++) {
1557 pci_read_config_dword(dev, pci_regs[i].reg, &pci_res[i]);
1558 }
1559 for (i=0; i<sst_s; i++) {
1560 sst_res[i] = sst_read(sst_regs[i].reg);
1561 }
1562
1563 dprintk("hardware register dump:\n");
1564 for (i=0; i<pci_s; i++) {
1565 dprintk("%s %0#10x\n", pci_regs[i].reg_name, pci_res[i]);
1566 }
1567 for (i=0; i<sst_s; i++) {
1568 dprintk("%s %0#10x\n", sst_regs[i].reg_name, sst_res[i]);
1569 }
1570 return 0;
1571#else
1572 return -EINVAL;
1573#endif
1574}
1575
1576static void sstfb_fillrect_softw( struct fb_info *info, const struct fb_fillrect *rect)
1577{
1578 u8 __iomem *fbbase_virt = info->screen_base;
1579 int x, y, w = info->var.bits_per_pixel == 16 ? 2 : 4;
1580 u32 color = rect->color, height = rect->height;
1581 u8 __iomem *p;
1582
1583 if (w==2) color |= color<<16;
1584 for (y=rect->dy; height; y++, height--) {
1585 p = fbbase_virt + y*info->fix.line_length + rect->dx*w;
1586 x = rect->width;
1587 if (w==2) x>>=1;
1588 while (x) {
1589 writel(color, p);
1590 p += 4;
1591 x--;
1592 }
1593 }
1594}
1595
1596static void sstfb_drawrect_XY( struct fb_info *info, int x, int y,
1597 int w, int h, int color, int hwfunc)
1598{
1599 struct fb_fillrect rect;
1600 rect.dx = x;
1601 rect.dy = y;
1602 rect.height = h;
1603 rect.width = w;
1604 rect.color = color;
1605 rect.rop = ROP_COPY;
1606 if (hwfunc)
1607 sstfb_fillrect(info, &rect);
1608 else
1609 sstfb_fillrect_softw(info, &rect);
1610}
1611
1612/* print some squares on the fb */
1613static void sstfb_drawdebugimage(struct fb_info *info)
1614{
1615 static int idx;
1616
1617 /* clear screen */
1618 sstfb_clear_screen(info);
1619
1620 idx = (idx+1) & 1;
1621
1622 /* white rect */
1623 sstfb_drawrect_XY(info, 0, 0, 50, 50, 0xffff, idx);
1624
1625 /* blue rect */
1626 sstfb_drawrect_XY(info, 50, 50, 50, 50, 0x001f, idx);
1627
1628 /* green rect */
1629 sstfb_drawrect_XY(info, 100, 100, 80, 80, 0x07e0, idx);
1630
1631 /* red rect */
1632 sstfb_drawrect_XY(info, 250, 250, 120, 100, 0xf800, idx);
1633}
1634
1635module_init(sstfb_init); 1510module_init(sstfb_init);
1636
1637#ifdef MODULE
1638module_exit(sstfb_exit); 1511module_exit(sstfb_exit);
1639#endif
1640 1512
1641MODULE_AUTHOR("(c) 2000,2002 Ghozlane Toumi <gtoumi@laposte.net>"); 1513MODULE_AUTHOR("(c) 2000,2002 Ghozlane Toumi <gtoumi@laposte.net>");
1642MODULE_DESCRIPTION("FBDev driver for 3dfx Voodoo Graphics and Voodoo2 based video boards"); 1514MODULE_DESCRIPTION("FBDev driver for 3dfx Voodoo Graphics and Voodoo2 based video boards");
@@ -1652,3 +1524,6 @@ module_param(gfxclk, int, 0);
1652MODULE_PARM_DESC(gfxclk, "Force graphic chip frequency in MHz. DANGEROUS. (default=auto)"); 1524MODULE_PARM_DESC(gfxclk, "Force graphic chip frequency in MHz. DANGEROUS. (default=auto)");
1653module_param(slowpci, bool, 0); 1525module_param(slowpci, bool, 0);
1654MODULE_PARM_DESC(slowpci, "Uses slow PCI settings (0 or 1) (default=0)"); 1526MODULE_PARM_DESC(slowpci, "Uses slow PCI settings (0 or 1) (default=0)");
1527module_param(mode_option, charp, 0);
1528MODULE_PARM_DESC(mode_option, "Initial video mode (default=" DEFAULT_VIDEO_MODE ")");
1529
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index d18d6424cd21..904e5aeb696c 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -9,7 +9,7 @@ config W1_SLAVE_THERM
9 tristate "Thermal family implementation" 9 tristate "Thermal family implementation"
10 depends on W1 10 depends on W1
11 help 11 help
12 Say Y here if you want to connect 1-wire thermal sensors to you 12 Say Y here if you want to connect 1-wire thermal sensors to your
13 wire. 13 wire.
14 14
15config W1_SLAVE_SMEM 15config W1_SLAVE_SMEM
@@ -17,7 +17,7 @@ config W1_SLAVE_SMEM
17 depends on W1 17 depends on W1
18 help 18 help
19 Say Y here if you want to connect 1-wire 19 Say Y here if you want to connect 1-wire
20 simple 64bit memory rom(ds2401/ds2411/ds1990*) to you wire. 20 simple 64bit memory rom(ds2401/ds2411/ds1990*) to your wire.
21 21
22config W1_SLAVE_DS2433 22config W1_SLAVE_DS2433
23 tristate "4kb EEPROM family support (DS2433)" 23 tristate "4kb EEPROM family support (DS2433)"
diff --git a/fs/Kconfig b/fs/Kconfig
index b3b5aa0edff9..276ff3baaafe 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -12,9 +12,7 @@ config EXT2_FS
12 Ext2 is a standard Linux file system for hard disks. 12 Ext2 is a standard Linux file system for hard disks.
13 13
14 To compile this file system support as a module, choose M here: the 14 To compile this file system support as a module, choose M here: the
15 module will be called ext2. Be aware however that the file system 15 module will be called ext2.
16 of your root partition (the one containing the directory /) cannot
17 be compiled as a module, and so this could be dangerous.
18 16
19 If unsure, say Y. 17 If unsure, say Y.
20 18
@@ -98,9 +96,7 @@ config EXT3_FS
98 (available at <http://sourceforge.net/projects/e2fsprogs/>). 96 (available at <http://sourceforge.net/projects/e2fsprogs/>).
99 97
100 To compile this file system support as a module, choose M here: the 98 To compile this file system support as a module, choose M here: the
101 module will be called ext3. Be aware however that the file system 99 module will be called ext3.
102 of your root partition (the one containing the directory /) cannot
103 be compiled as a module, and so this may be dangerous.
104 100
105config EXT3_FS_XATTR 101config EXT3_FS_XATTR
106 bool "Ext3 extended attributes" 102 bool "Ext3 extended attributes"
@@ -163,9 +159,7 @@ config EXT4DEV_FS
163 features will be added to ext4dev gradually. 159 features will be added to ext4dev gradually.
164 160
165 To compile this file system support as a module, choose M here. The 161 To compile this file system support as a module, choose M here. The
166 module will be called ext4dev. Be aware, however, that the filesystem 162 module will be called ext4dev.
167 of your root partition (the one containing the directory /) cannot
168 be compiled as a module, and so this could be dangerous.
169 163
170 If unsure, say N. 164 If unsure, say N.
171 165
@@ -1008,7 +1002,7 @@ config TMPFS_POSIX_ACL
1008 1002
1009config HUGETLBFS 1003config HUGETLBFS
1010 bool "HugeTLB file system support" 1004 bool "HugeTLB file system support"
1011 depends X86 || IA64 || PPC64 || SPARC64 || SUPERH || BROKEN 1005 depends on X86 || IA64 || PPC64 || SPARC64 || SUPERH || BROKEN
1012 help 1006 help
1013 hugetlbfs is a filesystem backing for HugeTLB pages, based on 1007 hugetlbfs is a filesystem backing for HugeTLB pages, based on
1014 ramfs. For architectures that support it, say Y here and read 1008 ramfs. For architectures that support it, say Y here and read
diff --git a/fs/aio.c b/fs/aio.c
index d3a6ec2c9627..5f577a63bdf0 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -586,7 +586,7 @@ static void use_mm(struct mm_struct *mm)
586 * Note that on UML this *requires* PF_BORROWED_MM to be set, otherwise 586 * Note that on UML this *requires* PF_BORROWED_MM to be set, otherwise
587 * it won't work. Update it accordingly if you change it here 587 * it won't work. Update it accordingly if you change it here
588 */ 588 */
589 activate_mm(active_mm, mm); 589 switch_mm(active_mm, mm, tsk);
590 task_unlock(tsk); 590 task_unlock(tsk);
591 591
592 mmdrop(active_mm); 592 mmdrop(active_mm);
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 9c48250fd726..e8f6c5ad3e90 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -313,7 +313,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
313 struct autofs_sb_info *sbi; 313 struct autofs_sb_info *sbi;
314 struct autofs_info *ino; 314 struct autofs_info *ino;
315 315
316 sbi = (struct autofs_sb_info *) kmalloc(sizeof(*sbi), GFP_KERNEL); 316 sbi = kmalloc(sizeof(*sbi), GFP_KERNEL);
317 if ( !sbi ) 317 if ( !sbi )
318 goto fail_unlock; 318 goto fail_unlock;
319 DPRINTK("starting up, sbi = %p",sbi); 319 DPRINTK("starting up, sbi = %p",sbi);
diff --git a/fs/befs/btree.c b/fs/befs/btree.c
index 81b042ee24e6..af5bb93276f8 100644
--- a/fs/befs/btree.c
+++ b/fs/befs/btree.c
@@ -260,7 +260,7 @@ befs_btree_find(struct super_block *sb, befs_data_stream * ds,
260 goto error; 260 goto error;
261 } 261 }
262 262
263 this_node = (befs_btree_node *) kmalloc(sizeof (befs_btree_node), 263 this_node = kmalloc(sizeof (befs_btree_node),
264 GFP_NOFS); 264 GFP_NOFS);
265 if (!this_node) { 265 if (!this_node) {
266 befs_error(sb, "befs_btree_find() failed to allocate %u " 266 befs_error(sb, "befs_btree_find() failed to allocate %u "
diff --git a/fs/befs/debug.c b/fs/befs/debug.c
index e831a8f30849..b8e304a0661e 100644
--- a/fs/befs/debug.c
+++ b/fs/befs/debug.c
@@ -28,7 +28,7 @@ void
28befs_error(const struct super_block *sb, const char *fmt, ...) 28befs_error(const struct super_block *sb, const char *fmt, ...)
29{ 29{
30 va_list args; 30 va_list args;
31 char *err_buf = (char *) kmalloc(ERRBUFSIZE, GFP_KERNEL); 31 char *err_buf = kmalloc(ERRBUFSIZE, GFP_KERNEL);
32 if (err_buf == NULL) { 32 if (err_buf == NULL) {
33 printk(KERN_ERR "could not allocate %d bytes\n", ERRBUFSIZE); 33 printk(KERN_ERR "could not allocate %d bytes\n", ERRBUFSIZE);
34 return; 34 return;
@@ -46,7 +46,7 @@ void
46befs_warning(const struct super_block *sb, const char *fmt, ...) 46befs_warning(const struct super_block *sb, const char *fmt, ...)
47{ 47{
48 va_list args; 48 va_list args;
49 char *err_buf = (char *) kmalloc(ERRBUFSIZE, GFP_KERNEL); 49 char *err_buf = kmalloc(ERRBUFSIZE, GFP_KERNEL);
50 if (err_buf == NULL) { 50 if (err_buf == NULL) {
51 printk(KERN_ERR "could not allocate %d bytes\n", ERRBUFSIZE); 51 printk(KERN_ERR "could not allocate %d bytes\n", ERRBUFSIZE);
52 return; 52 return;
@@ -70,7 +70,7 @@ befs_debug(const struct super_block *sb, const char *fmt, ...)
70 char *err_buf = NULL; 70 char *err_buf = NULL;
71 71
72 if (BEFS_SB(sb)->mount_opts.debug) { 72 if (BEFS_SB(sb)->mount_opts.debug) {
73 err_buf = (char *) kmalloc(ERRBUFSIZE, GFP_KERNEL); 73 err_buf = kmalloc(ERRBUFSIZE, GFP_KERNEL);
74 if (err_buf == NULL) { 74 if (err_buf == NULL) {
75 printk(KERN_ERR "could not allocate %d bytes\n", 75 printk(KERN_ERR "could not allocate %d bytes\n",
76 ERRBUFSIZE); 76 ERRBUFSIZE);
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index eac175ed9f44..134c99941a63 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * fs/bfs/inode.c 2 * fs/bfs/inode.c
3 * BFS superblock and inode operations. 3 * BFS superblock and inode operations.
4 * Copyright (C) 1999,2000 Tigran Aivazian <tigran@veritas.com> 4 * Copyright (C) 1999-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
5 * From fs/minix, Copyright (C) 1991, 1992 Linus Torvalds. 5 * From fs/minix, Copyright (C) 1991, 1992 Linus Torvalds.
6 * 6 *
7 * Made endianness-clean by Andrew Stribblehill <ads@wompom.org>, 2005. 7 * Made endianness-clean by Andrew Stribblehill <ads@wompom.org>, 2005.
@@ -18,7 +18,7 @@
18#include <asm/uaccess.h> 18#include <asm/uaccess.h>
19#include "bfs.h" 19#include "bfs.h"
20 20
21MODULE_AUTHOR("Tigran A. Aivazian <tigran@veritas.com>"); 21MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>");
22MODULE_DESCRIPTION("SCO UnixWare BFS filesystem for Linux"); 22MODULE_DESCRIPTION("SCO UnixWare BFS filesystem for Linux");
23MODULE_LICENSE("GPL"); 23MODULE_LICENSE("GPL");
24 24
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 76f06f6bc2f6..6e6d4568d548 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -706,12 +706,11 @@ static int elf_fdpic_map_file(struct elf_fdpic_params *params,
706 return -ELIBBAD; 706 return -ELIBBAD;
707 707
708 size = sizeof(*loadmap) + nloads * sizeof(*seg); 708 size = sizeof(*loadmap) + nloads * sizeof(*seg);
709 loadmap = kmalloc(size, GFP_KERNEL); 709 loadmap = kzalloc(size, GFP_KERNEL);
710 if (!loadmap) 710 if (!loadmap)
711 return -ENOMEM; 711 return -ENOMEM;
712 712
713 params->loadmap = loadmap; 713 params->loadmap = loadmap;
714 memset(loadmap, 0, size);
715 714
716 loadmap->version = ELF32_FDPIC_LOADMAP_VERSION; 715 loadmap->version = ELF32_FDPIC_LOADMAP_VERSION;
717 loadmap->nsegs = nloads; 716 loadmap->nsegs = nloads;
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 00687ea62738..c2e08252af35 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -311,7 +311,7 @@ static Node *create_entry(const char __user *buffer, size_t count)
311 311
312 err = -ENOMEM; 312 err = -ENOMEM;
313 memsize = sizeof(Node) + count + 8; 313 memsize = sizeof(Node) + count + 8;
314 e = (Node *) kmalloc(memsize, GFP_USER); 314 e = kmalloc(memsize, GFP_USER);
315 if (!e) 315 if (!e)
316 goto out; 316 goto out;
317 317
diff --git a/fs/bio.c b/fs/bio.c
index 7ec737eda72b..7618bcb18368 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -916,7 +916,7 @@ void bio_set_pages_dirty(struct bio *bio)
916 } 916 }
917} 917}
918 918
919static void bio_release_pages(struct bio *bio) 919void bio_release_pages(struct bio *bio)
920{ 920{
921 struct bio_vec *bvec = bio->bi_io_vec; 921 struct bio_vec *bvec = bio->bi_io_vec;
922 int i; 922 int i;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 197f93921847..1715d6b5f411 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -129,43 +129,191 @@ blkdev_get_block(struct inode *inode, sector_t iblock,
129 return 0; 129 return 0;
130} 130}
131 131
132static int 132static int blk_end_aio(struct bio *bio, unsigned int bytes_done, int error)
133blkdev_get_blocks(struct inode *inode, sector_t iblock,
134 struct buffer_head *bh, int create)
135{ 133{
136 sector_t end_block = max_block(I_BDEV(inode)); 134 struct kiocb *iocb = bio->bi_private;
137 unsigned long max_blocks = bh->b_size >> inode->i_blkbits; 135 atomic_t *bio_count = &iocb->ki_bio_count;
138 136
139 if ((iblock + max_blocks) > end_block) { 137 if (bio_data_dir(bio) == READ)
140 max_blocks = end_block - iblock; 138 bio_check_pages_dirty(bio);
141 if ((long)max_blocks <= 0) { 139 else {
142 if (create) 140 bio_release_pages(bio);
143 return -EIO; /* write fully beyond EOF */ 141 bio_put(bio);
144 /* 142 }
145 * It is a read which is fully beyond EOF. We return 143
146 * a !buffer_mapped buffer 144 /* iocb->ki_nbytes stores error code from LLDD */
147 */ 145 if (error)
148 max_blocks = 0; 146 iocb->ki_nbytes = -EIO;
149 } 147
148 if (atomic_dec_and_test(bio_count)) {
149 if (iocb->ki_nbytes < 0)
150 aio_complete(iocb, iocb->ki_nbytes, 0);
151 else
152 aio_complete(iocb, iocb->ki_left, 0);
150 } 153 }
151 154
152 bh->b_bdev = I_BDEV(inode);
153 bh->b_blocknr = iblock;
154 bh->b_size = max_blocks << inode->i_blkbits;
155 if (max_blocks)
156 set_buffer_mapped(bh);
157 return 0; 155 return 0;
158} 156}
159 157
158#define VEC_SIZE 16
159struct pvec {
160 unsigned short nr;
161 unsigned short idx;
162 struct page *page[VEC_SIZE];
163};
164
165#define PAGES_SPANNED(addr, len) \
166 (DIV_ROUND_UP((addr) + (len), PAGE_SIZE) - (addr) / PAGE_SIZE);
167
168/*
169 * get page pointer for user addr, we internally cache struct page array for
170 * (addr, count) range in pvec to avoid frequent call to get_user_pages. If
171 * internal page list is exhausted, a batch count of up to VEC_SIZE is used
172 * to get next set of page struct.
173 */
174static struct page *blk_get_page(unsigned long addr, size_t count, int rw,
175 struct pvec *pvec)
176{
177 int ret, nr_pages;
178 if (pvec->idx == pvec->nr) {
179 nr_pages = PAGES_SPANNED(addr, count);
180 nr_pages = min(nr_pages, VEC_SIZE);
181 down_read(&current->mm->mmap_sem);
182 ret = get_user_pages(current, current->mm, addr, nr_pages,
183 rw == READ, 0, pvec->page, NULL);
184 up_read(&current->mm->mmap_sem);
185 if (ret < 0)
186 return ERR_PTR(ret);
187 pvec->nr = ret;
188 pvec->idx = 0;
189 }
190 return pvec->page[pvec->idx++];
191}
192
160static ssize_t 193static ssize_t
161blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, 194blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
162 loff_t offset, unsigned long nr_segs) 195 loff_t pos, unsigned long nr_segs)
163{ 196{
164 struct file *file = iocb->ki_filp; 197 struct inode *inode = iocb->ki_filp->f_mapping->host;
165 struct inode *inode = file->f_mapping->host; 198 unsigned blkbits = blksize_bits(bdev_hardsect_size(I_BDEV(inode)));
199 unsigned blocksize_mask = (1 << blkbits) - 1;
200 unsigned long seg = 0; /* iov segment iterator */
201 unsigned long nvec; /* number of bio vec needed */
202 unsigned long cur_off; /* offset into current page */
203 unsigned long cur_len; /* I/O len of current page, up to PAGE_SIZE */
204
205 unsigned long addr; /* user iovec address */
206 size_t count; /* user iovec len */
207 size_t nbytes = iocb->ki_nbytes = iocb->ki_left; /* total xfer size */
208 loff_t size; /* size of block device */
209 struct bio *bio;
210 atomic_t *bio_count = &iocb->ki_bio_count;
211 struct page *page;
212 struct pvec pvec;
213
214 pvec.nr = 0;
215 pvec.idx = 0;
216
217 if (pos & blocksize_mask)
218 return -EINVAL;
219
220 size = i_size_read(inode);
221 if (pos + nbytes > size) {
222 nbytes = size - pos;
223 iocb->ki_left = nbytes;
224 }
225
226 /*
227 * check first non-zero iov alignment, the remaining
228 * iov alignment is checked inside bio loop below.
229 */
230 do {
231 addr = (unsigned long) iov[seg].iov_base;
232 count = min(iov[seg].iov_len, nbytes);
233 if (addr & blocksize_mask || count & blocksize_mask)
234 return -EINVAL;
235 } while (!count && ++seg < nr_segs);
236 atomic_set(bio_count, 1);
237
238 while (nbytes) {
239 /* roughly estimate number of bio vec needed */
240 nvec = (nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
241 nvec = max(nvec, nr_segs - seg);
242 nvec = min(nvec, (unsigned long) BIO_MAX_PAGES);
243
244 /* bio_alloc should not fail with GFP_KERNEL flag */
245 bio = bio_alloc(GFP_KERNEL, nvec);
246 bio->bi_bdev = I_BDEV(inode);
247 bio->bi_end_io = blk_end_aio;
248 bio->bi_private = iocb;
249 bio->bi_sector = pos >> blkbits;
250same_bio:
251 cur_off = addr & ~PAGE_MASK;
252 cur_len = PAGE_SIZE - cur_off;
253 if (count < cur_len)
254 cur_len = count;
255
256 page = blk_get_page(addr, count, rw, &pvec);
257 if (unlikely(IS_ERR(page)))
258 goto backout;
259
260 if (bio_add_page(bio, page, cur_len, cur_off)) {
261 pos += cur_len;
262 addr += cur_len;
263 count -= cur_len;
264 nbytes -= cur_len;
265
266 if (count)
267 goto same_bio;
268 while (++seg < nr_segs) {
269 addr = (unsigned long) iov[seg].iov_base;
270 count = iov[seg].iov_len;
271 if (!count)
272 continue;
273 if (unlikely(addr & blocksize_mask ||
274 count & blocksize_mask)) {
275 page = ERR_PTR(-EINVAL);
276 goto backout;
277 }
278 count = min(count, nbytes);
279 goto same_bio;
280 }
281 }
166 282
167 return blockdev_direct_IO_no_locking(rw, iocb, inode, I_BDEV(inode), 283 /* bio is ready, submit it */
168 iov, offset, nr_segs, blkdev_get_blocks, NULL); 284 if (rw == READ)
285 bio_set_pages_dirty(bio);
286 atomic_inc(bio_count);
287 submit_bio(rw, bio);
288 }
289
290completion:
291 iocb->ki_left -= nbytes;
292 nbytes = iocb->ki_left;
293 iocb->ki_pos += nbytes;
294
295 blk_run_address_space(inode->i_mapping);
296 if (atomic_dec_and_test(bio_count))
297 aio_complete(iocb, nbytes, 0);
298
299 return -EIOCBQUEUED;
300
301backout:
302 /*
303 * back out nbytes count constructed so far for this bio,
304 * we will throw away current bio.
305 */
306 nbytes += bio->bi_size;
307 bio_release_pages(bio);
308 bio_put(bio);
309
310 /*
311 * if no bio was submmitted, return the error code.
312 * otherwise, proceed with pending I/O completion.
313 */
314 if (atomic_read(bio_count) == 1)
315 return PTR_ERR(page);
316 goto completion;
169} 317}
170 318
171static int blkdev_writepage(struct page *page, struct writeback_control *wbc) 319static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
diff --git a/fs/buffer.c b/fs/buffer.c
index 517860f2d75b..d1f1b54d3108 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -35,6 +35,7 @@
35#include <linux/hash.h> 35#include <linux/hash.h>
36#include <linux/suspend.h> 36#include <linux/suspend.h>
37#include <linux/buffer_head.h> 37#include <linux/buffer_head.h>
38#include <linux/task_io_accounting_ops.h>
38#include <linux/bio.h> 39#include <linux/bio.h>
39#include <linux/notifier.h> 40#include <linux/notifier.h>
40#include <linux/cpu.h> 41#include <linux/cpu.h>
@@ -724,20 +725,21 @@ int __set_page_dirty_buffers(struct page *page)
724 } 725 }
725 spin_unlock(&mapping->private_lock); 726 spin_unlock(&mapping->private_lock);
726 727
727 if (!TestSetPageDirty(page)) { 728 if (TestSetPageDirty(page))
728 write_lock_irq(&mapping->tree_lock); 729 return 0;
729 if (page->mapping) { /* Race with truncate? */ 730
730 if (mapping_cap_account_dirty(mapping)) 731 write_lock_irq(&mapping->tree_lock);
731 __inc_zone_page_state(page, NR_FILE_DIRTY); 732 if (page->mapping) { /* Race with truncate? */
732 radix_tree_tag_set(&mapping->page_tree, 733 if (mapping_cap_account_dirty(mapping)) {
733 page_index(page), 734 __inc_zone_page_state(page, NR_FILE_DIRTY);
734 PAGECACHE_TAG_DIRTY); 735 task_io_account_write(PAGE_CACHE_SIZE);
735 } 736 }
736 write_unlock_irq(&mapping->tree_lock); 737 radix_tree_tag_set(&mapping->page_tree,
737 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); 738 page_index(page), PAGECACHE_TAG_DIRTY);
738 return 1;
739 } 739 }
740 return 0; 740 write_unlock_irq(&mapping->tree_lock);
741 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
742 return 1;
741} 743}
742EXPORT_SYMBOL(__set_page_dirty_buffers); 744EXPORT_SYMBOL(__set_page_dirty_buffers);
743 745
@@ -2851,8 +2853,13 @@ int try_to_free_buffers(struct page *page)
2851 * could encounter a non-uptodate page, which is unresolvable. 2853 * could encounter a non-uptodate page, which is unresolvable.
2852 * This only applies in the rare case where try_to_free_buffers 2854 * This only applies in the rare case where try_to_free_buffers
2853 * succeeds but the page is not freed. 2855 * succeeds but the page is not freed.
2856 *
2857 * Also, during truncate, discard_buffer will have marked all
2858 * the page's buffers clean. We discover that here and clean
2859 * the page also.
2854 */ 2860 */
2855 clear_page_dirty(page); 2861 if (test_clear_page_dirty(page))
2862 task_io_account_cancelled_write(PAGE_CACHE_SIZE);
2856 } 2863 }
2857out: 2864out:
2858 if (buffers_to_free) { 2865 if (buffers_to_free) {
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 098790eb2aa1..472e33e0f3cf 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -4876,7 +4876,7 @@ int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
4876 } else { 4876 } else {
4877 /* Add file to outstanding requests */ 4877 /* Add file to outstanding requests */
4878 /* BB change to kmem cache alloc */ 4878 /* BB change to kmem cache alloc */
4879 dnotify_req = (struct dir_notify_req *) kmalloc( 4879 dnotify_req = kmalloc(
4880 sizeof(struct dir_notify_req), 4880 sizeof(struct dir_notify_req),
4881 GFP_KERNEL); 4881 GFP_KERNEL);
4882 if(dnotify_req) { 4882 if(dnotify_req) {
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 1aa95a50cac2..0f05cab5d24a 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -29,6 +29,7 @@
29#include <linux/pagevec.h> 29#include <linux/pagevec.h>
30#include <linux/smp_lock.h> 30#include <linux/smp_lock.h>
31#include <linux/writeback.h> 31#include <linux/writeback.h>
32#include <linux/task_io_accounting_ops.h>
32#include <linux/delay.h> 33#include <linux/delay.h>
33#include <asm/div64.h> 34#include <asm/div64.h>
34#include "cifsfs.h" 35#include "cifsfs.h"
@@ -1812,6 +1813,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
1812 cFYI(1, ("Read error in readpages: %d", rc)); 1813 cFYI(1, ("Read error in readpages: %d", rc));
1813 break; 1814 break;
1814 } else if (bytes_read > 0) { 1815 } else if (bytes_read > 0) {
1816 task_io_account_read(bytes_read);
1815 pSMBr = (struct smb_com_read_rsp *)smb_read_data; 1817 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1816 cifs_copy_cache_pages(mapping, page_list, bytes_read, 1818 cifs_copy_cache_pages(mapping, page_list, bytes_read,
1817 smb_read_data + 4 /* RFC1001 hdr */ + 1819 smb_read_data + 4 /* RFC1001 hdr */ +
diff --git a/fs/compat.c b/fs/compat.c
index b766964a625c..0ec70e3cee0a 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1679,19 +1679,19 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
1679{ 1679{
1680 fd_set_bits fds; 1680 fd_set_bits fds;
1681 char *bits; 1681 char *bits;
1682 int size, max_fdset, ret = -EINVAL; 1682 int size, max_fds, ret = -EINVAL;
1683 struct fdtable *fdt; 1683 struct fdtable *fdt;
1684 1684
1685 if (n < 0) 1685 if (n < 0)
1686 goto out_nofds; 1686 goto out_nofds;
1687 1687
1688 /* max_fdset can increase, so grab it once to avoid race */ 1688 /* max_fds can increase, so grab it once to avoid race */
1689 rcu_read_lock(); 1689 rcu_read_lock();
1690 fdt = files_fdtable(current->files); 1690 fdt = files_fdtable(current->files);
1691 max_fdset = fdt->max_fdset; 1691 max_fds = fdt->max_fds;
1692 rcu_read_unlock(); 1692 rcu_read_unlock();
1693 if (n > max_fdset) 1693 if (n > max_fds)
1694 n = max_fdset; 1694 n = max_fds;
1695 1695
1696 /* 1696 /*
1697 * We need 6 bitmaps (in/out/ex for both incoming and outgoing), 1697 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 5981e17f46f0..d9d0833444f5 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -27,6 +27,7 @@
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/highmem.h> 28#include <linux/highmem.h>
29#include <linux/pagemap.h> 29#include <linux/pagemap.h>
30#include <linux/task_io_accounting_ops.h>
30#include <linux/bio.h> 31#include <linux/bio.h>
31#include <linux/wait.h> 32#include <linux/wait.h>
32#include <linux/err.h> 33#include <linux/err.h>
@@ -121,8 +122,7 @@ struct dio {
121 122
122 /* BIO completion state */ 123 /* BIO completion state */
123 spinlock_t bio_lock; /* protects BIO fields below */ 124 spinlock_t bio_lock; /* protects BIO fields below */
124 int bio_count; /* nr bios to be completed */ 125 unsigned long refcount; /* direct_io_worker() and bios */
125 int bios_in_flight; /* nr bios in flight */
126 struct bio *bio_list; /* singly linked via bi_private */ 126 struct bio *bio_list; /* singly linked via bi_private */
127 struct task_struct *waiter; /* waiting task (NULL if none) */ 127 struct task_struct *waiter; /* waiting task (NULL if none) */
128 128
@@ -209,76 +209,55 @@ static struct page *dio_get_page(struct dio *dio)
209 return dio->pages[dio->head++]; 209 return dio->pages[dio->head++];
210} 210}
211 211
212/* 212/**
213 * Called when all DIO BIO I/O has been completed - let the filesystem 213 * dio_complete() - called when all DIO BIO I/O has been completed
214 * know, if it registered an interest earlier via get_block. Pass the 214 * @offset: the byte offset in the file of the completed operation
215 * private field of the map buffer_head so that filesystems can use it 215 *
216 * to hold additional state between get_block calls and dio_complete. 216 * This releases locks as dictated by the locking type, lets interested parties
217 */ 217 * know that a DIO operation has completed, and calculates the resulting return
218static void dio_complete(struct dio *dio, loff_t offset, ssize_t bytes) 218 * code for the operation.
219{ 219 *
220 if (dio->end_io && dio->result) 220 * It lets the filesystem know if it registered an interest earlier via
221 dio->end_io(dio->iocb, offset, bytes, dio->map_bh.b_private); 221 * get_block. Pass the private field of the map buffer_head so that
222 if (dio->lock_type == DIO_LOCKING) 222 * filesystems can use it to hold additional state between get_block calls and
223 /* lockdep: non-owner release */ 223 * dio_complete.
224 up_read_non_owner(&dio->inode->i_alloc_sem);
225}
226
227/*
228 * Called when a BIO has been processed. If the count goes to zero then IO is
229 * complete and we can signal this to the AIO layer.
230 */ 224 */
231static void finished_one_bio(struct dio *dio) 225static int dio_complete(struct dio *dio, loff_t offset, int ret)
232{ 226{
233 unsigned long flags; 227 ssize_t transferred = 0;
234 228
235 spin_lock_irqsave(&dio->bio_lock, flags); 229 /*
236 if (dio->bio_count == 1) { 230 * AIO submission can race with bio completion to get here while
237 if (dio->is_async) { 231 * expecting to have the last io completed by bio completion.
238 ssize_t transferred; 232 * In that case -EIOCBQUEUED is in fact not an error we want
239 loff_t offset; 233 * to preserve through this call.
240 234 */
241 /* 235 if (ret == -EIOCBQUEUED)
242 * Last reference to the dio is going away. 236 ret = 0;
243 * Drop spinlock and complete the DIO.
244 */
245 spin_unlock_irqrestore(&dio->bio_lock, flags);
246 237
247 /* Check for short read case */ 238 if (dio->result) {
248 transferred = dio->result; 239 transferred = dio->result;
249 offset = dio->iocb->ki_pos;
250 240
251 if ((dio->rw == READ) && 241 /* Check for short read case */
252 ((offset + transferred) > dio->i_size)) 242 if ((dio->rw == READ) && ((offset + transferred) > dio->i_size))
253 transferred = dio->i_size - offset; 243 transferred = dio->i_size - offset;
244 }
254 245
255 /* check for error in completion path */ 246 if (dio->end_io && dio->result)
256 if (dio->io_error) 247 dio->end_io(dio->iocb, offset, transferred,
257 transferred = dio->io_error; 248 dio->map_bh.b_private);
249 if (dio->lock_type == DIO_LOCKING)
250 /* lockdep: non-owner release */
251 up_read_non_owner(&dio->inode->i_alloc_sem);
258 252
259 dio_complete(dio, offset, transferred); 253 if (ret == 0)
254 ret = dio->page_errors;
255 if (ret == 0)
256 ret = dio->io_error;
257 if (ret == 0)
258 ret = transferred;
260 259
261 /* Complete AIO later if falling back to buffered i/o */ 260 return ret;
262 if (dio->result == dio->size ||
263 ((dio->rw == READ) && dio->result)) {
264 aio_complete(dio->iocb, transferred, 0);
265 kfree(dio);
266 return;
267 } else {
268 /*
269 * Falling back to buffered
270 */
271 spin_lock_irqsave(&dio->bio_lock, flags);
272 dio->bio_count--;
273 if (dio->waiter)
274 wake_up_process(dio->waiter);
275 spin_unlock_irqrestore(&dio->bio_lock, flags);
276 return;
277 }
278 }
279 }
280 dio->bio_count--;
281 spin_unlock_irqrestore(&dio->bio_lock, flags);
282} 261}
283 262
284static int dio_bio_complete(struct dio *dio, struct bio *bio); 263static int dio_bio_complete(struct dio *dio, struct bio *bio);
@@ -288,12 +267,27 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio);
288static int dio_bio_end_aio(struct bio *bio, unsigned int bytes_done, int error) 267static int dio_bio_end_aio(struct bio *bio, unsigned int bytes_done, int error)
289{ 268{
290 struct dio *dio = bio->bi_private; 269 struct dio *dio = bio->bi_private;
270 unsigned long remaining;
271 unsigned long flags;
291 272
292 if (bio->bi_size) 273 if (bio->bi_size)
293 return 1; 274 return 1;
294 275
295 /* cleanup the bio */ 276 /* cleanup the bio */
296 dio_bio_complete(dio, bio); 277 dio_bio_complete(dio, bio);
278
279 spin_lock_irqsave(&dio->bio_lock, flags);
280 remaining = --dio->refcount;
281 if (remaining == 1 && dio->waiter)
282 wake_up_process(dio->waiter);
283 spin_unlock_irqrestore(&dio->bio_lock, flags);
284
285 if (remaining == 0) {
286 int ret = dio_complete(dio, dio->iocb->ki_pos, 0);
287 aio_complete(dio->iocb, ret, 0);
288 kfree(dio);
289 }
290
297 return 0; 291 return 0;
298} 292}
299 293
@@ -315,8 +309,7 @@ static int dio_bio_end_io(struct bio *bio, unsigned int bytes_done, int error)
315 spin_lock_irqsave(&dio->bio_lock, flags); 309 spin_lock_irqsave(&dio->bio_lock, flags);
316 bio->bi_private = dio->bio_list; 310 bio->bi_private = dio->bio_list;
317 dio->bio_list = bio; 311 dio->bio_list = bio;
318 dio->bios_in_flight--; 312 if (--dio->refcount == 1 && dio->waiter)
319 if (dio->waiter && dio->bios_in_flight == 0)
320 wake_up_process(dio->waiter); 313 wake_up_process(dio->waiter);
321 spin_unlock_irqrestore(&dio->bio_lock, flags); 314 spin_unlock_irqrestore(&dio->bio_lock, flags);
322 return 0; 315 return 0;
@@ -347,6 +340,8 @@ dio_bio_alloc(struct dio *dio, struct block_device *bdev,
347 * In the AIO read case we speculatively dirty the pages before starting IO. 340 * In the AIO read case we speculatively dirty the pages before starting IO.
348 * During IO completion, any of these pages which happen to have been written 341 * During IO completion, any of these pages which happen to have been written
349 * back will be redirtied by bio_check_pages_dirty(). 342 * back will be redirtied by bio_check_pages_dirty().
343 *
344 * bios hold a dio reference between submit_bio and ->end_io.
350 */ 345 */
351static void dio_bio_submit(struct dio *dio) 346static void dio_bio_submit(struct dio *dio)
352{ 347{
@@ -354,12 +349,14 @@ static void dio_bio_submit(struct dio *dio)
354 unsigned long flags; 349 unsigned long flags;
355 350
356 bio->bi_private = dio; 351 bio->bi_private = dio;
352
357 spin_lock_irqsave(&dio->bio_lock, flags); 353 spin_lock_irqsave(&dio->bio_lock, flags);
358 dio->bio_count++; 354 dio->refcount++;
359 dio->bios_in_flight++;
360 spin_unlock_irqrestore(&dio->bio_lock, flags); 355 spin_unlock_irqrestore(&dio->bio_lock, flags);
356
361 if (dio->is_async && dio->rw == READ) 357 if (dio->is_async && dio->rw == READ)
362 bio_set_pages_dirty(bio); 358 bio_set_pages_dirty(bio);
359
363 submit_bio(dio->rw, bio); 360 submit_bio(dio->rw, bio);
364 361
365 dio->bio = NULL; 362 dio->bio = NULL;
@@ -376,28 +373,37 @@ static void dio_cleanup(struct dio *dio)
376} 373}
377 374
378/* 375/*
379 * Wait for the next BIO to complete. Remove it and return it. 376 * Wait for the next BIO to complete. Remove it and return it. NULL is
377 * returned once all BIOs have been completed. This must only be called once
378 * all bios have been issued so that dio->refcount can only decrease. This
379 * requires that that the caller hold a reference on the dio.
380 */ 380 */
381static struct bio *dio_await_one(struct dio *dio) 381static struct bio *dio_await_one(struct dio *dio)
382{ 382{
383 unsigned long flags; 383 unsigned long flags;
384 struct bio *bio; 384 struct bio *bio = NULL;
385 385
386 spin_lock_irqsave(&dio->bio_lock, flags); 386 spin_lock_irqsave(&dio->bio_lock, flags);
387 while (dio->bio_list == NULL) { 387
388 set_current_state(TASK_UNINTERRUPTIBLE); 388 /*
389 if (dio->bio_list == NULL) { 389 * Wait as long as the list is empty and there are bios in flight. bio
390 dio->waiter = current; 390 * completion drops the count, maybe adds to the list, and wakes while
391 spin_unlock_irqrestore(&dio->bio_lock, flags); 391 * holding the bio_lock so we don't need set_current_state()'s barrier
392 blk_run_address_space(dio->inode->i_mapping); 392 * and can call it after testing our condition.
393 io_schedule(); 393 */
394 spin_lock_irqsave(&dio->bio_lock, flags); 394 while (dio->refcount > 1 && dio->bio_list == NULL) {
395 dio->waiter = NULL; 395 __set_current_state(TASK_UNINTERRUPTIBLE);
396 } 396 dio->waiter = current;
397 set_current_state(TASK_RUNNING); 397 spin_unlock_irqrestore(&dio->bio_lock, flags);
398 io_schedule();
399 /* wake up sets us TASK_RUNNING */
400 spin_lock_irqsave(&dio->bio_lock, flags);
401 dio->waiter = NULL;
402 }
403 if (dio->bio_list) {
404 bio = dio->bio_list;
405 dio->bio_list = bio->bi_private;
398 } 406 }
399 bio = dio->bio_list;
400 dio->bio_list = bio->bi_private;
401 spin_unlock_irqrestore(&dio->bio_lock, flags); 407 spin_unlock_irqrestore(&dio->bio_lock, flags);
402 return bio; 408 return bio;
403} 409}
@@ -426,34 +432,24 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
426 } 432 }
427 bio_put(bio); 433 bio_put(bio);
428 } 434 }
429 finished_one_bio(dio);
430 return uptodate ? 0 : -EIO; 435 return uptodate ? 0 : -EIO;
431} 436}
432 437
433/* 438/*
434 * Wait on and process all in-flight BIOs. 439 * Wait on and process all in-flight BIOs. This must only be called once
440 * all bios have been issued so that the refcount can only decrease.
441 * This just waits for all bios to make it through dio_bio_complete. IO
442 * errors are propogated through dio->io_error and should be propogated via
443 * dio_complete().
435 */ 444 */
436static int dio_await_completion(struct dio *dio) 445static void dio_await_completion(struct dio *dio)
437{ 446{
438 int ret = 0; 447 struct bio *bio;
439 448 do {
440 if (dio->bio) 449 bio = dio_await_one(dio);
441 dio_bio_submit(dio); 450 if (bio)
442 451 dio_bio_complete(dio, bio);
443 /* 452 } while (bio);
444 * The bio_lock is not held for the read of bio_count.
445 * This is ok since it is the dio_bio_complete() that changes
446 * bio_count.
447 */
448 while (dio->bio_count) {
449 struct bio *bio = dio_await_one(dio);
450 int ret2;
451
452 ret2 = dio_bio_complete(dio, bio);
453 if (ret == 0)
454 ret = ret2;
455 }
456 return ret;
457} 453}
458 454
459/* 455/*
@@ -675,6 +671,13 @@ submit_page_section(struct dio *dio, struct page *page,
675{ 671{
676 int ret = 0; 672 int ret = 0;
677 673
674 if (dio->rw & WRITE) {
675 /*
676 * Read accounting is performed in submit_bio()
677 */
678 task_io_account_write(len);
679 }
680
678 /* 681 /*
679 * Can we just grow the current page's presence in the dio? 682 * Can we just grow the current page's presence in the dio?
680 */ 683 */
@@ -953,6 +956,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
953 struct dio *dio) 956 struct dio *dio)
954{ 957{
955 unsigned long user_addr; 958 unsigned long user_addr;
959 unsigned long flags;
956 int seg; 960 int seg;
957 ssize_t ret = 0; 961 ssize_t ret = 0;
958 ssize_t ret2; 962 ssize_t ret2;
@@ -983,17 +987,8 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
983 dio->iocb = iocb; 987 dio->iocb = iocb;
984 dio->i_size = i_size_read(inode); 988 dio->i_size = i_size_read(inode);
985 989
986 /*
987 * BIO completion state.
988 *
989 * ->bio_count starts out at one, and we decrement it to zero after all
990 * BIOs are submitted. This to avoid the situation where a really fast
991 * (or synchronous) device could take the count to zero while we're
992 * still submitting BIOs.
993 */
994 dio->bio_count = 1;
995 dio->bios_in_flight = 0;
996 spin_lock_init(&dio->bio_lock); 990 spin_lock_init(&dio->bio_lock);
991 dio->refcount = 1;
997 dio->bio_list = NULL; 992 dio->bio_list = NULL;
998 dio->waiter = NULL; 993 dio->waiter = NULL;
999 994
@@ -1069,6 +1064,9 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
1069 if (dio->bio) 1064 if (dio->bio)
1070 dio_bio_submit(dio); 1065 dio_bio_submit(dio);
1071 1066
1067 /* All IO is now issued, send it on its way */
1068 blk_run_address_space(inode->i_mapping);
1069
1072 /* 1070 /*
1073 * It is possible that, we return short IO due to end of file. 1071 * It is possible that, we return short IO due to end of file.
1074 * In that case, we need to release all the pages we got hold on. 1072 * In that case, we need to release all the pages we got hold on.
@@ -1084,74 +1082,41 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
1084 mutex_unlock(&dio->inode->i_mutex); 1082 mutex_unlock(&dio->inode->i_mutex);
1085 1083
1086 /* 1084 /*
1087 * OK, all BIOs are submitted, so we can decrement bio_count to truly 1085 * The only time we want to leave bios in flight is when a successful
1088 * reflect the number of to-be-processed BIOs. 1086 * partial aio read or full aio write have been setup. In that case
1087 * bio completion will call aio_complete. The only time it's safe to
1088 * call aio_complete is when we return -EIOCBQUEUED, so we key on that.
1089 * This had *better* be the only place that raises -EIOCBQUEUED.
1089 */ 1090 */
1090 if (dio->is_async) { 1091 BUG_ON(ret == -EIOCBQUEUED);
1091 int should_wait = 0; 1092 if (dio->is_async && ret == 0 && dio->result &&
1093 ((rw & READ) || (dio->result == dio->size)))
1094 ret = -EIOCBQUEUED;
1092 1095
1093 if (dio->result < dio->size && (rw & WRITE)) { 1096 if (ret != -EIOCBQUEUED)
1094 dio->waiter = current; 1097 dio_await_completion(dio);
1095 should_wait = 1;
1096 }
1097 if (ret == 0)
1098 ret = dio->result;
1099 finished_one_bio(dio); /* This can free the dio */
1100 blk_run_address_space(inode->i_mapping);
1101 if (should_wait) {
1102 unsigned long flags;
1103 /*
1104 * Wait for already issued I/O to drain out and
1105 * release its references to user-space pages
1106 * before returning to fallback on buffered I/O
1107 */
1108
1109 spin_lock_irqsave(&dio->bio_lock, flags);
1110 set_current_state(TASK_UNINTERRUPTIBLE);
1111 while (dio->bio_count) {
1112 spin_unlock_irqrestore(&dio->bio_lock, flags);
1113 io_schedule();
1114 spin_lock_irqsave(&dio->bio_lock, flags);
1115 set_current_state(TASK_UNINTERRUPTIBLE);
1116 }
1117 spin_unlock_irqrestore(&dio->bio_lock, flags);
1118 set_current_state(TASK_RUNNING);
1119 kfree(dio);
1120 }
1121 } else {
1122 ssize_t transferred = 0;
1123
1124 finished_one_bio(dio);
1125 ret2 = dio_await_completion(dio);
1126 if (ret == 0)
1127 ret = ret2;
1128 if (ret == 0)
1129 ret = dio->page_errors;
1130 if (dio->result) {
1131 loff_t i_size = i_size_read(inode);
1132
1133 transferred = dio->result;
1134 /*
1135 * Adjust the return value if the read crossed a
1136 * non-block-aligned EOF.
1137 */
1138 if (rw == READ && (offset + transferred > i_size))
1139 transferred = i_size - offset;
1140 }
1141 dio_complete(dio, offset, transferred);
1142 if (ret == 0)
1143 ret = transferred;
1144 1098
1145 /* We could have also come here on an AIO file extend */ 1099 /*
1146 if (!is_sync_kiocb(iocb) && (rw & WRITE) && 1100 * Sync will always be dropping the final ref and completing the
1147 ret >= 0 && dio->result == dio->size) 1101 * operation. AIO can if it was a broken operation described above or
1148 /* 1102 * in fact if all the bios race to complete before we get here. In
1149 * For AIO writes where we have completed the 1103 * that case dio_complete() translates the EIOCBQUEUED into the proper
1150 * i/o, we have to mark the the aio complete. 1104 * return code that the caller will hand to aio_complete().
1151 */ 1105 *
1152 aio_complete(iocb, ret, 0); 1106 * This is managed by the bio_lock instead of being an atomic_t so that
1107 * completion paths can drop their ref and use the remaining count to
1108 * decide to wake the submission path atomically.
1109 */
1110 spin_lock_irqsave(&dio->bio_lock, flags);
1111 ret2 = --dio->refcount;
1112 spin_unlock_irqrestore(&dio->bio_lock, flags);
1113 BUG_ON(!dio->is_async && ret2 != 0);
1114 if (ret2 == 0) {
1115 ret = dio_complete(dio, offset, ret);
1153 kfree(dio); 1116 kfree(dio);
1154 } 1117 } else
1118 BUG_ON(ret != -EIOCBQUEUED);
1119
1155 return ret; 1120 return ret;
1156} 1121}
1157 1122
diff --git a/fs/exec.c b/fs/exec.c
index 12d8cd461b41..11fe93f7363c 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -783,7 +783,7 @@ static void flush_old_files(struct files_struct * files)
783 j++; 783 j++;
784 i = j * __NFDBITS; 784 i = j * __NFDBITS;
785 fdt = files_fdtable(files); 785 fdt = files_fdtable(files);
786 if (i >= fdt->max_fds || i >= fdt->max_fdset) 786 if (i >= fdt->max_fds)
787 break; 787 break;
788 set = fdt->close_on_exec->fds_bits[j]; 788 set = fdt->close_on_exec->fds_bits[j];
789 if (!set) 789 if (!set)
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 2bdaef35da54..8e382a5d51bd 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -77,10 +77,9 @@ repeat:
77 start = files->next_fd; 77 start = files->next_fd;
78 78
79 newfd = start; 79 newfd = start;
80 if (start < fdt->max_fdset) { 80 if (start < fdt->max_fds)
81 newfd = find_next_zero_bit(fdt->open_fds->fds_bits, 81 newfd = find_next_zero_bit(fdt->open_fds->fds_bits,
82 fdt->max_fdset, start); 82 fdt->max_fds, start);
83 }
84 83
85 error = -EMFILE; 84 error = -EMFILE;
86 if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur) 85 if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
diff --git a/fs/file.c b/fs/file.c
index 51aef675470f..857fa49e984c 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -32,46 +32,28 @@ struct fdtable_defer {
32 */ 32 */
33static DEFINE_PER_CPU(struct fdtable_defer, fdtable_defer_list); 33static DEFINE_PER_CPU(struct fdtable_defer, fdtable_defer_list);
34 34
35 35static inline void * alloc_fdmem(unsigned int size)
36/*
37 * Allocate an fd array, using kmalloc or vmalloc.
38 * Note: the array isn't cleared at allocation time.
39 */
40struct file ** alloc_fd_array(int num)
41{ 36{
42 struct file **new_fds;
43 int size = num * sizeof(struct file *);
44
45 if (size <= PAGE_SIZE) 37 if (size <= PAGE_SIZE)
46 new_fds = (struct file **) kmalloc(size, GFP_KERNEL); 38 return kmalloc(size, GFP_KERNEL);
47 else 39 else
48 new_fds = (struct file **) vmalloc(size); 40 return vmalloc(size);
49 return new_fds;
50} 41}
51 42
52void free_fd_array(struct file **array, int num) 43static inline void free_fdarr(struct fdtable *fdt)
53{ 44{
54 int size = num * sizeof(struct file *); 45 if (fdt->max_fds <= (PAGE_SIZE / sizeof(struct file *)))
55 46 kfree(fdt->fd);
56 if (!array) {
57 printk (KERN_ERR "free_fd_array: array = 0 (num = %d)\n", num);
58 return;
59 }
60
61 if (num <= NR_OPEN_DEFAULT) /* Don't free the embedded fd array! */
62 return;
63 else if (size <= PAGE_SIZE)
64 kfree(array);
65 else 47 else
66 vfree(array); 48 vfree(fdt->fd);
67} 49}
68 50
69static void __free_fdtable(struct fdtable *fdt) 51static inline void free_fdset(struct fdtable *fdt)
70{ 52{
71 free_fdset(fdt->open_fds, fdt->max_fdset); 53 if (fdt->max_fds <= (PAGE_SIZE * BITS_PER_BYTE / 2))
72 free_fdset(fdt->close_on_exec, fdt->max_fdset); 54 kfree(fdt->open_fds);
73 free_fd_array(fdt->fd, fdt->max_fds); 55 else
74 kfree(fdt); 56 vfree(fdt->open_fds);
75} 57}
76 58
77static void free_fdtable_work(struct work_struct *work) 59static void free_fdtable_work(struct work_struct *work)
@@ -86,41 +68,32 @@ static void free_fdtable_work(struct work_struct *work)
86 spin_unlock_bh(&f->lock); 68 spin_unlock_bh(&f->lock);
87 while(fdt) { 69 while(fdt) {
88 struct fdtable *next = fdt->next; 70 struct fdtable *next = fdt->next;
89 __free_fdtable(fdt); 71 vfree(fdt->fd);
72 free_fdset(fdt);
73 kfree(fdt);
90 fdt = next; 74 fdt = next;
91 } 75 }
92} 76}
93 77
94static void free_fdtable_rcu(struct rcu_head *rcu) 78void free_fdtable_rcu(struct rcu_head *rcu)
95{ 79{
96 struct fdtable *fdt = container_of(rcu, struct fdtable, rcu); 80 struct fdtable *fdt = container_of(rcu, struct fdtable, rcu);
97 int fdset_size, fdarray_size;
98 struct fdtable_defer *fddef; 81 struct fdtable_defer *fddef;
99 82
100 BUG_ON(!fdt); 83 BUG_ON(!fdt);
101 fdset_size = fdt->max_fdset / 8;
102 fdarray_size = fdt->max_fds * sizeof(struct file *);
103 84
104 if (fdt->free_files) { 85 if (fdt->max_fds <= NR_OPEN_DEFAULT) {
105 /* 86 /*
106 * The this fdtable was embedded in the files structure 87 * This fdtable is embedded in the files structure and that
107 * and the files structure itself was getting destroyed. 88 * structure itself is getting destroyed.
108 * It is now safe to free the files structure.
109 */ 89 */
110 kmem_cache_free(files_cachep, fdt->free_files); 90 kmem_cache_free(files_cachep,
91 container_of(fdt, struct files_struct, fdtab));
111 return; 92 return;
112 } 93 }
113 if (fdt->max_fdset <= EMBEDDED_FD_SET_SIZE && 94 if (fdt->max_fds <= (PAGE_SIZE / sizeof(struct file *))) {
114 fdt->max_fds <= NR_OPEN_DEFAULT) {
115 /*
116 * The fdtable was embedded
117 */
118 return;
119 }
120 if (fdset_size <= PAGE_SIZE && fdarray_size <= PAGE_SIZE) {
121 kfree(fdt->open_fds);
122 kfree(fdt->close_on_exec);
123 kfree(fdt->fd); 95 kfree(fdt->fd);
96 kfree(fdt->open_fds);
124 kfree(fdt); 97 kfree(fdt);
125 } else { 98 } else {
126 fddef = &get_cpu_var(fdtable_defer_list); 99 fddef = &get_cpu_var(fdtable_defer_list);
@@ -134,136 +107,74 @@ static void free_fdtable_rcu(struct rcu_head *rcu)
134 } 107 }
135} 108}
136 109
137void free_fdtable(struct fdtable *fdt)
138{
139 if (fdt->free_files ||
140 fdt->max_fdset > EMBEDDED_FD_SET_SIZE ||
141 fdt->max_fds > NR_OPEN_DEFAULT)
142 call_rcu(&fdt->rcu, free_fdtable_rcu);
143}
144
145/* 110/*
146 * Expand the fdset in the files_struct. Called with the files spinlock 111 * Expand the fdset in the files_struct. Called with the files spinlock
147 * held for write. 112 * held for write.
148 */ 113 */
149static void copy_fdtable(struct fdtable *nfdt, struct fdtable *fdt) 114static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
150{ 115{
151 int i; 116 unsigned int cpy, set;
152 int count;
153
154 BUG_ON(nfdt->max_fdset < fdt->max_fdset);
155 BUG_ON(nfdt->max_fds < fdt->max_fds);
156 /* Copy the existing tables and install the new pointers */
157
158 i = fdt->max_fdset / (sizeof(unsigned long) * 8);
159 count = (nfdt->max_fdset - fdt->max_fdset) / 8;
160 117
161 /* 118 BUG_ON(nfdt->max_fds < ofdt->max_fds);
162 * Don't copy the entire array if the current fdset is 119 if (ofdt->max_fds == 0)
163 * not yet initialised. 120 return;
164 */
165 if (i) {
166 memcpy (nfdt->open_fds, fdt->open_fds,
167 fdt->max_fdset/8);
168 memcpy (nfdt->close_on_exec, fdt->close_on_exec,
169 fdt->max_fdset/8);
170 memset (&nfdt->open_fds->fds_bits[i], 0, count);
171 memset (&nfdt->close_on_exec->fds_bits[i], 0, count);
172 }
173
174 /* Don't copy/clear the array if we are creating a new
175 fd array for fork() */
176 if (fdt->max_fds) {
177 memcpy(nfdt->fd, fdt->fd,
178 fdt->max_fds * sizeof(struct file *));
179 /* clear the remainder of the array */
180 memset(&nfdt->fd[fdt->max_fds], 0,
181 (nfdt->max_fds - fdt->max_fds) *
182 sizeof(struct file *));
183 }
184}
185
186/*
187 * Allocate an fdset array, using kmalloc or vmalloc.
188 * Note: the array isn't cleared at allocation time.
189 */
190fd_set * alloc_fdset(int num)
191{
192 fd_set *new_fdset;
193 int size = num / 8;
194 121
195 if (size <= PAGE_SIZE) 122 cpy = ofdt->max_fds * sizeof(struct file *);
196 new_fdset = (fd_set *) kmalloc(size, GFP_KERNEL); 123 set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
197 else 124 memcpy(nfdt->fd, ofdt->fd, cpy);
198 new_fdset = (fd_set *) vmalloc(size); 125 memset((char *)(nfdt->fd) + cpy, 0, set);
199 return new_fdset; 126
127 cpy = ofdt->max_fds / BITS_PER_BYTE;
128 set = (nfdt->max_fds - ofdt->max_fds) / BITS_PER_BYTE;
129 memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
130 memset((char *)(nfdt->open_fds) + cpy, 0, set);
131 memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
132 memset((char *)(nfdt->close_on_exec) + cpy, 0, set);
200} 133}
201 134
202void free_fdset(fd_set *array, int num) 135static struct fdtable * alloc_fdtable(unsigned int nr)
203{ 136{
204 if (num <= EMBEDDED_FD_SET_SIZE) /* Don't free an embedded fdset */ 137 struct fdtable *fdt;
205 return; 138 char *data;
206 else if (num <= 8 * PAGE_SIZE)
207 kfree(array);
208 else
209 vfree(array);
210}
211 139
212static struct fdtable *alloc_fdtable(int nr) 140 /*
213{ 141 * Figure out how many fds we actually want to support in this fdtable.
214 struct fdtable *fdt = NULL; 142 * Allocation steps are keyed to the size of the fdarray, since it
215 int nfds = 0; 143 * grows far faster than any of the other dynamic data. We try to fit
216 fd_set *new_openset = NULL, *new_execset = NULL; 144 * the fdarray into comfortable page-tuned chunks: starting at 1024B
217 struct file **new_fds; 145 * and growing in powers of two from there on.
146 */
147 nr /= (1024 / sizeof(struct file *));
148 nr = roundup_pow_of_two(nr + 1);
149 nr *= (1024 / sizeof(struct file *));
150 if (nr > NR_OPEN)
151 nr = NR_OPEN;
218 152
219 fdt = kzalloc(sizeof(*fdt), GFP_KERNEL); 153 fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL);
220 if (!fdt) 154 if (!fdt)
221 goto out; 155 goto out;
222 156 fdt->max_fds = nr;
223 nfds = max_t(int, 8 * L1_CACHE_BYTES, roundup_pow_of_two(nr + 1)); 157 data = alloc_fdmem(nr * sizeof(struct file *));
224 if (nfds > NR_OPEN) 158 if (!data)
225 nfds = NR_OPEN; 159 goto out_fdt;
160 fdt->fd = (struct file **)data;
161 data = alloc_fdmem(max_t(unsigned int,
162 2 * nr / BITS_PER_BYTE, L1_CACHE_BYTES));
163 if (!data)
164 goto out_arr;
165 fdt->open_fds = (fd_set *)data;
166 data += nr / BITS_PER_BYTE;
167 fdt->close_on_exec = (fd_set *)data;
168 INIT_RCU_HEAD(&fdt->rcu);
169 fdt->next = NULL;
226 170
227 new_openset = alloc_fdset(nfds);
228 new_execset = alloc_fdset(nfds);
229 if (!new_openset || !new_execset)
230 goto out;
231 fdt->open_fds = new_openset;
232 fdt->close_on_exec = new_execset;
233 fdt->max_fdset = nfds;
234
235 nfds = NR_OPEN_DEFAULT;
236 /*
237 * Expand to the max in easy steps, and keep expanding it until
238 * we have enough for the requested fd array size.
239 */
240 do {
241#if NR_OPEN_DEFAULT < 256
242 if (nfds < 256)
243 nfds = 256;
244 else
245#endif
246 if (nfds < (PAGE_SIZE / sizeof(struct file *)))
247 nfds = PAGE_SIZE / sizeof(struct file *);
248 else {
249 nfds = nfds * 2;
250 if (nfds > NR_OPEN)
251 nfds = NR_OPEN;
252 }
253 } while (nfds <= nr);
254 new_fds = alloc_fd_array(nfds);
255 if (!new_fds)
256 goto out2;
257 fdt->fd = new_fds;
258 fdt->max_fds = nfds;
259 fdt->free_files = NULL;
260 return fdt; 171 return fdt;
261out2: 172
262 nfds = fdt->max_fdset; 173out_arr:
263out: 174 free_fdarr(fdt);
264 free_fdset(new_openset, nfds); 175out_fdt:
265 free_fdset(new_execset, nfds);
266 kfree(fdt); 176 kfree(fdt);
177out:
267 return NULL; 178 return NULL;
268} 179}
269 180
@@ -290,14 +201,17 @@ static int expand_fdtable(struct files_struct *files, int nr)
290 * we dropped the lock 201 * we dropped the lock
291 */ 202 */
292 cur_fdt = files_fdtable(files); 203 cur_fdt = files_fdtable(files);
293 if (nr >= cur_fdt->max_fds || nr >= cur_fdt->max_fdset) { 204 if (nr >= cur_fdt->max_fds) {
294 /* Continue as planned */ 205 /* Continue as planned */
295 copy_fdtable(new_fdt, cur_fdt); 206 copy_fdtable(new_fdt, cur_fdt);
296 rcu_assign_pointer(files->fdt, new_fdt); 207 rcu_assign_pointer(files->fdt, new_fdt);
297 free_fdtable(cur_fdt); 208 if (cur_fdt->max_fds > NR_OPEN_DEFAULT)
209 call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
298 } else { 210 } else {
299 /* Somebody else expanded, so undo our attempt */ 211 /* Somebody else expanded, so undo our attempt */
300 __free_fdtable(new_fdt); 212 free_fdarr(new_fdt);
213 free_fdset(new_fdt);
214 kfree(new_fdt);
301 } 215 }
302 return 1; 216 return 1;
303} 217}
@@ -316,11 +230,10 @@ int expand_files(struct files_struct *files, int nr)
316 230
317 fdt = files_fdtable(files); 231 fdt = files_fdtable(files);
318 /* Do we need to expand? */ 232 /* Do we need to expand? */
319 if (nr < fdt->max_fdset && nr < fdt->max_fds) 233 if (nr < fdt->max_fds)
320 return 0; 234 return 0;
321 /* Can we expand? */ 235 /* Can we expand? */
322 if (fdt->max_fdset >= NR_OPEN || fdt->max_fds >= NR_OPEN || 236 if (nr >= NR_OPEN)
323 nr >= NR_OPEN)
324 return -EMFILE; 237 return -EMFILE;
325 238
326 /* All good, so we try */ 239 /* All good, so we try */
diff --git a/fs/inode.c b/fs/inode.c
index d00de182ecb9..bf21dc6d0dbd 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1144,7 +1144,6 @@ sector_t bmap(struct inode * inode, sector_t block)
1144 res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block); 1144 res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
1145 return res; 1145 return res;
1146} 1146}
1147
1148EXPORT_SYMBOL(bmap); 1147EXPORT_SYMBOL(bmap);
1149 1148
1150/** 1149/**
@@ -1163,27 +1162,43 @@ void touch_atime(struct vfsmount *mnt, struct dentry *dentry)
1163 1162
1164 if (IS_RDONLY(inode)) 1163 if (IS_RDONLY(inode))
1165 return; 1164 return;
1166 1165 if (inode->i_flags & S_NOATIME)
1167 if ((inode->i_flags & S_NOATIME) || 1166 return;
1168 (inode->i_sb->s_flags & MS_NOATIME) || 1167 if (inode->i_sb->s_flags & MS_NOATIME)
1169 ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))) 1168 return;
1169 if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
1170 return; 1170 return;
1171 1171
1172 /* 1172 /*
1173 * We may have a NULL vfsmount when coming from NFSD 1173 * We may have a NULL vfsmount when coming from NFSD
1174 */ 1174 */
1175 if (mnt && 1175 if (mnt) {
1176 ((mnt->mnt_flags & MNT_NOATIME) || 1176 if (mnt->mnt_flags & MNT_NOATIME)
1177 ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))) 1177 return;
1178 return; 1178 if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
1179 return;
1179 1180
1180 now = current_fs_time(inode->i_sb); 1181 if (mnt->mnt_flags & MNT_RELATIME) {
1181 if (!timespec_equal(&inode->i_atime, &now)) { 1182 /*
1182 inode->i_atime = now; 1183 * With relative atime, only update atime if the
1183 mark_inode_dirty_sync(inode); 1184 * previous atime is earlier than either the ctime or
1185 * mtime.
1186 */
1187 if (timespec_compare(&inode->i_mtime,
1188 &inode->i_atime) < 0 &&
1189 timespec_compare(&inode->i_ctime,
1190 &inode->i_atime) < 0)
1191 return;
1192 }
1184 } 1193 }
1185}
1186 1194
1195 now = current_fs_time(inode->i_sb);
1196 if (timespec_equal(&inode->i_atime, &now))
1197 return;
1198
1199 inode->i_atime = now;
1200 mark_inode_dirty_sync(inode);
1201}
1187EXPORT_SYMBOL(touch_atime); 1202EXPORT_SYMBOL(touch_atime);
1188 1203
1189/** 1204/**
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index d38e0d575e48..cceaf57e3778 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -55,7 +55,7 @@ get_transaction(journal_t *journal, transaction_t *transaction)
55 spin_lock_init(&transaction->t_handle_lock); 55 spin_lock_init(&transaction->t_handle_lock);
56 56
57 /* Set up the commit timer for the new transaction. */ 57 /* Set up the commit timer for the new transaction. */
58 journal->j_commit_timer.expires = transaction->t_expires; 58 journal->j_commit_timer.expires = round_jiffies(transaction->t_expires);
59 add_timer(&journal->j_commit_timer); 59 add_timer(&journal->j_commit_timer);
60 60
61 J_ASSERT(journal->j_running_transaction == NULL); 61 J_ASSERT(journal->j_running_transaction == NULL);
diff --git a/fs/jffs/inode-v23.c b/fs/jffs/inode-v23.c
index 7b40c69f44eb..43baa1afa021 100644
--- a/fs/jffs/inode-v23.c
+++ b/fs/jffs/inode-v23.c
@@ -818,7 +818,7 @@ jffs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
818 818
819 D1({ 819 D1({
820 int len = dentry->d_name.len; 820 int len = dentry->d_name.len;
821 char *_name = (char *) kmalloc(len + 1, GFP_KERNEL); 821 char *_name = kmalloc(len + 1, GFP_KERNEL);
822 memcpy(_name, dentry->d_name.name, len); 822 memcpy(_name, dentry->d_name.name, len);
823 _name[len] = '\0'; 823 _name[len] = '\0';
824 printk("***jffs_mkdir(): dir = 0x%p, name = \"%s\", " 824 printk("***jffs_mkdir(): dir = 0x%p, name = \"%s\", "
@@ -964,7 +964,7 @@ jffs_remove(struct inode *dir, struct dentry *dentry, int type)
964 D1({ 964 D1({
965 int len = dentry->d_name.len; 965 int len = dentry->d_name.len;
966 const char *name = dentry->d_name.name; 966 const char *name = dentry->d_name.name;
967 char *_name = (char *) kmalloc(len + 1, GFP_KERNEL); 967 char *_name = kmalloc(len + 1, GFP_KERNEL);
968 memcpy(_name, name, len); 968 memcpy(_name, name, len);
969 _name[len] = '\0'; 969 _name[len] = '\0';
970 printk("***jffs_remove(): file = \"%s\", ino = %ld\n", _name, dentry->d_inode->i_ino); 970 printk("***jffs_remove(): file = \"%s\", ino = %ld\n", _name, dentry->d_inode->i_ino);
diff --git a/fs/jffs/intrep.c b/fs/jffs/intrep.c
index d0e783f199ea..6dd18911b44c 100644
--- a/fs/jffs/intrep.c
+++ b/fs/jffs/intrep.c
@@ -436,7 +436,7 @@ jffs_checksum_flash(struct mtd_info *mtd, loff_t start, int size, __u32 *result)
436 int i, length; 436 int i, length;
437 437
438 /* Allocate read buffer */ 438 /* Allocate read buffer */
439 read_buf = (__u8 *) kmalloc (sizeof(__u8) * 4096, GFP_KERNEL); 439 read_buf = kmalloc(sizeof(__u8) * 4096, GFP_KERNEL);
440 if (!read_buf) { 440 if (!read_buf) {
441 printk(KERN_NOTICE "kmalloc failed in jffs_checksum_flash()\n"); 441 printk(KERN_NOTICE "kmalloc failed in jffs_checksum_flash()\n");
442 return -ENOMEM; 442 return -ENOMEM;
@@ -744,11 +744,11 @@ static int check_partly_erased_sectors(struct jffs_fmcontrol *fmc){
744 744
745 745
746 /* Allocate read buffers */ 746 /* Allocate read buffers */
747 read_buf1 = (__u8 *) kmalloc (sizeof(__u8) * READ_AHEAD_BYTES, GFP_KERNEL); 747 read_buf1 = kmalloc(sizeof(__u8) * READ_AHEAD_BYTES, GFP_KERNEL);
748 if (!read_buf1) 748 if (!read_buf1)
749 return -ENOMEM; 749 return -ENOMEM;
750 750
751 read_buf2 = (__u8 *) kmalloc (sizeof(__u8) * READ_AHEAD_BYTES, GFP_KERNEL); 751 read_buf2 = kmalloc(sizeof(__u8) * READ_AHEAD_BYTES, GFP_KERNEL);
752 if (!read_buf2) { 752 if (!read_buf2) {
753 kfree(read_buf1); 753 kfree(read_buf1);
754 return -ENOMEM; 754 return -ENOMEM;
@@ -876,7 +876,7 @@ jffs_scan_flash(struct jffs_control *c)
876 } 876 }
877 877
878 /* Allocate read buffer */ 878 /* Allocate read buffer */
879 read_buf = (__u8 *) kmalloc (sizeof(__u8) * 4096, GFP_KERNEL); 879 read_buf = kmalloc(sizeof(__u8) * 4096, GFP_KERNEL);
880 if (!read_buf) { 880 if (!read_buf) {
881 flash_safe_release(fmc->mtd); 881 flash_safe_release(fmc->mtd);
882 return -ENOMEM; 882 return -ENOMEM;
@@ -1463,7 +1463,7 @@ jffs_insert_node(struct jffs_control *c, struct jffs_file *f,
1463 kfree(f->name); 1463 kfree(f->name);
1464 DJM(no_name--); 1464 DJM(no_name--);
1465 } 1465 }
1466 if (!(f->name = (char *) kmalloc(raw_inode->nsize + 1, 1466 if (!(f->name = kmalloc(raw_inode->nsize + 1,
1467 GFP_KERNEL))) { 1467 GFP_KERNEL))) {
1468 return -ENOMEM; 1468 return -ENOMEM;
1469 } 1469 }
@@ -1737,7 +1737,7 @@ jffs_find_child(struct jffs_file *dir, const char *name, int len)
1737 printk("jffs_find_child(): Found \"%s\".\n", f->name); 1737 printk("jffs_find_child(): Found \"%s\".\n", f->name);
1738 } 1738 }
1739 else { 1739 else {
1740 char *copy = (char *) kmalloc(len + 1, GFP_KERNEL); 1740 char *copy = kmalloc(len + 1, GFP_KERNEL);
1741 if (copy) { 1741 if (copy) {
1742 memcpy(copy, name, len); 1742 memcpy(copy, name, len);
1743 copy[len] = '\0'; 1743 copy[len] = '\0';
@@ -2627,7 +2627,7 @@ jffs_print_tree(struct jffs_file *first_file, int indent)
2627 return; 2627 return;
2628 } 2628 }
2629 2629
2630 if (!(space = (char *) kmalloc(indent + 1, GFP_KERNEL))) { 2630 if (!(space = kmalloc(indent + 1, GFP_KERNEL))) {
2631 printk("jffs_print_tree(): Out of memory!\n"); 2631 printk("jffs_print_tree(): Out of memory!\n");
2632 return; 2632 return;
2633 } 2633 }
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
index 47bc0b5d1324..6d62f3222892 100644
--- a/fs/jfs/jfs_dtree.c
+++ b/fs/jfs/jfs_dtree.c
@@ -3777,12 +3777,12 @@ static int ciGetLeafPrefixKey(dtpage_t * lp, int li, dtpage_t * rp,
3777 struct component_name lkey; 3777 struct component_name lkey;
3778 struct component_name rkey; 3778 struct component_name rkey;
3779 3779
3780 lkey.name = (wchar_t *) kmalloc((JFS_NAME_MAX + 1) * sizeof(wchar_t), 3780 lkey.name = kmalloc((JFS_NAME_MAX + 1) * sizeof(wchar_t),
3781 GFP_KERNEL); 3781 GFP_KERNEL);
3782 if (lkey.name == NULL) 3782 if (lkey.name == NULL)
3783 return -ENOMEM; 3783 return -ENOMEM;
3784 3784
3785 rkey.name = (wchar_t *) kmalloc((JFS_NAME_MAX + 1) * sizeof(wchar_t), 3785 rkey.name = kmalloc((JFS_NAME_MAX + 1) * sizeof(wchar_t),
3786 GFP_KERNEL); 3786 GFP_KERNEL);
3787 if (rkey.name == NULL) { 3787 if (rkey.name == NULL) {
3788 kfree(lkey.name); 3788 kfree(lkey.name);
diff --git a/fs/jfs/jfs_filsys.h b/fs/jfs/jfs_filsys.h
index eb550b339bb8..38f70ac03bec 100644
--- a/fs/jfs/jfs_filsys.h
+++ b/fs/jfs/jfs_filsys.h
@@ -29,31 +29,21 @@
29/* 29/*
30 * file system option (superblock flag) 30 * file system option (superblock flag)
31 */ 31 */
32/* mount time flag to disable journaling to disk */ 32
33#define JFS_NOINTEGRITY 0x00000010 33/* directory option */
34#define JFS_UNICODE 0x00000001 /* unicode name */
34 35
35/* mount time flags for error handling */ 36/* mount time flags for error handling */
36#define JFS_ERR_REMOUNT_RO 0x00000002 /* remount read-only */ 37#define JFS_ERR_REMOUNT_RO 0x00000002 /* remount read-only */
37#define JFS_ERR_CONTINUE 0x00000004 /* continue */ 38#define JFS_ERR_CONTINUE 0x00000004 /* continue */
38#define JFS_ERR_PANIC 0x00000008 /* panic */ 39#define JFS_ERR_PANIC 0x00000008 /* panic */
39 40
41/* Quota support */
40#define JFS_USRQUOTA 0x00000010 42#define JFS_USRQUOTA 0x00000010
41#define JFS_GRPQUOTA 0x00000020 43#define JFS_GRPQUOTA 0x00000020
42 44
43/* platform option (conditional compilation) */ 45/* mount time flag to disable journaling to disk */
44#define JFS_AIX 0x80000000 /* AIX support */ 46#define JFS_NOINTEGRITY 0x00000040
45/* POSIX name/directory support */
46
47#define JFS_OS2 0x40000000 /* OS/2 support */
48/* case-insensitive name/directory support */
49
50#define JFS_DFS 0x20000000 /* DCE DFS LFS support */
51
52#define JFS_LINUX 0x10000000 /* Linux support */
53/* case-sensitive name/directory support */
54
55/* directory option */
56#define JFS_UNICODE 0x00000001 /* unicode name */
57 47
58/* commit option */ 48/* commit option */
59#define JFS_COMMIT 0x00000f00 /* commit option mask */ 49#define JFS_COMMIT 0x00000f00 /* commit option mask */
@@ -61,6 +51,7 @@
61#define JFS_LAZYCOMMIT 0x00000200 /* lazy commit */ 51#define JFS_LAZYCOMMIT 0x00000200 /* lazy commit */
62#define JFS_TMPFS 0x00000400 /* temporary file system - 52#define JFS_TMPFS 0x00000400 /* temporary file system -
63 * do not log/commit: 53 * do not log/commit:
54 * Never implemented
64 */ 55 */
65 56
66/* log logical volume option */ 57/* log logical volume option */
@@ -74,16 +65,25 @@
74#define JFS_SPARSE 0x00020000 /* sparse regular file */ 65#define JFS_SPARSE 0x00020000 /* sparse regular file */
75 66
76/* DASD Limits F226941 */ 67/* DASD Limits F226941 */
77#define JFS_DASD_ENABLED 0x00040000 /* DASD limits enabled */ 68#define JFS_DASD_ENABLED 0x00040000 /* DASD limits enabled */
78#define JFS_DASD_PRIME 0x00080000 /* Prime DASD usage on boot */ 69#define JFS_DASD_PRIME 0x00080000 /* Prime DASD usage on boot */
79 70
80/* big endian flag */ 71/* big endian flag */
81#define JFS_SWAP_BYTES 0x00100000 /* running on big endian computer */ 72#define JFS_SWAP_BYTES 0x00100000 /* running on big endian computer */
82 73
83/* Directory index */ 74/* Directory index */
84#define JFS_DIR_INDEX 0x00200000 /* Persistent index for */ 75#define JFS_DIR_INDEX 0x00200000 /* Persistent index for */
85 /* directory entries */
86 76
77/* platform options */
78#define JFS_LINUX 0x10000000 /* Linux support */
79#define JFS_DFS 0x20000000 /* DCE DFS LFS support */
80/* Never implemented */
81
82#define JFS_OS2 0x40000000 /* OS/2 support */
83/* case-insensitive name/directory support */
84
85#define JFS_AIX 0x80000000 /* AIX support */
86/* POSIX name/directory support - Never implemented*/
87 87
88/* 88/*
89 * buffer cache configuration 89 * buffer cache configuration
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index ee9b473b7b80..53f63b47a6d3 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -120,7 +120,7 @@ int diMount(struct inode *ipimap)
120 * allocate/initialize the in-memory inode map control structure 120 * allocate/initialize the in-memory inode map control structure
121 */ 121 */
122 /* allocate the in-memory inode map control structure. */ 122 /* allocate the in-memory inode map control structure. */
123 imap = (struct inomap *) kmalloc(sizeof(struct inomap), GFP_KERNEL); 123 imap = kmalloc(sizeof(struct inomap), GFP_KERNEL);
124 if (imap == NULL) { 124 if (imap == NULL) {
125 jfs_err("diMount: kmalloc returned NULL!"); 125 jfs_err("diMount: kmalloc returned NULL!");
126 return -ENOMEM; 126 return -ENOMEM;
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index 92681c9e9b20..062707745162 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -36,7 +36,7 @@ struct nlm_wait {
36 struct nlm_host * b_host; 36 struct nlm_host * b_host;
37 struct file_lock * b_lock; /* local file lock */ 37 struct file_lock * b_lock; /* local file lock */
38 unsigned short b_reclaim; /* got to reclaim lock */ 38 unsigned short b_reclaim; /* got to reclaim lock */
39 u32 b_status; /* grant callback status */ 39 __be32 b_status; /* grant callback status */
40}; 40};
41 41
42static LIST_HEAD(nlm_blocked); 42static LIST_HEAD(nlm_blocked);
@@ -53,7 +53,7 @@ struct nlm_wait *nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *
53 block->b_host = host; 53 block->b_host = host;
54 block->b_lock = fl; 54 block->b_lock = fl;
55 init_waitqueue_head(&block->b_wait); 55 init_waitqueue_head(&block->b_wait);
56 block->b_status = NLM_LCK_BLOCKED; 56 block->b_status = nlm_lck_blocked;
57 list_add(&block->b_list, &nlm_blocked); 57 list_add(&block->b_list, &nlm_blocked);
58 } 58 }
59 return block; 59 return block;
@@ -89,7 +89,7 @@ int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout)
89 * nlmclnt_lock for an explanation. 89 * nlmclnt_lock for an explanation.
90 */ 90 */
91 ret = wait_event_interruptible_timeout(block->b_wait, 91 ret = wait_event_interruptible_timeout(block->b_wait,
92 block->b_status != NLM_LCK_BLOCKED, 92 block->b_status != nlm_lck_blocked,
93 timeout); 93 timeout);
94 if (ret < 0) 94 if (ret < 0)
95 return -ERESTARTSYS; 95 return -ERESTARTSYS;
@@ -131,7 +131,7 @@ __be32 nlmclnt_grant(const struct sockaddr_in *addr, const struct nlm_lock *lock
131 /* Alright, we found a lock. Set the return status 131 /* Alright, we found a lock. Set the return status
132 * and wake up the caller 132 * and wake up the caller
133 */ 133 */
134 block->b_status = NLM_LCK_GRANTED; 134 block->b_status = nlm_granted;
135 wake_up(&block->b_wait); 135 wake_up(&block->b_wait);
136 res = nlm_granted; 136 res = nlm_granted;
137 } 137 }
@@ -211,7 +211,7 @@ restart:
211 /* Now, wake up all processes that sleep on a blocked lock */ 211 /* Now, wake up all processes that sleep on a blocked lock */
212 list_for_each_entry(block, &nlm_blocked, b_list) { 212 list_for_each_entry(block, &nlm_blocked, b_list) {
213 if (block->b_host == host) { 213 if (block->b_host == host) {
214 block->b_status = NLM_LCK_DENIED_GRACE_PERIOD; 214 block->b_status = nlm_lck_denied_grace_period;
215 wake_up(&block->b_wait); 215 wake_up(&block->b_wait);
216 } 216 }
217 } 217 }
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 80a1a6dccc8f..0b4acc1c5e7d 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -27,7 +27,7 @@
27static int nlmclnt_test(struct nlm_rqst *, struct file_lock *); 27static int nlmclnt_test(struct nlm_rqst *, struct file_lock *);
28static int nlmclnt_lock(struct nlm_rqst *, struct file_lock *); 28static int nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
29static int nlmclnt_unlock(struct nlm_rqst *, struct file_lock *); 29static int nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
30static int nlm_stat_to_errno(u32 stat); 30static int nlm_stat_to_errno(__be32 stat);
31static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host); 31static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);
32static int nlmclnt_cancel(struct nlm_host *, int , struct file_lock *); 32static int nlmclnt_cancel(struct nlm_host *, int , struct file_lock *);
33 33
@@ -325,7 +325,7 @@ nlmclnt_call(struct nlm_rqst *req, u32 proc)
325 } 325 }
326 break; 326 break;
327 } else 327 } else
328 if (resp->status == NLM_LCK_DENIED_GRACE_PERIOD) { 328 if (resp->status == nlm_lck_denied_grace_period) {
329 dprintk("lockd: server in grace period\n"); 329 dprintk("lockd: server in grace period\n");
330 if (argp->reclaim) { 330 if (argp->reclaim) {
331 printk(KERN_WARNING 331 printk(KERN_WARNING
@@ -411,10 +411,10 @@ nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
411 goto out; 411 goto out;
412 412
413 switch (req->a_res.status) { 413 switch (req->a_res.status) {
414 case NLM_LCK_GRANTED: 414 case nlm_granted:
415 fl->fl_type = F_UNLCK; 415 fl->fl_type = F_UNLCK;
416 break; 416 break;
417 case NLM_LCK_DENIED: 417 case nlm_lck_denied:
418 /* 418 /*
419 * Report the conflicting lock back to the application. 419 * Report the conflicting lock back to the application.
420 */ 420 */
@@ -524,9 +524,9 @@ again:
524 if (!req->a_args.block) 524 if (!req->a_args.block)
525 break; 525 break;
526 /* Did a reclaimer thread notify us of a server reboot? */ 526 /* Did a reclaimer thread notify us of a server reboot? */
527 if (resp->status == NLM_LCK_DENIED_GRACE_PERIOD) 527 if (resp->status == nlm_lck_denied_grace_period)
528 continue; 528 continue;
529 if (resp->status != NLM_LCK_BLOCKED) 529 if (resp->status != nlm_lck_blocked)
530 break; 530 break;
531 /* Wait on an NLM blocking lock */ 531 /* Wait on an NLM blocking lock */
532 status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT); 532 status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
@@ -535,11 +535,11 @@ again:
535 */ 535 */
536 if (status < 0) 536 if (status < 0)
537 goto out_unblock; 537 goto out_unblock;
538 if (resp->status != NLM_LCK_BLOCKED) 538 if (resp->status != nlm_lck_blocked)
539 break; 539 break;
540 } 540 }
541 541
542 if (resp->status == NLM_LCK_GRANTED) { 542 if (resp->status == nlm_granted) {
543 down_read(&host->h_rwsem); 543 down_read(&host->h_rwsem);
544 /* Check whether or not the server has rebooted */ 544 /* Check whether or not the server has rebooted */
545 if (fl->fl_u.nfs_fl.state != host->h_state) { 545 if (fl->fl_u.nfs_fl.state != host->h_state) {
@@ -556,7 +556,7 @@ again:
556out_unblock: 556out_unblock:
557 nlmclnt_finish_block(block); 557 nlmclnt_finish_block(block);
558 /* Cancel the blocked request if it is still pending */ 558 /* Cancel the blocked request if it is still pending */
559 if (resp->status == NLM_LCK_BLOCKED) 559 if (resp->status == nlm_lck_blocked)
560 nlmclnt_cancel(host, req->a_args.block, fl); 560 nlmclnt_cancel(host, req->a_args.block, fl);
561out: 561out:
562 nlm_release_call(req); 562 nlm_release_call(req);
@@ -585,12 +585,12 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
585 req->a_args.reclaim = 1; 585 req->a_args.reclaim = 1;
586 586
587 if ((status = nlmclnt_call(req, NLMPROC_LOCK)) >= 0 587 if ((status = nlmclnt_call(req, NLMPROC_LOCK)) >= 0
588 && req->a_res.status == NLM_LCK_GRANTED) 588 && req->a_res.status == nlm_granted)
589 return 0; 589 return 0;
590 590
591 printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d " 591 printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
592 "(errno %d, status %d)\n", fl->fl_pid, 592 "(errno %d, status %d)\n", fl->fl_pid,
593 status, req->a_res.status); 593 status, ntohl(req->a_res.status));
594 594
595 /* 595 /*
596 * FIXME: This is a serious failure. We can 596 * FIXME: This is a serious failure. We can
@@ -637,10 +637,10 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
637 if (status < 0) 637 if (status < 0)
638 goto out; 638 goto out;
639 639
640 if (resp->status == NLM_LCK_GRANTED) 640 if (resp->status == nlm_granted)
641 goto out; 641 goto out;
642 642
643 if (resp->status != NLM_LCK_DENIED_NOLOCKS) 643 if (resp->status != nlm_lck_denied_nolocks)
644 printk("lockd: unexpected unlock status: %d\n", resp->status); 644 printk("lockd: unexpected unlock status: %d\n", resp->status);
645 /* What to do now? I'm out of my depth... */ 645 /* What to do now? I'm out of my depth... */
646 status = -ENOLCK; 646 status = -ENOLCK;
@@ -652,7 +652,7 @@ out:
652static void nlmclnt_unlock_callback(struct rpc_task *task, void *data) 652static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
653{ 653{
654 struct nlm_rqst *req = data; 654 struct nlm_rqst *req = data;
655 int status = req->a_res.status; 655 u32 status = ntohl(req->a_res.status);
656 656
657 if (RPC_ASSASSINATED(task)) 657 if (RPC_ASSASSINATED(task))
658 goto die; 658 goto die;
@@ -720,6 +720,7 @@ static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl
720static void nlmclnt_cancel_callback(struct rpc_task *task, void *data) 720static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
721{ 721{
722 struct nlm_rqst *req = data; 722 struct nlm_rqst *req = data;
723 u32 status = ntohl(req->a_res.status);
723 724
724 if (RPC_ASSASSINATED(task)) 725 if (RPC_ASSASSINATED(task))
725 goto die; 726 goto die;
@@ -731,9 +732,9 @@ static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
731 } 732 }
732 733
733 dprintk("lockd: cancel status %u (task %u)\n", 734 dprintk("lockd: cancel status %u (task %u)\n",
734 req->a_res.status, task->tk_pid); 735 status, task->tk_pid);
735 736
736 switch (req->a_res.status) { 737 switch (status) {
737 case NLM_LCK_GRANTED: 738 case NLM_LCK_GRANTED:
738 case NLM_LCK_DENIED_GRACE_PERIOD: 739 case NLM_LCK_DENIED_GRACE_PERIOD:
739 case NLM_LCK_DENIED: 740 case NLM_LCK_DENIED:
@@ -744,7 +745,7 @@ static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
744 goto retry_cancel; 745 goto retry_cancel;
745 default: 746 default:
746 printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n", 747 printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n",
747 req->a_res.status); 748 status);
748 } 749 }
749 750
750die: 751die:
@@ -768,9 +769,9 @@ static const struct rpc_call_ops nlmclnt_cancel_ops = {
768 * Convert an NLM status code to a generic kernel errno 769 * Convert an NLM status code to a generic kernel errno
769 */ 770 */
770static int 771static int
771nlm_stat_to_errno(u32 status) 772nlm_stat_to_errno(__be32 status)
772{ 773{
773 switch(status) { 774 switch(ntohl(status)) {
774 case NLM_LCK_GRANTED: 775 case NLM_LCK_GRANTED:
775 return 0; 776 return 0;
776 case NLM_LCK_DENIED: 777 case NLM_LCK_DENIED:
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index 5c054b20fd5e..c7db0a5bccdc 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -645,7 +645,7 @@ static const struct rpc_call_ops nlmsvc_grant_ops = {
645 * block. 645 * block.
646 */ 646 */
647void 647void
648nlmsvc_grant_reply(struct nlm_cookie *cookie, u32 status) 648nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status)
649{ 649{
650 struct nlm_block *block; 650 struct nlm_block *block;
651 651
@@ -655,7 +655,7 @@ nlmsvc_grant_reply(struct nlm_cookie *cookie, u32 status)
655 return; 655 return;
656 656
657 if (block) { 657 if (block) {
658 if (status == NLM_LCK_DENIED_GRACE_PERIOD) { 658 if (status == nlm_lck_denied_grace_period) {
659 /* Try again in a couple of seconds */ 659 /* Try again in a couple of seconds */
660 nlmsvc_insert_block(block, 10 * HZ); 660 nlmsvc_insert_block(block, 10 * HZ);
661 } else { 661 } else {
diff --git a/fs/lockd/svcshare.c b/fs/lockd/svcshare.c
index 6220dc2a3f2c..068886de4dda 100644
--- a/fs/lockd/svcshare.c
+++ b/fs/lockd/svcshare.c
@@ -39,7 +39,7 @@ nlmsvc_share_file(struct nlm_host *host, struct nlm_file *file,
39 return nlm_lck_denied; 39 return nlm_lck_denied;
40 } 40 }
41 41
42 share = (struct nlm_share *) kmalloc(sizeof(*share) + oh->len, 42 share = kmalloc(sizeof(*share) + oh->len,
43 GFP_KERNEL); 43 GFP_KERNEL);
44 if (share == NULL) 44 if (share == NULL)
45 return nlm_lck_denied_nolocks; 45 return nlm_lck_denied_nolocks;
diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c
index b7c949256e5a..34dae5d70738 100644
--- a/fs/lockd/xdr.c
+++ b/fs/lockd/xdr.c
@@ -361,7 +361,7 @@ nlmsvc_decode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
361{ 361{
362 if (!(p = nlm_decode_cookie(p, &resp->cookie))) 362 if (!(p = nlm_decode_cookie(p, &resp->cookie)))
363 return 0; 363 return 0;
364 resp->status = ntohl(*p++); 364 resp->status = *p++;
365 return xdr_argsize_check(rqstp, p); 365 return xdr_argsize_check(rqstp, p);
366} 366}
367 367
@@ -407,8 +407,8 @@ nlmclt_decode_testres(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
407{ 407{
408 if (!(p = nlm_decode_cookie(p, &resp->cookie))) 408 if (!(p = nlm_decode_cookie(p, &resp->cookie)))
409 return -EIO; 409 return -EIO;
410 resp->status = ntohl(*p++); 410 resp->status = *p++;
411 if (resp->status == NLM_LCK_DENIED) { 411 if (resp->status == nlm_lck_denied) {
412 struct file_lock *fl = &resp->lock.fl; 412 struct file_lock *fl = &resp->lock.fl;
413 u32 excl; 413 u32 excl;
414 s32 start, len, end; 414 s32 start, len, end;
@@ -506,7 +506,7 @@ nlmclt_decode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
506{ 506{
507 if (!(p = nlm_decode_cookie(p, &resp->cookie))) 507 if (!(p = nlm_decode_cookie(p, &resp->cookie)))
508 return -EIO; 508 return -EIO;
509 resp->status = ntohl(*p++); 509 resp->status = *p++;
510 return 0; 510 return 0;
511} 511}
512 512
diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
index f4c0b2b9f75a..a78240551219 100644
--- a/fs/lockd/xdr4.c
+++ b/fs/lockd/xdr4.c
@@ -367,7 +367,7 @@ nlm4svc_decode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
367{ 367{
368 if (!(p = nlm4_decode_cookie(p, &resp->cookie))) 368 if (!(p = nlm4_decode_cookie(p, &resp->cookie)))
369 return 0; 369 return 0;
370 resp->status = ntohl(*p++); 370 resp->status = *p++;
371 return xdr_argsize_check(rqstp, p); 371 return xdr_argsize_check(rqstp, p);
372} 372}
373 373
@@ -413,8 +413,8 @@ nlm4clt_decode_testres(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
413{ 413{
414 if (!(p = nlm4_decode_cookie(p, &resp->cookie))) 414 if (!(p = nlm4_decode_cookie(p, &resp->cookie)))
415 return -EIO; 415 return -EIO;
416 resp->status = ntohl(*p++); 416 resp->status = *p++;
417 if (resp->status == NLM_LCK_DENIED) { 417 if (resp->status == nlm_lck_denied) {
418 struct file_lock *fl = &resp->lock.fl; 418 struct file_lock *fl = &resp->lock.fl;
419 u32 excl; 419 u32 excl;
420 s64 start, end, len; 420 s64 start, end, len;
@@ -512,7 +512,7 @@ nlm4clt_decode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
512{ 512{
513 if (!(p = nlm4_decode_cookie(p, &resp->cookie))) 513 if (!(p = nlm4_decode_cookie(p, &resp->cookie)))
514 return -EIO; 514 return -EIO;
515 resp->status = ntohl(*p++); 515 resp->status = *p++;
516 return 0; 516 return 0;
517} 517}
518 518
diff --git a/fs/namespace.c b/fs/namespace.c
index fde8553faa76..5ef336c1103c 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -368,6 +368,7 @@ static int show_vfsmnt(struct seq_file *m, void *v)
368 { MNT_NOEXEC, ",noexec" }, 368 { MNT_NOEXEC, ",noexec" },
369 { MNT_NOATIME, ",noatime" }, 369 { MNT_NOATIME, ",noatime" },
370 { MNT_NODIRATIME, ",nodiratime" }, 370 { MNT_NODIRATIME, ",nodiratime" },
371 { MNT_RELATIME, ",relatime" },
371 { 0, NULL } 372 { 0, NULL }
372 }; 373 };
373 struct proc_fs_info *fs_infop; 374 struct proc_fs_info *fs_infop;
@@ -1405,9 +1406,11 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
1405 mnt_flags |= MNT_NOATIME; 1406 mnt_flags |= MNT_NOATIME;
1406 if (flags & MS_NODIRATIME) 1407 if (flags & MS_NODIRATIME)
1407 mnt_flags |= MNT_NODIRATIME; 1408 mnt_flags |= MNT_NODIRATIME;
1409 if (flags & MS_RELATIME)
1410 mnt_flags |= MNT_RELATIME;
1408 1411
1409 flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | 1412 flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE |
1410 MS_NOATIME | MS_NODIRATIME); 1413 MS_NOATIME | MS_NODIRATIME | MS_RELATIME);
1411 1414
1412 /* ... and get the mountpoint */ 1415 /* ... and get the mountpoint */
1413 retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd); 1416 retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd);
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 47462ac94474..67a90bf795d5 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -327,11 +327,12 @@ static int ncp_parse_options(struct ncp_mount_data_kernel *data, char *options)
327 char *optarg; 327 char *optarg;
328 unsigned long optint; 328 unsigned long optint;
329 int version = 0; 329 int version = 0;
330 int ret;
330 331
331 data->flags = 0; 332 data->flags = 0;
332 data->int_flags = 0; 333 data->int_flags = 0;
333 data->mounted_uid = 0; 334 data->mounted_uid = 0;
334 data->wdog_pid = -1; 335 data->wdog_pid = NULL;
335 data->ncp_fd = ~0; 336 data->ncp_fd = ~0;
336 data->time_out = 10; 337 data->time_out = 10;
337 data->retry_count = 20; 338 data->retry_count = 20;
@@ -343,8 +344,9 @@ static int ncp_parse_options(struct ncp_mount_data_kernel *data, char *options)
343 data->mounted_vol[0] = 0; 344 data->mounted_vol[0] = 0;
344 345
345 while ((optval = ncp_getopt("ncpfs", &options, ncp_opts, NULL, &optarg, &optint)) != 0) { 346 while ((optval = ncp_getopt("ncpfs", &options, ncp_opts, NULL, &optarg, &optint)) != 0) {
346 if (optval < 0) 347 ret = optval;
347 return optval; 348 if (ret < 0)
349 goto err;
348 switch (optval) { 350 switch (optval) {
349 case 'u': 351 case 'u':
350 data->uid = optint; 352 data->uid = optint;
@@ -371,7 +373,7 @@ static int ncp_parse_options(struct ncp_mount_data_kernel *data, char *options)
371 data->flags = optint; 373 data->flags = optint;
372 break; 374 break;
373 case 'w': 375 case 'w':
374 data->wdog_pid = optint; 376 data->wdog_pid = find_get_pid(optint);
375 break; 377 break;
376 case 'n': 378 case 'n':
377 data->ncp_fd = optint; 379 data->ncp_fd = optint;
@@ -380,18 +382,21 @@ static int ncp_parse_options(struct ncp_mount_data_kernel *data, char *options)
380 data->info_fd = optint; 382 data->info_fd = optint;
381 break; 383 break;
382 case 'v': 384 case 'v':
383 if (optint < NCP_MOUNT_VERSION_V4) { 385 ret = -ECHRNG;
384 return -ECHRNG; 386 if (optint < NCP_MOUNT_VERSION_V4)
385 } 387 goto err;
386 if (optint > NCP_MOUNT_VERSION_V5) { 388 if (optint > NCP_MOUNT_VERSION_V5)
387 return -ECHRNG; 389 goto err;
388 }
389 version = optint; 390 version = optint;
390 break; 391 break;
391 392
392 } 393 }
393 } 394 }
394 return 0; 395 return 0;
396err:
397 put_pid(data->wdog_pid);
398 data->wdog_pid = NULL;
399 return ret;
395} 400}
396 401
397static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent) 402static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
@@ -409,6 +414,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
409#endif 414#endif
410 struct ncp_entry_info finfo; 415 struct ncp_entry_info finfo;
411 416
417 data.wdog_pid = NULL;
412 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL); 418 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
413 if (!server) 419 if (!server)
414 return -ENOMEM; 420 return -ENOMEM;
@@ -425,7 +431,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
425 data.flags = md->flags; 431 data.flags = md->flags;
426 data.int_flags = NCP_IMOUNT_LOGGEDIN_POSSIBLE; 432 data.int_flags = NCP_IMOUNT_LOGGEDIN_POSSIBLE;
427 data.mounted_uid = md->mounted_uid; 433 data.mounted_uid = md->mounted_uid;
428 data.wdog_pid = md->wdog_pid; 434 data.wdog_pid = find_get_pid(md->wdog_pid);
429 data.ncp_fd = md->ncp_fd; 435 data.ncp_fd = md->ncp_fd;
430 data.time_out = md->time_out; 436 data.time_out = md->time_out;
431 data.retry_count = md->retry_count; 437 data.retry_count = md->retry_count;
@@ -445,7 +451,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
445 data.flags = md->flags; 451 data.flags = md->flags;
446 data.int_flags = 0; 452 data.int_flags = 0;
447 data.mounted_uid = md->mounted_uid; 453 data.mounted_uid = md->mounted_uid;
448 data.wdog_pid = md->wdog_pid; 454 data.wdog_pid = find_get_pid(md->wdog_pid);
449 data.ncp_fd = md->ncp_fd; 455 data.ncp_fd = md->ncp_fd;
450 data.time_out = md->time_out; 456 data.time_out = md->time_out;
451 data.retry_count = md->retry_count; 457 data.retry_count = md->retry_count;
@@ -679,6 +685,7 @@ out_fput:
679 */ 685 */
680 fput(ncp_filp); 686 fput(ncp_filp);
681out: 687out:
688 put_pid(data.wdog_pid);
682 sb->s_fs_info = NULL; 689 sb->s_fs_info = NULL;
683 kfree(server); 690 kfree(server);
684 return error; 691 return error;
@@ -711,7 +718,8 @@ static void ncp_put_super(struct super_block *sb)
711 if (server->info_filp) 718 if (server->info_filp)
712 fput(server->info_filp); 719 fput(server->info_filp);
713 fput(server->ncp_filp); 720 fput(server->ncp_filp);
714 kill_proc(server->m.wdog_pid, SIGTERM, 1); 721 kill_pid(server->m.wdog_pid, SIGTERM, 1);
722 put_pid(server->m.wdog_pid);
715 723
716 kfree(server->priv.data); 724 kfree(server->priv.data);
717 kfree(server->auth.object_name); 725 kfree(server->auth.object_name);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ee458aeab24a..b3fd29baadc3 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1877,7 +1877,7 @@ static int nfs4_proc_unlink_setup(struct rpc_message *msg, struct dentry *dir,
1877 struct nfs_server *server = NFS_SERVER(dir->d_inode); 1877 struct nfs_server *server = NFS_SERVER(dir->d_inode);
1878 struct unlink_desc *up; 1878 struct unlink_desc *up;
1879 1879
1880 up = (struct unlink_desc *) kmalloc(sizeof(*up), GFP_KERNEL); 1880 up = kmalloc(sizeof(*up), GFP_KERNEL);
1881 if (!up) 1881 if (!up)
1882 return -ENOMEM; 1882 return -ENOMEM;
1883 1883
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index f37df46d2eaa..248dd92e6a56 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -787,15 +787,20 @@ exp_get_by_name(svc_client *clp, struct vfsmount *mnt, struct dentry *dentry,
787 key.ex_dentry = dentry; 787 key.ex_dentry = dentry;
788 788
789 exp = svc_export_lookup(&key); 789 exp = svc_export_lookup(&key);
790 if (exp != NULL) 790 if (exp != NULL) {
791 switch (cache_check(&svc_export_cache, &exp->h, reqp)) { 791 int err;
792
793 err = cache_check(&svc_export_cache, &exp->h, reqp);
794 switch (err) {
792 case 0: break; 795 case 0: break;
793 case -EAGAIN: 796 case -EAGAIN:
794 exp = ERR_PTR(-EAGAIN); 797 case -ETIMEDOUT:
798 exp = ERR_PTR(err);
795 break; 799 break;
796 default: 800 default:
797 exp = NULL; 801 exp = NULL;
798 } 802 }
803 }
799 804
800 return exp; 805 return exp;
801} 806}
@@ -950,6 +955,8 @@ exp_export(struct nfsctl_export *nxp)
950 955
951 exp = exp_get_by_name(clp, nd.mnt, nd.dentry, NULL); 956 exp = exp_get_by_name(clp, nd.mnt, nd.dentry, NULL);
952 957
958 memset(&new, 0, sizeof(new));
959
953 /* must make sure there won't be an ex_fsid clash */ 960 /* must make sure there won't be an ex_fsid clash */
954 if ((nxp->ex_flags & NFSEXP_FSID) && 961 if ((nxp->ex_flags & NFSEXP_FSID) &&
955 (fsid_key = exp_get_fsid_key(clp, nxp->ex_dev)) && 962 (fsid_key = exp_get_fsid_key(clp, nxp->ex_dev)) &&
@@ -980,6 +987,9 @@ exp_export(struct nfsctl_export *nxp)
980 987
981 new.h.expiry_time = NEVER; 988 new.h.expiry_time = NEVER;
982 new.h.flags = 0; 989 new.h.flags = 0;
990 new.ex_path = kstrdup(nxp->ex_path, GFP_KERNEL);
991 if (!new.ex_path)
992 goto finish;
983 new.ex_client = clp; 993 new.ex_client = clp;
984 new.ex_mnt = nd.mnt; 994 new.ex_mnt = nd.mnt;
985 new.ex_dentry = nd.dentry; 995 new.ex_dentry = nd.dentry;
@@ -1000,10 +1010,11 @@ exp_export(struct nfsctl_export *nxp)
1000 /* failed to create at least one index */ 1010 /* failed to create at least one index */
1001 exp_do_unexport(exp); 1011 exp_do_unexport(exp);
1002 cache_flush(); 1012 cache_flush();
1003 err = -ENOMEM; 1013 } else
1004 } 1014 err = 0;
1005
1006finish: 1015finish:
1016 if (new.ex_path)
1017 kfree(new.ex_path);
1007 if (exp) 1018 if (exp)
1008 exp_put(exp); 1019 exp_put(exp);
1009 if (fsid_key && !IS_ERR(fsid_key)) 1020 if (fsid_key && !IS_ERR(fsid_key))
@@ -1104,6 +1115,10 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
1104 path, nd.dentry, clp->name, 1115 path, nd.dentry, clp->name,
1105 inode->i_sb->s_id, inode->i_ino); 1116 inode->i_sb->s_id, inode->i_ino);
1106 exp = exp_parent(clp, nd.mnt, nd.dentry, NULL); 1117 exp = exp_parent(clp, nd.mnt, nd.dentry, NULL);
1118 if (IS_ERR(exp)) {
1119 err = PTR_ERR(exp);
1120 goto out;
1121 }
1107 if (!exp) { 1122 if (!exp) {
1108 dprintk("nfsd: exp_rootfh export not found.\n"); 1123 dprintk("nfsd: exp_rootfh export not found.\n");
1109 goto out; 1124 goto out;
@@ -1159,12 +1174,10 @@ exp_pseudoroot(struct auth_domain *clp, struct svc_fh *fhp,
1159 mk_fsid_v1(fsidv, 0); 1174 mk_fsid_v1(fsidv, 0);
1160 1175
1161 exp = exp_find(clp, 1, fsidv, creq); 1176 exp = exp_find(clp, 1, fsidv, creq);
1162 if (IS_ERR(exp) && PTR_ERR(exp) == -EAGAIN) 1177 if (IS_ERR(exp))
1163 return nfserr_dropit; 1178 return nfserrno(PTR_ERR(exp));
1164 if (exp == NULL) 1179 if (exp == NULL)
1165 return nfserr_perm; 1180 return nfserr_perm;
1166 else if (IS_ERR(exp))
1167 return nfserrno(PTR_ERR(exp));
1168 rv = fh_compose(fhp, exp, exp->ex_dentry, NULL); 1181 rv = fh_compose(fhp, exp, exp->ex_dentry, NULL);
1169 exp_put(exp); 1182 exp_put(exp);
1170 return rv; 1183 return rv;
diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
index 11fdaf7721b4..221acd1f11f6 100644
--- a/fs/nfsd/lockd.c
+++ b/fs/nfsd/lockd.c
@@ -22,7 +22,7 @@
22/* 22/*
23 * Note: we hold the dentry use count while the file is open. 23 * Note: we hold the dentry use count while the file is open.
24 */ 24 */
25static u32 25static __be32
26nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp) 26nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp)
27{ 27{
28 __be32 nfserr; 28 __be32 nfserr;
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 50bc94243ca1..8522729830db 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -33,13 +33,6 @@
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 *
37 * Note: some routines in this file are just trivial wrappers
38 * (e.g. nfsd4_lookup()) defined solely for the sake of consistent
39 * naming. Since all such routines have been declared "inline",
40 * there shouldn't be any associated overhead. At some point in
41 * the future, I might inline these "by hand" to clean up a
42 * little.
43 */ 36 */
44 37
45#include <linux/param.h> 38#include <linux/param.h>
@@ -161,8 +154,9 @@ do_open_fhandle(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_
161} 154}
162 155
163 156
164static inline __be32 157static __be32
165nfsd4_open(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open, struct nfs4_stateowner **replay_owner) 158nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
159 struct nfsd4_open *open)
166{ 160{
167 __be32 status; 161 __be32 status;
168 dprintk("NFSD: nfsd4_open filename %.*s op_stateowner %p\n", 162 dprintk("NFSD: nfsd4_open filename %.*s op_stateowner %p\n",
@@ -179,11 +173,11 @@ nfsd4_open(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open
179 status = nfsd4_process_open1(open); 173 status = nfsd4_process_open1(open);
180 if (status == nfserr_replay_me) { 174 if (status == nfserr_replay_me) {
181 struct nfs4_replay *rp = &open->op_stateowner->so_replay; 175 struct nfs4_replay *rp = &open->op_stateowner->so_replay;
182 fh_put(current_fh); 176 fh_put(&cstate->current_fh);
183 current_fh->fh_handle.fh_size = rp->rp_openfh_len; 177 cstate->current_fh.fh_handle.fh_size = rp->rp_openfh_len;
184 memcpy(&current_fh->fh_handle.fh_base, rp->rp_openfh, 178 memcpy(&cstate->current_fh.fh_handle.fh_base, rp->rp_openfh,
185 rp->rp_openfh_len); 179 rp->rp_openfh_len);
186 status = fh_verify(rqstp, current_fh, 0, MAY_NOP); 180 status = fh_verify(rqstp, &cstate->current_fh, 0, MAY_NOP);
187 if (status) 181 if (status)
188 dprintk("nfsd4_open: replay failed" 182 dprintk("nfsd4_open: replay failed"
189 " restoring previous filehandle\n"); 183 " restoring previous filehandle\n");
@@ -215,7 +209,8 @@ nfsd4_open(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open
215 * (3) set open->op_truncate if the file is to be 209 * (3) set open->op_truncate if the file is to be
216 * truncated after opening, (4) do permission checking. 210 * truncated after opening, (4) do permission checking.
217 */ 211 */
218 status = do_open_lookup(rqstp, current_fh, open); 212 status = do_open_lookup(rqstp, &cstate->current_fh,
213 open);
219 if (status) 214 if (status)
220 goto out; 215 goto out;
221 break; 216 break;
@@ -227,7 +222,8 @@ nfsd4_open(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open
227 * open->op_truncate if the file is to be truncated 222 * open->op_truncate if the file is to be truncated
228 * after opening, (3) do permission checking. 223 * after opening, (3) do permission checking.
229 */ 224 */
230 status = do_open_fhandle(rqstp, current_fh, open); 225 status = do_open_fhandle(rqstp, &cstate->current_fh,
226 open);
231 if (status) 227 if (status)
232 goto out; 228 goto out;
233 break; 229 break;
@@ -248,11 +244,11 @@ nfsd4_open(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open
248 * successful, it (1) truncates the file if open->op_truncate was 244 * successful, it (1) truncates the file if open->op_truncate was
249 * set, (2) sets open->op_stateid, (3) sets open->op_delegation. 245 * set, (2) sets open->op_stateid, (3) sets open->op_delegation.
250 */ 246 */
251 status = nfsd4_process_open2(rqstp, current_fh, open); 247 status = nfsd4_process_open2(rqstp, &cstate->current_fh, open);
252out: 248out:
253 if (open->op_stateowner) { 249 if (open->op_stateowner) {
254 nfs4_get_stateowner(open->op_stateowner); 250 nfs4_get_stateowner(open->op_stateowner);
255 *replay_owner = open->op_stateowner; 251 cstate->replay_owner = open->op_stateowner;
256 } 252 }
257 nfs4_unlock_state(); 253 nfs4_unlock_state();
258 return status; 254 return status;
@@ -261,71 +257,80 @@ out:
261/* 257/*
262 * filehandle-manipulating ops. 258 * filehandle-manipulating ops.
263 */ 259 */
264static inline __be32 260static __be32
265nfsd4_getfh(struct svc_fh *current_fh, struct svc_fh **getfh) 261nfsd4_getfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
262 struct svc_fh **getfh)
266{ 263{
267 if (!current_fh->fh_dentry) 264 if (!cstate->current_fh.fh_dentry)
268 return nfserr_nofilehandle; 265 return nfserr_nofilehandle;
269 266
270 *getfh = current_fh; 267 *getfh = &cstate->current_fh;
271 return nfs_ok; 268 return nfs_ok;
272} 269}
273 270
274static inline __be32 271static __be32
275nfsd4_putfh(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_putfh *putfh) 272nfsd4_putfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
273 struct nfsd4_putfh *putfh)
276{ 274{
277 fh_put(current_fh); 275 fh_put(&cstate->current_fh);
278 current_fh->fh_handle.fh_size = putfh->pf_fhlen; 276 cstate->current_fh.fh_handle.fh_size = putfh->pf_fhlen;
279 memcpy(&current_fh->fh_handle.fh_base, putfh->pf_fhval, putfh->pf_fhlen); 277 memcpy(&cstate->current_fh.fh_handle.fh_base, putfh->pf_fhval,
280 return fh_verify(rqstp, current_fh, 0, MAY_NOP); 278 putfh->pf_fhlen);
279 return fh_verify(rqstp, &cstate->current_fh, 0, MAY_NOP);
281} 280}
282 281
283static inline __be32 282static __be32
284nfsd4_putrootfh(struct svc_rqst *rqstp, struct svc_fh *current_fh) 283nfsd4_putrootfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
284 void *arg)
285{ 285{
286 __be32 status; 286 __be32 status;
287 287
288 fh_put(current_fh); 288 fh_put(&cstate->current_fh);
289 status = exp_pseudoroot(rqstp->rq_client, current_fh, 289 status = exp_pseudoroot(rqstp->rq_client, &cstate->current_fh,
290 &rqstp->rq_chandle); 290 &rqstp->rq_chandle);
291 return status; 291 return status;
292} 292}
293 293
294static inline __be32 294static __be32
295nfsd4_restorefh(struct svc_fh *current_fh, struct svc_fh *save_fh) 295nfsd4_restorefh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
296 void *arg)
296{ 297{
297 if (!save_fh->fh_dentry) 298 if (!cstate->save_fh.fh_dentry)
298 return nfserr_restorefh; 299 return nfserr_restorefh;
299 300
300 fh_dup2(current_fh, save_fh); 301 fh_dup2(&cstate->current_fh, &cstate->save_fh);
301 return nfs_ok; 302 return nfs_ok;
302} 303}
303 304
304static inline __be32 305static __be32
305nfsd4_savefh(struct svc_fh *current_fh, struct svc_fh *save_fh) 306nfsd4_savefh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
307 void *arg)
306{ 308{
307 if (!current_fh->fh_dentry) 309 if (!cstate->current_fh.fh_dentry)
308 return nfserr_nofilehandle; 310 return nfserr_nofilehandle;
309 311
310 fh_dup2(save_fh, current_fh); 312 fh_dup2(&cstate->save_fh, &cstate->current_fh);
311 return nfs_ok; 313 return nfs_ok;
312} 314}
313 315
314/* 316/*
315 * misc nfsv4 ops 317 * misc nfsv4 ops
316 */ 318 */
317static inline __be32 319static __be32
318nfsd4_access(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_access *access) 320nfsd4_access(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
321 struct nfsd4_access *access)
319{ 322{
320 if (access->ac_req_access & ~NFS3_ACCESS_FULL) 323 if (access->ac_req_access & ~NFS3_ACCESS_FULL)
321 return nfserr_inval; 324 return nfserr_inval;
322 325
323 access->ac_resp_access = access->ac_req_access; 326 access->ac_resp_access = access->ac_req_access;
324 return nfsd_access(rqstp, current_fh, &access->ac_resp_access, &access->ac_supported); 327 return nfsd_access(rqstp, &cstate->current_fh, &access->ac_resp_access,
328 &access->ac_supported);
325} 329}
326 330
327static inline __be32 331static __be32
328nfsd4_commit(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_commit *commit) 332nfsd4_commit(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
333 struct nfsd4_commit *commit)
329{ 334{
330 __be32 status; 335 __be32 status;
331 336
@@ -333,14 +338,16 @@ nfsd4_commit(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_com
333 *p++ = nfssvc_boot.tv_sec; 338 *p++ = nfssvc_boot.tv_sec;
334 *p++ = nfssvc_boot.tv_usec; 339 *p++ = nfssvc_boot.tv_usec;
335 340
336 status = nfsd_commit(rqstp, current_fh, commit->co_offset, commit->co_count); 341 status = nfsd_commit(rqstp, &cstate->current_fh, commit->co_offset,
342 commit->co_count);
337 if (status == nfserr_symlink) 343 if (status == nfserr_symlink)
338 status = nfserr_inval; 344 status = nfserr_inval;
339 return status; 345 return status;
340} 346}
341 347
342static __be32 348static __be32
343nfsd4_create(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_create *create) 349nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
350 struct nfsd4_create *create)
344{ 351{
345 struct svc_fh resfh; 352 struct svc_fh resfh;
346 __be32 status; 353 __be32 status;
@@ -348,7 +355,7 @@ nfsd4_create(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_cre
348 355
349 fh_init(&resfh, NFS4_FHSIZE); 356 fh_init(&resfh, NFS4_FHSIZE);
350 357
351 status = fh_verify(rqstp, current_fh, S_IFDIR, MAY_CREATE); 358 status = fh_verify(rqstp, &cstate->current_fh, S_IFDIR, MAY_CREATE);
352 if (status == nfserr_symlink) 359 if (status == nfserr_symlink)
353 status = nfserr_notdir; 360 status = nfserr_notdir;
354 if (status) 361 if (status)
@@ -365,9 +372,10 @@ nfsd4_create(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_cre
365 */ 372 */
366 create->cr_linkname[create->cr_linklen] = 0; 373 create->cr_linkname[create->cr_linklen] = 0;
367 374
368 status = nfsd_symlink(rqstp, current_fh, create->cr_name, 375 status = nfsd_symlink(rqstp, &cstate->current_fh,
369 create->cr_namelen, create->cr_linkname, 376 create->cr_name, create->cr_namelen,
370 create->cr_linklen, &resfh, &create->cr_iattr); 377 create->cr_linkname, create->cr_linklen,
378 &resfh, &create->cr_iattr);
371 break; 379 break;
372 380
373 case NF4BLK: 381 case NF4BLK:
@@ -375,9 +383,9 @@ nfsd4_create(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_cre
375 if (MAJOR(rdev) != create->cr_specdata1 || 383 if (MAJOR(rdev) != create->cr_specdata1 ||
376 MINOR(rdev) != create->cr_specdata2) 384 MINOR(rdev) != create->cr_specdata2)
377 return nfserr_inval; 385 return nfserr_inval;
378 status = nfsd_create(rqstp, current_fh, create->cr_name, 386 status = nfsd_create(rqstp, &cstate->current_fh,
379 create->cr_namelen, &create->cr_iattr, 387 create->cr_name, create->cr_namelen,
380 S_IFBLK, rdev, &resfh); 388 &create->cr_iattr, S_IFBLK, rdev, &resfh);
381 break; 389 break;
382 390
383 case NF4CHR: 391 case NF4CHR:
@@ -385,28 +393,28 @@ nfsd4_create(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_cre
385 if (MAJOR(rdev) != create->cr_specdata1 || 393 if (MAJOR(rdev) != create->cr_specdata1 ||
386 MINOR(rdev) != create->cr_specdata2) 394 MINOR(rdev) != create->cr_specdata2)
387 return nfserr_inval; 395 return nfserr_inval;
388 status = nfsd_create(rqstp, current_fh, create->cr_name, 396 status = nfsd_create(rqstp, &cstate->current_fh,
389 create->cr_namelen, &create->cr_iattr, 397 create->cr_name, create->cr_namelen,
390 S_IFCHR, rdev, &resfh); 398 &create->cr_iattr,S_IFCHR, rdev, &resfh);
391 break; 399 break;
392 400
393 case NF4SOCK: 401 case NF4SOCK:
394 status = nfsd_create(rqstp, current_fh, create->cr_name, 402 status = nfsd_create(rqstp, &cstate->current_fh,
395 create->cr_namelen, &create->cr_iattr, 403 create->cr_name, create->cr_namelen,
396 S_IFSOCK, 0, &resfh); 404 &create->cr_iattr, S_IFSOCK, 0, &resfh);
397 break; 405 break;
398 406
399 case NF4FIFO: 407 case NF4FIFO:
400 status = nfsd_create(rqstp, current_fh, create->cr_name, 408 status = nfsd_create(rqstp, &cstate->current_fh,
401 create->cr_namelen, &create->cr_iattr, 409 create->cr_name, create->cr_namelen,
402 S_IFIFO, 0, &resfh); 410 &create->cr_iattr, S_IFIFO, 0, &resfh);
403 break; 411 break;
404 412
405 case NF4DIR: 413 case NF4DIR:
406 create->cr_iattr.ia_valid &= ~ATTR_SIZE; 414 create->cr_iattr.ia_valid &= ~ATTR_SIZE;
407 status = nfsd_create(rqstp, current_fh, create->cr_name, 415 status = nfsd_create(rqstp, &cstate->current_fh,
408 create->cr_namelen, &create->cr_iattr, 416 create->cr_name, create->cr_namelen,
409 S_IFDIR, 0, &resfh); 417 &create->cr_iattr, S_IFDIR, 0, &resfh);
410 break; 418 break;
411 419
412 default: 420 default:
@@ -414,21 +422,22 @@ nfsd4_create(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_cre
414 } 422 }
415 423
416 if (!status) { 424 if (!status) {
417 fh_unlock(current_fh); 425 fh_unlock(&cstate->current_fh);
418 set_change_info(&create->cr_cinfo, current_fh); 426 set_change_info(&create->cr_cinfo, &cstate->current_fh);
419 fh_dup2(current_fh, &resfh); 427 fh_dup2(&cstate->current_fh, &resfh);
420 } 428 }
421 429
422 fh_put(&resfh); 430 fh_put(&resfh);
423 return status; 431 return status;
424} 432}
425 433
426static inline __be32 434static __be32
427nfsd4_getattr(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_getattr *getattr) 435nfsd4_getattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
436 struct nfsd4_getattr *getattr)
428{ 437{
429 __be32 status; 438 __be32 status;
430 439
431 status = fh_verify(rqstp, current_fh, 0, MAY_NOP); 440 status = fh_verify(rqstp, &cstate->current_fh, 0, MAY_NOP);
432 if (status) 441 if (status)
433 return status; 442 return status;
434 443
@@ -438,26 +447,28 @@ nfsd4_getattr(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_ge
438 getattr->ga_bmval[0] &= NFSD_SUPPORTED_ATTRS_WORD0; 447 getattr->ga_bmval[0] &= NFSD_SUPPORTED_ATTRS_WORD0;
439 getattr->ga_bmval[1] &= NFSD_SUPPORTED_ATTRS_WORD1; 448 getattr->ga_bmval[1] &= NFSD_SUPPORTED_ATTRS_WORD1;
440 449
441 getattr->ga_fhp = current_fh; 450 getattr->ga_fhp = &cstate->current_fh;
442 return nfs_ok; 451 return nfs_ok;
443} 452}
444 453
445static inline __be32 454static __be32
446nfsd4_link(struct svc_rqst *rqstp, struct svc_fh *current_fh, 455nfsd4_link(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
447 struct svc_fh *save_fh, struct nfsd4_link *link) 456 struct nfsd4_link *link)
448{ 457{
449 __be32 status = nfserr_nofilehandle; 458 __be32 status = nfserr_nofilehandle;
450 459
451 if (!save_fh->fh_dentry) 460 if (!cstate->save_fh.fh_dentry)
452 return status; 461 return status;
453 status = nfsd_link(rqstp, current_fh, link->li_name, link->li_namelen, save_fh); 462 status = nfsd_link(rqstp, &cstate->current_fh,
463 link->li_name, link->li_namelen, &cstate->save_fh);
454 if (!status) 464 if (!status)
455 set_change_info(&link->li_cinfo, current_fh); 465 set_change_info(&link->li_cinfo, &cstate->current_fh);
456 return status; 466 return status;
457} 467}
458 468
459static __be32 469static __be32
460nfsd4_lookupp(struct svc_rqst *rqstp, struct svc_fh *current_fh) 470nfsd4_lookupp(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
471 void *arg)
461{ 472{
462 struct svc_fh tmp_fh; 473 struct svc_fh tmp_fh;
463 __be32 ret; 474 __be32 ret;
@@ -466,22 +477,27 @@ nfsd4_lookupp(struct svc_rqst *rqstp, struct svc_fh *current_fh)
466 if((ret = exp_pseudoroot(rqstp->rq_client, &tmp_fh, 477 if((ret = exp_pseudoroot(rqstp->rq_client, &tmp_fh,
467 &rqstp->rq_chandle)) != 0) 478 &rqstp->rq_chandle)) != 0)
468 return ret; 479 return ret;
469 if (tmp_fh.fh_dentry == current_fh->fh_dentry) { 480 if (tmp_fh.fh_dentry == cstate->current_fh.fh_dentry) {
470 fh_put(&tmp_fh); 481 fh_put(&tmp_fh);
471 return nfserr_noent; 482 return nfserr_noent;
472 } 483 }
473 fh_put(&tmp_fh); 484 fh_put(&tmp_fh);
474 return nfsd_lookup(rqstp, current_fh, "..", 2, current_fh); 485 return nfsd_lookup(rqstp, &cstate->current_fh,
486 "..", 2, &cstate->current_fh);
475} 487}
476 488
477static inline __be32 489static __be32
478nfsd4_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lookup *lookup) 490nfsd4_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
491 struct nfsd4_lookup *lookup)
479{ 492{
480 return nfsd_lookup(rqstp, current_fh, lookup->lo_name, lookup->lo_len, current_fh); 493 return nfsd_lookup(rqstp, &cstate->current_fh,
494 lookup->lo_name, lookup->lo_len,
495 &cstate->current_fh);
481} 496}
482 497
483static inline __be32 498static __be32
484nfsd4_read(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_read *read) 499nfsd4_read(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
500 struct nfsd4_read *read)
485{ 501{
486 __be32 status; 502 __be32 status;
487 503
@@ -493,7 +509,8 @@ nfsd4_read(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_read
493 509
494 nfs4_lock_state(); 510 nfs4_lock_state();
495 /* check stateid */ 511 /* check stateid */
496 if ((status = nfs4_preprocess_stateid_op(current_fh, &read->rd_stateid, 512 if ((status = nfs4_preprocess_stateid_op(&cstate->current_fh,
513 &read->rd_stateid,
497 CHECK_FH | RD_STATE, &read->rd_filp))) { 514 CHECK_FH | RD_STATE, &read->rd_filp))) {
498 dprintk("NFSD: nfsd4_read: couldn't process stateid!\n"); 515 dprintk("NFSD: nfsd4_read: couldn't process stateid!\n");
499 goto out; 516 goto out;
@@ -504,12 +521,13 @@ nfsd4_read(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_read
504out: 521out:
505 nfs4_unlock_state(); 522 nfs4_unlock_state();
506 read->rd_rqstp = rqstp; 523 read->rd_rqstp = rqstp;
507 read->rd_fhp = current_fh; 524 read->rd_fhp = &cstate->current_fh;
508 return status; 525 return status;
509} 526}
510 527
511static inline __be32 528static __be32
512nfsd4_readdir(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_readdir *readdir) 529nfsd4_readdir(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
530 struct nfsd4_readdir *readdir)
513{ 531{
514 u64 cookie = readdir->rd_cookie; 532 u64 cookie = readdir->rd_cookie;
515 static const nfs4_verifier zeroverf; 533 static const nfs4_verifier zeroverf;
@@ -527,48 +545,51 @@ nfsd4_readdir(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_re
527 return nfserr_bad_cookie; 545 return nfserr_bad_cookie;
528 546
529 readdir->rd_rqstp = rqstp; 547 readdir->rd_rqstp = rqstp;
530 readdir->rd_fhp = current_fh; 548 readdir->rd_fhp = &cstate->current_fh;
531 return nfs_ok; 549 return nfs_ok;
532} 550}
533 551
534static inline __be32 552static __be32
535nfsd4_readlink(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_readlink *readlink) 553nfsd4_readlink(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
554 struct nfsd4_readlink *readlink)
536{ 555{
537 readlink->rl_rqstp = rqstp; 556 readlink->rl_rqstp = rqstp;
538 readlink->rl_fhp = current_fh; 557 readlink->rl_fhp = &cstate->current_fh;
539 return nfs_ok; 558 return nfs_ok;
540} 559}
541 560
542static inline __be32 561static __be32
543nfsd4_remove(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_remove *remove) 562nfsd4_remove(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
563 struct nfsd4_remove *remove)
544{ 564{
545 __be32 status; 565 __be32 status;
546 566
547 if (nfs4_in_grace()) 567 if (nfs4_in_grace())
548 return nfserr_grace; 568 return nfserr_grace;
549 status = nfsd_unlink(rqstp, current_fh, 0, remove->rm_name, remove->rm_namelen); 569 status = nfsd_unlink(rqstp, &cstate->current_fh, 0,
570 remove->rm_name, remove->rm_namelen);
550 if (status == nfserr_symlink) 571 if (status == nfserr_symlink)
551 return nfserr_notdir; 572 return nfserr_notdir;
552 if (!status) { 573 if (!status) {
553 fh_unlock(current_fh); 574 fh_unlock(&cstate->current_fh);
554 set_change_info(&remove->rm_cinfo, current_fh); 575 set_change_info(&remove->rm_cinfo, &cstate->current_fh);
555 } 576 }
556 return status; 577 return status;
557} 578}
558 579
559static inline __be32 580static __be32
560nfsd4_rename(struct svc_rqst *rqstp, struct svc_fh *current_fh, 581nfsd4_rename(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
561 struct svc_fh *save_fh, struct nfsd4_rename *rename) 582 struct nfsd4_rename *rename)
562{ 583{
563 __be32 status = nfserr_nofilehandle; 584 __be32 status = nfserr_nofilehandle;
564 585
565 if (!save_fh->fh_dentry) 586 if (!cstate->save_fh.fh_dentry)
566 return status; 587 return status;
567 if (nfs4_in_grace() && !(save_fh->fh_export->ex_flags 588 if (nfs4_in_grace() && !(cstate->save_fh.fh_export->ex_flags
568 & NFSEXP_NOSUBTREECHECK)) 589 & NFSEXP_NOSUBTREECHECK))
569 return nfserr_grace; 590 return nfserr_grace;
570 status = nfsd_rename(rqstp, save_fh, rename->rn_sname, 591 status = nfsd_rename(rqstp, &cstate->save_fh, rename->rn_sname,
571 rename->rn_snamelen, current_fh, 592 rename->rn_snamelen, &cstate->current_fh,
572 rename->rn_tname, rename->rn_tnamelen); 593 rename->rn_tname, rename->rn_tnamelen);
573 594
574 /* the underlying filesystem returns different error's than required 595 /* the underlying filesystem returns different error's than required
@@ -576,27 +597,28 @@ nfsd4_rename(struct svc_rqst *rqstp, struct svc_fh *current_fh,
576 if (status == nfserr_isdir) 597 if (status == nfserr_isdir)
577 status = nfserr_exist; 598 status = nfserr_exist;
578 else if ((status == nfserr_notdir) && 599 else if ((status == nfserr_notdir) &&
579 (S_ISDIR(save_fh->fh_dentry->d_inode->i_mode) && 600 (S_ISDIR(cstate->save_fh.fh_dentry->d_inode->i_mode) &&
580 S_ISDIR(current_fh->fh_dentry->d_inode->i_mode))) 601 S_ISDIR(cstate->current_fh.fh_dentry->d_inode->i_mode)))
581 status = nfserr_exist; 602 status = nfserr_exist;
582 else if (status == nfserr_symlink) 603 else if (status == nfserr_symlink)
583 status = nfserr_notdir; 604 status = nfserr_notdir;
584 605
585 if (!status) { 606 if (!status) {
586 set_change_info(&rename->rn_sinfo, current_fh); 607 set_change_info(&rename->rn_sinfo, &cstate->current_fh);
587 set_change_info(&rename->rn_tinfo, save_fh); 608 set_change_info(&rename->rn_tinfo, &cstate->save_fh);
588 } 609 }
589 return status; 610 return status;
590} 611}
591 612
592static inline __be32 613static __be32
593nfsd4_setattr(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_setattr *setattr) 614nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
615 struct nfsd4_setattr *setattr)
594{ 616{
595 __be32 status = nfs_ok; 617 __be32 status = nfs_ok;
596 618
597 if (setattr->sa_iattr.ia_valid & ATTR_SIZE) { 619 if (setattr->sa_iattr.ia_valid & ATTR_SIZE) {
598 nfs4_lock_state(); 620 nfs4_lock_state();
599 status = nfs4_preprocess_stateid_op(current_fh, 621 status = nfs4_preprocess_stateid_op(&cstate->current_fh,
600 &setattr->sa_stateid, CHECK_FH | WR_STATE, NULL); 622 &setattr->sa_stateid, CHECK_FH | WR_STATE, NULL);
601 nfs4_unlock_state(); 623 nfs4_unlock_state();
602 if (status) { 624 if (status) {
@@ -606,16 +628,18 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_se
606 } 628 }
607 status = nfs_ok; 629 status = nfs_ok;
608 if (setattr->sa_acl != NULL) 630 if (setattr->sa_acl != NULL)
609 status = nfsd4_set_nfs4_acl(rqstp, current_fh, setattr->sa_acl); 631 status = nfsd4_set_nfs4_acl(rqstp, &cstate->current_fh,
632 setattr->sa_acl);
610 if (status) 633 if (status)
611 return status; 634 return status;
612 status = nfsd_setattr(rqstp, current_fh, &setattr->sa_iattr, 635 status = nfsd_setattr(rqstp, &cstate->current_fh, &setattr->sa_iattr,
613 0, (time_t)0); 636 0, (time_t)0);
614 return status; 637 return status;
615} 638}
616 639
617static inline __be32 640static __be32
618nfsd4_write(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_write *write) 641nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
642 struct nfsd4_write *write)
619{ 643{
620 stateid_t *stateid = &write->wr_stateid; 644 stateid_t *stateid = &write->wr_stateid;
621 struct file *filp = NULL; 645 struct file *filp = NULL;
@@ -628,7 +652,7 @@ nfsd4_write(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_writ
628 return nfserr_inval; 652 return nfserr_inval;
629 653
630 nfs4_lock_state(); 654 nfs4_lock_state();
631 status = nfs4_preprocess_stateid_op(current_fh, stateid, 655 status = nfs4_preprocess_stateid_op(&cstate->current_fh, stateid,
632 CHECK_FH | WR_STATE, &filp); 656 CHECK_FH | WR_STATE, &filp);
633 if (filp) 657 if (filp)
634 get_file(filp); 658 get_file(filp);
@@ -645,9 +669,9 @@ nfsd4_write(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_writ
645 *p++ = nfssvc_boot.tv_sec; 669 *p++ = nfssvc_boot.tv_sec;
646 *p++ = nfssvc_boot.tv_usec; 670 *p++ = nfssvc_boot.tv_usec;
647 671
648 status = nfsd_write(rqstp, current_fh, filp, write->wr_offset, 672 status = nfsd_write(rqstp, &cstate->current_fh, filp,
649 rqstp->rq_vec, write->wr_vlen, write->wr_buflen, 673 write->wr_offset, rqstp->rq_vec, write->wr_vlen,
650 &write->wr_how_written); 674 write->wr_buflen, &write->wr_how_written);
651 if (filp) 675 if (filp)
652 fput(filp); 676 fput(filp);
653 677
@@ -662,13 +686,14 @@ nfsd4_write(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_writ
662 * to NFS_OK after the call; NVERIFY by mapping NFSERR_NOT_SAME to NFS_OK. 686 * to NFS_OK after the call; NVERIFY by mapping NFSERR_NOT_SAME to NFS_OK.
663 */ 687 */
664static __be32 688static __be32
665nfsd4_verify(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_verify *verify) 689_nfsd4_verify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
690 struct nfsd4_verify *verify)
666{ 691{
667 __be32 *buf, *p; 692 __be32 *buf, *p;
668 int count; 693 int count;
669 __be32 status; 694 __be32 status;
670 695
671 status = fh_verify(rqstp, current_fh, 0, MAY_NOP); 696 status = fh_verify(rqstp, &cstate->current_fh, 0, MAY_NOP);
672 if (status) 697 if (status)
673 return status; 698 return status;
674 699
@@ -689,8 +714,9 @@ nfsd4_verify(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_ver
689 if (!buf) 714 if (!buf)
690 return nfserr_resource; 715 return nfserr_resource;
691 716
692 status = nfsd4_encode_fattr(current_fh, current_fh->fh_export, 717 status = nfsd4_encode_fattr(&cstate->current_fh,
693 current_fh->fh_dentry, buf, 718 cstate->current_fh.fh_export,
719 cstate->current_fh.fh_dentry, buf,
694 &count, verify->ve_bmval, 720 &count, verify->ve_bmval,
695 rqstp); 721 rqstp);
696 722
@@ -712,6 +738,26 @@ out_kfree:
712 return status; 738 return status;
713} 739}
714 740
741static __be32
742nfsd4_nverify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
743 struct nfsd4_verify *verify)
744{
745 __be32 status;
746
747 status = _nfsd4_verify(rqstp, cstate, verify);
748 return status == nfserr_not_same ? nfs_ok : status;
749}
750
751static __be32
752nfsd4_verify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
753 struct nfsd4_verify *verify)
754{
755 __be32 status;
756
757 status = _nfsd4_verify(rqstp, cstate, verify);
758 return status == nfserr_same ? nfs_ok : status;
759}
760
715/* 761/*
716 * NULL call. 762 * NULL call.
717 */ 763 */
@@ -727,6 +773,42 @@ static inline void nfsd4_increment_op_stats(u32 opnum)
727 nfsdstats.nfs4_opcount[opnum]++; 773 nfsdstats.nfs4_opcount[opnum]++;
728} 774}
729 775
776static void cstate_free(struct nfsd4_compound_state *cstate)
777{
778 if (cstate == NULL)
779 return;
780 fh_put(&cstate->current_fh);
781 fh_put(&cstate->save_fh);
782 BUG_ON(cstate->replay_owner);
783 kfree(cstate);
784}
785
786static struct nfsd4_compound_state *cstate_alloc(void)
787{
788 struct nfsd4_compound_state *cstate;
789
790 cstate = kmalloc(sizeof(struct nfsd4_compound_state), GFP_KERNEL);
791 if (cstate == NULL)
792 return NULL;
793 fh_init(&cstate->current_fh, NFS4_FHSIZE);
794 fh_init(&cstate->save_fh, NFS4_FHSIZE);
795 cstate->replay_owner = NULL;
796 return cstate;
797}
798
799typedef __be32(*nfsd4op_func)(struct svc_rqst *, struct nfsd4_compound_state *,
800 void *);
801
802struct nfsd4_operation {
803 nfsd4op_func op_func;
804 u32 op_flags;
805/* Most ops require a valid current filehandle; a few don't: */
806#define ALLOWED_WITHOUT_FH 1
807/* GETATTR and ops not listed as returning NFS4ERR_MOVED: */
808#define ALLOWED_ON_ABSENT_FS 2
809};
810
811static struct nfsd4_operation nfsd4_ops[];
730 812
731/* 813/*
732 * COMPOUND call. 814 * COMPOUND call.
@@ -737,21 +819,15 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
737 struct nfsd4_compoundres *resp) 819 struct nfsd4_compoundres *resp)
738{ 820{
739 struct nfsd4_op *op; 821 struct nfsd4_op *op;
740 struct svc_fh *current_fh = NULL; 822 struct nfsd4_operation *opdesc;
741 struct svc_fh *save_fh = NULL; 823 struct nfsd4_compound_state *cstate = NULL;
742 struct nfs4_stateowner *replay_owner = NULL; 824 int slack_bytes;
743 int slack_space; /* in words, not bytes! */
744 __be32 status; 825 __be32 status;
745 826
746 status = nfserr_resource; 827 status = nfserr_resource;
747 current_fh = kmalloc(sizeof(*current_fh), GFP_KERNEL); 828 cstate = cstate_alloc();
748 if (current_fh == NULL) 829 if (cstate == NULL)
749 goto out;
750 fh_init(current_fh, NFS4_FHSIZE);
751 save_fh = kmalloc(sizeof(*save_fh), GFP_KERNEL);
752 if (save_fh == NULL)
753 goto out; 830 goto out;
754 fh_init(save_fh, NFS4_FHSIZE);
755 831
756 resp->xbuf = &rqstp->rq_res; 832 resp->xbuf = &rqstp->rq_res;
757 resp->p = rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len; 833 resp->p = rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len;
@@ -790,164 +866,44 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
790 * failed response to the next operation. If we don't 866 * failed response to the next operation. If we don't
791 * have enough room, fail with ERR_RESOURCE. 867 * have enough room, fail with ERR_RESOURCE.
792 */ 868 */
793/* FIXME - is slack_space *really* words, or bytes??? - neilb */ 869 slack_bytes = (char *)resp->end - (char *)resp->p;
794 slack_space = (char *)resp->end - (char *)resp->p; 870 if (slack_bytes < COMPOUND_SLACK_SPACE
795 if (slack_space < COMPOUND_SLACK_SPACE + COMPOUND_ERR_SLACK_SPACE) { 871 + COMPOUND_ERR_SLACK_SPACE) {
796 BUG_ON(slack_space < COMPOUND_ERR_SLACK_SPACE); 872 BUG_ON(slack_bytes < COMPOUND_ERR_SLACK_SPACE);
797 op->status = nfserr_resource; 873 op->status = nfserr_resource;
798 goto encode_op; 874 goto encode_op;
799 } 875 }
800 876
801 /* All operations except RENEW, SETCLIENTID, RESTOREFH 877 opdesc = &nfsd4_ops[op->opnum];
802 * SETCLIENTID_CONFIRM, PUTFH and PUTROOTFH 878
803 * require a valid current filehandle 879 if (!cstate->current_fh.fh_dentry) {
804 */ 880 if (!(opdesc->op_flags & ALLOWED_WITHOUT_FH)) {
805 if (!current_fh->fh_dentry) {
806 if (!((op->opnum == OP_PUTFH) ||
807 (op->opnum == OP_PUTROOTFH) ||
808 (op->opnum == OP_SETCLIENTID) ||
809 (op->opnum == OP_SETCLIENTID_CONFIRM) ||
810 (op->opnum == OP_RENEW) ||
811 (op->opnum == OP_RESTOREFH) ||
812 (op->opnum == OP_RELEASE_LOCKOWNER))) {
813 op->status = nfserr_nofilehandle; 881 op->status = nfserr_nofilehandle;
814 goto encode_op; 882 goto encode_op;
815 } 883 }
816 } 884 } else if (cstate->current_fh.fh_export->ex_fslocs.migrated &&
817 /* Check must be done at start of each operation, except 885 !(opdesc->op_flags & ALLOWED_ON_ABSENT_FS)) {
818 * for GETATTR and ops not listed as returning NFS4ERR_MOVED
819 */
820 else if (current_fh->fh_export->ex_fslocs.migrated &&
821 !((op->opnum == OP_GETATTR) ||
822 (op->opnum == OP_PUTROOTFH) ||
823 (op->opnum == OP_PUTPUBFH) ||
824 (op->opnum == OP_RENEW) ||
825 (op->opnum == OP_SETCLIENTID) ||
826 (op->opnum == OP_RELEASE_LOCKOWNER))) {
827 op->status = nfserr_moved; 886 op->status = nfserr_moved;
828 goto encode_op; 887 goto encode_op;
829 } 888 }
830 switch (op->opnum) { 889
831 case OP_ACCESS: 890 if (opdesc->op_func)
832 op->status = nfsd4_access(rqstp, current_fh, &op->u.access); 891 op->status = opdesc->op_func(rqstp, cstate, &op->u);
833 break; 892 else
834 case OP_CLOSE:
835 op->status = nfsd4_close(rqstp, current_fh, &op->u.close, &replay_owner);
836 break;
837 case OP_COMMIT:
838 op->status = nfsd4_commit(rqstp, current_fh, &op->u.commit);
839 break;
840 case OP_CREATE:
841 op->status = nfsd4_create(rqstp, current_fh, &op->u.create);
842 break;
843 case OP_DELEGRETURN:
844 op->status = nfsd4_delegreturn(rqstp, current_fh, &op->u.delegreturn);
845 break;
846 case OP_GETATTR:
847 op->status = nfsd4_getattr(rqstp, current_fh, &op->u.getattr);
848 break;
849 case OP_GETFH:
850 op->status = nfsd4_getfh(current_fh, &op->u.getfh);
851 break;
852 case OP_LINK:
853 op->status = nfsd4_link(rqstp, current_fh, save_fh, &op->u.link);
854 break;
855 case OP_LOCK:
856 op->status = nfsd4_lock(rqstp, current_fh, &op->u.lock, &replay_owner);
857 break;
858 case OP_LOCKT:
859 op->status = nfsd4_lockt(rqstp, current_fh, &op->u.lockt);
860 break;
861 case OP_LOCKU:
862 op->status = nfsd4_locku(rqstp, current_fh, &op->u.locku, &replay_owner);
863 break;
864 case OP_LOOKUP:
865 op->status = nfsd4_lookup(rqstp, current_fh, &op->u.lookup);
866 break;
867 case OP_LOOKUPP:
868 op->status = nfsd4_lookupp(rqstp, current_fh);
869 break;
870 case OP_NVERIFY:
871 op->status = nfsd4_verify(rqstp, current_fh, &op->u.nverify);
872 if (op->status == nfserr_not_same)
873 op->status = nfs_ok;
874 break;
875 case OP_OPEN:
876 op->status = nfsd4_open(rqstp, current_fh, &op->u.open, &replay_owner);
877 break;
878 case OP_OPEN_CONFIRM:
879 op->status = nfsd4_open_confirm(rqstp, current_fh, &op->u.open_confirm, &replay_owner);
880 break;
881 case OP_OPEN_DOWNGRADE:
882 op->status = nfsd4_open_downgrade(rqstp, current_fh, &op->u.open_downgrade, &replay_owner);
883 break;
884 case OP_PUTFH:
885 op->status = nfsd4_putfh(rqstp, current_fh, &op->u.putfh);
886 break;
887 case OP_PUTROOTFH:
888 op->status = nfsd4_putrootfh(rqstp, current_fh);
889 break;
890 case OP_READ:
891 op->status = nfsd4_read(rqstp, current_fh, &op->u.read);
892 break;
893 case OP_READDIR:
894 op->status = nfsd4_readdir(rqstp, current_fh, &op->u.readdir);
895 break;
896 case OP_READLINK:
897 op->status = nfsd4_readlink(rqstp, current_fh, &op->u.readlink);
898 break;
899 case OP_REMOVE:
900 op->status = nfsd4_remove(rqstp, current_fh, &op->u.remove);
901 break;
902 case OP_RENAME:
903 op->status = nfsd4_rename(rqstp, current_fh, save_fh, &op->u.rename);
904 break;
905 case OP_RENEW:
906 op->status = nfsd4_renew(&op->u.renew);
907 break;
908 case OP_RESTOREFH:
909 op->status = nfsd4_restorefh(current_fh, save_fh);
910 break;
911 case OP_SAVEFH:
912 op->status = nfsd4_savefh(current_fh, save_fh);
913 break;
914 case OP_SETATTR:
915 op->status = nfsd4_setattr(rqstp, current_fh, &op->u.setattr);
916 break;
917 case OP_SETCLIENTID:
918 op->status = nfsd4_setclientid(rqstp, &op->u.setclientid);
919 break;
920 case OP_SETCLIENTID_CONFIRM:
921 op->status = nfsd4_setclientid_confirm(rqstp, &op->u.setclientid_confirm);
922 break;
923 case OP_VERIFY:
924 op->status = nfsd4_verify(rqstp, current_fh, &op->u.verify);
925 if (op->status == nfserr_same)
926 op->status = nfs_ok;
927 break;
928 case OP_WRITE:
929 op->status = nfsd4_write(rqstp, current_fh, &op->u.write);
930 break;
931 case OP_RELEASE_LOCKOWNER:
932 op->status = nfsd4_release_lockowner(rqstp, &op->u.release_lockowner);
933 break;
934 default:
935 BUG_ON(op->status == nfs_ok); 893 BUG_ON(op->status == nfs_ok);
936 break;
937 }
938 894
939encode_op: 895encode_op:
940 if (op->status == nfserr_replay_me) { 896 if (op->status == nfserr_replay_me) {
941 op->replay = &replay_owner->so_replay; 897 op->replay = &cstate->replay_owner->so_replay;
942 nfsd4_encode_replay(resp, op); 898 nfsd4_encode_replay(resp, op);
943 status = op->status = op->replay->rp_status; 899 status = op->status = op->replay->rp_status;
944 } else { 900 } else {
945 nfsd4_encode_operation(resp, op); 901 nfsd4_encode_operation(resp, op);
946 status = op->status; 902 status = op->status;
947 } 903 }
948 if (replay_owner && (replay_owner != (void *)(-1))) { 904 if (cstate->replay_owner) {
949 nfs4_put_stateowner(replay_owner); 905 nfs4_put_stateowner(cstate->replay_owner);
950 replay_owner = NULL; 906 cstate->replay_owner = NULL;
951 } 907 }
952 /* XXX Ugh, we need to get rid of this kind of special case: */ 908 /* XXX Ugh, we need to get rid of this kind of special case: */
953 if (op->opnum == OP_READ && op->u.read.rd_filp) 909 if (op->opnum == OP_READ && op->u.read.rd_filp)
@@ -958,15 +914,124 @@ encode_op:
958 914
959out: 915out:
960 nfsd4_release_compoundargs(args); 916 nfsd4_release_compoundargs(args);
961 if (current_fh) 917 cstate_free(cstate);
962 fh_put(current_fh);
963 kfree(current_fh);
964 if (save_fh)
965 fh_put(save_fh);
966 kfree(save_fh);
967 return status; 918 return status;
968} 919}
969 920
921static struct nfsd4_operation nfsd4_ops[OP_RELEASE_LOCKOWNER+1] = {
922 [OP_ACCESS] = {
923 .op_func = (nfsd4op_func)nfsd4_access,
924 },
925 [OP_CLOSE] = {
926 .op_func = (nfsd4op_func)nfsd4_close,
927 },
928 [OP_COMMIT] = {
929 .op_func = (nfsd4op_func)nfsd4_commit,
930 },
931 [OP_CREATE] = {
932 .op_func = (nfsd4op_func)nfsd4_create,
933 },
934 [OP_DELEGRETURN] = {
935 .op_func = (nfsd4op_func)nfsd4_delegreturn,
936 },
937 [OP_GETATTR] = {
938 .op_func = (nfsd4op_func)nfsd4_getattr,
939 .op_flags = ALLOWED_ON_ABSENT_FS,
940 },
941 [OP_GETFH] = {
942 .op_func = (nfsd4op_func)nfsd4_getfh,
943 },
944 [OP_LINK] = {
945 .op_func = (nfsd4op_func)nfsd4_link,
946 },
947 [OP_LOCK] = {
948 .op_func = (nfsd4op_func)nfsd4_lock,
949 },
950 [OP_LOCKT] = {
951 .op_func = (nfsd4op_func)nfsd4_lockt,
952 },
953 [OP_LOCKU] = {
954 .op_func = (nfsd4op_func)nfsd4_locku,
955 },
956 [OP_LOOKUP] = {
957 .op_func = (nfsd4op_func)nfsd4_lookup,
958 },
959 [OP_LOOKUPP] = {
960 .op_func = (nfsd4op_func)nfsd4_lookupp,
961 },
962 [OP_NVERIFY] = {
963 .op_func = (nfsd4op_func)nfsd4_nverify,
964 },
965 [OP_OPEN] = {
966 .op_func = (nfsd4op_func)nfsd4_open,
967 },
968 [OP_OPEN_CONFIRM] = {
969 .op_func = (nfsd4op_func)nfsd4_open_confirm,
970 },
971 [OP_OPEN_DOWNGRADE] = {
972 .op_func = (nfsd4op_func)nfsd4_open_downgrade,
973 },
974 [OP_PUTFH] = {
975 .op_func = (nfsd4op_func)nfsd4_putfh,
976 .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS,
977 },
978 [OP_PUTPUBFH] = {
979 /* unsupported; just for future reference: */
980 .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS,
981 },
982 [OP_PUTROOTFH] = {
983 .op_func = (nfsd4op_func)nfsd4_putrootfh,
984 .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS,
985 },
986 [OP_READ] = {
987 .op_func = (nfsd4op_func)nfsd4_read,
988 },
989 [OP_READDIR] = {
990 .op_func = (nfsd4op_func)nfsd4_readdir,
991 },
992 [OP_READLINK] = {
993 .op_func = (nfsd4op_func)nfsd4_readlink,
994 },
995 [OP_REMOVE] = {
996 .op_func = (nfsd4op_func)nfsd4_remove,
997 },
998 [OP_RENAME] = {
999 .op_func = (nfsd4op_func)nfsd4_rename,
1000 },
1001 [OP_RENEW] = {
1002 .op_func = (nfsd4op_func)nfsd4_renew,
1003 .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS,
1004 },
1005 [OP_RESTOREFH] = {
1006 .op_func = (nfsd4op_func)nfsd4_restorefh,
1007 .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS,
1008 },
1009 [OP_SAVEFH] = {
1010 .op_func = (nfsd4op_func)nfsd4_savefh,
1011 },
1012 [OP_SETATTR] = {
1013 .op_func = (nfsd4op_func)nfsd4_setattr,
1014 },
1015 [OP_SETCLIENTID] = {
1016 .op_func = (nfsd4op_func)nfsd4_setclientid,
1017 .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS,
1018 },
1019 [OP_SETCLIENTID_CONFIRM] = {
1020 .op_func = (nfsd4op_func)nfsd4_setclientid_confirm,
1021 .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS,
1022 },
1023 [OP_VERIFY] = {
1024 .op_func = (nfsd4op_func)nfsd4_verify,
1025 },
1026 [OP_WRITE] = {
1027 .op_func = (nfsd4op_func)nfsd4_write,
1028 },
1029 [OP_RELEASE_LOCKOWNER] = {
1030 .op_func = (nfsd4op_func)nfsd4_release_lockowner,
1031 .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS,
1032 },
1033};
1034
970#define nfs4svc_decode_voidargs NULL 1035#define nfs4svc_decode_voidargs NULL
971#define nfs4svc_release_void NULL 1036#define nfs4svc_release_void NULL
972#define nfsd4_voidres nfsd4_voidargs 1037#define nfsd4_voidres nfsd4_voidargs
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index b7179bd45a1e..9de89df961f4 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -711,7 +711,8 @@ out_err:
711 * 711 *
712 */ 712 */
713__be32 713__be32
714nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_setclientid *setclid) 714nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
715 struct nfsd4_setclientid *setclid)
715{ 716{
716 __be32 ip_addr = rqstp->rq_addr.sin_addr.s_addr; 717 __be32 ip_addr = rqstp->rq_addr.sin_addr.s_addr;
717 struct xdr_netobj clname = { 718 struct xdr_netobj clname = {
@@ -876,7 +877,9 @@ out:
876 * NOTE: callback information will be processed here in a future patch 877 * NOTE: callback information will be processed here in a future patch
877 */ 878 */
878__be32 879__be32
879nfsd4_setclientid_confirm(struct svc_rqst *rqstp, struct nfsd4_setclientid_confirm *setclientid_confirm) 880nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
881 struct nfsd4_compound_state *cstate,
882 struct nfsd4_setclientid_confirm *setclientid_confirm)
880{ 883{
881 __be32 ip_addr = rqstp->rq_addr.sin_addr.s_addr; 884 __be32 ip_addr = rqstp->rq_addr.sin_addr.s_addr;
882 struct nfs4_client *conf, *unconf; 885 struct nfs4_client *conf, *unconf;
@@ -1833,7 +1836,8 @@ static void laundromat_main(struct work_struct *);
1833static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main); 1836static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main);
1834 1837
1835__be32 1838__be32
1836nfsd4_renew(clientid_t *clid) 1839nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
1840 clientid_t *clid)
1837{ 1841{
1838 struct nfs4_client *clp; 1842 struct nfs4_client *clp;
1839 __be32 status; 1843 __be32 status;
@@ -2241,24 +2245,25 @@ check_replay:
2241} 2245}
2242 2246
2243__be32 2247__be32
2244nfsd4_open_confirm(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open_confirm *oc, struct nfs4_stateowner **replay_owner) 2248nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2249 struct nfsd4_open_confirm *oc)
2245{ 2250{
2246 __be32 status; 2251 __be32 status;
2247 struct nfs4_stateowner *sop; 2252 struct nfs4_stateowner *sop;
2248 struct nfs4_stateid *stp; 2253 struct nfs4_stateid *stp;
2249 2254
2250 dprintk("NFSD: nfsd4_open_confirm on file %.*s\n", 2255 dprintk("NFSD: nfsd4_open_confirm on file %.*s\n",
2251 (int)current_fh->fh_dentry->d_name.len, 2256 (int)cstate->current_fh.fh_dentry->d_name.len,
2252 current_fh->fh_dentry->d_name.name); 2257 cstate->current_fh.fh_dentry->d_name.name);
2253 2258
2254 status = fh_verify(rqstp, current_fh, S_IFREG, 0); 2259 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
2255 if (status) 2260 if (status)
2256 return status; 2261 return status;
2257 2262
2258 nfs4_lock_state(); 2263 nfs4_lock_state();
2259 2264
2260 if ((status = nfs4_preprocess_seqid_op(current_fh, oc->oc_seqid, 2265 if ((status = nfs4_preprocess_seqid_op(&cstate->current_fh,
2261 &oc->oc_req_stateid, 2266 oc->oc_seqid, &oc->oc_req_stateid,
2262 CHECK_FH | CONFIRM | OPEN_STATE, 2267 CHECK_FH | CONFIRM | OPEN_STATE,
2263 &oc->oc_stateowner, &stp, NULL))) 2268 &oc->oc_stateowner, &stp, NULL)))
2264 goto out; 2269 goto out;
@@ -2278,7 +2283,7 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfs
2278out: 2283out:
2279 if (oc->oc_stateowner) { 2284 if (oc->oc_stateowner) {
2280 nfs4_get_stateowner(oc->oc_stateowner); 2285 nfs4_get_stateowner(oc->oc_stateowner);
2281 *replay_owner = oc->oc_stateowner; 2286 cstate->replay_owner = oc->oc_stateowner;
2282 } 2287 }
2283 nfs4_unlock_state(); 2288 nfs4_unlock_state();
2284 return status; 2289 return status;
@@ -2310,22 +2315,25 @@ reset_union_bmap_deny(unsigned long deny, unsigned long *bmap)
2310} 2315}
2311 2316
2312__be32 2317__be32
2313nfsd4_open_downgrade(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open_downgrade *od, struct nfs4_stateowner **replay_owner) 2318nfsd4_open_downgrade(struct svc_rqst *rqstp,
2319 struct nfsd4_compound_state *cstate,
2320 struct nfsd4_open_downgrade *od)
2314{ 2321{
2315 __be32 status; 2322 __be32 status;
2316 struct nfs4_stateid *stp; 2323 struct nfs4_stateid *stp;
2317 unsigned int share_access; 2324 unsigned int share_access;
2318 2325
2319 dprintk("NFSD: nfsd4_open_downgrade on file %.*s\n", 2326 dprintk("NFSD: nfsd4_open_downgrade on file %.*s\n",
2320 (int)current_fh->fh_dentry->d_name.len, 2327 (int)cstate->current_fh.fh_dentry->d_name.len,
2321 current_fh->fh_dentry->d_name.name); 2328 cstate->current_fh.fh_dentry->d_name.name);
2322 2329
2323 if (!access_valid(od->od_share_access) 2330 if (!access_valid(od->od_share_access)
2324 || !deny_valid(od->od_share_deny)) 2331 || !deny_valid(od->od_share_deny))
2325 return nfserr_inval; 2332 return nfserr_inval;
2326 2333
2327 nfs4_lock_state(); 2334 nfs4_lock_state();
2328 if ((status = nfs4_preprocess_seqid_op(current_fh, od->od_seqid, 2335 if ((status = nfs4_preprocess_seqid_op(&cstate->current_fh,
2336 od->od_seqid,
2329 &od->od_stateid, 2337 &od->od_stateid,
2330 CHECK_FH | OPEN_STATE, 2338 CHECK_FH | OPEN_STATE,
2331 &od->od_stateowner, &stp, NULL))) 2339 &od->od_stateowner, &stp, NULL)))
@@ -2355,7 +2363,7 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct n
2355out: 2363out:
2356 if (od->od_stateowner) { 2364 if (od->od_stateowner) {
2357 nfs4_get_stateowner(od->od_stateowner); 2365 nfs4_get_stateowner(od->od_stateowner);
2358 *replay_owner = od->od_stateowner; 2366 cstate->replay_owner = od->od_stateowner;
2359 } 2367 }
2360 nfs4_unlock_state(); 2368 nfs4_unlock_state();
2361 return status; 2369 return status;
@@ -2365,18 +2373,20 @@ out:
2365 * nfs4_unlock_state() called after encode 2373 * nfs4_unlock_state() called after encode
2366 */ 2374 */
2367__be32 2375__be32
2368nfsd4_close(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_close *close, struct nfs4_stateowner **replay_owner) 2376nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2377 struct nfsd4_close *close)
2369{ 2378{
2370 __be32 status; 2379 __be32 status;
2371 struct nfs4_stateid *stp; 2380 struct nfs4_stateid *stp;
2372 2381
2373 dprintk("NFSD: nfsd4_close on file %.*s\n", 2382 dprintk("NFSD: nfsd4_close on file %.*s\n",
2374 (int)current_fh->fh_dentry->d_name.len, 2383 (int)cstate->current_fh.fh_dentry->d_name.len,
2375 current_fh->fh_dentry->d_name.name); 2384 cstate->current_fh.fh_dentry->d_name.name);
2376 2385
2377 nfs4_lock_state(); 2386 nfs4_lock_state();
2378 /* check close_lru for replay */ 2387 /* check close_lru for replay */
2379 if ((status = nfs4_preprocess_seqid_op(current_fh, close->cl_seqid, 2388 if ((status = nfs4_preprocess_seqid_op(&cstate->current_fh,
2389 close->cl_seqid,
2380 &close->cl_stateid, 2390 &close->cl_stateid,
2381 CHECK_FH | OPEN_STATE | CLOSE_STATE, 2391 CHECK_FH | OPEN_STATE | CLOSE_STATE,
2382 &close->cl_stateowner, &stp, NULL))) 2392 &close->cl_stateowner, &stp, NULL)))
@@ -2397,22 +2407,24 @@ nfsd4_close(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_clos
2397out: 2407out:
2398 if (close->cl_stateowner) { 2408 if (close->cl_stateowner) {
2399 nfs4_get_stateowner(close->cl_stateowner); 2409 nfs4_get_stateowner(close->cl_stateowner);
2400 *replay_owner = close->cl_stateowner; 2410 cstate->replay_owner = close->cl_stateowner;
2401 } 2411 }
2402 nfs4_unlock_state(); 2412 nfs4_unlock_state();
2403 return status; 2413 return status;
2404} 2414}
2405 2415
2406__be32 2416__be32
2407nfsd4_delegreturn(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_delegreturn *dr) 2417nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2418 struct nfsd4_delegreturn *dr)
2408{ 2419{
2409 __be32 status; 2420 __be32 status;
2410 2421
2411 if ((status = fh_verify(rqstp, current_fh, S_IFREG, 0))) 2422 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
2412 goto out; 2423 goto out;
2413 2424
2414 nfs4_lock_state(); 2425 nfs4_lock_state();
2415 status = nfs4_preprocess_stateid_op(current_fh, &dr->dr_stateid, DELEG_RET, NULL); 2426 status = nfs4_preprocess_stateid_op(&cstate->current_fh,
2427 &dr->dr_stateid, DELEG_RET, NULL);
2416 nfs4_unlock_state(); 2428 nfs4_unlock_state();
2417out: 2429out:
2418 return status; 2430 return status;
@@ -2635,7 +2647,8 @@ check_lock_length(u64 offset, u64 length)
2635 * LOCK operation 2647 * LOCK operation
2636 */ 2648 */
2637__be32 2649__be32
2638nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock *lock, struct nfs4_stateowner **replay_owner) 2650nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2651 struct nfsd4_lock *lock)
2639{ 2652{
2640 struct nfs4_stateowner *open_sop = NULL; 2653 struct nfs4_stateowner *open_sop = NULL;
2641 struct nfs4_stateowner *lock_sop = NULL; 2654 struct nfs4_stateowner *lock_sop = NULL;
@@ -2654,7 +2667,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
2654 if (check_lock_length(lock->lk_offset, lock->lk_length)) 2667 if (check_lock_length(lock->lk_offset, lock->lk_length))
2655 return nfserr_inval; 2668 return nfserr_inval;
2656 2669
2657 if ((status = fh_verify(rqstp, current_fh, S_IFREG, MAY_LOCK))) { 2670 if ((status = fh_verify(rqstp, &cstate->current_fh,
2671 S_IFREG, MAY_LOCK))) {
2658 dprintk("NFSD: nfsd4_lock: permission denied!\n"); 2672 dprintk("NFSD: nfsd4_lock: permission denied!\n");
2659 return status; 2673 return status;
2660 } 2674 }
@@ -2675,7 +2689,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
2675 goto out; 2689 goto out;
2676 2690
2677 /* validate and update open stateid and open seqid */ 2691 /* validate and update open stateid and open seqid */
2678 status = nfs4_preprocess_seqid_op(current_fh, 2692 status = nfs4_preprocess_seqid_op(&cstate->current_fh,
2679 lock->lk_new_open_seqid, 2693 lock->lk_new_open_seqid,
2680 &lock->lk_new_open_stateid, 2694 &lock->lk_new_open_stateid,
2681 CHECK_FH | OPEN_STATE, 2695 CHECK_FH | OPEN_STATE,
@@ -2702,7 +2716,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
2702 goto out; 2716 goto out;
2703 } else { 2717 } else {
2704 /* lock (lock owner + lock stateid) already exists */ 2718 /* lock (lock owner + lock stateid) already exists */
2705 status = nfs4_preprocess_seqid_op(current_fh, 2719 status = nfs4_preprocess_seqid_op(&cstate->current_fh,
2706 lock->lk_old_lock_seqid, 2720 lock->lk_old_lock_seqid,
2707 &lock->lk_old_lock_stateid, 2721 &lock->lk_old_lock_stateid,
2708 CHECK_FH | LOCK_STATE, 2722 CHECK_FH | LOCK_STATE,
@@ -2759,7 +2773,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
2759 conflock.fl_ops = NULL; 2773 conflock.fl_ops = NULL;
2760 conflock.fl_lmops = NULL; 2774 conflock.fl_lmops = NULL;
2761 err = posix_lock_file_conf(filp, &file_lock, &conflock); 2775 err = posix_lock_file_conf(filp, &file_lock, &conflock);
2762 dprintk("NFSD: nfsd4_lock: posix_lock_file_conf status %d\n",status);
2763 switch (-err) { 2776 switch (-err) {
2764 case 0: /* success! */ 2777 case 0: /* success! */
2765 update_stateid(&lock_stp->st_stateid); 2778 update_stateid(&lock_stp->st_stateid);
@@ -2785,7 +2798,7 @@ out:
2785 release_stateowner(lock_sop); 2798 release_stateowner(lock_sop);
2786 if (lock->lk_replay_owner) { 2799 if (lock->lk_replay_owner) {
2787 nfs4_get_stateowner(lock->lk_replay_owner); 2800 nfs4_get_stateowner(lock->lk_replay_owner);
2788 *replay_owner = lock->lk_replay_owner; 2801 cstate->replay_owner = lock->lk_replay_owner;
2789 } 2802 }
2790 nfs4_unlock_state(); 2803 nfs4_unlock_state();
2791 return status; 2804 return status;
@@ -2795,7 +2808,8 @@ out:
2795 * LOCKT operation 2808 * LOCKT operation
2796 */ 2809 */
2797__be32 2810__be32
2798nfsd4_lockt(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lockt *lockt) 2811nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2812 struct nfsd4_lockt *lockt)
2799{ 2813{
2800 struct inode *inode; 2814 struct inode *inode;
2801 struct file file; 2815 struct file file;
@@ -2816,14 +2830,14 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
2816 if (STALE_CLIENTID(&lockt->lt_clientid)) 2830 if (STALE_CLIENTID(&lockt->lt_clientid))
2817 goto out; 2831 goto out;
2818 2832
2819 if ((status = fh_verify(rqstp, current_fh, S_IFREG, 0))) { 2833 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) {
2820 dprintk("NFSD: nfsd4_lockt: fh_verify() failed!\n"); 2834 dprintk("NFSD: nfsd4_lockt: fh_verify() failed!\n");
2821 if (status == nfserr_symlink) 2835 if (status == nfserr_symlink)
2822 status = nfserr_inval; 2836 status = nfserr_inval;
2823 goto out; 2837 goto out;
2824 } 2838 }
2825 2839
2826 inode = current_fh->fh_dentry->d_inode; 2840 inode = cstate->current_fh.fh_dentry->d_inode;
2827 locks_init_lock(&file_lock); 2841 locks_init_lock(&file_lock);
2828 switch (lockt->lt_type) { 2842 switch (lockt->lt_type) {
2829 case NFS4_READ_LT: 2843 case NFS4_READ_LT:
@@ -2862,7 +2876,7 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
2862 * only the dentry:inode set. 2876 * only the dentry:inode set.
2863 */ 2877 */
2864 memset(&file, 0, sizeof (struct file)); 2878 memset(&file, 0, sizeof (struct file));
2865 file.f_path.dentry = current_fh->fh_dentry; 2879 file.f_path.dentry = cstate->current_fh.fh_dentry;
2866 2880
2867 status = nfs_ok; 2881 status = nfs_ok;
2868 if (posix_test_lock(&file, &file_lock, &conflock)) { 2882 if (posix_test_lock(&file, &file_lock, &conflock)) {
@@ -2875,7 +2889,8 @@ out:
2875} 2889}
2876 2890
2877__be32 2891__be32
2878nfsd4_locku(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_locku *locku, struct nfs4_stateowner **replay_owner) 2892nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2893 struct nfsd4_locku *locku)
2879{ 2894{
2880 struct nfs4_stateid *stp; 2895 struct nfs4_stateid *stp;
2881 struct file *filp = NULL; 2896 struct file *filp = NULL;
@@ -2892,7 +2907,7 @@ nfsd4_locku(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
2892 2907
2893 nfs4_lock_state(); 2908 nfs4_lock_state();
2894 2909
2895 if ((status = nfs4_preprocess_seqid_op(current_fh, 2910 if ((status = nfs4_preprocess_seqid_op(&cstate->current_fh,
2896 locku->lu_seqid, 2911 locku->lu_seqid,
2897 &locku->lu_stateid, 2912 &locku->lu_stateid,
2898 CHECK_FH | LOCK_STATE, 2913 CHECK_FH | LOCK_STATE,
@@ -2933,7 +2948,7 @@ nfsd4_locku(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
2933out: 2948out:
2934 if (locku->lu_stateowner) { 2949 if (locku->lu_stateowner) {
2935 nfs4_get_stateowner(locku->lu_stateowner); 2950 nfs4_get_stateowner(locku->lu_stateowner);
2936 *replay_owner = locku->lu_stateowner; 2951 cstate->replay_owner = locku->lu_stateowner;
2937 } 2952 }
2938 nfs4_unlock_state(); 2953 nfs4_unlock_state();
2939 return status; 2954 return status;
@@ -2968,7 +2983,9 @@ out:
2968} 2983}
2969 2984
2970__be32 2985__be32
2971nfsd4_release_lockowner(struct svc_rqst *rqstp, struct nfsd4_release_lockowner *rlockowner) 2986nfsd4_release_lockowner(struct svc_rqst *rqstp,
2987 struct nfsd4_compound_state *cstate,
2988 struct nfsd4_release_lockowner *rlockowner)
2972{ 2989{
2973 clientid_t *clid = &rlockowner->rl_clientid; 2990 clientid_t *clid = &rlockowner->rl_clientid;
2974 struct nfs4_stateowner *sop; 2991 struct nfs4_stateowner *sop;
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index f3f239db04bb..fea46368afb2 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1845,15 +1845,11 @@ nfsd4_encode_dirent_fattr(struct nfsd4_readdir *cd,
1845 1845
1846 exp_get(exp); 1846 exp_get(exp);
1847 if (d_mountpoint(dentry)) { 1847 if (d_mountpoint(dentry)) {
1848 if (nfsd_cross_mnt(cd->rd_rqstp, &dentry, &exp)) { 1848 int err;
1849 /* 1849
1850 * -EAGAIN is the only error returned from 1850 err = nfsd_cross_mnt(cd->rd_rqstp, &dentry, &exp);
1851 * nfsd_cross_mnt() and it indicates that an 1851 if (err) {
1852 * up-call has been initiated to fill in the export 1852 nfserr = nfserrno(err);
1853 * options on exp. When the answer comes back,
1854 * this call will be retried.
1855 */
1856 nfserr = nfserr_dropit;
1857 goto out_put; 1853 goto out_put;
1858 } 1854 }
1859 1855
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 727ab3bd450d..b06bf9f70efc 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -169,9 +169,11 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
169 exp = exp_find(rqstp->rq_client, 0, tfh, &rqstp->rq_chandle); 169 exp = exp_find(rqstp->rq_client, 0, tfh, &rqstp->rq_chandle);
170 } 170 }
171 171
172 error = nfserr_dropit; 172 if (IS_ERR(exp) && (PTR_ERR(exp) == -EAGAIN
173 if (IS_ERR(exp) && PTR_ERR(exp) == -EAGAIN) 173 || PTR_ERR(exp) == -ETIMEDOUT)) {
174 error = nfserrno(PTR_ERR(exp));
174 goto out; 175 goto out;
176 }
175 177
176 error = nfserr_stale; 178 error = nfserr_stale;
177 if (!exp || IS_ERR(exp)) 179 if (!exp || IS_ERR(exp))
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 4883d7586229..7a79c23aa6d4 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -99,7 +99,7 @@ static struct raparm_hbucket raparm_hash[RAPARM_HASH_SIZE];
99/* 99/*
100 * Called from nfsd_lookup and encode_dirent. Check if we have crossed 100 * Called from nfsd_lookup and encode_dirent. Check if we have crossed
101 * a mount point. 101 * a mount point.
102 * Returns -EAGAIN leaving *dpp and *expp unchanged, 102 * Returns -EAGAIN or -ETIMEDOUT leaving *dpp and *expp unchanged,
103 * or nfs_ok having possibly changed *dpp and *expp 103 * or nfs_ok having possibly changed *dpp and *expp
104 */ 104 */
105int 105int
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index edc91ca3792a..f27e5378caf2 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -1959,7 +1959,7 @@ int ocfs2_prepare_truncate(struct ocfs2_super *osb,
1959 goto bail; 1959 goto bail;
1960 } 1960 }
1961 1961
1962 *tc = kcalloc(1, sizeof(struct ocfs2_truncate_context), GFP_KERNEL); 1962 *tc = kzalloc(sizeof(struct ocfs2_truncate_context), GFP_KERNEL);
1963 if (!(*tc)) { 1963 if (!(*tc)) {
1964 status = -ENOMEM; 1964 status = -ENOMEM;
1965 mlog_errno(status); 1965 mlog_errno(status);
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 4cd9a9580456..a25ef5a50386 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -1553,7 +1553,7 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g
1553 struct o2hb_region *reg = NULL; 1553 struct o2hb_region *reg = NULL;
1554 struct config_item *ret = NULL; 1554 struct config_item *ret = NULL;
1555 1555
1556 reg = kcalloc(1, sizeof(struct o2hb_region), GFP_KERNEL); 1556 reg = kzalloc(sizeof(struct o2hb_region), GFP_KERNEL);
1557 if (reg == NULL) 1557 if (reg == NULL)
1558 goto out; /* ENOMEM */ 1558 goto out; /* ENOMEM */
1559 1559
@@ -1679,7 +1679,7 @@ struct config_group *o2hb_alloc_hb_set(void)
1679 struct o2hb_heartbeat_group *hs = NULL; 1679 struct o2hb_heartbeat_group *hs = NULL;
1680 struct config_group *ret = NULL; 1680 struct config_group *ret = NULL;
1681 1681
1682 hs = kcalloc(1, sizeof(struct o2hb_heartbeat_group), GFP_KERNEL); 1682 hs = kzalloc(sizeof(struct o2hb_heartbeat_group), GFP_KERNEL);
1683 if (hs == NULL) 1683 if (hs == NULL)
1684 goto out; 1684 goto out;
1685 1685
diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c
index d11753c50bc1..b17333a0606b 100644
--- a/fs/ocfs2/cluster/nodemanager.c
+++ b/fs/ocfs2/cluster/nodemanager.c
@@ -35,7 +35,7 @@
35/* for now we operate under the assertion that there can be only one 35/* for now we operate under the assertion that there can be only one
36 * cluster active at a time. Changing this will require trickling 36 * cluster active at a time. Changing this will require trickling
37 * cluster references throughout where nodes are looked up */ 37 * cluster references throughout where nodes are looked up */
38static struct o2nm_cluster *o2nm_single_cluster = NULL; 38struct o2nm_cluster *o2nm_single_cluster = NULL;
39 39
40#define OCFS2_MAX_HB_CTL_PATH 256 40#define OCFS2_MAX_HB_CTL_PATH 256
41static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl"; 41static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl";
@@ -97,17 +97,6 @@ const char *o2nm_get_hb_ctl_path(void)
97} 97}
98EXPORT_SYMBOL_GPL(o2nm_get_hb_ctl_path); 98EXPORT_SYMBOL_GPL(o2nm_get_hb_ctl_path);
99 99
100struct o2nm_cluster {
101 struct config_group cl_group;
102 unsigned cl_has_local:1;
103 u8 cl_local_node;
104 rwlock_t cl_nodes_lock;
105 struct o2nm_node *cl_nodes[O2NM_MAX_NODES];
106 struct rb_root cl_node_ip_tree;
107 /* this bitmap is part of a hack for disk bitmap.. will go eventually. - zab */
108 unsigned long cl_nodes_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
109};
110
111struct o2nm_node *o2nm_get_node_by_num(u8 node_num) 100struct o2nm_node *o2nm_get_node_by_num(u8 node_num)
112{ 101{
113 struct o2nm_node *node = NULL; 102 struct o2nm_node *node = NULL;
@@ -543,6 +532,179 @@ static struct o2nm_node_group *to_o2nm_node_group(struct config_group *group)
543} 532}
544#endif 533#endif
545 534
535struct o2nm_cluster_attribute {
536 struct configfs_attribute attr;
537 ssize_t (*show)(struct o2nm_cluster *, char *);
538 ssize_t (*store)(struct o2nm_cluster *, const char *, size_t);
539};
540
541static ssize_t o2nm_cluster_attr_write(const char *page, ssize_t count,
542 unsigned int *val)
543{
544 unsigned long tmp;
545 char *p = (char *)page;
546
547 tmp = simple_strtoul(p, &p, 0);
548 if (!p || (*p && (*p != '\n')))
549 return -EINVAL;
550
551 if (tmp == 0)
552 return -EINVAL;
553 if (tmp >= (u32)-1)
554 return -ERANGE;
555
556 *val = tmp;
557
558 return count;
559}
560
561static ssize_t o2nm_cluster_attr_idle_timeout_ms_read(
562 struct o2nm_cluster *cluster, char *page)
563{
564 return sprintf(page, "%u\n", cluster->cl_idle_timeout_ms);
565}
566
567static ssize_t o2nm_cluster_attr_idle_timeout_ms_write(
568 struct o2nm_cluster *cluster, const char *page, size_t count)
569{
570 ssize_t ret;
571 unsigned int val;
572
573 ret = o2nm_cluster_attr_write(page, count, &val);
574
575 if (ret > 0) {
576 if (cluster->cl_idle_timeout_ms != val
577 && o2net_num_connected_peers()) {
578 mlog(ML_NOTICE,
579 "o2net: cannot change idle timeout after "
580 "the first peer has agreed to it."
581 " %d connected peers\n",
582 o2net_num_connected_peers());
583 ret = -EINVAL;
584 } else if (val <= cluster->cl_keepalive_delay_ms) {
585 mlog(ML_NOTICE, "o2net: idle timeout must be larger "
586 "than keepalive delay\n");
587 ret = -EINVAL;
588 } else {
589 cluster->cl_idle_timeout_ms = val;
590 }
591 }
592
593 return ret;
594}
595
596static ssize_t o2nm_cluster_attr_keepalive_delay_ms_read(
597 struct o2nm_cluster *cluster, char *page)
598{
599 return sprintf(page, "%u\n", cluster->cl_keepalive_delay_ms);
600}
601
602static ssize_t o2nm_cluster_attr_keepalive_delay_ms_write(
603 struct o2nm_cluster *cluster, const char *page, size_t count)
604{
605 ssize_t ret;
606 unsigned int val;
607
608 ret = o2nm_cluster_attr_write(page, count, &val);
609
610 if (ret > 0) {
611 if (cluster->cl_keepalive_delay_ms != val
612 && o2net_num_connected_peers()) {
613 mlog(ML_NOTICE,
614 "o2net: cannot change keepalive delay after"
615 " the first peer has agreed to it."
616 " %d connected peers\n",
617 o2net_num_connected_peers());
618 ret = -EINVAL;
619 } else if (val >= cluster->cl_idle_timeout_ms) {
620 mlog(ML_NOTICE, "o2net: keepalive delay must be "
621 "smaller than idle timeout\n");
622 ret = -EINVAL;
623 } else {
624 cluster->cl_keepalive_delay_ms = val;
625 }
626 }
627
628 return ret;
629}
630
631static ssize_t o2nm_cluster_attr_reconnect_delay_ms_read(
632 struct o2nm_cluster *cluster, char *page)
633{
634 return sprintf(page, "%u\n", cluster->cl_reconnect_delay_ms);
635}
636
637static ssize_t o2nm_cluster_attr_reconnect_delay_ms_write(
638 struct o2nm_cluster *cluster, const char *page, size_t count)
639{
640 return o2nm_cluster_attr_write(page, count,
641 &cluster->cl_reconnect_delay_ms);
642}
643static struct o2nm_cluster_attribute o2nm_cluster_attr_idle_timeout_ms = {
644 .attr = { .ca_owner = THIS_MODULE,
645 .ca_name = "idle_timeout_ms",
646 .ca_mode = S_IRUGO | S_IWUSR },
647 .show = o2nm_cluster_attr_idle_timeout_ms_read,
648 .store = o2nm_cluster_attr_idle_timeout_ms_write,
649};
650
651static struct o2nm_cluster_attribute o2nm_cluster_attr_keepalive_delay_ms = {
652 .attr = { .ca_owner = THIS_MODULE,
653 .ca_name = "keepalive_delay_ms",
654 .ca_mode = S_IRUGO | S_IWUSR },
655 .show = o2nm_cluster_attr_keepalive_delay_ms_read,
656 .store = o2nm_cluster_attr_keepalive_delay_ms_write,
657};
658
659static struct o2nm_cluster_attribute o2nm_cluster_attr_reconnect_delay_ms = {
660 .attr = { .ca_owner = THIS_MODULE,
661 .ca_name = "reconnect_delay_ms",
662 .ca_mode = S_IRUGO | S_IWUSR },
663 .show = o2nm_cluster_attr_reconnect_delay_ms_read,
664 .store = o2nm_cluster_attr_reconnect_delay_ms_write,
665};
666
667static struct configfs_attribute *o2nm_cluster_attrs[] = {
668 &o2nm_cluster_attr_idle_timeout_ms.attr,
669 &o2nm_cluster_attr_keepalive_delay_ms.attr,
670 &o2nm_cluster_attr_reconnect_delay_ms.attr,
671 NULL,
672};
673static ssize_t o2nm_cluster_show(struct config_item *item,
674 struct configfs_attribute *attr,
675 char *page)
676{
677 struct o2nm_cluster *cluster = to_o2nm_cluster(item);
678 struct o2nm_cluster_attribute *o2nm_cluster_attr =
679 container_of(attr, struct o2nm_cluster_attribute, attr);
680 ssize_t ret = 0;
681
682 if (o2nm_cluster_attr->show)
683 ret = o2nm_cluster_attr->show(cluster, page);
684 return ret;
685}
686
687static ssize_t o2nm_cluster_store(struct config_item *item,
688 struct configfs_attribute *attr,
689 const char *page, size_t count)
690{
691 struct o2nm_cluster *cluster = to_o2nm_cluster(item);
692 struct o2nm_cluster_attribute *o2nm_cluster_attr =
693 container_of(attr, struct o2nm_cluster_attribute, attr);
694 ssize_t ret;
695
696 if (o2nm_cluster_attr->store == NULL) {
697 ret = -EINVAL;
698 goto out;
699 }
700
701 ret = o2nm_cluster_attr->store(cluster, page, count);
702 if (ret < count)
703 goto out;
704out:
705 return ret;
706}
707
546static struct config_item *o2nm_node_group_make_item(struct config_group *group, 708static struct config_item *o2nm_node_group_make_item(struct config_group *group,
547 const char *name) 709 const char *name)
548{ 710{
@@ -552,7 +714,7 @@ static struct config_item *o2nm_node_group_make_item(struct config_group *group,
552 if (strlen(name) > O2NM_MAX_NAME_LEN) 714 if (strlen(name) > O2NM_MAX_NAME_LEN)
553 goto out; /* ENAMETOOLONG */ 715 goto out; /* ENAMETOOLONG */
554 716
555 node = kcalloc(1, sizeof(struct o2nm_node), GFP_KERNEL); 717 node = kzalloc(sizeof(struct o2nm_node), GFP_KERNEL);
556 if (node == NULL) 718 if (node == NULL)
557 goto out; /* ENOMEM */ 719 goto out; /* ENOMEM */
558 720
@@ -624,10 +786,13 @@ static void o2nm_cluster_release(struct config_item *item)
624 786
625static struct configfs_item_operations o2nm_cluster_item_ops = { 787static struct configfs_item_operations o2nm_cluster_item_ops = {
626 .release = o2nm_cluster_release, 788 .release = o2nm_cluster_release,
789 .show_attribute = o2nm_cluster_show,
790 .store_attribute = o2nm_cluster_store,
627}; 791};
628 792
629static struct config_item_type o2nm_cluster_type = { 793static struct config_item_type o2nm_cluster_type = {
630 .ct_item_ops = &o2nm_cluster_item_ops, 794 .ct_item_ops = &o2nm_cluster_item_ops,
795 .ct_attrs = o2nm_cluster_attrs,
631 .ct_owner = THIS_MODULE, 796 .ct_owner = THIS_MODULE,
632}; 797};
633 798
@@ -660,8 +825,8 @@ static struct config_group *o2nm_cluster_group_make_group(struct config_group *g
660 if (o2nm_single_cluster) 825 if (o2nm_single_cluster)
661 goto out; /* ENOSPC */ 826 goto out; /* ENOSPC */
662 827
663 cluster = kcalloc(1, sizeof(struct o2nm_cluster), GFP_KERNEL); 828 cluster = kzalloc(sizeof(struct o2nm_cluster), GFP_KERNEL);
664 ns = kcalloc(1, sizeof(struct o2nm_node_group), GFP_KERNEL); 829 ns = kzalloc(sizeof(struct o2nm_node_group), GFP_KERNEL);
665 defs = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL); 830 defs = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL);
666 o2hb_group = o2hb_alloc_hb_set(); 831 o2hb_group = o2hb_alloc_hb_set();
667 if (cluster == NULL || ns == NULL || o2hb_group == NULL || defs == NULL) 832 if (cluster == NULL || ns == NULL || o2hb_group == NULL || defs == NULL)
@@ -678,6 +843,9 @@ static struct config_group *o2nm_cluster_group_make_group(struct config_group *g
678 cluster->cl_group.default_groups[2] = NULL; 843 cluster->cl_group.default_groups[2] = NULL;
679 rwlock_init(&cluster->cl_nodes_lock); 844 rwlock_init(&cluster->cl_nodes_lock);
680 cluster->cl_node_ip_tree = RB_ROOT; 845 cluster->cl_node_ip_tree = RB_ROOT;
846 cluster->cl_reconnect_delay_ms = O2NET_RECONNECT_DELAY_MS_DEFAULT;
847 cluster->cl_idle_timeout_ms = O2NET_IDLE_TIMEOUT_MS_DEFAULT;
848 cluster->cl_keepalive_delay_ms = O2NET_KEEPALIVE_DELAY_MS_DEFAULT;
681 849
682 ret = &cluster->cl_group; 850 ret = &cluster->cl_group;
683 o2nm_single_cluster = cluster; 851 o2nm_single_cluster = cluster;
diff --git a/fs/ocfs2/cluster/nodemanager.h b/fs/ocfs2/cluster/nodemanager.h
index fce8033c310f..8fb23cacc2f5 100644
--- a/fs/ocfs2/cluster/nodemanager.h
+++ b/fs/ocfs2/cluster/nodemanager.h
@@ -53,6 +53,23 @@ struct o2nm_node {
53 unsigned long nd_set_attributes; 53 unsigned long nd_set_attributes;
54}; 54};
55 55
56struct o2nm_cluster {
57 struct config_group cl_group;
58 unsigned cl_has_local:1;
59 u8 cl_local_node;
60 rwlock_t cl_nodes_lock;
61 struct o2nm_node *cl_nodes[O2NM_MAX_NODES];
62 struct rb_root cl_node_ip_tree;
63 unsigned int cl_idle_timeout_ms;
64 unsigned int cl_keepalive_delay_ms;
65 unsigned int cl_reconnect_delay_ms;
66
67 /* this bitmap is part of a hack for disk bitmap.. will go eventually. - zab */
68 unsigned long cl_nodes_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
69};
70
71extern struct o2nm_cluster *o2nm_single_cluster;
72
56u8 o2nm_this_node(void); 73u8 o2nm_this_node(void);
57 74
58int o2nm_configured_node_map(unsigned long *map, unsigned bytes); 75int o2nm_configured_node_map(unsigned long *map, unsigned bytes);
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 9b3209dc0b16..ae4ff4a6636b 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -147,6 +147,28 @@ static void o2net_listen_data_ready(struct sock *sk, int bytes);
147static void o2net_sc_send_keep_req(struct work_struct *work); 147static void o2net_sc_send_keep_req(struct work_struct *work);
148static void o2net_idle_timer(unsigned long data); 148static void o2net_idle_timer(unsigned long data);
149static void o2net_sc_postpone_idle(struct o2net_sock_container *sc); 149static void o2net_sc_postpone_idle(struct o2net_sock_container *sc);
150static void o2net_sc_reset_idle_timer(struct o2net_sock_container *sc);
151
152/*
153 * FIXME: These should use to_o2nm_cluster_from_node(), but we end up
154 * losing our parent link to the cluster during shutdown. This can be
155 * solved by adding a pre-removal callback to configfs, or passing
156 * around the cluster with the node. -jeffm
157 */
158static inline int o2net_reconnect_delay(struct o2nm_node *node)
159{
160 return o2nm_single_cluster->cl_reconnect_delay_ms;
161}
162
163static inline int o2net_keepalive_delay(struct o2nm_node *node)
164{
165 return o2nm_single_cluster->cl_keepalive_delay_ms;
166}
167
168static inline int o2net_idle_timeout(struct o2nm_node *node)
169{
170 return o2nm_single_cluster->cl_idle_timeout_ms;
171}
150 172
151static inline int o2net_sys_err_to_errno(enum o2net_system_error err) 173static inline int o2net_sys_err_to_errno(enum o2net_system_error err)
152{ 174{
@@ -271,6 +293,8 @@ static void sc_kref_release(struct kref *kref)
271{ 293{
272 struct o2net_sock_container *sc = container_of(kref, 294 struct o2net_sock_container *sc = container_of(kref,
273 struct o2net_sock_container, sc_kref); 295 struct o2net_sock_container, sc_kref);
296 BUG_ON(timer_pending(&sc->sc_idle_timeout));
297
274 sclog(sc, "releasing\n"); 298 sclog(sc, "releasing\n");
275 299
276 if (sc->sc_sock) { 300 if (sc->sc_sock) {
@@ -300,7 +324,7 @@ static struct o2net_sock_container *sc_alloc(struct o2nm_node *node)
300 struct page *page = NULL; 324 struct page *page = NULL;
301 325
302 page = alloc_page(GFP_NOFS); 326 page = alloc_page(GFP_NOFS);
303 sc = kcalloc(1, sizeof(*sc), GFP_NOFS); 327 sc = kzalloc(sizeof(*sc), GFP_NOFS);
304 if (sc == NULL || page == NULL) 328 if (sc == NULL || page == NULL)
305 goto out; 329 goto out;
306 330
@@ -356,6 +380,13 @@ static void o2net_sc_cancel_delayed_work(struct o2net_sock_container *sc,
356 sc_put(sc); 380 sc_put(sc);
357} 381}
358 382
383static atomic_t o2net_connected_peers = ATOMIC_INIT(0);
384
385int o2net_num_connected_peers(void)
386{
387 return atomic_read(&o2net_connected_peers);
388}
389
359static void o2net_set_nn_state(struct o2net_node *nn, 390static void o2net_set_nn_state(struct o2net_node *nn,
360 struct o2net_sock_container *sc, 391 struct o2net_sock_container *sc,
361 unsigned valid, int err) 392 unsigned valid, int err)
@@ -366,6 +397,11 @@ static void o2net_set_nn_state(struct o2net_node *nn,
366 397
367 assert_spin_locked(&nn->nn_lock); 398 assert_spin_locked(&nn->nn_lock);
368 399
400 if (old_sc && !sc)
401 atomic_dec(&o2net_connected_peers);
402 else if (!old_sc && sc)
403 atomic_inc(&o2net_connected_peers);
404
369 /* the node num comparison and single connect/accept path should stop 405 /* the node num comparison and single connect/accept path should stop
370 * an non-null sc from being overwritten with another */ 406 * an non-null sc from being overwritten with another */
371 BUG_ON(sc && nn->nn_sc && nn->nn_sc != sc); 407 BUG_ON(sc && nn->nn_sc && nn->nn_sc != sc);
@@ -424,9 +460,9 @@ static void o2net_set_nn_state(struct o2net_node *nn,
424 /* delay if we're withing a RECONNECT_DELAY of the 460 /* delay if we're withing a RECONNECT_DELAY of the
425 * last attempt */ 461 * last attempt */
426 delay = (nn->nn_last_connect_attempt + 462 delay = (nn->nn_last_connect_attempt +
427 msecs_to_jiffies(O2NET_RECONNECT_DELAY_MS)) 463 msecs_to_jiffies(o2net_reconnect_delay(sc->sc_node)))
428 - jiffies; 464 - jiffies;
429 if (delay > msecs_to_jiffies(O2NET_RECONNECT_DELAY_MS)) 465 if (delay > msecs_to_jiffies(o2net_reconnect_delay(sc->sc_node)))
430 delay = 0; 466 delay = 0;
431 mlog(ML_CONN, "queueing conn attempt in %lu jiffies\n", delay); 467 mlog(ML_CONN, "queueing conn attempt in %lu jiffies\n", delay);
432 queue_delayed_work(o2net_wq, &nn->nn_connect_work, delay); 468 queue_delayed_work(o2net_wq, &nn->nn_connect_work, delay);
@@ -678,7 +714,7 @@ int o2net_register_handler(u32 msg_type, u32 key, u32 max_len,
678 goto out; 714 goto out;
679 } 715 }
680 716
681 nmh = kcalloc(1, sizeof(struct o2net_msg_handler), GFP_NOFS); 717 nmh = kzalloc(sizeof(struct o2net_msg_handler), GFP_NOFS);
682 if (nmh == NULL) { 718 if (nmh == NULL) {
683 ret = -ENOMEM; 719 ret = -ENOMEM;
684 goto out; 720 goto out;
@@ -1099,13 +1135,51 @@ static int o2net_check_handshake(struct o2net_sock_container *sc)
1099 return -1; 1135 return -1;
1100 } 1136 }
1101 1137
1138 /*
1139 * Ensure timeouts are consistent with other nodes, otherwise
1140 * we can end up with one node thinking that the other must be down,
1141 * but isn't. This can ultimately cause corruption.
1142 */
1143 if (be32_to_cpu(hand->o2net_idle_timeout_ms) !=
1144 o2net_idle_timeout(sc->sc_node)) {
1145 mlog(ML_NOTICE, SC_NODEF_FMT " uses a network idle timeout of "
1146 "%u ms, but we use %u ms locally. disconnecting\n",
1147 SC_NODEF_ARGS(sc),
1148 be32_to_cpu(hand->o2net_idle_timeout_ms),
1149 o2net_idle_timeout(sc->sc_node));
1150 o2net_ensure_shutdown(nn, sc, -ENOTCONN);
1151 return -1;
1152 }
1153
1154 if (be32_to_cpu(hand->o2net_keepalive_delay_ms) !=
1155 o2net_keepalive_delay(sc->sc_node)) {
1156 mlog(ML_NOTICE, SC_NODEF_FMT " uses a keepalive delay of "
1157 "%u ms, but we use %u ms locally. disconnecting\n",
1158 SC_NODEF_ARGS(sc),
1159 be32_to_cpu(hand->o2net_keepalive_delay_ms),
1160 o2net_keepalive_delay(sc->sc_node));
1161 o2net_ensure_shutdown(nn, sc, -ENOTCONN);
1162 return -1;
1163 }
1164
1165 if (be32_to_cpu(hand->o2hb_heartbeat_timeout_ms) !=
1166 O2HB_MAX_WRITE_TIMEOUT_MS) {
1167 mlog(ML_NOTICE, SC_NODEF_FMT " uses a heartbeat timeout of "
1168 "%u ms, but we use %u ms locally. disconnecting\n",
1169 SC_NODEF_ARGS(sc),
1170 be32_to_cpu(hand->o2hb_heartbeat_timeout_ms),
1171 O2HB_MAX_WRITE_TIMEOUT_MS);
1172 o2net_ensure_shutdown(nn, sc, -ENOTCONN);
1173 return -1;
1174 }
1175
1102 sc->sc_handshake_ok = 1; 1176 sc->sc_handshake_ok = 1;
1103 1177
1104 spin_lock(&nn->nn_lock); 1178 spin_lock(&nn->nn_lock);
1105 /* set valid and queue the idle timers only if it hasn't been 1179 /* set valid and queue the idle timers only if it hasn't been
1106 * shut down already */ 1180 * shut down already */
1107 if (nn->nn_sc == sc) { 1181 if (nn->nn_sc == sc) {
1108 o2net_sc_postpone_idle(sc); 1182 o2net_sc_reset_idle_timer(sc);
1109 o2net_set_nn_state(nn, sc, 1, 0); 1183 o2net_set_nn_state(nn, sc, 1, 0);
1110 } 1184 }
1111 spin_unlock(&nn->nn_lock); 1185 spin_unlock(&nn->nn_lock);
@@ -1131,6 +1205,23 @@ static int o2net_advance_rx(struct o2net_sock_container *sc)
1131 sclog(sc, "receiving\n"); 1205 sclog(sc, "receiving\n");
1132 do_gettimeofday(&sc->sc_tv_advance_start); 1206 do_gettimeofday(&sc->sc_tv_advance_start);
1133 1207
1208 if (unlikely(sc->sc_handshake_ok == 0)) {
1209 if(sc->sc_page_off < sizeof(struct o2net_handshake)) {
1210 data = page_address(sc->sc_page) + sc->sc_page_off;
1211 datalen = sizeof(struct o2net_handshake) - sc->sc_page_off;
1212 ret = o2net_recv_tcp_msg(sc->sc_sock, data, datalen);
1213 if (ret > 0)
1214 sc->sc_page_off += ret;
1215 }
1216
1217 if (sc->sc_page_off == sizeof(struct o2net_handshake)) {
1218 o2net_check_handshake(sc);
1219 if (unlikely(sc->sc_handshake_ok == 0))
1220 ret = -EPROTO;
1221 }
1222 goto out;
1223 }
1224
1134 /* do we need more header? */ 1225 /* do we need more header? */
1135 if (sc->sc_page_off < sizeof(struct o2net_msg)) { 1226 if (sc->sc_page_off < sizeof(struct o2net_msg)) {
1136 data = page_address(sc->sc_page) + sc->sc_page_off; 1227 data = page_address(sc->sc_page) + sc->sc_page_off;
@@ -1138,15 +1229,6 @@ static int o2net_advance_rx(struct o2net_sock_container *sc)
1138 ret = o2net_recv_tcp_msg(sc->sc_sock, data, datalen); 1229 ret = o2net_recv_tcp_msg(sc->sc_sock, data, datalen);
1139 if (ret > 0) { 1230 if (ret > 0) {
1140 sc->sc_page_off += ret; 1231 sc->sc_page_off += ret;
1141
1142 /* this working relies on the handshake being
1143 * smaller than the normal message header */
1144 if (sc->sc_page_off >= sizeof(struct o2net_handshake)&&
1145 !sc->sc_handshake_ok && o2net_check_handshake(sc)) {
1146 ret = -EPROTO;
1147 goto out;
1148 }
1149
1150 /* only swab incoming here.. we can 1232 /* only swab incoming here.. we can
1151 * only get here once as we cross from 1233 * only get here once as we cross from
1152 * being under to over */ 1234 * being under to over */
@@ -1248,6 +1330,18 @@ static int o2net_set_nodelay(struct socket *sock)
1248 return ret; 1330 return ret;
1249} 1331}
1250 1332
1333static void o2net_initialize_handshake(void)
1334{
1335 o2net_hand->o2hb_heartbeat_timeout_ms = cpu_to_be32(
1336 O2HB_MAX_WRITE_TIMEOUT_MS);
1337 o2net_hand->o2net_idle_timeout_ms = cpu_to_be32(
1338 o2net_idle_timeout(NULL));
1339 o2net_hand->o2net_keepalive_delay_ms = cpu_to_be32(
1340 o2net_keepalive_delay(NULL));
1341 o2net_hand->o2net_reconnect_delay_ms = cpu_to_be32(
1342 o2net_reconnect_delay(NULL));
1343}
1344
1251/* ------------------------------------------------------------ */ 1345/* ------------------------------------------------------------ */
1252 1346
1253/* called when a connect completes and after a sock is accepted. the 1347/* called when a connect completes and after a sock is accepted. the
@@ -1262,6 +1356,7 @@ static void o2net_sc_connect_completed(struct work_struct *work)
1262 (unsigned long long)O2NET_PROTOCOL_VERSION, 1356 (unsigned long long)O2NET_PROTOCOL_VERSION,
1263 (unsigned long long)be64_to_cpu(o2net_hand->connector_id)); 1357 (unsigned long long)be64_to_cpu(o2net_hand->connector_id));
1264 1358
1359 o2net_initialize_handshake();
1265 o2net_sendpage(sc, o2net_hand, sizeof(*o2net_hand)); 1360 o2net_sendpage(sc, o2net_hand, sizeof(*o2net_hand));
1266 sc_put(sc); 1361 sc_put(sc);
1267} 1362}
@@ -1287,8 +1382,10 @@ static void o2net_idle_timer(unsigned long data)
1287 1382
1288 do_gettimeofday(&now); 1383 do_gettimeofday(&now);
1289 1384
1290 printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT " has been idle for 10 " 1385 printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u "
1291 "seconds, shutting it down.\n", SC_NODEF_ARGS(sc)); 1386 "seconds, shutting it down.\n", SC_NODEF_ARGS(sc),
1387 o2net_idle_timeout(sc->sc_node) / 1000,
1388 o2net_idle_timeout(sc->sc_node) % 1000);
1292 mlog(ML_NOTICE, "here are some times that might help debug the " 1389 mlog(ML_NOTICE, "here are some times that might help debug the "
1293 "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv " 1390 "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv "
1294 "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n", 1391 "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n",
@@ -1306,14 +1403,21 @@ static void o2net_idle_timer(unsigned long data)
1306 o2net_sc_queue_work(sc, &sc->sc_shutdown_work); 1403 o2net_sc_queue_work(sc, &sc->sc_shutdown_work);
1307} 1404}
1308 1405
1309static void o2net_sc_postpone_idle(struct o2net_sock_container *sc) 1406static void o2net_sc_reset_idle_timer(struct o2net_sock_container *sc)
1310{ 1407{
1311 o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work); 1408 o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
1312 o2net_sc_queue_delayed_work(sc, &sc->sc_keepalive_work, 1409 o2net_sc_queue_delayed_work(sc, &sc->sc_keepalive_work,
1313 O2NET_KEEPALIVE_DELAY_SECS * HZ); 1410 msecs_to_jiffies(o2net_keepalive_delay(sc->sc_node)));
1314 do_gettimeofday(&sc->sc_tv_timer); 1411 do_gettimeofday(&sc->sc_tv_timer);
1315 mod_timer(&sc->sc_idle_timeout, 1412 mod_timer(&sc->sc_idle_timeout,
1316 jiffies + (O2NET_IDLE_TIMEOUT_SECS * HZ)); 1413 jiffies + msecs_to_jiffies(o2net_idle_timeout(sc->sc_node)));
1414}
1415
1416static void o2net_sc_postpone_idle(struct o2net_sock_container *sc)
1417{
1418 /* Only push out an existing timer */
1419 if (timer_pending(&sc->sc_idle_timeout))
1420 o2net_sc_reset_idle_timer(sc);
1317} 1421}
1318 1422
1319/* this work func is kicked whenever a path sets the nn state which doesn't 1423/* this work func is kicked whenever a path sets the nn state which doesn't
@@ -1435,9 +1539,12 @@ static void o2net_connect_expired(struct work_struct *work)
1435 1539
1436 spin_lock(&nn->nn_lock); 1540 spin_lock(&nn->nn_lock);
1437 if (!nn->nn_sc_valid) { 1541 if (!nn->nn_sc_valid) {
1542 struct o2nm_node *node = nn->nn_sc->sc_node;
1438 mlog(ML_ERROR, "no connection established with node %u after " 1543 mlog(ML_ERROR, "no connection established with node %u after "
1439 "%u seconds, giving up and returning errors.\n", 1544 "%u.%u seconds, giving up and returning errors.\n",
1440 o2net_num_from_nn(nn), O2NET_IDLE_TIMEOUT_SECS); 1545 o2net_num_from_nn(nn),
1546 o2net_idle_timeout(node) / 1000,
1547 o2net_idle_timeout(node) % 1000);
1441 1548
1442 o2net_set_nn_state(nn, NULL, 0, -ENOTCONN); 1549 o2net_set_nn_state(nn, NULL, 0, -ENOTCONN);
1443 } 1550 }
@@ -1478,6 +1585,8 @@ static void o2net_hb_node_down_cb(struct o2nm_node *node, int node_num,
1478 1585
1479 if (node_num != o2nm_this_node()) 1586 if (node_num != o2nm_this_node())
1480 o2net_disconnect_node(node); 1587 o2net_disconnect_node(node);
1588
1589 BUG_ON(atomic_read(&o2net_connected_peers) < 0);
1481} 1590}
1482 1591
1483static void o2net_hb_node_up_cb(struct o2nm_node *node, int node_num, 1592static void o2net_hb_node_up_cb(struct o2nm_node *node, int node_num,
@@ -1489,14 +1598,14 @@ static void o2net_hb_node_up_cb(struct o2nm_node *node, int node_num,
1489 1598
1490 /* ensure an immediate connect attempt */ 1599 /* ensure an immediate connect attempt */
1491 nn->nn_last_connect_attempt = jiffies - 1600 nn->nn_last_connect_attempt = jiffies -
1492 (msecs_to_jiffies(O2NET_RECONNECT_DELAY_MS) + 1); 1601 (msecs_to_jiffies(o2net_reconnect_delay(node)) + 1);
1493 1602
1494 if (node_num != o2nm_this_node()) { 1603 if (node_num != o2nm_this_node()) {
1495 /* heartbeat doesn't work unless a local node number is 1604 /* heartbeat doesn't work unless a local node number is
1496 * configured and doing so brings up the o2net_wq, so we can 1605 * configured and doing so brings up the o2net_wq, so we can
1497 * use it.. */ 1606 * use it.. */
1498 queue_delayed_work(o2net_wq, &nn->nn_connect_expired, 1607 queue_delayed_work(o2net_wq, &nn->nn_connect_expired,
1499 O2NET_IDLE_TIMEOUT_SECS * HZ); 1608 msecs_to_jiffies(o2net_idle_timeout(node)));
1500 1609
1501 /* believe it or not, accept and node hearbeating testing 1610 /* believe it or not, accept and node hearbeating testing
1502 * can succeed for this node before we got here.. so 1611 * can succeed for this node before we got here.. so
@@ -1641,6 +1750,7 @@ static int o2net_accept_one(struct socket *sock)
1641 o2net_register_callbacks(sc->sc_sock->sk, sc); 1750 o2net_register_callbacks(sc->sc_sock->sk, sc);
1642 o2net_sc_queue_work(sc, &sc->sc_rx_work); 1751 o2net_sc_queue_work(sc, &sc->sc_rx_work);
1643 1752
1753 o2net_initialize_handshake();
1644 o2net_sendpage(sc, o2net_hand, sizeof(*o2net_hand)); 1754 o2net_sendpage(sc, o2net_hand, sizeof(*o2net_hand));
1645 1755
1646out: 1756out:
@@ -1808,9 +1918,9 @@ int o2net_init(void)
1808 1918
1809 o2quo_init(); 1919 o2quo_init();
1810 1920
1811 o2net_hand = kcalloc(1, sizeof(struct o2net_handshake), GFP_KERNEL); 1921 o2net_hand = kzalloc(sizeof(struct o2net_handshake), GFP_KERNEL);
1812 o2net_keep_req = kcalloc(1, sizeof(struct o2net_msg), GFP_KERNEL); 1922 o2net_keep_req = kzalloc(sizeof(struct o2net_msg), GFP_KERNEL);
1813 o2net_keep_resp = kcalloc(1, sizeof(struct o2net_msg), GFP_KERNEL); 1923 o2net_keep_resp = kzalloc(sizeof(struct o2net_msg), GFP_KERNEL);
1814 if (!o2net_hand || !o2net_keep_req || !o2net_keep_resp) { 1924 if (!o2net_hand || !o2net_keep_req || !o2net_keep_resp) {
1815 kfree(o2net_hand); 1925 kfree(o2net_hand);
1816 kfree(o2net_keep_req); 1926 kfree(o2net_keep_req);
diff --git a/fs/ocfs2/cluster/tcp.h b/fs/ocfs2/cluster/tcp.h
index 616ff2b8434a..21a4e43df836 100644
--- a/fs/ocfs2/cluster/tcp.h
+++ b/fs/ocfs2/cluster/tcp.h
@@ -54,6 +54,13 @@ typedef int (o2net_msg_handler_func)(struct o2net_msg *msg, u32 len, void *data)
54 54
55#define O2NET_MAX_PAYLOAD_BYTES (4096 - sizeof(struct o2net_msg)) 55#define O2NET_MAX_PAYLOAD_BYTES (4096 - sizeof(struct o2net_msg))
56 56
57/* same as hb delay, we're waiting for another node to recognize our hb */
58#define O2NET_RECONNECT_DELAY_MS_DEFAULT 2000
59
60#define O2NET_KEEPALIVE_DELAY_MS_DEFAULT 5000
61#define O2NET_IDLE_TIMEOUT_MS_DEFAULT 10000
62
63
57/* TODO: figure this out.... */ 64/* TODO: figure this out.... */
58static inline int o2net_link_down(int err, struct socket *sock) 65static inline int o2net_link_down(int err, struct socket *sock)
59{ 66{
@@ -101,6 +108,7 @@ void o2net_unregister_hb_callbacks(void);
101int o2net_start_listening(struct o2nm_node *node); 108int o2net_start_listening(struct o2nm_node *node);
102void o2net_stop_listening(struct o2nm_node *node); 109void o2net_stop_listening(struct o2nm_node *node);
103void o2net_disconnect_node(struct o2nm_node *node); 110void o2net_disconnect_node(struct o2nm_node *node);
111int o2net_num_connected_peers(void);
104 112
105int o2net_init(void); 113int o2net_init(void);
106void o2net_exit(void); 114void o2net_exit(void);
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index daebbd3a2c8c..b700dc9624d1 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -27,23 +27,20 @@
27#define O2NET_MSG_KEEP_REQ_MAGIC ((u16)0xfa57) 27#define O2NET_MSG_KEEP_REQ_MAGIC ((u16)0xfa57)
28#define O2NET_MSG_KEEP_RESP_MAGIC ((u16)0xfa58) 28#define O2NET_MSG_KEEP_RESP_MAGIC ((u16)0xfa58)
29 29
30/* same as hb delay, we're waiting for another node to recognize our hb */
31#define O2NET_RECONNECT_DELAY_MS O2HB_REGION_TIMEOUT_MS
32
33/* we're delaying our quorum decision so that heartbeat will have timed 30/* we're delaying our quorum decision so that heartbeat will have timed
34 * out truly dead nodes by the time we come around to making decisions 31 * out truly dead nodes by the time we come around to making decisions
35 * on their number */ 32 * on their number */
36#define O2NET_QUORUM_DELAY_MS ((o2hb_dead_threshold + 2) * O2HB_REGION_TIMEOUT_MS) 33#define O2NET_QUORUM_DELAY_MS ((o2hb_dead_threshold + 2) * O2HB_REGION_TIMEOUT_MS)
37 34
38#define O2NET_KEEPALIVE_DELAY_SECS 5
39#define O2NET_IDLE_TIMEOUT_SECS 10
40
41/* 35/*
42 * This version number represents quite a lot, unfortunately. It not 36 * This version number represents quite a lot, unfortunately. It not
43 * only represents the raw network message protocol on the wire but also 37 * only represents the raw network message protocol on the wire but also
44 * locking semantics of the file system using the protocol. It should 38 * locking semantics of the file system using the protocol. It should
45 * be somewhere else, I'm sure, but right now it isn't. 39 * be somewhere else, I'm sure, but right now it isn't.
46 * 40 *
41 * New in version 5:
42 * - Network timeout checking protocol
43 *
47 * New in version 4: 44 * New in version 4:
48 * - Remove i_generation from lock names for better stat performance. 45 * - Remove i_generation from lock names for better stat performance.
49 * 46 *
@@ -54,10 +51,14 @@
54 * - full 64 bit i_size in the metadata lock lvbs 51 * - full 64 bit i_size in the metadata lock lvbs
55 * - introduction of "rw" lock and pushing meta/data locking down 52 * - introduction of "rw" lock and pushing meta/data locking down
56 */ 53 */
57#define O2NET_PROTOCOL_VERSION 4ULL 54#define O2NET_PROTOCOL_VERSION 5ULL
58struct o2net_handshake { 55struct o2net_handshake {
59 __be64 protocol_version; 56 __be64 protocol_version;
60 __be64 connector_id; 57 __be64 connector_id;
58 __be32 o2hb_heartbeat_timeout_ms;
59 __be32 o2net_idle_timeout_ms;
60 __be32 o2net_keepalive_delay_ms;
61 __be32 o2net_reconnect_delay_ms;
61}; 62};
62 63
63struct o2net_node { 64struct o2net_node {
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 420a375a3949..f0b25f2dd205 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -920,7 +920,7 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
920 920
921 mlog_entry("%p", dlm); 921 mlog_entry("%p", dlm);
922 922
923 ctxt = kcalloc(1, sizeof(*ctxt), GFP_KERNEL); 923 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
924 if (!ctxt) { 924 if (!ctxt) {
925 status = -ENOMEM; 925 status = -ENOMEM;
926 mlog_errno(status); 926 mlog_errno(status);
@@ -1223,7 +1223,7 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
1223 int i; 1223 int i;
1224 struct dlm_ctxt *dlm = NULL; 1224 struct dlm_ctxt *dlm = NULL;
1225 1225
1226 dlm = kcalloc(1, sizeof(*dlm), GFP_KERNEL); 1226 dlm = kzalloc(sizeof(*dlm), GFP_KERNEL);
1227 if (!dlm) { 1227 if (!dlm) {
1228 mlog_errno(-ENOMEM); 1228 mlog_errno(-ENOMEM);
1229 goto leave; 1229 goto leave;
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 42a1b91979b5..e5ca3db197f6 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -408,13 +408,13 @@ struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
408 struct dlm_lock *lock; 408 struct dlm_lock *lock;
409 int kernel_allocated = 0; 409 int kernel_allocated = 0;
410 410
411 lock = kcalloc(1, sizeof(*lock), GFP_NOFS); 411 lock = kzalloc(sizeof(*lock), GFP_NOFS);
412 if (!lock) 412 if (!lock)
413 return NULL; 413 return NULL;
414 414
415 if (!lksb) { 415 if (!lksb) {
416 /* zero memory only if kernel-allocated */ 416 /* zero memory only if kernel-allocated */
417 lksb = kcalloc(1, sizeof(*lksb), GFP_NOFS); 417 lksb = kzalloc(sizeof(*lksb), GFP_NOFS);
418 if (!lksb) { 418 if (!lksb) {
419 kfree(lock); 419 kfree(lock);
420 return NULL; 420 return NULL;
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 856012b4fa49..0ad872055cb3 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -1939,7 +1939,7 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
1939 int ignore_higher, u8 request_from, u32 flags) 1939 int ignore_higher, u8 request_from, u32 flags)
1940{ 1940{
1941 struct dlm_work_item *item; 1941 struct dlm_work_item *item;
1942 item = kcalloc(1, sizeof(*item), GFP_NOFS); 1942 item = kzalloc(sizeof(*item), GFP_NOFS);
1943 if (!item) 1943 if (!item)
1944 return -ENOMEM; 1944 return -ENOMEM;
1945 1945
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index fb3e2b0817f1..367a11e9e2ed 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -757,7 +757,7 @@ static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
757 } 757 }
758 BUG_ON(num == dead_node); 758 BUG_ON(num == dead_node);
759 759
760 ndata = kcalloc(1, sizeof(*ndata), GFP_NOFS); 760 ndata = kzalloc(sizeof(*ndata), GFP_NOFS);
761 if (!ndata) { 761 if (!ndata) {
762 dlm_destroy_recovery_area(dlm, dead_node); 762 dlm_destroy_recovery_area(dlm, dead_node);
763 return -ENOMEM; 763 return -ENOMEM;
@@ -842,7 +842,7 @@ int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data)
842 } 842 }
843 BUG_ON(lr->dead_node != dlm->reco.dead_node); 843 BUG_ON(lr->dead_node != dlm->reco.dead_node);
844 844
845 item = kcalloc(1, sizeof(*item), GFP_NOFS); 845 item = kzalloc(sizeof(*item), GFP_NOFS);
846 if (!item) { 846 if (!item) {
847 dlm_put(dlm); 847 dlm_put(dlm);
848 return -ENOMEM; 848 return -ENOMEM;
@@ -1323,7 +1323,7 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data)
1323 1323
1324 ret = -ENOMEM; 1324 ret = -ENOMEM;
1325 buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS); 1325 buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
1326 item = kcalloc(1, sizeof(*item), GFP_NOFS); 1326 item = kzalloc(sizeof(*item), GFP_NOFS);
1327 if (!buf || !item) 1327 if (!buf || !item)
1328 goto leave; 1328 goto leave;
1329 1329
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 69fba16efbd1..e6220137bf69 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -770,7 +770,7 @@ static int ocfs2_lock_create(struct ocfs2_super *osb,
770 int dlm_flags) 770 int dlm_flags)
771{ 771{
772 int ret = 0; 772 int ret = 0;
773 enum dlm_status status; 773 enum dlm_status status = DLM_NORMAL;
774 unsigned long flags; 774 unsigned long flags;
775 775
776 mlog_entry_void(); 776 mlog_entry_void();
@@ -1138,6 +1138,7 @@ int ocfs2_rw_lock(struct inode *inode, int write)
1138{ 1138{
1139 int status, level; 1139 int status, level;
1140 struct ocfs2_lock_res *lockres; 1140 struct ocfs2_lock_res *lockres;
1141 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1141 1142
1142 BUG_ON(!inode); 1143 BUG_ON(!inode);
1143 1144
@@ -1147,6 +1148,9 @@ int ocfs2_rw_lock(struct inode *inode, int write)
1147 (unsigned long long)OCFS2_I(inode)->ip_blkno, 1148 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1148 write ? "EXMODE" : "PRMODE"); 1149 write ? "EXMODE" : "PRMODE");
1149 1150
1151 if (ocfs2_mount_local(osb))
1152 return 0;
1153
1150 lockres = &OCFS2_I(inode)->ip_rw_lockres; 1154 lockres = &OCFS2_I(inode)->ip_rw_lockres;
1151 1155
1152 level = write ? LKM_EXMODE : LKM_PRMODE; 1156 level = write ? LKM_EXMODE : LKM_PRMODE;
@@ -1164,6 +1168,7 @@ void ocfs2_rw_unlock(struct inode *inode, int write)
1164{ 1168{
1165 int level = write ? LKM_EXMODE : LKM_PRMODE; 1169 int level = write ? LKM_EXMODE : LKM_PRMODE;
1166 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres; 1170 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
1171 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1167 1172
1168 mlog_entry_void(); 1173 mlog_entry_void();
1169 1174
@@ -1171,7 +1176,8 @@ void ocfs2_rw_unlock(struct inode *inode, int write)
1171 (unsigned long long)OCFS2_I(inode)->ip_blkno, 1176 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1172 write ? "EXMODE" : "PRMODE"); 1177 write ? "EXMODE" : "PRMODE");
1173 1178
1174 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level); 1179 if (!ocfs2_mount_local(osb))
1180 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
1175 1181
1176 mlog_exit_void(); 1182 mlog_exit_void();
1177} 1183}
@@ -1182,6 +1188,7 @@ int ocfs2_data_lock_full(struct inode *inode,
1182{ 1188{
1183 int status = 0, level; 1189 int status = 0, level;
1184 struct ocfs2_lock_res *lockres; 1190 struct ocfs2_lock_res *lockres;
1191 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1185 1192
1186 BUG_ON(!inode); 1193 BUG_ON(!inode);
1187 1194
@@ -1201,6 +1208,9 @@ int ocfs2_data_lock_full(struct inode *inode,
1201 goto out; 1208 goto out;
1202 } 1209 }
1203 1210
1211 if (ocfs2_mount_local(osb))
1212 goto out;
1213
1204 lockres = &OCFS2_I(inode)->ip_data_lockres; 1214 lockres = &OCFS2_I(inode)->ip_data_lockres;
1205 1215
1206 level = write ? LKM_EXMODE : LKM_PRMODE; 1216 level = write ? LKM_EXMODE : LKM_PRMODE;
@@ -1269,6 +1279,7 @@ void ocfs2_data_unlock(struct inode *inode,
1269{ 1279{
1270 int level = write ? LKM_EXMODE : LKM_PRMODE; 1280 int level = write ? LKM_EXMODE : LKM_PRMODE;
1271 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_data_lockres; 1281 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_data_lockres;
1282 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1272 1283
1273 mlog_entry_void(); 1284 mlog_entry_void();
1274 1285
@@ -1276,7 +1287,8 @@ void ocfs2_data_unlock(struct inode *inode,
1276 (unsigned long long)OCFS2_I(inode)->ip_blkno, 1287 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1277 write ? "EXMODE" : "PRMODE"); 1288 write ? "EXMODE" : "PRMODE");
1278 1289
1279 if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb))) 1290 if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)) &&
1291 !ocfs2_mount_local(osb))
1280 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level); 1292 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
1281 1293
1282 mlog_exit_void(); 1294 mlog_exit_void();
@@ -1467,8 +1479,9 @@ static int ocfs2_meta_lock_update(struct inode *inode,
1467{ 1479{
1468 int status = 0; 1480 int status = 0;
1469 struct ocfs2_inode_info *oi = OCFS2_I(inode); 1481 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1470 struct ocfs2_lock_res *lockres; 1482 struct ocfs2_lock_res *lockres = NULL;
1471 struct ocfs2_dinode *fe; 1483 struct ocfs2_dinode *fe;
1484 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1472 1485
1473 mlog_entry_void(); 1486 mlog_entry_void();
1474 1487
@@ -1483,10 +1496,12 @@ static int ocfs2_meta_lock_update(struct inode *inode,
1483 } 1496 }
1484 spin_unlock(&oi->ip_lock); 1497 spin_unlock(&oi->ip_lock);
1485 1498
1486 lockres = &oi->ip_meta_lockres; 1499 if (!ocfs2_mount_local(osb)) {
1500 lockres = &oi->ip_meta_lockres;
1487 1501
1488 if (!ocfs2_should_refresh_lock_res(lockres)) 1502 if (!ocfs2_should_refresh_lock_res(lockres))
1489 goto bail; 1503 goto bail;
1504 }
1490 1505
1491 /* This will discard any caching information we might have had 1506 /* This will discard any caching information we might have had
1492 * for the inode metadata. */ 1507 * for the inode metadata. */
@@ -1496,7 +1511,7 @@ static int ocfs2_meta_lock_update(struct inode *inode,
1496 * map (directories, bitmap files, etc) */ 1511 * map (directories, bitmap files, etc) */
1497 ocfs2_extent_map_trunc(inode, 0); 1512 ocfs2_extent_map_trunc(inode, 0);
1498 1513
1499 if (ocfs2_meta_lvb_is_trustable(inode, lockres)) { 1514 if (lockres && ocfs2_meta_lvb_is_trustable(inode, lockres)) {
1500 mlog(0, "Trusting LVB on inode %llu\n", 1515 mlog(0, "Trusting LVB on inode %llu\n",
1501 (unsigned long long)oi->ip_blkno); 1516 (unsigned long long)oi->ip_blkno);
1502 ocfs2_refresh_inode_from_lvb(inode); 1517 ocfs2_refresh_inode_from_lvb(inode);
@@ -1543,7 +1558,8 @@ static int ocfs2_meta_lock_update(struct inode *inode,
1543 1558
1544 status = 0; 1559 status = 0;
1545bail_refresh: 1560bail_refresh:
1546 ocfs2_complete_lock_res_refresh(lockres, status); 1561 if (lockres)
1562 ocfs2_complete_lock_res_refresh(lockres, status);
1547bail: 1563bail:
1548 mlog_exit(status); 1564 mlog_exit(status);
1549 return status; 1565 return status;
@@ -1585,7 +1601,7 @@ int ocfs2_meta_lock_full(struct inode *inode,
1585 int arg_flags) 1601 int arg_flags)
1586{ 1602{
1587 int status, level, dlm_flags, acquired; 1603 int status, level, dlm_flags, acquired;
1588 struct ocfs2_lock_res *lockres; 1604 struct ocfs2_lock_res *lockres = NULL;
1589 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 1605 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1590 struct buffer_head *local_bh = NULL; 1606 struct buffer_head *local_bh = NULL;
1591 1607
@@ -1607,6 +1623,9 @@ int ocfs2_meta_lock_full(struct inode *inode,
1607 goto bail; 1623 goto bail;
1608 } 1624 }
1609 1625
1626 if (ocfs2_mount_local(osb))
1627 goto local;
1628
1610 if (!(arg_flags & OCFS2_META_LOCK_RECOVERY)) 1629 if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
1611 wait_event(osb->recovery_event, 1630 wait_event(osb->recovery_event,
1612 ocfs2_node_map_is_empty(osb, &osb->recovery_map)); 1631 ocfs2_node_map_is_empty(osb, &osb->recovery_map));
@@ -1636,6 +1655,7 @@ int ocfs2_meta_lock_full(struct inode *inode,
1636 wait_event(osb->recovery_event, 1655 wait_event(osb->recovery_event,
1637 ocfs2_node_map_is_empty(osb, &osb->recovery_map)); 1656 ocfs2_node_map_is_empty(osb, &osb->recovery_map));
1638 1657
1658local:
1639 /* 1659 /*
1640 * We only see this flag if we're being called from 1660 * We only see this flag if we're being called from
1641 * ocfs2_read_locked_inode(). It means we're locking an inode 1661 * ocfs2_read_locked_inode(). It means we're locking an inode
@@ -1644,7 +1664,8 @@ int ocfs2_meta_lock_full(struct inode *inode,
1644 */ 1664 */
1645 if (inode->i_state & I_NEW) { 1665 if (inode->i_state & I_NEW) {
1646 status = 0; 1666 status = 0;
1647 ocfs2_complete_lock_res_refresh(lockres, 0); 1667 if (lockres)
1668 ocfs2_complete_lock_res_refresh(lockres, 0);
1648 goto bail; 1669 goto bail;
1649 } 1670 }
1650 1671
@@ -1767,6 +1788,7 @@ void ocfs2_meta_unlock(struct inode *inode,
1767{ 1788{
1768 int level = ex ? LKM_EXMODE : LKM_PRMODE; 1789 int level = ex ? LKM_EXMODE : LKM_PRMODE;
1769 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_meta_lockres; 1790 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_meta_lockres;
1791 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1770 1792
1771 mlog_entry_void(); 1793 mlog_entry_void();
1772 1794
@@ -1774,7 +1796,8 @@ void ocfs2_meta_unlock(struct inode *inode,
1774 (unsigned long long)OCFS2_I(inode)->ip_blkno, 1796 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1775 ex ? "EXMODE" : "PRMODE"); 1797 ex ? "EXMODE" : "PRMODE");
1776 1798
1777 if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb))) 1799 if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)) &&
1800 !ocfs2_mount_local(osb))
1778 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level); 1801 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
1779 1802
1780 mlog_exit_void(); 1803 mlog_exit_void();
@@ -1783,7 +1806,7 @@ void ocfs2_meta_unlock(struct inode *inode,
1783int ocfs2_super_lock(struct ocfs2_super *osb, 1806int ocfs2_super_lock(struct ocfs2_super *osb,
1784 int ex) 1807 int ex)
1785{ 1808{
1786 int status; 1809 int status = 0;
1787 int level = ex ? LKM_EXMODE : LKM_PRMODE; 1810 int level = ex ? LKM_EXMODE : LKM_PRMODE;
1788 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres; 1811 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
1789 struct buffer_head *bh; 1812 struct buffer_head *bh;
@@ -1794,6 +1817,9 @@ int ocfs2_super_lock(struct ocfs2_super *osb,
1794 if (ocfs2_is_hard_readonly(osb)) 1817 if (ocfs2_is_hard_readonly(osb))
1795 return -EROFS; 1818 return -EROFS;
1796 1819
1820 if (ocfs2_mount_local(osb))
1821 goto bail;
1822
1797 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0); 1823 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
1798 if (status < 0) { 1824 if (status < 0) {
1799 mlog_errno(status); 1825 mlog_errno(status);
@@ -1832,7 +1858,8 @@ void ocfs2_super_unlock(struct ocfs2_super *osb,
1832 int level = ex ? LKM_EXMODE : LKM_PRMODE; 1858 int level = ex ? LKM_EXMODE : LKM_PRMODE;
1833 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres; 1859 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
1834 1860
1835 ocfs2_cluster_unlock(osb, lockres, level); 1861 if (!ocfs2_mount_local(osb))
1862 ocfs2_cluster_unlock(osb, lockres, level);
1836} 1863}
1837 1864
1838int ocfs2_rename_lock(struct ocfs2_super *osb) 1865int ocfs2_rename_lock(struct ocfs2_super *osb)
@@ -1843,6 +1870,9 @@ int ocfs2_rename_lock(struct ocfs2_super *osb)
1843 if (ocfs2_is_hard_readonly(osb)) 1870 if (ocfs2_is_hard_readonly(osb))
1844 return -EROFS; 1871 return -EROFS;
1845 1872
1873 if (ocfs2_mount_local(osb))
1874 return 0;
1875
1846 status = ocfs2_cluster_lock(osb, lockres, LKM_EXMODE, 0, 0); 1876 status = ocfs2_cluster_lock(osb, lockres, LKM_EXMODE, 0, 0);
1847 if (status < 0) 1877 if (status < 0)
1848 mlog_errno(status); 1878 mlog_errno(status);
@@ -1854,7 +1884,8 @@ void ocfs2_rename_unlock(struct ocfs2_super *osb)
1854{ 1884{
1855 struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres; 1885 struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
1856 1886
1857 ocfs2_cluster_unlock(osb, lockres, LKM_EXMODE); 1887 if (!ocfs2_mount_local(osb))
1888 ocfs2_cluster_unlock(osb, lockres, LKM_EXMODE);
1858} 1889}
1859 1890
1860int ocfs2_dentry_lock(struct dentry *dentry, int ex) 1891int ocfs2_dentry_lock(struct dentry *dentry, int ex)
@@ -1869,6 +1900,9 @@ int ocfs2_dentry_lock(struct dentry *dentry, int ex)
1869 if (ocfs2_is_hard_readonly(osb)) 1900 if (ocfs2_is_hard_readonly(osb))
1870 return -EROFS; 1901 return -EROFS;
1871 1902
1903 if (ocfs2_mount_local(osb))
1904 return 0;
1905
1872 ret = ocfs2_cluster_lock(osb, &dl->dl_lockres, level, 0, 0); 1906 ret = ocfs2_cluster_lock(osb, &dl->dl_lockres, level, 0, 0);
1873 if (ret < 0) 1907 if (ret < 0)
1874 mlog_errno(ret); 1908 mlog_errno(ret);
@@ -1882,7 +1916,8 @@ void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
1882 struct ocfs2_dentry_lock *dl = dentry->d_fsdata; 1916 struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
1883 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb); 1917 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
1884 1918
1885 ocfs2_cluster_unlock(osb, &dl->dl_lockres, level); 1919 if (!ocfs2_mount_local(osb))
1920 ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
1886} 1921}
1887 1922
1888/* Reference counting of the dlm debug structure. We want this because 1923/* Reference counting of the dlm debug structure. We want this because
@@ -2145,12 +2180,15 @@ static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
2145 2180
2146int ocfs2_dlm_init(struct ocfs2_super *osb) 2181int ocfs2_dlm_init(struct ocfs2_super *osb)
2147{ 2182{
2148 int status; 2183 int status = 0;
2149 u32 dlm_key; 2184 u32 dlm_key;
2150 struct dlm_ctxt *dlm; 2185 struct dlm_ctxt *dlm = NULL;
2151 2186
2152 mlog_entry_void(); 2187 mlog_entry_void();
2153 2188
2189 if (ocfs2_mount_local(osb))
2190 goto local;
2191
2154 status = ocfs2_dlm_init_debug(osb); 2192 status = ocfs2_dlm_init_debug(osb);
2155 if (status < 0) { 2193 if (status < 0) {
2156 mlog_errno(status); 2194 mlog_errno(status);
@@ -2178,11 +2216,12 @@ int ocfs2_dlm_init(struct ocfs2_super *osb)
2178 goto bail; 2216 goto bail;
2179 } 2217 }
2180 2218
2219 dlm_register_eviction_cb(dlm, &osb->osb_eviction_cb);
2220
2221local:
2181 ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb); 2222 ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
2182 ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb); 2223 ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
2183 2224
2184 dlm_register_eviction_cb(dlm, &osb->osb_eviction_cb);
2185
2186 osb->dlm = dlm; 2225 osb->dlm = dlm;
2187 2226
2188 status = 0; 2227 status = 0;
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index e9a82ad95c1e..9fd590b9bde3 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -153,6 +153,14 @@ int ocfs2_should_update_atime(struct inode *inode,
153 ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))) 153 ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
154 return 0; 154 return 0;
155 155
156 if (vfsmnt->mnt_flags & MNT_RELATIME) {
157 if ((timespec_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
158 (timespec_compare(&inode->i_atime, &inode->i_ctime) <= 0))
159 return 1;
160
161 return 0;
162 }
163
156 now = CURRENT_TIME; 164 now = CURRENT_TIME;
157 if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum)) 165 if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
158 return 0; 166 return 0;
diff --git a/fs/ocfs2/heartbeat.c b/fs/ocfs2/heartbeat.c
index cbfd45a97a63..8fc52d6d0ce7 100644
--- a/fs/ocfs2/heartbeat.c
+++ b/fs/ocfs2/heartbeat.c
@@ -154,6 +154,9 @@ int ocfs2_register_hb_callbacks(struct ocfs2_super *osb)
154{ 154{
155 int status; 155 int status;
156 156
157 if (ocfs2_mount_local(osb))
158 return 0;
159
157 status = o2hb_register_callback(&osb->osb_hb_down); 160 status = o2hb_register_callback(&osb->osb_hb_down);
158 if (status < 0) { 161 if (status < 0) {
159 mlog_errno(status); 162 mlog_errno(status);
@@ -172,6 +175,9 @@ void ocfs2_clear_hb_callbacks(struct ocfs2_super *osb)
172{ 175{
173 int status; 176 int status;
174 177
178 if (ocfs2_mount_local(osb))
179 return;
180
175 status = o2hb_unregister_callback(&osb->osb_hb_down); 181 status = o2hb_unregister_callback(&osb->osb_hb_down);
176 if (status < 0) 182 if (status < 0)
177 mlog_errno(status); 183 mlog_errno(status);
@@ -186,6 +192,9 @@ void ocfs2_stop_heartbeat(struct ocfs2_super *osb)
186 int ret; 192 int ret;
187 char *argv[5], *envp[3]; 193 char *argv[5], *envp[3];
188 194
195 if (ocfs2_mount_local(osb))
196 return;
197
189 if (!osb->uuid_str) { 198 if (!osb->uuid_str) {
190 /* This can happen if we don't get far enough in mount... */ 199 /* This can happen if we don't get far enough in mount... */
191 mlog(0, "No UUID with which to stop heartbeat!\n\n"); 200 mlog(0, "No UUID with which to stop heartbeat!\n\n");
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 42e361f3054f..e4d91493d7d7 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -423,7 +423,8 @@ static int ocfs2_read_locked_inode(struct inode *inode,
423 * cluster lock before trusting anything anyway. 423 * cluster lock before trusting anything anyway.
424 */ 424 */
425 can_lock = !(args->fi_flags & OCFS2_FI_FLAG_SYSFILE) 425 can_lock = !(args->fi_flags & OCFS2_FI_FLAG_SYSFILE)
426 && !(args->fi_flags & OCFS2_FI_FLAG_NOLOCK); 426 && !(args->fi_flags & OCFS2_FI_FLAG_NOLOCK)
427 && !ocfs2_mount_local(osb);
427 428
428 /* 429 /*
429 * To maintain backwards compatibility with older versions of 430 * To maintain backwards compatibility with older versions of
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 1d7f4ab1e5ed..825cb0ae1b4c 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -144,8 +144,10 @@ handle_t *ocfs2_start_trans(struct ocfs2_super *osb, int max_buffs)
144 ocfs2_abort(osb->sb, "Detected aborted journal"); 144 ocfs2_abort(osb->sb, "Detected aborted journal");
145 handle = ERR_PTR(-EROFS); 145 handle = ERR_PTR(-EROFS);
146 } 146 }
147 } else 147 } else {
148 atomic_inc(&(osb->journal->j_num_trans)); 148 if (!ocfs2_mount_local(osb))
149 atomic_inc(&(osb->journal->j_num_trans));
150 }
149 151
150 return handle; 152 return handle;
151} 153}
@@ -507,9 +509,23 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb)
507 509
508 BUG_ON(atomic_read(&(osb->journal->j_num_trans)) != 0); 510 BUG_ON(atomic_read(&(osb->journal->j_num_trans)) != 0);
509 511
510 status = ocfs2_journal_toggle_dirty(osb, 0); 512 if (ocfs2_mount_local(osb)) {
511 if (status < 0) 513 journal_lock_updates(journal->j_journal);
512 mlog_errno(status); 514 status = journal_flush(journal->j_journal);
515 journal_unlock_updates(journal->j_journal);
516 if (status < 0)
517 mlog_errno(status);
518 }
519
520 if (status == 0) {
521 /*
522 * Do not toggle if flush was unsuccessful otherwise
523 * will leave dirty metadata in a "clean" journal
524 */
525 status = ocfs2_journal_toggle_dirty(osb, 0);
526 if (status < 0)
527 mlog_errno(status);
528 }
513 529
514 /* Shutdown the kernel journal system */ 530 /* Shutdown the kernel journal system */
515 journal_destroy(journal->j_journal); 531 journal_destroy(journal->j_journal);
@@ -549,7 +565,7 @@ static void ocfs2_clear_journal_error(struct super_block *sb,
549 } 565 }
550} 566}
551 567
552int ocfs2_journal_load(struct ocfs2_journal *journal) 568int ocfs2_journal_load(struct ocfs2_journal *journal, int local)
553{ 569{
554 int status = 0; 570 int status = 0;
555 struct ocfs2_super *osb; 571 struct ocfs2_super *osb;
@@ -576,14 +592,18 @@ int ocfs2_journal_load(struct ocfs2_journal *journal)
576 } 592 }
577 593
578 /* Launch the commit thread */ 594 /* Launch the commit thread */
579 osb->commit_task = kthread_run(ocfs2_commit_thread, osb, "ocfs2cmt"); 595 if (!local) {
580 if (IS_ERR(osb->commit_task)) { 596 osb->commit_task = kthread_run(ocfs2_commit_thread, osb,
581 status = PTR_ERR(osb->commit_task); 597 "ocfs2cmt");
598 if (IS_ERR(osb->commit_task)) {
599 status = PTR_ERR(osb->commit_task);
600 osb->commit_task = NULL;
601 mlog(ML_ERROR, "unable to launch ocfs2commit thread, "
602 "error=%d", status);
603 goto done;
604 }
605 } else
582 osb->commit_task = NULL; 606 osb->commit_task = NULL;
583 mlog(ML_ERROR, "unable to launch ocfs2commit thread, error=%d",
584 status);
585 goto done;
586 }
587 607
588done: 608done:
589 mlog_exit(status); 609 mlog_exit(status);
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 899112ad8136..e1216364d191 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -157,7 +157,7 @@ int ocfs2_journal_init(struct ocfs2_journal *journal,
157void ocfs2_journal_shutdown(struct ocfs2_super *osb); 157void ocfs2_journal_shutdown(struct ocfs2_super *osb);
158int ocfs2_journal_wipe(struct ocfs2_journal *journal, 158int ocfs2_journal_wipe(struct ocfs2_journal *journal,
159 int full); 159 int full);
160int ocfs2_journal_load(struct ocfs2_journal *journal); 160int ocfs2_journal_load(struct ocfs2_journal *journal, int local);
161int ocfs2_check_journals_nolocks(struct ocfs2_super *osb); 161int ocfs2_check_journals_nolocks(struct ocfs2_super *osb);
162void ocfs2_recovery_thread(struct ocfs2_super *osb, 162void ocfs2_recovery_thread(struct ocfs2_super *osb,
163 int node_num); 163 int node_num);
@@ -174,6 +174,9 @@ static inline void ocfs2_checkpoint_inode(struct inode *inode)
174{ 174{
175 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 175 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
176 176
177 if (ocfs2_mount_local(osb))
178 return;
179
177 if (!ocfs2_inode_fully_checkpointed(inode)) { 180 if (!ocfs2_inode_fully_checkpointed(inode)) {
178 /* WARNING: This only kicks off a single 181 /* WARNING: This only kicks off a single
179 * checkpoint. If someone races you and adds more 182 * checkpoint. If someone races you and adds more
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index 698d79a74ef8..4dedd9789108 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -776,7 +776,7 @@ static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb,
776{ 776{
777 int status; 777 int status;
778 778
779 *ac = kcalloc(1, sizeof(struct ocfs2_alloc_context), GFP_KERNEL); 779 *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
780 if (!(*ac)) { 780 if (!(*ac)) {
781 status = -ENOMEM; 781 status = -ENOMEM;
782 mlog_errno(status); 782 mlog_errno(status);
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index 69f85ae392dc..51b020447683 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -83,10 +83,12 @@ static struct vm_operations_struct ocfs2_file_vm_ops = {
83int ocfs2_mmap(struct file *file, struct vm_area_struct *vma) 83int ocfs2_mmap(struct file *file, struct vm_area_struct *vma)
84{ 84{
85 int ret = 0, lock_level = 0; 85 int ret = 0, lock_level = 0;
86 struct ocfs2_super *osb = OCFS2_SB(file->f_dentry->d_inode->i_sb);
86 87
87 /* We don't want to support shared writable mappings yet. */ 88 /* We don't want to support shared writable mappings yet. */
88 if (((vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_MAYSHARE)) 89 if (!ocfs2_mount_local(osb) &&
89 && ((vma->vm_flags & VM_WRITE) || (vma->vm_flags & VM_MAYWRITE))) { 90 ((vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_MAYSHARE)) &&
91 ((vma->vm_flags & VM_WRITE) || (vma->vm_flags & VM_MAYWRITE))) {
90 mlog(0, "disallow shared writable mmaps %lx\n", vma->vm_flags); 92 mlog(0, "disallow shared writable mmaps %lx\n", vma->vm_flags);
91 /* This is -EINVAL because generic_file_readonly_mmap 93 /* This is -EINVAL because generic_file_readonly_mmap
92 * returns it in a similar situation. */ 94 * returns it in a similar situation. */
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 21db45ddf144..9637039c2633 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -587,9 +587,11 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb,
587 } 587 }
588 588
589 ocfs2_inode_set_new(osb, inode); 589 ocfs2_inode_set_new(osb, inode);
590 status = ocfs2_create_new_inode_locks(inode); 590 if (!ocfs2_mount_local(osb)) {
591 if (status < 0) 591 status = ocfs2_create_new_inode_locks(inode);
592 mlog_errno(status); 592 if (status < 0)
593 mlog_errno(status);
594 }
593 595
594 status = 0; /* error in ocfs2_create_new_inode_locks is not 596 status = 0; /* error in ocfs2_create_new_inode_locks is not
595 * critical */ 597 * critical */
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index b767fd7da6eb..db8e77cd35d3 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -349,6 +349,11 @@ static inline int ocfs2_is_soft_readonly(struct ocfs2_super *osb)
349 return ret; 349 return ret;
350} 350}
351 351
352static inline int ocfs2_mount_local(struct ocfs2_super *osb)
353{
354 return (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT);
355}
356
352#define OCFS2_IS_VALID_DINODE(ptr) \ 357#define OCFS2_IS_VALID_DINODE(ptr) \
353 (!strcmp((ptr)->i_signature, OCFS2_INODE_SIGNATURE)) 358 (!strcmp((ptr)->i_signature, OCFS2_INODE_SIGNATURE))
354 359
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 3330a5dc6be2..b5c68567077e 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -86,7 +86,7 @@
86 OCFS2_SB(sb)->s_feature_incompat &= ~(mask) 86 OCFS2_SB(sb)->s_feature_incompat &= ~(mask)
87 87
88#define OCFS2_FEATURE_COMPAT_SUPP 0 88#define OCFS2_FEATURE_COMPAT_SUPP 0
89#define OCFS2_FEATURE_INCOMPAT_SUPP 0 89#define OCFS2_FEATURE_INCOMPAT_SUPP OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT
90#define OCFS2_FEATURE_RO_COMPAT_SUPP 0 90#define OCFS2_FEATURE_RO_COMPAT_SUPP 0
91 91
92/* 92/*
@@ -96,6 +96,18 @@
96 */ 96 */
97#define OCFS2_FEATURE_INCOMPAT_HEARTBEAT_DEV 0x0002 97#define OCFS2_FEATURE_INCOMPAT_HEARTBEAT_DEV 0x0002
98 98
99/*
100 * tunefs sets this incompat flag before starting the resize and clears it
101 * at the end. This flag protects users from inadvertently mounting the fs
102 * after an aborted run without fsck-ing.
103 */
104#define OCFS2_FEATURE_INCOMPAT_RESIZE_INPROG 0x0004
105
106/* Used to denote a non-clustered volume */
107#define OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT 0x0008
108
109/* Support for sparse allocation in b-trees */
110#define OCFS2_FEATURE_INCOMPAT_SPARSE_ALLOC 0x0010
99 111
100/* 112/*
101 * Flags on ocfs2_dinode.i_flags 113 * Flags on ocfs2_dinode.i_flags
diff --git a/fs/ocfs2/slot_map.c b/fs/ocfs2/slot_map.c
index aa6f5aadedc4..2d3ac32cb74e 100644
--- a/fs/ocfs2/slot_map.c
+++ b/fs/ocfs2/slot_map.c
@@ -175,7 +175,7 @@ int ocfs2_init_slot_info(struct ocfs2_super *osb)
175 struct buffer_head *bh = NULL; 175 struct buffer_head *bh = NULL;
176 struct ocfs2_slot_info *si; 176 struct ocfs2_slot_info *si;
177 177
178 si = kcalloc(1, sizeof(struct ocfs2_slot_info), GFP_KERNEL); 178 si = kzalloc(sizeof(struct ocfs2_slot_info), GFP_KERNEL);
179 if (!si) { 179 if (!si) {
180 status = -ENOMEM; 180 status = -ENOMEM;
181 mlog_errno(status); 181 mlog_errno(status);
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 000d71cca6c5..6dbb11762759 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -488,7 +488,7 @@ int ocfs2_reserve_new_metadata(struct ocfs2_super *osb,
488 int status; 488 int status;
489 u32 slot; 489 u32 slot;
490 490
491 *ac = kcalloc(1, sizeof(struct ocfs2_alloc_context), GFP_KERNEL); 491 *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
492 if (!(*ac)) { 492 if (!(*ac)) {
493 status = -ENOMEM; 493 status = -ENOMEM;
494 mlog_errno(status); 494 mlog_errno(status);
@@ -530,7 +530,7 @@ int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
530{ 530{
531 int status; 531 int status;
532 532
533 *ac = kcalloc(1, sizeof(struct ocfs2_alloc_context), GFP_KERNEL); 533 *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
534 if (!(*ac)) { 534 if (!(*ac)) {
535 status = -ENOMEM; 535 status = -ENOMEM;
536 mlog_errno(status); 536 mlog_errno(status);
@@ -595,7 +595,7 @@ int ocfs2_reserve_clusters(struct ocfs2_super *osb,
595 595
596 mlog_entry_void(); 596 mlog_entry_void();
597 597
598 *ac = kcalloc(1, sizeof(struct ocfs2_alloc_context), GFP_KERNEL); 598 *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
599 if (!(*ac)) { 599 if (!(*ac)) {
600 status = -ENOMEM; 600 status = -ENOMEM;
601 mlog_errno(status); 601 mlog_errno(status);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 4bf39540e652..6e300a88a47e 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -508,6 +508,27 @@ bail:
508 return status; 508 return status;
509} 509}
510 510
511static int ocfs2_verify_heartbeat(struct ocfs2_super *osb)
512{
513 if (ocfs2_mount_local(osb)) {
514 if (osb->s_mount_opt & OCFS2_MOUNT_HB_LOCAL) {
515 mlog(ML_ERROR, "Cannot heartbeat on a locally "
516 "mounted device.\n");
517 return -EINVAL;
518 }
519 }
520
521 if (!(osb->s_mount_opt & OCFS2_MOUNT_HB_LOCAL)) {
522 if (!ocfs2_mount_local(osb) && !ocfs2_is_hard_readonly(osb)) {
523 mlog(ML_ERROR, "Heartbeat has to be started to mount "
524 "a read-write clustered device.\n");
525 return -EINVAL;
526 }
527 }
528
529 return 0;
530}
531
511static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) 532static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
512{ 533{
513 struct dentry *root; 534 struct dentry *root;
@@ -516,16 +537,24 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
516 struct inode *inode = NULL; 537 struct inode *inode = NULL;
517 struct ocfs2_super *osb = NULL; 538 struct ocfs2_super *osb = NULL;
518 struct buffer_head *bh = NULL; 539 struct buffer_head *bh = NULL;
540 char nodestr[8];
519 541
520 mlog_entry("%p, %p, %i", sb, data, silent); 542 mlog_entry("%p, %p, %i", sb, data, silent);
521 543
522 /* for now we only have one cluster/node, make sure we see it 544 if (!ocfs2_parse_options(sb, data, &parsed_opt, 0)) {
523 * in the heartbeat universe */
524 if (!o2hb_check_local_node_heartbeating()) {
525 status = -EINVAL; 545 status = -EINVAL;
526 goto read_super_error; 546 goto read_super_error;
527 } 547 }
528 548
549 /* for now we only have one cluster/node, make sure we see it
550 * in the heartbeat universe */
551 if (parsed_opt & OCFS2_MOUNT_HB_LOCAL) {
552 if (!o2hb_check_local_node_heartbeating()) {
553 status = -EINVAL;
554 goto read_super_error;
555 }
556 }
557
529 /* probe for superblock */ 558 /* probe for superblock */
530 status = ocfs2_sb_probe(sb, &bh, &sector_size); 559 status = ocfs2_sb_probe(sb, &bh, &sector_size);
531 if (status < 0) { 560 if (status < 0) {
@@ -541,11 +570,6 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
541 } 570 }
542 brelse(bh); 571 brelse(bh);
543 bh = NULL; 572 bh = NULL;
544
545 if (!ocfs2_parse_options(sb, data, &parsed_opt, 0)) {
546 status = -EINVAL;
547 goto read_super_error;
548 }
549 osb->s_mount_opt = parsed_opt; 573 osb->s_mount_opt = parsed_opt;
550 574
551 sb->s_magic = OCFS2_SUPER_MAGIC; 575 sb->s_magic = OCFS2_SUPER_MAGIC;
@@ -588,21 +612,16 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
588 } 612 }
589 613
590 if (!ocfs2_is_hard_readonly(osb)) { 614 if (!ocfs2_is_hard_readonly(osb)) {
591 /* If this isn't a hard readonly mount, then we need
592 * to make sure that heartbeat is in a valid state,
593 * and that we mark ourselves soft readonly is -oro
594 * was specified. */
595 if (!(osb->s_mount_opt & OCFS2_MOUNT_HB_LOCAL)) {
596 mlog(ML_ERROR, "No heartbeat for device (%s)\n",
597 sb->s_id);
598 status = -EINVAL;
599 goto read_super_error;
600 }
601
602 if (sb->s_flags & MS_RDONLY) 615 if (sb->s_flags & MS_RDONLY)
603 ocfs2_set_ro_flag(osb, 0); 616 ocfs2_set_ro_flag(osb, 0);
604 } 617 }
605 618
619 status = ocfs2_verify_heartbeat(osb);
620 if (status < 0) {
621 mlog_errno(status);
622 goto read_super_error;
623 }
624
606 osb->osb_debug_root = debugfs_create_dir(osb->uuid_str, 625 osb->osb_debug_root = debugfs_create_dir(osb->uuid_str,
607 ocfs2_debugfs_root); 626 ocfs2_debugfs_root);
608 if (!osb->osb_debug_root) { 627 if (!osb->osb_debug_root) {
@@ -635,9 +654,14 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
635 654
636 ocfs2_complete_mount_recovery(osb); 655 ocfs2_complete_mount_recovery(osb);
637 656
638 printk(KERN_INFO "ocfs2: Mounting device (%s) on (node %d, slot %d) " 657 if (ocfs2_mount_local(osb))
658 snprintf(nodestr, sizeof(nodestr), "local");
659 else
660 snprintf(nodestr, sizeof(nodestr), "%d", osb->node_num);
661
662 printk(KERN_INFO "ocfs2: Mounting device (%s) on (node %s, slot %d) "
639 "with %s data mode.\n", 663 "with %s data mode.\n",
640 osb->dev_str, osb->node_num, osb->slot_num, 664 osb->dev_str, nodestr, osb->slot_num,
641 osb->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK ? "writeback" : 665 osb->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK ? "writeback" :
642 "ordered"); 666 "ordered");
643 667
@@ -999,7 +1023,11 @@ static int ocfs2_fill_local_node_info(struct ocfs2_super *osb)
999 1023
1000 /* XXX hold a ref on the node while mounte? easy enough, if 1024 /* XXX hold a ref on the node while mounte? easy enough, if
1001 * desirable. */ 1025 * desirable. */
1002 osb->node_num = o2nm_this_node(); 1026 if (ocfs2_mount_local(osb))
1027 osb->node_num = 0;
1028 else
1029 osb->node_num = o2nm_this_node();
1030
1003 if (osb->node_num == O2NM_MAX_NODES) { 1031 if (osb->node_num == O2NM_MAX_NODES) {
1004 mlog(ML_ERROR, "could not find this host's node number\n"); 1032 mlog(ML_ERROR, "could not find this host's node number\n");
1005 status = -ENOENT; 1033 status = -ENOENT;
@@ -1084,6 +1112,9 @@ static int ocfs2_mount_volume(struct super_block *sb)
1084 goto leave; 1112 goto leave;
1085 } 1113 }
1086 1114
1115 if (ocfs2_mount_local(osb))
1116 goto leave;
1117
1087 /* This should be sent *after* we recovered our journal as it 1118 /* This should be sent *after* we recovered our journal as it
1088 * will cause other nodes to unmark us as needing 1119 * will cause other nodes to unmark us as needing
1089 * recovery. However, we need to send it *before* dropping the 1120 * recovery. However, we need to send it *before* dropping the
@@ -1114,6 +1145,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
1114{ 1145{
1115 int tmp; 1146 int tmp;
1116 struct ocfs2_super *osb = NULL; 1147 struct ocfs2_super *osb = NULL;
1148 char nodestr[8];
1117 1149
1118 mlog_entry("(0x%p)\n", sb); 1150 mlog_entry("(0x%p)\n", sb);
1119 1151
@@ -1177,8 +1209,13 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
1177 1209
1178 atomic_set(&osb->vol_state, VOLUME_DISMOUNTED); 1210 atomic_set(&osb->vol_state, VOLUME_DISMOUNTED);
1179 1211
1180 printk(KERN_INFO "ocfs2: Unmounting device (%s) on (node %d)\n", 1212 if (ocfs2_mount_local(osb))
1181 osb->dev_str, osb->node_num); 1213 snprintf(nodestr, sizeof(nodestr), "local");
1214 else
1215 snprintf(nodestr, sizeof(nodestr), "%d", osb->node_num);
1216
1217 printk(KERN_INFO "ocfs2: Unmounting device (%s) on (node %s)\n",
1218 osb->dev_str, nodestr);
1182 1219
1183 ocfs2_delete_osb(osb); 1220 ocfs2_delete_osb(osb);
1184 kfree(osb); 1221 kfree(osb);
@@ -1194,7 +1231,7 @@ static int ocfs2_setup_osb_uuid(struct ocfs2_super *osb, const unsigned char *uu
1194 1231
1195 BUG_ON(uuid_bytes != OCFS2_VOL_UUID_LEN); 1232 BUG_ON(uuid_bytes != OCFS2_VOL_UUID_LEN);
1196 1233
1197 osb->uuid_str = kcalloc(1, OCFS2_VOL_UUID_LEN * 2 + 1, GFP_KERNEL); 1234 osb->uuid_str = kzalloc(OCFS2_VOL_UUID_LEN * 2 + 1, GFP_KERNEL);
1198 if (osb->uuid_str == NULL) 1235 if (osb->uuid_str == NULL)
1199 return -ENOMEM; 1236 return -ENOMEM;
1200 1237
@@ -1225,7 +1262,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
1225 1262
1226 mlog_entry_void(); 1263 mlog_entry_void();
1227 1264
1228 osb = kcalloc(1, sizeof(struct ocfs2_super), GFP_KERNEL); 1265 osb = kzalloc(sizeof(struct ocfs2_super), GFP_KERNEL);
1229 if (!osb) { 1266 if (!osb) {
1230 status = -ENOMEM; 1267 status = -ENOMEM;
1231 mlog_errno(status); 1268 mlog_errno(status);
@@ -1350,7 +1387,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
1350 */ 1387 */
1351 /* initialize our journal structure */ 1388 /* initialize our journal structure */
1352 1389
1353 journal = kcalloc(1, sizeof(struct ocfs2_journal), GFP_KERNEL); 1390 journal = kzalloc(sizeof(struct ocfs2_journal), GFP_KERNEL);
1354 if (!journal) { 1391 if (!journal) {
1355 mlog(ML_ERROR, "unable to alloc journal\n"); 1392 mlog(ML_ERROR, "unable to alloc journal\n");
1356 status = -ENOMEM; 1393 status = -ENOMEM;
@@ -1536,6 +1573,7 @@ static int ocfs2_check_volume(struct ocfs2_super *osb)
1536{ 1573{
1537 int status = 0; 1574 int status = 0;
1538 int dirty; 1575 int dirty;
1576 int local;
1539 struct ocfs2_dinode *local_alloc = NULL; /* only used if we 1577 struct ocfs2_dinode *local_alloc = NULL; /* only used if we
1540 * recover 1578 * recover
1541 * ourselves. */ 1579 * ourselves. */
@@ -1563,8 +1601,10 @@ static int ocfs2_check_volume(struct ocfs2_super *osb)
1563 "recovering volume.\n"); 1601 "recovering volume.\n");
1564 } 1602 }
1565 1603
1604 local = ocfs2_mount_local(osb);
1605
1566 /* will play back anything left in the journal. */ 1606 /* will play back anything left in the journal. */
1567 ocfs2_journal_load(osb->journal); 1607 ocfs2_journal_load(osb->journal, local);
1568 1608
1569 if (dirty) { 1609 if (dirty) {
1570 /* recover my local alloc if we didn't unmount cleanly. */ 1610 /* recover my local alloc if we didn't unmount cleanly. */
diff --git a/fs/ocfs2/vote.c b/fs/ocfs2/vote.c
index 5b4dca79990b..0afd8b9af70f 100644
--- a/fs/ocfs2/vote.c
+++ b/fs/ocfs2/vote.c
@@ -479,7 +479,7 @@ static struct ocfs2_net_wait_ctxt *ocfs2_new_net_wait_ctxt(unsigned int response
479{ 479{
480 struct ocfs2_net_wait_ctxt *w; 480 struct ocfs2_net_wait_ctxt *w;
481 481
482 w = kcalloc(1, sizeof(*w), GFP_NOFS); 482 w = kzalloc(sizeof(*w), GFP_NOFS);
483 if (!w) { 483 if (!w) {
484 mlog_errno(-ENOMEM); 484 mlog_errno(-ENOMEM);
485 goto bail; 485 goto bail;
@@ -642,7 +642,7 @@ static struct ocfs2_vote_msg * ocfs2_new_vote_request(struct ocfs2_super *osb,
642 642
643 BUG_ON(!ocfs2_is_valid_vote_request(type)); 643 BUG_ON(!ocfs2_is_valid_vote_request(type));
644 644
645 request = kcalloc(1, sizeof(*request), GFP_NOFS); 645 request = kzalloc(sizeof(*request), GFP_NOFS);
646 if (!request) { 646 if (!request) {
647 mlog_errno(-ENOMEM); 647 mlog_errno(-ENOMEM);
648 } else { 648 } else {
@@ -1000,6 +1000,9 @@ int ocfs2_register_net_handlers(struct ocfs2_super *osb)
1000{ 1000{
1001 int status = 0; 1001 int status = 0;
1002 1002
1003 if (ocfs2_mount_local(osb))
1004 return 0;
1005
1003 status = o2net_register_handler(OCFS2_MESSAGE_TYPE_RESPONSE, 1006 status = o2net_register_handler(OCFS2_MESSAGE_TYPE_RESPONSE,
1004 osb->net_key, 1007 osb->net_key,
1005 sizeof(struct ocfs2_response_msg), 1008 sizeof(struct ocfs2_response_msg),
diff --git a/fs/open.c b/fs/open.c
index 0d94319e8681..c989fb4cf7b9 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -864,8 +864,7 @@ int get_unused_fd(void)
864 864
865repeat: 865repeat:
866 fdt = files_fdtable(files); 866 fdt = files_fdtable(files);
867 fd = find_next_zero_bit(fdt->open_fds->fds_bits, 867 fd = find_next_zero_bit(fdt->open_fds->fds_bits, fdt->max_fds,
868 fdt->max_fdset,
869 files->next_fd); 868 files->next_fd);
870 869
871 /* 870 /*
diff --git a/fs/pipe.c b/fs/pipe.c
index f8b6bdcb879a..9a06e8e48e8d 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -207,7 +207,7 @@ int generic_pipe_buf_pin(struct pipe_inode_info *info, struct pipe_buffer *buf)
207 return 0; 207 return 0;
208} 208}
209 209
210static struct pipe_buf_operations anon_pipe_buf_ops = { 210static const struct pipe_buf_operations anon_pipe_buf_ops = {
211 .can_merge = 1, 211 .can_merge = 1,
212 .map = generic_pipe_buf_map, 212 .map = generic_pipe_buf_map,
213 .unmap = generic_pipe_buf_unmap, 213 .unmap = generic_pipe_buf_unmap,
@@ -243,7 +243,7 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
243 if (bufs) { 243 if (bufs) {
244 int curbuf = pipe->curbuf; 244 int curbuf = pipe->curbuf;
245 struct pipe_buffer *buf = pipe->bufs + curbuf; 245 struct pipe_buffer *buf = pipe->bufs + curbuf;
246 struct pipe_buf_operations *ops = buf->ops; 246 const struct pipe_buf_operations *ops = buf->ops;
247 void *addr; 247 void *addr;
248 size_t chars = buf->len; 248 size_t chars = buf->len;
249 int error, atomic; 249 int error, atomic;
@@ -365,7 +365,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
365 int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) & 365 int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
366 (PIPE_BUFFERS-1); 366 (PIPE_BUFFERS-1);
367 struct pipe_buffer *buf = pipe->bufs + lastbuf; 367 struct pipe_buffer *buf = pipe->bufs + lastbuf;
368 struct pipe_buf_operations *ops = buf->ops; 368 const struct pipe_buf_operations *ops = buf->ops;
369 int offset = buf->offset + buf->len; 369 int offset = buf->offset + buf->len;
370 370
371 if (ops->can_merge && offset + chars <= PAGE_SIZE) { 371 if (ops->can_merge && offset + chars <= PAGE_SIZE) {
@@ -756,7 +756,7 @@ const struct file_operations rdwr_fifo_fops = {
756 .fasync = pipe_rdwr_fasync, 756 .fasync = pipe_rdwr_fasync,
757}; 757};
758 758
759static struct file_operations read_pipe_fops = { 759static const struct file_operations read_pipe_fops = {
760 .llseek = no_llseek, 760 .llseek = no_llseek,
761 .read = do_sync_read, 761 .read = do_sync_read,
762 .aio_read = pipe_read, 762 .aio_read = pipe_read,
@@ -768,7 +768,7 @@ static struct file_operations read_pipe_fops = {
768 .fasync = pipe_read_fasync, 768 .fasync = pipe_read_fasync,
769}; 769};
770 770
771static struct file_operations write_pipe_fops = { 771static const struct file_operations write_pipe_fops = {
772 .llseek = no_llseek, 772 .llseek = no_llseek,
773 .read = bad_pipe_r, 773 .read = bad_pipe_r,
774 .write = do_sync_write, 774 .write = do_sync_write,
@@ -780,7 +780,7 @@ static struct file_operations write_pipe_fops = {
780 .fasync = pipe_write_fasync, 780 .fasync = pipe_write_fasync,
781}; 781};
782 782
783static struct file_operations rdwr_pipe_fops = { 783static const struct file_operations rdwr_pipe_fops = {
784 .llseek = no_llseek, 784 .llseek = no_llseek,
785 .read = do_sync_read, 785 .read = do_sync_read,
786 .aio_read = pipe_read, 786 .aio_read = pipe_read,
diff --git a/fs/proc/base.c b/fs/proc/base.c
index fd959d5b5a80..77a57b5799c4 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1804,6 +1804,27 @@ static int proc_base_fill_cache(struct file *filp, void *dirent, filldir_t filld
1804 proc_base_instantiate, task, p); 1804 proc_base_instantiate, task, p);
1805} 1805}
1806 1806
1807#ifdef CONFIG_TASK_IO_ACCOUNTING
1808static int proc_pid_io_accounting(struct task_struct *task, char *buffer)
1809{
1810 return sprintf(buffer,
1811 "rchar: %llu\n"
1812 "wchar: %llu\n"
1813 "syscr: %llu\n"
1814 "syscw: %llu\n"
1815 "read_bytes: %llu\n"
1816 "write_bytes: %llu\n"
1817 "cancelled_write_bytes: %llu\n",
1818 (unsigned long long)task->rchar,
1819 (unsigned long long)task->wchar,
1820 (unsigned long long)task->syscr,
1821 (unsigned long long)task->syscw,
1822 (unsigned long long)task->ioac.read_bytes,
1823 (unsigned long long)task->ioac.write_bytes,
1824 (unsigned long long)task->ioac.cancelled_write_bytes);
1825}
1826#endif
1827
1807/* 1828/*
1808 * Thread groups 1829 * Thread groups
1809 */ 1830 */
@@ -1855,6 +1876,9 @@ static struct pid_entry tgid_base_stuff[] = {
1855#ifdef CONFIG_FAULT_INJECTION 1876#ifdef CONFIG_FAULT_INJECTION
1856 REG("make-it-fail", S_IRUGO|S_IWUSR, fault_inject), 1877 REG("make-it-fail", S_IRUGO|S_IWUSR, fault_inject),
1857#endif 1878#endif
1879#ifdef CONFIG_TASK_IO_ACCOUNTING
1880 INF("io", S_IRUGO, pid_io_accounting),
1881#endif
1858}; 1882};
1859 1883
1860static int proc_tgid_base_readdir(struct file * filp, 1884static int proc_tgid_base_readdir(struct file * filp,
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index dc3e580d1dca..92ea7743fe8f 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -47,6 +47,7 @@
47#include <linux/vmalloc.h> 47#include <linux/vmalloc.h>
48#include <linux/crash_dump.h> 48#include <linux/crash_dump.h>
49#include <linux/pid_namespace.h> 49#include <linux/pid_namespace.h>
50#include <linux/compile.h>
50#include <asm/uaccess.h> 51#include <asm/uaccess.h>
51#include <asm/pgtable.h> 52#include <asm/pgtable.h>
52#include <asm/io.h> 53#include <asm/io.h>
@@ -253,8 +254,15 @@ static int version_read_proc(char *page, char **start, off_t off,
253{ 254{
254 int len; 255 int len;
255 256
256 len = sprintf(page, linux_banner, 257 /* FIXED STRING! Don't touch! */
257 utsname()->release, utsname()->version); 258 len = snprintf(page, PAGE_SIZE,
259 "%s version %s"
260 " (" LINUX_COMPILE_BY "@" LINUX_COMPILE_HOST ")"
261 " (" LINUX_COMPILER ")"
262 " %s\n",
263 utsname()->sysname,
264 utsname()->release,
265 utsname()->version);
258 return proc_calc_metrics(page, start, off, count, eof, len); 266 return proc_calc_metrics(page, start, off, count, eof, len);
259} 267}
260 268
diff --git a/fs/read_write.c b/fs/read_write.c
index 1d3dda4fa70c..707ac21700d3 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -450,8 +450,6 @@ unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to)
450 return seg; 450 return seg;
451} 451}
452 452
453EXPORT_UNUSED_SYMBOL(iov_shorten); /* June 2006 */
454
455ssize_t do_sync_readv_writev(struct file *filp, const struct iovec *iov, 453ssize_t do_sync_readv_writev(struct file *filp, const struct iovec *iov,
456 unsigned long nr_segs, size_t len, loff_t *ppos, iov_fn_t fn) 454 unsigned long nr_segs, size_t len, loff_t *ppos, iov_fn_t fn)
457{ 455{
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index 97ae1b92bc47..5296a29cc5eb 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -135,7 +135,7 @@ static void *posix_acl_to_disk(const struct posix_acl *acl, size_t * size)
135 int n; 135 int n;
136 136
137 *size = reiserfs_acl_size(acl->a_count); 137 *size = reiserfs_acl_size(acl->a_count);
138 ext_acl = (reiserfs_acl_header *) kmalloc(sizeof(reiserfs_acl_header) + 138 ext_acl = kmalloc(sizeof(reiserfs_acl_header) +
139 acl->a_count * 139 acl->a_count *
140 sizeof(reiserfs_acl_entry), 140 sizeof(reiserfs_acl_entry),
141 GFP_NOFS); 141 GFP_NOFS);
diff --git a/fs/select.c b/fs/select.c
index dcbc1112b7ec..fe0893afd931 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -311,7 +311,7 @@ static int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
311{ 311{
312 fd_set_bits fds; 312 fd_set_bits fds;
313 void *bits; 313 void *bits;
314 int ret, max_fdset; 314 int ret, max_fds;
315 unsigned int size; 315 unsigned int size;
316 struct fdtable *fdt; 316 struct fdtable *fdt;
317 /* Allocate small arguments on the stack to save memory and be faster */ 317 /* Allocate small arguments on the stack to save memory and be faster */
@@ -321,13 +321,13 @@ static int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
321 if (n < 0) 321 if (n < 0)
322 goto out_nofds; 322 goto out_nofds;
323 323
324 /* max_fdset can increase, so grab it once to avoid race */ 324 /* max_fds can increase, so grab it once to avoid race */
325 rcu_read_lock(); 325 rcu_read_lock();
326 fdt = files_fdtable(current->files); 326 fdt = files_fdtable(current->files);
327 max_fdset = fdt->max_fdset; 327 max_fds = fdt->max_fds;
328 rcu_read_unlock(); 328 rcu_read_unlock();
329 if (n > max_fdset) 329 if (n > max_fds)
330 n = max_fdset; 330 n = max_fds;
331 331
332 /* 332 /*
333 * We need 6 bitmaps (in/out/ex for both incoming and outgoing), 333 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c
index 4af4cd729a5a..84dfe3f3482e 100644
--- a/fs/smbfs/inode.c
+++ b/fs/smbfs/inode.c
@@ -482,12 +482,13 @@ smb_put_super(struct super_block *sb)
482 smb_close_socket(server); 482 smb_close_socket(server);
483 483
484 if (server->conn_pid) 484 if (server->conn_pid)
485 kill_proc(server->conn_pid, SIGTERM, 1); 485 kill_pid(server->conn_pid, SIGTERM, 1);
486 486
487 kfree(server->ops); 487 kfree(server->ops);
488 smb_unload_nls(server); 488 smb_unload_nls(server);
489 sb->s_fs_info = NULL; 489 sb->s_fs_info = NULL;
490 smb_unlock_server(server); 490 smb_unlock_server(server);
491 put_pid(server->conn_pid);
491 kfree(server); 492 kfree(server);
492} 493}
493 494
@@ -530,7 +531,7 @@ static int smb_fill_super(struct super_block *sb, void *raw_data, int silent)
530 INIT_LIST_HEAD(&server->xmitq); 531 INIT_LIST_HEAD(&server->xmitq);
531 INIT_LIST_HEAD(&server->recvq); 532 INIT_LIST_HEAD(&server->recvq);
532 server->conn_error = 0; 533 server->conn_error = 0;
533 server->conn_pid = 0; 534 server->conn_pid = NULL;
534 server->state = CONN_INVALID; /* no connection yet */ 535 server->state = CONN_INVALID; /* no connection yet */
535 server->generation = 0; 536 server->generation = 0;
536 537
diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
index a5ced9e0c6c4..feac46050619 100644
--- a/fs/smbfs/proc.c
+++ b/fs/smbfs/proc.c
@@ -877,7 +877,7 @@ smb_newconn(struct smb_sb_info *server, struct smb_conn_opt *opt)
877 goto out_putf; 877 goto out_putf;
878 878
879 server->sock_file = filp; 879 server->sock_file = filp;
880 server->conn_pid = current->pid; 880 server->conn_pid = get_pid(task_pid(current));
881 server->opt = *opt; 881 server->opt = *opt;
882 server->generation += 1; 882 server->generation += 1;
883 server->state = CONN_VALID; 883 server->state = CONN_VALID;
@@ -971,8 +971,8 @@ smb_newconn(struct smb_sb_info *server, struct smb_conn_opt *opt)
971 } 971 }
972 972
973 VERBOSE("protocol=%d, max_xmit=%d, pid=%d capabilities=0x%x\n", 973 VERBOSE("protocol=%d, max_xmit=%d, pid=%d capabilities=0x%x\n",
974 server->opt.protocol, server->opt.max_xmit, server->conn_pid, 974 server->opt.protocol, server->opt.max_xmit,
975 server->opt.capabilities); 975 pid_nr(server->conn_pid), server->opt.capabilities);
976 976
977 /* FIXME: this really should be done by smbmount. */ 977 /* FIXME: this really should be done by smbmount. */
978 if (server->opt.max_xmit > SMB_MAX_PACKET_SIZE) { 978 if (server->opt.max_xmit > SMB_MAX_PACKET_SIZE) {
diff --git a/fs/smbfs/smbiod.c b/fs/smbfs/smbiod.c
index e67540441288..89eaf31f1d46 100644
--- a/fs/smbfs/smbiod.c
+++ b/fs/smbfs/smbiod.c
@@ -152,7 +152,7 @@ int smbiod_retry(struct smb_sb_info *server)
152{ 152{
153 struct list_head *head; 153 struct list_head *head;
154 struct smb_request *req; 154 struct smb_request *req;
155 pid_t pid = server->conn_pid; 155 struct pid *pid = get_pid(server->conn_pid);
156 int result = 0; 156 int result = 0;
157 157
158 VERBOSE("state: %d\n", server->state); 158 VERBOSE("state: %d\n", server->state);
@@ -222,7 +222,7 @@ int smbiod_retry(struct smb_sb_info *server)
222 /* 222 /*
223 * Note: use the "priv" flag, as a user process may need to reconnect. 223 * Note: use the "priv" flag, as a user process may need to reconnect.
224 */ 224 */
225 result = kill_proc(pid, SIGUSR1, 1); 225 result = kill_pid(pid, SIGUSR1, 1);
226 if (result) { 226 if (result) {
227 /* FIXME: this is most likely fatal, umount? */ 227 /* FIXME: this is most likely fatal, umount? */
228 printk(KERN_ERR "smb_retry: signal failed [%d]\n", result); 228 printk(KERN_ERR "smb_retry: signal failed [%d]\n", result);
@@ -233,6 +233,7 @@ int smbiod_retry(struct smb_sb_info *server)
233 /* FIXME: The retried requests should perhaps get a "time boost". */ 233 /* FIXME: The retried requests should perhaps get a "time boost". */
234 234
235out: 235out:
236 put_pid(pid);
236 return result; 237 return result;
237} 238}
238 239
diff --git a/fs/splice.c b/fs/splice.c
index bbd0aeb3f68e..2fca6ebf4cc2 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -42,7 +42,7 @@ struct splice_pipe_desc {
42 struct partial_page *partial; /* pages[] may not be contig */ 42 struct partial_page *partial; /* pages[] may not be contig */
43 int nr_pages; /* number of pages in map */ 43 int nr_pages; /* number of pages in map */
44 unsigned int flags; /* splice flags */ 44 unsigned int flags; /* splice flags */
45 struct pipe_buf_operations *ops;/* ops associated with output pipe */ 45 const struct pipe_buf_operations *ops;/* ops associated with output pipe */
46}; 46};
47 47
48/* 48/*
@@ -139,7 +139,7 @@ error:
139 return err; 139 return err;
140} 140}
141 141
142static struct pipe_buf_operations page_cache_pipe_buf_ops = { 142static const struct pipe_buf_operations page_cache_pipe_buf_ops = {
143 .can_merge = 0, 143 .can_merge = 0,
144 .map = generic_pipe_buf_map, 144 .map = generic_pipe_buf_map,
145 .unmap = generic_pipe_buf_unmap, 145 .unmap = generic_pipe_buf_unmap,
@@ -159,7 +159,7 @@ static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
159 return generic_pipe_buf_steal(pipe, buf); 159 return generic_pipe_buf_steal(pipe, buf);
160} 160}
161 161
162static struct pipe_buf_operations user_page_pipe_buf_ops = { 162static const struct pipe_buf_operations user_page_pipe_buf_ops = {
163 .can_merge = 0, 163 .can_merge = 0,
164 .map = generic_pipe_buf_map, 164 .map = generic_pipe_buf_map,
165 .unmap = generic_pipe_buf_unmap, 165 .unmap = generic_pipe_buf_unmap,
@@ -724,7 +724,7 @@ static ssize_t __splice_from_pipe(struct pipe_inode_info *pipe,
724 for (;;) { 724 for (;;) {
725 if (pipe->nrbufs) { 725 if (pipe->nrbufs) {
726 struct pipe_buffer *buf = pipe->bufs + pipe->curbuf; 726 struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
727 struct pipe_buf_operations *ops = buf->ops; 727 const struct pipe_buf_operations *ops = buf->ops;
728 728
729 sd.len = buf->len; 729 sd.len = buf->len;
730 if (sd.len > sd.total_len) 730 if (sd.len > sd.total_len)
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 8e6b56fc1cad..b56eb754e2d2 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -1406,7 +1406,7 @@ xfs_vm_direct_IO(
1406 xfs_end_io_direct); 1406 xfs_end_io_direct);
1407 } 1407 }
1408 1408
1409 if (unlikely(ret <= 0 && iocb->private)) 1409 if (unlikely(ret != -EIOCBQUEUED && iocb->private))
1410 xfs_destroy_ioend(iocb->private); 1410 xfs_destroy_ioend(iocb->private);
1411 return ret; 1411 return ret;
1412} 1412}
diff --git a/include/asm-arm/arch-pnx4008/i2c.h b/include/asm-arm/arch-pnx4008/i2c.h
new file mode 100644
index 000000000000..92e8d65006f7
--- /dev/null
+++ b/include/asm-arm/arch-pnx4008/i2c.h
@@ -0,0 +1,67 @@
1/*
2 * PNX4008-specific tweaks for I2C IP3204 block
3 *
4 * Author: Vitaly Wool <vwool@ru.mvista.com>
5 *
6 * 2005 (c) MontaVista Software, Inc. This file is licensed under
7 * the terms of the GNU General Public License version 2. This program
8 * is licensed "as is" without any warranty of any kind, whether express
9 * or implied.
10 */
11
12#ifndef __ASM_ARCH_I2C_H__
13#define __ASM_ARCH_I2C_H__
14
15#include <linux/pm.h>
16#include <linux/platform_device.h>
17
18enum {
19 mstatus_tdi = 0x00000001,
20 mstatus_afi = 0x00000002,
21 mstatus_nai = 0x00000004,
22 mstatus_drmi = 0x00000008,
23 mstatus_active = 0x00000020,
24 mstatus_scl = 0x00000040,
25 mstatus_sda = 0x00000080,
26 mstatus_rff = 0x00000100,
27 mstatus_rfe = 0x00000200,
28 mstatus_tff = 0x00000400,
29 mstatus_tfe = 0x00000800,
30};
31
32enum {
33 mcntrl_tdie = 0x00000001,
34 mcntrl_afie = 0x00000002,
35 mcntrl_naie = 0x00000004,
36 mcntrl_drmie = 0x00000008,
37 mcntrl_daie = 0x00000020,
38 mcntrl_rffie = 0x00000040,
39 mcntrl_tffie = 0x00000080,
40 mcntrl_reset = 0x00000100,
41 mcntrl_cdbmode = 0x00000400,
42};
43
44enum {
45 rw_bit = 1 << 0,
46 start_bit = 1 << 8,
47 stop_bit = 1 << 9,
48};
49
50#define I2C_REG_RX(a) ((a)->ioaddr) /* Rx FIFO reg (RO) */
51#define I2C_REG_TX(a) ((a)->ioaddr) /* Tx FIFO reg (WO) */
52#define I2C_REG_STS(a) ((a)->ioaddr + 0x04) /* Status reg (RO) */
53#define I2C_REG_CTL(a) ((a)->ioaddr + 0x08) /* Ctl reg */
54#define I2C_REG_CKL(a) ((a)->ioaddr + 0x0c) /* Clock divider low */
55#define I2C_REG_CKH(a) ((a)->ioaddr + 0x10) /* Clock divider high */
56#define I2C_REG_ADR(a) ((a)->ioaddr + 0x14) /* I2C address */
57#define I2C_REG_RFL(a) ((a)->ioaddr + 0x18) /* Rx FIFO level (RO) */
58#define I2C_REG_TFL(a) ((a)->ioaddr + 0x1c) /* Tx FIFO level (RO) */
59#define I2C_REG_RXB(a) ((a)->ioaddr + 0x20) /* Num of bytes Rx-ed (RO) */
60#define I2C_REG_TXB(a) ((a)->ioaddr + 0x24) /* Num of bytes Tx-ed (RO) */
61#define I2C_REG_TXS(a) ((a)->ioaddr + 0x28) /* Tx slave FIFO (RO) */
62#define I2C_REG_STFL(a) ((a)->ioaddr + 0x2c) /* Tx slave FIFO level (RO) */
63
64#define HCLK_MHZ 13
65#define I2C_CHIP_NAME "PNX4008-I2C"
66
67#endif /* __ASM_ARCH_I2C_H___ */
diff --git a/include/asm-arm/arch-pxa/pxa2xx_spi.h b/include/asm-arm/arch-pxa/pxa2xx_spi.h
index 915590c391c8..acc7ec7a84a1 100644
--- a/include/asm-arm/arch-pxa/pxa2xx_spi.h
+++ b/include/asm-arm/arch-pxa/pxa2xx_spi.h
@@ -27,16 +27,13 @@
27#define SSP1_SerClkDiv(x) (((CLOCK_SPEED_HZ/2/(x+1))<<8)&0x0000ff00) 27#define SSP1_SerClkDiv(x) (((CLOCK_SPEED_HZ/2/(x+1))<<8)&0x0000ff00)
28#define SSP2_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00) 28#define SSP2_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00)
29#define SSP3_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00) 29#define SSP3_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00)
30#define SSP_TIMEOUT_SCALE (2712)
31#elif defined(CONFIG_PXA27x) 30#elif defined(CONFIG_PXA27x)
32#define CLOCK_SPEED_HZ 13000000 31#define CLOCK_SPEED_HZ 13000000
33#define SSP1_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00) 32#define SSP1_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00)
34#define SSP2_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00) 33#define SSP2_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00)
35#define SSP3_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00) 34#define SSP3_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00)
36#define SSP_TIMEOUT_SCALE (769)
37#endif 35#endif
38 36
39#define SSP_TIMEOUT(x) ((x*10000)/SSP_TIMEOUT_SCALE)
40#define SSP1_VIRT ((void *)(io_p2v(__PREG(SSCR0_P(1))))) 37#define SSP1_VIRT ((void *)(io_p2v(__PREG(SSCR0_P(1)))))
41#define SSP2_VIRT ((void *)(io_p2v(__PREG(SSCR0_P(2))))) 38#define SSP2_VIRT ((void *)(io_p2v(__PREG(SSCR0_P(2)))))
42#define SSP3_VIRT ((void *)(io_p2v(__PREG(SSCR0_P(3))))) 39#define SSP3_VIRT ((void *)(io_p2v(__PREG(SSCR0_P(3)))))
@@ -63,7 +60,7 @@ struct pxa2xx_spi_chip {
63 u8 tx_threshold; 60 u8 tx_threshold;
64 u8 rx_threshold; 61 u8 rx_threshold;
65 u8 dma_burst_size; 62 u8 dma_burst_size;
66 u32 timeout_microsecs; 63 u32 timeout;
67 u8 enable_loopback; 64 u8 enable_loopback;
68 void (*cs_control)(u32 command); 65 void (*cs_control)(u32 command);
69}; 66};
diff --git a/include/asm-arm/thread_info.h b/include/asm-arm/thread_info.h
index d9b8bddc8732..5014794f9eb3 100644
--- a/include/asm-arm/thread_info.h
+++ b/include/asm-arm/thread_info.h
@@ -147,6 +147,7 @@ extern void iwmmxt_task_switch(struct thread_info *);
147#define TIF_POLLING_NRFLAG 16 147#define TIF_POLLING_NRFLAG 16
148#define TIF_USING_IWMMXT 17 148#define TIF_USING_IWMMXT 17
149#define TIF_MEMDIE 18 149#define TIF_MEMDIE 18
150#define TIF_FREEZE 19
150 151
151#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 152#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
152#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 153#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
@@ -154,6 +155,7 @@ extern void iwmmxt_task_switch(struct thread_info *);
154#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 155#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
155#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) 156#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
156#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT) 157#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
158#define _TIF_FREEZE (1 << TIF_FREEZE)
157 159
158/* 160/*
159 * Change these and you break ASM code in entry-common.S 161 * Change these and you break ASM code in entry-common.S
diff --git a/include/asm-avr32/arch-at32ap/at32ap7000.h b/include/asm-avr32/arch-at32ap/at32ap7000.h
new file mode 100644
index 000000000000..ba85e04553d4
--- /dev/null
+++ b/include/asm-avr32/arch-at32ap/at32ap7000.h
@@ -0,0 +1,33 @@
1/*
2 * Pin definitions for AT32AP7000.
3 *
4 * Copyright (C) 2006 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#ifndef __ASM_ARCH_AT32AP7000_H__
11#define __ASM_ARCH_AT32AP7000_H__
12
13#define GPIO_PERIPH_A 0
14#define GPIO_PERIPH_B 1
15
16#define NR_GPIO_CONTROLLERS 4
17
18/*
19 * Pin numbers identifying specific GPIO pins on the chip. They can
20 * also be converted to IRQ numbers by passing them through
21 * gpio_to_irq().
22 */
23#define GPIO_PIOA_BASE (0)
24#define GPIO_PIOB_BASE (GPIO_PIOA_BASE + 32)
25#define GPIO_PIOC_BASE (GPIO_PIOB_BASE + 32)
26#define GPIO_PIOD_BASE (GPIO_PIOC_BASE + 32)
27
28#define GPIO_PIN_PA(N) (GPIO_PIOA_BASE + (N))
29#define GPIO_PIN_PB(N) (GPIO_PIOB_BASE + (N))
30#define GPIO_PIN_PC(N) (GPIO_PIOC_BASE + (N))
31#define GPIO_PIN_PD(N) (GPIO_PIOD_BASE + (N))
32
33#endif /* __ASM_ARCH_AT32AP7000_H__ */
diff --git a/include/asm-avr32/arch-at32ap/board.h b/include/asm-avr32/arch-at32ap/board.h
index a39b3e999f18..b120ee030c86 100644
--- a/include/asm-avr32/arch-at32ap/board.h
+++ b/include/asm-avr32/arch-at32ap/board.h
@@ -21,10 +21,7 @@ void at32_map_usart(unsigned int hw_id, unsigned int line);
21struct platform_device *at32_add_device_usart(unsigned int id); 21struct platform_device *at32_add_device_usart(unsigned int id);
22 22
23struct eth_platform_data { 23struct eth_platform_data {
24 u8 valid;
25 u8 mii_phy_addr;
26 u8 is_rmii; 24 u8 is_rmii;
27 u8 hw_addr[6];
28}; 25};
29struct platform_device * 26struct platform_device *
30at32_add_device_eth(unsigned int id, struct eth_platform_data *data); 27at32_add_device_eth(unsigned int id, struct eth_platform_data *data);
diff --git a/include/asm-avr32/arch-at32ap/portmux.h b/include/asm-avr32/arch-at32ap/portmux.h
index 4d50421262a1..83c690571322 100644
--- a/include/asm-avr32/arch-at32ap/portmux.h
+++ b/include/asm-avr32/arch-at32ap/portmux.h
@@ -7,10 +7,20 @@
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10#ifndef __ASM_AVR32_AT32_PORTMUX_H__ 10#ifndef __ASM_ARCH_PORTMUX_H__
11#define __ASM_AVR32_AT32_PORTMUX_H__ 11#define __ASM_ARCH_PORTMUX_H__
12 12
13void portmux_set_func(unsigned int portmux_id, unsigned int pin_id, 13/*
14 unsigned int function_id); 14 * Set up pin multiplexing, called from board init only.
15 *
16 * The following flags determine the initial state of the pin.
17 */
18#define AT32_GPIOF_PULLUP 0x00000001 /* Enable pull-up */
19#define AT32_GPIOF_OUTPUT 0x00000002 /* Enable output driver */
20#define AT32_GPIOF_HIGH 0x00000004 /* Set output high */
21
22void at32_select_periph(unsigned int pin, unsigned int periph,
23 unsigned long flags);
24void at32_select_gpio(unsigned int pin, unsigned long flags);
15 25
16#endif /* __ASM_AVR32_AT32_PORTMUX_H__ */ 26#endif /* __ASM_ARCH_PORTMUX_H__ */
diff --git a/include/asm-avr32/dma-mapping.h b/include/asm-avr32/dma-mapping.h
index 0580b5d62bba..5c01e27f0b41 100644
--- a/include/asm-avr32/dma-mapping.h
+++ b/include/asm-avr32/dma-mapping.h
@@ -109,7 +109,7 @@ static inline dma_addr_t
109dma_map_single(struct device *dev, void *cpu_addr, size_t size, 109dma_map_single(struct device *dev, void *cpu_addr, size_t size,
110 enum dma_data_direction direction) 110 enum dma_data_direction direction)
111{ 111{
112 dma_cache_sync(cpu_addr, size, direction); 112 dma_cache_sync(dev, cpu_addr, size, direction);
113 return virt_to_bus(cpu_addr); 113 return virt_to_bus(cpu_addr);
114} 114}
115 115
@@ -211,7 +211,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
211 211
212 sg[i].dma_address = page_to_bus(sg[i].page) + sg[i].offset; 212 sg[i].dma_address = page_to_bus(sg[i].page) + sg[i].offset;
213 virt = page_address(sg[i].page) + sg[i].offset; 213 virt = page_address(sg[i].page) + sg[i].offset;
214 dma_cache_sync(virt, sg[i].length, direction); 214 dma_cache_sync(dev, virt, sg[i].length, direction);
215 } 215 }
216 216
217 return nents; 217 return nents;
@@ -256,14 +256,14 @@ static inline void
256dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, 256dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
257 size_t size, enum dma_data_direction direction) 257 size_t size, enum dma_data_direction direction)
258{ 258{
259 dma_cache_sync(bus_to_virt(dma_handle), size, direction); 259 dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
260} 260}
261 261
262static inline void 262static inline void
263dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, 263dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
264 size_t size, enum dma_data_direction direction) 264 size_t size, enum dma_data_direction direction)
265{ 265{
266 dma_cache_sync(bus_to_virt(dma_handle), size, direction); 266 dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
267} 267}
268 268
269/** 269/**
@@ -286,7 +286,7 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
286 int i; 286 int i;
287 287
288 for (i = 0; i < nents; i++) { 288 for (i = 0; i < nents; i++) {
289 dma_cache_sync(page_address(sg[i].page) + sg[i].offset, 289 dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
290 sg[i].length, direction); 290 sg[i].length, direction);
291 } 291 }
292} 292}
@@ -298,7 +298,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
298 int i; 298 int i;
299 299
300 for (i = 0; i < nents; i++) { 300 for (i = 0; i < nents; i++) {
301 dma_cache_sync(page_address(sg[i].page) + sg[i].offset, 301 dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
302 sg[i].length, direction); 302 sg[i].length, direction);
303 } 303 }
304} 304}
diff --git a/include/asm-avr32/pgalloc.h b/include/asm-avr32/pgalloc.h
index 7492cfb92ced..bb82e70cde8d 100644
--- a/include/asm-avr32/pgalloc.h
+++ b/include/asm-avr32/pgalloc.h
@@ -28,7 +28,7 @@ static __inline__ void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
28static __inline__ pgd_t *pgd_alloc(struct mm_struct *mm) 28static __inline__ pgd_t *pgd_alloc(struct mm_struct *mm)
29{ 29{
30 unsigned int pgd_size = (USER_PTRS_PER_PGD * sizeof(pgd_t)); 30 unsigned int pgd_size = (USER_PTRS_PER_PGD * sizeof(pgd_t));
31 pgd_t *pgd = (pgd_t *)kmalloc(pgd_size, GFP_KERNEL); 31 pgd_t *pgd = kmalloc(pgd_size, GFP_KERNEL);
32 32
33 if (pgd) 33 if (pgd)
34 memset(pgd, 0, pgd_size); 34 memset(pgd, 0, pgd_size);
diff --git a/include/asm-frv/thread_info.h b/include/asm-frv/thread_info.h
index d66c48e6ef14..d881f518e6a9 100644
--- a/include/asm-frv/thread_info.h
+++ b/include/asm-frv/thread_info.h
@@ -116,6 +116,7 @@ register struct thread_info *__current_thread_info asm("gr15");
116#define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */ 116#define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */
117#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ 117#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
118#define TIF_MEMDIE 17 /* OOM killer killed process */ 118#define TIF_MEMDIE 17 /* OOM killer killed process */
119#define TIF_FREEZE 18 /* freezing for suspend */
119 120
120#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 121#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
121#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 122#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
@@ -125,6 +126,7 @@ register struct thread_info *__current_thread_info asm("gr15");
125#define _TIF_IRET (1 << TIF_IRET) 126#define _TIF_IRET (1 << TIF_IRET)
126#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) 127#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
127#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) 128#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
129#define _TIF_FREEZE (1 << TIF_FREEZE)
128 130
129#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */ 131#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
130#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */ 132#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 6e9fcebbf89f..7437ccaada77 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -242,6 +242,7 @@
242 *(.initcall4s.init) \ 242 *(.initcall4s.init) \
243 *(.initcall5.init) \ 243 *(.initcall5.init) \
244 *(.initcall5s.init) \ 244 *(.initcall5s.init) \
245 *(.initcallrootfs.init) \
245 *(.initcall6.init) \ 246 *(.initcall6.init) \
246 *(.initcall6s.init) \ 247 *(.initcall6s.init) \
247 *(.initcall7.init) \ 248 *(.initcall7.init) \
diff --git a/include/asm-i386/msr.h b/include/asm-i386/msr.h
index 5679d4993072..609a3899475c 100644
--- a/include/asm-i386/msr.h
+++ b/include/asm-i386/msr.h
@@ -100,6 +100,8 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
100 100
101#define MSR_P6_PERFCTR0 0xc1 101#define MSR_P6_PERFCTR0 0xc1
102#define MSR_P6_PERFCTR1 0xc2 102#define MSR_P6_PERFCTR1 0xc2
103#define MSR_FSB_FREQ 0xcd
104
103 105
104#define MSR_IA32_BBL_CR_CTL 0x119 106#define MSR_IA32_BBL_CR_CTL 0x119
105 107
@@ -130,6 +132,9 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
130#define MSR_IA32_PERF_STATUS 0x198 132#define MSR_IA32_PERF_STATUS 0x198
131#define MSR_IA32_PERF_CTL 0x199 133#define MSR_IA32_PERF_CTL 0x199
132 134
135#define MSR_IA32_MPERF 0xE7
136#define MSR_IA32_APERF 0xE8
137
133#define MSR_IA32_THERM_CONTROL 0x19a 138#define MSR_IA32_THERM_CONTROL 0x19a
134#define MSR_IA32_THERM_INTERRUPT 0x19b 139#define MSR_IA32_THERM_INTERRUPT 0x19b
135#define MSR_IA32_THERM_STATUS 0x19c 140#define MSR_IA32_THERM_STATUS 0x19c
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index 46d32ad92082..4b187bb377b4 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -134,6 +134,7 @@ static inline struct thread_info *current_thread_info(void)
134#define TIF_MEMDIE 16 134#define TIF_MEMDIE 16
135#define TIF_DEBUG 17 /* uses debug registers */ 135#define TIF_DEBUG 17 /* uses debug registers */
136#define TIF_IO_BITMAP 18 /* uses I/O bitmap */ 136#define TIF_IO_BITMAP 18 /* uses I/O bitmap */
137#define TIF_FREEZE 19 /* is freezing for suspend */
137 138
138#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 139#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
139#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 140#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -147,6 +148,7 @@ static inline struct thread_info *current_thread_info(void)
147#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) 148#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
148#define _TIF_DEBUG (1<<TIF_DEBUG) 149#define _TIF_DEBUG (1<<TIF_DEBUG)
149#define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP) 150#define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP)
151#define _TIF_FREEZE (1<<TIF_FREEZE)
150 152
151/* work to do on interrupt/exception return */ 153/* work to do on interrupt/exception return */
152#define _TIF_WORK_MASK \ 154#define _TIF_WORK_MASK \
diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h
index 978d09596130..ac58580ad664 100644
--- a/include/asm-i386/topology.h
+++ b/include/asm-i386/topology.h
@@ -89,6 +89,7 @@ static inline int node_to_first_cpu(int node)
89 .flags = SD_LOAD_BALANCE \ 89 .flags = SD_LOAD_BALANCE \
90 | SD_BALANCE_EXEC \ 90 | SD_BALANCE_EXEC \
91 | SD_BALANCE_FORK \ 91 | SD_BALANCE_FORK \
92 | SD_SERIALIZE \
92 | SD_WAKE_BALANCE, \ 93 | SD_WAKE_BALANCE, \
93 .last_balance = jiffies, \ 94 .last_balance = jiffies, \
94 .balance_interval = 1, \ 95 .balance_interval = 1, \
diff --git a/include/asm-ia64/break.h b/include/asm-ia64/break.h
index 8167828edc4b..f03402039896 100644
--- a/include/asm-ia64/break.h
+++ b/include/asm-ia64/break.h
@@ -12,8 +12,8 @@
12 * OS-specific debug break numbers: 12 * OS-specific debug break numbers:
13 */ 13 */
14#define __IA64_BREAK_KDB 0x80100 14#define __IA64_BREAK_KDB 0x80100
15#define __IA64_BREAK_KPROBE 0x80200 15#define __IA64_BREAK_KPROBE 0x81000 /* .. 0x81fff */
16#define __IA64_BREAK_JPROBE 0x80300 16#define __IA64_BREAK_JPROBE 0x82000
17 17
18/* 18/*
19 * OS-specific break numbers: 19 * OS-specific break numbers:
diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h
index 1b45b71c79b9..828ae00e47c1 100644
--- a/include/asm-ia64/kprobes.h
+++ b/include/asm-ia64/kprobes.h
@@ -115,6 +115,7 @@ struct arch_specific_insn {
115 #define INST_FLAG_BREAK_INST 4 115 #define INST_FLAG_BREAK_INST 4
116 unsigned long inst_flag; 116 unsigned long inst_flag;
117 unsigned short target_br_reg; 117 unsigned short target_br_reg;
118 unsigned short slot;
118}; 119};
119 120
120extern int kprobe_exceptions_notify(struct notifier_block *self, 121extern int kprobe_exceptions_notify(struct notifier_block *self,
diff --git a/include/asm-ia64/sn/xpc.h b/include/asm-ia64/sn/xpc.h
index 1d45e1518fb3..e52b8508083b 100644
--- a/include/asm-ia64/sn/xpc.h
+++ b/include/asm-ia64/sn/xpc.h
@@ -673,7 +673,7 @@ extern irqreturn_t xpc_notify_IRQ_handler(int, void *);
673extern void xpc_dropped_IPI_check(struct xpc_partition *); 673extern void xpc_dropped_IPI_check(struct xpc_partition *);
674extern void xpc_activate_partition(struct xpc_partition *); 674extern void xpc_activate_partition(struct xpc_partition *);
675extern void xpc_activate_kthreads(struct xpc_channel *, int); 675extern void xpc_activate_kthreads(struct xpc_channel *, int);
676extern void xpc_create_kthreads(struct xpc_channel *, int); 676extern void xpc_create_kthreads(struct xpc_channel *, int, int);
677extern void xpc_disconnect_wait(int); 677extern void xpc_disconnect_wait(int);
678 678
679 679
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index 8adcde0934ca..9b505b25544f 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -88,6 +88,7 @@ struct thread_info {
88#define TIF_MEMDIE 17 88#define TIF_MEMDIE 17
89#define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */ 89#define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */
90#define TIF_DB_DISABLED 19 /* debug trap disabled for fsyscall */ 90#define TIF_DB_DISABLED 19 /* debug trap disabled for fsyscall */
91#define TIF_FREEZE 20 /* is freezing for suspend */
91 92
92#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 93#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
93#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) 94#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
@@ -98,6 +99,7 @@ struct thread_info {
98#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) 99#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
99#define _TIF_MCA_INIT (1 << TIF_MCA_INIT) 100#define _TIF_MCA_INIT (1 << TIF_MCA_INIT)
100#define _TIF_DB_DISABLED (1 << TIF_DB_DISABLED) 101#define _TIF_DB_DISABLED (1 << TIF_DB_DISABLED)
102#define _TIF_FREEZE (1 << TIF_FREEZE)
101 103
102/* "work to do on user-return" bits */ 104/* "work to do on user-return" bits */
103#define TIF_ALLWORK_MASK (_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT) 105#define TIF_ALLWORK_MASK (_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
diff --git a/include/asm-ia64/topology.h b/include/asm-ia64/topology.h
index a6e38565ab4c..22ed6749557e 100644
--- a/include/asm-ia64/topology.h
+++ b/include/asm-ia64/topology.h
@@ -101,6 +101,7 @@ void build_cpu_to_node_map(void);
101 .flags = SD_LOAD_BALANCE \ 101 .flags = SD_LOAD_BALANCE \
102 | SD_BALANCE_EXEC \ 102 | SD_BALANCE_EXEC \
103 | SD_BALANCE_FORK \ 103 | SD_BALANCE_FORK \
104 | SD_SERIALIZE \
104 | SD_WAKE_BALANCE, \ 105 | SD_WAKE_BALANCE, \
105 .last_balance = jiffies, \ 106 .last_balance = jiffies, \
106 .balance_interval = 64, \ 107 .balance_interval = 64, \
diff --git a/include/asm-m68k/swim_iop.h b/include/asm-m68k/swim_iop.h
deleted file mode 100644
index f29b67876b01..000000000000
--- a/include/asm-m68k/swim_iop.h
+++ /dev/null
@@ -1,221 +0,0 @@
1/*
2 * SWIM access through the IOP
3 * Written by Joshua M. Thompson
4 */
5
6/* IOP number and channel number for the SWIM */
7
8#define SWIM_IOP IOP_NUM_ISM
9#define SWIM_CHAN 1
10
11/* Command code: */
12
13#define CMD_INIT 0x01 /* Initialize */
14#define CMD_SHUTDOWN 0x02 /* Shutdown */
15#define CMD_START_POLL 0x03 /* Start insert/eject polling */
16#define CMD_STOP_POLL 0x04 /* Stop insert/eject polling */
17#define CMD_SETHFSTAG 0x05 /* Set HFS tag buffer address */
18#define CMD_STATUS 0x06 /* Status */
19#define CMD_EJECT 0x07 /* Eject */
20#define CMD_FORMAT 0x08 /* Format */
21#define CMD_FORMAT_VERIFY 0x09 /* Format and Verify */
22#define CMD_WRITE 0x0A /* Write */
23#define CMD_READ 0x0B /* Read */
24#define CMD_READ_VERIFY 0x0C /* Read and Verify */
25#define CMD_CACHE_CTRL 0x0D /* Cache control */
26#define CMD_TAGBUFF_CTRL 0x0E /* Tag buffer control */
27#define CMD_GET_ICON 0x0F /* Get Icon */
28
29/* Drive types: */
30
31/* note: apple sez DRV_FDHD is 4, but I get back a type */
32/* of 5 when I do a drive status check on my FDHD */
33
34#define DRV_NONE 0 /* No drive */
35#define DRV_UNKNOWN 1 /* Unspecified drive */
36#define DRV_400K 2 /* 400K */
37#define DRV_800K 3 /* 400K/800K */
38#define DRV_FDHD 5 /* 400K/800K/720K/1440K */
39#define DRV_HD20 7 /* Apple HD20 */
40
41/* Format types: */
42
43#define FMT_HD20 0x0001 /* Apple HD20 */
44#define FMT_400K 0x0002 /* 400K (GCR) */
45#define FMT_800K 0x0004 /* 800K (GCR) */
46#define FMT_720K 0x0008 /* 720K (MFM) */
47#define FMT_1440K 0x0010 /* 1.44M (MFM) */
48
49#define FMD_KIND_400K 1
50#define FMD_KIND_800K 2
51#define FMD_KIND_720K 3
52#define FMD_KIND_1440K 1
53
54/* Icon Flags: */
55
56#define ICON_MEDIA 0x01 /* Have IOP supply media icon */
57#define ICON_DRIVE 0x01 /* Have IOP supply drive icon */
58
59/* Error codes: */
60
61#define gcrOnMFMErr -400 /* GCR (400/800K) on HD media */
62#define verErr -84 /* verify failed */
63#define fmt2Err -83 /* can't get enough sync during format */
64#define fmt1Err -82 /* can't find sector 0 after track format */
65#define sectNFErr -81 /* can't find sector */
66#define seekErr -80 /* drive error during seek */
67#define spdAdjErr -79 /* can't set drive speed */
68#define twoSideErr -78 /* drive is single-sided */
69#define initIWMErr -77 /* error during initialization */
70#define tk0badErr -76 /* track zero is bad */
71#define cantStepErr -75 /* drive error during step */
72#define wrUnderrun -74 /* write underrun occurred */
73#define badDBtSlp -73 /* bad data bitslip marks */
74#define badDCksum -72 /* bad data checksum */
75#define noDtaMkErr -71 /* can't find data mark */
76#define badBtSlpErr -70 /* bad address bitslip marks */
77#define badCksmErr -69 /* bad address-mark checksum */
78#define dataVerErr -68 /* read-verify failed */
79#define noAdrMkErr -67 /* can't find an address mark */
80#define noNybErr -66 /* no nybbles? disk is probably degaussed */
81#define offLinErr -65 /* no disk in drive */
82#define noDriveErr -64 /* drive isn't connected */
83#define nsDrvErr -56 /* no such drive */
84#define paramErr -50 /* bad positioning information */
85#define wPrErr -44 /* write protected */
86#define openErr -23 /* already initialized */
87
88#ifndef __ASSEMBLY__
89
90struct swim_drvstatus {
91 __u16 curr_track; /* Current track number */
92 __u8 write_prot; /* 0x80 if disk is write protected */
93 __u8 disk_in_drive; /* 0x01 or 0x02 if a disk is in the drive */
94 __u8 installed; /* 0x01 if drive installed, 0xFF if not */
95 __u8 num_sides; /* 0x80 if two-sided format supported */
96 __u8 two_sided; /* 0xff if two-sided format diskette */
97 __u8 new_interface; /* 0x00 if old 400K drive, 0xFF if newer */
98 __u16 errors; /* Disk error count */
99 struct { /* 32 bits */
100 __u16 reserved;
101 __u16 :4;
102 __u16 external:1; /* Drive is external */
103 __u16 scsi:1; /* Drive is a SCSI drive */
104 __u16 fixed:1; /* Drive has fixed media */
105 __u16 secondary:1; /* Drive is secondary drive */
106 __u8 type; /* Drive type */
107 } info;
108 __u8 mfm_drive; /* 0xFF if this is an FDHD drive */
109 __u8 mfm_disk; /* 0xFF if 720K/1440K (MFM) disk */
110 __u8 mfm_format; /* 0x00 if 720K, 0xFF if 1440K */
111 __u8 ctlr_type; /* 0x00 if IWM, 0xFF if SWIM */
112 __u16 curr_format; /* Current format type */
113 __u16 allowed_fmt; /* Allowed format types */
114 __u32 num_blocks; /* Number of blocks on disk */
115 __u8 icon_flags; /* Icon flags */
116 __u8 unusued;
117};
118
119/* Commands issued from the host to the IOP: */
120
121struct swimcmd_init {
122 __u8 code; /* CMD_INIT */
123 __u8 unusued;
124 __u16 error;
125 __u8 drives[28]; /* drive type list */
126};
127
128struct swimcmd_startpoll {
129 __u8 code; /* CMD_START_POLL */
130 __u8 unusued;
131 __u16 error;
132};
133
134struct swimcmd_sethfstag {
135 __u8 code; /* CMD_SETHFSTAG */
136 __u8 unusued;
137 __u16 error;
138 caddr_t tagbuf; /* HFS tag buffer address */
139};
140
141struct swimcmd_status {
142 __u8 code; /* CMD_STATUS */
143 __u8 drive_num;
144 __u16 error;
145 struct swim_drvstatus status;
146};
147
148struct swimcmd_eject {
149 __u8 code; /* CMD_EJECT */
150 __u8 drive_num;
151 __u16 error;
152 struct swim_drvstatus status;
153};
154
155struct swimcmd_format {
156 __u8 code; /* CMD_FORMAT */
157 __u8 drive_num;
158 __u16 error;
159 union {
160 struct {
161 __u16 fmt; /* format kind */
162 __u8 hdrbyte; /* fmt byte for hdr (0=default) */
163 __u8 interleave; /* interleave (0 = default) */
164 caddr_t databuf; /* sector data buff (0=default */
165 caddr_t tagbuf; /* tag data buffer (0=default) */
166 } f;
167 struct swim_drvstatus status;
168 } p;
169};
170
171struct swimcmd_fmtverify {
172 __u8 code; /* CMD_FORMAT_VERIFY */
173 __u8 drive_num;
174 __u16 error;
175};
176
177struct swimcmd_rw {
178 __u8 code; /* CMD_READ, CMD_WRITE or CMD_READ_VERIFY */
179 __u8 drive_num;
180 __u16 error;
181 caddr_t buffer; /* R/W buffer address */
182 __u32 first_block; /* Starting block */
183 __u32 num_blocks; /* Number of blocks */
184 __u8 tag[12]; /* tag data */
185};
186
187struct swimcmd_cachectl {
188 __u8 code; /* CMD_CACHE_CTRL */
189 __u8 unused;
190 __u16 error;
191 __u8 enable; /* Nonzero to enable cache */
192 __u8 install; /* +1 = install, -1 = remove, 0 = neither */
193};
194
195struct swimcmd_tagbufctl {
196 __u8 code; /* CMD_TAGBUFF_CTRL */
197 __u8 unused;
198 __u16 error;
199 caddr_t buf; /* buffer address or 0 to disable */
200};
201
202struct swimcmd_geticon {
203 __u8 code; /* CMD_GET_ICON */
204 __u8 drive_num;
205 __u16 error;
206 caddr_t buffer; /* Nuffer address */
207 __u16 kind; /* 0 = media icon, 1 = drive icon */
208 __u16 unused;
209 __u16 max_bytes; /* maximum byte count */
210};
211
212/* Messages from the SWIM IOP to the host CPU: */
213
214struct swimmsg_status {
215 __u8 code; /* 1 = insert, 2 = eject, 3 = status changed */
216 __u8 drive_num;
217 __u16 error;
218 struct swim_drvstatus status;
219};
220
221#endif /* __ASSEMBLY__ */
diff --git a/include/asm-mips/compat.h b/include/asm-mips/compat.h
index 55a0152feb08..432653d7ae09 100644
--- a/include/asm-mips/compat.h
+++ b/include/asm-mips/compat.h
@@ -5,6 +5,7 @@
5 */ 5 */
6#include <linux/types.h> 6#include <linux/types.h>
7#include <asm/page.h> 7#include <asm/page.h>
8#include <asm/ptrace.h>
8 9
9#define COMPAT_USER_HZ 100 10#define COMPAT_USER_HZ 100
10 11
diff --git a/include/asm-mips/mach-ip27/irq.h b/include/asm-mips/mach-ip27/irq.h
index 806213ce31b6..25f0c3f39adf 100644
--- a/include/asm-mips/mach-ip27/irq.h
+++ b/include/asm-mips/mach-ip27/irq.h
@@ -10,8 +10,6 @@
10#ifndef __ASM_MACH_IP27_IRQ_H 10#ifndef __ASM_MACH_IP27_IRQ_H
11#define __ASM_MACH_IP27_IRQ_H 11#define __ASM_MACH_IP27_IRQ_H
12 12
13#include <asm/sn/arch.h>
14
15/* 13/*
16 * A hardwired interrupt number is completly stupid for this system - a 14 * A hardwired interrupt number is completly stupid for this system - a
17 * large configuration might have thousands if not tenthousands of 15 * large configuration might have thousands if not tenthousands of
diff --git a/include/asm-mips/mach-ip27/topology.h b/include/asm-mips/mach-ip27/topology.h
index a13b715fd9ca..44790fdc5d00 100644
--- a/include/asm-mips/mach-ip27/topology.h
+++ b/include/asm-mips/mach-ip27/topology.h
@@ -1,7 +1,6 @@
1#ifndef _ASM_MACH_TOPOLOGY_H 1#ifndef _ASM_MACH_TOPOLOGY_H
2#define _ASM_MACH_TOPOLOGY_H 1 2#define _ASM_MACH_TOPOLOGY_H 1
3 3
4#include <asm/sn/arch.h>
5#include <asm/sn/hub.h> 4#include <asm/sn/hub.h>
6#include <asm/mmzone.h> 5#include <asm/mmzone.h>
7 6
diff --git a/include/asm-mips/pci.h b/include/asm-mips/pci.h
index c4d68bebdca6..7f0f120ca07c 100644
--- a/include/asm-mips/pci.h
+++ b/include/asm-mips/pci.h
@@ -187,4 +187,10 @@ static inline void pcibios_add_platform_entries(struct pci_dev *dev)
187/* Do platform specific device initialization at pci_enable_device() time */ 187/* Do platform specific device initialization at pci_enable_device() time */
188extern int pcibios_plat_dev_init(struct pci_dev *dev); 188extern int pcibios_plat_dev_init(struct pci_dev *dev);
189 189
190/* Chances are this interrupt is wired PC-style ... */
191static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
192{
193 return channel ? 15 : 14;
194}
195
190#endif /* _ASM_PCI_H */ 196#endif /* _ASM_PCI_H */
diff --git a/include/asm-mips/ptrace.h b/include/asm-mips/ptrace.h
index 30bf555faeaa..8a1f2b6f04ac 100644
--- a/include/asm-mips/ptrace.h
+++ b/include/asm-mips/ptrace.h
@@ -82,6 +82,14 @@ struct pt_regs {
82 82
83extern asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit); 83extern asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit);
84 84
85extern NORET_TYPE void die(const char *, struct pt_regs *);
86
87static inline void die_if_kernel(const char *str, struct pt_regs *regs)
88{
89 if (unlikely(!user_mode(regs)))
90 die(str, regs);
91}
92
85#endif 93#endif
86 94
87#endif /* _ASM_PTRACE_H */ 95#endif /* _ASM_PTRACE_H */
diff --git a/include/asm-mips/sn/arch.h b/include/asm-mips/sn/arch.h
index 51174af6ac52..da523de628be 100644
--- a/include/asm-mips/sn/arch.h
+++ b/include/asm-mips/sn/arch.h
@@ -18,7 +18,6 @@
18#endif 18#endif
19 19
20typedef u64 hubreg_t; 20typedef u64 hubreg_t;
21typedef u64 nic_t;
22 21
23#define cputonasid(cpu) (cpu_data[(cpu)].p_nasid) 22#define cputonasid(cpu) (cpu_data[(cpu)].p_nasid)
24#define cputoslice(cpu) (cpu_data[(cpu)].p_slice) 23#define cputoslice(cpu) (cpu_data[(cpu)].p_slice)
diff --git a/include/asm-mips/sn/klconfig.h b/include/asm-mips/sn/klconfig.h
index 15d70ca56187..82aeb9e322db 100644
--- a/include/asm-mips/sn/klconfig.h
+++ b/include/asm-mips/sn/klconfig.h
@@ -61,6 +61,8 @@
61#endif /* CONFIG_SGI_IP35 */ 61#endif /* CONFIG_SGI_IP35 */
62#endif /* CONFIG_SGI_IP27 || CONFIG_SGI_IP35 */ 62#endif /* CONFIG_SGI_IP27 || CONFIG_SGI_IP35 */
63 63
64typedef u64 nic_t;
65
64#define KLCFGINFO_MAGIC 0xbeedbabe 66#define KLCFGINFO_MAGIC 0xbeedbabe
65 67
66typedef s32 klconf_off_t; 68typedef s32 klconf_off_t;
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 9428057a50cf..5e1289c85ed9 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -19,7 +19,6 @@
19#include <asm/barrier.h> 19#include <asm/barrier.h>
20#include <asm/cpu-features.h> 20#include <asm/cpu-features.h>
21#include <asm/dsp.h> 21#include <asm/dsp.h>
22#include <asm/ptrace.h>
23#include <asm/war.h> 22#include <asm/war.h>
24 23
25 24
@@ -336,14 +335,6 @@ extern void *set_except_vector(int n, void *addr);
336extern unsigned long ebase; 335extern unsigned long ebase;
337extern void per_cpu_trap_init(void); 336extern void per_cpu_trap_init(void);
338 337
339extern NORET_TYPE void die(const char *, struct pt_regs *);
340
341static inline void die_if_kernel(const char *str, struct pt_regs *regs)
342{
343 if (unlikely(!user_mode(regs)))
344 die(str, regs);
345}
346
347extern int stop_a_enabled; 338extern int stop_a_enabled;
348 339
349/* 340/*
diff --git a/include/asm-powerpc/Kbuild b/include/asm-powerpc/Kbuild
index 1e637381c118..703970fb0ec0 100644
--- a/include/asm-powerpc/Kbuild
+++ b/include/asm-powerpc/Kbuild
@@ -17,7 +17,6 @@ header-y += ipc.h
17header-y += poll.h 17header-y += poll.h
18header-y += shmparam.h 18header-y += shmparam.h
19header-y += sockios.h 19header-y += sockios.h
20header-y += spu_info.h
21header-y += ucontext.h 20header-y += ucontext.h
22header-y += ioctl.h 21header-y += ioctl.h
23header-y += linkage.h 22header-y += linkage.h
@@ -37,6 +36,7 @@ unifdef-y += posix_types.h
37unifdef-y += ptrace.h 36unifdef-y += ptrace.h
38unifdef-y += seccomp.h 37unifdef-y += seccomp.h
39unifdef-y += signal.h 38unifdef-y += signal.h
39unifdef-y += spu_info.h
40unifdef-y += termios.h 40unifdef-y += termios.h
41unifdef-y += types.h 41unifdef-y += types.h
42unifdef-y += unistd.h 42unifdef-y += unistd.h
diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h
index 0288144ea024..8f757f6246e4 100644
--- a/include/asm-powerpc/bitops.h
+++ b/include/asm-powerpc/bitops.h
@@ -209,7 +209,7 @@ int __ilog2_u32(u32 n)
209 209
210#ifdef __powerpc64__ 210#ifdef __powerpc64__
211static inline __attribute__((const)) 211static inline __attribute__((const))
212int __ilog2_u64(u32 n) 212int __ilog2_u64(u64 n)
213{ 213{
214 int bit; 214 int bit;
215 asm ("cntlzd %0,%1" : "=r" (bit) : "r" (n)); 215 asm ("cntlzd %0,%1" : "=r" (bit) : "r" (n));
diff --git a/include/asm-powerpc/bug.h b/include/asm-powerpc/bug.h
index 978b2c7e84ea..709568879f73 100644
--- a/include/asm-powerpc/bug.h
+++ b/include/asm-powerpc/bug.h
@@ -13,36 +13,39 @@
13 13
14#ifndef __ASSEMBLY__ 14#ifndef __ASSEMBLY__
15 15
16struct bug_entry {
17 unsigned long bug_addr;
18 long line;
19 const char *file;
20 const char *function;
21};
22
23struct bug_entry *find_bug(unsigned long bugaddr);
24
25/*
26 * If this bit is set in the line number it means that the trap
27 * is for WARN_ON rather than BUG or BUG_ON.
28 */
29#define BUG_WARNING_TRAP 0x1000000
30
31#ifdef CONFIG_BUG 16#ifdef CONFIG_BUG
32 17
18/* _EMIT_BUG_ENTRY expects args %0,%1,%2,%3 to be FILE, LINE, flags and
19 sizeof(struct bug_entry), respectively */
20#ifdef CONFIG_DEBUG_BUGVERBOSE
21#define _EMIT_BUG_ENTRY \
22 ".section __bug_table,\"a\"\n" \
23 "2:\t" PPC_LONG "1b, %0\n" \
24 "\t.short %1, %2\n" \
25 ".org 2b+%3\n" \
26 ".previous\n"
27#else
28#define _EMIT_BUG_ENTRY \
29 ".section __bug_table,\"a\"\n" \
30 "2:\t" PPC_LONG "1b\n" \
31 "\t.short %2\n" \
32 ".org 2b+%3\n" \
33 ".previous\n"
34#endif
35
33/* 36/*
34 * BUG_ON() and WARN_ON() do their best to cooperate with compile-time 37 * BUG_ON() and WARN_ON() do their best to cooperate with compile-time
35 * optimisations. However depending on the complexity of the condition 38 * optimisations. However depending on the complexity of the condition
36 * some compiler versions may not produce optimal results. 39 * some compiler versions may not produce optimal results.
37 */ 40 */
38 41
39#define BUG() do { \ 42#define BUG() do { \
40 __asm__ __volatile__( \ 43 __asm__ __volatile__( \
41 "1: twi 31,0,0\n" \ 44 "1: twi 31,0,0\n" \
42 ".section __bug_table,\"a\"\n" \ 45 _EMIT_BUG_ENTRY \
43 "\t"PPC_LONG" 1b,%0,%1,%2\n" \ 46 : : "i" (__FILE__), "i" (__LINE__), \
44 ".previous" \ 47 "i" (0), "i" (sizeof(struct bug_entry))); \
45 : : "i" (__LINE__), "i" (__FILE__), "i" (__FUNCTION__)); \ 48 for(;;) ; \
46} while (0) 49} while (0)
47 50
48#define BUG_ON(x) do { \ 51#define BUG_ON(x) do { \
@@ -51,23 +54,21 @@ struct bug_entry *find_bug(unsigned long bugaddr);
51 BUG(); \ 54 BUG(); \
52 } else { \ 55 } else { \
53 __asm__ __volatile__( \ 56 __asm__ __volatile__( \
54 "1: "PPC_TLNEI" %0,0\n" \ 57 "1: "PPC_TLNEI" %4,0\n" \
55 ".section __bug_table,\"a\"\n" \ 58 _EMIT_BUG_ENTRY \
56 "\t"PPC_LONG" 1b,%1,%2,%3\n" \ 59 : : "i" (__FILE__), "i" (__LINE__), "i" (0), \
57 ".previous" \ 60 "i" (sizeof(struct bug_entry)), \
58 : : "r" ((long)(x)), "i" (__LINE__), \ 61 "r" ((long)(x))); \
59 "i" (__FILE__), "i" (__FUNCTION__)); \
60 } \ 62 } \
61} while (0) 63} while (0)
62 64
63#define __WARN() do { \ 65#define __WARN() do { \
64 __asm__ __volatile__( \ 66 __asm__ __volatile__( \
65 "1: twi 31,0,0\n" \ 67 "1: twi 31,0,0\n" \
66 ".section __bug_table,\"a\"\n" \ 68 _EMIT_BUG_ENTRY \
67 "\t"PPC_LONG" 1b,%0,%1,%2\n" \ 69 : : "i" (__FILE__), "i" (__LINE__), \
68 ".previous" \ 70 "i" (BUGFLAG_WARNING), \
69 : : "i" (__LINE__ + BUG_WARNING_TRAP), \ 71 "i" (sizeof(struct bug_entry))); \
70 "i" (__FILE__), "i" (__FUNCTION__)); \
71} while (0) 72} while (0)
72 73
73#define WARN_ON(x) ({ \ 74#define WARN_ON(x) ({ \
@@ -77,13 +78,12 @@ struct bug_entry *find_bug(unsigned long bugaddr);
77 __WARN(); \ 78 __WARN(); \
78 } else { \ 79 } else { \
79 __asm__ __volatile__( \ 80 __asm__ __volatile__( \
80 "1: "PPC_TLNEI" %0,0\n" \ 81 "1: "PPC_TLNEI" %4,0\n" \
81 ".section __bug_table,\"a\"\n" \ 82 _EMIT_BUG_ENTRY \
82 "\t"PPC_LONG" 1b,%1,%2,%3\n" \ 83 : : "i" (__FILE__), "i" (__LINE__), \
83 ".previous" \ 84 "i" (BUGFLAG_WARNING), \
84 : : "r" (__ret_warn_on), \ 85 "i" (sizeof(struct bug_entry)), \
85 "i" (__LINE__ + BUG_WARNING_TRAP), \ 86 "r" (__ret_warn_on)); \
86 "i" (__FILE__), "i" (__FUNCTION__)); \
87 } \ 87 } \
88 unlikely(__ret_warn_on); \ 88 unlikely(__ret_warn_on); \
89}) 89})
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 6fe5c9d4ca3b..7384b8086b75 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -126,6 +126,7 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
126#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000) 126#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000)
127#define CPU_FTR_PPC_LE ASM_CONST(0x0000000000200000) 127#define CPU_FTR_PPC_LE ASM_CONST(0x0000000000200000)
128#define CPU_FTR_REAL_LE ASM_CONST(0x0000000000400000) 128#define CPU_FTR_REAL_LE ASM_CONST(0x0000000000400000)
129#define CPU_FTR_FPU_UNAVAILABLE ASM_CONST(0x0000000000800000)
129 130
130/* 131/*
131 * Add the 64-bit processor unique features in the top half of the word; 132 * Add the 64-bit processor unique features in the top half of the word;
@@ -152,6 +153,7 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
152#define CPU_FTR_PURR LONG_ASM_CONST(0x0000400000000000) 153#define CPU_FTR_PURR LONG_ASM_CONST(0x0000400000000000)
153#define CPU_FTR_CELL_TB_BUG LONG_ASM_CONST(0x0000800000000000) 154#define CPU_FTR_CELL_TB_BUG LONG_ASM_CONST(0x0000800000000000)
154#define CPU_FTR_SPURR LONG_ASM_CONST(0x0001000000000000) 155#define CPU_FTR_SPURR LONG_ASM_CONST(0x0001000000000000)
156#define CPU_FTR_DSCR LONG_ASM_CONST(0x0002000000000000)
155 157
156#ifndef __ASSEMBLY__ 158#ifndef __ASSEMBLY__
157 159
@@ -295,6 +297,9 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
295#define CPU_FTRS_E300 (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | \ 297#define CPU_FTRS_E300 (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | \
296 CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS | \ 298 CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS | \
297 CPU_FTR_COMMON) 299 CPU_FTR_COMMON)
300#define CPU_FTRS_E300C2 (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | \
301 CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS | \
302 CPU_FTR_COMMON | CPU_FTR_FPU_UNAVAILABLE)
298#define CPU_FTRS_CLASSIC32 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ 303#define CPU_FTRS_CLASSIC32 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
299 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE) 304 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE)
300#define CPU_FTRS_8XX (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB) 305#define CPU_FTRS_8XX (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB)
@@ -330,13 +335,14 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
330 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 335 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
331 CPU_FTR_MMCRA | CPU_FTR_SMT | \ 336 CPU_FTR_MMCRA | CPU_FTR_SMT | \
332 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ 337 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
333 CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE) 338 CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
339 CPU_FTR_DSCR)
334#define CPU_FTRS_POWER6X (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \ 340#define CPU_FTRS_POWER6X (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
335 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 341 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
336 CPU_FTR_MMCRA | CPU_FTR_SMT | \ 342 CPU_FTR_MMCRA | CPU_FTR_SMT | \
337 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ 343 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
338 CPU_FTR_PURR | CPU_FTR_CI_LARGE_PAGE | \ 344 CPU_FTR_PURR | CPU_FTR_CI_LARGE_PAGE | \
339 CPU_FTR_SPURR | CPU_FTR_REAL_LE) 345 CPU_FTR_SPURR | CPU_FTR_REAL_LE | CPU_FTR_DSCR)
340#define CPU_FTRS_CELL (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \ 346#define CPU_FTRS_CELL (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
341 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 347 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
342 CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ 348 CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -364,7 +370,8 @@ enum {
364 CPU_FTRS_7450_21 | CPU_FTRS_7450_23 | CPU_FTRS_7455_1 | 370 CPU_FTRS_7450_21 | CPU_FTRS_7450_23 | CPU_FTRS_7455_1 |
365 CPU_FTRS_7455_20 | CPU_FTRS_7455 | CPU_FTRS_7447_10 | 371 CPU_FTRS_7455_20 | CPU_FTRS_7455 | CPU_FTRS_7447_10 |
366 CPU_FTRS_7447 | CPU_FTRS_7447A | CPU_FTRS_82XX | 372 CPU_FTRS_7447 | CPU_FTRS_7447A | CPU_FTRS_82XX |
367 CPU_FTRS_G2_LE | CPU_FTRS_E300 | CPU_FTRS_CLASSIC32 | 373 CPU_FTRS_G2_LE | CPU_FTRS_E300 | CPU_FTRS_E300C2 |
374 CPU_FTRS_CLASSIC32 |
368#else 375#else
369 CPU_FTRS_GENERIC_32 | 376 CPU_FTRS_GENERIC_32 |
370#endif 377#endif
@@ -403,7 +410,8 @@ enum {
403 CPU_FTRS_7450_21 & CPU_FTRS_7450_23 & CPU_FTRS_7455_1 & 410 CPU_FTRS_7450_21 & CPU_FTRS_7450_23 & CPU_FTRS_7455_1 &
404 CPU_FTRS_7455_20 & CPU_FTRS_7455 & CPU_FTRS_7447_10 & 411 CPU_FTRS_7455_20 & CPU_FTRS_7455 & CPU_FTRS_7447_10 &
405 CPU_FTRS_7447 & CPU_FTRS_7447A & CPU_FTRS_82XX & 412 CPU_FTRS_7447 & CPU_FTRS_7447A & CPU_FTRS_82XX &
406 CPU_FTRS_G2_LE & CPU_FTRS_E300 & CPU_FTRS_CLASSIC32 & 413 CPU_FTRS_G2_LE & CPU_FTRS_E300 & CPU_FTRS_E300C2 &
414 CPU_FTRS_CLASSIC32 &
407#else 415#else
408 CPU_FTRS_GENERIC_32 & 416 CPU_FTRS_GENERIC_32 &
409#endif 417#endif
diff --git a/include/asm-powerpc/dcr-native.h b/include/asm-powerpc/dcr-native.h
index fd4a5f5e33d1..d7a1bc1551c6 100644
--- a/include/asm-powerpc/dcr-native.h
+++ b/include/asm-powerpc/dcr-native.h
@@ -20,8 +20,7 @@
20#ifndef _ASM_POWERPC_DCR_NATIVE_H 20#ifndef _ASM_POWERPC_DCR_NATIVE_H
21#define _ASM_POWERPC_DCR_NATIVE_H 21#define _ASM_POWERPC_DCR_NATIVE_H
22#ifdef __KERNEL__ 22#ifdef __KERNEL__
23 23#ifndef __ASSEMBLY__
24#include <asm/reg.h>
25 24
26typedef struct {} dcr_host_t; 25typedef struct {} dcr_host_t;
27 26
@@ -32,7 +31,41 @@ typedef struct {} dcr_host_t;
32#define dcr_read(host, dcr_n) mfdcr(dcr_n) 31#define dcr_read(host, dcr_n) mfdcr(dcr_n)
33#define dcr_write(host, dcr_n, value) mtdcr(dcr_n, value) 32#define dcr_write(host, dcr_n, value) mtdcr(dcr_n, value)
34 33
34/* Device Control Registers */
35void __mtdcr(int reg, unsigned int val);
36unsigned int __mfdcr(int reg);
37#define mfdcr(rn) \
38 ({unsigned int rval; \
39 if (__builtin_constant_p(rn)) \
40 asm volatile("mfdcr %0," __stringify(rn) \
41 : "=r" (rval)); \
42 else \
43 rval = __mfdcr(rn); \
44 rval;})
45
46#define mtdcr(rn, v) \
47do { \
48 if (__builtin_constant_p(rn)) \
49 asm volatile("mtdcr " __stringify(rn) ",%0" \
50 : : "r" (v)); \
51 else \
52 __mtdcr(rn, v); \
53} while (0)
54
55/* R/W of indirect DCRs make use of standard naming conventions for DCRs */
56#define mfdcri(base, reg) \
57({ \
58 mtdcr(base ## _CFGADDR, base ## _ ## reg); \
59 mfdcr(base ## _CFGDATA); \
60})
61
62#define mtdcri(base, reg, data) \
63do { \
64 mtdcr(base ## _CFGADDR, base ## _ ## reg); \
65 mtdcr(base ## _CFGDATA, data); \
66} while (0)
35 67
68#endif /* __ASSEMBLY__ */
36#endif /* __KERNEL__ */ 69#endif /* __KERNEL__ */
37#endif /* _ASM_POWERPC_DCR_NATIVE_H */ 70#endif /* _ASM_POWERPC_DCR_NATIVE_H */
38 71
diff --git a/include/asm-powerpc/dcr.h b/include/asm-powerpc/dcr.h
index 473f2c7fd892..b66c5e6941f0 100644
--- a/include/asm-powerpc/dcr.h
+++ b/include/asm-powerpc/dcr.h
@@ -20,6 +20,7 @@
20#ifndef _ASM_POWERPC_DCR_H 20#ifndef _ASM_POWERPC_DCR_H
21#define _ASM_POWERPC_DCR_H 21#define _ASM_POWERPC_DCR_H
22#ifdef __KERNEL__ 22#ifdef __KERNEL__
23#ifdef CONFIG_PPC_DCR
23 24
24#ifdef CONFIG_PPC_DCR_NATIVE 25#ifdef CONFIG_PPC_DCR_NATIVE
25#include <asm/dcr-native.h> 26#include <asm/dcr-native.h>
@@ -38,5 +39,6 @@ extern unsigned int dcr_resource_len(struct device_node *np,
38 unsigned int index); 39 unsigned int index);
39#endif /* CONFIG_PPC_MERGE */ 40#endif /* CONFIG_PPC_MERGE */
40 41
42#endif /* CONFIG_PPC_DCR */
41#endif /* __KERNEL__ */ 43#endif /* __KERNEL__ */
42#endif /* _ASM_POWERPC_DCR_H */ 44#endif /* _ASM_POWERPC_DCR_H */
diff --git a/include/asm-powerpc/hw_irq.h b/include/asm-powerpc/hw_irq.h
index d604863d72fb..9e4dd98eb220 100644
--- a/include/asm-powerpc/hw_irq.h
+++ b/include/asm-powerpc/hw_irq.h
@@ -107,25 +107,6 @@ static inline void local_irq_save_ptr(unsigned long *flags)
107 107
108#endif /* CONFIG_PPC64 */ 108#endif /* CONFIG_PPC64 */
109 109
110#define mask_irq(irq) \
111 ({ \
112 irq_desc_t *desc = get_irq_desc(irq); \
113 if (desc->chip && desc->chip->disable) \
114 desc->chip->disable(irq); \
115 })
116#define unmask_irq(irq) \
117 ({ \
118 irq_desc_t *desc = get_irq_desc(irq); \
119 if (desc->chip && desc->chip->enable) \
120 desc->chip->enable(irq); \
121 })
122#define ack_irq(irq) \
123 ({ \
124 irq_desc_t *desc = get_irq_desc(irq); \
125 if (desc->chip && desc->chip->ack) \
126 desc->chip->ack(irq); \
127 })
128
129/* 110/*
130 * interrupt-retrigger: should we handle this via lost interrupts and IPIs 111 * interrupt-retrigger: should we handle this via lost interrupts and IPIs
131 * or should we not care like we do now ? --BenH. 112 * or should we not care like we do now ? --BenH.
diff --git a/include/asm-powerpc/module.h b/include/asm-powerpc/module.h
index 584fabfb4f08..e5f14b13ccf0 100644
--- a/include/asm-powerpc/module.h
+++ b/include/asm-powerpc/module.h
@@ -46,8 +46,6 @@ struct mod_arch_specific {
46 unsigned int num_bugs; 46 unsigned int num_bugs;
47}; 47};
48 48
49extern struct bug_entry *module_find_bug(unsigned long bugaddr);
50
51/* 49/*
52 * Select ELF headers. 50 * Select ELF headers.
53 * Make empty section for module_frob_arch_sections to expand. 51 * Make empty section for module_frob_arch_sections to expand.
diff --git a/include/asm-powerpc/pci-bridge.h b/include/asm-powerpc/pci-bridge.h
index 7bb7f9009806..cb02c9d1ef93 100644
--- a/include/asm-powerpc/pci-bridge.h
+++ b/include/asm-powerpc/pci-bridge.h
@@ -31,12 +31,12 @@ struct pci_controller {
31 int last_busno; 31 int last_busno;
32 32
33 void __iomem *io_base_virt; 33 void __iomem *io_base_virt;
34 unsigned long io_base_phys; 34 resource_size_t io_base_phys;
35 35
36 /* Some machines have a non 1:1 mapping of 36 /* Some machines have a non 1:1 mapping of
37 * the PCI memory space in the CPU bus space 37 * the PCI memory space in the CPU bus space
38 */ 38 */
39 unsigned long pci_mem_offset; 39 resource_size_t pci_mem_offset;
40 unsigned long pci_io_size; 40 unsigned long pci_io_size;
41 41
42 struct pci_ops *ops; 42 struct pci_ops *ops;
diff --git a/include/asm-powerpc/pci.h b/include/asm-powerpc/pci.h
index 16f13319c769..ac656ee6bb19 100644
--- a/include/asm-powerpc/pci.h
+++ b/include/asm-powerpc/pci.h
@@ -143,8 +143,13 @@ int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
143/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */ 143/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
144#define HAVE_PCI_MMAP 1 144#define HAVE_PCI_MMAP 1
145 145
146#ifdef CONFIG_PPC64 146#if defined(CONFIG_PPC64) || defined(CONFIG_NOT_COHERENT_CACHE)
147/* pci_unmap_{single,page} is not a nop, thus... */ 147/*
148 * For 64-bit kernels, pci_unmap_{single,page} is not a nop.
149 * For 32-bit non-coherent kernels, pci_dma_sync_single_for_cpu() and
150 * so on are not nops.
151 * and thus...
152 */
148#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ 153#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
149 dma_addr_t ADDR_NAME; 154 dma_addr_t ADDR_NAME;
150#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ 155#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
@@ -158,6 +163,20 @@ int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
158#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ 163#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
159 (((PTR)->LEN_NAME) = (VAL)) 164 (((PTR)->LEN_NAME) = (VAL))
160 165
166#else /* 32-bit && coherent */
167
168/* pci_unmap_{page,single} is a nop so... */
169#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
170#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
171#define pci_unmap_addr(PTR, ADDR_NAME) (0)
172#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
173#define pci_unmap_len(PTR, LEN_NAME) (0)
174#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
175
176#endif /* CONFIG_PPC64 || CONFIG_NOT_COHERENT_CACHE */
177
178#ifdef CONFIG_PPC64
179
161/* The PCI address space does not equal the physical memory address 180/* The PCI address space does not equal the physical memory address
162 * space (we have an IOMMU). The IDE and SCSI device layers use 181 * space (we have an IOMMU). The IDE and SCSI device layers use
163 * this boolean for bounce buffer decisions. 182 * this boolean for bounce buffer decisions.
@@ -172,16 +191,8 @@ int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
172 */ 191 */
173#define PCI_DMA_BUS_IS_PHYS (1) 192#define PCI_DMA_BUS_IS_PHYS (1)
174 193
175/* pci_unmap_{page,single} is a nop so... */
176#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
177#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
178#define pci_unmap_addr(PTR, ADDR_NAME) (0)
179#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
180#define pci_unmap_len(PTR, LEN_NAME) (0)
181#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
182
183#endif /* CONFIG_PPC64 */ 194#endif /* CONFIG_PPC64 */
184 195
185extern void pcibios_resource_to_bus(struct pci_dev *dev, 196extern void pcibios_resource_to_bus(struct pci_dev *dev,
186 struct pci_bus_region *region, 197 struct pci_bus_region *region,
187 struct resource *res); 198 struct resource *res);
diff --git a/include/asm-powerpc/reg.h b/include/asm-powerpc/reg.h
index 6faae7b14d55..a3631b15754c 100644
--- a/include/asm-powerpc/reg.h
+++ b/include/asm-powerpc/reg.h
@@ -143,6 +143,7 @@
143 143
144/* Special Purpose Registers (SPRNs)*/ 144/* Special Purpose Registers (SPRNs)*/
145#define SPRN_CTR 0x009 /* Count Register */ 145#define SPRN_CTR 0x009 /* Count Register */
146#define SPRN_DSCR 0x11
146#define SPRN_CTRLF 0x088 147#define SPRN_CTRLF 0x088
147#define SPRN_CTRLT 0x098 148#define SPRN_CTRLT 0x098
148#define CTRL_CT 0xc0000000 /* current thread */ 149#define CTRL_CT 0xc0000000 /* current thread */
@@ -163,6 +164,7 @@
163#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */ 164#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */
164#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */ 165#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */
165#define SPRN_TBWU 0x11D /* Time Base Upper Register (super, R/W) */ 166#define SPRN_TBWU 0x11D /* Time Base Upper Register (super, R/W) */
167#define SPRN_SPURR 0x134 /* Scaled PURR */
166#define SPRN_HIOR 0x137 /* 970 Hypervisor interrupt offset */ 168#define SPRN_HIOR 0x137 /* 970 Hypervisor interrupt offset */
167#define SPRN_DBAT0L 0x219 /* Data BAT 0 Lower Register */ 169#define SPRN_DBAT0L 0x219 /* Data BAT 0 Lower Register */
168#define SPRN_DBAT0U 0x218 /* Data BAT 0 Upper Register */ 170#define SPRN_DBAT0U 0x218 /* Data BAT 0 Upper Register */
diff --git a/include/asm-powerpc/rtas.h b/include/asm-powerpc/rtas.h
index 5a0c136c0416..8eaa7b28d9d0 100644
--- a/include/asm-powerpc/rtas.h
+++ b/include/asm-powerpc/rtas.h
@@ -159,6 +159,7 @@ extern struct rtas_t rtas;
159 159
160extern void enter_rtas(unsigned long); 160extern void enter_rtas(unsigned long);
161extern int rtas_token(const char *service); 161extern int rtas_token(const char *service);
162extern int rtas_service_present(const char *service);
162extern int rtas_call(int token, int, int, int *, ...); 163extern int rtas_call(int token, int, int, int *, ...);
163extern void rtas_restart(char *cmd); 164extern void rtas_restart(char *cmd);
164extern void rtas_power_off(void); 165extern void rtas_power_off(void);
@@ -221,8 +222,6 @@ extern int rtas_get_error_log_max(void);
221extern spinlock_t rtas_data_buf_lock; 222extern spinlock_t rtas_data_buf_lock;
222extern char rtas_data_buf[RTAS_DATA_BUF_SIZE]; 223extern char rtas_data_buf[RTAS_DATA_BUF_SIZE];
223 224
224extern void rtas_stop_self(void);
225
226/* RMO buffer reserved for user-space RTAS use */ 225/* RMO buffer reserved for user-space RTAS use */
227extern unsigned long rtas_rmo_buf; 226extern unsigned long rtas_rmo_buf;
228 227
diff --git a/include/asm-powerpc/thread_info.h b/include/asm-powerpc/thread_info.h
index d339e2e88b11..3f32ca8bfec9 100644
--- a/include/asm-powerpc/thread_info.h
+++ b/include/asm-powerpc/thread_info.h
@@ -122,6 +122,7 @@ static inline struct thread_info *current_thread_info(void)
122#define TIF_RESTOREALL 12 /* Restore all regs (implies NOERROR) */ 122#define TIF_RESTOREALL 12 /* Restore all regs (implies NOERROR) */
123#define TIF_NOERROR 14 /* Force successful syscall return */ 123#define TIF_NOERROR 14 /* Force successful syscall return */
124#define TIF_RESTORE_SIGMASK 15 /* Restore signal mask in do_signal */ 124#define TIF_RESTORE_SIGMASK 15 /* Restore signal mask in do_signal */
125#define TIF_FREEZE 16 /* Freezing for suspend */
125 126
126/* as above, but as bit values */ 127/* as above, but as bit values */
127#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 128#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -138,6 +139,7 @@ static inline struct thread_info *current_thread_info(void)
138#define _TIF_RESTOREALL (1<<TIF_RESTOREALL) 139#define _TIF_RESTOREALL (1<<TIF_RESTOREALL)
139#define _TIF_NOERROR (1<<TIF_NOERROR) 140#define _TIF_NOERROR (1<<TIF_NOERROR)
140#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) 141#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
142#define _TIF_FREEZE (1<<TIF_FREEZE)
141#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP) 143#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP)
142 144
143#define _TIF_USER_WORK_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | \ 145#define _TIF_USER_WORK_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | \
diff --git a/include/asm-powerpc/topology.h b/include/asm-powerpc/topology.h
index 50c014007de7..6610495f5f16 100644
--- a/include/asm-powerpc/topology.h
+++ b/include/asm-powerpc/topology.h
@@ -66,6 +66,7 @@ static inline int pcibus_to_node(struct pci_bus *bus)
66 | SD_BALANCE_EXEC \ 66 | SD_BALANCE_EXEC \
67 | SD_BALANCE_NEWIDLE \ 67 | SD_BALANCE_NEWIDLE \
68 | SD_WAKE_IDLE \ 68 | SD_WAKE_IDLE \
69 | SD_SERIALIZE \
69 | SD_WAKE_BALANCE, \ 70 | SD_WAKE_BALANCE, \
70 .last_balance = jiffies, \ 71 .last_balance = jiffies, \
71 .balance_interval = 1, \ 72 .balance_interval = 1, \
diff --git a/include/asm-ppc/pci-bridge.h b/include/asm-ppc/pci-bridge.h
index 6c955d0c1ef0..4d35b844bc58 100644
--- a/include/asm-ppc/pci-bridge.h
+++ b/include/asm-ppc/pci-bridge.h
@@ -20,8 +20,8 @@ extern unsigned long pci_bus_mem_base_phys(unsigned int bus);
20extern struct pci_controller* pcibios_alloc_controller(void); 20extern struct pci_controller* pcibios_alloc_controller(void);
21 21
22/* Helper function for setting up resources */ 22/* Helper function for setting up resources */
23extern void pci_init_resource(struct resource *res, unsigned long start, 23extern void pci_init_resource(struct resource *res, resource_size_t start,
24 unsigned long end, int flags, char *name); 24 resource_size_t end, int flags, char *name);
25 25
26/* Get the PCI host controller for a bus */ 26/* Get the PCI host controller for a bus */
27extern struct pci_controller* pci_bus_to_hose(int bus); 27extern struct pci_controller* pci_bus_to_hose(int bus);
@@ -50,12 +50,12 @@ struct pci_controller {
50 int bus_offset; 50 int bus_offset;
51 51
52 void __iomem *io_base_virt; 52 void __iomem *io_base_virt;
53 unsigned long io_base_phys; 53 resource_size_t io_base_phys;
54 54
55 /* Some machines (PReP) have a non 1:1 mapping of 55 /* Some machines (PReP) have a non 1:1 mapping of
56 * the PCI memory space in the CPU bus space 56 * the PCI memory space in the CPU bus space
57 */ 57 */
58 unsigned long pci_mem_offset; 58 resource_size_t pci_mem_offset;
59 59
60 struct pci_ops *ops; 60 struct pci_ops *ops;
61 volatile unsigned int __iomem *cfg_addr; 61 volatile unsigned int __iomem *cfg_addr;
diff --git a/include/asm-ppc/pci.h b/include/asm-ppc/pci.h
index 11ffaaa5da16..9d162028dab9 100644
--- a/include/asm-ppc/pci.h
+++ b/include/asm-ppc/pci.h
@@ -61,6 +61,27 @@ extern unsigned long pci_bus_to_phys(unsigned int ba, int busnr);
61 */ 61 */
62#define PCI_DMA_BUS_IS_PHYS (1) 62#define PCI_DMA_BUS_IS_PHYS (1)
63 63
64#ifdef CONFIG_NOT_COHERENT_CACHE
65/*
66 * pci_unmap_{page,single} are NOPs but pci_dma_sync_single_for_cpu()
67 * and so on are not, so...
68 */
69
70#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
71 dma_addr_t ADDR_NAME;
72#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
73 __u32 LEN_NAME;
74#define pci_unmap_addr(PTR, ADDR_NAME) \
75 ((PTR)->ADDR_NAME)
76#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
77 (((PTR)->ADDR_NAME) = (VAL))
78#define pci_unmap_len(PTR, LEN_NAME) \
79 ((PTR)->LEN_NAME)
80#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
81 (((PTR)->LEN_NAME) = (VAL))
82
83#else /* coherent */
84
64/* pci_unmap_{page,single} is a nop so... */ 85/* pci_unmap_{page,single} is a nop so... */
65#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) 86#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
66#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) 87#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
@@ -69,6 +90,8 @@ extern unsigned long pci_bus_to_phys(unsigned int ba, int busnr);
69#define pci_unmap_len(PTR, LEN_NAME) (0) 90#define pci_unmap_len(PTR, LEN_NAME) (0)
70#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0) 91#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
71 92
93#endif /* CONFIG_NOT_COHERENT_CACHE */
94
72#ifdef CONFIG_PCI 95#ifdef CONFIG_PCI
73static inline void pci_dma_burst_advice(struct pci_dev *pdev, 96static inline void pci_dma_burst_advice(struct pci_dev *pdev,
74 enum pci_dma_burst_strategy *strat, 97 enum pci_dma_burst_strategy *strat,
diff --git a/include/asm-ppc/reg_booke.h b/include/asm-ppc/reg_booke.h
index 602fbadeaf48..a263fc1e65c4 100644
--- a/include/asm-ppc/reg_booke.h
+++ b/include/asm-ppc/reg_booke.h
@@ -9,41 +9,9 @@
9#ifndef __ASM_PPC_REG_BOOKE_H__ 9#ifndef __ASM_PPC_REG_BOOKE_H__
10#define __ASM_PPC_REG_BOOKE_H__ 10#define __ASM_PPC_REG_BOOKE_H__
11 11
12#ifndef __ASSEMBLY__ 12#include <asm/dcr.h>
13/* Device Control Registers */
14void __mtdcr(int reg, unsigned int val);
15unsigned int __mfdcr(int reg);
16#define mfdcr(rn) \
17 ({unsigned int rval; \
18 if (__builtin_constant_p(rn)) \
19 asm volatile("mfdcr %0," __stringify(rn) \
20 : "=r" (rval)); \
21 else \
22 rval = __mfdcr(rn); \
23 rval;})
24
25#define mtdcr(rn, v) \
26do { \
27 if (__builtin_constant_p(rn)) \
28 asm volatile("mtdcr " __stringify(rn) ",%0" \
29 : : "r" (v)); \
30 else \
31 __mtdcr(rn, v); \
32} while (0)
33
34/* R/W of indirect DCRs make use of standard naming conventions for DCRs */
35#define mfdcri(base, reg) \
36({ \
37 mtdcr(base ## _CFGADDR, base ## _ ## reg); \
38 mfdcr(base ## _CFGDATA); \
39})
40
41#define mtdcri(base, reg, data) \
42do { \
43 mtdcr(base ## _CFGADDR, base ## _ ## reg); \
44 mtdcr(base ## _CFGDATA, data); \
45} while (0)
46 13
14#ifndef __ASSEMBLY__
47/* Performance Monitor Registers */ 15/* Performance Monitor Registers */
48#define mfpmr(rn) ({unsigned int rval; \ 16#define mfpmr(rn) ({unsigned int rval; \
49 asm volatile("mfpmr %0," __stringify(rn) \ 17 asm volatile("mfpmr %0," __stringify(rn) \
diff --git a/include/asm-sh/atomic-irq.h b/include/asm-sh/atomic-irq.h
new file mode 100644
index 000000000000..74f7943cff6f
--- /dev/null
+++ b/include/asm-sh/atomic-irq.h
@@ -0,0 +1,71 @@
1#ifndef __ASM_SH_ATOMIC_IRQ_H
2#define __ASM_SH_ATOMIC_IRQ_H
3
4/*
5 * To get proper branch prediction for the main line, we must branch
6 * forward to code at the end of this object's .text section, then
7 * branch back to restart the operation.
8 */
9static inline void atomic_add(int i, atomic_t *v)
10{
11 unsigned long flags;
12
13 local_irq_save(flags);
14 *(long *)v += i;
15 local_irq_restore(flags);
16}
17
18static inline void atomic_sub(int i, atomic_t *v)
19{
20 unsigned long flags;
21
22 local_irq_save(flags);
23 *(long *)v -= i;
24 local_irq_restore(flags);
25}
26
27static inline int atomic_add_return(int i, atomic_t *v)
28{
29 unsigned long temp, flags;
30
31 local_irq_save(flags);
32 temp = *(long *)v;
33 temp += i;
34 *(long *)v = temp;
35 local_irq_restore(flags);
36
37 return temp;
38}
39
40static inline int atomic_sub_return(int i, atomic_t *v)
41{
42 unsigned long temp, flags;
43
44 local_irq_save(flags);
45 temp = *(long *)v;
46 temp -= i;
47 *(long *)v = temp;
48 local_irq_restore(flags);
49
50 return temp;
51}
52
53static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
54{
55 unsigned long flags;
56
57 local_irq_save(flags);
58 *(long *)v &= ~mask;
59 local_irq_restore(flags);
60}
61
62static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
63{
64 unsigned long flags;
65
66 local_irq_save(flags);
67 *(long *)v |= mask;
68 local_irq_restore(flags);
69}
70
71#endif /* __ASM_SH_ATOMIC_IRQ_H */
diff --git a/include/asm-sh/atomic-llsc.h b/include/asm-sh/atomic-llsc.h
new file mode 100644
index 000000000000..4b00b78e3f4f
--- /dev/null
+++ b/include/asm-sh/atomic-llsc.h
@@ -0,0 +1,107 @@
1#ifndef __ASM_SH_ATOMIC_LLSC_H
2#define __ASM_SH_ATOMIC_LLSC_H
3
4/*
5 * To get proper branch prediction for the main line, we must branch
6 * forward to code at the end of this object's .text section, then
7 * branch back to restart the operation.
8 */
9static inline void atomic_add(int i, atomic_t *v)
10{
11 unsigned long tmp;
12
13 __asm__ __volatile__ (
14"1: movli.l @%2, %0 ! atomic_add \n"
15" add %1, %0 \n"
16" movco.l %0, @%2 \n"
17" bf 1b \n"
18 : "=&z" (tmp)
19 : "r" (i), "r" (&v->counter)
20 : "t");
21}
22
23static inline void atomic_sub(int i, atomic_t *v)
24{
25 unsigned long tmp;
26
27 __asm__ __volatile__ (
28"1: movli.l @%2, %0 ! atomic_sub \n"
29" sub %1, %0 \n"
30" movco.l %0, @%2 \n"
31" bf 1b \n"
32 : "=&z" (tmp)
33 : "r" (i), "r" (&v->counter)
34 : "t");
35}
36
37/*
38 * SH-4A note:
39 *
40 * We basically get atomic_xxx_return() for free compared with
41 * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
42 * encoding, so the retval is automatically set without having to
43 * do any special work.
44 */
45static inline int atomic_add_return(int i, atomic_t *v)
46{
47 unsigned long temp;
48
49 __asm__ __volatile__ (
50"1: movli.l @%2, %0 ! atomic_add_return \n"
51" add %1, %0 \n"
52" movco.l %0, @%2 \n"
53" bf 1b \n"
54" synco \n"
55 : "=&z" (temp)
56 : "r" (i), "r" (&v->counter)
57 : "t");
58
59 return temp;
60}
61
62static inline int atomic_sub_return(int i, atomic_t *v)
63{
64 unsigned long temp;
65
66 __asm__ __volatile__ (
67"1: movli.l @%2, %0 ! atomic_sub_return \n"
68" sub %1, %0 \n"
69" movco.l %0, @%2 \n"
70" bf 1b \n"
71" synco \n"
72 : "=&z" (temp)
73 : "r" (i), "r" (&v->counter)
74 : "t");
75
76 return temp;
77}
78
79static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
80{
81 unsigned long tmp;
82
83 __asm__ __volatile__ (
84"1: movli.l @%2, %0 ! atomic_clear_mask \n"
85" and %1, %0 \n"
86" movco.l %0, @%2 \n"
87" bf 1b \n"
88 : "=&z" (tmp)
89 : "r" (~mask), "r" (&v->counter)
90 : "t");
91}
92
93static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
94{
95 unsigned long tmp;
96
97 __asm__ __volatile__ (
98"1: movli.l @%2, %0 ! atomic_set_mask \n"
99" or %1, %0 \n"
100" movco.l %0, @%2 \n"
101" bf 1b \n"
102 : "=&z" (tmp)
103 : "r" (mask), "r" (&v->counter)
104 : "t");
105}
106
107#endif /* __ASM_SH_ATOMIC_LLSC_H */
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h
index 28305c3cbddf..e12570b9339d 100644
--- a/include/asm-sh/atomic.h
+++ b/include/asm-sh/atomic.h
@@ -17,119 +17,14 @@ typedef struct { volatile int counter; } atomic_t;
17#include <linux/compiler.h> 17#include <linux/compiler.h>
18#include <asm/system.h> 18#include <asm/system.h>
19 19
20/*
21 * To get proper branch prediction for the main line, we must branch
22 * forward to code at the end of this object's .text section, then
23 * branch back to restart the operation.
24 */
25static inline void atomic_add(int i, atomic_t *v)
26{
27#ifdef CONFIG_CPU_SH4A 20#ifdef CONFIG_CPU_SH4A
28 unsigned long tmp; 21#include <asm/atomic-llsc.h>
29
30 __asm__ __volatile__ (
31"1: movli.l @%2, %0 ! atomic_add \n"
32" add %1, %0 \n"
33" movco.l %0, @%2 \n"
34" bf 1b \n"
35 : "=&z" (tmp)
36 : "r" (i), "r" (&v->counter)
37 : "t");
38#else 22#else
39 unsigned long flags; 23#include <asm/atomic-irq.h>
40
41 local_irq_save(flags);
42 *(long *)v += i;
43 local_irq_restore(flags);
44#endif
45}
46
47static inline void atomic_sub(int i, atomic_t *v)
48{
49#ifdef CONFIG_CPU_SH4A
50 unsigned long tmp;
51
52 __asm__ __volatile__ (
53"1: movli.l @%2, %0 ! atomic_sub \n"
54" sub %1, %0 \n"
55" movco.l %0, @%2 \n"
56" bf 1b \n"
57 : "=&z" (tmp)
58 : "r" (i), "r" (&v->counter)
59 : "t");
60#else
61 unsigned long flags;
62
63 local_irq_save(flags);
64 *(long *)v -= i;
65 local_irq_restore(flags);
66#endif 24#endif
67}
68
69/*
70 * SH-4A note:
71 *
72 * We basically get atomic_xxx_return() for free compared with
73 * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
74 * encoding, so the retval is automatically set without having to
75 * do any special work.
76 */
77static inline int atomic_add_return(int i, atomic_t *v)
78{
79 unsigned long temp;
80
81#ifdef CONFIG_CPU_SH4A
82 __asm__ __volatile__ (
83"1: movli.l @%2, %0 ! atomic_add_return \n"
84" add %1, %0 \n"
85" movco.l %0, @%2 \n"
86" bf 1b \n"
87" synco \n"
88 : "=&z" (temp)
89 : "r" (i), "r" (&v->counter)
90 : "t");
91#else
92 unsigned long flags;
93
94 local_irq_save(flags);
95 temp = *(long *)v;
96 temp += i;
97 *(long *)v = temp;
98 local_irq_restore(flags);
99#endif
100
101 return temp;
102}
103 25
104#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) 26#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
105 27
106static inline int atomic_sub_return(int i, atomic_t *v)
107{
108 unsigned long temp;
109
110#ifdef CONFIG_CPU_SH4A
111 __asm__ __volatile__ (
112"1: movli.l @%2, %0 ! atomic_sub_return \n"
113" sub %1, %0 \n"
114" movco.l %0, @%2 \n"
115" bf 1b \n"
116" synco \n"
117 : "=&z" (temp)
118 : "r" (i), "r" (&v->counter)
119 : "t");
120#else
121 unsigned long flags;
122
123 local_irq_save(flags);
124 temp = *(long *)v;
125 temp -= i;
126 *(long *)v = temp;
127 local_irq_restore(flags);
128#endif
129
130 return temp;
131}
132
133#define atomic_dec_return(v) atomic_sub_return(1,(v)) 28#define atomic_dec_return(v) atomic_sub_return(1,(v))
134#define atomic_inc_return(v) atomic_add_return(1,(v)) 29#define atomic_inc_return(v) atomic_add_return(1,(v))
135 30
@@ -180,50 +75,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
180} 75}
181#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 76#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
182 77
183static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
184{
185#ifdef CONFIG_CPU_SH4A
186 unsigned long tmp;
187
188 __asm__ __volatile__ (
189"1: movli.l @%2, %0 ! atomic_clear_mask \n"
190" and %1, %0 \n"
191" movco.l %0, @%2 \n"
192" bf 1b \n"
193 : "=&z" (tmp)
194 : "r" (~mask), "r" (&v->counter)
195 : "t");
196#else
197 unsigned long flags;
198
199 local_irq_save(flags);
200 *(long *)v &= ~mask;
201 local_irq_restore(flags);
202#endif
203}
204
205static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
206{
207#ifdef CONFIG_CPU_SH4A
208 unsigned long tmp;
209
210 __asm__ __volatile__ (
211"1: movli.l @%2, %0 ! atomic_set_mask \n"
212" or %1, %0 \n"
213" movco.l %0, @%2 \n"
214" bf 1b \n"
215 : "=&z" (tmp)
216 : "r" (mask), "r" (&v->counter)
217 : "t");
218#else
219 unsigned long flags;
220
221 local_irq_save(flags);
222 *(long *)v |= mask;
223 local_irq_restore(flags);
224#endif
225}
226
227/* Atomic operations are already serializing on SH */ 78/* Atomic operations are already serializing on SH */
228#define smp_mb__before_atomic_dec() barrier() 79#define smp_mb__before_atomic_dec() barrier()
229#define smp_mb__after_atomic_dec() barrier() 80#define smp_mb__after_atomic_dec() barrier()
diff --git a/include/asm-sh/bug.h b/include/asm-sh/bug.h
index 1b4fc52a59e8..2f89dd06d0cd 100644
--- a/include/asm-sh/bug.h
+++ b/include/asm-sh/bug.h
@@ -1,19 +1,54 @@
1#ifndef __ASM_SH_BUG_H 1#ifndef __ASM_SH_BUG_H
2#define __ASM_SH_BUG_H 2#define __ASM_SH_BUG_H
3 3
4
5#ifdef CONFIG_BUG 4#ifdef CONFIG_BUG
6/* 5
7 * Tell the user there is some problem. 6struct bug_frame {
8 */ 7 unsigned short opcode;
9#define BUG() do { \ 8 unsigned short line;
10 printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ 9 const char *file;
11 *(volatile int *)0 = 0; \ 10 const char *func;
11};
12
13struct pt_regs;
14
15extern void handle_BUG(struct pt_regs *);
16
17#define TRAPA_BUG_OPCODE 0xc33e /* trapa #0x3e */
18
19#ifdef CONFIG_DEBUG_BUGVERBOSE
20
21#define BUG() \
22do { \
23 __asm__ __volatile__ ( \
24 ".align 2\n\t" \
25 ".short %O0\n\t" \
26 ".short %O1\n\t" \
27 ".long %O2\n\t" \
28 ".long %O3\n\t" \
29 : \
30 : "n" (TRAPA_BUG_OPCODE), \
31 "i" (__LINE__), "X" (__FILE__), \
32 "X" (__FUNCTION__)); \
33} while (0)
34
35#else
36
37#define BUG() \
38do { \
39 __asm__ __volatile__ ( \
40 ".align 2\n\t" \
41 ".short %O0\n\t" \
42 : \
43 : "n" (TRAPA_BUG_OPCODE)); \
12} while (0) 44} while (0)
13 45
46#endif /* CONFIG_DEBUG_BUGVERBOSE */
47
14#define HAVE_ARCH_BUG 48#define HAVE_ARCH_BUG
15#endif 49
50#endif /* CONFIG_BUG */
16 51
17#include <asm-generic/bug.h> 52#include <asm-generic/bug.h>
18 53
19#endif 54#endif /* __ASM_SH_BUG_H */
diff --git a/include/asm-sh/bugs.h b/include/asm-sh/bugs.h
index 795047da5e17..a294997a8412 100644
--- a/include/asm-sh/bugs.h
+++ b/include/asm-sh/bugs.h
@@ -16,9 +16,8 @@
16 16
17static void __init check_bugs(void) 17static void __init check_bugs(void)
18{ 18{
19 extern char *get_cpu_subtype(void);
20 extern unsigned long loops_per_jiffy; 19 extern unsigned long loops_per_jiffy;
21 char *p= &init_utsname()->machine[2]; /* "sh" */ 20 char *p = &init_utsname()->machine[2]; /* "sh" */
22 21
23 cpu_data->loops_per_jiffy = loops_per_jiffy; 22 cpu_data->loops_per_jiffy = loops_per_jiffy;
24 23
@@ -40,6 +39,15 @@ static void __init check_bugs(void)
40 *p++ = '4'; 39 *p++ = '4';
41 *p++ = 'a'; 40 *p++ = 'a';
42 break; 41 break;
42 case CPU_SH73180 ... CPU_SH7722:
43 *p++ = '4';
44 *p++ = 'a';
45 *p++ = 'l';
46 *p++ = '-';
47 *p++ = 'd';
48 *p++ = 's';
49 *p++ = 'p';
50 break;
43 default: 51 default:
44 *p++ = '?'; 52 *p++ = '?';
45 *p++ = '!'; 53 *p++ = '!';
diff --git a/include/asm-sh/checksum.h b/include/asm-sh/checksum.h
index d44344c88e73..4bc8357e8892 100644
--- a/include/asm-sh/checksum.h
+++ b/include/asm-sh/checksum.h
@@ -34,25 +34,26 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
34 */ 34 */
35 35
36asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, 36asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
37 int len, __wsum sum, int *src_err_ptr, int *dst_err_ptr); 37 int len, __wsum sum,
38 int *src_err_ptr, int *dst_err_ptr);
38 39
39/* 40/*
40 * Note: when you get a NULL pointer exception here this means someone 41 * Note: when you get a NULL pointer exception here this means someone
41 * passed in an incorrect kernel address to one of these functions. 42 * passed in an incorrect kernel address to one of these functions.
42 * 43 *
43 * If you use these functions directly please don't forget the 44 * If you use these functions directly please don't forget the
44 * access_ok(). 45 * access_ok().
45 */ 46 */
46static __inline__ 47static inline
47__wsum csum_partial_copy_nocheck(const void *src, void *dst, 48__wsum csum_partial_copy_nocheck(const void *src, void *dst,
48 int len, __wsum sum) 49 int len, __wsum sum)
49{ 50{
50 return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL); 51 return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
51} 52}
52 53
53static __inline__ 54static inline
54__wsum csum_partial_copy_from_user(const void __user *src, void *dst, 55__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
55 int len, __wsum sum, int *err_ptr) 56 int len, __wsum sum, int *err_ptr)
56{ 57{
57 return csum_partial_copy_generic((__force const void *)src, dst, 58 return csum_partial_copy_generic((__force const void *)src, dst,
58 len, sum, err_ptr, NULL); 59 len, sum, err_ptr, NULL);
@@ -62,7 +63,7 @@ __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
62 * Fold a partial checksum 63 * Fold a partial checksum
63 */ 64 */
64 65
65static __inline__ __sum16 csum_fold(__wsum sum) 66static inline __sum16 csum_fold(__wsum sum)
66{ 67{
67 unsigned int __dummy; 68 unsigned int __dummy;
68 __asm__("swap.w %0, %1\n\t" 69 __asm__("swap.w %0, %1\n\t"
@@ -85,7 +86,7 @@ static __inline__ __sum16 csum_fold(__wsum sum)
85 * i386 version by Jorge Cwik <jorge@laser.satlink.net>, adapted 86 * i386 version by Jorge Cwik <jorge@laser.satlink.net>, adapted
86 * for linux by * Arnt Gulbrandsen. 87 * for linux by * Arnt Gulbrandsen.
87 */ 88 */
88static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl) 89static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
89{ 90{
90 unsigned int sum, __dummy0, __dummy1; 91 unsigned int sum, __dummy0, __dummy1;
91 92
@@ -113,10 +114,10 @@ static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
113 return csum_fold(sum); 114 return csum_fold(sum);
114} 115}
115 116
116static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, 117static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
117 unsigned short len, 118 unsigned short len,
118 unsigned short proto, 119 unsigned short proto,
119 __wsum sum) 120 __wsum sum)
120{ 121{
121#ifdef __LITTLE_ENDIAN__ 122#ifdef __LITTLE_ENDIAN__
122 unsigned long len_proto = (proto + len) << 8; 123 unsigned long len_proto = (proto + len) << 8;
@@ -132,6 +133,7 @@ static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
132 : "=r" (sum), "=r" (len_proto) 133 : "=r" (sum), "=r" (len_proto)
133 : "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum) 134 : "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum)
134 : "t"); 135 : "t");
136
135 return sum; 137 return sum;
136} 138}
137 139
@@ -139,30 +141,28 @@ static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
139 * computes the checksum of the TCP/UDP pseudo-header 141 * computes the checksum of the TCP/UDP pseudo-header
140 * returns a 16-bit checksum, already complemented 142 * returns a 16-bit checksum, already complemented
141 */ 143 */
142static __inline__ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, 144static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
143 unsigned short len, 145 unsigned short len,
144 unsigned short proto, 146 unsigned short proto,
145 __wsum sum) 147 __wsum sum)
146{ 148{
147 return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); 149 return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
148} 150}
149 151
150/* 152/*
151 * this routine is used for miscellaneous IP-like checksums, mainly 153 * this routine is used for miscellaneous IP-like checksums, mainly
152 * in icmp.c 154 * in icmp.c
153 */ 155 */
154 156static inline __sum16 ip_compute_csum(const void *buff, int len)
155static __inline__ __sum16 ip_compute_csum(const void *buff, int len)
156{ 157{
157 return csum_fold (csum_partial(buff, len, 0)); 158 return csum_fold(csum_partial(buff, len, 0));
158} 159}
159 160
160#define _HAVE_ARCH_IPV6_CSUM 161#define _HAVE_ARCH_IPV6_CSUM
161#ifdef CONFIG_IPV6 162static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
162static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, 163 const struct in6_addr *daddr,
163 const struct in6_addr *daddr, 164 __u32 len, unsigned short proto,
164 __u32 len, unsigned short proto, 165 __wsum sum)
165 __wsum sum)
166{ 166{
167 unsigned int __dummy; 167 unsigned int __dummy;
168 __asm__("clrt\n\t" 168 __asm__("clrt\n\t"
@@ -187,22 +187,21 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
187 "movt %1\n\t" 187 "movt %1\n\t"
188 "add %1, %0\n" 188 "add %1, %0\n"
189 : "=r" (sum), "=&r" (__dummy) 189 : "=r" (sum), "=&r" (__dummy)
190 : "r" (saddr), "r" (daddr), 190 : "r" (saddr), "r" (daddr),
191 "r" (htonl(len)), "r" (htonl(proto)), "0" (sum) 191 "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
192 : "t"); 192 : "t");
193 193
194 return csum_fold(sum); 194 return csum_fold(sum);
195} 195}
196#endif
197 196
198/* 197/*
199 * Copy and checksum to user 198 * Copy and checksum to user
200 */ 199 */
201#define HAVE_CSUM_COPY_USER 200#define HAVE_CSUM_COPY_USER
202static __inline__ __wsum csum_and_copy_to_user (const void *src, 201static inline __wsum csum_and_copy_to_user(const void *src,
203 void __user *dst, 202 void __user *dst,
204 int len, __wsum sum, 203 int len, __wsum sum,
205 int *err_ptr) 204 int *err_ptr)
206{ 205{
207 if (access_ok(VERIFY_WRITE, dst, len)) 206 if (access_ok(VERIFY_WRITE, dst, len))
208 return csum_partial_copy_generic((__force const void *)src, 207 return csum_partial_copy_generic((__force const void *)src,
diff --git a/include/asm-sh/cpu-sh4/cache.h b/include/asm-sh/cpu-sh4/cache.h
index 6e9c7e6ee8e4..f92b20a0983d 100644
--- a/include/asm-sh/cpu-sh4/cache.h
+++ b/include/asm-sh/cpu-sh4/cache.h
@@ -22,7 +22,7 @@
22#define CCR_CACHE_ICE 0x0100 /* Instruction Cache Enable */ 22#define CCR_CACHE_ICE 0x0100 /* Instruction Cache Enable */
23#define CCR_CACHE_ICI 0x0800 /* IC Invalidate */ 23#define CCR_CACHE_ICI 0x0800 /* IC Invalidate */
24#define CCR_CACHE_IIX 0x8000 /* IC Index Enable */ 24#define CCR_CACHE_IIX 0x8000 /* IC Index Enable */
25#ifndef CONFIG_CPU_SUBTYPE_SH7780 25#ifndef CONFIG_CPU_SH4A
26#define CCR_CACHE_EMODE 0x80000000 /* EMODE Enable */ 26#define CCR_CACHE_EMODE 0x80000000 /* EMODE Enable */
27#endif 27#endif
28 28
diff --git a/include/asm-sh/cpu-sh4/freq.h b/include/asm-sh/cpu-sh4/freq.h
index ef2b9b1ae41f..602d061ca2dc 100644
--- a/include/asm-sh/cpu-sh4/freq.h
+++ b/include/asm-sh/cpu-sh4/freq.h
@@ -10,7 +10,7 @@
10#ifndef __ASM_CPU_SH4_FREQ_H 10#ifndef __ASM_CPU_SH4_FREQ_H
11#define __ASM_CPU_SH4_FREQ_H 11#define __ASM_CPU_SH4_FREQ_H
12 12
13#if defined(CONFIG_CPU_SUBTYPE_SH73180) 13#if defined(CONFIG_CPU_SUBTYPE_SH73180) || defined(CONFIG_CPU_SUBTYPE_SH7722)
14#define FRQCR 0xa4150000 14#define FRQCR 0xa4150000
15#elif defined(CONFIG_CPU_SUBTYPE_SH7780) 15#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
16#define FRQCR 0xffc80000 16#define FRQCR 0xffc80000
diff --git a/include/asm-sh/dma-mapping.h b/include/asm-sh/dma-mapping.h
index 37ab0c131a4d..8d0867b98e05 100644
--- a/include/asm-sh/dma-mapping.h
+++ b/include/asm-sh/dma-mapping.h
@@ -67,7 +67,7 @@ static inline dma_addr_t dma_map_single(struct device *dev,
67 if (dev->bus == &pci_bus_type) 67 if (dev->bus == &pci_bus_type)
68 return virt_to_bus(ptr); 68 return virt_to_bus(ptr);
69#endif 69#endif
70 dma_cache_sync(ptr, size, dir); 70 dma_cache_sync(dev, ptr, size, dir);
71 71
72 return virt_to_bus(ptr); 72 return virt_to_bus(ptr);
73} 73}
@@ -81,7 +81,7 @@ static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
81 81
82 for (i = 0; i < nents; i++) { 82 for (i = 0; i < nents; i++) {
83#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT) 83#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
84 dma_cache_sync(page_address(sg[i].page) + sg[i].offset, 84 dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
85 sg[i].length, dir); 85 sg[i].length, dir);
86#endif 86#endif
87 sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset; 87 sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
@@ -112,7 +112,7 @@ static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
112 if (dev->bus == &pci_bus_type) 112 if (dev->bus == &pci_bus_type)
113 return; 113 return;
114#endif 114#endif
115 dma_cache_sync(bus_to_virt(dma_handle), size, dir); 115 dma_cache_sync(dev, bus_to_virt(dma_handle), size, dir);
116} 116}
117 117
118static inline void dma_sync_single_range(struct device *dev, 118static inline void dma_sync_single_range(struct device *dev,
@@ -124,7 +124,7 @@ static inline void dma_sync_single_range(struct device *dev,
124 if (dev->bus == &pci_bus_type) 124 if (dev->bus == &pci_bus_type)
125 return; 125 return;
126#endif 126#endif
127 dma_cache_sync(bus_to_virt(dma_handle) + offset, size, dir); 127 dma_cache_sync(dev, bus_to_virt(dma_handle) + offset, size, dir);
128} 128}
129 129
130static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg, 130static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
@@ -134,7 +134,7 @@ static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
134 134
135 for (i = 0; i < nelems; i++) { 135 for (i = 0; i < nelems; i++) {
136#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT) 136#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
137 dma_cache_sync(page_address(sg[i].page) + sg[i].offset, 137 dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
138 sg[i].length, dir); 138 sg[i].length, dir);
139#endif 139#endif
140 sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset; 140 sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
diff --git a/include/asm-sh/irq.h b/include/asm-sh/irq.h
index fd576088e47e..bff965ef4b95 100644
--- a/include/asm-sh/irq.h
+++ b/include/asm-sh/irq.h
@@ -37,7 +37,8 @@
37# define ONCHIP_NR_IRQS 144 37# define ONCHIP_NR_IRQS 144
38#elif defined(CONFIG_CPU_SUBTYPE_SH7300) || \ 38#elif defined(CONFIG_CPU_SUBTYPE_SH7300) || \
39 defined(CONFIG_CPU_SUBTYPE_SH73180) || \ 39 defined(CONFIG_CPU_SUBTYPE_SH73180) || \
40 defined(CONFIG_CPU_SUBTYPE_SH7343) 40 defined(CONFIG_CPU_SUBTYPE_SH7343) || \
41 defined(CONFIG_CPU_SUBTYPE_SH7722)
41# define ONCHIP_NR_IRQS 109 42# define ONCHIP_NR_IRQS 109
42#elif defined(CONFIG_CPU_SUBTYPE_SH7780) 43#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
43# define ONCHIP_NR_IRQS 111 44# define ONCHIP_NR_IRQS 111
@@ -79,6 +80,8 @@
79# define OFFCHIP_NR_IRQS 16 80# define OFFCHIP_NR_IRQS 16
80#elif defined(CONFIG_SH_7343_SOLUTION_ENGINE) 81#elif defined(CONFIG_SH_7343_SOLUTION_ENGINE)
81# define OFFCHIP_NR_IRQS 12 82# define OFFCHIP_NR_IRQS 12
83#elif defined(CONFIG_SH_7722_SOLUTION_ENGINE)
84# define OFFCHIP_NR_IRQS 14
82#elif defined(CONFIG_SH_UNKNOWN) 85#elif defined(CONFIG_SH_UNKNOWN)
83# define OFFCHIP_NR_IRQS 16 /* Must also be last */ 86# define OFFCHIP_NR_IRQS 16 /* Must also be last */
84#else 87#else
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index c84901dbd8e5..036ca2843866 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -508,16 +508,50 @@ struct vm_area_struct;
508extern void update_mmu_cache(struct vm_area_struct * vma, 508extern void update_mmu_cache(struct vm_area_struct * vma,
509 unsigned long address, pte_t pte); 509 unsigned long address, pte_t pte);
510 510
511/* Encode and de-code a swap entry */
512/* 511/*
512 * Encode and de-code a swap entry
513 *
514 * Constraints:
515 * _PAGE_FILE at bit 0
516 * _PAGE_PRESENT at bit 8
517 * _PAGE_PROTNONE at bit 9
518 *
519 * For the normal case, we encode the swap type into bits 0:7 and the
520 * swap offset into bits 10:30. For the 64-bit PTE case, we keep the
521 * preserved bits in the low 32-bits and use the upper 32 as the swap
522 * offset (along with a 5-bit type), following the same approach as x86
523 * PAE. This keeps the logic quite simple, and allows for a full 32
524 * PTE_FILE_MAX_BITS, as opposed to the 29-bits we're constrained with
525 * in the pte_low case.
526 *
527 * As is evident by the Alpha code, if we ever get a 64-bit unsigned
528 * long (swp_entry_t) to match up with the 64-bit PTEs, this all becomes
529 * much cleaner..
530 *
513 * NOTE: We should set ZEROs at the position of _PAGE_PRESENT 531 * NOTE: We should set ZEROs at the position of _PAGE_PRESENT
514 * and _PAGE_PROTNONE bits 532 * and _PAGE_PROTNONE bits
515 */ 533 */
516#define __swp_type(x) ((x).val & 0xff) 534#ifdef CONFIG_X2TLB
517#define __swp_offset(x) ((x).val >> 10) 535#define __swp_type(x) ((x).val & 0x1f)
518#define __swp_entry(type, offset) ((swp_entry_t) { (type) | ((offset) << 10) }) 536#define __swp_offset(x) ((x).val >> 5)
519#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 1 }) 537#define __swp_entry(type, offset) ((swp_entry_t){ (type) | (offset) << 5})
520#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 1 }) 538#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
539#define __swp_entry_to_pte(x) ((pte_t){ 0, (x).val })
540
541/*
542 * Encode and decode a nonlinear file mapping entry
543 */
544#define pte_to_pgoff(pte) ((pte).pte_high)
545#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
546
547#define PTE_FILE_MAX_BITS 32
548#else
549#define __swp_type(x) ((x).val & 0xff)
550#define __swp_offset(x) ((x).val >> 10)
551#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) <<10})
552
553#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 1 })
554#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 1 })
521 555
522/* 556/*
523 * Encode and decode a nonlinear file mapping entry 557 * Encode and decode a nonlinear file mapping entry
@@ -525,6 +559,7 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
525#define PTE_FILE_MAX_BITS 29 559#define PTE_FILE_MAX_BITS 29
526#define pte_to_pgoff(pte) (pte_val(pte) >> 1) 560#define pte_to_pgoff(pte) (pte_val(pte) >> 1)
527#define pgoff_to_pte(off) ((pte_t) { ((off) << 1) | _PAGE_FILE }) 561#define pgoff_to_pte(off) ((pte_t) { ((off) << 1) | _PAGE_FILE })
562#endif
528 563
529typedef pte_t *pte_addr_t; 564typedef pte_t *pte_addr_t;
530 565
diff --git a/include/asm-sh/processor.h b/include/asm-sh/processor.h
index 6f1dd7ca1b1d..e29f2abb92de 100644
--- a/include/asm-sh/processor.h
+++ b/include/asm-sh/processor.h
@@ -27,6 +27,8 @@
27#define CCN_CVR 0xff000040 27#define CCN_CVR 0xff000040
28#define CCN_PRR 0xff000044 28#define CCN_PRR 0xff000044
29 29
30const char *get_cpu_subtype(void);
31
30/* 32/*
31 * CPU type and hardware bug flags. Kept separately for each CPU. 33 * CPU type and hardware bug flags. Kept separately for each CPU.
32 * 34 *
@@ -52,8 +54,10 @@ enum cpu_type {
52 CPU_SH7760, CPU_ST40RA, CPU_ST40GX1, CPU_SH4_202, CPU_SH4_501, 54 CPU_SH7760, CPU_ST40RA, CPU_ST40GX1, CPU_SH4_202, CPU_SH4_501,
53 55
54 /* SH-4A types */ 56 /* SH-4A types */
55 CPU_SH73180, CPU_SH7343, CPU_SH7770, CPU_SH7780, CPU_SH7781, 57 CPU_SH7770, CPU_SH7780, CPU_SH7781, CPU_SH7785,
56 CPU_SH7785, 58
59 /* SH4AL-DSP types */
60 CPU_SH73180, CPU_SH7343, CPU_SH7722,
57 61
58 /* Unknown subtype */ 62 /* Unknown subtype */
59 CPU_SH_NONE 63 CPU_SH_NONE
diff --git a/include/asm-sh/push-switch.h b/include/asm-sh/push-switch.h
index dfc6bad567f0..4903f9e52dd8 100644
--- a/include/asm-sh/push-switch.h
+++ b/include/asm-sh/push-switch.h
@@ -4,6 +4,7 @@
4#include <linux/timer.h> 4#include <linux/timer.h>
5#include <linux/interrupt.h> 5#include <linux/interrupt.h>
6#include <linux/workqueue.h> 6#include <linux/workqueue.h>
7#include <linux/platform_device.h>
7 8
8struct push_switch { 9struct push_switch {
9 /* switch state */ 10 /* switch state */
@@ -12,6 +13,8 @@ struct push_switch {
12 struct timer_list debounce; 13 struct timer_list debounce;
13 /* workqueue */ 14 /* workqueue */
14 struct work_struct work; 15 struct work_struct work;
16 /* platform device, for workqueue handler */
17 struct platform_device *pdev;
15}; 18};
16 19
17struct push_switch_platform_info { 20struct push_switch_platform_info {
diff --git a/include/asm-sh/thread_info.h b/include/asm-sh/thread_info.h
index 0c01dc550819..879f741105db 100644
--- a/include/asm-sh/thread_info.h
+++ b/include/asm-sh/thread_info.h
@@ -106,6 +106,7 @@ static inline struct thread_info *current_thread_info(void)
106#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ 106#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
107#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ 107#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
108#define TIF_MEMDIE 18 108#define TIF_MEMDIE 18
109#define TIF_FREEZE 19
109 110
110#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 111#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
111#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 112#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -114,6 +115,7 @@ static inline struct thread_info *current_thread_info(void)
114#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) 115#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
115#define _TIF_USEDFPU (1<<TIF_USEDFPU) 116#define _TIF_USEDFPU (1<<TIF_USEDFPU)
116#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 117#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
118#define _TIF_FREEZE (1<<TIF_FREEZE)
117 119
118#define _TIF_WORK_MASK 0x000000FE /* work to do on interrupt/exception return */ 120#define _TIF_WORK_MASK 0x000000FE /* work to do on interrupt/exception return */
119#define _TIF_ALLWORK_MASK 0x000000FF /* work to do on any return to u-space */ 121#define _TIF_ALLWORK_MASK 0x000000FF /* work to do on any return to u-space */
diff --git a/include/asm-sh64/pgalloc.h b/include/asm-sh64/pgalloc.h
index b29dd468817e..cb803e56cb64 100644
--- a/include/asm-sh64/pgalloc.h
+++ b/include/asm-sh64/pgalloc.h
@@ -41,7 +41,7 @@ static inline void pgd_init(unsigned long page)
41static inline pgd_t *get_pgd_slow(void) 41static inline pgd_t *get_pgd_slow(void)
42{ 42{
43 unsigned int pgd_size = (USER_PTRS_PER_PGD * sizeof(pgd_t)); 43 unsigned int pgd_size = (USER_PTRS_PER_PGD * sizeof(pgd_t));
44 pgd_t *ret = (pgd_t *)kmalloc(pgd_size, GFP_KERNEL); 44 pgd_t *ret = kmalloc(pgd_size, GFP_KERNEL);
45 return ret; 45 return ret;
46} 46}
47 47
diff --git a/include/asm-sparc64/dma.h b/include/asm-sparc64/dma.h
index 27f65972b3bb..93e5a062df88 100644
--- a/include/asm-sparc64/dma.h
+++ b/include/asm-sparc64/dma.h
@@ -152,9 +152,9 @@ extern void dvma_init(struct sbus_bus *);
152#define DMA_MAXEND(addr) (0x01000000UL-(((unsigned long)(addr))&0x00ffffffUL)) 152#define DMA_MAXEND(addr) (0x01000000UL-(((unsigned long)(addr))&0x00ffffffUL))
153 153
154/* Yes, I hack a lot of elisp in my spare time... */ 154/* Yes, I hack a lot of elisp in my spare time... */
155#define DMA_ERROR_P(regs) (((sbus_readl((regs) + DMA_CSR) & DMA_HNDL_ERROR)) 155#define DMA_ERROR_P(regs) ((sbus_readl((regs) + DMA_CSR) & DMA_HNDL_ERROR))
156#define DMA_IRQ_P(regs) (((sbus_readl((regs) + DMA_CSR)) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))) 156#define DMA_IRQ_P(regs) ((sbus_readl((regs) + DMA_CSR)) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
157#define DMA_WRITE_P(regs) (((sbus_readl((regs) + DMA_CSR) & DMA_ST_WRITE)) 157#define DMA_WRITE_P(regs) ((sbus_readl((regs) + DMA_CSR) & DMA_ST_WRITE))
158#define DMA_OFF(__regs) \ 158#define DMA_OFF(__regs) \
159do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \ 159do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
160 tmp &= ~DMA_ENABLE; \ 160 tmp &= ~DMA_ENABLE; \
diff --git a/include/asm-sparc64/irqflags.h b/include/asm-sparc64/irqflags.h
new file mode 100644
index 000000000000..024fc54d0682
--- /dev/null
+++ b/include/asm-sparc64/irqflags.h
@@ -0,0 +1,89 @@
1/*
2 * include/asm-sparc64/irqflags.h
3 *
4 * IRQ flags handling
5 *
6 * This file gets included from lowlevel asm headers too, to provide
7 * wrapped versions of the local_irq_*() APIs, based on the
8 * raw_local_irq_*() functions from the lowlevel headers.
9 */
10#ifndef _ASM_IRQFLAGS_H
11#define _ASM_IRQFLAGS_H
12
13#ifndef __ASSEMBLY__
14
15static inline unsigned long __raw_local_save_flags(void)
16{
17 unsigned long flags;
18
19 __asm__ __volatile__(
20 "rdpr %%pil, %0"
21 : "=r" (flags)
22 );
23
24 return flags;
25}
26
27#define raw_local_save_flags(flags) \
28 do { (flags) = __raw_local_save_flags(); } while (0)
29
30static inline void raw_local_irq_restore(unsigned long flags)
31{
32 __asm__ __volatile__(
33 "wrpr %0, %%pil"
34 : /* no output */
35 : "r" (flags)
36 : "memory"
37 );
38}
39
40static inline void raw_local_irq_disable(void)
41{
42 __asm__ __volatile__(
43 "wrpr 15, %%pil"
44 : /* no outputs */
45 : /* no inputs */
46 : "memory"
47 );
48}
49
50static inline void raw_local_irq_enable(void)
51{
52 __asm__ __volatile__(
53 "wrpr 0, %%pil"
54 : /* no outputs */
55 : /* no inputs */
56 : "memory"
57 );
58}
59
60static inline int raw_irqs_disabled_flags(unsigned long flags)
61{
62 return (flags > 0);
63}
64
65static inline int raw_irqs_disabled(void)
66{
67 unsigned long flags = __raw_local_save_flags();
68
69 return raw_irqs_disabled_flags(flags);
70}
71
72/*
73 * For spinlocks, etc:
74 */
75static inline unsigned long __raw_local_irq_save(void)
76{
77 unsigned long flags = __raw_local_save_flags();
78
79 raw_local_irq_disable();
80
81 return flags;
82}
83
84#define raw_local_irq_save(flags) \
85 do { (flags) = __raw_local_irq_save(); } while (0)
86
87#endif /* (__ASSEMBLY__) */
88
89#endif /* !(_ASM_IRQFLAGS_H) */
diff --git a/include/asm-sparc64/kprobes.h b/include/asm-sparc64/kprobes.h
index c9f5c34d318c..becc38fa06c5 100644
--- a/include/asm-sparc64/kprobes.h
+++ b/include/asm-sparc64/kprobes.h
@@ -13,7 +13,11 @@ typedef u32 kprobe_opcode_t;
13#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry 13#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry
14#define arch_remove_kprobe(p) do {} while (0) 14#define arch_remove_kprobe(p) do {} while (0)
15#define ARCH_INACTIVE_KPROBE_COUNT 0 15#define ARCH_INACTIVE_KPROBE_COUNT 0
16#define flush_insn_slot(p) do { } while (0) 16
17#define flush_insn_slot(p) \
18do { flushi(&(p)->ainsn.insn[0]); \
19 flushi(&(p)->ainsn.insn[1]); \
20} while (0)
17 21
18/* Architecture specific copy of original instruction*/ 22/* Architecture specific copy of original instruction*/
19struct arch_specific_insn { 23struct arch_specific_insn {
@@ -23,7 +27,7 @@ struct arch_specific_insn {
23 27
24struct prev_kprobe { 28struct prev_kprobe {
25 struct kprobe *kp; 29 struct kprobe *kp;
26 unsigned int status; 30 unsigned long status;
27 unsigned long orig_tnpc; 31 unsigned long orig_tnpc;
28 unsigned long orig_tstate_pil; 32 unsigned long orig_tstate_pil;
29}; 33};
@@ -33,10 +37,7 @@ struct kprobe_ctlblk {
33 unsigned long kprobe_status; 37 unsigned long kprobe_status;
34 unsigned long kprobe_orig_tnpc; 38 unsigned long kprobe_orig_tnpc;
35 unsigned long kprobe_orig_tstate_pil; 39 unsigned long kprobe_orig_tstate_pil;
36 long *jprobe_saved_esp;
37 struct pt_regs jprobe_saved_regs; 40 struct pt_regs jprobe_saved_regs;
38 struct pt_regs *jprobe_saved_regs_location;
39 struct sparc_stackf jprobe_saved_stack;
40 struct prev_kprobe prev_kprobe; 41 struct prev_kprobe prev_kprobe;
41}; 42};
42 43
diff --git a/include/asm-sparc64/rwsem.h b/include/asm-sparc64/rwsem.h
index cef5e8270421..1294b7ce5d06 100644
--- a/include/asm-sparc64/rwsem.h
+++ b/include/asm-sparc64/rwsem.h
@@ -23,20 +23,33 @@ struct rw_semaphore {
23 signed int count; 23 signed int count;
24 spinlock_t wait_lock; 24 spinlock_t wait_lock;
25 struct list_head wait_list; 25 struct list_head wait_list;
26#ifdef CONFIG_DEBUG_LOCK_ALLOC
27 struct lockdep_map dep_map;
28#endif
26}; 29};
27 30
31#ifdef CONFIG_DEBUG_LOCK_ALLOC
32# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
33#else
34# define __RWSEM_DEP_MAP_INIT(lockname)
35#endif
36
28#define __RWSEM_INITIALIZER(name) \ 37#define __RWSEM_INITIALIZER(name) \
29{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) } 38{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
39 __RWSEM_DEP_MAP_INIT(name) }
30 40
31#define DECLARE_RWSEM(name) \ 41#define DECLARE_RWSEM(name) \
32 struct rw_semaphore name = __RWSEM_INITIALIZER(name) 42 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
33 43
34static __inline__ void init_rwsem(struct rw_semaphore *sem) 44extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
35{ 45 struct lock_class_key *key);
36 sem->count = RWSEM_UNLOCKED_VALUE; 46
37 spin_lock_init(&sem->wait_lock); 47#define init_rwsem(sem) \
38 INIT_LIST_HEAD(&sem->wait_list); 48do { \
39} 49 static struct lock_class_key __key; \
50 \
51 __init_rwsem((sem), #sem, &__key); \
52} while (0)
40 53
41extern void __down_read(struct rw_semaphore *sem); 54extern void __down_read(struct rw_semaphore *sem);
42extern int __down_read_trylock(struct rw_semaphore *sem); 55extern int __down_read_trylock(struct rw_semaphore *sem);
@@ -46,6 +59,11 @@ extern void __up_read(struct rw_semaphore *sem);
46extern void __up_write(struct rw_semaphore *sem); 59extern void __up_write(struct rw_semaphore *sem);
47extern void __downgrade_write(struct rw_semaphore *sem); 60extern void __downgrade_write(struct rw_semaphore *sem);
48 61
62static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
63{
64 __down_write(sem);
65}
66
49static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) 67static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
50{ 68{
51 return atomic_add_return(delta, (atomic_t *)(&sem->count)); 69 return atomic_add_return(delta, (atomic_t *)(&sem->count));
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index a8b7432c9a70..32281acb878b 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -7,6 +7,9 @@
7#include <asm/visasm.h> 7#include <asm/visasm.h>
8 8
9#ifndef __ASSEMBLY__ 9#ifndef __ASSEMBLY__
10
11#include <linux/irqflags.h>
12
10/* 13/*
11 * Sparc (general) CPU types 14 * Sparc (general) CPU types
12 */ 15 */
@@ -72,52 +75,6 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
72 75
73#endif 76#endif
74 77
75#define setipl(__new_ipl) \
76 __asm__ __volatile__("wrpr %0, %%pil" : : "r" (__new_ipl) : "memory")
77
78#define local_irq_disable() \
79 __asm__ __volatile__("wrpr 15, %%pil" : : : "memory")
80
81#define local_irq_enable() \
82 __asm__ __volatile__("wrpr 0, %%pil" : : : "memory")
83
84#define getipl() \
85({ unsigned long retval; __asm__ __volatile__("rdpr %%pil, %0" : "=r" (retval)); retval; })
86
87#define swap_pil(__new_pil) \
88({ unsigned long retval; \
89 __asm__ __volatile__("rdpr %%pil, %0\n\t" \
90 "wrpr %1, %%pil" \
91 : "=&r" (retval) \
92 : "r" (__new_pil) \
93 : "memory"); \
94 retval; \
95})
96
97#define read_pil_and_cli() \
98({ unsigned long retval; \
99 __asm__ __volatile__("rdpr %%pil, %0\n\t" \
100 "wrpr 15, %%pil" \
101 : "=r" (retval) \
102 : : "memory"); \
103 retval; \
104})
105
106#define local_save_flags(flags) ((flags) = getipl())
107#define local_irq_save(flags) ((flags) = read_pil_and_cli())
108#define local_irq_restore(flags) setipl((flags))
109
110/* On sparc64 IRQ flags are the PIL register. A value of zero
111 * means all interrupt levels are enabled, any other value means
112 * only IRQ levels greater than that value will be received.
113 * Consequently this means that the lowest IRQ level is one.
114 */
115#define irqs_disabled() \
116({ unsigned long flags; \
117 local_save_flags(flags);\
118 (flags > 0); \
119})
120
121#define nop() __asm__ __volatile__ ("nop") 78#define nop() __asm__ __volatile__ ("nop")
122 79
123#define read_barrier_depends() do { } while(0) 80#define read_barrier_depends() do { } while(0)
diff --git a/include/asm-sparc64/ttable.h b/include/asm-sparc64/ttable.h
index f2352606a79f..c2a16e188499 100644
--- a/include/asm-sparc64/ttable.h
+++ b/include/asm-sparc64/ttable.h
@@ -137,10 +137,49 @@
137#endif 137#endif
138#define BREAKPOINT_TRAP TRAP(breakpoint_trap) 138#define BREAKPOINT_TRAP TRAP(breakpoint_trap)
139 139
140#ifdef CONFIG_TRACE_IRQFLAGS
141
142#define TRAP_IRQ(routine, level) \
143 rdpr %pil, %g2; \
144 wrpr %g0, 15, %pil; \
145 sethi %hi(1f-4), %g7; \
146 ba,pt %xcc, etrap_irq; \
147 or %g7, %lo(1f-4), %g7; \
148 nop; \
149 nop; \
150 nop; \
151 .subsection 2; \
1521: call trace_hardirqs_off; \
153 nop; \
154 mov level, %o0; \
155 call routine; \
156 add %sp, PTREGS_OFF, %o1; \
157 ba,a,pt %xcc, rtrap_irq; \
158 .previous;
159
160#define TICK_SMP_IRQ \
161 rdpr %pil, %g2; \
162 wrpr %g0, 15, %pil; \
163 sethi %hi(1f-4), %g7; \
164 ba,pt %xcc, etrap_irq; \
165 or %g7, %lo(1f-4), %g7; \
166 nop; \
167 nop; \
168 nop; \
169 .subsection 2; \
1701: call trace_hardirqs_off; \
171 nop; \
172 call smp_percpu_timer_interrupt; \
173 add %sp, PTREGS_OFF, %o0; \
174 ba,a,pt %xcc, rtrap_irq; \
175 .previous;
176
177#else
178
140#define TRAP_IRQ(routine, level) \ 179#define TRAP_IRQ(routine, level) \
141 rdpr %pil, %g2; \ 180 rdpr %pil, %g2; \
142 wrpr %g0, 15, %pil; \ 181 wrpr %g0, 15, %pil; \
143 b,pt %xcc, etrap_irq; \ 182 ba,pt %xcc, etrap_irq; \
144 rd %pc, %g7; \ 183 rd %pc, %g7; \
145 mov level, %o0; \ 184 mov level, %o0; \
146 call routine; \ 185 call routine; \
@@ -151,12 +190,14 @@
151 rdpr %pil, %g2; \ 190 rdpr %pil, %g2; \
152 wrpr %g0, 15, %pil; \ 191 wrpr %g0, 15, %pil; \
153 sethi %hi(109f), %g7; \ 192 sethi %hi(109f), %g7; \
154 b,pt %xcc, etrap_irq; \ 193 ba,pt %xcc, etrap_irq; \
155109: or %g7, %lo(109b), %g7; \ 194109: or %g7, %lo(109b), %g7; \
156 call smp_percpu_timer_interrupt; \ 195 call smp_percpu_timer_interrupt; \
157 add %sp, PTREGS_OFF, %o0; \ 196 add %sp, PTREGS_OFF, %o0; \
158 ba,a,pt %xcc, rtrap_irq; 197 ba,a,pt %xcc, rtrap_irq;
159 198
199#endif
200
160#define TRAP_IVEC TRAP_NOSAVE(do_ivec) 201#define TRAP_IVEC TRAP_NOSAVE(do_ivec)
161 202
162#define BTRAP(lvl) TRAP_ARG(bad_trap, lvl) 203#define BTRAP(lvl) TRAP_ARG(bad_trap, lvl)
diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h
index 952783d35c7b..3227bc93d69b 100644
--- a/include/asm-x86_64/msr.h
+++ b/include/asm-x86_64/msr.h
@@ -189,6 +189,7 @@ static inline unsigned int cpuid_edx(unsigned int op)
189 189
190#define MSR_IA32_PERFCTR0 0xc1 190#define MSR_IA32_PERFCTR0 0xc1
191#define MSR_IA32_PERFCTR1 0xc2 191#define MSR_IA32_PERFCTR1 0xc2
192#define MSR_FSB_FREQ 0xcd
192 193
193#define MSR_MTRRcap 0x0fe 194#define MSR_MTRRcap 0x0fe
194#define MSR_IA32_BBL_CR_CTL 0x119 195#define MSR_IA32_BBL_CR_CTL 0x119
@@ -311,6 +312,9 @@ static inline unsigned int cpuid_edx(unsigned int op)
311#define MSR_IA32_PERF_STATUS 0x198 312#define MSR_IA32_PERF_STATUS 0x198
312#define MSR_IA32_PERF_CTL 0x199 313#define MSR_IA32_PERF_CTL 0x199
313 314
315#define MSR_IA32_MPERF 0xE7
316#define MSR_IA32_APERF 0xE8
317
314#define MSR_IA32_THERM_CONTROL 0x19a 318#define MSR_IA32_THERM_CONTROL 0x19a
315#define MSR_IA32_THERM_INTERRUPT 0x19b 319#define MSR_IA32_THERM_INTERRUPT 0x19b
316#define MSR_IA32_THERM_STATUS 0x19c 320#define MSR_IA32_THERM_STATUS 0x19c
diff --git a/include/asm-x86_64/thread_info.h b/include/asm-x86_64/thread_info.h
index 787a08114b48..74a6c74397f7 100644
--- a/include/asm-x86_64/thread_info.h
+++ b/include/asm-x86_64/thread_info.h
@@ -122,6 +122,7 @@ static inline struct thread_info *stack_thread_info(void)
122#define TIF_MEMDIE 20 122#define TIF_MEMDIE 20
123#define TIF_DEBUG 21 /* uses debug registers */ 123#define TIF_DEBUG 21 /* uses debug registers */
124#define TIF_IO_BITMAP 22 /* uses I/O bitmap */ 124#define TIF_IO_BITMAP 22 /* uses I/O bitmap */
125#define TIF_FREEZE 23 /* is freezing for suspend */
125 126
126#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 127#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
127#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 128#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -137,6 +138,7 @@ static inline struct thread_info *stack_thread_info(void)
137#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) 138#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
138#define _TIF_DEBUG (1<<TIF_DEBUG) 139#define _TIF_DEBUG (1<<TIF_DEBUG)
139#define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP) 140#define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP)
141#define _TIF_FREEZE (1<<TIF_FREEZE)
140 142
141/* work to do on interrupt/exception return */ 143/* work to do on interrupt/exception return */
142#define _TIF_WORK_MASK \ 144#define _TIF_WORK_MASK \
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h
index 5c8f49280dbc..2facec5914d2 100644
--- a/include/asm-x86_64/topology.h
+++ b/include/asm-x86_64/topology.h
@@ -47,6 +47,7 @@ extern int __node_distance(int, int);
47 .flags = SD_LOAD_BALANCE \ 47 .flags = SD_LOAD_BALANCE \
48 | SD_BALANCE_FORK \ 48 | SD_BALANCE_FORK \
49 | SD_BALANCE_EXEC \ 49 | SD_BALANCE_EXEC \
50 | SD_SERIALIZE \
50 | SD_WAKE_BALANCE, \ 51 | SD_WAKE_BALANCE, \
51 .last_balance = jiffies, \ 52 .last_balance = jiffies, \
52 .balance_interval = 1, \ 53 .balance_interval = 1, \
diff --git a/include/asm-xtensa/asmmacro.h b/include/asm-xtensa/asmmacro.h
new file mode 100644
index 000000000000..76915cabad17
--- /dev/null
+++ b/include/asm-xtensa/asmmacro.h
@@ -0,0 +1,153 @@
1/*
2 * include/asm-xtensa/asmmacro.h
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2005 Tensilica Inc.
9 */
10
11#ifndef _XTENSA_ASMMACRO_H
12#define _XTENSA_ASMMACRO_H
13
14#include <asm/variant/core.h>
15
16/*
17 * Some little helpers for loops. Use zero-overhead-loops
18 * where applicable and if supported by the processor.
19 *
20 * __loopi ar, at, size, inc
21 * ar register initialized with the start address
22 * at scratch register used by macro
23 * size size immediate value
24 * inc increment
25 *
26 * __loops ar, as, at, inc_log2[, mask_log2][, cond][, ncond]
27 * ar register initialized with the start address
28 * as register initialized with the size
29 * at scratch register use by macro
30 * inc_log2 increment [in log2]
31 * mask_log2 mask [in log2]
32 * cond true condition (used in loop'cond')
33 * ncond false condition (used in b'ncond')
34 *
35 * __loop as
36 * restart loop. 'as' register must not have been modified!
37 *
38 * __endla ar, at, incr
39 * ar start address (modified)
40 * as scratch register used by macro
41 * inc increment
42 */
43
44/*
45 * loop for given size as immediate
46 */
47
48 .macro __loopi ar, at, size, incr
49
50#if XCHAL_HAVE_LOOPS
51 movi \at, ((\size + \incr - 1) / (\incr))
52 loop \at, 99f
53#else
54 addi \at, \ar, \size
55 98:
56#endif
57
58 .endm
59
60/*
61 * loop for given size in register
62 */
63
64 .macro __loops ar, as, at, incr_log2, mask_log2, cond, ncond
65
66#if XCHAL_HAVE_LOOPS
67 .ifgt \incr_log2 - 1
68 addi \at, \as, (1 << \incr_log2) - 1
69 .ifnc \mask_log2,
70 extui \at, \at, \incr_log2, \mask_log2
71 .else
72 srli \at, \at, \incr_log2
73 .endif
74 .endif
75 loop\cond \at, 99f
76#else
77 .ifnc \mask_log2,
78 extui \at, \as, \incr_log2, \mask_log2
79 .else
80 .ifnc \ncond,
81 srli \at, \as, \incr_log2
82 .endif
83 .endif
84 .ifnc \ncond,
85 b\ncond \at, 99f
86
87 .endif
88 .ifnc \mask_log2,
89 slli \at, \at, \incr_log2
90 add \at, \ar, \at
91 .else
92 add \at, \ar, \as
93 .endif
94#endif
95 98:
96
97 .endm
98
99/*
100 * loop from ar to ax
101 */
102
103 .macro __loopt ar, as, at, incr_log2
104
105#if XCHAL_HAVE_LOOPS
106 sub \at, \as, \ar
107 .ifgt \incr_log2 - 1
108 addi \at, \at, (1 << \incr_log2) - 1
109 srli \at, \at, \incr_log2
110 .endif
111 loop \at, 99f
112#else
113 98:
114#endif
115
116 .endm
117
118/*
119 * restart loop. registers must be unchanged
120 */
121
122 .macro __loop as
123
124#if XCHAL_HAVE_LOOPS
125 loop \as, 99f
126#else
127 98:
128#endif
129
130 .endm
131
132/*
133 * end of loop with no increment of the address.
134 */
135
136 .macro __endl ar, as
137#if !XCHAL_HAVE_LOOPS
138 bltu \ar, \as, 98b
139#endif
140 99:
141 .endm
142
143/*
144 * end of loop with increment of the address.
145 */
146
147 .macro __endla ar, as, incr
148 addi \ar, \ar, \incr
149 __endl \ar \as
150 .endm
151
152
153#endif /* _XTENSA_ASMMACRO_H */
diff --git a/include/asm-xtensa/bug.h b/include/asm-xtensa/bug.h
index 56703659b204..3e52d72712f1 100644
--- a/include/asm-xtensa/bug.h
+++ b/include/asm-xtensa/bug.h
@@ -13,29 +13,6 @@
13#ifndef _XTENSA_BUG_H 13#ifndef _XTENSA_BUG_H
14#define _XTENSA_BUG_H 14#define _XTENSA_BUG_H
15 15
16#include <linux/stringify.h> 16#include <asm-generic/bug.h>
17
18#define ILL __asm__ __volatile__ (".byte 0,0,0\n")
19
20#ifdef CONFIG_KALLSYMS
21# define BUG() do { \
22 printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
23 ILL; \
24} while (0)
25#else
26# define BUG() do { \
27 printk("kernel BUG!\n"); \
28 ILL; \
29} while (0)
30#endif
31
32#define BUG_ON(condition) do { if (unlikely((condition)!=0)) BUG(); } while(0)
33#define PAGE_BUG(page) do { BUG(); } while (0)
34#define WARN_ON(condition) do { \
35 if (unlikely((condition)!=0)) { \
36 printk ("Warning in %s at %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \
37 dump_stack(); \
38 } \
39} while (0)
40 17
41#endif /* _XTENSA_BUG_H */ 18#endif /* _XTENSA_BUG_H */
diff --git a/include/asm-xtensa/byteorder.h b/include/asm-xtensa/byteorder.h
index 0b1552569aae..0f540a5f4c01 100644
--- a/include/asm-xtensa/byteorder.h
+++ b/include/asm-xtensa/byteorder.h
@@ -11,10 +11,9 @@
11#ifndef _XTENSA_BYTEORDER_H 11#ifndef _XTENSA_BYTEORDER_H
12#define _XTENSA_BYTEORDER_H 12#define _XTENSA_BYTEORDER_H
13 13
14#include <asm/processor.h>
15#include <asm/types.h> 14#include <asm/types.h>
16 15
17static __inline__ __const__ __u32 ___arch__swab32(__u32 x) 16static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
18{ 17{
19 __u32 res; 18 __u32 res;
20 /* instruction sequence from Xtensa ISA release 2/2000 */ 19 /* instruction sequence from Xtensa ISA release 2/2000 */
@@ -29,7 +28,7 @@ static __inline__ __const__ __u32 ___arch__swab32(__u32 x)
29 return res; 28 return res;
30} 29}
31 30
32static __inline__ __const__ __u16 ___arch__swab16(__u16 x) 31static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 x)
33{ 32{
34 /* Given that 'short' values are signed (i.e., can be negative), 33 /* Given that 'short' values are signed (i.e., can be negative),
35 * we cannot assume that the upper 16-bits of the register are 34 * we cannot assume that the upper 16-bits of the register are
diff --git a/include/asm-xtensa/cache.h b/include/asm-xtensa/cache.h
index 1e79c0e27460..1c4a78f29ae2 100644
--- a/include/asm-xtensa/cache.h
+++ b/include/asm-xtensa/cache.h
@@ -4,7 +4,6 @@
4 * This file is subject to the terms and conditions of the GNU General Public 4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive 5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details. 6 * for more details.
7 * 2 of the License, or (at your option) any later version.
8 * 7 *
9 * (C) 2001 - 2005 Tensilica Inc. 8 * (C) 2001 - 2005 Tensilica Inc.
10 */ 9 */
@@ -12,21 +11,14 @@
12#ifndef _XTENSA_CACHE_H 11#ifndef _XTENSA_CACHE_H
13#define _XTENSA_CACHE_H 12#define _XTENSA_CACHE_H
14 13
15#include <xtensa/config/core.h> 14#include <asm/variant/core.h>
16 15
17#if XCHAL_ICACHE_SIZE > 0 16#define L1_CACHE_SHIFT XCHAL_DCACHE_LINEWIDTH
18# if (XCHAL_ICACHE_SIZE % (XCHAL_ICACHE_LINESIZE*XCHAL_ICACHE_WAYS*4)) != 0 17#define L1_CACHE_BYTES XCHAL_DCACHE_LINESIZE
19# error cache configuration outside expected/supported range! 18#define SMP_CACHE_BYTES L1_CACHE_BYTES
20# endif
21#endif
22 19
23#if XCHAL_DCACHE_SIZE > 0 20#define DCACHE_WAY_SIZE (XCHAL_DCACHE_SIZE/XCHAL_DCACHE_WAYS)
24# if (XCHAL_DCACHE_SIZE % (XCHAL_DCACHE_LINESIZE*XCHAL_DCACHE_WAYS*4)) != 0 21#define ICACHE_WAY_SIZE (XCHAL_ICACHE_SIZE/XCHAL_ICACHE_WAYS)
25# error cache configuration outside expected/supported range!
26# endif
27#endif
28 22
29#define L1_CACHE_SHIFT XCHAL_CACHE_LINEWIDTH_MAX
30#define L1_CACHE_BYTES XCHAL_CACHE_LINESIZE_MAX
31 23
32#endif /* _XTENSA_CACHE_H */ 24#endif /* _XTENSA_CACHE_H */
diff --git a/include/asm-xtensa/cacheasm.h b/include/asm-xtensa/cacheasm.h
new file mode 100644
index 000000000000..2c20a58f94cd
--- /dev/null
+++ b/include/asm-xtensa/cacheasm.h
@@ -0,0 +1,177 @@
1/*
2 * include/asm-xtensa/cacheasm.h
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2006 Tensilica Inc.
9 */
10
11#include <asm/cache.h>
12#include <asm/asmmacro.h>
13#include <linux/stringify.h>
14
15/*
16 * Define cache functions as macros here so that they can be used
17 * by the kernel and boot loader. We should consider moving them to a
18 * library that can be linked by both.
19 *
20 * Locking
21 *
22 * ___unlock_dcache_all
23 * ___unlock_icache_all
24 *
25 * Flush and invaldating
26 *
27 * ___flush_invalidate_dcache_{all|range|page}
28 * ___flush_dcache_{all|range|page}
29 * ___invalidate_dcache_{all|range|page}
30 * ___invalidate_icache_{all|range|page}
31 *
32 */
33
34 .macro __loop_cache_all ar at insn size line_width
35
36 movi \ar, 0
37
38 __loopi \ar, \at, \size, (4 << (\line_width))
39 \insn \ar, 0 << (\line_width)
40 \insn \ar, 1 << (\line_width)
41 \insn \ar, 2 << (\line_width)
42 \insn \ar, 3 << (\line_width)
43 __endla \ar, \at, 4 << (\line_width)
44
45 .endm
46
47
48 .macro __loop_cache_range ar as at insn line_width
49
50 extui \at, \ar, 0, \line_width
51 add \as, \as, \at
52
53 __loops \ar, \as, \at, \line_width
54 \insn \ar, 0
55 __endla \ar, \at, (1 << (\line_width))
56
57 .endm
58
59
60 .macro __loop_cache_page ar at insn line_width
61
62 __loopi \ar, \at, PAGE_SIZE, 4 << (\line_width)
63 \insn \ar, 0 << (\line_width)
64 \insn \ar, 1 << (\line_width)
65 \insn \ar, 2 << (\line_width)
66 \insn \ar, 3 << (\line_width)
67 __endla \ar, \at, 4 << (\line_width)
68
69 .endm
70
71
72#if XCHAL_DCACHE_LINE_LOCKABLE
73
74 .macro ___unlock_dcache_all ar at
75
76 __loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
77
78 .endm
79
80#endif
81
82#if XCHAL_ICACHE_LINE_LOCKABLE
83
84 .macro ___unlock_icache_all ar at
85
86 __loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE XCHAL_ICACHE_LINEWIDTH
87
88 .endm
89#endif
90
91 .macro ___flush_invalidate_dcache_all ar at
92
93 __loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
94
95 .endm
96
97
98 .macro ___flush_dcache_all ar at
99
100 __loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
101
102 .endm
103
104
105 .macro ___invalidate_dcache_all ar at
106
107 __loop_cache_all \ar \at dii __stringify(DCACHE_WAY_SIZE) \
108 XCHAL_DCACHE_LINEWIDTH
109
110 .endm
111
112
113 .macro ___invalidate_icache_all ar at
114
115 __loop_cache_all \ar \at iii __stringify(ICACHE_WAY_SIZE) \
116 XCHAL_ICACHE_LINEWIDTH
117
118 .endm
119
120
121
122 .macro ___flush_invalidate_dcache_range ar as at
123
124 __loop_cache_range \ar \as \at dhwbi XCHAL_DCACHE_LINEWIDTH
125
126 .endm
127
128
129 .macro ___flush_dcache_range ar as at
130
131 __loop_cache_range \ar \as \at dhwb XCHAL_DCACHE_LINEWIDTH
132
133 .endm
134
135
136 .macro ___invalidate_dcache_range ar as at
137
138 __loop_cache_range \ar \as \at dhi XCHAL_DCACHE_LINEWIDTH
139
140 .endm
141
142
143 .macro ___invalidate_icache_range ar as at
144
145 __loop_cache_range \ar \as \at ihi XCHAL_ICACHE_LINEWIDTH
146
147 .endm
148
149
150
151 .macro ___flush_invalidate_dcache_page ar as
152
153 __loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH
154
155 .endm
156
157
158 .macro ___flush_dcache_page ar as
159
160 __loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH
161
162 .endm
163
164
165 .macro ___invalidate_dcache_page ar as
166
167 __loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH
168
169 .endm
170
171
172 .macro ___invalidate_icache_page ar as
173
174 __loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH
175
176 .endm
177
diff --git a/include/asm-xtensa/cacheflush.h b/include/asm-xtensa/cacheflush.h
index 44a36e087844..337765b629de 100644
--- a/include/asm-xtensa/cacheflush.h
+++ b/include/asm-xtensa/cacheflush.h
@@ -5,7 +5,7 @@
5 * License. See the file "COPYING" in the main directory of this archive 5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details. 6 * for more details.
7 * 7 *
8 * (C) 2001 - 2005 Tensilica Inc. 8 * (C) 2001 - 2006 Tensilica Inc.
9 */ 9 */
10 10
11#ifndef _XTENSA_CACHEFLUSH_H 11#ifndef _XTENSA_CACHEFLUSH_H
diff --git a/include/asm-xtensa/checksum.h b/include/asm-xtensa/checksum.h
index 5435aff9a4b7..23534c60b3a4 100644
--- a/include/asm-xtensa/checksum.h
+++ b/include/asm-xtensa/checksum.h
@@ -12,7 +12,7 @@
12#define _XTENSA_CHECKSUM_H 12#define _XTENSA_CHECKSUM_H
13 13
14#include <linux/in6.h> 14#include <linux/in6.h>
15#include <xtensa/config/core.h> 15#include <asm/variant/core.h>
16 16
17/* 17/*
18 * computes the checksum of a memory block at buff, length len, 18 * computes the checksum of a memory block at buff, length len,
diff --git a/include/asm-xtensa/coprocessor.h b/include/asm-xtensa/coprocessor.h
index 5093034723be..bd09ec02d57f 100644
--- a/include/asm-xtensa/coprocessor.h
+++ b/include/asm-xtensa/coprocessor.h
@@ -11,7 +11,16 @@
11#ifndef _XTENSA_COPROCESSOR_H 11#ifndef _XTENSA_COPROCESSOR_H
12#define _XTENSA_COPROCESSOR_H 12#define _XTENSA_COPROCESSOR_H
13 13
14#include <xtensa/config/core.h> 14#include <asm/variant/core.h>
15#include <asm/variant/tie.h>
16
17#if !XCHAL_HAVE_CP
18
19#define XTENSA_CP_EXTRA_OFFSET 0
20#define XTENSA_CP_EXTRA_ALIGN 1 /* must be a power of 2 */
21#define XTENSA_CP_EXTRA_SIZE 0
22
23#else
15 24
16#define XTOFS(last_start,last_size,align) \ 25#define XTOFS(last_start,last_size,align) \
17 ((last_start+last_size+align-1) & -align) 26 ((last_start+last_size+align-1) & -align)
@@ -67,4 +76,6 @@ extern void save_coprocessor_registers(void*, int);
67# endif 76# endif
68#endif 77#endif
69 78
79#endif
80
70#endif /* _XTENSA_COPROCESSOR_H */ 81#endif /* _XTENSA_COPROCESSOR_H */
diff --git a/include/asm-xtensa/dma.h b/include/asm-xtensa/dma.h
index db2633f67789..e30f3abf48f0 100644
--- a/include/asm-xtensa/dma.h
+++ b/include/asm-xtensa/dma.h
@@ -12,7 +12,6 @@
12#define _XTENSA_DMA_H 12#define _XTENSA_DMA_H
13 13
14#include <asm/io.h> /* need byte IO */ 14#include <asm/io.h> /* need byte IO */
15#include <xtensa/config/core.h>
16 15
17/* 16/*
18 * This is only to be defined if we have PC-like DMA. 17 * This is only to be defined if we have PC-like DMA.
@@ -44,7 +43,9 @@
44 * enters another area, and virt_to_phys() may not return 43 * enters another area, and virt_to_phys() may not return
45 * the value desired). 44 * the value desired).
46 */ 45 */
47#define MAX_DMA_ADDRESS (PAGE_OFFSET + XCHAL_KSEG_CACHED_SIZE - 1) 46
47#define MAX_DMA_ADDRESS (PAGE_OFFSET + XCHAL_KIO_SIZE - 1)
48
48 49
49/* Reserve and release a DMA channel */ 50/* Reserve and release a DMA channel */
50extern int request_dma(unsigned int dmanr, const char * device_id); 51extern int request_dma(unsigned int dmanr, const char * device_id);
diff --git a/include/asm-xtensa/elf.h b/include/asm-xtensa/elf.h
index de0667453b2e..f0f9fd8560a5 100644
--- a/include/asm-xtensa/elf.h
+++ b/include/asm-xtensa/elf.h
@@ -13,9 +13,8 @@
13#ifndef _XTENSA_ELF_H 13#ifndef _XTENSA_ELF_H
14#define _XTENSA_ELF_H 14#define _XTENSA_ELF_H
15 15
16#include <asm/variant/core.h>
16#include <asm/ptrace.h> 17#include <asm/ptrace.h>
17#include <asm/coprocessor.h>
18#include <xtensa/config/core.h>
19 18
20/* Xtensa processor ELF architecture-magic number */ 19/* Xtensa processor ELF architecture-magic number */
21 20
@@ -118,11 +117,15 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG];
118 * using memcpy(). But we do allow space for such alignment, 117 * using memcpy(). But we do allow space for such alignment,
119 * to allow optimizations of layout and copying. 118 * to allow optimizations of layout and copying.
120 */ 119 */
121 120#if 0
122#define TOTAL_FPREGS_SIZE \ 121#define TOTAL_FPREGS_SIZE \
123 (4 + XTENSA_CPE_LTABLE_SIZE + XTENSA_CP_EXTRA_SIZE) 122 (4 + XTENSA_CPE_LTABLE_SIZE + XTENSA_CP_EXTRA_SIZE)
124#define ELF_NFPREG \ 123#define ELF_NFPREG \
125 ((TOTAL_FPREGS_SIZE + sizeof(elf_fpreg_t) - 1) / sizeof(elf_fpreg_t)) 124 ((TOTAL_FPREGS_SIZE + sizeof(elf_fpreg_t) - 1) / sizeof(elf_fpreg_t))
125#else
126#define TOTAL_FPREGS_SIZE 0
127#define ELF_NFPREG 0
128#endif
126 129
127typedef unsigned int elf_fpreg_t; 130typedef unsigned int elf_fpreg_t;
128typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; 131typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
diff --git a/include/asm-xtensa/fcntl.h b/include/asm-xtensa/fcntl.h
index ec066ae96caf..0609fc691b72 100644
--- a/include/asm-xtensa/fcntl.h
+++ b/include/asm-xtensa/fcntl.h
@@ -14,48 +14,86 @@
14 14
15/* open/fcntl - O_SYNC is only implemented on blocks devices and on files 15/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
16 located on an ext2 file system */ 16 located on an ext2 file system */
17#define O_APPEND 0x0008 17#define O_ACCMODE 0003
18#define O_SYNC 0x0010 18#define O_RDONLY 00
19#define O_NONBLOCK 0x0080 19#define O_WRONLY 01
20#define O_CREAT 0x0100 /* not fcntl */ 20#define O_RDWR 02
21#define O_EXCL 0x0400 /* not fcntl */ 21#define O_CREAT 0100 /* not fcntl */
22#define O_NOCTTY 0x0800 /* not fcntl */ 22#define O_EXCL 0200 /* not fcntl */
23#define FASYNC 0x1000 /* fcntl, for BSD compatibility */ 23#define O_NOCTTY 0400 /* not fcntl */
24#define O_LARGEFILE 0x2000 /* allow large file opens - currently ignored */ 24#define O_TRUNC 01000 /* not fcntl */
25#define O_DIRECT 0x8000 /* direct disk access hint - currently ignored*/ 25#define O_APPEND 02000
26#define O_NOATIME 0x100000 26#define O_NONBLOCK 04000
27 27#define O_NDELAY O_NONBLOCK
28#define F_GETLK 14 28#define O_SYNC 010000
29#define F_GETLK64 15 29#define FASYNC 020000 /* fcntl, for BSD compatibility */
30#define O_DIRECT 040000 /* direct disk access hint */
31#define O_LARGEFILE 0100000
32#define O_DIRECTORY 0200000 /* must be a directory */
33#define O_NOFOLLOW 0400000 /* don't follow links */
34#define O_NOATIME 01000000
35
36#define F_DUPFD 0 /* dup */
37#define F_GETFD 1 /* get close_on_exec */
38#define F_SETFD 2 /* set/clear close_on_exec */
39#define F_GETFL 3 /* get file->f_flags */
40#define F_SETFL 4 /* set file->f_flags */
41#define F_GETLK 5
30#define F_SETLK 6 42#define F_SETLK 6
31#define F_SETLKW 7 43#define F_SETLKW 7
32#define F_SETLK64 16
33#define F_SETLKW64 17
34 44
35#define F_SETOWN 24 /* for sockets. */ 45#define F_SETOWN 8 /* for sockets. */
36#define F_GETOWN 23 /* for sockets. */ 46#define F_GETOWN 9 /* for sockets. */
47#define F_SETSIG 10 /* for sockets. */
48#define F_GETSIG 11 /* for sockets. */
49
50#define F_GETLK64 12 /* using 'struct flock64' */
51#define F_SETLK64 13
52#define F_SETLKW64 14
53
54/* for F_[GET|SET]FL */
55#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
56
57/* for posix fcntl() and lockf() */
58#define F_RDLCK 0
59#define F_WRLCK 1
60#define F_UNLCK 2
61
62/* for old implementation of bsd flock () */
63#define F_EXLCK 4 /* or 3 */
64#define F_SHLCK 8 /* or 4 */
37 65
38typedef struct flock { 66/* for leases */
67#define F_INPROGRESS 16
68
69/* operations for bsd flock(), also used by the kernel implementation */
70#define LOCK_SH 1 /* shared lock */
71#define LOCK_EX 2 /* exclusive lock */
72#define LOCK_NB 4 /* or'd with one of the above to prevent
73 blocking */
74#define LOCK_UN 8 /* remove lock */
75
76#define LOCK_MAND 32 /* This is a mandatory flock */
77#define LOCK_READ 64 /* ... Which allows concurrent read operations */
78#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
79#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
80
81struct flock {
39 short l_type; 82 short l_type;
40 short l_whence; 83 short l_whence;
41 __kernel_off_t l_start; 84 off_t l_start;
42 __kernel_off_t l_len; 85 off_t l_len;
43 long l_sysid; 86 pid_t l_pid;
44 __kernel_pid_t l_pid; 87};
45 long pad[4];
46} flock_t;
47 88
48struct flock64 { 89struct flock64 {
49 short l_type; 90 short l_type;
50 short l_whence; 91 short l_whence;
51 __kernel_off_t l_start; 92 loff_t l_start;
52 __kernel_off_t l_len; 93 loff_t l_len;
53 pid_t l_pid; 94 pid_t l_pid;
54}; 95};
55 96
56#define HAVE_ARCH_STRUCT_FLOCK 97#define F_LINUX_SPECIFIC_BASE 1024
57#define HAVE_ARCH_STRUCT_FLOCK64
58
59#include <asm-generic/fcntl.h>
60 98
61#endif /* _XTENSA_FCNTL_H */ 99#endif /* _XTENSA_FCNTL_H */
diff --git a/include/asm-xtensa/fixmap.h b/include/asm-xtensa/fixmap.h
deleted file mode 100644
index 4423b8ad4954..000000000000
--- a/include/asm-xtensa/fixmap.h
+++ /dev/null
@@ -1,252 +0,0 @@
1/*
2 * include/asm-xtensa/fixmap.h
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2001 - 2005 Tensilica Inc.
9 */
10
11#ifndef _XTENSA_FIXMAP_H
12#define _XTENSA_FIXMAP_H
13
14#include <asm/processor.h>
15
16#ifdef CONFIG_MMU
17
18/*
19 * Here we define all the compile-time virtual addresses.
20 */
21
22#if XCHAL_SEG_MAPPABLE_VADDR != 0
23# error "Current port requires virtual user space starting at 0"
24#endif
25#if XCHAL_SEG_MAPPABLE_SIZE < 0x80000000
26# error "Current port requires at least 0x8000000 bytes for user space"
27#endif
28
29/* Verify instruction/data ram/rom and xlmi don't overlay vmalloc space. */
30
31#define __IN_VMALLOC(addr) \
32 (((addr) >= VMALLOC_START) && ((addr) < VMALLOC_END))
33#define __SPAN_VMALLOC(start,end) \
34 (((start) < VMALLOC_START) && ((end) >= VMALLOC_END))
35#define INSIDE_VMALLOC(start,end) \
36 (__IN_VMALLOC((start)) || __IN_VMALLOC(end) || __SPAN_VMALLOC((start),(end)))
37
38#if XCHAL_NUM_INSTROM
39# if XCHAL_NUM_INSTROM == 1
40# if INSIDE_VMALLOC(XCHAL_INSTROM0_VADDR,XCHAL_INSTROM0_VADDR+XCHAL_INSTROM0_SIZE)
41# error vmalloc range conflicts with instrom0
42# endif
43# endif
44# if XCHAL_NUM_INSTROM == 2
45# if INSIDE_VMALLOC(XCHAL_INSTROM1_VADDR,XCHAL_INSTROM1_VADDR+XCHAL_INSTROM1_SIZE)
46# error vmalloc range conflicts with instrom1
47# endif
48# endif
49#endif
50
51#if XCHAL_NUM_INSTRAM
52# if XCHAL_NUM_INSTRAM == 1
53# if INSIDE_VMALLOC(XCHAL_INSTRAM0_VADDR,XCHAL_INSTRAM0_VADDR+XCHAL_INSTRAM0_SIZE)
54# error vmalloc range conflicts with instram0
55# endif
56# endif
57# if XCHAL_NUM_INSTRAM == 2
58# if INSIDE_VMALLOC(XCHAL_INSTRAM1_VADDR,XCHAL_INSTRAM1_VADDR+XCHAL_INSTRAM1_SIZE)
59# error vmalloc range conflicts with instram1
60# endif
61# endif
62#endif
63
64#if XCHAL_NUM_DATAROM
65# if XCHAL_NUM_DATAROM == 1
66# if INSIDE_VMALLOC(XCHAL_DATAROM0_VADDR,XCHAL_DATAROM0_VADDR+XCHAL_DATAROM0_SIZE)
67# error vmalloc range conflicts with datarom0
68# endif
69# endif
70# if XCHAL_NUM_DATAROM == 2
71# if INSIDE_VMALLOC(XCHAL_DATAROM1_VADDR,XCHAL_DATAROM1_VADDR+XCHAL_DATAROM1_SIZE)
72# error vmalloc range conflicts with datarom1
73# endif
74# endif
75#endif
76
77#if XCHAL_NUM_DATARAM
78# if XCHAL_NUM_DATARAM == 1
79# if INSIDE_VMALLOC(XCHAL_DATARAM0_VADDR,XCHAL_DATARAM0_VADDR+XCHAL_DATARAM0_SIZE)
80# error vmalloc range conflicts with dataram0
81# endif
82# endif
83# if XCHAL_NUM_DATARAM == 2
84# if INSIDE_VMALLOC(XCHAL_DATARAM1_VADDR,XCHAL_DATARAM1_VADDR+XCHAL_DATARAM1_SIZE)
85# error vmalloc range conflicts with dataram1
86# endif
87# endif
88#endif
89
90#if XCHAL_NUM_XLMI
91# if XCHAL_NUM_XLMI == 1
92# if INSIDE_VMALLOC(XCHAL_XLMI0_VADDR,XCHAL_XLMI0_VADDR+XCHAL_XLMI0_SIZE)
93# error vmalloc range conflicts with xlmi0
94# endif
95# endif
96# if XCHAL_NUM_XLMI == 2
97# if INSIDE_VMALLOC(XCHAL_XLMI1_VADDR,XCHAL_XLMI1_VADDR+XCHAL_XLMI1_SIZE)
98# error vmalloc range conflicts with xlmi1
99# endif
100# endif
101#endif
102
103#if (XCHAL_NUM_INSTROM > 2) || \
104 (XCHAL_NUM_INSTRAM > 2) || \
105 (XCHAL_NUM_DATARAM > 2) || \
106 (XCHAL_NUM_DATAROM > 2) || \
107 (XCHAL_NUM_XLMI > 2)
108# error Insufficient checks on vmalloc above for more than 2 devices
109#endif
110
111/*
112 * USER_VM_SIZE does not necessarily equal TASK_SIZE. We bumped
113 * TASK_SIZE down to 0x4000000 to simplify the handling of windowed
114 * call instructions (currently limited to a range of 1 GByte). User
115 * tasks may very well reclaim the VM space from 0x40000000 to
116 * 0x7fffffff in the future, so we do not want the kernel becoming
117 * accustomed to having any of its stuff (e.g., page tables) in this
118 * region. This VM region is no-man's land for now.
119 */
120
121#define USER_VM_START XCHAL_SEG_MAPPABLE_VADDR
122#define USER_VM_SIZE 0x80000000
123
124/* Size of page table: */
125
126#define PGTABLE_SIZE_BITS (32 - XCHAL_MMU_MIN_PTE_PAGE_SIZE + 2)
127#define PGTABLE_SIZE (1L << PGTABLE_SIZE_BITS)
128
129/* All kernel-mappable space: */
130
131#define KERNEL_ALLMAP_START (USER_VM_START + USER_VM_SIZE)
132#define KERNEL_ALLMAP_SIZE (XCHAL_SEG_MAPPABLE_SIZE - KERNEL_ALLMAP_START)
133
134/* Carve out page table at start of kernel-mappable area: */
135
136#if KERNEL_ALLMAP_SIZE < PGTABLE_SIZE
137#error "Gimme some space for page table!"
138#endif
139#define PGTABLE_START KERNEL_ALLMAP_START
140
141/* Remaining kernel-mappable space: */
142
143#define KERNEL_MAPPED_START (KERNEL_ALLMAP_START + PGTABLE_SIZE)
144#define KERNEL_MAPPED_SIZE (KERNEL_ALLMAP_SIZE - PGTABLE_SIZE)
145
146#if KERNEL_MAPPED_SIZE < 0x01000000 /* 16 MB is arbitrary for now */
147# error "Shouldn't the kernel have at least *some* mappable space?"
148#endif
149
150#define MAX_LOW_MEMORY XCHAL_KSEG_CACHED_SIZE
151
152#endif
153
154/*
155 * Some constants used elsewhere, but perhaps only in Xtensa header
156 * files, so maybe we can get rid of some and access compile-time HAL
157 * directly...
158 *
159 * Note: We assume that system RAM is located at the very start of the
160 * kernel segments !!
161 */
162#define KERNEL_VM_LOW XCHAL_KSEG_CACHED_VADDR
163#define KERNEL_VM_HIGH XCHAL_KSEG_BYPASS_VADDR
164#define KERNEL_SPACE XCHAL_KSEG_CACHED_VADDR
165
166/*
167 * Returns the physical/virtual addresses of the kernel space
168 * (works with the cached kernel segment only, which is the
169 * one normally used for kernel operation).
170 */
171
172/* PHYSICAL BYPASS CACHED
173 *
174 * bypass vaddr bypass paddr * cached vaddr
175 * cached vaddr cached paddr bypass vaddr *
176 * bypass paddr * bypass vaddr cached vaddr
177 * cached paddr * bypass vaddr cached vaddr
178 * other * * *
179 */
180
181#define PHYSADDR(a) \
182(((unsigned)(a) >= XCHAL_KSEG_BYPASS_VADDR \
183 && (unsigned)(a) < XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_BYPASS_SIZE) ? \
184 (unsigned)(a) - XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_BYPASS_PADDR : \
185 ((unsigned)(a) >= XCHAL_KSEG_CACHED_VADDR \
186 && (unsigned)(a) < XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_CACHED_SIZE) ? \
187 (unsigned)(a) - XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_CACHED_PADDR : \
188 (unsigned)(a))
189
190#define BYPASS_ADDR(a) \
191(((unsigned)(a) >= XCHAL_KSEG_BYPASS_PADDR \
192 && (unsigned)(a) < XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_BYPASS_SIZE) ? \
193 (unsigned)(a) - XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_BYPASS_VADDR : \
194 ((unsigned)(a) >= XCHAL_KSEG_CACHED_PADDR \
195 && (unsigned)(a) < XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_CACHED_SIZE) ? \
196 (unsigned)(a) - XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_BYPASS_VADDR : \
197 ((unsigned)(a) >= XCHAL_KSEG_CACHED_VADDR \
198 && (unsigned)(a) < XCHAL_KSEG_CACHED_VADDR+XCHAL_KSEG_CACHED_SIZE)? \
199 (unsigned)(a) - XCHAL_KSEG_CACHED_VADDR+XCHAL_KSEG_BYPASS_VADDR: \
200 (unsigned)(a))
201
202#define CACHED_ADDR(a) \
203(((unsigned)(a) >= XCHAL_KSEG_BYPASS_PADDR \
204 && (unsigned)(a) < XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_BYPASS_SIZE) ? \
205 (unsigned)(a) - XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_CACHED_VADDR : \
206 ((unsigned)(a) >= XCHAL_KSEG_CACHED_PADDR \
207 && (unsigned)(a) < XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_CACHED_SIZE) ? \
208 (unsigned)(a) - XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_CACHED_VADDR : \
209 ((unsigned)(a) >= XCHAL_KSEG_BYPASS_VADDR \
210 && (unsigned)(a) < XCHAL_KSEG_BYPASS_VADDR+XCHAL_KSEG_BYPASS_SIZE) ? \
211 (unsigned)(a) - XCHAL_KSEG_BYPASS_VADDR+XCHAL_KSEG_CACHED_VADDR : \
212 (unsigned)(a))
213
214#define PHYSADDR_IO(a) \
215(((unsigned)(a) >= XCHAL_KIO_BYPASS_VADDR \
216 && (unsigned)(a) < XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_BYPASS_SIZE) ? \
217 (unsigned)(a) - XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_BYPASS_PADDR : \
218 ((unsigned)(a) >= XCHAL_KIO_CACHED_VADDR \
219 && (unsigned)(a) < XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_CACHED_SIZE) ? \
220 (unsigned)(a) - XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_CACHED_PADDR : \
221 (unsigned)(a))
222
223#define BYPASS_ADDR_IO(a) \
224(((unsigned)(a) >= XCHAL_KIO_BYPASS_PADDR \
225 && (unsigned)(a) < XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_BYPASS_SIZE) ? \
226 (unsigned)(a) - XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_BYPASS_VADDR : \
227 ((unsigned)(a) >= XCHAL_KIO_CACHED_PADDR \
228 && (unsigned)(a) < XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_CACHED_SIZE) ? \
229 (unsigned)(a) - XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_BYPASS_VADDR : \
230 ((unsigned)(a) >= XCHAL_KIO_CACHED_VADDR \
231 && (unsigned)(a) < XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_CACHED_SIZE) ? \
232 (unsigned)(a) - XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_BYPASS_VADDR : \
233 (unsigned)(a))
234
235#define CACHED_ADDR_IO(a) \
236(((unsigned)(a) >= XCHAL_KIO_BYPASS_PADDR \
237 && (unsigned)(a) < XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_BYPASS_SIZE) ? \
238 (unsigned)(a) - XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_CACHED_VADDR : \
239 ((unsigned)(a) >= XCHAL_KIO_CACHED_PADDR \
240 && (unsigned)(a) < XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_CACHED_SIZE) ? \
241 (unsigned)(a) - XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_CACHED_VADDR : \
242 ((unsigned)(a) >= XCHAL_KIO_BYPASS_VADDR \
243 && (unsigned)(a) < XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_BYPASS_SIZE) ? \
244 (unsigned)(a) - XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_CACHED_VADDR : \
245 (unsigned)(a))
246
247#endif /* _XTENSA_ADDRSPACE_H */
248
249
250
251
252
diff --git a/include/asm-xtensa/io.h b/include/asm-xtensa/io.h
index 556e5eed34f5..31ffc3f119c1 100644
--- a/include/asm-xtensa/io.h
+++ b/include/asm-xtensa/io.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/include/asm-xtensa/io.h 2 * include/asm-xtensa/io.h
3 * 3 *
4 * This file is subject to the terms and conditions of the GNU General Public 4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive 5 * License. See the file "COPYING" in the main directory of this archive
@@ -15,10 +15,11 @@
15#include <asm/byteorder.h> 15#include <asm/byteorder.h>
16 16
17#include <linux/types.h> 17#include <linux/types.h>
18#include <asm/fixmap.h>
19
20#define _IO_BASE 0
21 18
19#define XCHAL_KIO_CACHED_VADDR 0xf0000000
20#define XCHAL_KIO_BYPASS_VADDR 0xf8000000
21#define XCHAL_KIO_PADDR 0xf0000000
22#define XCHAL_KIO_SIZE 0x08000000
22 23
23/* 24/*
24 * swap functions to change byte order from little-endian to big-endian and 25 * swap functions to change byte order from little-endian to big-endian and
@@ -42,40 +43,43 @@ static inline unsigned int _swapl (unsigned int v)
42 43
43static inline unsigned long virt_to_phys(volatile void * address) 44static inline unsigned long virt_to_phys(volatile void * address)
44{ 45{
45 return PHYSADDR((unsigned long)address); 46 return __pa(address);
46} 47}
47 48
48static inline void * phys_to_virt(unsigned long address) 49static inline void * phys_to_virt(unsigned long address)
49{ 50{
50 return (void*) CACHED_ADDR(address); 51 return __va(address);
51} 52}
52 53
53/* 54/*
54 * IO bus memory addresses are also 1:1 with the physical address 55 * virt_to_bus and bus_to_virt are deprecated.
55 */ 56 */
56 57
57static inline unsigned long virt_to_bus(volatile void * address) 58#define virt_to_bus(x) virt_to_phys(x)
58{ 59#define bus_to_virt(x) phys_to_virt(x)
59 return PHYSADDR((unsigned long)address);
60}
61
62static inline void * bus_to_virt (unsigned long address)
63{
64 return (void *) CACHED_ADDR(address);
65}
66 60
67/* 61/*
68 * Change "struct page" to physical address. 62 * Return the virtual (cached) address for the specified bus memory.
63 * Note that we currently don't support any address outside the KIO segment.
69 */ 64 */
70 65
71static inline void *ioremap(unsigned long offset, unsigned long size) 66static inline void *ioremap(unsigned long offset, unsigned long size)
72{ 67{
73 return (void *) CACHED_ADDR_IO(offset); 68 if (offset >= XCHAL_KIO_PADDR
69 && offset < XCHAL_KIO_PADDR + XCHAL_KIO_SIZE)
70 return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_BYPASS_VADDR);
71
72 else
73 BUG();
74} 74}
75 75
76static inline void *ioremap_nocache(unsigned long offset, unsigned long size) 76static inline void *ioremap_nocache(unsigned long offset, unsigned long size)
77{ 77{
78 return (void *) BYPASS_ADDR_IO(offset); 78 if (offset >= XCHAL_KIO_PADDR
79 && offset < XCHAL_KIO_PADDR + XCHAL_KIO_SIZE)
80 return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_CACHED_VADDR);
81 else
82 BUG();
79} 83}
80 84
81static inline void iounmap(void *addr) 85static inline void iounmap(void *addr)
@@ -121,9 +125,6 @@ static inline void __raw_writel(__u32 b, volatile void __iomem *addr)
121 *(__force volatile __u32 *)(addr) = b; 125 *(__force volatile __u32 *)(addr) = b;
122} 126}
123 127
124
125
126
127/* These are the definitions for the x86 IO instructions 128/* These are the definitions for the x86 IO instructions
128 * inb/inw/inl/outb/outw/outl, the "string" versions 129 * inb/inw/inl/outb/outw/outl, the "string" versions
129 * insb/insw/insl/outsb/outsw/outsl, and the "pausing" versions 130 * insb/insw/insl/outsb/outsw/outsl, and the "pausing" versions
@@ -131,11 +132,11 @@ static inline void __raw_writel(__u32 b, volatile void __iomem *addr)
131 * The macros don't do byte-swapping. 132 * The macros don't do byte-swapping.
132 */ 133 */
133 134
134#define inb(port) readb((u8 *)((port)+_IO_BASE)) 135#define inb(port) readb((u8 *)((port)))
135#define outb(val, port) writeb((val),(u8 *)((unsigned long)(port)+_IO_BASE)) 136#define outb(val, port) writeb((val),(u8 *)((unsigned long)(port)))
136#define inw(port) readw((u16 *)((port)+_IO_BASE)) 137#define inw(port) readw((u16 *)((port)))
137#define outw(val, port) writew((val),(u16 *)((unsigned long)(port)+_IO_BASE)) 138#define outw(val, port) writew((val),(u16 *)((unsigned long)(port)))
138#define inl(port) readl((u32 *)((port)+_IO_BASE)) 139#define inl(port) readl((u32 *)((port)))
139#define outl(val, port) writel((val),(u32 *)((unsigned long)(port))) 140#define outl(val, port) writel((val),(u32 *)((unsigned long)(port)))
140 141
141#define inb_p(port) inb((port)) 142#define inb_p(port) inb((port))
@@ -180,14 +181,13 @@ extern void outsl (unsigned long port, const void *src, unsigned long count);
180 181
181 182
182/* 183/*
183 * * Convert a physical pointer to a virtual kernel pointer for /dev/mem 184 * Convert a physical pointer to a virtual kernel pointer for /dev/mem access
184 * * access 185 */
185 * */
186#define xlate_dev_mem_ptr(p) __va(p) 186#define xlate_dev_mem_ptr(p) __va(p)
187 187
188/* 188/*
189 * * Convert a virtual cached pointer to an uncached pointer 189 * Convert a virtual cached pointer to an uncached pointer
190 * */ 190 */
191#define xlate_dev_kmem_ptr(p) p 191#define xlate_dev_kmem_ptr(p) p
192 192
193 193
diff --git a/include/asm-xtensa/irq.h b/include/asm-xtensa/irq.h
index 049fde7e752d..fc73b7f11aff 100644
--- a/include/asm-xtensa/irq.h
+++ b/include/asm-xtensa/irq.h
@@ -12,8 +12,7 @@
12#define _XTENSA_IRQ_H 12#define _XTENSA_IRQ_H
13 13
14#include <asm/platform/hardware.h> 14#include <asm/platform/hardware.h>
15 15#include <asm/variant/core.h>
16#include <xtensa/config/core.h>
17 16
18#ifndef PLATFORM_NR_IRQS 17#ifndef PLATFORM_NR_IRQS
19# define PLATFORM_NR_IRQS 0 18# define PLATFORM_NR_IRQS 0
@@ -27,10 +26,5 @@ static __inline__ int irq_canonicalize(int irq)
27} 26}
28 27
29struct irqaction; 28struct irqaction;
30#if 0 // FIXME
31extern void disable_irq_nosync(unsigned int);
32extern void disable_irq(unsigned int);
33extern void enable_irq(unsigned int);
34#endif
35 29
36#endif /* _XTENSA_IRQ_H */ 30#endif /* _XTENSA_IRQ_H */
diff --git a/include/asm-xtensa/irq_regs.h b/include/asm-xtensa/irq_regs.h
new file mode 100644
index 000000000000..3dd9c0b70270
--- /dev/null
+++ b/include/asm-xtensa/irq_regs.h
@@ -0,0 +1 @@
#include <asm-generic/irq_regs.h>
diff --git a/include/asm-xtensa/mmu_context.h b/include/asm-xtensa/mmu_context.h
index af683a74a4ec..f14851f086c3 100644
--- a/include/asm-xtensa/mmu_context.h
+++ b/include/asm-xtensa/mmu_context.h
@@ -16,187 +16,32 @@
16#include <linux/stringify.h> 16#include <linux/stringify.h>
17 17
18#include <asm/pgtable.h> 18#include <asm/pgtable.h>
19#include <asm/mmu_context.h>
20#include <asm/cacheflush.h> 19#include <asm/cacheflush.h>
21#include <asm/tlbflush.h> 20#include <asm/tlbflush.h>
22 21
23/* 22#define XCHAL_MMU_ASID_BITS 8
24 * Linux was ported to Xtensa assuming all auto-refill ways in set 0
25 * had the same properties (a very likely assumption). Multiple sets
26 * of auto-refill ways will still work properly, but not as optimally
27 * as the Xtensa designer may have assumed.
28 *
29 * We make this case a hard #error, killing the kernel build, to alert
30 * the developer to this condition (which is more likely an error).
31 * You super-duper clever developers can change it to a warning or
32 * remove it altogether if you think you know what you're doing. :)
33 */
34 23
35#if (XCHAL_HAVE_TLBS != 1) 24#if (XCHAL_HAVE_TLBS != 1)
36# error "Linux must have an MMU!" 25# error "Linux must have an MMU!"
37#endif 26#endif
38 27
39#if ((XCHAL_ITLB_ARF_WAYS == 0) || (XCHAL_DTLB_ARF_WAYS == 0))
40# error "MMU must have auto-refill ways"
41#endif
42
43#if ((XCHAL_ITLB_ARF_SETS != 1) || (XCHAL_DTLB_ARF_SETS != 1))
44# error Linux may not use all auto-refill ways as efficiently as you think
45#endif
46
47#if (XCHAL_MMU_MAX_PTE_PAGE_SIZE != XCHAL_MMU_MIN_PTE_PAGE_SIZE)
48# error Only one page size allowed!
49#endif
50
51extern unsigned long asid_cache; 28extern unsigned long asid_cache;
52extern pgd_t *current_pgd;
53
54/*
55 * Define the number of entries per auto-refill way in set 0 of both I and D
56 * TLBs. We deal only with set 0 here (an assumption further explained in
57 * assertions.h). Also, define the total number of ARF entries in both TLBs.
58 */
59
60#define ITLB_ENTRIES_PER_ARF_WAY (XCHAL_ITLB_SET(XCHAL_ITLB_ARF_SET0,ENTRIES))
61#define DTLB_ENTRIES_PER_ARF_WAY (XCHAL_DTLB_SET(XCHAL_DTLB_ARF_SET0,ENTRIES))
62
63#define ITLB_ENTRIES \
64 (ITLB_ENTRIES_PER_ARF_WAY * (XCHAL_ITLB_SET(XCHAL_ITLB_ARF_SET0,WAYS)))
65#define DTLB_ENTRIES \
66 (DTLB_ENTRIES_PER_ARF_WAY * (XCHAL_DTLB_SET(XCHAL_DTLB_ARF_SET0,WAYS)))
67
68
69/*
70 * SMALLEST_NTLB_ENTRIES is the smaller of ITLB_ENTRIES and DTLB_ENTRIES.
71 * In practice, they are probably equal. This macro simplifies function
72 * flush_tlb_range().
73 */
74
75#if (DTLB_ENTRIES < ITLB_ENTRIES)
76# define SMALLEST_NTLB_ENTRIES DTLB_ENTRIES
77#else
78# define SMALLEST_NTLB_ENTRIES ITLB_ENTRIES
79#endif
80
81
82/*
83 * asid_cache tracks only the ASID[USER_RING] field of the RASID special
84 * register, which is the current user-task asid allocation value.
85 * mm->context has the same meaning. When it comes time to write the
86 * asid_cache or mm->context values to the RASID special register, we first
87 * shift the value left by 8, then insert the value.
88 * ASID[0] always contains the kernel's asid value, and we reserve three
89 * other asid values that we never assign to user tasks.
90 */
91
92#define ASID_INC 0x1
93#define ASID_MASK ((1 << XCHAL_MMU_ASID_BITS) - 1)
94
95/*
96 * XCHAL_MMU_ASID_INVALID is a configurable Xtensa processor constant
97 * indicating invalid address space. XCHAL_MMU_ASID_KERNEL is a configurable
98 * Xtensa processor constant indicating the kernel address space. They can
99 * be arbitrary values.
100 *
101 * We identify three more unique, reserved ASID values to use in the unused
102 * ring positions. No other user process will be assigned these reserved
103 * ASID values.
104 *
105 * For example, given that
106 *
107 * XCHAL_MMU_ASID_INVALID == 0
108 * XCHAL_MMU_ASID_KERNEL == 1
109 *
110 * the following maze of #if statements would generate
111 *
112 * ASID_RESERVED_1 == 2
113 * ASID_RESERVED_2 == 3
114 * ASID_RESERVED_3 == 4
115 * ASID_FIRST_NONRESERVED == 5
116 */
117
118#if (XCHAL_MMU_ASID_INVALID != XCHAL_MMU_ASID_KERNEL + 1)
119# define ASID_RESERVED_1 ((XCHAL_MMU_ASID_KERNEL + 1) & ASID_MASK)
120#else
121# define ASID_RESERVED_1 ((XCHAL_MMU_ASID_KERNEL + 2) & ASID_MASK)
122#endif
123
124#if (XCHAL_MMU_ASID_INVALID != ASID_RESERVED_1 + 1)
125# define ASID_RESERVED_2 ((ASID_RESERVED_1 + 1) & ASID_MASK)
126#else
127# define ASID_RESERVED_2 ((ASID_RESERVED_1 + 2) & ASID_MASK)
128#endif
129
130#if (XCHAL_MMU_ASID_INVALID != ASID_RESERVED_2 + 1)
131# define ASID_RESERVED_3 ((ASID_RESERVED_2 + 1) & ASID_MASK)
132#else
133# define ASID_RESERVED_3 ((ASID_RESERVED_2 + 2) & ASID_MASK)
134#endif
135
136#if (XCHAL_MMU_ASID_INVALID != ASID_RESERVED_3 + 1)
137# define ASID_FIRST_NONRESERVED ((ASID_RESERVED_3 + 1) & ASID_MASK)
138#else
139# define ASID_FIRST_NONRESERVED ((ASID_RESERVED_3 + 2) & ASID_MASK)
140#endif
141
142#define ASID_ALL_RESERVED ( ((ASID_RESERVED_1) << 24) + \
143 ((ASID_RESERVED_2) << 16) + \
144 ((ASID_RESERVED_3) << 8) + \
145 ((XCHAL_MMU_ASID_KERNEL)) )
146
147 29
148/* 30/*
149 * NO_CONTEXT is the invalid ASID value that we don't ever assign to 31 * NO_CONTEXT is the invalid ASID value that we don't ever assign to
150 * any user or kernel context. NO_CONTEXT is a better mnemonic than 32 * any user or kernel context.
151 * XCHAL_MMU_ASID_INVALID, so we use it in code instead. 33 *
152 */ 34 * 0 invalid
153 35 * 1 kernel
154#define NO_CONTEXT XCHAL_MMU_ASID_INVALID 36 * 2 reserved
155 37 * 3 reserved
156#if (KERNEL_RING != 0) 38 * 4...255 available
157# error The KERNEL_RING really should be zero.
158#endif
159
160#if (USER_RING >= XCHAL_MMU_RINGS)
161# error USER_RING cannot be greater than the highest numbered ring.
162#endif
163
164#if (USER_RING == KERNEL_RING)
165# error The user and kernel rings really should not be equal.
166#endif
167
168#if (USER_RING == 1)
169#define ASID_INSERT(x) ( ((ASID_RESERVED_1) << 24) + \
170 ((ASID_RESERVED_2) << 16) + \
171 (((x) & (ASID_MASK)) << 8) + \
172 ((XCHAL_MMU_ASID_KERNEL)) )
173
174#elif (USER_RING == 2)
175#define ASID_INSERT(x) ( ((ASID_RESERVED_1) << 24) + \
176 (((x) & (ASID_MASK)) << 16) + \
177 ((ASID_RESERVED_2) << 8) + \
178 ((XCHAL_MMU_ASID_KERNEL)) )
179
180#elif (USER_RING == 3)
181#define ASID_INSERT(x) ( (((x) & (ASID_MASK)) << 24) + \
182 ((ASID_RESERVED_1) << 16) + \
183 ((ASID_RESERVED_2) << 8) + \
184 ((XCHAL_MMU_ASID_KERNEL)) )
185
186#else
187#error Goofy value for USER_RING
188
189#endif /* USER_RING == 1 */
190
191
192/*
193 * All unused by hardware upper bits will be considered
194 * as a software asid extension.
195 */ 39 */
196 40
197#define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1))) 41#define NO_CONTEXT 0
198#define ASID_FIRST_VERSION \ 42#define ASID_USER_FIRST 4
199 ((unsigned long)(~ASID_VERSION_MASK) + 1 + ASID_FIRST_NONRESERVED) 43#define ASID_MASK ((1 << XCHAL_MMU_ASID_BITS) - 1)
44#define ASID_INSERT(x) (0x03020001 | (((x) & ASID_MASK) << 8))
200 45
201static inline void set_rasid_register (unsigned long val) 46static inline void set_rasid_register (unsigned long val)
202{ 47{
@@ -207,67 +52,28 @@ static inline void set_rasid_register (unsigned long val)
207static inline unsigned long get_rasid_register (void) 52static inline unsigned long get_rasid_register (void)
208{ 53{
209 unsigned long tmp; 54 unsigned long tmp;
210 __asm__ __volatile__ (" rsr %0, "__stringify(RASID)"\n\t" : "=a" (tmp)); 55 __asm__ __volatile__ (" rsr %0,"__stringify(RASID)"\n\t" : "=a" (tmp));
211 return tmp; 56 return tmp;
212} 57}
213 58
214
215#if ((XCHAL_MMU_ASID_INVALID == 0) && (XCHAL_MMU_ASID_KERNEL == 1))
216
217static inline void 59static inline void
218get_new_mmu_context(struct mm_struct *mm, unsigned long asid) 60__get_new_mmu_context(struct mm_struct *mm)
219{ 61{
220 extern void flush_tlb_all(void); 62 extern void flush_tlb_all(void);
221 if (! ((asid += ASID_INC) & ASID_MASK) ) { 63 if (! (++asid_cache & ASID_MASK) ) {
222 flush_tlb_all(); /* start new asid cycle */ 64 flush_tlb_all(); /* start new asid cycle */
223 if (!asid) /* fix version if needed */ 65 asid_cache += ASID_USER_FIRST;
224 asid = ASID_FIRST_VERSION - ASID_FIRST_NONRESERVED;
225 asid += ASID_FIRST_NONRESERVED;
226 } 66 }
227 mm->context = asid_cache = asid; 67 mm->context = asid_cache;
228}
229
230#else
231#warning ASID_{INVALID,KERNEL} values impose non-optimal get_new_mmu_context implementation
232
233/* XCHAL_MMU_ASID_INVALID == 0 and XCHAL_MMU_ASID_KERNEL ==1 are
234 really the best, but if you insist... */
235
236static inline int validate_asid (unsigned long asid)
237{
238 switch (asid) {
239 case XCHAL_MMU_ASID_INVALID:
240 case XCHAL_MMU_ASID_KERNEL:
241 case ASID_RESERVED_1:
242 case ASID_RESERVED_2:
243 case ASID_RESERVED_3:
244 return 0; /* can't use these values as ASIDs */
245 }
246 return 1; /* valid */
247} 68}
248 69
249static inline void 70static inline void
250get_new_mmu_context(struct mm_struct *mm, unsigned long asid) 71__load_mmu_context(struct mm_struct *mm)
251{ 72{
252 extern void flush_tlb_all(void); 73 set_rasid_register(ASID_INSERT(mm->context));
253 while (1) { 74 invalidate_page_directory();
254 asid += ASID_INC;
255 if ( ! (asid & ASID_MASK) ) {
256 flush_tlb_all(); /* start new asid cycle */
257 if (!asid) /* fix version if needed */
258 asid = ASID_FIRST_VERSION - ASID_FIRST_NONRESERVED;
259 asid += ASID_FIRST_NONRESERVED;
260 break; /* no need to validate here */
261 }
262 if (validate_asid (asid & ASID_MASK))
263 break;
264 }
265 mm->context = asid_cache = asid;
266} 75}
267 76
268#endif
269
270
271/* 77/*
272 * Initialize the context related info for a new mm_struct 78 * Initialize the context related info for a new mm_struct
273 * instance. 79 * instance.
@@ -280,6 +86,20 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
280 return 0; 86 return 0;
281} 87}
282 88
89/*
90 * After we have set current->mm to a new value, this activates
91 * the context for the new mm so we see the new mappings.
92 */
93static inline void
94activate_mm(struct mm_struct *prev, struct mm_struct *next)
95{
96 /* Unconditionally get a new ASID. */
97
98 __get_new_mmu_context(next);
99 __load_mmu_context(next);
100}
101
102
283static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, 103static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
284 struct task_struct *tsk) 104 struct task_struct *tsk)
285{ 105{
@@ -287,11 +107,10 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
287 107
288 /* Check if our ASID is of an older version and thus invalid */ 108 /* Check if our ASID is of an older version and thus invalid */
289 109
290 if ((next->context ^ asid) & ASID_VERSION_MASK) 110 if (next->context == NO_CONTEXT || ((next->context^asid) & ~ASID_MASK))
291 get_new_mmu_context(next, asid); 111 __get_new_mmu_context(next);
292 112
293 set_rasid_register (ASID_INSERT(next->context)); 113 __load_mmu_context(next);
294 invalidate_page_directory();
295} 114}
296 115
297#define deactivate_mm(tsk, mm) do { } while(0) 116#define deactivate_mm(tsk, mm) do { } while(0)
@@ -302,20 +121,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
302 */ 121 */
303static inline void destroy_context(struct mm_struct *mm) 122static inline void destroy_context(struct mm_struct *mm)
304{ 123{
305 /* Nothing to do. */
306}
307
308/*
309 * After we have set current->mm to a new value, this activates
310 * the context for the new mm so we see the new mappings.
311 */
312static inline void
313activate_mm(struct mm_struct *prev, struct mm_struct *next)
314{
315 /* Unconditionally get a new ASID. */
316
317 get_new_mmu_context(next, asid_cache);
318 set_rasid_register (ASID_INSERT(next->context));
319 invalidate_page_directory(); 124 invalidate_page_directory();
320} 125}
321 126
diff --git a/include/asm-xtensa/page.h b/include/asm-xtensa/page.h
index 40f4c6c3f580..c631d006194b 100644
--- a/include/asm-xtensa/page.h
+++ b/include/asm-xtensa/page.h
@@ -15,18 +15,24 @@
15 15
16#include <asm/processor.h> 16#include <asm/processor.h>
17 17
18#define XCHAL_KSEG_CACHED_VADDR 0xd0000000
19#define XCHAL_KSEG_BYPASS_VADDR 0xd8000000
20#define XCHAL_KSEG_PADDR 0x00000000
21#define XCHAL_KSEG_SIZE 0x08000000
22
18/* 23/*
19 * PAGE_SHIFT determines the page size 24 * PAGE_SHIFT determines the page size
20 * PAGE_ALIGN(x) aligns the pointer to the (next) page boundary 25 * PAGE_ALIGN(x) aligns the pointer to the (next) page boundary
21 */ 26 */
22 27
23#define PAGE_SHIFT XCHAL_MMU_MIN_PTE_PAGE_SIZE 28#define PAGE_SHIFT 12
24#define PAGE_SIZE (1 << PAGE_SHIFT) 29#define PAGE_SIZE (1 << PAGE_SHIFT)
25#define PAGE_MASK (~(PAGE_SIZE-1)) 30#define PAGE_MASK (~(PAGE_SIZE-1))
26#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE - 1) & PAGE_MASK) 31#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE - 1) & PAGE_MASK)
27 32
28#define DCACHE_WAY_SIZE (XCHAL_DCACHE_SIZE / XCHAL_DCACHE_WAYS)
29#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR 33#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR
34#define MAX_MEM_PFN XCHAL_KSEG_SIZE
35#define PGTABLE_START 0x80000000
30 36
31#ifdef __ASSEMBLY__ 37#ifdef __ASSEMBLY__
32 38
diff --git a/include/asm-xtensa/param.h b/include/asm-xtensa/param.h
index c0eec8260b0e..6f281392e3f8 100644
--- a/include/asm-xtensa/param.h
+++ b/include/asm-xtensa/param.h
@@ -11,7 +11,7 @@
11#ifndef _XTENSA_PARAM_H 11#ifndef _XTENSA_PARAM_H
12#define _XTENSA_PARAM_H 12#define _XTENSA_PARAM_H
13 13
14#include <xtensa/config/core.h> 14#include <asm/variant/core.h>
15 15
16#ifdef __KERNEL__ 16#ifdef __KERNEL__
17# define HZ 100 /* internal timer frequency */ 17# define HZ 100 /* internal timer frequency */
diff --git a/include/asm-xtensa/pgtable.h b/include/asm-xtensa/pgtable.h
index b4318934b10d..2d4b5db6ea63 100644
--- a/include/asm-xtensa/pgtable.h
+++ b/include/asm-xtensa/pgtable.h
@@ -14,45 +14,6 @@
14#include <asm-generic/pgtable-nopmd.h> 14#include <asm-generic/pgtable-nopmd.h>
15#include <asm/page.h> 15#include <asm/page.h>
16 16
17/* Assertions. */
18
19#ifdef CONFIG_MMU
20
21
22#if (XCHAL_MMU_RINGS < 2)
23# error Linux build assumes at least 2 ring levels.
24#endif
25
26#if (XCHAL_MMU_CA_BITS != 4)
27# error We assume exactly four bits for CA.
28#endif
29
30#if (XCHAL_MMU_SR_BITS != 0)
31# error We have no room for SR bits.
32#endif
33
34/*
35 * Use the first min-wired way for mapping page-table pages.
36 * Page coloring requires a second min-wired way.
37 */
38
39#if (XCHAL_DTLB_MINWIRED_SETS == 0)
40# error Need a min-wired way for mapping page-table pages
41#endif
42
43#define DTLB_WAY_PGTABLE XCHAL_DTLB_SET(XCHAL_DTLB_MINWIRED_SET0, WAY)
44
45#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
46# if XCHAL_DTLB_SET(XCHAL_DTLB_MINWIRED_SET0, WAYS) >= 2
47# define DTLB_WAY_DCACHE_ALIAS0 (DTLB_WAY_PGTABLE + 1)
48# define DTLB_WAY_DCACHE_ALIAS1 (DTLB_WAY_PGTABLE + 2)
49# else
50# error Page coloring requires its own wired dtlb way!
51# endif
52#endif
53
54#endif /* CONFIG_MMU */
55
56/* 17/*
57 * We only use two ring levels, user and kernel space. 18 * We only use two ring levels, user and kernel space.
58 */ 19 */
@@ -97,7 +58,7 @@
97#define PGD_ORDER 0 58#define PGD_ORDER 0
98#define PMD_ORDER 0 59#define PMD_ORDER 0
99#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) 60#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
100#define FIRST_USER_ADDRESS XCHAL_SEG_MAPPABLE_VADDR 61#define FIRST_USER_ADDRESS 0
101#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT) 62#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT)
102 63
103/* virtual memory area. We keep a distance to other memory regions to be 64/* virtual memory area. We keep a distance to other memory regions to be
diff --git a/include/asm-xtensa/platform-iss/hardware.h b/include/asm-xtensa/platform-iss/hardware.h
index 22240f001803..6930c12adc16 100644
--- a/include/asm-xtensa/platform-iss/hardware.h
+++ b/include/asm-xtensa/platform-iss/hardware.h
@@ -12,18 +12,18 @@
12 * This file contains the default configuration of ISS. 12 * This file contains the default configuration of ISS.
13 */ 13 */
14 14
15#ifndef __ASM_XTENSA_ISS_HARDWARE 15#ifndef _XTENSA_PLATFORM_ISS_HARDWARE_H
16#define __ASM_XTENSA_ISS_HARDWARE 16#define _XTENSA_PLATFORM_ISS_HARDWARE_H
17 17
18/* 18/*
19 * Memory configuration. 19 * Memory configuration.
20 */ 20 */
21 21
22#define PLATFORM_DEFAULT_MEM_START XSHAL_RAM_PADDR 22#define PLATFORM_DEFAULT_MEM_START 0x00000000
23#define PLATFORM_DEFAULT_MEM_SIZE XSHAL_RAM_VSIZE 23#define PLATFORM_DEFAULT_MEM_SIZE 0x08000000
24 24
25/* 25/*
26 * Interrupt configuration. 26 * Interrupt configuration.
27 */ 27 */
28 28
29#endif /* __ASM_XTENSA_ISS_HARDWARE */ 29#endif /* _XTENSA_PLATFORM_ISS_HARDWARE_H */
diff --git a/include/asm-xtensa/platform-iss/simcall.h b/include/asm-xtensa/platform-iss/simcall.h
new file mode 100644
index 000000000000..6acb572759a6
--- /dev/null
+++ b/include/asm-xtensa/platform-iss/simcall.h
@@ -0,0 +1,62 @@
1/*
2 * include/asm-xtensa/platform-iss/hardware.h
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2001 Tensilica Inc.
9 */
10
11#ifndef _XTENSA_PLATFORM_ISS_SIMCALL_H
12#define _XTENSA_PLATFORM_ISS_SIMCALL_H
13
14
15/*
16 * System call like services offered by the simulator host.
17 */
18
19#define SYS_nop 0 /* unused */
20#define SYS_exit 1 /*x*/
21#define SYS_fork 2
22#define SYS_read 3 /*x*/
23#define SYS_write 4 /*x*/
24#define SYS_open 5 /*x*/
25#define SYS_close 6 /*x*/
26#define SYS_rename 7 /*x 38 - waitpid */
27#define SYS_creat 8 /*x*/
28#define SYS_link 9 /*x (not implemented on WIN32) */
29#define SYS_unlink 10 /*x*/
30#define SYS_execv 11 /* n/a - execve */
31#define SYS_execve 12 /* 11 - chdir */
32#define SYS_pipe 13 /* 42 - time */
33#define SYS_stat 14 /* 106 - mknod */
34#define SYS_chmod 15
35#define SYS_chown 16 /* 202 - lchown */
36#define SYS_utime 17 /* 30 - break */
37#define SYS_wait 18 /* n/a - oldstat */
38#define SYS_lseek 19 /*x*/
39#define SYS_getpid 20
40#define SYS_isatty 21 /* n/a - mount */
41#define SYS_fstat 22 /* 108 - oldumount */
42#define SYS_time 23 /* 13 - setuid */
43#define SYS_gettimeofday 24 /*x 78 - getuid (not implemented on WIN32) */
44#define SYS_times 25 /*X 43 - stime (Xtensa-specific implementation) */
45#define SYS_socket 26
46#define SYS_sendto 27
47#define SYS_recvfrom 28
48#define SYS_select_one 29 /* not compitible select, one file descriptor at the time */
49#define SYS_bind 30
50#define SYS_ioctl 31
51
52/*
53 * SYS_select_one specifiers
54 */
55
56#define XTISS_SELECT_ONE_READ 1
57#define XTISS_SELECT_ONE_WRITE 2
58#define XTISS_SELECT_ONE_EXCEPT 3
59
60
61#endif /* _XTENSA_PLATFORM_ISS_SIMCALL_H */
62
diff --git a/include/asm-xtensa/posix_types.h b/include/asm-xtensa/posix_types.h
index 2c816b0e7762..3470b44c12ce 100644
--- a/include/asm-xtensa/posix_types.h
+++ b/include/asm-xtensa/posix_types.h
@@ -21,7 +21,7 @@
21 21
22typedef unsigned long __kernel_ino_t; 22typedef unsigned long __kernel_ino_t;
23typedef unsigned int __kernel_mode_t; 23typedef unsigned int __kernel_mode_t;
24typedef unsigned short __kernel_nlink_t; 24typedef unsigned long __kernel_nlink_t;
25typedef long __kernel_off_t; 25typedef long __kernel_off_t;
26typedef int __kernel_pid_t; 26typedef int __kernel_pid_t;
27typedef unsigned short __kernel_ipc_pid_t; 27typedef unsigned short __kernel_ipc_pid_t;
diff --git a/include/asm-xtensa/processor.h b/include/asm-xtensa/processor.h
index 8b96e77c9d82..4feb9f7f35a6 100644
--- a/include/asm-xtensa/processor.h
+++ b/include/asm-xtensa/processor.h
@@ -11,24 +11,18 @@
11#ifndef _XTENSA_PROCESSOR_H 11#ifndef _XTENSA_PROCESSOR_H
12#define _XTENSA_PROCESSOR_H 12#define _XTENSA_PROCESSOR_H
13 13
14#ifdef __ASSEMBLY__ 14#include <asm/variant/core.h>
15#define _ASMLANGUAGE 15#include <asm/coprocessor.h>
16#endif
17
18#include <xtensa/config/core.h>
19#include <xtensa/config/specreg.h>
20#include <xtensa/config/tie.h>
21#include <xtensa/config/system.h>
22 16
23#include <linux/compiler.h> 17#include <linux/compiler.h>
24#include <asm/ptrace.h> 18#include <asm/ptrace.h>
25#include <asm/types.h> 19#include <asm/types.h>
26#include <asm/coprocessor.h> 20#include <asm/regs.h>
27 21
28/* Assertions. */ 22/* Assertions. */
29 23
30#if (XCHAL_HAVE_WINDOWED != 1) 24#if (XCHAL_HAVE_WINDOWED != 1)
31#error Linux requires the Xtensa Windowed Registers Option. 25# error Linux requires the Xtensa Windowed Registers Option.
32#endif 26#endif
33 27
34/* 28/*
@@ -145,11 +139,11 @@ struct thread_struct {
145 * Note: We set-up ps as if we did a call4 to the new pc. 139 * Note: We set-up ps as if we did a call4 to the new pc.
146 * set_thread_state in signal.c depends on it. 140 * set_thread_state in signal.c depends on it.
147 */ 141 */
148#define USER_PS_VALUE ( (1 << XCHAL_PS_WOE_SHIFT) + \ 142#define USER_PS_VALUE ((1 << PS_WOE_BIT) | \
149 (1 << XCHAL_PS_CALLINC_SHIFT) + \ 143 (1 << PS_CALLINC_SHIFT) | \
150 (USER_RING << XCHAL_PS_RING_SHIFT) + \ 144 (USER_RING << PS_RING_SHIFT) | \
151 (1 << XCHAL_PS_PROGSTACK_SHIFT) + \ 145 (1 << PS_UM_BIT) | \
152 (1 << XCHAL_PS_EXCM_SHIFT) ) 146 (1 << PS_EXCM_BIT))
153 147
154/* Clearing a0 terminates the backtrace. */ 148/* Clearing a0 terminates the backtrace. */
155#define start_thread(regs, new_pc, new_sp) \ 149#define start_thread(regs, new_pc, new_sp) \
diff --git a/include/asm-xtensa/ptrace.h b/include/asm-xtensa/ptrace.h
index a5ac71a5205c..1b7fe363fad1 100644
--- a/include/asm-xtensa/ptrace.h
+++ b/include/asm-xtensa/ptrace.h
@@ -11,7 +11,7 @@
11#ifndef _XTENSA_PTRACE_H 11#ifndef _XTENSA_PTRACE_H
12#define _XTENSA_PTRACE_H 12#define _XTENSA_PTRACE_H
13 13
14#include <xtensa/config/core.h> 14#include <asm/variant/core.h>
15 15
16/* 16/*
17 * Kernel stack 17 * Kernel stack
diff --git a/include/asm-xtensa/regs.h b/include/asm-xtensa/regs.h
new file mode 100644
index 000000000000..c913d259faaa
--- /dev/null
+++ b/include/asm-xtensa/regs.h
@@ -0,0 +1,138 @@
1/*
2 * Copyright (c) 2006 Tensilica, Inc. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2.1 of the GNU Lesser General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it would be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
11 *
12 * Further, this software is distributed without any warranty that it is
13 * free of the rightful claim of any third person regarding infringement
14 * or the like. Any license provided herein, whether implied or
15 * otherwise, applies only to this software file. Patent licenses, if
16 * any, provided herein do not apply to combinations of this program with
17 * other software, or any other product whatsoever.
18 *
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with this program; if not, write the Free Software
21 * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
22 * USA.
23 */
24
25#ifndef _XTENSA_REGS_H
26#define _XTENSA_REGS_H
27
28/* Special registers. */
29
30#define LBEG 0
31#define LEND 1
32#define LCOUNT 2
33#define SAR 3
34#define BR 4
35#define SCOMPARE1 12
36#define ACCHI 16
37#define ACCLO 17
38#define MR 32
39#define WINDOWBASE 72
40#define WINDOWSTART 73
41#define PTEVADDR 83
42#define RASID 90
43#define ITLBCFG 91
44#define DTLBCFG 92
45#define IBREAKENABLE 96
46#define DDR 104
47#define IBREAKA 128
48#define DBREAKA 144
49#define DBREAKC 160
50#define EPC 176
51#define EPC_1 177
52#define DEPC 192
53#define EPS 192
54#define EPS_1 193
55#define EXCSAVE 208
56#define EXCSAVE_1 209
57#define INTERRUPT 226
58#define INTENABLE 228
59#define PS 230
60#define THREADPTR 231
61#define EXCCAUSE 232
62#define DEBUGCAUSE 233
63#define CCOUNT 234
64#define PRID 235
65#define ICOUNT 236
66#define ICOUNTLEVEL 237
67#define EXCVADDR 238
68#define CCOMPARE 240
69#define MISC 244
70
71/* Special names for read-only and write-only interrupt registers. */
72
73#define INTREAD 226
74#define INTSET 226
75#define INTCLEAR 227
76
77/* EXCCAUSE register fields */
78
79#define EXCCAUSE_EXCCAUSE_SHIFT 0
80#define EXCCAUSE_EXCCAUSE_MASK 0x3F
81
82#define EXCCAUSE_ILLEGAL_INSTRUCTION 0
83#define EXCCAUSE_SYSTEM_CALL 1
84#define EXCCAUSE_INSTRUCTION_FETCH_ERROR 2
85#define EXCCAUSE_LOAD_STORE_ERROR 3
86#define EXCCAUSE_LEVEL1_INTERRUPT 4
87#define EXCCAUSE_ALLOCA 5
88#define EXCCAUSE_INTEGER_DIVIDE_BY_ZERO 6
89#define EXCCAUSE_SPECULATION 7
90#define EXCCAUSE_PRIVILEGED 8
91#define EXCCAUSE_UNALIGNED 9
92#define EXCCAUSE_ITLB_MISS 16
93#define EXCCAUSE_ITLB_MULTIHIT 17
94#define EXCCAUSE_ITLB_PRIVILEGE 18
95#define EXCCAUSE_ITLB_SIZE_RESTRICTION 19
96#define EXCCAUSE_FETCH_CACHE_ATTRIBUTE 20
97#define EXCCAUSE_DTLB_MISS 24
98#define EXCCAUSE_DTLB_MULTIHIT 25
99#define EXCCAUSE_DTLB_PRIVILEGE 26
100#define EXCCAUSE_DTLB_SIZE_RESTRICTION 27
101#define EXCCAUSE_LOAD_CACHE_ATTRIBUTE 28
102#define EXCCAUSE_STORE_CACHE_ATTRIBUTE 29
103#define EXCCAUSE_FLOATING_POINT 40
104
105/* PS register fields. */
106
107#define PS_WOE_BIT 18
108#define PS_CALLINC_SHIFT 16
109#define PS_CALLINC_MASK 0x00030000
110#define PS_OWB_SHIFT 8
111#define PS_OWB_MASK 0x00000F00
112#define PS_RING_SHIFT 6
113#define PS_RING_MASK 0x000000C0
114#define PS_UM_BIT 5
115#define PS_EXCM_BIT 4
116#define PS_INTLEVEL_SHIFT 0
117#define PS_INTLEVEL_MASK 0x0000000F
118
119/* DBREAKCn register fields. */
120
121#define DBREAKC_MASK_BIT 0
122#define DBREAKC_MASK_MASK 0x0000003F
123#define DBREAKC_LOAD_BIT 30
124#define DBREAKC_LOAD_MASK 0x40000000
125#define DBREAKC_STOR_BIT 31
126#define DBREAKC_STOR_MASK 0x80000000
127
128/* DEBUGCAUSE register fields. */
129
130#define DEBUGCAUSE_DEBUGINT_BIT 5 /* External debug interrupt */
131#define DEBUGCAUSE_BREAKN_BIT 4 /* BREAK.N instruction */
132#define DEBUGCAUSE_BREAK_BIT 3 /* BREAK instruction */
133#define DEBUGCAUSE_DBREAK_BIT 2 /* DBREAK match */
134#define DEBUGCAUSE_IBREAK_BIT 1 /* IBREAK match */
135#define DEBUGCAUSE_ICOUNT_BIT 0 /* ICOUNT would incr. to zero */
136
137#endif /* _XTENSA_SPECREG_H */
138
diff --git a/include/asm-xtensa/sembuf.h b/include/asm-xtensa/sembuf.h
index 2d26c47666fe..c15870493b33 100644
--- a/include/asm-xtensa/sembuf.h
+++ b/include/asm-xtensa/sembuf.h
@@ -25,7 +25,7 @@
25 25
26struct semid64_ds { 26struct semid64_ds {
27 struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ 27 struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
28#if XCHAL_HAVE_LE 28#ifdef __XTENSA_EL__
29 __kernel_time_t sem_otime; /* last semop time */ 29 __kernel_time_t sem_otime; /* last semop time */
30 unsigned long __unused1; 30 unsigned long __unused1;
31 __kernel_time_t sem_ctime; /* last change time */ 31 __kernel_time_t sem_ctime; /* last change time */
diff --git a/include/asm-xtensa/shmbuf.h b/include/asm-xtensa/shmbuf.h
index a30b81a4b933..ad4b0121782c 100644
--- a/include/asm-xtensa/shmbuf.h
+++ b/include/asm-xtensa/shmbuf.h
@@ -19,6 +19,7 @@
19#ifndef _XTENSA_SHMBUF_H 19#ifndef _XTENSA_SHMBUF_H
20#define _XTENSA_SHMBUF_H 20#define _XTENSA_SHMBUF_H
21 21
22#if defined (__XTENSA_EL__)
22struct shmid64_ds { 23struct shmid64_ds {
23 struct ipc64_perm shm_perm; /* operation perms */ 24 struct ipc64_perm shm_perm; /* operation perms */
24 size_t shm_segsz; /* size of segment (bytes) */ 25 size_t shm_segsz; /* size of segment (bytes) */
@@ -34,6 +35,26 @@ struct shmid64_ds {
34 unsigned long __unused4; 35 unsigned long __unused4;
35 unsigned long __unused5; 36 unsigned long __unused5;
36}; 37};
38#elif defined (__XTENSA_EB__)
39struct shmid64_ds {
40 struct ipc64_perm shm_perm; /* operation perms */
41 size_t shm_segsz; /* size of segment (bytes) */
42 __kernel_time_t shm_atime; /* last attach time */
43 unsigned long __unused1;
44 __kernel_time_t shm_dtime; /* last detach time */
45 unsigned long __unused2;
46 __kernel_time_t shm_ctime; /* last change time */
47 unsigned long __unused3;
48 __kernel_pid_t shm_cpid; /* pid of creator */
49 __kernel_pid_t shm_lpid; /* pid of last operator */
50 unsigned long shm_nattch; /* no. of current attaches */
51 unsigned long __unused4;
52 unsigned long __unused5;
53};
54#else
55# error endian order not defined
56#endif
57
37 58
38struct shminfo64 { 59struct shminfo64 {
39 unsigned long shmmax; 60 unsigned long shmmax;
diff --git a/include/asm-xtensa/stat.h b/include/asm-xtensa/stat.h
index 2f4662ff6c3a..149f4bce092f 100644
--- a/include/asm-xtensa/stat.h
+++ b/include/asm-xtensa/stat.h
@@ -13,93 +13,57 @@
13 13
14#include <linux/types.h> 14#include <linux/types.h>
15 15
16struct __old_kernel_stat {
17 unsigned short st_dev;
18 unsigned short st_ino;
19 unsigned short st_mode;
20 unsigned short st_nlink;
21 unsigned short st_uid;
22 unsigned short st_gid;
23 unsigned short st_rdev;
24 unsigned long st_size;
25 unsigned long st_atime;
26 unsigned long st_mtime;
27 unsigned long st_ctime;
28};
29
30#define STAT_HAVE_NSEC 1 16#define STAT_HAVE_NSEC 1
31 17
32struct stat { 18struct stat {
33 unsigned short st_dev; 19 unsigned long st_dev;
34 unsigned short __pad1; 20 ino_t st_ino;
35 unsigned long st_ino; 21 mode_t st_mode;
36 unsigned short st_mode; 22 nlink_t st_nlink;
37 unsigned short st_nlink; 23 uid_t st_uid;
38 unsigned short st_uid; 24 gid_t st_gid;
39 unsigned short st_gid; 25 unsigned int st_rdev;
40 unsigned short st_rdev; 26 off_t st_size;
41 unsigned short __pad2; 27 unsigned long st_blksize;
42 unsigned long st_size; 28 unsigned long st_blocks;
43 unsigned long st_blksize; 29 unsigned long st_atime;
44 unsigned long st_blocks; 30 unsigned long st_atime_nsec;
45 unsigned long st_atime; 31 unsigned long st_mtime;
46 unsigned long st_atime_nsec; 32 unsigned long st_mtime_nsec;
47 unsigned long st_mtime; 33 unsigned long st_ctime;
48 unsigned long st_mtime_nsec; 34 unsigned long st_ctime_nsec;
49 unsigned long st_ctime; 35 unsigned long __unused4;
50 unsigned long st_ctime_nsec; 36 unsigned long __unused5;
51 unsigned long __unused4;
52 unsigned long __unused5;
53}; 37};
54 38
55/* This matches struct stat64 in glibc-2.2.3. */ 39/* This matches struct stat64 in glibc-2.3 */
56 40
57struct stat64 { 41struct stat64 {
58#ifdef __XTENSA_EL__ 42 unsigned long long st_dev; /* Device */
59 unsigned short st_dev; /* Device */ 43 unsigned long long st_ino; /* File serial number */
60 unsigned char __pad0[10];
61#else
62 unsigned char __pad0[6];
63 unsigned short st_dev;
64 unsigned char __pad1[2];
65#endif
66
67#define STAT64_HAS_BROKEN_ST_INO 1
68 unsigned long __st_ino; /* 32bit file serial number. */
69
70 unsigned int st_mode; /* File mode. */ 44 unsigned int st_mode; /* File mode. */
71 unsigned int st_nlink; /* Link count. */ 45 unsigned int st_nlink; /* Link count. */
72 unsigned int st_uid; /* User ID of the file's owner. */ 46 unsigned int st_uid; /* User ID of the file's owner. */
73 unsigned int st_gid; /* Group ID of the file's group. */ 47 unsigned int st_gid; /* Group ID of the file's group. */
74 48 unsigned long long st_rdev; /* Device number, if device. */
75#ifdef __XTENSA_EL__ 49 long long st_size; /* Size of file, in bytes. */
76 unsigned short st_rdev; /* Device number, if device. */ 50 long st_blksize; /* Optimal block size for I/O. */
77 unsigned char __pad3[10]; 51 unsigned long __unused2;
78#else 52#ifdef __XTENSA_EB__
79 unsigned char __pad2[6]; 53 unsigned long __unused3;
80 unsigned short st_rdev; 54 long st_blocks; /* Number 512-byte blocks allocated. */
81 unsigned char __pad3[2];
82#endif
83
84 long long int st_size; /* Size of file, in bytes. */
85 long int st_blksize; /* Optimal block size for I/O. */
86
87#ifdef __XTENSA_EL__
88 unsigned long st_blocks; /* Number 512-byte blocks allocated. */
89 unsigned long __pad4;
90#else 55#else
91 unsigned long __pad4; 56 long st_blocks; /* Number 512-byte blocks allocated. */
92 unsigned long st_blocks; 57 unsigned long __unused3;
93#endif 58#endif
94 59 long st_atime; /* Time of last access. */
95 unsigned long __pad5; 60 unsigned long st_atime_nsec;
96 long int st_atime; /* Time of last access. */ 61 long st_mtime; /* Time of last modification. */
97 unsigned long st_atime_nsec; 62 unsigned long st_mtime_nsec;
98 long int st_mtime; /* Time of last modification. */ 63 long st_ctime; /* Time of last status change. */
99 unsigned long st_mtime_nsec; 64 unsigned long st_ctime_nsec;
100 long int st_ctime; /* Time of last status change. */ 65 unsigned long __unused4;
101 unsigned long st_ctime_nsec; 66 unsigned long __unused5;
102 unsigned long long int st_ino; /* File serial number. */
103}; 67};
104 68
105#endif /* _XTENSA_STAT_H */ 69#endif /* _XTENSA_STAT_H */
diff --git a/include/asm-xtensa/syscall.h b/include/asm-xtensa/syscall.h
new file mode 100644
index 000000000000..6cb0d42f11c8
--- /dev/null
+++ b/include/asm-xtensa/syscall.h
@@ -0,0 +1,20 @@
1struct pt_regs;
2struct sigaction;
3asmlinkage long xtensa_execve(char*, char**, char**, struct pt_regs*);
4asmlinkage long xtensa_clone(unsigned long, unsigned long, struct pt_regs*);
5asmlinkage long xtensa_pipe(int __user *);
6asmlinkage long xtensa_mmap2(unsigned long, unsigned long, unsigned long,
7 unsigned long, unsigned long, unsigned long);
8asmlinkage long xtensa_ptrace(long, long, long, long);
9asmlinkage long xtensa_sigreturn(struct pt_regs*);
10asmlinkage long xtensa_rt_sigreturn(struct pt_regs*);
11asmlinkage long xtensa_sigsuspend(struct pt_regs*);
12asmlinkage long xtensa_rt_sigsuspend(struct pt_regs*);
13asmlinkage long xtensa_sigaction(int, const struct old_sigaction*,
14 struct old_sigaction*);
15asmlinkage long xtensa_sigaltstack(struct pt_regs *regs);
16asmlinkage long sys_rt_sigaction(int,
17 const struct sigaction __user *,
18 struct sigaction __user *,
19 size_t);
20asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg);
diff --git a/include/asm-xtensa/system.h b/include/asm-xtensa/system.h
index 932bda92a21c..4aaed7fe6cfe 100644
--- a/include/asm-xtensa/system.h
+++ b/include/asm-xtensa/system.h
@@ -213,7 +213,7 @@ static inline void spill_registers(void)
213 unsigned int a0, ps; 213 unsigned int a0, ps;
214 214
215 __asm__ __volatile__ ( 215 __asm__ __volatile__ (
216 "movi a14," __stringify (PS_EXCM_MASK) " | 1\n\t" 216 "movi a14," __stringify (PS_EXCM_BIT) " | 1\n\t"
217 "mov a12, a0\n\t" 217 "mov a12, a0\n\t"
218 "rsr a13," __stringify(SAR) "\n\t" 218 "rsr a13," __stringify(SAR) "\n\t"
219 "xsr a14," __stringify(PS) "\n\t" 219 "xsr a14," __stringify(PS) "\n\t"
diff --git a/include/asm-xtensa/termbits.h b/include/asm-xtensa/termbits.h
index c780593ff5f9..057b9a3d8f83 100644
--- a/include/asm-xtensa/termbits.h
+++ b/include/asm-xtensa/termbits.h
@@ -30,6 +30,17 @@ struct termios {
30 cc_t c_cc[NCCS]; /* control characters */ 30 cc_t c_cc[NCCS]; /* control characters */
31}; 31};
32 32
33struct ktermios {
34 tcflag_t c_iflag; /* input mode flags */
35 tcflag_t c_oflag; /* output mode flags */
36 tcflag_t c_cflag; /* control mode flags */
37 tcflag_t c_lflag; /* local mode flags */
38 cc_t c_line; /* line discipline */
39 cc_t c_cc[NCCS]; /* control characters */
40 speed_t c_ispeed; /* input speed */
41 speed_t c_ospeed; /* output speed */
42};
43
33/* c_cc characters */ 44/* c_cc characters */
34 45
35#define VINTR 0 46#define VINTR 0
diff --git a/include/asm-xtensa/timex.h b/include/asm-xtensa/timex.h
index c7b705e66655..28c7985a4000 100644
--- a/include/asm-xtensa/timex.h
+++ b/include/asm-xtensa/timex.h
@@ -16,17 +16,22 @@
16#include <asm/processor.h> 16#include <asm/processor.h>
17#include <linux/stringify.h> 17#include <linux/stringify.h>
18 18
19#if XCHAL_INT_LEVEL(XCHAL_TIMER0_INTERRUPT) == 1 19#define _INTLEVEL(x) XCHAL_INT ## x ## _LEVEL
20#define INTLEVEL(x) _INTLEVEL(x)
21
22#if INTLEVEL(XCHAL_TIMER0_INTERRUPT) == 1
20# define LINUX_TIMER 0 23# define LINUX_TIMER 0
21#elif XCHAL_INT_LEVEL(XCHAL_TIMER1_INTERRUPT) == 1 24# define LINUX_TIMER_INT XCHAL_TIMER0_INTERRUPT
25#elif INTLEVEL(XCHAL_TIMER1_INTERRUPT) == 1
22# define LINUX_TIMER 1 26# define LINUX_TIMER 1
23#elif XCHAL_INT_LEVEL(XCHAL_TIMER2_INTERRUPT) == 1 27# define LINUX_TIMER_INT XCHAL_TIMER1_INTERRUPT
28#elif INTLEVEL(XCHAL_TIMER2_INTERRUPT) == 1
24# define LINUX_TIMER 2 29# define LINUX_TIMER 2
30# define LINUX_TIMER_INT XCHAL_TIMER2_INTERRUPT
25#else 31#else
26# error "Bad timer number for Linux configurations!" 32# error "Bad timer number for Linux configurations!"
27#endif 33#endif
28 34
29#define LINUX_TIMER_INT XCHAL_TIMER_INTERRUPT(LINUX_TIMER)
30#define LINUX_TIMER_MASK (1L << LINUX_TIMER_INT) 35#define LINUX_TIMER_MASK (1L << LINUX_TIMER_INT)
31 36
32#define CLOCK_TICK_RATE 1193180 /* (everyone is using this value) */ 37#define CLOCK_TICK_RATE 1193180 /* (everyone is using this value) */
@@ -60,8 +65,8 @@ extern cycles_t cacheflush_time;
60 65
61#define WSR_CCOUNT(r) __asm__("wsr %0,"__stringify(CCOUNT) :: "a" (r)) 66#define WSR_CCOUNT(r) __asm__("wsr %0,"__stringify(CCOUNT) :: "a" (r))
62#define RSR_CCOUNT(r) __asm__("rsr %0,"__stringify(CCOUNT) : "=a" (r)) 67#define RSR_CCOUNT(r) __asm__("rsr %0,"__stringify(CCOUNT) : "=a" (r))
63#define WSR_CCOMPARE(x,r) __asm__("wsr %0,"__stringify(CCOMPARE_0)"+"__stringify(x) :: "a"(r)) 68#define WSR_CCOMPARE(x,r) __asm__("wsr %0,"__stringify(CCOMPARE)"+"__stringify(x) :: "a"(r))
64#define RSR_CCOMPARE(x,r) __asm__("rsr %0,"__stringify(CCOMPARE_0)"+"__stringify(x) : "=a"(r)) 69#define RSR_CCOMPARE(x,r) __asm__("rsr %0,"__stringify(CCOMPARE)"+"__stringify(x) : "=a"(r))
65 70
66static inline unsigned long get_ccount (void) 71static inline unsigned long get_ccount (void)
67{ 72{
diff --git a/include/asm-xtensa/tlbflush.h b/include/asm-xtensa/tlbflush.h
index 43f6ec859af9..7c637b3c352c 100644
--- a/include/asm-xtensa/tlbflush.h
+++ b/include/asm-xtensa/tlbflush.h
@@ -11,12 +11,20 @@
11#ifndef _XTENSA_TLBFLUSH_H 11#ifndef _XTENSA_TLBFLUSH_H
12#define _XTENSA_TLBFLUSH_H 12#define _XTENSA_TLBFLUSH_H
13 13
14#define DEBUG_TLB
15
16#ifdef __KERNEL__ 14#ifdef __KERNEL__
17 15
18#include <asm/processor.h>
19#include <linux/stringify.h> 16#include <linux/stringify.h>
17#include <asm/processor.h>
18
19#define DTLB_WAY_PGD 7
20
21#define ITLB_ARF_WAYS 4
22#define DTLB_ARF_WAYS 4
23
24#define ITLB_HIT_BIT 3
25#define DTLB_HIT_BIT 4
26
27#ifndef __ASSEMBLY__
20 28
21/* TLB flushing: 29/* TLB flushing:
22 * 30 *
@@ -46,11 +54,6 @@ static inline void flush_tlb_pgtables(struct mm_struct *mm,
46 54
47/* TLB operations. */ 55/* TLB operations. */
48 56
49#define ITLB_WAYS_LOG2 XCHAL_ITLB_WAY_BITS
50#define DTLB_WAYS_LOG2 XCHAL_DTLB_WAY_BITS
51#define ITLB_PROBE_SUCCESS (1 << ITLB_WAYS_LOG2)
52#define DTLB_PROBE_SUCCESS (1 << DTLB_WAYS_LOG2)
53
54static inline unsigned long itlb_probe(unsigned long addr) 57static inline unsigned long itlb_probe(unsigned long addr)
55{ 58{
56 unsigned long tmp; 59 unsigned long tmp;
@@ -131,29 +134,30 @@ static inline void write_itlb_entry (pte_t entry, int way)
131 134
132static inline void invalidate_page_directory (void) 135static inline void invalidate_page_directory (void)
133{ 136{
134 invalidate_dtlb_entry (DTLB_WAY_PGTABLE); 137 invalidate_dtlb_entry (DTLB_WAY_PGD);
138 invalidate_dtlb_entry (DTLB_WAY_PGD+1);
139 invalidate_dtlb_entry (DTLB_WAY_PGD+2);
135} 140}
136 141
137static inline void invalidate_itlb_mapping (unsigned address) 142static inline void invalidate_itlb_mapping (unsigned address)
138{ 143{
139 unsigned long tlb_entry; 144 unsigned long tlb_entry;
140 while ((tlb_entry = itlb_probe (address)) & ITLB_PROBE_SUCCESS) 145 if (((tlb_entry = itlb_probe(address)) & (1 << ITLB_HIT_BIT)) != 0)
141 invalidate_itlb_entry (tlb_entry); 146 invalidate_itlb_entry(tlb_entry);
142} 147}
143 148
144static inline void invalidate_dtlb_mapping (unsigned address) 149static inline void invalidate_dtlb_mapping (unsigned address)
145{ 150{
146 unsigned long tlb_entry; 151 unsigned long tlb_entry;
147 while ((tlb_entry = dtlb_probe (address)) & DTLB_PROBE_SUCCESS) 152 if (((tlb_entry = dtlb_probe(address)) & (1 << DTLB_HIT_BIT)) != 0)
148 invalidate_dtlb_entry (tlb_entry); 153 invalidate_dtlb_entry(tlb_entry);
149} 154}
150 155
151#define check_pgt_cache() do { } while (0) 156#define check_pgt_cache() do { } while (0)
152 157
153 158
154#ifdef DEBUG_TLB 159/*
155 160 * DO NOT USE THESE FUNCTIONS. These instructions aren't part of the Xtensa
156/* DO NOT USE THESE FUNCTIONS. These instructions aren't part of the Xtensa
157 * ISA and exist only for test purposes.. 161 * ISA and exist only for test purposes..
158 * You may find it helpful for MMU debugging, however. 162 * You may find it helpful for MMU debugging, however.
159 * 163 *
@@ -193,8 +197,6 @@ static inline unsigned long read_itlb_translation (int way)
193 return tmp; 197 return tmp;
194} 198}
195 199
196#endif /* DEBUG_TLB */ 200#endif /* __ASSEMBLY__ */
197
198
199#endif /* __KERNEL__ */ 201#endif /* __KERNEL__ */
200#endif /* _XTENSA_PGALLOC_H */ 202#endif /* _XTENSA_TLBFLUSH_H */
diff --git a/include/asm-xtensa/uaccess.h b/include/asm-xtensa/uaccess.h
index 88a64e1144d5..d6352da05b10 100644
--- a/include/asm-xtensa/uaccess.h
+++ b/include/asm-xtensa/uaccess.h
@@ -23,7 +23,6 @@
23 23
24#ifdef __ASSEMBLY__ 24#ifdef __ASSEMBLY__
25 25
26#define _ASMLANGUAGE
27#include <asm/current.h> 26#include <asm/current.h>
28#include <asm/asm-offsets.h> 27#include <asm/asm-offsets.h>
29#include <asm/processor.h> 28#include <asm/processor.h>
diff --git a/include/asm-xtensa/unistd.h b/include/asm-xtensa/unistd.h
index 2e1a1b997e7d..8a7fb6964ce1 100644
--- a/include/asm-xtensa/unistd.h
+++ b/include/asm-xtensa/unistd.h
@@ -11,212 +11,593 @@
11#ifndef _XTENSA_UNISTD_H 11#ifndef _XTENSA_UNISTD_H
12#define _XTENSA_UNISTD_H 12#define _XTENSA_UNISTD_H
13 13
14#define __NR_spill 0 14#ifndef __SYSCALL
15#define __NR_exit 1 15# define __SYSCALL(nr,func,nargs)
16#define __NR_read 3 16#endif
17#define __NR_write 4 17
18#define __NR_open 5 18#define __NR_spill 0
19#define __NR_close 6 19__SYSCALL( 0, sys_ni_syscall, 0)
20#define __NR_creat 8 20#define __NR_xtensa 1
21#define __NR_link 9 21__SYSCALL( 1, sys_ni_syscall, 0)
22#define __NR_unlink 10 22#define __NR_available4 2
23#define __NR_execve 11 23__SYSCALL( 2, sys_ni_syscall, 0)
24#define __NR_chdir 12 24#define __NR_available5 3
25#define __NR_mknod 14 25__SYSCALL( 3, sys_ni_syscall, 0)
26#define __NR_chmod 15 26#define __NR_available6 4
27#define __NR_lchown 16 27__SYSCALL( 4, sys_ni_syscall, 0)
28#define __NR_break 17 28#define __NR_available7 5
29#define __NR_lseek 19 29__SYSCALL( 5, sys_ni_syscall, 0)
30#define __NR_getpid 20 30#define __NR_available8 6
31#define __NR_mount 21 31__SYSCALL( 6, sys_ni_syscall, 0)
32#define __NR_setuid 23 32#define __NR_available9 7
33#define __NR_getuid 24 33__SYSCALL( 7, sys_ni_syscall, 0)
34#define __NR_ptrace 26 34
35#define __NR_utime 30 35/* File Operations */
36#define __NR_stty 31 36
37#define __NR_gtty 32 37#define __NR_open 8
38#define __NR_access 33 38__SYSCALL( 8, sys_open, 3)
39#define __NR_ftime 35 39#define __NR_close 9
40#define __NR_sync 36 40__SYSCALL( 9, sys_close, 1)
41#define __NR_kill 37 41#define __NR_dup 10
42#define __NR_rename 38 42__SYSCALL( 10, sys_dup, 1)
43#define __NR_mkdir 39 43#define __NR_dup2 11
44#define __NR_rmdir 40 44__SYSCALL( 11, sys_dup2, 2)
45#define __NR_dup 41 45#define __NR_read 12
46#define __NR_pipe 42 46__SYSCALL( 12, sys_read, 3)
47#define __NR_times 43 47#define __NR_write 13
48#define __NR_prof 44 48__SYSCALL( 13, sys_write, 3)
49#define __NR_brk 45 49#define __NR_select 14
50#define __NR_setgid 46 50__SYSCALL( 14, sys_select, 5)
51#define __NR_getgid 47 51#define __NR_lseek 15
52#define __NR_signal 48 52__SYSCALL( 15, sys_lseek, 3)
53#define __NR_geteuid 49 53#define __NR_poll 16
54#define __NR_getegid 50 54__SYSCALL( 16, sys_poll, 3)
55#define __NR_acct 51 55#define __NR__llseek 17
56#define __NR_lock 53 56__SYSCALL( 17, sys_llseek, 5)
57#define __NR_ioctl 54 57#define __NR_epoll_wait 18
58#define __NR_fcntl 55 58__SYSCALL( 18, sys_epoll_wait, 4)
59#define __NR_setpgid 57 59#define __NR_epoll_ctl 19
60#define __NR_ulimit 58 60__SYSCALL( 19, sys_epoll_ctl, 4)
61#define __NR_umask 60 61#define __NR_epoll_create 20
62#define __NR_chroot 61 62__SYSCALL( 20, sys_epoll_create, 1)
63#define __NR_ustat 62 63#define __NR_creat 21
64#define __NR_dup2 63 64__SYSCALL( 21, sys_creat, 2)
65#define __NR_getppid 64 65#define __NR_truncate 22
66#define __NR_setsid 66 66__SYSCALL( 22, sys_truncate, 2)
67#define __NR_sigaction 67 67#define __NR_ftruncate 23
68#define __NR_setreuid 70 68__SYSCALL( 23, sys_ftruncate, 2)
69#define __NR_setregid 71 69#define __NR_readv 24
70#define __NR_sigsuspend 72 70__SYSCALL( 24, sys_readv, 3)
71#define __NR_sigpending 73 71#define __NR_writev 25
72#define __NR_sethostname 74 72__SYSCALL( 25, sys_writev, 3)
73#define __NR_setrlimit 75 73#define __NR_fsync 26
74#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */ 74__SYSCALL( 26, sys_fsync, 1)
75#define __NR_getrusage 77 75#define __NR_fdatasync 27
76#define __NR_gettimeofday 78 76__SYSCALL( 27, sys_fdatasync, 1)
77#define __NR_settimeofday 79 77#define __NR_truncate64 28
78#define __NR_getgroups 80 78__SYSCALL( 28, sys_truncate64, 2)
79#define __NR_setgroups 81 79#define __NR_ftruncate64 29
80#define __NR_select 82 80__SYSCALL( 29, sys_ftruncate64, 2)
81#define __NR_symlink 83 81#define __NR_pread64 30
82#define __NR_readlink 85 82__SYSCALL( 30, sys_pread64, 6)
83#define __NR_uselib 86 83#define __NR_pwrite64 31
84#define __NR_swapon 87 84__SYSCALL( 31, sys_pwrite64, 6)
85#define __NR_reboot 88 85
86#define __NR_munmap 91 86#define __NR_link 32
87#define __NR_truncate 92 87__SYSCALL( 32, sys_link, 2)
88#define __NR_ftruncate 93 88#define __NR_rename 33
89#define __NR_fchmod 94 89__SYSCALL( 33, sys_rename, 2)
90#define __NR_fchown 95 90#define __NR_symlink 34
91#define __NR_getpriority 96 91__SYSCALL( 34, sys_symlink, 2)
92#define __NR_setpriority 97 92#define __NR_readlink 35
93#define __NR_profil 98 93__SYSCALL( 35, sys_readlink, 3)
94#define __NR_statfs 99 94#define __NR_mknod 36
95#define __NR_fstatfs 100 95__SYSCALL( 36, sys_mknod, 3)
96#define __NR_ioperm 101 96#define __NR_pipe 37
97#define __NR_syslog 103 97__SYSCALL( 37, xtensa_pipe, 1)
98#define __NR_setitimer 104 98#define __NR_unlink 38
99#define __NR_getitimer 105 99__SYSCALL( 38, sys_unlink, 1)
100#define __NR_stat 106 100#define __NR_rmdir 39
101#define __NR_lstat 107 101__SYSCALL( 39, sys_rmdir, 1)
102#define __NR_fstat 108 102
103#define __NR_iopl 110 103#define __NR_mkdir 40
104#define __NR_vhangup 111 104__SYSCALL( 40, sys_mkdir, 2)
105#define __NR_idle 112 105#define __NR_chdir 41
106#define __NR_wait4 114 106__SYSCALL( 41, sys_chdir, 1)
107#define __NR_swapoff 115 107#define __NR_fchdir 42
108#define __NR_sysinfo 116 108__SYSCALL( 42, sys_fchdir, 1)
109#define __NR_fsync 118 109#define __NR_getcwd 43
110#define __NR_sigreturn 119 110__SYSCALL( 43, sys_getcwd, 2)
111#define __NR_clone 120 111
112#define __NR_setdomainname 121 112#define __NR_chmod 44
113#define __NR_uname 122 113__SYSCALL( 44, sys_chmod, 2)
114#define __NR_modify_ldt 123 114#define __NR_chown 45
115#define __NR_adjtimex 124 115__SYSCALL( 45, sys_chown, 3)
116#define __NR_mprotect 125 116#define __NR_stat 46
117#define __NR_create_module 127 117__SYSCALL( 46, sys_newstat, 2)
118#define __NR_init_module 128 118#define __NR_stat64 47
119#define __NR_delete_module 129 119__SYSCALL( 47, sys_stat64, 2)
120#define __NR_quotactl 131 120
121#define __NR_getpgid 132 121#define __NR_lchown 48
122#define __NR_fchdir 133 122__SYSCALL( 48, sys_lchown, 3)
123#define __NR_bdflush 134 123#define __NR_lstat 49
124#define __NR_sysfs 135 124__SYSCALL( 49, sys_newlstat, 2)
125#define __NR_personality 136 125#define __NR_lstat64 50
126#define __NR_setfsuid 138 126__SYSCALL( 50, sys_lstat64, 2)
127#define __NR_setfsgid 139 127#define __NR_available51 51
128#define __NR__llseek 140 128__SYSCALL( 51, sys_ni_syscall, 0)
129#define __NR_getdents 141 129
130#define __NR__newselect 142 130#define __NR_fchmod 52
131#define __NR_flock 143 131__SYSCALL( 52, sys_fchmod, 2)
132#define __NR_msync 144 132#define __NR_fchown 53
133#define __NR_readv 145 133__SYSCALL( 53, sys_fchown, 3)
134#define __NR_writev 146 134#define __NR_fstat 54
135#define __NR_cacheflush 147 135__SYSCALL( 54, sys_newfstat, 2)
136#define __NR_cachectl 148 136#define __NR_fstat64 55
137#define __NR_sysxtensa 149 137__SYSCALL( 55, sys_fstat64, 2)
138#define __NR_sysdummy 150 138
139#define __NR_getsid 151 139#define __NR_flock 56
140#define __NR_fdatasync 152 140__SYSCALL( 56, sys_flock, 2)
141#define __NR__sysctl 153 141#define __NR_access 57
142#define __NR_mlock 154 142__SYSCALL( 57, sys_access, 2)
143#define __NR_munlock 155 143#define __NR_umask 58
144#define __NR_mlockall 156 144__SYSCALL( 58, sys_umask, 1)
145#define __NR_munlockall 157 145#define __NR_getdents 59
146#define __NR_sched_setparam 158 146__SYSCALL( 59, sys_getdents, 3)
147#define __NR_sched_getparam 159 147#define __NR_getdents64 60
148#define __NR_sched_setscheduler 160 148__SYSCALL( 60, sys_getdents64, 3)
149#define __NR_sched_getscheduler 161 149#define __NR_fcntl64 61
150#define __NR_sched_yield 162 150__SYSCALL( 61, sys_fcntl64, 3)
151#define __NR_sched_get_priority_max 163 151#define __NR_available62 62
152#define __NR_sched_get_priority_min 164 152__SYSCALL( 62, sys_ni_syscall, 0)
153#define __NR_sched_rr_get_interval 165 153#define __NR_fadvise64_64 63
154#define __NR_nanosleep 166 154__SYSCALL( 63, sys_fadvise64_64, 6)
155#define __NR_mremap 167 155#define __NR_utime 64 /* glibc 2.3.3 ?? */
156#define __NR_accept 168 156__SYSCALL( 64, sys_utime, 2)
157#define __NR_bind 169 157#define __NR_utimes 65
158#define __NR_connect 170 158__SYSCALL( 65, sys_utimes, 2)
159#define __NR_getpeername 171 159#define __NR_ioctl 66
160#define __NR_getsockname 172 160__SYSCALL( 66, sys_ioctl, 3)
161#define __NR_getsockopt 173 161#define __NR_fcntl 67
162#define __NR_listen 174 162__SYSCALL( 67, sys_fcntl, 3)
163#define __NR_recv 175 163
164#define __NR_recvfrom 176 164#define __NR_setxattr 68
165#define __NR_recvmsg 177 165__SYSCALL( 68, sys_setxattr, 5)
166#define __NR_send 178 166#define __NR_getxattr 69
167#define __NR_sendmsg 179 167__SYSCALL( 69, sys_getxattr, 4)
168#define __NR_sendto 180 168#define __NR_listxattr 70
169#define __NR_setsockopt 181 169__SYSCALL( 70, sys_listxattr, 3)
170#define __NR_shutdown 182 170#define __NR_removexattr 71
171#define __NR_socket 183 171__SYSCALL( 71, sys_removexattr, 2)
172#define __NR_socketpair 184 172#define __NR_lsetxattr 72
173#define __NR_setresuid 185 173__SYSCALL( 72, sys_lsetxattr, 5)
174#define __NR_getresuid 186 174#define __NR_lgetxattr 73
175#define __NR_query_module 187 175__SYSCALL( 73, sys_lgetxattr, 4)
176#define __NR_poll 188 176#define __NR_llistxattr 74
177#define __NR_nfsservctl 189 177__SYSCALL( 74, sys_llistxattr, 3)
178#define __NR_setresgid 190 178#define __NR_lremovexattr 75
179#define __NR_getresgid 191 179__SYSCALL( 75, sys_lremovexattr, 2)
180#define __NR_prctl 192 180#define __NR_fsetxattr 76
181#define __NR_rt_sigreturn 193 181__SYSCALL( 76, sys_fsetxattr, 5)
182#define __NR_rt_sigaction 194 182#define __NR_fgetxattr 77
183#define __NR_rt_sigprocmask 195 183__SYSCALL( 77, sys_fgetxattr, 4)
184#define __NR_rt_sigpending 196 184#define __NR_flistxattr 78
185#define __NR_rt_sigtimedwait 197 185__SYSCALL( 78, sys_flistxattr, 3)
186#define __NR_rt_sigqueueinfo 198 186#define __NR_fremovexattr 79
187#define __NR_rt_sigsuspend 199 187__SYSCALL( 79, sys_fremovexattr, 2)
188#define __NR_pread 200 188
189#define __NR_pwrite 201 189/* File Map / Shared Memory Operations */
190#define __NR_chown 202 190
191#define __NR_getcwd 203 191#define __NR_mmap2 80
192#define __NR_capget 204 192__SYSCALL( 80, xtensa_mmap2, 6)
193#define __NR_capset 205 193#define __NR_munmap 81
194#define __NR_sigaltstack 206 194__SYSCALL( 81, sys_munmap, 2)
195#define __NR_sendfile 207 195#define __NR_mprotect 82
196#define __NR_mmap2 210 196__SYSCALL( 82, sys_mprotect, 3)
197#define __NR_truncate64 211 197#define __NR_brk 83
198#define __NR_ftruncate64 212 198__SYSCALL( 83, sys_brk, 1)
199#define __NR_stat64 213 199#define __NR_mlock 84
200#define __NR_lstat64 214 200__SYSCALL( 84, sys_mlock, 2)
201#define __NR_fstat64 215 201#define __NR_munlock 85
202#define __NR_pivot_root 216 202__SYSCALL( 85, sys_munlock, 2)
203#define __NR_mincore 217 203#define __NR_mlockall 86
204#define __NR_madvise 218 204__SYSCALL( 86, sys_mlockall, 1)
205#define __NR_getdents64 219 205#define __NR_munlockall 87
206 206__SYSCALL( 87, sys_munlockall, 0)
207/* Keep this last; should always equal the last valid call number. */ 207#define __NR_mremap 88
208#define __NR_Linux_syscalls 220 208__SYSCALL( 88, sys_mremap, 4)
209 209#define __NR_msync 89
210/* user-visible error numbers are in the range -1 - -125: see 210__SYSCALL( 89, sys_msync, 3)
211 * <asm-xtensa/errno.h> */ 211#define __NR_mincore 90
212 212__SYSCALL( 90, sys_mincore, 3)
213#define SYSXTENSA_RESERVED 0 /* don't use this */ 213#define __NR_madvise 91
214#define SYSXTENSA_ATOMIC_SET 1 /* set variable */ 214__SYSCALL( 91, sys_madvise, 3)
215#define SYSXTENSA_ATOMIC_EXG_ADD 2 /* exchange memory and add */ 215#define __NR_shmget 92
216#define SYSXTENSA_ATOMIC_ADD 3 /* add to memory */ 216__SYSCALL( 92, sys_shmget, 4)
217#define SYSXTENSA_ATOMIC_CMP_SWP 4 /* compare and swap */ 217#define __NR_shmat 93
218 218__SYSCALL( 93, xtensa_shmat, 4)
219#define SYSXTENSA_COUNT 5 /* count of syscall0 functions*/ 219#define __NR_shmctl 94
220__SYSCALL( 94, sys_shmctl, 4)
221#define __NR_shmdt 95
222__SYSCALL( 95, sys_shmdt, 4)
223
224/* Socket Operations */
225
226#define __NR_socket 96
227__SYSCALL( 96, sys_socket, 3)
228#define __NR_setsockopt 97
229__SYSCALL( 97, sys_setsockopt, 5)
230#define __NR_getsockopt 98
231__SYSCALL( 98, sys_getsockopt, 5)
232#define __NR_shutdown 99
233__SYSCALL( 99, sys_shutdown, 2)
234
235#define __NR_bind 100
236__SYSCALL(100, sys_bind, 3)
237#define __NR_connect 101
238__SYSCALL(101, sys_connect, 3)
239#define __NR_listen 102
240__SYSCALL(102, sys_listen, 2)
241#define __NR_accept 103
242__SYSCALL(103, sys_accept, 3)
243
244#define __NR_getsockname 104
245__SYSCALL(104, sys_getsockname, 3)
246#define __NR_getpeername 105
247__SYSCALL(105, sys_getpeername, 3)
248#define __NR_sendmsg 106
249__SYSCALL(106, sys_sendmsg, 3)
250#define __NR_recvmsg 107
251__SYSCALL(107, sys_recvmsg, 3)
252#define __NR_send 108
253__SYSCALL(108, sys_send, 4)
254#define __NR_recv 109
255__SYSCALL(109, sys_recv, 4)
256#define __NR_sendto 110
257__SYSCALL(110, sys_sendto, 6)
258#define __NR_recvfrom 111
259__SYSCALL(111, sys_recvfrom, 6)
260
261#define __NR_socketpair 112
262__SYSCALL(112, sys_socketpair, 4)
263#define __NR_sendfile 113
264__SYSCALL(113, sys_sendfile, 4)
265#define __NR_sendfile64 114
266__SYSCALL(114, sys_sendfile64, 4)
267#define __NR_available115 115
268__SYSCALL(115, sys_ni_syscall, 0)
269
270/* Process Operations */
271
272#define __NR_clone 116
273__SYSCALL(116, xtensa_clone, 5)
274#define __NR_execve 117
275__SYSCALL(117, xtensa_execve, 3)
276#define __NR_exit 118
277__SYSCALL(118, sys_exit, 1)
278#define __NR_exit_group 119
279__SYSCALL(119, sys_exit_group, 1)
280#define __NR_getpid 120
281__SYSCALL(120, sys_getpid, 0)
282#define __NR_wait4 121
283__SYSCALL(121, sys_wait4, 4)
284#define __NR_waitid 122
285__SYSCALL(122, sys_waitid, 5)
286#define __NR_kill 123
287__SYSCALL(123, sys_kill, 2)
288#define __NR_tkill 124
289__SYSCALL(124, sys_tkill, 2)
290#define __NR_tgkill 125
291__SYSCALL(125, sys_tgkill, 3)
292#define __NR_set_tid_address 126
293__SYSCALL(126, sys_set_tid_address, 1)
294#define __NR_gettid 127
295__SYSCALL(127, sys_gettid, 0)
296#define __NR_setsid 128
297__SYSCALL(128, sys_setsid, 0)
298#define __NR_getsid 129
299__SYSCALL(129, sys_getsid, 1)
300#define __NR_prctl 130
301__SYSCALL(130, sys_prctl, 5)
302#define __NR_personality 131
303__SYSCALL(131, sys_personality, 1)
304#define __NR_getpriority 132
305__SYSCALL(132, sys_getpriority, 2)
306#define __NR_setpriority 133
307__SYSCALL(133, sys_setpriority, 3)
308#define __NR_setitimer 134
309__SYSCALL(134, sys_setitimer, 3)
310#define __NR_getitimer 135
311__SYSCALL(135, sys_getitimer, 2)
312#define __NR_setuid 136
313__SYSCALL(136, sys_setuid, 1)
314#define __NR_getuid 137
315__SYSCALL(137, sys_getuid, 0)
316#define __NR_setgid 138
317__SYSCALL(138, sys_setgid, 1)
318#define __NR_getgid 139
319__SYSCALL(139, sys_getgid, 0)
320#define __NR_geteuid 140
321__SYSCALL(140, sys_geteuid, 0)
322#define __NR_getegid 141
323__SYSCALL(141, sys_getegid, 0)
324#define __NR_setreuid 142
325__SYSCALL(142, sys_setreuid, 2)
326#define __NR_setregid 143
327__SYSCALL(143, sys_setregid, 2)
328#define __NR_setresuid 144
329__SYSCALL(144, sys_setresuid, 3)
330#define __NR_getresuid 145
331__SYSCALL(145, sys_getresuid, 3)
332#define __NR_setresgid 146
333__SYSCALL(146, sys_setresgid, 3)
334#define __NR_getresgid 147
335__SYSCALL(147, sys_getresgid, 3)
336#define __NR_setpgid 148
337__SYSCALL(148, sys_setpgid, 2)
338#define __NR_getpgid 149
339__SYSCALL(149, sys_getpgid, 1)
340#define __NR_getppid 150
341__SYSCALL(150, sys_getppid, 0)
342#define __NR_available151 151
343__SYSCALL(151, sys_ni_syscall, 0)
344
345#define __NR_reserved152 152 /* set_thread_area */
346__SYSCALL(152, sys_ni_syscall, 0)
347#define __NR_reserved153 153 /* get_thread_area */
348__SYSCALL(153, sys_ni_syscall, 0)
349#define __NR_times 154
350__SYSCALL(154, sys_times, 1)
351#define __NR_acct 155
352__SYSCALL(155, sys_acct, 1)
353#define __NR_sched_setaffinity 156
354__SYSCALL(156, sys_sched_setaffinity, 3)
355#define __NR_sched_getaffinity 157
356__SYSCALL(157, sys_sched_getaffinity, 3)
357#define __NR_capget 158
358__SYSCALL(158, sys_capget, 2)
359#define __NR_capset 159
360__SYSCALL(159, sys_capset, 2)
361#define __NR_ptrace 160
362__SYSCALL(160, sys_ptrace, 4)
363#define __NR_semtimedop 161
364__SYSCALL(161, sys_semtimedop, 5)
365#define __NR_semget 162
366__SYSCALL(162, sys_semget, 4)
367#define __NR_semop 163
368__SYSCALL(163, sys_semop, 4)
369#define __NR_semctl 164
370__SYSCALL(164, sys_semctl, 4)
371#define __NR_available165 165
372__SYSCALL(165, sys_ni_syscall, 0)
373#define __NR_msgget 166
374__SYSCALL(166, sys_msgget, 4)
375#define __NR_msgsnd 167
376__SYSCALL(167, sys_msgsnd, 4)
377#define __NR_msgrcv 168
378__SYSCALL(168, sys_msgrcv, 4)
379#define __NR_msgctl 169
380__SYSCALL(169, sys_msgctl, 4)
381#define __NR_available170 170
382__SYSCALL(170, sys_ni_syscall, 0)
383#define __NR_available171 171
384__SYSCALL(171, sys_ni_syscall, 0)
385
386/* File System */
387
388#define __NR_mount 172
389__SYSCALL(172, sys_mount, 5)
390#define __NR_swapon 173
391__SYSCALL(173, sys_swapon, 2)
392#define __NR_chroot 174
393__SYSCALL(174, sys_chroot, 1)
394#define __NR_pivot_root 175
395__SYSCALL(175, sys_pivot_root, 2)
396#define __NR_umount 176
397__SYSCALL(176, sys_umount, 2)
398#define __NR_swapoff 177
399__SYSCALL(177, sys_swapoff, 1)
400#define __NR_sync 178
401__SYSCALL(178, sys_sync, 0)
402#define __NR_available179 179
403__SYSCALL(179, sys_ni_syscall, 0)
404#define __NR_setfsuid 180
405__SYSCALL(180, sys_setfsuid, 1)
406#define __NR_setfsgid 181
407__SYSCALL(181, sys_setfsgid, 1)
408#define __NR_sysfs 182
409__SYSCALL(182, sys_sysfs, 3)
410#define __NR_ustat 183
411__SYSCALL(183, sys_ustat, 2)
412#define __NR_statfs 184
413__SYSCALL(184, sys_statfs, 2)
414#define __NR_fstatfs 185
415__SYSCALL(185, sys_fstatfs, 2)
416#define __NR_statfs64 186
417__SYSCALL(186, sys_statfs64, 3)
418#define __NR_fstatfs64 187
419__SYSCALL(187, sys_fstatfs64, 3)
420
421/* System */
422
423#define __NR_setrlimit 188
424__SYSCALL(188, sys_setrlimit, 2)
425#define __NR_getrlimit 189
426__SYSCALL(189, sys_getrlimit, 2)
427#define __NR_getrusage 190
428__SYSCALL(190, sys_getrusage, 2)
429#define __NR_futex 191
430__SYSCALL(191, sys_futex, 5)
431#define __NR_gettimeofday 192
432__SYSCALL(192, sys_gettimeofday, 2)
433#define __NR_settimeofday 193
434__SYSCALL(193, sys_settimeofday, 2)
435#define __NR_adjtimex 194
436__SYSCALL(194, sys_adjtimex, 1)
437#define __NR_nanosleep 195
438__SYSCALL(195, sys_nanosleep, 2)
439#define __NR_getgroups 196
440__SYSCALL(196, sys_getgroups, 2)
441#define __NR_setgroups 197
442__SYSCALL(197, sys_setgroups, 2)
443#define __NR_sethostname 198
444__SYSCALL(198, sys_sethostname, 2)
445#define __NR_setdomainname 199
446__SYSCALL(199, sys_setdomainname, 2)
447#define __NR_syslog 200
448__SYSCALL(200, sys_syslog, 3)
449#define __NR_vhangup 201
450__SYSCALL(201, sys_vhangup, 0)
451#define __NR_uselib 202
452__SYSCALL(202, sys_uselib, 1)
453#define __NR_reboot 203
454__SYSCALL(203, sys_reboot, 3)
455#define __NR_quotactl 204
456__SYSCALL(204, sys_quotactl, 4)
457#define __NR_nfsservctl 205
458__SYSCALL(205, sys_nfsservctl, 3)
459#define __NR__sysctl 206
460__SYSCALL(206, sys_sysctl, 1)
461#define __NR_bdflush 207
462__SYSCALL(207, sys_bdflush, 2)
463#define __NR_uname 208
464__SYSCALL(208, sys_newuname, 1)
465#define __NR_sysinfo 209
466__SYSCALL(209, sys_sysinfo, 1)
467#define __NR_init_module 210
468__SYSCALL(210, sys_init_module, 2)
469#define __NR_delete_module 211
470__SYSCALL(211, sys_delete_module, 1)
471
472#define __NR_sched_setparam 212
473__SYSCALL(212, sys_sched_setparam, 2)
474#define __NR_sched_getparam 213
475__SYSCALL(213, sys_sched_getparam, 2)
476#define __NR_sched_setscheduler 214
477__SYSCALL(214, sys_sched_setscheduler, 3)
478#define __NR_sched_getscheduler 215
479__SYSCALL(215, sys_sched_getscheduler, 1)
480#define __NR_sched_get_priority_max 216
481__SYSCALL(216, sys_sched_get_priority_max, 1)
482#define __NR_sched_get_priority_min 217
483__SYSCALL(217, sys_sched_get_priority_min, 1)
484#define __NR_sched_rr_get_interval 218
485__SYSCALL(218, sys_sched_rr_get_interval, 2)
486#define __NR_sched_yield 219
487__SYSCALL(219, sys_sched_yield, 0)
488#define __NR_sigreturn 222
489__SYSCALL(222, xtensa_sigreturn, 0)
490
491/* Signal Handling */
492
493#define __NR_restart_syscall 223
494__SYSCALL(223, sys_restart_syscall, 0)
495#define __NR_sigaltstack 224
496__SYSCALL(224, xtensa_sigaltstack, 2)
497#define __NR_rt_sigreturn 225
498__SYSCALL(225, xtensa_rt_sigreturn, 1)
499#define __NR_rt_sigaction 226
500__SYSCALL(226, sys_rt_sigaction, 4)
501#define __NR_rt_sigprocmask 227
502__SYSCALL(227, sys_rt_sigprocmask, 4)
503#define __NR_rt_sigpending 228
504__SYSCALL(228, sys_rt_sigpending, 2)
505#define __NR_rt_sigtimedwait 229
506__SYSCALL(229, sys_rt_sigtimedwait, 4)
507#define __NR_rt_sigqueueinfo 230
508__SYSCALL(230, sys_rt_sigqueueinfo, 3)
509#define __NR_rt_sigsuspend 231
510__SYSCALL(231, xtensa_rt_sigsuspend, 2)
511
512/* Message */
513
514#define __NR_mq_open 232
515__SYSCALL(232, sys_mq_open, 4)
516#define __NR_mq_unlink 233
517__SYSCALL(233, sys_mq_unlink, 1)
518#define __NR_mq_timedsend 234
519__SYSCALL(234, sys_mq_timedsend, 5)
520#define __NR_mq_timedreceive 235
521__SYSCALL(235, sys_mq_timedreceive, 5)
522#define __NR_mq_notify 236
523__SYSCALL(236, sys_mq_notify, 2)
524#define __NR_mq_getsetattr 237
525__SYSCALL(237, sys_mq_getsetattr, 3)
526#define __NR_available238 238
527__SYSCALL(238, sys_ni_syscall, 0)
528
529/* IO */
530
531#define __NR_io_setup 239
532__SYSCALL(239, sys_io_setup, 2)
533#define __NR_io_destroy 240
534__SYSCALL(240, sys_io_destroy, 1)
535#define __NR_io_submit 241
536__SYSCALL(241, sys_io_submit, 3)
537#define __NR_io_getevents 242
538__SYSCALL(242, sys_io_getevents, 5)
539#define __NR_io_cancel 243
540__SYSCALL(243, sys_io_cancel, 3)
541#define __NR_clock_settime 244
542__SYSCALL(244, sys_clock_settime, 2)
543#define __NR_clock_gettime 245
544__SYSCALL(245, sys_clock_gettime, 2)
545#define __NR_clock_getres 246
546__SYSCALL(246, sys_clock_getres, 2)
547#define __NR_clock_nanosleep 247
548__SYSCALL(247, sys_clock_nanosleep, 4)
549
550/* Timer */
551
552#define __NR_timer_create 248
553__SYSCALL(248, sys_timer_create, 3)
554#define __NR_timer_delete 249
555__SYSCALL(249, sys_timer_delete, 1)
556#define __NR_timer_settime 250
557__SYSCALL(250, sys_timer_settime, 4)
558#define __NR_timer_gettime 251
559__SYSCALL(251, sys_timer_gettime, 2)
560#define __NR_timer_getoverrun 252
561__SYSCALL(252, sys_timer_getoverrun, 1)
562
563/* System */
564
565#define __NR_reserved244 253
566__SYSCALL(253, sys_ni_syscall, 0)
567#define __NR_lookup_dcookie 254
568__SYSCALL(254, sys_lookup_dcookie, 4)
569#define __NR_available255 255
570__SYSCALL(255, sys_ni_syscall, 0)
571#define __NR_add_key 256
572__SYSCALL(256, sys_add_key, 5)
573#define __NR_request_key 257
574__SYSCALL(257, sys_request_key, 5)
575#define __NR_keyctl 258
576__SYSCALL(258, sys_keyctl, 5)
577#define __NR_available259 259
578__SYSCALL(259, sys_ni_syscall, 0)
579
580#define __NR_syscall_count 261
581
582/*
583 * sysxtensa syscall handler
584 *
585 * int sysxtensa (SYS_XTENSA_ATOMIC_SET, ptr, val, unused);
586 * int sysxtensa (SYS_XTENSA_ATOMIC_ADD, ptr, val, unused);
587 * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val, unused);
588 * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
589 * a2 a6 a3 a4 a5
590 */
591
592#define SYS_XTENSA_RESERVED 0 /* don't use this */
593#define SYS_XTENSA_ATOMIC_SET 1 /* set variable */
594#define SYS_XTENSA_ATOMIC_EXG_ADD 2 /* exchange memory and add */
595#define SYS_XTENSA_ATOMIC_ADD 3 /* add to memory */
596#define SYS_XTENSA_ATOMIC_CMP_SWP 4 /* compare and swap */
597
598#define SYS_XTENSA_COUNT 5 /* count */
599
600#ifdef __KERNEL__
220 601
221/* 602/*
222 * "Conditional" syscalls 603 * "Conditional" syscalls
@@ -230,6 +611,9 @@
230#define __ARCH_WANT_SYS_UTIME 611#define __ARCH_WANT_SYS_UTIME
231#define __ARCH_WANT_SYS_LLSEEK 612#define __ARCH_WANT_SYS_LLSEEK
232#define __ARCH_WANT_SYS_RT_SIGACTION 613#define __ARCH_WANT_SYS_RT_SIGACTION
233#endif /* __KERNEL__ */ 614#define __ARCH_WANT_SYS_RT_SIGSUSPEND
615
616#endif /* __KERNEL__ */
234 617
235#endif /* _XTENSA_UNISTD_H */ 618#endif /* _XTENSA_UNISTD_H */
619
diff --git a/include/asm-xtensa/variant-fsf/core.h b/include/asm-xtensa/variant-fsf/core.h
new file mode 100644
index 000000000000..2f337605c744
--- /dev/null
+++ b/include/asm-xtensa/variant-fsf/core.h
@@ -0,0 +1,359 @@
1/*
2 * Xtensa processor core configuration information.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1999-2006 Tensilica Inc.
9 */
10
11#ifndef _XTENSA_CORE_H
12#define _XTENSA_CORE_H
13
14
15/****************************************************************************
16 Parameters Useful for Any Code, USER or PRIVILEGED
17 ****************************************************************************/
18
19/*
20 * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
21 * configured, and a value of 0 otherwise. These macros are always defined.
22 */
23
24
25/*----------------------------------------------------------------------
26 ISA
27 ----------------------------------------------------------------------*/
28
29#define XCHAL_HAVE_BE 1 /* big-endian byte ordering */
30#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
31#define XCHAL_NUM_AREGS 64 /* num of physical addr regs */
32#define XCHAL_NUM_AREGS_LOG2 6 /* log2(XCHAL_NUM_AREGS) */
33#define XCHAL_MAX_INSTRUCTION_SIZE 3 /* max instr bytes (3..8) */
34#define XCHAL_HAVE_DEBUG 1 /* debug option */
35#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
36#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
37#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
38#define XCHAL_HAVE_MINMAX 0 /* MIN/MAX instructions */
39#define XCHAL_HAVE_SEXT 0 /* SEXT instruction */
40#define XCHAL_HAVE_CLAMPS 0 /* CLAMPS instruction */
41#define XCHAL_HAVE_MUL16 0 /* MUL16S/MUL16U instructions */
42#define XCHAL_HAVE_MUL32 0 /* MULL instruction */
43#define XCHAL_HAVE_MUL32_HIGH 0 /* MULUH/MULSH instructions */
44#define XCHAL_HAVE_L32R 1 /* L32R instruction */
45#define XCHAL_HAVE_ABSOLUTE_LITERALS 1 /* non-PC-rel (extended) L32R */
46#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
47#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
48#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
49#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
50#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
51#define XCHAL_HAVE_ABS 1 /* ABS instruction */
52/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
53/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
54#define XCHAL_HAVE_RELEASE_SYNC 0 /* L32AI/S32RI instructions */
55#define XCHAL_HAVE_S32C1I 0 /* S32C1I instruction */
56#define XCHAL_HAVE_SPECULATION 0 /* speculation */
57#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
58#define XCHAL_NUM_CONTEXTS 1 /* */
59#define XCHAL_NUM_MISC_REGS 2 /* num of scratch regs (0..4) */
60#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
61#define XCHAL_HAVE_PRID 1 /* processor ID register */
62#define XCHAL_HAVE_THREADPTR 1 /* THREADPTR register */
63#define XCHAL_HAVE_BOOLEANS 0 /* boolean registers */
64#define XCHAL_HAVE_CP 0 /* CPENABLE reg (coprocessor) */
65#define XCHAL_CP_MAXCFG 0 /* max allowed cp id plus one */
66#define XCHAL_HAVE_MAC16 0 /* MAC16 package */
67#define XCHAL_HAVE_VECTORFPU2005 0 /* vector floating-point pkg */
68#define XCHAL_HAVE_FP 0 /* floating point pkg */
69#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
70#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
71#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */
72
73
74/*----------------------------------------------------------------------
75 MISC
76 ----------------------------------------------------------------------*/
77
78#define XCHAL_NUM_WRITEBUFFER_ENTRIES 4 /* size of write buffer */
79#define XCHAL_INST_FETCH_WIDTH 4 /* instr-fetch width in bytes */
80#define XCHAL_DATA_WIDTH 4 /* data width in bytes */
81/* In T1050, applies to selected core load and store instructions (see ISA): */
82#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
83#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/
84
85#define XCHAL_CORE_ID "fsf" /* alphanum core name
86 (CoreID) set in the Xtensa
87 Processor Generator */
88
89#define XCHAL_BUILD_UNIQUE_ID 0x00006700 /* 22-bit sw build ID */
90
91/*
92 * These definitions describe the hardware targeted by this software.
93 */
94#define XCHAL_HW_CONFIGID0 0xC103C3FF /* ConfigID hi 32 bits*/
95#define XCHAL_HW_CONFIGID1 0x0C006700 /* ConfigID lo 32 bits*/
96#define XCHAL_HW_VERSION_NAME "LX2.0.0" /* full version name */
97#define XCHAL_HW_VERSION_MAJOR 2200 /* major ver# of targeted hw */
98#define XCHAL_HW_VERSION_MINOR 0 /* minor ver# of targeted hw */
99#define XTHAL_HW_REL_LX2 1
100#define XTHAL_HW_REL_LX2_0 1
101#define XTHAL_HW_REL_LX2_0_0 1
102#define XCHAL_HW_CONFIGID_RELIABLE 1
103/* If software targets a *range* of hardware versions, these are the bounds: */
104#define XCHAL_HW_MIN_VERSION_MAJOR 2200 /* major v of earliest tgt hw */
105#define XCHAL_HW_MIN_VERSION_MINOR 0 /* minor v of earliest tgt hw */
106#define XCHAL_HW_MAX_VERSION_MAJOR 2200 /* major v of latest tgt hw */
107#define XCHAL_HW_MAX_VERSION_MINOR 0 /* minor v of latest tgt hw */
108
109
110/*----------------------------------------------------------------------
111 CACHE
112 ----------------------------------------------------------------------*/
113
114#define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
115#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
116#define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
117#define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
118
119#define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
120#define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
121
122#define XCHAL_DCACHE_IS_WRITEBACK 0 /* writeback feature */
123
124
125
126
127/****************************************************************************
128 Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
129 ****************************************************************************/
130
131
132#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
133
134/*----------------------------------------------------------------------
135 CACHE
136 ----------------------------------------------------------------------*/
137
138#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */
139
140/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
141
142/* Number of cache sets in log2(lines per way): */
143#define XCHAL_ICACHE_SETWIDTH 8
144#define XCHAL_DCACHE_SETWIDTH 8
145
146/* Cache set associativity (number of ways): */
147#define XCHAL_ICACHE_WAYS 2
148#define XCHAL_DCACHE_WAYS 2
149
150/* Cache features: */
151#define XCHAL_ICACHE_LINE_LOCKABLE 0
152#define XCHAL_DCACHE_LINE_LOCKABLE 0
153#define XCHAL_ICACHE_ECC_PARITY 0
154#define XCHAL_DCACHE_ECC_PARITY 0
155
156/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
157#define XCHAL_CA_BITS 4
158
159
160/*----------------------------------------------------------------------
161 INTERNAL I/D RAM/ROMs and XLMI
162 ----------------------------------------------------------------------*/
163
164#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
165#define XCHAL_NUM_INSTRAM 0 /* number of core instr. RAMs */
166#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
167#define XCHAL_NUM_DATARAM 0 /* number of core data RAMs */
168#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
169#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports */
170
171
172/*----------------------------------------------------------------------
173 INTERRUPTS and TIMERS
174 ----------------------------------------------------------------------*/
175
176#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
177#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
178#define XCHAL_HAVE_NMI 0 /* non-maskable interrupt */
179#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
180#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
181#define XCHAL_NUM_INTERRUPTS 17 /* number of interrupts */
182#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */
183#define XCHAL_NUM_EXTINTERRUPTS 10 /* num of external interrupts */
184#define XCHAL_NUM_INTLEVELS 4 /* number of interrupt levels
185 (not including level zero) */
186#define XCHAL_EXCM_LEVEL 1 /* level masked by PS.EXCM */
187 /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
188
189/* Masks of interrupts at each interrupt level: */
190#define XCHAL_INTLEVEL1_MASK 0x000064F9
191#define XCHAL_INTLEVEL2_MASK 0x00008902
192#define XCHAL_INTLEVEL3_MASK 0x00011204
193#define XCHAL_INTLEVEL4_MASK 0x00000000
194#define XCHAL_INTLEVEL5_MASK 0x00000000
195#define XCHAL_INTLEVEL6_MASK 0x00000000
196#define XCHAL_INTLEVEL7_MASK 0x00000000
197
198/* Masks of interrupts at each range 1..n of interrupt levels: */
199#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x000064F9
200#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x0000EDFB
201#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x0001FFFF
202#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x0001FFFF
203#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x0001FFFF
204#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x0001FFFF
205#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x0001FFFF
206
207/* Level of each interrupt: */
208#define XCHAL_INT0_LEVEL 1
209#define XCHAL_INT1_LEVEL 2
210#define XCHAL_INT2_LEVEL 3
211#define XCHAL_INT3_LEVEL 1
212#define XCHAL_INT4_LEVEL 1
213#define XCHAL_INT5_LEVEL 1
214#define XCHAL_INT6_LEVEL 1
215#define XCHAL_INT7_LEVEL 1
216#define XCHAL_INT8_LEVEL 2
217#define XCHAL_INT9_LEVEL 3
218#define XCHAL_INT10_LEVEL 1
219#define XCHAL_INT11_LEVEL 2
220#define XCHAL_INT12_LEVEL 3
221#define XCHAL_INT13_LEVEL 1
222#define XCHAL_INT14_LEVEL 1
223#define XCHAL_INT15_LEVEL 2
224#define XCHAL_INT16_LEVEL 3
225#define XCHAL_DEBUGLEVEL 4 /* debug interrupt level */
226#define XCHAL_HAVE_DEBUG_EXTERN_INT 0 /* OCD external db interrupt */
227
228/* Type of each interrupt: */
229#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
230#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
231#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
232#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
233#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
234#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
235#define XCHAL_INT6_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
236#define XCHAL_INT7_TYPE XTHAL_INTTYPE_EXTERN_EDGE
237#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_EDGE
238#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_EDGE
239#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER
240#define XCHAL_INT11_TYPE XTHAL_INTTYPE_TIMER
241#define XCHAL_INT12_TYPE XTHAL_INTTYPE_TIMER
242#define XCHAL_INT13_TYPE XTHAL_INTTYPE_SOFTWARE
243#define XCHAL_INT14_TYPE XTHAL_INTTYPE_SOFTWARE
244#define XCHAL_INT15_TYPE XTHAL_INTTYPE_SOFTWARE
245#define XCHAL_INT16_TYPE XTHAL_INTTYPE_SOFTWARE
246
247/* Masks of interrupts for each type of interrupt: */
248#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFFE0000
249#define XCHAL_INTTYPE_MASK_SOFTWARE 0x0001E000
250#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x00000380
251#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000007F
252#define XCHAL_INTTYPE_MASK_TIMER 0x00001C00
253#define XCHAL_INTTYPE_MASK_NMI 0x00000000
254#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
255
256/* Interrupt numbers assigned to specific interrupt sources: */
257#define XCHAL_TIMER0_INTERRUPT 10 /* CCOMPARE0 */
258#define XCHAL_TIMER1_INTERRUPT 11 /* CCOMPARE1 */
259#define XCHAL_TIMER2_INTERRUPT 12 /* CCOMPARE2 */
260#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
261
262/* Interrupt numbers for levels at which only one interrupt is configured: */
263/* (There are many interrupts each at level(s) 1, 2, 3.) */
264
265
266/*
267 * External interrupt vectors/levels.
268 * These macros describe how Xtensa processor interrupt numbers
269 * (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
270 * map to external BInterrupt<n> pins, for those interrupts
271 * configured as external (level-triggered, edge-triggered, or NMI).
272 * See the Xtensa processor databook for more details.
273 */
274
275/* Core interrupt numbers mapped to each EXTERNAL interrupt number: */
276#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
277#define XCHAL_EXTINT1_NUM 1 /* (intlevel 2) */
278#define XCHAL_EXTINT2_NUM 2 /* (intlevel 3) */
279#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
280#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
281#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
282#define XCHAL_EXTINT6_NUM 6 /* (intlevel 1) */
283#define XCHAL_EXTINT7_NUM 7 /* (intlevel 1) */
284#define XCHAL_EXTINT8_NUM 8 /* (intlevel 2) */
285#define XCHAL_EXTINT9_NUM 9 /* (intlevel 3) */
286
287
288/*----------------------------------------------------------------------
289 EXCEPTIONS and VECTORS
290 ----------------------------------------------------------------------*/
291
292#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
293 number: 1 == XEA1 (old)
294 2 == XEA2 (new)
295 0 == XEAX (extern) */
296#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
297#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
298#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
299#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
300#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
301
302#define XCHAL_RESET_VECTOR_VADDR 0xFE000020
303#define XCHAL_RESET_VECTOR_PADDR 0xFE000020
304#define XCHAL_USER_VECTOR_VADDR 0xD0000220
305#define XCHAL_USER_VECTOR_PADDR 0x00000220
306#define XCHAL_KERNEL_VECTOR_VADDR 0xD0000200
307#define XCHAL_KERNEL_VECTOR_PADDR 0x00000200
308#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0xD0000290
309#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x00000290
310#define XCHAL_WINDOW_VECTORS_VADDR 0xD0000000
311#define XCHAL_WINDOW_VECTORS_PADDR 0x00000000
312#define XCHAL_INTLEVEL2_VECTOR_VADDR 0xD0000240
313#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x00000240
314#define XCHAL_INTLEVEL3_VECTOR_VADDR 0xD0000250
315#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x00000250
316#define XCHAL_INTLEVEL4_VECTOR_VADDR 0xFE000520
317#define XCHAL_INTLEVEL4_VECTOR_PADDR 0xFE000520
318#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL4_VECTOR_VADDR
319#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL4_VECTOR_PADDR
320
321
322/*----------------------------------------------------------------------
323 DEBUG
324 ----------------------------------------------------------------------*/
325
326#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
327#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
328#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
329#define XCHAL_HAVE_OCD_DIR_ARRAY 1 /* faster OCD option */
330
331
332/*----------------------------------------------------------------------
333 MMU
334 ----------------------------------------------------------------------*/
335
336/* See <xtensa/config/core-matmap.h> header file for more details. */
337
338#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
339#define XCHAL_HAVE_SPANNING_WAY 0 /* one way maps I+D 4GB vaddr */
340#define XCHAL_HAVE_IDENTITY_MAP 0 /* vaddr == paddr always */
341#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
342#define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* region protection */
343#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
344#define XCHAL_HAVE_PTP_MMU 1 /* full MMU (with page table
345 [autorefill] and protection)
346 usable for an MMU-based OS */
347/* If none of the above last 4 are set, it's a custom TLB configuration. */
348#define XCHAL_ITLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
349#define XCHAL_DTLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
350
351#define XCHAL_MMU_ASID_BITS 8 /* number of bits in ASIDs */
352#define XCHAL_MMU_RINGS 4 /* number of rings (1..4) */
353#define XCHAL_MMU_RING_BITS 2 /* num of bits in RING field */
354
355#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
356
357
358#endif /* _XTENSA_CORE_CONFIGURATION_H */
359
diff --git a/include/asm-xtensa/variant-fsf/tie.h b/include/asm-xtensa/variant-fsf/tie.h
new file mode 100644
index 000000000000..a73c71664918
--- /dev/null
+++ b/include/asm-xtensa/variant-fsf/tie.h
@@ -0,0 +1,22 @@
1/*
2 * Xtensa processor core configuration information.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1999-2006 Tensilica Inc.
9 */
10
11#ifndef XTENSA_TIE_H
12#define XTENSA_TIE_H
13
14/*----------------------------------------------------------------------
15 COPROCESSORS and EXTRA STATE
16 ----------------------------------------------------------------------*/
17
18#define XCHAL_CP_NUM 0 /* number of coprocessors */
19#define XCHAL_CP_MASK 0x00
20
21#endif /*XTENSA_CONFIG_TIE_H*/
22
diff --git a/include/asm-xtensa/xtensa/cacheasm.h b/include/asm-xtensa/xtensa/cacheasm.h
deleted file mode 100644
index 0cdbb0bf180e..000000000000
--- a/include/asm-xtensa/xtensa/cacheasm.h
+++ /dev/null
@@ -1,708 +0,0 @@
1#ifndef XTENSA_CACHEASM_H
2#define XTENSA_CACHEASM_H
3
4/*
5 * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND
6 *
7 * include/asm-xtensa/xtensa/cacheasm.h -- assembler-specific cache
8 * related definitions that depend on CORE configuration.
9 *
10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file "COPYING" in the main directory of this archive
12 * for more details.
13 *
14 * Copyright (C) 2002 Tensilica Inc.
15 */
16
17
18#include <xtensa/coreasm.h>
19
20
21/*
22 * This header file defines assembler macros of the form:
23 * <x>cache_<func>
24 * where <x> is 'i' or 'd' for instruction and data caches,
25 * and <func> indicates the function of the macro.
26 *
27 * The following functions <func> are defined,
28 * and apply only to the specified cache (I or D):
29 *
30 * reset
31 * Resets the cache.
32 *
33 * sync
34 * Makes sure any previous cache instructions have been completed;
35 * ie. makes sure any previous cache control operations
36 * have had full effect and been synchronized to memory.
37 * Eg. any invalidate completed [so as not to generate a hit],
38 * any writebacks or other pipelined writes written to memory, etc.
39 *
40 * invalidate_line (single cache line)
41 * invalidate_region (specified memory range)
42 * invalidate_all (entire cache)
43 * Invalidates all cache entries that cache
44 * data from the specified memory range.
45 * NOTE: locked entries are not invalidated.
46 *
47 * writeback_line (single cache line)
48 * writeback_region (specified memory range)
49 * writeback_all (entire cache)
50 * Writes back to memory all dirty cache entries
51 * that cache data from the specified memory range,
52 * and marks these entries as clean.
53 * NOTE: on some future implementations, this might
54 * also invalidate.
55 * NOTE: locked entries are written back, but never invalidated.
56 * NOTE: instruction caches never implement writeback.
57 *
58 * writeback_inv_line (single cache line)
59 * writeback_inv_region (specified memory range)
60 * writeback_inv_all (entire cache)
61 * Writes back to memory all dirty cache entries
62 * that cache data from the specified memory range,
63 * and invalidates these entries (including all clean
64 * cache entries that cache data from that range).
65 * NOTE: locked entries are written back but not invalidated.
66 * NOTE: instruction caches never implement writeback.
67 *
68 * lock_line (single cache line)
69 * lock_region (specified memory range)
70 * Prefetch and lock the specified memory range into cache.
71 * NOTE: if any part of the specified memory range cannot
72 * be locked, a ??? exception occurs. These macros don't
73 * do anything special (yet anyway) to handle this situation.
74 *
75 * unlock_line (single cache line)
76 * unlock_region (specified memory range)
77 * unlock_all (entire cache)
78 * Unlock cache entries that cache the specified memory range.
79 * Entries not already locked are unaffected.
80 */
81
82
83
84/*************************** GENERIC -- ALL CACHES ***************************/
85
86
87/*
88 * The following macros assume the following cache size/parameter limits
89 * in the current Xtensa core implementation:
90 * cache size: 1024 bytes minimum
91 * line size: 16 - 64 bytes
92 * way count: 1 - 4
93 *
94 * Minimum entries per way (ie. per associativity) = 1024 / 64 / 4 = 4
95 * Hence the assumption that each loop can execute four cache instructions.
96 *
97 * Correspondingly, the offset range of instructions is assumed able to cover
98 * four lines, ie. offsets {0,1,2,3} * line_size are assumed valid for
99 * both hit and indexed cache instructions. Ie. these offsets are all
100 * valid: 0, 16, 32, 48, 64, 96, 128, 192 (for line sizes 16, 32, 64).
101 * This is true of all original cache instructions
102 * (dhi, ihi, dhwb, dhwbi, dii, iii) which have offsets
103 * of 0 to 1020 in multiples of 4 (ie. 8 bits shifted by 2).
104 * This is also true of subsequent cache instructions
105 * (dhu, ihu, diu, iiu, diwb, diwbi, dpfl, ipfl) which have offsets
106 * of 0 to 240 in multiples of 16 (ie. 4 bits shifted by 4).
107 *
108 * (Maximum cache size, currently 32k, doesn't affect the following macros.
109 * Cache ways > MMU min page size cause aliasing but that's another matter.)
110 */
111
112
113
114/*
115 * Macro to apply an 'indexed' cache instruction to the entire cache.
116 *
117 * Parameters:
118 * cainst instruction/ that takes an address register parameter
119 * and an offset parameter (in range 0 .. 3*linesize).
120 * size size of cache in bytes
121 * linesize size of cache line in bytes
122 * assoc_or1 number of associativities (ways/sets) in cache
123 * if all sets affected by cainst,
124 * or 1 if only one set (or not all sets) of the cache
125 * is affected by cainst (eg. DIWB or DIWBI [not yet ISA defined]).
126 * aa, ab unique address registers (temporaries)
127 */
128
129 .macro cache_index_all cainst, size, linesize, assoc_or1, aa, ab
130
131 // Sanity-check on cache parameters:
132 .ifne (\size % (\linesize * \assoc_or1 * 4))
133 .err // cache configuration outside expected/supported range!
134 .endif
135
136 // \size byte cache, \linesize byte lines, \assoc_or1 way(s) affected by each \cainst.
137 movi \aa, (\size / (\linesize * \assoc_or1 * 4))
138 // Possible improvement: need only loop if \aa > 1 ;
139 // however that particular condition is highly unlikely.
140 movi \ab, 0 // to iterate over cache
141 floop \aa, cachex\@
142 \cainst \ab, 0*\linesize
143 \cainst \ab, 1*\linesize
144 \cainst \ab, 2*\linesize
145 \cainst \ab, 3*\linesize
146 addi \ab, \ab, 4*\linesize // move to next line
147 floopend \aa, cachex\@
148
149 .endm
150
151
152/*
153 * Macro to apply a 'hit' cache instruction to a memory region,
154 * ie. to any cache entries that cache a specified portion (region) of memory.
155 * Takes care of the unaligned cases, ie. may apply to one
156 * more cache line than $asize / lineSize if $aaddr is not aligned.
157 *
158 *
159 * Parameters are:
160 * cainst instruction/macro that takes an address register parameter
161 * and an offset parameter (currently always zero)
162 * and generates a cache instruction (eg. "dhi", "dhwb", "ihi", etc.)
163 * linesize_log2 log2(size of cache line in bytes)
164 * addr register containing start address of region (clobbered)
165 * asize register containing size of the region in bytes (clobbered)
166 * askew unique register used as temporary
167 *
168 * !?!?! 2DO: optimization: iterate max(cache_size and \asize) / linesize
169 */
170
171 .macro cache_hit_region cainst, linesize_log2, addr, asize, askew
172
173 // Make \asize the number of iterations:
174 extui \askew, \addr, 0, \linesize_log2 // get unalignment amount of \addr
175 add \asize, \asize, \askew // ... and add it to \asize
176 addi \asize, \asize, (1 << \linesize_log2) - 1 // round up!
177 srli \asize, \asize, \linesize_log2
178
179 // Iterate over region:
180 floopnez \asize, cacheh\@
181 \cainst \addr, 0
182 addi \addr, \addr, (1 << \linesize_log2) // move to next line
183 floopend \asize, cacheh\@
184
185 .endm
186
187
188
189
190
191/*************************** INSTRUCTION CACHE ***************************/
192
193
194/*
195 * Reset/initialize the instruction cache by simply invalidating it:
196 * (need to unlock first also, if cache locking implemented):
197 *
198 * Parameters:
199 * aa, ab unique address registers (temporaries)
200 */
201 .macro icache_reset aa, ab
202 icache_unlock_all \aa, \ab
203 icache_invalidate_all \aa, \ab
204 .endm
205
206
207/*
208 * Synchronize after an instruction cache operation,
209 * to be sure everything is in sync with memory as to be
210 * expected following any previous instruction cache control operations.
211 *
212 * Parameters are:
213 * ar an address register (temporary) (currently unused, but may be used in future)
214 */
215 .macro icache_sync ar
216#if XCHAL_ICACHE_SIZE > 0
217 isync
218#endif
219 .endm
220
221
222
223/*
224 * Invalidate a single line of the instruction cache.
225 * Parameters are:
226 * ar address register that contains (virtual) address to invalidate
227 * (may get clobbered in a future implementation, but not currently)
228 * offset (optional) offset to add to \ar to compute effective address to invalidate
229 * (note: some number of lsbits are ignored)
230 */
231 .macro icache_invalidate_line ar, offset
232#if XCHAL_ICACHE_SIZE > 0
233 ihi \ar, \offset // invalidate icache line
234 /*
235 * NOTE: in some version of the silicon [!!!SHOULD HAVE BEEN DOCUMENTED!!!]
236 * 'ihi' doesn't work, so it had been replaced with 'iii'
237 * (which would just invalidate more than it should,
238 * which should be okay other than the performance hit
239 * because cache locking did not exist in that version,
240 * unless user somehow relies on something being cached).
241 * [WHAT VERSION IS IT!!?!?
242 * IS THERE ANY WAY TO TEST FOR THAT HERE, TO OUTPUT 'III' ONLY IF NEEDED!?!?].
243 *
244 * iii \ar, \offset
245 */
246 icache_sync \ar
247#endif
248 .endm
249
250
251
252
253/*
254 * Invalidate instruction cache entries that cache a specified portion of memory.
255 * Parameters are:
256 * astart start address (register gets clobbered)
257 * asize size of the region in bytes (register gets clobbered)
258 * ac unique register used as temporary
259 */
260 .macro icache_invalidate_region astart, asize, ac
261#if XCHAL_ICACHE_SIZE > 0
262 // Instruction cache region invalidation:
263 cache_hit_region ihi, XCHAL_ICACHE_LINEWIDTH, \astart, \asize, \ac
264 icache_sync \ac
265 // End of instruction cache region invalidation
266#endif
267 .endm
268
269
270
271/*
272 * Invalidate entire instruction cache.
273 *
274 * Parameters:
275 * aa, ab unique address registers (temporaries)
276 */
277 .macro icache_invalidate_all aa, ab
278#if XCHAL_ICACHE_SIZE > 0
279 // Instruction cache invalidation:
280 cache_index_all iii, XCHAL_ICACHE_SIZE, XCHAL_ICACHE_LINESIZE, XCHAL_ICACHE_WAYS, \aa, \ab
281 icache_sync \aa
282 // End of instruction cache invalidation
283#endif
284 .endm
285
286
287
288/*
289 * Lock (prefetch & lock) a single line of the instruction cache.
290 *
291 * Parameters are:
292 * ar address register that contains (virtual) address to lock
293 * (may get clobbered in a future implementation, but not currently)
294 * offset offset to add to \ar to compute effective address to lock
295 * (note: some number of lsbits are ignored)
296 */
297 .macro icache_lock_line ar, offset
298#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
299 ipfl \ar, \offset /* prefetch and lock icache line */
300 icache_sync \ar
301#endif
302 .endm
303
304
305
306/*
307 * Lock (prefetch & lock) a specified portion of memory into the instruction cache.
308 * Parameters are:
309 * astart start address (register gets clobbered)
310 * asize size of the region in bytes (register gets clobbered)
311 * ac unique register used as temporary
312 */
313 .macro icache_lock_region astart, asize, ac
314#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
315 // Instruction cache region lock:
316 cache_hit_region ipfl, XCHAL_ICACHE_LINEWIDTH, \astart, \asize, \ac
317 icache_sync \ac
318 // End of instruction cache region lock
319#endif
320 .endm
321
322
323
324/*
325 * Unlock a single line of the instruction cache.
326 *
327 * Parameters are:
328 * ar address register that contains (virtual) address to unlock
329 * (may get clobbered in a future implementation, but not currently)
330 * offset offset to add to \ar to compute effective address to unlock
331 * (note: some number of lsbits are ignored)
332 */
333 .macro icache_unlock_line ar, offset
334#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
335 ihu \ar, \offset /* unlock icache line */
336 icache_sync \ar
337#endif
338 .endm
339
340
341
342/*
343 * Unlock a specified portion of memory from the instruction cache.
344 * Parameters are:
345 * astart start address (register gets clobbered)
346 * asize size of the region in bytes (register gets clobbered)
347 * ac unique register used as temporary
348 */
349 .macro icache_unlock_region astart, asize, ac
350#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
351 // Instruction cache region unlock:
352 cache_hit_region ihu, XCHAL_ICACHE_LINEWIDTH, \astart, \asize, \ac
353 icache_sync \ac
354 // End of instruction cache region unlock
355#endif
356 .endm
357
358
359
360/*
361 * Unlock entire instruction cache.
362 *
363 * Parameters:
364 * aa, ab unique address registers (temporaries)
365 */
366 .macro icache_unlock_all aa, ab
367#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
368 // Instruction cache unlock:
369 cache_index_all iiu, XCHAL_ICACHE_SIZE, XCHAL_ICACHE_LINESIZE, 1, \aa, \ab
370 icache_sync \aa
371 // End of instruction cache unlock
372#endif
373 .endm
374
375
376
377
378
379/*************************** DATA CACHE ***************************/
380
381
382
383/*
384 * Reset/initialize the data cache by simply invalidating it
385 * (need to unlock first also, if cache locking implemented):
386 *
387 * Parameters:
388 * aa, ab unique address registers (temporaries)
389 */
390 .macro dcache_reset aa, ab
391 dcache_unlock_all \aa, \ab
392 dcache_invalidate_all \aa, \ab
393 .endm
394
395
396
397
398/*
399 * Synchronize after a data cache operation,
400 * to be sure everything is in sync with memory as to be
401 * expected following any previous data cache control operations.
402 *
403 * Parameters are:
404 * ar an address register (temporary) (currently unused, but may be used in future)
405 */
406 .macro dcache_sync ar
407#if XCHAL_DCACHE_SIZE > 0
408 // This previous sequence errs on the conservative side (too much so); a DSYNC should be sufficient:
409 //memw // synchronize data cache changes relative to subsequent memory accesses
410 //isync // be conservative and ISYNC as well (just to be sure)
411
412 dsync
413#endif
414 .endm
415
416
417
418/*
419 * Synchronize after a data store operation,
420 * to be sure the stored data is completely off the processor
421 * (and assuming there is no buffering outside the processor,
422 * that the data is in memory). This may be required to
423 * ensure that the processor's write buffers are emptied.
424 * A MEMW followed by a read guarantees this, by definition.
425 * We also try to make sure the read itself completes.
426 *
427 * Parameters are:
428 * ar an address register (temporary)
429 */
430 .macro write_sync ar
431 memw // ensure previous memory accesses are complete prior to subsequent memory accesses
432 l32i \ar, sp, 0 // completing this read ensures any previous write has completed, because of MEMW
433 //slot
434 add \ar, \ar, \ar // use the result of the read to help ensure the read completes (in future architectures)
435 .endm
436
437
438/*
439 * Invalidate a single line of the data cache.
440 * Parameters are:
441 * ar address register that contains (virtual) address to invalidate
442 * (may get clobbered in a future implementation, but not currently)
443 * offset (optional) offset to add to \ar to compute effective address to invalidate
444 * (note: some number of lsbits are ignored)
445 */
446 .macro dcache_invalidate_line ar, offset
447#if XCHAL_DCACHE_SIZE > 0
448 dhi \ar, \offset
449 dcache_sync \ar
450#endif
451 .endm
452
453
454
455
456
457/*
458 * Invalidate data cache entries that cache a specified portion of memory.
459 * Parameters are:
460 * astart start address (register gets clobbered)
461 * asize size of the region in bytes (register gets clobbered)
462 * ac unique register used as temporary
463 */
464 .macro dcache_invalidate_region astart, asize, ac
465#if XCHAL_DCACHE_SIZE > 0
466 // Data cache region invalidation:
467 cache_hit_region dhi, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
468 dcache_sync \ac
469 // End of data cache region invalidation
470#endif
471 .endm
472
473
474
475#if 0
476/*
477 * This is a work-around for a bug in SiChip1 (???).
478 * There should be a proper mechanism for not outputting
479 * these instructions when not needed.
480 * To enable work-around, uncomment this and replace 'dii'
481 * with 'dii_s1' everywhere, eg. in dcache_invalidate_all
482 * macro below.
483 */
484 .macro dii_s1 ar, offset
485 dii \ar, \offset
486 or \ar, \ar, \ar
487 or \ar, \ar, \ar
488 or \ar, \ar, \ar
489 or \ar, \ar, \ar
490 .endm
491#endif
492
493
494/*
495 * Invalidate entire data cache.
496 *
497 * Parameters:
498 * aa, ab unique address registers (temporaries)
499 */
500 .macro dcache_invalidate_all aa, ab
501#if XCHAL_DCACHE_SIZE > 0
502 // Data cache invalidation:
503 cache_index_all dii, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, XCHAL_DCACHE_WAYS, \aa, \ab
504 dcache_sync \aa
505 // End of data cache invalidation
506#endif
507 .endm
508
509
510
511/*
512 * Writeback a single line of the data cache.
513 * Parameters are:
514 * ar address register that contains (virtual) address to writeback
515 * (may get clobbered in a future implementation, but not currently)
516 * offset offset to add to \ar to compute effective address to writeback
517 * (note: some number of lsbits are ignored)
518 */
519 .macro dcache_writeback_line ar, offset
520#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK
521 dhwb \ar, \offset
522 dcache_sync \ar
523#endif
524 .endm
525
526
527
528/*
529 * Writeback dirty data cache entries that cache a specified portion of memory.
530 * Parameters are:
531 * astart start address (register gets clobbered)
532 * asize size of the region in bytes (register gets clobbered)
533 * ac unique register used as temporary
534 */
535 .macro dcache_writeback_region astart, asize, ac
536#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK
537 // Data cache region writeback:
538 cache_hit_region dhwb, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
539 dcache_sync \ac
540 // End of data cache region writeback
541#endif
542 .endm
543
544
545
546/*
547 * Writeback entire data cache.
548 * Parameters:
549 * aa, ab unique address registers (temporaries)
550 */
551 .macro dcache_writeback_all aa, ab
552#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK
553 // Data cache writeback:
554 cache_index_all diwb, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, 1, \aa, \ab
555 dcache_sync \aa
556 // End of data cache writeback
557#endif
558 .endm
559
560
561
562/*
563 * Writeback and invalidate a single line of the data cache.
564 * Parameters are:
565 * ar address register that contains (virtual) address to writeback and invalidate
566 * (may get clobbered in a future implementation, but not currently)
567 * offset offset to add to \ar to compute effective address to writeback and invalidate
568 * (note: some number of lsbits are ignored)
569 */
570 .macro dcache_writeback_inv_line ar, offset
571#if XCHAL_DCACHE_SIZE > 0
572 dhwbi \ar, \offset /* writeback and invalidate dcache line */
573 dcache_sync \ar
574#endif
575 .endm
576
577
578
579/*
580 * Writeback and invalidate data cache entries that cache a specified portion of memory.
581 * Parameters are:
582 * astart start address (register gets clobbered)
583 * asize size of the region in bytes (register gets clobbered)
584 * ac unique register used as temporary
585 */
586 .macro dcache_writeback_inv_region astart, asize, ac
587#if XCHAL_DCACHE_SIZE > 0
588 // Data cache region writeback and invalidate:
589 cache_hit_region dhwbi, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
590 dcache_sync \ac
591 // End of data cache region writeback and invalidate
592#endif
593 .endm
594
595
596
597/*
598 * Writeback and invalidate entire data cache.
599 * Parameters:
600 * aa, ab unique address registers (temporaries)
601 */
602 .macro dcache_writeback_inv_all aa, ab
603#if XCHAL_DCACHE_SIZE > 0
604 // Data cache writeback and invalidate:
605#if XCHAL_DCACHE_IS_WRITEBACK
606 cache_index_all diwbi, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, 1, \aa, \ab
607 dcache_sync \aa
608#else /*writeback*/
609 // Data cache does not support writeback, so just invalidate: */
610 dcache_invalidate_all \aa, \ab
611#endif /*writeback*/
612 // End of data cache writeback and invalidate
613#endif
614 .endm
615
616
617
618
619/*
620 * Lock (prefetch & lock) a single line of the data cache.
621 *
622 * Parameters are:
623 * ar address register that contains (virtual) address to lock
624 * (may get clobbered in a future implementation, but not currently)
625 * offset offset to add to \ar to compute effective address to lock
626 * (note: some number of lsbits are ignored)
627 */
628 .macro dcache_lock_line ar, offset
629#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
630 dpfl \ar, \offset /* prefetch and lock dcache line */
631 dcache_sync \ar
632#endif
633 .endm
634
635
636
637/*
638 * Lock (prefetch & lock) a specified portion of memory into the data cache.
639 * Parameters are:
640 * astart start address (register gets clobbered)
641 * asize size of the region in bytes (register gets clobbered)
642 * ac unique register used as temporary
643 */
644 .macro dcache_lock_region astart, asize, ac
645#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
646 // Data cache region lock:
647 cache_hit_region dpfl, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
648 dcache_sync \ac
649 // End of data cache region lock
650#endif
651 .endm
652
653
654
655/*
656 * Unlock a single line of the data cache.
657 *
658 * Parameters are:
659 * ar address register that contains (virtual) address to unlock
660 * (may get clobbered in a future implementation, but not currently)
661 * offset offset to add to \ar to compute effective address to unlock
662 * (note: some number of lsbits are ignored)
663 */
664 .macro dcache_unlock_line ar, offset
665#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
666 dhu \ar, \offset /* unlock dcache line */
667 dcache_sync \ar
668#endif
669 .endm
670
671
672
673/*
674 * Unlock a specified portion of memory from the data cache.
675 * Parameters are:
676 * astart start address (register gets clobbered)
677 * asize size of the region in bytes (register gets clobbered)
678 * ac unique register used as temporary
679 */
680 .macro dcache_unlock_region astart, asize, ac
681#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
682 // Data cache region unlock:
683 cache_hit_region dhu, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
684 dcache_sync \ac
685 // End of data cache region unlock
686#endif
687 .endm
688
689
690
691/*
692 * Unlock entire data cache.
693 *
694 * Parameters:
695 * aa, ab unique address registers (temporaries)
696 */
697 .macro dcache_unlock_all aa, ab
698#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
699 // Data cache unlock:
700 cache_index_all diu, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, 1, \aa, \ab
701 dcache_sync \aa
702 // End of data cache unlock
703#endif
704 .endm
705
706
707#endif /*XTENSA_CACHEASM_H*/
708
diff --git a/include/asm-xtensa/xtensa/cacheattrasm.h b/include/asm-xtensa/xtensa/cacheattrasm.h
deleted file mode 100644
index 1c3e117b3592..000000000000
--- a/include/asm-xtensa/xtensa/cacheattrasm.h
+++ /dev/null
@@ -1,432 +0,0 @@
1#ifndef XTENSA_CACHEATTRASM_H
2#define XTENSA_CACHEATTRASM_H
3
4/*
5 * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND
6 *
7 * include/asm-xtensa/xtensa/cacheattrasm.h -- assembler-specific
8 * CACHEATTR register related definitions that depend on CORE
9 * configuration.
10 *
11 * This file is subject to the terms and conditions of the GNU General Public
12 * License. See the file "COPYING" in the main directory of this archive
13 * for more details.
14 *
15 * Copyright (C) 2002 Tensilica Inc.
16 */
17
18
19#include <xtensa/coreasm.h>
20
21
22/*
23 * This header file defines assembler macros of the form:
24 * <x>cacheattr_<func>
25 * where:
26 * <x> is 'i', 'd' or absent for instruction, data
27 * or both caches; and
28 * <func> indicates the function of the macro.
29 *
30 * The following functions are defined:
31 *
32 * icacheattr_get
33 * Reads I-cache CACHEATTR into a2 (clobbers a3-a5).
34 *
35 * dcacheattr_get
36 * Reads D-cache CACHEATTR into a2 (clobbers a3-a5).
37 * (Note: for configs with a real CACHEATTR register, the
38 * above two macros are identical.)
39 *
40 * cacheattr_set
41 * Writes both I-cache and D-cache CACHEATTRs from a2 (a3-a8 clobbered).
42 * Works even when changing one's own code's attributes.
43 *
44 * icacheattr_is_enabled label
45 * Branches to \label if I-cache appears to have been enabled
46 * (eg. if CACHEATTR contains a cache-enabled attribute).
47 * (clobbers a2-a5,SAR)
48 *
49 * dcacheattr_is_enabled label
50 * Branches to \label if D-cache appears to have been enabled
51 * (eg. if CACHEATTR contains a cache-enabled attribute).
52 * (clobbers a2-a5,SAR)
53 *
54 * cacheattr_is_enabled label
55 * Branches to \label if either I-cache or D-cache appears to have been enabled
56 * (eg. if CACHEATTR contains a cache-enabled attribute).
57 * (clobbers a2-a5,SAR)
58 *
59 * The following macros are only defined under certain conditions:
60 *
61 * icacheattr_set (if XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR)
62 * Writes I-cache CACHEATTR from a2 (a3-a8 clobbered).
63 *
64 * dcacheattr_set (if XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR)
65 * Writes D-cache CACHEATTR from a2 (a3-a8 clobbered).
66 */
67
68
69
70/*************************** GENERIC -- ALL CACHES ***************************/
71
72/*
73 * _cacheattr_get
74 *
75 * (Internal macro.)
76 * Returns value of CACHEATTR register (or closest equivalent) in a2.
77 *
78 * Entry:
79 * (none)
80 * Exit:
81 * a2 value read from CACHEATTR
82 * a3-a5 clobbered (temporaries)
83 */
84 .macro _cacheattr_get tlb
85#if XCHAL_HAVE_CACHEATTR
86 rsr a2, CACHEATTR
87#elif XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR
88 // We have a config that "mimics" CACHEATTR using a simplified
89 // "MMU" composed of a single statically-mapped way.
90 // DTLB and ITLB are independent, so there's no single
91 // cache attribute that can describe both. So for now
92 // just return the DTLB state.
93 movi a5, 0xE0000000
94 movi a2, 0
95 movi a3, 0
961: add a3, a3, a5 // next segment
97 r&tlb&1 a4, a3 // get PPN+CA of segment at 0xE0000000, 0xC0000000, ..., 0
98 dsync // interlock???
99 slli a2, a2, 4
100 extui a4, a4, 0, 4 // extract CA
101 or a2, a2, a4
102 bnez a3, 1b
103#else
104 // This macro isn't applicable to arbitrary MMU configurations.
105 // Just return zero.
106 movi a2, 0
107#endif
108 .endm
109
110 .macro icacheattr_get
111 _cacheattr_get itlb
112 .endm
113
114 .macro dcacheattr_get
115 _cacheattr_get dtlb
116 .endm
117
118
119#define XCHAL_CACHEATTR_ALL_BYPASS 0x22222222 /* default (powerup/reset) value of CACHEATTR, all BYPASS
120 mode (ie. disabled/bypassed caches) */
121
122#if XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR
123
124#define XCHAL_FCA_ENAMASK 0x001A /* bitmap of fetch attributes that require enabled icache */
125#define XCHAL_LCA_ENAMASK 0x0003 /* bitmap of load attributes that require enabled dcache */
126#define XCHAL_SCA_ENAMASK 0x0003 /* bitmap of store attributes that require enabled dcache */
127#define XCHAL_LSCA_ENAMASK (XCHAL_LCA_ENAMASK|XCHAL_SCA_ENAMASK) /* l/s attrs requiring enabled dcache */
128#define XCHAL_ALLCA_ENAMASK (XCHAL_FCA_ENAMASK|XCHAL_LSCA_ENAMASK) /* all attrs requiring enabled caches */
129
130/*
131 * _cacheattr_is_enabled
132 *
133 * (Internal macro.)
134 * Branches to \label if CACHEATTR in a2 indicates an enabled
135 * cache, using mask in a3.
136 *
137 * Parameters:
138 * label where to branch to if cache is enabled
139 * Entry:
140 * a2 contains CACHEATTR value used to determine whether
141 * caches are enabled
142 * a3 16-bit constant where each bit correspond to
143 * one of the 16 possible CA values (in a CACHEATTR mask);
144 * CA values that indicate the cache is enabled
145 * have their corresponding bit set in this mask
146 * (eg. use XCHAL_xCA_ENAMASK , above)
147 * Exit:
148 * a2,a4,a5 clobbered
149 * SAR clobbered
150 */
151 .macro _cacheattr_is_enabled label
152 movi a4, 8 // loop 8 times
153.Lcaife\@:
154 extui a5, a2, 0, 4 // get CA nibble
155 ssr a5 // index into mask according to CA...
156 srl a5, a3 // ...and get CA's mask bit in a5 bit 0
157 bbsi.l a5, 0, \label // if CA indicates cache enabled, jump to label
158 srli a2, a2, 4 // next nibble
159 addi a4, a4, -1
160 bnez a4, .Lcaife\@ // loop for each nibble
161 .endm
162
163#else /* XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR */
164 .macro _cacheattr_is_enabled label
165 j \label // macro not applicable, assume caches always enabled
166 .endm
167#endif /* XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR */
168
169
170
171/*
172 * icacheattr_is_enabled
173 *
174 * Branches to \label if I-cache is enabled.
175 *
176 * Parameters:
177 * label where to branch to if icache is enabled
178 * Entry:
179 * (none)
180 * Exit:
181 * a2-a5, SAR clobbered (temporaries)
182 */
183 .macro icacheattr_is_enabled label
184#if XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR
185 icacheattr_get
186 movi a3, XCHAL_FCA_ENAMASK
187#endif
188 _cacheattr_is_enabled \label
189 .endm
190
191/*
192 * dcacheattr_is_enabled
193 *
194 * Branches to \label if D-cache is enabled.
195 *
196 * Parameters:
197 * label where to branch to if dcache is enabled
198 * Entry:
199 * (none)
200 * Exit:
201 * a2-a5, SAR clobbered (temporaries)
202 */
203 .macro dcacheattr_is_enabled label
204#if XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR
205 dcacheattr_get
206 movi a3, XCHAL_LSCA_ENAMASK
207#endif
208 _cacheattr_is_enabled \label
209 .endm
210
211/*
212 * cacheattr_is_enabled
213 *
214 * Branches to \label if either I-cache or D-cache is enabled.
215 *
216 * Parameters:
217 * label where to branch to if a cache is enabled
218 * Entry:
219 * (none)
220 * Exit:
221 * a2-a5, SAR clobbered (temporaries)
222 */
223 .macro cacheattr_is_enabled label
224#if XCHAL_HAVE_CACHEATTR
225 rsr a2, CACHEATTR
226 movi a3, XCHAL_ALLCA_ENAMASK
227#elif XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR
228 icacheattr_get
229 movi a3, XCHAL_FCA_ENAMASK
230 _cacheattr_is_enabled \label
231 dcacheattr_get
232 movi a3, XCHAL_LSCA_ENAMASK
233#endif
234 _cacheattr_is_enabled \label
235 .endm
236
237
238
239/*
240 * The ISA does not have a defined way to change the
241 * instruction cache attributes of the running code,
242 * ie. of the memory area that encloses the current PC.
243 * However, each micro-architecture (or class of
244 * configurations within a micro-architecture)
245 * provides a way to deal with this issue.
246 *
247 * Here are a few macros used to implement the relevant
248 * approach taken.
249 */
250
251#if XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR
252 // We have a config that "mimics" CACHEATTR using a simplified
253 // "MMU" composed of a single statically-mapped way.
254
255/*
256 * icacheattr_set
257 *
258 * Entry:
259 * a2 cacheattr value to set
260 * Exit:
261 * a2 unchanged
262 * a3-a8 clobbered (temporaries)
263 */
264 .macro icacheattr_set
265
266 movi a5, 0xE0000000 // mask of upper 3 bits
267 movi a6, 3f // PC where ITLB is set
268 movi a3, 0 // start at region 0 (0 .. 7)
269 and a6, a6, a5 // upper 3 bits of local PC area
270 mov a7, a2 // copy a2 so it doesn't get clobbered
271 j 3f
272
273# if XCHAL_HAVE_XLT_CACHEATTR
274 // Can do translations, use generic method:
2751: sub a6, a3, a5 // address of some other segment
276 ritlb1 a8, a6 // save its PPN+CA
277 dsync // interlock??
278 witlb a4, a6 // make it translate to this code area
279 movi a6, 5f // where to jump into it
280 isync
281 sub a6, a6, a5 // adjust jump address within that other segment
282 jx a6
283
284 // Note that in the following code snippet, which runs at a different virtual
285 // address than it is assembled for, we avoid using literals (eg. via movi/l32r)
286 // just in case literals end up in a different 512 MB segment, and we avoid
287 // instructions that rely on the current PC being what is expected.
288 //
289 .align 4
290 _j 6f // this is at label '5' minus 4 bytes
291 .align 4
2925: witlb a4, a3 // we're in other segment, now can write previous segment's CA
293 isync
294 add a6, a6, a5 // back to previous segment
295 addi a6, a6, -4 // next jump label
296 jx a6
297
2986: sub a6, a3, a5 // address of some other segment
299 witlb a8, a6 // restore PPN+CA of other segment
300 mov a6, a3 // restore a6
301 isync
302# else /* XCHAL_HAVE_XLT_CACHEATTR */
303 // Use micro-architecture specific method.
304 // The following 4-instruction sequence is aligned such that
305 // it all fits within a single I-cache line. Sixteen byte
306 // alignment is sufficient for this (using XCHAL_ICACHE_LINESIZE
307 // actually causes problems because that can be greater than
308 // the alignment of the reset vector, where this macro is often
309 // invoked, which would cause the linker to align the reset
310 // vector code away from the reset vector!!).
311 .align 16 /*XCHAL_ICACHE_LINESIZE*/
3121: _witlb a4, a3 // write wired PTE (CA, no PPN) of 512MB segment to ITLB
313 _isync
314 nop
315 nop
316# endif /* XCHAL_HAVE_XLT_CACHEATTR */
317 beq a3, a5, 4f // done?
318
319 // Note that in the WITLB loop, we don't do any load/stores
320 // (may not be an issue here, but it is important in the DTLB case).
3212: srli a7, a7, 4 // next CA
322 sub a3, a3, a5 // next segment (add 0x20000000)
3233:
324# if XCHAL_HAVE_XLT_CACHEATTR /* if have translation, preserve it */
325 ritlb1 a8, a3 // get current PPN+CA of segment
326 dsync // interlock???
327 extui a4, a7, 0, 4 // extract CA to set
328 srli a8, a8, 4 // clear CA but keep PPN ...
329 slli a8, a8, 4 // ...
330 add a4, a4, a8 // combine new CA with PPN to preserve
331# else
332 extui a4, a7, 0, 4 // extract CA
333# endif
334 beq a3, a6, 1b // current PC's region? if so, do it in a safe way
335 witlb a4, a3 // write wired PTE (CA [+PPN]) of 512MB segment to ITLB
336 bne a3, a5, 2b
337 isync // make sure all ifetch changes take effect
3384:
339 .endm // icacheattr_set
340
341
342/*
343 * dcacheattr_set
344 *
345 * Entry:
346 * a2 cacheattr value to set
347 * Exit:
348 * a2 unchanged
349 * a3-a8 clobbered (temporaries)
350 */
351
352 .macro dcacheattr_set
353
354 movi a5, 0xE0000000 // mask of upper 3 bits
355 movi a3, 0 // start at region 0 (0 .. 7)
356 mov a7, a2 // copy a2 so it doesn't get clobbered
357 j 3f
358 // Note that in the WDTLB loop, we don't do any load/stores
359 // (including implicit l32r via movi) because it isn't safe.
3602: srli a7, a7, 4 // next CA
361 sub a3, a3, a5 // next segment (add 0x20000000)
3623:
363# if XCHAL_HAVE_XLT_CACHEATTR /* if have translation, preserve it */
364 rdtlb1 a8, a3 // get current PPN+CA of segment
365 dsync // interlock???
366 extui a4, a7, 0, 4 // extract CA to set
367 srli a8, a8, 4 // clear CA but keep PPN ...
368 slli a8, a8, 4 // ...
369 add a4, a4, a8 // combine new CA with PPN to preserve
370# else
371 extui a4, a7, 0, 4 // extract CA to set
372# endif
373 wdtlb a4, a3 // write wired PTE (CA [+PPN]) of 512MB segment to DTLB
374 bne a3, a5, 2b
375 dsync // make sure all data path changes take effect
376 .endm // dcacheattr_set
377
378#endif /* XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR */
379
380
381
382/*
383 * cacheattr_set
384 *
385 * Macro that sets the current CACHEATTR safely
386 * (both i and d) according to the current contents of a2.
387 * It works even when changing the cache attributes of
388 * the currently running code.
389 *
390 * Entry:
391 * a2 cacheattr value to set
392 * Exit:
393 * a2 unchanged
394 * a3-a8 clobbered (temporaries)
395 */
396 .macro cacheattr_set
397
398#if XCHAL_HAVE_CACHEATTR
399# if XCHAL_ICACHE_LINESIZE < 4
400 // No i-cache, so can always safely write to CACHEATTR:
401 wsr a2, CACHEATTR
402# else
403 // The Athens micro-architecture, when using the old
404 // exception architecture option (ie. with the CACHEATTR register)
405 // allows changing the cache attributes of the running code
406 // using the following exact sequence aligned to be within
407 // an instruction cache line. (NOTE: using XCHAL_ICACHE_LINESIZE
408 // alignment actually causes problems because that can be greater
409 // than the alignment of the reset vector, where this macro is often
410 // invoked, which would cause the linker to align the reset
411 // vector code away from the reset vector!!).
412 j 1f
413 .align 16 /*XCHAL_ICACHE_LINESIZE*/ // align to within an I-cache line
4141: _wsr a2, CACHEATTR
415 _isync
416 nop
417 nop
418# endif
419#elif XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR
420 // DTLB and ITLB are independent, but to keep semantics
421 // of this macro we simply write to both.
422 icacheattr_set
423 dcacheattr_set
424#else
425 // This macro isn't applicable to arbitrary MMU configurations.
426 // Do nothing in this case.
427#endif
428 .endm
429
430
431#endif /*XTENSA_CACHEATTRASM_H*/
432
diff --git a/include/asm-xtensa/xtensa/config-linux_be/core.h b/include/asm-xtensa/xtensa/config-linux_be/core.h
deleted file mode 100644
index d54fe5eb1064..000000000000
--- a/include/asm-xtensa/xtensa/config-linux_be/core.h
+++ /dev/null
@@ -1,1270 +0,0 @@
1/*
2 * xtensa/config/core.h -- HAL definitions that are dependent on CORE configuration
3 *
4 * This header file is sometimes referred to as the "compile-time HAL" or CHAL.
5 * It was generated for a specific Xtensa processor configuration.
6 *
7 * Source for configuration-independent binaries (which link in a
8 * configuration-specific HAL library) must NEVER include this file.
9 * It is perfectly normal, however, for the HAL source itself to include this file.
10 */
11
12/*
13 * Copyright (c) 2003 Tensilica, Inc. All Rights Reserved.
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of version 2.1 of the GNU Lesser General Public
17 * License as published by the Free Software Foundation.
18 *
19 * This program is distributed in the hope that it would be useful, but
20 * WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
22 *
23 * Further, this software is distributed without any warranty that it is
24 * free of the rightful claim of any third person regarding infringement
25 * or the like. Any license provided herein, whether implied or
26 * otherwise, applies only to this software file. Patent licenses, if
27 * any, provided herein do not apply to combinations of this program with
28 * other software, or any other product whatsoever.
29 *
30 * You should have received a copy of the GNU Lesser General Public
31 * License along with this program; if not, write the Free Software
32 * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
33 * USA.
34 */
35
36
37#ifndef XTENSA_CONFIG_CORE_H
38#define XTENSA_CONFIG_CORE_H
39
40#include <xtensa/hal.h>
41
42
43/*----------------------------------------------------------------------
44 GENERAL
45 ----------------------------------------------------------------------*/
46
47/*
48 * Separators for macros that expand into arrays.
49 * These can be predefined by files that #include this one,
50 * when different separators are required.
51 */
52/* Element separator for macros that expand into 1-dimensional arrays: */
53#ifndef XCHAL_SEP
54#define XCHAL_SEP ,
55#endif
56/* Array separator for macros that expand into 2-dimensional arrays: */
57#ifndef XCHAL_SEP2
58#define XCHAL_SEP2 },{
59#endif
60
61
62/*----------------------------------------------------------------------
63 ENDIANNESS
64 ----------------------------------------------------------------------*/
65
66#define XCHAL_HAVE_BE 1
67#define XCHAL_HAVE_LE 0
68#define XCHAL_MEMORY_ORDER XTHAL_BIGENDIAN
69
70
71/*----------------------------------------------------------------------
72 REGISTER WINDOWS
73 ----------------------------------------------------------------------*/
74
75#define XCHAL_HAVE_WINDOWED 1 /* 1 if windowed registers option configured, 0 otherwise */
76#define XCHAL_NUM_AREGS 64 /* number of physical address regs */
77#define XCHAL_NUM_AREGS_LOG2 6 /* log2(XCHAL_NUM_AREGS) */
78
79
80/*----------------------------------------------------------------------
81 ADDRESS ALIGNMENT
82 ----------------------------------------------------------------------*/
83
84/* These apply to a selected set of core load and store instructions only (see ISA): */
85#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* 1 if unaligned loads cause an exception, 0 otherwise */
86#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* 1 if unaligned stores cause an exception, 0 otherwise */
87
88
89/*----------------------------------------------------------------------
90 INTERRUPTS
91 ----------------------------------------------------------------------*/
92
93#define XCHAL_HAVE_INTERRUPTS 1 /* 1 if interrupt option configured, 0 otherwise */
94#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* 1 if high-priority interrupt option configured, 0 otherwise */
95#define XCHAL_HAVE_HIGHLEVEL_INTERRUPTS XCHAL_HAVE_HIGHPRI_INTERRUPTS
96#define XCHAL_HAVE_NMI 0 /* 1 if NMI option configured, 0 otherwise */
97#define XCHAL_NUM_INTERRUPTS 17 /* number of interrupts */
98#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* number of bits to hold an interrupt number: roundup(log2(number of interrupts)) */
99#define XCHAL_NUM_EXTINTERRUPTS 10 /* number of external interrupts */
100#define XCHAL_NUM_INTLEVELS 4 /* number of interrupt levels (not including level zero!) */
101#define XCHAL_NUM_LOWPRI_LEVELS 1 /* number of low-priority interrupt levels (always 1) */
102#define XCHAL_FIRST_HIGHPRI_LEVEL (XCHAL_NUM_LOWPRI_LEVELS+1) /* level of first high-priority interrupt (always 2) */
103#define XCHAL_EXCM_LEVEL 1 /* level of interrupts masked by PS.EXCM (XEA2 only; always 1 in T10xx);
104 for XEA1, where there is no PS.EXCM, this is always 1;
105 interrupts at levels FIRST_HIGHPRI <= n <= EXCM_LEVEL, if any,
106 are termed "medium priority" interrupts (post T10xx only) */
107/* Note: 1 <= LOWPRI_LEVELS <= EXCM_LEVEL < DEBUGLEVEL <= NUM_INTLEVELS < NMILEVEL <= 15 */
108
109/* Masks of interrupts at each interrupt level: */
110#define XCHAL_INTLEVEL0_MASK 0x00000000
111#define XCHAL_INTLEVEL1_MASK 0x000064F9
112#define XCHAL_INTLEVEL2_MASK 0x00008902
113#define XCHAL_INTLEVEL3_MASK 0x00011204
114#define XCHAL_INTLEVEL4_MASK 0x00000000
115#define XCHAL_INTLEVEL5_MASK 0x00000000
116#define XCHAL_INTLEVEL6_MASK 0x00000000
117#define XCHAL_INTLEVEL7_MASK 0x00000000
118#define XCHAL_INTLEVEL8_MASK 0x00000000
119#define XCHAL_INTLEVEL9_MASK 0x00000000
120#define XCHAL_INTLEVEL10_MASK 0x00000000
121#define XCHAL_INTLEVEL11_MASK 0x00000000
122#define XCHAL_INTLEVEL12_MASK 0x00000000
123#define XCHAL_INTLEVEL13_MASK 0x00000000
124#define XCHAL_INTLEVEL14_MASK 0x00000000
125#define XCHAL_INTLEVEL15_MASK 0x00000000
126/* As an array of entries (eg. for C constant arrays): */
127#define XCHAL_INTLEVEL_MASKS 0x00000000 XCHAL_SEP \
128 0x000064F9 XCHAL_SEP \
129 0x00008902 XCHAL_SEP \
130 0x00011204 XCHAL_SEP \
131 0x00000000 XCHAL_SEP \
132 0x00000000 XCHAL_SEP \
133 0x00000000 XCHAL_SEP \
134 0x00000000 XCHAL_SEP \
135 0x00000000 XCHAL_SEP \
136 0x00000000 XCHAL_SEP \
137 0x00000000 XCHAL_SEP \
138 0x00000000 XCHAL_SEP \
139 0x00000000 XCHAL_SEP \
140 0x00000000 XCHAL_SEP \
141 0x00000000 XCHAL_SEP \
142 0x00000000
143
144/* Masks of interrupts at each range 1..n of interrupt levels: */
145#define XCHAL_INTLEVEL0_ANDBELOW_MASK 0x00000000
146#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x000064F9
147#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x0000EDFB
148#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x0001FFFF
149#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x0001FFFF
150#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x0001FFFF
151#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x0001FFFF
152#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x0001FFFF
153#define XCHAL_INTLEVEL8_ANDBELOW_MASK 0x0001FFFF
154#define XCHAL_INTLEVEL9_ANDBELOW_MASK 0x0001FFFF
155#define XCHAL_INTLEVEL10_ANDBELOW_MASK 0x0001FFFF
156#define XCHAL_INTLEVEL11_ANDBELOW_MASK 0x0001FFFF
157#define XCHAL_INTLEVEL12_ANDBELOW_MASK 0x0001FFFF
158#define XCHAL_INTLEVEL13_ANDBELOW_MASK 0x0001FFFF
159#define XCHAL_INTLEVEL14_ANDBELOW_MASK 0x0001FFFF
160#define XCHAL_INTLEVEL15_ANDBELOW_MASK 0x0001FFFF
161#define XCHAL_LOWPRI_MASK XCHAL_INTLEVEL1_ANDBELOW_MASK /* mask of all low-priority interrupts */
162#define XCHAL_EXCM_MASK XCHAL_INTLEVEL1_ANDBELOW_MASK /* mask of all interrupts masked by PS.EXCM (or CEXCM) */
163/* As an array of entries (eg. for C constant arrays): */
164#define XCHAL_INTLEVEL_ANDBELOW_MASKS 0x00000000 XCHAL_SEP \
165 0x000064F9 XCHAL_SEP \
166 0x0000EDFB XCHAL_SEP \
167 0x0001FFFF XCHAL_SEP \
168 0x0001FFFF XCHAL_SEP \
169 0x0001FFFF XCHAL_SEP \
170 0x0001FFFF XCHAL_SEP \
171 0x0001FFFF XCHAL_SEP \
172 0x0001FFFF XCHAL_SEP \
173 0x0001FFFF XCHAL_SEP \
174 0x0001FFFF XCHAL_SEP \
175 0x0001FFFF XCHAL_SEP \
176 0x0001FFFF XCHAL_SEP \
177 0x0001FFFF XCHAL_SEP \
178 0x0001FFFF XCHAL_SEP \
179 0x0001FFFF
180
/* Interrupt numbers for each interrupt level at which only one interrupt was configured: */
/*#define XCHAL_INTLEVEL1_NUM	...more than one interrupt at this level...*/
/*#define XCHAL_INTLEVEL2_NUM	...more than one interrupt at this level...*/
/*#define XCHAL_INTLEVEL3_NUM	...more than one interrupt at this level...*/

/* Level of each interrupt: */
#define XCHAL_INT0_LEVEL		1
#define XCHAL_INT1_LEVEL		2
#define XCHAL_INT2_LEVEL		3
#define XCHAL_INT3_LEVEL		1
#define XCHAL_INT4_LEVEL		1
#define XCHAL_INT5_LEVEL		1
#define XCHAL_INT6_LEVEL		1
#define XCHAL_INT7_LEVEL		1
#define XCHAL_INT8_LEVEL		2
#define XCHAL_INT9_LEVEL		3
#define XCHAL_INT10_LEVEL		1
#define XCHAL_INT11_LEVEL		2
#define XCHAL_INT12_LEVEL		3
#define XCHAL_INT13_LEVEL		1
#define XCHAL_INT14_LEVEL		1
#define XCHAL_INT15_LEVEL		2
#define XCHAL_INT16_LEVEL		3
/* Interrupts 17..31 are not configured on this core; their level is 0: */
#define XCHAL_INT17_LEVEL		0
#define XCHAL_INT18_LEVEL		0
#define XCHAL_INT19_LEVEL		0
#define XCHAL_INT20_LEVEL		0
#define XCHAL_INT21_LEVEL		0
#define XCHAL_INT22_LEVEL		0
#define XCHAL_INT23_LEVEL		0
#define XCHAL_INT24_LEVEL		0
#define XCHAL_INT25_LEVEL		0
#define XCHAL_INT26_LEVEL		0
#define XCHAL_INT27_LEVEL		0
#define XCHAL_INT28_LEVEL		0
#define XCHAL_INT29_LEVEL		0
#define XCHAL_INT30_LEVEL		0
#define XCHAL_INT31_LEVEL		0
/* As an array of entries (eg. for C constant arrays): */
#define XCHAL_INT_LEVELS		1	XCHAL_SEP \
					2	XCHAL_SEP \
					3	XCHAL_SEP \
					1	XCHAL_SEP \
					1	XCHAL_SEP \
					1	XCHAL_SEP \
					1	XCHAL_SEP \
					1	XCHAL_SEP \
					2	XCHAL_SEP \
					3	XCHAL_SEP \
					1	XCHAL_SEP \
					2	XCHAL_SEP \
					3	XCHAL_SEP \
					1	XCHAL_SEP \
					1	XCHAL_SEP \
					2	XCHAL_SEP \
					3	XCHAL_SEP \
					0	XCHAL_SEP \
					0	XCHAL_SEP \
					0	XCHAL_SEP \
					0	XCHAL_SEP \
					0	XCHAL_SEP \
					0	XCHAL_SEP \
					0	XCHAL_SEP \
					0	XCHAL_SEP \
					0	XCHAL_SEP \
					0	XCHAL_SEP \
					0	XCHAL_SEP \
					0	XCHAL_SEP \
					0	XCHAL_SEP \
					0	XCHAL_SEP \
					0
252
/* Type of each interrupt: */
#define XCHAL_INT0_TYPE		XTHAL_INTTYPE_EXTERN_LEVEL
#define XCHAL_INT1_TYPE		XTHAL_INTTYPE_EXTERN_LEVEL
#define XCHAL_INT2_TYPE		XTHAL_INTTYPE_EXTERN_LEVEL
#define XCHAL_INT3_TYPE		XTHAL_INTTYPE_EXTERN_LEVEL
#define XCHAL_INT4_TYPE		XTHAL_INTTYPE_EXTERN_LEVEL
#define XCHAL_INT5_TYPE		XTHAL_INTTYPE_EXTERN_LEVEL
#define XCHAL_INT6_TYPE		XTHAL_INTTYPE_EXTERN_LEVEL
#define XCHAL_INT7_TYPE		XTHAL_INTTYPE_EXTERN_EDGE
#define XCHAL_INT8_TYPE		XTHAL_INTTYPE_EXTERN_EDGE
#define XCHAL_INT9_TYPE		XTHAL_INTTYPE_EXTERN_EDGE
#define XCHAL_INT10_TYPE	XTHAL_INTTYPE_TIMER
#define XCHAL_INT11_TYPE	XTHAL_INTTYPE_TIMER
#define XCHAL_INT12_TYPE	XTHAL_INTTYPE_TIMER
#define XCHAL_INT13_TYPE	XTHAL_INTTYPE_SOFTWARE
#define XCHAL_INT14_TYPE	XTHAL_INTTYPE_SOFTWARE
#define XCHAL_INT15_TYPE	XTHAL_INTTYPE_SOFTWARE
#define XCHAL_INT16_TYPE	XTHAL_INTTYPE_SOFTWARE
#define XCHAL_INT17_TYPE	XTHAL_INTTYPE_UNCONFIGURED
#define XCHAL_INT18_TYPE	XTHAL_INTTYPE_UNCONFIGURED
#define XCHAL_INT19_TYPE	XTHAL_INTTYPE_UNCONFIGURED
#define XCHAL_INT20_TYPE	XTHAL_INTTYPE_UNCONFIGURED
#define XCHAL_INT21_TYPE	XTHAL_INTTYPE_UNCONFIGURED
#define XCHAL_INT22_TYPE	XTHAL_INTTYPE_UNCONFIGURED
#define XCHAL_INT23_TYPE	XTHAL_INTTYPE_UNCONFIGURED
#define XCHAL_INT24_TYPE	XTHAL_INTTYPE_UNCONFIGURED
#define XCHAL_INT25_TYPE	XTHAL_INTTYPE_UNCONFIGURED
#define XCHAL_INT26_TYPE	XTHAL_INTTYPE_UNCONFIGURED
#define XCHAL_INT27_TYPE	XTHAL_INTTYPE_UNCONFIGURED
#define XCHAL_INT28_TYPE	XTHAL_INTTYPE_UNCONFIGURED
#define XCHAL_INT29_TYPE	XTHAL_INTTYPE_UNCONFIGURED
#define XCHAL_INT30_TYPE	XTHAL_INTTYPE_UNCONFIGURED
#define XCHAL_INT31_TYPE	XTHAL_INTTYPE_UNCONFIGURED
/* As an array of entries (eg. for C constant arrays): */
#define XCHAL_INT_TYPES		XTHAL_INTTYPE_EXTERN_LEVEL	XCHAL_SEP \
				XTHAL_INTTYPE_EXTERN_LEVEL	XCHAL_SEP \
				XTHAL_INTTYPE_EXTERN_LEVEL	XCHAL_SEP \
				XTHAL_INTTYPE_EXTERN_LEVEL	XCHAL_SEP \
				XTHAL_INTTYPE_EXTERN_LEVEL	XCHAL_SEP \
				XTHAL_INTTYPE_EXTERN_LEVEL	XCHAL_SEP \
				XTHAL_INTTYPE_EXTERN_LEVEL	XCHAL_SEP \
				XTHAL_INTTYPE_EXTERN_EDGE	XCHAL_SEP \
				XTHAL_INTTYPE_EXTERN_EDGE	XCHAL_SEP \
				XTHAL_INTTYPE_EXTERN_EDGE	XCHAL_SEP \
				XTHAL_INTTYPE_TIMER		XCHAL_SEP \
				XTHAL_INTTYPE_TIMER		XCHAL_SEP \
				XTHAL_INTTYPE_TIMER		XCHAL_SEP \
				XTHAL_INTTYPE_SOFTWARE		XCHAL_SEP \
				XTHAL_INTTYPE_SOFTWARE		XCHAL_SEP \
				XTHAL_INTTYPE_SOFTWARE		XCHAL_SEP \
				XTHAL_INTTYPE_SOFTWARE		XCHAL_SEP \
				XTHAL_INTTYPE_UNCONFIGURED	XCHAL_SEP \
				XTHAL_INTTYPE_UNCONFIGURED	XCHAL_SEP \
				XTHAL_INTTYPE_UNCONFIGURED	XCHAL_SEP \
				XTHAL_INTTYPE_UNCONFIGURED	XCHAL_SEP \
				XTHAL_INTTYPE_UNCONFIGURED	XCHAL_SEP \
				XTHAL_INTTYPE_UNCONFIGURED	XCHAL_SEP \
				XTHAL_INTTYPE_UNCONFIGURED	XCHAL_SEP \
				XTHAL_INTTYPE_UNCONFIGURED	XCHAL_SEP \
				XTHAL_INTTYPE_UNCONFIGURED	XCHAL_SEP \
				XTHAL_INTTYPE_UNCONFIGURED	XCHAL_SEP \
				XTHAL_INTTYPE_UNCONFIGURED	XCHAL_SEP \
				XTHAL_INTTYPE_UNCONFIGURED	XCHAL_SEP \
				XTHAL_INTTYPE_UNCONFIGURED	XCHAL_SEP \
				XTHAL_INTTYPE_UNCONFIGURED	XCHAL_SEP \
				XTHAL_INTTYPE_UNCONFIGURED

/* Masks of interrupts for each type of interrupt: */
/* (Each mask below is the bitwise OR of (1 << n) for every interrupt n of that type.) */
#define XCHAL_INTTYPE_MASK_UNCONFIGURED	0xFFFE0000
#define XCHAL_INTTYPE_MASK_SOFTWARE	0x0001E000
#define XCHAL_INTTYPE_MASK_EXTERN_EDGE	0x00000380
#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL	0x0000007F
#define XCHAL_INTTYPE_MASK_TIMER	0x00001C00
#define XCHAL_INTTYPE_MASK_NMI		0x00000000
/* As an array of entries (eg. for C constant arrays): */
#define XCHAL_INTTYPE_MASKS		0xFFFE0000	XCHAL_SEP \
					0x0001E000	XCHAL_SEP \
					0x00000380	XCHAL_SEP \
					0x0000007F	XCHAL_SEP \
					0x00001C00	XCHAL_SEP \
					0x00000000
334
/* Interrupts assigned to each timer (CCOMPARE0 to CCOMPARE3), -1 if unassigned */
#define XCHAL_TIMER0_INTERRUPT	10
#define XCHAL_TIMER1_INTERRUPT	11
#define XCHAL_TIMER2_INTERRUPT	12
#define XCHAL_TIMER3_INTERRUPT	XTHAL_TIMER_UNCONFIGURED
/* As an array of entries (eg. for C constant arrays): */
#define XCHAL_TIMER_INTERRUPTS	10	XCHAL_SEP \
				11	XCHAL_SEP \
				12	XCHAL_SEP \
				XTHAL_TIMER_UNCONFIGURED

/* Indexing macros: */
/* (Two-level expansion so that a macro argument such as XCHAL_DEBUGLEVEL
 *  is itself expanded before token-pasting.) */
#define _XCHAL_INTLEVEL_MASK(n)		XCHAL_INTLEVEL ## n ## _MASK
#define XCHAL_INTLEVEL_MASK(n)		_XCHAL_INTLEVEL_MASK(n)		/* n = 0 .. 15 */
#define _XCHAL_INTLEVEL_ANDBELOWMASK(n)	XCHAL_INTLEVEL ## n ## _ANDBELOW_MASK
#define XCHAL_INTLEVEL_ANDBELOW_MASK(n)	_XCHAL_INTLEVEL_ANDBELOWMASK(n)	/* n = 0 .. 15 */
#define _XCHAL_INT_LEVEL(n)		XCHAL_INT ## n ## _LEVEL
#define XCHAL_INT_LEVEL(n)		_XCHAL_INT_LEVEL(n)		/* n = 0 .. 31 */
#define _XCHAL_INT_TYPE(n)		XCHAL_INT ## n ## _TYPE
#define XCHAL_INT_TYPE(n)		_XCHAL_INT_TYPE(n)		/* n = 0 .. 31 */
#define _XCHAL_TIMER_INTERRUPT(n)	XCHAL_TIMER ## n ## _INTERRUPT
#define XCHAL_TIMER_INTERRUPT(n)	_XCHAL_TIMER_INTERRUPT(n)	/* n = 0 .. 3 */
357
358
359
/*
 * External interrupt vectors/levels.
 * These macros describe how Xtensa processor interrupt numbers
 * (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
 * map to external BInterrupt<n> pins, for those interrupts
 * configured as external (level-triggered, edge-triggered, or NMI).
 * See the Xtensa processor databook for more details.
 */

/* Core interrupt numbers mapped to each EXTERNAL interrupt number: */
/* (On this core the mapping is the identity for external interrupts 0..9.) */
#define XCHAL_EXTINT0_NUM	0	/* (intlevel 1) */
#define XCHAL_EXTINT1_NUM	1	/* (intlevel 2) */
#define XCHAL_EXTINT2_NUM	2	/* (intlevel 3) */
#define XCHAL_EXTINT3_NUM	3	/* (intlevel 1) */
#define XCHAL_EXTINT4_NUM	4	/* (intlevel 1) */
#define XCHAL_EXTINT5_NUM	5	/* (intlevel 1) */
#define XCHAL_EXTINT6_NUM	6	/* (intlevel 1) */
#define XCHAL_EXTINT7_NUM	7	/* (intlevel 1) */
#define XCHAL_EXTINT8_NUM	8	/* (intlevel 2) */
#define XCHAL_EXTINT9_NUM	9	/* (intlevel 3) */

/* Corresponding interrupt masks (1 << core interrupt number): */
#define XCHAL_EXTINT0_MASK	0x00000001
#define XCHAL_EXTINT1_MASK	0x00000002
#define XCHAL_EXTINT2_MASK	0x00000004
#define XCHAL_EXTINT3_MASK	0x00000008
#define XCHAL_EXTINT4_MASK	0x00000010
#define XCHAL_EXTINT5_MASK	0x00000020
#define XCHAL_EXTINT6_MASK	0x00000040
#define XCHAL_EXTINT7_MASK	0x00000080
#define XCHAL_EXTINT8_MASK	0x00000100
#define XCHAL_EXTINT9_MASK	0x00000200

/* Core config interrupt levels mapped to each external interrupt: */
#define XCHAL_EXTINT0_LEVEL	1	/* (int number 0) */
#define XCHAL_EXTINT1_LEVEL	2	/* (int number 1) */
#define XCHAL_EXTINT2_LEVEL	3	/* (int number 2) */
#define XCHAL_EXTINT3_LEVEL	1	/* (int number 3) */
#define XCHAL_EXTINT4_LEVEL	1	/* (int number 4) */
#define XCHAL_EXTINT5_LEVEL	1	/* (int number 5) */
#define XCHAL_EXTINT6_LEVEL	1	/* (int number 6) */
#define XCHAL_EXTINT7_LEVEL	1	/* (int number 7) */
#define XCHAL_EXTINT8_LEVEL	2	/* (int number 8) */
#define XCHAL_EXTINT9_LEVEL	3	/* (int number 9) */
404
405
/*----------------------------------------------------------------------
				EXCEPTIONS and VECTORS
  ----------------------------------------------------------------------*/

#define XCHAL_HAVE_EXCEPTIONS		1	/* 1 if exception option configured, 0 otherwise */

#define XCHAL_XEA_VERSION		2	/* Xtensa Exception Architecture number: 1 for XEA1 (old), 2 for XEA2 (new) */
#define XCHAL_HAVE_XEA1			0	/* 1 if XEA1, 0 otherwise */
#define XCHAL_HAVE_XEA2			1	/* 1 if XEA2, 0 otherwise */
/* For backward compatibility ONLY -- DO NOT USE (will be removed in future release): */
#define XCHAL_HAVE_OLD_EXC_ARCH		XCHAL_HAVE_XEA1	/* (DEPRECATED) 1 if old exception architecture (XEA1), 0 otherwise (eg. XEA2) */
#define XCHAL_HAVE_EXCM			XCHAL_HAVE_XEA2	/* (DEPRECATED) 1 if PS.EXCM bit exists (currently equals XCHAL_HAVE_TLBS) */

/* Vector addresses, both as seen by the core (VADDR) and in physical memory (PADDR): */
#define XCHAL_RESET_VECTOR_VADDR	0xFE000020
#define XCHAL_RESET_VECTOR_PADDR	0xFE000020
#define XCHAL_USER_VECTOR_VADDR		0xD0000220
#define XCHAL_PROGRAMEXC_VECTOR_VADDR	XCHAL_USER_VECTOR_VADDR		/* for backward compatibility */
#define XCHAL_USEREXC_VECTOR_VADDR	XCHAL_USER_VECTOR_VADDR		/* for backward compatibility */
#define XCHAL_USER_VECTOR_PADDR		0x00000220
#define XCHAL_PROGRAMEXC_VECTOR_PADDR	XCHAL_USER_VECTOR_PADDR		/* for backward compatibility */
#define XCHAL_USEREXC_VECTOR_PADDR	XCHAL_USER_VECTOR_PADDR		/* for backward compatibility */
#define XCHAL_KERNEL_VECTOR_VADDR	0xD0000200
#define XCHAL_STACKEDEXC_VECTOR_VADDR	XCHAL_KERNEL_VECTOR_VADDR	/* for backward compatibility */
#define XCHAL_KERNELEXC_VECTOR_VADDR	XCHAL_KERNEL_VECTOR_VADDR	/* for backward compatibility */
#define XCHAL_KERNEL_VECTOR_PADDR	0x00000200
#define XCHAL_STACKEDEXC_VECTOR_PADDR	XCHAL_KERNEL_VECTOR_PADDR	/* for backward compatibility */
#define XCHAL_KERNELEXC_VECTOR_PADDR	XCHAL_KERNEL_VECTOR_PADDR	/* for backward compatibility */
#define XCHAL_DOUBLEEXC_VECTOR_VADDR	0xD0000290
#define XCHAL_DOUBLEEXC_VECTOR_PADDR	0x00000290
#define XCHAL_WINDOW_VECTORS_VADDR	0xD0000000
#define XCHAL_WINDOW_VECTORS_PADDR	0x00000000
#define XCHAL_INTLEVEL2_VECTOR_VADDR	0xD0000240
#define XCHAL_INTLEVEL2_VECTOR_PADDR	0x00000240
#define XCHAL_INTLEVEL3_VECTOR_VADDR	0xD0000250
#define XCHAL_INTLEVEL3_VECTOR_PADDR	0x00000250
#define XCHAL_INTLEVEL4_VECTOR_VADDR	0xFE000520
#define XCHAL_INTLEVEL4_VECTOR_PADDR	0xFE000520
#define XCHAL_DEBUG_VECTOR_VADDR	XCHAL_INTLEVEL4_VECTOR_VADDR
#define XCHAL_DEBUG_VECTOR_PADDR	XCHAL_INTLEVEL4_VECTOR_PADDR

/* Indexing macros: */
#define _XCHAL_INTLEVEL_VECTOR_VADDR(n)	XCHAL_INTLEVEL ## n ## _VECTOR_VADDR
#define XCHAL_INTLEVEL_VECTOR_VADDR(n)	_XCHAL_INTLEVEL_VECTOR_VADDR(n)	/* n = 0 .. 15 */

/*
 * General Exception Causes
 * (values of EXCCAUSE special register set by general exceptions,
 *  which vector to the user, kernel, or double-exception vectors):
 */
#define XCHAL_EXCCAUSE_ILLEGAL_INSTRUCTION	0	/* Illegal Instruction (IllegalInstruction) */
#define XCHAL_EXCCAUSE_SYSTEM_CALL		1	/* System Call (SystemCall) */
#define XCHAL_EXCCAUSE_INSTRUCTION_FETCH_ERROR	2	/* Instruction Fetch Error (InstructionFetchError) */
#define XCHAL_EXCCAUSE_LOAD_STORE_ERROR		3	/* Load Store Error (LoadStoreError) */
#define XCHAL_EXCCAUSE_LEVEL1_INTERRUPT		4	/* Level 1 Interrupt (Level1Interrupt) */
#define XCHAL_EXCCAUSE_ALLOCA			5	/* Stack Extension Assist (Alloca) */
#define XCHAL_EXCCAUSE_INTEGER_DIVIDE_BY_ZERO	6	/* Integer Divide by Zero (IntegerDivideByZero) */
#define XCHAL_EXCCAUSE_SPECULATION		7	/* Speculation (Speculation) */
#define XCHAL_EXCCAUSE_PRIVILEGED		8	/* Privileged Instruction (Privileged) */
#define XCHAL_EXCCAUSE_UNALIGNED		9	/* Unaligned Load Store (Unaligned) */
#define XCHAL_EXCCAUSE_ITLB_MISS		16	/* ITlb Miss Exception (ITlbMiss) */
#define XCHAL_EXCCAUSE_ITLB_MULTIHIT		17	/* ITlb Multihit Exception (ITlbMultihit) */
#define XCHAL_EXCCAUSE_ITLB_PRIVILEGE		18	/* ITlb Privilege Exception (ITlbPrivilege) */
#define XCHAL_EXCCAUSE_ITLB_SIZE_RESTRICTION	19	/* ITlb Size Restriction Exception (ITlbSizeRestriction) */
#define XCHAL_EXCCAUSE_FETCH_CACHE_ATTRIBUTE	20	/* Fetch Cache Attribute Exception (FetchCacheAttribute) */
#define XCHAL_EXCCAUSE_DTLB_MISS		24	/* DTlb Miss Exception (DTlbMiss) */
#define XCHAL_EXCCAUSE_DTLB_MULTIHIT		25	/* DTlb Multihit Exception (DTlbMultihit) */
#define XCHAL_EXCCAUSE_DTLB_PRIVILEGE		26	/* DTlb Privilege Exception (DTlbPrivilege) */
#define XCHAL_EXCCAUSE_DTLB_SIZE_RESTRICTION	27	/* DTlb Size Restriction Exception (DTlbSizeRestriction) */
#define XCHAL_EXCCAUSE_LOAD_CACHE_ATTRIBUTE	28	/* Load Cache Attribute Exception (LoadCacheAttribute) */
#define XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE	29	/* Store Cache Attribute Exception (StoreCacheAttribute) */
#define XCHAL_EXCCAUSE_FLOATING_POINT		40	/* Floating Point Exception (FloatingPoint) */
477
478
479
/*----------------------------------------------------------------------
				TIMERS
  ----------------------------------------------------------------------*/

#define XCHAL_HAVE_CCOUNT	1	/* 1 if have CCOUNT, 0 otherwise */
/*#define XCHAL_HAVE_TIMERS	XCHAL_HAVE_CCOUNT*/
#define XCHAL_NUM_TIMERS	3	/* number of CCOMPAREn regs */



/*----------------------------------------------------------------------
				DEBUG
  ----------------------------------------------------------------------*/

#define XCHAL_HAVE_DEBUG	1	/* 1 if debug option configured, 0 otherwise */
#define XCHAL_HAVE_OCD		1	/* 1 if OnChipDebug option configured, 0 otherwise */
#define XCHAL_NUM_IBREAK	2	/* number of IBREAKn regs */
#define XCHAL_NUM_DBREAK	2	/* number of DBREAKn regs */
#define XCHAL_DEBUGLEVEL	4	/* debug interrupt level */
/*DebugExternalInterrupt	0	0|1*/
/*DebugUseDIRArray		0	0|1*/




/*----------------------------------------------------------------------
			COPROCESSORS and EXTRA STATE
  ----------------------------------------------------------------------*/

#define XCHAL_HAVE_CP		0	/* 1 if coprocessor option configured (CPENABLE present) */
#define XCHAL_CP_MAXCFG		0	/* max allowed cp id plus one (per cfg) */
511
512#include <xtensa/config/tie.h>
513
514
515
516
/*----------------------------------------------------------------------
			INTERNAL I/D RAM/ROMs and XLMI
  ----------------------------------------------------------------------*/

#define XCHAL_NUM_INSTROM	0	/* number of core instruction ROMs configured */
#define XCHAL_NUM_INSTRAM	0	/* number of core instruction RAMs configured */
#define XCHAL_NUM_DATAROM	0	/* number of core data ROMs configured */
#define XCHAL_NUM_DATARAM	0	/* number of core data RAMs configured */
#define XCHAL_NUM_XLMI		0	/* number of core XLMI ports configured */
#define XCHAL_NUM_IROM		XCHAL_NUM_INSTROM	/* (DEPRECATED) */
#define XCHAL_NUM_IRAM		XCHAL_NUM_INSTRAM	/* (DEPRECATED) */
#define XCHAL_NUM_DROM		XCHAL_NUM_DATAROM	/* (DEPRECATED) */
#define XCHAL_NUM_DRAM		XCHAL_NUM_DATARAM	/* (DEPRECATED) */
530
531
532
/*----------------------------------------------------------------------
				CACHE
  ----------------------------------------------------------------------*/

/* Size of the cache lines in log2(bytes): */
#define XCHAL_ICACHE_LINEWIDTH		4
#define XCHAL_DCACHE_LINEWIDTH		4
/* Size of the cache lines in bytes: */
#define XCHAL_ICACHE_LINESIZE		16
#define XCHAL_DCACHE_LINESIZE		16
/* Max for both I-cache and D-cache (used for general alignment): */
#define XCHAL_CACHE_LINEWIDTH_MAX	4
#define XCHAL_CACHE_LINESIZE_MAX	16

/* Number of cache sets in log2(lines per way): */
#define XCHAL_ICACHE_SETWIDTH		8
#define XCHAL_DCACHE_SETWIDTH		8
/* Max for both I-cache and D-cache (used for general cache-coherency page alignment): */
#define XCHAL_CACHE_SETWIDTH_MAX	8
#define XCHAL_CACHE_SETSIZE_MAX		256

/* Cache set associativity (number of ways): */
#define XCHAL_ICACHE_WAYS		2
#define XCHAL_DCACHE_WAYS		2

/* Size of the caches in bytes (ways * 2^(linewidth + setwidth)): */
#define XCHAL_ICACHE_SIZE		8192
#define XCHAL_DCACHE_SIZE		8192

/* Cache features: */
#define XCHAL_DCACHE_IS_WRITEBACK	0
/* Whether cache locking feature is available: */
#define XCHAL_ICACHE_LINE_LOCKABLE	0
#define XCHAL_DCACHE_LINE_LOCKABLE	0

/* Number of (encoded) cache attribute bits: */
#define XCHAL_CA_BITS			4	/* number of bits needed to hold cache attribute encoding */
/* (The number of access mode bits (decoded cache attribute bits) is defined by the architecture; see xtensa/hal.h?) */


/* Cache Attribute encodings -- lists of access modes for each cache attribute: */
/* (One entry per CA value 0..15; F=fetch, L=load, S=store access modes.) */
#define XCHAL_FCA_LIST		XTHAL_FAM_EXCEPTION	XCHAL_SEP \
				XTHAL_FAM_BYPASS	XCHAL_SEP \
				XTHAL_FAM_EXCEPTION	XCHAL_SEP \
				XTHAL_FAM_BYPASS	XCHAL_SEP \
				XTHAL_FAM_EXCEPTION	XCHAL_SEP \
				XTHAL_FAM_CACHED	XCHAL_SEP \
				XTHAL_FAM_EXCEPTION	XCHAL_SEP \
				XTHAL_FAM_CACHED	XCHAL_SEP \
				XTHAL_FAM_EXCEPTION	XCHAL_SEP \
				XTHAL_FAM_CACHED	XCHAL_SEP \
				XTHAL_FAM_EXCEPTION	XCHAL_SEP \
				XTHAL_FAM_CACHED	XCHAL_SEP \
				XTHAL_FAM_EXCEPTION	XCHAL_SEP \
				XTHAL_FAM_EXCEPTION	XCHAL_SEP \
				XTHAL_FAM_EXCEPTION	XCHAL_SEP \
				XTHAL_FAM_EXCEPTION
#define XCHAL_LCA_LIST		XTHAL_LAM_EXCEPTION	XCHAL_SEP \
				XTHAL_LAM_BYPASSG	XCHAL_SEP \
				XTHAL_LAM_EXCEPTION	XCHAL_SEP \
				XTHAL_LAM_BYPASSG	XCHAL_SEP \
				XTHAL_LAM_EXCEPTION	XCHAL_SEP \
				XTHAL_LAM_CACHED	XCHAL_SEP \
				XTHAL_LAM_EXCEPTION	XCHAL_SEP \
				XTHAL_LAM_CACHED	XCHAL_SEP \
				XTHAL_LAM_EXCEPTION	XCHAL_SEP \
				XTHAL_LAM_NACACHED	XCHAL_SEP \
				XTHAL_LAM_EXCEPTION	XCHAL_SEP \
				XTHAL_LAM_NACACHED	XCHAL_SEP \
				XTHAL_LAM_EXCEPTION	XCHAL_SEP \
				XTHAL_LAM_ISOLATE	XCHAL_SEP \
				XTHAL_LAM_EXCEPTION	XCHAL_SEP \
				XTHAL_LAM_CACHED
#define XCHAL_SCA_LIST		XTHAL_SAM_EXCEPTION	XCHAL_SEP \
				XTHAL_SAM_EXCEPTION	XCHAL_SEP \
				XTHAL_SAM_EXCEPTION	XCHAL_SEP \
				XTHAL_SAM_BYPASS	XCHAL_SEP \
				XTHAL_SAM_EXCEPTION	XCHAL_SEP \
				XTHAL_SAM_EXCEPTION	XCHAL_SEP \
				XTHAL_SAM_EXCEPTION	XCHAL_SEP \
				XTHAL_SAM_WRITETHRU	XCHAL_SEP \
				XTHAL_SAM_EXCEPTION	XCHAL_SEP \
				XTHAL_SAM_EXCEPTION	XCHAL_SEP \
				XTHAL_SAM_EXCEPTION	XCHAL_SEP \
				XTHAL_SAM_WRITETHRU	XCHAL_SEP \
				XTHAL_SAM_EXCEPTION	XCHAL_SEP \
				XTHAL_SAM_ISOLATE	XCHAL_SEP \
				XTHAL_SAM_EXCEPTION	XCHAL_SEP \
				XTHAL_SAM_WRITETHRU

/* Test:
	read/only: 0 + 1 + 2 + 4 + 5 + 6 + 8 + 9 + 10 + 12 + 14
	read/only: 0 + 1 + 2 + 4 + 5 + 6 + 8 + 9 + 10 + 12 + 14
	all:       0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13 + 14 + 15
	fault:     0 + 2 + 4 + 6 + 8 + 10 + 12 + 14
	r/w/x cached:
	r/w/x dcached:
	I-bypass:  1 + 3

	load guard bit set: 1 + 3
	load guard bit clr: 0 + 2 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13 + 14 + 15
	hit-cache r/w/x: 7 + 11

	fams: 5
	fams: 0 / 6 / 18 / 1 / 2
	fams: Bypass / Isolate / Cached / Exception / NACached

	MMU okay:  yes
*/
642
643
/*----------------------------------------------------------------------
				MMU
  ----------------------------------------------------------------------*/

/*
 * General notes on MMU parameters.
 *
 * Terminology:
 *	ASID = address-space ID (acts as an "extension" of virtual addresses)
 *	VPN  = virtual page number
 *	PPN  = physical page number
 *	CA   = encoded cache attribute (access modes)
 *	TLB  = translation look-aside buffer (term is stretched somewhat here)
 *	I    = instruction (fetch accesses)
 *	D    = data (load and store accesses)
 *	way  = each TLB (ITLB and DTLB) consists of a number of "ways"
 *		that simultaneously match the virtual address of an access;
 *		a TLB successfully translates a virtual address if exactly
 *		one way matches the vaddr; if none match, it is a miss;
 *		if multiple match, one gets a "multihit" exception;
 *		each way can be independently configured in terms of number of
 *		entries, page sizes, which fields are writable or constant, etc.
 *	set  = group of contiguous ways with exactly identical parameters
 *	ARF  = auto-refill; hardware services a 1st-level miss by loading a PTE
 *		from the page table and storing it in one of the auto-refill ways;
 *		if this PTE load also misses, a miss exception is posted for s/w.
 *	min-wired = a "min-wired" way can be used to map a single (minimum-sized)
 * 		page arbitrarily under program control; it has a single entry,
 *		is non-auto-refill (some other way(s) must be auto-refill),
 *		all its fields (VPN, PPN, ASID, CA) are all writable, and it
 *		supports the XCHAL_MMU_MIN_PTE_PAGE_SIZE page size (a current
 *		restriction is that this be the only page size it supports).
 *
 * TLB way entries are virtually indexed.
 * TLB ways that support multiple page sizes:
 *	- must have all writable VPN and PPN fields;
 *	- can only use one page size at any given time (eg. setup at startup),
 *	  selected by the respective ITLBCFG or DTLBCFG special register,
 *	  whose bits n*4+3 .. n*4 index the list of page sizes for way n
 *	  (XCHAL_xTLB_SETm_PAGESZ_LOG2_LIST for set m corresponding to way n);
 *	  this list may be sparse for auto-refill ways because auto-refill
 *	  ways have independent lists of supported page sizes sharing a
 *	  common encoding with PTE entries; the encoding is the index into
 *	  this list; unsupported sizes for a given way are zero in the list;
 *	  selecting unsupported sizes results in undefined hardware behaviour;
 *	- is only possible for ways 0 thru 7 (due to ITLBCFG/DTLBCFG definition).
 */

#define XCHAL_HAVE_CACHEATTR		0	/* 1 if CACHEATTR register present, 0 if TLBs present instead */
#define XCHAL_HAVE_TLBS			1	/* 1 if TLBs present, 0 if CACHEATTR present instead */
#define XCHAL_HAVE_MMU			XCHAL_HAVE_TLBS	/* (DEPRECATED; use XCHAL_HAVE_TLBS instead; will be removed in future release) */
#define XCHAL_HAVE_SPANNING_WAY		0	/* 1 if single way maps entire virtual address space in I+D */
#define XCHAL_HAVE_IDENTITY_MAP		0	/* 1 if virtual addr == physical addr always, 0 otherwise */
#define XCHAL_HAVE_MIMIC_CACHEATTR	0	/* 1 if have MMU that mimics a CACHEATTR config (CaMMU) */
#define XCHAL_HAVE_XLT_CACHEATTR	0	/* 1 if have MMU that mimics a CACHEATTR config, but with translation (CaXltMMU) */

#define XCHAL_MMU_ASID_BITS		8	/* number of bits in ASIDs (address space IDs) */
#define XCHAL_MMU_ASID_INVALID		0	/* ASID value indicating invalid address space */
#define XCHAL_MMU_ASID_KERNEL		1	/* ASID value indicating kernel (ring 0) address space */
#define XCHAL_MMU_RINGS			4	/* number of rings supported (1..4) */
#define XCHAL_MMU_RING_BITS		2	/* number of bits needed to hold ring number */
#define XCHAL_MMU_SR_BITS		0	/* number of size-restriction bits supported */
#define XCHAL_MMU_CA_BITS		4	/* number of bits needed to hold cache attribute encoding */
#define XCHAL_MMU_MAX_PTE_PAGE_SIZE	12	/* max page size in a PTE structure (log2) */
#define XCHAL_MMU_MIN_PTE_PAGE_SIZE	12	/* min page size in a PTE structure (log2) */
709
710
/*** Instruction TLB: ***/

#define XCHAL_ITLB_WAY_BITS		3	/* number of bits holding the ways */
#define XCHAL_ITLB_WAYS			7	/* number of ways (n-way set-associative TLB) */
#define XCHAL_ITLB_ARF_WAYS		4	/* number of auto-refill ways */
#define XCHAL_ITLB_SETS			4	/* number of sets (groups of ways with identical settings) */

/* Way set to which each way belongs: */
#define XCHAL_ITLB_WAY0_SET		0
#define XCHAL_ITLB_WAY1_SET		0
#define XCHAL_ITLB_WAY2_SET		0
#define XCHAL_ITLB_WAY3_SET		0
#define XCHAL_ITLB_WAY4_SET		1
#define XCHAL_ITLB_WAY5_SET		2
#define XCHAL_ITLB_WAY6_SET		3

/* Ways sets that are used by hardware auto-refill (ARF): */
#define XCHAL_ITLB_ARF_SETS		1	/* number of auto-refill sets */
#define XCHAL_ITLB_ARF_SET0		0	/* index of n'th auto-refill set */

/* Way sets that are "min-wired" (see terminology comment above): */
#define XCHAL_ITLB_MINWIRED_SETS	0	/* number of "min-wired" sets */


/* ITLB way set 0 (group of ways 0 thru 3): */
#define XCHAL_ITLB_SET0_WAY			0	/* index of first way in this way set */
#define XCHAL_ITLB_SET0_WAYS			4	/* number of (contiguous) ways in this way set */
#define XCHAL_ITLB_SET0_ENTRIES_LOG2		2	/* log2(number of entries in this way) */
#define XCHAL_ITLB_SET0_ENTRIES			4	/* number of entries in this way (always a power of 2) */
#define XCHAL_ITLB_SET0_ARF			1	/* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
#define XCHAL_ITLB_SET0_PAGESIZES		1	/* number of supported page sizes in this way */
#define XCHAL_ITLB_SET0_PAGESZ_BITS		0	/* number of bits to encode the page size */
#define XCHAL_ITLB_SET0_PAGESZ_LOG2_MIN		12	/* log2(minimum supported page size) */
#define XCHAL_ITLB_SET0_PAGESZ_LOG2_MAX		12	/* log2(maximum supported page size) */
#define XCHAL_ITLB_SET0_PAGESZ_LOG2_LIST	12	/* list of log2(page size)s, separated by XCHAL_SEP;
							   2^PAGESZ_BITS entries in list, unsupported entries are zero */
#define XCHAL_ITLB_SET0_ASID_CONSTMASK		0	/* constant ASID bits; 0 if all writable */
#define XCHAL_ITLB_SET0_VPN_CONSTMASK		0	/* constant VPN bits, not including entry index bits; 0 if all writable */
#define XCHAL_ITLB_SET0_PPN_CONSTMASK		0	/* constant PPN bits, including entry index bits; 0 if all writable */
#define XCHAL_ITLB_SET0_CA_CONSTMASK		0	/* constant CA bits; 0 if all writable */
#define XCHAL_ITLB_SET0_ASID_RESET		0	/* 1 if ASID reset values defined (and all writable); 0 otherwise */
#define XCHAL_ITLB_SET0_VPN_RESET		0	/* 1 if VPN reset values defined (and all writable); 0 otherwise */
#define XCHAL_ITLB_SET0_PPN_RESET		0	/* 1 if PPN reset values defined (and all writable); 0 otherwise */
#define XCHAL_ITLB_SET0_CA_RESET		0	/* 1 if CA reset values defined (and all writable); 0 otherwise */

/* ITLB way set 1 (group of ways 4 thru 4): */
#define XCHAL_ITLB_SET1_WAY			4	/* index of first way in this way set */
#define XCHAL_ITLB_SET1_WAYS			1	/* number of (contiguous) ways in this way set */
#define XCHAL_ITLB_SET1_ENTRIES_LOG2		2	/* log2(number of entries in this way) */
#define XCHAL_ITLB_SET1_ENTRIES			4	/* number of entries in this way (always a power of 2) */
#define XCHAL_ITLB_SET1_ARF			0	/* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
#define XCHAL_ITLB_SET1_PAGESIZES		4	/* number of supported page sizes in this way */
#define XCHAL_ITLB_SET1_PAGESZ_BITS		2	/* number of bits to encode the page size */
#define XCHAL_ITLB_SET1_PAGESZ_LOG2_MIN		20	/* log2(minimum supported page size) */
#define XCHAL_ITLB_SET1_PAGESZ_LOG2_MAX		26	/* log2(maximum supported page size) */
#define XCHAL_ITLB_SET1_PAGESZ_LOG2_LIST	20 XCHAL_SEP 22 XCHAL_SEP 24 XCHAL_SEP 26	/* list of log2(page size)s, separated by XCHAL_SEP;
							   2^PAGESZ_BITS entries in list, unsupported entries are zero */
#define XCHAL_ITLB_SET1_ASID_CONSTMASK		0	/* constant ASID bits; 0 if all writable */
#define XCHAL_ITLB_SET1_VPN_CONSTMASK		0	/* constant VPN bits, not including entry index bits; 0 if all writable */
#define XCHAL_ITLB_SET1_PPN_CONSTMASK		0	/* constant PPN bits, including entry index bits; 0 if all writable */
#define XCHAL_ITLB_SET1_CA_CONSTMASK		0	/* constant CA bits; 0 if all writable */
#define XCHAL_ITLB_SET1_ASID_RESET		0	/* 1 if ASID reset values defined (and all writable); 0 otherwise */
#define XCHAL_ITLB_SET1_VPN_RESET		0	/* 1 if VPN reset values defined (and all writable); 0 otherwise */
#define XCHAL_ITLB_SET1_PPN_RESET		0	/* 1 if PPN reset values defined (and all writable); 0 otherwise */
#define XCHAL_ITLB_SET1_CA_RESET		0	/* 1 if CA reset values defined (and all writable); 0 otherwise */

/* ITLB way set 2 (group of ways 5 thru 5): */
#define XCHAL_ITLB_SET2_WAY			5	/* index of first way in this way set */
#define XCHAL_ITLB_SET2_WAYS			1	/* number of (contiguous) ways in this way set */
#define XCHAL_ITLB_SET2_ENTRIES_LOG2		1	/* log2(number of entries in this way) */
#define XCHAL_ITLB_SET2_ENTRIES			2	/* number of entries in this way (always a power of 2) */
#define XCHAL_ITLB_SET2_ARF			0	/* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
#define XCHAL_ITLB_SET2_PAGESIZES		1	/* number of supported page sizes in this way */
#define XCHAL_ITLB_SET2_PAGESZ_BITS		0	/* number of bits to encode the page size */
#define XCHAL_ITLB_SET2_PAGESZ_LOG2_MIN		27	/* log2(minimum supported page size) */
#define XCHAL_ITLB_SET2_PAGESZ_LOG2_MAX		27	/* log2(maximum supported page size) */
#define XCHAL_ITLB_SET2_PAGESZ_LOG2_LIST	27	/* list of log2(page size)s, separated by XCHAL_SEP;
							   2^PAGESZ_BITS entries in list, unsupported entries are zero */
#define XCHAL_ITLB_SET2_ASID_CONSTMASK		0xFF	/* constant ASID bits; 0 if all writable */
#define XCHAL_ITLB_SET2_VPN_CONSTMASK		0xF0000000	/* constant VPN bits, not including entry index bits; 0 if all writable */
#define XCHAL_ITLB_SET2_PPN_CONSTMASK		0xF8000000	/* constant PPN bits, including entry index bits; 0 if all writable */
#define XCHAL_ITLB_SET2_CA_CONSTMASK		0x0000000F	/* constant CA bits; 0 if all writable */
#define XCHAL_ITLB_SET2_ASID_RESET		0	/* 1 if ASID reset values defined (and all writable); 0 otherwise */
#define XCHAL_ITLB_SET2_VPN_RESET		0	/* 1 if VPN reset values defined (and all writable); 0 otherwise */
#define XCHAL_ITLB_SET2_PPN_RESET		0	/* 1 if PPN reset values defined (and all writable); 0 otherwise */
#define XCHAL_ITLB_SET2_CA_RESET		0	/* 1 if CA reset values defined (and all writable); 0 otherwise */
/* Constant ASID values for each entry of ITLB way set 2 (because ASID_CONSTMASK is non-zero): */
#define XCHAL_ITLB_SET2_E0_ASID_CONST		0x01
#define XCHAL_ITLB_SET2_E1_ASID_CONST		0x01
/* Constant VPN values for each entry of ITLB way set 2 (because VPN_CONSTMASK is non-zero): */
#define XCHAL_ITLB_SET2_E0_VPN_CONST		0xD0000000
#define XCHAL_ITLB_SET2_E1_VPN_CONST		0xD8000000
/* Constant PPN values for each entry of ITLB way set 2 (because PPN_CONSTMASK is non-zero): */
#define XCHAL_ITLB_SET2_E0_PPN_CONST		0x00000000
#define XCHAL_ITLB_SET2_E1_PPN_CONST		0x00000000
/* Constant CA values for each entry of ITLB way set 2 (because CA_CONSTMASK is non-zero): */
#define XCHAL_ITLB_SET2_E0_CA_CONST		0x07
#define XCHAL_ITLB_SET2_E1_CA_CONST		0x03

/* ITLB way set 3 (group of ways 6 thru 6): */
#define XCHAL_ITLB_SET3_WAY			6	/* index of first way in this way set */
#define XCHAL_ITLB_SET3_WAYS			1	/* number of (contiguous) ways in this way set */
#define XCHAL_ITLB_SET3_ENTRIES_LOG2		1	/* log2(number of entries in this way) */
#define XCHAL_ITLB_SET3_ENTRIES			2	/* number of entries in this way (always a power of 2) */
#define XCHAL_ITLB_SET3_ARF			0	/* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
#define XCHAL_ITLB_SET3_PAGESIZES		1	/* number of supported page sizes in this way */
#define XCHAL_ITLB_SET3_PAGESZ_BITS		0	/* number of bits to encode the page size */
#define XCHAL_ITLB_SET3_PAGESZ_LOG2_MIN		28	/* log2(minimum supported page size) */
#define XCHAL_ITLB_SET3_PAGESZ_LOG2_MAX		28	/* log2(maximum supported page size) */
#define XCHAL_ITLB_SET3_PAGESZ_LOG2_LIST	28	/* list of log2(page size)s, separated by XCHAL_SEP;
							   2^PAGESZ_BITS entries in list, unsupported entries are zero */
#define XCHAL_ITLB_SET3_ASID_CONSTMASK		0xFF	/* constant ASID bits; 0 if all writable */
#define XCHAL_ITLB_SET3_VPN_CONSTMASK		0xE0000000	/* constant VPN bits, not including entry index bits; 0 if all writable */
#define XCHAL_ITLB_SET3_PPN_CONSTMASK		0xF0000000	/* constant PPN bits, including entry index bits; 0 if all writable */
#define XCHAL_ITLB_SET3_CA_CONSTMASK		0x0000000F	/* constant CA bits; 0 if all writable */
#define XCHAL_ITLB_SET3_ASID_RESET		0	/* 1 if ASID reset values defined (and all writable); 0 otherwise */
#define XCHAL_ITLB_SET3_VPN_RESET		0	/* 1 if VPN reset values defined (and all writable); 0 otherwise */
#define XCHAL_ITLB_SET3_PPN_RESET		0	/* 1 if PPN reset values defined (and all writable); 0 otherwise */
#define XCHAL_ITLB_SET3_CA_RESET		0	/* 1 if CA reset values defined (and all writable); 0 otherwise */
/* Constant ASID values for each entry of ITLB way set 3 (because ASID_CONSTMASK is non-zero): */
#define XCHAL_ITLB_SET3_E0_ASID_CONST		0x01
#define XCHAL_ITLB_SET3_E1_ASID_CONST		0x01
/* Constant VPN values for each entry of ITLB way set 3 (because VPN_CONSTMASK is non-zero): */
#define XCHAL_ITLB_SET3_E0_VPN_CONST		0xE0000000
#define XCHAL_ITLB_SET3_E1_VPN_CONST		0xF0000000
/* Constant PPN values for each entry of ITLB way set 3 (because PPN_CONSTMASK is non-zero): */
#define XCHAL_ITLB_SET3_E0_PPN_CONST		0xF0000000
#define XCHAL_ITLB_SET3_E1_PPN_CONST		0xF0000000
/* Constant CA values for each entry of ITLB way set 3 (because CA_CONSTMASK is non-zero): */
#define XCHAL_ITLB_SET3_E0_CA_CONST		0x07
#define XCHAL_ITLB_SET3_E1_CA_CONST		0x03

/* Indexing macros: */
#define _XCHAL_ITLB_SET(n,_what)	XCHAL_ITLB_SET ## n ## _what
#define XCHAL_ITLB_SET(n,what)		_XCHAL_ITLB_SET(n, _ ## what )
#define _XCHAL_ITLB_SET_E(n,i,_what)	XCHAL_ITLB_SET ## n ## _E ## i ## _what
#define XCHAL_ITLB_SET_E(n,i,what)	_XCHAL_ITLB_SET_E(n,i, _ ## what )
/*
 *  Example use:  XCHAL_ITLB_SET(XCHAL_ITLB_ARF_SET0,ENTRIES)
 *	to get the value of XCHAL_ITLB_SET<n>_ENTRIES where <n> is the first auto-refill set.
 */
852
853
854/*** Data TLB: ***/
855
856#define XCHAL_DTLB_WAY_BITS 4 /* number of bits holding the ways */
857#define XCHAL_DTLB_WAYS 10 /* number of ways (n-way set-associative TLB) */
858#define XCHAL_DTLB_ARF_WAYS 4 /* number of auto-refill ways */
859#define XCHAL_DTLB_SETS 5 /* number of sets (groups of ways with identical settings) */
860
861/* Way set to which each way belongs: */
862#define XCHAL_DTLB_WAY0_SET 0
863#define XCHAL_DTLB_WAY1_SET 0
864#define XCHAL_DTLB_WAY2_SET 0
865#define XCHAL_DTLB_WAY3_SET 0
866#define XCHAL_DTLB_WAY4_SET 1
867#define XCHAL_DTLB_WAY5_SET 2
868#define XCHAL_DTLB_WAY6_SET 3
869#define XCHAL_DTLB_WAY7_SET 4
870#define XCHAL_DTLB_WAY8_SET 4
871#define XCHAL_DTLB_WAY9_SET 4
872
873/* Ways sets that are used by hardware auto-refill (ARF): */
874#define XCHAL_DTLB_ARF_SETS 1 /* number of auto-refill sets */
875#define XCHAL_DTLB_ARF_SET0 0 /* index of n'th auto-refill set */
876
877/* Way sets that are "min-wired" (see terminology comment above): */
878#define XCHAL_DTLB_MINWIRED_SETS 1 /* number of "min-wired" sets */
879#define XCHAL_DTLB_MINWIRED_SET0 4 /* index of n'th "min-wired" set */
880
881
882/* DTLB way set 0 (group of ways 0 thru 3): */
883#define XCHAL_DTLB_SET0_WAY 0 /* index of first way in this way set */
884#define XCHAL_DTLB_SET0_WAYS 4 /* number of (contiguous) ways in this way set */
885#define XCHAL_DTLB_SET0_ENTRIES_LOG2 2 /* log2(number of entries in this way) */
886#define XCHAL_DTLB_SET0_ENTRIES 4 /* number of entries in this way (always a power of 2) */
887#define XCHAL_DTLB_SET0_ARF 1 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
888#define XCHAL_DTLB_SET0_PAGESIZES 1 /* number of supported page sizes in this way */
889#define XCHAL_DTLB_SET0_PAGESZ_BITS 0 /* number of bits to encode the page size */
890#define XCHAL_DTLB_SET0_PAGESZ_LOG2_MIN 12 /* log2(minimum supported page size) */
891#define XCHAL_DTLB_SET0_PAGESZ_LOG2_MAX 12 /* log2(maximum supported page size) */
892#define XCHAL_DTLB_SET0_PAGESZ_LOG2_LIST 12 /* list of log2(page size)s, separated by XCHAL_SEP;
893 2^PAGESZ_BITS entries in list, unsupported entries are zero */
894#define XCHAL_DTLB_SET0_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
895#define XCHAL_DTLB_SET0_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */
896#define XCHAL_DTLB_SET0_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
897#define XCHAL_DTLB_SET0_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
898#define XCHAL_DTLB_SET0_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
899#define XCHAL_DTLB_SET0_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
900#define XCHAL_DTLB_SET0_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
901#define XCHAL_DTLB_SET0_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
902
903/* DTLB way set 1 (group of ways 4 thru 4): */
904#define XCHAL_DTLB_SET1_WAY 4 /* index of first way in this way set */
905#define XCHAL_DTLB_SET1_WAYS 1 /* number of (contiguous) ways in this way set */
906#define XCHAL_DTLB_SET1_ENTRIES_LOG2 2 /* log2(number of entries in this way) */
907#define XCHAL_DTLB_SET1_ENTRIES 4 /* number of entries in this way (always a power of 2) */
908#define XCHAL_DTLB_SET1_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
909#define XCHAL_DTLB_SET1_PAGESIZES 4 /* number of supported page sizes in this way */
910#define XCHAL_DTLB_SET1_PAGESZ_BITS 2 /* number of bits to encode the page size */
911#define XCHAL_DTLB_SET1_PAGESZ_LOG2_MIN 20 /* log2(minimum supported page size) */
912#define XCHAL_DTLB_SET1_PAGESZ_LOG2_MAX 26 /* log2(maximum supported page size) */
913#define XCHAL_DTLB_SET1_PAGESZ_LOG2_LIST 20 XCHAL_SEP 22 XCHAL_SEP 24 XCHAL_SEP 26 /* list of log2(page size)s, separated by XCHAL_SEP;
914 2^PAGESZ_BITS entries in list, unsupported entries are zero */
915#define XCHAL_DTLB_SET1_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
916#define XCHAL_DTLB_SET1_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */
917#define XCHAL_DTLB_SET1_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
918#define XCHAL_DTLB_SET1_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
919#define XCHAL_DTLB_SET1_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
920#define XCHAL_DTLB_SET1_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
921#define XCHAL_DTLB_SET1_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
922#define XCHAL_DTLB_SET1_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
923
924/* DTLB way set 2 (group of ways 5 thru 5): */
925#define XCHAL_DTLB_SET2_WAY 5 /* index of first way in this way set */
926#define XCHAL_DTLB_SET2_WAYS 1 /* number of (contiguous) ways in this way set */
927#define XCHAL_DTLB_SET2_ENTRIES_LOG2 1 /* log2(number of entries in this way) */
928#define XCHAL_DTLB_SET2_ENTRIES 2 /* number of entries in this way (always a power of 2) */
929#define XCHAL_DTLB_SET2_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
930#define XCHAL_DTLB_SET2_PAGESIZES 1 /* number of supported page sizes in this way */
931#define XCHAL_DTLB_SET2_PAGESZ_BITS 0 /* number of bits to encode the page size */
932#define XCHAL_DTLB_SET2_PAGESZ_LOG2_MIN 27 /* log2(minimum supported page size) */
933#define XCHAL_DTLB_SET2_PAGESZ_LOG2_MAX 27 /* log2(maximum supported page size) */
934#define XCHAL_DTLB_SET2_PAGESZ_LOG2_LIST 27 /* list of log2(page size)s, separated by XCHAL_SEP;
935 2^PAGESZ_BITS entries in list, unsupported entries are zero */
936#define XCHAL_DTLB_SET2_ASID_CONSTMASK 0xFF /* constant ASID bits; 0 if all writable */
937#define XCHAL_DTLB_SET2_VPN_CONSTMASK 0xF0000000 /* constant VPN bits, not including entry index bits; 0 if all writable */
938#define XCHAL_DTLB_SET2_PPN_CONSTMASK 0xF8000000 /* constant PPN bits, including entry index bits; 0 if all writable */
939#define XCHAL_DTLB_SET2_CA_CONSTMASK 0x0000000F /* constant CA bits; 0 if all writable */
940#define XCHAL_DTLB_SET2_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
941#define XCHAL_DTLB_SET2_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
942#define XCHAL_DTLB_SET2_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
943#define XCHAL_DTLB_SET2_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
944/* Constant ASID values for each entry of DTLB way set 2 (because ASID_CONSTMASK is non-zero): */
945#define XCHAL_DTLB_SET2_E0_ASID_CONST 0x01
946#define XCHAL_DTLB_SET2_E1_ASID_CONST 0x01
947/* Constant VPN values for each entry of DTLB way set 2 (because VPN_CONSTMASK is non-zero): */
948#define XCHAL_DTLB_SET2_E0_VPN_CONST 0xD0000000
949#define XCHAL_DTLB_SET2_E1_VPN_CONST 0xD8000000
950/* Constant PPN values for each entry of DTLB way set 2 (because PPN_CONSTMASK is non-zero): */
951#define XCHAL_DTLB_SET2_E0_PPN_CONST 0x00000000
952#define XCHAL_DTLB_SET2_E1_PPN_CONST 0x00000000
953/* Constant CA values for each entry of DTLB way set 2 (because CA_CONSTMASK is non-zero): */
954#define XCHAL_DTLB_SET2_E0_CA_CONST 0x07
955#define XCHAL_DTLB_SET2_E1_CA_CONST 0x03
956
957/* DTLB way set 3 (group of ways 6 thru 6): */
958#define XCHAL_DTLB_SET3_WAY 6 /* index of first way in this way set */
959#define XCHAL_DTLB_SET3_WAYS 1 /* number of (contiguous) ways in this way set */
960#define XCHAL_DTLB_SET3_ENTRIES_LOG2 1 /* log2(number of entries in this way) */
961#define XCHAL_DTLB_SET3_ENTRIES 2 /* number of entries in this way (always a power of 2) */
962#define XCHAL_DTLB_SET3_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
963#define XCHAL_DTLB_SET3_PAGESIZES 1 /* number of supported page sizes in this way */
964#define XCHAL_DTLB_SET3_PAGESZ_BITS 0 /* number of bits to encode the page size */
965#define XCHAL_DTLB_SET3_PAGESZ_LOG2_MIN 28 /* log2(minimum supported page size) */
966#define XCHAL_DTLB_SET3_PAGESZ_LOG2_MAX 28 /* log2(maximum supported page size) */
967#define XCHAL_DTLB_SET3_PAGESZ_LOG2_LIST 28 /* list of log2(page size)s, separated by XCHAL_SEP;
968 2^PAGESZ_BITS entries in list, unsupported entries are zero */
969#define XCHAL_DTLB_SET3_ASID_CONSTMASK 0xFF /* constant ASID bits; 0 if all writable */
970#define XCHAL_DTLB_SET3_VPN_CONSTMASK 0xE0000000 /* constant VPN bits, not including entry index bits; 0 if all writable */
971#define XCHAL_DTLB_SET3_PPN_CONSTMASK 0xF0000000 /* constant PPN bits, including entry index bits; 0 if all writable */
972#define XCHAL_DTLB_SET3_CA_CONSTMASK 0x0000000F /* constant CA bits; 0 if all writable */
973#define XCHAL_DTLB_SET3_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
974#define XCHAL_DTLB_SET3_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
975#define XCHAL_DTLB_SET3_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
976#define XCHAL_DTLB_SET3_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
977/* Constant ASID values for each entry of DTLB way set 3 (because ASID_CONSTMASK is non-zero): */
978#define XCHAL_DTLB_SET3_E0_ASID_CONST 0x01
979#define XCHAL_DTLB_SET3_E1_ASID_CONST 0x01
980/* Constant VPN values for each entry of DTLB way set 3 (because VPN_CONSTMASK is non-zero): */
981#define XCHAL_DTLB_SET3_E0_VPN_CONST 0xE0000000
982#define XCHAL_DTLB_SET3_E1_VPN_CONST 0xF0000000
983/* Constant PPN values for each entry of DTLB way set 3 (because PPN_CONSTMASK is non-zero): */
984#define XCHAL_DTLB_SET3_E0_PPN_CONST 0xF0000000
985#define XCHAL_DTLB_SET3_E1_PPN_CONST 0xF0000000
986/* Constant CA values for each entry of DTLB way set 3 (because CA_CONSTMASK is non-zero): */
987#define XCHAL_DTLB_SET3_E0_CA_CONST 0x07
988#define XCHAL_DTLB_SET3_E1_CA_CONST 0x03
989
990/* DTLB way set 4 (group of ways 7 thru 9): */
991#define XCHAL_DTLB_SET4_WAY 7 /* index of first way in this way set */
992#define XCHAL_DTLB_SET4_WAYS 3 /* number of (contiguous) ways in this way set */
993#define XCHAL_DTLB_SET4_ENTRIES_LOG2 0 /* log2(number of entries in this way) */
994#define XCHAL_DTLB_SET4_ENTRIES 1 /* number of entries in this way (always a power of 2) */
995#define XCHAL_DTLB_SET4_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
996#define XCHAL_DTLB_SET4_PAGESIZES 1 /* number of supported page sizes in this way */
997#define XCHAL_DTLB_SET4_PAGESZ_BITS 0 /* number of bits to encode the page size */
998#define XCHAL_DTLB_SET4_PAGESZ_LOG2_MIN 12 /* log2(minimum supported page size) */
999#define XCHAL_DTLB_SET4_PAGESZ_LOG2_MAX 12 /* log2(maximum supported page size) */
1000#define XCHAL_DTLB_SET4_PAGESZ_LOG2_LIST 12 /* list of log2(page size)s, separated by XCHAL_SEP;
1001 2^PAGESZ_BITS entries in list, unsupported entries are zero */
1002#define XCHAL_DTLB_SET4_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
1003#define XCHAL_DTLB_SET4_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */
1004#define XCHAL_DTLB_SET4_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
1005#define XCHAL_DTLB_SET4_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
1006#define XCHAL_DTLB_SET4_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
1007#define XCHAL_DTLB_SET4_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
1008#define XCHAL_DTLB_SET4_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
1009#define XCHAL_DTLB_SET4_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
1010
1011/* Indexing macros: */
1012#define _XCHAL_DTLB_SET(n,_what) XCHAL_DTLB_SET ## n ## _what
1013#define XCHAL_DTLB_SET(n,what) _XCHAL_DTLB_SET(n, _ ## what )
1014#define _XCHAL_DTLB_SET_E(n,i,_what) XCHAL_DTLB_SET ## n ## _E ## i ## _what
1015#define XCHAL_DTLB_SET_E(n,i,what) _XCHAL_DTLB_SET_E(n,i, _ ## what )
1016/*
1017 * Example use: XCHAL_DTLB_SET(XCHAL_DTLB_ARF_SET0,ENTRIES)
1018 * to get the value of XCHAL_DTLB_SET<n>_ENTRIES where <n> is the first auto-refill set.
1019 */
1020
1021
1022/*
1023 * Determine whether we have a full MMU (with Page Table and Protection)
1024 * usable for an MMU-based OS:
1025 */
1026#if XCHAL_HAVE_TLBS && !XCHAL_HAVE_SPANNING_WAY && XCHAL_ITLB_ARF_WAYS > 0 && XCHAL_DTLB_ARF_WAYS > 0 && XCHAL_MMU_RINGS >= 2
1027# define XCHAL_HAVE_PTP_MMU 1 /* have full MMU (with page table [autorefill] and protection) */
1028#else
1029# define XCHAL_HAVE_PTP_MMU 0 /* don't have full MMU */
1030#endif
1031
1032/*
1033 * For full MMUs, report kernel RAM segment and kernel I/O segment static page mappings:
1034 */
1035#if XCHAL_HAVE_PTP_MMU
1036#define XCHAL_KSEG_CACHED_VADDR 0xD0000000 /* virt.addr of kernel RAM cached static map */
1037#define XCHAL_KSEG_CACHED_PADDR 0x00000000 /* phys.addr of kseg_cached */
1038#define XCHAL_KSEG_CACHED_SIZE 0x08000000 /* size in bytes of kseg_cached (assumed power of 2!!!) */
1039#define XCHAL_KSEG_BYPASS_VADDR 0xD8000000 /* virt.addr of kernel RAM bypass (uncached) static map */
1040#define XCHAL_KSEG_BYPASS_PADDR 0x00000000 /* phys.addr of kseg_bypass */
1041#define XCHAL_KSEG_BYPASS_SIZE 0x08000000 /* size in bytes of kseg_bypass (assumed power of 2!!!) */
1042
1043#define XCHAL_KIO_CACHED_VADDR 0xE0000000 /* virt.addr of kernel I/O cached static map */
1044#define XCHAL_KIO_CACHED_PADDR 0xF0000000 /* phys.addr of kio_cached */
1045#define XCHAL_KIO_CACHED_SIZE 0x10000000 /* size in bytes of kio_cached (assumed power of 2!!!) */
1046#define XCHAL_KIO_BYPASS_VADDR 0xF0000000 /* virt.addr of kernel I/O bypass (uncached) static map */
1047#define XCHAL_KIO_BYPASS_PADDR 0xF0000000 /* phys.addr of kio_bypass */
1048#define XCHAL_KIO_BYPASS_SIZE 0x10000000 /* size in bytes of kio_bypass (assumed power of 2!!!) */
1049
1050#define XCHAL_SEG_MAPPABLE_VADDR 0x00000000 /* start of largest non-static-mapped virtual addr area */
1051#define XCHAL_SEG_MAPPABLE_SIZE 0xD0000000 /* size in bytes of " */
1052/* define XCHAL_SEG_MAPPABLE2_xxx if more areas present, sorted in order of descending size. */
1053#endif
1054
1055
1056/*----------------------------------------------------------------------
1057 MISC
1058 ----------------------------------------------------------------------*/
1059
1060#define XCHAL_NUM_WRITEBUFFER_ENTRIES 4 /* number of write buffer entries */
1061
1062#define XCHAL_CORE_ID "linux_be" /* configuration's alphanumeric core identifier
1063 (CoreID) set in the Xtensa Processor Generator */
1064
1065#define XCHAL_BUILD_UNIQUE_ID 0x00003256 /* software build-unique ID (22-bit) */
1066
1067/* These definitions describe the hardware targeted by this software: */
1068#define XCHAL_HW_CONFIGID0 0xC103D1FF /* config ID reg 0 value (upper 32 of 64 bits) */
1069#define XCHAL_HW_CONFIGID1 0x00803256 /* config ID reg 1 value (lower 32 of 64 bits) */
1070#define XCHAL_CONFIGID0 XCHAL_HW_CONFIGID0 /* for backward compatibility only -- don't use! */
1071#define XCHAL_CONFIGID1 XCHAL_HW_CONFIGID1 /* for backward compatibility only -- don't use! */
1072#define XCHAL_HW_RELEASE_MAJOR 1050 /* major release of targeted hardware */
1073#define XCHAL_HW_RELEASE_MINOR 1 /* minor release of targeted hardware */
1074#define XCHAL_HW_RELEASE_NAME "T1050.1" /* full release name of targeted hardware */
1075#define XTHAL_HW_REL_T1050 1
1076#define XTHAL_HW_REL_T1050_1 1
1077#define XCHAL_HW_CONFIGID_RELIABLE 1
1078
1079
1080/*
1081 * Miscellaneous special register fields:
1082 */
1083
1084
1085/* DBREAKC (special register number 160): */
1086#define XCHAL_DBREAKC_VALIDMASK 0xC000003F /* bits of DBREAKC that are defined */
1087/* MASK field: */
1088#define XCHAL_DBREAKC_MASK_BITS 6 /* number of bits in MASK field */
1089#define XCHAL_DBREAKC_MASK_NUM 64 /* max number of possible causes (2^bits) */
1090#define XCHAL_DBREAKC_MASK_SHIFT 0 /* position of MASK bits in DBREAKC, starting from lsbit */
1091#define XCHAL_DBREAKC_MASK_MASK 0x0000003F /* mask of bits in MASK field of DBREAKC */
1092/* LOADBREAK field: */
1093#define XCHAL_DBREAKC_LOADBREAK_BITS 1 /* number of bits in LOADBREAK field */
1094#define XCHAL_DBREAKC_LOADBREAK_NUM 2 /* max number of possible causes (2^bits) */
1095#define XCHAL_DBREAKC_LOADBREAK_SHIFT 30 /* position of LOADBREAK bits in DBREAKC, starting from lsbit */
1096#define XCHAL_DBREAKC_LOADBREAK_MASK 0x40000000 /* mask of bits in LOADBREAK field of DBREAKC */
1097/* STOREBREAK field: */
1098#define XCHAL_DBREAKC_STOREBREAK_BITS 1 /* number of bits in STOREBREAK field */
1099#define XCHAL_DBREAKC_STOREBREAK_NUM 2 /* max number of possible causes (2^bits) */
1100#define XCHAL_DBREAKC_STOREBREAK_SHIFT 31 /* position of STOREBREAK bits in DBREAKC, starting from lsbit */
1101#define XCHAL_DBREAKC_STOREBREAK_MASK 0x80000000 /* mask of bits in STOREBREAK field of DBREAKC */
1102
1103/* PS (special register number 230): */
1104#define XCHAL_PS_VALIDMASK 0x00070FFF /* bits of PS that are defined */
1105/* INTLEVEL field: */
1106#define XCHAL_PS_INTLEVEL_BITS 4 /* number of bits in INTLEVEL field */
1107#define XCHAL_PS_INTLEVEL_NUM 16 /* max number of possible causes (2^bits) */
1108#define XCHAL_PS_INTLEVEL_SHIFT 0 /* position of INTLEVEL bits in PS, starting from lsbit */
1109#define XCHAL_PS_INTLEVEL_MASK 0x0000000F /* mask of bits in INTLEVEL field of PS */
1110/* EXCM field: */
1111#define XCHAL_PS_EXCM_BITS 1 /* number of bits in EXCM field */
1112#define XCHAL_PS_EXCM_NUM 2 /* max number of possible causes (2^bits) */
1113#define XCHAL_PS_EXCM_SHIFT 4 /* position of EXCM bits in PS, starting from lsbit */
1114#define XCHAL_PS_EXCM_MASK 0x00000010 /* mask of bits in EXCM field of PS */
1115/* PROGSTACK field: */
1116#define XCHAL_PS_PROGSTACK_BITS 1 /* number of bits in PROGSTACK field */
1117#define XCHAL_PS_PROGSTACK_NUM 2 /* max number of possible causes (2^bits) */
1118#define XCHAL_PS_PROGSTACK_SHIFT 5 /* position of PROGSTACK bits in PS, starting from lsbit */
1119#define XCHAL_PS_PROGSTACK_MASK 0x00000020 /* mask of bits in PROGSTACK field of PS */
1120/* RING field: */
1121#define XCHAL_PS_RING_BITS 2 /* number of bits in RING field */
1122#define XCHAL_PS_RING_NUM 4 /* max number of possible causes (2^bits) */
1123#define XCHAL_PS_RING_SHIFT 6 /* position of RING bits in PS, starting from lsbit */
1124#define XCHAL_PS_RING_MASK 0x000000C0 /* mask of bits in RING field of PS */
1125/* OWB field: */
1126#define XCHAL_PS_OWB_BITS 4 /* number of bits in OWB field */
1127#define XCHAL_PS_OWB_NUM 16 /* max number of possible causes (2^bits) */
1128#define XCHAL_PS_OWB_SHIFT 8 /* position of OWB bits in PS, starting from lsbit */
1129#define XCHAL_PS_OWB_MASK 0x00000F00 /* mask of bits in OWB field of PS */
1130/* CALLINC field: */
1131#define XCHAL_PS_CALLINC_BITS 2 /* number of bits in CALLINC field */
1132#define XCHAL_PS_CALLINC_NUM 4 /* max number of possible causes (2^bits) */
1133#define XCHAL_PS_CALLINC_SHIFT 16 /* position of CALLINC bits in PS, starting from lsbit */
1134#define XCHAL_PS_CALLINC_MASK 0x00030000 /* mask of bits in CALLINC field of PS */
1135/* WOE field: */
1136#define XCHAL_PS_WOE_BITS 1 /* number of bits in WOE field */
1137#define XCHAL_PS_WOE_NUM 2 /* max number of possible causes (2^bits) */
1138#define XCHAL_PS_WOE_SHIFT 18 /* position of WOE bits in PS, starting from lsbit */
1139#define XCHAL_PS_WOE_MASK 0x00040000 /* mask of bits in WOE field of PS */
1140
1141/* EXCCAUSE (special register number 232): */
1142#define XCHAL_EXCCAUSE_VALIDMASK 0x0000003F /* bits of EXCCAUSE that are defined */
1143/* EXCCAUSE field: */
1144#define XCHAL_EXCCAUSE_BITS 6 /* number of bits in EXCCAUSE register */
1145#define XCHAL_EXCCAUSE_NUM 64 /* max number of possible causes (2^bits) */
1146#define XCHAL_EXCCAUSE_SHIFT 0 /* position of EXCCAUSE bits in register, starting from lsbit */
1147#define XCHAL_EXCCAUSE_MASK 0x0000003F /* mask of bits in EXCCAUSE register */
1148
1149/* DEBUGCAUSE (special register number 233): */
1150#define XCHAL_DEBUGCAUSE_VALIDMASK 0x0000003F /* bits of DEBUGCAUSE that are defined */
1151/* ICOUNT field: */
1152#define XCHAL_DEBUGCAUSE_ICOUNT_BITS 1 /* number of bits in ICOUNT field */
1153#define XCHAL_DEBUGCAUSE_ICOUNT_NUM 2 /* max number of possible causes (2^bits) */
1154#define XCHAL_DEBUGCAUSE_ICOUNT_SHIFT 0 /* position of ICOUNT bits in DEBUGCAUSE, starting from lsbit */
1155#define XCHAL_DEBUGCAUSE_ICOUNT_MASK 0x00000001 /* mask of bits in ICOUNT field of DEBUGCAUSE */
1156/* IBREAK field: */
1157#define XCHAL_DEBUGCAUSE_IBREAK_BITS 1 /* number of bits in IBREAK field */
1158#define XCHAL_DEBUGCAUSE_IBREAK_NUM 2 /* max number of possible causes (2^bits) */
1159#define XCHAL_DEBUGCAUSE_IBREAK_SHIFT 1 /* position of IBREAK bits in DEBUGCAUSE, starting from lsbit */
1160#define XCHAL_DEBUGCAUSE_IBREAK_MASK 0x00000002 /* mask of bits in IBREAK field of DEBUGCAUSE */
1161/* DBREAK field: */
1162#define XCHAL_DEBUGCAUSE_DBREAK_BITS 1 /* number of bits in DBREAK field */
1163#define XCHAL_DEBUGCAUSE_DBREAK_NUM 2 /* max number of possible causes (2^bits) */
1164#define XCHAL_DEBUGCAUSE_DBREAK_SHIFT 2 /* position of DBREAK bits in DEBUGCAUSE, starting from lsbit */
1165#define XCHAL_DEBUGCAUSE_DBREAK_MASK 0x00000004 /* mask of bits in DBREAK field of DEBUGCAUSE */
1166/* BREAK field: */
1167#define XCHAL_DEBUGCAUSE_BREAK_BITS 1 /* number of bits in BREAK field */
1168#define XCHAL_DEBUGCAUSE_BREAK_NUM 2 /* max number of possible causes (2^bits) */
1169#define XCHAL_DEBUGCAUSE_BREAK_SHIFT 3 /* position of BREAK bits in DEBUGCAUSE, starting from lsbit */
1170#define XCHAL_DEBUGCAUSE_BREAK_MASK 0x00000008 /* mask of bits in BREAK field of DEBUGCAUSE */
1171/* BREAKN field: */
1172#define XCHAL_DEBUGCAUSE_BREAKN_BITS 1 /* number of bits in BREAKN field */
1173#define XCHAL_DEBUGCAUSE_BREAKN_NUM 2 /* max number of possible causes (2^bits) */
1174#define XCHAL_DEBUGCAUSE_BREAKN_SHIFT 4 /* position of BREAKN bits in DEBUGCAUSE, starting from lsbit */
1175#define XCHAL_DEBUGCAUSE_BREAKN_MASK 0x00000010 /* mask of bits in BREAKN field of DEBUGCAUSE */
1176/* DEBUGINT field: */
1177#define XCHAL_DEBUGCAUSE_DEBUGINT_BITS 1 /* number of bits in DEBUGINT field */
1178#define XCHAL_DEBUGCAUSE_DEBUGINT_NUM 2 /* max number of possible causes (2^bits) */
1179#define XCHAL_DEBUGCAUSE_DEBUGINT_SHIFT 5 /* position of DEBUGINT bits in DEBUGCAUSE, starting from lsbit */
1180#define XCHAL_DEBUGCAUSE_DEBUGINT_MASK 0x00000020 /* mask of bits in DEBUGINT field of DEBUGCAUSE */
1181
1182
1183
1184/*----------------------------------------------------------------------
1185 ISA
1186 ----------------------------------------------------------------------*/
1187
1188#define XCHAL_HAVE_DENSITY 1 /* 1 if density option configured, 0 otherwise */
1189#define XCHAL_HAVE_LOOPS 1 /* 1 if zero-overhead loops option configured, 0 otherwise */
1190/* Misc instructions: */
1191#define XCHAL_HAVE_NSA 0 /* 1 if NSA/NSAU instructions option configured, 0 otherwise */
1192#define XCHAL_HAVE_MINMAX 0 /* 1 if MIN/MAX instructions option configured, 0 otherwise */
1193#define XCHAL_HAVE_SEXT 0 /* 1 if sign-extend instruction option configured, 0 otherwise */
1194#define XCHAL_HAVE_CLAMPS 0 /* 1 if CLAMPS instruction option configured, 0 otherwise */
1195#define XCHAL_HAVE_MAC16 0 /* 1 if MAC16 option configured, 0 otherwise */
1196#define XCHAL_HAVE_MUL16 0 /* 1 if 16-bit integer multiply option configured, 0 otherwise */
1197/*#define XCHAL_HAVE_POPC 0*/ /* 1 if CRC instruction option configured, 0 otherwise */
1198/*#define XCHAL_HAVE_CRC 0*/ /* 1 if POPC instruction option configured, 0 otherwise */
1199
1200#define XCHAL_HAVE_SPECULATION 0 /* 1 if speculation option configured, 0 otherwise */
1201/*#define XCHAL_HAVE_MP_SYNC 0*/ /* 1 if multiprocessor sync. option configured, 0 otherwise */
1202#define XCHAL_HAVE_PRID 0 /* 1 if processor ID register configured, 0 otherwise */
1203
1204#define XCHAL_NUM_MISC_REGS 2 /* number of miscellaneous registers (0..4) */
1205
1206/* These relate a bit more to TIE: */
1207#define XCHAL_HAVE_BOOLEANS 0 /* 1 if booleans option configured, 0 otherwise */
1208#define XCHAL_HAVE_MUL32 0 /* 1 if 32-bit integer multiply option configured, 0 otherwise */
1209#define XCHAL_HAVE_MUL32_HIGH 0 /* 1 if MUL32 option includes MULUH and MULSH, 0 otherwise */
1210#define XCHAL_HAVE_FP 0 /* 1 if floating point option configured, 0 otherwise */
1211
1212
1213/*----------------------------------------------------------------------
1214 DERIVED
1215 ----------------------------------------------------------------------*/
1216
1217#if XCHAL_HAVE_BE
1218#define XCHAL_INST_ILLN 0xD60F /* 2-byte illegal instruction, msb-first */
1219#define XCHAL_INST_ILLN_BYTE0 0xD6 /* 2-byte illegal instruction, 1st byte */
1220#define XCHAL_INST_ILLN_BYTE1 0x0F /* 2-byte illegal instruction, 2nd byte */
1221#else
1222#define XCHAL_INST_ILLN 0xF06D /* 2-byte illegal instruction, lsb-first */
1223#define XCHAL_INST_ILLN_BYTE0 0x6D /* 2-byte illegal instruction, 1st byte */
1224#define XCHAL_INST_ILLN_BYTE1 0xF0 /* 2-byte illegal instruction, 2nd byte */
1225#endif
1226/* Belongs in xtensa/hal.h: */
1227#define XTHAL_INST_ILL 0x000000 /* 3-byte illegal instruction */
1228
1229
1230/*
1231 * Because information as to exactly which hardware release is targeted
1232 * by a given software build is not always available, compile-time HAL
1233 * Hardware-Release "_AT" macros are fuzzy (return 0, 1, or XCHAL_MAYBE):
1234 */
1235#ifndef XCHAL_HW_RELEASE_MAJOR
1236# define XCHAL_HW_CONFIGID_RELIABLE 0
1237#endif
1238#if XCHAL_HW_CONFIGID_RELIABLE
1239# define XCHAL_HW_RELEASE_AT_OR_BELOW(major,minor) (XTHAL_REL_LE( XCHAL_HW_RELEASE_MAJOR,XCHAL_HW_RELEASE_MINOR, major,minor ) ? 1 : 0)
1240# define XCHAL_HW_RELEASE_AT_OR_ABOVE(major,minor) (XTHAL_REL_GE( XCHAL_HW_RELEASE_MAJOR,XCHAL_HW_RELEASE_MINOR, major,minor ) ? 1 : 0)
1241# define XCHAL_HW_RELEASE_AT(major,minor) (XTHAL_REL_EQ( XCHAL_HW_RELEASE_MAJOR,XCHAL_HW_RELEASE_MINOR, major,minor ) ? 1 : 0)
1242# define XCHAL_HW_RELEASE_MAJOR_AT(major) ((XCHAL_HW_RELEASE_MAJOR == (major)) ? 1 : 0)
1243#else
1244# define XCHAL_HW_RELEASE_AT_OR_BELOW(major,minor) ( ((major) < 1040 && XCHAL_HAVE_XEA2) ? 0 \
1245 : ((major) > 1050 && XCHAL_HAVE_XEA1) ? 1 \
1246 : XTHAL_MAYBE )
1247# define XCHAL_HW_RELEASE_AT_OR_ABOVE(major,minor) ( ((major) >= 2000 && XCHAL_HAVE_XEA1) ? 0 \
1248 : (XTHAL_REL_LE(major,minor, 1040,0) && XCHAL_HAVE_XEA2) ? 1 \
1249 : XTHAL_MAYBE )
1250# define XCHAL_HW_RELEASE_AT(major,minor) ( (((major) < 1040 && XCHAL_HAVE_XEA2) || \
1251 ((major) >= 2000 && XCHAL_HAVE_XEA1)) ? 0 : XTHAL_MAYBE)
1252# define XCHAL_HW_RELEASE_MAJOR_AT(major) XCHAL_HW_RELEASE_AT(major,0)
1253#endif
1254
1255/*
1256 * Specific errata:
1257 */
1258
1259/*
1260 * Erratum T1020.H13, T1030.H7, T1040.H10, T1050.H4 (fixed in T1040.3 and T1050.1;
1261 * relevant only in XEA1, kernel-vector mode, level-one interrupts and overflows enabled):
1262 */
1263#define XCHAL_MAYHAVE_ERRATUM_XEA1KWIN (XCHAL_HAVE_XEA1 && \
1264 (XCHAL_HW_RELEASE_AT_OR_BELOW(1040,2) != 0 \
1265 || XCHAL_HW_RELEASE_AT(1050,0)))
1266
1267
1268
1269#endif /*XTENSA_CONFIG_CORE_H*/
1270
diff --git a/include/asm-xtensa/xtensa/config-linux_be/defs.h b/include/asm-xtensa/xtensa/config-linux_be/defs.h
deleted file mode 100644
index f7c58b273371..000000000000
--- a/include/asm-xtensa/xtensa/config-linux_be/defs.h
+++ /dev/null
@@ -1,270 +0,0 @@
1/* Definitions for Xtensa instructions, types, and protos. */
2
3/*
4 * Copyright (c) 2003 Tensilica, Inc. All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of version 2.1 of the GNU Lesser General Public
8 * License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it would be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
13 *
14 * Further, this software is distributed without any warranty that it is
15 * free of the rightful claim of any third person regarding infringement
16 * or the like. Any license provided herein, whether implied or
17 * otherwise, applies only to this software file. Patent licenses, if
18 * any, provided herein do not apply to combinations of this program with
19 * other software, or any other product whatsoever.
20 *
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with this program; if not, write the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
24 * USA.
25 */
26
27/* Do not modify. This is automatically generated.*/
28
29#ifndef _XTENSA_BASE_HEADER
30#define _XTENSA_BASE_HEADER
31
32#ifdef __XTENSA__
33#if defined(__GNUC__) && !defined(__XCC__)
34
35#define L8UI_ASM(arr, ars, imm) { \
36 __asm__ volatile("l8ui %0, %1, %2" : "=a" (arr) : "a" (ars) , "i" (imm)); \
37}
38
39#define XT_L8UI(ars, imm) \
40({ \
41 unsigned char _arr; \
42 const unsigned char *_ars = ars; \
43 L8UI_ASM(_arr, _ars, imm); \
44 _arr; \
45})
46
47#define L16UI_ASM(arr, ars, imm) { \
48 __asm__ volatile("l16ui %0, %1, %2" : "=a" (arr) : "a" (ars) , "i" (imm)); \
49}
50
51#define XT_L16UI(ars, imm) \
52({ \
53 unsigned short _arr; \
54 const unsigned short *_ars = ars; \
55 L16UI_ASM(_arr, _ars, imm); \
56 _arr; \
57})
58
59#define L16SI_ASM(arr, ars, imm) {\
60 __asm__ volatile("l16si %0, %1, %2" : "=a" (arr) : "a" (ars) , "i" (imm)); \
61}
62
63#define XT_L16SI(ars, imm) \
64({ \
65 signed short _arr; \
66 const signed short *_ars = ars; \
67 L16SI_ASM(_arr, _ars, imm); \
68 _arr; \
69})
70
71#define L32I_ASM(arr, ars, imm) { \
72 __asm__ volatile("l32i %0, %1, %2" : "=a" (arr) : "a" (ars) , "i" (imm)); \
73}
74
75#define XT_L32I(ars, imm) \
76({ \
77 unsigned _arr; \
78 const unsigned *_ars = ars; \
79 L32I_ASM(_arr, _ars, imm); \
80 _arr; \
81})
82
83#define S8I_ASM(arr, ars, imm) {\
84 __asm__ volatile("s8i %0, %1, %2" : : "a" (arr), "a" (ars) , "i" (imm) : "memory" ); \
85}
86
87#define XT_S8I(arr, ars, imm) \
88({ \
89 signed char _arr = arr; \
90 const signed char *_ars = ars; \
91 S8I_ASM(_arr, _ars, imm); \
92})
93
94#define S16I_ASM(arr, ars, imm) {\
95 __asm__ volatile("s16i %0, %1, %2" : : "a" (arr), "a" (ars) , "i" (imm) : "memory" ); \
96}
97
98#define XT_S16I(arr, ars, imm) \
99({ \
100 signed short _arr = arr; \
101 const signed short *_ars = ars; \
102 S16I_ASM(_arr, _ars, imm); \
103})
104
105#define S32I_ASM(arr, ars, imm) { \
106 __asm__ volatile("s32i %0, %1, %2" : : "a" (arr), "a" (ars) , "i" (imm) : "memory" ); \
107}
108
109#define XT_S32I(arr, ars, imm) \
110({ \
111 signed int _arr = arr; \
112 const signed int *_ars = ars; \
113 S32I_ASM(_arr, _ars, imm); \
114})
115
116#define ADDI_ASM(art, ars, imm) {\
117 __asm__ ("addi %0, %1, %2" : "=a" (art) : "a" (ars), "i" (imm)); \
118}
119
120#define XT_ADDI(ars, imm) \
121({ \
122 unsigned _art; \
123 unsigned _ars = ars; \
124 ADDI_ASM(_art, _ars, imm); \
125 _art; \
126})
127
128#define ABS_ASM(arr, art) {\
129 __asm__ ("abs %0, %1" : "=a" (arr) : "a" (art)); \
130}
131
132#define XT_ABS(art) \
133({ \
134 unsigned _arr; \
135 signed _art = art; \
136 ABS_ASM(_arr, _art); \
137 _arr; \
138})
139
140/* Note: In the following macros that reference SAR, the magic "state"
141 register is used to capture the dependency on SAR. This is because
142 SAR is a 5-bit register and thus there are no C types that can be
143 used to represent it. It doesn't appear that the SAR register is
144 even relevant to GCC, but it is marked as "clobbered" just in
145 case. */
146
147#define SRC_ASM(arr, ars, art) {\
148 register int _xt_sar __asm__ ("state"); \
149 __asm__ ("src %0, %1, %2" \
150 : "=a" (arr) : "a" (ars), "a" (art), "t" (_xt_sar)); \
151}
152
153#define XT_SRC(ars, art) \
154({ \
155 unsigned _arr; \
156 unsigned _ars = ars; \
157 unsigned _art = art; \
158 SRC_ASM(_arr, _ars, _art); \
159 _arr; \
160})
161
162#define SSR_ASM(ars) {\
163 register int _xt_sar __asm__ ("state"); \
164 __asm__ ("ssr %1" : "=t" (_xt_sar) : "a" (ars) : "sar"); \
165}
166
167#define XT_SSR(ars) \
168({ \
169 unsigned _ars = ars; \
170 SSR_ASM(_ars); \
171})
172
173#define SSL_ASM(ars) {\
174 register int _xt_sar __asm__ ("state"); \
175 __asm__ ("ssl %1" : "=t" (_xt_sar) : "a" (ars) : "sar"); \
176}
177
178#define XT_SSL(ars) \
179({ \
180 unsigned _ars = ars; \
181 SSL_ASM(_ars); \
182})
183
184#define SSA8B_ASM(ars) {\
185 register int _xt_sar __asm__ ("state"); \
186 __asm__ ("ssa8b %1" : "=t" (_xt_sar) : "a" (ars) : "sar"); \
187}
188
189#define XT_SSA8B(ars) \
190({ \
191 unsigned _ars = ars; \
192 SSA8B_ASM(_ars); \
193})
194
195#define SSA8L_ASM(ars) {\
196 register int _xt_sar __asm__ ("state"); \
197 __asm__ ("ssa8l %1" : "=t" (_xt_sar) : "a" (ars) : "sar"); \
198}
199
200#define XT_SSA8L(ars) \
201({ \
202 unsigned _ars = ars; \
203 SSA8L_ASM(_ars); \
204})
205
206#define SSAI_ASM(imm) {\
207 register int _xt_sar __asm__ ("state"); \
208 __asm__ ("ssai %1" : "=t" (_xt_sar) : "i" (imm) : "sar"); \
209}
210
211#define XT_SSAI(imm) \
212({ \
213 SSAI_ASM(imm); \
214})
215
216
217
218
219
220
221
222
223#endif /* __GNUC__ && !__XCC__ */
224
225#ifdef __XCC__
226
227/* Core load/store instructions */
228extern unsigned char _TIE_L8UI(const unsigned char * ars, immediate imm);
229extern unsigned short _TIE_L16UI(const unsigned short * ars, immediate imm);
230extern signed short _TIE_L16SI(const signed short * ars, immediate imm);
231extern unsigned _TIE_L32I(const unsigned * ars, immediate imm);
232extern void _TIE_S8I(unsigned char arr, unsigned char * ars, immediate imm);
233extern void _TIE_S16I(unsigned short arr, unsigned short * ars, immediate imm);
234extern void _TIE_S32I(unsigned arr, unsigned * ars, immediate imm);
235
236#define XT_L8UI _TIE_L8UI
237#define XT_L16UI _TIE_L16UI
238#define XT_L16SI _TIE_L16SI
239#define XT_L32I _TIE_L32I
240#define XT_S8I _TIE_S8I
241#define XT_S16I _TIE_S16I
242#define XT_S32I _TIE_S32I
243
244/* Add-immediate instruction */
245extern unsigned _TIE_ADDI(unsigned ars, immediate imm);
246#define XT_ADDI _TIE_ADDI
247
248/* Absolute value instruction */
249extern unsigned _TIE_ABS(int art);
250#define XT_ABS _TIE_ABS
251
252/* funnel shift instructions */
253extern unsigned _TIE_SRC(unsigned ars, unsigned art);
254#define XT_SRC _TIE_SRC
255extern void _TIE_SSR(unsigned ars);
256#define XT_SSR _TIE_SSR
257extern void _TIE_SSL(unsigned ars);
258#define XT_SSL _TIE_SSL
259extern void _TIE_SSA8B(unsigned ars);
260#define XT_SSA8B _TIE_SSA8B
261extern void _TIE_SSA8L(unsigned ars);
262#define XT_SSA8L _TIE_SSA8L
263extern void _TIE_SSAI(immediate imm);
264#define XT_SSAI _TIE_SSAI
265
266
267#endif /* __XCC__ */
268
269#endif /* __XTENSA__ */
270#endif /* !_XTENSA_BASE_HEADER */
diff --git a/include/asm-xtensa/xtensa/config-linux_be/specreg.h b/include/asm-xtensa/xtensa/config-linux_be/specreg.h
deleted file mode 100644
index fa4106aa9a02..000000000000
--- a/include/asm-xtensa/xtensa/config-linux_be/specreg.h
+++ /dev/null
@@ -1,99 +0,0 @@
1/*
2 * Xtensa Special Register symbolic names
3 */
4
5/* $Id: specreg.h,v 1.2 2003/03/07 19:15:18 joetaylor Exp $ */
6
7/*
8 * Copyright (c) 2003 Tensilica, Inc. All Rights Reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2.1 of the GNU Lesser General Public
12 * License as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it would be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
17 *
18 * Further, this software is distributed without any warranty that it is
19 * free of the rightful claim of any third person regarding infringement
20 * or the like. Any license provided herein, whether implied or
21 * otherwise, applies only to this software file. Patent licenses, if
22 * any, provided herein do not apply to combinations of this program with
23 * other software, or any other product whatsoever.
24 *
25 * You should have received a copy of the GNU Lesser General Public
26 * License along with this program; if not, write the Free Software
27 * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
28 * USA.
29 */
30
31#ifndef XTENSA_SPECREG_H
32#define XTENSA_SPECREG_H
33
34/* Include these special register bitfield definitions, for historical reasons: */
35#include <xtensa/corebits.h>
36
37
38/* Special registers: */
39#define LBEG 0
40#define LEND 1
41#define LCOUNT 2
42#define SAR 3
43#define WINDOWBASE 72
44#define WINDOWSTART 73
45#define PTEVADDR 83
46#define RASID 90
47#define ITLBCFG 91
48#define DTLBCFG 92
49#define IBREAKENABLE 96
50#define DDR 104
51#define IBREAKA_0 128
52#define IBREAKA_1 129
53#define DBREAKA_0 144
54#define DBREAKA_1 145
55#define DBREAKC_0 160
56#define DBREAKC_1 161
57#define EPC_1 177
58#define EPC_2 178
59#define EPC_3 179
60#define EPC_4 180
61#define DEPC 192
62#define EPS_2 194
63#define EPS_3 195
64#define EPS_4 196
65#define EXCSAVE_1 209
66#define EXCSAVE_2 210
67#define EXCSAVE_3 211
68#define EXCSAVE_4 212
69#define INTERRUPT 226
70#define INTENABLE 228
71#define PS 230
72#define EXCCAUSE 232
73#define DEBUGCAUSE 233
74#define CCOUNT 234
75#define ICOUNT 236
76#define ICOUNTLEVEL 237
77#define EXCVADDR 238
78#define CCOMPARE_0 240
79#define CCOMPARE_1 241
80#define CCOMPARE_2 242
81#define MISC_REG_0 244
82#define MISC_REG_1 245
83
84/* Special cases (bases of special register series): */
85#define IBREAKA 128
86#define DBREAKA 144
87#define DBREAKC 160
88#define EPC 176
89#define EPS 192
90#define EXCSAVE 208
91#define CCOMPARE 240
92
93/* Special names for read-only and write-only interrupt registers: */
94#define INTREAD 226
95#define INTSET 226
96#define INTCLEAR 227
97
98#endif /* XTENSA_SPECREG_H */
99
diff --git a/include/asm-xtensa/xtensa/config-linux_be/system.h b/include/asm-xtensa/xtensa/config-linux_be/system.h
deleted file mode 100644
index cf9d4d308e3a..000000000000
--- a/include/asm-xtensa/xtensa/config-linux_be/system.h
+++ /dev/null
@@ -1,198 +0,0 @@
1/*
2 * xtensa/config/system.h -- HAL definitions that are dependent on SYSTEM configuration
3 *
4 * NOTE: The location and contents of this file are highly subject to change.
5 *
6 * Source for configuration-independent binaries (which link in a
7 * configuration-specific HAL library) must NEVER include this file.
8 * The HAL itself has historically included this file in some instances,
9 * but this is not appropriate either, because the HAL is meant to be
10 * core-specific but system independent.
11 */
12
13/*
14 * Copyright (c) 2003 Tensilica, Inc. All Rights Reserved.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of version 2.1 of the GNU Lesser General Public
18 * License as published by the Free Software Foundation.
19 *
20 * This program is distributed in the hope that it would be useful, but
21 * WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
23 *
24 * Further, this software is distributed without any warranty that it is
25 * free of the rightful claim of any third person regarding infringement
26 * or the like. Any license provided herein, whether implied or
27 * otherwise, applies only to this software file. Patent licenses, if
28 * any, provided herein do not apply to combinations of this program with
29 * other software, or any other product whatsoever.
30 *
31 * You should have received a copy of the GNU Lesser General Public
32 * License along with this program; if not, write the Free Software
33 * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
34 * USA.
35 */
36
37
38#ifndef XTENSA_CONFIG_SYSTEM_H
39#define XTENSA_CONFIG_SYSTEM_H
40
41/*#include <xtensa/hal.h>*/
42
43
44
45/*----------------------------------------------------------------------
46 DEVICE ADDRESSES
47 ----------------------------------------------------------------------*/
48
49/*
50 * Strange place to find these, but the configuration GUI
51 * allows moving these around to account for various core
52 * configurations. Specific boards (and their BSP software)
53 * will have specific meanings for these components.
54 */
55
56/* I/O Block areas: */
57#define XSHAL_IOBLOCK_CACHED_VADDR 0xE0000000
58#define XSHAL_IOBLOCK_CACHED_PADDR 0xF0000000
59#define XSHAL_IOBLOCK_CACHED_SIZE 0x0E000000
60
61#define XSHAL_IOBLOCK_BYPASS_VADDR 0xF0000000
62#define XSHAL_IOBLOCK_BYPASS_PADDR 0xF0000000
63#define XSHAL_IOBLOCK_BYPASS_SIZE 0x0E000000
64
65/* System ROM: */
66#define XSHAL_ROM_VADDR 0xEE000000
67#define XSHAL_ROM_PADDR 0xFE000000
68#define XSHAL_ROM_SIZE 0x00400000
69/* Largest available area (free of vectors): */
70#define XSHAL_ROM_AVAIL_VADDR 0xEE00052C
71#define XSHAL_ROM_AVAIL_VSIZE 0x003FFAD4
72
73/* System RAM: */
74#define XSHAL_RAM_VADDR 0xD0000000
75#define XSHAL_RAM_PADDR 0x00000000
76#define XSHAL_RAM_VSIZE 0x08000000
77#define XSHAL_RAM_PSIZE 0x10000000
78#define XSHAL_RAM_SIZE XSHAL_RAM_PSIZE
79/* Largest available area (free of vectors): */
80#define XSHAL_RAM_AVAIL_VADDR 0xD0000370
81#define XSHAL_RAM_AVAIL_VSIZE 0x07FFFC90
82
83/*
84 * Shadow system RAM (same device as system RAM, at different address).
85 * (Emulation boards need this for the SONIC Ethernet driver
86 * when data caches are configured for writeback mode.)
87 * NOTE: on full MMU configs, this points to the BYPASS virtual address
88 * of system RAM, ie. is the same as XSHAL_RAM_* except that virtual
89 * addresses are viewed through the BYPASS static map rather than
90 * the CACHED static map.
91 */
92#define XSHAL_RAM_BYPASS_VADDR 0xD8000000
93#define XSHAL_RAM_BYPASS_PADDR 0x00000000
94#define XSHAL_RAM_BYPASS_PSIZE 0x08000000
95
96/* Alternate system RAM (different device than system RAM): */
97#define XSHAL_ALTRAM_VADDR 0xCEE00000
98#define XSHAL_ALTRAM_PADDR 0xC0000000
99#define XSHAL_ALTRAM_SIZE 0x00200000
100
101
102/*----------------------------------------------------------------------
103 * DEVICE-ADDRESS DEPENDENT...
104 *
105 * Values written to CACHEATTR special register (or its equivalent)
106 * to enable and disable caches in various modes.
107 *----------------------------------------------------------------------*/
108
109/*----------------------------------------------------------------------
110 BACKWARD COMPATIBILITY ...
111 ----------------------------------------------------------------------*/
112
113/*
114 * NOTE: the following two macros are DEPRECATED. Use the latter
115 * board-specific macros instead, which are specially tuned for the
116 * particular target environments' memory maps.
117 */
118#define XSHAL_CACHEATTR_BYPASS XSHAL_XT2000_CACHEATTR_BYPASS /* disable caches in bypass mode */
119#define XSHAL_CACHEATTR_DEFAULT XSHAL_XT2000_CACHEATTR_DEFAULT /* default setting to enable caches (no writeback!) */
120
121/*----------------------------------------------------------------------
122 ISS (Instruction Set Simulator) SPECIFIC ...
123 ----------------------------------------------------------------------*/
124
125#define XSHAL_ISS_CACHEATTR_WRITEBACK 0x1122222F /* enable caches in write-back mode */
126#define XSHAL_ISS_CACHEATTR_WRITEALLOC 0x1122222F /* enable caches in write-allocate mode */
127#define XSHAL_ISS_CACHEATTR_WRITETHRU 0x1122222F /* enable caches in write-through mode */
128#define XSHAL_ISS_CACHEATTR_BYPASS 0x2222222F /* disable caches in bypass mode */
129#define XSHAL_ISS_CACHEATTR_DEFAULT XSHAL_ISS_CACHEATTR_WRITEBACK /* default setting to enable caches */
130
131/* For Coware only: */
132#define XSHAL_COWARE_CACHEATTR_WRITEBACK 0x11222222 /* enable caches in write-back mode */
133#define XSHAL_COWARE_CACHEATTR_WRITEALLOC 0x11222222 /* enable caches in write-allocate mode */
134#define XSHAL_COWARE_CACHEATTR_WRITETHRU 0x11222222 /* enable caches in write-through mode */
135#define XSHAL_COWARE_CACHEATTR_BYPASS 0x22222222 /* disable caches in bypass mode */
136#define XSHAL_COWARE_CACHEATTR_DEFAULT XSHAL_COWARE_CACHEATTR_WRITEBACK /* default setting to enable caches */
137
138/* For BFM and other purposes: */
139#define XSHAL_ALLVALID_CACHEATTR_WRITEBACK 0x11222222 /* enable caches without any invalid regions */
140#define XSHAL_ALLVALID_CACHEATTR_DEFAULT XSHAL_ALLVALID_CACHEATTR_WRITEBACK /* default setting for caches without any invalid regions */
141
142#define XSHAL_ISS_PIPE_REGIONS 0
143#define XSHAL_ISS_SDRAM_REGIONS 0
144
145
146/*----------------------------------------------------------------------
147 XT2000 BOARD SPECIFIC ...
148 ----------------------------------------------------------------------*/
149
150#define XSHAL_XT2000_CACHEATTR_WRITEBACK 0x22FFFFFF /* enable caches in write-back mode */
151#define XSHAL_XT2000_CACHEATTR_WRITEALLOC 0x22FFFFFF /* enable caches in write-allocate mode */
152#define XSHAL_XT2000_CACHEATTR_WRITETHRU 0x22FFFFFF /* enable caches in write-through mode */
153#define XSHAL_XT2000_CACHEATTR_BYPASS 0x22FFFFFF /* disable caches in bypass mode */
154#define XSHAL_XT2000_CACHEATTR_DEFAULT XSHAL_XT2000_CACHEATTR_WRITEBACK /* default setting to enable caches */
155
156#define XSHAL_XT2000_PIPE_REGIONS 0x00001000 /* BusInt pipeline regions */
157#define XSHAL_XT2000_SDRAM_REGIONS 0x00000005 /* BusInt SDRAM regions */
158
159
160/*----------------------------------------------------------------------
161 VECTOR SIZES
162 ----------------------------------------------------------------------*/
163
164/*
165 * Sizes allocated to vectors by the system (memory map) configuration.
166 * These sizes are constrained by core configuration (eg. one vector's
167 * code cannot overflow into another vector) but are dependent on the
168 * system or board (or LSP) memory map configuration.
169 *
170 * Whether or not each vector happens to be in a system ROM is also
171 * a system configuration matter, sometimes useful, included here also:
172 */
173#define XSHAL_RESET_VECTOR_SIZE 0x000004E0
174#define XSHAL_RESET_VECTOR_ISROM 1
175#define XSHAL_USER_VECTOR_SIZE 0x0000001C
176#define XSHAL_USER_VECTOR_ISROM 0
177#define XSHAL_PROGRAMEXC_VECTOR_SIZE XSHAL_USER_VECTOR_SIZE /* for backward compatibility */
178#define XSHAL_USEREXC_VECTOR_SIZE XSHAL_USER_VECTOR_SIZE /* for backward compatibility */
179#define XSHAL_KERNEL_VECTOR_SIZE 0x0000001C
180#define XSHAL_KERNEL_VECTOR_ISROM 0
181#define XSHAL_STACKEDEXC_VECTOR_SIZE XSHAL_KERNEL_VECTOR_SIZE /* for backward compatibility */
182#define XSHAL_KERNELEXC_VECTOR_SIZE XSHAL_KERNEL_VECTOR_SIZE /* for backward compatibility */
183#define XSHAL_DOUBLEEXC_VECTOR_SIZE 0x000000E0
184#define XSHAL_DOUBLEEXC_VECTOR_ISROM 0
185#define XSHAL_WINDOW_VECTORS_SIZE 0x00000180
186#define XSHAL_WINDOW_VECTORS_ISROM 0
187#define XSHAL_INTLEVEL2_VECTOR_SIZE 0x0000000C
188#define XSHAL_INTLEVEL2_VECTOR_ISROM 0
189#define XSHAL_INTLEVEL3_VECTOR_SIZE 0x0000000C
190#define XSHAL_INTLEVEL3_VECTOR_ISROM 0
191#define XSHAL_INTLEVEL4_VECTOR_SIZE 0x0000000C
192#define XSHAL_INTLEVEL4_VECTOR_ISROM 1
193#define XSHAL_DEBUG_VECTOR_SIZE XSHAL_INTLEVEL4_VECTOR_SIZE
194#define XSHAL_DEBUG_VECTOR_ISROM XSHAL_INTLEVEL4_VECTOR_ISROM
195
196
197#endif /*XTENSA_CONFIG_SYSTEM_H*/
198
diff --git a/include/asm-xtensa/xtensa/config-linux_be/tie.h b/include/asm-xtensa/xtensa/config-linux_be/tie.h
deleted file mode 100644
index 3c2e514602f4..000000000000
--- a/include/asm-xtensa/xtensa/config-linux_be/tie.h
+++ /dev/null
@@ -1,275 +0,0 @@
1/*
2 * xtensa/config/tie.h -- HAL definitions that are dependent on CORE and TIE configuration
3 *
4 * This header file is sometimes referred to as the "compile-time HAL" or CHAL.
5 * It was generated for a specific Xtensa processor configuration,
6 * and furthermore for a specific set of TIE source files that extend
7 * basic core functionality.
8 *
9 * Source for configuration-independent binaries (which link in a
10 * configuration-specific HAL library) must NEVER include this file.
11 * It is perfectly normal, however, for the HAL source itself to include this file.
12 */
13
14/*
15 * Copyright (c) 2003 Tensilica, Inc. All Rights Reserved.
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of version 2.1 of the GNU Lesser General Public
19 * License as published by the Free Software Foundation.
20 *
21 * This program is distributed in the hope that it would be useful, but
22 * WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
24 *
25 * Further, this software is distributed without any warranty that it is
26 * free of the rightful claim of any third person regarding infringement
27 * or the like. Any license provided herein, whether implied or
28 * otherwise, applies only to this software file. Patent licenses, if
29 * any, provided herein do not apply to combinations of this program with
30 * other software, or any other product whatsoever.
31 *
32 * You should have received a copy of the GNU Lesser General Public
33 * License along with this program; if not, write the Free Software
34 * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
35 * USA.
36 */
37
38
39#ifndef XTENSA_CONFIG_TIE_H
40#define XTENSA_CONFIG_TIE_H
41
42#include <xtensa/hal.h>
43
44
45/*----------------------------------------------------------------------
46 GENERAL
47 ----------------------------------------------------------------------*/
48
49/*
50 * Separators for macros that expand into arrays.
51 * These can be predefined by files that #include this one,
52 * when different separators are required.
53 */
54/* Element separator for macros that expand into 1-dimensional arrays: */
55#ifndef XCHAL_SEP
56#define XCHAL_SEP ,
57#endif
58/* Array separator for macros that expand into 2-dimensional arrays: */
59#ifndef XCHAL_SEP2
60#define XCHAL_SEP2 },{
61#endif
62
63
64
65
66
67
68/*----------------------------------------------------------------------
69 COPROCESSORS and EXTRA STATE
70 ----------------------------------------------------------------------*/
71
72#define XCHAL_CP_NUM 0 /* number of coprocessors */
73#define XCHAL_CP_MAX 0 /* max coprocessor id plus one (0 if none) */
74#define XCHAL_CP_MASK 0x00 /* bitmask of coprocessors by id */
75
76/* Space for coprocessors' state save areas: */
77#define XCHAL_CP0_SA_SIZE 0
78#define XCHAL_CP1_SA_SIZE 0
79#define XCHAL_CP2_SA_SIZE 0
80#define XCHAL_CP3_SA_SIZE 0
81#define XCHAL_CP4_SA_SIZE 0
82#define XCHAL_CP5_SA_SIZE 0
83#define XCHAL_CP6_SA_SIZE 0
84#define XCHAL_CP7_SA_SIZE 0
85/* Minimum required alignments of CP state save areas: */
86#define XCHAL_CP0_SA_ALIGN 1
87#define XCHAL_CP1_SA_ALIGN 1
88#define XCHAL_CP2_SA_ALIGN 1
89#define XCHAL_CP3_SA_ALIGN 1
90#define XCHAL_CP4_SA_ALIGN 1
91#define XCHAL_CP5_SA_ALIGN 1
92#define XCHAL_CP6_SA_ALIGN 1
93#define XCHAL_CP7_SA_ALIGN 1
94
95/* Indexing macros: */
96#define _XCHAL_CP_SA_SIZE(n) XCHAL_CP ## n ## _SA_SIZE
97#define XCHAL_CP_SA_SIZE(n) _XCHAL_CP_SA_SIZE(n) /* n = 0 .. 7 */
98#define _XCHAL_CP_SA_ALIGN(n) XCHAL_CP ## n ## _SA_ALIGN
99#define XCHAL_CP_SA_ALIGN(n) _XCHAL_CP_SA_ALIGN(n) /* n = 0 .. 7 */
100
101
102/* Space for "extra" state (user special registers and non-cp TIE) save area: */
103#define XCHAL_EXTRA_SA_SIZE 0
104#define XCHAL_EXTRA_SA_ALIGN 1
105
106/* Total save area size (extra + all coprocessors) */
107/* (not useful until xthal_{save,restore}_all_extra() is implemented, */
108/* but included for Tor2 beta; doesn't account for alignment!): */
109#define XCHAL_CPEXTRA_SA_SIZE_TOR2 0 /* Tor2Beta temporary definition -- do not use */
110
111/* Combined required alignment for all CP and EXTRA state save areas */
112/* (does not include required alignment for any base config registers): */
113#define XCHAL_CPEXTRA_SA_ALIGN 1
114
115/* ... */
116
117
118#ifdef _ASMLANGUAGE
119/*
120 * Assembly-language specific definitions (assembly macros, etc.).
121 */
122#include <xtensa/config/specreg.h>
123
124/********************
125 * Macros to save and restore the non-coprocessor TIE portion of EXTRA state.
126 */
127
128/* (none) */
129
130
131/********************
132 * Macros to create functions that save and restore all EXTRA (non-coprocessor) state
133 * (does not include zero-overhead loop registers and non-optional registers).
134 */
135
136 /*
137 * Macro that expands to the body of a function that
138 * stores the extra (non-coprocessor) optional/custom state.
139 * Entry: a2 = ptr to save area in which to save extra state
140 * Exit: any register a2-a15 (?) may have been clobbered.
141 */
142 .macro xchal_extra_store_funcbody
143 .endm
144
145
146 /*
147 * Macro that expands to the body of a function that
148 * loads the extra (non-coprocessor) optional/custom state.
149 * Entry: a2 = ptr to save area from which to restore extra state
150 * Exit: any register a2-a15 (?) may have been clobbered.
151 */
152 .macro xchal_extra_load_funcbody
153 .endm
154
155
156/********************
157 * Macros to save and restore the state of each TIE coprocessor.
158 */
159
160
161
162/********************
163 * Macros to create functions that save and restore the state of *any* TIE coprocessor.
164 */
165
166 /*
167 * Macro that expands to the body of a function
168 * that stores the selected coprocessor's state (registers etc).
169 * Entry: a2 = ptr to save area in which to save cp state
170 * a3 = coprocessor number
171 * Exit: any register a2-a15 (?) may have been clobbered.
172 */
173 .macro xchal_cpi_store_funcbody
174 .endm
175
176
177 /*
178 * Macro that expands to the body of a function
179 * that loads the selected coprocessor's state (registers etc).
180 * Entry: a2 = ptr to save area from which to restore cp state
181 * a3 = coprocessor number
182 * Exit: any register a2-a15 (?) may have been clobbered.
183 */
184 .macro xchal_cpi_load_funcbody
185 .endm
186
187#endif /*_ASMLANGUAGE*/
188
189
190/*
191 * Contents of save areas in terms of libdb register numbers.
192 * NOTE: CONTENTS_LIBDB_{UREG,REGF} macros are not defined in this file;
193 * it is up to the user of this header file to define these macros
194 * usefully before each expansion of the CONTENTS_LIBDB macros.
195 * (Fields rsv[123] are reserved for future additions; they are currently
196 * set to zero but may be set to some useful values in the future.)
197 *
198 * CONTENTS_LIBDB_SREG(libdbnum, offset, size, align, rsv1, name, sregnum, bitmask, rsv2, rsv3)
199 * CONTENTS_LIBDB_UREG(libdbnum, offset, size, align, rsv1, name, uregnum, bitmask, rsv2, rsv3)
200 * CONTENTS_LIBDB_REGF(libdbnum, offset, size, align, rsv1, name, index, numentries, contentsize, regname_base, regfile_name, rsv2, rsv3)
201 */
202
203#define XCHAL_EXTRA_SA_CONTENTS_LIBDB_NUM 0
204#define XCHAL_EXTRA_SA_CONTENTS_LIBDB /* empty */
205
206#define XCHAL_CP0_SA_CONTENTS_LIBDB_NUM 0
207#define XCHAL_CP0_SA_CONTENTS_LIBDB /* empty */
208
209#define XCHAL_CP1_SA_CONTENTS_LIBDB_NUM 0
210#define XCHAL_CP1_SA_CONTENTS_LIBDB /* empty */
211
212#define XCHAL_CP2_SA_CONTENTS_LIBDB_NUM 0
213#define XCHAL_CP2_SA_CONTENTS_LIBDB /* empty */
214
215#define XCHAL_CP3_SA_CONTENTS_LIBDB_NUM 0
216#define XCHAL_CP3_SA_CONTENTS_LIBDB /* empty */
217
218#define XCHAL_CP4_SA_CONTENTS_LIBDB_NUM 0
219#define XCHAL_CP4_SA_CONTENTS_LIBDB /* empty */
220
221#define XCHAL_CP5_SA_CONTENTS_LIBDB_NUM 0
222#define XCHAL_CP5_SA_CONTENTS_LIBDB /* empty */
223
224#define XCHAL_CP6_SA_CONTENTS_LIBDB_NUM 0
225#define XCHAL_CP6_SA_CONTENTS_LIBDB /* empty */
226
227#define XCHAL_CP7_SA_CONTENTS_LIBDB_NUM 0
228#define XCHAL_CP7_SA_CONTENTS_LIBDB /* empty */
229
230
231
232
233
234
235/*----------------------------------------------------------------------
236 MISC
237 ----------------------------------------------------------------------*/
238
239#if 0 /* is there something equivalent for user TIE? */
240#define XCHAL_CORE_ID "linux_be" /* configuration's alphanumeric core identifier
241 (CoreID) set in the Xtensa Processor Generator */
242
243#define XCHAL_BUILD_UNIQUE_ID 0x00003256 /* software build-unique ID (22-bit) */
244
245/* These definitions describe the hardware targeted by this software: */
246#define XCHAL_HW_CONFIGID0 0xC103D1FF /* config ID reg 0 value (upper 32 of 64 bits) */
247#define XCHAL_HW_CONFIGID1 0x00803256 /* config ID reg 1 value (lower 32 of 64 bits) */
248#define XCHAL_CONFIGID0 XCHAL_HW_CONFIGID0 /* for backward compatibility only -- don't use! */
249#define XCHAL_CONFIGID1 XCHAL_HW_CONFIGID1 /* for backward compatibility only -- don't use! */
250#define XCHAL_HW_RELEASE_MAJOR 1050 /* major release of targeted hardware */
251#define XCHAL_HW_RELEASE_MINOR 1 /* minor release of targeted hardware */
252#define XCHAL_HW_RELEASE_NAME "T1050.1" /* full release name of targeted hardware */
253#define XTHAL_HW_REL_T1050 1
254#define XTHAL_HW_REL_T1050_1 1
255#define XCHAL_HW_CONFIGID_RELIABLE 1
256#endif /*0*/
257
258
259
260/*----------------------------------------------------------------------
261 ISA
262 ----------------------------------------------------------------------*/
263
264#if 0 /* these probably don't belong here, but are related to or implemented using TIE */
265#define XCHAL_HAVE_BOOLEANS 0 /* 1 if booleans option configured, 0 otherwise */
266/* Misc instructions: */
267#define XCHAL_HAVE_MUL32 0 /* 1 if 32-bit integer multiply option configured, 0 otherwise */
268#define XCHAL_HAVE_MUL32_HIGH 0 /* 1 if MUL32 option includes MULUH and MULSH, 0 otherwise */
269
270#define XCHAL_HAVE_FP 0 /* 1 if floating point option configured, 0 otherwise */
271#endif /*0*/
272
273
274#endif /*XTENSA_CONFIG_TIE_H*/
275
diff --git a/include/asm-xtensa/xtensa/coreasm.h b/include/asm-xtensa/xtensa/coreasm.h
deleted file mode 100644
index a8cfb54c20a1..000000000000
--- a/include/asm-xtensa/xtensa/coreasm.h
+++ /dev/null
@@ -1,526 +0,0 @@
1#ifndef XTENSA_COREASM_H
2#define XTENSA_COREASM_H
3
4/*
5 * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND
6 *
7 * include/asm-xtensa/xtensa/coreasm.h -- assembler-specific
8 * definitions that depend on CORE configuration.
9 *
10 * Source for configuration-independent binaries (which link in a
11 * configuration-specific HAL library) must NEVER include this file.
12 * It is perfectly normal, however, for the HAL itself to include this
13 * file.
14 *
15 * This file must NOT include xtensa/config/system.h. Any assembler
16 * header file that depends on system information should likely go in
17 * a new systemasm.h (or sysasm.h) header file.
18 *
19 * NOTE: macro beqi32 is NOT configuration-dependent, and is placed
20 * here til we will have configuration-independent header file.
21 *
22 * This file is subject to the terms and conditions of the GNU General
23 * Public License. See the file "COPYING" in the main directory of
24 * this archive for more details.
25 *
26 * Copyright (C) 2002 Tensilica Inc.
27 */
28
29
30#include <xtensa/config/core.h>
31#include <xtensa/config/specreg.h>
32
33/*
34 * Assembly-language specific definitions (assembly macros, etc.).
35 */
36
37/*----------------------------------------------------------------------
38 * find_ms_setbit
39 *
40 * This macro finds the most significant bit that is set in <as>
41 * and return its index + <base> in <ad>, or <base> - 1 if <as> is zero.
42 * The index counts starting at zero for the lsbit, so the return
43 * value ranges from <base>-1 (no bit set) to <base>+31 (msbit set).
44 *
45 * Parameters:
46 * <ad> destination address register (any register)
47 * <as> source address register
48 * <at> temporary address register (must be different than <as>)
49 * <base> constant value added to result (usually 0 or 1)
50 * On entry:
51 * <ad> = undefined if different than <as>
52 * <as> = value whose most significant set bit is to be found
53 * <at> = undefined
54 * no other registers are used by this macro.
55 * On exit:
56 * <ad> = <base> + index of msbit set in original <as>,
57 * = <base> - 1 if original <as> was zero.
58 * <as> clobbered (if not <ad>)
59 * <at> clobbered (if not <ad>)
60 * Example:
61 * find_ms_setbit a0, a4, a0, 0 -- return in a0 index of msbit set in a4
62 */
63
64 .macro find_ms_setbit ad, as, at, base
65#if XCHAL_HAVE_NSA
66 movi \at, 31+\base
67 nsau \as, \as // get index of \as, numbered from msbit (32 if absent)
68 sub \ad, \at, \as // get numbering from lsbit (0..31, -1 if absent)
69#else /* XCHAL_HAVE_NSA */
70 movi \at, \base // start with result of 0 (point to lsbit of 32)
71
72 beqz \as, 2f // special case for zero argument: return -1
73 bltui \as, 0x10000, 1f // is it one of the 16 lsbits? (if so, check lower 16 bits)
74 addi \at, \at, 16 // no, increment result to upper 16 bits (of 32)
75 //srli \as, \as, 16 // check upper half (shift right 16 bits)
76 extui \as, \as, 16, 16 // check upper half (shift right 16 bits)
771: bltui \as, 0x100, 1f // is it one of the 8 lsbits? (if so, check lower 8 bits)
78 addi \at, \at, 8 // no, increment result to upper 8 bits (of 16)
79 srli \as, \as, 8 // shift right to check upper 8 bits
801: bltui \as, 0x10, 1f // is it one of the 4 lsbits? (if so, check lower 4 bits)
81 addi \at, \at, 4 // no, increment result to upper 4 bits (of 8)
82 srli \as, \as, 4 // shift right 4 bits to check upper half
831: bltui \as, 0x4, 1f // is it one of the 2 lsbits? (if so, check lower 2 bits)
84 addi \at, \at, 2 // no, increment result to upper 2 bits (of 4)
85 srli \as, \as, 2 // shift right 2 bits to check upper half
861: bltui \as, 0x2, 1f // is it the lsbit?
87 addi \at, \at, 2 // no, increment result to upper bit (of 2)
882: addi \at, \at, -1 // (from just above: add 1; from beqz: return -1)
89 //srli \as, \as, 1
901: // done! \at contains index of msbit set (or -1 if none set)
91 .if 0x\ad - 0x\at // destination different than \at ? (works because regs are a0-a15)
92 mov \ad, \at // then move result to \ad
93 .endif
94#endif /* XCHAL_HAVE_NSA */
95 .endm // find_ms_setbit
96
97/*----------------------------------------------------------------------
98 * find_ls_setbit
99 *
100 * This macro finds the least significant bit that is set in <as>,
101 * and return its index in <ad>.
102 * Usage is the same as for the find_ms_setbit macro.
103 * Example:
104 * find_ls_setbit a0, a4, a0, 0 -- return in a0 index of lsbit set in a4
105 */
106
107 .macro find_ls_setbit ad, as, at, base
108 neg \at, \as // keep only the least-significant bit that is set...
109 and \as, \at, \as // ... in \as
110 find_ms_setbit \ad, \as, \at, \base
111 .endm // find_ls_setbit
112
113/*----------------------------------------------------------------------
114 * find_ls_one
115 *
116 * Same as find_ls_setbit with base zero.
117 * Source (as) and destination (ad) registers must be different.
118 * Provided for backward compatibility.
119 */
120
121 .macro find_ls_one ad, as
122 find_ls_setbit \ad, \as, \ad, 0
123 .endm // find_ls_one
124
125/*----------------------------------------------------------------------
126 * floop, floopnez, floopgtz, floopend
127 *
128 * These macros are used for fast inner loops that
129 * work whether or not the Loops options is configured.
130 * If the Loops option is configured, they simply use
131 * the zero-overhead LOOP instructions; otherwise
132 * they use explicit decrement and branch instructions.
133 *
134 * They are used in pairs, with floop, floopnez or floopgtz
135 * at the beginning of the loop, and floopend at the end.
136 *
137 * Each pair of loop macro calls must be given the loop count
138 * address register and a unique label for that loop.
139 *
140 * Example:
141 *
142 * movi a3, 16 // loop 16 times
143 * floop a3, myloop1
144 * :
145 * bnez a7, end1 // exit loop if a7 != 0
146 * :
147 * floopend a3, myloop1
148 * end1:
149 *
150 * Like the LOOP instructions, these macros cannot be
151 * nested, must include at least one instruction,
152 * cannot call functions inside the loop, etc.
153 * The loop can be exited by jumping to the instruction
154 * following floopend (or elsewhere outside the loop),
155 * or continued by jumping to a NOP instruction placed
156 * immediately before floopend.
157 *
158 * Unlike LOOP instructions, the register passed to floop*
159 * cannot be used inside the loop, because it is used as
160 * the loop counter if the Loops option is not configured.
161 * And its value is undefined after exiting the loop.
162 * And because the loop counter register is active inside
163 * the loop, you can't easily use this construct to loop
164 * across a register file using ROTW as you might with LOOP
165 * instructions, unless you copy the loop register along.
166 */
167
168 /* Named label version of the macros: */
169
170 .macro floop ar, endlabel
171 floop_ \ar, .Lfloopstart_\endlabel, .Lfloopend_\endlabel
172 .endm
173
174 .macro floopnez ar, endlabel
175 floopnez_ \ar, .Lfloopstart_\endlabel, .Lfloopend_\endlabel
176 .endm
177
178 .macro floopgtz ar, endlabel
179 floopgtz_ \ar, .Lfloopstart_\endlabel, .Lfloopend_\endlabel
180 .endm
181
182 .macro floopend ar, endlabel
183 floopend_ \ar, .Lfloopstart_\endlabel, .Lfloopend_\endlabel
184 .endm
185
186 /* Numbered local label version of the macros: */
187#if 0 /*UNTESTED*/
188 .macro floop89 ar
189 floop_ \ar, 8, 9f
190 .endm
191
192 .macro floopnez89 ar
193 floopnez_ \ar, 8, 9f
194 .endm
195
196 .macro floopgtz89 ar
197 floopgtz_ \ar, 8, 9f
198 .endm
199
200 .macro floopend89 ar
201 floopend_ \ar, 8b, 9
202 .endm
203#endif /*0*/
204
205 /* Underlying version of the macros: */
206
207 .macro floop_ ar, startlabel, endlabelref
208 .ifdef _infloop_
209 .if _infloop_
210 .err // Error: floop cannot be nested
211 .endif
212 .endif
213 .set _infloop_, 1
214#if XCHAL_HAVE_LOOPS
215 loop \ar, \endlabelref
216#else /* XCHAL_HAVE_LOOPS */
217\startlabel:
218 addi \ar, \ar, -1
219#endif /* XCHAL_HAVE_LOOPS */
220 .endm // floop_
221
222 .macro floopnez_ ar, startlabel, endlabelref
223 .ifdef _infloop_
224 .if _infloop_
225 .err // Error: floopnez cannot be nested
226 .endif
227 .endif
228 .set _infloop_, 1
229#if XCHAL_HAVE_LOOPS
230 loopnez \ar, \endlabelref
231#else /* XCHAL_HAVE_LOOPS */
232 beqz \ar, \endlabelref
233\startlabel:
234 addi \ar, \ar, -1
235#endif /* XCHAL_HAVE_LOOPS */
236 .endm // floopnez_
237
238 .macro floopgtz_ ar, startlabel, endlabelref
239 .ifdef _infloop_
240 .if _infloop_
241 .err // Error: floopgtz cannot be nested
242 .endif
243 .endif
244 .set _infloop_, 1
245#if XCHAL_HAVE_LOOPS
246 loopgtz \ar, \endlabelref
247#else /* XCHAL_HAVE_LOOPS */
248 bltz \ar, \endlabelref
249 beqz \ar, \endlabelref
250\startlabel:
251 addi \ar, \ar, -1
252#endif /* XCHAL_HAVE_LOOPS */
253 .endm // floopgtz_
254
255
256 .macro floopend_ ar, startlabelref, endlabel
257 .ifndef _infloop_
258 .err // Error: floopend without matching floopXXX
259 .endif
260 .ifeq _infloop_
261 .err // Error: floopend without matching floopXXX
262 .endif
263 .set _infloop_, 0
264#if ! XCHAL_HAVE_LOOPS
265 bnez \ar, \startlabelref
266#endif /* XCHAL_HAVE_LOOPS */
267\endlabel:
268 .endm // floopend_
269
270/*----------------------------------------------------------------------
271 * crsil -- conditional RSIL (read/set interrupt level)
272 *
273 * Executes the RSIL instruction if it exists, else just reads PS.
274 * The RSIL instruction does not exist in the new exception architecture
275 * if the interrupt option is not selected.
276 */
277
278 .macro crsil ar, newlevel
279#if XCHAL_HAVE_OLD_EXC_ARCH || XCHAL_HAVE_INTERRUPTS
280 rsil \ar, \newlevel
281#else
282 rsr \ar, PS
283#endif
284 .endm // crsil
285
286/*----------------------------------------------------------------------
287 * window_spill{4,8,12}
288 *
289 * These macros spill callers' register windows to the stack.
290 * They work for both privileged and non-privileged tasks.
291 * Must be called from a windowed ABI context, eg. within
292 * a windowed ABI function (ie. valid stack frame, window
293 * exceptions enabled, not in exception mode, etc).
294 *
295 * This macro requires a single invocation of the window_spill_common
296 * macro in the same assembly unit and section.
297 *
298 * Note that using window_spill{4,8,12} macros is more efficient
299 * than calling a function implemented using window_spill_function,
300 * because the latter needs extra code to figure out the size of
301 * the call to the spilling function.
302 *
303 * Example usage:
304 *
305 * .text
306 * .align 4
307 * .global some_function
308 * .type some_function,@function
309 * some_function:
310 * entry a1, 16
311 * :
312 * :
313 *
314 * window_spill4 // spill windows of some_function's callers; preserves a0..a3 only;
315 * // to use window_spill{8,12} in this example function we'd have
316 * // to increase space allocated by the entry instruction, because
317 * // 16 bytes only allows call4; 32 or 48 bytes (+locals) are needed
318 * // for call8/window_spill8 or call12/window_spill12 respectively.
319 * :
320 *
321 * retw
322 *
323 * window_spill_common // instantiates code used by window_spill4
324 *
325 *
326 * On entry:
327 * none (if window_spill4)
328 * stack frame has enough space allocated for call8 (if window_spill8)
329 * stack frame has enough space allocated for call12 (if window_spill12)
330 * On exit:
331 * a4..a15 clobbered (if window_spill4)
332 * a8..a15 clobbered (if window_spill8)
333 * a12..a15 clobbered (if window_spill12)
334 * no caller windows are in live registers
335 */
336
337 .macro window_spill4
338#if XCHAL_HAVE_WINDOWED
339# if XCHAL_NUM_AREGS == 16
340 movi a15, 0 // for 16-register files, no need to call to reach the end
341# elif XCHAL_NUM_AREGS == 32
342 call4 .L__wdwspill_assist28 // call deep enough to clear out any live callers
343# elif XCHAL_NUM_AREGS == 64
344 call4 .L__wdwspill_assist60 // call deep enough to clear out any live callers
345# endif
346#endif
347 .endm // window_spill4
348
349 .macro window_spill8
350#if XCHAL_HAVE_WINDOWED
351# if XCHAL_NUM_AREGS == 16
352 movi a15, 0 // for 16-register files, no need to call to reach the end
353# elif XCHAL_NUM_AREGS == 32
354 call8 .L__wdwspill_assist24 // call deep enough to clear out any live callers
355# elif XCHAL_NUM_AREGS == 64
356 call8 .L__wdwspill_assist56 // call deep enough to clear out any live callers
357# endif
358#endif
359 .endm // window_spill8
360
361 .macro window_spill12
362#if XCHAL_HAVE_WINDOWED
363# if XCHAL_NUM_AREGS == 16
364 movi a15, 0 // for 16-register files, no need to call to reach the end
365# elif XCHAL_NUM_AREGS == 32
366 call12 .L__wdwspill_assist20 // call deep enough to clear out any live callers
367# elif XCHAL_NUM_AREGS == 64
368 call12 .L__wdwspill_assist52 // call deep enough to clear out any live callers
369# endif
370#endif
371 .endm // window_spill12
372
373/*----------------------------------------------------------------------
374 * window_spill_function
375 *
376 * This macro outputs a function that will spill its caller's callers'
377 * register windows to the stack. Eg. it could be used to implement
378 * a version of xthal_window_spill() that works in non-privileged tasks.
379 * This works for both privileged and non-privileged tasks.
380 *
381 * Typical usage:
382 *
383 * .text
384 * .align 4
385 * .global my_spill_function
386 * .type my_spill_function,@function
387 * my_spill_function:
388 * window_spill_function
389 *
390 * On entry to resulting function:
391 * none
392 * On exit from resulting function:
393 * none (no caller windows are in live registers)
394 */
395
396 .macro window_spill_function
397#if XCHAL_HAVE_WINDOWED
398# if XCHAL_NUM_AREGS == 32
399 entry sp, 48
400 bbci.l a0, 31, 1f // branch if called with call4
401 bbsi.l a0, 30, 2f // branch if called with call12
402 call8 .L__wdwspill_assist16 // called with call8, only need another 8
403 retw
4041: call12 .L__wdwspill_assist16 // called with call4, only need another 12
405 retw
4062: call4 .L__wdwspill_assist16 // called with call12, only need another 4
407 retw
408# elif XCHAL_NUM_AREGS == 64
409 entry sp, 48
410 bbci.l a0, 31, 1f // branch if called with call4
411 bbsi.l a0, 30, 2f // branch if called with call12
412 call4 .L__wdwspill_assist52 // called with call8, only need a call4
413 retw
4141: call8 .L__wdwspill_assist52 // called with call4, only need a call8
415 retw
4162: call12 .L__wdwspill_assist40 // called with call12, can skip a call12
417 retw
418# elif XCHAL_NUM_AREGS == 16
419 entry sp, 16
420 bbci.l a0, 31, 1f // branch if called with call4
421 bbsi.l a0, 30, 2f // branch if called with call12
422 movi a7, 0 // called with call8
423 retw
4241: movi a11, 0 // called with call4
4252: retw // if called with call12, everything already spilled
426
427// movi a15, 0 // trick to spill all but the direct caller
428// j 1f
429// // The entry instruction is magical in the assembler (gets auto-aligned)
430// // so we have to jump to it to avoid falling through the padding.
431// // We need entry/retw to know where to return.
432//1: entry sp, 16
433// retw
434# else
435# error "unrecognized address register file size"
436# endif
437#endif /* XCHAL_HAVE_WINDOWED */
438 window_spill_common
439 .endm // window_spill_function
440
441/*----------------------------------------------------------------------
442 * window_spill_common
443 *
444 * Common code used by any number of invocations of the window_spill##
445 * and window_spill_function macros.
446 *
447 * Must be instantiated exactly once within a given assembly unit,
448 * within call/j range of and same section as window_spill##
449 * macro invocations for that assembly unit.
450 * (Is automatically instantiated by the window_spill_function macro.)
451 */
452
453 .macro window_spill_common
454#if XCHAL_HAVE_WINDOWED && (XCHAL_NUM_AREGS == 32 || XCHAL_NUM_AREGS == 64)
455 .ifndef .L__wdwspill_defined
456# if XCHAL_NUM_AREGS >= 64
457.L__wdwspill_assist60:
458 entry sp, 32
459 call8 .L__wdwspill_assist52
460 retw
461.L__wdwspill_assist56:
462 entry sp, 16
463 call4 .L__wdwspill_assist52
464 retw
465.L__wdwspill_assist52:
466 entry sp, 48
467 call12 .L__wdwspill_assist40
468 retw
469.L__wdwspill_assist40:
470 entry sp, 48
471 call12 .L__wdwspill_assist28
472 retw
473# endif
474.L__wdwspill_assist28:
475 entry sp, 48
476 call12 .L__wdwspill_assist16
477 retw
478.L__wdwspill_assist24:
479 entry sp, 32
480 call8 .L__wdwspill_assist16
481 retw
482.L__wdwspill_assist20:
483 entry sp, 16
484 call4 .L__wdwspill_assist16
485 retw
486.L__wdwspill_assist16:
487 entry sp, 16
488 movi a15, 0
489 retw
490 .set .L__wdwspill_defined, 1
491 .endif
492#endif /* XCHAL_HAVE_WINDOWED with 32 or 64 aregs */
493 .endm // window_spill_common
494
495/*----------------------------------------------------------------------
496 * beqi32
497 *
498 * macro implements version of beqi for arbitrary 32-bit immidiate value
499 *
500 * beqi32 ax, ay, imm32, label
501 *
502 * Compares value in register ax with imm32 value and jumps to label if
503 * equal. Clobberes register ay if needed
504 *
505 */
506 .macro beqi32 ax, ay, imm, label
507 .ifeq ((\imm-1) & ~7) // 1..8 ?
508 beqi \ax, \imm, \label
509 .else
510 .ifeq (\imm+1) // -1 ?
511 beqi \ax, \imm, \label
512 .else
513 .ifeq (\imm) // 0 ?
514 beqz \ax, \label
515 .else
516 // We could also handle immediates 10,12,16,32,64,128,256
517 // but it would be a long macro...
518 movi \ay, \imm
519 beq \ax, \ay, \label
520 .endif
521 .endif
522 .endif
523 .endm // beqi32
524
525#endif /*XTENSA_COREASM_H*/
526
diff --git a/include/asm-xtensa/xtensa/corebits.h b/include/asm-xtensa/xtensa/corebits.h
deleted file mode 100644
index e578ade41632..000000000000
--- a/include/asm-xtensa/xtensa/corebits.h
+++ /dev/null
@@ -1,77 +0,0 @@
1#ifndef XTENSA_COREBITS_H
2#define XTENSA_COREBITS_H
3
4/*
5 * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND
6 *
7 * xtensa/corebits.h - Xtensa Special Register field positions and masks.
8 *
9 * (In previous releases, these were defined in specreg.h, a generated file.
10 * This file is not generated, i.e. it is processor configuration independent.)
11 */
12
13
14/* EXCCAUSE register fields: */
15#define EXCCAUSE_EXCCAUSE_SHIFT 0
16#define EXCCAUSE_EXCCAUSE_MASK 0x3F
17/* Exception causes (mostly incomplete!): */
18#define EXCCAUSE_ILLEGAL 0
19#define EXCCAUSE_SYSCALL 1
20#define EXCCAUSE_IFETCHERROR 2
21#define EXCCAUSE_LOADSTOREERROR 3
22#define EXCCAUSE_LEVEL1INTERRUPT 4
23#define EXCCAUSE_ALLOCA 5
24
25/* PS register fields: */
26#define PS_WOE_SHIFT 18
27#define PS_WOE_MASK 0x00040000
28#define PS_WOE PS_WOE_MASK
29#define PS_CALLINC_SHIFT 16
30#define PS_CALLINC_MASK 0x00030000
31#define PS_CALLINC(n) (((n)&3)<<PS_CALLINC_SHIFT) /* n = 0..3 */
32#define PS_OWB_SHIFT 8
33#define PS_OWB_MASK 0x00000F00
34#define PS_OWB(n) (((n)&15)<<PS_OWB_SHIFT) /* n = 0..15 (or 0..7) */
35#define PS_RING_SHIFT 6
36#define PS_RING_MASK 0x000000C0
37#define PS_RING(n) (((n)&3)<<PS_RING_SHIFT) /* n = 0..3 */
38#define PS_UM_SHIFT 5
39#define PS_UM_MASK 0x00000020
40#define PS_UM PS_UM_MASK
41#define PS_EXCM_SHIFT 4
42#define PS_EXCM_MASK 0x00000010
43#define PS_EXCM PS_EXCM_MASK
44#define PS_INTLEVEL_SHIFT 0
45#define PS_INTLEVEL_MASK 0x0000000F
46#define PS_INTLEVEL(n) ((n)&PS_INTLEVEL_MASK) /* n = 0..15 */
47/* Backward compatibility (deprecated): */
48#define PS_PROGSTACK_SHIFT PS_UM_SHIFT
49#define PS_PROGSTACK_MASK PS_UM_MASK
50#define PS_PROG_SHIFT PS_UM_SHIFT
51#define PS_PROG_MASK PS_UM_MASK
52#define PS_PROG PS_UM
53
54/* DBREAKCn register fields: */
55#define DBREAKC_MASK_SHIFT 0
56#define DBREAKC_MASK_MASK 0x0000003F
57#define DBREAKC_LOADBREAK_SHIFT 30
58#define DBREAKC_LOADBREAK_MASK 0x40000000
59#define DBREAKC_STOREBREAK_SHIFT 31
60#define DBREAKC_STOREBREAK_MASK 0x80000000
61
62/* DEBUGCAUSE register fields: */
63#define DEBUGCAUSE_DEBUGINT_SHIFT 5
64#define DEBUGCAUSE_DEBUGINT_MASK 0x20 /* debug interrupt */
65#define DEBUGCAUSE_BREAKN_SHIFT 4
66#define DEBUGCAUSE_BREAKN_MASK 0x10 /* BREAK.N instruction */
67#define DEBUGCAUSE_BREAK_SHIFT 3
68#define DEBUGCAUSE_BREAK_MASK 0x08 /* BREAK instruction */
69#define DEBUGCAUSE_DBREAK_SHIFT 2
70#define DEBUGCAUSE_DBREAK_MASK 0x04 /* DBREAK match */
71#define DEBUGCAUSE_IBREAK_SHIFT 1
72#define DEBUGCAUSE_IBREAK_MASK 0x02 /* IBREAK match */
73#define DEBUGCAUSE_ICOUNT_SHIFT 0
74#define DEBUGCAUSE_ICOUNT_MASK 0x01 /* ICOUNT would increment to zero */
75
76#endif /*XTENSA_COREBITS_H*/
77
diff --git a/include/asm-xtensa/xtensa/hal.h b/include/asm-xtensa/xtensa/hal.h
deleted file mode 100644
index d10472505454..000000000000
--- a/include/asm-xtensa/xtensa/hal.h
+++ /dev/null
@@ -1,822 +0,0 @@
1#ifndef XTENSA_HAL_H
2#define XTENSA_HAL_H
3
4/*
5 * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND
6 *
7 * include/asm-xtensa/xtensa/hal.h -- contains a definition of the
8 * Core HAL interface.
9 *
10 * All definitions in this header file are independent of any specific
11 * Xtensa processor configuration. Thus an OS or other software can
12 * include this header file and be compiled into configuration-
13 * independent objects that can be distributed and eventually linked
14 * to the HAL library (libhal.a) to create a configuration-specific
15 * final executable.
16 *
17 * Certain definitions, however, are release-specific -- such as the
18 * XTHAL_RELEASE_xxx macros (or additions made in later releases).
19 *
20 * This file is subject to the terms and conditions of the GNU General Public
21 * License. See the file "COPYING" in the main directory of this archive
22 * for more details.
23 *
24 * Copyright (C) 2002 Tensilica Inc.
25 */
26
27
28/*----------------------------------------------------------------------
29 Constant Definitions
30 (shared with assembly)
31 ----------------------------------------------------------------------*/
32
33/* Software release information (not configuration-specific!): */
34#define XTHAL_RELEASE_MAJOR 1050
35#define XTHAL_RELEASE_MINOR 0
36#define XTHAL_RELEASE_NAME "T1050.0-2002-08-06-eng0"
37#define XTHAL_RELEASE_INTERNAL "2002-08-06-eng0"
38#define XTHAL_REL_T1050 1
39#define XTHAL_REL_T1050_0 1
40#define XTHAL_REL_T1050_0_2002 1
41#define XTHAL_REL_T1050_0_2002_08 1
42#define XTHAL_REL_T1050_0_2002_08_06 1
43#define XTHAL_REL_T1050_0_2002_08_06_ENG0 1
44
45/* HAL version numbers (these names are for backward compatibility): */
46#define XTHAL_MAJOR_REV XTHAL_RELEASE_MAJOR
47#define XTHAL_MINOR_REV XTHAL_RELEASE_MINOR
48/*
49 * A bit of software release history on values of XTHAL_{MAJOR,MINOR}_REV:
50 *
51 * Release MAJOR MINOR Comment
52 * ======= ===== ===== =======
53 * T1015.n n/a n/a (HAL not yet available)
54 * T1020.{0,1,2} 0 1 (HAL beta)
55 * T1020.{3,4} 0 2 First release.
56 * T1020.n (n>4) 0 2 or >3 (TBD)
57 * T1030.0 0 1 (HAL beta)
58 * T1030.{1,2} 0 3 Equivalent to first release.
59 * T1030.n (n>=3) 0 >= 3 (TBD)
60 * T1040.n 1040 n Full CHAL available from T1040.2
61 * T1050.n 1050 n Current release.
62 *
63 *
64 * Note: there is a distinction between the software release with
65 * which something is compiled (accessible using XTHAL_RELEASE_* macros)
66 * and the software release with which the HAL library was compiled
67 * (accessible using Xthal_release_* global variables). This
68 * distinction is particularly relevant for vendors that distribute
69 * configuration-independent binaries (eg. an OS), where their customer
70 * might link it with a HAL of a different Xtensa software release.
71 * In this case, it may be appropriate for the OS to verify at run-time
72 * whether XTHAL_RELEASE_* and Xthal_release_* are compatible.
73 * [Guidelines as to which release is compatible with which are not
74 * currently provided explicitly, but might be inferred from reading
75 * OSKit documentation for all releases -- compatibility is also highly
76 * dependent on which HAL features are used. Each release is usually
77 * backward compatible, with very few exceptions if any.]
78 *
79 * Notes:
80 * Tornado 2.0 supported in T1020.3+, T1030.1+, and T1040.{0,1} only.
81 * Tornado 2.0.2 supported in T1040.2+, and T1050.
82 * Compile-time HAL port of NucleusPlus supported by T1040.2+ and T1050.
83 */
84
85
86/*
87 * Architectural limits, independent of configuration.
88 * Note that these are ISA-defined limits, not micro-architecture implementation
89 * limits enforced by the Xtensa Processor Generator (which may be stricter than
90 * these below).
91 */
92#define XTHAL_MAX_CPS 8 /* max number of coprocessors (0..7) */
93#define XTHAL_MAX_INTERRUPTS 32 /* max number of interrupts (0..31) */
94#define XTHAL_MAX_INTLEVELS 16 /* max number of interrupt levels (0..15) */
95 /* (as of T1040, implementation limit is 7: 0..6) */
96#define XTHAL_MAX_TIMERS 4 /* max number of timers (CCOMPARE0..CCOMPARE3) */
97 /* (as of T1040, implementation limit is 3: 0..2) */
98
99/* Misc: */
100#define XTHAL_LITTLEENDIAN 0
101#define XTHAL_BIGENDIAN 1
102
103
104/* Interrupt types: */
105#define XTHAL_INTTYPE_UNCONFIGURED 0
106#define XTHAL_INTTYPE_SOFTWARE 1
107#define XTHAL_INTTYPE_EXTERN_EDGE 2
108#define XTHAL_INTTYPE_EXTERN_LEVEL 3
109#define XTHAL_INTTYPE_TIMER 4
110#define XTHAL_INTTYPE_NMI 5
111#define XTHAL_MAX_INTTYPES 6 /* number of interrupt types */
112
113/* Timer related: */
114#define XTHAL_TIMER_UNCONFIGURED -1 /* Xthal_timer_interrupt[] value for non-existent timers */
115#define XTHAL_TIMER_UNASSIGNED XTHAL_TIMER_UNCONFIGURED /* (for backwards compatibility only) */
116
117
118/* Access Mode bits (tentative): */ /* bit abbr unit short_name PPC equ - Description */
119#define XTHAL_AMB_EXCEPTION 0 /* 001 E EX fls: EXception none - generate exception on any access (aka "illegal") */
120#define XTHAL_AMB_HITCACHE 1 /* 002 C CH fls: use Cache on Hit ~(I CI) - use cache on hit -- way from tag match [or H HC, or U UC] (ISA: same, except for Isolate case) */
121#define XTHAL_AMB_ALLOCATE 2 /* 004 A AL fl?: ALlocate none - refill cache on miss -- way from LRU [or F FI fill] (ISA: Read/Write Miss Refill) */
122#define XTHAL_AMB_WRITETHRU 3 /* 008 W WT --s: WriteThrough W WT - store immediately to memory (ISA: same) */
123#define XTHAL_AMB_ISOLATE 4 /* 010 I IS fls: ISolate none - use cache regardless of hit-vs-miss -- way from vaddr (ISA: use-cache-on-miss+hit) */
124#define XTHAL_AMB_GUARD 5 /* 020 G GU ?l?: GUard G * - non-speculative; spec/replay refs not permitted */
125#if 0
126#define XTHAL_AMB_ORDERED x /* 000 O OR fls: ORdered G * - mem accesses cannot be out of order */
127#define XTHAL_AMB_FUSEWRITES x /* 000 F FW --s: FuseWrites none - allow combining/merging multiple writes (to same datapath data unit) into one (implied by writeback) */
128#define XTHAL_AMB_COHERENT x /* 000 M MC fl?: Mem/MP Coherent M - on reads, other CPUs/bus-masters may need to supply data */
129#define XTHAL_AMB_TRUSTED x /* 000 T TR ?l?: TRusted none - memory will not bus error (if it does, handle as fatal imprecise interrupt) */
130#define XTHAL_AMB_PREFETCH x /* 000 P PR fl?: PRefetch none - on refill, read line+1 into prefetch buffers */
131#define XTHAL_AMB_STREAM x /* 000 S ST ???: STreaming none - access one of N stream buffers */
132#endif /*0*/
133
134#define XTHAL_AM_EXCEPTION (1<<XTHAL_AMB_EXCEPTION)
135#define XTHAL_AM_HITCACHE (1<<XTHAL_AMB_HITCACHE)
136#define XTHAL_AM_ALLOCATE (1<<XTHAL_AMB_ALLOCATE)
137#define XTHAL_AM_WRITETHRU (1<<XTHAL_AMB_WRITETHRU)
138#define XTHAL_AM_ISOLATE (1<<XTHAL_AMB_ISOLATE)
139#define XTHAL_AM_GUARD (1<<XTHAL_AMB_GUARD)
140#if 0
141#define XTHAL_AM_ORDERED (1<<XTHAL_AMB_ORDERED)
142#define XTHAL_AM_FUSEWRITES (1<<XTHAL_AMB_FUSEWRITES)
143#define XTHAL_AM_COHERENT (1<<XTHAL_AMB_COHERENT)
144#define XTHAL_AM_TRUSTED (1<<XTHAL_AMB_TRUSTED)
145#define XTHAL_AM_PREFETCH (1<<XTHAL_AMB_PREFETCH)
146#define XTHAL_AM_STREAM (1<<XTHAL_AMB_STREAM)
147#endif /*0*/
148
149/*
150 * Allowed Access Modes (bit combinations).
151 *
152 * Columns are:
153 * "FOGIWACE"
154 * Access mode bits (see XTHAL_AMB_xxx above).
155 * <letter> = bit is set
156 * '-' = bit is clear
157 * '.' = bit is irrelevant / don't care, as follows:
158 * E=1 makes all others irrelevant
159 * W,F relevant only for stores
160 * "2345"
161 * Indicates which Xtensa releases support the corresponding
162 * access mode. Releases for each character column are:
163 * 2 = prior to T1020.2: T1015 (V1.5), T1020.0, T1020.1
164 * 3 = T1020.2 and later: T1020.2+, T1030
165 * 4 = T1040
166 * 5 = T1050 (maybe)
167 * And the character column contents are:
168 * <number> = support by release(s)
169 * "." = unsupported by release(s)
170 * "?" = support unknown
171 */
172 /* FOGIWACE 2345 */
173/* For instruction fetch: */
174#define XTHAL_FAM_EXCEPTION 0x001 /* .......E 2345 exception */
175#define XTHAL_FAM_ISOLATE 0x012 /* .--I.-C- .... isolate */
176#define XTHAL_FAM_BYPASS 0x000 /* .---.--- 2345 bypass */
177#define XTHAL_FAM_NACACHED 0x002 /* .---.-C- .... cached no-allocate (frozen) */
178#define XTHAL_FAM_CACHED 0x006 /* .---.AC- 2345 cached */
179/* For data load: */
180#define XTHAL_LAM_EXCEPTION 0x001 /* .......E 2345 exception */
181#define XTHAL_LAM_ISOLATE 0x012 /* .--I.-C- 2345 isolate */
182#define XTHAL_LAM_BYPASS 0x000 /* .O--.--- 2... bypass speculative */
183#define XTHAL_LAM_BYPASSG 0x020 /* .OG-.--- .345 bypass guarded */
184#define XTHAL_LAM_NACACHED 0x002 /* .O--.-C- 2... cached no-allocate speculative */
185#define XTHAL_LAM_NACACHEDG 0x022 /* .OG-.-C- .345 cached no-allocate guarded */
186#define XTHAL_LAM_CACHED 0x006 /* .---.AC- 2345 cached speculative */
187#define XTHAL_LAM_CACHEDG 0x026 /* .?G-.AC- .... cached guarded */
188/* For data store: */
189#define XTHAL_SAM_EXCEPTION 0x001 /* .......E 2345 exception */
190#define XTHAL_SAM_ISOLATE 0x032 /* .-GI--C- 2345 isolate */
191#define XTHAL_SAM_BYPASS 0x028 /* -OG-W--- 2345 bypass */
192/*efine XTHAL_SAM_BYPASSF 0x028*/ /* F-G-W--- ...? bypass write-combined */
193#define XTHAL_SAM_WRITETHRU 0x02A /* -OG-W-C- 234? writethrough */
194/*efine XTHAL_SAM_WRITETHRUF 0x02A*/ /* F-G-W-C- ...5 writethrough write-combined */
195#define XTHAL_SAM_WRITEALLOC 0x02E /* -OG-WAC- ...? writethrough-allocate */
196/*efine XTHAL_SAM_WRITEALLOCF 0x02E*/ /* F-G-WAC- ...? writethrough-allocate write-combined */
197#define XTHAL_SAM_WRITEBACK 0x026 /* F-G--AC- ...5 writeback */
198
199#if 0
200/*
201 Cache attribute encoding for CACHEATTR (per ISA):
202 (Note: if this differs from ISA Ref Manual, ISA has precedence)
203
204 Inst-fetches Loads Stores
205 ------------- ------------ -------------
2060x0 FCA_EXCEPTION ?LCA_NACACHED_G* SCA_WRITETHRU "uncached"
2070x1 FCA_CACHED LCA_CACHED SCA_WRITETHRU cached
2080x2 FCA_BYPASS LCA_BYPASS_G* SCA_BYPASS bypass
2090x3 FCA_CACHED LCA_CACHED SCA_WRITEALLOCF write-allocate
210 or LCA_EXCEPTION SCA_EXCEPTION (if unimplemented)
2110x4 FCA_CACHED LCA_CACHED SCA_WRITEBACK write-back
212 or LCA_EXCEPTION SCA_EXCEPTION (if unimplemented)
2130x5..D FCA_EXCEPTION LCA_EXCEPTION SCA_EXCEPTION (reserved)
2140xE FCA_EXCEPTION LCA_ISOLATE SCA_ISOLATE isolate
2150xF FCA_EXCEPTION LCA_EXCEPTION SCA_EXCEPTION illegal
216 * Prior to T1020.2?, guard feature not supported, this defaulted to speculative (no _G)
217*/
218#endif /*0*/
219
220
221#if !defined(__ASSEMBLY__) && !defined(_NOCLANGUAGE)
222#ifdef __cplusplus
223extern "C" {
224#endif
225
226/*----------------------------------------------------------------------
227 HAL
228 ----------------------------------------------------------------------*/
229
230/* Constant to be checked in build = (XTHAL_MAJOR_REV<<16)|XTHAL_MINOR_REV */
231extern const unsigned int Xthal_rev_no;
232
233
234/*----------------------------------------------------------------------
235 Processor State
236 ----------------------------------------------------------------------*/
237/* save & restore the extra processor state */
238extern void xthal_save_extra(void *base);
239extern void xthal_restore_extra(void *base);
240
241extern void xthal_save_cpregs(void *base, int);
242extern void xthal_restore_cpregs(void *base, int);
243
244/*extern void xthal_save_all_extra(void *base);*/
245/*extern void xthal_restore_all_extra(void *base);*/
246
247/* space for processor state */
248extern const unsigned int Xthal_extra_size;
249extern const unsigned int Xthal_extra_align;
250/* space for TIE register files */
251extern const unsigned int Xthal_cpregs_size[XTHAL_MAX_CPS];
252extern const unsigned int Xthal_cpregs_align[XTHAL_MAX_CPS];
253
 254/* total space for the processor state (for Tor2) */
255extern const unsigned int Xthal_all_extra_size;
256extern const unsigned int Xthal_all_extra_align;
257
258/* initialize the extra processor */
259/*extern void xthal_init_extra(void);*/
260/* initialize the TIE coprocessor */
261/*extern void xthal_init_cp(int);*/
262
263/* initialize the extra processor */
264extern void xthal_init_mem_extra(void *);
265/* initialize the TIE coprocessor */
266extern void xthal_init_mem_cp(void *, int);
267
268/* validate & invalidate the TIE register file */
269extern void xthal_validate_cp(int);
270extern void xthal_invalidate_cp(int);
271
272/* the number of TIE coprocessors contiguous from zero (for Tor2) */
273extern const unsigned int Xthal_num_coprocessors;
274
275/* actual number of coprocessors */
276extern const unsigned char Xthal_cp_num;
277/* index of highest numbered coprocessor, plus one */
278extern const unsigned char Xthal_cp_max;
279/* index of highest allowed coprocessor number, per cfg, plus one */
280/*extern const unsigned char Xthal_cp_maxcfg;*/
281/* bitmask of which coprocessors are present */
282extern const unsigned int Xthal_cp_mask;
283
284/* read and write cpenable register */
285extern void xthal_set_cpenable(unsigned);
286extern unsigned xthal_get_cpenable(void);
287
288/* read & write extra state register */
289/*extern int xthal_read_extra(void *base, unsigned reg, unsigned *value);*/
290/*extern int xthal_write_extra(void *base, unsigned reg, unsigned value);*/
291
292/* read & write a TIE coprocessor register */
293/*extern int xthal_read_cpreg(void *base, int cp, unsigned reg, unsigned *value);*/
294/*extern int xthal_write_cpreg(void *base, int cp, unsigned reg, unsigned value);*/
295
296/* return coprocessor number based on register */
297/*extern int xthal_which_cp(unsigned reg);*/
298
299/*----------------------------------------------------------------------
300 Interrupts
301 ----------------------------------------------------------------------*/
302
303/* the number of interrupt levels */
304extern const unsigned char Xthal_num_intlevels;
305/* the number of interrupts */
306extern const unsigned char Xthal_num_interrupts;
307
308/* mask for level of interrupts */
309extern const unsigned int Xthal_intlevel_mask[XTHAL_MAX_INTLEVELS];
310/* mask for level 0 to N interrupts */
311extern const unsigned int Xthal_intlevel_andbelow_mask[XTHAL_MAX_INTLEVELS];
312
313/* level of each interrupt */
314extern const unsigned char Xthal_intlevel[XTHAL_MAX_INTERRUPTS];
315
316/* type per interrupt */
317extern const unsigned char Xthal_inttype[XTHAL_MAX_INTERRUPTS];
318
319/* masks of each type of interrupt */
320extern const unsigned int Xthal_inttype_mask[XTHAL_MAX_INTTYPES];
321
322/* interrupt numbers assigned to each timer interrupt */
323extern const int Xthal_timer_interrupt[XTHAL_MAX_TIMERS];
324
325/*** Virtual interrupt prioritization: ***/
326
327/* Convert between interrupt levels (as per PS.INTLEVEL) and virtual interrupt priorities: */
328extern unsigned xthal_vpri_to_intlevel(unsigned vpri);
329extern unsigned xthal_intlevel_to_vpri(unsigned intlevel);
330
331/* Enables/disables given set (mask) of interrupts; returns previous enabled-mask of all ints: */
332extern unsigned xthal_int_enable(unsigned);
333extern unsigned xthal_int_disable(unsigned);
334
335/* Set/get virtual priority of an interrupt: */
336extern int xthal_set_int_vpri(int intnum, int vpri);
337extern int xthal_get_int_vpri(int intnum);
338
339/* Set/get interrupt lockout level for exclusive access to virtual priority data structures: */
340extern void xthal_set_vpri_locklevel(unsigned intlevel);
341extern unsigned xthal_get_vpri_locklevel(void);
342
343/* Set/get current virtual interrupt priority: */
344extern unsigned xthal_set_vpri(unsigned vpri);
345extern unsigned xthal_get_vpri(unsigned vpri);
346extern unsigned xthal_set_vpri_intlevel(unsigned intlevel);
347extern unsigned xthal_set_vpri_lock(void);
348
349
350
351/*----------------------------------------------------------------------
352 Generic Interrupt Trampolining Support
353 ----------------------------------------------------------------------*/
354
355typedef void (XtHalVoidFunc)(void);
356
357/*
358 * Bitmask of interrupts currently trampolining down:
359 */
360extern unsigned Xthal_tram_pending;
361
362/*
363 * Bitmask of which interrupts currently trampolining down
364 * synchronously are actually enabled; this bitmask is necessary
365 * because INTENABLE cannot hold that state (sync-trampolining
366 * interrupts must be kept disabled while trampolining);
367 * in the current implementation, any bit set here is not set
368 * in INTENABLE, and vice-versa; once a sync-trampoline is
369 * handled (at level one), its enable bit must be moved from
370 * here to INTENABLE:
371 */
372extern unsigned Xthal_tram_enabled;
373
374/*
375 * Bitmask of interrupts configured for sync trampolining:
376 */
377extern unsigned Xthal_tram_sync;
378
379
380/* Trampoline support functions: */
381extern unsigned xthal_tram_pending_to_service( void );
382extern void xthal_tram_done( unsigned serviced_mask );
383extern int xthal_tram_set_sync( int intnum, int sync );
384extern XtHalVoidFunc* xthal_set_tram_trigger_func( XtHalVoidFunc *trigger_fn );
385
386/* INTENABLE,INTREAD,INTSET,INTCLEAR register access functions: */
387extern unsigned xthal_get_intenable( void );
388extern void xthal_set_intenable( unsigned );
389extern unsigned xthal_get_intread( void );
390extern void xthal_set_intset( unsigned );
391extern void xthal_set_intclear( unsigned );
392
393
394/*----------------------------------------------------------------------
395 Register Windows
396 ----------------------------------------------------------------------*/
397
398/* number of registers in register window */
399extern const unsigned int Xthal_num_aregs;
400extern const unsigned char Xthal_num_aregs_log2;
401
 402/* This spills any live register windows (other than the caller's): */
403extern void xthal_window_spill( void );
404
405
406/*----------------------------------------------------------------------
407 Cache
408 ----------------------------------------------------------------------*/
409
410/* size of the cache lines in log2(bytes) */
411extern const unsigned char Xthal_icache_linewidth;
412extern const unsigned char Xthal_dcache_linewidth;
413/* size of the cache lines in bytes */
414extern const unsigned short Xthal_icache_linesize;
415extern const unsigned short Xthal_dcache_linesize;
416/* number of cache sets in log2(lines per way) */
417extern const unsigned char Xthal_icache_setwidth;
418extern const unsigned char Xthal_dcache_setwidth;
419/* cache set associativity (number of ways) */
420extern const unsigned int Xthal_icache_ways;
421extern const unsigned int Xthal_dcache_ways;
422/* size of the caches in bytes (ways * 2^(linewidth + setwidth)) */
423extern const unsigned int Xthal_icache_size;
424extern const unsigned int Xthal_dcache_size;
425/* cache features */
426extern const unsigned char Xthal_dcache_is_writeback;
427extern const unsigned char Xthal_icache_line_lockable;
428extern const unsigned char Xthal_dcache_line_lockable;
429
430/* cache attribute register control (used by other HAL routines) */
431extern unsigned xthal_get_cacheattr( void );
432extern unsigned xthal_get_icacheattr( void );
433extern unsigned xthal_get_dcacheattr( void );
434extern void xthal_set_cacheattr( unsigned );
435extern void xthal_set_icacheattr( unsigned );
436extern void xthal_set_dcacheattr( unsigned );
437
438/* initialize cache support (must be called once at startup, before all other cache calls) */
439/*extern void xthal_cache_startinit( void );*/
440/* reset caches */
441/*extern void xthal_icache_reset( void );*/
442/*extern void xthal_dcache_reset( void );*/
443/* enable caches */
444extern void xthal_icache_enable( void ); /* DEPRECATED */
445extern void xthal_dcache_enable( void ); /* DEPRECATED */
446/* disable caches */
447extern void xthal_icache_disable( void ); /* DEPRECATED */
448extern void xthal_dcache_disable( void ); /* DEPRECATED */
449
450/* invalidate the caches */
451extern void xthal_icache_all_invalidate( void );
452extern void xthal_dcache_all_invalidate( void );
453extern void xthal_icache_region_invalidate( void *addr, unsigned size );
454extern void xthal_dcache_region_invalidate( void *addr, unsigned size );
455extern void xthal_icache_line_invalidate(void *addr);
456extern void xthal_dcache_line_invalidate(void *addr);
457/* write dirty data back */
458extern void xthal_dcache_all_writeback( void );
459extern void xthal_dcache_region_writeback( void *addr, unsigned size );
460extern void xthal_dcache_line_writeback(void *addr);
461/* write dirty data back and invalidate */
462extern void xthal_dcache_all_writeback_inv( void );
463extern void xthal_dcache_region_writeback_inv( void *addr, unsigned size );
464extern void xthal_dcache_line_writeback_inv(void *addr);
465/* prefetch and lock specified memory range into cache */
466extern void xthal_icache_region_lock( void *addr, unsigned size );
467extern void xthal_dcache_region_lock( void *addr, unsigned size );
468extern void xthal_icache_line_lock(void *addr);
469extern void xthal_dcache_line_lock(void *addr);
470/* unlock from cache */
471extern void xthal_icache_all_unlock( void );
472extern void xthal_dcache_all_unlock( void );
473extern void xthal_icache_region_unlock( void *addr, unsigned size );
474extern void xthal_dcache_region_unlock( void *addr, unsigned size );
475extern void xthal_icache_line_unlock(void *addr);
476extern void xthal_dcache_line_unlock(void *addr);
477
478
479/* sync icache and memory */
480extern void xthal_icache_sync( void );
481/* sync dcache and memory */
482extern void xthal_dcache_sync( void );
483
484/*----------------------------------------------------------------------
485 Debug
486 ----------------------------------------------------------------------*/
487
488/* 1 if debug option configured, 0 if not: */
489extern const int Xthal_debug_configured;
490
491/* Number of instruction and data break registers: */
492extern const int Xthal_num_ibreak;
493extern const int Xthal_num_dbreak;
494
495/* Set (plant) and remove software breakpoint, both synchronizing cache: */
496extern unsigned int xthal_set_soft_break(void *addr);
497extern void xthal_remove_soft_break(void *addr, unsigned int);
498
499
500/*----------------------------------------------------------------------
501 Disassembler
502 ----------------------------------------------------------------------*/
503
504/* Max expected size of the return buffer for a disassembled instruction (hint only): */
505#define XTHAL_DISASM_BUFSIZE 80
506
507/* Disassembly option bits for selecting what to return: */
508#define XTHAL_DISASM_OPT_ADDR 0x0001 /* display address */
509#define XTHAL_DISASM_OPT_OPHEX 0x0002 /* display opcode bytes in hex */
510#define XTHAL_DISASM_OPT_OPCODE 0x0004 /* display opcode name (mnemonic) */
511#define XTHAL_DISASM_OPT_PARMS 0x0008 /* display parameters */
512#define XTHAL_DISASM_OPT_ALL 0x0FFF /* display everything */
513
514/* routine to get a string for the disassembled instruction */
515extern int xthal_disassemble( unsigned char *instr_buf, void *tgt_addr,
516 char *buffer, unsigned buflen, unsigned options );
517
518/* routine to get the size of the next instruction. Returns 0 for
519 illegal instruction */
520extern int xthal_disassemble_size( unsigned char *instr_buf );
521
522
523/*----------------------------------------------------------------------
524 Core Counter
525 ----------------------------------------------------------------------*/
526
527/* counter info */
528extern const unsigned char Xthal_have_ccount; /* set if CCOUNT register present */
529extern const unsigned char Xthal_num_ccompare; /* number of CCOMPAREn registers */
530
531/* get CCOUNT register (if not present return 0) */
532extern unsigned xthal_get_ccount(void);
533
534/* set and get CCOMPAREn registers (if not present, get returns 0) */
535extern void xthal_set_ccompare(int, unsigned);
536extern unsigned xthal_get_ccompare(int);
537
538
539/*----------------------------------------------------------------------
540 Instruction/Data RAM/ROM Access
541 ----------------------------------------------------------------------*/
542
543extern void* xthal_memcpy(void *dst, const void *src, unsigned len);
544extern void* xthal_bcopy(const void *src, void *dst, unsigned len);
545
546/*----------------------------------------------------------------------
547 MP Synchronization
548 ----------------------------------------------------------------------*/
549extern int xthal_compare_and_set( int *addr, int test_val, int compare_val );
550extern unsigned xthal_get_prid( void );
551
552/*extern const char Xthal_have_s32c1i;*/
553extern const unsigned char Xthal_have_prid;
554
555
556/*----------------------------------------------------------------------
557 Miscellaneous
558 ----------------------------------------------------------------------*/
559
560extern const unsigned int Xthal_release_major;
561extern const unsigned int Xthal_release_minor;
562extern const char * const Xthal_release_name;
563extern const char * const Xthal_release_internal;
564
565extern const unsigned char Xthal_memory_order;
566extern const unsigned char Xthal_have_windowed;
567extern const unsigned char Xthal_have_density;
568extern const unsigned char Xthal_have_booleans;
569extern const unsigned char Xthal_have_loops;
570extern const unsigned char Xthal_have_nsa;
571extern const unsigned char Xthal_have_minmax;
572extern const unsigned char Xthal_have_sext;
573extern const unsigned char Xthal_have_clamps;
574extern const unsigned char Xthal_have_mac16;
575extern const unsigned char Xthal_have_mul16;
576extern const unsigned char Xthal_have_fp;
577extern const unsigned char Xthal_have_speculation;
578extern const unsigned char Xthal_have_exceptions;
579extern const unsigned char Xthal_xea_version;
580extern const unsigned char Xthal_have_interrupts;
581extern const unsigned char Xthal_have_highlevel_interrupts;
582extern const unsigned char Xthal_have_nmi;
583
584extern const unsigned short Xthal_num_writebuffer_entries;
585
586extern const unsigned int Xthal_build_unique_id;
587/* Release info for hardware targeted by software upgrades: */
588extern const unsigned int Xthal_hw_configid0;
589extern const unsigned int Xthal_hw_configid1;
590extern const unsigned int Xthal_hw_release_major;
591extern const unsigned int Xthal_hw_release_minor;
592extern const char * const Xthal_hw_release_name;
593extern const char * const Xthal_hw_release_internal;
594
595
596/* Internal memories... */
597
598extern const unsigned char Xthal_num_instrom;
599extern const unsigned char Xthal_num_instram;
600extern const unsigned char Xthal_num_datarom;
601extern const unsigned char Xthal_num_dataram;
602extern const unsigned char Xthal_num_xlmi;
603extern const unsigned int Xthal_instrom_vaddr[1];
604extern const unsigned int Xthal_instrom_paddr[1];
605extern const unsigned int Xthal_instrom_size [1];
606extern const unsigned int Xthal_instram_vaddr[1];
607extern const unsigned int Xthal_instram_paddr[1];
608extern const unsigned int Xthal_instram_size [1];
609extern const unsigned int Xthal_datarom_vaddr[1];
610extern const unsigned int Xthal_datarom_paddr[1];
611extern const unsigned int Xthal_datarom_size [1];
612extern const unsigned int Xthal_dataram_vaddr[1];
613extern const unsigned int Xthal_dataram_paddr[1];
614extern const unsigned int Xthal_dataram_size [1];
615extern const unsigned int Xthal_xlmi_vaddr[1];
616extern const unsigned int Xthal_xlmi_paddr[1];
617extern const unsigned int Xthal_xlmi_size [1];
618
619
620
621/*----------------------------------------------------------------------
622 Memory Management Unit
623 ----------------------------------------------------------------------*/
624
625extern const unsigned char Xthal_have_spanning_way;
626extern const unsigned char Xthal_have_identity_map;
627extern const unsigned char Xthal_have_mimic_cacheattr;
628extern const unsigned char Xthal_have_xlt_cacheattr;
629extern const unsigned char Xthal_have_cacheattr;
630extern const unsigned char Xthal_have_tlbs;
631
632extern const unsigned char Xthal_mmu_asid_bits; /* 0 .. 8 */
633extern const unsigned char Xthal_mmu_asid_kernel;
634extern const unsigned char Xthal_mmu_rings; /* 1 .. 4 (perhaps 0 if no MMU and/or no protection?) */
635extern const unsigned char Xthal_mmu_ring_bits;
636extern const unsigned char Xthal_mmu_sr_bits;
637extern const unsigned char Xthal_mmu_ca_bits;
638extern const unsigned int Xthal_mmu_max_pte_page_size;
639extern const unsigned int Xthal_mmu_min_pte_page_size;
640
641extern const unsigned char Xthal_itlb_way_bits;
642extern const unsigned char Xthal_itlb_ways;
643extern const unsigned char Xthal_itlb_arf_ways;
644extern const unsigned char Xthal_dtlb_way_bits;
645extern const unsigned char Xthal_dtlb_ways;
646extern const unsigned char Xthal_dtlb_arf_ways;
647
648/* Convert between virtual and physical addresses (through static maps only): */
649/*** WARNING: these two functions may go away in a future release; don't depend on them! ***/
650extern int xthal_static_v2p( unsigned vaddr, unsigned *paddrp );
651extern int xthal_static_p2v( unsigned paddr, unsigned *vaddrp, unsigned cached );
652
653#if 0
654/******************* EXPERIMENTAL AND TENTATIVE ONLY ********************/
655
656#define XTHAL_MMU_PAGESZ_COUNT_MAX 8 /* maximum number of different page sizes */
657extern const char Xthal_mmu_pagesz_count; /* 0 .. 8 number of different page sizes configured */
658
659/* Note: the following table doesn't necessarily have page sizes in increasing order: */
660extern const char Xthal_mmu_pagesz_log2[XTHAL_MMU_PAGESZ_COUNT_MAX]; /* 10 .. 28 (0 past count) */
661
662/* Sorted (increasing) table of page sizes, that indexes into the above table: */
663extern const char Xthal_mmu_pagesz_sorted[XTHAL_MMU_PAGESZ_COUNT_MAX]; /* 0 .. 7 (0 past count) */
664
665/*u32 Xthal_virtual_exceptions;*/ /* bitmask of which exceptions execute in virtual mode... */
666
667extern const char Xthal_mmu_pte_pagesz_log2_min; /* ?? minimum page size in PTEs */
668extern const char Xthal_mmu_pte_pagesz_log2_max; /* ?? maximum page size in PTEs */
669
670/* Cache Attribute Bits Implemented by the Cache (part of the cache abstraction) */
671extern const char Xthal_icache_fca_bits_implemented; /* ITLB/UTLB only! */
672extern const char Xthal_dcache_lca_bits_implemented; /* DTLB/UTLB only! */
673extern const char Xthal_dcache_sca_bits_implemented; /* DTLB/UTLB only! */
674
675/* Per TLB Parameters (Instruction, Data, Unified) */
676struct XtHalMmuTlb Xthal_itlb; /* description of MMU I-TLB generic features */
677struct XtHalMmuTlb Xthal_dtlb; /* description of MMU D-TLB generic features */
678struct XtHalMmuTlb Xthal_utlb; /* description of MMU U-TLB generic features */
679
680#define XTHAL_MMU_WAYS_MAX 8 /* maximum number of ways (associativities) for each TLB */
681
682/* Structure for common information described for each possible TLB (instruction, data and unified): */
683typedef struct XtHalMmuTlb {
684 u8 va_bits; /* 32 (number of virtual address bits) */
685 u8 pa_bits; /* 32 (number of physical address bits) */
686 bool tlb_va_indexed; /* 1 (set if TLB is indexed by virtual address) */
687 bool tlb_va_tagged; /* 0 (set if TLB is tagged by virtual address) */
688 bool cache_va_indexed; /* 1 (set if cache is indexed by virtual address) */
689 bool cache_va_tagged; /* 0 (set if cache is tagged by virtual address) */
690 /*bool (whether page tables are traversed in vaddr sorted order, paddr sorted order, ...) */
691 /*u8 (set of available page attribute bits, other than cache attribute bits defined above) */
692 /*u32 (various masks for pages, MMU table/TLB entries, etc.) */
693 u8 way_count; /* 0 .. 8 (number of ways, a.k.a. associativities, for this TLB) */
694 XtHalMmuTlbWay * ways[XTHAL_MMU_WAYS_MAX]; /* pointers to per-way parms for each way */
695} XtHalMmuTlb;
696
697/* Per TLB Way (Per Associativity) Parameters */
698typedef struct XtHalMmuTlbWay {
699 u32 index_count_log2; /* 0 .. 4 */
700 u32 pagesz_mask; /* 0 .. 2^pagesz_count - 1 (each bit corresponds to a size */
701 /* defined in the Xthal_mmu_pagesz_log2[] table) */
702 u32 vpn_const_mask;
703 u32 vpn_const_value;
704 u64 ppn_const_mask; /* future may support pa_bits > 32 */
705 u64 ppn_const_value;
706 u32 ppn_id_mask; /* paddr bits taken directly from vaddr */
707 bool backgnd_match; /* 0 or 1 */
708 /* These are defined in terms of the XTHAL_CACHE_xxx bits: */
709 u8 fca_const_mask; /* ITLB/UTLB only! */
710 u8 fca_const_value; /* ITLB/UTLB only! */
711 u8 lca_const_mask; /* DTLB/UTLB only! */
712 u8 lca_const_value; /* DTLB/UTLB only! */
713 u8 sca_const_mask; /* DTLB/UTLB only! */
714 u8 sca_const_value; /* DTLB/UTLB only! */
715 /* These define an encoding that map 5 bits in TLB and PTE entries to */
716 /* 8 bits (FCA, ITLB), 16 bits (LCA+SCA, DTLB) or 24 bits (FCA+LCA+SCA, UTLB): */
717 /* (they may be moved to struct XtHalMmuTlb) */
718 u8 ca_bits; /* number of bits in TLB/PTE entries for cache attributes */
719 u32 * ca_map; /* pointer to array of 2^ca_bits entries of FCA+LCA+SCA bits */
720} XtHalMmuTlbWay;
721
722/*
723 * The way to determine whether protection support is present in core
724 * is to [look at Xthal_mmu_rings ???].
725 * Give info on memory requirements for MMU tables and other in-memory
726 * data structures (globally, per task, base and per page, etc.) - whatever bounds can be calculated.
727 */
728
729
730/* Default vectors: */
731xthal_immu_fetch_miss_vector
732xthal_dmmu_load_miss_vector
733xthal_dmmu_store_miss_vector
734
735/* Functions called when a fault is detected: */
736typedef void (XtHalMmuFaultFunc)( unsigned vaddr, ...context... );
737/* Or, */
738/* a? = vaddr */
739/* a? = context... */
740/* PS.xxx = xxx */
741XtHalMMuFaultFunc *Xthal_immu_fetch_fault_func;
742XtHalMMuFaultFunc *Xthal_dmmu_load_fault_func;
743XtHalMMuFaultFunc *Xthal_dmmu_store_fault_func;
744
745/* Default Handlers: */
746/* The user and/or kernel exception handlers may jump to these handlers to handle the relevant exceptions,
747 * according to the value of EXCCAUSE. The exact register state on entry to these handlers is TBD. */
748/* When multiple TLB entries match (hit) on the same access: */
749xthal_immu_fetch_multihit_handler
750xthal_dmmu_load_multihit_handler
751xthal_dmmu_store_multihit_handler
752/* Protection violations according to cache attributes, and other cache attribute mismatches: */
753xthal_immu_fetch_attr_handler
754xthal_dmmu_load_attr_handler
755xthal_dmmu_store_attr_handler
756/* Protection violations due to insufficient ring level: */
757xthal_immu_fetch_priv_handler
758xthal_dmmu_load_priv_handler
759xthal_dmmu_store_priv_handler
760/* Alignment exception handlers (if supported by the particular Xtensa MMU configuration): */
761xthal_dmmu_load_align_handler
762xthal_dmmu_store_align_handler
763
764/* Or, alternatively, the OS user and/or kernel exception handlers may simply jump to the
765 * following entry points which will handle any values of EXCCAUSE not handled by the OS: */
766xthal_user_exc_default_handler
767xthal_kernel_exc_default_handler
768
769#endif /*0*/
770
771#ifdef INCLUDE_DEPRECATED_HAL_CODE
772extern const unsigned char Xthal_have_old_exc_arch;
773extern const unsigned char Xthal_have_mmu;
774extern const unsigned int Xthal_num_regs;
775extern const unsigned char Xthal_num_iroms;
776extern const unsigned char Xthal_num_irams;
777extern const unsigned char Xthal_num_droms;
778extern const unsigned char Xthal_num_drams;
779extern const unsigned int Xthal_configid0;
780extern const unsigned int Xthal_configid1;
781#endif
782
783#ifdef INCLUDE_DEPRECATED_HAL_DEBUG_CODE
784#define XTHAL_24_BIT_BREAK 0x80000000
785#define XTHAL_16_BIT_BREAK 0x40000000
786extern const unsigned short Xthal_ill_inst_16[16];
787#define XTHAL_DEST_REG 0xf0000000 /* Mask for destination register */
788#define XTHAL_DEST_REG_INST 0x08000000 /* Branch address is in register */
789#define XTHAL_DEST_REL_INST 0x04000000 /* Branch address is relative */
790#define XTHAL_RFW_INST 0x00000800
791#define XTHAL_RFUE_INST 0x00000400
792#define XTHAL_RFI_INST 0x00000200
793#define XTHAL_RFE_INST 0x00000100
794#define XTHAL_RET_INST 0x00000080
795#define XTHAL_BREAK_INST 0x00000040
796#define XTHAL_SYSCALL_INST 0x00000020
797#define XTHAL_LOOP_END 0x00000010 /* Not set by xthal_inst_type */
798#define XTHAL_JUMP_INST 0x00000008 /* Call or jump instruction */
799#define XTHAL_BRANCH_INST 0x00000004 /* Branch instruction */
800#define XTHAL_24_BIT_INST 0x00000002
801#define XTHAL_16_BIT_INST 0x00000001
 802typedef struct xthal_state { /* snapshot of core state used by the deprecated HAL debug helpers below */
 803 unsigned pc; /* program counter */
 804 unsigned ar[16]; /* address registers (presumably a0..a15 of the current window — confirm against HAL docs) */
 805 unsigned lbeg; /* LBEG — zero-overhead loop begin (meaningful only if loops configured; cf. Xthal_have_loops) */
 806 unsigned lend; /* LEND — zero-overhead loop end */
 807 unsigned lcount; /* LCOUNT — zero-overhead loop count */
 808 unsigned extra_ptr; /* pointer (as unsigned) to saved "extra" processor state; cf. xthal_save_extra() */
 809 unsigned cpregs_ptr[XTHAL_MAX_CPS]; /* per-coprocessor pointers (as unsigned) to saved TIE register state; cf. xthal_save_cpregs() */
 810} XTHAL_STATE;
811extern unsigned int xthal_inst_type(void *addr);
812extern unsigned int xthal_branch_addr(void *addr);
813extern unsigned int xthal_get_npc(XTHAL_STATE *user_state);
814#endif /* INCLUDE_DEPRECATED_HAL_DEBUG_CODE */
815
816#ifdef __cplusplus
817}
818#endif
819#endif /*!__ASSEMBLY__ */
820
821#endif /*XTENSA_HAL_H*/
822
diff --git a/include/asm-xtensa/xtensa/simcall.h b/include/asm-xtensa/xtensa/simcall.h
deleted file mode 100644
index a2b868929a49..000000000000
--- a/include/asm-xtensa/xtensa/simcall.h
+++ /dev/null
@@ -1,130 +0,0 @@
1#ifndef SIMCALL_INCLUDED
2#define SIMCALL_INCLUDED
3
4/*
5 * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND
6 *
7 * include/asm-xtensa/xtensa/simcall.h - Simulator call numbers
8 *
9 * This file is subject to the terms and conditions of the GNU General
10 * Public License. See the file "COPYING" in the main directory of
11 * this archive for more details.
12 *
13 * Copyright (C) 2002 Tensilica Inc.
14 */
15
16
17/*
18 * System call like services offered by the simulator host.
19 * These are modeled after the Linux 2.4 kernel system calls
20 * for Xtensa processors. However not all system calls and
21 * not all functionality of a given system call are implemented,
22 * or necessarily have well defined or equivalent semantics in
23 * the context of a simulation (as opposed to a Unix kernel).
24 *
25 * These services behave largely as if they had been invoked
26 * as a task in the simulator host's operating system
27 * (eg. files accessed are those of the simulator host).
28 * However, these SIMCALLs model a virtual operating system
29 * so that various definitions, bit assignments etc
30 * (eg. open mode bits, errno values, etc) are independent
31 * of the host operating system used to run the simulation.
32 * Rather these definitions are specific to the Xtensa ISS.
33 * This way Xtensa ISA code written to use these SIMCALLs
34 * can (in principle) be simulated on any host.
35 *
36 * Up to 6 parameters are passed in registers a3 to a8
37 * (note the 6th parameter isn't passed on the stack,
38 * unlike windowed function calling conventions).
39 * The return value is in a2. A negative value in the
40 * range -4096 to -1 indicates a negated error code to be
41 * reported in errno with a return value of -1, otherwise
42 * the value in a2 is returned as is.
43 */
44
45/* These #defines need to match what's in Xtensa/OS/vxworks/xtiss/simcalls.c */
46
47#define SYS_nop 0 /* n/a - setup; used to flush register windows */
48#define SYS_exit 1 /*x*/
49#define SYS_fork 2
50#define SYS_read 3 /*x*/
51#define SYS_write 4 /*x*/
52#define SYS_open 5 /*x*/
53#define SYS_close 6 /*x*/
54#define SYS_rename 7 /*x 38 - waitpid */
55#define SYS_creat 8 /*x*/
56#define SYS_link 9 /*x (not implemented on WIN32) */
57#define SYS_unlink 10 /*x*/
58#define SYS_execv 11 /* n/a - execve */
59#define SYS_execve 12 /* 11 - chdir */
60#define SYS_pipe 13 /* 42 - time */
61#define SYS_stat 14 /* 106 - mknod */
62#define SYS_chmod 15
63#define SYS_chown 16 /* 202 - lchown */
64#define SYS_utime 17 /* 30 - break */
65#define SYS_wait 18 /* n/a - oldstat */
66#define SYS_lseek 19 /*x*/
67#define SYS_getpid 20
68#define SYS_isatty 21 /* n/a - mount */
69#define SYS_fstat 22 /* 108 - oldumount */
70#define SYS_time 23 /* 13 - setuid */
71#define SYS_gettimeofday 24 /*x 78 - getuid (not implemented on WIN32) */
72#define SYS_times 25 /*X 43 - stime (Xtensa-specific implementation) */
73#define SYS_socket 26
74#define SYS_sendto 27
75#define SYS_recvfrom 28
 76#define SYS_select_one 29 /* not a compatible select; one file descriptor at a time */
77#define SYS_bind 30
78#define SYS_ioctl 31
79
80/*
81 * Other...
82 */
83#define SYS_iss_argc 1000 /* returns value of argc */
84#define SYS_iss_argv_size 1001 /* bytes needed for argv & arg strings */
85#define SYS_iss_set_argv 1002 /* saves argv & arg strings at given addr */
86
87/*
88 * SIMCALLs for the ferret memory debugger. All are invoked by
89 * libferret.a ... ( Xtensa/Target-Libs/ferret )
90 */
91#define SYS_ferret 1010
92#define SYS_malloc 1011
93#define SYS_free 1012
94#define SYS_more_heap 1013
95#define SYS_no_heap 1014
96
97
98/*
99 * Extra SIMCALLs for GDB:
100 */
101#define SYS_gdb_break -1 /* invoked by XTOS on user exceptions if EPC points
102 to a break.n/break, regardless of cause! */
103#define SYS_xmon_out -2 /* invoked by XMON: ... */
104#define SYS_xmon_in -3 /* invoked by XMON: ... */
105#define SYS_xmon_flush -4 /* invoked by XMON: ... */
106#define SYS_gdb_abort -5 /* invoked by XTOS in _xtos_panic() */
107#define SYS_gdb_illegal_inst -6 /* invoked by XTOS for illegal instructions (too deeply) */
108#define SYS_xmon_init -7 /* invoked by XMON: ... */
109#define SYS_gdb_enter_sktloop -8 /* invoked by XTOS on debug exceptions */
110
111/*
112 * SIMCALLs for vxWorks xtiss BSP:
113 */
114#define SYS_setup_ppp_pipes -83
115#define SYS_log_msg -84
116
117/*
118 * Test SIMCALLs:
119 */
120#define SYS_test_write_state -100
121#define SYS_test_read_state -101
122
123/*
124 * SYS_select_one specifiers
125 */
126#define XTISS_SELECT_ONE_READ 1
127#define XTISS_SELECT_ONE_WRITE 2
128#define XTISS_SELECT_ONE_EXCEPT 3
129
130#endif /* !SIMCALL_INCLUDED */
diff --git a/include/asm-xtensa/xtensa/xt2000-uart.h b/include/asm-xtensa/xtensa/xt2000-uart.h
deleted file mode 100644
index 0154460f0ed8..000000000000
--- a/include/asm-xtensa/xtensa/xt2000-uart.h
+++ /dev/null
@@ -1,155 +0,0 @@
1#ifndef _uart_h_included_
2#define _uart_h_included_
3
4/*
5 * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND
6 *
7 * include/asm-xtensa/xtensa/xt2000-uart.h -- NatSemi PC16552D DUART
8 * definitions
9 *
10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file "COPYING" in the main directory of this archive
12 * for more details.
13 *
14 * Copyright (C) 2002 Tensilica Inc.
15 */
16
17
18#include <xtensa/xt2000.h>
19
20
21/* 16550 UART DEVICE REGISTERS
22 The XT2000 board aligns each register to a 32-bit word but the UART device only uses
23 one byte of the word, which is the least-significant byte regardless of the
24 endianness of the core (ie. byte offset 0 for little-endian and 3 for big-endian).
25 So if using word accesses then endianness doesn't matter.
26 The macros provided here do that.
27*/
28struct uart_dev_s {
29 union {
30 unsigned int rxb; /* DLAB=0: receive buffer, read-only */
31 unsigned int txb; /* DLAB=0: transmit buffer, write-only */
32 unsigned int dll; /* DLAB=1: divisor, least-significant byte latch (was write-only?) */
33 } w0;
34 union {
35 unsigned int ier; /* DLAB=0: interrupt-enable register (was write-only?) */
36 unsigned int dlm; /* DLAB=1: divisor, most-significant byte latch (was write-only?) */
37 } w1;
38
39 union {
40 unsigned int isr; /* DLAB=0: interrupt status register, read-only */
41 unsigned int fcr; /* DLAB=0: FIFO control register, write-only */
42 unsigned int afr; /* DLAB=1: alternate function register */
43 } w2;
44
45 unsigned int lcr; /* line control-register, write-only */
46 unsigned int mcr; /* modem control-regsiter, write-only */
47 unsigned int lsr; /* line status register, read-only */
48 unsigned int msr; /* modem status register, read-only */
49 unsigned int scr; /* scratch regsiter, read/write */
50};
51
52#define _RXB(u) ((u)->w0.rxb)
53#define _TXB(u) ((u)->w0.txb)
54#define _DLL(u) ((u)->w0.dll)
55#define _IER(u) ((u)->w1.ier)
56#define _DLM(u) ((u)->w1.dlm)
57#define _ISR(u) ((u)->w2.isr)
58#define _FCR(u) ((u)->w2.fcr)
59#define _AFR(u) ((u)->w2.afr)
60#define _LCR(u) ((u)->lcr)
61#define _MCR(u) ((u)->mcr)
62#define _LSR(u) ((u)->lsr)
63#define _MSR(u) ((u)->msr)
64#define _SCR(u) ((u)->scr)
65
66typedef volatile struct uart_dev_s uart_dev_t;
67
68/* IER bits */
69#define RCVR_DATA_REG_INTENABLE 0x01
70#define XMIT_HOLD_REG_INTENABLE 0x02
71#define RCVR_STATUS_INTENABLE 0x04
72#define MODEM_STATUS_INTENABLE 0x08
73
74/* FCR bits */
75#define _FIFO_ENABLE 0x01
76#define RCVR_FIFO_RESET 0x02
77#define XMIT_FIFO_RESET 0x04
78#define DMA_MODE_SELECT 0x08
79#define RCVR_TRIGGER_LSB 0x40
80#define RCVR_TRIGGER_MSB 0x80
81
82/* AFR bits */
83#define AFR_CONC_WRITE 0x01
84#define AFR_BAUDOUT_SEL 0x02
85#define AFR_RXRDY_SEL 0x04
86
87/* ISR bits */
88#define INT_STATUS(r) ((r)&1)
89#define INT_PRIORITY(r) (((r)>>1)&0x7)
90
91/* LCR bits */
92#define WORD_LENGTH(n) (((n)-5)&0x3)
93#define STOP_BIT_ENABLE 0x04
94#define PARITY_ENABLE 0x08
95#define EVEN_PARITY 0x10
96#define FORCE_PARITY 0x20
97#define XMIT_BREAK 0x40
98#define DLAB_ENABLE 0x80
99
100/* MCR bits */
101#define _DTR 0x01
102#define _RTS 0x02
103#define _OP1 0x04
104#define _OP2 0x08
105#define LOOP_BACK 0x10
106
107/* LSR Bits */
108#define RCVR_DATA_READY 0x01
109#define OVERRUN_ERROR 0x02
110#define PARITY_ERROR 0x04
111#define FRAMING_ERROR 0x08
112#define BREAK_INTERRUPT 0x10
113#define XMIT_HOLD_EMPTY 0x20
114#define XMIT_EMPTY 0x40
115#define FIFO_ERROR 0x80
116#define RCVR_READY(u) (_LSR(u)&RCVR_DATA_READY)
117#define XMIT_READY(u) (_LSR(u)&XMIT_HOLD_EMPTY)
118
119/* MSR bits */
120#define _RDR 0x01
121#define DELTA_DSR 0x02
122#define DELTA_RI 0x04
123#define DELTA_CD 0x08
124#define _CTS 0x10
125#define _DSR 0x20
126#define _RI 0x40
127#define _CD 0x80
128
129/* prototypes */
130void uart_init( uart_dev_t *u, int bitrate );
131void uart_out( uart_dev_t *u, char c );
132void uart_puts( uart_dev_t *u, char *s );
133char uart_in( uart_dev_t *u );
134void uart_enable_rcvr_int( uart_dev_t *u );
135void uart_disable_rcvr_int( uart_dev_t *u );
136
137#ifdef DUART16552_1_VADDR
138/* DUART present. */
139#define DUART_1_BASE (*(uart_dev_t*)DUART16552_1_VADDR)
140#define DUART_2_BASE (*(uart_dev_t*)DUART16552_2_VADDR)
141#define UART1_PUTS(s) uart_puts( &DUART_1_BASE, s )
142#define UART2_PUTS(s) uart_puts( &DUART_2_BASE, s )
143#else
144/* DUART not configured, use dummy placeholders to allow compiles to work. */
145#define DUART_1_BASE (*(uart_dev_t*)0)
146#define DUART_2_BASE (*(uart_dev_t*)0)
147#define UART1_PUTS(s)
148#define UART2_PUTS(s)
149#endif
150
151/* Compute 16-bit divisor for baudrate generator, with rounding: */
152#define DUART_DIVISOR(crystal,speed) (((crystal)/16 + (speed)/2)/(speed))
153
154#endif /*_uart_h_included_*/
155
diff --git a/include/asm-xtensa/xtensa/xt2000.h b/include/asm-xtensa/xtensa/xt2000.h
deleted file mode 100644
index 703a45002f8f..000000000000
--- a/include/asm-xtensa/xtensa/xt2000.h
+++ /dev/null
@@ -1,408 +0,0 @@
1#ifndef _INC_XT2000_H_
2#define _INC_XT2000_H_
3
4/*
5 * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND
6 *
7 * include/asm-xtensa/xtensa/xt2000.h - Definitions specific to the
8 * Tensilica XT2000 Emulation Board
9 *
10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file "COPYING" in the main directory of this archive
12 * for more details.
13 *
14 * Copyright (C) 2002 Tensilica Inc.
15 */
16
17
18#include <xtensa/config/core.h>
19#include <xtensa/config/system.h>
20
21
22/*
23 * Default assignment of XT2000 devices to external interrupts.
24 */
25
26/* Ethernet interrupt: */
27#ifdef XCHAL_EXTINT3_NUM
28#define SONIC83934_INTNUM XCHAL_EXTINT3_NUM
29#define SONIC83934_INTLEVEL XCHAL_EXTINT3_LEVEL
30#define SONIC83934_INTMASK XCHAL_EXTINT3_MASK
31#else
32#define SONIC83934_INTMASK 0
33#endif
34
35/* DUART channel 1 interrupt (P1 - console): */
36#ifdef XCHAL_EXTINT4_NUM
37#define DUART16552_1_INTNUM XCHAL_EXTINT4_NUM
38#define DUART16552_1_INTLEVEL XCHAL_EXTINT4_LEVEL
39#define DUART16552_1_INTMASK XCHAL_EXTINT4_MASK
40#else
41#define DUART16552_1_INTMASK 0
42#endif
43
44/* DUART channel 2 interrupt (P2 - 2nd serial port): */
45#ifdef XCHAL_EXTINT5_NUM
46#define DUART16552_2_INTNUM XCHAL_EXTINT5_NUM
47#define DUART16552_2_INTLEVEL XCHAL_EXTINT5_LEVEL
48#define DUART16552_2_INTMASK XCHAL_EXTINT5_MASK
49#else
50#define DUART16552_2_INTMASK 0
51#endif
52
53/* FPGA-combined PCI/etc interrupts: */
54#ifdef XCHAL_EXTINT6_NUM
55#define XT2000_FPGAPCI_INTNUM XCHAL_EXTINT6_NUM
56#define XT2000_FPGAPCI_INTLEVEL XCHAL_EXTINT6_LEVEL
57#define XT2000_FPGAPCI_INTMASK XCHAL_EXTINT6_MASK
58#else
59#define XT2000_FPGAPCI_INTMASK 0
60#endif
61
62
63
64/*
65 * Device addresses.
66 *
67 * Note: for endianness-independence, use 32-bit loads and stores for all
68 * register accesses to Ethernet, DUART and LED devices. Undefined bits
69 * may need to be masked out if needed when reading if the actual register
70 * size is smaller than 32 bits.
71 *
72 * Note: XT2000 bus byte lanes are defined in terms of msbyte and lsbyte
73 * relative to the processor. So 32-bit registers are accessed consistently
74 * from both big and little endian processors. However, this means byte
75 * sequences are not consistent between big and little endian processors.
76 * This is fine for RAM, and for ROM if ROM is created for a specific
77 * processor (and thus has correct byte sequences). However this may be
78 * unexpected for Flash, which might contain a file-system that one wants
79 * to use for multiple processor configurations (eg. the Flash might contain
80 * the Ethernet card's address, endianness-independent application data, etc).
81 * That is, byte sequences written in Flash by a core of a given endianness
82 * will be byte-swapped when seen by a core of the other endianness.
83 * Someone implementing an endianness-independent Flash file system will
84 * likely handle this byte-swapping issue in the Flash driver software.
85 */
86
87#define DUART16552_XTAL_FREQ 18432000 /* crystal frequency in Hz */
88#define XTBOARD_FLASH_MAXSIZE 0x4000000 /* 64 MB (max; depends on what is socketed!) */
89#define XTBOARD_EPROM_MAXSIZE 0x0400000 /* 4 MB (max; depends on what is socketed!) */
90#define XTBOARD_EEPROM_MAXSIZE 0x0080000 /* 512 kB (max; depends on what is socketed!) */
91#define XTBOARD_ASRAM_SIZE 0x0100000 /* 1 MB */
92#define XTBOARD_PCI_MEM_SIZE 0x8000000 /* 128 MB (allocated) */
93#define XTBOARD_PCI_IO_SIZE 0x1000000 /* 16 MB (allocated) */
94
95#ifdef XSHAL_IOBLOCK_BYPASS_PADDR
96/* PCI memory space: */
97# define XTBOARD_PCI_MEM_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0x0000000)
98/* Socketed Flash (eg. 2 x 16-bit devices): */
99# define XTBOARD_FLASH_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0x8000000)
100/* PCI I/O space: */
101# define XTBOARD_PCI_IO_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xC000000)
102/* V3 PCI interface chip register/config space: */
103# define XTBOARD_V3PCI_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD000000)
104/* Bus Interface registers: */
105# define XTBOARD_BUSINT_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD010000)
106/* FPGA registers: */
107# define XT2000_FPGAREGS_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD020000)
108/* SONIC SN83934 Ethernet controller/transceiver: */
109# define SONIC83934_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD030000)
110/* 8-character bitmapped LED display: */
111# define XTBOARD_LED_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD040000)
112/* National-Semi PC16552D DUART: */
113# define DUART16552_1_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD050020) /* channel 1 (P1 - console) */
114# define DUART16552_2_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD050000) /* channel 2 (P2) */
115/* Asynchronous Static RAM: */
116# define XTBOARD_ASRAM_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD400000)
117/* 8-bit EEPROM: */
118# define XTBOARD_EEPROM_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD600000)
119/* 2 x 16-bit EPROMs: */
120# define XTBOARD_EPROM_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD800000)
121#endif /* XSHAL_IOBLOCK_BYPASS_PADDR */
122
123/* These devices might be accessed cached: */
124#ifdef XSHAL_IOBLOCK_CACHED_PADDR
125# define XTBOARD_PCI_MEM_CACHED_PADDR (XSHAL_IOBLOCK_CACHED_PADDR+0x0000000)
126# define XTBOARD_FLASH_CACHED_PADDR (XSHAL_IOBLOCK_CACHED_PADDR+0x8000000)
127# define XTBOARD_ASRAM_CACHED_PADDR (XSHAL_IOBLOCK_CACHED_PADDR+0xD400000)
128# define XTBOARD_EEPROM_CACHED_PADDR (XSHAL_IOBLOCK_CACHED_PADDR+0xD600000)
129# define XTBOARD_EPROM_CACHED_PADDR (XSHAL_IOBLOCK_CACHED_PADDR+0xD800000)
130#endif /* XSHAL_IOBLOCK_CACHED_PADDR */
131
132
133/*** Same thing over again, this time with virtual addresses: ***/
134
135#ifdef XSHAL_IOBLOCK_BYPASS_VADDR
136/* PCI memory space: */
137# define XTBOARD_PCI_MEM_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0x0000000)
138/* Socketed Flash (eg. 2 x 16-bit devices): */
139# define XTBOARD_FLASH_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0x8000000)
140/* PCI I/O space: */
141# define XTBOARD_PCI_IO_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xC000000)
142/* V3 PCI interface chip register/config space: */
143# define XTBOARD_V3PCI_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD000000)
144/* Bus Interface registers: */
145# define XTBOARD_BUSINT_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD010000)
146/* FPGA registers: */
147# define XT2000_FPGAREGS_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD020000)
148/* SONIC SN83934 Ethernet controller/transceiver: */
149# define SONIC83934_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD030000)
150/* 8-character bitmapped LED display: */
151# define XTBOARD_LED_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD040000)
152/* National-Semi PC16552D DUART: */
153# define DUART16552_1_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD050020) /* channel 1 (P1 - console) */
154# define DUART16552_2_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD050000) /* channel 2 (P2) */
155/* Asynchronous Static RAM: */
156# define XTBOARD_ASRAM_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD400000)
157/* 8-bit EEPROM: */
158# define XTBOARD_EEPROM_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD600000)
159/* 2 x 16-bit EPROMs: */
160# define XTBOARD_EPROM_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD800000)
161#endif /* XSHAL_IOBLOCK_BYPASS_VADDR */
162
163/* These devices might be accessed cached: */
164#ifdef XSHAL_IOBLOCK_CACHED_VADDR
165# define XTBOARD_PCI_MEM_CACHED_VADDR (XSHAL_IOBLOCK_CACHED_VADDR+0x0000000)
166# define XTBOARD_FLASH_CACHED_VADDR (XSHAL_IOBLOCK_CACHED_VADDR+0x8000000)
167# define XTBOARD_ASRAM_CACHED_VADDR (XSHAL_IOBLOCK_CACHED_VADDR+0xD400000)
168# define XTBOARD_EEPROM_CACHED_VADDR (XSHAL_IOBLOCK_CACHED_VADDR+0xD600000)
169# define XTBOARD_EPROM_CACHED_VADDR (XSHAL_IOBLOCK_CACHED_VADDR+0xD800000)
170#endif /* XSHAL_IOBLOCK_CACHED_VADDR */
171
172
173/* System ROM: */
174#define XTBOARD_ROM_SIZE XSHAL_ROM_SIZE
175#ifdef XSHAL_ROM_VADDR
176#define XTBOARD_ROM_VADDR XSHAL_ROM_VADDR
177#endif
178#ifdef XSHAL_ROM_PADDR
179#define XTBOARD_ROM_PADDR XSHAL_ROM_PADDR
180#endif
181
182/* System RAM: */
183#define XTBOARD_RAM_SIZE XSHAL_RAM_SIZE
184#ifdef XSHAL_RAM_VADDR
185#define XTBOARD_RAM_VADDR XSHAL_RAM_VADDR
186#endif
187#ifdef XSHAL_RAM_PADDR
188#define XTBOARD_RAM_PADDR XSHAL_RAM_PADDR
189#endif
190#define XTBOARD_RAM_BYPASS_VADDR XSHAL_RAM_BYPASS_VADDR
191#define XTBOARD_RAM_BYPASS_PADDR XSHAL_RAM_BYPASS_PADDR
192
193
194
195/*
196 * Things that depend on device addresses.
197 */
198
199
200#define XTBOARD_CACHEATTR_WRITEBACK XSHAL_XT2000_CACHEATTR_WRITEBACK
201#define XTBOARD_CACHEATTR_WRITEALLOC XSHAL_XT2000_CACHEATTR_WRITEALLOC
202#define XTBOARD_CACHEATTR_WRITETHRU XSHAL_XT2000_CACHEATTR_WRITETHRU
203#define XTBOARD_CACHEATTR_BYPASS XSHAL_XT2000_CACHEATTR_BYPASS
204#define XTBOARD_CACHEATTR_DEFAULT XSHAL_XT2000_CACHEATTR_DEFAULT
205
206#define XTBOARD_BUSINT_PIPE_REGIONS XSHAL_XT2000_PIPE_REGIONS
207#define XTBOARD_BUSINT_SDRAM_REGIONS XSHAL_XT2000_SDRAM_REGIONS
208
209
210
211/*
212 * BusLogic (FPGA) registers.
213 * All these registers are normally accessed using 32-bit loads/stores.
214 */
215
216/* Register offsets: */
217#define XT2000_DATECD_OFS 0x00 /* date code (read-only) */
218#define XT2000_STSREG_OFS 0x04 /* status (read-only) */
219#define XT2000_SYSLED_OFS 0x08 /* system LED */
220#define XT2000_WRPROT_OFS 0x0C /* write protect */
221#define XT2000_SWRST_OFS 0x10 /* software reset */
222#define XT2000_SYSRST_OFS 0x14 /* system (peripherals) reset */
223#define XT2000_IMASK_OFS 0x18 /* interrupt mask */
224#define XT2000_ISTAT_OFS 0x1C /* interrupt status */
225#define XT2000_V3CFG_OFS 0x20 /* V3 config (V320 PCI) */
226
227/* Physical register addresses: */
228#ifdef XT2000_FPGAREGS_PADDR
229#define XT2000_DATECD_PADDR (XT2000_FPGAREGS_PADDR+XT2000_DATECD_OFS)
230#define XT2000_STSREG_PADDR (XT2000_FPGAREGS_PADDR+XT2000_STSREG_OFS)
231#define XT2000_SYSLED_PADDR (XT2000_FPGAREGS_PADDR+XT2000_SYSLED_OFS)
232#define XT2000_WRPROT_PADDR (XT2000_FPGAREGS_PADDR+XT2000_WRPROT_OFS)
233#define XT2000_SWRST_PADDR (XT2000_FPGAREGS_PADDR+XT2000_SWRST_OFS)
234#define XT2000_SYSRST_PADDR (XT2000_FPGAREGS_PADDR+XT2000_SYSRST_OFS)
235#define XT2000_IMASK_PADDR (XT2000_FPGAREGS_PADDR+XT2000_IMASK_OFS)
236#define XT2000_ISTAT_PADDR (XT2000_FPGAREGS_PADDR+XT2000_ISTAT_OFS)
237#define XT2000_V3CFG_PADDR (XT2000_FPGAREGS_PADDR+XT2000_V3CFG_OFS)
238#endif
239
240/* Virtual register addresses: */
241#ifdef XT2000_FPGAREGS_VADDR
242#define XT2000_DATECD_VADDR (XT2000_FPGAREGS_VADDR+XT2000_DATECD_OFS)
243#define XT2000_STSREG_VADDR (XT2000_FPGAREGS_VADDR+XT2000_STSREG_OFS)
244#define XT2000_SYSLED_VADDR (XT2000_FPGAREGS_VADDR+XT2000_SYSLED_OFS)
245#define XT2000_WRPROT_VADDR (XT2000_FPGAREGS_VADDR+XT2000_WRPROT_OFS)
246#define XT2000_SWRST_VADDR (XT2000_FPGAREGS_VADDR+XT2000_SWRST_OFS)
247#define XT2000_SYSRST_VADDR (XT2000_FPGAREGS_VADDR+XT2000_SYSRST_OFS)
248#define XT2000_IMASK_VADDR (XT2000_FPGAREGS_VADDR+XT2000_IMASK_OFS)
249#define XT2000_ISTAT_VADDR (XT2000_FPGAREGS_VADDR+XT2000_ISTAT_OFS)
250#define XT2000_V3CFG_VADDR (XT2000_FPGAREGS_VADDR+XT2000_V3CFG_OFS)
251/* Register access (for C code): */
252#define XT2000_DATECD_REG (*(volatile unsigned*) XT2000_DATECD_VADDR)
253#define XT2000_STSREG_REG (*(volatile unsigned*) XT2000_STSREG_VADDR)
254#define XT2000_SYSLED_REG (*(volatile unsigned*) XT2000_SYSLED_VADDR)
255#define XT2000_WRPROT_REG (*(volatile unsigned*) XT2000_WRPROT_VADDR)
256#define XT2000_SWRST_REG (*(volatile unsigned*) XT2000_SWRST_VADDR)
257#define XT2000_SYSRST_REG (*(volatile unsigned*) XT2000_SYSRST_VADDR)
258#define XT2000_IMASK_REG (*(volatile unsigned*) XT2000_IMASK_VADDR)
259#define XT2000_ISTAT_REG (*(volatile unsigned*) XT2000_ISTAT_VADDR)
260#define XT2000_V3CFG_REG (*(volatile unsigned*) XT2000_V3CFG_VADDR)
261#endif
262
263/* DATECD (date code) bit fields: */
264
265/* BCD-coded month (01..12): */
266#define XT2000_DATECD_MONTH_SHIFT 24
267#define XT2000_DATECD_MONTH_BITS 8
268#define XT2000_DATECD_MONTH_MASK 0xFF000000
269/* BCD-coded day (01..31): */
270#define XT2000_DATECD_DAY_SHIFT 16
271#define XT2000_DATECD_DAY_BITS 8
272#define XT2000_DATECD_DAY_MASK 0x00FF0000
273/* BCD-coded year (2001..9999): */
274#define XT2000_DATECD_YEAR_SHIFT 0
275#define XT2000_DATECD_YEAR_BITS 16
276#define XT2000_DATECD_YEAR_MASK 0x0000FFFF
277
278/* STSREG (status) bit fields: */
279
280/* Switch SW3 setting bit fields (0=off/up, 1=on/down): */
281#define XT2000_STSREG_SW3_SHIFT 0
282#define XT2000_STSREG_SW3_BITS 4
283#define XT2000_STSREG_SW3_MASK 0x0000000F
284/* Boot-select bits of switch SW3: */
285#define XT2000_STSREG_BOOTSEL_SHIFT 0
286#define XT2000_STSREG_BOOTSEL_BITS 2
287#define XT2000_STSREG_BOOTSEL_MASK 0x00000003
288/* Boot-select values: */
289#define XT2000_STSREG_BOOTSEL_FLASH 0
290#define XT2000_STSREG_BOOTSEL_EPROM16 1
291#define XT2000_STSREG_BOOTSEL_PROM8 2
292#define XT2000_STSREG_BOOTSEL_ASRAM 3
293/* User-defined bits of switch SW3: */
294#define XT2000_STSREG_SW3_2_SHIFT 2
295#define XT2000_STSREG_SW3_2_MASK 0x00000004
296#define XT2000_STSREG_SW3_3_SHIFT 3
297#define XT2000_STSREG_SW3_3_MASK 0x00000008
298
299/* SYSLED (system LED) bit fields: */
300
301/* LED control bit (0=off, 1=on): */
302#define XT2000_SYSLED_LEDON_SHIFT 0
303#define XT2000_SYSLED_LEDON_MASK 0x00000001
304
305/* WRPROT (write protect) bit fields (0=writable, 1=write-protected [default]): */
306
307/* Flash write protect: */
308#define XT2000_WRPROT_FLWP_SHIFT 0
309#define XT2000_WRPROT_FLWP_MASK 0x00000001
310/* Reserved but present write protect bits: */
311#define XT2000_WRPROT_WRP_SHIFT 1
312#define XT2000_WRPROT_WRP_BITS 7
313#define XT2000_WRPROT_WRP_MASK 0x000000FE
314
315/* SWRST (software reset; allows s/w to generate power-on equivalent reset): */
316
317/* Software reset bits: */
318#define XT2000_SWRST_SWR_SHIFT 0
319#define XT2000_SWRST_SWR_BITS 16
320#define XT2000_SWRST_SWR_MASK 0x0000FFFF
321/* Software reset value -- writing this value resets the board: */
322#define XT2000_SWRST_RESETVALUE 0x0000DEAD
323
324/* SYSRST (system reset; controls reset of individual peripherals): */
325
326/* All-device reset: */
327#define XT2000_SYSRST_ALL_SHIFT 0
328#define XT2000_SYSRST_ALL_BITS 4
329#define XT2000_SYSRST_ALL_MASK 0x0000000F
330/* HDSP-2534 LED display reset (1=reset, 0=nothing): */
331#define XT2000_SYSRST_LED_SHIFT 0
332#define XT2000_SYSRST_LED_MASK 0x00000001
333/* Sonic DP83934 Ethernet controller reset (1=reset, 0=nothing): */
334#define XT2000_SYSRST_SONIC_SHIFT 1
335#define XT2000_SYSRST_SONIC_MASK 0x00000002
336/* DP16552 DUART reset (1=reset, 0=nothing): */
337#define XT2000_SYSRST_DUART_SHIFT 2
338#define XT2000_SYSRST_DUART_MASK 0x00000004
339/* V3 V320 PCI bridge controller reset (1=reset, 0=nothing): */
340#define XT2000_SYSRST_V3_SHIFT 3
341#define XT2000_SYSRST_V3_MASK 0x00000008
342
343/* IMASK (interrupt mask; 0=disable, 1=enable): */
344/* ISTAT (interrupt status; 0=inactive, 1=pending): */
345
346/* PCI INTP interrupt: */
347#define XT2000_INTMUX_PCI_INTP_SHIFT 2
348#define XT2000_INTMUX_PCI_INTP_MASK 0x00000004
349/* PCI INTS interrupt: */
350#define XT2000_INTMUX_PCI_INTS_SHIFT 3
351#define XT2000_INTMUX_PCI_INTS_MASK 0x00000008
352/* PCI INTD interrupt: */
353#define XT2000_INTMUX_PCI_INTD_SHIFT 4
354#define XT2000_INTMUX_PCI_INTD_MASK 0x00000010
355/* V320 PCI controller interrupt: */
356#define XT2000_INTMUX_V3_SHIFT 5
357#define XT2000_INTMUX_V3_MASK 0x00000020
358/* PCI ENUM interrupt: */
359#define XT2000_INTMUX_PCI_ENUM_SHIFT 6
360#define XT2000_INTMUX_PCI_ENUM_MASK 0x00000040
361/* PCI DEG interrupt: */
362#define XT2000_INTMUX_PCI_DEG_SHIFT 7
363#define XT2000_INTMUX_PCI_DEG_MASK 0x00000080
364
365/* V3CFG (V3 config, V320 PCI controller): */
366
367/* V3 address control (0=pass-thru, 1=V3 address bits 31:28 set to 4'b0001 [default]): */
368#define XT2000_V3CFG_V3ADC_SHIFT 0
369#define XT2000_V3CFG_V3ADC_MASK 0x00000001
370
371/* I2C Devices */
372
373#define XT2000_I2C_RTC_ID 0x68
374#define XT2000_I2C_NVRAM0_ID 0x56 /* 1st 256 byte block */
375#define XT2000_I2C_NVRAM1_ID 0x57 /* 2nd 256 byte block */
376
377/* NVRAM Board Info structure: */
378
379#define XT2000_NVRAM_SIZE 512
380
381#define XT2000_NVRAM_BINFO_START 0x100
382#define XT2000_NVRAM_BINFO_SIZE 0x20
383#define XT2000_NVRAM_BINFO_VERSION 0x10 /* version 1.0 */
384#if 0
385#define XT2000_NVRAM_BINFO_VERSION_OFFSET 0x00
386#define XT2000_NVRAM_BINFO_VERSION_SIZE 0x1
387#define XT2000_NVRAM_BINFO_ETH_ADDR_OFFSET 0x02
388#define XT2000_NVRAM_BINFO_ETH_ADDR_SIZE 0x6
389#define XT2000_NVRAM_BINFO_SN_OFFSET 0x10
390#define XT2000_NVRAM_BINFO_SN_SIZE 0xE
391#define XT2000_NVRAM_BINFO_CRC_OFFSET 0x1E
392#define XT2000_NVRAM_BINFO_CRC_SIZE 0x2
393#endif /*0*/
394
395#if !defined(__ASSEMBLY__) && !defined(_NOCLANGUAGE)
396typedef struct xt2000_nvram_binfo {
397 unsigned char version;
398 unsigned char reserved1;
399 unsigned char eth_addr[6];
400 unsigned char reserved8[8];
401 unsigned char serialno[14];
402 unsigned char crc[2]; /* 16-bit CRC */
403} xt2000_nvram_binfo;
404#endif /*!__ASSEMBLY__ && !_NOCLANGUAGE*/
405
406
407#endif /*_INC_XT2000_H_*/
408
diff --git a/include/asm-xtensa/xtensa/xtboard.h b/include/asm-xtensa/xtensa/xtboard.h
deleted file mode 100644
index 22469c175307..000000000000
--- a/include/asm-xtensa/xtensa/xtboard.h
+++ /dev/null
@@ -1,120 +0,0 @@
1#ifndef _xtboard_h_included_
2#define _xtboard_h_included_
3
4/*
5 * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND
6 *
7 * xtboard.h -- Routines for getting useful information from the board.
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details.
12 *
13 * Copyright (C) 2002 Tensilica Inc.
14 */
15
16
17#include <xtensa/xt2000.h>
18
19#define XTBOARD_RTC_ERROR -1
20#define XTBOARD_RTC_STOPPED -2
21
22
23/* xt2000-i2cdev.c: */
24typedef void XtboardDelayFunc( unsigned );
25extern XtboardDelayFunc* xtboard_set_nsdelay_func( XtboardDelayFunc *delay_fn );
26extern int xtboard_i2c_read (unsigned id, unsigned char *buf, unsigned addr, unsigned size);
27extern int xtboard_i2c_write(unsigned id, unsigned char *buf, unsigned addr, unsigned size);
28extern int xtboard_i2c_wait_nvram_ack(unsigned id, unsigned swtimer);
29
30/* xtboard.c: */
31extern int xtboard_nvram_read (unsigned addr, unsigned len, unsigned char *buf);
32extern int xtboard_nvram_write(unsigned addr, unsigned len, unsigned char *buf);
33extern int xtboard_nvram_binfo_read (xt2000_nvram_binfo *buf);
34extern int xtboard_nvram_binfo_write(xt2000_nvram_binfo *buf);
35extern int xtboard_nvram_binfo_valid(xt2000_nvram_binfo *buf);
36extern int xtboard_ethermac_get(unsigned char *buf);
37extern int xtboard_ethermac_set(unsigned char *buf);
38
39/*+*----------------------------------------------------------------------------
40/ Function: xtboard_get_rtc_time
41/
42/ Description: Get time stored in real-time clock.
43/
44/ Returns: time in seconds stored in real-time clock.
45/-**----------------------------------------------------------------------------*/
46
47extern unsigned xtboard_get_rtc_time(void);
48
49/*+*----------------------------------------------------------------------------
50/ Function: xtboard_set_rtc_time
51/
52/ Description: Set time stored in real-time clock.
53/
54/ Parameters: time -- time in seconds to store to real-time clock
55/
56/ Returns: 0 on success, xtboard_i2c_write() error code otherwise.
57/-**----------------------------------------------------------------------------*/
58
59extern int xtboard_set_rtc_time(unsigned time);
60
61
62/* xtfreq.c: */
63/*+*----------------------------------------------------------------------------
64/ Function: xtboard_measure_sys_clk
65/
66/ Description: Get frequency of system clock.
67/
68/ Parameters: none
69/
70/ Returns: frequency of system clock.
71/-**----------------------------------------------------------------------------*/
72
73extern unsigned xtboard_measure_sys_clk(void);
74
75
76#if 0 /* old stuff from xtboard.c: */
77
78/*+*----------------------------------------------------------------------------
79/ Function: xtboard_nvram valid
80/
81/ Description: Determines if data in NVRAM is valid.
82/
83/ Parameters: delay -- 10us delay function
84/
85/ Returns: 1 if NVRAM is valid, 0 otherwise
86/-**----------------------------------------------------------------------------*/
87
88extern unsigned xtboard_nvram_valid(void (*delay)( void ));
89
90/*+*----------------------------------------------------------------------------
91/ Function: xtboard_get_nvram_contents
92/
93/ Description: Returns contents of NVRAM.
94/
95/ Parameters: buf -- buffer to NVRAM contents.
96/ delay -- 10us delay function
97/
98/ Returns: 1 if NVRAM is valid, 0 otherwise
99/-**----------------------------------------------------------------------------*/
100
101extern unsigned xtboard_get_nvram_contents(unsigned char *buf, void (*delay)( void ));
102
103/*+*----------------------------------------------------------------------------
104/ Function: xtboard_get_ether_addr
105/
106/ Description: Returns ethernet address of board.
107/
108/ Parameters: buf -- buffer to store ethernet address
109/ delay -- 10us delay function
110/
111/ Returns: nothing.
112/-**----------------------------------------------------------------------------*/
113
114extern void xtboard_get_ether_addr(unsigned char *buf, void (*delay)( void ));
115
116#endif /*0*/
117
118
119#endif /*_xtboard_h_included_*/
120
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index e618b25b5add..a1b04d8a1d01 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -61,7 +61,6 @@ header-y += fd.h
61header-y += fdreg.h 61header-y += fdreg.h
62header-y += fib_rules.h 62header-y += fib_rules.h
63header-y += fuse.h 63header-y += fuse.h
64header-y += futex.h
65header-y += genetlink.h 64header-y += genetlink.h
66header-y += gen_stats.h 65header-y += gen_stats.h
67header-y += gigaset_dev.h 66header-y += gigaset_dev.h
@@ -203,6 +202,7 @@ unifdef-y += fb.h
203unifdef-y += fcntl.h 202unifdef-y += fcntl.h
204unifdef-y += filter.h 203unifdef-y += filter.h
205unifdef-y += flat.h 204unifdef-y += flat.h
205unifdef-y += futex.h
206unifdef-y += fs.h 206unifdef-y += fs.h
207unifdef-y += gameport.h 207unifdef-y += gameport.h
208unifdef-y += generic_serial.h 208unifdef-y += generic_serial.h
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 3372ec6bf53a..a30ef13c9e62 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -105,6 +105,7 @@ struct kiocb {
105 wait_queue_t ki_wait; 105 wait_queue_t ki_wait;
106 loff_t ki_pos; 106 loff_t ki_pos;
107 107
108 atomic_t ki_bio_count; /* num bio used for this iocb */
108 void *private; 109 void *private;
109 /* State that we remember to be able to restart/retry */ 110 /* State that we remember to be able to restart/retry */
110 unsigned short ki_opcode; 111 unsigned short ki_opcode;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 092dbd0e7658..08daf3272c02 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -309,6 +309,7 @@ extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
309 gfp_t); 309 gfp_t);
310extern void bio_set_pages_dirty(struct bio *bio); 310extern void bio_set_pages_dirty(struct bio *bio);
311extern void bio_check_pages_dirty(struct bio *bio); 311extern void bio_check_pages_dirty(struct bio *bio);
312extern void bio_release_pages(struct bio *bio);
312extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int); 313extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int);
313extern int bio_uncopy_user(struct bio *); 314extern int bio_uncopy_user(struct bio *);
314void zero_fill_bio(struct bio *bio); 315void zero_fill_bio(struct bio *bio);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e1c7286165ff..ea330d7b46c0 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -342,7 +342,6 @@ typedef void (unplug_fn) (request_queue_t *);
342 342
343struct bio_vec; 343struct bio_vec;
344typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *); 344typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
345typedef void (activity_fn) (void *data, int rw);
346typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *); 345typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
347typedef void (prepare_flush_fn) (request_queue_t *, struct request *); 346typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
348typedef void (softirq_done_fn)(struct request *); 347typedef void (softirq_done_fn)(struct request *);
@@ -384,7 +383,6 @@ struct request_queue
384 prep_rq_fn *prep_rq_fn; 383 prep_rq_fn *prep_rq_fn;
385 unplug_fn *unplug_fn; 384 unplug_fn *unplug_fn;
386 merge_bvec_fn *merge_bvec_fn; 385 merge_bvec_fn *merge_bvec_fn;
387 activity_fn *activity_fn;
388 issue_flush_fn *issue_flush_fn; 386 issue_flush_fn *issue_flush_fn;
389 prepare_flush_fn *prepare_flush_fn; 387 prepare_flush_fn *prepare_flush_fn;
390 softirq_done_fn *softirq_done_fn; 388 softirq_done_fn *softirq_done_fn;
@@ -411,8 +409,6 @@ struct request_queue
411 */ 409 */
412 void *queuedata; 410 void *queuedata;
413 411
414 void *activity_data;
415
416 /* 412 /*
417 * queue needs bounce pages for pages above this limit 413 * queue needs bounce pages for pages above this limit
418 */ 414 */
@@ -677,7 +673,6 @@ extern void blk_sync_queue(struct request_queue *q);
677extern void __blk_stop_queue(request_queue_t *q); 673extern void __blk_stop_queue(request_queue_t *q);
678extern void blk_run_queue(request_queue_t *); 674extern void blk_run_queue(request_queue_t *);
679extern void blk_start_queueing(request_queue_t *); 675extern void blk_start_queueing(request_queue_t *);
680extern void blk_queue_activity_fn(request_queue_t *, activity_fn *, void *);
681extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned long); 676extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned long);
682extern int blk_rq_unmap_user(struct request *); 677extern int blk_rq_unmap_user(struct request *);
683extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t); 678extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t);
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index d852024ed095..1622d23a8dc3 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -159,7 +159,7 @@ static inline s64 cyc2ns(struct clocksource *cs, cycle_t cycles)
159 * Unless you're the timekeeping code, you should not be using this! 159 * Unless you're the timekeeping code, you should not be using this!
160 */ 160 */
161static inline void clocksource_calculate_interval(struct clocksource *c, 161static inline void clocksource_calculate_interval(struct clocksource *c,
162 unsigned long length_nsec) 162 unsigned long length_nsec)
163{ 163{
164 u64 tmp; 164 u64 tmp;
165 165
diff --git a/include/linux/coda_linux.h b/include/linux/coda_linux.h
index be512cc98791..4c2632a8d31b 100644
--- a/include/linux/coda_linux.h
+++ b/include/linux/coda_linux.h
@@ -64,7 +64,7 @@ void coda_sysctl_clean(void);
64 64
65#define CODA_ALLOC(ptr, cast, size) do { \ 65#define CODA_ALLOC(ptr, cast, size) do { \
66 if (size < PAGE_SIZE) \ 66 if (size < PAGE_SIZE) \
67 ptr = (cast)kmalloc((unsigned long) size, GFP_KERNEL); \ 67 ptr = kmalloc((unsigned long) size, GFP_KERNEL); \
68 else \ 68 else \
69 ptr = (cast)vmalloc((unsigned long) size); \ 69 ptr = (cast)vmalloc((unsigned long) size); \
70 if (!ptr) \ 70 if (!ptr) \
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 538423d4a865..aca66984aafd 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -40,7 +40,7 @@ extern void __chk_io_ptr(void __iomem *);
40#error no compiler-gcc.h file for this gcc version 40#error no compiler-gcc.h file for this gcc version
41#elif __GNUC__ == 4 41#elif __GNUC__ == 4
42# include <linux/compiler-gcc4.h> 42# include <linux/compiler-gcc4.h>
43#elif __GNUC__ == 3 43#elif __GNUC__ == 3 && __GNUC_MINOR__ >= 2
44# include <linux/compiler-gcc3.h> 44# include <linux/compiler-gcc3.h>
45#else 45#else
46# error Sorry, your compiler is too old/not recognized. 46# error Sorry, your compiler is too old/not recognized.
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index a7f015027535..fef6f3d0a4a7 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -160,31 +160,6 @@ struct configfs_group_operations {
160 void (*drop_item)(struct config_group *group, struct config_item *item); 160 void (*drop_item)(struct config_group *group, struct config_item *item);
161}; 161};
162 162
163
164
165/**
166 * Use these macros to make defining attributes easier. See include/linux/device.h
167 * for examples..
168 */
169
170#if 0
171#define __ATTR(_name,_mode,_show,_store) { \
172 .attr = {.ca_name = __stringify(_name), .ca_mode = _mode, .ca_owner = THIS_MODULE }, \
173 .show = _show, \
174 .store = _store, \
175}
176
177#define __ATTR_RO(_name) { \
178 .attr = { .ca_name = __stringify(_name), .ca_mode = 0444, .ca_owner = THIS_MODULE }, \
179 .show = _name##_show, \
180}
181
182#define __ATTR_NULL { .attr = { .name = NULL } }
183
184#define attr_name(_attr) (_attr).attr.name
185#endif
186
187
188struct configfs_subsystem { 163struct configfs_subsystem {
189 struct config_group su_group; 164 struct config_group su_group;
190 struct semaphore su_sem; 165 struct semaphore su_sem;
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 4ea39fee99c7..7f008f6bfdc3 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -172,6 +172,8 @@ extern int __cpufreq_driver_target(struct cpufreq_policy *policy,
172 unsigned int relation); 172 unsigned int relation);
173 173
174 174
175extern int cpufreq_driver_getavg(struct cpufreq_policy *policy);
176
175int cpufreq_register_governor(struct cpufreq_governor *governor); 177int cpufreq_register_governor(struct cpufreq_governor *governor);
176void cpufreq_unregister_governor(struct cpufreq_governor *governor); 178void cpufreq_unregister_governor(struct cpufreq_governor *governor);
177 179
@@ -204,6 +206,7 @@ struct cpufreq_driver {
204 unsigned int (*get) (unsigned int cpu); 206 unsigned int (*get) (unsigned int cpu);
205 207
206 /* optional */ 208 /* optional */
209 unsigned int (*getavg) (unsigned int cpu);
207 int (*exit) (struct cpufreq_policy *policy); 210 int (*exit) (struct cpufreq_policy *policy);
208 int (*suspend) (struct cpufreq_policy *policy, pm_message_t pmsg); 211 int (*suspend) (struct cpufreq_policy *policy, pm_message_t pmsg);
209 int (*resume) (struct cpufreq_policy *policy); 212 int (*resume) (struct cpufreq_policy *policy);
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 8821e1f75b44..826b15e914e2 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -30,10 +30,19 @@ void cpuset_update_task_memory_state(void);
30 nodes_subset((nodes), current->mems_allowed) 30 nodes_subset((nodes), current->mems_allowed)
31int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl); 31int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl);
32 32
33extern int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask); 33extern int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask);
34static int inline cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) 34extern int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask);
35
36static int inline cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
37{
38 return number_of_cpusets <= 1 ||
39 __cpuset_zone_allowed_softwall(z, gfp_mask);
40}
41
42static int inline cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
35{ 43{
36 return number_of_cpusets <= 1 || __cpuset_zone_allowed(z, gfp_mask); 44 return number_of_cpusets <= 1 ||
45 __cpuset_zone_allowed_hardwall(z, gfp_mask);
37} 46}
38 47
39extern int cpuset_excl_nodes_overlap(const struct task_struct *p); 48extern int cpuset_excl_nodes_overlap(const struct task_struct *p);
@@ -94,7 +103,12 @@ static inline int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
94 return 1; 103 return 1;
95} 104}
96 105
97static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) 106static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
107{
108 return 1;
109}
110
111static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
98{ 112{
99 return 1; 113 return 1;
100} 114}
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index ed6cc8962d87..1cb054bd93f2 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -176,20 +176,20 @@ enum {
176}; 176};
177 177
178/* DCCP features (RFC 4340 section 6.4) */ 178/* DCCP features (RFC 4340 section 6.4) */
179 enum { 179enum {
180 DCCPF_RESERVED = 0, 180 DCCPF_RESERVED = 0,
181 DCCPF_CCID = 1, 181 DCCPF_CCID = 1,
182 DCCPF_SHORT_SEQNOS = 2, /* XXX: not yet implemented */ 182 DCCPF_SHORT_SEQNOS = 2, /* XXX: not yet implemented */
183 DCCPF_SEQUENCE_WINDOW = 3, 183 DCCPF_SEQUENCE_WINDOW = 3,
184 DCCPF_ECN_INCAPABLE = 4, /* XXX: not yet implemented */ 184 DCCPF_ECN_INCAPABLE = 4, /* XXX: not yet implemented */
185 DCCPF_ACK_RATIO = 5, 185 DCCPF_ACK_RATIO = 5,
186 DCCPF_SEND_ACK_VECTOR = 6, 186 DCCPF_SEND_ACK_VECTOR = 6,
187 DCCPF_SEND_NDP_COUNT = 7, 187 DCCPF_SEND_NDP_COUNT = 7,
188 DCCPF_MIN_CSUM_COVER = 8, 188 DCCPF_MIN_CSUM_COVER = 8,
189 DCCPF_DATA_CHECKSUM = 9, /* XXX: not yet implemented */ 189 DCCPF_DATA_CHECKSUM = 9, /* XXX: not yet implemented */
190 /* 10-127 reserved */ 190 /* 10-127 reserved */
191 DCCPF_MIN_CCID_SPECIFIC = 128, 191 DCCPF_MIN_CCID_SPECIFIC = 128,
192 DCCPF_MAX_CCID_SPECIFIC = 255, 192 DCCPF_MAX_CCID_SPECIFIC = 255,
193}; 193};
194 194
195/* this structure is argument to DCCP_SOCKOPT_CHANGE_X */ 195/* this structure is argument to DCCP_SOCKOPT_CHANGE_X */
@@ -427,7 +427,7 @@ struct dccp_service_list {
427}; 427};
428 428
429#define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1) 429#define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1)
430#define DCCP_SERVICE_CODE_IS_ABSENT 0 430#define DCCP_SERVICE_CODE_IS_ABSENT 0
431 431
432static inline int dccp_list_has_service(const struct dccp_service_list *sl, 432static inline int dccp_list_has_service(const struct dccp_service_list *sl,
433 const __be32 service) 433 const __be32 service)
@@ -436,7 +436,7 @@ static inline int dccp_list_has_service(const struct dccp_service_list *sl,
436 u32 i = sl->dccpsl_nr; 436 u32 i = sl->dccpsl_nr;
437 while (i--) 437 while (i--)
438 if (sl->dccpsl_list[i] == service) 438 if (sl->dccpsl_list[i] == service)
439 return 1; 439 return 1;
440 } 440 }
441 return 0; 441 return 0;
442} 442}
@@ -511,7 +511,7 @@ struct dccp_sock {
511 __u8 dccps_hc_tx_insert_options:1; 511 __u8 dccps_hc_tx_insert_options:1;
512 struct timer_list dccps_xmit_timer; 512 struct timer_list dccps_xmit_timer;
513}; 513};
514 514
515static inline struct dccp_sock *dccp_sk(const struct sock *sk) 515static inline struct dccp_sock *dccp_sk(const struct sock *sk)
516{ 516{
517 return (struct dccp_sock *)sk; 517 return (struct dccp_sock *)sk;
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 6fe56aaa6685..64177ec9a019 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -929,8 +929,6 @@ extern void fb_bl_default_curve(struct fb_info *fb_info, u8 off, u8 min, u8 max)
929#define FB_MODE_IS_FIRST 16 929#define FB_MODE_IS_FIRST 16
930#define FB_MODE_IS_FROM_VAR 32 930#define FB_MODE_IS_FROM_VAR 32
931 931
932extern int fbmon_valid_timings(u_int pixclock, u_int htotal, u_int vtotal,
933 const struct fb_info *fb_info);
934extern int fbmon_dpms(const struct fb_info *fb_info); 932extern int fbmon_dpms(const struct fb_info *fb_info);
935extern int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var, 933extern int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var,
936 struct fb_info *info); 934 struct fb_info *info);
diff --git a/include/linux/file.h b/include/linux/file.h
index 6e77b9177f9e..edca361f2ab4 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -26,19 +26,12 @@ struct embedded_fd_set {
26 unsigned long fds_bits[1]; 26 unsigned long fds_bits[1];
27}; 27};
28 28
29/*
30 * More than this number of fds: we use a separately allocated fd_set
31 */
32#define EMBEDDED_FD_SET_SIZE (BITS_PER_BYTE * sizeof(struct embedded_fd_set))
33
34struct fdtable { 29struct fdtable {
35 unsigned int max_fds; 30 unsigned int max_fds;
36 int max_fdset;
37 struct file ** fd; /* current fd array */ 31 struct file ** fd; /* current fd array */
38 fd_set *close_on_exec; 32 fd_set *close_on_exec;
39 fd_set *open_fds; 33 fd_set *open_fds;
40 struct rcu_head rcu; 34 struct rcu_head rcu;
41 struct files_struct *free_files;
42 struct fdtable *next; 35 struct fdtable *next;
43}; 36};
44 37
@@ -83,14 +76,8 @@ extern int get_unused_fd(void);
83extern void FASTCALL(put_unused_fd(unsigned int fd)); 76extern void FASTCALL(put_unused_fd(unsigned int fd));
84struct kmem_cache; 77struct kmem_cache;
85 78
86extern struct file ** alloc_fd_array(int);
87extern void free_fd_array(struct file **, int);
88
89extern fd_set *alloc_fdset(int);
90extern void free_fdset(fd_set *, int);
91
92extern int expand_files(struct files_struct *, int nr); 79extern int expand_files(struct files_struct *, int nr);
93extern void free_fdtable(struct fdtable *fdt); 80extern void free_fdtable_rcu(struct rcu_head *rcu);
94extern void __init files_defer_init(void); 81extern void __init files_defer_init(void);
95 82
96static inline struct file * fcheck_files(struct files_struct *files, unsigned int fd) 83static inline struct file * fcheck_files(struct files_struct *files, unsigned int fd)
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 6e05e3e7ce39..5e75e26d4787 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -1,5 +1,7 @@
1/* Freezer declarations */ 1/* Freezer declarations */
2 2
3#include <linux/sched.h>
4
3#ifdef CONFIG_PM 5#ifdef CONFIG_PM
4/* 6/*
5 * Check if a process has been frozen 7 * Check if a process has been frozen
@@ -14,16 +16,15 @@ static inline int frozen(struct task_struct *p)
14 */ 16 */
15static inline int freezing(struct task_struct *p) 17static inline int freezing(struct task_struct *p)
16{ 18{
17 return p->flags & PF_FREEZE; 19 return test_tsk_thread_flag(p, TIF_FREEZE);
18} 20}
19 21
20/* 22/*
21 * Request that a process be frozen 23 * Request that a process be frozen
22 * FIXME: SMP problem. We may not modify other process' flags!
23 */ 24 */
24static inline void freeze(struct task_struct *p) 25static inline void freeze(struct task_struct *p)
25{ 26{
26 p->flags |= PF_FREEZE; 27 set_tsk_thread_flag(p, TIF_FREEZE);
27} 28}
28 29
29/* 30/*
@@ -31,7 +32,7 @@ static inline void freeze(struct task_struct *p)
31 */ 32 */
32static inline void do_not_freeze(struct task_struct *p) 33static inline void do_not_freeze(struct task_struct *p)
33{ 34{
34 p->flags &= ~PF_FREEZE; 35 clear_tsk_thread_flag(p, TIF_FREEZE);
35} 36}
36 37
37/* 38/*
@@ -52,7 +53,9 @@ static inline int thaw_process(struct task_struct *p)
52 */ 53 */
53static inline void frozen_process(struct task_struct *p) 54static inline void frozen_process(struct task_struct *p)
54{ 55{
55 p->flags = (p->flags & ~PF_FREEZE) | PF_FROZEN; 56 p->flags |= PF_FROZEN;
57 wmb();
58 clear_tsk_thread_flag(p, TIF_FREEZE);
56} 59}
57 60
58extern void refrigerator(void); 61extern void refrigerator(void);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index adce6e1d70c2..186da813541e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -120,6 +120,7 @@ extern int dir_notify_enable;
120#define MS_PRIVATE (1<<18) /* change to private */ 120#define MS_PRIVATE (1<<18) /* change to private */
121#define MS_SLAVE (1<<19) /* change to slave */ 121#define MS_SLAVE (1<<19) /* change to slave */
122#define MS_SHARED (1<<20) /* change to shared */ 122#define MS_SHARED (1<<20) /* change to shared */
123#define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. */
123#define MS_ACTIVE (1<<30) 124#define MS_ACTIVE (1<<30)
124#define MS_NOUSER (1<<31) 125#define MS_NOUSER (1<<31)
125 126
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index 3da29e2d524a..abb64c437f6f 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -19,6 +19,7 @@
19#define _FSL_DEVICE_H_ 19#define _FSL_DEVICE_H_
20 20
21#include <linux/types.h> 21#include <linux/types.h>
22#include <linux/phy.h>
22 23
23/* 24/*
24 * Some conventions on how we handle peripherals on Freescale chips 25 * Some conventions on how we handle peripherals on Freescale chips
diff --git a/include/linux/futex.h b/include/linux/futex.h
index d097b5b72bc6..3f153b4e156c 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -93,6 +93,7 @@ struct robust_list_head {
93 */ 93 */
94#define ROBUST_LIST_LIMIT 2048 94#define ROBUST_LIST_LIMIT 2048
95 95
96#ifdef __KERNEL__
96long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout, 97long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout,
97 u32 __user *uaddr2, u32 val2, u32 val3); 98 u32 __user *uaddr2, u32 val2, u32 val3);
98 99
@@ -110,6 +111,7 @@ static inline void exit_pi_state_list(struct task_struct *curr)
110{ 111{
111} 112}
112#endif 113#endif
114#endif /* __KERNEL__ */
113 115
114#define FUTEX_OP_SET 0 /* *(int *)UADDR2 = OPARG; */ 116#define FUTEX_OP_SET 0 /* *(int *)UADDR2 = OPARG; */
115#define FUTEX_OP_ADD 1 /* *(int *)UADDR2 += OPARG; */ 117#define FUTEX_OP_ADD 1 /* *(int *)UADDR2 += OPARG; */
diff --git a/include/linux/gameport.h b/include/linux/gameport.h
index 2cdba0c23957..afad95272841 100644
--- a/include/linux/gameport.h
+++ b/include/linux/gameport.h
@@ -105,7 +105,7 @@ static inline void gameport_set_phys(struct gameport *gameport,
105 105
106static inline struct gameport *gameport_allocate_port(void) 106static inline struct gameport *gameport_allocate_port(void)
107{ 107{
108 struct gameport *gameport = kcalloc(1, sizeof(struct gameport), GFP_KERNEL); 108 struct gameport *gameport = kzalloc(sizeof(struct gameport), GFP_KERNEL);
109 109
110 return gameport; 110 return gameport;
111} 111}
diff --git a/include/linux/i2c-algo-bit.h b/include/linux/i2c-algo-bit.h
index c8f8df25c7e0..937da70cb4c4 100644
--- a/include/linux/i2c-algo-bit.h
+++ b/include/linux/i2c-algo-bit.h
@@ -26,9 +26,9 @@
26 26
27/* --- Defines for bit-adapters --------------------------------------- */ 27/* --- Defines for bit-adapters --------------------------------------- */
28/* 28/*
29 * This struct contains the hw-dependent functions of bit-style adapters to 29 * This struct contains the hw-dependent functions of bit-style adapters to
30 * manipulate the line states, and to init any hw-specific features. This is 30 * manipulate the line states, and to init any hw-specific features. This is
31 * only used if you have more than one hw-type of adapter running. 31 * only used if you have more than one hw-type of adapter running.
32 */ 32 */
33struct i2c_algo_bit_data { 33struct i2c_algo_bit_data {
34 void *data; /* private data for lowlevel routines */ 34 void *data; /* private data for lowlevel routines */
@@ -44,6 +44,5 @@ struct i2c_algo_bit_data {
44}; 44};
45 45
46int i2c_bit_add_bus(struct i2c_adapter *); 46int i2c_bit_add_bus(struct i2c_adapter *);
47int i2c_bit_del_bus(struct i2c_adapter *);
48 47
49#endif /* _LINUX_I2C_ALGO_BIT_H */ 48#endif /* _LINUX_I2C_ALGO_BIT_H */
diff --git a/include/linux/i2c-algo-ite.h b/include/linux/i2c-algo-ite.h
deleted file mode 100644
index 0073fe96c76e..000000000000
--- a/include/linux/i2c-algo-ite.h
+++ /dev/null
@@ -1,72 +0,0 @@
1/* ------------------------------------------------------------------------- */
2/* i2c-algo-ite.h i2c driver algorithms for ITE IIC adapters */
3/* ------------------------------------------------------------------------- */
4/* Copyright (C) 1995-97 Simon G. Vogl
5 1998-99 Hans Berglund
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
20/* ------------------------------------------------------------------------- */
21
22/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even
23 Frodo Looijaard <frodol@dds.nl> */
24
25/* Modifications by MontaVista Software, 2001
26 Changes made to support the ITE IIC peripheral */
27
28
29#ifndef I2C_ALGO_ITE_H
30#define I2C_ALGO_ITE_H 1
31
32#include <linux/types.h>
33
34/* Example of a sequential read request:
35 struct i2c_iic_msg s_msg;
36
37 s_msg.addr=device_address;
38 s_msg.len=length;
39 s_msg.buf=buffer;
40 s_msg.waddr=word_address;
41 ioctl(file,I2C_SREAD, &s_msg);
42 */
43#define I2C_SREAD 0x780 /* SREAD ioctl command */
44
45struct i2c_iic_msg {
46 __u16 addr; /* device address */
47 __u16 waddr; /* word address */
48 short len; /* msg length */
49 char *buf; /* pointer to msg data */
50};
51
52#ifdef __KERNEL__
53struct i2c_adapter;
54
55struct i2c_algo_iic_data {
56 void *data; /* private data for lolevel routines */
57 void (*setiic) (void *data, int ctl, int val);
58 int (*getiic) (void *data, int ctl);
59 int (*getown) (void *data);
60 int (*getclock) (void *data);
61 void (*waitforpin) (void);
62
63 /* local settings */
64 int udelay;
65 int mdelay;
66 int timeout;
67};
68
69int i2c_iic_add_bus(struct i2c_adapter *);
70int i2c_iic_del_bus(struct i2c_adapter *);
71#endif /* __KERNEL__ */
72#endif /* I2C_ALGO_ITE_H */
diff --git a/include/linux/i2c-algo-pca.h b/include/linux/i2c-algo-pca.h
index 226693e0d88b..fce47c051bb1 100644
--- a/include/linux/i2c-algo-pca.h
+++ b/include/linux/i2c-algo-pca.h
@@ -10,6 +10,5 @@ struct i2c_algo_pca_data {
10}; 10};
11 11
12int i2c_pca_add_bus(struct i2c_adapter *); 12int i2c_pca_add_bus(struct i2c_adapter *);
13int i2c_pca_del_bus(struct i2c_adapter *);
14 13
15#endif /* _LINUX_I2C_ALGO_PCA_H */ 14#endif /* _LINUX_I2C_ALGO_PCA_H */
diff --git a/include/linux/i2c-algo-pcf.h b/include/linux/i2c-algo-pcf.h
index 9908f3fc4839..994eb86f882c 100644
--- a/include/linux/i2c-algo-pcf.h
+++ b/include/linux/i2c-algo-pcf.h
@@ -31,7 +31,7 @@ struct i2c_algo_pcf_data {
31 int (*getpcf) (void *data, int ctl); 31 int (*getpcf) (void *data, int ctl);
32 int (*getown) (void *data); 32 int (*getown) (void *data);
33 int (*getclock) (void *data); 33 int (*getclock) (void *data);
34 void (*waitforpin) (void); 34 void (*waitforpin) (void);
35 35
36 /* local settings */ 36 /* local settings */
37 int udelay; 37 int udelay;
@@ -39,6 +39,5 @@ struct i2c_algo_pcf_data {
39}; 39};
40 40
41int i2c_pcf_add_bus(struct i2c_adapter *); 41int i2c_pcf_add_bus(struct i2c_adapter *);
42int i2c_pcf_del_bus(struct i2c_adapter *);
43 42
44#endif /* _LINUX_I2C_ALGO_PCF_H */ 43#endif /* _LINUX_I2C_ALGO_PCF_H */
diff --git a/include/linux/i2c-algo-sgi.h b/include/linux/i2c-algo-sgi.h
index 4a0113d64064..3b7715024e69 100644
--- a/include/linux/i2c-algo-sgi.h
+++ b/include/linux/i2c-algo-sgi.h
@@ -22,6 +22,5 @@ struct i2c_algo_sgi_data {
22}; 22};
23 23
24int i2c_sgi_add_bus(struct i2c_adapter *); 24int i2c_sgi_add_bus(struct i2c_adapter *);
25int i2c_sgi_del_bus(struct i2c_adapter *);
26 25
27#endif /* I2C_ALGO_SGI_H */ 26#endif /* I2C_ALGO_SGI_H */
diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h
index 9dc984b5f897..d38778f2fbec 100644
--- a/include/linux/i2c-id.h
+++ b/include/linux/i2c-id.h
@@ -1,7 +1,7 @@
1/* ------------------------------------------------------------------------- */ 1/* ------------------------------------------------------------------------- */
2/* */ 2/* */
3/* i2c-id.h - identifier values for i2c drivers and adapters */ 3/* i2c-id.h - identifier values for i2c drivers and adapters */
4/* */ 4/* */
5/* ------------------------------------------------------------------------- */ 5/* ------------------------------------------------------------------------- */
6/* Copyright (C) 1995-1999 Simon G. Vogl 6/* Copyright (C) 1995-1999 Simon G. Vogl
7 7
@@ -40,10 +40,10 @@
40#define I2C_DRIVERID_SAA7120 11 /* video encoder */ 40#define I2C_DRIVERID_SAA7120 11 /* video encoder */
41#define I2C_DRIVERID_SAA7121 12 /* video encoder */ 41#define I2C_DRIVERID_SAA7121 12 /* video encoder */
42#define I2C_DRIVERID_SAA7185B 13 /* video encoder */ 42#define I2C_DRIVERID_SAA7185B 13 /* video encoder */
43#define I2C_DRIVERID_CH7003 14 /* digital pc to tv encoder */ 43#define I2C_DRIVERID_CH7003 14 /* digital pc to tv encoder */
44#define I2C_DRIVERID_PCF8574A 15 /* i2c expander - 8 bit in/out */ 44#define I2C_DRIVERID_PCF8574A 15 /* i2c expander - 8 bit in/out */
45#define I2C_DRIVERID_PCF8582C 16 /* eeprom */ 45#define I2C_DRIVERID_PCF8582C 16 /* eeprom */
46#define I2C_DRIVERID_AT24Cxx 17 /* eeprom 1/2/4/8/16 K */ 46#define I2C_DRIVERID_AT24Cxx 17 /* eeprom 1/2/4/8/16 K */
47#define I2C_DRIVERID_TEA6300 18 /* audio mixer */ 47#define I2C_DRIVERID_TEA6300 18 /* audio mixer */
48#define I2C_DRIVERID_BT829 19 /* pc to tv encoder */ 48#define I2C_DRIVERID_BT829 19 /* pc to tv encoder */
49#define I2C_DRIVERID_TDA9850 20 /* audio mixer */ 49#define I2C_DRIVERID_TDA9850 20 /* audio mixer */
@@ -82,9 +82,8 @@
82#define I2C_DRIVERID_STM41T00 52 /* real time clock */ 82#define I2C_DRIVERID_STM41T00 52 /* real time clock */
83#define I2C_DRIVERID_UDA1342 53 /* UDA1342 audio codec */ 83#define I2C_DRIVERID_UDA1342 53 /* UDA1342 audio codec */
84#define I2C_DRIVERID_ADV7170 54 /* video encoder */ 84#define I2C_DRIVERID_ADV7170 54 /* video encoder */
85#define I2C_DRIVERID_RADEON 55 /* I2C bus on Radeon boards */
86#define I2C_DRIVERID_MAX1617 56 /* temp sensor */ 85#define I2C_DRIVERID_MAX1617 56 /* temp sensor */
87#define I2C_DRIVERID_SAA7191 57 /* video encoder */ 86#define I2C_DRIVERID_SAA7191 57 /* video decoder */
88#define I2C_DRIVERID_INDYCAM 58 /* SGI IndyCam */ 87#define I2C_DRIVERID_INDYCAM 58 /* SGI IndyCam */
89#define I2C_DRIVERID_BT832 59 /* CMOS camera video processor */ 88#define I2C_DRIVERID_BT832 59 /* CMOS camera video processor */
90#define I2C_DRIVERID_TDA9887 60 /* TDA988x IF-PLL demodulator */ 89#define I2C_DRIVERID_TDA9887 60 /* TDA988x IF-PLL demodulator */
@@ -132,7 +131,6 @@
132#define I2C_DRIVERID_ADM1021 1008 131#define I2C_DRIVERID_ADM1021 1008
133#define I2C_DRIVERID_ADM9240 1009 132#define I2C_DRIVERID_ADM9240 1009
134#define I2C_DRIVERID_LTC1710 1010 133#define I2C_DRIVERID_LTC1710 1010
135#define I2C_DRIVERID_ICSPLL 1012
136#define I2C_DRIVERID_BT869 1013 134#define I2C_DRIVERID_BT869 1013
137#define I2C_DRIVERID_MAXILIFE 1014 135#define I2C_DRIVERID_MAXILIFE 1014
138#define I2C_DRIVERID_MATORB 1015 136#define I2C_DRIVERID_MATORB 1015
@@ -158,12 +156,13 @@
158#define I2C_DRIVERID_ASB100 1043 156#define I2C_DRIVERID_ASB100 1043
159#define I2C_DRIVERID_FSCHER 1046 157#define I2C_DRIVERID_FSCHER 1046
160#define I2C_DRIVERID_W83L785TS 1047 158#define I2C_DRIVERID_W83L785TS 1047
159#define I2C_DRIVERID_OV7670 1048 /* Omnivision 7670 camera */
161 160
162/* 161/*
163 * ---- Adapter types ---------------------------------------------------- 162 * ---- Adapter types ----------------------------------------------------
164 */ 163 */
165 164
166/* --- Bit algorithm adapters */ 165/* --- Bit algorithm adapters */
167#define I2C_HW_B_LP 0x010000 /* Parallel port Philips style */ 166#define I2C_HW_B_LP 0x010000 /* Parallel port Philips style */
168#define I2C_HW_B_SER 0x010002 /* Serial line interface */ 167#define I2C_HW_B_SER 0x010002 /* Serial line interface */
169#define I2C_HW_B_BT848 0x010005 /* BT848 video boards */ 168#define I2C_HW_B_BT848 0x010005 /* BT848 video boards */
@@ -211,9 +210,6 @@
211/* --- MPC8xx PowerPC adapters */ 210/* --- MPC8xx PowerPC adapters */
212#define I2C_HW_MPC8XX_EPON 0x110000 /* Eponymous MPC8xx I2C adapter */ 211#define I2C_HW_MPC8XX_EPON 0x110000 /* Eponymous MPC8xx I2C adapter */
213 212
214/* --- ITE based algorithms */
215#define I2C_HW_I_IIC 0x080000 /* controller on the ITE */
216
217/* --- PowerPC on-chip adapters */ 213/* --- PowerPC on-chip adapters */
218#define I2C_HW_OCP 0x120000 /* IBM on-chip I2C adapter */ 214#define I2C_HW_OCP 0x120000 /* IBM on-chip I2C adapter */
219 215
@@ -249,6 +245,7 @@
249#define I2C_HW_SMBUS_OV518 0x04000f /* OV518(+) USB 1.1 webcam ICs */ 245#define I2C_HW_SMBUS_OV518 0x04000f /* OV518(+) USB 1.1 webcam ICs */
250#define I2C_HW_SMBUS_OV519 0x040010 /* OV519 USB 1.1 webcam IC */ 246#define I2C_HW_SMBUS_OV519 0x040010 /* OV519 USB 1.1 webcam IC */
251#define I2C_HW_SMBUS_OVFX2 0x040011 /* Cypress/OmniVision FX2 webcam */ 247#define I2C_HW_SMBUS_OVFX2 0x040011 /* Cypress/OmniVision FX2 webcam */
248#define I2C_HW_SMBUS_CAFE 0x040012 /* Marvell 88ALP01 "CAFE" cam */
252 249
253/* --- ISA pseudo-adapter */ 250/* --- ISA pseudo-adapter */
254#define I2C_HW_ISA 0x050000 251#define I2C_HW_ISA 0x050000
diff --git a/include/linux/i2c-pnx.h b/include/linux/i2c-pnx.h
new file mode 100644
index 000000000000..e6e9c814da61
--- /dev/null
+++ b/include/linux/i2c-pnx.h
@@ -0,0 +1,43 @@
1/*
2 * Header file for I2C support on PNX010x/4008.
3 *
4 * Author: Dennis Kovalev <dkovalev@ru.mvista.com>
5 *
6 * 2004-2006 (c) MontaVista Software, Inc. This file is licensed under
7 * the terms of the GNU General Public License version 2. This program
8 * is licensed "as is" without any warranty of any kind, whether express
9 * or implied.
10 */
11
12#ifndef __I2C_PNX_H__
13#define __I2C_PNX_H__
14
15#include <asm/arch/i2c.h>
16
17struct i2c_pnx_mif {
18 int ret; /* Return value */
19 int mode; /* Interface mode */
20 struct completion complete; /* I/O completion */
21 struct timer_list timer; /* Timeout */
22 char * buf; /* Data buffer */
23 int len; /* Length of data buffer */
24};
25
26struct i2c_pnx_algo_data {
27 u32 base;
28 u32 ioaddr;
29 int irq;
30 struct i2c_pnx_mif mif;
31 int last;
32};
33
34struct i2c_pnx_data {
35 int (*suspend) (struct platform_device *pdev, pm_message_t state);
36 int (*resume) (struct platform_device *pdev);
37 u32 (*calculate_input_freq) (struct platform_device *pdev);
38 int (*set_clock_run) (struct platform_device *pdev);
39 int (*set_clock_stop) (struct platform_device *pdev);
40 struct i2c_adapter *adapter;
41};
42
43#endif /* __I2C_PNX_H__ */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 9b5d04768c2c..71e50d3e492f 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -1,7 +1,7 @@
1/* ------------------------------------------------------------------------- */ 1/* ------------------------------------------------------------------------- */
2/* */ 2/* */
3/* i2c.h - definitions for the i2c-bus interface */ 3/* i2c.h - definitions for the i2c-bus interface */
4/* */ 4/* */
5/* ------------------------------------------------------------------------- */ 5/* ------------------------------------------------------------------------- */
6/* Copyright (C) 1995-2000 Simon G. Vogl 6/* Copyright (C) 1995-2000 Simon G. Vogl
7 7
@@ -27,7 +27,7 @@
27#define _LINUX_I2C_H 27#define _LINUX_I2C_H
28 28
29#include <linux/types.h> 29#include <linux/types.h>
30#ifdef __KERNEL__ 30#ifdef __KERNEL__
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/i2c-id.h> 32#include <linux/i2c-id.h>
33#include <linux/mod_devicetable.h> 33#include <linux/mod_devicetable.h>
@@ -53,8 +53,8 @@ union i2c_smbus_data;
53 53
54/* 54/*
55 * The master routines are the ones normally used to transmit data to devices 55 * The master routines are the ones normally used to transmit data to devices
56 * on a bus (or read from them). Apart from two basic transfer functions to 56 * on a bus (or read from them). Apart from two basic transfer functions to
57 * transmit one message at a time, a more complex version can be used to 57 * transmit one message at a time, a more complex version can be used to
58 * transmit an arbitrary number of messages without interruption. 58 * transmit an arbitrary number of messages without interruption.
59 */ 59 */
60extern int i2c_master_send(struct i2c_client *,const char* ,int); 60extern int i2c_master_send(struct i2c_client *,const char* ,int);
@@ -67,10 +67,10 @@ extern int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
67 67
68/* This is the very generalized SMBus access routine. You probably do not 68/* This is the very generalized SMBus access routine. You probably do not
69 want to use this, though; one of the functions below may be much easier, 69 want to use this, though; one of the functions below may be much easier,
70 and probably just as fast. 70 and probably just as fast.
71 Note that we use i2c_adapter here, because you do not need a specific 71 Note that we use i2c_adapter here, because you do not need a specific
72 smbus adapter to call this function. */ 72 smbus adapter to call this function. */
73extern s32 i2c_smbus_xfer (struct i2c_adapter * adapter, u16 addr, 73extern s32 i2c_smbus_xfer (struct i2c_adapter * adapter, u16 addr,
74 unsigned short flags, 74 unsigned short flags,
75 char read_write, u8 command, int size, 75 char read_write, u8 command, int size,
76 union i2c_smbus_data * data); 76 union i2c_smbus_data * data);
@@ -112,14 +112,14 @@ struct i2c_driver {
112 112
113 /* Notifies the driver that a new bus has appeared. This routine 113 /* Notifies the driver that a new bus has appeared. This routine
114 * can be used by the driver to test if the bus meets its conditions 114 * can be used by the driver to test if the bus meets its conditions
115 * & seek for the presence of the chip(s) it supports. If found, it 115 * & seek for the presence of the chip(s) it supports. If found, it
116 * registers the client(s) that are on the bus to the i2c admin. via 116 * registers the client(s) that are on the bus to the i2c admin. via
117 * i2c_attach_client. 117 * i2c_attach_client.
118 */ 118 */
119 int (*attach_adapter)(struct i2c_adapter *); 119 int (*attach_adapter)(struct i2c_adapter *);
120 int (*detach_adapter)(struct i2c_adapter *); 120 int (*detach_adapter)(struct i2c_adapter *);
121 121
122 /* tells the driver that a client is about to be deleted & gives it 122 /* tells the driver that a client is about to be deleted & gives it
123 * the chance to remove its private data. Also, if the client struct 123 * the chance to remove its private data. Also, if the client struct
124 * has been dynamically allocated by the driver in the function above, 124 * has been dynamically allocated by the driver in the function above,
125 * it must be freed here. 125 * it must be freed here.
@@ -139,13 +139,13 @@ struct i2c_driver {
139#define I2C_NAME_SIZE 50 139#define I2C_NAME_SIZE 50
140 140
141/* 141/*
142 * i2c_client identifies a single device (i.e. chip) that is connected to an 142 * i2c_client identifies a single device (i.e. chip) that is connected to an
143 * i2c bus. The behaviour is defined by the routines of the driver. This 143 * i2c bus. The behaviour is defined by the routines of the driver. This
144 * function is mainly used for lookup & other admin. functions. 144 * function is mainly used for lookup & other admin. functions.
145 */ 145 */
146struct i2c_client { 146struct i2c_client {
147 unsigned int flags; /* div., see below */ 147 unsigned int flags; /* div., see below */
148 unsigned short addr; /* chip address - NOTE: 7bit */ 148 unsigned short addr; /* chip address - NOTE: 7bit */
149 /* addresses are stored in the */ 149 /* addresses are stored in the */
150 /* _LOWER_ 7 bits */ 150 /* _LOWER_ 7 bits */
151 struct i2c_adapter *adapter; /* the adapter we sit on */ 151 struct i2c_adapter *adapter; /* the adapter we sit on */
@@ -182,14 +182,14 @@ static inline void i2c_set_clientdata (struct i2c_client *dev, void *data)
182 */ 182 */
183struct i2c_algorithm { 183struct i2c_algorithm {
184 /* If an adapter algorithm can't do I2C-level access, set master_xfer 184 /* If an adapter algorithm can't do I2C-level access, set master_xfer
185 to NULL. If an adapter algorithm can do SMBus access, set 185 to NULL. If an adapter algorithm can do SMBus access, set
186 smbus_xfer. If set to NULL, the SMBus protocol is simulated 186 smbus_xfer. If set to NULL, the SMBus protocol is simulated
187 using common I2C messages */ 187 using common I2C messages */
188 /* master_xfer should return the number of messages successfully 188 /* master_xfer should return the number of messages successfully
189 processed, or a negative value on error */ 189 processed, or a negative value on error */
190 int (*master_xfer)(struct i2c_adapter *adap,struct i2c_msg *msgs, 190 int (*master_xfer)(struct i2c_adapter *adap,struct i2c_msg *msgs,
191 int num); 191 int num);
192 int (*smbus_xfer) (struct i2c_adapter *adap, u16 addr, 192 int (*smbus_xfer) (struct i2c_adapter *adap, u16 addr,
193 unsigned short flags, char read_write, 193 unsigned short flags, char read_write,
194 u8 command, int size, union i2c_smbus_data * data); 194 u8 command, int size, union i2c_smbus_data * data);
195 195
@@ -216,6 +216,7 @@ struct i2c_adapter {
216 int (*client_unregister)(struct i2c_client *); 216 int (*client_unregister)(struct i2c_client *);
217 217
218 /* data fields that are valid for all devices */ 218 /* data fields that are valid for all devices */
219 u8 level; /* nesting level for lockdep */
219 struct mutex bus_lock; 220 struct mutex bus_lock;
220 struct mutex clist_lock; 221 struct mutex clist_lock;
221 222
@@ -316,7 +317,7 @@ extern int i2c_check_addr (struct i2c_adapter *adapter, int addr);
316 * It will only call found_proc if some client is connected at the 317 * It will only call found_proc if some client is connected at the
317 * specific address (unless a 'force' matched); 318 * specific address (unless a 'force' matched);
318 */ 319 */
319extern int i2c_probe(struct i2c_adapter *adapter, 320extern int i2c_probe(struct i2c_adapter *adapter,
320 struct i2c_client_address_data *address_data, 321 struct i2c_client_address_data *address_data,
321 int (*found_proc) (struct i2c_adapter *, int, int)); 322 int (*found_proc) (struct i2c_adapter *, int, int));
322 323
@@ -352,15 +353,15 @@ static inline int i2c_adapter_id(struct i2c_adapter *adap)
352 */ 353 */
353struct i2c_msg { 354struct i2c_msg {
354 __u16 addr; /* slave address */ 355 __u16 addr; /* slave address */
355 __u16 flags; 356 __u16 flags;
356#define I2C_M_TEN 0x10 /* we have a ten bit chip address */ 357#define I2C_M_TEN 0x10 /* we have a ten bit chip address */
357#define I2C_M_RD 0x01 358#define I2C_M_RD 0x01
358#define I2C_M_NOSTART 0x4000 359#define I2C_M_NOSTART 0x4000
359#define I2C_M_REV_DIR_ADDR 0x2000 360#define I2C_M_REV_DIR_ADDR 0x2000
360#define I2C_M_IGNORE_NAK 0x1000 361#define I2C_M_IGNORE_NAK 0x1000
361#define I2C_M_NO_RD_ACK 0x0800 362#define I2C_M_NO_RD_ACK 0x0800
362 __u16 len; /* msg length */ 363 __u16 len; /* msg length */
363 __u8 *buf; /* pointer to msg data */ 364 __u8 *buf; /* pointer to msg data */
364}; 365};
365 366
366/* To determine what functionality is present */ 367/* To determine what functionality is present */
@@ -370,16 +371,16 @@ struct i2c_msg {
370#define I2C_FUNC_PROTOCOL_MANGLING 0x00000004 /* I2C_M_{REV_DIR_ADDR,NOSTART,..} */ 371#define I2C_FUNC_PROTOCOL_MANGLING 0x00000004 /* I2C_M_{REV_DIR_ADDR,NOSTART,..} */
371#define I2C_FUNC_SMBUS_HWPEC_CALC 0x00000008 /* SMBus 2.0 */ 372#define I2C_FUNC_SMBUS_HWPEC_CALC 0x00000008 /* SMBus 2.0 */
372#define I2C_FUNC_SMBUS_BLOCK_PROC_CALL 0x00008000 /* SMBus 2.0 */ 373#define I2C_FUNC_SMBUS_BLOCK_PROC_CALL 0x00008000 /* SMBus 2.0 */
373#define I2C_FUNC_SMBUS_QUICK 0x00010000 374#define I2C_FUNC_SMBUS_QUICK 0x00010000
374#define I2C_FUNC_SMBUS_READ_BYTE 0x00020000 375#define I2C_FUNC_SMBUS_READ_BYTE 0x00020000
375#define I2C_FUNC_SMBUS_WRITE_BYTE 0x00040000 376#define I2C_FUNC_SMBUS_WRITE_BYTE 0x00040000
376#define I2C_FUNC_SMBUS_READ_BYTE_DATA 0x00080000 377#define I2C_FUNC_SMBUS_READ_BYTE_DATA 0x00080000
377#define I2C_FUNC_SMBUS_WRITE_BYTE_DATA 0x00100000 378#define I2C_FUNC_SMBUS_WRITE_BYTE_DATA 0x00100000
378#define I2C_FUNC_SMBUS_READ_WORD_DATA 0x00200000 379#define I2C_FUNC_SMBUS_READ_WORD_DATA 0x00200000
379#define I2C_FUNC_SMBUS_WRITE_WORD_DATA 0x00400000 380#define I2C_FUNC_SMBUS_WRITE_WORD_DATA 0x00400000
380#define I2C_FUNC_SMBUS_PROC_CALL 0x00800000 381#define I2C_FUNC_SMBUS_PROC_CALL 0x00800000
381#define I2C_FUNC_SMBUS_READ_BLOCK_DATA 0x01000000 382#define I2C_FUNC_SMBUS_READ_BLOCK_DATA 0x01000000
382#define I2C_FUNC_SMBUS_WRITE_BLOCK_DATA 0x02000000 383#define I2C_FUNC_SMBUS_WRITE_BLOCK_DATA 0x02000000
383#define I2C_FUNC_SMBUS_READ_I2C_BLOCK 0x04000000 /* I2C-like block xfer */ 384#define I2C_FUNC_SMBUS_READ_I2C_BLOCK 0x04000000 /* I2C-like block xfer */
384#define I2C_FUNC_SMBUS_WRITE_I2C_BLOCK 0x08000000 /* w/ 1-byte reg. addr. */ 385#define I2C_FUNC_SMBUS_WRITE_I2C_BLOCK 0x08000000 /* w/ 1-byte reg. addr. */
385#define I2C_FUNC_SMBUS_READ_I2C_BLOCK_2 0x10000000 /* I2C-like block xfer */ 386#define I2C_FUNC_SMBUS_READ_I2C_BLOCK_2 0x10000000 /* I2C-like block xfer */
@@ -406,10 +407,10 @@ struct i2c_msg {
406 I2C_FUNC_SMBUS_WRITE_BLOCK_DATA | \ 407 I2C_FUNC_SMBUS_WRITE_BLOCK_DATA | \
407 I2C_FUNC_SMBUS_I2C_BLOCK) 408 I2C_FUNC_SMBUS_I2C_BLOCK)
408 409
409/* 410/*
410 * Data for SMBus Messages 411 * Data for SMBus Messages
411 */ 412 */
412#define I2C_SMBUS_BLOCK_MAX 32 /* As specified in SMBus standard */ 413#define I2C_SMBUS_BLOCK_MAX 32 /* As specified in SMBus standard */
413union i2c_smbus_data { 414union i2c_smbus_data {
414 __u8 byte; 415 __u8 byte;
415 __u16 word; 416 __u16 word;
@@ -421,11 +422,11 @@ union i2c_smbus_data {
421#define I2C_SMBUS_READ 1 422#define I2C_SMBUS_READ 1
422#define I2C_SMBUS_WRITE 0 423#define I2C_SMBUS_WRITE 0
423 424
424/* SMBus transaction types (size parameter in the above functions) 425/* SMBus transaction types (size parameter in the above functions)
425 Note: these no longer correspond to the (arbitrary) PIIX4 internal codes! */ 426 Note: these no longer correspond to the (arbitrary) PIIX4 internal codes! */
426#define I2C_SMBUS_QUICK 0 427#define I2C_SMBUS_QUICK 0
427#define I2C_SMBUS_BYTE 1 428#define I2C_SMBUS_BYTE 1
428#define I2C_SMBUS_BYTE_DATA 2 429#define I2C_SMBUS_BYTE_DATA 2
429#define I2C_SMBUS_WORD_DATA 3 430#define I2C_SMBUS_WORD_DATA 3
430#define I2C_SMBUS_PROC_CALL 4 431#define I2C_SMBUS_PROC_CALL 4
431#define I2C_SMBUS_BLOCK_DATA 5 432#define I2C_SMBUS_BLOCK_DATA 5
@@ -434,15 +435,15 @@ union i2c_smbus_data {
434 435
435 436
436/* ----- commands for the ioctl like i2c_command call: 437/* ----- commands for the ioctl like i2c_command call:
437 * note that additional calls are defined in the algorithm and hw 438 * note that additional calls are defined in the algorithm and hw
438 * dependent layers - these can be listed here, or see the 439 * dependent layers - these can be listed here, or see the
439 * corresponding header files. 440 * corresponding header files.
440 */ 441 */
441 /* -> bit-adapter specific ioctls */ 442 /* -> bit-adapter specific ioctls */
442#define I2C_RETRIES 0x0701 /* number of times a device address */ 443#define I2C_RETRIES 0x0701 /* number of times a device address */
443 /* should be polled when not */ 444 /* should be polled when not */
444 /* acknowledging */ 445 /* acknowledging */
445#define I2C_TIMEOUT 0x0702 /* set timeout - call with int */ 446#define I2C_TIMEOUT 0x0702 /* set timeout - call with int */
446 447
447 448
448/* this is for i2c-dev.c */ 449/* this is for i2c-dev.c */
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 9c2050293f17..e26a03981a94 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -796,6 +796,7 @@ typedef struct hwif_s {
796 unsigned sg_mapped : 1; /* sg_table and sg_nents are ready */ 796 unsigned sg_mapped : 1; /* sg_table and sg_nents are ready */
797 unsigned no_io_32bit : 1; /* 1 = can not do 32-bit IO ops */ 797 unsigned no_io_32bit : 1; /* 1 = can not do 32-bit IO ops */
798 unsigned err_stops_fifo : 1; /* 1=data FIFO is cleared by an error */ 798 unsigned err_stops_fifo : 1; /* 1=data FIFO is cleared by an error */
799 unsigned atapi_irq_bogon : 1; /* Generates spurious DMA interrupts in PIO mode */
799 800
800 struct device gendev; 801 struct device gendev;
801 struct completion gendev_rel_comp; /* To deal with device release() */ 802 struct completion gendev_rel_comp; /* To deal with device release() */
@@ -803,8 +804,6 @@ typedef struct hwif_s {
803 void *hwif_data; /* extra hwif data */ 804 void *hwif_data; /* extra hwif data */
804 805
805 unsigned dma; 806 unsigned dma;
806
807 void (*led_act)(void *data, int rw);
808} ____cacheline_internodealigned_in_smp ide_hwif_t; 807} ____cacheline_internodealigned_in_smp ide_hwif_t;
809 808
810/* 809/*
diff --git a/include/linux/init.h b/include/linux/init.h
index 5eb5d24b7680..5a593a1dec1e 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -111,6 +111,7 @@ extern void setup_arch(char **);
111#define subsys_initcall_sync(fn) __define_initcall("4s",fn,4s) 111#define subsys_initcall_sync(fn) __define_initcall("4s",fn,4s)
112#define fs_initcall(fn) __define_initcall("5",fn,5) 112#define fs_initcall(fn) __define_initcall("5",fn,5)
113#define fs_initcall_sync(fn) __define_initcall("5s",fn,5s) 113#define fs_initcall_sync(fn) __define_initcall("5s",fn,5s)
114#define rootfs_initcall(fn) __define_initcall("rootfs",fn,rootfs)
114#define device_initcall(fn) __define_initcall("6",fn,6) 115#define device_initcall(fn) __define_initcall("6",fn,6)
115#define device_initcall_sync(fn) __define_initcall("6s",fn,6s) 116#define device_initcall_sync(fn) __define_initcall("6s",fn,6s)
116#define late_initcall(fn) __define_initcall("7",fn,7) 117#define late_initcall(fn) __define_initcall("7",fn,7)
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 7272ff9ee77c..6383d2d83bb0 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -12,12 +12,10 @@
12#define INIT_FDTABLE \ 12#define INIT_FDTABLE \
13{ \ 13{ \
14 .max_fds = NR_OPEN_DEFAULT, \ 14 .max_fds = NR_OPEN_DEFAULT, \
15 .max_fdset = EMBEDDED_FD_SET_SIZE, \
16 .fd = &init_files.fd_array[0], \ 15 .fd = &init_files.fd_array[0], \
17 .close_on_exec = (fd_set *)&init_files.close_on_exec_init, \ 16 .close_on_exec = (fd_set *)&init_files.close_on_exec_init, \
18 .open_fds = (fd_set *)&init_files.open_fds_init, \ 17 .open_fds = (fd_set *)&init_files.open_fds_init, \
19 .rcu = RCU_HEAD_INIT, \ 18 .rcu = RCU_HEAD_INIT, \
20 .free_files = NULL, \
21 .next = NULL, \ 19 .next = NULL, \
22} 20}
23 21
@@ -77,7 +75,6 @@ extern struct nsproxy init_nsproxy;
77 .pid_ns = &init_pid_ns, \ 75 .pid_ns = &init_pid_ns, \
78 .count = ATOMIC_INIT(1), \ 76 .count = ATOMIC_INIT(1), \
79 .nslock = __SPIN_LOCK_UNLOCKED(nsproxy.nslock), \ 77 .nslock = __SPIN_LOCK_UNLOCKED(nsproxy.nslock), \
80 .id = 0, \
81 .uts_ns = &init_uts_ns, \ 78 .uts_ns = &init_uts_ns, \
82 .mnt_ns = NULL, \ 79 .mnt_ns = NULL, \
83 INIT_IPC_NS(ipc_ns) \ 80 INIT_IPC_NS(ipc_ns) \
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index de7593f4e895..e36e86c869fb 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -231,7 +231,8 @@ enum
231 NET_TX_SOFTIRQ, 231 NET_TX_SOFTIRQ,
232 NET_RX_SOFTIRQ, 232 NET_RX_SOFTIRQ,
233 BLOCK_SOFTIRQ, 233 BLOCK_SOFTIRQ,
234 TASKLET_SOFTIRQ 234 TASKLET_SOFTIRQ,
235 SCHED_SOFTIRQ,
235}; 236};
236 237
237/* softirq mask and active fields moved to irq_cpustat_t in 238/* softirq mask and active fields moved to irq_cpustat_t in
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index e8bfac34d2ba..b0c4a05a4b0c 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -17,8 +17,6 @@
17#include <asm/byteorder.h> 17#include <asm/byteorder.h>
18#include <asm/bug.h> 18#include <asm/bug.h>
19 19
20extern const char linux_banner[];
21
22#define INT_MAX ((int)(~0U>>1)) 20#define INT_MAX ((int)(~0U>>1))
23#define INT_MIN (-INT_MAX - 1) 21#define INT_MIN (-INT_MAX - 1)
24#define UINT_MAX (~0U) 22#define UINT_MAX (~0U)
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
new file mode 100644
index 000000000000..5bb2c3c585c1
--- /dev/null
+++ b/include/linux/kvm.h
@@ -0,0 +1,227 @@
1#ifndef __LINUX_KVM_H
2#define __LINUX_KVM_H
3
4/*
5 * Userspace interface for /dev/kvm - kernel based virtual machine
6 *
7 * Note: this interface is considered experimental and may change without
8 * notice.
9 */
10
11#include <asm/types.h>
12#include <linux/ioctl.h>
13
14/*
15 * Architectural interrupt line count, and the size of the bitmap needed
16 * to hold them.
17 */
18#define KVM_NR_INTERRUPTS 256
19#define KVM_IRQ_BITMAP_SIZE_BYTES ((KVM_NR_INTERRUPTS + 7) / 8)
20#define KVM_IRQ_BITMAP_SIZE(type) (KVM_IRQ_BITMAP_SIZE_BYTES / sizeof(type))
21
22
23/* for KVM_CREATE_MEMORY_REGION */
24struct kvm_memory_region {
25 __u32 slot;
26 __u32 flags;
27 __u64 guest_phys_addr;
28 __u64 memory_size; /* bytes */
29};
30
31/* for kvm_memory_region::flags */
32#define KVM_MEM_LOG_DIRTY_PAGES 1UL
33
34
35#define KVM_EXIT_TYPE_FAIL_ENTRY 1
36#define KVM_EXIT_TYPE_VM_EXIT 2
37
38enum kvm_exit_reason {
39 KVM_EXIT_UNKNOWN = 0,
40 KVM_EXIT_EXCEPTION = 1,
41 KVM_EXIT_IO = 2,
42 KVM_EXIT_CPUID = 3,
43 KVM_EXIT_DEBUG = 4,
44 KVM_EXIT_HLT = 5,
45 KVM_EXIT_MMIO = 6,
46};
47
48/* for KVM_RUN */
49struct kvm_run {
50 /* in */
51 __u32 vcpu;
52 __u32 emulated; /* skip current instruction */
53 __u32 mmio_completed; /* mmio request completed */
54
55 /* out */
56 __u32 exit_type;
57 __u32 exit_reason;
58 __u32 instruction_length;
59 union {
60 /* KVM_EXIT_UNKNOWN */
61 struct {
62 __u32 hardware_exit_reason;
63 } hw;
64 /* KVM_EXIT_EXCEPTION */
65 struct {
66 __u32 exception;
67 __u32 error_code;
68 } ex;
69 /* KVM_EXIT_IO */
70 struct {
71#define KVM_EXIT_IO_IN 0
72#define KVM_EXIT_IO_OUT 1
73 __u8 direction;
74 __u8 size; /* bytes */
75 __u8 string;
76 __u8 string_down;
77 __u8 rep;
78 __u8 pad;
79 __u16 port;
80 __u64 count;
81 union {
82 __u64 address;
83 __u32 value;
84 };
85 } io;
86 struct {
87 } debug;
88 /* KVM_EXIT_MMIO */
89 struct {
90 __u64 phys_addr;
91 __u8 data[8];
92 __u32 len;
93 __u8 is_write;
94 } mmio;
95 };
96};
97
98/* for KVM_GET_REGS and KVM_SET_REGS */
99struct kvm_regs {
100 /* in */
101 __u32 vcpu;
102 __u32 padding;
103
104 /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
105 __u64 rax, rbx, rcx, rdx;
106 __u64 rsi, rdi, rsp, rbp;
107 __u64 r8, r9, r10, r11;
108 __u64 r12, r13, r14, r15;
109 __u64 rip, rflags;
110};
111
112struct kvm_segment {
113 __u64 base;
114 __u32 limit;
115 __u16 selector;
116 __u8 type;
117 __u8 present, dpl, db, s, l, g, avl;
118 __u8 unusable;
119 __u8 padding;
120};
121
122struct kvm_dtable {
123 __u64 base;
124 __u16 limit;
125 __u16 padding[3];
126};
127
128/* for KVM_GET_SREGS and KVM_SET_SREGS */
129struct kvm_sregs {
130 /* in */
131 __u32 vcpu;
132 __u32 padding;
133
134 /* out (KVM_GET_SREGS) / in (KVM_SET_SREGS) */
135 struct kvm_segment cs, ds, es, fs, gs, ss;
136 struct kvm_segment tr, ldt;
137 struct kvm_dtable gdt, idt;
138 __u64 cr0, cr2, cr3, cr4, cr8;
139 __u64 efer;
140 __u64 apic_base;
141 __u64 interrupt_bitmap[KVM_IRQ_BITMAP_SIZE(__u64)];
142};
143
144struct kvm_msr_entry {
145 __u32 index;
146 __u32 reserved;
147 __u64 data;
148};
149
150/* for KVM_GET_MSRS and KVM_SET_MSRS */
151struct kvm_msrs {
152 __u32 vcpu;
153 __u32 nmsrs; /* number of msrs in entries */
154
155 struct kvm_msr_entry entries[0];
156};
157
158/* for KVM_GET_MSR_INDEX_LIST */
159struct kvm_msr_list {
160 __u32 nmsrs; /* number of msrs in entries */
161 __u32 indices[0];
162};
163
164/* for KVM_TRANSLATE */
165struct kvm_translation {
166 /* in */
167 __u64 linear_address;
168 __u32 vcpu;
169 __u32 padding;
170
171 /* out */
172 __u64 physical_address;
173 __u8 valid;
174 __u8 writeable;
175 __u8 usermode;
176};
177
178/* for KVM_INTERRUPT */
179struct kvm_interrupt {
180 /* in */
181 __u32 vcpu;
182 __u32 irq;
183};
184
185struct kvm_breakpoint {
186 __u32 enabled;
187 __u32 padding;
188 __u64 address;
189};
190
191/* for KVM_DEBUG_GUEST */
192struct kvm_debug_guest {
193 /* int */
194 __u32 vcpu;
195 __u32 enabled;
196 struct kvm_breakpoint breakpoints[4];
197 __u32 singlestep;
198};
199
200/* for KVM_GET_DIRTY_LOG */
201struct kvm_dirty_log {
202 __u32 slot;
203 __u32 padding;
204 union {
205 void __user *dirty_bitmap; /* one bit per page */
206 __u64 padding;
207 };
208};
209
210#define KVMIO 0xAE
211
212#define KVM_RUN _IOWR(KVMIO, 2, struct kvm_run)
213#define KVM_GET_REGS _IOWR(KVMIO, 3, struct kvm_regs)
214#define KVM_SET_REGS _IOW(KVMIO, 4, struct kvm_regs)
215#define KVM_GET_SREGS _IOWR(KVMIO, 5, struct kvm_sregs)
216#define KVM_SET_SREGS _IOW(KVMIO, 6, struct kvm_sregs)
217#define KVM_TRANSLATE _IOWR(KVMIO, 7, struct kvm_translation)
218#define KVM_INTERRUPT _IOW(KVMIO, 8, struct kvm_interrupt)
219#define KVM_DEBUG_GUEST _IOW(KVMIO, 9, struct kvm_debug_guest)
220#define KVM_SET_MEMORY_REGION _IOW(KVMIO, 10, struct kvm_memory_region)
221#define KVM_CREATE_VCPU _IOW(KVMIO, 11, int /* vcpu_slot */)
222#define KVM_GET_DIRTY_LOG _IOW(KVMIO, 12, struct kvm_dirty_log)
223#define KVM_GET_MSRS _IOWR(KVMIO, 13, struct kvm_msrs)
224#define KVM_SET_MSRS _IOWR(KVMIO, 14, struct kvm_msrs)
225#define KVM_GET_MSR_INDEX_LIST _IOWR(KVMIO, 15, struct kvm_msr_list)
226
227#endif
diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
index aa50d89eacd7..246de1d84a26 100644
--- a/include/linux/lockd/bind.h
+++ b/include/linux/lockd/bind.h
@@ -23,7 +23,7 @@ struct svc_rqst;
23 * This is the set of functions for lockd->nfsd communication 23 * This is the set of functions for lockd->nfsd communication
24 */ 24 */
25struct nlmsvc_binding { 25struct nlmsvc_binding {
26 u32 (*fopen)(struct svc_rqst *, 26 __be32 (*fopen)(struct svc_rqst *,
27 struct nfs_fh *, 27 struct nfs_fh *,
28 struct file **); 28 struct file **);
29 void (*fclose)(struct file *); 29 void (*fclose)(struct file *);
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 0c962b82a9de..ac25b5649c59 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -191,7 +191,7 @@ __be32 nlmsvc_cancel_blocked(struct nlm_file *, struct nlm_lock *);
191unsigned long nlmsvc_retry_blocked(void); 191unsigned long nlmsvc_retry_blocked(void);
192void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *, 192void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *,
193 nlm_host_match_fn_t match); 193 nlm_host_match_fn_t match);
194void nlmsvc_grant_reply(struct nlm_cookie *, u32); 194void nlmsvc_grant_reply(struct nlm_cookie *, __be32);
195 195
196/* 196/*
197 * File handling for the server personality 197 * File handling for the server personality
diff --git a/include/linux/lockd/sm_inter.h b/include/linux/lockd/sm_inter.h
index fc61d40964da..22a645828f26 100644
--- a/include/linux/lockd/sm_inter.h
+++ b/include/linux/lockd/sm_inter.h
@@ -24,7 +24,7 @@
24 * Arguments for all calls to statd 24 * Arguments for all calls to statd
25 */ 25 */
26struct nsm_args { 26struct nsm_args {
27 u32 addr; /* remote address */ 27 __be32 addr; /* remote address */
28 u32 prog; /* RPC callback info */ 28 u32 prog; /* RPC callback info */
29 u32 vers; 29 u32 vers;
30 u32 proc; 30 u32 proc;
diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h
index 29e7d9fc9dad..83a1f9f6237b 100644
--- a/include/linux/lockd/xdr.h
+++ b/include/linux/lockd/xdr.h
@@ -69,7 +69,7 @@ typedef struct nlm_args nlm_args;
69 */ 69 */
70struct nlm_res { 70struct nlm_res {
71 struct nlm_cookie cookie; 71 struct nlm_cookie cookie;
72 u32 status; 72 __be32 status;
73 struct nlm_lock lock; 73 struct nlm_lock lock;
74}; 74};
75 75
@@ -80,9 +80,9 @@ struct nlm_reboot {
80 char * mon; 80 char * mon;
81 int len; 81 int len;
82 u32 state; 82 u32 state;
83 u32 addr; 83 __be32 addr;
84 u32 vers; 84 __be32 vers;
85 u32 proto; 85 __be32 proto;
86}; 86};
87 87
88/* 88/*
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 498bfbd3b4e1..ea097dddc44f 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -281,15 +281,25 @@ struct lock_class_key { };
281#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS) 281#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
282extern void early_init_irq_lock_class(void); 282extern void early_init_irq_lock_class(void);
283#else 283#else
284# define early_init_irq_lock_class() do { } while (0) 284static inline void early_init_irq_lock_class(void)
285{
286}
285#endif 287#endif
286 288
287#ifdef CONFIG_TRACE_IRQFLAGS 289#ifdef CONFIG_TRACE_IRQFLAGS
288extern void early_boot_irqs_off(void); 290extern void early_boot_irqs_off(void);
289extern void early_boot_irqs_on(void); 291extern void early_boot_irqs_on(void);
292extern void print_irqtrace_events(struct task_struct *curr);
290#else 293#else
291# define early_boot_irqs_off() do { } while (0) 294static inline void early_boot_irqs_off(void)
292# define early_boot_irqs_on() do { } while (0) 295{
296}
297static inline void early_boot_irqs_on(void)
298{
299}
300static inline void print_irqtrace_events(struct task_struct *curr)
301{
302}
293#endif 303#endif
294 304
295/* 305/*
diff --git a/include/linux/mount.h b/include/linux/mount.h
index e357dc86a4de..1b7e178b0d84 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -27,6 +27,7 @@ struct mnt_namespace;
27#define MNT_NOEXEC 0x04 27#define MNT_NOEXEC 0x04
28#define MNT_NOATIME 0x08 28#define MNT_NOATIME 0x08
29#define MNT_NODIRATIME 0x10 29#define MNT_NODIRATIME 0x10
30#define MNT_RELATIME 0x20
30 31
31#define MNT_SHRINKABLE 0x100 32#define MNT_SHRINKABLE 0x100
32 33
diff --git a/include/linux/n_r3964.h b/include/linux/n_r3964.h
index db4f3776978a..de24af79ebd3 100644
--- a/include/linux/n_r3964.h
+++ b/include/linux/n_r3964.h
@@ -116,7 +116,7 @@ struct r3964_message;
116 116
117struct r3964_client_info { 117struct r3964_client_info {
118 spinlock_t lock; 118 spinlock_t lock;
119 pid_t pid; 119 struct pid *pid;
120 unsigned int sig_flags; 120 unsigned int sig_flags;
121 121
122 struct r3964_client_info *next; 122 struct r3964_client_info *next;
diff --git a/include/linux/ncp_mount.h b/include/linux/ncp_mount.h
index f46bddcdbd3b..a2b549eb1eca 100644
--- a/include/linux/ncp_mount.h
+++ b/include/linux/ncp_mount.h
@@ -75,7 +75,7 @@ struct ncp_mount_data_kernel {
75 unsigned int int_flags; /* internal flags */ 75 unsigned int int_flags; /* internal flags */
76#define NCP_IMOUNT_LOGGEDIN_POSSIBLE 0x0001 76#define NCP_IMOUNT_LOGGEDIN_POSSIBLE 0x0001
77 __kernel_uid32_t mounted_uid; /* Who may umount() this filesystem? */ 77 __kernel_uid32_t mounted_uid; /* Who may umount() this filesystem? */
78 __kernel_pid_t wdog_pid; /* Who cares for our watchdog packets? */ 78 struct pid *wdog_pid; /* Who cares for our watchdog packets? */
79 unsigned int ncp_fd; /* The socket to the ncp port */ 79 unsigned int ncp_fd; /* The socket to the ncp port */
80 unsigned int time_out; /* How long should I wait after 80 unsigned int time_out; /* How long should I wait after
81 sending a NCP request? */ 81 sending a NCP request? */
diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h
index edb54c3171b3..0727774772ba 100644
--- a/include/linux/nfsd/nfsd.h
+++ b/include/linux/nfsd/nfsd.h
@@ -275,12 +275,12 @@ static inline int is_fsid(struct svc_fh *fh, struct knfsd_fh *reffh)
275 * we might process an operation with side effects, and be unable to 275 * we might process an operation with side effects, and be unable to
276 * tell the client that the operation succeeded. 276 * tell the client that the operation succeeded.
277 * 277 *
278 * COMPOUND_SLACK_SPACE - this is the minimum amount of buffer space 278 * COMPOUND_SLACK_SPACE - this is the minimum bytes of buffer space
279 * needed to encode an "ordinary" _successful_ operation. (GETATTR, 279 * needed to encode an "ordinary" _successful_ operation. (GETATTR,
280 * READ, READDIR, and READLINK have their own buffer checks.) if we 280 * READ, READDIR, and READLINK have their own buffer checks.) if we
281 * fall below this level, we fail the next operation with NFS4ERR_RESOURCE. 281 * fall below this level, we fail the next operation with NFS4ERR_RESOURCE.
282 * 282 *
283 * COMPOUND_ERR_SLACK_SPACE - this is the minimum amount of buffer space 283 * COMPOUND_ERR_SLACK_SPACE - this is the minimum bytes of buffer space
284 * needed to encode an operation which has failed with NFS4ERR_RESOURCE. 284 * needed to encode an operation which has failed with NFS4ERR_RESOURCE.
285 * care is taken to ensure that we never fall below this level for any 285 * care is taken to ensure that we never fall below this level for any
286 * reason. 286 * reason.
diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h
index c3673f487e84..ab5c236bd9a7 100644
--- a/include/linux/nfsd/state.h
+++ b/include/linux/nfsd/state.h
@@ -273,7 +273,6 @@ struct nfs4_stateid {
273 ((err) != nfserr_stale_stateid) && \ 273 ((err) != nfserr_stale_stateid) && \
274 ((err) != nfserr_bad_stateid)) 274 ((err) != nfserr_bad_stateid))
275 275
276extern __be32 nfsd4_renew(clientid_t *clid);
277extern __be32 nfs4_preprocess_stateid_op(struct svc_fh *current_fh, 276extern __be32 nfs4_preprocess_stateid_op(struct svc_fh *current_fh,
278 stateid_t *stateid, int flags, struct file **filp); 277 stateid_t *stateid, int flags, struct file **filp);
279extern void nfs4_lock_state(void); 278extern void nfs4_lock_state(void);
diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h
index 45ca01b5f844..09799bcee0ac 100644
--- a/include/linux/nfsd/xdr4.h
+++ b/include/linux/nfsd/xdr4.h
@@ -44,6 +44,12 @@
44#define NFSD4_MAX_TAGLEN 128 44#define NFSD4_MAX_TAGLEN 128
45#define XDR_LEN(n) (((n) + 3) & ~3) 45#define XDR_LEN(n) (((n) + 3) & ~3)
46 46
47struct nfsd4_compound_state {
48 struct svc_fh current_fh;
49 struct svc_fh save_fh;
50 struct nfs4_stateowner *replay_owner;
51};
52
47struct nfsd4_change_info { 53struct nfsd4_change_info {
48 u32 atomic; 54 u32 atomic;
49 u32 before_ctime_sec; 55 u32 before_ctime_sec;
@@ -430,35 +436,39 @@ __be32 nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
430 struct dentry *dentry, __be32 *buffer, int *countp, 436 struct dentry *dentry, __be32 *buffer, int *countp,
431 u32 *bmval, struct svc_rqst *); 437 u32 *bmval, struct svc_rqst *);
432extern __be32 nfsd4_setclientid(struct svc_rqst *rqstp, 438extern __be32 nfsd4_setclientid(struct svc_rqst *rqstp,
439 struct nfsd4_compound_state *,
433 struct nfsd4_setclientid *setclid); 440 struct nfsd4_setclientid *setclid);
434extern __be32 nfsd4_setclientid_confirm(struct svc_rqst *rqstp, 441extern __be32 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
442 struct nfsd4_compound_state *,
435 struct nfsd4_setclientid_confirm *setclientid_confirm); 443 struct nfsd4_setclientid_confirm *setclientid_confirm);
436extern __be32 nfsd4_process_open1(struct nfsd4_open *open); 444extern __be32 nfsd4_process_open1(struct nfsd4_open *open);
437extern __be32 nfsd4_process_open2(struct svc_rqst *rqstp, 445extern __be32 nfsd4_process_open2(struct svc_rqst *rqstp,
438 struct svc_fh *current_fh, struct nfsd4_open *open); 446 struct svc_fh *current_fh, struct nfsd4_open *open);
439extern __be32 nfsd4_open_confirm(struct svc_rqst *rqstp, 447extern __be32 nfsd4_open_confirm(struct svc_rqst *rqstp,
440 struct svc_fh *current_fh, struct nfsd4_open_confirm *oc, 448 struct nfsd4_compound_state *, struct nfsd4_open_confirm *oc);
441 struct nfs4_stateowner **); 449extern __be32 nfsd4_close(struct svc_rqst *rqstp,
442extern __be32 nfsd4_close(struct svc_rqst *rqstp, struct svc_fh *current_fh, 450 struct nfsd4_compound_state *,
443 struct nfsd4_close *close, 451 struct nfsd4_close *close);
444 struct nfs4_stateowner **replay_owner);
445extern __be32 nfsd4_open_downgrade(struct svc_rqst *rqstp, 452extern __be32 nfsd4_open_downgrade(struct svc_rqst *rqstp,
446 struct svc_fh *current_fh, struct nfsd4_open_downgrade *od, 453 struct nfsd4_compound_state *,
447 struct nfs4_stateowner **replay_owner); 454 struct nfsd4_open_downgrade *od);
448extern __be32 nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, 455extern __be32 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *,
449 struct nfsd4_lock *lock, 456 struct nfsd4_lock *lock);
450 struct nfs4_stateowner **replay_owner); 457extern __be32 nfsd4_lockt(struct svc_rqst *rqstp,
451extern __be32 nfsd4_lockt(struct svc_rqst *rqstp, struct svc_fh *current_fh, 458 struct nfsd4_compound_state *,
452 struct nfsd4_lockt *lockt); 459 struct nfsd4_lockt *lockt);
453extern __be32 nfsd4_locku(struct svc_rqst *rqstp, struct svc_fh *current_fh, 460extern __be32 nfsd4_locku(struct svc_rqst *rqstp,
454 struct nfsd4_locku *locku, 461 struct nfsd4_compound_state *,
455 struct nfs4_stateowner **replay_owner); 462 struct nfsd4_locku *locku);
456extern __be32 463extern __be32
457nfsd4_release_lockowner(struct svc_rqst *rqstp, 464nfsd4_release_lockowner(struct svc_rqst *rqstp,
465 struct nfsd4_compound_state *,
458 struct nfsd4_release_lockowner *rlockowner); 466 struct nfsd4_release_lockowner *rlockowner);
459extern void nfsd4_release_compoundargs(struct nfsd4_compoundargs *); 467extern void nfsd4_release_compoundargs(struct nfsd4_compoundargs *);
460extern __be32 nfsd4_delegreturn(struct svc_rqst *rqstp, 468extern __be32 nfsd4_delegreturn(struct svc_rqst *rqstp,
461 struct svc_fh *current_fh, struct nfsd4_delegreturn *dr); 469 struct nfsd4_compound_state *, struct nfsd4_delegreturn *dr);
470extern __be32 nfsd4_renew(struct svc_rqst *rqstp,
471 struct nfsd4_compound_state *, clientid_t *clid);
462#endif 472#endif
463 473
464/* 474/*
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index fdfb0e44912f..0b9f0dc30d61 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -24,7 +24,6 @@ struct pid_namespace;
24struct nsproxy { 24struct nsproxy {
25 atomic_t count; 25 atomic_t count;
26 spinlock_t nslock; 26 spinlock_t nslock;
27 unsigned long id;
28 struct uts_namespace *uts_ns; 27 struct uts_namespace *uts_ns;
29 struct ipc_namespace *ipc_ns; 28 struct ipc_namespace *ipc_ns;
30 struct mnt_namespace *mnt_ns; 29 struct mnt_namespace *mnt_ns;
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index ea4f7cd7bfd8..2e19478e9e84 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -12,7 +12,7 @@
12struct pipe_buffer { 12struct pipe_buffer {
13 struct page *page; 13 struct page *page;
14 unsigned int offset, len; 14 unsigned int offset, len;
15 struct pipe_buf_operations *ops; 15 const struct pipe_buf_operations *ops;
16 unsigned int flags; 16 unsigned int flags;
17}; 17};
18 18
@@ -41,9 +41,7 @@ struct pipe_buf_operations {
41struct pipe_inode_info { 41struct pipe_inode_info {
42 wait_queue_head_t wait; 42 wait_queue_head_t wait;
43 unsigned int nrbufs, curbuf; 43 unsigned int nrbufs, curbuf;
44 struct pipe_buffer bufs[PIPE_BUFFERS];
45 struct page *tmp_page; 44 struct page *tmp_page;
46 unsigned int start;
47 unsigned int readers; 45 unsigned int readers;
48 unsigned int writers; 46 unsigned int writers;
49 unsigned int waiting_writers; 47 unsigned int waiting_writers;
@@ -52,6 +50,7 @@ struct pipe_inode_info {
52 struct fasync_struct *fasync_readers; 50 struct fasync_struct *fasync_readers;
53 struct fasync_struct *fasync_writers; 51 struct fasync_struct *fasync_writers;
54 struct inode *inode; 52 struct inode *inode;
53 struct pipe_buffer bufs[PIPE_BUFFERS];
55}; 54};
56 55
57/* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual 56/* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual
diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h
index 03636d7918fe..d8286db60b96 100644
--- a/include/linux/raid/raid5.h
+++ b/include/linux/raid/raid5.h
@@ -227,7 +227,10 @@ struct raid5_private_data {
227 struct list_head handle_list; /* stripes needing handling */ 227 struct list_head handle_list; /* stripes needing handling */
228 struct list_head delayed_list; /* stripes that have plugged requests */ 228 struct list_head delayed_list; /* stripes that have plugged requests */
229 struct list_head bitmap_list; /* stripes delaying awaiting bitmap update */ 229 struct list_head bitmap_list; /* stripes delaying awaiting bitmap update */
230 struct bio *retry_read_aligned; /* currently retrying aligned bios */
231 struct bio *retry_read_aligned_list; /* aligned bios retry list */
230 atomic_t preread_active_stripes; /* stripes with scheduled io */ 232 atomic_t preread_active_stripes; /* stripes with scheduled io */
233 atomic_t active_aligned_reads;
231 234
232 atomic_t reshape_stripes; /* stripes with pending writes for reshape */ 235 atomic_t reshape_stripes; /* stripes with pending writes for reshape */
233 /* unfortunately we need two cache names as we temporarily have 236 /* unfortunately we need two cache names as we temporarily have
diff --git a/include/linux/reciprocal_div.h b/include/linux/reciprocal_div.h
new file mode 100644
index 000000000000..f9c90b33285b
--- /dev/null
+++ b/include/linux/reciprocal_div.h
@@ -0,0 +1,32 @@
1#ifndef _LINUX_RECIPROCAL_DIV_H
2#define _LINUX_RECIPROCAL_DIV_H
3
4#include <linux/types.h>
5
6/*
7 * This file describes reciprocical division.
8 *
9 * This optimizes the (A/B) problem, when A and B are two u32
10 * and B is a known value (but not known at compile time)
11 *
12 * The math principle used is :
13 * Let RECIPROCAL_VALUE(B) be (((1LL << 32) + (B - 1))/ B)
14 * Then A / B = (u32)(((u64)(A) * (R)) >> 32)
15 *
16 * This replaces a divide by a multiply (and a shift), and
17 * is generally less expensive in CPU cycles.
18 */
19
20/*
21 * Computes the reciprocal value (R) for the value B of the divisor.
22 * Should not be called before each reciprocal_divide(),
23 * or else the performance is slower than a normal divide.
24 */
25extern u32 reciprocal_value(u32 B);
26
27
28static inline u32 reciprocal_divide(u32 A, u32 R)
29{
30 return (u32)(((u64)A * R) >> 32);
31}
32#endif
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 09ff4c3e2713..5e22d4510d11 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -106,6 +106,7 @@ extern int rtc_year_days(unsigned int day, unsigned int month, unsigned int year
106extern int rtc_valid_tm(struct rtc_time *tm); 106extern int rtc_valid_tm(struct rtc_time *tm);
107extern int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time); 107extern int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time);
108extern void rtc_time_to_tm(unsigned long time, struct rtc_time *tm); 108extern void rtc_time_to_tm(unsigned long time, struct rtc_time *tm);
109extern void rtc_merge_alarm(struct rtc_time *now, struct rtc_time *alarm);
109 110
110#include <linux/device.h> 111#include <linux/device.h>
111#include <linux/seq_file.h> 112#include <linux/seq_file.h>
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ad9c46071ff8..446373535190 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -82,6 +82,7 @@ struct sched_param {
82#include <linux/resource.h> 82#include <linux/resource.h>
83#include <linux/timer.h> 83#include <linux/timer.h>
84#include <linux/hrtimer.h> 84#include <linux/hrtimer.h>
85#include <linux/task_io_accounting.h>
85 86
86#include <asm/processor.h> 87#include <asm/processor.h>
87 88
@@ -647,6 +648,7 @@ enum idle_type
647#define SD_SHARE_CPUPOWER 128 /* Domain members share cpu power */ 648#define SD_SHARE_CPUPOWER 128 /* Domain members share cpu power */
648#define SD_POWERSAVINGS_BALANCE 256 /* Balance for power savings */ 649#define SD_POWERSAVINGS_BALANCE 256 /* Balance for power savings */
649#define SD_SHARE_PKG_RESOURCES 512 /* Domain members share cpu pkg resources */ 650#define SD_SHARE_PKG_RESOURCES 512 /* Domain members share cpu pkg resources */
651#define SD_SERIALIZE 1024 /* Only a single load balancing instance */
650 652
651#define BALANCE_FOR_MC_POWER \ 653#define BALANCE_FOR_MC_POWER \
652 (sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0) 654 (sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)
@@ -1013,6 +1015,7 @@ struct task_struct {
1013 wait_queue_t *io_wait; 1015 wait_queue_t *io_wait;
1014/* i/o counters(bytes read/written, #syscalls */ 1016/* i/o counters(bytes read/written, #syscalls */
1015 u64 rchar, wchar, syscr, syscw; 1017 u64 rchar, wchar, syscr, syscw;
1018 struct task_io_accounting ioac;
1016#if defined(CONFIG_TASK_XACCT) 1019#if defined(CONFIG_TASK_XACCT)
1017 u64 acct_rss_mem1; /* accumulated rss usage */ 1020 u64 acct_rss_mem1; /* accumulated rss usage */
1018 u64 acct_vm_mem1; /* accumulated virtual memory usage */ 1021 u64 acct_vm_mem1; /* accumulated virtual memory usage */
@@ -1141,7 +1144,6 @@ static inline void put_task_struct(struct task_struct *t)
1141#define PF_MEMALLOC 0x00000800 /* Allocating memory */ 1144#define PF_MEMALLOC 0x00000800 /* Allocating memory */
1142#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */ 1145#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
1143#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ 1146#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
1144#define PF_FREEZE 0x00004000 /* this task is being frozen for suspend now */
1145#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ 1147#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
1146#define PF_FROZEN 0x00010000 /* frozen for system suspend */ 1148#define PF_FROZEN 0x00010000 /* frozen for system suspend */
1147#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ 1149#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 46000936f8f1..6b0648cfdffc 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -44,8 +44,11 @@ typedef struct {
44#define SEQLOCK_UNLOCKED \ 44#define SEQLOCK_UNLOCKED \
45 __SEQLOCK_UNLOCKED(old_style_seqlock_init) 45 __SEQLOCK_UNLOCKED(old_style_seqlock_init)
46 46
47#define seqlock_init(x) \ 47#define seqlock_init(x) \
48 do { *(x) = (seqlock_t) __SEQLOCK_UNLOCKED(x); } while (0) 48 do { \
49 (x)->sequence = 0; \
50 spin_lock_init(&(x)->lock); \
51 } while (0)
49 52
50#define DEFINE_SEQLOCK(x) \ 53#define DEFINE_SEQLOCK(x) \
51 seqlock_t x = __SEQLOCK_UNLOCKED(x) 54 seqlock_t x = __SEQLOCK_UNLOCKED(x)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 2271886744f8..1ef822e31c77 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -1,7 +1,9 @@
1/* 1/*
2 * linux/include/linux/slab.h 2 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
3 * Written by Mark Hemment, 1996. 3 *
4 * (markhe@nextd.demon.co.uk) 4 * (C) SGI 2006, Christoph Lameter <clameter@sgi.com>
5 * Cleaned up and restructured to ease the addition of alternative
6 * implementations of SLAB allocators.
5 */ 7 */
6 8
7#ifndef _LINUX_SLAB_H 9#ifndef _LINUX_SLAB_H
@@ -10,64 +12,95 @@
10#ifdef __KERNEL__ 12#ifdef __KERNEL__
11 13
12#include <linux/gfp.h> 14#include <linux/gfp.h>
13#include <linux/init.h>
14#include <linux/types.h> 15#include <linux/types.h>
15#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */
16#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
17#include <linux/compiler.h>
18 16
19/* kmem_cache_t exists for legacy reasons and is not used by code in mm */
20typedef struct kmem_cache kmem_cache_t __deprecated; 17typedef struct kmem_cache kmem_cache_t __deprecated;
21 18
22/* flags to pass to kmem_cache_create(). 19/*
23 * The first 3 are only valid when the allocator as been build 20 * Flags to pass to kmem_cache_create().
24 * SLAB_DEBUG_SUPPORT. 21 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
25 */ 22 */
26#define SLAB_DEBUG_FREE 0x00000100UL /* Peform (expensive) checks on free */ 23#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
27#define SLAB_DEBUG_INITIAL 0x00000200UL /* Call constructor (as verifier) */ 24#define SLAB_DEBUG_INITIAL 0x00000200UL /* DEBUG: Call constructor (as verifier) */
28#define SLAB_RED_ZONE 0x00000400UL /* Red zone objs in a cache */ 25#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
29#define SLAB_POISON 0x00000800UL /* Poison objects */ 26#define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
30#define SLAB_HWCACHE_ALIGN 0x00002000UL /* align objs on a h/w cache lines */ 27#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
31#define SLAB_CACHE_DMA 0x00004000UL /* use GFP_DMA memory */ 28#define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
32#define SLAB_MUST_HWCACHE_ALIGN 0x00008000UL /* force alignment */ 29#define SLAB_MUST_HWCACHE_ALIGN 0x00008000UL /* Force alignment even if debuggin is active */
33#define SLAB_STORE_USER 0x00010000UL /* store the last owner for bug hunting */ 30#define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
34#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* track pages allocated to indicate 31#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
35 what is reclaimable later*/ 32#define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */
36#define SLAB_PANIC 0x00040000UL /* panic if kmem_cache_create() fails */ 33#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
37#define SLAB_DESTROY_BY_RCU 0x00080000UL /* defer freeing pages to RCU */
38#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */ 34#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
39 35
40/* flags passed to a constructor func */ 36/* Flags passed to a constructor functions */
41#define SLAB_CTOR_CONSTRUCTOR 0x001UL /* if not set, then deconstructor */ 37#define SLAB_CTOR_CONSTRUCTOR 0x001UL /* If not set, then deconstructor */
42#define SLAB_CTOR_ATOMIC 0x002UL /* tell constructor it can't sleep */ 38#define SLAB_CTOR_ATOMIC 0x002UL /* Tell constructor it can't sleep */
43#define SLAB_CTOR_VERIFY 0x004UL /* tell constructor it's a verify call */ 39#define SLAB_CTOR_VERIFY 0x004UL /* Tell constructor it's a verify call */
44 40
45#ifndef CONFIG_SLOB 41/*
46 42 * struct kmem_cache related prototypes
47/* prototypes */ 43 */
48extern void __init kmem_cache_init(void); 44void __init kmem_cache_init(void);
45extern int slab_is_available(void);
49 46
50extern struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, 47struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
51 unsigned long, 48 unsigned long,
52 void (*)(void *, struct kmem_cache *, unsigned long), 49 void (*)(void *, struct kmem_cache *, unsigned long),
53 void (*)(void *, struct kmem_cache *, unsigned long)); 50 void (*)(void *, struct kmem_cache *, unsigned long));
54extern void kmem_cache_destroy(struct kmem_cache *); 51void kmem_cache_destroy(struct kmem_cache *);
55extern int kmem_cache_shrink(struct kmem_cache *); 52int kmem_cache_shrink(struct kmem_cache *);
56extern void *kmem_cache_alloc(struct kmem_cache *, gfp_t); 53void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
57extern void *kmem_cache_zalloc(struct kmem_cache *, gfp_t); 54void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
58extern void kmem_cache_free(struct kmem_cache *, void *); 55void kmem_cache_free(struct kmem_cache *, void *);
59extern unsigned int kmem_cache_size(struct kmem_cache *); 56unsigned int kmem_cache_size(struct kmem_cache *);
60extern const char *kmem_cache_name(struct kmem_cache *); 57const char *kmem_cache_name(struct kmem_cache *);
58int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
59
60#ifdef CONFIG_NUMA
61extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
62#else
63static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
64 gfp_t flags, int node)
65{
66 return kmem_cache_alloc(cachep, flags);
67}
68#endif
69
70/*
71 * Common kmalloc functions provided by all allocators
72 */
73void *__kmalloc(size_t, gfp_t);
74void *__kzalloc(size_t, gfp_t);
75void kfree(const void *);
76unsigned int ksize(const void *);
77
78/**
79 * kcalloc - allocate memory for an array. The memory is set to zero.
80 * @n: number of elements.
81 * @size: element size.
82 * @flags: the type of memory to allocate.
83 */
84static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
85{
86 if (n != 0 && size > ULONG_MAX / n)
87 return NULL;
88 return __kzalloc(n * size, flags);
89}
61 90
62/* Size description struct for general caches. */ 91/*
63struct cache_sizes { 92 * Allocator specific definitions. These are mainly used to establish optimized
64 size_t cs_size; 93 * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by selecting
65 struct kmem_cache *cs_cachep; 94 * the appropriate general cache at compile time.
66 struct kmem_cache *cs_dmacachep; 95 */
67};
68extern struct cache_sizes malloc_sizes[];
69 96
70extern void *__kmalloc(size_t, gfp_t); 97#ifdef CONFIG_SLAB
98#include <linux/slab_def.h>
99#else
100/*
101 * Fallback definitions for an allocator not wanting to provide
102 * its own optimized kmalloc definitions (like SLOB).
103 */
71 104
72/** 105/**
73 * kmalloc - allocate memory 106 * kmalloc - allocate memory
@@ -116,46 +149,9 @@ extern void *__kmalloc(size_t, gfp_t);
116 */ 149 */
117static inline void *kmalloc(size_t size, gfp_t flags) 150static inline void *kmalloc(size_t size, gfp_t flags)
118{ 151{
119 if (__builtin_constant_p(size)) {
120 int i = 0;
121#define CACHE(x) \
122 if (size <= x) \
123 goto found; \
124 else \
125 i++;
126#include "kmalloc_sizes.h"
127#undef CACHE
128 {
129 extern void __you_cannot_kmalloc_that_much(void);
130 __you_cannot_kmalloc_that_much();
131 }
132found:
133 return kmem_cache_alloc((flags & GFP_DMA) ?
134 malloc_sizes[i].cs_dmacachep :
135 malloc_sizes[i].cs_cachep, flags);
136 }
137 return __kmalloc(size, flags); 152 return __kmalloc(size, flags);
138} 153}
139 154
140/*
141 * kmalloc_track_caller is a special version of kmalloc that records the
142 * calling function of the routine calling it for slab leak tracking instead
143 * of just the calling function (confusing, eh?).
144 * It's useful when the call to kmalloc comes from a widely-used standard
145 * allocator where we care about the real place the memory allocation
146 * request comes from.
147 */
148#ifndef CONFIG_DEBUG_SLAB
149#define kmalloc_track_caller(size, flags) \
150 __kmalloc(size, flags)
151#else
152extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
153#define kmalloc_track_caller(size, flags) \
154 __kmalloc_track_caller(size, flags, __builtin_return_address(0))
155#endif
156
157extern void *__kzalloc(size_t, gfp_t);
158
159/** 155/**
160 * kzalloc - allocate memory. The memory is set to zero. 156 * kzalloc - allocate memory. The memory is set to zero.
161 * @size: how many bytes of memory are required. 157 * @size: how many bytes of memory are required.
@@ -163,72 +159,41 @@ extern void *__kzalloc(size_t, gfp_t);
163 */ 159 */
164static inline void *kzalloc(size_t size, gfp_t flags) 160static inline void *kzalloc(size_t size, gfp_t flags)
165{ 161{
166 if (__builtin_constant_p(size)) {
167 int i = 0;
168#define CACHE(x) \
169 if (size <= x) \
170 goto found; \
171 else \
172 i++;
173#include "kmalloc_sizes.h"
174#undef CACHE
175 {
176 extern void __you_cannot_kzalloc_that_much(void);
177 __you_cannot_kzalloc_that_much();
178 }
179found:
180 return kmem_cache_zalloc((flags & GFP_DMA) ?
181 malloc_sizes[i].cs_dmacachep :
182 malloc_sizes[i].cs_cachep, flags);
183 }
184 return __kzalloc(size, flags); 162 return __kzalloc(size, flags);
185} 163}
164#endif
186 165
187/** 166#ifndef CONFIG_NUMA
188 * kcalloc - allocate memory for an array. The memory is set to zero. 167static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
189 * @n: number of elements.
190 * @size: element size.
191 * @flags: the type of memory to allocate.
192 */
193static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
194{ 168{
195 if (n != 0 && size > ULONG_MAX / n) 169 return kmalloc(size, flags);
196 return NULL;
197 return kzalloc(n * size, flags);
198} 170}
199 171
200extern void kfree(const void *); 172static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
201extern unsigned int ksize(const void *);
202extern int slab_is_available(void);
203
204#ifdef CONFIG_NUMA
205extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
206extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
207
208static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
209{ 173{
210 if (__builtin_constant_p(size)) { 174 return __kmalloc(size, flags);
211 int i = 0;
212#define CACHE(x) \
213 if (size <= x) \
214 goto found; \
215 else \
216 i++;
217#include "kmalloc_sizes.h"
218#undef CACHE
219 {
220 extern void __you_cannot_kmalloc_that_much(void);
221 __you_cannot_kmalloc_that_much();
222 }
223found:
224 return kmem_cache_alloc_node((flags & GFP_DMA) ?
225 malloc_sizes[i].cs_dmacachep :
226 malloc_sizes[i].cs_cachep, flags, node);
227 }
228 return __kmalloc_node(size, flags, node);
229} 175}
176#endif /* !CONFIG_NUMA */
230 177
231/* 178/*
179 * kmalloc_track_caller is a special version of kmalloc that records the
180 * calling function of the routine calling it for slab leak tracking instead
181 * of just the calling function (confusing, eh?).
182 * It's useful when the call to kmalloc comes from a widely-used standard
183 * allocator where we care about the real place the memory allocation
184 * request comes from.
185 */
186#ifdef CONFIG_DEBUG_SLAB
187extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
188#define kmalloc_track_caller(size, flags) \
189 __kmalloc_track_caller(size, flags, __builtin_return_address(0))
190#else
191#define kmalloc_track_caller(size, flags) \
192 __kmalloc(size, flags)
193#endif /* DEBUG_SLAB */
194
195#ifdef CONFIG_NUMA
196/*
232 * kmalloc_node_track_caller is a special version of kmalloc_node that 197 * kmalloc_node_track_caller is a special version of kmalloc_node that
233 * records the calling function of the routine calling it for slab leak 198 * records the calling function of the routine calling it for slab leak
234 * tracking instead of just the calling function (confusing, eh?). 199 * tracking instead of just the calling function (confusing, eh?).
@@ -236,70 +201,23 @@ found:
236 * standard allocator where we care about the real place the memory 201 * standard allocator where we care about the real place the memory
237 * allocation request comes from. 202 * allocation request comes from.
238 */ 203 */
239#ifndef CONFIG_DEBUG_SLAB 204#ifdef CONFIG_DEBUG_SLAB
240#define kmalloc_node_track_caller(size, flags, node) \
241 __kmalloc_node(size, flags, node)
242#else
243extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *); 205extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
244#define kmalloc_node_track_caller(size, flags, node) \ 206#define kmalloc_node_track_caller(size, flags, node) \
245 __kmalloc_node_track_caller(size, flags, node, \ 207 __kmalloc_node_track_caller(size, flags, node, \
246 __builtin_return_address(0)) 208 __builtin_return_address(0))
209#else
210#define kmalloc_node_track_caller(size, flags, node) \
211 __kmalloc_node(size, flags, node)
247#endif 212#endif
213
248#else /* CONFIG_NUMA */ 214#else /* CONFIG_NUMA */
249static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
250 gfp_t flags, int node)
251{
252 return kmem_cache_alloc(cachep, flags);
253}
254static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
255{
256 return kmalloc(size, flags);
257}
258 215
259#define kmalloc_node_track_caller(size, flags, node) \ 216#define kmalloc_node_track_caller(size, flags, node) \
260 kmalloc_track_caller(size, flags) 217 kmalloc_track_caller(size, flags)
261#endif
262 218
263extern int FASTCALL(kmem_cache_reap(int)); 219#endif /* DEBUG_SLAB */
264extern int FASTCALL(kmem_ptr_validate(struct kmem_cache *cachep, void *ptr));
265
266#else /* CONFIG_SLOB */
267
268/* SLOB allocator routines */
269
270void kmem_cache_init(void);
271struct kmem_cache *kmem_cache_create(const char *c, size_t, size_t,
272 unsigned long,
273 void (*)(void *, struct kmem_cache *, unsigned long),
274 void (*)(void *, struct kmem_cache *, unsigned long));
275void kmem_cache_destroy(struct kmem_cache *c);
276void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags);
277void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
278void kmem_cache_free(struct kmem_cache *c, void *b);
279const char *kmem_cache_name(struct kmem_cache *);
280void *kmalloc(size_t size, gfp_t flags);
281void *__kzalloc(size_t size, gfp_t flags);
282void kfree(const void *m);
283unsigned int ksize(const void *m);
284unsigned int kmem_cache_size(struct kmem_cache *c);
285
286static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
287{
288 return __kzalloc(n * size, flags);
289}
290
291#define kmem_cache_shrink(d) (0)
292#define kmem_cache_reap(a)
293#define kmem_ptr_validate(a, b) (0)
294#define kmem_cache_alloc_node(c, f, n) kmem_cache_alloc(c, f)
295#define kmalloc_node(s, f, n) kmalloc(s, f)
296#define kzalloc(s, f) __kzalloc(s, f)
297#define kmalloc_track_caller kmalloc
298
299#define kmalloc_node_track_caller kmalloc_node
300
301#endif /* CONFIG_SLOB */
302 220
303#endif /* __KERNEL__ */ 221#endif /* __KERNEL__ */
304
305#endif /* _LINUX_SLAB_H */ 222#endif /* _LINUX_SLAB_H */
223
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
new file mode 100644
index 000000000000..4b463e66ddea
--- /dev/null
+++ b/include/linux/slab_def.h
@@ -0,0 +1,100 @@
1#ifndef _LINUX_SLAB_DEF_H
2#define _LINUX_SLAB_DEF_H
3
4/*
5 * Definitions unique to the original Linux SLAB allocator.
6 *
7 * What we provide here is a way to optimize the frequent kmalloc
8 * calls in the kernel by selecting the appropriate general cache
9 * if kmalloc was called with a size that can be established at
10 * compile time.
11 */
12
13#include <linux/init.h>
14#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */
15#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
16#include <linux/compiler.h>
17
18/* Size description struct for general caches. */
19struct cache_sizes {
20 size_t cs_size;
21 struct kmem_cache *cs_cachep;
22 struct kmem_cache *cs_dmacachep;
23};
24extern struct cache_sizes malloc_sizes[];
25
26static inline void *kmalloc(size_t size, gfp_t flags)
27{
28 if (__builtin_constant_p(size)) {
29 int i = 0;
30#define CACHE(x) \
31 if (size <= x) \
32 goto found; \
33 else \
34 i++;
35#include "kmalloc_sizes.h"
36#undef CACHE
37 {
38 extern void __you_cannot_kmalloc_that_much(void);
39 __you_cannot_kmalloc_that_much();
40 }
41found:
42 return kmem_cache_alloc((flags & GFP_DMA) ?
43 malloc_sizes[i].cs_dmacachep :
44 malloc_sizes[i].cs_cachep, flags);
45 }
46 return __kmalloc(size, flags);
47}
48
49static inline void *kzalloc(size_t size, gfp_t flags)
50{
51 if (__builtin_constant_p(size)) {
52 int i = 0;
53#define CACHE(x) \
54 if (size <= x) \
55 goto found; \
56 else \
57 i++;
58#include "kmalloc_sizes.h"
59#undef CACHE
60 {
61 extern void __you_cannot_kzalloc_that_much(void);
62 __you_cannot_kzalloc_that_much();
63 }
64found:
65 return kmem_cache_zalloc((flags & GFP_DMA) ?
66 malloc_sizes[i].cs_dmacachep :
67 malloc_sizes[i].cs_cachep, flags);
68 }
69 return __kzalloc(size, flags);
70}
71
72#ifdef CONFIG_NUMA
73extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
74
75static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
76{
77 if (__builtin_constant_p(size)) {
78 int i = 0;
79#define CACHE(x) \
80 if (size <= x) \
81 goto found; \
82 else \
83 i++;
84#include "kmalloc_sizes.h"
85#undef CACHE
86 {
87 extern void __you_cannot_kmalloc_that_much(void);
88 __you_cannot_kmalloc_that_much();
89 }
90found:
91 return kmem_cache_alloc_node((flags & GFP_DMA) ?
92 malloc_sizes[i].cs_dmacachep :
93 malloc_sizes[i].cs_cachep, flags, node);
94 }
95 return __kmalloc_node(size, flags, node);
96}
97
98#endif /* CONFIG_NUMA */
99
100#endif /* _LINUX_SLAB_DEF_H */
diff --git a/include/linux/smb_fs_sb.h b/include/linux/smb_fs_sb.h
index 5b4ae2cc445c..3aa97aa4277f 100644
--- a/include/linux/smb_fs_sb.h
+++ b/include/linux/smb_fs_sb.h
@@ -55,7 +55,7 @@ struct smb_sb_info {
55 * generation is incremented. 55 * generation is incremented.
56 */ 56 */
57 unsigned int generation; 57 unsigned int generation;
58 pid_t conn_pid; 58 struct pid *conn_pid;
59 struct smb_conn_opt opt; 59 struct smb_conn_opt opt;
60 wait_queue_head_t conn_wq; 60 wait_queue_head_t conn_wq;
61 int conn_complete; 61 int conn_complete;
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 94316a98e0d0..81480e613467 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -11,7 +11,7 @@
11 ** the sysctl() binary interface. Do *NOT* change the 11 ** the sysctl() binary interface. Do *NOT* change the
12 ** numbering of any existing values here, and do not change 12 ** numbering of any existing values here, and do not change
13 ** any numbers within any one set of values. If you have to 13 ** any numbers within any one set of values. If you have to
14 ** have to redefine an existing interface, use a new number for it. 14 ** redefine an existing interface, use a new number for it.
15 ** The kernel will then return -ENOTDIR to any application using 15 ** The kernel will then return -ENOTDIR to any application using
16 ** the old binary interface. 16 ** the old binary interface.
17 ** 17 **
@@ -918,8 +918,7 @@ typedef struct ctl_table ctl_table;
918 918
919typedef int ctl_handler (ctl_table *table, int __user *name, int nlen, 919typedef int ctl_handler (ctl_table *table, int __user *name, int nlen,
920 void __user *oldval, size_t __user *oldlenp, 920 void __user *oldval, size_t __user *oldlenp,
921 void __user *newval, size_t newlen, 921 void __user *newval, size_t newlen);
922 void **context);
923 922
924typedef int proc_handler (ctl_table *ctl, int write, struct file * filp, 923typedef int proc_handler (ctl_table *ctl, int write, struct file * filp,
925 void __user *buffer, size_t *lenp, loff_t *ppos); 924 void __user *buffer, size_t *lenp, loff_t *ppos);
@@ -950,7 +949,7 @@ extern int do_sysctl (int __user *name, int nlen,
950extern int do_sysctl_strategy (ctl_table *table, 949extern int do_sysctl_strategy (ctl_table *table,
951 int __user *name, int nlen, 950 int __user *name, int nlen,
952 void __user *oldval, size_t __user *oldlenp, 951 void __user *oldval, size_t __user *oldlenp,
953 void __user *newval, size_t newlen, void ** context); 952 void __user *newval, size_t newlen);
954 953
955extern ctl_handler sysctl_string; 954extern ctl_handler sysctl_string;
956extern ctl_handler sysctl_intvec; 955extern ctl_handler sysctl_intvec;
diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
index 9df8833670cb..98a1d8cfb73d 100644
--- a/include/linux/sysrq.h
+++ b/include/linux/sysrq.h
@@ -37,23 +37,37 @@ struct sysrq_key_op {
37 37
38#ifdef CONFIG_MAGIC_SYSRQ 38#ifdef CONFIG_MAGIC_SYSRQ
39 39
40extern int sysrq_on(void);
41
42/*
43 * Do not use this one directly:
44 */
45extern int __sysrq_enabled;
46
40/* Generic SysRq interface -- you may call it from any device driver, supplying 47/* Generic SysRq interface -- you may call it from any device driver, supplying
41 * ASCII code of the key, pointer to registers and kbd/tty structs (if they 48 * ASCII code of the key, pointer to registers and kbd/tty structs (if they
42 * are available -- else NULL's). 49 * are available -- else NULL's).
43 */ 50 */
44 51
45void handle_sysrq(int, struct tty_struct *); 52void handle_sysrq(int key, struct tty_struct *tty);
46void __handle_sysrq(int, struct tty_struct *, int check_mask); 53void __handle_sysrq(int key, struct tty_struct *tty, int check_mask);
47int register_sysrq_key(int, struct sysrq_key_op *); 54int register_sysrq_key(int key, struct sysrq_key_op *op);
48int unregister_sysrq_key(int, struct sysrq_key_op *); 55int unregister_sysrq_key(int key, struct sysrq_key_op *op);
49struct sysrq_key_op *__sysrq_get_key_op(int key); 56struct sysrq_key_op *__sysrq_get_key_op(int key);
50 57
51#else 58#else
52 59
60static inline int sysrq_on(void)
61{
62 return 0;
63}
53static inline int __reterr(void) 64static inline int __reterr(void)
54{ 65{
55 return -EINVAL; 66 return -EINVAL;
56} 67}
68static inline void handle_sysrq(int key, struct tty_struct *tty)
69{
70}
57 71
58#define register_sysrq_key(ig,nore) __reterr() 72#define register_sysrq_key(ig,nore) __reterr()
59#define unregister_sysrq_key(ig,nore) __reterr() 73#define unregister_sysrq_key(ig,nore) __reterr()
diff --git a/include/linux/task_io_accounting.h b/include/linux/task_io_accounting.h
new file mode 100644
index 000000000000..44d00e9cceea
--- /dev/null
+++ b/include/linux/task_io_accounting.h
@@ -0,0 +1,37 @@
1/*
2 * task_io_accounting: a structure which is used for recording a single task's
3 * IO statistics.
4 *
5 * Don't include this header file directly - it is designed to be dragged in via
6 * sched.h.
7 *
8 * Blame akpm@osdl.org for all this.
9 */
10
11#ifdef CONFIG_TASK_IO_ACCOUNTING
12struct task_io_accounting {
13 /*
14 * The number of bytes which this task has caused to be read from
15 * storage.
16 */
17 u64 read_bytes;
18
19 /*
20 * The number of bytes which this task has caused, or shall cause to be
21 * written to disk.
22 */
23 u64 write_bytes;
24
25 /*
26 * A task can cause "negative" IO too. If this task truncates some
27 * dirty pagecache, some IO which another task has been accounted for
28 * (in its write_bytes) will not be happening. We _could_ just
29 * subtract that from the truncating task's write_bytes, but there is
30 * information loss in doing that.
31 */
32 u64 cancelled_write_bytes;
33};
34#else
35struct task_io_accounting {
36};
37#endif
diff --git a/include/linux/task_io_accounting_ops.h b/include/linux/task_io_accounting_ops.h
new file mode 100644
index 000000000000..df2a319106b2
--- /dev/null
+++ b/include/linux/task_io_accounting_ops.h
@@ -0,0 +1,47 @@
1/*
2 * Task I/O accounting operations
3 */
4#ifndef __TASK_IO_ACCOUNTING_OPS_INCLUDED
5#define __TASK_IO_ACCOUNTING_OPS_INCLUDED
6
7#ifdef CONFIG_TASK_IO_ACCOUNTING
8static inline void task_io_account_read(size_t bytes)
9{
10 current->ioac.read_bytes += bytes;
11}
12
13static inline void task_io_account_write(size_t bytes)
14{
15 current->ioac.write_bytes += bytes;
16}
17
18static inline void task_io_account_cancelled_write(size_t bytes)
19{
20 current->ioac.cancelled_write_bytes += bytes;
21}
22
23static inline void task_io_accounting_init(struct task_struct *tsk)
24{
25 memset(&tsk->ioac, 0, sizeof(tsk->ioac));
26}
27
28#else
29
30static inline void task_io_account_read(size_t bytes)
31{
32}
33
34static inline void task_io_account_write(size_t bytes)
35{
36}
37
38static inline void task_io_account_cancelled_write(size_t bytes)
39{
40}
41
42static inline void task_io_accounting_init(struct task_struct *tsk)
43{
44}
45
46#endif /* CONFIG_TASK_IO_ACCOUNTING */
47#endif /* __TASK_IO_ACCOUNTING_OPS_INCLUDED */
diff --git a/include/linux/taskstats.h b/include/linux/taskstats.h
index 45248806ae9c..3fced4798255 100644
--- a/include/linux/taskstats.h
+++ b/include/linux/taskstats.h
@@ -31,7 +31,7 @@
31 */ 31 */
32 32
33 33
34#define TASKSTATS_VERSION 2 34#define TASKSTATS_VERSION 3
35#define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN 35#define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN
36 * in linux/sched.h */ 36 * in linux/sched.h */
37 37
@@ -115,31 +115,37 @@ struct taskstats {
115 __u64 ac_majflt; /* Major Page Fault Count */ 115 __u64 ac_majflt; /* Major Page Fault Count */
116 /* Basic Accounting Fields end */ 116 /* Basic Accounting Fields end */
117 117
118 /* Extended accounting fields start */ 118 /* Extended accounting fields start */
119 /* Accumulated RSS usage in duration of a task, in MBytes-usecs. 119 /* Accumulated RSS usage in duration of a task, in MBytes-usecs.
120 * The current rss usage is added to this counter every time 120 * The current rss usage is added to this counter every time
121 * a tick is charged to a task's system time. So, at the end we 121 * a tick is charged to a task's system time. So, at the end we
122 * will have memory usage multiplied by system time. Thus an 122 * will have memory usage multiplied by system time. Thus an
123 * average usage per system time unit can be calculated. 123 * average usage per system time unit can be calculated.
124 */ 124 */
125 __u64 coremem; /* accumulated RSS usage in MB-usec */ 125 __u64 coremem; /* accumulated RSS usage in MB-usec */
126 /* Accumulated virtual memory usage in duration of a task. 126 /* Accumulated virtual memory usage in duration of a task.
127 * Same as acct_rss_mem1 above except that we keep track of VM usage. 127 * Same as acct_rss_mem1 above except that we keep track of VM usage.
128 */ 128 */
129 __u64 virtmem; /* accumulated VM usage in MB-usec */ 129 __u64 virtmem; /* accumulated VM usage in MB-usec */
130 130
131 /* High watermark of RSS and virtual memory usage in duration of 131 /* High watermark of RSS and virtual memory usage in duration of
132 * a task, in KBytes. 132 * a task, in KBytes.
133 */ 133 */
134 __u64 hiwater_rss; /* High-watermark of RSS usage, in KB */ 134 __u64 hiwater_rss; /* High-watermark of RSS usage, in KB */
135 __u64 hiwater_vm; /* High-water VM usage, in KB */ 135 __u64 hiwater_vm; /* High-water VM usage, in KB */
136 136
137 /* The following four fields are I/O statistics of a task. */ 137 /* The following four fields are I/O statistics of a task. */
138 __u64 read_char; /* bytes read */ 138 __u64 read_char; /* bytes read */
139 __u64 write_char; /* bytes written */ 139 __u64 write_char; /* bytes written */
140 __u64 read_syscalls; /* read syscalls */ 140 __u64 read_syscalls; /* read syscalls */
141 __u64 write_syscalls; /* write syscalls */ 141 __u64 write_syscalls; /* write syscalls */
142 /* Extended accounting fields end */ 142 /* Extended accounting fields end */
143
144#define TASKSTATS_HAS_IO_ACCOUNTING
145 /* Per-task storage I/O accounting starts */
146 __u64 read_bytes; /* bytes of read I/O */
147 __u64 write_bytes; /* bytes of write I/O */
148 __u64 cancelled_write_bytes; /* bytes of cancelled write I/O */
143}; 149};
144 150
145 151
diff --git a/include/linux/tfrc.h b/include/linux/tfrc.h
index 31a9b25276fe..8a8462b4a4dd 100644
--- a/include/linux/tfrc.h
+++ b/include/linux/tfrc.h
@@ -37,10 +37,14 @@ struct tfrc_rx_info {
37 * @tfrctx_p: current loss event rate (5.4) 37 * @tfrctx_p: current loss event rate (5.4)
38 * @tfrctx_rto: estimate of RTO, equals 4*RTT (4.3) 38 * @tfrctx_rto: estimate of RTO, equals 4*RTT (4.3)
39 * @tfrctx_ipi: inter-packet interval (4.6) 39 * @tfrctx_ipi: inter-packet interval (4.6)
40 *
41 * Note: X and X_recv are both maintained in units of 64 * bytes/second. This
42 * enables a finer resolution of sending rates and avoids problems with
43 * integer arithmetic; u32 is not sufficient as scaling consumes 6 bits.
40 */ 44 */
41struct tfrc_tx_info { 45struct tfrc_tx_info {
42 __u32 tfrctx_x; 46 __u64 tfrctx_x;
43 __u32 tfrctx_x_recv; 47 __u64 tfrctx_x_recv;
44 __u32 tfrctx_x_calc; 48 __u32 tfrctx_x_calc;
45 __u32 tfrctx_rtt; 49 __u32 tfrctx_rtt;
46 __u32 tfrctx_p; 50 __u32 tfrctx_p;
diff --git a/include/linux/timer.h b/include/linux/timer.h
index c982304dbafd..eeef6643d4c6 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -98,4 +98,10 @@ extern void run_local_timers(void);
98struct hrtimer; 98struct hrtimer;
99extern int it_real_fn(struct hrtimer *); 99extern int it_real_fn(struct hrtimer *);
100 100
101unsigned long __round_jiffies(unsigned long j, int cpu);
102unsigned long __round_jiffies_relative(unsigned long j, int cpu);
103unsigned long round_jiffies(unsigned long j);
104unsigned long round_jiffies_relative(unsigned long j);
105
106
101#endif 107#endif
diff --git a/include/linux/topology.h b/include/linux/topology.h
index da508d1998e4..6c5a6e6e813b 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -93,7 +93,7 @@
93 .groups = NULL, \ 93 .groups = NULL, \
94 .min_interval = 1, \ 94 .min_interval = 1, \
95 .max_interval = 2, \ 95 .max_interval = 2, \
96 .busy_factor = 8, \ 96 .busy_factor = 64, \
97 .imbalance_pct = 110, \ 97 .imbalance_pct = 110, \
98 .cache_nice_tries = 0, \ 98 .cache_nice_tries = 0, \
99 .per_cpu_gain = 25, \ 99 .per_cpu_gain = 25, \
@@ -194,7 +194,8 @@
194 .wake_idx = 0, /* unused */ \ 194 .wake_idx = 0, /* unused */ \
195 .forkexec_idx = 0, /* unused */ \ 195 .forkexec_idx = 0, /* unused */ \
196 .per_cpu_gain = 100, \ 196 .per_cpu_gain = 100, \
197 .flags = SD_LOAD_BALANCE, \ 197 .flags = SD_LOAD_BALANCE \
198 | SD_SERIALIZE, \
198 .last_balance = jiffies, \ 199 .last_balance = jiffies, \
199 .balance_interval = 64, \ 200 .balance_interval = 64, \
200 .nr_balance_failed = 0, \ 201 .nr_balance_failed = 0, \
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index df5c4654360d..5cb380a559fd 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -244,6 +244,7 @@ struct v4l2_pix_format
244#define V4L2_PIX_FMT_YYUV v4l2_fourcc('Y','Y','U','V') /* 16 YUV 4:2:2 */ 244#define V4L2_PIX_FMT_YYUV v4l2_fourcc('Y','Y','U','V') /* 16 YUV 4:2:2 */
245#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H','I','2','4') /* 8 8-bit color */ 245#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H','I','2','4') /* 8 8-bit color */
246#define V4L2_PIX_FMT_HM12 v4l2_fourcc('H','M','1','2') /* 8 YUV 4:2:0 16x16 macroblocks */ 246#define V4L2_PIX_FMT_HM12 v4l2_fourcc('H','M','1','2') /* 8 YUV 4:2:0 16x16 macroblocks */
247#define V4L2_PIX_FMT_RGB444 v4l2_fourcc('R','4','4','4') /* 16 xxxxrrrr ggggbbbb */
247 248
248/* see http://www.siliconimaging.com/RGB%20Bayer.htm */ 249/* see http://www.siliconimaging.com/RGB%20Bayer.htm */
249#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B','A','8','1') /* 8 BGBG.. GRGR.. */ 250#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B','A','8','1') /* 8 BGBG.. GRGR.. */
diff --git a/include/media/ir-common.h b/include/media/ir-common.h
index 8f58406533c6..2b25f5c95006 100644
--- a/include/media/ir-common.h
+++ b/include/media/ir-common.h
@@ -92,6 +92,7 @@ extern IR_KEYTAB_TYPE ir_codes_hauppauge_new[IR_KEYTAB_SIZE];
92extern IR_KEYTAB_TYPE ir_codes_npgtech[IR_KEYTAB_SIZE]; 92extern IR_KEYTAB_TYPE ir_codes_npgtech[IR_KEYTAB_SIZE];
93extern IR_KEYTAB_TYPE ir_codes_norwood[IR_KEYTAB_SIZE]; 93extern IR_KEYTAB_TYPE ir_codes_norwood[IR_KEYTAB_SIZE];
94extern IR_KEYTAB_TYPE ir_codes_proteus_2309[IR_KEYTAB_SIZE]; 94extern IR_KEYTAB_TYPE ir_codes_proteus_2309[IR_KEYTAB_SIZE];
95extern IR_KEYTAB_TYPE ir_codes_budget_ci_old[IR_KEYTAB_SIZE];
95 96
96#endif 97#endif
97 98
diff --git a/include/media/saa7146.h b/include/media/saa7146.h
index fee579f10b32..796bcf151a3a 100644
--- a/include/media/saa7146.h
+++ b/include/media/saa7146.h
@@ -42,10 +42,6 @@ extern unsigned int saa7146_debug;
42#define DEB_INT(x) if (0!=(DEBUG_VARIABLE&0x20)) { DEBUG_PROLOG; printk x; } /* interrupt debug messages */ 42#define DEB_INT(x) if (0!=(DEBUG_VARIABLE&0x20)) { DEBUG_PROLOG; printk x; } /* interrupt debug messages */
43#define DEB_CAP(x) if (0!=(DEBUG_VARIABLE&0x40)) { DEBUG_PROLOG; printk x; } /* capture debug messages */ 43#define DEB_CAP(x) if (0!=(DEBUG_VARIABLE&0x40)) { DEBUG_PROLOG; printk x; } /* capture debug messages */
44 44
45#define SAA7146_IER_DISABLE(x,y) \
46 saa7146_write(x, IER, saa7146_read(x, IER) & ~(y));
47#define SAA7146_IER_ENABLE(x,y) \
48 saa7146_write(x, IER, saa7146_read(x, IER) | (y));
49#define SAA7146_ISR_CLEAR(x,y) \ 45#define SAA7146_ISR_CLEAR(x,y) \
50 saa7146_write(x, ISR, (y)); 46 saa7146_write(x, ISR, (y));
51 47
@@ -441,4 +437,20 @@ int saa7146_wait_for_debi_done(struct saa7146_dev *dev, int nobusyloop);
441#define SAA7146_I2C_BUS_BIT_RATE_80 (0x200) 437#define SAA7146_I2C_BUS_BIT_RATE_80 (0x200)
442#define SAA7146_I2C_BUS_BIT_RATE_60 (0x300) 438#define SAA7146_I2C_BUS_BIT_RATE_60 (0x300)
443 439
440static inline void SAA7146_IER_DISABLE(struct saa7146_dev *x, unsigned y)
441{
442 unsigned long flags;
443 spin_lock_irqsave(&x->int_slock, flags);
444 saa7146_write(x, IER, saa7146_read(x, IER) & ~y);
445 spin_unlock_irqrestore(&x->int_slock, flags);
446}
447
448static inline void SAA7146_IER_ENABLE(struct saa7146_dev *x, unsigned y)
449{
450 unsigned long flags;
451 spin_lock_irqsave(&x->int_slock, flags);
452 saa7146_write(x, IER, saa7146_read(x, IER) | y);
453 spin_unlock_irqrestore(&x->int_slock, flags);
454}
455
444#endif 456#endif
diff --git a/include/media/tuner-types.h b/include/media/tuner-types.h
index 37dad07a8439..e5ad3fcfe984 100644
--- a/include/media/tuner-types.h
+++ b/include/media/tuner-types.h
@@ -50,6 +50,10 @@ struct tuner_params {
50 sensitivity. If this setting is 1, then set PORT2 to 1 to 50 sensitivity. If this setting is 1, then set PORT2 to 1 to
51 get proper FM reception. */ 51 get proper FM reception. */
52 unsigned int port2_fm_high_sensitivity:1; 52 unsigned int port2_fm_high_sensitivity:1;
53 /* Some Philips tuners use tda9887 cGainNormal to select the FM radio
54 sensitivity. If this setting is 1, e register will use cGainNormal
55 instead of cGainLow. */
56 unsigned int fm_gain_normal:1;
53 /* Most tuners with a tda9887 use QSS mode. Some (cheaper) tuners 57 /* Most tuners with a tda9887 use QSS mode. Some (cheaper) tuners
54 use Intercarrier mode. If this setting is 1, then the tuner 58 use Intercarrier mode. If this setting is 1, then the tuner
55 needs to be set to intercarrier mode. */ 59 needs to be set to intercarrier mode. */
diff --git a/include/media/tuner.h b/include/media/tuner.h
index 3116e750132f..99acf847365c 100644
--- a/include/media/tuner.h
+++ b/include/media/tuner.h
@@ -145,6 +145,7 @@ extern int tuner_debug;
145#define TDA9887_DEEMPHASIS_75 (3<<16) 145#define TDA9887_DEEMPHASIS_75 (3<<16)
146#define TDA9887_AUTOMUTE (1<<18) 146#define TDA9887_AUTOMUTE (1<<18)
147#define TDA9887_GATING_18 (1<<19) 147#define TDA9887_GATING_18 (1<<19)
148#define TDA9887_GAIN_NORMAL (1<<20)
148 149
149#ifdef __KERNEL__ 150#ifdef __KERNEL__
150 151
diff --git a/include/media/tveeprom.h b/include/media/tveeprom.h
index e9fc1a785497..5660ea24996b 100644
--- a/include/media/tveeprom.h
+++ b/include/media/tveeprom.h
@@ -3,7 +3,7 @@
3 3
4struct tveeprom { 4struct tveeprom {
5 u32 has_radio; 5 u32 has_radio;
6 u32 has_ir; /* 0: no IR, 1: IR present, 2: unknown */ 6 u32 has_ir; /* bit 0: IR receiver present, bit 1: IR transmitter (blaster) present. -1 == unknown */
7 u32 has_MAC_address; /* 0: no MAC, 1: MAC present, 2: unknown */ 7 u32 has_MAC_address; /* 0: no MAC, 1: MAC present, 2: unknown */
8 8
9 u32 tuner_type; 9 u32 tuner_type;
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index aecc946980a3..91b19921f958 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -144,6 +144,9 @@ enum v4l2_chip_ident {
144 V4L2_IDENT_CX25841 = 241, 144 V4L2_IDENT_CX25841 = 241,
145 V4L2_IDENT_CX25842 = 242, 145 V4L2_IDENT_CX25842 = 242,
146 V4L2_IDENT_CX25843 = 243, 146 V4L2_IDENT_CX25843 = 243,
147
148 /* OmniVision sensors - range 250-299 */
149 V4L2_IDENT_OV7670 = 250,
147}; 150};
148 151
149/* audio ioctls */ 152/* audio ioctls */
@@ -251,4 +254,8 @@ struct v4l2_crystal_freq {
251 If the frequency is not supported, then -EINVAL is returned. */ 254 If the frequency is not supported, then -EINVAL is returned. */
252#define VIDIOC_INT_S_CRYSTAL_FREQ _IOW ('d', 113, struct v4l2_crystal_freq) 255#define VIDIOC_INT_S_CRYSTAL_FREQ _IOW ('d', 113, struct v4l2_crystal_freq)
253 256
257/* Initialize the sensor registors to some sort of reasonable
258 default values. */
259#define VIDIOC_INT_INIT _IOW ('d', 114, u32)
260
254#endif /* V4L2_COMMON_H_ */ 261#endif /* V4L2_COMMON_H_ */
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index 6a11d772700f..fb96472a1bd3 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -43,6 +43,7 @@
43 43
44/* Video standard functions */ 44/* Video standard functions */
45extern unsigned int v4l2_video_std_fps(struct v4l2_standard *vs); 45extern unsigned int v4l2_video_std_fps(struct v4l2_standard *vs);
46extern char *v4l2_norm_to_name(v4l2_std_id id);
46extern int v4l2_video_std_construct(struct v4l2_standard *vs, 47extern int v4l2_video_std_construct(struct v4l2_standard *vs,
47 int id, char *name); 48 int id, char *name);
48 49
@@ -81,12 +82,6 @@ extern long v4l_compat_ioctl32(struct file *file, unsigned int cmd,
81 * This version moves redundant code from video device code to 82 * This version moves redundant code from video device code to
82 * the common handler 83 * the common handler
83 */ 84 */
84struct v4l2_tvnorm {
85 char *name;
86 v4l2_std_id id;
87
88 void *priv_data;
89};
90 85
91struct video_device 86struct video_device
92{ 87{
@@ -104,9 +99,8 @@ struct video_device
104 int debug; /* Activates debug level*/ 99 int debug; /* Activates debug level*/
105 100
106 /* Video standard vars */ 101 /* Video standard vars */
107 int tvnormsize; /* Size of tvnorm array */ 102 v4l2_std_id tvnorms; /* Supported tv norms */
108 v4l2_std_id current_norm; /* Current tvnorm */ 103 v4l2_std_id current_norm; /* Current tvnorm */
109 struct v4l2_tvnorm *tvnorms;
110 104
111 /* callbacks */ 105 /* callbacks */
112 void (*release)(struct video_device *vfd); 106 void (*release)(struct video_device *vfd);
@@ -211,7 +205,7 @@ struct video_device
211 /* Standard handling 205 /* Standard handling
212 G_STD and ENUMSTD are handled by videodev.c 206 G_STD and ENUMSTD are handled by videodev.c
213 */ 207 */
214 int (*vidioc_s_std) (struct file *file, void *fh, v4l2_std_id a); 208 int (*vidioc_s_std) (struct file *file, void *fh, v4l2_std_id *norm);
215 int (*vidioc_querystd) (struct file *file, void *fh, v4l2_std_id *a); 209 int (*vidioc_querystd) (struct file *file, void *fh, v4l2_std_id *a);
216 210
217 /* Input handling */ 211 /* Input handling */
diff --git a/include/net/ax25.h b/include/net/ax25.h
index e1d116f11923..14b72d868f03 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -285,6 +285,8 @@ extern struct sock *ax25_make_new(struct sock *, struct ax25_dev *);
285extern const ax25_address ax25_bcast; 285extern const ax25_address ax25_bcast;
286extern const ax25_address ax25_defaddr; 286extern const ax25_address ax25_defaddr;
287extern const ax25_address null_ax25_address; 287extern const ax25_address null_ax25_address;
288extern char *ax2asc(char *buf, const ax25_address *);
289extern void asc2ax(ax25_address *addr, const char *callsign);
288extern int ax25cmp(const ax25_address *, const ax25_address *); 290extern int ax25cmp(const ax25_address *, const ax25_address *);
289extern int ax25digicmp(const ax25_digi *, const ax25_digi *); 291extern int ax25digicmp(const ax25_digi *, const ax25_digi *);
290extern const unsigned char *ax25_addr_parse(const unsigned char *, int, 292extern const unsigned char *ax25_addr_parse(const unsigned char *, int,
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 10a3eec191fd..41456c148842 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -739,13 +739,13 @@ struct sockaddr_hci {
739struct hci_filter { 739struct hci_filter {
740 unsigned long type_mask; 740 unsigned long type_mask;
741 unsigned long event_mask[2]; 741 unsigned long event_mask[2];
742 __u16 opcode; 742 __le16 opcode;
743}; 743};
744 744
745struct hci_ufilter { 745struct hci_ufilter {
746 __u32 type_mask; 746 __u32 type_mask;
747 __u32 event_mask[2]; 747 __u32 event_mask[2];
748 __u16 opcode; 748 __le16 opcode;
749}; 749};
750 750
751#define HCI_FLT_TYPE_BITS 31 751#define HCI_FLT_TYPE_BITS 31
diff --git a/include/net/ip.h b/include/net/ip.h
index 83cb9ac5554e..053f02b5cb89 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -376,8 +376,7 @@ int ipv4_doint_and_flush(ctl_table *ctl, int write,
376 size_t *lenp, loff_t *ppos); 376 size_t *lenp, loff_t *ppos);
377int ipv4_doint_and_flush_strategy(ctl_table *table, int __user *name, int nlen, 377int ipv4_doint_and_flush_strategy(ctl_table *table, int __user *name, int nlen,
378 void __user *oldval, size_t __user *oldlenp, 378 void __user *oldval, size_t __user *oldlenp,
379 void __user *newval, size_t newlen, 379 void __user *newval, size_t newlen);
380 void **context);
381#ifdef CONFIG_PROC_FS 380#ifdef CONFIG_PROC_FS
382extern int ip_misc_proc_init(void); 381extern int ip_misc_proc_init(void);
383#endif 382#endif
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 215461f18db1..c818f87122af 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -368,7 +368,7 @@ static inline void sctp_sysctl_register(void) { return; }
368static inline void sctp_sysctl_unregister(void) { return; } 368static inline void sctp_sysctl_unregister(void) { return; }
369static inline int sctp_sysctl_jiffies_ms(ctl_table *table, int __user *name, int nlen, 369static inline int sctp_sysctl_jiffies_ms(ctl_table *table, int __user *name, int nlen,
370 void __user *oldval, size_t __user *oldlenp, 370 void __user *oldval, size_t __user *oldlenp,
371 void __user *newval, size_t newlen, void **context) { 371 void __user *newval, size_t newlen) {
372 return -ENOSYS; 372 return -ENOSYS;
373} 373}
374#endif 374#endif
diff --git a/include/rdma/ib_marshall.h b/include/rdma/ib_marshall.h
index 66bf4d7d0dfb..db037205c9e8 100644
--- a/include/rdma/ib_marshall.h
+++ b/include/rdma/ib_marshall.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005 Intel Corporation. All rights reserved. 2 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -41,6 +41,9 @@
41void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst, 41void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
42 struct ib_qp_attr *src); 42 struct ib_qp_attr *src);
43 43
44void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
45 struct ib_ah_attr *src);
46
44void ib_copy_path_rec_to_user(struct ib_user_path_rec *dst, 47void ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
45 struct ib_sa_path_rec *src); 48 struct ib_sa_path_rec *src);
46 49
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 8eacc3510993..fd2353fa7e12 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -43,6 +43,8 @@
43 43
44#include <linux/types.h> 44#include <linux/types.h>
45#include <linux/device.h> 45#include <linux/device.h>
46#include <linux/mm.h>
47#include <linux/dma-mapping.h>
46 48
47#include <asm/atomic.h> 49#include <asm/atomic.h>
48#include <asm/scatterlist.h> 50#include <asm/scatterlist.h>
@@ -848,6 +850,49 @@ struct ib_cache {
848 u8 *lmc_cache; 850 u8 *lmc_cache;
849}; 851};
850 852
853struct ib_dma_mapping_ops {
854 int (*mapping_error)(struct ib_device *dev,
855 u64 dma_addr);
856 u64 (*map_single)(struct ib_device *dev,
857 void *ptr, size_t size,
858 enum dma_data_direction direction);
859 void (*unmap_single)(struct ib_device *dev,
860 u64 addr, size_t size,
861 enum dma_data_direction direction);
862 u64 (*map_page)(struct ib_device *dev,
863 struct page *page, unsigned long offset,
864 size_t size,
865 enum dma_data_direction direction);
866 void (*unmap_page)(struct ib_device *dev,
867 u64 addr, size_t size,
868 enum dma_data_direction direction);
869 int (*map_sg)(struct ib_device *dev,
870 struct scatterlist *sg, int nents,
871 enum dma_data_direction direction);
872 void (*unmap_sg)(struct ib_device *dev,
873 struct scatterlist *sg, int nents,
874 enum dma_data_direction direction);
875 u64 (*dma_address)(struct ib_device *dev,
876 struct scatterlist *sg);
877 unsigned int (*dma_len)(struct ib_device *dev,
878 struct scatterlist *sg);
879 void (*sync_single_for_cpu)(struct ib_device *dev,
880 u64 dma_handle,
881 size_t size,
882 enum dma_data_direction dir);
883 void (*sync_single_for_device)(struct ib_device *dev,
884 u64 dma_handle,
885 size_t size,
886 enum dma_data_direction dir);
887 void *(*alloc_coherent)(struct ib_device *dev,
888 size_t size,
889 u64 *dma_handle,
890 gfp_t flag);
891 void (*free_coherent)(struct ib_device *dev,
892 size_t size, void *cpu_addr,
893 u64 dma_handle);
894};
895
851struct iw_cm_verbs; 896struct iw_cm_verbs;
852 897
853struct ib_device { 898struct ib_device {
@@ -992,6 +1037,8 @@ struct ib_device {
992 struct ib_mad *in_mad, 1037 struct ib_mad *in_mad,
993 struct ib_mad *out_mad); 1038 struct ib_mad *out_mad);
994 1039
1040 struct ib_dma_mapping_ops *dma_ops;
1041
995 struct module *owner; 1042 struct module *owner;
996 struct class_device class_dev; 1043 struct class_device class_dev;
997 struct kobject ports_parent; 1044 struct kobject ports_parent;
@@ -1395,10 +1442,216 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
1395 * usable for DMA. 1442 * usable for DMA.
1396 * @pd: The protection domain associated with the memory region. 1443 * @pd: The protection domain associated with the memory region.
1397 * @mr_access_flags: Specifies the memory access rights. 1444 * @mr_access_flags: Specifies the memory access rights.
1445 *
1446 * Note that the ib_dma_*() functions defined below must be used
1447 * to create/destroy addresses used with the Lkey or Rkey returned
1448 * by ib_get_dma_mr().
1398 */ 1449 */
1399struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags); 1450struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
1400 1451
1401/** 1452/**
1453 * ib_dma_mapping_error - check a DMA addr for error
1454 * @dev: The device for which the dma_addr was created
1455 * @dma_addr: The DMA address to check
1456 */
1457static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
1458{
1459 return dev->dma_ops ?
1460 dev->dma_ops->mapping_error(dev, dma_addr) :
1461 dma_mapping_error(dma_addr);
1462}
1463
1464/**
1465 * ib_dma_map_single - Map a kernel virtual address to DMA address
1466 * @dev: The device for which the dma_addr is to be created
1467 * @cpu_addr: The kernel virtual address
1468 * @size: The size of the region in bytes
1469 * @direction: The direction of the DMA
1470 */
1471static inline u64 ib_dma_map_single(struct ib_device *dev,
1472 void *cpu_addr, size_t size,
1473 enum dma_data_direction direction)
1474{
1475 return dev->dma_ops ?
1476 dev->dma_ops->map_single(dev, cpu_addr, size, direction) :
1477 dma_map_single(dev->dma_device, cpu_addr, size, direction);
1478}
1479
1480/**
1481 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
1482 * @dev: The device for which the DMA address was created
1483 * @addr: The DMA address
1484 * @size: The size of the region in bytes
1485 * @direction: The direction of the DMA
1486 */
1487static inline void ib_dma_unmap_single(struct ib_device *dev,
1488 u64 addr, size_t size,
1489 enum dma_data_direction direction)
1490{
1491 dev->dma_ops ?
1492 dev->dma_ops->unmap_single(dev, addr, size, direction) :
1493 dma_unmap_single(dev->dma_device, addr, size, direction);
1494}
1495
1496/**
1497 * ib_dma_map_page - Map a physical page to DMA address
1498 * @dev: The device for which the dma_addr is to be created
1499 * @page: The page to be mapped
1500 * @offset: The offset within the page
1501 * @size: The size of the region in bytes
1502 * @direction: The direction of the DMA
1503 */
1504static inline u64 ib_dma_map_page(struct ib_device *dev,
1505 struct page *page,
1506 unsigned long offset,
1507 size_t size,
1508 enum dma_data_direction direction)
1509{
1510 return dev->dma_ops ?
1511 dev->dma_ops->map_page(dev, page, offset, size, direction) :
1512 dma_map_page(dev->dma_device, page, offset, size, direction);
1513}
1514
1515/**
1516 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
1517 * @dev: The device for which the DMA address was created
1518 * @addr: The DMA address
1519 * @size: The size of the region in bytes
1520 * @direction: The direction of the DMA
1521 */
1522static inline void ib_dma_unmap_page(struct ib_device *dev,
1523 u64 addr, size_t size,
1524 enum dma_data_direction direction)
1525{
1526 dev->dma_ops ?
1527 dev->dma_ops->unmap_page(dev, addr, size, direction) :
1528 dma_unmap_page(dev->dma_device, addr, size, direction);
1529}
1530
1531/**
1532 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
1533 * @dev: The device for which the DMA addresses are to be created
1534 * @sg: The array of scatter/gather entries
1535 * @nents: The number of scatter/gather entries
1536 * @direction: The direction of the DMA
1537 */
1538static inline int ib_dma_map_sg(struct ib_device *dev,
1539 struct scatterlist *sg, int nents,
1540 enum dma_data_direction direction)
1541{
1542 return dev->dma_ops ?
1543 dev->dma_ops->map_sg(dev, sg, nents, direction) :
1544 dma_map_sg(dev->dma_device, sg, nents, direction);
1545}
1546
1547/**
1548 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
1549 * @dev: The device for which the DMA addresses were created
1550 * @sg: The array of scatter/gather entries
1551 * @nents: The number of scatter/gather entries
1552 * @direction: The direction of the DMA
1553 */
1554static inline void ib_dma_unmap_sg(struct ib_device *dev,
1555 struct scatterlist *sg, int nents,
1556 enum dma_data_direction direction)
1557{
1558 dev->dma_ops ?
1559 dev->dma_ops->unmap_sg(dev, sg, nents, direction) :
1560 dma_unmap_sg(dev->dma_device, sg, nents, direction);
1561}
1562
1563/**
1564 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
1565 * @dev: The device for which the DMA addresses were created
1566 * @sg: The scatter/gather entry
1567 */
1568static inline u64 ib_sg_dma_address(struct ib_device *dev,
1569 struct scatterlist *sg)
1570{
1571 return dev->dma_ops ?
1572 dev->dma_ops->dma_address(dev, sg) : sg_dma_address(sg);
1573}
1574
1575/**
1576 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
1577 * @dev: The device for which the DMA addresses were created
1578 * @sg: The scatter/gather entry
1579 */
1580static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
1581 struct scatterlist *sg)
1582{
1583 return dev->dma_ops ?
1584 dev->dma_ops->dma_len(dev, sg) : sg_dma_len(sg);
1585}
1586
1587/**
1588 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
1589 * @dev: The device for which the DMA address was created
1590 * @addr: The DMA address
1591 * @size: The size of the region in bytes
1592 * @dir: The direction of the DMA
1593 */
1594static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
1595 u64 addr,
1596 size_t size,
1597 enum dma_data_direction dir)
1598{
1599 dev->dma_ops ?
1600 dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir) :
1601 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
1602}
1603
1604/**
1605 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
1606 * @dev: The device for which the DMA address was created
1607 * @addr: The DMA address
1608 * @size: The size of the region in bytes
1609 * @dir: The direction of the DMA
1610 */
1611static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
1612 u64 addr,
1613 size_t size,
1614 enum dma_data_direction dir)
1615{
1616 dev->dma_ops ?
1617 dev->dma_ops->sync_single_for_device(dev, addr, size, dir) :
1618 dma_sync_single_for_device(dev->dma_device, addr, size, dir);
1619}
1620
1621/**
1622 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
1623 * @dev: The device for which the DMA address is requested
1624 * @size: The size of the region to allocate in bytes
1625 * @dma_handle: A pointer for returning the DMA address of the region
1626 * @flag: memory allocator flags
1627 */
1628static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
1629 size_t size,
1630 u64 *dma_handle,
1631 gfp_t flag)
1632{
1633 return dev->dma_ops ?
1634 dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag) :
1635 dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
1636}
1637
1638/**
1639 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
1640 * @dev: The device for which the DMA addresses were allocated
1641 * @size: The size of the region
1642 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
1643 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
1644 */
1645static inline void ib_dma_free_coherent(struct ib_device *dev,
1646 size_t size, void *cpu_addr,
1647 u64 dma_handle)
1648{
1649 dev->dma_ops ?
1650 dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle) :
1651 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
1652}
1653
1654/**
1402 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use 1655 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
1403 * by an HCA. 1656 * by an HCA.
1404 * @pd: The protection domain associated assigned to the registered region. 1657 * @pd: The protection domain associated assigned to the registered region.
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index deb5a0a4cee5..36cd8a8526a0 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -77,11 +77,34 @@ struct rdma_route {
77 int num_paths; 77 int num_paths;
78}; 78};
79 79
80struct rdma_conn_param {
81 const void *private_data;
82 u8 private_data_len;
83 u8 responder_resources;
84 u8 initiator_depth;
85 u8 flow_control;
86 u8 retry_count; /* ignored when accepting */
87 u8 rnr_retry_count;
88 /* Fields below ignored if a QP is created on the rdma_cm_id. */
89 u8 srq;
90 u32 qp_num;
91};
92
93struct rdma_ud_param {
94 const void *private_data;
95 u8 private_data_len;
96 struct ib_ah_attr ah_attr;
97 u32 qp_num;
98 u32 qkey;
99};
100
80struct rdma_cm_event { 101struct rdma_cm_event {
81 enum rdma_cm_event_type event; 102 enum rdma_cm_event_type event;
82 int status; 103 int status;
83 void *private_data; 104 union {
84 u8 private_data_len; 105 struct rdma_conn_param conn;
106 struct rdma_ud_param ud;
107 } param;
85}; 108};
86 109
87struct rdma_cm_id; 110struct rdma_cm_id;
@@ -204,25 +227,17 @@ void rdma_destroy_qp(struct rdma_cm_id *id);
204int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, 227int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
205 int *qp_attr_mask); 228 int *qp_attr_mask);
206 229
207struct rdma_conn_param {
208 const void *private_data;
209 u8 private_data_len;
210 u8 responder_resources;
211 u8 initiator_depth;
212 u8 flow_control;
213 u8 retry_count; /* ignored when accepting */
214 u8 rnr_retry_count;
215 /* Fields below ignored if a QP is created on the rdma_cm_id. */
216 u8 srq;
217 u32 qp_num;
218 enum ib_qp_type qp_type;
219};
220
221/** 230/**
222 * rdma_connect - Initiate an active connection request. 231 * rdma_connect - Initiate an active connection request.
232 * @id: Connection identifier to connect.
233 * @conn_param: Connection information used for connected QPs.
223 * 234 *
224 * Users must have resolved a route for the rdma_cm_id to connect with 235 * Users must have resolved a route for the rdma_cm_id to connect with
225 * by having called rdma_resolve_route before calling this routine. 236 * by having called rdma_resolve_route before calling this routine.
237 *
238 * This call will either connect to a remote QP or obtain remote QP
239 * information for unconnected rdma_cm_id's. The actual operation is
240 * based on the rdma_cm_id's port space.
226 */ 241 */
227int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param); 242int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
228 243
@@ -253,6 +268,21 @@ int rdma_listen(struct rdma_cm_id *id, int backlog);
253int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param); 268int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
254 269
255/** 270/**
271 * rdma_notify - Notifies the RDMA CM of an asynchronous event that has
272 * occurred on the connection.
273 * @id: Connection identifier to transition to established.
274 * @event: Asynchronous event.
275 *
276 * This routine should be invoked by users to notify the CM of relevant
277 * communication events. Events that should be reported to the CM and
278 * when to report them are:
279 *
280 * IB_EVENT_COMM_EST - Used when a message is received on a connected
281 * QP before an RTU has been received.
282 */
283int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event);
284
285/**
256 * rdma_reject - Called to reject a connection request or response. 286 * rdma_reject - Called to reject a connection request or response.
257 */ 287 */
258int rdma_reject(struct rdma_cm_id *id, const void *private_data, 288int rdma_reject(struct rdma_cm_id *id, const void *private_data,
diff --git a/include/rdma/rdma_cm_ib.h b/include/rdma/rdma_cm_ib.h
index e8c3af1804d4..9b176df1d667 100644
--- a/include/rdma/rdma_cm_ib.h
+++ b/include/rdma/rdma_cm_ib.h
@@ -44,4 +44,7 @@
44int rdma_set_ib_paths(struct rdma_cm_id *id, 44int rdma_set_ib_paths(struct rdma_cm_id *id,
45 struct ib_sa_path_rec *path_rec, int num_paths); 45 struct ib_sa_path_rec *path_rec, int num_paths);
46 46
47/* Global qkey for UD QPs and multicast groups. */
48#define RDMA_UD_QKEY 0x01234567
49
47#endif /* RDMA_CM_IB_H */ 50#endif /* RDMA_CM_IB_H */
diff --git a/include/rdma/rdma_user_cm.h b/include/rdma/rdma_user_cm.h
new file mode 100644
index 000000000000..9572ab8eeac1
--- /dev/null
+++ b/include/rdma/rdma_user_cm.h
@@ -0,0 +1,206 @@
1/*
2 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef RDMA_USER_CM_H
34#define RDMA_USER_CM_H
35
36#include <linux/types.h>
37#include <linux/in6.h>
38#include <rdma/ib_user_verbs.h>
39#include <rdma/ib_user_sa.h>
40
41#define RDMA_USER_CM_ABI_VERSION 3
42
43#define RDMA_MAX_PRIVATE_DATA 256
44
45enum {
46 RDMA_USER_CM_CMD_CREATE_ID,
47 RDMA_USER_CM_CMD_DESTROY_ID,
48 RDMA_USER_CM_CMD_BIND_ADDR,
49 RDMA_USER_CM_CMD_RESOLVE_ADDR,
50 RDMA_USER_CM_CMD_RESOLVE_ROUTE,
51 RDMA_USER_CM_CMD_QUERY_ROUTE,
52 RDMA_USER_CM_CMD_CONNECT,
53 RDMA_USER_CM_CMD_LISTEN,
54 RDMA_USER_CM_CMD_ACCEPT,
55 RDMA_USER_CM_CMD_REJECT,
56 RDMA_USER_CM_CMD_DISCONNECT,
57 RDMA_USER_CM_CMD_INIT_QP_ATTR,
58 RDMA_USER_CM_CMD_GET_EVENT,
59 RDMA_USER_CM_CMD_GET_OPTION,
60 RDMA_USER_CM_CMD_SET_OPTION,
61 RDMA_USER_CM_CMD_NOTIFY
62};
63
64/*
65 * command ABI structures.
66 */
67struct rdma_ucm_cmd_hdr {
68 __u32 cmd;
69 __u16 in;
70 __u16 out;
71};
72
73struct rdma_ucm_create_id {
74 __u64 uid;
75 __u64 response;
76 __u16 ps;
77 __u8 reserved[6];
78};
79
80struct rdma_ucm_create_id_resp {
81 __u32 id;
82};
83
84struct rdma_ucm_destroy_id {
85 __u64 response;
86 __u32 id;
87 __u32 reserved;
88};
89
90struct rdma_ucm_destroy_id_resp {
91 __u32 events_reported;
92};
93
94struct rdma_ucm_bind_addr {
95 __u64 response;
96 struct sockaddr_in6 addr;
97 __u32 id;
98};
99
100struct rdma_ucm_resolve_addr {
101 struct sockaddr_in6 src_addr;
102 struct sockaddr_in6 dst_addr;
103 __u32 id;
104 __u32 timeout_ms;
105};
106
107struct rdma_ucm_resolve_route {
108 __u32 id;
109 __u32 timeout_ms;
110};
111
112struct rdma_ucm_query_route {
113 __u64 response;
114 __u32 id;
115 __u32 reserved;
116};
117
118struct rdma_ucm_query_route_resp {
119 __u64 node_guid;
120 struct ib_user_path_rec ib_route[2];
121 struct sockaddr_in6 src_addr;
122 struct sockaddr_in6 dst_addr;
123 __u32 num_paths;
124 __u8 port_num;
125 __u8 reserved[3];
126};
127
128struct rdma_ucm_conn_param {
129 __u32 qp_num;
130 __u32 reserved;
131 __u8 private_data[RDMA_MAX_PRIVATE_DATA];
132 __u8 private_data_len;
133 __u8 srq;
134 __u8 responder_resources;
135 __u8 initiator_depth;
136 __u8 flow_control;
137 __u8 retry_count;
138 __u8 rnr_retry_count;
139 __u8 valid;
140};
141
142struct rdma_ucm_ud_param {
143 __u32 qp_num;
144 __u32 qkey;
145 struct ib_uverbs_ah_attr ah_attr;
146 __u8 private_data[RDMA_MAX_PRIVATE_DATA];
147 __u8 private_data_len;
148 __u8 reserved[7];
149};
150
151struct rdma_ucm_connect {
152 struct rdma_ucm_conn_param conn_param;
153 __u32 id;
154 __u32 reserved;
155};
156
157struct rdma_ucm_listen {
158 __u32 id;
159 __u32 backlog;
160};
161
162struct rdma_ucm_accept {
163 __u64 uid;
164 struct rdma_ucm_conn_param conn_param;
165 __u32 id;
166 __u32 reserved;
167};
168
169struct rdma_ucm_reject {
170 __u32 id;
171 __u8 private_data_len;
172 __u8 reserved[3];
173 __u8 private_data[RDMA_MAX_PRIVATE_DATA];
174};
175
176struct rdma_ucm_disconnect {
177 __u32 id;
178};
179
180struct rdma_ucm_init_qp_attr {
181 __u64 response;
182 __u32 id;
183 __u32 qp_state;
184};
185
186struct rdma_ucm_notify {
187 __u32 id;
188 __u32 event;
189};
190
191struct rdma_ucm_get_event {
192 __u64 response;
193};
194
195struct rdma_ucm_event_resp {
196 __u64 uid;
197 __u32 id;
198 __u32 event;
199 __u32 status;
200 union {
201 struct rdma_ucm_conn_param conn;
202 struct rdma_ucm_ud_param ud;
203 } param;
204};
205
206#endif /* RDMA_USER_CM_H */
diff --git a/include/video/sstfb.h b/include/video/sstfb.h
index 5dbf5e7e50a8..baa163f770ab 100644
--- a/include/video/sstfb.h
+++ b/include/video/sstfb.h
@@ -119,7 +119,7 @@
119#define BACKPORCH 0x0208 119#define BACKPORCH 0x0208
120#define VIDEODIMENSIONS 0x020c 120#define VIDEODIMENSIONS 0x020c
121#define FBIINIT0 0x0210 /* misc+fifo controls */ 121#define FBIINIT0 0x0210 /* misc+fifo controls */
122# define EN_VGA_PASSTHROUGH BIT(0) 122# define DIS_VGA_PASSTHROUGH BIT(0)
123# define FBI_RESET BIT(1) 123# define FBI_RESET BIT(1)
124# define FIFO_RESET BIT(2) 124# define FIFO_RESET BIT(2)
125#define FBIINIT1 0x0214 /* PCI + video controls */ 125#define FBIINIT1 0x0214 /* PCI + video controls */
@@ -251,7 +251,7 @@
251# define DACREG_ICS_CLK1_A 0 /* bit4 */ 251# define DACREG_ICS_CLK1_A 0 /* bit4 */
252 252
253/* sst default init registers */ 253/* sst default init registers */
254#define FBIINIT0_DEFAULT EN_VGA_PASSTHROUGH 254#define FBIINIT0_DEFAULT DIS_VGA_PASSTHROUGH
255 255
256#define FBIINIT1_DEFAULT \ 256#define FBIINIT1_DEFAULT \
257 ( \ 257 ( \
@@ -296,6 +296,11 @@
296 * 296 *
297 */ 297 */
298 298
299/* ioctl to enable/disable VGA passthrough */
300#define SSTFB_SET_VGAPASS _IOW('F', 0xdd, __u32)
301#define SSTFB_GET_VGAPASS _IOR('F', 0xdd, __u32)
302
303
299/* used to know witch clock to set */ 304/* used to know witch clock to set */
300enum { 305enum {
301 VID_CLOCK=0, 306 VID_CLOCK=0,
@@ -317,7 +322,7 @@ struct pll_timing {
317}; 322};
318 323
319struct dac_switch { 324struct dac_switch {
320 char * name; 325 const char *name;
321 int (*detect) (struct fb_info *info); 326 int (*detect) (struct fb_info *info);
322 int (*set_pll) (struct fb_info *info, const struct pll_timing *t, const int clock); 327 int (*set_pll) (struct fb_info *info, const struct pll_timing *t, const int clock);
323 void (*set_vidmod) (struct fb_info *info, const int bpp); 328 void (*set_vidmod) (struct fb_info *info, const int bpp);
@@ -345,7 +350,7 @@ struct sstfb_par {
345 struct pci_dev *dev; 350 struct pci_dev *dev;
346 int type; 351 int type;
347 u8 revision; 352 u8 revision;
348 int gfx_clock; /* status */ 353 u8 vgapass; /* VGA pass through: 1=enabled, 0=disabled */
349}; 354};
350 355
351#endif /* _SSTFB_H_ */ 356#endif /* _SSTFB_H_ */
diff --git a/init/Kconfig b/init/Kconfig
index 14d484606fab..f000edb3bb7a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -304,6 +304,15 @@ config TASK_XACCT
304 304
305 Say N if unsure. 305 Say N if unsure.
306 306
307config TASK_IO_ACCOUNTING
308 bool "Enable per-task storage I/O accounting (EXPERIMENTAL)"
309 depends on TASK_XACCT
310 help
311 Collect information on the number of bytes of storage I/O which this
312 task has caused.
313
314 Say N if unsure.
315
307config SYSCTL 316config SYSCTL
308 bool 317 bool
309 318
@@ -339,7 +348,7 @@ config SYSCTL_SYSCALL
339 If unsure say Y here. 348 If unsure say Y here.
340 349
341config KALLSYMS 350config KALLSYMS
342 bool "Load all symbols for debugging/kksymoops" if EMBEDDED 351 bool "Load all symbols for debugging/ksymoops" if EMBEDDED
343 default y 352 default y
344 help 353 help
345 Say Y here to let the kernel print out symbolic crash information and 354 Say Y here to let the kernel print out symbolic crash information and
diff --git a/init/initramfs.c b/init/initramfs.c
index 85f04037ade1..4fa0f7977de1 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -526,7 +526,7 @@ static void __init free_initrd(void)
526 526
527#endif 527#endif
528 528
529void __init populate_rootfs(void) 529static int __init populate_rootfs(void)
530{ 530{
531 char *err = unpack_to_rootfs(__initramfs_start, 531 char *err = unpack_to_rootfs(__initramfs_start,
532 __initramfs_end - __initramfs_start, 0); 532 __initramfs_end - __initramfs_start, 0);
@@ -544,7 +544,7 @@ void __init populate_rootfs(void)
544 unpack_to_rootfs((char *)initrd_start, 544 unpack_to_rootfs((char *)initrd_start,
545 initrd_end - initrd_start, 0); 545 initrd_end - initrd_start, 0);
546 free_initrd(); 546 free_initrd();
547 return; 547 return 0;
548 } 548 }
549 printk("it isn't (%s); looks like an initrd\n", err); 549 printk("it isn't (%s); looks like an initrd\n", err);
550 fd = sys_open("/initrd.image", O_WRONLY|O_CREAT, 0700); 550 fd = sys_open("/initrd.image", O_WRONLY|O_CREAT, 0700);
@@ -565,4 +565,6 @@ void __init populate_rootfs(void)
565#endif 565#endif
566 } 566 }
567#endif 567#endif
568 return 0;
568} 569}
570rootfs_initcall(populate_rootfs);
diff --git a/init/main.c b/init/main.c
index 036f97c0c34c..e3f0bb20b4dd 100644
--- a/init/main.c
+++ b/init/main.c
@@ -94,7 +94,6 @@ extern void pidmap_init(void);
94extern void prio_tree_init(void); 94extern void prio_tree_init(void);
95extern void radix_tree_init(void); 95extern void radix_tree_init(void);
96extern void free_initmem(void); 96extern void free_initmem(void);
97extern void populate_rootfs(void);
98extern void driver_init(void); 97extern void driver_init(void);
99extern void prepare_namespace(void); 98extern void prepare_namespace(void);
100#ifdef CONFIG_ACPI 99#ifdef CONFIG_ACPI
@@ -483,6 +482,12 @@ void __init __attribute__((weak)) smp_setup_processor_id(void)
483{ 482{
484} 483}
485 484
485static const char linux_banner[] =
486 "Linux version " UTS_RELEASE
487 " (" LINUX_COMPILE_BY "@" LINUX_COMPILE_HOST ")"
488 " (" LINUX_COMPILER ")"
489 " " UTS_VERSION "\n";
490
486asmlinkage void __init start_kernel(void) 491asmlinkage void __init start_kernel(void)
487{ 492{
488 char * command_line; 493 char * command_line;
@@ -509,7 +514,7 @@ asmlinkage void __init start_kernel(void)
509 boot_cpu_init(); 514 boot_cpu_init();
510 page_address_init(); 515 page_address_init();
511 printk(KERN_NOTICE); 516 printk(KERN_NOTICE);
512 printk(linux_banner, UTS_RELEASE, UTS_VERSION); 517 printk(linux_banner);
513 setup_arch(&command_line); 518 setup_arch(&command_line);
514 unwind_setup(); 519 unwind_setup();
515 setup_per_cpu_areas(); 520 setup_per_cpu_areas();
@@ -739,12 +744,6 @@ static int init(void * unused)
739 744
740 cpuset_init_smp(); 745 cpuset_init_smp();
741 746
742 /*
743 * Do this before initcalls, because some drivers want to access
744 * firmware files.
745 */
746 populate_rootfs();
747
748 do_basic_setup(); 747 do_basic_setup();
749 748
750 /* 749 /*
diff --git a/init/version.c b/init/version.c
index 2a5dfcd1c2e6..9d96d36501ca 100644
--- a/init/version.c
+++ b/init/version.c
@@ -33,8 +33,3 @@ struct uts_namespace init_uts_ns = {
33 }, 33 },
34}; 34};
35EXPORT_SYMBOL_GPL(init_uts_ns); 35EXPORT_SYMBOL_GPL(init_uts_ns);
36
37const char linux_banner[] =
38 "Linux version %s (" LINUX_COMPILE_BY "@"
39 LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") %s\n";
40
diff --git a/ipc/msgutil.c b/ipc/msgutil.c
index 0992616eeed6..c82c215693d7 100644
--- a/ipc/msgutil.c
+++ b/ipc/msgutil.c
@@ -36,7 +36,7 @@ struct msg_msg *load_msg(const void __user *src, int len)
36 if (alen > DATALEN_MSG) 36 if (alen > DATALEN_MSG)
37 alen = DATALEN_MSG; 37 alen = DATALEN_MSG;
38 38
39 msg = (struct msg_msg *)kmalloc(sizeof(*msg) + alen, GFP_KERNEL); 39 msg = kmalloc(sizeof(*msg) + alen, GFP_KERNEL);
40 if (msg == NULL) 40 if (msg == NULL)
41 return ERR_PTR(-ENOMEM); 41 return ERR_PTR(-ENOMEM);
42 42
@@ -56,7 +56,7 @@ struct msg_msg *load_msg(const void __user *src, int len)
56 alen = len; 56 alen = len;
57 if (alen > DATALEN_SEG) 57 if (alen > DATALEN_SEG)
58 alen = DATALEN_SEG; 58 alen = DATALEN_SEG;
59 seg = (struct msg_msgseg *)kmalloc(sizeof(*seg) + alen, 59 seg = kmalloc(sizeof(*seg) + alen,
60 GFP_KERNEL); 60 GFP_KERNEL);
61 if (seg == NULL) { 61 if (seg == NULL) {
62 err = -ENOMEM; 62 err = -ENOMEM;
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 2c3b4431472b..232aed2b10f9 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2342,32 +2342,48 @@ static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs)
2342} 2342}
2343 2343
2344/** 2344/**
2345 * cpuset_zone_allowed - Can we allocate memory on zone z's memory node? 2345 * cpuset_zone_allowed_softwall - Can we allocate on zone z's memory node?
2346 * @z: is this zone on an allowed node? 2346 * @z: is this zone on an allowed node?
2347 * @gfp_mask: memory allocation flags (we use __GFP_HARDWALL) 2347 * @gfp_mask: memory allocation flags
2348 * 2348 *
2349 * If we're in interrupt, yes, we can always allocate. If zone 2349 * If we're in interrupt, yes, we can always allocate. If
2350 * __GFP_THISNODE is set, yes, we can always allocate. If zone
2350 * z's node is in our tasks mems_allowed, yes. If it's not a 2351 * z's node is in our tasks mems_allowed, yes. If it's not a
2351 * __GFP_HARDWALL request and this zone's nodes is in the nearest 2352 * __GFP_HARDWALL request and this zone's nodes is in the nearest
2352 * mem_exclusive cpuset ancestor to this tasks cpuset, yes. 2353 * mem_exclusive cpuset ancestor to this tasks cpuset, yes.
2353 * Otherwise, no. 2354 * Otherwise, no.
2354 * 2355 *
2356 * If __GFP_HARDWALL is set, cpuset_zone_allowed_softwall()
2357 * reduces to cpuset_zone_allowed_hardwall(). Otherwise,
2358 * cpuset_zone_allowed_softwall() might sleep, and might allow a zone
2359 * from an enclosing cpuset.
2360 *
2361 * cpuset_zone_allowed_hardwall() only handles the simpler case of
2362 * hardwall cpusets, and never sleeps.
2363 *
2364 * The __GFP_THISNODE placement logic is really handled elsewhere,
2365 * by forcibly using a zonelist starting at a specified node, and by
2366 * (in get_page_from_freelist()) refusing to consider the zones for
2367 * any node on the zonelist except the first. By the time any such
2368 * calls get to this routine, we should just shut up and say 'yes'.
2369 *
2355 * GFP_USER allocations are marked with the __GFP_HARDWALL bit, 2370 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
2356 * and do not allow allocations outside the current tasks cpuset. 2371 * and do not allow allocations outside the current tasks cpuset.
2357 * GFP_KERNEL allocations are not so marked, so can escape to the 2372 * GFP_KERNEL allocations are not so marked, so can escape to the
2358 * nearest mem_exclusive ancestor cpuset. 2373 * nearest enclosing mem_exclusive ancestor cpuset.
2359 * 2374 *
2360 * Scanning up parent cpusets requires callback_mutex. The __alloc_pages() 2375 * Scanning up parent cpusets requires callback_mutex. The
2361 * routine only calls here with __GFP_HARDWALL bit _not_ set if 2376 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
2362 * it's a GFP_KERNEL allocation, and all nodes in the current tasks 2377 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
2363 * mems_allowed came up empty on the first pass over the zonelist. 2378 * current tasks mems_allowed came up empty on the first pass over
2364 * So only GFP_KERNEL allocations, if all nodes in the cpuset are 2379 * the zonelist. So only GFP_KERNEL allocations, if all nodes in the
2365 * short of memory, might require taking the callback_mutex mutex. 2380 * cpuset are short of memory, might require taking the callback_mutex
2381 * mutex.
2366 * 2382 *
2367 * The first call here from mm/page_alloc:get_page_from_freelist() 2383 * The first call here from mm/page_alloc:get_page_from_freelist()
2368 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets, so 2384 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
2369 * no allocation on a node outside the cpuset is allowed (unless in 2385 * so no allocation on a node outside the cpuset is allowed (unless
2370 * interrupt, of course). 2386 * in interrupt, of course).
2371 * 2387 *
2372 * The second pass through get_page_from_freelist() doesn't even call 2388 * The second pass through get_page_from_freelist() doesn't even call
2373 * here for GFP_ATOMIC calls. For those calls, the __alloc_pages() 2389 * here for GFP_ATOMIC calls. For those calls, the __alloc_pages()
@@ -2380,12 +2396,12 @@ static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs)
2380 * GFP_USER - only nodes in current tasks mems allowed ok. 2396 * GFP_USER - only nodes in current tasks mems allowed ok.
2381 * 2397 *
2382 * Rule: 2398 * Rule:
2383 * Don't call cpuset_zone_allowed() if you can't sleep, unless you 2399 * Don't call cpuset_zone_allowed_softwall if you can't sleep, unless you
2384 * pass in the __GFP_HARDWALL flag set in gfp_flag, which disables 2400 * pass in the __GFP_HARDWALL flag set in gfp_flag, which disables
2385 * the code that might scan up ancestor cpusets and sleep. 2401 * the code that might scan up ancestor cpusets and sleep.
2386 **/ 2402 */
2387 2403
2388int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) 2404int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
2389{ 2405{
2390 int node; /* node that zone z is on */ 2406 int node; /* node that zone z is on */
2391 const struct cpuset *cs; /* current cpuset ancestors */ 2407 const struct cpuset *cs; /* current cpuset ancestors */
@@ -2415,6 +2431,40 @@ int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
2415 return allowed; 2431 return allowed;
2416} 2432}
2417 2433
2434/*
2435 * cpuset_zone_allowed_hardwall - Can we allocate on zone z's memory node?
2436 * @z: is this zone on an allowed node?
2437 * @gfp_mask: memory allocation flags
2438 *
2439 * If we're in interrupt, yes, we can always allocate.
2440 * If __GFP_THISNODE is set, yes, we can always allocate. If zone
2441 * z's node is in our tasks mems_allowed, yes. Otherwise, no.
2442 *
2443 * The __GFP_THISNODE placement logic is really handled elsewhere,
2444 * by forcibly using a zonelist starting at a specified node, and by
2445 * (in get_page_from_freelist()) refusing to consider the zones for
2446 * any node on the zonelist except the first. By the time any such
2447 * calls get to this routine, we should just shut up and say 'yes'.
2448 *
2449 * Unlike the cpuset_zone_allowed_softwall() variant, above,
2450 * this variant requires that the zone be in the current tasks
2451 * mems_allowed or that we're in interrupt. It does not scan up the
2452 * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
2453 * It never sleeps.
2454 */
2455
2456int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
2457{
2458 int node; /* node that zone z is on */
2459
2460 if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
2461 return 1;
2462 node = zone_to_nid(z);
2463 if (node_isset(node, current->mems_allowed))
2464 return 1;
2465 return 0;
2466}
2467
2418/** 2468/**
2419 * cpuset_lock - lock out any changes to cpuset structures 2469 * cpuset_lock - lock out any changes to cpuset structures
2420 * 2470 *
diff --git a/kernel/exit.c b/kernel/exit.c
index 03e64fe4a14a..122fadb972fc 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -425,7 +425,7 @@ static void close_files(struct files_struct * files)
425 for (;;) { 425 for (;;) {
426 unsigned long set; 426 unsigned long set;
427 i = j * __NFDBITS; 427 i = j * __NFDBITS;
428 if (i >= fdt->max_fdset || i >= fdt->max_fds) 428 if (i >= fdt->max_fds)
429 break; 429 break;
430 set = fdt->open_fds->fds_bits[j++]; 430 set = fdt->open_fds->fds_bits[j++];
431 while (set) { 431 while (set) {
@@ -466,11 +466,9 @@ void fastcall put_files_struct(struct files_struct *files)
466 * you can free files immediately. 466 * you can free files immediately.
467 */ 467 */
468 fdt = files_fdtable(files); 468 fdt = files_fdtable(files);
469 if (fdt == &files->fdtab) 469 if (fdt != &files->fdtab)
470 fdt->free_files = files;
471 else
472 kmem_cache_free(files_cachep, files); 470 kmem_cache_free(files_cachep, files);
473 free_fdtable(fdt); 471 call_rcu(&fdt->rcu, free_fdtable_rcu);
474 } 472 }
475} 473}
476 474
diff --git a/kernel/fork.c b/kernel/fork.c
index 8c859eef8e6a..d16c566eb645 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -36,6 +36,7 @@
36#include <linux/syscalls.h> 36#include <linux/syscalls.h>
37#include <linux/jiffies.h> 37#include <linux/jiffies.h>
38#include <linux/futex.h> 38#include <linux/futex.h>
39#include <linux/task_io_accounting_ops.h>
39#include <linux/rcupdate.h> 40#include <linux/rcupdate.h>
40#include <linux/ptrace.h> 41#include <linux/ptrace.h>
41#include <linux/mount.h> 42#include <linux/mount.h>
@@ -613,7 +614,7 @@ static inline int copy_fs(unsigned long clone_flags, struct task_struct * tsk)
613 614
614static int count_open_files(struct fdtable *fdt) 615static int count_open_files(struct fdtable *fdt)
615{ 616{
616 int size = fdt->max_fdset; 617 int size = fdt->max_fds;
617 int i; 618 int i;
618 619
619 /* Find the last open fd */ 620 /* Find the last open fd */
@@ -640,12 +641,10 @@ static struct files_struct *alloc_files(void)
640 newf->next_fd = 0; 641 newf->next_fd = 0;
641 fdt = &newf->fdtab; 642 fdt = &newf->fdtab;
642 fdt->max_fds = NR_OPEN_DEFAULT; 643 fdt->max_fds = NR_OPEN_DEFAULT;
643 fdt->max_fdset = EMBEDDED_FD_SET_SIZE;
644 fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init; 644 fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
645 fdt->open_fds = (fd_set *)&newf->open_fds_init; 645 fdt->open_fds = (fd_set *)&newf->open_fds_init;
646 fdt->fd = &newf->fd_array[0]; 646 fdt->fd = &newf->fd_array[0];
647 INIT_RCU_HEAD(&fdt->rcu); 647 INIT_RCU_HEAD(&fdt->rcu);
648 fdt->free_files = NULL;
649 fdt->next = NULL; 648 fdt->next = NULL;
650 rcu_assign_pointer(newf->fdt, fdt); 649 rcu_assign_pointer(newf->fdt, fdt);
651out: 650out:
@@ -661,7 +660,7 @@ static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
661{ 660{
662 struct files_struct *newf; 661 struct files_struct *newf;
663 struct file **old_fds, **new_fds; 662 struct file **old_fds, **new_fds;
664 int open_files, size, i, expand; 663 int open_files, size, i;
665 struct fdtable *old_fdt, *new_fdt; 664 struct fdtable *old_fdt, *new_fdt;
666 665
667 *errorp = -ENOMEM; 666 *errorp = -ENOMEM;
@@ -672,25 +671,14 @@ static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
672 spin_lock(&oldf->file_lock); 671 spin_lock(&oldf->file_lock);
673 old_fdt = files_fdtable(oldf); 672 old_fdt = files_fdtable(oldf);
674 new_fdt = files_fdtable(newf); 673 new_fdt = files_fdtable(newf);
675 size = old_fdt->max_fdset;
676 open_files = count_open_files(old_fdt); 674 open_files = count_open_files(old_fdt);
677 expand = 0;
678 675
679 /* 676 /*
680 * Check whether we need to allocate a larger fd array or fd set. 677 * Check whether we need to allocate a larger fd array and fd set.
681 * Note: we're not a clone task, so the open count won't change. 678 * Note: we're not a clone task, so the open count won't change.
682 */ 679 */
683 if (open_files > new_fdt->max_fdset) {
684 new_fdt->max_fdset = 0;
685 expand = 1;
686 }
687 if (open_files > new_fdt->max_fds) { 680 if (open_files > new_fdt->max_fds) {
688 new_fdt->max_fds = 0; 681 new_fdt->max_fds = 0;
689 expand = 1;
690 }
691
692 /* if the old fdset gets grown now, we'll only copy up to "size" fds */
693 if (expand) {
694 spin_unlock(&oldf->file_lock); 682 spin_unlock(&oldf->file_lock);
695 spin_lock(&newf->file_lock); 683 spin_lock(&newf->file_lock);
696 *errorp = expand_files(newf, open_files-1); 684 *errorp = expand_files(newf, open_files-1);
@@ -710,8 +698,10 @@ static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
710 old_fds = old_fdt->fd; 698 old_fds = old_fdt->fd;
711 new_fds = new_fdt->fd; 699 new_fds = new_fdt->fd;
712 700
713 memcpy(new_fdt->open_fds->fds_bits, old_fdt->open_fds->fds_bits, open_files/8); 701 memcpy(new_fdt->open_fds->fds_bits,
714 memcpy(new_fdt->close_on_exec->fds_bits, old_fdt->close_on_exec->fds_bits, open_files/8); 702 old_fdt->open_fds->fds_bits, open_files/8);
703 memcpy(new_fdt->close_on_exec->fds_bits,
704 old_fdt->close_on_exec->fds_bits, open_files/8);
715 705
716 for (i = open_files; i != 0; i--) { 706 for (i = open_files; i != 0; i--) {
717 struct file *f = *old_fds++; 707 struct file *f = *old_fds++;
@@ -736,22 +726,19 @@ static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
736 /* This is long word aligned thus could use a optimized version */ 726 /* This is long word aligned thus could use a optimized version */
737 memset(new_fds, 0, size); 727 memset(new_fds, 0, size);
738 728
739 if (new_fdt->max_fdset > open_files) { 729 if (new_fdt->max_fds > open_files) {
740 int left = (new_fdt->max_fdset-open_files)/8; 730 int left = (new_fdt->max_fds-open_files)/8;
741 int start = open_files / (8 * sizeof(unsigned long)); 731 int start = open_files / (8 * sizeof(unsigned long));
742 732
743 memset(&new_fdt->open_fds->fds_bits[start], 0, left); 733 memset(&new_fdt->open_fds->fds_bits[start], 0, left);
744 memset(&new_fdt->close_on_exec->fds_bits[start], 0, left); 734 memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
745 } 735 }
746 736
747out:
748 return newf; 737 return newf;
749 738
750out_release: 739out_release:
751 free_fdset (new_fdt->close_on_exec, new_fdt->max_fdset);
752 free_fdset (new_fdt->open_fds, new_fdt->max_fdset);
753 free_fd_array(new_fdt->fd, new_fdt->max_fds);
754 kmem_cache_free(files_cachep, newf); 740 kmem_cache_free(files_cachep, newf);
741out:
755 return NULL; 742 return NULL;
756} 743}
757 744
@@ -1055,6 +1042,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1055 p->wchar = 0; /* I/O counter: bytes written */ 1042 p->wchar = 0; /* I/O counter: bytes written */
1056 p->syscr = 0; /* I/O counter: read syscalls */ 1043 p->syscr = 0; /* I/O counter: read syscalls */
1057 p->syscw = 0; /* I/O counter: write syscalls */ 1044 p->syscw = 0; /* I/O counter: write syscalls */
1045 task_io_accounting_init(p);
1058 acct_clear_integrals(p); 1046 acct_clear_integrals(p);
1059 1047
1060 p->it_virt_expires = cputime_zero; 1048 p->it_virt_expires = cputime_zero;
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index b02032476dc2..01e750559034 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -43,13 +43,49 @@
43#include "lockdep_internals.h" 43#include "lockdep_internals.h"
44 44
45/* 45/*
46 * hash_lock: protects the lockdep hashes and class/list/hash allocators. 46 * lockdep_lock: protects the lockdep graph, the hashes and the
47 * class/list/hash allocators.
47 * 48 *
48 * This is one of the rare exceptions where it's justified 49 * This is one of the rare exceptions where it's justified
49 * to use a raw spinlock - we really dont want the spinlock 50 * to use a raw spinlock - we really dont want the spinlock
50 * code to recurse back into the lockdep code. 51 * code to recurse back into the lockdep code...
51 */ 52 */
52static raw_spinlock_t hash_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 53static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
54
55static int graph_lock(void)
56{
57 __raw_spin_lock(&lockdep_lock);
58 /*
59 * Make sure that if another CPU detected a bug while
60 * walking the graph we dont change it (while the other
61 * CPU is busy printing out stuff with the graph lock
62 * dropped already)
63 */
64 if (!debug_locks) {
65 __raw_spin_unlock(&lockdep_lock);
66 return 0;
67 }
68 return 1;
69}
70
71static inline int graph_unlock(void)
72{
73 __raw_spin_unlock(&lockdep_lock);
74 return 0;
75}
76
77/*
78 * Turn lock debugging off and return with 0 if it was off already,
79 * and also release the graph lock:
80 */
81static inline int debug_locks_off_graph_unlock(void)
82{
83 int ret = debug_locks_off();
84
85 __raw_spin_unlock(&lockdep_lock);
86
87 return ret;
88}
53 89
54static int lockdep_initialized; 90static int lockdep_initialized;
55 91
@@ -57,14 +93,15 @@ unsigned long nr_list_entries;
57static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES]; 93static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
58 94
59/* 95/*
60 * Allocate a lockdep entry. (assumes hash_lock held, returns 96 * Allocate a lockdep entry. (assumes the graph_lock held, returns
61 * with NULL on failure) 97 * with NULL on failure)
62 */ 98 */
63static struct lock_list *alloc_list_entry(void) 99static struct lock_list *alloc_list_entry(void)
64{ 100{
65 if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) { 101 if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
66 __raw_spin_unlock(&hash_lock); 102 if (!debug_locks_off_graph_unlock())
67 debug_locks_off(); 103 return NULL;
104
68 printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n"); 105 printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
69 printk("turning off the locking correctness validator.\n"); 106 printk("turning off the locking correctness validator.\n");
70 return NULL; 107 return NULL;
@@ -145,9 +182,7 @@ EXPORT_SYMBOL(lockdep_on);
145 */ 182 */
146 183
147#define VERBOSE 0 184#define VERBOSE 0
148#ifdef VERBOSE 185#define VERY_VERBOSE 0
149# define VERY_VERBOSE 0
150#endif
151 186
152#if VERBOSE 187#if VERBOSE
153# define HARDIRQ_VERBOSE 1 188# define HARDIRQ_VERBOSE 1
@@ -172,8 +207,8 @@ static int class_filter(struct lock_class *class)
172 !strcmp(class->name, "&struct->lockfield")) 207 !strcmp(class->name, "&struct->lockfield"))
173 return 1; 208 return 1;
174#endif 209#endif
175 /* Allow everything else. 0 would be filter everything else */ 210 /* Filter everything else. 1 would be to allow everything else */
176 return 1; 211 return 0;
177} 212}
178#endif 213#endif
179 214
@@ -207,7 +242,7 @@ static int softirq_verbose(struct lock_class *class)
207 242
208/* 243/*
209 * Stack-trace: tightly packed array of stack backtrace 244 * Stack-trace: tightly packed array of stack backtrace
210 * addresses. Protected by the hash_lock. 245 * addresses. Protected by the graph_lock.
211 */ 246 */
212unsigned long nr_stack_trace_entries; 247unsigned long nr_stack_trace_entries;
213static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES]; 248static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
@@ -226,18 +261,15 @@ static int save_trace(struct stack_trace *trace)
226 trace->max_entries = trace->nr_entries; 261 trace->max_entries = trace->nr_entries;
227 262
228 nr_stack_trace_entries += trace->nr_entries; 263 nr_stack_trace_entries += trace->nr_entries;
229 if (DEBUG_LOCKS_WARN_ON(nr_stack_trace_entries > MAX_STACK_TRACE_ENTRIES)) {
230 __raw_spin_unlock(&hash_lock);
231 return 0;
232 }
233 264
234 if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) { 265 if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
235 __raw_spin_unlock(&hash_lock); 266 if (!debug_locks_off_graph_unlock())
236 if (debug_locks_off()) { 267 return 0;
237 printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n"); 268
238 printk("turning off the locking correctness validator.\n"); 269 printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
239 dump_stack(); 270 printk("turning off the locking correctness validator.\n");
240 } 271 dump_stack();
272
241 return 0; 273 return 0;
242 } 274 }
243 275
@@ -526,9 +558,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
526{ 558{
527 struct task_struct *curr = current; 559 struct task_struct *curr = current;
528 560
529 __raw_spin_unlock(&hash_lock); 561 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
530 debug_locks_off();
531 if (debug_locks_silent)
532 return 0; 562 return 0;
533 563
534 printk("\n=======================================================\n"); 564 printk("\n=======================================================\n");
@@ -556,12 +586,10 @@ static noinline int print_circular_bug_tail(void)
556 if (debug_locks_silent) 586 if (debug_locks_silent)
557 return 0; 587 return 0;
558 588
559 /* hash_lock unlocked by the header */
560 __raw_spin_lock(&hash_lock);
561 this.class = check_source->class; 589 this.class = check_source->class;
562 if (!save_trace(&this.trace)) 590 if (!save_trace(&this.trace))
563 return 0; 591 return 0;
564 __raw_spin_unlock(&hash_lock); 592
565 print_circular_bug_entry(&this, 0); 593 print_circular_bug_entry(&this, 0);
566 594
567 printk("\nother info that might help us debug this:\n\n"); 595 printk("\nother info that might help us debug this:\n\n");
@@ -577,8 +605,10 @@ static noinline int print_circular_bug_tail(void)
577 605
578static int noinline print_infinite_recursion_bug(void) 606static int noinline print_infinite_recursion_bug(void)
579{ 607{
580 __raw_spin_unlock(&hash_lock); 608 if (!debug_locks_off_graph_unlock())
581 DEBUG_LOCKS_WARN_ON(1); 609 return 0;
610
611 WARN_ON(1);
582 612
583 return 0; 613 return 0;
584} 614}
@@ -713,9 +743,7 @@ print_bad_irq_dependency(struct task_struct *curr,
713 enum lock_usage_bit bit2, 743 enum lock_usage_bit bit2,
714 const char *irqclass) 744 const char *irqclass)
715{ 745{
716 __raw_spin_unlock(&hash_lock); 746 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
717 debug_locks_off();
718 if (debug_locks_silent)
719 return 0; 747 return 0;
720 748
721 printk("\n======================================================\n"); 749 printk("\n======================================================\n");
@@ -796,9 +824,7 @@ static int
796print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, 824print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
797 struct held_lock *next) 825 struct held_lock *next)
798{ 826{
799 debug_locks_off(); 827 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
800 __raw_spin_unlock(&hash_lock);
801 if (debug_locks_silent)
802 return 0; 828 return 0;
803 829
804 printk("\n=============================================\n"); 830 printk("\n=============================================\n");
@@ -974,14 +1000,14 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
974 * Debugging printouts: 1000 * Debugging printouts:
975 */ 1001 */
976 if (verbose(prev->class) || verbose(next->class)) { 1002 if (verbose(prev->class) || verbose(next->class)) {
977 __raw_spin_unlock(&hash_lock); 1003 graph_unlock();
978 printk("\n new dependency: "); 1004 printk("\n new dependency: ");
979 print_lock_name(prev->class); 1005 print_lock_name(prev->class);
980 printk(" => "); 1006 printk(" => ");
981 print_lock_name(next->class); 1007 print_lock_name(next->class);
982 printk("\n"); 1008 printk("\n");
983 dump_stack(); 1009 dump_stack();
984 __raw_spin_lock(&hash_lock); 1010 return graph_lock();
985 } 1011 }
986 return 1; 1012 return 1;
987} 1013}
@@ -1046,8 +1072,10 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
1046 } 1072 }
1047 return 1; 1073 return 1;
1048out_bug: 1074out_bug:
1049 __raw_spin_unlock(&hash_lock); 1075 if (!debug_locks_off_graph_unlock())
1050 DEBUG_LOCKS_WARN_ON(1); 1076 return 0;
1077
1078 WARN_ON(1);
1051 1079
1052 return 0; 1080 return 0;
1053} 1081}
@@ -1201,7 +1229,10 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
1201 hash_head = classhashentry(key); 1229 hash_head = classhashentry(key);
1202 1230
1203 raw_local_irq_save(flags); 1231 raw_local_irq_save(flags);
1204 __raw_spin_lock(&hash_lock); 1232 if (!graph_lock()) {
1233 raw_local_irq_restore(flags);
1234 return NULL;
1235 }
1205 /* 1236 /*
1206 * We have to do the hash-walk again, to avoid races 1237 * We have to do the hash-walk again, to avoid races
1207 * with another CPU: 1238 * with another CPU:
@@ -1214,9 +1245,12 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
1214 * the hash: 1245 * the hash:
1215 */ 1246 */
1216 if (nr_lock_classes >= MAX_LOCKDEP_KEYS) { 1247 if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
1217 __raw_spin_unlock(&hash_lock); 1248 if (!debug_locks_off_graph_unlock()) {
1249 raw_local_irq_restore(flags);
1250 return NULL;
1251 }
1218 raw_local_irq_restore(flags); 1252 raw_local_irq_restore(flags);
1219 debug_locks_off(); 1253
1220 printk("BUG: MAX_LOCKDEP_KEYS too low!\n"); 1254 printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
1221 printk("turning off the locking correctness validator.\n"); 1255 printk("turning off the locking correctness validator.\n");
1222 return NULL; 1256 return NULL;
@@ -1237,18 +1271,23 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
1237 list_add_tail_rcu(&class->hash_entry, hash_head); 1271 list_add_tail_rcu(&class->hash_entry, hash_head);
1238 1272
1239 if (verbose(class)) { 1273 if (verbose(class)) {
1240 __raw_spin_unlock(&hash_lock); 1274 graph_unlock();
1241 raw_local_irq_restore(flags); 1275 raw_local_irq_restore(flags);
1276
1242 printk("\nnew class %p: %s", class->key, class->name); 1277 printk("\nnew class %p: %s", class->key, class->name);
1243 if (class->name_version > 1) 1278 if (class->name_version > 1)
1244 printk("#%d", class->name_version); 1279 printk("#%d", class->name_version);
1245 printk("\n"); 1280 printk("\n");
1246 dump_stack(); 1281 dump_stack();
1282
1247 raw_local_irq_save(flags); 1283 raw_local_irq_save(flags);
1248 __raw_spin_lock(&hash_lock); 1284 if (!graph_lock()) {
1285 raw_local_irq_restore(flags);
1286 return NULL;
1287 }
1249 } 1288 }
1250out_unlock_set: 1289out_unlock_set:
1251 __raw_spin_unlock(&hash_lock); 1290 graph_unlock();
1252 raw_local_irq_restore(flags); 1291 raw_local_irq_restore(flags);
1253 1292
1254 if (!subclass || force) 1293 if (!subclass || force)
@@ -1264,7 +1303,7 @@ out_unlock_set:
1264 * add it and return 0 - in this case the new dependency chain is 1303 * add it and return 0 - in this case the new dependency chain is
1265 * validated. If the key is already hashed, return 1. 1304 * validated. If the key is already hashed, return 1.
1266 */ 1305 */
1267static inline int lookup_chain_cache(u64 chain_key) 1306static inline int lookup_chain_cache(u64 chain_key, struct lock_class *class)
1268{ 1307{
1269 struct list_head *hash_head = chainhashentry(chain_key); 1308 struct list_head *hash_head = chainhashentry(chain_key);
1270 struct lock_chain *chain; 1309 struct lock_chain *chain;
@@ -1278,34 +1317,32 @@ static inline int lookup_chain_cache(u64 chain_key)
1278 if (chain->chain_key == chain_key) { 1317 if (chain->chain_key == chain_key) {
1279cache_hit: 1318cache_hit:
1280 debug_atomic_inc(&chain_lookup_hits); 1319 debug_atomic_inc(&chain_lookup_hits);
1281 /* 1320 if (very_verbose(class))
1282 * In the debugging case, force redundant checking 1321 printk("\nhash chain already cached, key: %016Lx tail class: [%p] %s\n", chain_key, class->key, class->name);
1283 * by returning 1:
1284 */
1285#ifdef CONFIG_DEBUG_LOCKDEP
1286 __raw_spin_lock(&hash_lock);
1287 return 1;
1288#endif
1289 return 0; 1322 return 0;
1290 } 1323 }
1291 } 1324 }
1325 if (very_verbose(class))
1326 printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n", chain_key, class->key, class->name);
1292 /* 1327 /*
1293 * Allocate a new chain entry from the static array, and add 1328 * Allocate a new chain entry from the static array, and add
1294 * it to the hash: 1329 * it to the hash:
1295 */ 1330 */
1296 __raw_spin_lock(&hash_lock); 1331 if (!graph_lock())
1332 return 0;
1297 /* 1333 /*
1298 * We have to walk the chain again locked - to avoid duplicates: 1334 * We have to walk the chain again locked - to avoid duplicates:
1299 */ 1335 */
1300 list_for_each_entry(chain, hash_head, entry) { 1336 list_for_each_entry(chain, hash_head, entry) {
1301 if (chain->chain_key == chain_key) { 1337 if (chain->chain_key == chain_key) {
1302 __raw_spin_unlock(&hash_lock); 1338 graph_unlock();
1303 goto cache_hit; 1339 goto cache_hit;
1304 } 1340 }
1305 } 1341 }
1306 if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) { 1342 if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
1307 __raw_spin_unlock(&hash_lock); 1343 if (!debug_locks_off_graph_unlock())
1308 debug_locks_off(); 1344 return 0;
1345
1309 printk("BUG: MAX_LOCKDEP_CHAINS too low!\n"); 1346 printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
1310 printk("turning off the locking correctness validator.\n"); 1347 printk("turning off the locking correctness validator.\n");
1311 return 0; 1348 return 0;
@@ -1381,9 +1418,7 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
1381 struct held_lock *this, int forwards, 1418 struct held_lock *this, int forwards,
1382 const char *irqclass) 1419 const char *irqclass)
1383{ 1420{
1384 __raw_spin_unlock(&hash_lock); 1421 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1385 debug_locks_off();
1386 if (debug_locks_silent)
1387 return 0; 1422 return 0;
1388 1423
1389 printk("\n=========================================================\n"); 1424 printk("\n=========================================================\n");
@@ -1453,7 +1488,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
1453 return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass); 1488 return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
1454} 1489}
1455 1490
1456static inline void print_irqtrace_events(struct task_struct *curr) 1491void print_irqtrace_events(struct task_struct *curr)
1457{ 1492{
1458 printk("irq event stamp: %u\n", curr->irq_events); 1493 printk("irq event stamp: %u\n", curr->irq_events);
1459 printk("hardirqs last enabled at (%u): ", curr->hardirq_enable_event); 1494 printk("hardirqs last enabled at (%u): ", curr->hardirq_enable_event);
@@ -1466,19 +1501,13 @@ static inline void print_irqtrace_events(struct task_struct *curr)
1466 print_ip_sym(curr->softirq_disable_ip); 1501 print_ip_sym(curr->softirq_disable_ip);
1467} 1502}
1468 1503
1469#else
1470static inline void print_irqtrace_events(struct task_struct *curr)
1471{
1472}
1473#endif 1504#endif
1474 1505
1475static int 1506static int
1476print_usage_bug(struct task_struct *curr, struct held_lock *this, 1507print_usage_bug(struct task_struct *curr, struct held_lock *this,
1477 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit) 1508 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
1478{ 1509{
1479 __raw_spin_unlock(&hash_lock); 1510 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1480 debug_locks_off();
1481 if (debug_locks_silent)
1482 return 0; 1511 return 0;
1483 1512
1484 printk("\n=================================\n"); 1513 printk("\n=================================\n");
@@ -1539,12 +1568,13 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
1539 if (likely(this->class->usage_mask & new_mask)) 1568 if (likely(this->class->usage_mask & new_mask))
1540 return 1; 1569 return 1;
1541 1570
1542 __raw_spin_lock(&hash_lock); 1571 if (!graph_lock())
1572 return 0;
1543 /* 1573 /*
1544 * Make sure we didnt race: 1574 * Make sure we didnt race:
1545 */ 1575 */
1546 if (unlikely(this->class->usage_mask & new_mask)) { 1576 if (unlikely(this->class->usage_mask & new_mask)) {
1547 __raw_spin_unlock(&hash_lock); 1577 graph_unlock();
1548 return 1; 1578 return 1;
1549 } 1579 }
1550 1580
@@ -1730,16 +1760,16 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
1730 debug_atomic_dec(&nr_unused_locks); 1760 debug_atomic_dec(&nr_unused_locks);
1731 break; 1761 break;
1732 default: 1762 default:
1733 __raw_spin_unlock(&hash_lock); 1763 if (!debug_locks_off_graph_unlock())
1734 debug_locks_off(); 1764 return 0;
1735 WARN_ON(1); 1765 WARN_ON(1);
1736 return 0; 1766 return 0;
1737 } 1767 }
1738 1768
1739 __raw_spin_unlock(&hash_lock); 1769 graph_unlock();
1740 1770
1741 /* 1771 /*
1742 * We must printk outside of the hash_lock: 1772 * We must printk outside of the graph_lock:
1743 */ 1773 */
1744 if (ret == 2) { 1774 if (ret == 2) {
1745 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]); 1775 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
@@ -2137,9 +2167,9 @@ out_calc_hash:
2137 * We look up the chain_key and do the O(N^2) check and update of 2167 * We look up the chain_key and do the O(N^2) check and update of
2138 * the dependencies only if this is a new dependency chain. 2168 * the dependencies only if this is a new dependency chain.
2139 * (If lookup_chain_cache() returns with 1 it acquires 2169 * (If lookup_chain_cache() returns with 1 it acquires
2140 * hash_lock for us) 2170 * graph_lock for us)
2141 */ 2171 */
2142 if (!trylock && (check == 2) && lookup_chain_cache(chain_key)) { 2172 if (!trylock && (check == 2) && lookup_chain_cache(chain_key, class)) {
2143 /* 2173 /*
2144 * Check whether last held lock: 2174 * Check whether last held lock:
2145 * 2175 *
@@ -2170,7 +2200,7 @@ out_calc_hash:
2170 if (!chain_head && ret != 2) 2200 if (!chain_head && ret != 2)
2171 if (!check_prevs_add(curr, hlock)) 2201 if (!check_prevs_add(curr, hlock))
2172 return 0; 2202 return 0;
2173 __raw_spin_unlock(&hash_lock); 2203 graph_unlock();
2174 } 2204 }
2175 curr->lockdep_depth++; 2205 curr->lockdep_depth++;
2176 check_chain_key(curr); 2206 check_chain_key(curr);
@@ -2433,6 +2463,7 @@ EXPORT_SYMBOL_GPL(lock_release);
2433void lockdep_reset(void) 2463void lockdep_reset(void)
2434{ 2464{
2435 unsigned long flags; 2465 unsigned long flags;
2466 int i;
2436 2467
2437 raw_local_irq_save(flags); 2468 raw_local_irq_save(flags);
2438 current->curr_chain_key = 0; 2469 current->curr_chain_key = 0;
@@ -2443,6 +2474,8 @@ void lockdep_reset(void)
2443 nr_softirq_chains = 0; 2474 nr_softirq_chains = 0;
2444 nr_process_chains = 0; 2475 nr_process_chains = 0;
2445 debug_locks = 1; 2476 debug_locks = 1;
2477 for (i = 0; i < CHAINHASH_SIZE; i++)
2478 INIT_LIST_HEAD(chainhash_table + i);
2446 raw_local_irq_restore(flags); 2479 raw_local_irq_restore(flags);
2447} 2480}
2448 2481
@@ -2479,7 +2512,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
2479 int i; 2512 int i;
2480 2513
2481 raw_local_irq_save(flags); 2514 raw_local_irq_save(flags);
2482 __raw_spin_lock(&hash_lock); 2515 graph_lock();
2483 2516
2484 /* 2517 /*
2485 * Unhash all classes that were created by this module: 2518 * Unhash all classes that were created by this module:
@@ -2493,7 +2526,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
2493 zap_class(class); 2526 zap_class(class);
2494 } 2527 }
2495 2528
2496 __raw_spin_unlock(&hash_lock); 2529 graph_unlock();
2497 raw_local_irq_restore(flags); 2530 raw_local_irq_restore(flags);
2498} 2531}
2499 2532
@@ -2521,20 +2554,20 @@ void lockdep_reset_lock(struct lockdep_map *lock)
2521 * Debug check: in the end all mapped classes should 2554 * Debug check: in the end all mapped classes should
2522 * be gone. 2555 * be gone.
2523 */ 2556 */
2524 __raw_spin_lock(&hash_lock); 2557 graph_lock();
2525 for (i = 0; i < CLASSHASH_SIZE; i++) { 2558 for (i = 0; i < CLASSHASH_SIZE; i++) {
2526 head = classhash_table + i; 2559 head = classhash_table + i;
2527 if (list_empty(head)) 2560 if (list_empty(head))
2528 continue; 2561 continue;
2529 list_for_each_entry_safe(class, next, head, hash_entry) { 2562 list_for_each_entry_safe(class, next, head, hash_entry) {
2530 if (unlikely(class == lock->class_cache)) { 2563 if (unlikely(class == lock->class_cache)) {
2531 __raw_spin_unlock(&hash_lock); 2564 if (debug_locks_off_graph_unlock())
2532 DEBUG_LOCKS_WARN_ON(1); 2565 WARN_ON(1);
2533 goto out_restore; 2566 goto out_restore;
2534 } 2567 }
2535 } 2568 }
2536 } 2569 }
2537 __raw_spin_unlock(&hash_lock); 2570 graph_unlock();
2538 2571
2539out_restore: 2572out_restore:
2540 raw_local_irq_restore(flags); 2573 raw_local_irq_restore(flags);
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index e2ce748e96af..f5b9ee6f6bbb 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -46,10 +46,8 @@ static inline struct nsproxy *clone_namespaces(struct nsproxy *orig)
46 struct nsproxy *ns; 46 struct nsproxy *ns;
47 47
48 ns = kmemdup(orig, sizeof(struct nsproxy), GFP_KERNEL); 48 ns = kmemdup(orig, sizeof(struct nsproxy), GFP_KERNEL);
49 if (ns) { 49 if (ns)
50 atomic_set(&ns->count, 1); 50 atomic_set(&ns->count, 1);
51 ns->id = -1;
52 }
53 return ns; 51 return ns;
54} 52}
55 53
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 99eeb119b06d..6d566bf7085c 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -28,8 +28,7 @@ static inline int freezeable(struct task_struct * p)
28 if ((p == current) || 28 if ((p == current) ||
29 (p->flags & PF_NOFREEZE) || 29 (p->flags & PF_NOFREEZE) ||
30 (p->exit_state == EXIT_ZOMBIE) || 30 (p->exit_state == EXIT_ZOMBIE) ||
31 (p->exit_state == EXIT_DEAD) || 31 (p->exit_state == EXIT_DEAD))
32 (p->state == TASK_STOPPED))
33 return 0; 32 return 0;
34 return 1; 33 return 1;
35} 34}
@@ -61,10 +60,16 @@ static inline void freeze_process(struct task_struct *p)
61 unsigned long flags; 60 unsigned long flags;
62 61
63 if (!freezing(p)) { 62 if (!freezing(p)) {
64 freeze(p); 63 rmb();
65 spin_lock_irqsave(&p->sighand->siglock, flags); 64 if (!frozen(p)) {
66 signal_wake_up(p, 0); 65 if (p->state == TASK_STOPPED)
67 spin_unlock_irqrestore(&p->sighand->siglock, flags); 66 force_sig_specific(SIGSTOP, p);
67
68 freeze(p);
69 spin_lock_irqsave(&p->sighand->siglock, flags);
70 signal_wake_up(p, p->state == TASK_STOPPED);
71 spin_unlock_irqrestore(&p->sighand->siglock, flags);
72 }
68 } 73 }
69} 74}
70 75
@@ -103,9 +108,7 @@ static unsigned int try_to_freeze_tasks(int freeze_user_space)
103 if (frozen(p)) 108 if (frozen(p))
104 continue; 109 continue;
105 110
106 if (p->state == TASK_TRACED && 111 if (p->state == TASK_TRACED && frozen(p->parent)) {
107 (frozen(p->parent) ||
108 p->parent->state == TASK_STOPPED)) {
109 cancel_freezing(p); 112 cancel_freezing(p);
110 continue; 113 continue;
111 } 114 }
diff --git a/kernel/relay.c b/kernel/relay.c
index 818e514729cf..a4701e7ba7d0 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -138,7 +138,7 @@ depopulate:
138 */ 138 */
139struct rchan_buf *relay_create_buf(struct rchan *chan) 139struct rchan_buf *relay_create_buf(struct rchan *chan)
140{ 140{
141 struct rchan_buf *buf = kcalloc(1, sizeof(struct rchan_buf), GFP_KERNEL); 141 struct rchan_buf *buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
142 if (!buf) 142 if (!buf)
143 return NULL; 143 return NULL;
144 144
@@ -479,7 +479,7 @@ struct rchan *relay_open(const char *base_filename,
479 if (!(subbuf_size && n_subbufs)) 479 if (!(subbuf_size && n_subbufs))
480 return NULL; 480 return NULL;
481 481
482 chan = kcalloc(1, sizeof(struct rchan), GFP_KERNEL); 482 chan = kzalloc(sizeof(struct rchan), GFP_KERNEL);
483 if (!chan) 483 if (!chan)
484 return NULL; 484 return NULL;
485 485
diff --git a/kernel/sched.c b/kernel/sched.c
index f385eff4682d..5cd833bc2173 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -225,8 +225,10 @@ struct rq {
225 unsigned long nr_uninterruptible; 225 unsigned long nr_uninterruptible;
226 226
227 unsigned long expired_timestamp; 227 unsigned long expired_timestamp;
228 unsigned long long timestamp_last_tick; 228 /* Cached timestamp set by update_cpu_clock() */
229 unsigned long long most_recent_timestamp;
229 struct task_struct *curr, *idle; 230 struct task_struct *curr, *idle;
231 unsigned long next_balance;
230 struct mm_struct *prev_mm; 232 struct mm_struct *prev_mm;
231 struct prio_array *active, *expired, arrays[2]; 233 struct prio_array *active, *expired, arrays[2];
232 int best_expired_prio; 234 int best_expired_prio;
@@ -426,7 +428,7 @@ static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
426 * bump this up when changing the output format or the meaning of an existing 428 * bump this up when changing the output format or the meaning of an existing
427 * format, so that tools can adapt (or abort) 429 * format, so that tools can adapt (or abort)
428 */ 430 */
429#define SCHEDSTAT_VERSION 12 431#define SCHEDSTAT_VERSION 14
430 432
431static int show_schedstat(struct seq_file *seq, void *v) 433static int show_schedstat(struct seq_file *seq, void *v)
432{ 434{
@@ -464,7 +466,8 @@ static int show_schedstat(struct seq_file *seq, void *v)
464 seq_printf(seq, "domain%d %s", dcnt++, mask_str); 466 seq_printf(seq, "domain%d %s", dcnt++, mask_str);
465 for (itype = SCHED_IDLE; itype < MAX_IDLE_TYPES; 467 for (itype = SCHED_IDLE; itype < MAX_IDLE_TYPES;
466 itype++) { 468 itype++) {
467 seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu", 469 seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu "
470 "%lu",
468 sd->lb_cnt[itype], 471 sd->lb_cnt[itype],
469 sd->lb_balanced[itype], 472 sd->lb_balanced[itype],
470 sd->lb_failed[itype], 473 sd->lb_failed[itype],
@@ -474,11 +477,13 @@ static int show_schedstat(struct seq_file *seq, void *v)
474 sd->lb_nobusyq[itype], 477 sd->lb_nobusyq[itype],
475 sd->lb_nobusyg[itype]); 478 sd->lb_nobusyg[itype]);
476 } 479 }
477 seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu\n", 480 seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu"
481 " %lu %lu %lu\n",
478 sd->alb_cnt, sd->alb_failed, sd->alb_pushed, 482 sd->alb_cnt, sd->alb_failed, sd->alb_pushed,
479 sd->sbe_cnt, sd->sbe_balanced, sd->sbe_pushed, 483 sd->sbe_cnt, sd->sbe_balanced, sd->sbe_pushed,
480 sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed, 484 sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed,
481 sd->ttwu_wake_remote, sd->ttwu_move_affine, sd->ttwu_move_balance); 485 sd->ttwu_wake_remote, sd->ttwu_move_affine,
486 sd->ttwu_move_balance);
482 } 487 }
483 preempt_enable(); 488 preempt_enable();
484#endif 489#endif
@@ -547,7 +552,7 @@ rq_sched_info_depart(struct rq *rq, unsigned long delta_jiffies)
547#endif 552#endif
548 553
549/* 554/*
550 * rq_lock - lock a given runqueue and disable interrupts. 555 * this_rq_lock - lock this runqueue and disable interrupts.
551 */ 556 */
552static inline struct rq *this_rq_lock(void) 557static inline struct rq *this_rq_lock(void)
553 __acquires(rq->lock) 558 __acquires(rq->lock)
@@ -938,13 +943,16 @@ static void activate_task(struct task_struct *p, struct rq *rq, int local)
938{ 943{
939 unsigned long long now; 944 unsigned long long now;
940 945
946 if (rt_task(p))
947 goto out;
948
941 now = sched_clock(); 949 now = sched_clock();
942#ifdef CONFIG_SMP 950#ifdef CONFIG_SMP
943 if (!local) { 951 if (!local) {
944 /* Compensate for drifting sched_clock */ 952 /* Compensate for drifting sched_clock */
945 struct rq *this_rq = this_rq(); 953 struct rq *this_rq = this_rq();
946 now = (now - this_rq->timestamp_last_tick) 954 now = (now - this_rq->most_recent_timestamp)
947 + rq->timestamp_last_tick; 955 + rq->most_recent_timestamp;
948 } 956 }
949#endif 957#endif
950 958
@@ -959,8 +967,7 @@ static void activate_task(struct task_struct *p, struct rq *rq, int local)
959 (now - p->timestamp) >> 20); 967 (now - p->timestamp) >> 20);
960 } 968 }
961 969
962 if (!rt_task(p)) 970 p->prio = recalc_task_prio(p, now);
963 p->prio = recalc_task_prio(p, now);
964 971
965 /* 972 /*
966 * This checks to make sure it's not an uninterruptible task 973 * This checks to make sure it's not an uninterruptible task
@@ -985,7 +992,7 @@ static void activate_task(struct task_struct *p, struct rq *rq, int local)
985 } 992 }
986 } 993 }
987 p->timestamp = now; 994 p->timestamp = now;
988 995out:
989 __activate_task(p, rq); 996 __activate_task(p, rq);
990} 997}
991 998
@@ -1450,7 +1457,9 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
1450 1457
1451 if (this_sd->flags & SD_WAKE_AFFINE) { 1458 if (this_sd->flags & SD_WAKE_AFFINE) {
1452 unsigned long tl = this_load; 1459 unsigned long tl = this_load;
1453 unsigned long tl_per_task = cpu_avg_load_per_task(this_cpu); 1460 unsigned long tl_per_task;
1461
1462 tl_per_task = cpu_avg_load_per_task(this_cpu);
1454 1463
1455 /* 1464 /*
1456 * If sync wakeup then subtract the (maximum possible) 1465 * If sync wakeup then subtract the (maximum possible)
@@ -1688,8 +1697,8 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
1688 * Not the local CPU - must adjust timestamp. This should 1697 * Not the local CPU - must adjust timestamp. This should
1689 * get optimised away in the !CONFIG_SMP case. 1698 * get optimised away in the !CONFIG_SMP case.
1690 */ 1699 */
1691 p->timestamp = (p->timestamp - this_rq->timestamp_last_tick) 1700 p->timestamp = (p->timestamp - this_rq->most_recent_timestamp)
1692 + rq->timestamp_last_tick; 1701 + rq->most_recent_timestamp;
1693 __activate_task(p, rq); 1702 __activate_task(p, rq);
1694 if (TASK_PREEMPTS_CURR(p, rq)) 1703 if (TASK_PREEMPTS_CURR(p, rq))
1695 resched_task(rq->curr); 1704 resched_task(rq->curr);
@@ -1952,6 +1961,7 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
1952 __acquires(rq1->lock) 1961 __acquires(rq1->lock)
1953 __acquires(rq2->lock) 1962 __acquires(rq2->lock)
1954{ 1963{
1964 BUG_ON(!irqs_disabled());
1955 if (rq1 == rq2) { 1965 if (rq1 == rq2) {
1956 spin_lock(&rq1->lock); 1966 spin_lock(&rq1->lock);
1957 __acquire(rq2->lock); /* Fake it out ;) */ 1967 __acquire(rq2->lock); /* Fake it out ;) */
@@ -1991,6 +2001,11 @@ static void double_lock_balance(struct rq *this_rq, struct rq *busiest)
1991 __acquires(busiest->lock) 2001 __acquires(busiest->lock)
1992 __acquires(this_rq->lock) 2002 __acquires(this_rq->lock)
1993{ 2003{
2004 if (unlikely(!irqs_disabled())) {
2005 /* printk() doesn't work good under rq->lock */
2006 spin_unlock(&this_rq->lock);
2007 BUG_ON(1);
2008 }
1994 if (unlikely(!spin_trylock(&busiest->lock))) { 2009 if (unlikely(!spin_trylock(&busiest->lock))) {
1995 if (busiest < this_rq) { 2010 if (busiest < this_rq) {
1996 spin_unlock(&this_rq->lock); 2011 spin_unlock(&this_rq->lock);
@@ -2061,8 +2076,8 @@ static void pull_task(struct rq *src_rq, struct prio_array *src_array,
2061 set_task_cpu(p, this_cpu); 2076 set_task_cpu(p, this_cpu);
2062 inc_nr_running(p, this_rq); 2077 inc_nr_running(p, this_rq);
2063 enqueue_task(p, this_array); 2078 enqueue_task(p, this_array);
2064 p->timestamp = (p->timestamp - src_rq->timestamp_last_tick) 2079 p->timestamp = (p->timestamp - src_rq->most_recent_timestamp)
2065 + this_rq->timestamp_last_tick; 2080 + this_rq->most_recent_timestamp;
2066 /* 2081 /*
2067 * Note that idle threads have a prio of MAX_PRIO, for this test 2082 * Note that idle threads have a prio of MAX_PRIO, for this test
2068 * to be always true for them. 2083 * to be always true for them.
@@ -2098,10 +2113,15 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
2098 * 2) too many balance attempts have failed. 2113 * 2) too many balance attempts have failed.
2099 */ 2114 */
2100 2115
2101 if (sd->nr_balance_failed > sd->cache_nice_tries) 2116 if (sd->nr_balance_failed > sd->cache_nice_tries) {
2117#ifdef CONFIG_SCHEDSTATS
2118 if (task_hot(p, rq->most_recent_timestamp, sd))
2119 schedstat_inc(sd, lb_hot_gained[idle]);
2120#endif
2102 return 1; 2121 return 1;
2122 }
2103 2123
2104 if (task_hot(p, rq->timestamp_last_tick, sd)) 2124 if (task_hot(p, rq->most_recent_timestamp, sd))
2105 return 0; 2125 return 0;
2106 return 1; 2126 return 1;
2107} 2127}
@@ -2199,11 +2219,6 @@ skip_queue:
2199 goto skip_bitmap; 2219 goto skip_bitmap;
2200 } 2220 }
2201 2221
2202#ifdef CONFIG_SCHEDSTATS
2203 if (task_hot(tmp, busiest->timestamp_last_tick, sd))
2204 schedstat_inc(sd, lb_hot_gained[idle]);
2205#endif
2206
2207 pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu); 2222 pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu);
2208 pulled++; 2223 pulled++;
2209 rem_load_move -= tmp->load_weight; 2224 rem_load_move -= tmp->load_weight;
@@ -2241,7 +2256,7 @@ out:
2241static struct sched_group * 2256static struct sched_group *
2242find_busiest_group(struct sched_domain *sd, int this_cpu, 2257find_busiest_group(struct sched_domain *sd, int this_cpu,
2243 unsigned long *imbalance, enum idle_type idle, int *sd_idle, 2258 unsigned long *imbalance, enum idle_type idle, int *sd_idle,
2244 cpumask_t *cpus) 2259 cpumask_t *cpus, int *balance)
2245{ 2260{
2246 struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; 2261 struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
2247 unsigned long max_load, avg_load, total_load, this_load, total_pwr; 2262 unsigned long max_load, avg_load, total_load, this_load, total_pwr;
@@ -2270,10 +2285,14 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
2270 unsigned long load, group_capacity; 2285 unsigned long load, group_capacity;
2271 int local_group; 2286 int local_group;
2272 int i; 2287 int i;
2288 unsigned int balance_cpu = -1, first_idle_cpu = 0;
2273 unsigned long sum_nr_running, sum_weighted_load; 2289 unsigned long sum_nr_running, sum_weighted_load;
2274 2290
2275 local_group = cpu_isset(this_cpu, group->cpumask); 2291 local_group = cpu_isset(this_cpu, group->cpumask);
2276 2292
2293 if (local_group)
2294 balance_cpu = first_cpu(group->cpumask);
2295
2277 /* Tally up the load of all CPUs in the group */ 2296 /* Tally up the load of all CPUs in the group */
2278 sum_weighted_load = sum_nr_running = avg_load = 0; 2297 sum_weighted_load = sum_nr_running = avg_load = 0;
2279 2298
@@ -2289,9 +2308,14 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
2289 *sd_idle = 0; 2308 *sd_idle = 0;
2290 2309
2291 /* Bias balancing toward cpus of our domain */ 2310 /* Bias balancing toward cpus of our domain */
2292 if (local_group) 2311 if (local_group) {
2312 if (idle_cpu(i) && !first_idle_cpu) {
2313 first_idle_cpu = 1;
2314 balance_cpu = i;
2315 }
2316
2293 load = target_load(i, load_idx); 2317 load = target_load(i, load_idx);
2294 else 2318 } else
2295 load = source_load(i, load_idx); 2319 load = source_load(i, load_idx);
2296 2320
2297 avg_load += load; 2321 avg_load += load;
@@ -2299,6 +2323,16 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
2299 sum_weighted_load += rq->raw_weighted_load; 2323 sum_weighted_load += rq->raw_weighted_load;
2300 } 2324 }
2301 2325
2326 /*
2327 * First idle cpu or the first cpu(busiest) in this sched group
2328 * is eligible for doing load balancing at this and above
2329 * domains.
2330 */
2331 if (local_group && balance_cpu != this_cpu && balance) {
2332 *balance = 0;
2333 goto ret;
2334 }
2335
2302 total_load += avg_load; 2336 total_load += avg_load;
2303 total_pwr += group->cpu_power; 2337 total_pwr += group->cpu_power;
2304 2338
@@ -2458,18 +2492,21 @@ small_imbalance:
2458 pwr_now /= SCHED_LOAD_SCALE; 2492 pwr_now /= SCHED_LOAD_SCALE;
2459 2493
2460 /* Amount of load we'd subtract */ 2494 /* Amount of load we'd subtract */
2461 tmp = busiest_load_per_task*SCHED_LOAD_SCALE/busiest->cpu_power; 2495 tmp = busiest_load_per_task * SCHED_LOAD_SCALE /
2496 busiest->cpu_power;
2462 if (max_load > tmp) 2497 if (max_load > tmp)
2463 pwr_move += busiest->cpu_power * 2498 pwr_move += busiest->cpu_power *
2464 min(busiest_load_per_task, max_load - tmp); 2499 min(busiest_load_per_task, max_load - tmp);
2465 2500
2466 /* Amount of load we'd add */ 2501 /* Amount of load we'd add */
2467 if (max_load*busiest->cpu_power < 2502 if (max_load * busiest->cpu_power <
2468 busiest_load_per_task*SCHED_LOAD_SCALE) 2503 busiest_load_per_task * SCHED_LOAD_SCALE)
2469 tmp = max_load*busiest->cpu_power/this->cpu_power; 2504 tmp = max_load * busiest->cpu_power / this->cpu_power;
2470 else 2505 else
2471 tmp = busiest_load_per_task*SCHED_LOAD_SCALE/this->cpu_power; 2506 tmp = busiest_load_per_task * SCHED_LOAD_SCALE /
2472 pwr_move += this->cpu_power*min(this_load_per_task, this_load + tmp); 2507 this->cpu_power;
2508 pwr_move += this->cpu_power *
2509 min(this_load_per_task, this_load + tmp);
2473 pwr_move /= SCHED_LOAD_SCALE; 2510 pwr_move /= SCHED_LOAD_SCALE;
2474 2511
2475 /* Move if we gain throughput */ 2512 /* Move if we gain throughput */
@@ -2490,8 +2527,8 @@ out_balanced:
2490 *imbalance = min_load_per_task; 2527 *imbalance = min_load_per_task;
2491 return group_min; 2528 return group_min;
2492 } 2529 }
2493ret:
2494#endif 2530#endif
2531ret:
2495 *imbalance = 0; 2532 *imbalance = 0;
2496 return NULL; 2533 return NULL;
2497} 2534}
@@ -2540,17 +2577,17 @@ static inline unsigned long minus_1_or_zero(unsigned long n)
2540/* 2577/*
2541 * Check this_cpu to ensure it is balanced within domain. Attempt to move 2578 * Check this_cpu to ensure it is balanced within domain. Attempt to move
2542 * tasks if there is an imbalance. 2579 * tasks if there is an imbalance.
2543 *
2544 * Called with this_rq unlocked.
2545 */ 2580 */
2546static int load_balance(int this_cpu, struct rq *this_rq, 2581static int load_balance(int this_cpu, struct rq *this_rq,
2547 struct sched_domain *sd, enum idle_type idle) 2582 struct sched_domain *sd, enum idle_type idle,
2583 int *balance)
2548{ 2584{
2549 int nr_moved, all_pinned = 0, active_balance = 0, sd_idle = 0; 2585 int nr_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
2550 struct sched_group *group; 2586 struct sched_group *group;
2551 unsigned long imbalance; 2587 unsigned long imbalance;
2552 struct rq *busiest; 2588 struct rq *busiest;
2553 cpumask_t cpus = CPU_MASK_ALL; 2589 cpumask_t cpus = CPU_MASK_ALL;
2590 unsigned long flags;
2554 2591
2555 /* 2592 /*
2556 * When power savings policy is enabled for the parent domain, idle 2593 * When power savings policy is enabled for the parent domain, idle
@@ -2566,7 +2603,11 @@ static int load_balance(int this_cpu, struct rq *this_rq,
2566 2603
2567redo: 2604redo:
2568 group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle, 2605 group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
2569 &cpus); 2606 &cpus, balance);
2607
2608 if (*balance == 0)
2609 goto out_balanced;
2610
2570 if (!group) { 2611 if (!group) {
2571 schedstat_inc(sd, lb_nobusyg[idle]); 2612 schedstat_inc(sd, lb_nobusyg[idle]);
2572 goto out_balanced; 2613 goto out_balanced;
@@ -2590,11 +2631,13 @@ redo:
2590 * still unbalanced. nr_moved simply stays zero, so it is 2631 * still unbalanced. nr_moved simply stays zero, so it is
2591 * correctly treated as an imbalance. 2632 * correctly treated as an imbalance.
2592 */ 2633 */
2634 local_irq_save(flags);
2593 double_rq_lock(this_rq, busiest); 2635 double_rq_lock(this_rq, busiest);
2594 nr_moved = move_tasks(this_rq, this_cpu, busiest, 2636 nr_moved = move_tasks(this_rq, this_cpu, busiest,
2595 minus_1_or_zero(busiest->nr_running), 2637 minus_1_or_zero(busiest->nr_running),
2596 imbalance, sd, idle, &all_pinned); 2638 imbalance, sd, idle, &all_pinned);
2597 double_rq_unlock(this_rq, busiest); 2639 double_rq_unlock(this_rq, busiest);
2640 local_irq_restore(flags);
2598 2641
2599 /* All tasks on this runqueue were pinned by CPU affinity */ 2642 /* All tasks on this runqueue were pinned by CPU affinity */
2600 if (unlikely(all_pinned)) { 2643 if (unlikely(all_pinned)) {
@@ -2611,13 +2654,13 @@ redo:
2611 2654
2612 if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) { 2655 if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
2613 2656
2614 spin_lock(&busiest->lock); 2657 spin_lock_irqsave(&busiest->lock, flags);
2615 2658
2616 /* don't kick the migration_thread, if the curr 2659 /* don't kick the migration_thread, if the curr
2617 * task on busiest cpu can't be moved to this_cpu 2660 * task on busiest cpu can't be moved to this_cpu
2618 */ 2661 */
2619 if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) { 2662 if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
2620 spin_unlock(&busiest->lock); 2663 spin_unlock_irqrestore(&busiest->lock, flags);
2621 all_pinned = 1; 2664 all_pinned = 1;
2622 goto out_one_pinned; 2665 goto out_one_pinned;
2623 } 2666 }
@@ -2627,7 +2670,7 @@ redo:
2627 busiest->push_cpu = this_cpu; 2670 busiest->push_cpu = this_cpu;
2628 active_balance = 1; 2671 active_balance = 1;
2629 } 2672 }
2630 spin_unlock(&busiest->lock); 2673 spin_unlock_irqrestore(&busiest->lock, flags);
2631 if (active_balance) 2674 if (active_balance)
2632 wake_up_process(busiest->migration_thread); 2675 wake_up_process(busiest->migration_thread);
2633 2676
@@ -2706,7 +2749,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
2706 schedstat_inc(sd, lb_cnt[NEWLY_IDLE]); 2749 schedstat_inc(sd, lb_cnt[NEWLY_IDLE]);
2707redo: 2750redo:
2708 group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE, 2751 group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE,
2709 &sd_idle, &cpus); 2752 &sd_idle, &cpus, NULL);
2710 if (!group) { 2753 if (!group) {
2711 schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]); 2754 schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]);
2712 goto out_balanced; 2755 goto out_balanced;
@@ -2766,14 +2809,28 @@ out_balanced:
2766static void idle_balance(int this_cpu, struct rq *this_rq) 2809static void idle_balance(int this_cpu, struct rq *this_rq)
2767{ 2810{
2768 struct sched_domain *sd; 2811 struct sched_domain *sd;
2812 int pulled_task = 0;
2813 unsigned long next_balance = jiffies + 60 * HZ;
2769 2814
2770 for_each_domain(this_cpu, sd) { 2815 for_each_domain(this_cpu, sd) {
2771 if (sd->flags & SD_BALANCE_NEWIDLE) { 2816 if (sd->flags & SD_BALANCE_NEWIDLE) {
2772 /* If we've pulled tasks over stop searching: */ 2817 /* If we've pulled tasks over stop searching: */
2773 if (load_balance_newidle(this_cpu, this_rq, sd)) 2818 pulled_task = load_balance_newidle(this_cpu,
2819 this_rq, sd);
2820 if (time_after(next_balance,
2821 sd->last_balance + sd->balance_interval))
2822 next_balance = sd->last_balance
2823 + sd->balance_interval;
2824 if (pulled_task)
2774 break; 2825 break;
2775 } 2826 }
2776 } 2827 }
2828 if (!pulled_task)
2829 /*
2830 * We are going idle. next_balance may be set based on
2831 * a busy processor. So reset next_balance.
2832 */
2833 this_rq->next_balance = next_balance;
2777} 2834}
2778 2835
2779/* 2836/*
@@ -2826,26 +2883,9 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
2826 spin_unlock(&target_rq->lock); 2883 spin_unlock(&target_rq->lock);
2827} 2884}
2828 2885
2829/* 2886static void update_load(struct rq *this_rq)
2830 * rebalance_tick will get called every timer tick, on every CPU.
2831 *
2832 * It checks each scheduling domain to see if it is due to be balanced,
2833 * and initiates a balancing operation if so.
2834 *
2835 * Balancing parameters are set up in arch_init_sched_domains.
2836 */
2837
2838/* Don't have all balancing operations going off at once: */
2839static inline unsigned long cpu_offset(int cpu)
2840{ 2887{
2841 return jiffies + cpu * HZ / NR_CPUS; 2888 unsigned long this_load;
2842}
2843
2844static void
2845rebalance_tick(int this_cpu, struct rq *this_rq, enum idle_type idle)
2846{
2847 unsigned long this_load, interval, j = cpu_offset(this_cpu);
2848 struct sched_domain *sd;
2849 int i, scale; 2889 int i, scale;
2850 2890
2851 this_load = this_rq->raw_weighted_load; 2891 this_load = this_rq->raw_weighted_load;
@@ -2865,6 +2905,32 @@ rebalance_tick(int this_cpu, struct rq *this_rq, enum idle_type idle)
2865 new_load += scale-1; 2905 new_load += scale-1;
2866 this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) / scale; 2906 this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) / scale;
2867 } 2907 }
2908}
2909
2910/*
2911 * run_rebalance_domains is triggered when needed from the scheduler tick.
2912 *
2913 * It checks each scheduling domain to see if it is due to be balanced,
2914 * and initiates a balancing operation if so.
2915 *
2916 * Balancing parameters are set up in arch_init_sched_domains.
2917 */
2918static DEFINE_SPINLOCK(balancing);
2919
2920static void run_rebalance_domains(struct softirq_action *h)
2921{
2922 int this_cpu = smp_processor_id(), balance = 1;
2923 struct rq *this_rq = cpu_rq(this_cpu);
2924 unsigned long interval;
2925 struct sched_domain *sd;
2926 /*
2927 * We are idle if there are no processes running. This
2928 * is valid even if we are the idle process (SMT).
2929 */
2930 enum idle_type idle = !this_rq->nr_running ?
2931 SCHED_IDLE : NOT_IDLE;
2932 /* Earliest time when we have to call run_rebalance_domains again */
2933 unsigned long next_balance = jiffies + 60*HZ;
2868 2934
2869 for_each_domain(this_cpu, sd) { 2935 for_each_domain(this_cpu, sd) {
2870 if (!(sd->flags & SD_LOAD_BALANCE)) 2936 if (!(sd->flags & SD_LOAD_BALANCE))
@@ -2879,8 +2945,13 @@ rebalance_tick(int this_cpu, struct rq *this_rq, enum idle_type idle)
2879 if (unlikely(!interval)) 2945 if (unlikely(!interval))
2880 interval = 1; 2946 interval = 1;
2881 2947
2882 if (j - sd->last_balance >= interval) { 2948 if (sd->flags & SD_SERIALIZE) {
2883 if (load_balance(this_cpu, this_rq, sd, idle)) { 2949 if (!spin_trylock(&balancing))
2950 goto out;
2951 }
2952
2953 if (time_after_eq(jiffies, sd->last_balance + interval)) {
2954 if (load_balance(this_cpu, this_rq, sd, idle, &balance)) {
2884 /* 2955 /*
2885 * We've pulled tasks over so either we're no 2956 * We've pulled tasks over so either we're no
2886 * longer idle, or one of our SMT siblings is 2957 * longer idle, or one of our SMT siblings is
@@ -2888,39 +2959,48 @@ rebalance_tick(int this_cpu, struct rq *this_rq, enum idle_type idle)
2888 */ 2959 */
2889 idle = NOT_IDLE; 2960 idle = NOT_IDLE;
2890 } 2961 }
2891 sd->last_balance += interval; 2962 sd->last_balance = jiffies;
2892 } 2963 }
2964 if (sd->flags & SD_SERIALIZE)
2965 spin_unlock(&balancing);
2966out:
2967 if (time_after(next_balance, sd->last_balance + interval))
2968 next_balance = sd->last_balance + interval;
2969
2970 /*
2971 * Stop the load balance at this level. There is another
2972 * CPU in our sched group which is doing load balancing more
2973 * actively.
2974 */
2975 if (!balance)
2976 break;
2893 } 2977 }
2978 this_rq->next_balance = next_balance;
2894} 2979}
2895#else 2980#else
2896/* 2981/*
2897 * on UP we do not need to balance between CPUs: 2982 * on UP we do not need to balance between CPUs:
2898 */ 2983 */
2899static inline void rebalance_tick(int cpu, struct rq *rq, enum idle_type idle)
2900{
2901}
2902static inline void idle_balance(int cpu, struct rq *rq) 2984static inline void idle_balance(int cpu, struct rq *rq)
2903{ 2985{
2904} 2986}
2905#endif 2987#endif
2906 2988
2907static inline int wake_priority_sleeper(struct rq *rq) 2989static inline void wake_priority_sleeper(struct rq *rq)
2908{ 2990{
2909 int ret = 0;
2910
2911#ifdef CONFIG_SCHED_SMT 2991#ifdef CONFIG_SCHED_SMT
2992 if (!rq->nr_running)
2993 return;
2994
2912 spin_lock(&rq->lock); 2995 spin_lock(&rq->lock);
2913 /* 2996 /*
2914 * If an SMT sibling task has been put to sleep for priority 2997 * If an SMT sibling task has been put to sleep for priority
2915 * reasons reschedule the idle task to see if it can now run. 2998 * reasons reschedule the idle task to see if it can now run.
2916 */ 2999 */
2917 if (rq->nr_running) { 3000 if (rq->nr_running)
2918 resched_task(rq->idle); 3001 resched_task(rq->idle);
2919 ret = 1;
2920 }
2921 spin_unlock(&rq->lock); 3002 spin_unlock(&rq->lock);
2922#endif 3003#endif
2923 return ret;
2924} 3004}
2925 3005
2926DEFINE_PER_CPU(struct kernel_stat, kstat); 3006DEFINE_PER_CPU(struct kernel_stat, kstat);
@@ -2934,7 +3014,8 @@ EXPORT_PER_CPU_SYMBOL(kstat);
2934static inline void 3014static inline void
2935update_cpu_clock(struct task_struct *p, struct rq *rq, unsigned long long now) 3015update_cpu_clock(struct task_struct *p, struct rq *rq, unsigned long long now)
2936{ 3016{
2937 p->sched_time += now - max(p->timestamp, rq->timestamp_last_tick); 3017 p->sched_time += now - p->last_ran;
3018 p->last_ran = rq->most_recent_timestamp = now;
2938} 3019}
2939 3020
2940/* 3021/*
@@ -2947,8 +3028,7 @@ unsigned long long current_sched_time(const struct task_struct *p)
2947 unsigned long flags; 3028 unsigned long flags;
2948 3029
2949 local_irq_save(flags); 3030 local_irq_save(flags);
2950 ns = max(p->timestamp, task_rq(p)->timestamp_last_tick); 3031 ns = p->sched_time + sched_clock() - p->last_ran;
2951 ns = p->sched_time + sched_clock() - ns;
2952 local_irq_restore(flags); 3032 local_irq_restore(flags);
2953 3033
2954 return ns; 3034 return ns;
@@ -3048,35 +3128,12 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
3048 cpustat->steal = cputime64_add(cpustat->steal, tmp); 3128 cpustat->steal = cputime64_add(cpustat->steal, tmp);
3049} 3129}
3050 3130
3051/* 3131static void task_running_tick(struct rq *rq, struct task_struct *p)
3052 * This function gets called by the timer code, with HZ frequency.
3053 * We call it with interrupts disabled.
3054 *
3055 * It also gets called by the fork code, when changing the parent's
3056 * timeslices.
3057 */
3058void scheduler_tick(void)
3059{ 3132{
3060 unsigned long long now = sched_clock();
3061 struct task_struct *p = current;
3062 int cpu = smp_processor_id();
3063 struct rq *rq = cpu_rq(cpu);
3064
3065 update_cpu_clock(p, rq, now);
3066
3067 rq->timestamp_last_tick = now;
3068
3069 if (p == rq->idle) {
3070 if (wake_priority_sleeper(rq))
3071 goto out;
3072 rebalance_tick(cpu, rq, SCHED_IDLE);
3073 return;
3074 }
3075
3076 /* Task might have expired already, but not scheduled off yet */
3077 if (p->array != rq->active) { 3133 if (p->array != rq->active) {
3134 /* Task has expired but was not scheduled yet */
3078 set_tsk_need_resched(p); 3135 set_tsk_need_resched(p);
3079 goto out; 3136 return;
3080 } 3137 }
3081 spin_lock(&rq->lock); 3138 spin_lock(&rq->lock);
3082 /* 3139 /*
@@ -3144,8 +3201,34 @@ void scheduler_tick(void)
3144 } 3201 }
3145out_unlock: 3202out_unlock:
3146 spin_unlock(&rq->lock); 3203 spin_unlock(&rq->lock);
3147out: 3204}
3148 rebalance_tick(cpu, rq, NOT_IDLE); 3205
3206/*
3207 * This function gets called by the timer code, with HZ frequency.
3208 * We call it with interrupts disabled.
3209 *
3210 * It also gets called by the fork code, when changing the parent's
3211 * timeslices.
3212 */
3213void scheduler_tick(void)
3214{
3215 unsigned long long now = sched_clock();
3216 struct task_struct *p = current;
3217 int cpu = smp_processor_id();
3218 struct rq *rq = cpu_rq(cpu);
3219
3220 update_cpu_clock(p, rq, now);
3221
3222 if (p == rq->idle)
3223 /* Task on the idle queue */
3224 wake_priority_sleeper(rq);
3225 else
3226 task_running_tick(rq, p);
3227#ifdef CONFIG_SMP
3228 update_load(rq);
3229 if (time_after_eq(jiffies, rq->next_balance))
3230 raise_softirq(SCHED_SOFTIRQ);
3231#endif
3149} 3232}
3150 3233
3151#ifdef CONFIG_SCHED_SMT 3234#ifdef CONFIG_SCHED_SMT
@@ -3291,7 +3374,8 @@ void fastcall add_preempt_count(int val)
3291 /* 3374 /*
3292 * Spinlock count overflowing soon? 3375 * Spinlock count overflowing soon?
3293 */ 3376 */
3294 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= PREEMPT_MASK-10); 3377 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
3378 PREEMPT_MASK - 10);
3295} 3379}
3296EXPORT_SYMBOL(add_preempt_count); 3380EXPORT_SYMBOL(add_preempt_count);
3297 3381
@@ -3345,6 +3429,8 @@ asmlinkage void __sched schedule(void)
3345 "%s/0x%08x/%d\n", 3429 "%s/0x%08x/%d\n",
3346 current->comm, preempt_count(), current->pid); 3430 current->comm, preempt_count(), current->pid);
3347 debug_show_held_locks(current); 3431 debug_show_held_locks(current);
3432 if (irqs_disabled())
3433 print_irqtrace_events(current);
3348 dump_stack(); 3434 dump_stack();
3349 } 3435 }
3350 profile_hit(SCHED_PROFILING, __builtin_return_address(0)); 3436 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
@@ -4990,8 +5076,8 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
4990 * afterwards, and pretending it was a local activate. 5076 * afterwards, and pretending it was a local activate.
4991 * This way is cleaner and logically correct. 5077 * This way is cleaner and logically correct.
4992 */ 5078 */
4993 p->timestamp = p->timestamp - rq_src->timestamp_last_tick 5079 p->timestamp = p->timestamp - rq_src->most_recent_timestamp
4994 + rq_dest->timestamp_last_tick; 5080 + rq_dest->most_recent_timestamp;
4995 deactivate_task(p, rq_src); 5081 deactivate_task(p, rq_src);
4996 __activate_task(p, rq_dest); 5082 __activate_task(p, rq_dest);
4997 if (TASK_PREEMPTS_CURR(p, rq_dest)) 5083 if (TASK_PREEMPTS_CURR(p, rq_dest))
@@ -5067,7 +5153,10 @@ wait_to_die:
5067} 5153}
5068 5154
5069#ifdef CONFIG_HOTPLUG_CPU 5155#ifdef CONFIG_HOTPLUG_CPU
5070/* Figure out where task on dead CPU should go, use force if neccessary. */ 5156/*
5157 * Figure out where task on dead CPU should go, use force if neccessary.
5158 * NOTE: interrupts should be disabled by the caller
5159 */
5071static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) 5160static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
5072{ 5161{
5073 unsigned long flags; 5162 unsigned long flags;
@@ -5187,6 +5276,7 @@ void idle_task_exit(void)
5187 mmdrop(mm); 5276 mmdrop(mm);
5188} 5277}
5189 5278
5279/* called under rq->lock with disabled interrupts */
5190static void migrate_dead(unsigned int dead_cpu, struct task_struct *p) 5280static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
5191{ 5281{
5192 struct rq *rq = cpu_rq(dead_cpu); 5282 struct rq *rq = cpu_rq(dead_cpu);
@@ -5203,10 +5293,11 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
5203 * Drop lock around migration; if someone else moves it, 5293 * Drop lock around migration; if someone else moves it,
5204 * that's OK. No task can be added to this CPU, so iteration is 5294 * that's OK. No task can be added to this CPU, so iteration is
5205 * fine. 5295 * fine.
5296 * NOTE: interrupts should be left disabled --dev@
5206 */ 5297 */
5207 spin_unlock_irq(&rq->lock); 5298 spin_unlock(&rq->lock);
5208 move_task_off_dead_cpu(dead_cpu, p); 5299 move_task_off_dead_cpu(dead_cpu, p);
5209 spin_lock_irq(&rq->lock); 5300 spin_lock(&rq->lock);
5210 5301
5211 put_task_struct(p); 5302 put_task_struct(p);
5212} 5303}
@@ -5359,16 +5450,19 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
5359 if (!(sd->flags & SD_LOAD_BALANCE)) { 5450 if (!(sd->flags & SD_LOAD_BALANCE)) {
5360 printk("does not load-balance\n"); 5451 printk("does not load-balance\n");
5361 if (sd->parent) 5452 if (sd->parent)
5362 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain has parent"); 5453 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
5454 " has parent");
5363 break; 5455 break;
5364 } 5456 }
5365 5457
5366 printk("span %s\n", str); 5458 printk("span %s\n", str);
5367 5459
5368 if (!cpu_isset(cpu, sd->span)) 5460 if (!cpu_isset(cpu, sd->span))
5369 printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu); 5461 printk(KERN_ERR "ERROR: domain->span does not contain "
5462 "CPU%d\n", cpu);
5370 if (!cpu_isset(cpu, group->cpumask)) 5463 if (!cpu_isset(cpu, group->cpumask))
5371 printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu); 5464 printk(KERN_ERR "ERROR: domain->groups does not contain"
5465 " CPU%d\n", cpu);
5372 5466
5373 printk(KERN_DEBUG); 5467 printk(KERN_DEBUG);
5374 for (i = 0; i < level + 2; i++) 5468 for (i = 0; i < level + 2; i++)
@@ -5383,7 +5477,8 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
5383 5477
5384 if (!group->cpu_power) { 5478 if (!group->cpu_power) {
5385 printk("\n"); 5479 printk("\n");
5386 printk(KERN_ERR "ERROR: domain->cpu_power not set\n"); 5480 printk(KERN_ERR "ERROR: domain->cpu_power not "
5481 "set\n");
5387 } 5482 }
5388 5483
5389 if (!cpus_weight(group->cpumask)) { 5484 if (!cpus_weight(group->cpumask)) {
@@ -5406,15 +5501,17 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
5406 printk("\n"); 5501 printk("\n");
5407 5502
5408 if (!cpus_equal(sd->span, groupmask)) 5503 if (!cpus_equal(sd->span, groupmask))
5409 printk(KERN_ERR "ERROR: groups don't span domain->span\n"); 5504 printk(KERN_ERR "ERROR: groups don't span "
5505 "domain->span\n");
5410 5506
5411 level++; 5507 level++;
5412 sd = sd->parent; 5508 sd = sd->parent;
5509 if (!sd)
5510 continue;
5413 5511
5414 if (sd) { 5512 if (!cpus_subset(groupmask, sd->span))
5415 if (!cpus_subset(groupmask, sd->span)) 5513 printk(KERN_ERR "ERROR: parent span is not a superset "
5416 printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n"); 5514 "of domain->span\n");
5417 }
5418 5515
5419 } while (sd); 5516 } while (sd);
5420} 5517}
@@ -5528,28 +5625,27 @@ static int __init isolated_cpu_setup(char *str)
5528__setup ("isolcpus=", isolated_cpu_setup); 5625__setup ("isolcpus=", isolated_cpu_setup);
5529 5626
5530/* 5627/*
5531 * init_sched_build_groups takes an array of groups, the cpumask we wish 5628 * init_sched_build_groups takes the cpumask we wish to span, and a pointer
5532 * to span, and a pointer to a function which identifies what group a CPU 5629 * to a function which identifies what group(along with sched group) a CPU
5533 * belongs to. The return value of group_fn must be a valid index into the 5630 * belongs to. The return value of group_fn must be a >= 0 and < NR_CPUS
5534 * groups[] array, and must be >= 0 and < NR_CPUS (due to the fact that we 5631 * (due to the fact that we keep track of groups covered with a cpumask_t).
5535 * keep track of groups covered with a cpumask_t).
5536 * 5632 *
5537 * init_sched_build_groups will build a circular linked list of the groups 5633 * init_sched_build_groups will build a circular linked list of the groups
5538 * covered by the given span, and will set each group's ->cpumask correctly, 5634 * covered by the given span, and will set each group's ->cpumask correctly,
5539 * and ->cpu_power to 0. 5635 * and ->cpu_power to 0.
5540 */ 5636 */
5541static void 5637static void
5542init_sched_build_groups(struct sched_group groups[], cpumask_t span, 5638init_sched_build_groups(cpumask_t span, const cpumask_t *cpu_map,
5543 const cpumask_t *cpu_map, 5639 int (*group_fn)(int cpu, const cpumask_t *cpu_map,
5544 int (*group_fn)(int cpu, const cpumask_t *cpu_map)) 5640 struct sched_group **sg))
5545{ 5641{
5546 struct sched_group *first = NULL, *last = NULL; 5642 struct sched_group *first = NULL, *last = NULL;
5547 cpumask_t covered = CPU_MASK_NONE; 5643 cpumask_t covered = CPU_MASK_NONE;
5548 int i; 5644 int i;
5549 5645
5550 for_each_cpu_mask(i, span) { 5646 for_each_cpu_mask(i, span) {
5551 int group = group_fn(i, cpu_map); 5647 struct sched_group *sg;
5552 struct sched_group *sg = &groups[group]; 5648 int group = group_fn(i, cpu_map, &sg);
5553 int j; 5649 int j;
5554 5650
5555 if (cpu_isset(i, covered)) 5651 if (cpu_isset(i, covered))
@@ -5559,7 +5655,7 @@ init_sched_build_groups(struct sched_group groups[], cpumask_t span,
5559 sg->cpu_power = 0; 5655 sg->cpu_power = 0;
5560 5656
5561 for_each_cpu_mask(j, span) { 5657 for_each_cpu_mask(j, span) {
5562 if (group_fn(j, cpu_map) != group) 5658 if (group_fn(j, cpu_map, NULL) != group)
5563 continue; 5659 continue;
5564 5660
5565 cpu_set(j, covered); 5661 cpu_set(j, covered);
@@ -5733,8 +5829,9 @@ __setup("max_cache_size=", setup_max_cache_size);
5733 */ 5829 */
5734static void touch_cache(void *__cache, unsigned long __size) 5830static void touch_cache(void *__cache, unsigned long __size)
5735{ 5831{
5736 unsigned long size = __size/sizeof(long), chunk1 = size/3, 5832 unsigned long size = __size / sizeof(long);
5737 chunk2 = 2*size/3; 5833 unsigned long chunk1 = size / 3;
5834 unsigned long chunk2 = 2 * size / 3;
5738 unsigned long *cache = __cache; 5835 unsigned long *cache = __cache;
5739 int i; 5836 int i;
5740 5837
@@ -5843,11 +5940,11 @@ measure_cost(int cpu1, int cpu2, void *cache, unsigned int size)
5843 */ 5940 */
5844 measure_one(cache, size, cpu1, cpu2); 5941 measure_one(cache, size, cpu1, cpu2);
5845 for (i = 0; i < ITERATIONS; i++) 5942 for (i = 0; i < ITERATIONS; i++)
5846 cost1 += measure_one(cache, size - i*1024, cpu1, cpu2); 5943 cost1 += measure_one(cache, size - i * 1024, cpu1, cpu2);
5847 5944
5848 measure_one(cache, size, cpu2, cpu1); 5945 measure_one(cache, size, cpu2, cpu1);
5849 for (i = 0; i < ITERATIONS; i++) 5946 for (i = 0; i < ITERATIONS; i++)
5850 cost1 += measure_one(cache, size - i*1024, cpu2, cpu1); 5947 cost1 += measure_one(cache, size - i * 1024, cpu2, cpu1);
5851 5948
5852 /* 5949 /*
5853 * (We measure the non-migrating [cached] cost on both 5950 * (We measure the non-migrating [cached] cost on both
@@ -5857,17 +5954,17 @@ measure_cost(int cpu1, int cpu2, void *cache, unsigned int size)
5857 5954
5858 measure_one(cache, size, cpu1, cpu1); 5955 measure_one(cache, size, cpu1, cpu1);
5859 for (i = 0; i < ITERATIONS; i++) 5956 for (i = 0; i < ITERATIONS; i++)
5860 cost2 += measure_one(cache, size - i*1024, cpu1, cpu1); 5957 cost2 += measure_one(cache, size - i * 1024, cpu1, cpu1);
5861 5958
5862 measure_one(cache, size, cpu2, cpu2); 5959 measure_one(cache, size, cpu2, cpu2);
5863 for (i = 0; i < ITERATIONS; i++) 5960 for (i = 0; i < ITERATIONS; i++)
5864 cost2 += measure_one(cache, size - i*1024, cpu2, cpu2); 5961 cost2 += measure_one(cache, size - i * 1024, cpu2, cpu2);
5865 5962
5866 /* 5963 /*
5867 * Get the per-iteration migration cost: 5964 * Get the per-iteration migration cost:
5868 */ 5965 */
5869 do_div(cost1, 2*ITERATIONS); 5966 do_div(cost1, 2 * ITERATIONS);
5870 do_div(cost2, 2*ITERATIONS); 5967 do_div(cost2, 2 * ITERATIONS);
5871 5968
5872 return cost1 - cost2; 5969 return cost1 - cost2;
5873} 5970}
@@ -5905,7 +6002,7 @@ static unsigned long long measure_migration_cost(int cpu1, int cpu2)
5905 */ 6002 */
5906 cache = vmalloc(max_size); 6003 cache = vmalloc(max_size);
5907 if (!cache) { 6004 if (!cache) {
5908 printk("could not vmalloc %d bytes for cache!\n", 2*max_size); 6005 printk("could not vmalloc %d bytes for cache!\n", 2 * max_size);
5909 return 1000000; /* return 1 msec on very small boxen */ 6006 return 1000000; /* return 1 msec on very small boxen */
5910 } 6007 }
5911 6008
@@ -5930,7 +6027,8 @@ static unsigned long long measure_migration_cost(int cpu1, int cpu2)
5930 avg_fluct = (avg_fluct + fluct)/2; 6027 avg_fluct = (avg_fluct + fluct)/2;
5931 6028
5932 if (migration_debug) 6029 if (migration_debug)
5933 printk("-> [%d][%d][%7d] %3ld.%ld [%3ld.%ld] (%ld): (%8Ld %8Ld)\n", 6030 printk("-> [%d][%d][%7d] %3ld.%ld [%3ld.%ld] (%ld): "
6031 "(%8Ld %8Ld)\n",
5934 cpu1, cpu2, size, 6032 cpu1, cpu2, size,
5935 (long)cost / 1000000, 6033 (long)cost / 1000000,
5936 ((long)cost / 100000) % 10, 6034 ((long)cost / 100000) % 10,
@@ -6025,20 +6123,18 @@ static void calibrate_migration_costs(const cpumask_t *cpu_map)
6025 -1 6123 -1
6026#endif 6124#endif
6027 ); 6125 );
6028 if (system_state == SYSTEM_BOOTING) { 6126 if (system_state == SYSTEM_BOOTING && num_online_cpus() > 1) {
6029 if (num_online_cpus() > 1) { 6127 printk("migration_cost=");
6030 printk("migration_cost="); 6128 for (distance = 0; distance <= max_distance; distance++) {
6031 for (distance = 0; distance <= max_distance; distance++) { 6129 if (distance)
6032 if (distance) 6130 printk(",");
6033 printk(","); 6131 printk("%ld", (long)migration_cost[distance] / 1000);
6034 printk("%ld", (long)migration_cost[distance] / 1000);
6035 }
6036 printk("\n");
6037 } 6132 }
6133 printk("\n");
6038 } 6134 }
6039 j1 = jiffies; 6135 j1 = jiffies;
6040 if (migration_debug) 6136 if (migration_debug)
6041 printk("migration: %ld seconds\n", (j1-j0)/HZ); 6137 printk("migration: %ld seconds\n", (j1-j0) / HZ);
6042 6138
6043 /* 6139 /*
6044 * Move back to the original CPU. NUMA-Q gets confused 6140 * Move back to the original CPU. NUMA-Q gets confused
@@ -6135,10 +6231,13 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
6135 */ 6231 */
6136#ifdef CONFIG_SCHED_SMT 6232#ifdef CONFIG_SCHED_SMT
6137static DEFINE_PER_CPU(struct sched_domain, cpu_domains); 6233static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
6138static struct sched_group sched_group_cpus[NR_CPUS]; 6234static DEFINE_PER_CPU(struct sched_group, sched_group_cpus);
6139 6235
6140static int cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map) 6236static int cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map,
6237 struct sched_group **sg)
6141{ 6238{
6239 if (sg)
6240 *sg = &per_cpu(sched_group_cpus, cpu);
6142 return cpu; 6241 return cpu;
6143} 6242}
6144#endif 6243#endif
@@ -6148,39 +6247,52 @@ static int cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map)
6148 */ 6247 */
6149#ifdef CONFIG_SCHED_MC 6248#ifdef CONFIG_SCHED_MC
6150static DEFINE_PER_CPU(struct sched_domain, core_domains); 6249static DEFINE_PER_CPU(struct sched_domain, core_domains);
6151static struct sched_group sched_group_core[NR_CPUS]; 6250static DEFINE_PER_CPU(struct sched_group, sched_group_core);
6152#endif 6251#endif
6153 6252
6154#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) 6253#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
6155static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map) 6254static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
6255 struct sched_group **sg)
6156{ 6256{
6257 int group;
6157 cpumask_t mask = cpu_sibling_map[cpu]; 6258 cpumask_t mask = cpu_sibling_map[cpu];
6158 cpus_and(mask, mask, *cpu_map); 6259 cpus_and(mask, mask, *cpu_map);
6159 return first_cpu(mask); 6260 group = first_cpu(mask);
6261 if (sg)
6262 *sg = &per_cpu(sched_group_core, group);
6263 return group;
6160} 6264}
6161#elif defined(CONFIG_SCHED_MC) 6265#elif defined(CONFIG_SCHED_MC)
6162static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map) 6266static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
6267 struct sched_group **sg)
6163{ 6268{
6269 if (sg)
6270 *sg = &per_cpu(sched_group_core, cpu);
6164 return cpu; 6271 return cpu;
6165} 6272}
6166#endif 6273#endif
6167 6274
6168static DEFINE_PER_CPU(struct sched_domain, phys_domains); 6275static DEFINE_PER_CPU(struct sched_domain, phys_domains);
6169static struct sched_group sched_group_phys[NR_CPUS]; 6276static DEFINE_PER_CPU(struct sched_group, sched_group_phys);
6170 6277
6171static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map) 6278static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map,
6279 struct sched_group **sg)
6172{ 6280{
6281 int group;
6173#ifdef CONFIG_SCHED_MC 6282#ifdef CONFIG_SCHED_MC
6174 cpumask_t mask = cpu_coregroup_map(cpu); 6283 cpumask_t mask = cpu_coregroup_map(cpu);
6175 cpus_and(mask, mask, *cpu_map); 6284 cpus_and(mask, mask, *cpu_map);
6176 return first_cpu(mask); 6285 group = first_cpu(mask);
6177#elif defined(CONFIG_SCHED_SMT) 6286#elif defined(CONFIG_SCHED_SMT)
6178 cpumask_t mask = cpu_sibling_map[cpu]; 6287 cpumask_t mask = cpu_sibling_map[cpu];
6179 cpus_and(mask, mask, *cpu_map); 6288 cpus_and(mask, mask, *cpu_map);
6180 return first_cpu(mask); 6289 group = first_cpu(mask);
6181#else 6290#else
6182 return cpu; 6291 group = cpu;
6183#endif 6292#endif
6293 if (sg)
6294 *sg = &per_cpu(sched_group_phys, group);
6295 return group;
6184} 6296}
6185 6297
6186#ifdef CONFIG_NUMA 6298#ifdef CONFIG_NUMA
@@ -6193,12 +6305,22 @@ static DEFINE_PER_CPU(struct sched_domain, node_domains);
6193static struct sched_group **sched_group_nodes_bycpu[NR_CPUS]; 6305static struct sched_group **sched_group_nodes_bycpu[NR_CPUS];
6194 6306
6195static DEFINE_PER_CPU(struct sched_domain, allnodes_domains); 6307static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
6196static struct sched_group *sched_group_allnodes_bycpu[NR_CPUS]; 6308static DEFINE_PER_CPU(struct sched_group, sched_group_allnodes);
6197 6309
6198static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map) 6310static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map,
6311 struct sched_group **sg)
6199{ 6312{
6200 return cpu_to_node(cpu); 6313 cpumask_t nodemask = node_to_cpumask(cpu_to_node(cpu));
6314 int group;
6315
6316 cpus_and(nodemask, nodemask, *cpu_map);
6317 group = first_cpu(nodemask);
6318
6319 if (sg)
6320 *sg = &per_cpu(sched_group_allnodes, group);
6321 return group;
6201} 6322}
6323
6202static void init_numa_sched_groups_power(struct sched_group *group_head) 6324static void init_numa_sched_groups_power(struct sched_group *group_head)
6203{ 6325{
6204 struct sched_group *sg = group_head; 6326 struct sched_group *sg = group_head;
@@ -6234,16 +6356,9 @@ static void free_sched_groups(const cpumask_t *cpu_map)
6234 int cpu, i; 6356 int cpu, i;
6235 6357
6236 for_each_cpu_mask(cpu, *cpu_map) { 6358 for_each_cpu_mask(cpu, *cpu_map) {
6237 struct sched_group *sched_group_allnodes
6238 = sched_group_allnodes_bycpu[cpu];
6239 struct sched_group **sched_group_nodes 6359 struct sched_group **sched_group_nodes
6240 = sched_group_nodes_bycpu[cpu]; 6360 = sched_group_nodes_bycpu[cpu];
6241 6361
6242 if (sched_group_allnodes) {
6243 kfree(sched_group_allnodes);
6244 sched_group_allnodes_bycpu[cpu] = NULL;
6245 }
6246
6247 if (!sched_group_nodes) 6362 if (!sched_group_nodes)
6248 continue; 6363 continue;
6249 6364
@@ -6337,7 +6452,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
6337 struct sched_domain *sd; 6452 struct sched_domain *sd;
6338#ifdef CONFIG_NUMA 6453#ifdef CONFIG_NUMA
6339 struct sched_group **sched_group_nodes = NULL; 6454 struct sched_group **sched_group_nodes = NULL;
6340 struct sched_group *sched_group_allnodes = NULL; 6455 int sd_allnodes = 0;
6341 6456
6342 /* 6457 /*
6343 * Allocate the per-node list of sched groups 6458 * Allocate the per-node list of sched groups
@@ -6355,7 +6470,6 @@ static int build_sched_domains(const cpumask_t *cpu_map)
6355 * Set up domains for cpus specified by the cpu_map. 6470 * Set up domains for cpus specified by the cpu_map.
6356 */ 6471 */
6357 for_each_cpu_mask(i, *cpu_map) { 6472 for_each_cpu_mask(i, *cpu_map) {
6358 int group;
6359 struct sched_domain *sd = NULL, *p; 6473 struct sched_domain *sd = NULL, *p;
6360 cpumask_t nodemask = node_to_cpumask(cpu_to_node(i)); 6474 cpumask_t nodemask = node_to_cpumask(cpu_to_node(i));
6361 6475
@@ -6364,26 +6478,12 @@ static int build_sched_domains(const cpumask_t *cpu_map)
6364#ifdef CONFIG_NUMA 6478#ifdef CONFIG_NUMA
6365 if (cpus_weight(*cpu_map) 6479 if (cpus_weight(*cpu_map)
6366 > SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) { 6480 > SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
6367 if (!sched_group_allnodes) {
6368 sched_group_allnodes
6369 = kmalloc_node(sizeof(struct sched_group)
6370 * MAX_NUMNODES,
6371 GFP_KERNEL,
6372 cpu_to_node(i));
6373 if (!sched_group_allnodes) {
6374 printk(KERN_WARNING
6375 "Can not alloc allnodes sched group\n");
6376 goto error;
6377 }
6378 sched_group_allnodes_bycpu[i]
6379 = sched_group_allnodes;
6380 }
6381 sd = &per_cpu(allnodes_domains, i); 6481 sd = &per_cpu(allnodes_domains, i);
6382 *sd = SD_ALLNODES_INIT; 6482 *sd = SD_ALLNODES_INIT;
6383 sd->span = *cpu_map; 6483 sd->span = *cpu_map;
6384 group = cpu_to_allnodes_group(i, cpu_map); 6484 cpu_to_allnodes_group(i, cpu_map, &sd->groups);
6385 sd->groups = &sched_group_allnodes[group];
6386 p = sd; 6485 p = sd;
6486 sd_allnodes = 1;
6387 } else 6487 } else
6388 p = NULL; 6488 p = NULL;
6389 6489
@@ -6398,36 +6498,33 @@ static int build_sched_domains(const cpumask_t *cpu_map)
6398 6498
6399 p = sd; 6499 p = sd;
6400 sd = &per_cpu(phys_domains, i); 6500 sd = &per_cpu(phys_domains, i);
6401 group = cpu_to_phys_group(i, cpu_map);
6402 *sd = SD_CPU_INIT; 6501 *sd = SD_CPU_INIT;
6403 sd->span = nodemask; 6502 sd->span = nodemask;
6404 sd->parent = p; 6503 sd->parent = p;
6405 if (p) 6504 if (p)
6406 p->child = sd; 6505 p->child = sd;
6407 sd->groups = &sched_group_phys[group]; 6506 cpu_to_phys_group(i, cpu_map, &sd->groups);
6408 6507
6409#ifdef CONFIG_SCHED_MC 6508#ifdef CONFIG_SCHED_MC
6410 p = sd; 6509 p = sd;
6411 sd = &per_cpu(core_domains, i); 6510 sd = &per_cpu(core_domains, i);
6412 group = cpu_to_core_group(i, cpu_map);
6413 *sd = SD_MC_INIT; 6511 *sd = SD_MC_INIT;
6414 sd->span = cpu_coregroup_map(i); 6512 sd->span = cpu_coregroup_map(i);
6415 cpus_and(sd->span, sd->span, *cpu_map); 6513 cpus_and(sd->span, sd->span, *cpu_map);
6416 sd->parent = p; 6514 sd->parent = p;
6417 p->child = sd; 6515 p->child = sd;
6418 sd->groups = &sched_group_core[group]; 6516 cpu_to_core_group(i, cpu_map, &sd->groups);
6419#endif 6517#endif
6420 6518
6421#ifdef CONFIG_SCHED_SMT 6519#ifdef CONFIG_SCHED_SMT
6422 p = sd; 6520 p = sd;
6423 sd = &per_cpu(cpu_domains, i); 6521 sd = &per_cpu(cpu_domains, i);
6424 group = cpu_to_cpu_group(i, cpu_map);
6425 *sd = SD_SIBLING_INIT; 6522 *sd = SD_SIBLING_INIT;
6426 sd->span = cpu_sibling_map[i]; 6523 sd->span = cpu_sibling_map[i];
6427 cpus_and(sd->span, sd->span, *cpu_map); 6524 cpus_and(sd->span, sd->span, *cpu_map);
6428 sd->parent = p; 6525 sd->parent = p;
6429 p->child = sd; 6526 p->child = sd;
6430 sd->groups = &sched_group_cpus[group]; 6527 cpu_to_cpu_group(i, cpu_map, &sd->groups);
6431#endif 6528#endif
6432 } 6529 }
6433 6530
@@ -6439,8 +6536,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
6439 if (i != first_cpu(this_sibling_map)) 6536 if (i != first_cpu(this_sibling_map))
6440 continue; 6537 continue;
6441 6538
6442 init_sched_build_groups(sched_group_cpus, this_sibling_map, 6539 init_sched_build_groups(this_sibling_map, cpu_map, &cpu_to_cpu_group);
6443 cpu_map, &cpu_to_cpu_group);
6444 } 6540 }
6445#endif 6541#endif
6446 6542
@@ -6451,8 +6547,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
6451 cpus_and(this_core_map, this_core_map, *cpu_map); 6547 cpus_and(this_core_map, this_core_map, *cpu_map);
6452 if (i != first_cpu(this_core_map)) 6548 if (i != first_cpu(this_core_map))
6453 continue; 6549 continue;
6454 init_sched_build_groups(sched_group_core, this_core_map, 6550 init_sched_build_groups(this_core_map, cpu_map, &cpu_to_core_group);
6455 cpu_map, &cpu_to_core_group);
6456 } 6551 }
6457#endif 6552#endif
6458 6553
@@ -6465,15 +6560,13 @@ static int build_sched_domains(const cpumask_t *cpu_map)
6465 if (cpus_empty(nodemask)) 6560 if (cpus_empty(nodemask))
6466 continue; 6561 continue;
6467 6562
6468 init_sched_build_groups(sched_group_phys, nodemask, 6563 init_sched_build_groups(nodemask, cpu_map, &cpu_to_phys_group);
6469 cpu_map, &cpu_to_phys_group);
6470 } 6564 }
6471 6565
6472#ifdef CONFIG_NUMA 6566#ifdef CONFIG_NUMA
6473 /* Set up node groups */ 6567 /* Set up node groups */
6474 if (sched_group_allnodes) 6568 if (sd_allnodes)
6475 init_sched_build_groups(sched_group_allnodes, *cpu_map, 6569 init_sched_build_groups(*cpu_map, cpu_map, &cpu_to_allnodes_group);
6476 cpu_map, &cpu_to_allnodes_group);
6477 6570
6478 for (i = 0; i < MAX_NUMNODES; i++) { 6571 for (i = 0; i < MAX_NUMNODES; i++) {
6479 /* Set up node groups */ 6572 /* Set up node groups */
@@ -6565,10 +6658,10 @@ static int build_sched_domains(const cpumask_t *cpu_map)
6565 for (i = 0; i < MAX_NUMNODES; i++) 6658 for (i = 0; i < MAX_NUMNODES; i++)
6566 init_numa_sched_groups_power(sched_group_nodes[i]); 6659 init_numa_sched_groups_power(sched_group_nodes[i]);
6567 6660
6568 if (sched_group_allnodes) { 6661 if (sd_allnodes) {
6569 int group = cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map); 6662 struct sched_group *sg;
6570 struct sched_group *sg = &sched_group_allnodes[group];
6571 6663
6664 cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg);
6572 init_numa_sched_groups_power(sg); 6665 init_numa_sched_groups_power(sg);
6573 } 6666 }
6574#endif 6667#endif
@@ -6847,6 +6940,10 @@ void __init sched_init(void)
6847 6940
6848 set_load_weight(&init_task); 6941 set_load_weight(&init_task);
6849 6942
6943#ifdef CONFIG_SMP
6944 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains, NULL);
6945#endif
6946
6850#ifdef CONFIG_RT_MUTEXES 6947#ifdef CONFIG_RT_MUTEXES
6851 plist_head_init(&init_task.pi_waiters, &init_task.pi_lock); 6948 plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
6852#endif 6949#endif
@@ -6882,6 +6979,8 @@ void __might_sleep(char *file, int line)
6882 printk("in_atomic():%d, irqs_disabled():%d\n", 6979 printk("in_atomic():%d, irqs_disabled():%d\n",
6883 in_atomic(), irqs_disabled()); 6980 in_atomic(), irqs_disabled());
6884 debug_show_held_locks(current); 6981 debug_show_held_locks(current);
6982 if (irqs_disabled())
6983 print_irqtrace_events(current);
6885 dump_stack(); 6984 dump_stack();
6886 } 6985 }
6887#endif 6986#endif
diff --git a/kernel/signal.c b/kernel/signal.c
index 1921ffdc5e77..5630255d2e2a 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1705,7 +1705,9 @@ finish_stop(int stop_count)
1705 read_unlock(&tasklist_lock); 1705 read_unlock(&tasklist_lock);
1706 } 1706 }
1707 1707
1708 schedule(); 1708 do {
1709 schedule();
1710 } while (try_to_freeze());
1709 /* 1711 /*
1710 * Now we don't run again until continued. 1712 * Now we don't run again until continued.
1711 */ 1713 */
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 025fcb3c66f8..600b33358ded 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -65,7 +65,6 @@ extern int sysctl_overcommit_memory;
65extern int sysctl_overcommit_ratio; 65extern int sysctl_overcommit_ratio;
66extern int sysctl_panic_on_oom; 66extern int sysctl_panic_on_oom;
67extern int max_threads; 67extern int max_threads;
68extern int sysrq_enabled;
69extern int core_uses_pid; 68extern int core_uses_pid;
70extern int suid_dumpable; 69extern int suid_dumpable;
71extern char core_pattern[]; 70extern char core_pattern[];
@@ -133,7 +132,7 @@ extern int max_lock_depth;
133 132
134#ifdef CONFIG_SYSCTL_SYSCALL 133#ifdef CONFIG_SYSCTL_SYSCALL
135static int parse_table(int __user *, int, void __user *, size_t __user *, 134static int parse_table(int __user *, int, void __user *, size_t __user *,
136 void __user *, size_t, ctl_table *, void **); 135 void __user *, size_t, ctl_table *);
137#endif 136#endif
138 137
139static int proc_do_uts_string(ctl_table *table, int write, struct file *filp, 138static int proc_do_uts_string(ctl_table *table, int write, struct file *filp,
@@ -141,12 +140,12 @@ static int proc_do_uts_string(ctl_table *table, int write, struct file *filp,
141 140
142static int sysctl_uts_string(ctl_table *table, int __user *name, int nlen, 141static int sysctl_uts_string(ctl_table *table, int __user *name, int nlen,
143 void __user *oldval, size_t __user *oldlenp, 142 void __user *oldval, size_t __user *oldlenp,
144 void __user *newval, size_t newlen, void **context); 143 void __user *newval, size_t newlen);
145 144
146#ifdef CONFIG_SYSVIPC 145#ifdef CONFIG_SYSVIPC
147static int sysctl_ipc_data(ctl_table *table, int __user *name, int nlen, 146static int sysctl_ipc_data(ctl_table *table, int __user *name, int nlen,
148 void __user *oldval, size_t __user *oldlenp, 147 void __user *oldval, size_t __user *oldlenp,
149 void __user *newval, size_t newlen, void **context); 148 void __user *newval, size_t newlen);
150#endif 149#endif
151 150
152#ifdef CONFIG_PROC_SYSCTL 151#ifdef CONFIG_PROC_SYSCTL
@@ -543,7 +542,7 @@ static ctl_table kern_table[] = {
543 { 542 {
544 .ctl_name = KERN_SYSRQ, 543 .ctl_name = KERN_SYSRQ,
545 .procname = "sysrq", 544 .procname = "sysrq",
546 .data = &sysrq_enabled, 545 .data = &__sysrq_enabled,
547 .maxlen = sizeof (int), 546 .maxlen = sizeof (int),
548 .mode = 0644, 547 .mode = 0644,
549 .proc_handler = &proc_dointvec, 548 .proc_handler = &proc_dointvec,
@@ -1243,7 +1242,6 @@ int do_sysctl(int __user *name, int nlen, void __user *oldval, size_t __user *ol
1243 do { 1242 do {
1244 struct ctl_table_header *head = 1243 struct ctl_table_header *head =
1245 list_entry(tmp, struct ctl_table_header, ctl_entry); 1244 list_entry(tmp, struct ctl_table_header, ctl_entry);
1246 void *context = NULL;
1247 1245
1248 if (!use_table(head)) 1246 if (!use_table(head))
1249 continue; 1247 continue;
@@ -1251,9 +1249,7 @@ int do_sysctl(int __user *name, int nlen, void __user *oldval, size_t __user *ol
1251 spin_unlock(&sysctl_lock); 1249 spin_unlock(&sysctl_lock);
1252 1250
1253 error = parse_table(name, nlen, oldval, oldlenp, 1251 error = parse_table(name, nlen, oldval, oldlenp,
1254 newval, newlen, head->ctl_table, 1252 newval, newlen, head->ctl_table);
1255 &context);
1256 kfree(context);
1257 1253
1258 spin_lock(&sysctl_lock); 1254 spin_lock(&sysctl_lock);
1259 unuse_table(head); 1255 unuse_table(head);
@@ -1309,7 +1305,7 @@ static inline int ctl_perm(ctl_table *table, int op)
1309static int parse_table(int __user *name, int nlen, 1305static int parse_table(int __user *name, int nlen,
1310 void __user *oldval, size_t __user *oldlenp, 1306 void __user *oldval, size_t __user *oldlenp,
1311 void __user *newval, size_t newlen, 1307 void __user *newval, size_t newlen,
1312 ctl_table *table, void **context) 1308 ctl_table *table)
1313{ 1309{
1314 int n; 1310 int n;
1315repeat: 1311repeat:
@@ -1329,7 +1325,7 @@ repeat:
1329 error = table->strategy( 1325 error = table->strategy(
1330 table, name, nlen, 1326 table, name, nlen,
1331 oldval, oldlenp, 1327 oldval, oldlenp,
1332 newval, newlen, context); 1328 newval, newlen);
1333 if (error) 1329 if (error)
1334 return error; 1330 return error;
1335 } 1331 }
@@ -1340,7 +1336,7 @@ repeat:
1340 } 1336 }
1341 error = do_sysctl_strategy(table, name, nlen, 1337 error = do_sysctl_strategy(table, name, nlen,
1342 oldval, oldlenp, 1338 oldval, oldlenp,
1343 newval, newlen, context); 1339 newval, newlen);
1344 return error; 1340 return error;
1345 } 1341 }
1346 } 1342 }
@@ -1351,7 +1347,7 @@ repeat:
1351int do_sysctl_strategy (ctl_table *table, 1347int do_sysctl_strategy (ctl_table *table,
1352 int __user *name, int nlen, 1348 int __user *name, int nlen,
1353 void __user *oldval, size_t __user *oldlenp, 1349 void __user *oldval, size_t __user *oldlenp,
1354 void __user *newval, size_t newlen, void **context) 1350 void __user *newval, size_t newlen)
1355{ 1351{
1356 int op = 0, rc; 1352 int op = 0, rc;
1357 size_t len; 1353 size_t len;
@@ -1365,7 +1361,7 @@ int do_sysctl_strategy (ctl_table *table,
1365 1361
1366 if (table->strategy) { 1362 if (table->strategy) {
1367 rc = table->strategy(table, name, nlen, oldval, oldlenp, 1363 rc = table->strategy(table, name, nlen, oldval, oldlenp,
1368 newval, newlen, context); 1364 newval, newlen);
1369 if (rc < 0) 1365 if (rc < 0)
1370 return rc; 1366 return rc;
1371 if (rc > 0) 1367 if (rc > 0)
@@ -1931,9 +1927,6 @@ int proc_dointvec(ctl_table *table, int write, struct file *filp,
1931 1927
1932#define OP_SET 0 1928#define OP_SET 0
1933#define OP_AND 1 1929#define OP_AND 1
1934#define OP_OR 2
1935#define OP_MAX 3
1936#define OP_MIN 4
1937 1930
1938static int do_proc_dointvec_bset_conv(int *negp, unsigned long *lvalp, 1931static int do_proc_dointvec_bset_conv(int *negp, unsigned long *lvalp,
1939 int *valp, 1932 int *valp,
@@ -1945,13 +1938,6 @@ static int do_proc_dointvec_bset_conv(int *negp, unsigned long *lvalp,
1945 switch(op) { 1938 switch(op) {
1946 case OP_SET: *valp = val; break; 1939 case OP_SET: *valp = val; break;
1947 case OP_AND: *valp &= val; break; 1940 case OP_AND: *valp &= val; break;
1948 case OP_OR: *valp |= val; break;
1949 case OP_MAX: if(*valp < val)
1950 *valp = val;
1951 break;
1952 case OP_MIN: if(*valp > val)
1953 *valp = val;
1954 break;
1955 } 1941 }
1956 } else { 1942 } else {
1957 int val = *valp; 1943 int val = *valp;
@@ -2408,6 +2394,17 @@ static int proc_do_ipc_string(ctl_table *table, int write, struct file *filp,
2408{ 2394{
2409 return -ENOSYS; 2395 return -ENOSYS;
2410} 2396}
2397static int proc_ipc_dointvec(ctl_table *table, int write, struct file *filp,
2398 void __user *buffer, size_t *lenp, loff_t *ppos)
2399{
2400 return -ENOSYS;
2401}
2402static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
2403 struct file *filp, void __user *buffer,
2404 size_t *lenp, loff_t *ppos)
2405{
2406 return -ENOSYS;
2407}
2411#endif 2408#endif
2412 2409
2413int proc_dointvec(ctl_table *table, int write, struct file *filp, 2410int proc_dointvec(ctl_table *table, int write, struct file *filp,
@@ -2472,7 +2469,7 @@ int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int write,
2472/* The generic string strategy routine: */ 2469/* The generic string strategy routine: */
2473int sysctl_string(ctl_table *table, int __user *name, int nlen, 2470int sysctl_string(ctl_table *table, int __user *name, int nlen,
2474 void __user *oldval, size_t __user *oldlenp, 2471 void __user *oldval, size_t __user *oldlenp,
2475 void __user *newval, size_t newlen, void **context) 2472 void __user *newval, size_t newlen)
2476{ 2473{
2477 if (!table->data || !table->maxlen) 2474 if (!table->data || !table->maxlen)
2478 return -ENOTDIR; 2475 return -ENOTDIR;
@@ -2518,7 +2515,7 @@ int sysctl_string(ctl_table *table, int __user *name, int nlen,
2518 */ 2515 */
2519int sysctl_intvec(ctl_table *table, int __user *name, int nlen, 2516int sysctl_intvec(ctl_table *table, int __user *name, int nlen,
2520 void __user *oldval, size_t __user *oldlenp, 2517 void __user *oldval, size_t __user *oldlenp,
2521 void __user *newval, size_t newlen, void **context) 2518 void __user *newval, size_t newlen)
2522{ 2519{
2523 2520
2524 if (newval && newlen) { 2521 if (newval && newlen) {
@@ -2554,7 +2551,7 @@ int sysctl_intvec(ctl_table *table, int __user *name, int nlen,
2554/* Strategy function to convert jiffies to seconds */ 2551/* Strategy function to convert jiffies to seconds */
2555int sysctl_jiffies(ctl_table *table, int __user *name, int nlen, 2552int sysctl_jiffies(ctl_table *table, int __user *name, int nlen,
2556 void __user *oldval, size_t __user *oldlenp, 2553 void __user *oldval, size_t __user *oldlenp,
2557 void __user *newval, size_t newlen, void **context) 2554 void __user *newval, size_t newlen)
2558{ 2555{
2559 if (oldval) { 2556 if (oldval) {
2560 size_t olen; 2557 size_t olen;
@@ -2582,7 +2579,7 @@ int sysctl_jiffies(ctl_table *table, int __user *name, int nlen,
2582/* Strategy function to convert jiffies to seconds */ 2579/* Strategy function to convert jiffies to seconds */
2583int sysctl_ms_jiffies(ctl_table *table, int __user *name, int nlen, 2580int sysctl_ms_jiffies(ctl_table *table, int __user *name, int nlen,
2584 void __user *oldval, size_t __user *oldlenp, 2581 void __user *oldval, size_t __user *oldlenp,
2585 void __user *newval, size_t newlen, void **context) 2582 void __user *newval, size_t newlen)
2586{ 2583{
2587 if (oldval) { 2584 if (oldval) {
2588 size_t olen; 2585 size_t olen;
@@ -2611,7 +2608,7 @@ int sysctl_ms_jiffies(ctl_table *table, int __user *name, int nlen,
2611/* The generic string strategy routine: */ 2608/* The generic string strategy routine: */
2612static int sysctl_uts_string(ctl_table *table, int __user *name, int nlen, 2609static int sysctl_uts_string(ctl_table *table, int __user *name, int nlen,
2613 void __user *oldval, size_t __user *oldlenp, 2610 void __user *oldval, size_t __user *oldlenp,
2614 void __user *newval, size_t newlen, void **context) 2611 void __user *newval, size_t newlen)
2615{ 2612{
2616 struct ctl_table uts_table; 2613 struct ctl_table uts_table;
2617 int r, write; 2614 int r, write;
@@ -2619,7 +2616,7 @@ static int sysctl_uts_string(ctl_table *table, int __user *name, int nlen,
2619 memcpy(&uts_table, table, sizeof(uts_table)); 2616 memcpy(&uts_table, table, sizeof(uts_table));
2620 uts_table.data = get_uts(table, write); 2617 uts_table.data = get_uts(table, write);
2621 r = sysctl_string(&uts_table, name, nlen, 2618 r = sysctl_string(&uts_table, name, nlen,
2622 oldval, oldlenp, newval, newlen, context); 2619 oldval, oldlenp, newval, newlen);
2623 put_uts(table, write, uts_table.data); 2620 put_uts(table, write, uts_table.data);
2624 return r; 2621 return r;
2625} 2622}
@@ -2628,7 +2625,7 @@ static int sysctl_uts_string(ctl_table *table, int __user *name, int nlen,
2628/* The generic sysctl ipc data routine. */ 2625/* The generic sysctl ipc data routine. */
2629static int sysctl_ipc_data(ctl_table *table, int __user *name, int nlen, 2626static int sysctl_ipc_data(ctl_table *table, int __user *name, int nlen,
2630 void __user *oldval, size_t __user *oldlenp, 2627 void __user *oldval, size_t __user *oldlenp,
2631 void __user *newval, size_t newlen, void **context) 2628 void __user *newval, size_t newlen)
2632{ 2629{
2633 size_t len; 2630 size_t len;
2634 void *data; 2631 void *data;
@@ -2703,41 +2700,41 @@ out:
2703 2700
2704int sysctl_string(ctl_table *table, int __user *name, int nlen, 2701int sysctl_string(ctl_table *table, int __user *name, int nlen,
2705 void __user *oldval, size_t __user *oldlenp, 2702 void __user *oldval, size_t __user *oldlenp,
2706 void __user *newval, size_t newlen, void **context) 2703 void __user *newval, size_t newlen)
2707{ 2704{
2708 return -ENOSYS; 2705 return -ENOSYS;
2709} 2706}
2710 2707
2711int sysctl_intvec(ctl_table *table, int __user *name, int nlen, 2708int sysctl_intvec(ctl_table *table, int __user *name, int nlen,
2712 void __user *oldval, size_t __user *oldlenp, 2709 void __user *oldval, size_t __user *oldlenp,
2713 void __user *newval, size_t newlen, void **context) 2710 void __user *newval, size_t newlen)
2714{ 2711{
2715 return -ENOSYS; 2712 return -ENOSYS;
2716} 2713}
2717 2714
2718int sysctl_jiffies(ctl_table *table, int __user *name, int nlen, 2715int sysctl_jiffies(ctl_table *table, int __user *name, int nlen,
2719 void __user *oldval, size_t __user *oldlenp, 2716 void __user *oldval, size_t __user *oldlenp,
2720 void __user *newval, size_t newlen, void **context) 2717 void __user *newval, size_t newlen)
2721{ 2718{
2722 return -ENOSYS; 2719 return -ENOSYS;
2723} 2720}
2724 2721
2725int sysctl_ms_jiffies(ctl_table *table, int __user *name, int nlen, 2722int sysctl_ms_jiffies(ctl_table *table, int __user *name, int nlen,
2726 void __user *oldval, size_t __user *oldlenp, 2723 void __user *oldval, size_t __user *oldlenp,
2727 void __user *newval, size_t newlen, void **context) 2724 void __user *newval, size_t newlen)
2728{ 2725{
2729 return -ENOSYS; 2726 return -ENOSYS;
2730} 2727}
2731 2728
2732static int sysctl_uts_string(ctl_table *table, int __user *name, int nlen, 2729static int sysctl_uts_string(ctl_table *table, int __user *name, int nlen,
2733 void __user *oldval, size_t __user *oldlenp, 2730 void __user *oldval, size_t __user *oldlenp,
2734 void __user *newval, size_t newlen, void **context) 2731 void __user *newval, size_t newlen)
2735{ 2732{
2736 return -ENOSYS; 2733 return -ENOSYS;
2737} 2734}
2738static int sysctl_ipc_data(ctl_table *table, int __user *name, int nlen, 2735static int sysctl_ipc_data(ctl_table *table, int __user *name, int nlen,
2739 void __user *oldval, size_t __user *oldlenp, 2736 void __user *oldval, size_t __user *oldlenp,
2740 void __user *newval, size_t newlen, void **context) 2737 void __user *newval, size_t newlen)
2741{ 2738{
2742 return -ENOSYS; 2739 return -ENOSYS;
2743} 2740}
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 74eca5939bd9..22504afc0d34 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -156,7 +156,7 @@ int clocksource_register(struct clocksource *c)
156 /* check if clocksource is already registered */ 156 /* check if clocksource is already registered */
157 if (is_registered_source(c)) { 157 if (is_registered_source(c)) {
158 printk("register_clocksource: Cannot register %s. " 158 printk("register_clocksource: Cannot register %s. "
159 "Already registered!", c->name); 159 "Already registered!", c->name);
160 ret = -EBUSY; 160 ret = -EBUSY;
161 } else { 161 } else {
162 /* register it */ 162 /* register it */
@@ -186,6 +186,7 @@ void clocksource_reselect(void)
186} 186}
187EXPORT_SYMBOL(clocksource_reselect); 187EXPORT_SYMBOL(clocksource_reselect);
188 188
189#ifdef CONFIG_SYSFS
189/** 190/**
190 * sysfs_show_current_clocksources - sysfs interface for current clocksource 191 * sysfs_show_current_clocksources - sysfs interface for current clocksource
191 * @dev: unused 192 * @dev: unused
@@ -275,10 +276,10 @@ sysfs_show_available_clocksources(struct sys_device *dev, char *buf)
275 * Sysfs setup bits: 276 * Sysfs setup bits:
276 */ 277 */
277static SYSDEV_ATTR(current_clocksource, 0600, sysfs_show_current_clocksources, 278static SYSDEV_ATTR(current_clocksource, 0600, sysfs_show_current_clocksources,
278 sysfs_override_clocksource); 279 sysfs_override_clocksource);
279 280
280static SYSDEV_ATTR(available_clocksource, 0600, 281static SYSDEV_ATTR(available_clocksource, 0600,
281 sysfs_show_available_clocksources, NULL); 282 sysfs_show_available_clocksources, NULL);
282 283
283static struct sysdev_class clocksource_sysclass = { 284static struct sysdev_class clocksource_sysclass = {
284 set_kset_name("clocksource"), 285 set_kset_name("clocksource"),
@@ -307,6 +308,7 @@ static int __init init_clocksource_sysfs(void)
307} 308}
308 309
309device_initcall(init_clocksource_sysfs); 310device_initcall(init_clocksource_sysfs);
311#endif /* CONFIG_SYSFS */
310 312
311/** 313/**
312 * boot_override_clocksource - boot clock override 314 * boot_override_clocksource - boot clock override
diff --git a/kernel/timer.c b/kernel/timer.c
index c1c7fbcffec1..feddf817baa5 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -80,6 +80,138 @@ tvec_base_t boot_tvec_bases;
80EXPORT_SYMBOL(boot_tvec_bases); 80EXPORT_SYMBOL(boot_tvec_bases);
81static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases; 81static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases;
82 82
83/**
84 * __round_jiffies - function to round jiffies to a full second
85 * @j: the time in (absolute) jiffies that should be rounded
86 * @cpu: the processor number on which the timeout will happen
87 *
88 * __round_jiffies rounds an absolute time in the future (in jiffies)
89 * up or down to (approximately) full seconds. This is useful for timers
90 * for which the exact time they fire does not matter too much, as long as
91 * they fire approximately every X seconds.
92 *
93 * By rounding these timers to whole seconds, all such timers will fire
94 * at the same time, rather than at various times spread out. The goal
95 * of this is to have the CPU wake up less, which saves power.
96 *
97 * The exact rounding is skewed for each processor to avoid all
98 * processors firing at the exact same time, which could lead
99 * to lock contention or spurious cache line bouncing.
100 *
101 * The return value is the rounded version of the "j" parameter.
102 */
103unsigned long __round_jiffies(unsigned long j, int cpu)
104{
105 int rem;
106 unsigned long original = j;
107
108 /*
109 * We don't want all cpus firing their timers at once hitting the
110 * same lock or cachelines, so we skew each extra cpu with an extra
111 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
112 * already did this.
113 * The skew is done by adding 3*cpunr, then round, then subtract this
114 * extra offset again.
115 */
116 j += cpu * 3;
117
118 rem = j % HZ;
119
120 /*
121 * If the target jiffie is just after a whole second (which can happen
122 * due to delays of the timer irq, long irq off times etc etc) then
123 * we should round down to the whole second, not up. Use 1/4th second
124 * as cutoff for this rounding as an extreme upper bound for this.
125 */
126 if (rem < HZ/4) /* round down */
127 j = j - rem;
128 else /* round up */
129 j = j - rem + HZ;
130
131 /* now that we have rounded, subtract the extra skew again */
132 j -= cpu * 3;
133
134 if (j <= jiffies) /* rounding ate our timeout entirely; */
135 return original;
136 return j;
137}
138EXPORT_SYMBOL_GPL(__round_jiffies);
139
140/**
141 * __round_jiffies_relative - function to round jiffies to a full second
142 * @j: the time in (relative) jiffies that should be rounded
143 * @cpu: the processor number on which the timeout will happen
144 *
145 * __round_jiffies_relative rounds a time delta in the future (in jiffies)
146 * up or down to (approximately) full seconds. This is useful for timers
147 * for which the exact time they fire does not matter too much, as long as
148 * they fire approximately every X seconds.
149 *
150 * By rounding these timers to whole seconds, all such timers will fire
151 * at the same time, rather than at various times spread out. The goal
152 * of this is to have the CPU wake up less, which saves power.
153 *
154 * The exact rounding is skewed for each processor to avoid all
155 * processors firing at the exact same time, which could lead
156 * to lock contention or spurious cache line bouncing.
157 *
158 * The return value is the rounded version of the "j" parameter.
159 */
160unsigned long __round_jiffies_relative(unsigned long j, int cpu)
161{
162 /*
163 * In theory the following code can skip a jiffy in case jiffies
164 * increments right between the addition and the later subtraction.
165 * However since the entire point of this function is to use approximate
166 * timeouts, it's entirely ok to not handle that.
167 */
168 return __round_jiffies(j + jiffies, cpu) - jiffies;
169}
170EXPORT_SYMBOL_GPL(__round_jiffies_relative);
171
172/**
173 * round_jiffies - function to round jiffies to a full second
174 * @j: the time in (absolute) jiffies that should be rounded
175 *
176 * round_jiffies rounds an absolute time in the future (in jiffies)
177 * up or down to (approximately) full seconds. This is useful for timers
178 * for which the exact time they fire does not matter too much, as long as
179 * they fire approximately every X seconds.
180 *
181 * By rounding these timers to whole seconds, all such timers will fire
182 * at the same time, rather than at various times spread out. The goal
183 * of this is to have the CPU wake up less, which saves power.
184 *
185 * The return value is the rounded version of the "j" parameter.
186 */
187unsigned long round_jiffies(unsigned long j)
188{
189 return __round_jiffies(j, raw_smp_processor_id());
190}
191EXPORT_SYMBOL_GPL(round_jiffies);
192
193/**
194 * round_jiffies_relative - function to round jiffies to a full second
195 * @j: the time in (relative) jiffies that should be rounded
196 *
197 * round_jiffies_relative rounds a time delta in the future (in jiffies)
198 * up or down to (approximately) full seconds. This is useful for timers
199 * for which the exact time they fire does not matter too much, as long as
200 * they fire approximately every X seconds.
201 *
202 * By rounding these timers to whole seconds, all such timers will fire
203 * at the same time, rather than at various times spread out. The goal
204 * of this is to have the CPU wake up less, which saves power.
205 *
206 * The return value is the rounded version of the "j" parameter.
207 */
208unsigned long round_jiffies_relative(unsigned long j)
209{
210 return __round_jiffies_relative(j, raw_smp_processor_id());
211}
212EXPORT_SYMBOL_GPL(round_jiffies_relative);
213
214
83static inline void set_running_timer(tvec_base_t *base, 215static inline void set_running_timer(tvec_base_t *base,
84 struct timer_list *timer) 216 struct timer_list *timer)
85{ 217{
@@ -714,7 +846,7 @@ static int change_clocksource(void)
714 clock = new; 846 clock = new;
715 clock->cycle_last = now; 847 clock->cycle_last = now;
716 printk(KERN_INFO "Time: %s clocksource has been installed.\n", 848 printk(KERN_INFO "Time: %s clocksource has been installed.\n",
717 clock->name); 849 clock->name);
718 return 1; 850 return 1;
719 } else if (clock->update_callback) { 851 } else if (clock->update_callback) {
720 return clock->update_callback(); 852 return clock->update_callback();
@@ -722,7 +854,10 @@ static int change_clocksource(void)
722 return 0; 854 return 0;
723} 855}
724#else 856#else
725#define change_clocksource() (0) 857static inline int change_clocksource(void)
858{
859 return 0;
860}
726#endif 861#endif
727 862
728/** 863/**
@@ -820,7 +955,8 @@ device_initcall(timekeeping_init_device);
820 * If the error is already larger, we look ahead even further 955 * If the error is already larger, we look ahead even further
821 * to compensate for late or lost adjustments. 956 * to compensate for late or lost adjustments.
822 */ 957 */
823static __always_inline int clocksource_bigadjust(s64 error, s64 *interval, s64 *offset) 958static __always_inline int clocksource_bigadjust(s64 error, s64 *interval,
959 s64 *offset)
824{ 960{
825 s64 tick_error, i; 961 s64 tick_error, i;
826 u32 look_ahead, adj; 962 u32 look_ahead, adj;
@@ -844,7 +980,8 @@ static __always_inline int clocksource_bigadjust(s64 error, s64 *interval, s64 *
844 * Now calculate the error in (1 << look_ahead) ticks, but first 980 * Now calculate the error in (1 << look_ahead) ticks, but first
845 * remove the single look ahead already included in the error. 981 * remove the single look ahead already included in the error.
846 */ 982 */
847 tick_error = current_tick_length() >> (TICK_LENGTH_SHIFT - clock->shift + 1); 983 tick_error = current_tick_length() >>
984 (TICK_LENGTH_SHIFT - clock->shift + 1);
848 tick_error -= clock->xtime_interval >> 1; 985 tick_error -= clock->xtime_interval >> 1;
849 error = ((error - tick_error) >> look_ahead) + tick_error; 986 error = ((error - tick_error) >> look_ahead) + tick_error;
850 987
@@ -896,7 +1033,8 @@ static void clocksource_adjust(struct clocksource *clock, s64 offset)
896 clock->mult += adj; 1033 clock->mult += adj;
897 clock->xtime_interval += interval; 1034 clock->xtime_interval += interval;
898 clock->xtime_nsec -= offset; 1035 clock->xtime_nsec -= offset;
899 clock->error -= (interval - offset) << (TICK_LENGTH_SHIFT - clock->shift); 1036 clock->error -= (interval - offset) <<
1037 (TICK_LENGTH_SHIFT - clock->shift);
900} 1038}
901 1039
902/** 1040/**
@@ -1008,11 +1146,15 @@ static inline void calc_load(unsigned long ticks)
1008 unsigned long active_tasks; /* fixed-point */ 1146 unsigned long active_tasks; /* fixed-point */
1009 static int count = LOAD_FREQ; 1147 static int count = LOAD_FREQ;
1010 1148
1011 active_tasks = count_active_tasks(); 1149 count -= ticks;
1012 for (count -= ticks; count < 0; count += LOAD_FREQ) { 1150 if (unlikely(count < 0)) {
1013 CALC_LOAD(avenrun[0], EXP_1, active_tasks); 1151 active_tasks = count_active_tasks();
1014 CALC_LOAD(avenrun[1], EXP_5, active_tasks); 1152 do {
1015 CALC_LOAD(avenrun[2], EXP_15, active_tasks); 1153 CALC_LOAD(avenrun[0], EXP_1, active_tasks);
1154 CALC_LOAD(avenrun[1], EXP_5, active_tasks);
1155 CALC_LOAD(avenrun[2], EXP_15, active_tasks);
1156 count += LOAD_FREQ;
1157 } while (count < 0);
1016 } 1158 }
1017} 1159}
1018 1160
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index 96f77013d3f0..baacc3691415 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -96,6 +96,15 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
96 stats->write_char = p->wchar; 96 stats->write_char = p->wchar;
97 stats->read_syscalls = p->syscr; 97 stats->read_syscalls = p->syscr;
98 stats->write_syscalls = p->syscw; 98 stats->write_syscalls = p->syscw;
99#ifdef CONFIG_TASK_IO_ACCOUNTING
100 stats->read_bytes = p->ioac.read_bytes;
101 stats->write_bytes = p->ioac.write_bytes;
102 stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes;
103#else
104 stats->read_bytes = 0;
105 stats->write_bytes = 0;
106 stats->cancelled_write_bytes = 0;
107#endif
99} 108}
100#undef KB 109#undef KB
101#undef MB 110#undef MB
diff --git a/lib/Kconfig b/lib/Kconfig
index 47b172df3e31..9b03581cdecb 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -101,4 +101,9 @@ config TEXTSEARCH_FSM
101config PLIST 101config PLIST
102 boolean 102 boolean
103 103
104config IOMAP_COPY
105 boolean
106 depends on !UML
107 default y
108
104endmenu 109endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6eccc643c32e..818e4589f718 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -47,6 +47,30 @@ config UNUSED_SYMBOLS
47 you really need it, and what the merge plan to the mainline kernel for 47 you really need it, and what the merge plan to the mainline kernel for
48 your module is. 48 your module is.
49 49
50config DEBUG_FS
51 bool "Debug Filesystem"
52 depends on SYSFS
53 help
54 debugfs is a virtual file system that kernel developers use to put
55 debugging files into. Enable this option to be able to read and
56 write to these files.
57
58 If unsure, say N.
59
60config HEADERS_CHECK
61 bool "Run 'make headers_check' when building vmlinux"
62 depends on !UML
63 help
64 This option will extract the user-visible kernel headers whenever
65 building the kernel, and will run basic sanity checks on them to
66 ensure that exported files do not attempt to include files which
67 were not exported, etc.
68
69 If you're making modifications to header files which are
70 relevant for userspace, say 'Y', and check the headers
71 exported to $(INSTALL_HDR_PATH) (usually 'usr/include' in
72 your build tree), to make sure they're suitable.
73
50config DEBUG_KERNEL 74config DEBUG_KERNEL
51 bool "Kernel debugging" 75 bool "Kernel debugging"
52 help 76 help
@@ -302,16 +326,6 @@ config DEBUG_INFO
302 326
303 If unsure, say N. 327 If unsure, say N.
304 328
305config DEBUG_FS
306 bool "Debug Filesystem"
307 depends on SYSFS
308 help
309 debugfs is a virtual file system that kernel developers use to put
310 debugging files into. Enable this option to be able to read and
311 write to these files.
312
313 If unsure, say N.
314
315config DEBUG_VM 329config DEBUG_VM
316 bool "Debug VM" 330 bool "Debug VM"
317 depends on DEBUG_KERNEL 331 depends on DEBUG_KERNEL
@@ -372,20 +386,6 @@ config FORCED_INLINING
372 become the default in the future, until then this option is there to 386 become the default in the future, until then this option is there to
373 test gcc for this. 387 test gcc for this.
374 388
375config HEADERS_CHECK
376 bool "Run 'make headers_check' when building vmlinux"
377 depends on !UML
378 help
379 This option will extract the user-visible kernel headers whenever
380 building the kernel, and will run basic sanity checks on them to
381 ensure that exported files do not attempt to include files which
382 were not exported, etc.
383
384 If you're making modifications to header files which are
385 relevant for userspace, say 'Y', and check the headers
386 exported to $(INSTALL_HDR_PATH) (usually 'usr/include' in
387 your build tree), to make sure they're suitable.
388
389config RCU_TORTURE_TEST 389config RCU_TORTURE_TEST
390 tristate "torture tests for RCU" 390 tristate "torture tests for RCU"
391 depends on DEBUG_KERNEL 391 depends on DEBUG_KERNEL
@@ -402,6 +402,7 @@ config RCU_TORTURE_TEST
402 402
403config LKDTM 403config LKDTM
404 tristate "Linux Kernel Dump Test Tool Module" 404 tristate "Linux Kernel Dump Test Tool Module"
405 depends on DEBUG_KERNEL
405 depends on KPROBES 406 depends on KPROBES
406 default n 407 default n
407 help 408 help
@@ -436,7 +437,7 @@ config FAIL_PAGE_ALLOC
436 Provide fault-injection capability for alloc_pages(). 437 Provide fault-injection capability for alloc_pages().
437 438
438config FAIL_MAKE_REQUEST 439config FAIL_MAKE_REQUEST
439 bool "Fault-injection capabilitiy for disk IO" 440 bool "Fault-injection capability for disk IO"
440 depends on FAULT_INJECTION 441 depends on FAULT_INJECTION
441 help 442 help
442 Provide fault-injection capability for disk IO. 443 Provide fault-injection capability for disk IO.
diff --git a/lib/Makefile b/lib/Makefile
index 2d6106af53cd..77b4bad7d441 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -5,20 +5,21 @@
5lib-y := ctype.o string.o vsprintf.o cmdline.o \ 5lib-y := ctype.o string.o vsprintf.o cmdline.o \
6 bust_spinlocks.o rbtree.o radix-tree.o dump_stack.o \ 6 bust_spinlocks.o rbtree.o radix-tree.o dump_stack.o \
7 idr.o div64.o int_sqrt.o bitmap.o extable.o prio_tree.o \ 7 idr.o div64.o int_sqrt.o bitmap.o extable.o prio_tree.o \
8 sha1.o irq_regs.o 8 sha1.o irq_regs.o reciprocal_div.o
9 9
10lib-$(CONFIG_MMU) += ioremap.o 10lib-$(CONFIG_MMU) += ioremap.o
11lib-$(CONFIG_SMP) += cpumask.o 11lib-$(CONFIG_SMP) += cpumask.o
12 12
13lib-y += kobject.o kref.o kobject_uevent.o klist.o 13lib-y += kobject.o kref.o kobject_uevent.o klist.o
14 14
15obj-y += sort.o parser.o halfmd4.o iomap_copy.o debug_locks.o random32.o 15obj-y += sort.o parser.o halfmd4.o debug_locks.o random32.o
16 16
17ifeq ($(CONFIG_DEBUG_KOBJECT),y) 17ifeq ($(CONFIG_DEBUG_KOBJECT),y)
18CFLAGS_kobject.o += -DDEBUG 18CFLAGS_kobject.o += -DDEBUG
19CFLAGS_kobject_uevent.o += -DDEBUG 19CFLAGS_kobject_uevent.o += -DDEBUG
20endif 20endif
21 21
22obj-$(CONFIG_IOMAP_COPY) += iomap_copy.o
22obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o 23obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
23obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o 24obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
24lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o 25lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
diff --git a/lib/bitrev.c b/lib/bitrev.c
index f4e1c49c825a..989aff73f881 100644
--- a/lib/bitrev.c
+++ b/lib/bitrev.c
@@ -2,6 +2,10 @@
2#include <linux/module.h> 2#include <linux/module.h>
3#include <linux/bitrev.h> 3#include <linux/bitrev.h>
4 4
5MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
6MODULE_DESCRIPTION("Bit ordering reversal functions");
7MODULE_LICENSE("GPL");
8
5const u8 byte_rev_table[256] = { 9const u8 byte_rev_table[256] = {
6 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 10 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
7 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0, 11 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 99fa277f9f7b..a9e4415b02dc 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -5,7 +5,6 @@
5 * 5 *
6 * (C) Copyright 1995 1996 Linus Torvalds 6 * (C) Copyright 1995 1996 Linus Torvalds
7 */ 7 */
8#include <linux/io.h>
9#include <linux/vmalloc.h> 8#include <linux/vmalloc.h>
10#include <linux/mm.h> 9#include <linux/mm.h>
11 10
diff --git a/lib/reciprocal_div.c b/lib/reciprocal_div.c
new file mode 100644
index 000000000000..6a3bd48fa2a0
--- /dev/null
+++ b/lib/reciprocal_div.c
@@ -0,0 +1,9 @@
1#include <asm/div64.h>
2#include <linux/reciprocal_div.h>
3
4u32 reciprocal_value(u32 k)
5{
6 u64 val = (1LL << 32) + (k - 1);
7 do_div(val, k);
8 return (u32)val;
9}
diff --git a/mm/filemap.c b/mm/filemap.c
index 606432f71b3a..8332c77b1bd1 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1181,8 +1181,6 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
1181 if (pos < size) { 1181 if (pos < size) {
1182 retval = generic_file_direct_IO(READ, iocb, 1182 retval = generic_file_direct_IO(READ, iocb,
1183 iov, pos, nr_segs); 1183 iov, pos, nr_segs);
1184 if (retval > 0 && !is_sync_kiocb(iocb))
1185 retval = -EIOCBQUEUED;
1186 if (retval > 0) 1184 if (retval > 0)
1187 *ppos = pos + retval; 1185 *ppos = pos + retval;
1188 } 1186 }
@@ -2047,15 +2045,14 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
2047 * Sync the fs metadata but not the minor inode changes and 2045 * Sync the fs metadata but not the minor inode changes and
2048 * of course not the data as we did direct DMA for the IO. 2046 * of course not the data as we did direct DMA for the IO.
2049 * i_mutex is held, which protects generic_osync_inode() from 2047 * i_mutex is held, which protects generic_osync_inode() from
2050 * livelocking. 2048 * livelocking. AIO O_DIRECT ops attempt to sync metadata here.
2051 */ 2049 */
2052 if (written >= 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) { 2050 if ((written >= 0 || written == -EIOCBQUEUED) &&
2051 ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2053 int err = generic_osync_inode(inode, mapping, OSYNC_METADATA); 2052 int err = generic_osync_inode(inode, mapping, OSYNC_METADATA);
2054 if (err < 0) 2053 if (err < 0)
2055 written = err; 2054 written = err;
2056 } 2055 }
2057 if (written == count && !is_sync_kiocb(iocb))
2058 written = -EIOCBQUEUED;
2059 return written; 2056 return written;
2060} 2057}
2061EXPORT_SYMBOL(generic_file_direct_write); 2058EXPORT_SYMBOL(generic_file_direct_write);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0ccc7f230252..089092d152ab 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -73,7 +73,7 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
73 73
74 for (z = zonelist->zones; *z; z++) { 74 for (z = zonelist->zones; *z; z++) {
75 nid = zone_to_nid(*z); 75 nid = zone_to_nid(*z);
76 if (cpuset_zone_allowed(*z, GFP_HIGHUSER) && 76 if (cpuset_zone_allowed_softwall(*z, GFP_HIGHUSER) &&
77 !list_empty(&hugepage_freelists[nid])) 77 !list_empty(&hugepage_freelists[nid]))
78 break; 78 break;
79 } 79 }
diff --git a/mm/memory.c b/mm/memory.c
index 4198df0dff1c..bf6100236e62 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1110,23 +1110,29 @@ static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,
1110{ 1110{
1111 pte_t *pte; 1111 pte_t *pte;
1112 spinlock_t *ptl; 1112 spinlock_t *ptl;
1113 int err = 0;
1113 1114
1114 pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); 1115 pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
1115 if (!pte) 1116 if (!pte)
1116 return -ENOMEM; 1117 return -EAGAIN;
1117 arch_enter_lazy_mmu_mode(); 1118 arch_enter_lazy_mmu_mode();
1118 do { 1119 do {
1119 struct page *page = ZERO_PAGE(addr); 1120 struct page *page = ZERO_PAGE(addr);
1120 pte_t zero_pte = pte_wrprotect(mk_pte(page, prot)); 1121 pte_t zero_pte = pte_wrprotect(mk_pte(page, prot));
1122
1123 if (unlikely(!pte_none(*pte))) {
1124 err = -EEXIST;
1125 pte++;
1126 break;
1127 }
1121 page_cache_get(page); 1128 page_cache_get(page);
1122 page_add_file_rmap(page); 1129 page_add_file_rmap(page);
1123 inc_mm_counter(mm, file_rss); 1130 inc_mm_counter(mm, file_rss);
1124 BUG_ON(!pte_none(*pte));
1125 set_pte_at(mm, addr, pte, zero_pte); 1131 set_pte_at(mm, addr, pte, zero_pte);
1126 } while (pte++, addr += PAGE_SIZE, addr != end); 1132 } while (pte++, addr += PAGE_SIZE, addr != end);
1127 arch_leave_lazy_mmu_mode(); 1133 arch_leave_lazy_mmu_mode();
1128 pte_unmap_unlock(pte - 1, ptl); 1134 pte_unmap_unlock(pte - 1, ptl);
1129 return 0; 1135 return err;
1130} 1136}
1131 1137
1132static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud, 1138static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
@@ -1134,16 +1140,18 @@ static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
1134{ 1140{
1135 pmd_t *pmd; 1141 pmd_t *pmd;
1136 unsigned long next; 1142 unsigned long next;
1143 int err;
1137 1144
1138 pmd = pmd_alloc(mm, pud, addr); 1145 pmd = pmd_alloc(mm, pud, addr);
1139 if (!pmd) 1146 if (!pmd)
1140 return -ENOMEM; 1147 return -EAGAIN;
1141 do { 1148 do {
1142 next = pmd_addr_end(addr, end); 1149 next = pmd_addr_end(addr, end);
1143 if (zeromap_pte_range(mm, pmd, addr, next, prot)) 1150 err = zeromap_pte_range(mm, pmd, addr, next, prot);
1144 return -ENOMEM; 1151 if (err)
1152 break;
1145 } while (pmd++, addr = next, addr != end); 1153 } while (pmd++, addr = next, addr != end);
1146 return 0; 1154 return err;
1147} 1155}
1148 1156
1149static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd, 1157static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
@@ -1151,16 +1159,18 @@ static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
1151{ 1159{
1152 pud_t *pud; 1160 pud_t *pud;
1153 unsigned long next; 1161 unsigned long next;
1162 int err;
1154 1163
1155 pud = pud_alloc(mm, pgd, addr); 1164 pud = pud_alloc(mm, pgd, addr);
1156 if (!pud) 1165 if (!pud)
1157 return -ENOMEM; 1166 return -EAGAIN;
1158 do { 1167 do {
1159 next = pud_addr_end(addr, end); 1168 next = pud_addr_end(addr, end);
1160 if (zeromap_pmd_range(mm, pud, addr, next, prot)) 1169 err = zeromap_pmd_range(mm, pud, addr, next, prot);
1161 return -ENOMEM; 1170 if (err)
1171 break;
1162 } while (pud++, addr = next, addr != end); 1172 } while (pud++, addr = next, addr != end);
1163 return 0; 1173 return err;
1164} 1174}
1165 1175
1166int zeromap_page_range(struct vm_area_struct *vma, 1176int zeromap_page_range(struct vm_area_struct *vma,
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 223d9ccb7d64..64cf3c214634 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -177,7 +177,7 @@ static inline int constrained_alloc(struct zonelist *zonelist, gfp_t gfp_mask)
177 nodemask_t nodes = node_online_map; 177 nodemask_t nodes = node_online_map;
178 178
179 for (z = zonelist->zones; *z; z++) 179 for (z = zonelist->zones; *z; z++)
180 if (cpuset_zone_allowed(*z, gfp_mask)) 180 if (cpuset_zone_allowed_softwall(*z, gfp_mask))
181 node_clear(zone_to_nid(*z), nodes); 181 node_clear(zone_to_nid(*z), nodes);
182 else 182 else
183 return CONSTRAINT_CPUSET; 183 return CONSTRAINT_CPUSET;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 8d9b19f239c3..237107c1b084 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -21,6 +21,7 @@
21#include <linux/writeback.h> 21#include <linux/writeback.h>
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/backing-dev.h> 23#include <linux/backing-dev.h>
24#include <linux/task_io_accounting_ops.h>
24#include <linux/blkdev.h> 25#include <linux/blkdev.h>
25#include <linux/mpage.h> 26#include <linux/mpage.h>
26#include <linux/rmap.h> 27#include <linux/rmap.h>
@@ -761,23 +762,24 @@ int __set_page_dirty_nobuffers(struct page *page)
761 struct address_space *mapping = page_mapping(page); 762 struct address_space *mapping = page_mapping(page);
762 struct address_space *mapping2; 763 struct address_space *mapping2;
763 764
764 if (mapping) { 765 if (!mapping)
765 write_lock_irq(&mapping->tree_lock); 766 return 1;
766 mapping2 = page_mapping(page); 767
767 if (mapping2) { /* Race with truncate? */ 768 write_lock_irq(&mapping->tree_lock);
768 BUG_ON(mapping2 != mapping); 769 mapping2 = page_mapping(page);
769 if (mapping_cap_account_dirty(mapping)) 770 if (mapping2) { /* Race with truncate? */
770 __inc_zone_page_state(page, 771 BUG_ON(mapping2 != mapping);
771 NR_FILE_DIRTY); 772 if (mapping_cap_account_dirty(mapping)) {
772 radix_tree_tag_set(&mapping->page_tree, 773 __inc_zone_page_state(page, NR_FILE_DIRTY);
773 page_index(page), PAGECACHE_TAG_DIRTY); 774 task_io_account_write(PAGE_CACHE_SIZE);
774 }
775 write_unlock_irq(&mapping->tree_lock);
776 if (mapping->host) {
777 /* !PageAnon && !swapper_space */
778 __mark_inode_dirty(mapping->host,
779 I_DIRTY_PAGES);
780 } 775 }
776 radix_tree_tag_set(&mapping->page_tree,
777 page_index(page), PAGECACHE_TAG_DIRTY);
778 }
779 write_unlock_irq(&mapping->tree_lock);
780 if (mapping->host) {
781 /* !PageAnon && !swapper_space */
782 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
781 } 783 }
782 return 1; 784 return 1;
783 } 785 }
@@ -851,27 +853,26 @@ int test_clear_page_dirty(struct page *page)
851 struct address_space *mapping = page_mapping(page); 853 struct address_space *mapping = page_mapping(page);
852 unsigned long flags; 854 unsigned long flags;
853 855
854 if (mapping) { 856 if (!mapping)
855 write_lock_irqsave(&mapping->tree_lock, flags); 857 return TestClearPageDirty(page);
856 if (TestClearPageDirty(page)) { 858
857 radix_tree_tag_clear(&mapping->page_tree, 859 write_lock_irqsave(&mapping->tree_lock, flags);
858 page_index(page), 860 if (TestClearPageDirty(page)) {
859 PAGECACHE_TAG_DIRTY); 861 radix_tree_tag_clear(&mapping->page_tree,
860 write_unlock_irqrestore(&mapping->tree_lock, flags); 862 page_index(page), PAGECACHE_TAG_DIRTY);
861 /*
862 * We can continue to use `mapping' here because the
863 * page is locked, which pins the address_space
864 */
865 if (mapping_cap_account_dirty(mapping)) {
866 page_mkclean(page);
867 dec_zone_page_state(page, NR_FILE_DIRTY);
868 }
869 return 1;
870 }
871 write_unlock_irqrestore(&mapping->tree_lock, flags); 863 write_unlock_irqrestore(&mapping->tree_lock, flags);
872 return 0; 864 /*
865 * We can continue to use `mapping' here because the
866 * page is locked, which pins the address_space
867 */
868 if (mapping_cap_account_dirty(mapping)) {
869 page_mkclean(page);
870 dec_zone_page_state(page, NR_FILE_DIRTY);
871 }
872 return 1;
873 } 873 }
874 return TestClearPageDirty(page); 874 write_unlock_irqrestore(&mapping->tree_lock, flags);
875 return 0;
875} 876}
876EXPORT_SYMBOL(test_clear_page_dirty); 877EXPORT_SYMBOL(test_clear_page_dirty);
877 878
@@ -893,17 +894,17 @@ int clear_page_dirty_for_io(struct page *page)
893{ 894{
894 struct address_space *mapping = page_mapping(page); 895 struct address_space *mapping = page_mapping(page);
895 896
896 if (mapping) { 897 if (!mapping)
897 if (TestClearPageDirty(page)) { 898 return TestClearPageDirty(page);
898 if (mapping_cap_account_dirty(mapping)) { 899
899 page_mkclean(page); 900 if (TestClearPageDirty(page)) {
900 dec_zone_page_state(page, NR_FILE_DIRTY); 901 if (mapping_cap_account_dirty(mapping)) {
901 } 902 page_mkclean(page);
902 return 1; 903 dec_zone_page_state(page, NR_FILE_DIRTY);
903 } 904 }
904 return 0; 905 return 1;
905 } 906 }
906 return TestClearPageDirty(page); 907 return 0;
907} 908}
908EXPORT_SYMBOL(clear_page_dirty_for_io); 909EXPORT_SYMBOL(clear_page_dirty_for_io);
909 910
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e6b17b2989e0..8c1a116875bc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1162,7 +1162,7 @@ zonelist_scan:
1162 zone->zone_pgdat != zonelist->zones[0]->zone_pgdat)) 1162 zone->zone_pgdat != zonelist->zones[0]->zone_pgdat))
1163 break; 1163 break;
1164 if ((alloc_flags & ALLOC_CPUSET) && 1164 if ((alloc_flags & ALLOC_CPUSET) &&
1165 !cpuset_zone_allowed(zone, gfp_mask)) 1165 !cpuset_zone_allowed_softwall(zone, gfp_mask))
1166 goto try_next_zone; 1166 goto try_next_zone;
1167 1167
1168 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) { 1168 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
diff --git a/mm/readahead.c b/mm/readahead.c
index c0df5ed05f62..0f539e8e827a 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -13,6 +13,7 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/blkdev.h> 14#include <linux/blkdev.h>
15#include <linux/backing-dev.h> 15#include <linux/backing-dev.h>
16#include <linux/task_io_accounting_ops.h>
16#include <linux/pagevec.h> 17#include <linux/pagevec.h>
17 18
18void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page) 19void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
@@ -151,6 +152,7 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
151 put_pages_list(pages); 152 put_pages_list(pages);
152 break; 153 break;
153 } 154 }
155 task_io_account_read(PAGE_CACHE_SIZE);
154 } 156 }
155 pagevec_lru_add(&lru_pvec); 157 pagevec_lru_add(&lru_pvec);
156 return ret; 158 return ret;
diff --git a/mm/slab.c b/mm/slab.c
index 56af694c9e6a..909975f6e090 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -109,6 +109,7 @@
109#include <linux/mutex.h> 109#include <linux/mutex.h>
110#include <linux/fault-inject.h> 110#include <linux/fault-inject.h>
111#include <linux/rtmutex.h> 111#include <linux/rtmutex.h>
112#include <linux/reciprocal_div.h>
112 113
113#include <asm/cacheflush.h> 114#include <asm/cacheflush.h>
114#include <asm/tlbflush.h> 115#include <asm/tlbflush.h>
@@ -386,6 +387,7 @@ struct kmem_cache {
386 unsigned int shared; 387 unsigned int shared;
387 388
388 unsigned int buffer_size; 389 unsigned int buffer_size;
390 u32 reciprocal_buffer_size;
389/* 3) touched by every alloc & free from the backend */ 391/* 3) touched by every alloc & free from the backend */
390 struct kmem_list3 *nodelists[MAX_NUMNODES]; 392 struct kmem_list3 *nodelists[MAX_NUMNODES];
391 393
@@ -627,10 +629,17 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
627 return slab->s_mem + cache->buffer_size * idx; 629 return slab->s_mem + cache->buffer_size * idx;
628} 630}
629 631
630static inline unsigned int obj_to_index(struct kmem_cache *cache, 632/*
631 struct slab *slab, void *obj) 633 * We want to avoid an expensive divide : (offset / cache->buffer_size)
634 * Using the fact that buffer_size is a constant for a particular cache,
635 * we can replace (offset / cache->buffer_size) by
636 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
637 */
638static inline unsigned int obj_to_index(const struct kmem_cache *cache,
639 const struct slab *slab, void *obj)
632{ 640{
633 return (unsigned)(obj - slab->s_mem) / cache->buffer_size; 641 u32 offset = (obj - slab->s_mem);
642 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
634} 643}
635 644
636/* 645/*
@@ -946,7 +955,8 @@ static void __devinit start_cpu_timer(int cpu)
946 if (keventd_up() && reap_work->work.func == NULL) { 955 if (keventd_up() && reap_work->work.func == NULL) {
947 init_reap_node(cpu); 956 init_reap_node(cpu);
948 INIT_DELAYED_WORK(reap_work, cache_reap); 957 INIT_DELAYED_WORK(reap_work, cache_reap);
949 schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu); 958 schedule_delayed_work_on(cpu, reap_work,
959 __round_jiffies_relative(HZ, cpu));
950 } 960 }
951} 961}
952 962
@@ -1426,6 +1436,8 @@ void __init kmem_cache_init(void)
1426 1436
1427 cache_cache.buffer_size = ALIGN(cache_cache.buffer_size, 1437 cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
1428 cache_line_size()); 1438 cache_line_size());
1439 cache_cache.reciprocal_buffer_size =
1440 reciprocal_value(cache_cache.buffer_size);
1429 1441
1430 for (order = 0; order < MAX_ORDER; order++) { 1442 for (order = 0; order < MAX_ORDER; order++) {
1431 cache_estimate(order, cache_cache.buffer_size, 1443 cache_estimate(order, cache_cache.buffer_size,
@@ -2312,6 +2324,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
2312 if (flags & SLAB_CACHE_DMA) 2324 if (flags & SLAB_CACHE_DMA)
2313 cachep->gfpflags |= GFP_DMA; 2325 cachep->gfpflags |= GFP_DMA;
2314 cachep->buffer_size = size; 2326 cachep->buffer_size = size;
2327 cachep->reciprocal_buffer_size = reciprocal_value(size);
2315 2328
2316 if (flags & CFLGS_OFF_SLAB) { 2329 if (flags & CFLGS_OFF_SLAB) {
2317 cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u); 2330 cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
@@ -3251,6 +3264,7 @@ void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3251 struct zone **z; 3264 struct zone **z;
3252 void *obj = NULL; 3265 void *obj = NULL;
3253 int nid; 3266 int nid;
3267 gfp_t local_flags = (flags & GFP_LEVEL_MASK);
3254 3268
3255retry: 3269retry:
3256 /* 3270 /*
@@ -3260,7 +3274,7 @@ retry:
3260 for (z = zonelist->zones; *z && !obj; z++) { 3274 for (z = zonelist->zones; *z && !obj; z++) {
3261 nid = zone_to_nid(*z); 3275 nid = zone_to_nid(*z);
3262 3276
3263 if (cpuset_zone_allowed(*z, flags | __GFP_HARDWALL) && 3277 if (cpuset_zone_allowed_hardwall(*z, flags) &&
3264 cache->nodelists[nid] && 3278 cache->nodelists[nid] &&
3265 cache->nodelists[nid]->free_objects) 3279 cache->nodelists[nid]->free_objects)
3266 obj = ____cache_alloc_node(cache, 3280 obj = ____cache_alloc_node(cache,
@@ -3274,7 +3288,12 @@ retry:
3274 * We may trigger various forms of reclaim on the allowed 3288 * We may trigger various forms of reclaim on the allowed
3275 * set and go into memory reserves if necessary. 3289 * set and go into memory reserves if necessary.
3276 */ 3290 */
3291 if (local_flags & __GFP_WAIT)
3292 local_irq_enable();
3293 kmem_flagcheck(cache, flags);
3277 obj = kmem_getpages(cache, flags, -1); 3294 obj = kmem_getpages(cache, flags, -1);
3295 if (local_flags & __GFP_WAIT)
3296 local_irq_disable();
3278 if (obj) { 3297 if (obj) {
3279 /* 3298 /*
3280 * Insert into the appropriate per node queues 3299 * Insert into the appropriate per node queues
@@ -3534,7 +3553,7 @@ EXPORT_SYMBOL(kmem_cache_zalloc);
3534 * 3553 *
3535 * Currently only used for dentry validation. 3554 * Currently only used for dentry validation.
3536 */ 3555 */
3537int fastcall kmem_ptr_validate(struct kmem_cache *cachep, void *ptr) 3556int fastcall kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr)
3538{ 3557{
3539 unsigned long addr = (unsigned long)ptr; 3558 unsigned long addr = (unsigned long)ptr;
3540 unsigned long min_addr = PAGE_OFFSET; 3559 unsigned long min_addr = PAGE_OFFSET;
@@ -4006,7 +4025,7 @@ static void cache_reap(struct work_struct *unused)
4006 if (!mutex_trylock(&cache_chain_mutex)) { 4025 if (!mutex_trylock(&cache_chain_mutex)) {
4007 /* Give up. Setup the next iteration. */ 4026 /* Give up. Setup the next iteration. */
4008 schedule_delayed_work(&__get_cpu_var(reap_work), 4027 schedule_delayed_work(&__get_cpu_var(reap_work),
4009 REAPTIMEOUT_CPUC); 4028 round_jiffies_relative(REAPTIMEOUT_CPUC));
4010 return; 4029 return;
4011 } 4030 }
4012 4031
@@ -4052,7 +4071,8 @@ next:
4052 next_reap_node(); 4071 next_reap_node();
4053 refresh_cpu_vm_stats(smp_processor_id()); 4072 refresh_cpu_vm_stats(smp_processor_id());
4054 /* Set up the next iteration */ 4073 /* Set up the next iteration */
4055 schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC); 4074 schedule_delayed_work(&__get_cpu_var(reap_work),
4075 round_jiffies_relative(REAPTIMEOUT_CPUC));
4056} 4076}
4057 4077
4058#ifdef CONFIG_PROC_FS 4078#ifdef CONFIG_PROC_FS
diff --git a/mm/slob.c b/mm/slob.c
index 542394184a58..2e9236e10ed1 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -157,7 +157,7 @@ static int fastcall find_order(int size)
157 return order; 157 return order;
158} 158}
159 159
160void *kmalloc(size_t size, gfp_t gfp) 160void *__kmalloc(size_t size, gfp_t gfp)
161{ 161{
162 slob_t *m; 162 slob_t *m;
163 bigblock_t *bb; 163 bigblock_t *bb;
@@ -186,8 +186,7 @@ void *kmalloc(size_t size, gfp_t gfp)
186 slob_free(bb, sizeof(bigblock_t)); 186 slob_free(bb, sizeof(bigblock_t));
187 return 0; 187 return 0;
188} 188}
189 189EXPORT_SYMBOL(__kmalloc);
190EXPORT_SYMBOL(kmalloc);
191 190
192void kfree(const void *block) 191void kfree(const void *block)
193{ 192{
@@ -329,6 +328,17 @@ EXPORT_SYMBOL(kmem_cache_name);
329static struct timer_list slob_timer = TIMER_INITIALIZER( 328static struct timer_list slob_timer = TIMER_INITIALIZER(
330 (void (*)(unsigned long))kmem_cache_init, 0, 0); 329 (void (*)(unsigned long))kmem_cache_init, 0, 0);
331 330
331int kmem_cache_shrink(struct kmem_cache *d)
332{
333 return 0;
334}
335EXPORT_SYMBOL(kmem_cache_shrink);
336
337int kmem_ptr_validate(struct kmem_cache *a, const void *b)
338{
339 return 0;
340}
341
332void kmem_cache_init(void) 342void kmem_cache_init(void)
333{ 343{
334 void *p = slob_alloc(PAGE_SIZE, 0, PAGE_SIZE-1); 344 void *p = slob_alloc(PAGE_SIZE, 0, PAGE_SIZE-1);
diff --git a/mm/truncate.c b/mm/truncate.c
index e07b1e682c38..9bfb8e853860 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -13,6 +13,7 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/pagemap.h> 14#include <linux/pagemap.h>
15#include <linux/pagevec.h> 15#include <linux/pagevec.h>
16#include <linux/task_io_accounting_ops.h>
16#include <linux/buffer_head.h> /* grr. try_to_release_page, 17#include <linux/buffer_head.h> /* grr. try_to_release_page,
17 do_invalidatepage */ 18 do_invalidatepage */
18 19
@@ -69,7 +70,8 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
69 if (PagePrivate(page)) 70 if (PagePrivate(page))
70 do_invalidatepage(page, 0); 71 do_invalidatepage(page, 0);
71 72
72 clear_page_dirty(page); 73 if (test_clear_page_dirty(page))
74 task_io_account_cancelled_write(PAGE_CACHE_SIZE);
73 ClearPageUptodate(page); 75 ClearPageUptodate(page);
74 ClearPageMappedToDisk(page); 76 ClearPageMappedToDisk(page);
75 remove_from_page_cache(page); 77 remove_from_page_cache(page);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 093f5fe6dd77..e9813b06c7a3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -984,7 +984,7 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
984 if (!populated_zone(zone)) 984 if (!populated_zone(zone))
985 continue; 985 continue;
986 986
987 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) 987 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
988 continue; 988 continue;
989 989
990 note_zone_scanning_priority(zone, priority); 990 note_zone_scanning_priority(zone, priority);
@@ -1034,7 +1034,7 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
1034 for (i = 0; zones[i] != NULL; i++) { 1034 for (i = 0; zones[i] != NULL; i++) {
1035 struct zone *zone = zones[i]; 1035 struct zone *zone = zones[i];
1036 1036
1037 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) 1037 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1038 continue; 1038 continue;
1039 1039
1040 lru_pages += zone->nr_active + zone->nr_inactive; 1040 lru_pages += zone->nr_active + zone->nr_inactive;
@@ -1089,7 +1089,7 @@ out:
1089 for (i = 0; zones[i] != 0; i++) { 1089 for (i = 0; zones[i] != 0; i++) {
1090 struct zone *zone = zones[i]; 1090 struct zone *zone = zones[i];
1091 1091
1092 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) 1092 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1093 continue; 1093 continue;
1094 1094
1095 zone->prev_priority = priority; 1095 zone->prev_priority = priority;
@@ -1354,7 +1354,7 @@ void wakeup_kswapd(struct zone *zone, int order)
1354 return; 1354 return;
1355 if (pgdat->kswapd_max_order < order) 1355 if (pgdat->kswapd_max_order < order)
1356 pgdat->kswapd_max_order = order; 1356 pgdat->kswapd_max_order = order;
1357 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) 1357 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1358 return; 1358 return;
1359 if (!waitqueue_active(&pgdat->kswapd_wait)) 1359 if (!waitqueue_active(&pgdat->kswapd_wait))
1360 return; 1360 return;
diff --git a/net/ax25/ax25_addr.c b/net/ax25/ax25_addr.c
index 21a0616152fc..97a49c79c605 100644
--- a/net/ax25/ax25_addr.c
+++ b/net/ax25/ax25_addr.c
@@ -83,7 +83,7 @@ EXPORT_SYMBOL(ax2asc);
83 */ 83 */
84void asc2ax(ax25_address *addr, const char *callsign) 84void asc2ax(ax25_address *addr, const char *callsign)
85{ 85{
86 char *s; 86 const char *s;
87 int n; 87 int n;
88 88
89 for (s = callsign, n = 0; n < 6; n++) { 89 for (s = callsign, n = 0; n < 6; n++) {
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 711a085eca5b..dbf98c49dbaa 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -123,10 +123,10 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
123 if (flt->opcode && 123 if (flt->opcode &&
124 ((evt == HCI_EV_CMD_COMPLETE && 124 ((evt == HCI_EV_CMD_COMPLETE &&
125 flt->opcode != 125 flt->opcode !=
126 get_unaligned((__u16 *)(skb->data + 3))) || 126 get_unaligned((__le16 *)(skb->data + 3))) ||
127 (evt == HCI_EV_CMD_STATUS && 127 (evt == HCI_EV_CMD_STATUS &&
128 flt->opcode != 128 flt->opcode !=
129 get_unaligned((__u16 *)(skb->data + 4))))) 129 get_unaligned((__le16 *)(skb->data + 4)))))
130 continue; 130 continue;
131 } 131 }
132 132
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 8a271285f2f3..823215d8e90f 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -55,6 +55,7 @@ static void queue_process(struct work_struct *work)
55 struct netpoll_info *npinfo = 55 struct netpoll_info *npinfo =
56 container_of(work, struct netpoll_info, tx_work.work); 56 container_of(work, struct netpoll_info, tx_work.work);
57 struct sk_buff *skb; 57 struct sk_buff *skb;
58 unsigned long flags;
58 59
59 while ((skb = skb_dequeue(&npinfo->txq))) { 60 while ((skb = skb_dequeue(&npinfo->txq))) {
60 struct net_device *dev = skb->dev; 61 struct net_device *dev = skb->dev;
@@ -64,15 +65,19 @@ static void queue_process(struct work_struct *work)
64 continue; 65 continue;
65 } 66 }
66 67
67 netif_tx_lock_bh(dev); 68 local_irq_save(flags);
69 netif_tx_lock(dev);
68 if (netif_queue_stopped(dev) || 70 if (netif_queue_stopped(dev) ||
69 dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) { 71 dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
70 skb_queue_head(&npinfo->txq, skb); 72 skb_queue_head(&npinfo->txq, skb);
71 netif_tx_unlock_bh(dev); 73 netif_tx_unlock(dev);
74 local_irq_restore(flags);
72 75
73 schedule_delayed_work(&npinfo->tx_work, HZ/10); 76 schedule_delayed_work(&npinfo->tx_work, HZ/10);
74 return; 77 return;
75 } 78 }
79 netif_tx_unlock(dev);
80 local_irq_restore(flags);
76 } 81 }
77} 82}
78 83
@@ -242,22 +247,28 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
242 247
243 /* don't get messages out of order, and no recursion */ 248 /* don't get messages out of order, and no recursion */
244 if (skb_queue_len(&npinfo->txq) == 0 && 249 if (skb_queue_len(&npinfo->txq) == 0 &&
245 npinfo->poll_owner != smp_processor_id() && 250 npinfo->poll_owner != smp_processor_id()) {
246 netif_tx_trylock(dev)) { 251 unsigned long flags;
247 /* try until next clock tick */
248 for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; tries > 0; --tries) {
249 if (!netif_queue_stopped(dev))
250 status = dev->hard_start_xmit(skb, dev);
251 252
252 if (status == NETDEV_TX_OK) 253 local_irq_save(flags);
253 break; 254 if (netif_tx_trylock(dev)) {
255 /* try until next clock tick */
256 for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
257 tries > 0; --tries) {
258 if (!netif_queue_stopped(dev))
259 status = dev->hard_start_xmit(skb, dev);
254 260
255 /* tickle device maybe there is some cleanup */ 261 if (status == NETDEV_TX_OK)
256 netpoll_poll(np); 262 break;
263
264 /* tickle device maybe there is some cleanup */
265 netpoll_poll(np);
257 266
258 udelay(USEC_PER_POLL); 267 udelay(USEC_PER_POLL);
268 }
269 netif_tx_unlock(dev);
259 } 270 }
260 netif_tx_unlock(dev); 271 local_irq_restore(flags);
261 } 272 }
262 273
263 if (status != NETDEV_TX_OK) { 274 if (status != NETDEV_TX_OK) {
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index 1f4727ddbdbf..a086c6312d3b 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -223,7 +223,7 @@ static inline int dccp_ackvec_set_buf_head_state(struct dccp_ackvec *av,
223 gap = -new_head; 223 gap = -new_head;
224 } 224 }
225 new_head += DCCP_MAX_ACKVEC_LEN; 225 new_head += DCCP_MAX_ACKVEC_LEN;
226 } 226 }
227 227
228 av->dccpav_buf_head = new_head; 228 av->dccpav_buf_head = new_head;
229 229
@@ -336,7 +336,7 @@ out_duplicate:
336void dccp_ackvector_print(const u64 ackno, const unsigned char *vector, int len) 336void dccp_ackvector_print(const u64 ackno, const unsigned char *vector, int len)
337{ 337{
338 dccp_pr_debug_cat("ACK vector len=%d, ackno=%llu |", len, 338 dccp_pr_debug_cat("ACK vector len=%d, ackno=%llu |", len,
339 (unsigned long long)ackno); 339 (unsigned long long)ackno);
340 340
341 while (len--) { 341 while (len--) {
342 const u8 state = (*vector & DCCP_ACKVEC_STATE_MASK) >> 6; 342 const u8 state = (*vector & DCCP_ACKVEC_STATE_MASK) >> 6;
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index bcc2d12ae81c..c65cb2453e43 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -43,8 +43,6 @@ struct ccid_operations {
43 unsigned char* value); 43 unsigned char* value);
44 int (*ccid_hc_rx_insert_options)(struct sock *sk, 44 int (*ccid_hc_rx_insert_options)(struct sock *sk,
45 struct sk_buff *skb); 45 struct sk_buff *skb);
46 int (*ccid_hc_tx_insert_options)(struct sock *sk,
47 struct sk_buff *skb);
48 void (*ccid_hc_tx_packet_recv)(struct sock *sk, 46 void (*ccid_hc_tx_packet_recv)(struct sock *sk,
49 struct sk_buff *skb); 47 struct sk_buff *skb);
50 int (*ccid_hc_tx_parse_options)(struct sock *sk, 48 int (*ccid_hc_tx_parse_options)(struct sock *sk,
@@ -146,14 +144,6 @@ static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk,
146 return rc; 144 return rc;
147} 145}
148 146
149static inline int ccid_hc_tx_insert_options(struct ccid *ccid, struct sock *sk,
150 struct sk_buff *skb)
151{
152 if (ccid->ccid_ops->ccid_hc_tx_insert_options != NULL)
153 return ccid->ccid_ops->ccid_hc_tx_insert_options(sk, skb);
154 return 0;
155}
156
157static inline int ccid_hc_rx_insert_options(struct ccid *ccid, struct sock *sk, 147static inline int ccid_hc_rx_insert_options(struct ccid *ccid, struct sock *sk,
158 struct sk_buff *skb) 148 struct sk_buff *skb)
159{ 149{
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 2555be8f4790..fd38b05d6f79 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -351,7 +351,7 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
351 351
352 while (seqp != hctx->ccid2hctx_seqh) { 352 while (seqp != hctx->ccid2hctx_seqh) {
353 ccid2_pr_debug("out seq=%llu acked=%d time=%lu\n", 353 ccid2_pr_debug("out seq=%llu acked=%d time=%lu\n",
354 (unsigned long long)seqp->ccid2s_seq, 354 (unsigned long long)seqp->ccid2s_seq,
355 seqp->ccid2s_acked, seqp->ccid2s_sent); 355 seqp->ccid2s_acked, seqp->ccid2s_sent);
356 seqp = seqp->ccid2s_next; 356 seqp = seqp->ccid2s_next;
357 } 357 }
@@ -473,7 +473,7 @@ static inline void ccid2_new_ack(struct sock *sk,
473 /* first measurement */ 473 /* first measurement */
474 if (hctx->ccid2hctx_srtt == -1) { 474 if (hctx->ccid2hctx_srtt == -1) {
475 ccid2_pr_debug("R: %lu Time=%lu seq=%llu\n", 475 ccid2_pr_debug("R: %lu Time=%lu seq=%llu\n",
476 r, jiffies, 476 r, jiffies,
477 (unsigned long long)seqp->ccid2s_seq); 477 (unsigned long long)seqp->ccid2s_seq);
478 ccid2_change_srtt(hctx, r); 478 ccid2_change_srtt(hctx, r);
479 hctx->ccid2hctx_rttvar = r >> 1; 479 hctx->ccid2hctx_rttvar = r >> 1;
@@ -518,8 +518,8 @@ static inline void ccid2_new_ack(struct sock *sk,
518 hctx->ccid2hctx_lastrtt = jiffies; 518 hctx->ccid2hctx_lastrtt = jiffies;
519 519
520 ccid2_pr_debug("srtt: %ld rttvar: %ld rto: %ld (HZ=%d) R=%lu\n", 520 ccid2_pr_debug("srtt: %ld rttvar: %ld rto: %ld (HZ=%d) R=%lu\n",
521 hctx->ccid2hctx_srtt, hctx->ccid2hctx_rttvar, 521 hctx->ccid2hctx_srtt, hctx->ccid2hctx_rttvar,
522 hctx->ccid2hctx_rto, HZ, r); 522 hctx->ccid2hctx_rto, HZ, r);
523 hctx->ccid2hctx_sent = 0; 523 hctx->ccid2hctx_sent = 0;
524 } 524 }
525 525
@@ -667,9 +667,9 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
667 /* new packet received or marked */ 667 /* new packet received or marked */
668 if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED && 668 if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED &&
669 !seqp->ccid2s_acked) { 669 !seqp->ccid2s_acked) {
670 if (state == 670 if (state ==
671 DCCP_ACKVEC_STATE_ECN_MARKED) { 671 DCCP_ACKVEC_STATE_ECN_MARKED) {
672 ccid2_congestion_event(hctx, 672 ccid2_congestion_event(hctx,
673 seqp); 673 seqp);
674 } else 674 } else
675 ccid2_new_ack(sk, seqp, 675 ccid2_new_ack(sk, seqp,
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 66a27b9688ca..fa6b75372ed7 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -41,27 +41,6 @@
41#include "lib/tfrc.h" 41#include "lib/tfrc.h"
42#include "ccid3.h" 42#include "ccid3.h"
43 43
44/*
45 * Reason for maths here is to avoid 32 bit overflow when a is big.
46 * With this we get close to the limit.
47 */
48static u32 usecs_div(const u32 a, const u32 b)
49{
50 const u32 div = a < (UINT_MAX / (USEC_PER_SEC / 10)) ? 10 :
51 a < (UINT_MAX / (USEC_PER_SEC / 50)) ? 50 :
52 a < (UINT_MAX / (USEC_PER_SEC / 100)) ? 100 :
53 a < (UINT_MAX / (USEC_PER_SEC / 500)) ? 500 :
54 a < (UINT_MAX / (USEC_PER_SEC / 1000)) ? 1000 :
55 a < (UINT_MAX / (USEC_PER_SEC / 5000)) ? 5000 :
56 a < (UINT_MAX / (USEC_PER_SEC / 10000)) ? 10000 :
57 a < (UINT_MAX / (USEC_PER_SEC / 50000)) ? 50000 :
58 100000;
59 const u32 tmp = a * (USEC_PER_SEC / div);
60 return (b >= 2 * div) ? tmp / (b / div) : tmp;
61}
62
63
64
65#ifdef CONFIG_IP_DCCP_CCID3_DEBUG 44#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
66static int ccid3_debug; 45static int ccid3_debug;
67#define ccid3_pr_debug(format, a...) DCCP_PR_DEBUG(ccid3_debug, format, ##a) 46#define ccid3_pr_debug(format, a...) DCCP_PR_DEBUG(ccid3_debug, format, ##a)
@@ -108,8 +87,9 @@ static inline void ccid3_update_send_time(struct ccid3_hc_tx_sock *hctx)
108{ 87{
109 timeval_sub_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi); 88 timeval_sub_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi);
110 89
111 /* Calculate new t_ipi (inter packet interval) by t_ipi = s / X_inst */ 90 /* Calculate new t_ipi = s / X_inst (X_inst is in 64 * bytes/second) */
112 hctx->ccid3hctx_t_ipi = usecs_div(hctx->ccid3hctx_s, hctx->ccid3hctx_x); 91 hctx->ccid3hctx_t_ipi = scaled_div(hctx->ccid3hctx_s,
92 hctx->ccid3hctx_x >> 6);
113 93
114 /* Update nominal send time with regard to the new t_ipi */ 94 /* Update nominal send time with regard to the new t_ipi */
115 timeval_add_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi); 95 timeval_add_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi);
@@ -128,40 +108,44 @@ static inline void ccid3_update_send_time(struct ccid3_hc_tx_sock *hctx)
128 * X = max(min(2 * X, 2 * X_recv), s / R); 108 * X = max(min(2 * X, 2 * X_recv), s / R);
129 * tld = now; 109 * tld = now;
130 * 110 *
111 * Note: X and X_recv are both stored in units of 64 * bytes/second, to support
112 * fine-grained resolution of sending rates. This requires scaling by 2^6
113 * throughout the code. Only X_calc is unscaled (in bytes/second).
114 *
131 * If X has changed, we also update the scheduled send time t_now, 115 * If X has changed, we also update the scheduled send time t_now,
132 * the inter-packet interval t_ipi, and the delta value. 116 * the inter-packet interval t_ipi, and the delta value.
133 */ 117 */
134static void ccid3_hc_tx_update_x(struct sock *sk, struct timeval *now) 118static void ccid3_hc_tx_update_x(struct sock *sk, struct timeval *now)
135 119
136{ 120{
137 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); 121 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
138 const __u32 old_x = hctx->ccid3hctx_x; 122 const __u64 old_x = hctx->ccid3hctx_x;
139 123
140 if (hctx->ccid3hctx_p > 0) { 124 if (hctx->ccid3hctx_p > 0) {
141 hctx->ccid3hctx_x_calc = tfrc_calc_x(hctx->ccid3hctx_s, 125
142 hctx->ccid3hctx_rtt, 126 hctx->ccid3hctx_x = min(((__u64)hctx->ccid3hctx_x_calc) << 6,
143 hctx->ccid3hctx_p); 127 hctx->ccid3hctx_x_recv * 2);
144 hctx->ccid3hctx_x = max_t(u32, min(hctx->ccid3hctx_x_calc, 128 hctx->ccid3hctx_x = max(hctx->ccid3hctx_x,
145 hctx->ccid3hctx_x_recv * 2), 129 (((__u64)hctx->ccid3hctx_s) << 6) /
146 hctx->ccid3hctx_s / TFRC_T_MBI); 130 TFRC_T_MBI);
147 131
148 } else if (timeval_delta(now, &hctx->ccid3hctx_t_ld) >= 132 } else if (timeval_delta(now, &hctx->ccid3hctx_t_ld) -
149 hctx->ccid3hctx_rtt) { 133 (suseconds_t)hctx->ccid3hctx_rtt >= 0) {
150 hctx->ccid3hctx_x = max(min(hctx->ccid3hctx_x_recv, 134
151 hctx->ccid3hctx_x ) * 2, 135 hctx->ccid3hctx_x =
152 usecs_div(hctx->ccid3hctx_s, 136 max(2 * min(hctx->ccid3hctx_x, hctx->ccid3hctx_x_recv),
153 hctx->ccid3hctx_rtt) ); 137 scaled_div(((__u64)hctx->ccid3hctx_s) << 6,
138 hctx->ccid3hctx_rtt));
154 hctx->ccid3hctx_t_ld = *now; 139 hctx->ccid3hctx_t_ld = *now;
155 } else 140 }
156 ccid3_pr_debug("Not changing X\n");
157 141
158 if (hctx->ccid3hctx_x != old_x) 142 if (hctx->ccid3hctx_x != old_x)
159 ccid3_update_send_time(hctx); 143 ccid3_update_send_time(hctx);
160} 144}
161 145
162/* 146/*
163 * Track the mean packet size `s' (cf. RFC 4342, 5.3 and RFC 3448, 4.1) 147 * Track the mean packet size `s' (cf. RFC 4342, 5.3 and RFC 3448, 4.1)
164 * @len: DCCP packet payload size in bytes 148 * @len: DCCP packet payload size in bytes
165 */ 149 */
166static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hctx, int len) 150static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hctx, int len)
167{ 151{
@@ -178,6 +162,33 @@ static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hctx, int len)
178 */ 162 */
179} 163}
180 164
165/*
166 * Update Window Counter using the algorithm from [RFC 4342, 8.1].
167 * The algorithm is not applicable if RTT < 4 microseconds.
168 */
169static inline void ccid3_hc_tx_update_win_count(struct ccid3_hc_tx_sock *hctx,
170 struct timeval *now)
171{
172 suseconds_t delta;
173 u32 quarter_rtts;
174
175 if (unlikely(hctx->ccid3hctx_rtt < 4)) /* avoid divide-by-zero */
176 return;
177
178 delta = timeval_delta(now, &hctx->ccid3hctx_t_last_win_count);
179 DCCP_BUG_ON(delta < 0);
180
181 quarter_rtts = (u32)delta / (hctx->ccid3hctx_rtt / 4);
182
183 if (quarter_rtts > 0) {
184 hctx->ccid3hctx_t_last_win_count = *now;
185 hctx->ccid3hctx_last_win_count += min_t(u32, quarter_rtts, 5);
186 hctx->ccid3hctx_last_win_count &= 0xF; /* mod 16 */
187
188 ccid3_pr_debug("now at %#X\n", hctx->ccid3hctx_last_win_count);
189 }
190}
191
181static void ccid3_hc_tx_no_feedback_timer(unsigned long data) 192static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
182{ 193{
183 struct sock *sk = (struct sock *)data; 194 struct sock *sk = (struct sock *)data;
@@ -191,20 +202,20 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
191 goto restart_timer; 202 goto restart_timer;
192 } 203 }
193 204
194 ccid3_pr_debug("%s, sk=%p, state=%s\n", dccp_role(sk), sk, 205 ccid3_pr_debug("%s(%p, state=%s) - entry \n", dccp_role(sk), sk,
195 ccid3_tx_state_name(hctx->ccid3hctx_state)); 206 ccid3_tx_state_name(hctx->ccid3hctx_state));
196 207
197 switch (hctx->ccid3hctx_state) { 208 switch (hctx->ccid3hctx_state) {
198 case TFRC_SSTATE_NO_FBACK: 209 case TFRC_SSTATE_NO_FBACK:
199 /* RFC 3448, 4.4: Halve send rate directly */ 210 /* RFC 3448, 4.4: Halve send rate directly */
200 hctx->ccid3hctx_x = min_t(u32, hctx->ccid3hctx_x / 2, 211 hctx->ccid3hctx_x = max(hctx->ccid3hctx_x / 2,
201 hctx->ccid3hctx_s / TFRC_T_MBI); 212 (((__u64)hctx->ccid3hctx_s) << 6) /
213 TFRC_T_MBI);
202 214
203 ccid3_pr_debug("%s, sk=%p, state=%s, updated tx rate to %d " 215 ccid3_pr_debug("%s(%p, state=%s), updated tx rate to %u "
204 "bytes/s\n", 216 "bytes/s\n", dccp_role(sk), sk,
205 dccp_role(sk), sk,
206 ccid3_tx_state_name(hctx->ccid3hctx_state), 217 ccid3_tx_state_name(hctx->ccid3hctx_state),
207 hctx->ccid3hctx_x); 218 (unsigned)(hctx->ccid3hctx_x >> 6));
208 /* The value of R is still undefined and so we can not recompute 219 /* The value of R is still undefined and so we can not recompute
209 * the timout value. Keep initial value as per [RFC 4342, 5]. */ 220 * the timout value. Keep initial value as per [RFC 4342, 5]. */
210 t_nfb = TFRC_INITIAL_TIMEOUT; 221 t_nfb = TFRC_INITIAL_TIMEOUT;
@@ -213,34 +224,46 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
213 case TFRC_SSTATE_FBACK: 224 case TFRC_SSTATE_FBACK:
214 /* 225 /*
215 * Check if IDLE since last timeout and recv rate is less than 226 * Check if IDLE since last timeout and recv rate is less than
216 * 4 packets per RTT 227 * 4 packets (in units of 64*bytes/sec) per RTT
217 */ 228 */
218 if (!hctx->ccid3hctx_idle || 229 if (!hctx->ccid3hctx_idle ||
219 (hctx->ccid3hctx_x_recv >= 230 (hctx->ccid3hctx_x_recv >= 4 *
220 4 * usecs_div(hctx->ccid3hctx_s, hctx->ccid3hctx_rtt))) { 231 scaled_div(((__u64)hctx->ccid3hctx_s) << 6,
232 hctx->ccid3hctx_rtt))) {
221 struct timeval now; 233 struct timeval now;
222 234
223 ccid3_pr_debug("%s, sk=%p, state=%s, not idle\n", 235 ccid3_pr_debug("%s(%p, state=%s), not idle\n",
224 dccp_role(sk), sk, 236 dccp_role(sk), sk,
225 ccid3_tx_state_name(hctx->ccid3hctx_state)); 237 ccid3_tx_state_name(hctx->ccid3hctx_state));
226 /* Halve sending rate */
227 238
228 /* If (p == 0 || X_calc > 2 * X_recv) 239 /*
240 * Modify the cached value of X_recv [RFC 3448, 4.4]
241 *
242 * If (p == 0 || X_calc > 2 * X_recv)
229 * X_recv = max(X_recv / 2, s / (2 * t_mbi)); 243 * X_recv = max(X_recv / 2, s / (2 * t_mbi));
230 * Else 244 * Else
231 * X_recv = X_calc / 4; 245 * X_recv = X_calc / 4;
246 *
247 * Note that X_recv is scaled by 2^6 while X_calc is not
232 */ 248 */
233 BUG_ON(hctx->ccid3hctx_p && !hctx->ccid3hctx_x_calc); 249 BUG_ON(hctx->ccid3hctx_p && !hctx->ccid3hctx_x_calc);
234 250
235 if (hctx->ccid3hctx_p == 0 || 251 if (hctx->ccid3hctx_p == 0 ||
236 hctx->ccid3hctx_x_calc > 2 * hctx->ccid3hctx_x_recv) 252 (hctx->ccid3hctx_x_calc >
237 hctx->ccid3hctx_x_recv = max_t(u32, hctx->ccid3hctx_x_recv / 2, 253 (hctx->ccid3hctx_x_recv >> 5))) {
238 hctx->ccid3hctx_s / (2 * TFRC_T_MBI)); 254
239 else 255 hctx->ccid3hctx_x_recv =
240 hctx->ccid3hctx_x_recv = hctx->ccid3hctx_x_calc / 4; 256 max(hctx->ccid3hctx_x_recv / 2,
241 257 (((__u64)hctx->ccid3hctx_s) << 6) /
242 /* Update sending rate */ 258 (2 * TFRC_T_MBI));
243 dccp_timestamp(sk, &now); 259
260 if (hctx->ccid3hctx_p == 0)
261 dccp_timestamp(sk, &now);
262 } else {
263 hctx->ccid3hctx_x_recv = hctx->ccid3hctx_x_calc;
264 hctx->ccid3hctx_x_recv <<= 4;
265 }
266 /* Now recalculate X [RFC 3448, 4.3, step (4)] */
244 ccid3_hc_tx_update_x(sk, &now); 267 ccid3_hc_tx_update_x(sk, &now);
245 } 268 }
246 /* 269 /*
@@ -251,7 +274,7 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
251 t_nfb = max(hctx->ccid3hctx_t_rto, 2 * hctx->ccid3hctx_t_ipi); 274 t_nfb = max(hctx->ccid3hctx_t_rto, 2 * hctx->ccid3hctx_t_ipi);
252 break; 275 break;
253 case TFRC_SSTATE_NO_SENT: 276 case TFRC_SSTATE_NO_SENT:
254 DCCP_BUG("Illegal %s state NO_SENT, sk=%p", dccp_role(sk), sk); 277 DCCP_BUG("%s(%p) - Illegal state NO_SENT", dccp_role(sk), sk);
255 /* fall through */ 278 /* fall through */
256 case TFRC_SSTATE_TERM: 279 case TFRC_SSTATE_TERM:
257 goto out; 280 goto out;
@@ -277,9 +300,8 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
277{ 300{
278 struct dccp_sock *dp = dccp_sk(sk); 301 struct dccp_sock *dp = dccp_sk(sk);
279 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); 302 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
280 struct dccp_tx_hist_entry *new_packet;
281 struct timeval now; 303 struct timeval now;
282 long delay; 304 suseconds_t delay;
283 305
284 BUG_ON(hctx == NULL); 306 BUG_ON(hctx == NULL);
285 307
@@ -291,34 +313,21 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
291 if (unlikely(skb->len == 0)) 313 if (unlikely(skb->len == 0))
292 return -EBADMSG; 314 return -EBADMSG;
293 315
294 /* See if last packet allocated was not sent */
295 new_packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist);
296 if (new_packet == NULL || new_packet->dccphtx_sent) {
297 new_packet = dccp_tx_hist_entry_new(ccid3_tx_hist,
298 GFP_ATOMIC);
299
300 if (unlikely(new_packet == NULL)) {
301 DCCP_WARN("%s, sk=%p, not enough mem to add to history,"
302 "send refused\n", dccp_role(sk), sk);
303 return -ENOBUFS;
304 }
305
306 dccp_tx_hist_add_entry(&hctx->ccid3hctx_hist, new_packet);
307 }
308
309 dccp_timestamp(sk, &now); 316 dccp_timestamp(sk, &now);
310 317
311 switch (hctx->ccid3hctx_state) { 318 switch (hctx->ccid3hctx_state) {
312 case TFRC_SSTATE_NO_SENT: 319 case TFRC_SSTATE_NO_SENT:
313 sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer, 320 sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
314 jiffies + usecs_to_jiffies(TFRC_INITIAL_TIMEOUT)); 321 (jiffies +
322 usecs_to_jiffies(TFRC_INITIAL_TIMEOUT)));
315 hctx->ccid3hctx_last_win_count = 0; 323 hctx->ccid3hctx_last_win_count = 0;
316 hctx->ccid3hctx_t_last_win_count = now; 324 hctx->ccid3hctx_t_last_win_count = now;
317 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK); 325 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);
318 326
319 /* Set initial sending rate to 1 packet per second */ 327 /* Set initial sending rate X/s to 1pps (X is scaled by 2^6) */
320 ccid3_hc_tx_update_s(hctx, skb->len); 328 ccid3_hc_tx_update_s(hctx, skb->len);
321 hctx->ccid3hctx_x = hctx->ccid3hctx_s; 329 hctx->ccid3hctx_x = hctx->ccid3hctx_s;
330 hctx->ccid3hctx_x <<= 6;
322 331
323 /* First timeout, according to [RFC 3448, 4.2], is 1 second */ 332 /* First timeout, according to [RFC 3448, 4.2], is 1 second */
324 hctx->ccid3hctx_t_ipi = USEC_PER_SEC; 333 hctx->ccid3hctx_t_ipi = USEC_PER_SEC;
@@ -332,77 +341,57 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
332 case TFRC_SSTATE_FBACK: 341 case TFRC_SSTATE_FBACK:
333 delay = timeval_delta(&hctx->ccid3hctx_t_nom, &now); 342 delay = timeval_delta(&hctx->ccid3hctx_t_nom, &now);
334 /* 343 /*
335 * Scheduling of packet transmissions [RFC 3448, 4.6] 344 * Scheduling of packet transmissions [RFC 3448, 4.6]
336 * 345 *
337 * if (t_now > t_nom - delta) 346 * if (t_now > t_nom - delta)
338 * // send the packet now 347 * // send the packet now
339 * else 348 * else
340 * // send the packet in (t_nom - t_now) milliseconds. 349 * // send the packet in (t_nom - t_now) milliseconds.
341 */ 350 */
342 if (delay - (long)hctx->ccid3hctx_delta >= 0) 351 if (delay - (suseconds_t)hctx->ccid3hctx_delta >= 0)
343 return delay / 1000L; 352 return delay / 1000L;
353
354 ccid3_hc_tx_update_win_count(hctx, &now);
344 break; 355 break;
345 case TFRC_SSTATE_TERM: 356 case TFRC_SSTATE_TERM:
346 DCCP_BUG("Illegal %s state TERM, sk=%p", dccp_role(sk), sk); 357 DCCP_BUG("%s(%p) - Illegal state TERM", dccp_role(sk), sk);
347 return -EINVAL; 358 return -EINVAL;
348 } 359 }
349 360
350 /* prepare to send now (add options etc.) */ 361 /* prepare to send now (add options etc.) */
351 dp->dccps_hc_tx_insert_options = 1; 362 dp->dccps_hc_tx_insert_options = 1;
352 new_packet->dccphtx_ccval = DCCP_SKB_CB(skb)->dccpd_ccval = 363 DCCP_SKB_CB(skb)->dccpd_ccval = hctx->ccid3hctx_last_win_count;
353 hctx->ccid3hctx_last_win_count; 364
365 /* set the nominal send time for the next following packet */
354 timeval_add_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi); 366 timeval_add_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi);
355 367
356 return 0; 368 return 0;
357} 369}
358 370
359static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len) 371static void ccid3_hc_tx_packet_sent(struct sock *sk, int more,
372 unsigned int len)
360{ 373{
361 const struct dccp_sock *dp = dccp_sk(sk);
362 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); 374 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
363 struct timeval now; 375 struct timeval now;
364 unsigned long quarter_rtt;
365 struct dccp_tx_hist_entry *packet; 376 struct dccp_tx_hist_entry *packet;
366 377
367 BUG_ON(hctx == NULL); 378 BUG_ON(hctx == NULL);
368 379
369 dccp_timestamp(sk, &now);
370
371 ccid3_hc_tx_update_s(hctx, len); 380 ccid3_hc_tx_update_s(hctx, len);
372 381
373 packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist); 382 packet = dccp_tx_hist_entry_new(ccid3_tx_hist, GFP_ATOMIC);
374 if (unlikely(packet == NULL)) { 383 if (unlikely(packet == NULL)) {
375 DCCP_WARN("packet doesn't exist in history!\n"); 384 DCCP_CRIT("packet history - out of memory!");
376 return;
377 }
378 if (unlikely(packet->dccphtx_sent)) {
379 DCCP_WARN("no unsent packet in history!\n");
380 return; 385 return;
381 } 386 }
382 packet->dccphtx_tstamp = now; 387 dccp_tx_hist_add_entry(&hctx->ccid3hctx_hist, packet);
383 packet->dccphtx_seqno = dp->dccps_gss;
384 /*
385 * Check if win_count have changed
386 * Algorithm in "8.1. Window Counter Value" in RFC 4342.
387 */
388 quarter_rtt = timeval_delta(&now, &hctx->ccid3hctx_t_last_win_count);
389 if (likely(hctx->ccid3hctx_rtt > 8))
390 quarter_rtt /= hctx->ccid3hctx_rtt / 4;
391
392 if (quarter_rtt > 0) {
393 hctx->ccid3hctx_t_last_win_count = now;
394 hctx->ccid3hctx_last_win_count = (hctx->ccid3hctx_last_win_count +
395 min_t(unsigned long, quarter_rtt, 5)) % 16;
396 ccid3_pr_debug("%s, sk=%p, window changed from "
397 "%u to %u!\n",
398 dccp_role(sk), sk,
399 packet->dccphtx_ccval,
400 hctx->ccid3hctx_last_win_count);
401 }
402 388
403 hctx->ccid3hctx_idle = 0; 389 dccp_timestamp(sk, &now);
404 packet->dccphtx_rtt = hctx->ccid3hctx_rtt; 390 packet->dccphtx_tstamp = now;
405 packet->dccphtx_sent = 1; 391 packet->dccphtx_seqno = dccp_sk(sk)->dccps_gss;
392 packet->dccphtx_rtt = hctx->ccid3hctx_rtt;
393 packet->dccphtx_sent = 1;
394 hctx->ccid3hctx_idle = 0;
406} 395}
407 396
408static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) 397static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
@@ -414,7 +403,7 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
414 struct timeval now; 403 struct timeval now;
415 unsigned long t_nfb; 404 unsigned long t_nfb;
416 u32 pinv; 405 u32 pinv;
417 long r_sample, t_elapsed; 406 suseconds_t r_sample, t_elapsed;
418 407
419 BUG_ON(hctx == NULL); 408 BUG_ON(hctx == NULL);
420 409
@@ -430,44 +419,44 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
430 case TFRC_SSTATE_FBACK: 419 case TFRC_SSTATE_FBACK:
431 /* get packet from history to look up t_recvdata */ 420 /* get packet from history to look up t_recvdata */
432 packet = dccp_tx_hist_find_entry(&hctx->ccid3hctx_hist, 421 packet = dccp_tx_hist_find_entry(&hctx->ccid3hctx_hist,
433 DCCP_SKB_CB(skb)->dccpd_ack_seq); 422 DCCP_SKB_CB(skb)->dccpd_ack_seq);
434 if (unlikely(packet == NULL)) { 423 if (unlikely(packet == NULL)) {
435 DCCP_WARN("%s(%p), seqno %llu(%s) doesn't exist " 424 DCCP_WARN("%s(%p), seqno %llu(%s) doesn't exist "
436 "in history!\n", dccp_role(sk), sk, 425 "in history!\n", dccp_role(sk), sk,
437 (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq, 426 (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
438 dccp_packet_name(DCCP_SKB_CB(skb)->dccpd_type)); 427 dccp_packet_name(DCCP_SKB_CB(skb)->dccpd_type));
439 return; 428 return;
440 } 429 }
441 430
442 /* Update receive rate */ 431 /* Update receive rate in units of 64 * bytes/second */
443 hctx->ccid3hctx_x_recv = opt_recv->ccid3or_receive_rate; 432 hctx->ccid3hctx_x_recv = opt_recv->ccid3or_receive_rate;
433 hctx->ccid3hctx_x_recv <<= 6;
444 434
445 /* Update loss event rate */ 435 /* Update loss event rate */
446 pinv = opt_recv->ccid3or_loss_event_rate; 436 pinv = opt_recv->ccid3or_loss_event_rate;
447 if (pinv == ~0U || pinv == 0) 437 if (pinv == ~0U || pinv == 0) /* see RFC 4342, 8.5 */
448 hctx->ccid3hctx_p = 0; 438 hctx->ccid3hctx_p = 0;
449 else 439 else /* can not exceed 100% */
450 hctx->ccid3hctx_p = 1000000 / pinv; 440 hctx->ccid3hctx_p = 1000000 / pinv;
451 441
452 dccp_timestamp(sk, &now); 442 dccp_timestamp(sk, &now);
453 443
454 /* 444 /*
455 * Calculate new round trip sample as per [RFC 3448, 4.3] by 445 * Calculate new round trip sample as per [RFC 3448, 4.3] by
456 * R_sample = (now - t_recvdata) - t_elapsed 446 * R_sample = (now - t_recvdata) - t_elapsed
457 */ 447 */
458 r_sample = timeval_delta(&now, &packet->dccphtx_tstamp); 448 r_sample = timeval_delta(&now, &packet->dccphtx_tstamp);
459 t_elapsed = dp->dccps_options_received.dccpor_elapsed_time * 10; 449 t_elapsed = dp->dccps_options_received.dccpor_elapsed_time * 10;
460 450
461 if (unlikely(r_sample <= 0)) { 451 DCCP_BUG_ON(r_sample < 0);
462 DCCP_WARN("WARNING: R_sample (%ld) <= 0!\n", r_sample); 452 if (unlikely(r_sample <= t_elapsed))
463 r_sample = 0; 453 DCCP_WARN("WARNING: r_sample=%dus <= t_elapsed=%dus\n",
464 } else if (unlikely(r_sample <= t_elapsed)) 454 (int)r_sample, (int)t_elapsed);
465 DCCP_WARN("WARNING: r_sample=%ldus <= t_elapsed=%ldus\n",
466 r_sample, t_elapsed);
467 else 455 else
468 r_sample -= t_elapsed; 456 r_sample -= t_elapsed;
457 CCID3_RTT_SANITY_CHECK(r_sample);
469 458
470 /* Update RTT estimate by 459 /* Update RTT estimate by
471 * If (No feedback recv) 460 * If (No feedback recv)
472 * R = R_sample; 461 * R = R_sample;
473 * Else 462 * Else
@@ -476,34 +465,45 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
476 * q is a constant, RFC 3448 recomments 0.9 465 * q is a constant, RFC 3448 recomments 0.9
477 */ 466 */
478 if (hctx->ccid3hctx_state == TFRC_SSTATE_NO_FBACK) { 467 if (hctx->ccid3hctx_state == TFRC_SSTATE_NO_FBACK) {
479 /* Use Larger Initial Windows [RFC 4342, sec. 5] 468 /*
480 * We deviate in that we use `s' instead of `MSS'. */ 469 * Larger Initial Windows [RFC 4342, sec. 5]
481 u16 w_init = max( 4 * hctx->ccid3hctx_s, 470 * We deviate in that we use `s' instead of `MSS'.
482 max(2 * hctx->ccid3hctx_s, 4380)); 471 */
472 __u64 w_init = min(4 * hctx->ccid3hctx_s,
473 max(2 * hctx->ccid3hctx_s, 4380));
483 hctx->ccid3hctx_rtt = r_sample; 474 hctx->ccid3hctx_rtt = r_sample;
484 hctx->ccid3hctx_x = usecs_div(w_init, r_sample); 475 hctx->ccid3hctx_x = scaled_div(w_init << 6, r_sample);
485 hctx->ccid3hctx_t_ld = now; 476 hctx->ccid3hctx_t_ld = now;
486 477
487 ccid3_update_send_time(hctx); 478 ccid3_update_send_time(hctx);
488 479
489 ccid3_pr_debug("%s(%p), s=%u, w_init=%u, " 480 ccid3_pr_debug("%s(%p), s=%u, w_init=%llu, "
490 "R_sample=%ldus, X=%u\n", dccp_role(sk), 481 "R_sample=%dus, X=%u\n", dccp_role(sk),
491 sk, hctx->ccid3hctx_s, w_init, r_sample, 482 sk, hctx->ccid3hctx_s, w_init,
492 hctx->ccid3hctx_x); 483 (int)r_sample,
484 (unsigned)(hctx->ccid3hctx_x >> 6));
493 485
494 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK); 486 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK);
495 } else { 487 } else {
496 hctx->ccid3hctx_rtt = (9 * hctx->ccid3hctx_rtt + 488 hctx->ccid3hctx_rtt = (9 * hctx->ccid3hctx_rtt +
497 (u32)r_sample ) / 10; 489 (u32)r_sample) / 10;
498 490
491 /* Update sending rate (step 4 of [RFC 3448, 4.3]) */
492 if (hctx->ccid3hctx_p > 0)
493 hctx->ccid3hctx_x_calc =
494 tfrc_calc_x(hctx->ccid3hctx_s,
495 hctx->ccid3hctx_rtt,
496 hctx->ccid3hctx_p);
499 ccid3_hc_tx_update_x(sk, &now); 497 ccid3_hc_tx_update_x(sk, &now);
500 498
501 ccid3_pr_debug("%s(%p), RTT=%uus (sample=%ldus), s=%u, " 499 ccid3_pr_debug("%s(%p), RTT=%uus (sample=%dus), s=%u, "
502 "p=%u, X_calc=%u, X=%u\n", dccp_role(sk), 500 "p=%u, X_calc=%u, X_recv=%u, X=%u\n",
503 sk, hctx->ccid3hctx_rtt, r_sample, 501 dccp_role(sk),
502 sk, hctx->ccid3hctx_rtt, (int)r_sample,
504 hctx->ccid3hctx_s, hctx->ccid3hctx_p, 503 hctx->ccid3hctx_s, hctx->ccid3hctx_p,
505 hctx->ccid3hctx_x_calc, 504 hctx->ccid3hctx_x_calc,
506 hctx->ccid3hctx_x); 505 (unsigned)(hctx->ccid3hctx_x_recv >> 6),
506 (unsigned)(hctx->ccid3hctx_x >> 6));
507 } 507 }
508 508
509 /* unschedule no feedback timer */ 509 /* unschedule no feedback timer */
@@ -513,57 +513,48 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
513 dccp_tx_hist_purge_older(ccid3_tx_hist, 513 dccp_tx_hist_purge_older(ccid3_tx_hist,
514 &hctx->ccid3hctx_hist, packet); 514 &hctx->ccid3hctx_hist, packet);
515 /* 515 /*
516 * As we have calculated new ipi, delta, t_nom it is possible that 516 * As we have calculated new ipi, delta, t_nom it is possible
517 * we now can send a packet, so wake up dccp_wait_for_ccid 517 * that we now can send a packet, so wake up dccp_wait_for_ccid
518 */ 518 */
519 sk->sk_write_space(sk); 519 sk->sk_write_space(sk);
520 520
521 /* 521 /*
522 * Update timeout interval for the nofeedback timer. 522 * Update timeout interval for the nofeedback timer.
523 * We use a configuration option to increase the lower bound. 523 * We use a configuration option to increase the lower bound.
524 * This can help avoid triggering the nofeedback timer too often 524 * This can help avoid triggering the nofeedback timer too
525 * ('spinning') on LANs with small RTTs. 525 * often ('spinning') on LANs with small RTTs.
526 */ 526 */
527 hctx->ccid3hctx_t_rto = max_t(u32, 4 * hctx->ccid3hctx_rtt, 527 hctx->ccid3hctx_t_rto = max_t(u32, 4 * hctx->ccid3hctx_rtt,
528 CONFIG_IP_DCCP_CCID3_RTO * 528 CONFIG_IP_DCCP_CCID3_RTO *
529 (USEC_PER_SEC/1000) ); 529 (USEC_PER_SEC/1000));
530 /* 530 /*
531 * Schedule no feedback timer to expire in 531 * Schedule no feedback timer to expire in
532 * max(t_RTO, 2 * s/X) = max(t_RTO, 2 * t_ipi) 532 * max(t_RTO, 2 * s/X) = max(t_RTO, 2 * t_ipi)
533 */ 533 */
534 t_nfb = max(hctx->ccid3hctx_t_rto, 2 * hctx->ccid3hctx_t_ipi); 534 t_nfb = max(hctx->ccid3hctx_t_rto, 2 * hctx->ccid3hctx_t_ipi);
535 535
536 ccid3_pr_debug("%s, sk=%p, Scheduled no feedback timer to " 536 ccid3_pr_debug("%s(%p), Scheduled no feedback timer to "
537 "expire in %lu jiffies (%luus)\n", 537 "expire in %lu jiffies (%luus)\n",
538 dccp_role(sk), sk, 538 dccp_role(sk),
539 usecs_to_jiffies(t_nfb), t_nfb); 539 sk, usecs_to_jiffies(t_nfb), t_nfb);
540 540
541 sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer, 541 sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
542 jiffies + usecs_to_jiffies(t_nfb)); 542 jiffies + usecs_to_jiffies(t_nfb));
543 543
544 /* set idle flag */ 544 /* set idle flag */
545 hctx->ccid3hctx_idle = 1; 545 hctx->ccid3hctx_idle = 1;
546 break; 546 break;
547 case TFRC_SSTATE_NO_SENT: 547 case TFRC_SSTATE_NO_SENT:
548 if (dccp_sk(sk)->dccps_role == DCCP_ROLE_CLIENT) 548 /*
549 DCCP_WARN("Illegal ACK received - no packet sent\n"); 549 * XXX when implementing bidirectional rx/tx check this again
550 */
551 DCCP_WARN("Illegal ACK received - no packet sent\n");
550 /* fall through */ 552 /* fall through */
551 case TFRC_SSTATE_TERM: /* ignore feedback when closing */ 553 case TFRC_SSTATE_TERM: /* ignore feedback when closing */
552 break; 554 break;
553 } 555 }
554} 556}
555 557
556static int ccid3_hc_tx_insert_options(struct sock *sk, struct sk_buff *skb)
557{
558 const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
559
560 BUG_ON(hctx == NULL);
561
562 if (sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN)
563 DCCP_SKB_CB(skb)->dccpd_ccval = hctx->ccid3hctx_last_win_count;
564 return 0;
565}
566
567static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option, 558static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
568 unsigned char len, u16 idx, 559 unsigned char len, u16 idx,
569 unsigned char *value) 560 unsigned char *value)
@@ -588,13 +579,14 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
588 switch (option) { 579 switch (option) {
589 case TFRC_OPT_LOSS_EVENT_RATE: 580 case TFRC_OPT_LOSS_EVENT_RATE:
590 if (unlikely(len != 4)) { 581 if (unlikely(len != 4)) {
591 DCCP_WARN("%s, sk=%p, invalid len %d " 582 DCCP_WARN("%s(%p), invalid len %d "
592 "for TFRC_OPT_LOSS_EVENT_RATE\n", 583 "for TFRC_OPT_LOSS_EVENT_RATE\n",
593 dccp_role(sk), sk, len); 584 dccp_role(sk), sk, len);
594 rc = -EINVAL; 585 rc = -EINVAL;
595 } else { 586 } else {
596 opt_recv->ccid3or_loss_event_rate = ntohl(*(__be32 *)value); 587 opt_recv->ccid3or_loss_event_rate =
597 ccid3_pr_debug("%s, sk=%p, LOSS_EVENT_RATE=%u\n", 588 ntohl(*(__be32 *)value);
589 ccid3_pr_debug("%s(%p), LOSS_EVENT_RATE=%u\n",
598 dccp_role(sk), sk, 590 dccp_role(sk), sk,
599 opt_recv->ccid3or_loss_event_rate); 591 opt_recv->ccid3or_loss_event_rate);
600 } 592 }
@@ -602,20 +594,21 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
602 case TFRC_OPT_LOSS_INTERVALS: 594 case TFRC_OPT_LOSS_INTERVALS:
603 opt_recv->ccid3or_loss_intervals_idx = idx; 595 opt_recv->ccid3or_loss_intervals_idx = idx;
604 opt_recv->ccid3or_loss_intervals_len = len; 596 opt_recv->ccid3or_loss_intervals_len = len;
605 ccid3_pr_debug("%s, sk=%p, LOSS_INTERVALS=(%u, %u)\n", 597 ccid3_pr_debug("%s(%p), LOSS_INTERVALS=(%u, %u)\n",
606 dccp_role(sk), sk, 598 dccp_role(sk), sk,
607 opt_recv->ccid3or_loss_intervals_idx, 599 opt_recv->ccid3or_loss_intervals_idx,
608 opt_recv->ccid3or_loss_intervals_len); 600 opt_recv->ccid3or_loss_intervals_len);
609 break; 601 break;
610 case TFRC_OPT_RECEIVE_RATE: 602 case TFRC_OPT_RECEIVE_RATE:
611 if (unlikely(len != 4)) { 603 if (unlikely(len != 4)) {
612 DCCP_WARN("%s, sk=%p, invalid len %d " 604 DCCP_WARN("%s(%p), invalid len %d "
613 "for TFRC_OPT_RECEIVE_RATE\n", 605 "for TFRC_OPT_RECEIVE_RATE\n",
614 dccp_role(sk), sk, len); 606 dccp_role(sk), sk, len);
615 rc = -EINVAL; 607 rc = -EINVAL;
616 } else { 608 } else {
617 opt_recv->ccid3or_receive_rate = ntohl(*(__be32 *)value); 609 opt_recv->ccid3or_receive_rate =
618 ccid3_pr_debug("%s, sk=%p, RECEIVE_RATE=%u\n", 610 ntohl(*(__be32 *)value);
611 ccid3_pr_debug("%s(%p), RECEIVE_RATE=%u\n",
619 dccp_role(sk), sk, 612 dccp_role(sk), sk,
620 opt_recv->ccid3or_receive_rate); 613 opt_recv->ccid3or_receive_rate);
621 } 614 }
@@ -630,10 +623,12 @@ static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk)
630 struct ccid3_hc_tx_sock *hctx = ccid_priv(ccid); 623 struct ccid3_hc_tx_sock *hctx = ccid_priv(ccid);
631 624
632 hctx->ccid3hctx_s = 0; 625 hctx->ccid3hctx_s = 0;
626 hctx->ccid3hctx_rtt = 0;
633 hctx->ccid3hctx_state = TFRC_SSTATE_NO_SENT; 627 hctx->ccid3hctx_state = TFRC_SSTATE_NO_SENT;
634 INIT_LIST_HEAD(&hctx->ccid3hctx_hist); 628 INIT_LIST_HEAD(&hctx->ccid3hctx_hist);
635 629
636 hctx->ccid3hctx_no_feedback_timer.function = ccid3_hc_tx_no_feedback_timer; 630 hctx->ccid3hctx_no_feedback_timer.function =
631 ccid3_hc_tx_no_feedback_timer;
637 hctx->ccid3hctx_no_feedback_timer.data = (unsigned long)sk; 632 hctx->ccid3hctx_no_feedback_timer.data = (unsigned long)sk;
638 init_timer(&hctx->ccid3hctx_no_feedback_timer); 633 init_timer(&hctx->ccid3hctx_no_feedback_timer);
639 634
@@ -698,8 +693,9 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk)
698 struct dccp_sock *dp = dccp_sk(sk); 693 struct dccp_sock *dp = dccp_sk(sk);
699 struct dccp_rx_hist_entry *packet; 694 struct dccp_rx_hist_entry *packet;
700 struct timeval now; 695 struct timeval now;
696 suseconds_t delta;
701 697
702 ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk); 698 ccid3_pr_debug("%s(%p) - entry \n", dccp_role(sk), sk);
703 699
704 dccp_timestamp(sk, &now); 700 dccp_timestamp(sk, &now);
705 701
@@ -707,21 +703,21 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk)
707 case TFRC_RSTATE_NO_DATA: 703 case TFRC_RSTATE_NO_DATA:
708 hcrx->ccid3hcrx_x_recv = 0; 704 hcrx->ccid3hcrx_x_recv = 0;
709 break; 705 break;
710 case TFRC_RSTATE_DATA: { 706 case TFRC_RSTATE_DATA:
711 const u32 delta = timeval_delta(&now, 707 delta = timeval_delta(&now,
712 &hcrx->ccid3hcrx_tstamp_last_feedback); 708 &hcrx->ccid3hcrx_tstamp_last_feedback);
713 hcrx->ccid3hcrx_x_recv = usecs_div(hcrx->ccid3hcrx_bytes_recv, 709 DCCP_BUG_ON(delta < 0);
714 delta); 710 hcrx->ccid3hcrx_x_recv =
715 } 711 scaled_div32(hcrx->ccid3hcrx_bytes_recv, delta);
716 break; 712 break;
717 case TFRC_RSTATE_TERM: 713 case TFRC_RSTATE_TERM:
718 DCCP_BUG("Illegal %s state TERM, sk=%p", dccp_role(sk), sk); 714 DCCP_BUG("%s(%p) - Illegal state TERM", dccp_role(sk), sk);
719 return; 715 return;
720 } 716 }
721 717
722 packet = dccp_rx_hist_find_data_packet(&hcrx->ccid3hcrx_hist); 718 packet = dccp_rx_hist_find_data_packet(&hcrx->ccid3hcrx_hist);
723 if (unlikely(packet == NULL)) { 719 if (unlikely(packet == NULL)) {
724 DCCP_WARN("%s, sk=%p, no data packet in history!\n", 720 DCCP_WARN("%s(%p), no data packet in history!\n",
725 dccp_role(sk), sk); 721 dccp_role(sk), sk);
726 return; 722 return;
727 } 723 }
@@ -730,13 +726,19 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk)
730 hcrx->ccid3hcrx_ccval_last_counter = packet->dccphrx_ccval; 726 hcrx->ccid3hcrx_ccval_last_counter = packet->dccphrx_ccval;
731 hcrx->ccid3hcrx_bytes_recv = 0; 727 hcrx->ccid3hcrx_bytes_recv = 0;
732 728
733 /* Convert to multiples of 10us */ 729 /* Elapsed time information [RFC 4340, 13.2] in units of 10 * usecs */
734 hcrx->ccid3hcrx_elapsed_time = 730 delta = timeval_delta(&now, &packet->dccphrx_tstamp);
735 timeval_delta(&now, &packet->dccphrx_tstamp) / 10; 731 DCCP_BUG_ON(delta < 0);
732 hcrx->ccid3hcrx_elapsed_time = delta / 10;
733
736 if (hcrx->ccid3hcrx_p == 0) 734 if (hcrx->ccid3hcrx_p == 0)
737 hcrx->ccid3hcrx_pinv = ~0; 735 hcrx->ccid3hcrx_pinv = ~0U; /* see RFC 4342, 8.5 */
738 else 736 else if (hcrx->ccid3hcrx_p > 1000000) {
737 DCCP_WARN("p (%u) > 100%%\n", hcrx->ccid3hcrx_p);
738 hcrx->ccid3hcrx_pinv = 1; /* use 100% in this case */
739 } else
739 hcrx->ccid3hcrx_pinv = 1000000 / hcrx->ccid3hcrx_p; 740 hcrx->ccid3hcrx_pinv = 1000000 / hcrx->ccid3hcrx_p;
741
740 dp->dccps_hc_rx_insert_options = 1; 742 dp->dccps_hc_rx_insert_options = 1;
741 dccp_send_ack(sk); 743 dccp_send_ack(sk);
742} 744}
@@ -764,9 +766,9 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
764 hcrx->ccid3hcrx_elapsed_time)) || 766 hcrx->ccid3hcrx_elapsed_time)) ||
765 dccp_insert_option_timestamp(sk, skb) || 767 dccp_insert_option_timestamp(sk, skb) ||
766 dccp_insert_option(sk, skb, TFRC_OPT_LOSS_EVENT_RATE, 768 dccp_insert_option(sk, skb, TFRC_OPT_LOSS_EVENT_RATE,
767 &pinv, sizeof(pinv)) || 769 &pinv, sizeof(pinv)) ||
768 dccp_insert_option(sk, skb, TFRC_OPT_RECEIVE_RATE, 770 dccp_insert_option(sk, skb, TFRC_OPT_RECEIVE_RATE,
769 &x_recv, sizeof(x_recv))) 771 &x_recv, sizeof(x_recv)))
770 return -1; 772 return -1;
771 773
772 return 0; 774 return 0;
@@ -780,12 +782,13 @@ static u32 ccid3_hc_rx_calc_first_li(struct sock *sk)
780{ 782{
781 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); 783 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
782 struct dccp_rx_hist_entry *entry, *next, *tail = NULL; 784 struct dccp_rx_hist_entry *entry, *next, *tail = NULL;
783 u32 rtt, delta, x_recv, fval, p, tmp2; 785 u32 x_recv, p;
786 suseconds_t rtt, delta;
784 struct timeval tstamp = { 0, }; 787 struct timeval tstamp = { 0, };
785 int interval = 0; 788 int interval = 0;
786 int win_count = 0; 789 int win_count = 0;
787 int step = 0; 790 int step = 0;
788 u64 tmp1; 791 u64 fval;
789 792
790 list_for_each_entry_safe(entry, next, &hcrx->ccid3hcrx_hist, 793 list_for_each_entry_safe(entry, next, &hcrx->ccid3hcrx_hist,
791 dccphrx_node) { 794 dccphrx_node) {
@@ -810,13 +813,13 @@ static u32 ccid3_hc_rx_calc_first_li(struct sock *sk)
810 } 813 }
811 814
812 if (unlikely(step == 0)) { 815 if (unlikely(step == 0)) {
813 DCCP_WARN("%s, sk=%p, packet history has no data packets!\n", 816 DCCP_WARN("%s(%p), packet history has no data packets!\n",
814 dccp_role(sk), sk); 817 dccp_role(sk), sk);
815 return ~0; 818 return ~0;
816 } 819 }
817 820
818 if (unlikely(interval == 0)) { 821 if (unlikely(interval == 0)) {
819 DCCP_WARN("%s, sk=%p, Could not find a win_count interval > 0." 822 DCCP_WARN("%s(%p), Could not find a win_count interval > 0."
820 "Defaulting to 1\n", dccp_role(sk), sk); 823 "Defaulting to 1\n", dccp_role(sk), sk);
821 interval = 1; 824 interval = 1;
822 } 825 }
@@ -825,41 +828,51 @@ found:
825 DCCP_CRIT("tail is null\n"); 828 DCCP_CRIT("tail is null\n");
826 return ~0; 829 return ~0;
827 } 830 }
828 rtt = timeval_delta(&tstamp, &tail->dccphrx_tstamp) * 4 / interval;
829 ccid3_pr_debug("%s, sk=%p, approximated RTT to %uus\n",
830 dccp_role(sk), sk, rtt);
831 831
832 if (rtt == 0) { 832 delta = timeval_delta(&tstamp, &tail->dccphrx_tstamp);
833 DCCP_WARN("RTT==0, setting to 1\n"); 833 DCCP_BUG_ON(delta < 0);
834 rtt = 1; 834
835 rtt = delta * 4 / interval;
836 ccid3_pr_debug("%s(%p), approximated RTT to %dus\n",
837 dccp_role(sk), sk, (int)rtt);
838
839 /*
840 * Determine the length of the first loss interval via inverse lookup.
841 * Assume that X_recv can be computed by the throughput equation
842 * s
843 * X_recv = --------
844 * R * fval
845 * Find some p such that f(p) = fval; return 1/p [RFC 3448, 6.3.1].
846 */
847 if (rtt == 0) { /* would result in divide-by-zero */
848 DCCP_WARN("RTT==0, returning 1/p = 1\n");
849 return 1000000;
835 } 850 }
836 851
837 dccp_timestamp(sk, &tstamp); 852 dccp_timestamp(sk, &tstamp);
838 delta = timeval_delta(&tstamp, &hcrx->ccid3hcrx_tstamp_last_feedback); 853 delta = timeval_delta(&tstamp, &hcrx->ccid3hcrx_tstamp_last_feedback);
839 x_recv = usecs_div(hcrx->ccid3hcrx_bytes_recv, delta); 854 DCCP_BUG_ON(delta <= 0);
840 855
841 if (x_recv == 0) 856 x_recv = scaled_div32(hcrx->ccid3hcrx_bytes_recv, delta);
842 x_recv = hcrx->ccid3hcrx_x_recv; 857 if (x_recv == 0) { /* would also trigger divide-by-zero */
843 858 DCCP_WARN("X_recv==0\n");
844 tmp1 = (u64)x_recv * (u64)rtt; 859 if ((x_recv = hcrx->ccid3hcrx_x_recv) == 0) {
845 do_div(tmp1,10000000); 860 DCCP_BUG("stored value of X_recv is zero");
846 tmp2 = (u32)tmp1; 861 return 1000000;
847 862 }
848 if (!tmp2) {
849 DCCP_CRIT("tmp2 = 0, x_recv = %u, rtt =%u\n", x_recv, rtt);
850 return ~0;
851 } 863 }
852 864
853 fval = (hcrx->ccid3hcrx_s * 100000) / tmp2; 865 fval = scaled_div(hcrx->ccid3hcrx_s, rtt);
854 /* do not alter order above or you will get overflow on 32 bit */ 866 fval = scaled_div32(fval, x_recv);
855 p = tfrc_calc_x_reverse_lookup(fval); 867 p = tfrc_calc_x_reverse_lookup(fval);
856 ccid3_pr_debug("%s, sk=%p, receive rate=%u bytes/s, implied " 868
869 ccid3_pr_debug("%s(%p), receive rate=%u bytes/s, implied "
857 "loss rate=%u\n", dccp_role(sk), sk, x_recv, p); 870 "loss rate=%u\n", dccp_role(sk), sk, x_recv, p);
858 871
859 if (p == 0) 872 if (p == 0)
860 return ~0; 873 return ~0;
861 else 874 else
862 return 1000000 / p; 875 return 1000000 / p;
863} 876}
864 877
865static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss) 878static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss)
@@ -913,7 +926,8 @@ static int ccid3_hc_rx_detect_loss(struct sock *sk,
913 struct dccp_rx_hist_entry *packet) 926 struct dccp_rx_hist_entry *packet)
914{ 927{
915 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); 928 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
916 struct dccp_rx_hist_entry *rx_hist = dccp_rx_hist_head(&hcrx->ccid3hcrx_hist); 929 struct dccp_rx_hist_entry *rx_hist =
930 dccp_rx_hist_head(&hcrx->ccid3hcrx_hist);
917 u64 seqno = packet->dccphrx_seqno; 931 u64 seqno = packet->dccphrx_seqno;
918 u64 tmp_seqno; 932 u64 tmp_seqno;
919 int loss = 0; 933 int loss = 0;
@@ -941,7 +955,7 @@ static int ccid3_hc_rx_detect_loss(struct sock *sk,
941 dccp_inc_seqno(&tmp_seqno); 955 dccp_inc_seqno(&tmp_seqno);
942 while (dccp_rx_hist_find_entry(&hcrx->ccid3hcrx_hist, 956 while (dccp_rx_hist_find_entry(&hcrx->ccid3hcrx_hist,
943 tmp_seqno, &ccval)) { 957 tmp_seqno, &ccval)) {
944 hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno; 958 hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno;
945 hcrx->ccid3hcrx_ccval_nonloss = ccval; 959 hcrx->ccid3hcrx_ccval_nonloss = ccval;
946 dccp_inc_seqno(&tmp_seqno); 960 dccp_inc_seqno(&tmp_seqno);
947 } 961 }
@@ -967,7 +981,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
967 const struct dccp_options_received *opt_recv; 981 const struct dccp_options_received *opt_recv;
968 struct dccp_rx_hist_entry *packet; 982 struct dccp_rx_hist_entry *packet;
969 struct timeval now; 983 struct timeval now;
970 u32 p_prev, rtt_prev, r_sample, t_elapsed; 984 u32 p_prev, rtt_prev;
985 suseconds_t r_sample, t_elapsed;
971 int loss, payload_size; 986 int loss, payload_size;
972 987
973 BUG_ON(hcrx == NULL); 988 BUG_ON(hcrx == NULL);
@@ -987,11 +1002,13 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
987 r_sample = timeval_usecs(&now); 1002 r_sample = timeval_usecs(&now);
988 t_elapsed = opt_recv->dccpor_elapsed_time * 10; 1003 t_elapsed = opt_recv->dccpor_elapsed_time * 10;
989 1004
1005 DCCP_BUG_ON(r_sample < 0);
990 if (unlikely(r_sample <= t_elapsed)) 1006 if (unlikely(r_sample <= t_elapsed))
991 DCCP_WARN("r_sample=%uus, t_elapsed=%uus\n", 1007 DCCP_WARN("r_sample=%ldus, t_elapsed=%ldus\n",
992 r_sample, t_elapsed); 1008 r_sample, t_elapsed);
993 else 1009 else
994 r_sample -= t_elapsed; 1010 r_sample -= t_elapsed;
1011 CCID3_RTT_SANITY_CHECK(r_sample);
995 1012
996 if (hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA) 1013 if (hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA)
997 hcrx->ccid3hcrx_rtt = r_sample; 1014 hcrx->ccid3hcrx_rtt = r_sample;
@@ -1000,8 +1017,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
1000 r_sample / 10; 1017 r_sample / 10;
1001 1018
1002 if (rtt_prev != hcrx->ccid3hcrx_rtt) 1019 if (rtt_prev != hcrx->ccid3hcrx_rtt)
1003 ccid3_pr_debug("%s, New RTT=%uus, elapsed time=%u\n", 1020 ccid3_pr_debug("%s(%p), New RTT=%uus, elapsed time=%u\n",
1004 dccp_role(sk), hcrx->ccid3hcrx_rtt, 1021 dccp_role(sk), sk, hcrx->ccid3hcrx_rtt,
1005 opt_recv->dccpor_elapsed_time); 1022 opt_recv->dccpor_elapsed_time);
1006 break; 1023 break;
1007 case DCCP_PKT_DATA: 1024 case DCCP_PKT_DATA:
@@ -1013,7 +1030,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
1013 packet = dccp_rx_hist_entry_new(ccid3_rx_hist, sk, opt_recv->dccpor_ndp, 1030 packet = dccp_rx_hist_entry_new(ccid3_rx_hist, sk, opt_recv->dccpor_ndp,
1014 skb, GFP_ATOMIC); 1031 skb, GFP_ATOMIC);
1015 if (unlikely(packet == NULL)) { 1032 if (unlikely(packet == NULL)) {
1016 DCCP_WARN("%s, sk=%p, Not enough mem to add rx packet " 1033 DCCP_WARN("%s(%p), Not enough mem to add rx packet "
1017 "to history, consider it lost!\n", dccp_role(sk), sk); 1034 "to history, consider it lost!\n", dccp_role(sk), sk);
1018 return; 1035 return;
1019 } 1036 }
@@ -1028,9 +1045,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
1028 1045
1029 switch (hcrx->ccid3hcrx_state) { 1046 switch (hcrx->ccid3hcrx_state) {
1030 case TFRC_RSTATE_NO_DATA: 1047 case TFRC_RSTATE_NO_DATA:
1031 ccid3_pr_debug("%s, sk=%p(%s), skb=%p, sending initial " 1048 ccid3_pr_debug("%s(%p, state=%s), skb=%p, sending initial "
1032 "feedback\n", 1049 "feedback\n", dccp_role(sk), sk,
1033 dccp_role(sk), sk,
1034 dccp_state_name(sk->sk_state), skb); 1050 dccp_state_name(sk->sk_state), skb);
1035 ccid3_hc_rx_send_feedback(sk); 1051 ccid3_hc_rx_send_feedback(sk);
1036 ccid3_hc_rx_set_state(sk, TFRC_RSTATE_DATA); 1052 ccid3_hc_rx_set_state(sk, TFRC_RSTATE_DATA);
@@ -1041,19 +1057,19 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
1041 break; 1057 break;
1042 1058
1043 dccp_timestamp(sk, &now); 1059 dccp_timestamp(sk, &now);
1044 if (timeval_delta(&now, &hcrx->ccid3hcrx_tstamp_last_ack) >= 1060 if ((timeval_delta(&now, &hcrx->ccid3hcrx_tstamp_last_ack) -
1045 hcrx->ccid3hcrx_rtt) { 1061 (suseconds_t)hcrx->ccid3hcrx_rtt) >= 0) {
1046 hcrx->ccid3hcrx_tstamp_last_ack = now; 1062 hcrx->ccid3hcrx_tstamp_last_ack = now;
1047 ccid3_hc_rx_send_feedback(sk); 1063 ccid3_hc_rx_send_feedback(sk);
1048 } 1064 }
1049 return; 1065 return;
1050 case TFRC_RSTATE_TERM: 1066 case TFRC_RSTATE_TERM:
1051 DCCP_BUG("Illegal %s state TERM, sk=%p", dccp_role(sk), sk); 1067 DCCP_BUG("%s(%p) - Illegal state TERM", dccp_role(sk), sk);
1052 return; 1068 return;
1053 } 1069 }
1054 1070
1055 /* Dealing with packet loss */ 1071 /* Dealing with packet loss */
1056 ccid3_pr_debug("%s, sk=%p(%s), data loss! Reacting...\n", 1072 ccid3_pr_debug("%s(%p, state=%s), data loss! Reacting...\n",
1057 dccp_role(sk), sk, dccp_state_name(sk->sk_state)); 1073 dccp_role(sk), sk, dccp_state_name(sk->sk_state));
1058 1074
1059 p_prev = hcrx->ccid3hcrx_p; 1075 p_prev = hcrx->ccid3hcrx_p;
@@ -1078,7 +1094,7 @@ static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk)
1078{ 1094{
1079 struct ccid3_hc_rx_sock *hcrx = ccid_priv(ccid); 1095 struct ccid3_hc_rx_sock *hcrx = ccid_priv(ccid);
1080 1096
1081 ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk); 1097 ccid3_pr_debug("entry\n");
1082 1098
1083 hcrx->ccid3hcrx_state = TFRC_RSTATE_NO_DATA; 1099 hcrx->ccid3hcrx_state = TFRC_RSTATE_NO_DATA;
1084 INIT_LIST_HEAD(&hcrx->ccid3hcrx_hist); 1100 INIT_LIST_HEAD(&hcrx->ccid3hcrx_hist);
@@ -1086,7 +1102,7 @@ static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk)
1086 dccp_timestamp(sk, &hcrx->ccid3hcrx_tstamp_last_ack); 1102 dccp_timestamp(sk, &hcrx->ccid3hcrx_tstamp_last_ack);
1087 hcrx->ccid3hcrx_tstamp_last_feedback = hcrx->ccid3hcrx_tstamp_last_ack; 1103 hcrx->ccid3hcrx_tstamp_last_feedback = hcrx->ccid3hcrx_tstamp_last_ack;
1088 hcrx->ccid3hcrx_s = 0; 1104 hcrx->ccid3hcrx_s = 0;
1089 hcrx->ccid3hcrx_rtt = 5000; /* XXX 5ms for now... */ 1105 hcrx->ccid3hcrx_rtt = 0;
1090 return 0; 1106 return 0;
1091} 1107}
1092 1108
@@ -1115,9 +1131,9 @@ static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info)
1115 1131
1116 BUG_ON(hcrx == NULL); 1132 BUG_ON(hcrx == NULL);
1117 1133
1118 info->tcpi_ca_state = hcrx->ccid3hcrx_state; 1134 info->tcpi_ca_state = hcrx->ccid3hcrx_state;
1119 info->tcpi_options |= TCPI_OPT_TIMESTAMPS; 1135 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
1120 info->tcpi_rcv_rtt = hcrx->ccid3hcrx_rtt; 1136 info->tcpi_rcv_rtt = hcrx->ccid3hcrx_rtt;
1121} 1137}
1122 1138
1123static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info) 1139static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info)
@@ -1198,7 +1214,6 @@ static struct ccid_operations ccid3 = {
1198 .ccid_hc_tx_send_packet = ccid3_hc_tx_send_packet, 1214 .ccid_hc_tx_send_packet = ccid3_hc_tx_send_packet,
1199 .ccid_hc_tx_packet_sent = ccid3_hc_tx_packet_sent, 1215 .ccid_hc_tx_packet_sent = ccid3_hc_tx_packet_sent,
1200 .ccid_hc_tx_packet_recv = ccid3_hc_tx_packet_recv, 1216 .ccid_hc_tx_packet_recv = ccid3_hc_tx_packet_recv,
1201 .ccid_hc_tx_insert_options = ccid3_hc_tx_insert_options,
1202 .ccid_hc_tx_parse_options = ccid3_hc_tx_parse_options, 1217 .ccid_hc_tx_parse_options = ccid3_hc_tx_parse_options,
1203 .ccid_hc_rx_obj_size = sizeof(struct ccid3_hc_rx_sock), 1218 .ccid_hc_rx_obj_size = sizeof(struct ccid3_hc_rx_sock),
1204 .ccid_hc_rx_init = ccid3_hc_rx_init, 1219 .ccid_hc_rx_init = ccid3_hc_rx_init,
@@ -1210,7 +1225,7 @@ static struct ccid_operations ccid3 = {
1210 .ccid_hc_rx_getsockopt = ccid3_hc_rx_getsockopt, 1225 .ccid_hc_rx_getsockopt = ccid3_hc_rx_getsockopt,
1211 .ccid_hc_tx_getsockopt = ccid3_hc_tx_getsockopt, 1226 .ccid_hc_tx_getsockopt = ccid3_hc_tx_getsockopt,
1212}; 1227};
1213 1228
1214#ifdef CONFIG_IP_DCCP_CCID3_DEBUG 1229#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
1215module_param(ccid3_debug, int, 0444); 1230module_param(ccid3_debug, int, 0444);
1216MODULE_PARM_DESC(ccid3_debug, "Enable debug messages"); 1231MODULE_PARM_DESC(ccid3_debug, "Enable debug messages");
@@ -1233,7 +1248,7 @@ static __init int ccid3_module_init(void)
1233 goto out_free_tx; 1248 goto out_free_tx;
1234 1249
1235 rc = ccid_register(&ccid3); 1250 rc = ccid_register(&ccid3);
1236 if (rc != 0) 1251 if (rc != 0)
1237 goto out_free_loss_interval_history; 1252 goto out_free_loss_interval_history;
1238out: 1253out:
1239 return rc; 1254 return rc;
diff --git a/net/dccp/ccids/ccid3.h b/net/dccp/ccids/ccid3.h
index 07596d704ef9..15776a88c090 100644
--- a/net/dccp/ccids/ccid3.h
+++ b/net/dccp/ccids/ccid3.h
@@ -51,6 +51,16 @@
51/* Parameter t_mbi from [RFC 3448, 4.3]: backoff interval in seconds */ 51/* Parameter t_mbi from [RFC 3448, 4.3]: backoff interval in seconds */
52#define TFRC_T_MBI 64 52#define TFRC_T_MBI 64
53 53
54/* What we think is a reasonable upper limit on RTT values */
55#define CCID3_SANE_RTT_MAX ((suseconds_t)(4 * USEC_PER_SEC))
56
57#define CCID3_RTT_SANITY_CHECK(rtt) do { \
58 if (rtt > CCID3_SANE_RTT_MAX) { \
59 DCCP_CRIT("RTT (%d) too large, substituting %d", \
60 (int)rtt, (int)CCID3_SANE_RTT_MAX); \
61 rtt = CCID3_SANE_RTT_MAX; \
62 } } while (0)
63
54enum ccid3_options { 64enum ccid3_options {
55 TFRC_OPT_LOSS_EVENT_RATE = 192, 65 TFRC_OPT_LOSS_EVENT_RATE = 192,
56 TFRC_OPT_LOSS_INTERVALS = 193, 66 TFRC_OPT_LOSS_INTERVALS = 193,
@@ -67,7 +77,7 @@ struct ccid3_options_received {
67 77
68/* TFRC sender states */ 78/* TFRC sender states */
69enum ccid3_hc_tx_states { 79enum ccid3_hc_tx_states {
70 TFRC_SSTATE_NO_SENT = 1, 80 TFRC_SSTATE_NO_SENT = 1,
71 TFRC_SSTATE_NO_FBACK, 81 TFRC_SSTATE_NO_FBACK,
72 TFRC_SSTATE_FBACK, 82 TFRC_SSTATE_FBACK,
73 TFRC_SSTATE_TERM, 83 TFRC_SSTATE_TERM,
@@ -75,23 +85,23 @@ enum ccid3_hc_tx_states {
75 85
76/** struct ccid3_hc_tx_sock - CCID3 sender half-connection socket 86/** struct ccid3_hc_tx_sock - CCID3 sender half-connection socket
77 * 87 *
78 * @ccid3hctx_x - Current sending rate 88 * @ccid3hctx_x - Current sending rate in 64 * bytes per second
79 * @ccid3hctx_x_recv - Receive rate 89 * @ccid3hctx_x_recv - Receive rate in 64 * bytes per second
80 * @ccid3hctx_x_calc - Calculated send rate (RFC 3448, 3.1) 90 * @ccid3hctx_x_calc - Calculated rate in bytes per second
81 * @ccid3hctx_rtt - Estimate of current round trip time in usecs 91 * @ccid3hctx_rtt - Estimate of current round trip time in usecs
82 * @ccid3hctx_p - Current loss event rate (0-1) scaled by 1000000 92 * @ccid3hctx_p - Current loss event rate (0-1) scaled by 1000000
83 * @ccid3hctx_s - Packet size 93 * @ccid3hctx_s - Packet size in bytes
84 * @ccid3hctx_t_rto - Retransmission Timeout (RFC 3448, 3.1) 94 * @ccid3hctx_t_rto - Nofeedback Timer setting in usecs
85 * @ccid3hctx_t_ipi - Interpacket (send) interval (RFC 3448, 4.6) 95 * @ccid3hctx_t_ipi - Interpacket (send) interval (RFC 3448, 4.6) in usecs
86 * @ccid3hctx_state - Sender state, one of %ccid3_hc_tx_states 96 * @ccid3hctx_state - Sender state, one of %ccid3_hc_tx_states
87 * @ccid3hctx_last_win_count - Last window counter sent 97 * @ccid3hctx_last_win_count - Last window counter sent
88 * @ccid3hctx_t_last_win_count - Timestamp of earliest packet 98 * @ccid3hctx_t_last_win_count - Timestamp of earliest packet
89 * with last_win_count value sent 99 * with last_win_count value sent
90 * @ccid3hctx_no_feedback_timer - Handle to no feedback timer 100 * @ccid3hctx_no_feedback_timer - Handle to no feedback timer
91 * @ccid3hctx_idle - Flag indicating that sender is idling 101 * @ccid3hctx_idle - Flag indicating that sender is idling
92 * @ccid3hctx_t_ld - Time last doubled during slow start 102 * @ccid3hctx_t_ld - Time last doubled during slow start
93 * @ccid3hctx_t_nom - Nominal send time of next packet 103 * @ccid3hctx_t_nom - Nominal send time of next packet
94 * @ccid3hctx_delta - Send timer delta 104 * @ccid3hctx_delta - Send timer delta (RFC 3448, 4.6) in usecs
95 * @ccid3hctx_hist - Packet history 105 * @ccid3hctx_hist - Packet history
96 * @ccid3hctx_options_received - Parsed set of retrieved options 106 * @ccid3hctx_options_received - Parsed set of retrieved options
97 */ 107 */
@@ -105,7 +115,7 @@ struct ccid3_hc_tx_sock {
105#define ccid3hctx_t_rto ccid3hctx_tfrc.tfrctx_rto 115#define ccid3hctx_t_rto ccid3hctx_tfrc.tfrctx_rto
106#define ccid3hctx_t_ipi ccid3hctx_tfrc.tfrctx_ipi 116#define ccid3hctx_t_ipi ccid3hctx_tfrc.tfrctx_ipi
107 u16 ccid3hctx_s; 117 u16 ccid3hctx_s;
108 enum ccid3_hc_tx_states ccid3hctx_state:8; 118 enum ccid3_hc_tx_states ccid3hctx_state:8;
109 u8 ccid3hctx_last_win_count; 119 u8 ccid3hctx_last_win_count;
110 u8 ccid3hctx_idle; 120 u8 ccid3hctx_idle;
111 struct timeval ccid3hctx_t_last_win_count; 121 struct timeval ccid3hctx_t_last_win_count;
@@ -119,7 +129,7 @@ struct ccid3_hc_tx_sock {
119 129
120/* TFRC receiver states */ 130/* TFRC receiver states */
121enum ccid3_hc_rx_states { 131enum ccid3_hc_rx_states {
122 TFRC_RSTATE_NO_DATA = 1, 132 TFRC_RSTATE_NO_DATA = 1,
123 TFRC_RSTATE_DATA, 133 TFRC_RSTATE_DATA,
124 TFRC_RSTATE_TERM = 127, 134 TFRC_RSTATE_TERM = 127,
125}; 135};
@@ -147,18 +157,18 @@ struct ccid3_hc_rx_sock {
147#define ccid3hcrx_x_recv ccid3hcrx_tfrc.tfrcrx_x_recv 157#define ccid3hcrx_x_recv ccid3hcrx_tfrc.tfrcrx_x_recv
148#define ccid3hcrx_rtt ccid3hcrx_tfrc.tfrcrx_rtt 158#define ccid3hcrx_rtt ccid3hcrx_tfrc.tfrcrx_rtt
149#define ccid3hcrx_p ccid3hcrx_tfrc.tfrcrx_p 159#define ccid3hcrx_p ccid3hcrx_tfrc.tfrcrx_p
150 u64 ccid3hcrx_seqno_nonloss:48, 160 u64 ccid3hcrx_seqno_nonloss:48,
151 ccid3hcrx_ccval_nonloss:4, 161 ccid3hcrx_ccval_nonloss:4,
152 ccid3hcrx_ccval_last_counter:4; 162 ccid3hcrx_ccval_last_counter:4;
153 enum ccid3_hc_rx_states ccid3hcrx_state:8; 163 enum ccid3_hc_rx_states ccid3hcrx_state:8;
154 u32 ccid3hcrx_bytes_recv; 164 u32 ccid3hcrx_bytes_recv;
155 struct timeval ccid3hcrx_tstamp_last_feedback; 165 struct timeval ccid3hcrx_tstamp_last_feedback;
156 struct timeval ccid3hcrx_tstamp_last_ack; 166 struct timeval ccid3hcrx_tstamp_last_ack;
157 struct list_head ccid3hcrx_hist; 167 struct list_head ccid3hcrx_hist;
158 struct list_head ccid3hcrx_li_hist; 168 struct list_head ccid3hcrx_li_hist;
159 u16 ccid3hcrx_s; 169 u16 ccid3hcrx_s;
160 u32 ccid3hcrx_pinv; 170 u32 ccid3hcrx_pinv;
161 u32 ccid3hcrx_elapsed_time; 171 u32 ccid3hcrx_elapsed_time;
162}; 172};
163 173
164static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk) 174static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk)
diff --git a/net/dccp/ccids/lib/packet_history.c b/net/dccp/ccids/lib/packet_history.c
index b876c9c81c65..2e8ef42721e2 100644
--- a/net/dccp/ccids/lib/packet_history.c
+++ b/net/dccp/ccids/lib/packet_history.c
@@ -36,9 +36,100 @@
36 36
37#include <linux/module.h> 37#include <linux/module.h>
38#include <linux/string.h> 38#include <linux/string.h>
39
40#include "packet_history.h" 39#include "packet_history.h"
41 40
41/*
42 * Transmitter History Routines
43 */
44struct dccp_tx_hist *dccp_tx_hist_new(const char *name)
45{
46 struct dccp_tx_hist *hist = kmalloc(sizeof(*hist), GFP_ATOMIC);
47 static const char dccp_tx_hist_mask[] = "tx_hist_%s";
48 char *slab_name;
49
50 if (hist == NULL)
51 goto out;
52
53 slab_name = kmalloc(strlen(name) + sizeof(dccp_tx_hist_mask) - 1,
54 GFP_ATOMIC);
55 if (slab_name == NULL)
56 goto out_free_hist;
57
58 sprintf(slab_name, dccp_tx_hist_mask, name);
59 hist->dccptxh_slab = kmem_cache_create(slab_name,
60 sizeof(struct dccp_tx_hist_entry),
61 0, SLAB_HWCACHE_ALIGN,
62 NULL, NULL);
63 if (hist->dccptxh_slab == NULL)
64 goto out_free_slab_name;
65out:
66 return hist;
67out_free_slab_name:
68 kfree(slab_name);
69out_free_hist:
70 kfree(hist);
71 hist = NULL;
72 goto out;
73}
74
75EXPORT_SYMBOL_GPL(dccp_tx_hist_new);
76
77void dccp_tx_hist_delete(struct dccp_tx_hist *hist)
78{
79 const char* name = kmem_cache_name(hist->dccptxh_slab);
80
81 kmem_cache_destroy(hist->dccptxh_slab);
82 kfree(name);
83 kfree(hist);
84}
85
86EXPORT_SYMBOL_GPL(dccp_tx_hist_delete);
87
88struct dccp_tx_hist_entry *
89 dccp_tx_hist_find_entry(const struct list_head *list, const u64 seq)
90{
91 struct dccp_tx_hist_entry *packet = NULL, *entry;
92
93 list_for_each_entry(entry, list, dccphtx_node)
94 if (entry->dccphtx_seqno == seq) {
95 packet = entry;
96 break;
97 }
98
99 return packet;
100}
101
102EXPORT_SYMBOL_GPL(dccp_tx_hist_find_entry);
103
104void dccp_tx_hist_purge(struct dccp_tx_hist *hist, struct list_head *list)
105{
106 struct dccp_tx_hist_entry *entry, *next;
107
108 list_for_each_entry_safe(entry, next, list, dccphtx_node) {
109 list_del_init(&entry->dccphtx_node);
110 dccp_tx_hist_entry_delete(hist, entry);
111 }
112}
113
114EXPORT_SYMBOL_GPL(dccp_tx_hist_purge);
115
116void dccp_tx_hist_purge_older(struct dccp_tx_hist *hist,
117 struct list_head *list,
118 struct dccp_tx_hist_entry *packet)
119{
120 struct dccp_tx_hist_entry *next;
121
122 list_for_each_entry_safe_continue(packet, next, list, dccphtx_node) {
123 list_del_init(&packet->dccphtx_node);
124 dccp_tx_hist_entry_delete(hist, packet);
125 }
126}
127
128EXPORT_SYMBOL_GPL(dccp_tx_hist_purge_older);
129
130/*
131 * Receiver History Routines
132 */
42struct dccp_rx_hist *dccp_rx_hist_new(const char *name) 133struct dccp_rx_hist *dccp_rx_hist_new(const char *name)
43{ 134{
44 struct dccp_rx_hist *hist = kmalloc(sizeof(*hist), GFP_ATOMIC); 135 struct dccp_rx_hist *hist = kmalloc(sizeof(*hist), GFP_ATOMIC);
@@ -83,18 +174,24 @@ void dccp_rx_hist_delete(struct dccp_rx_hist *hist)
83 174
84EXPORT_SYMBOL_GPL(dccp_rx_hist_delete); 175EXPORT_SYMBOL_GPL(dccp_rx_hist_delete);
85 176
86void dccp_rx_hist_purge(struct dccp_rx_hist *hist, struct list_head *list) 177int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq,
178 u8 *ccval)
87{ 179{
88 struct dccp_rx_hist_entry *entry, *next; 180 struct dccp_rx_hist_entry *packet = NULL, *entry;
89 181
90 list_for_each_entry_safe(entry, next, list, dccphrx_node) { 182 list_for_each_entry(entry, list, dccphrx_node)
91 list_del_init(&entry->dccphrx_node); 183 if (entry->dccphrx_seqno == seq) {
92 kmem_cache_free(hist->dccprxh_slab, entry); 184 packet = entry;
93 } 185 break;
94} 186 }
95 187
96EXPORT_SYMBOL_GPL(dccp_rx_hist_purge); 188 if (packet)
189 *ccval = packet->dccphrx_ccval;
97 190
191 return packet != NULL;
192}
193
194EXPORT_SYMBOL_GPL(dccp_rx_hist_find_entry);
98struct dccp_rx_hist_entry * 195struct dccp_rx_hist_entry *
99 dccp_rx_hist_find_data_packet(const struct list_head *list) 196 dccp_rx_hist_find_data_packet(const struct list_head *list)
100{ 197{
@@ -184,110 +281,18 @@ void dccp_rx_hist_add_packet(struct dccp_rx_hist *hist,
184 281
185EXPORT_SYMBOL_GPL(dccp_rx_hist_add_packet); 282EXPORT_SYMBOL_GPL(dccp_rx_hist_add_packet);
186 283
187struct dccp_tx_hist *dccp_tx_hist_new(const char *name) 284void dccp_rx_hist_purge(struct dccp_rx_hist *hist, struct list_head *list)
188{
189 struct dccp_tx_hist *hist = kmalloc(sizeof(*hist), GFP_ATOMIC);
190 static const char dccp_tx_hist_mask[] = "tx_hist_%s";
191 char *slab_name;
192
193 if (hist == NULL)
194 goto out;
195
196 slab_name = kmalloc(strlen(name) + sizeof(dccp_tx_hist_mask) - 1,
197 GFP_ATOMIC);
198 if (slab_name == NULL)
199 goto out_free_hist;
200
201 sprintf(slab_name, dccp_tx_hist_mask, name);
202 hist->dccptxh_slab = kmem_cache_create(slab_name,
203 sizeof(struct dccp_tx_hist_entry),
204 0, SLAB_HWCACHE_ALIGN,
205 NULL, NULL);
206 if (hist->dccptxh_slab == NULL)
207 goto out_free_slab_name;
208out:
209 return hist;
210out_free_slab_name:
211 kfree(slab_name);
212out_free_hist:
213 kfree(hist);
214 hist = NULL;
215 goto out;
216}
217
218EXPORT_SYMBOL_GPL(dccp_tx_hist_new);
219
220void dccp_tx_hist_delete(struct dccp_tx_hist *hist)
221{
222 const char* name = kmem_cache_name(hist->dccptxh_slab);
223
224 kmem_cache_destroy(hist->dccptxh_slab);
225 kfree(name);
226 kfree(hist);
227}
228
229EXPORT_SYMBOL_GPL(dccp_tx_hist_delete);
230
231struct dccp_tx_hist_entry *
232 dccp_tx_hist_find_entry(const struct list_head *list, const u64 seq)
233{
234 struct dccp_tx_hist_entry *packet = NULL, *entry;
235
236 list_for_each_entry(entry, list, dccphtx_node)
237 if (entry->dccphtx_seqno == seq) {
238 packet = entry;
239 break;
240 }
241
242 return packet;
243}
244
245EXPORT_SYMBOL_GPL(dccp_tx_hist_find_entry);
246
247int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq,
248 u8 *ccval)
249{
250 struct dccp_rx_hist_entry *packet = NULL, *entry;
251
252 list_for_each_entry(entry, list, dccphrx_node)
253 if (entry->dccphrx_seqno == seq) {
254 packet = entry;
255 break;
256 }
257
258 if (packet)
259 *ccval = packet->dccphrx_ccval;
260
261 return packet != NULL;
262}
263
264EXPORT_SYMBOL_GPL(dccp_rx_hist_find_entry);
265
266void dccp_tx_hist_purge_older(struct dccp_tx_hist *hist,
267 struct list_head *list,
268 struct dccp_tx_hist_entry *packet)
269{ 285{
270 struct dccp_tx_hist_entry *next; 286 struct dccp_rx_hist_entry *entry, *next;
271 287
272 list_for_each_entry_safe_continue(packet, next, list, dccphtx_node) { 288 list_for_each_entry_safe(entry, next, list, dccphrx_node) {
273 list_del_init(&packet->dccphtx_node); 289 list_del_init(&entry->dccphrx_node);
274 dccp_tx_hist_entry_delete(hist, packet); 290 kmem_cache_free(hist->dccprxh_slab, entry);
275 } 291 }
276} 292}
277 293
278EXPORT_SYMBOL_GPL(dccp_tx_hist_purge_older); 294EXPORT_SYMBOL_GPL(dccp_rx_hist_purge);
279
280void dccp_tx_hist_purge(struct dccp_tx_hist *hist, struct list_head *list)
281{
282 struct dccp_tx_hist_entry *entry, *next;
283
284 list_for_each_entry_safe(entry, next, list, dccphtx_node) {
285 list_del_init(&entry->dccphtx_node);
286 dccp_tx_hist_entry_delete(hist, entry);
287 }
288}
289 295
290EXPORT_SYMBOL_GPL(dccp_tx_hist_purge);
291 296
292MODULE_AUTHOR("Ian McDonald <ian.mcdonald@jandi.co.nz>, " 297MODULE_AUTHOR("Ian McDonald <ian.mcdonald@jandi.co.nz>, "
293 "Arnaldo Carvalho de Melo <acme@ghostprotocols.net>"); 298 "Arnaldo Carvalho de Melo <acme@ghostprotocols.net>");
diff --git a/net/dccp/ccids/lib/packet_history.h b/net/dccp/ccids/lib/packet_history.h
index 9a8bcf224aa7..1f960c19ea1b 100644
--- a/net/dccp/ccids/lib/packet_history.h
+++ b/net/dccp/ccids/lib/packet_history.h
@@ -49,43 +49,27 @@
49#define TFRC_WIN_COUNT_PER_RTT 4 49#define TFRC_WIN_COUNT_PER_RTT 4
50#define TFRC_WIN_COUNT_LIMIT 16 50#define TFRC_WIN_COUNT_LIMIT 16
51 51
52/*
53 * Transmitter History data structures and declarations
54 */
52struct dccp_tx_hist_entry { 55struct dccp_tx_hist_entry {
53 struct list_head dccphtx_node; 56 struct list_head dccphtx_node;
54 u64 dccphtx_seqno:48, 57 u64 dccphtx_seqno:48,
55 dccphtx_ccval:4,
56 dccphtx_sent:1; 58 dccphtx_sent:1;
57 u32 dccphtx_rtt; 59 u32 dccphtx_rtt;
58 struct timeval dccphtx_tstamp; 60 struct timeval dccphtx_tstamp;
59}; 61};
60 62
61struct dccp_rx_hist_entry {
62 struct list_head dccphrx_node;
63 u64 dccphrx_seqno:48,
64 dccphrx_ccval:4,
65 dccphrx_type:4;
66 u32 dccphrx_ndp; /* In fact it is from 8 to 24 bits */
67 struct timeval dccphrx_tstamp;
68};
69
70struct dccp_tx_hist { 63struct dccp_tx_hist {
71 struct kmem_cache *dccptxh_slab; 64 struct kmem_cache *dccptxh_slab;
72}; 65};
73 66
74extern struct dccp_tx_hist *dccp_tx_hist_new(const char *name); 67extern struct dccp_tx_hist *dccp_tx_hist_new(const char *name);
75extern void dccp_tx_hist_delete(struct dccp_tx_hist *hist); 68extern void dccp_tx_hist_delete(struct dccp_tx_hist *hist);
76
77struct dccp_rx_hist {
78 struct kmem_cache *dccprxh_slab;
79};
80
81extern struct dccp_rx_hist *dccp_rx_hist_new(const char *name);
82extern void dccp_rx_hist_delete(struct dccp_rx_hist *hist);
83extern struct dccp_rx_hist_entry *
84 dccp_rx_hist_find_data_packet(const struct list_head *list);
85 69
86static inline struct dccp_tx_hist_entry * 70static inline struct dccp_tx_hist_entry *
87 dccp_tx_hist_entry_new(struct dccp_tx_hist *hist, 71 dccp_tx_hist_entry_new(struct dccp_tx_hist *hist,
88 const gfp_t prio) 72 const gfp_t prio)
89{ 73{
90 struct dccp_tx_hist_entry *entry = kmem_cache_alloc(hist->dccptxh_slab, 74 struct dccp_tx_hist_entry *entry = kmem_cache_alloc(hist->dccptxh_slab,
91 prio); 75 prio);
@@ -96,18 +80,20 @@ static inline struct dccp_tx_hist_entry *
96 return entry; 80 return entry;
97} 81}
98 82
99static inline void dccp_tx_hist_entry_delete(struct dccp_tx_hist *hist, 83static inline struct dccp_tx_hist_entry *
100 struct dccp_tx_hist_entry *entry) 84 dccp_tx_hist_head(struct list_head *list)
101{ 85{
102 if (entry != NULL) 86 struct dccp_tx_hist_entry *head = NULL;
103 kmem_cache_free(hist->dccptxh_slab, entry); 87
88 if (!list_empty(list))
89 head = list_entry(list->next, struct dccp_tx_hist_entry,
90 dccphtx_node);
91 return head;
104} 92}
105 93
106extern struct dccp_tx_hist_entry * 94extern struct dccp_tx_hist_entry *
107 dccp_tx_hist_find_entry(const struct list_head *list, 95 dccp_tx_hist_find_entry(const struct list_head *list,
108 const u64 seq); 96 const u64 seq);
109extern int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq,
110 u8 *ccval);
111 97
112static inline void dccp_tx_hist_add_entry(struct list_head *list, 98static inline void dccp_tx_hist_add_entry(struct list_head *list,
113 struct dccp_tx_hist_entry *entry) 99 struct dccp_tx_hist_entry *entry)
@@ -115,30 +101,45 @@ static inline void dccp_tx_hist_add_entry(struct list_head *list,
115 list_add(&entry->dccphtx_node, list); 101 list_add(&entry->dccphtx_node, list);
116} 102}
117 103
104static inline void dccp_tx_hist_entry_delete(struct dccp_tx_hist *hist,
105 struct dccp_tx_hist_entry *entry)
106{
107 if (entry != NULL)
108 kmem_cache_free(hist->dccptxh_slab, entry);
109}
110
111extern void dccp_tx_hist_purge(struct dccp_tx_hist *hist,
112 struct list_head *list);
113
118extern void dccp_tx_hist_purge_older(struct dccp_tx_hist *hist, 114extern void dccp_tx_hist_purge_older(struct dccp_tx_hist *hist,
119 struct list_head *list, 115 struct list_head *list,
120 struct dccp_tx_hist_entry *next); 116 struct dccp_tx_hist_entry *next);
121 117
122extern void dccp_tx_hist_purge(struct dccp_tx_hist *hist, 118/*
123 struct list_head *list); 119 * Receiver History data structures and declarations
120 */
121struct dccp_rx_hist_entry {
122 struct list_head dccphrx_node;
123 u64 dccphrx_seqno:48,
124 dccphrx_ccval:4,
125 dccphrx_type:4;
126 u32 dccphrx_ndp; /* In fact it is from 8 to 24 bits */
127 struct timeval dccphrx_tstamp;
128};
124 129
125static inline struct dccp_tx_hist_entry * 130struct dccp_rx_hist {
126 dccp_tx_hist_head(struct list_head *list) 131 struct kmem_cache *dccprxh_slab;
127{ 132};
128 struct dccp_tx_hist_entry *head = NULL;
129 133
130 if (!list_empty(list)) 134extern struct dccp_rx_hist *dccp_rx_hist_new(const char *name);
131 head = list_entry(list->next, struct dccp_tx_hist_entry, 135extern void dccp_rx_hist_delete(struct dccp_rx_hist *hist);
132 dccphtx_node);
133 return head;
134}
135 136
136static inline struct dccp_rx_hist_entry * 137static inline struct dccp_rx_hist_entry *
137 dccp_rx_hist_entry_new(struct dccp_rx_hist *hist, 138 dccp_rx_hist_entry_new(struct dccp_rx_hist *hist,
138 const struct sock *sk, 139 const struct sock *sk,
139 const u32 ndp, 140 const u32 ndp,
140 const struct sk_buff *skb, 141 const struct sk_buff *skb,
141 const gfp_t prio) 142 const gfp_t prio)
142{ 143{
143 struct dccp_rx_hist_entry *entry = kmem_cache_alloc(hist->dccprxh_slab, 144 struct dccp_rx_hist_entry *entry = kmem_cache_alloc(hist->dccprxh_slab,
144 prio); 145 prio);
@@ -156,18 +157,8 @@ static inline struct dccp_rx_hist_entry *
156 return entry; 157 return entry;
157} 158}
158 159
159static inline void dccp_rx_hist_entry_delete(struct dccp_rx_hist *hist,
160 struct dccp_rx_hist_entry *entry)
161{
162 if (entry != NULL)
163 kmem_cache_free(hist->dccprxh_slab, entry);
164}
165
166extern void dccp_rx_hist_purge(struct dccp_rx_hist *hist,
167 struct list_head *list);
168
169static inline struct dccp_rx_hist_entry * 160static inline struct dccp_rx_hist_entry *
170 dccp_rx_hist_head(struct list_head *list) 161 dccp_rx_hist_head(struct list_head *list)
171{ 162{
172 struct dccp_rx_hist_entry *head = NULL; 163 struct dccp_rx_hist_entry *head = NULL;
173 164
@@ -177,6 +168,27 @@ static inline struct dccp_rx_hist_entry *
177 return head; 168 return head;
178} 169}
179 170
171extern int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq,
172 u8 *ccval);
173extern struct dccp_rx_hist_entry *
174 dccp_rx_hist_find_data_packet(const struct list_head *list);
175
176extern void dccp_rx_hist_add_packet(struct dccp_rx_hist *hist,
177 struct list_head *rx_list,
178 struct list_head *li_list,
179 struct dccp_rx_hist_entry *packet,
180 u64 nonloss_seqno);
181
182static inline void dccp_rx_hist_entry_delete(struct dccp_rx_hist *hist,
183 struct dccp_rx_hist_entry *entry)
184{
185 if (entry != NULL)
186 kmem_cache_free(hist->dccprxh_slab, entry);
187}
188
189extern void dccp_rx_hist_purge(struct dccp_rx_hist *hist,
190 struct list_head *list);
191
180static inline int 192static inline int
181 dccp_rx_hist_entry_data_packet(const struct dccp_rx_hist_entry *entry) 193 dccp_rx_hist_entry_data_packet(const struct dccp_rx_hist_entry *entry)
182{ 194{
@@ -184,12 +196,6 @@ static inline int
184 entry->dccphrx_type == DCCP_PKT_DATAACK; 196 entry->dccphrx_type == DCCP_PKT_DATAACK;
185} 197}
186 198
187extern void dccp_rx_hist_add_packet(struct dccp_rx_hist *hist,
188 struct list_head *rx_list,
189 struct list_head *li_list,
190 struct dccp_rx_hist_entry *packet,
191 u64 nonloss_seqno);
192
193extern u64 dccp_rx_hist_detect_loss(struct list_head *rx_list, 199extern u64 dccp_rx_hist_detect_loss(struct list_head *rx_list,
194 struct list_head *li_list, u8 *win_loss); 200 struct list_head *li_list, u8 *win_loss);
195 201
diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h
index 45f30f59ea2a..faf5f7e219e3 100644
--- a/net/dccp/ccids/lib/tfrc.h
+++ b/net/dccp/ccids/lib/tfrc.h
@@ -13,8 +13,29 @@
13 * the Free Software Foundation; either version 2 of the License, or 13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version. 14 * (at your option) any later version.
15 */ 15 */
16
17#include <linux/types.h> 16#include <linux/types.h>
17#include <asm/div64.h>
18
19/* integer-arithmetic divisions of type (a * 1000000)/b */
20static inline u64 scaled_div(u64 a, u32 b)
21{
22 BUG_ON(b==0);
23 a *= 1000000;
24 do_div(a, b);
25 return a;
26}
27
28static inline u32 scaled_div32(u64 a, u32 b)
29{
30 u64 result = scaled_div(a, b);
31
32 if (result > UINT_MAX) {
33 DCCP_CRIT("Overflow: a(%llu)/b(%u) > ~0U",
34 (unsigned long long)a, b);
35 return UINT_MAX;
36 }
37 return result;
38}
18 39
19extern u32 tfrc_calc_x(u16 s, u32 R, u32 p); 40extern u32 tfrc_calc_x(u16 s, u32 R, u32 p);
20extern u32 tfrc_calc_x_reverse_lookup(u32 fvalue); 41extern u32 tfrc_calc_x_reverse_lookup(u32 fvalue);
diff --git a/net/dccp/ccids/lib/tfrc_equation.c b/net/dccp/ccids/lib/tfrc_equation.c
index ddac2c511e2f..90009fd77e15 100644
--- a/net/dccp/ccids/lib/tfrc_equation.c
+++ b/net/dccp/ccids/lib/tfrc_equation.c
@@ -13,7 +13,6 @@
13 */ 13 */
14 14
15#include <linux/module.h> 15#include <linux/module.h>
16#include <asm/div64.h>
17#include "../../dccp.h" 16#include "../../dccp.h"
18#include "tfrc.h" 17#include "tfrc.h"
19 18
@@ -616,15 +615,12 @@ static inline u32 tfrc_binsearch(u32 fval, u8 small)
616 * @R: RTT scaled by 1000000 (i.e., microseconds) 615 * @R: RTT scaled by 1000000 (i.e., microseconds)
617 * @p: loss ratio estimate scaled by 1000000 616 * @p: loss ratio estimate scaled by 1000000
618 * Returns X_calc in bytes per second (not scaled). 617 * Returns X_calc in bytes per second (not scaled).
619 *
620 * Note: DO NOT alter this code unless you run test cases against it,
621 * as the code has been optimized to stop underflow/overflow.
622 */ 618 */
623u32 tfrc_calc_x(u16 s, u32 R, u32 p) 619u32 tfrc_calc_x(u16 s, u32 R, u32 p)
624{ 620{
625 int index; 621 u16 index;
626 u32 f; 622 u32 f;
627 u64 tmp1, tmp2; 623 u64 result;
628 624
629 /* check against invalid parameters and divide-by-zero */ 625 /* check against invalid parameters and divide-by-zero */
630 BUG_ON(p > 1000000); /* p must not exceed 100% */ 626 BUG_ON(p > 1000000); /* p must not exceed 100% */
@@ -650,15 +646,17 @@ u32 tfrc_calc_x(u16 s, u32 R, u32 p)
650 f = tfrc_calc_x_lookup[index][0]; 646 f = tfrc_calc_x_lookup[index][0];
651 } 647 }
652 648
653 /* The following computes X = s/(R*f(p)) in bytes per second. Since f(p) 649 /*
654 * and R are both scaled by 1000000, we need to multiply by 1000000^2. 650 * Compute X = s/(R*f(p)) in bytes per second.
655 * ==> DO NOT alter this unless you test against overflow on 32 bit */ 651 * Since f(p) and R are both scaled by 1000000, we need to multiply by
656 tmp1 = ((u64)s * 100000000); 652 * 1000000^2. To avoid overflow, the result is computed in two stages.
657 tmp2 = ((u64)R * (u64)f); 653 * This works under almost all reasonable operational conditions, for a
658 do_div(tmp2, 10000); 654 * wide range of parameters. Yet, should some strange combination of
659 do_div(tmp1, tmp2); 655 * parameters result in overflow, the use of scaled_div32 will catch
660 656 * this and return UINT_MAX - which is a logically adequate consequence.
661 return (u32)tmp1; 657 */
658 result = scaled_div(s, R);
659 return scaled_div32(result, f);
662} 660}
663 661
664EXPORT_SYMBOL_GPL(tfrc_calc_x); 662EXPORT_SYMBOL_GPL(tfrc_calc_x);
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 68886986c8e4..a0900bf98e6b 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -80,8 +80,6 @@ extern void dccp_time_wait(struct sock *sk, int state, int timeo);
80 80
81#define DCCP_RTO_MAX ((unsigned)(120 * HZ)) /* FIXME: using TCP value */ 81#define DCCP_RTO_MAX ((unsigned)(120 * HZ)) /* FIXME: using TCP value */
82 82
83#define DCCP_XMIT_TIMEO 30000 /* Time/msecs for blocking transmit per packet */
84
85/* sysctl variables for DCCP */ 83/* sysctl variables for DCCP */
86extern int sysctl_dccp_request_retries; 84extern int sysctl_dccp_request_retries;
87extern int sysctl_dccp_retries1; 85extern int sysctl_dccp_retries1;
@@ -434,6 +432,7 @@ static inline void timeval_sub_usecs(struct timeval *tv,
434 tv->tv_sec--; 432 tv->tv_sec--;
435 tv->tv_usec += USEC_PER_SEC; 433 tv->tv_usec += USEC_PER_SEC;
436 } 434 }
435 DCCP_BUG_ON(tv->tv_sec < 0);
437} 436}
438 437
439#ifdef CONFIG_SYSCTL 438#ifdef CONFIG_SYSCTL
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index 4dc487f27a1f..95b6927ec653 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -329,7 +329,7 @@ static void dccp_feat_empty_confirm(struct dccp_minisock *dmsk,
329 switch (type) { 329 switch (type) {
330 case DCCPO_CHANGE_L: opt->dccpop_type = DCCPO_CONFIRM_R; break; 330 case DCCPO_CHANGE_L: opt->dccpop_type = DCCPO_CONFIRM_R; break;
331 case DCCPO_CHANGE_R: opt->dccpop_type = DCCPO_CONFIRM_L; break; 331 case DCCPO_CHANGE_R: opt->dccpop_type = DCCPO_CONFIRM_L; break;
332 default: DCCP_WARN("invalid type %d\n", type); return; 332 default: DCCP_WARN("invalid type %d\n", type); return;
333 333
334 } 334 }
335 opt->dccpop_feat = feature; 335 opt->dccpop_feat = feature;
@@ -427,7 +427,7 @@ int dccp_feat_confirm_recv(struct sock *sk, u8 type, u8 feature,
427 switch (type) { 427 switch (type) {
428 case DCCPO_CONFIRM_L: t = DCCPO_CHANGE_R; break; 428 case DCCPO_CONFIRM_L: t = DCCPO_CHANGE_R; break;
429 case DCCPO_CONFIRM_R: t = DCCPO_CHANGE_L; break; 429 case DCCPO_CONFIRM_R: t = DCCPO_CHANGE_L; break;
430 default: DCCP_WARN("invalid type %d\n", type); 430 default: DCCP_WARN("invalid type %d\n", type);
431 return 1; 431 return 1;
432 432
433 } 433 }
@@ -610,7 +610,7 @@ const char *dccp_feat_typename(const u8 type)
610 case DCCPO_CHANGE_R: return("ChangeR"); 610 case DCCPO_CHANGE_R: return("ChangeR");
611 case DCCPO_CONFIRM_R: return("ConfirmR"); 611 case DCCPO_CONFIRM_R: return("ConfirmR");
612 /* the following case must not appear in feature negotation */ 612 /* the following case must not appear in feature negotation */
613 default: dccp_pr_debug("unknown type %d [BUG!]\n", type); 613 default: dccp_pr_debug("unknown type %d [BUG!]\n", type);
614 } 614 }
615 return NULL; 615 return NULL;
616} 616}
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 7371a2f3acf4..565bc80557ce 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/dccp/input.c 2 * net/dccp/input.c
3 * 3 *
4 * An implementation of the DCCP protocol 4 * An implementation of the DCCP protocol
5 * Arnaldo Carvalho de Melo <acme@conectiva.com.br> 5 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
6 * 6 *
@@ -82,7 +82,7 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
82 * Otherwise, 82 * Otherwise,
83 * Drop packet and return 83 * Drop packet and return
84 */ 84 */
85 if (dh->dccph_type == DCCP_PKT_SYNC || 85 if (dh->dccph_type == DCCP_PKT_SYNC ||
86 dh->dccph_type == DCCP_PKT_SYNCACK) { 86 dh->dccph_type == DCCP_PKT_SYNCACK) {
87 if (between48(DCCP_SKB_CB(skb)->dccpd_ack_seq, 87 if (between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
88 dp->dccps_awl, dp->dccps_awh) && 88 dp->dccps_awl, dp->dccps_awh) &&
@@ -185,8 +185,8 @@ static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
185 dccp_rcv_close(sk, skb); 185 dccp_rcv_close(sk, skb);
186 return 0; 186 return 0;
187 case DCCP_PKT_REQUEST: 187 case DCCP_PKT_REQUEST:
188 /* Step 7 188 /* Step 7
189 * or (S.is_server and P.type == Response) 189 * or (S.is_server and P.type == Response)
190 * or (S.is_client and P.type == Request) 190 * or (S.is_client and P.type == Request)
191 * or (S.state >= OPEN and P.type == Request 191 * or (S.state >= OPEN and P.type == Request
192 * and P.seqno >= S.OSR) 192 * and P.seqno >= S.OSR)
@@ -248,8 +248,18 @@ int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
248 DCCP_ACKVEC_STATE_RECEIVED)) 248 DCCP_ACKVEC_STATE_RECEIVED))
249 goto discard; 249 goto discard;
250 250
251 ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb); 251 /*
252 ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb); 252 * Deliver to the CCID module in charge.
253 * FIXME: Currently DCCP operates one-directional only, i.e. a listening
254 * server is not at the same time a connecting client. There is
255 * not much sense in delivering to both rx/tx sides at the moment
256 * (only one is active at a time); when moving to bidirectional
257 * service, this needs to be revised.
258 */
259 if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER)
260 ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
261 else
262 ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
253 263
254 return __dccp_rcv_established(sk, skb, dh, len); 264 return __dccp_rcv_established(sk, skb, dh, len);
255discard: 265discard:
@@ -264,7 +274,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
264 const struct dccp_hdr *dh, 274 const struct dccp_hdr *dh,
265 const unsigned len) 275 const unsigned len)
266{ 276{
267 /* 277 /*
268 * Step 4: Prepare sequence numbers in REQUEST 278 * Step 4: Prepare sequence numbers in REQUEST
269 * If S.state == REQUEST, 279 * If S.state == REQUEST,
270 * If (P.type == Response or P.type == Reset) 280 * If (P.type == Response or P.type == Reset)
@@ -332,7 +342,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
332 * from the Response * / 342 * from the Response * /
333 * S.state := PARTOPEN 343 * S.state := PARTOPEN
334 * Set PARTOPEN timer 344 * Set PARTOPEN timer
335 * Continue with S.state == PARTOPEN 345 * Continue with S.state == PARTOPEN
336 * / * Step 12 will send the Ack completing the 346 * / * Step 12 will send the Ack completing the
337 * three-way handshake * / 347 * three-way handshake * /
338 */ 348 */
@@ -363,7 +373,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
363 */ 373 */
364 __kfree_skb(skb); 374 __kfree_skb(skb);
365 return 0; 375 return 0;
366 } 376 }
367 dccp_send_ack(sk); 377 dccp_send_ack(sk);
368 return -1; 378 return -1;
369 } 379 }
@@ -371,7 +381,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
371out_invalid_packet: 381out_invalid_packet:
372 /* dccp_v4_do_rcv will send a reset */ 382 /* dccp_v4_do_rcv will send a reset */
373 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR; 383 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;
374 return 1; 384 return 1;
375} 385}
376 386
377static int dccp_rcv_respond_partopen_state_process(struct sock *sk, 387static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
@@ -478,14 +488,17 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
478 if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) 488 if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
479 dccp_event_ack_recv(sk, skb); 489 dccp_event_ack_recv(sk, skb);
480 490
481 if (dccp_msk(sk)->dccpms_send_ack_vector && 491 if (dccp_msk(sk)->dccpms_send_ack_vector &&
482 dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk, 492 dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
483 DCCP_SKB_CB(skb)->dccpd_seq, 493 DCCP_SKB_CB(skb)->dccpd_seq,
484 DCCP_ACKVEC_STATE_RECEIVED)) 494 DCCP_ACKVEC_STATE_RECEIVED))
485 goto discard; 495 goto discard;
486 496
487 ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb); 497 /* XXX see the comments in dccp_rcv_established about this */
488 ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb); 498 if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER)
499 ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
500 else
501 ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
489 } 502 }
490 503
491 /* 504 /*
@@ -567,7 +580,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
567 } 580 }
568 } 581 }
569 582
570 if (!queued) { 583 if (!queued) {
571discard: 584discard:
572 __kfree_skb(skb); 585 __kfree_skb(skb);
573 } 586 }
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index ff81679c9f17..90c74b4adb73 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -157,7 +157,7 @@ static inline void dccp_do_pmtu_discovery(struct sock *sk,
157 /* We don't check in the destentry if pmtu discovery is forbidden 157 /* We don't check in the destentry if pmtu discovery is forbidden
158 * on this route. We just assume that no packet_to_big packets 158 * on this route. We just assume that no packet_to_big packets
159 * are send back when pmtu discovery is not active. 159 * are send back when pmtu discovery is not active.
160 * There is a small race when the user changes this flag in the 160 * There is a small race when the user changes this flag in the
161 * route, but I think that's acceptable. 161 * route, but I think that's acceptable.
162 */ 162 */
163 if ((dst = __sk_dst_check(sk, 0)) == NULL) 163 if ((dst = __sk_dst_check(sk, 0)) == NULL)
@@ -467,7 +467,7 @@ static struct dst_entry* dccp_v4_route_skb(struct sock *sk,
467 .uli_u = { .ports = 467 .uli_u = { .ports =
468 { .sport = dccp_hdr(skb)->dccph_dport, 468 { .sport = dccp_hdr(skb)->dccph_dport,
469 .dport = dccp_hdr(skb)->dccph_sport } 469 .dport = dccp_hdr(skb)->dccph_sport }
470 } 470 }
471 }; 471 };
472 472
473 security_skb_classify_flow(skb, &fl); 473 security_skb_classify_flow(skb, &fl);
@@ -595,7 +595,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
595 struct inet_request_sock *ireq; 595 struct inet_request_sock *ireq;
596 struct request_sock *req; 596 struct request_sock *req;
597 struct dccp_request_sock *dreq; 597 struct dccp_request_sock *dreq;
598 const __be32 service = dccp_hdr_request(skb)->dccph_req_service; 598 const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
599 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); 599 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
600 __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY; 600 __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;
601 601
@@ -609,7 +609,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
609 if (dccp_bad_service_code(sk, service)) { 609 if (dccp_bad_service_code(sk, service)) {
610 reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE; 610 reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
611 goto drop; 611 goto drop;
612 } 612 }
613 /* 613 /*
614 * TW buckets are converted to open requests without 614 * TW buckets are converted to open requests without
615 * limitations, they conserve resources and peer is 615 * limitations, they conserve resources and peer is
@@ -644,7 +644,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
644 ireq->rmt_addr = skb->nh.iph->saddr; 644 ireq->rmt_addr = skb->nh.iph->saddr;
645 ireq->opt = NULL; 645 ireq->opt = NULL;
646 646
647 /* 647 /*
648 * Step 3: Process LISTEN state 648 * Step 3: Process LISTEN state
649 * 649 *
650 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie 650 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
@@ -846,15 +846,15 @@ static int dccp_v4_rcv(struct sk_buff *skb)
846 } 846 }
847 847
848 /* Step 2: 848 /* Step 2:
849 * Look up flow ID in table and get corresponding socket */ 849 * Look up flow ID in table and get corresponding socket */
850 sk = __inet_lookup(&dccp_hashinfo, 850 sk = __inet_lookup(&dccp_hashinfo,
851 skb->nh.iph->saddr, dh->dccph_sport, 851 skb->nh.iph->saddr, dh->dccph_sport,
852 skb->nh.iph->daddr, dh->dccph_dport, 852 skb->nh.iph->daddr, dh->dccph_dport,
853 inet_iif(skb)); 853 inet_iif(skb));
854 854
855 /* 855 /*
856 * Step 2: 856 * Step 2:
857 * If no socket ... 857 * If no socket ...
858 */ 858 */
859 if (sk == NULL) { 859 if (sk == NULL) {
860 dccp_pr_debug("failed to look up flow ID in table and " 860 dccp_pr_debug("failed to look up flow ID in table and "
@@ -862,9 +862,9 @@ static int dccp_v4_rcv(struct sk_buff *skb)
862 goto no_dccp_socket; 862 goto no_dccp_socket;
863 } 863 }
864 864
865 /* 865 /*
866 * Step 2: 866 * Step 2:
867 * ... or S.state == TIMEWAIT, 867 * ... or S.state == TIMEWAIT,
868 * Generate Reset(No Connection) unless P.type == Reset 868 * Generate Reset(No Connection) unless P.type == Reset
869 * Drop packet and return 869 * Drop packet and return
870 */ 870 */
@@ -876,8 +876,8 @@ static int dccp_v4_rcv(struct sk_buff *skb)
876 876
877 /* 877 /*
878 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage 878 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
879 * o if MinCsCov = 0, only packets with CsCov = 0 are accepted 879 * o if MinCsCov = 0, only packets with CsCov = 0 are accepted
880 * o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov 880 * o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
881 */ 881 */
882 min_cov = dccp_sk(sk)->dccps_pcrlen; 882 min_cov = dccp_sk(sk)->dccps_pcrlen;
883 if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) { 883 if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) {
@@ -900,7 +900,7 @@ no_dccp_socket:
900 goto discard_it; 900 goto discard_it;
901 /* 901 /*
902 * Step 2: 902 * Step 2:
903 * If no socket ... 903 * If no socket ...
904 * Generate Reset(No Connection) unless P.type == Reset 904 * Generate Reset(No Connection) unless P.type == Reset
905 * Drop packet and return 905 * Drop packet and return
906 */ 906 */
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index c7aaa2574f52..6b91a9dd0411 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -77,7 +77,7 @@ static inline void dccp_v6_send_check(struct sock *sk, int unused_value,
77} 77}
78 78
79static inline __u32 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr, 79static inline __u32 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
80 __be16 sport, __be16 dport ) 80 __be16 sport, __be16 dport )
81{ 81{
82 return secure_tcpv6_sequence_number(saddr, daddr, sport, dport); 82 return secure_tcpv6_sequence_number(saddr, daddr, sport, dport);
83} 83}
@@ -329,7 +329,7 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
329 skb = alloc_skb(dccp_v6_ctl_socket->sk->sk_prot->max_header, 329 skb = alloc_skb(dccp_v6_ctl_socket->sk->sk_prot->max_header,
330 GFP_ATOMIC); 330 GFP_ATOMIC);
331 if (skb == NULL) 331 if (skb == NULL)
332 return; 332 return;
333 333
334 skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header); 334 skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header);
335 335
@@ -353,7 +353,7 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
353 353
354 dccp_csum_outgoing(skb); 354 dccp_csum_outgoing(skb);
355 dh->dccph_checksum = dccp_v6_csum_finish(skb, &rxskb->nh.ipv6h->saddr, 355 dh->dccph_checksum = dccp_v6_csum_finish(skb, &rxskb->nh.ipv6h->saddr,
356 &rxskb->nh.ipv6h->daddr); 356 &rxskb->nh.ipv6h->daddr);
357 357
358 memset(&fl, 0, sizeof(fl)); 358 memset(&fl, 0, sizeof(fl));
359 ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr); 359 ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr);
@@ -424,7 +424,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
424 struct dccp_request_sock *dreq; 424 struct dccp_request_sock *dreq;
425 struct inet6_request_sock *ireq6; 425 struct inet6_request_sock *ireq6;
426 struct ipv6_pinfo *np = inet6_sk(sk); 426 struct ipv6_pinfo *np = inet6_sk(sk);
427 const __be32 service = dccp_hdr_request(skb)->dccph_req_service; 427 const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
428 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); 428 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
429 __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY; 429 __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;
430 430
@@ -437,7 +437,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
437 if (dccp_bad_service_code(sk, service)) { 437 if (dccp_bad_service_code(sk, service)) {
438 reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE; 438 reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
439 goto drop; 439 goto drop;
440 } 440 }
441 /* 441 /*
442 * There are no SYN attacks on IPv6, yet... 442 * There are no SYN attacks on IPv6, yet...
443 */ 443 */
@@ -787,7 +787,7 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
787 * otherwise we just shortcircuit this and continue with 787 * otherwise we just shortcircuit this and continue with
788 * the new socket.. 788 * the new socket..
789 */ 789 */
790 if (nsk != sk) { 790 if (nsk != sk) {
791 if (dccp_child_process(sk, nsk, skb)) 791 if (dccp_child_process(sk, nsk, skb))
792 goto reset; 792 goto reset;
793 if (opt_skb != NULL) 793 if (opt_skb != NULL)
@@ -843,14 +843,14 @@ static int dccp_v6_rcv(struct sk_buff **pskb)
843 DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb); 843 DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
844 844
845 /* Step 2: 845 /* Step 2:
846 * Look up flow ID in table and get corresponding socket */ 846 * Look up flow ID in table and get corresponding socket */
847 sk = __inet6_lookup(&dccp_hashinfo, &skb->nh.ipv6h->saddr, 847 sk = __inet6_lookup(&dccp_hashinfo, &skb->nh.ipv6h->saddr,
848 dh->dccph_sport, 848 dh->dccph_sport,
849 &skb->nh.ipv6h->daddr, ntohs(dh->dccph_dport), 849 &skb->nh.ipv6h->daddr, ntohs(dh->dccph_dport),
850 inet6_iif(skb)); 850 inet6_iif(skb));
851 /* 851 /*
852 * Step 2: 852 * Step 2:
853 * If no socket ... 853 * If no socket ...
854 */ 854 */
855 if (sk == NULL) { 855 if (sk == NULL) {
856 dccp_pr_debug("failed to look up flow ID in table and " 856 dccp_pr_debug("failed to look up flow ID in table and "
@@ -860,7 +860,7 @@ static int dccp_v6_rcv(struct sk_buff **pskb)
860 860
861 /* 861 /*
862 * Step 2: 862 * Step 2:
863 * ... or S.state == TIMEWAIT, 863 * ... or S.state == TIMEWAIT,
864 * Generate Reset(No Connection) unless P.type == Reset 864 * Generate Reset(No Connection) unless P.type == Reset
865 * Drop packet and return 865 * Drop packet and return
866 */ 866 */
@@ -872,8 +872,8 @@ static int dccp_v6_rcv(struct sk_buff **pskb)
872 872
873 /* 873 /*
874 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage 874 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
875 * o if MinCsCov = 0, only packets with CsCov = 0 are accepted 875 * o if MinCsCov = 0, only packets with CsCov = 0 are accepted
876 * o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov 876 * o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
877 */ 877 */
878 min_cov = dccp_sk(sk)->dccps_pcrlen; 878 min_cov = dccp_sk(sk)->dccps_pcrlen;
879 if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) { 879 if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) {
@@ -893,7 +893,7 @@ no_dccp_socket:
893 goto discard_it; 893 goto discard_it;
894 /* 894 /*
895 * Step 2: 895 * Step 2:
896 * If no socket ... 896 * If no socket ...
897 * Generate Reset(No Connection) unless P.type == Reset 897 * Generate Reset(No Connection) unless P.type == Reset
898 * Drop packet and return 898 * Drop packet and return
899 */ 899 */
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 4c9e26775f72..6656bb497c7b 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -182,7 +182,7 @@ out_free:
182 182
183EXPORT_SYMBOL_GPL(dccp_create_openreq_child); 183EXPORT_SYMBOL_GPL(dccp_create_openreq_child);
184 184
185/* 185/*
186 * Process an incoming packet for RESPOND sockets represented 186 * Process an incoming packet for RESPOND sockets represented
187 * as an request_sock. 187 * as an request_sock.
188 */ 188 */
diff --git a/net/dccp/options.c b/net/dccp/options.c
index f398b43bc055..c03ba61eb6da 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -557,11 +557,6 @@ int dccp_insert_options(struct sock *sk, struct sk_buff *skb)
557 return -1; 557 return -1;
558 dp->dccps_hc_rx_insert_options = 0; 558 dp->dccps_hc_rx_insert_options = 0;
559 } 559 }
560 if (dp->dccps_hc_tx_insert_options) {
561 if (ccid_hc_tx_insert_options(dp->dccps_hc_tx_ccid, sk, skb))
562 return -1;
563 dp->dccps_hc_tx_insert_options = 0;
564 }
565 560
566 /* Feature negotiation */ 561 /* Feature negotiation */
567 /* Data packets can't do feat negotiation */ 562 /* Data packets can't do feat negotiation */
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 400c30b6fcae..824569659083 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/dccp/output.c 2 * net/dccp/output.c
3 * 3 *
4 * An implementation of the DCCP protocol 4 * An implementation of the DCCP protocol
5 * Arnaldo Carvalho de Melo <acme@conectiva.com.br> 5 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
6 * 6 *
@@ -175,14 +175,12 @@ void dccp_write_space(struct sock *sk)
175/** 175/**
176 * dccp_wait_for_ccid - Wait for ccid to tell us we can send a packet 176 * dccp_wait_for_ccid - Wait for ccid to tell us we can send a packet
177 * @sk: socket to wait for 177 * @sk: socket to wait for
178 * @timeo: for how long
179 */ 178 */
180static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb, 179static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb)
181 long *timeo)
182{ 180{
183 struct dccp_sock *dp = dccp_sk(sk); 181 struct dccp_sock *dp = dccp_sk(sk);
184 DEFINE_WAIT(wait); 182 DEFINE_WAIT(wait);
185 long delay; 183 unsigned long delay;
186 int rc; 184 int rc;
187 185
188 while (1) { 186 while (1) {
@@ -190,8 +188,6 @@ static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb,
190 188
191 if (sk->sk_err) 189 if (sk->sk_err)
192 goto do_error; 190 goto do_error;
193 if (!*timeo)
194 goto do_nonblock;
195 if (signal_pending(current)) 191 if (signal_pending(current))
196 goto do_interrupted; 192 goto do_interrupted;
197 193
@@ -199,12 +195,9 @@ static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb,
199 if (rc <= 0) 195 if (rc <= 0)
200 break; 196 break;
201 delay = msecs_to_jiffies(rc); 197 delay = msecs_to_jiffies(rc);
202 if (delay > *timeo || delay < 0)
203 goto do_nonblock;
204
205 sk->sk_write_pending++; 198 sk->sk_write_pending++;
206 release_sock(sk); 199 release_sock(sk);
207 *timeo -= schedule_timeout(delay); 200 schedule_timeout(delay);
208 lock_sock(sk); 201 lock_sock(sk);
209 sk->sk_write_pending--; 202 sk->sk_write_pending--;
210 } 203 }
@@ -215,11 +208,8 @@ out:
215do_error: 208do_error:
216 rc = -EPIPE; 209 rc = -EPIPE;
217 goto out; 210 goto out;
218do_nonblock:
219 rc = -EAGAIN;
220 goto out;
221do_interrupted: 211do_interrupted:
222 rc = sock_intr_errno(*timeo); 212 rc = -EINTR;
223 goto out; 213 goto out;
224} 214}
225 215
@@ -240,8 +230,6 @@ void dccp_write_xmit(struct sock *sk, int block)
240{ 230{
241 struct dccp_sock *dp = dccp_sk(sk); 231 struct dccp_sock *dp = dccp_sk(sk);
242 struct sk_buff *skb; 232 struct sk_buff *skb;
243 long timeo = DCCP_XMIT_TIMEO; /* If a packet is taking longer than
244 this we have other issues */
245 233
246 while ((skb = skb_peek(&sk->sk_write_queue))) { 234 while ((skb = skb_peek(&sk->sk_write_queue))) {
247 int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb); 235 int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
@@ -251,11 +239,9 @@ void dccp_write_xmit(struct sock *sk, int block)
251 sk_reset_timer(sk, &dp->dccps_xmit_timer, 239 sk_reset_timer(sk, &dp->dccps_xmit_timer,
252 msecs_to_jiffies(err)+jiffies); 240 msecs_to_jiffies(err)+jiffies);
253 break; 241 break;
254 } else { 242 } else
255 err = dccp_wait_for_ccid(sk, skb, &timeo); 243 err = dccp_wait_for_ccid(sk, skb);
256 timeo = DCCP_XMIT_TIMEO; 244 if (err && err != -EINTR)
257 }
258 if (err)
259 DCCP_BUG("err=%d after dccp_wait_for_ccid", err); 245 DCCP_BUG("err=%d after dccp_wait_for_ccid", err);
260 } 246 }
261 247
@@ -281,8 +267,10 @@ void dccp_write_xmit(struct sock *sk, int block)
281 if (err) 267 if (err)
282 DCCP_BUG("err=%d after ccid_hc_tx_packet_sent", 268 DCCP_BUG("err=%d after ccid_hc_tx_packet_sent",
283 err); 269 err);
284 } else 270 } else {
271 dccp_pr_debug("packet discarded\n");
285 kfree(skb); 272 kfree(skb);
273 }
286 } 274 }
287} 275}
288 276
@@ -350,7 +338,6 @@ EXPORT_SYMBOL_GPL(dccp_make_response);
350 338
351static struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst, 339static struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
352 const enum dccp_reset_codes code) 340 const enum dccp_reset_codes code)
353
354{ 341{
355 struct dccp_hdr *dh; 342 struct dccp_hdr *dh;
356 struct dccp_sock *dp = dccp_sk(sk); 343 struct dccp_sock *dp = dccp_sk(sk);
@@ -431,14 +418,14 @@ static inline void dccp_connect_init(struct sock *sk)
431 418
432 dccp_sync_mss(sk, dst_mtu(dst)); 419 dccp_sync_mss(sk, dst_mtu(dst));
433 420
434 /* 421 /*
435 * SWL and AWL are initially adjusted so that they are not less than 422 * SWL and AWL are initially adjusted so that they are not less than
436 * the initial Sequence Numbers received and sent, respectively: 423 * the initial Sequence Numbers received and sent, respectively:
437 * SWL := max(GSR + 1 - floor(W/4), ISR), 424 * SWL := max(GSR + 1 - floor(W/4), ISR),
438 * AWL := max(GSS - W' + 1, ISS). 425 * AWL := max(GSS - W' + 1, ISS).
439 * These adjustments MUST be applied only at the beginning of the 426 * These adjustments MUST be applied only at the beginning of the
440 * connection. 427 * connection.
441 */ 428 */
442 dccp_update_gss(sk, dp->dccps_iss); 429 dccp_update_gss(sk, dp->dccps_iss);
443 dccp_set_seqno(&dp->dccps_awl, max48(dp->dccps_awl, dp->dccps_iss)); 430 dccp_set_seqno(&dp->dccps_awl, max48(dp->dccps_awl, dp->dccps_iss));
444 431
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 5ec47d9ee447..63b3fa20e14b 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -196,7 +196,7 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
196 sk, GFP_KERNEL); 196 sk, GFP_KERNEL);
197 dp->dccps_hc_tx_ccid = ccid_hc_tx_new(dmsk->dccpms_tx_ccid, 197 dp->dccps_hc_tx_ccid = ccid_hc_tx_new(dmsk->dccpms_tx_ccid,
198 sk, GFP_KERNEL); 198 sk, GFP_KERNEL);
199 if (unlikely(dp->dccps_hc_rx_ccid == NULL || 199 if (unlikely(dp->dccps_hc_rx_ccid == NULL ||
200 dp->dccps_hc_tx_ccid == NULL)) { 200 dp->dccps_hc_tx_ccid == NULL)) {
201 ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk); 201 ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
202 ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk); 202 ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
@@ -390,7 +390,7 @@ static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
390 struct dccp_sock *dp = dccp_sk(sk); 390 struct dccp_sock *dp = dccp_sk(sk);
391 struct dccp_service_list *sl = NULL; 391 struct dccp_service_list *sl = NULL;
392 392
393 if (service == DCCP_SERVICE_INVALID_VALUE || 393 if (service == DCCP_SERVICE_INVALID_VALUE ||
394 optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32)) 394 optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
395 return -EINVAL; 395 return -EINVAL;
396 396
@@ -830,7 +830,7 @@ EXPORT_SYMBOL_GPL(inet_dccp_listen);
830static const unsigned char dccp_new_state[] = { 830static const unsigned char dccp_new_state[] = {
831 /* current state: new state: action: */ 831 /* current state: new state: action: */
832 [0] = DCCP_CLOSED, 832 [0] = DCCP_CLOSED,
833 [DCCP_OPEN] = DCCP_CLOSING | DCCP_ACTION_FIN, 833 [DCCP_OPEN] = DCCP_CLOSING | DCCP_ACTION_FIN,
834 [DCCP_REQUESTING] = DCCP_CLOSED, 834 [DCCP_REQUESTING] = DCCP_CLOSED,
835 [DCCP_PARTOPEN] = DCCP_CLOSING | DCCP_ACTION_FIN, 835 [DCCP_PARTOPEN] = DCCP_CLOSING | DCCP_ACTION_FIN,
836 [DCCP_LISTEN] = DCCP_CLOSED, 836 [DCCP_LISTEN] = DCCP_CLOSED,
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index e8f519e7f481..e5348f369c60 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * net/dccp/timer.c 2 * net/dccp/timer.c
3 * 3 *
4 * An implementation of the DCCP protocol 4 * An implementation of the DCCP protocol
5 * Arnaldo Carvalho de Melo <acme@conectiva.com.br> 5 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
6 * 6 *
@@ -102,13 +102,13 @@ static void dccp_retransmit_timer(struct sock *sk)
102 * sk->sk_send_head has to have one skb with 102 * sk->sk_send_head has to have one skb with
103 * DCCP_SKB_CB(skb)->dccpd_type set to one of the retransmittable DCCP 103 * DCCP_SKB_CB(skb)->dccpd_type set to one of the retransmittable DCCP
104 * packet types. The only packets eligible for retransmission are: 104 * packet types. The only packets eligible for retransmission are:
105 * -- Requests in client-REQUEST state (sec. 8.1.1) 105 * -- Requests in client-REQUEST state (sec. 8.1.1)
106 * -- Acks in client-PARTOPEN state (sec. 8.1.5) 106 * -- Acks in client-PARTOPEN state (sec. 8.1.5)
107 * -- CloseReq in server-CLOSEREQ state (sec. 8.3) 107 * -- CloseReq in server-CLOSEREQ state (sec. 8.3)
108 * -- Close in node-CLOSING state (sec. 8.3) */ 108 * -- Close in node-CLOSING state (sec. 8.3) */
109 BUG_TRAP(sk->sk_send_head != NULL); 109 BUG_TRAP(sk->sk_send_head != NULL);
110 110
111 /* 111 /*
112 * More than than 4MSL (8 minutes) has passed, a RESET(aborted) was 112 * More than than 4MSL (8 minutes) has passed, a RESET(aborted) was
113 * sent, no need to retransmit, this sock is dead. 113 * sent, no need to retransmit, this sock is dead.
114 */ 114 */
@@ -200,7 +200,7 @@ static void dccp_keepalive_timer(unsigned long data)
200 /* Only process if socket is not in use. */ 200 /* Only process if socket is not in use. */
201 bh_lock_sock(sk); 201 bh_lock_sock(sk);
202 if (sock_owned_by_user(sk)) { 202 if (sock_owned_by_user(sk)) {
203 /* Try again later. */ 203 /* Try again later. */
204 inet_csk_reset_keepalive_timer(sk, HZ / 20); 204 inet_csk_reset_keepalive_timer(sk, HZ / 20);
205 goto out; 205 goto out;
206 } 206 }
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 0b9d4c955154..fc6f3c023a54 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -167,8 +167,7 @@ static int dn_forwarding_proc(ctl_table *, int, struct file *,
167 void __user *, size_t *, loff_t *); 167 void __user *, size_t *, loff_t *);
168static int dn_forwarding_sysctl(ctl_table *table, int __user *name, int nlen, 168static int dn_forwarding_sysctl(ctl_table *table, int __user *name, int nlen,
169 void __user *oldval, size_t __user *oldlenp, 169 void __user *oldval, size_t __user *oldlenp,
170 void __user *newval, size_t newlen, 170 void __user *newval, size_t newlen);
171 void **context);
172 171
173static struct dn_dev_sysctl_table { 172static struct dn_dev_sysctl_table {
174 struct ctl_table_header *sysctl_header; 173 struct ctl_table_header *sysctl_header;
@@ -347,8 +346,7 @@ static int dn_forwarding_proc(ctl_table *table, int write,
347 346
348static int dn_forwarding_sysctl(ctl_table *table, int __user *name, int nlen, 347static int dn_forwarding_sysctl(ctl_table *table, int __user *name, int nlen,
349 void __user *oldval, size_t __user *oldlenp, 348 void __user *oldval, size_t __user *oldlenp,
350 void __user *newval, size_t newlen, 349 void __user *newval, size_t newlen)
351 void **context)
352{ 350{
353#ifdef CONFIG_DECNET_ROUTER 351#ifdef CONFIG_DECNET_ROUTER
354 struct net_device *dev = table->extra1; 352 struct net_device *dev = table->extra1;
diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
index e246f054f368..a4065eb1341e 100644
--- a/net/decnet/sysctl_net_decnet.c
+++ b/net/decnet/sysctl_net_decnet.c
@@ -134,8 +134,7 @@ static int parse_addr(__le16 *addr, char *str)
134 134
135static int dn_node_address_strategy(ctl_table *table, int __user *name, int nlen, 135static int dn_node_address_strategy(ctl_table *table, int __user *name, int nlen,
136 void __user *oldval, size_t __user *oldlenp, 136 void __user *oldval, size_t __user *oldlenp,
137 void __user *newval, size_t newlen, 137 void __user *newval, size_t newlen)
138 void **context)
139{ 138{
140 size_t len; 139 size_t len;
141 __le16 addr; 140 __le16 addr;
@@ -220,8 +219,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
220 219
221static int dn_def_dev_strategy(ctl_table *table, int __user *name, int nlen, 220static int dn_def_dev_strategy(ctl_table *table, int __user *name, int nlen,
222 void __user *oldval, size_t __user *oldlenp, 221 void __user *oldval, size_t __user *oldlenp,
223 void __user *newval, size_t newlen, 222 void __user *newval, size_t newlen)
224 void **context)
225{ 223{
226 size_t len; 224 size_t len;
227 struct net_device *dev; 225 struct net_device *dev;
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c
index eec1a1dd91da..e3f37fdda65f 100644
--- a/net/ieee80211/softmac/ieee80211softmac_assoc.c
+++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c
@@ -438,7 +438,7 @@ ieee80211softmac_try_reassoc(struct ieee80211softmac_device *mac)
438 438
439 spin_lock_irqsave(&mac->lock, flags); 439 spin_lock_irqsave(&mac->lock, flags);
440 mac->associnfo.associating = 1; 440 mac->associnfo.associating = 1;
441 schedule_work(&mac->associnfo.work); 441 schedule_delayed_work(&mac->associnfo.work, 0);
442 spin_unlock_irqrestore(&mac->lock, flags); 442 spin_unlock_irqrestore(&mac->lock, flags);
443} 443}
444 444
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 2fd899160f85..84bed40273ad 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1303,8 +1303,7 @@ int ipv4_doint_and_flush(ctl_table *ctl, int write,
1303 1303
1304int ipv4_doint_and_flush_strategy(ctl_table *table, int __user *name, int nlen, 1304int ipv4_doint_and_flush_strategy(ctl_table *table, int __user *name, int nlen,
1305 void __user *oldval, size_t __user *oldlenp, 1305 void __user *oldval, size_t __user *oldlenp,
1306 void __user *newval, size_t newlen, 1306 void __user *newval, size_t newlen)
1307 void **context)
1308{ 1307{
1309 int *valp = table->data; 1308 int *valp = table->data;
1310 int new; 1309 int new;
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 74046efdf875..8ce00d3703da 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -565,7 +565,7 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
565 } else { 565 } else {
566 struct sk_buff *free_it = next; 566 struct sk_buff *free_it = next;
567 567
568 /* Old fragmnet is completely overridden with 568 /* Old fragment is completely overridden with
569 * new one drop it. 569 * new one drop it.
570 */ 570 */
571 next = next->next; 571 next = next->next;
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c
index 91a075edd68e..7ea2d981a932 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -657,7 +657,7 @@ static void sync_master_loop(void)
657 if (stop_master_sync) 657 if (stop_master_sync)
658 break; 658 break;
659 659
660 ssleep(1); 660 msleep_interruptible(1000);
661 } 661 }
662 662
663 /* clean up the sync_buff queue */ 663 /* clean up the sync_buff queue */
@@ -714,7 +714,7 @@ static void sync_backup_loop(void)
714 if (stop_backup_sync) 714 if (stop_backup_sync)
715 break; 715 break;
716 716
717 ssleep(1); 717 msleep_interruptible(1000);
718 } 718 }
719 719
720 /* release the sending multicast socket */ 720 /* release the sending multicast socket */
@@ -826,7 +826,7 @@ static int fork_sync_thread(void *startup)
826 if ((pid = kernel_thread(sync_thread, startup, 0)) < 0) { 826 if ((pid = kernel_thread(sync_thread, startup, 0)) < 0) {
827 IP_VS_ERR("could not create sync_thread due to %d... " 827 IP_VS_ERR("could not create sync_thread due to %d... "
828 "retrying.\n", pid); 828 "retrying.\n", pid);
829 ssleep(1); 829 msleep_interruptible(1000);
830 goto repeat; 830 goto repeat;
831 } 831 }
832 832
@@ -849,10 +849,12 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
849 849
850 ip_vs_sync_state |= state; 850 ip_vs_sync_state |= state;
851 if (state == IP_VS_STATE_MASTER) { 851 if (state == IP_VS_STATE_MASTER) {
852 strlcpy(ip_vs_master_mcast_ifn, mcast_ifn, sizeof(ip_vs_master_mcast_ifn)); 852 strlcpy(ip_vs_master_mcast_ifn, mcast_ifn,
853 sizeof(ip_vs_master_mcast_ifn));
853 ip_vs_master_syncid = syncid; 854 ip_vs_master_syncid = syncid;
854 } else { 855 } else {
855 strlcpy(ip_vs_backup_mcast_ifn, mcast_ifn, sizeof(ip_vs_backup_mcast_ifn)); 856 strlcpy(ip_vs_backup_mcast_ifn, mcast_ifn,
857 sizeof(ip_vs_backup_mcast_ifn));
856 ip_vs_backup_syncid = syncid; 858 ip_vs_backup_syncid = syncid;
857 } 859 }
858 860
@@ -860,7 +862,7 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
860 if ((pid = kernel_thread(fork_sync_thread, &startup, 0)) < 0) { 862 if ((pid = kernel_thread(fork_sync_thread, &startup, 0)) < 0) {
861 IP_VS_ERR("could not create fork_sync_thread due to %d... " 863 IP_VS_ERR("could not create fork_sync_thread due to %d... "
862 "retrying.\n", pid); 864 "retrying.\n", pid);
863 ssleep(1); 865 msleep_interruptible(1000);
864 goto repeat; 866 goto repeat;
865 } 867 }
866 868
@@ -880,7 +882,8 @@ int stop_sync_thread(int state)
880 882
881 IP_VS_DBG(7, "%s: pid %d\n", __FUNCTION__, current->pid); 883 IP_VS_DBG(7, "%s: pid %d\n", __FUNCTION__, current->pid);
882 IP_VS_INFO("stopping sync thread %d ...\n", 884 IP_VS_INFO("stopping sync thread %d ...\n",
883 (state == IP_VS_STATE_MASTER) ? sync_master_pid : sync_backup_pid); 885 (state == IP_VS_STATE_MASTER) ?
886 sync_master_pid : sync_backup_pid);
884 887
885 __set_current_state(TASK_UNINTERRUPTIBLE); 888 __set_current_state(TASK_UNINTERRUPTIBLE);
886 add_wait_queue(&stop_sync_wait, &wait); 889 add_wait_queue(&stop_sync_wait, &wait);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 11c167118e87..1aaff0a2e098 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2872,8 +2872,7 @@ static int ipv4_sysctl_rtcache_flush_strategy(ctl_table *table,
2872 void __user *oldval, 2872 void __user *oldval,
2873 size_t __user *oldlenp, 2873 size_t __user *oldlenp,
2874 void __user *newval, 2874 void __user *newval,
2875 size_t newlen, 2875 size_t newlen)
2876 void **context)
2877{ 2876{
2878 int delay; 2877 int delay;
2879 if (newlen != sizeof(int)) 2878 if (newlen != sizeof(int))
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index dfcf47f10f88..fabf69a9108c 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -51,8 +51,7 @@ int ipv4_sysctl_forward(ctl_table *ctl, int write, struct file * filp,
51static int ipv4_sysctl_forward_strategy(ctl_table *table, 51static int ipv4_sysctl_forward_strategy(ctl_table *table,
52 int __user *name, int nlen, 52 int __user *name, int nlen,
53 void __user *oldval, size_t __user *oldlenp, 53 void __user *oldval, size_t __user *oldlenp,
54 void __user *newval, size_t newlen, 54 void __user *newval, size_t newlen)
55 void **context)
56{ 55{
57 int *valp = table->data; 56 int *valp = table->data;
58 int new; 57 int new;
@@ -111,8 +110,7 @@ static int proc_tcp_congestion_control(ctl_table *ctl, int write, struct file *
111static int sysctl_tcp_congestion_control(ctl_table *table, int __user *name, 110static int sysctl_tcp_congestion_control(ctl_table *table, int __user *name,
112 int nlen, void __user *oldval, 111 int nlen, void __user *oldval,
113 size_t __user *oldlenp, 112 size_t __user *oldlenp,
114 void __user *newval, size_t newlen, 113 void __user *newval, size_t newlen)
115 void **context)
116{ 114{
117 char val[TCP_CA_NAME_MAX]; 115 char val[TCP_CA_NAME_MAX];
118 ctl_table tbl = { 116 ctl_table tbl = {
@@ -122,8 +120,7 @@ static int sysctl_tcp_congestion_control(ctl_table *table, int __user *name,
122 int ret; 120 int ret;
123 121
124 tcp_get_default_congestion_control(val); 122 tcp_get_default_congestion_control(val);
125 ret = sysctl_string(&tbl, name, nlen, oldval, oldlenp, newval, newlen, 123 ret = sysctl_string(&tbl, name, nlen, oldval, oldlenp, newval, newlen);
126 context);
127 if (ret == 0 && newval && newlen) 124 if (ret == 0 && newval && newlen)
128 ret = tcp_set_default_congestion_control(val); 125 ret = tcp_set_default_congestion_control(val);
129 return ret; 126 return ret;
@@ -169,8 +166,8 @@ static int proc_allowed_congestion_control(ctl_table *ctl,
169static int strategy_allowed_congestion_control(ctl_table *table, int __user *name, 166static int strategy_allowed_congestion_control(ctl_table *table, int __user *name,
170 int nlen, void __user *oldval, 167 int nlen, void __user *oldval,
171 size_t __user *oldlenp, 168 size_t __user *oldlenp,
172 void __user *newval, size_t newlen, 169 void __user *newval,
173 void **context) 170 size_t newlen)
174{ 171{
175 ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX }; 172 ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
176 int ret; 173 int ret;
@@ -180,8 +177,7 @@ static int strategy_allowed_congestion_control(ctl_table *table, int __user *nam
180 return -ENOMEM; 177 return -ENOMEM;
181 178
182 tcp_get_available_congestion_control(tbl.data, tbl.maxlen); 179 tcp_get_available_congestion_control(tbl.data, tbl.maxlen);
183 ret = sysctl_string(&tbl, name, nlen, oldval, oldlenp, newval, newlen, 180 ret = sysctl_string(&tbl, name, nlen, oldval, oldlenp, newval, newlen);
184 context);
185 if (ret == 0 && newval && newlen) 181 if (ret == 0 && newval && newlen)
186 ret = tcp_set_allowed_congestion_control(tbl.data); 182 ret = tcp_set_allowed_congestion_control(tbl.data);
187 kfree(tbl.data); 183 kfree(tbl.data);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index a5e8d207a51b..9b0a90643151 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3656,8 +3656,7 @@ static int addrconf_sysctl_forward_strategy(ctl_table *table,
3656 int __user *name, int nlen, 3656 int __user *name, int nlen,
3657 void __user *oldval, 3657 void __user *oldval,
3658 size_t __user *oldlenp, 3658 size_t __user *oldlenp,
3659 void __user *newval, size_t newlen, 3659 void __user *newval, size_t newlen)
3660 void **context)
3661{ 3660{
3662 int *valp = table->data; 3661 int *valp = table->data;
3663 int new; 3662 int new;
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 56ea92837307..6a9f616de37d 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1667,8 +1667,7 @@ int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, struct file * f
1667static int ndisc_ifinfo_sysctl_strategy(ctl_table *ctl, int __user *name, 1667static int ndisc_ifinfo_sysctl_strategy(ctl_table *ctl, int __user *name,
1668 int nlen, void __user *oldval, 1668 int nlen, void __user *oldval,
1669 size_t __user *oldlenp, 1669 size_t __user *oldlenp,
1670 void __user *newval, size_t newlen, 1670 void __user *newval, size_t newlen)
1671 void **context)
1672{ 1671{
1673 struct net_device *dev = ctl->extra1; 1672 struct net_device *dev = ctl->extra1;
1674 struct inet6_dev *idev; 1673 struct inet6_dev *idev;
@@ -1681,14 +1680,12 @@ static int ndisc_ifinfo_sysctl_strategy(ctl_table *ctl, int __user *name,
1681 switch (ctl->ctl_name) { 1680 switch (ctl->ctl_name) {
1682 case NET_NEIGH_REACHABLE_TIME: 1681 case NET_NEIGH_REACHABLE_TIME:
1683 ret = sysctl_jiffies(ctl, name, nlen, 1682 ret = sysctl_jiffies(ctl, name, nlen,
1684 oldval, oldlenp, newval, newlen, 1683 oldval, oldlenp, newval, newlen);
1685 context);
1686 break; 1684 break;
1687 case NET_NEIGH_RETRANS_TIME_MS: 1685 case NET_NEIGH_RETRANS_TIME_MS:
1688 case NET_NEIGH_REACHABLE_TIME_MS: 1686 case NET_NEIGH_REACHABLE_TIME_MS:
1689 ret = sysctl_ms_jiffies(ctl, name, nlen, 1687 ret = sysctl_ms_jiffies(ctl, name, nlen,
1690 oldval, oldlenp, newval, newlen, 1688 oldval, oldlenp, newval, newlen);
1691 context);
1692 break; 1689 break;
1693 default: 1690 default:
1694 ret = 0; 1691 ret = 0;
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 700353b330fd..066c64a97fd8 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -804,19 +804,19 @@ unwrap_integ_data(struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
804 804
805 integ_len = svc_getnl(&buf->head[0]); 805 integ_len = svc_getnl(&buf->head[0]);
806 if (integ_len & 3) 806 if (integ_len & 3)
807 goto out; 807 return stat;
808 if (integ_len > buf->len) 808 if (integ_len > buf->len)
809 goto out; 809 return stat;
810 if (xdr_buf_subsegment(buf, &integ_buf, 0, integ_len)) 810 if (xdr_buf_subsegment(buf, &integ_buf, 0, integ_len))
811 BUG(); 811 BUG();
812 /* copy out mic... */ 812 /* copy out mic... */
813 if (read_u32_from_xdr_buf(buf, integ_len, &mic.len)) 813 if (read_u32_from_xdr_buf(buf, integ_len, &mic.len))
814 BUG(); 814 BUG();
815 if (mic.len > RPC_MAX_AUTH_SIZE) 815 if (mic.len > RPC_MAX_AUTH_SIZE)
816 goto out; 816 return stat;
817 mic.data = kmalloc(mic.len, GFP_KERNEL); 817 mic.data = kmalloc(mic.len, GFP_KERNEL);
818 if (!mic.data) 818 if (!mic.data)
819 goto out; 819 return stat;
820 if (read_bytes_from_xdr_buf(buf, integ_len + 4, mic.data, mic.len)) 820 if (read_bytes_from_xdr_buf(buf, integ_len + 4, mic.data, mic.len))
821 goto out; 821 goto out;
822 maj_stat = gss_verify_mic(ctx, &integ_buf, &mic); 822 maj_stat = gss_verify_mic(ctx, &integ_buf, &mic);
@@ -826,6 +826,7 @@ unwrap_integ_data(struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
826 goto out; 826 goto out;
827 stat = 0; 827 stat = 0;
828out: 828out:
829 kfree(mic.data);
829 return stat; 830 return stat;
830} 831}
831 832
@@ -1065,7 +1066,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
1065 } 1066 }
1066 switch(cache_check(&rsi_cache, &rsip->h, &rqstp->rq_chandle)) { 1067 switch(cache_check(&rsi_cache, &rsip->h, &rqstp->rq_chandle)) {
1067 case -EAGAIN: 1068 case -EAGAIN:
1068 goto drop; 1069 case -ETIMEDOUT:
1069 case -ENOENT: 1070 case -ENOENT:
1070 goto drop; 1071 goto drop;
1071 case 0: 1072 case 0:
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 80aff0474572..14274490f92e 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -34,7 +34,7 @@
34 34
35#define RPCDBG_FACILITY RPCDBG_CACHE 35#define RPCDBG_FACILITY RPCDBG_CACHE
36 36
37static void cache_defer_req(struct cache_req *req, struct cache_head *item); 37static int cache_defer_req(struct cache_req *req, struct cache_head *item);
38static void cache_revisit_request(struct cache_head *item); 38static void cache_revisit_request(struct cache_head *item);
39 39
40static void cache_init(struct cache_head *h) 40static void cache_init(struct cache_head *h)
@@ -185,6 +185,7 @@ static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h);
185 * 185 *
186 * Returns 0 if the cache_head can be used, or cache_puts it and returns 186 * Returns 0 if the cache_head can be used, or cache_puts it and returns
187 * -EAGAIN if upcall is pending, 187 * -EAGAIN if upcall is pending,
188 * -ETIMEDOUT if upcall failed and should be retried,
188 * -ENOENT if cache entry was negative 189 * -ENOENT if cache entry was negative
189 */ 190 */
190int cache_check(struct cache_detail *detail, 191int cache_check(struct cache_detail *detail,
@@ -236,7 +237,8 @@ int cache_check(struct cache_detail *detail,
236 } 237 }
237 238
238 if (rv == -EAGAIN) 239 if (rv == -EAGAIN)
239 cache_defer_req(rqstp, h); 240 if (cache_defer_req(rqstp, h) != 0)
241 rv = -ETIMEDOUT;
240 242
241 if (rv) 243 if (rv)
242 cache_put(h, detail); 244 cache_put(h, detail);
@@ -523,14 +525,21 @@ static LIST_HEAD(cache_defer_list);
523static struct list_head cache_defer_hash[DFR_HASHSIZE]; 525static struct list_head cache_defer_hash[DFR_HASHSIZE];
524static int cache_defer_cnt; 526static int cache_defer_cnt;
525 527
526static void cache_defer_req(struct cache_req *req, struct cache_head *item) 528static int cache_defer_req(struct cache_req *req, struct cache_head *item)
527{ 529{
528 struct cache_deferred_req *dreq; 530 struct cache_deferred_req *dreq;
529 int hash = DFR_HASH(item); 531 int hash = DFR_HASH(item);
530 532
533 if (cache_defer_cnt >= DFR_MAX) {
534 /* too much in the cache, randomly drop this one,
535 * or continue and drop the oldest below
536 */
537 if (net_random()&1)
538 return -ETIMEDOUT;
539 }
531 dreq = req->defer(req); 540 dreq = req->defer(req);
532 if (dreq == NULL) 541 if (dreq == NULL)
533 return; 542 return -ETIMEDOUT;
534 543
535 dreq->item = item; 544 dreq->item = item;
536 dreq->recv_time = get_seconds(); 545 dreq->recv_time = get_seconds();
@@ -546,17 +555,8 @@ static void cache_defer_req(struct cache_req *req, struct cache_head *item)
546 /* it is in, now maybe clean up */ 555 /* it is in, now maybe clean up */
547 dreq = NULL; 556 dreq = NULL;
548 if (++cache_defer_cnt > DFR_MAX) { 557 if (++cache_defer_cnt > DFR_MAX) {
549 /* too much in the cache, randomly drop 558 dreq = list_entry(cache_defer_list.prev,
550 * first or last 559 struct cache_deferred_req, recent);
551 */
552 if (net_random()&1)
553 dreq = list_entry(cache_defer_list.next,
554 struct cache_deferred_req,
555 recent);
556 else
557 dreq = list_entry(cache_defer_list.prev,
558 struct cache_deferred_req,
559 recent);
560 list_del(&dreq->recent); 560 list_del(&dreq->recent);
561 list_del(&dreq->hash); 561 list_del(&dreq->hash);
562 cache_defer_cnt--; 562 cache_defer_cnt--;
@@ -571,6 +571,7 @@ static void cache_defer_req(struct cache_req *req, struct cache_head *item)
571 /* must have just been validated... */ 571 /* must have just been validated... */
572 cache_revisit_request(item); 572 cache_revisit_request(item);
573 } 573 }
574 return 0;
574} 575}
575 576
576static void cache_revisit_request(struct cache_head *item) 577static void cache_revisit_request(struct cache_head *item)
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index eb44ec929ca1..f3001f3626f6 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -308,7 +308,7 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
308 308
309 serv->sv_nrpools = npools; 309 serv->sv_nrpools = npools;
310 serv->sv_pools = 310 serv->sv_pools =
311 kcalloc(sizeof(struct svc_pool), serv->sv_nrpools, 311 kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
312 GFP_KERNEL); 312 GFP_KERNEL);
313 if (!serv->sv_pools) { 313 if (!serv->sv_pools) {
314 kfree(serv); 314 kfree(serv);
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index a0a953a430c2..0d1e8fb83b93 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -53,6 +53,10 @@ struct auth_domain *unix_domain_find(char *name)
53 return NULL; 53 return NULL;
54 kref_init(&new->h.ref); 54 kref_init(&new->h.ref);
55 new->h.name = kstrdup(name, GFP_KERNEL); 55 new->h.name = kstrdup(name, GFP_KERNEL);
56 if (new->h.name == NULL) {
57 kfree(new);
58 return NULL;
59 }
56 new->h.flavour = &svcauth_unix; 60 new->h.flavour = &svcauth_unix;
57 new->addr_changes = 0; 61 new->addr_changes = 0;
58 rv = auth_domain_lookup(name, &new->h); 62 rv = auth_domain_lookup(name, &new->h);
@@ -435,6 +439,7 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
435 default: 439 default:
436 BUG(); 440 BUG();
437 case -EAGAIN: 441 case -EAGAIN:
442 case -ETIMEDOUT:
438 return SVC_DROP; 443 return SVC_DROP;
439 case -ENOENT: 444 case -ENOENT:
440 return SVC_DENIED; 445 return SVC_DENIED;
diff --git a/net/tipc/config.c b/net/tipc/config.c
index 458a2c46cef3..baf55c459c8b 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -208,7 +208,7 @@ static void cfg_cmd_event(struct tipc_cmd_msg *msg,
208 208
209 if (mng.link_subscriptions > 64) 209 if (mng.link_subscriptions > 64)
210 break; 210 break;
211 sub = (struct subscr_data *)kmalloc(sizeof(*sub), 211 sub = kmalloc(sizeof(*sub),
212 GFP_ATOMIC); 212 GFP_ATOMIC);
213 if (sub == NULL) { 213 if (sub == NULL) {
214 warn("Memory squeeze; dropped remote link subscription\n"); 214 warn("Memory squeeze; dropped remote link subscription\n");
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index 4f5ff19b992b..f01f8c072852 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -56,6 +56,9 @@ endef
56# gcc support functions 56# gcc support functions
57# See documentation in Documentation/kbuild/makefiles.txt 57# See documentation in Documentation/kbuild/makefiles.txt
58 58
59# output directory for tests below
60TMPOUT := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/)
61
59# as-option 62# as-option
60# Usage: cflags-y += $(call as-option, -Wa$(comma)-isa=foo,) 63# Usage: cflags-y += $(call as-option, -Wa$(comma)-isa=foo,)
61 64
@@ -66,9 +69,11 @@ as-option = $(shell if $(CC) $(CFLAGS) $(1) -Wa,-Z -c -o /dev/null \
66# as-instr 69# as-instr
67# Usage: cflags-y += $(call as-instr, instr, option1, option2) 70# Usage: cflags-y += $(call as-instr, instr, option1, option2)
68 71
69as-instr = $(shell if echo -e "$(1)" | $(AS) >/dev/null 2>&1 -W -Z -o astest$$$$.out ; \ 72as-instr = $(shell if echo -e "$(1)" | \
70 then echo "$(2)"; else echo "$(3)"; fi; \ 73 $(CC) $(AFLAGS) -c -xassembler - \
71 rm -f astest$$$$.out) 74 -o $(TMPOUT)astest$$$$.out > /dev/null 2>&1; \
75 then rm $(TMPOUT)astest$$$$.out; echo "$(2)"; \
76 else echo "$(3)"; fi)
72 77
73# cc-option 78# cc-option
74# Usage: cflags-y += $(call cc-option, -march=winchip-c6, -march=i586) 79# Usage: cflags-y += $(call cc-option, -march=winchip-c6, -march=i586)
@@ -97,10 +102,10 @@ cc-ifversion = $(shell if [ $(call cc-version, $(CC)) $(1) $(2) ]; then \
97 102
98# ld-option 103# ld-option
99# Usage: ldflags += $(call ld-option, -Wl$(comma)--hash-style=both) 104# Usage: ldflags += $(call ld-option, -Wl$(comma)--hash-style=both)
100ld-option = $(shell if $(CC) $(1) \ 105ld-option = $(shell if $(CC) $(1) -nostdlib -xc /dev/null \
101 -nostdlib -o ldtest$$$$.out -xc /dev/null \ 106 -o $(TMPOUT)ldtest$$$$.out > /dev/null 2>&1; \
102 > /dev/null 2>&1; then echo "$(1)"; else echo "$(2)"; fi; \ 107 then rm $(TMPOUT)ldtest$$$$.out; echo "$(1)"; \
103 rm -f ldtest$$$$.out) 108 else echo "$(2)"; fi)
104 109
105### 110###
106# Shorthand for $(Q)$(MAKE) -f scripts/Makefile.build obj= 111# Shorthand for $(Q)$(MAKE) -f scripts/Makefile.build obj=
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index 4dcb8867b5f4..124b341a18c0 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -600,7 +600,7 @@ int main(int ac, char **av)
600 input_mode = ask_silent; 600 input_mode = ask_silent;
601 valid_stdin = 1; 601 valid_stdin = 1;
602 } 602 }
603 } else if (sym_change_count) { 603 } else if (conf_get_changed()) {
604 name = getenv("KCONFIG_NOSILENTUPDATE"); 604 name = getenv("KCONFIG_NOSILENTUPDATE");
605 if (name && *name) { 605 if (name && *name) {
606 fprintf(stderr, _("\n*** Kernel configuration requires explicit update.\n\n")); 606 fprintf(stderr, _("\n*** Kernel configuration requires explicit update.\n\n"));
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index 66b15ef02931..664fe29dacef 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -100,7 +100,7 @@ int conf_read_simple(const char *name, int def)
100 in = zconf_fopen(name); 100 in = zconf_fopen(name);
101 if (in) 101 if (in)
102 goto load; 102 goto load;
103 sym_change_count++; 103 sym_add_change_count(1);
104 if (!sym_defconfig_list) 104 if (!sym_defconfig_list)
105 return 1; 105 return 1;
106 106
@@ -312,7 +312,7 @@ int conf_read(const char *name)
312 struct expr *e; 312 struct expr *e;
313 int i, flags; 313 int i, flags;
314 314
315 sym_change_count = 0; 315 sym_set_change_count(0);
316 316
317 if (conf_read_simple(name, S_DEF_USER)) 317 if (conf_read_simple(name, S_DEF_USER))
318 return 1; 318 return 1;
@@ -364,7 +364,7 @@ int conf_read(const char *name)
364 sym->flags &= flags | ~SYMBOL_DEF_USER; 364 sym->flags &= flags | ~SYMBOL_DEF_USER;
365 } 365 }
366 366
367 sym_change_count += conf_warnings || conf_unsaved; 367 sym_add_change_count(conf_warnings || conf_unsaved);
368 368
369 return 0; 369 return 0;
370} 370}
@@ -432,7 +432,7 @@ int conf_write(const char *name)
432 use_timestamp ? "# " : "", 432 use_timestamp ? "# " : "",
433 use_timestamp ? ctime(&now) : ""); 433 use_timestamp ? ctime(&now) : "");
434 434
435 if (!sym_change_count) 435 if (!conf_get_changed())
436 sym_clear_all_valid(); 436 sym_clear_all_valid();
437 437
438 menu = rootmenu.list; 438 menu = rootmenu.list;
@@ -528,7 +528,7 @@ int conf_write(const char *name)
528 "# configuration written to %s\n" 528 "# configuration written to %s\n"
529 "#\n"), newname); 529 "#\n"), newname);
530 530
531 sym_change_count = 0; 531 sym_set_change_count(0);
532 532
533 return 0; 533 return 0;
534} 534}
@@ -765,3 +765,30 @@ int conf_write_autoconf(void)
765 765
766 return 0; 766 return 0;
767} 767}
768
769static int sym_change_count;
770static void (*conf_changed_callback)(void);
771
772void sym_set_change_count(int count)
773{
774 int _sym_change_count = sym_change_count;
775 sym_change_count = count;
776 if (conf_changed_callback &&
777 (bool)_sym_change_count != (bool)count)
778 conf_changed_callback();
779}
780
781void sym_add_change_count(int count)
782{
783 sym_set_change_count(count + sym_change_count);
784}
785
786bool conf_get_changed(void)
787{
788 return sym_change_count;
789}
790
791void conf_set_changed_callback(void (*fn)(void))
792{
793 conf_changed_callback = fn;
794}
diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c
index 7b0d3a93d5c0..61d8166166ef 100644
--- a/scripts/kconfig/gconf.c
+++ b/scripts/kconfig/gconf.c
@@ -38,8 +38,6 @@ static gboolean show_all = FALSE;
38static gboolean show_debug = FALSE; 38static gboolean show_debug = FALSE;
39static gboolean resizeable = FALSE; 39static gboolean resizeable = FALSE;
40 40
41static gboolean config_changed = FALSE;
42
43static char nohelp_text[] = 41static char nohelp_text[] =
44 N_("Sorry, no help available for this option yet.\n"); 42 N_("Sorry, no help available for this option yet.\n");
45 43
@@ -50,6 +48,8 @@ GtkWidget *text_w = NULL;
50GtkWidget *hpaned = NULL; 48GtkWidget *hpaned = NULL;
51GtkWidget *vpaned = NULL; 49GtkWidget *vpaned = NULL;
52GtkWidget *back_btn = NULL; 50GtkWidget *back_btn = NULL;
51GtkWidget *save_btn = NULL;
52GtkWidget *save_menu_item = NULL;
53 53
54GtkTextTag *tag1, *tag2; 54GtkTextTag *tag1, *tag2;
55GdkColor color; 55GdkColor color;
@@ -75,7 +75,7 @@ static void display_tree_part(void);
75static void update_tree(struct menu *src, GtkTreeIter * dst); 75static void update_tree(struct menu *src, GtkTreeIter * dst);
76static void set_node(GtkTreeIter * node, struct menu *menu, gchar ** row); 76static void set_node(GtkTreeIter * node, struct menu *menu, gchar ** row);
77static gchar **fill_row(struct menu *menu); 77static gchar **fill_row(struct menu *menu);
78 78static void conf_changed(void);
79 79
80/* Helping/Debugging Functions */ 80/* Helping/Debugging Functions */
81 81
@@ -224,6 +224,10 @@ void init_main_window(const gchar * glade_file)
224 gtk_check_menu_item_set_active((GtkCheckMenuItem *) widget, 224 gtk_check_menu_item_set_active((GtkCheckMenuItem *) widget,
225 show_value); 225 show_value);
226 226
227 save_btn = glade_xml_get_widget(xml, "button3");
228 save_menu_item = glade_xml_get_widget(xml, "save1");
229 conf_set_changed_callback(conf_changed);
230
227 style = gtk_widget_get_style(main_wnd); 231 style = gtk_widget_get_style(main_wnd);
228 widget = glade_xml_get_widget(xml, "toolbar1"); 232 widget = glade_xml_get_widget(xml, "toolbar1");
229 233
@@ -512,14 +516,14 @@ static void text_insert_msg(const char *title, const char *message)
512 516
513/* Main Windows Callbacks */ 517/* Main Windows Callbacks */
514 518
515void on_save1_activate(GtkMenuItem * menuitem, gpointer user_data); 519void on_save_activate(GtkMenuItem * menuitem, gpointer user_data);
516gboolean on_window1_delete_event(GtkWidget * widget, GdkEvent * event, 520gboolean on_window1_delete_event(GtkWidget * widget, GdkEvent * event,
517 gpointer user_data) 521 gpointer user_data)
518{ 522{
519 GtkWidget *dialog, *label; 523 GtkWidget *dialog, *label;
520 gint result; 524 gint result;
521 525
522 if (config_changed == FALSE) 526 if (!conf_get_changed())
523 return FALSE; 527 return FALSE;
524 528
525 dialog = gtk_dialog_new_with_buttons(_("Warning !"), 529 dialog = gtk_dialog_new_with_buttons(_("Warning !"),
@@ -543,7 +547,7 @@ gboolean on_window1_delete_event(GtkWidget * widget, GdkEvent * event,
543 result = gtk_dialog_run(GTK_DIALOG(dialog)); 547 result = gtk_dialog_run(GTK_DIALOG(dialog));
544 switch (result) { 548 switch (result) {
545 case GTK_RESPONSE_YES: 549 case GTK_RESPONSE_YES:
546 on_save1_activate(NULL, NULL); 550 on_save_activate(NULL, NULL);
547 return FALSE; 551 return FALSE;
548 case GTK_RESPONSE_NO: 552 case GTK_RESPONSE_NO:
549 return FALSE; 553 return FALSE;
@@ -621,12 +625,10 @@ void on_load1_activate(GtkMenuItem * menuitem, gpointer user_data)
621} 625}
622 626
623 627
624void on_save1_activate(GtkMenuItem * menuitem, gpointer user_data) 628void on_save_activate(GtkMenuItem * menuitem, gpointer user_data)
625{ 629{
626 if (conf_write(NULL)) 630 if (conf_write(NULL))
627 text_insert_msg(_("Error"), _("Unable to save configuration !")); 631 text_insert_msg(_("Error"), _("Unable to save configuration !"));
628
629 config_changed = FALSE;
630} 632}
631 633
632 634
@@ -819,12 +821,6 @@ void on_load_clicked(GtkButton * button, gpointer user_data)
819} 821}
820 822
821 823
822void on_save_clicked(GtkButton * button, gpointer user_data)
823{
824 on_save1_activate(NULL, user_data);
825}
826
827
828void on_single_clicked(GtkButton * button, gpointer user_data) 824void on_single_clicked(GtkButton * button, gpointer user_data)
829{ 825{
830 view_mode = SINGLE_VIEW; 826 view_mode = SINGLE_VIEW;
@@ -899,7 +895,6 @@ static void renderer_edited(GtkCellRendererText * cell,
899 895
900 sym_set_string_value(sym, new_def); 896 sym_set_string_value(sym, new_def);
901 897
902 config_changed = TRUE;
903 update_tree(&rootmenu, NULL); 898 update_tree(&rootmenu, NULL);
904 899
905 gtk_tree_path_free(path); 900 gtk_tree_path_free(path);
@@ -930,7 +925,6 @@ static void change_sym_value(struct menu *menu, gint col)
930 if (!sym_tristate_within_range(sym, newval)) 925 if (!sym_tristate_within_range(sym, newval))
931 newval = yes; 926 newval = yes;
932 sym_set_tristate_value(sym, newval); 927 sym_set_tristate_value(sym, newval);
933 config_changed = TRUE;
934 if (view_mode == FULL_VIEW) 928 if (view_mode == FULL_VIEW)
935 update_tree(&rootmenu, NULL); 929 update_tree(&rootmenu, NULL);
936 else if (view_mode == SPLIT_VIEW) { 930 else if (view_mode == SPLIT_VIEW) {
@@ -1633,3 +1627,10 @@ int main(int ac, char *av[])
1633 1627
1634 return 0; 1628 return 0;
1635} 1629}
1630
1631static void conf_changed(void)
1632{
1633 bool changed = conf_get_changed();
1634 gtk_widget_set_sensitive(save_btn, changed);
1635 gtk_widget_set_sensitive(save_menu_item, changed);
1636}
diff --git a/scripts/kconfig/gconf.glade b/scripts/kconfig/gconf.glade
index f8744ed64967..803233fdd6dd 100644
--- a/scripts/kconfig/gconf.glade
+++ b/scripts/kconfig/gconf.glade
@@ -70,7 +70,7 @@
70 <property name="tooltip" translatable="yes">Save the config in .config</property> 70 <property name="tooltip" translatable="yes">Save the config in .config</property>
71 <property name="label" translatable="yes">_Save</property> 71 <property name="label" translatable="yes">_Save</property>
72 <property name="use_underline">True</property> 72 <property name="use_underline">True</property>
73 <signal name="activate" handler="on_save1_activate"/> 73 <signal name="activate" handler="on_save_activate"/>
74 <accelerator key="S" modifiers="GDK_CONTROL_MASK" signal="activate"/> 74 <accelerator key="S" modifiers="GDK_CONTROL_MASK" signal="activate"/>
75 75
76 <child internal-child="image"> 76 <child internal-child="image">
@@ -380,7 +380,7 @@
380 <property name="visible_horizontal">True</property> 380 <property name="visible_horizontal">True</property>
381 <property name="visible_vertical">True</property> 381 <property name="visible_vertical">True</property>
382 <property name="is_important">False</property> 382 <property name="is_important">False</property>
383 <signal name="clicked" handler="on_save_clicked"/> 383 <signal name="clicked" handler="on_save_activate"/>
384 </widget> 384 </widget>
385 <packing> 385 <packing>
386 <property name="expand">False</property> 386 <property name="expand">False</property>
diff --git a/scripts/kconfig/lkc.h b/scripts/kconfig/lkc.h
index 2628023a1fe1..9b2706a41548 100644
--- a/scripts/kconfig/lkc.h
+++ b/scripts/kconfig/lkc.h
@@ -65,6 +65,8 @@ char *zconf_curname(void);
65 65
66/* confdata.c */ 66/* confdata.c */
67char *conf_get_default_confname(void); 67char *conf_get_default_confname(void);
68void sym_set_change_count(int count);
69void sym_add_change_count(int count);
68 70
69/* kconfig_load.c */ 71/* kconfig_load.c */
70void kconfig_load(void); 72void kconfig_load(void);
diff --git a/scripts/kconfig/lkc_proto.h b/scripts/kconfig/lkc_proto.h
index a263746cfa7d..15030770d1ad 100644
--- a/scripts/kconfig/lkc_proto.h
+++ b/scripts/kconfig/lkc_proto.h
@@ -5,6 +5,8 @@ P(conf_read,int,(const char *name));
5P(conf_read_simple,int,(const char *name, int)); 5P(conf_read_simple,int,(const char *name, int));
6P(conf_write,int,(const char *name)); 6P(conf_write,int,(const char *name));
7P(conf_write_autoconf,int,(void)); 7P(conf_write_autoconf,int,(void));
8P(conf_get_changed,bool,(void));
9P(conf_set_changed_callback, void,(void (*fn)(void)));
8 10
9/* menu.c */ 11/* menu.c */
10P(rootmenu,struct menu,); 12P(rootmenu,struct menu,);
@@ -16,7 +18,6 @@ P(menu_get_parent_menu,struct menu *,(struct menu *menu));
16 18
17/* symbol.c */ 19/* symbol.c */
18P(symbol_hash,struct symbol *,[SYMBOL_HASHSIZE]); 20P(symbol_hash,struct symbol *,[SYMBOL_HASHSIZE]);
19P(sym_change_count,int,);
20 21
21P(sym_lookup,struct symbol *,(const char *name, int isconst)); 22P(sym_lookup,struct symbol *,(const char *name, int isconst));
22P(sym_find,struct symbol *,(const char *name)); 23P(sym_find,struct symbol *,(const char *name));
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
index 08a4c7af93ea..3f9a1321b3e6 100644
--- a/scripts/kconfig/mconf.c
+++ b/scripts/kconfig/mconf.c
@@ -890,14 +890,19 @@ int main(int ac, char **av)
890 do { 890 do {
891 conf(&rootmenu); 891 conf(&rootmenu);
892 dialog_clear(); 892 dialog_clear();
893 res = dialog_yesno(NULL, 893 if (conf_get_changed())
894 _("Do you wish to save your " 894 res = dialog_yesno(NULL,
895 "new kernel configuration?\n" 895 _("Do you wish to save your "
896 "<ESC><ESC> to continue."), 896 "new kernel configuration?\n"
897 6, 60); 897 "<ESC><ESC> to continue."),
898 6, 60);
899 else
900 res = -1;
898 } while (res == KEY_ESC); 901 } while (res == KEY_ESC);
899 end_dialog(); 902 end_dialog();
900 if (res == 0) { 903
904 switch (res) {
905 case 0:
901 if (conf_write(NULL)) { 906 if (conf_write(NULL)) {
902 fprintf(stderr, _("\n\n" 907 fprintf(stderr, _("\n\n"
903 "Error during writing of the kernel configuration.\n" 908 "Error during writing of the kernel configuration.\n"
@@ -905,11 +910,13 @@ int main(int ac, char **av)
905 "\n\n")); 910 "\n\n"));
906 return 1; 911 return 1;
907 } 912 }
913 case -1:
908 printf(_("\n\n" 914 printf(_("\n\n"
909 "*** End of Linux kernel configuration.\n" 915 "*** End of Linux kernel configuration.\n"
910 "*** Execute 'make' to build the kernel or try 'make help'." 916 "*** Execute 'make' to build the kernel or try 'make help'."
911 "\n\n")); 917 "\n\n"));
912 } else { 918 break;
919 default:
913 fprintf(stderr, _("\n\n" 920 fprintf(stderr, _("\n\n"
914 "Your kernel configuration changes were NOT saved." 921 "Your kernel configuration changes were NOT saved."
915 "\n\n")); 922 "\n\n"));
diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
index f5628c57640b..0b2fcc417f59 100644
--- a/scripts/kconfig/qconf.cc
+++ b/scripts/kconfig/qconf.cc
@@ -38,6 +38,8 @@
38static QApplication *configApp; 38static QApplication *configApp;
39static ConfigSettings *configSettings; 39static ConfigSettings *configSettings;
40 40
41QAction *ConfigMainWindow::saveAction;
42
41static inline QString qgettext(const char* str) 43static inline QString qgettext(const char* str)
42{ 44{
43 return QString::fromLocal8Bit(gettext(str)); 45 return QString::fromLocal8Bit(gettext(str));
@@ -1306,8 +1308,11 @@ ConfigMainWindow::ConfigMainWindow(void)
1306 connect(quitAction, SIGNAL(activated()), SLOT(close())); 1308 connect(quitAction, SIGNAL(activated()), SLOT(close()));
1307 QAction *loadAction = new QAction("Load", QPixmap(xpm_load), "&Load", CTRL+Key_L, this); 1309 QAction *loadAction = new QAction("Load", QPixmap(xpm_load), "&Load", CTRL+Key_L, this);
1308 connect(loadAction, SIGNAL(activated()), SLOT(loadConfig())); 1310 connect(loadAction, SIGNAL(activated()), SLOT(loadConfig()));
1309 QAction *saveAction = new QAction("Save", QPixmap(xpm_save), "&Save", CTRL+Key_S, this); 1311 saveAction = new QAction("Save", QPixmap(xpm_save), "&Save", CTRL+Key_S, this);
1310 connect(saveAction, SIGNAL(activated()), SLOT(saveConfig())); 1312 connect(saveAction, SIGNAL(activated()), SLOT(saveConfig()));
1313 conf_set_changed_callback(conf_changed);
1314 // Set saveAction's initial state
1315 conf_changed();
1311 QAction *saveAsAction = new QAction("Save As...", "Save &As...", 0, this); 1316 QAction *saveAsAction = new QAction("Save As...", "Save &As...", 0, this);
1312 connect(saveAsAction, SIGNAL(activated()), SLOT(saveConfigAs())); 1317 connect(saveAsAction, SIGNAL(activated()), SLOT(saveConfigAs()));
1313 QAction *searchAction = new QAction("Search", "&Search", CTRL+Key_F, this); 1318 QAction *searchAction = new QAction("Search", "&Search", CTRL+Key_F, this);
@@ -1585,7 +1590,7 @@ void ConfigMainWindow::showFullView(void)
1585 */ 1590 */
1586void ConfigMainWindow::closeEvent(QCloseEvent* e) 1591void ConfigMainWindow::closeEvent(QCloseEvent* e)
1587{ 1592{
1588 if (!sym_change_count) { 1593 if (!conf_get_changed()) {
1589 e->accept(); 1594 e->accept();
1590 return; 1595 return;
1591 } 1596 }
@@ -1658,6 +1663,12 @@ void ConfigMainWindow::saveSettings(void)
1658 configSettings->writeSizes("/split2", split2->sizes()); 1663 configSettings->writeSizes("/split2", split2->sizes());
1659} 1664}
1660 1665
1666void ConfigMainWindow::conf_changed(void)
1667{
1668 if (saveAction)
1669 saveAction->setEnabled(conf_get_changed());
1670}
1671
1661void fixup_rootmenu(struct menu *menu) 1672void fixup_rootmenu(struct menu *menu)
1662{ 1673{
1663 struct menu *child; 1674 struct menu *child;
diff --git a/scripts/kconfig/qconf.h b/scripts/kconfig/qconf.h
index 6a9e3b14c227..6fc1c5f14425 100644
--- a/scripts/kconfig/qconf.h
+++ b/scripts/kconfig/qconf.h
@@ -297,6 +297,9 @@ protected:
297 297
298class ConfigMainWindow : public QMainWindow { 298class ConfigMainWindow : public QMainWindow {
299 Q_OBJECT 299 Q_OBJECT
300
301 static QAction *saveAction;
302 static void conf_changed(void);
300public: 303public:
301 ConfigMainWindow(void); 304 ConfigMainWindow(void);
302public slots: 305public slots:
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index ee225ced2ce4..8f06c474d800 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -30,7 +30,6 @@ struct symbol symbol_yes = {
30 .flags = SYMBOL_VALID, 30 .flags = SYMBOL_VALID,
31}; 31};
32 32
33int sym_change_count;
34struct symbol *sym_defconfig_list; 33struct symbol *sym_defconfig_list;
35struct symbol *modules_sym; 34struct symbol *modules_sym;
36tristate modules_val; 35tristate modules_val;
@@ -379,7 +378,7 @@ void sym_clear_all_valid(void)
379 378
380 for_all_symbols(i, sym) 379 for_all_symbols(i, sym)
381 sym->flags &= ~SYMBOL_VALID; 380 sym->flags &= ~SYMBOL_VALID;
382 sym_change_count++; 381 sym_add_change_count(1);
383 if (modules_sym) 382 if (modules_sym)
384 sym_calc_value(modules_sym); 383 sym_calc_value(modules_sym);
385} 384}
diff --git a/scripts/kconfig/zconf.tab.c_shipped b/scripts/kconfig/zconf.tab.c_shipped
index 2fb0a4fc61d0..d777fe85627f 100644
--- a/scripts/kconfig/zconf.tab.c_shipped
+++ b/scripts/kconfig/zconf.tab.c_shipped
@@ -2135,7 +2135,7 @@ void conf_parse(const char *name)
2135 sym_check_deps(sym); 2135 sym_check_deps(sym);
2136 } 2136 }
2137 2137
2138 sym_change_count = 1; 2138 sym_set_change_count(1);
2139} 2139}
2140 2140
2141const char *zconf_tokenname(int token) 2141const char *zconf_tokenname(int token)
diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y
index ab44feb3c600..04a5864c03b1 100644
--- a/scripts/kconfig/zconf.y
+++ b/scripts/kconfig/zconf.y
@@ -504,7 +504,7 @@ void conf_parse(const char *name)
504 sym_check_deps(sym); 504 sym_check_deps(sym);
505 } 505 }
506 506
507 sym_change_count = 1; 507 sym_set_change_count(1);
508} 508}
509 509
510const char *zconf_tokenname(int token) 510const char *zconf_tokenname(int token)
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index ac0a58222992..15ab5d02e80a 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -997,6 +997,7 @@ static int exit_section_ref_ok(const char *name)
997 "__bug_table", /* used by powerpc for BUG() */ 997 "__bug_table", /* used by powerpc for BUG() */
998 ".exitcall.exit", 998 ".exitcall.exit",
999 ".eh_frame", 999 ".eh_frame",
1000 ".parainstructions",
1000 ".stab", 1001 ".stab",
1001 "__ex_table", 1002 "__ex_table",
1002 ".fixup", 1003 ".fixup",
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 3753416eb9b9..65fb5e8ea941 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1734,7 +1734,7 @@ static inline void flush_unauthorized_files(struct files_struct * files)
1734 j++; 1734 j++;
1735 i = j * __NFDBITS; 1735 i = j * __NFDBITS;
1736 fdt = files_fdtable(files); 1736 fdt = files_fdtable(files);
1737 if (i >= fdt->max_fds || i >= fdt->max_fdset) 1737 if (i >= fdt->max_fds)
1738 break; 1738 break;
1739 set = fdt->open_fds->fds_bits[j]; 1739 set = fdt->open_fds->fds_bits[j];
1740 if (!set) 1740 if (!set)
diff --git a/sound/Kconfig b/sound/Kconfig
index 95949b6806ac..9d77300746c6 100644
--- a/sound/Kconfig
+++ b/sound/Kconfig
@@ -93,4 +93,12 @@ endmenu
93 93
94endif 94endif
95 95
96config AC97_BUS
97 tristate
98 help
99 This is used to avoid config and link hard dependencies between the
100 sound subsystem and other function drivers completely unrelated to
101 sound although they're sharing the AC97 bus. Concerned drivers
102 should "select" this.
103
96endmenu 104endmenu
diff --git a/sound/Makefile b/sound/Makefile
index 5f6bef57e825..9aee54c4882d 100644
--- a/sound/Makefile
+++ b/sound/Makefile
@@ -8,6 +8,9 @@ obj-$(CONFIG_DMASOUND) += oss/
8obj-$(CONFIG_SND) += core/ i2c/ drivers/ isa/ pci/ ppc/ arm/ synth/ usb/ sparc/ parisc/ pcmcia/ mips/ 8obj-$(CONFIG_SND) += core/ i2c/ drivers/ isa/ pci/ ppc/ arm/ synth/ usb/ sparc/ parisc/ pcmcia/ mips/
9obj-$(CONFIG_SND_AOA) += aoa/ 9obj-$(CONFIG_SND_AOA) += aoa/
10 10
11# This one must be compilable even if sound is configured out
12obj-$(CONFIG_AC97_BUS) += ac97_bus.o
13
11ifeq ($(CONFIG_SND),y) 14ifeq ($(CONFIG_SND),y)
12 obj-y += last.o 15 obj-y += last.o
13endif 16endif
diff --git a/sound/pci/ac97/ac97_bus.c b/sound/ac97_bus.c
index 66de2c2f1554..66de2c2f1554 100644
--- a/sound/pci/ac97/ac97_bus.c
+++ b/sound/ac97_bus.c
diff --git a/sound/aoa/fabrics/Kconfig b/sound/aoa/fabrics/Kconfig
index c3bc7705c86a..50d7021ff677 100644
--- a/sound/aoa/fabrics/Kconfig
+++ b/sound/aoa/fabrics/Kconfig
@@ -1,6 +1,6 @@
1config SND_AOA_FABRIC_LAYOUT 1config SND_AOA_FABRIC_LAYOUT
2 tristate "layout-id fabric" 2 tristate "layout-id fabric"
3 depends SND_AOA 3 depends on SND_AOA
4 select SND_AOA_SOUNDBUS 4 select SND_AOA_SOUNDBUS
5 select SND_AOA_SOUNDBUS_I2S 5 select SND_AOA_SOUNDBUS_I2S
6 ---help--- 6 ---help---
diff --git a/sound/core/oss/mixer_oss.c b/sound/core/oss/mixer_oss.c
index f4c67042e3ac..3391f2a9b4d1 100644
--- a/sound/core/oss/mixer_oss.c
+++ b/sound/core/oss/mixer_oss.c
@@ -1023,7 +1023,7 @@ static int snd_mixer_oss_build_input(struct snd_mixer_oss *mixer, struct snd_mix
1023 } 1023 }
1024 up_read(&mixer->card->controls_rwsem); 1024 up_read(&mixer->card->controls_rwsem);
1025 if (slot.present != 0) { 1025 if (slot.present != 0) {
1026 pslot = (struct slot *)kmalloc(sizeof(slot), GFP_KERNEL); 1026 pslot = kmalloc(sizeof(slot), GFP_KERNEL);
1027 if (! pslot) 1027 if (! pslot)
1028 return -ENOMEM; 1028 return -ENOMEM;
1029 *pslot = slot; 1029 *pslot = slot;
diff --git a/sound/drivers/Kconfig b/sound/drivers/Kconfig
index 7971285dfd5b..40ebd2f44056 100644
--- a/sound/drivers/Kconfig
+++ b/sound/drivers/Kconfig
@@ -26,11 +26,7 @@ config SND_VX_LIB
26config SND_AC97_CODEC 26config SND_AC97_CODEC
27 tristate 27 tristate
28 select SND_PCM 28 select SND_PCM
29 select SND_AC97_BUS 29 select AC97_BUS
30
31config SND_AC97_BUS
32 tristate
33
34 30
35config SND_DUMMY 31config SND_DUMMY
36 tristate "Dummy (/dev/null) soundcard" 32 tristate "Dummy (/dev/null) soundcard"
diff --git a/sound/oss/ad1848.c b/sound/oss/ad1848.c
index 0ffa9970bf0f..7cf9913a47b2 100644
--- a/sound/oss/ad1848.c
+++ b/sound/oss/ad1848.c
@@ -1992,7 +1992,7 @@ int ad1848_init (char *name, struct resource *ports, int irq, int dma_playback,
1992 devc->audio_flags |= DMA_DUPLEX; 1992 devc->audio_flags |= DMA_DUPLEX;
1993 } 1993 }
1994 1994
1995 portc = (ad1848_port_info *) kmalloc(sizeof(ad1848_port_info), GFP_KERNEL); 1995 portc = kmalloc(sizeof(ad1848_port_info), GFP_KERNEL);
1996 if(portc==NULL) { 1996 if(portc==NULL) {
1997 release_region(devc->base, 4); 1997 release_region(devc->base, 4);
1998 return -1; 1998 return -1;
diff --git a/sound/oss/cs4232.c b/sound/oss/cs4232.c
index b6924c7f1484..de40e21bf279 100644
--- a/sound/oss/cs4232.c
+++ b/sound/oss/cs4232.c
@@ -408,7 +408,7 @@ static int __init cs4232_pnp_probe(struct pnp_dev *dev, const struct pnp_device_
408{ 408{
409 struct address_info *isapnpcfg; 409 struct address_info *isapnpcfg;
410 410
411 isapnpcfg=(struct address_info*)kmalloc(sizeof(*isapnpcfg),GFP_KERNEL); 411 isapnpcfg = kmalloc(sizeof(*isapnpcfg),GFP_KERNEL);
412 if (!isapnpcfg) 412 if (!isapnpcfg)
413 return -ENOMEM; 413 return -ENOMEM;
414 414
diff --git a/sound/oss/emu10k1/audio.c b/sound/oss/emu10k1/audio.c
index 49f902f35c28..efcf589d7083 100644
--- a/sound/oss/emu10k1/audio.c
+++ b/sound/oss/emu10k1/audio.c
@@ -1139,7 +1139,7 @@ static int emu10k1_audio_open(struct inode *inode, struct file *file)
1139 1139
1140match: 1140match:
1141 1141
1142 wave_dev = (struct emu10k1_wavedevice *) kmalloc(sizeof(struct emu10k1_wavedevice), GFP_KERNEL); 1142 wave_dev = kmalloc(sizeof(struct emu10k1_wavedevice), GFP_KERNEL);
1143 1143
1144 if (wave_dev == NULL) { 1144 if (wave_dev == NULL) {
1145 ERROR(); 1145 ERROR();
@@ -1155,7 +1155,7 @@ match:
1155 /* Recording */ 1155 /* Recording */
1156 struct wiinst *wiinst; 1156 struct wiinst *wiinst;
1157 1157
1158 if ((wiinst = (struct wiinst *) kmalloc(sizeof(struct wiinst), GFP_KERNEL)) == NULL) { 1158 if ((wiinst = kmalloc(sizeof(struct wiinst), GFP_KERNEL)) == NULL) {
1159 ERROR(); 1159 ERROR();
1160 kfree(wave_dev); 1160 kfree(wave_dev);
1161 return -ENOMEM; 1161 return -ENOMEM;
@@ -1211,7 +1211,7 @@ match:
1211 struct woinst *woinst; 1211 struct woinst *woinst;
1212 int i; 1212 int i;
1213 1213
1214 if ((woinst = (struct woinst *) kmalloc(sizeof(struct woinst), GFP_KERNEL)) == NULL) { 1214 if ((woinst = kmalloc(sizeof(struct woinst), GFP_KERNEL)) == NULL) {
1215 ERROR(); 1215 ERROR();
1216 kfree(wave_dev); 1216 kfree(wave_dev);
1217 return -ENOMEM; 1217 return -ENOMEM;
diff --git a/sound/oss/emu10k1/cardmi.c b/sound/oss/emu10k1/cardmi.c
index 0545814cc67d..57674f8c8a2e 100644
--- a/sound/oss/emu10k1/cardmi.c
+++ b/sound/oss/emu10k1/cardmi.c
@@ -157,7 +157,7 @@ int emu10k1_mpuin_add_buffer(struct emu10k1_mpuin *card_mpuin, struct midi_hdr *
157 midihdr->flags |= MIDIBUF_INQUEUE; /* set */ 157 midihdr->flags |= MIDIBUF_INQUEUE; /* set */
158 midihdr->flags &= ~MIDIBUF_DONE; /* clear */ 158 midihdr->flags &= ~MIDIBUF_DONE; /* clear */
159 159
160 if ((midiq = (struct midi_queue *) kmalloc(sizeof(struct midi_queue), GFP_ATOMIC)) == NULL) { 160 if ((midiq = kmalloc(sizeof(struct midi_queue), GFP_ATOMIC)) == NULL) {
161 /* Message lost */ 161 /* Message lost */
162 return -1; 162 return -1;
163 } 163 }
diff --git a/sound/oss/emu10k1/cardmo.c b/sound/oss/emu10k1/cardmo.c
index 5938d31f9e21..a8cc75db3e45 100644
--- a/sound/oss/emu10k1/cardmo.c
+++ b/sound/oss/emu10k1/cardmo.c
@@ -117,7 +117,7 @@ int emu10k1_mpuout_add_buffer(struct emu10k1_card *card, struct midi_hdr *midihd
117 midihdr->flags |= MIDIBUF_INQUEUE; 117 midihdr->flags |= MIDIBUF_INQUEUE;
118 midihdr->flags &= ~MIDIBUF_DONE; 118 midihdr->flags &= ~MIDIBUF_DONE;
119 119
120 if ((midiq = (struct midi_queue *) kmalloc(sizeof(struct midi_queue), GFP_KERNEL)) == NULL) { 120 if ((midiq = kmalloc(sizeof(struct midi_queue), GFP_KERNEL)) == NULL) {
121 /* Message lost */ 121 /* Message lost */
122 return -1; 122 return -1;
123 } 123 }
diff --git a/sound/oss/emu10k1/midi.c b/sound/oss/emu10k1/midi.c
index 8ac77df86397..cca3dad2bdf4 100644
--- a/sound/oss/emu10k1/midi.c
+++ b/sound/oss/emu10k1/midi.c
@@ -58,7 +58,7 @@ static int midiin_add_buffer(struct emu10k1_mididevice *midi_dev, struct midi_hd
58{ 58{
59 struct midi_hdr *midihdr; 59 struct midi_hdr *midihdr;
60 60
61 if ((midihdr = (struct midi_hdr *) kmalloc(sizeof(struct midi_hdr), GFP_KERNEL)) == NULL) { 61 if ((midihdr = kmalloc(sizeof(struct midi_hdr), GFP_KERNEL)) == NULL) {
62 ERROR(); 62 ERROR();
63 return -EINVAL; 63 return -EINVAL;
64 } 64 }
@@ -128,7 +128,7 @@ match:
128 mutex_lock(&card->open_sem); 128 mutex_lock(&card->open_sem);
129 } 129 }
130 130
131 if ((midi_dev = (struct emu10k1_mididevice *) kmalloc(sizeof(*midi_dev), GFP_KERNEL)) == NULL) 131 if ((midi_dev = kmalloc(sizeof(*midi_dev), GFP_KERNEL)) == NULL)
132 return -EINVAL; 132 return -EINVAL;
133 133
134 midi_dev->card = card; 134 midi_dev->card = card;
@@ -328,7 +328,7 @@ static ssize_t emu10k1_midi_write(struct file *file, const char __user *buffer,
328 if (!access_ok(VERIFY_READ, buffer, count)) 328 if (!access_ok(VERIFY_READ, buffer, count))
329 return -EFAULT; 329 return -EFAULT;
330 330
331 if ((midihdr = (struct midi_hdr *) kmalloc(sizeof(struct midi_hdr), GFP_KERNEL)) == NULL) 331 if ((midihdr = kmalloc(sizeof(struct midi_hdr), GFP_KERNEL)) == NULL)
332 return -EINVAL; 332 return -EINVAL;
333 333
334 midihdr->bufferlength = count; 334 midihdr->bufferlength = count;
@@ -490,7 +490,7 @@ int emu10k1_seq_midi_open(int dev, int mode,
490 490
491 DPF(2, "emu10k1_seq_midi_open()\n"); 491 DPF(2, "emu10k1_seq_midi_open()\n");
492 492
493 if ((midi_dev = (struct emu10k1_mididevice *) kmalloc(sizeof(*midi_dev), GFP_KERNEL)) == NULL) 493 if ((midi_dev = kmalloc(sizeof(*midi_dev), GFP_KERNEL)) == NULL)
494 return -EINVAL; 494 return -EINVAL;
495 495
496 midi_dev->card = card; 496 midi_dev->card = card;
@@ -540,7 +540,7 @@ int emu10k1_seq_midi_out(int dev, unsigned char midi_byte)
540 540
541 card = midi_devs[dev]->devc; 541 card = midi_devs[dev]->devc;
542 542
543 if ((midihdr = (struct midi_hdr *) kmalloc(sizeof(struct midi_hdr), GFP_KERNEL)) == NULL) 543 if ((midihdr = kmalloc(sizeof(struct midi_hdr), GFP_KERNEL)) == NULL)
544 return -EINVAL; 544 return -EINVAL;
545 545
546 midihdr->bufferlength = 1; 546 midihdr->bufferlength = 1;
diff --git a/sound/oss/emu10k1/mixer.c b/sound/oss/emu10k1/mixer.c
index cbcaaa34189a..6419796c2ed7 100644
--- a/sound/oss/emu10k1/mixer.c
+++ b/sound/oss/emu10k1/mixer.c
@@ -194,7 +194,7 @@ static int emu10k1_private_mixer(struct emu10k1_card *card, unsigned int cmd, un
194 194
195 case SOUND_MIXER_PRIVATE3: 195 case SOUND_MIXER_PRIVATE3:
196 196
197 ctl = (struct mixer_private_ioctl *) kmalloc(sizeof(struct mixer_private_ioctl), GFP_KERNEL); 197 ctl = kmalloc(sizeof(struct mixer_private_ioctl), GFP_KERNEL);
198 if (ctl == NULL) 198 if (ctl == NULL)
199 return -ENOMEM; 199 return -ENOMEM;
200 200
diff --git a/sound/oss/hal2.c b/sound/oss/hal2.c
index 784bdd707055..d18286ccc14d 100644
--- a/sound/oss/hal2.c
+++ b/sound/oss/hal2.c
@@ -1435,7 +1435,7 @@ static int hal2_init_card(struct hal2_card **phal2, struct hpc3_regs *hpc3)
1435 int ret = 0; 1435 int ret = 0;
1436 struct hal2_card *hal2; 1436 struct hal2_card *hal2;
1437 1437
1438 hal2 = (struct hal2_card *) kmalloc(sizeof(struct hal2_card), GFP_KERNEL); 1438 hal2 = kmalloc(sizeof(struct hal2_card), GFP_KERNEL);
1439 if (!hal2) 1439 if (!hal2)
1440 return -ENOMEM; 1440 return -ENOMEM;
1441 memset(hal2, 0, sizeof(struct hal2_card)); 1441 memset(hal2, 0, sizeof(struct hal2_card));
diff --git a/sound/oss/mpu401.c b/sound/oss/mpu401.c
index e96220541971..2796c0ef985f 100644
--- a/sound/oss/mpu401.c
+++ b/sound/oss/mpu401.c
@@ -1023,7 +1023,7 @@ int attach_mpu401(struct address_info *hw_config, struct module *owner)
1023 devc->capabilities |= MPU_CAP_INTLG; /* Supports intelligent mode */ 1023 devc->capabilities |= MPU_CAP_INTLG; /* Supports intelligent mode */
1024 1024
1025 1025
1026 mpu401_synth_operations[m] = (struct synth_operations *)kmalloc(sizeof(struct synth_operations), GFP_KERNEL); 1026 mpu401_synth_operations[m] = kmalloc(sizeof(struct synth_operations), GFP_KERNEL);
1027 1027
1028 if (mpu401_synth_operations[m] == NULL) 1028 if (mpu401_synth_operations[m] == NULL)
1029 { 1029 {
diff --git a/sound/oss/opl3.c b/sound/oss/opl3.c
index 4799bc77f987..2e8cfa5481f2 100644
--- a/sound/oss/opl3.c
+++ b/sound/oss/opl3.c
@@ -166,7 +166,7 @@ int opl3_detect(int ioaddr, int *osp)
166 return 0; 166 return 0;
167 } 167 }
168 168
169 devc = (struct opl_devinfo *)kmalloc(sizeof(*devc), GFP_KERNEL); 169 devc = kmalloc(sizeof(*devc), GFP_KERNEL);
170 170
171 if (devc == NULL) 171 if (devc == NULL)
172 { 172 {
diff --git a/sound/oss/sb_common.c b/sound/oss/sb_common.c
index 440537c72604..07cbacf63824 100644
--- a/sound/oss/sb_common.c
+++ b/sound/oss/sb_common.c
@@ -625,7 +625,7 @@ int sb_dsp_detect(struct address_info *hw_config, int pci, int pciio, struct sb_
625 */ 625 */
626 626
627 627
628 detected_devc = (sb_devc *)kmalloc(sizeof(sb_devc), GFP_KERNEL); 628 detected_devc = kmalloc(sizeof(sb_devc), GFP_KERNEL);
629 if (detected_devc == NULL) 629 if (detected_devc == NULL)
630 { 630 {
631 printk(KERN_ERR "sb: Can't allocate memory for device information\n"); 631 printk(KERN_ERR "sb: Can't allocate memory for device information\n");
diff --git a/sound/oss/sb_midi.c b/sound/oss/sb_midi.c
index 2e3bc045caba..8b796704e112 100644
--- a/sound/oss/sb_midi.c
+++ b/sound/oss/sb_midi.c
@@ -173,7 +173,7 @@ void sb_dsp_midi_init(sb_devc * devc, struct module *owner)
173 return; 173 return;
174 } 174 }
175 std_midi_synth.midi_dev = devc->my_mididev = dev; 175 std_midi_synth.midi_dev = devc->my_mididev = dev;
176 midi_devs[dev] = (struct midi_operations *)kmalloc(sizeof(struct midi_operations), GFP_KERNEL); 176 midi_devs[dev] = kmalloc(sizeof(struct midi_operations), GFP_KERNEL);
177 if (midi_devs[dev] == NULL) 177 if (midi_devs[dev] == NULL)
178 { 178 {
179 printk(KERN_WARNING "Sound Blaster: failed to allocate MIDI memory.\n"); 179 printk(KERN_WARNING "Sound Blaster: failed to allocate MIDI memory.\n");
@@ -189,7 +189,7 @@ void sb_dsp_midi_init(sb_devc * devc, struct module *owner)
189 midi_devs[dev]->devc = devc; 189 midi_devs[dev]->devc = devc;
190 190
191 191
192 midi_devs[dev]->converter = (struct synth_operations *)kmalloc(sizeof(struct synth_operations), GFP_KERNEL); 192 midi_devs[dev]->converter = kmalloc(sizeof(struct synth_operations), GFP_KERNEL);
193 if (midi_devs[dev]->converter == NULL) 193 if (midi_devs[dev]->converter == NULL)
194 { 194 {
195 printk(KERN_WARNING "Sound Blaster: failed to allocate MIDI memory.\n"); 195 printk(KERN_WARNING "Sound Blaster: failed to allocate MIDI memory.\n");
diff --git a/sound/oss/sb_mixer.c b/sound/oss/sb_mixer.c
index 238e2cf44b08..fad1a4f25ad6 100644
--- a/sound/oss/sb_mixer.c
+++ b/sound/oss/sb_mixer.c
@@ -734,7 +734,7 @@ int sb_mixer_init(sb_devc * devc, struct module *owner)
734 if (m == -1) 734 if (m == -1)
735 return 0; 735 return 0;
736 736
737 mixer_devs[m] = (struct mixer_operations *)kmalloc(sizeof(struct mixer_operations), GFP_KERNEL); 737 mixer_devs[m] = kmalloc(sizeof(struct mixer_operations), GFP_KERNEL);
738 if (mixer_devs[m] == NULL) 738 if (mixer_devs[m] == NULL)
739 { 739 {
740 printk(KERN_ERR "sb_mixer: Can't allocate memory\n"); 740 printk(KERN_ERR "sb_mixer: Can't allocate memory\n");
diff --git a/sound/oss/v_midi.c b/sound/oss/v_midi.c
index d952b2264da1..103940fd5b4f 100644
--- a/sound/oss/v_midi.c
+++ b/sound/oss/v_midi.c
@@ -183,7 +183,7 @@ static void __init attach_v_midi (struct address_info *hw_config)
183 return; 183 return;
184 } 184 }
185 185
186 m=(struct vmidi_memory *)kmalloc(sizeof(struct vmidi_memory), GFP_KERNEL); 186 m = kmalloc(sizeof(struct vmidi_memory), GFP_KERNEL);
187 if (m == NULL) 187 if (m == NULL)
188 { 188 {
189 printk(KERN_WARNING "Loopback MIDI: Failed to allocate memory\n"); 189 printk(KERN_WARNING "Loopback MIDI: Failed to allocate memory\n");
diff --git a/sound/oss/waveartist.c b/sound/oss/waveartist.c
index c5bf363d32c2..26a7c6af95bc 100644
--- a/sound/oss/waveartist.c
+++ b/sound/oss/waveartist.c
@@ -1267,7 +1267,7 @@ static int __init waveartist_init(wavnc_info *devc)
1267 conf_printf2(dev_name, devc->hw.io_base, devc->hw.irq, 1267 conf_printf2(dev_name, devc->hw.io_base, devc->hw.irq,
1268 devc->hw.dma, devc->hw.dma2); 1268 devc->hw.dma, devc->hw.dma2);
1269 1269
1270 portc = (wavnc_port_info *)kmalloc(sizeof(wavnc_port_info), GFP_KERNEL); 1270 portc = kmalloc(sizeof(wavnc_port_info), GFP_KERNEL);
1271 if (portc == NULL) 1271 if (portc == NULL)
1272 goto nomem; 1272 goto nomem;
1273 1273
diff --git a/sound/pci/ac97/Makefile b/sound/pci/ac97/Makefile
index 77b3482cb133..3c3222122d8b 100644
--- a/sound/pci/ac97/Makefile
+++ b/sound/pci/ac97/Makefile
@@ -10,11 +10,9 @@ snd-ac97-codec-objs += ac97_proc.o
10endif 10endif
11 11
12snd-ak4531-codec-objs := ak4531_codec.o 12snd-ak4531-codec-objs := ak4531_codec.o
13snd-ac97-bus-objs := ac97_bus.o
14 13
15# Toplevel Module Dependency 14# Toplevel Module Dependency
16obj-$(CONFIG_SND_AC97_CODEC) += snd-ac97-codec.o 15obj-$(CONFIG_SND_AC97_CODEC) += snd-ac97-codec.o
17obj-$(CONFIG_SND_ENS1370) += snd-ak4531-codec.o 16obj-$(CONFIG_SND_ENS1370) += snd-ak4531-codec.o
18obj-$(CONFIG_SND_AC97_BUS) += snd-ac97-bus.o
19 17
20obj-m := $(sort $(obj-m)) 18obj-m := $(sort $(obj-m))