aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJames Bottomley <jejb@mulgrave.il.steeleye.com>2006-06-28 14:06:39 -0400
committerJames Bottomley <jejb@mulgrave.il.steeleye.com>2006-06-28 14:06:39 -0400
commitf28e71617ddaf2483e3e5c5237103484a303743f (patch)
tree67627d2d8ddbf6a4449371e9261d796c013b1fa1
parentdc6a78f1af10d28fb8c395034ae1e099b85c05b0 (diff)
parenta39727f212426b9d5f9267b3318a2afaf9922d3b (diff)
Merge ../linux-2.6/
Conflicts: drivers/scsi/aacraid/comminit.c Fixed up by removing the now renamed CONFIG_IOMMU option from aacraid Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
-rw-r--r--CREDITS5
-rw-r--r--Documentation/DocBook/kernel-locking.tmpl2
-rw-r--r--Documentation/RCU/torture.txt34
-rw-r--r--Documentation/arm/Samsung-S3C24XX/Overview.txt35
-rw-r--r--Documentation/arm/Samsung-S3C24XX/S3C2412.txt120
-rw-r--r--Documentation/arm/Samsung-S3C24XX/S3C2413.txt21
-rw-r--r--Documentation/atomic_ops.txt28
-rw-r--r--Documentation/console/console.txt144
-rw-r--r--Documentation/driver-model/overview.txt2
-rw-r--r--Documentation/fb/fbcon.txt180
-rw-r--r--Documentation/filesystems/ext3.txt8
-rw-r--r--Documentation/kbuild/makefiles.txt8
-rw-r--r--Documentation/kdump/gdbmacros.txt2
-rw-r--r--Documentation/kernel-parameters.txt23
-rw-r--r--Documentation/keys.txt43
-rw-r--r--Documentation/md.txt67
-rw-r--r--Documentation/pi-futex.txt121
-rw-r--r--Documentation/robust-futexes.txt2
-rw-r--r--Documentation/rt-mutex-design.txt781
-rw-r--r--Documentation/rt-mutex.txt79
-rw-r--r--Documentation/scsi/ppa.txt2
-rw-r--r--Documentation/tty.txt7
-rw-r--r--Documentation/video4linux/README.pvrusb2212
-rw-r--r--Documentation/x86_64/boot-options.txt21
-rw-r--r--MAINTAINERS18
-rw-r--r--Makefile213
-rw-r--r--arch/alpha/kernel/setup.c2
-rw-r--r--arch/alpha/oprofile/common.c2
-rw-r--r--arch/arm/Kconfig4
-rw-r--r--arch/arm/Makefile2
-rw-r--r--arch/arm/boot/compressed/head-at91rm9200.S6
-rw-r--r--arch/arm/boot/compressed/ll_char_wr.S6
-rw-r--r--arch/arm/common/locomo.c45
-rw-r--r--arch/arm/configs/onearm_defconfig1053
-rw-r--r--arch/arm/configs/s3c2410_defconfig50
-rw-r--r--arch/arm/kernel/entry-common.S2
-rw-r--r--arch/arm/kernel/head-nommu.S2
-rw-r--r--arch/arm/kernel/head.S4
-rw-r--r--arch/arm/kernel/setup.c2
-rw-r--r--arch/arm/lib/backtrace.S8
-rw-r--r--arch/arm/lib/clear_user.S4
-rw-r--r--arch/arm/lib/copy_page.S2
-rw-r--r--arch/arm/lib/csumipv6.S2
-rw-r--r--arch/arm/lib/delay.S18
-rw-r--r--arch/arm/lib/ecard.S4
-rw-r--r--arch/arm/lib/findbit.S10
-rw-r--r--arch/arm/lib/io-readsb.S6
-rw-r--r--arch/arm/lib/io-readsw-armv3.S6
-rw-r--r--arch/arm/lib/io-writesb.S6
-rw-r--r--arch/arm/lib/io-writesw-armv3.S6
-rw-r--r--arch/arm/lib/memchr.S2
-rw-r--r--arch/arm/lib/memset.S4
-rw-r--r--arch/arm/lib/memzero.S4
-rw-r--r--arch/arm/lib/strchr.S2
-rw-r--r--arch/arm/lib/strncpy_from_user.S5
-rw-r--r--arch/arm/lib/strnlen_user.S5
-rw-r--r--arch/arm/lib/strrchr.S2
-rw-r--r--arch/arm/lib/uaccess.S8
-rw-r--r--arch/arm/mach-at91rm9200/Kconfig6
-rw-r--r--arch/arm/mach-at91rm9200/Makefile1
-rw-r--r--arch/arm/mach-at91rm9200/board-1arm.c109
-rw-r--r--arch/arm/mach-ixp4xx/Kconfig3
-rw-r--r--arch/arm/mach-ixp4xx/Makefile24
-rw-r--r--arch/arm/mach-pxa/sleep.S2
-rw-r--r--arch/arm/mach-s3c2410/Kconfig4
-rw-r--r--arch/arm/mach-s3c2410/sleep.S2
-rw-r--r--arch/arm/mach-sa1100/sleep.S2
-rw-r--r--arch/arm/mm/copypage-v3.S2
-rw-r--r--arch/arm/mm/proc-v6.S32
-rw-r--r--arch/arm/nwfpe/entry26.S2
-rw-r--r--arch/arm/tools/mach-types71
-rw-r--r--arch/i386/Kconfig54
-rw-r--r--arch/i386/Kconfig.cpu2
-rw-r--r--arch/i386/boot/Makefile9
-rw-r--r--arch/i386/boot/compressed/misc.c32
-rw-r--r--arch/i386/boot/video.S19
-rw-r--r--arch/i386/crypto/aes-i586-asm.S29
-rw-r--r--arch/i386/crypto/aes.c20
-rw-r--r--arch/i386/kernel/Makefile8
-rw-r--r--arch/i386/kernel/alternative.c118
-rw-r--r--arch/i386/kernel/apic.c16
-rw-r--r--arch/i386/kernel/apm.c6
-rw-r--r--arch/i386/kernel/asm-offsets.c7
-rw-r--r--arch/i386/kernel/cpu/amd.c22
-rw-r--r--arch/i386/kernel/cpu/common.c25
-rw-r--r--arch/i386/kernel/cpu/cyrix.c2
-rw-r--r--arch/i386/kernel/cpu/intel.c6
-rw-r--r--arch/i386/kernel/cpu/intel_cacheinfo.c123
-rw-r--r--arch/i386/kernel/cpu/proc.c8
-rw-r--r--arch/i386/kernel/cpuid.c2
-rw-r--r--arch/i386/kernel/crash.c9
-rw-r--r--arch/i386/kernel/entry.S285
-rw-r--r--arch/i386/kernel/hpet.c67
-rw-r--r--arch/i386/kernel/i8253.c118
-rw-r--r--arch/i386/kernel/i8259.c2
-rw-r--r--arch/i386/kernel/io_apic.c49
-rw-r--r--arch/i386/kernel/irq.c10
-rw-r--r--arch/i386/kernel/kprobes.c95
-rw-r--r--arch/i386/kernel/machine_kexec.c4
-rw-r--r--arch/i386/kernel/msr.c2
-rw-r--r--arch/i386/kernel/nmi.c72
-rw-r--r--arch/i386/kernel/numaq.c10
-rw-r--r--arch/i386/kernel/process.c8
-rw-r--r--arch/i386/kernel/scx200.c66
-rw-r--r--arch/i386/kernel/setup.c1
-rw-r--r--arch/i386/kernel/signal.c4
-rw-r--r--arch/i386/kernel/smp.c12
-rw-r--r--arch/i386/kernel/smpboot.c38
-rw-r--r--arch/i386/kernel/sysenter.c128
-rw-r--r--arch/i386/kernel/time.c157
-rw-r--r--arch/i386/kernel/timers/Makefile9
-rw-r--r--arch/i386/kernel/timers/common.c172
-rw-r--r--arch/i386/kernel/timers/timer.c75
-rw-r--r--arch/i386/kernel/timers/timer_cyclone.c259
-rw-r--r--arch/i386/kernel/timers/timer_hpet.c217
-rw-r--r--arch/i386/kernel/timers/timer_none.c39
-rw-r--r--arch/i386/kernel/timers/timer_pit.c177
-rw-r--r--arch/i386/kernel/timers/timer_pm.c342
-rw-r--r--arch/i386/kernel/timers/timer_tsc.c617
-rw-r--r--arch/i386/kernel/topology.c28
-rw-r--r--arch/i386/kernel/traps.c70
-rw-r--r--arch/i386/kernel/tsc.c478
-rw-r--r--arch/i386/kernel/vmlinux.lds.S9
-rw-r--r--arch/i386/kernel/vsyscall-sysenter.S4
-rw-r--r--arch/i386/kernel/vsyscall.lds.S4
-rw-r--r--arch/i386/lib/delay.c65
-rw-r--r--arch/i386/mach-voyager/setup.c5
-rw-r--r--arch/i386/mach-voyager/voyager_smp.c2
-rw-r--r--arch/i386/mm/fault.c38
-rw-r--r--arch/i386/mm/init.c5
-rw-r--r--arch/i386/mm/pageattr.c4
-rw-r--r--arch/i386/oprofile/nmi_int.c4
-rw-r--r--arch/i386/oprofile/op_model_athlon.c1
-rw-r--r--arch/i386/oprofile/op_model_p4.c1
-rw-r--r--arch/i386/oprofile/op_model_ppro.c1
-rw-r--r--arch/i386/pci/pcbios.c6
-rw-r--r--arch/ia64/Kconfig4
-rw-r--r--arch/ia64/kernel/palinfo.c4
-rw-r--r--arch/ia64/kernel/process.c4
-rw-r--r--arch/ia64/kernel/salinfo.c6
-rw-r--r--arch/ia64/kernel/topology.c32
-rw-r--r--arch/ia64/mm/discontig.c57
-rw-r--r--arch/ia64/mm/fault.c36
-rw-r--r--arch/ia64/mm/init.c5
-rw-r--r--arch/ia64/sn/kernel/irq.c2
-rw-r--r--arch/m32r/kernel/setup.c2
-rw-r--r--arch/m68k/mm/memory.c6
-rw-r--r--arch/m68k/sun3/sun3dvma.c6
-rw-r--r--arch/m68knommu/Kconfig78
-rw-r--r--arch/m68knommu/Makefile24
-rw-r--r--arch/m68knommu/defconfig207
-rw-r--r--arch/m68knommu/kernel/vmlinux.lds.S160
-rw-r--r--arch/m68knommu/platform/5307/head.S82
-rw-r--r--arch/m68knommu/platform/68328/head-pilot.S3
-rw-r--r--arch/m68knommu/platform/68328/head-ram.S6
-rw-r--r--arch/m68knommu/platform/68328/head-rom.S18
-rw-r--r--arch/m68knommu/platform/68360/head-ram.S19
-rw-r--r--arch/m68knommu/platform/68360/head-rom.S17
-rw-r--r--arch/mips/kernel/smp.c2
-rw-r--r--arch/mips/kernel/smtc.c4
-rw-r--r--arch/mips/momentum/ocelot_g/gt-irq.c4
-rw-r--r--arch/mips/oprofile/common.c2
-rw-r--r--arch/mips/sgi-ip22/ip22-reset.c2
-rw-r--r--arch/mips/sgi-ip32/ip32-reset.c12
-rw-r--r--arch/parisc/kernel/topology.c3
-rw-r--r--arch/powerpc/kernel/crash.c2
-rw-r--r--arch/powerpc/kernel/machine_kexec_32.c4
-rw-r--r--arch/powerpc/kernel/setup_32.c2
-rw-r--r--arch/powerpc/kernel/sysfs.c31
-rw-r--r--arch/powerpc/kernel/time.c2
-rw-r--r--arch/powerpc/mm/fault.c36
-rw-r--r--arch/powerpc/mm/init_64.c3
-rw-r--r--arch/powerpc/mm/mem.c11
-rw-r--r--arch/powerpc/mm/numa.c11
-rw-r--r--arch/powerpc/oprofile/common.c2
-rw-r--r--arch/powerpc/platforms/cell/spufs/switch.c4
-rw-r--r--arch/powerpc/platforms/powermac/pfunc_core.c2
-rw-r--r--arch/powerpc/platforms/pseries/eeh_cache.c2
-rw-r--r--arch/powerpc/platforms/pseries/eeh_event.c2
-rw-r--r--arch/powerpc/sysdev/mmio_nvram.c2
-rw-r--r--arch/ppc/kernel/machine_kexec.c4
-rw-r--r--arch/ppc/kernel/setup.c2
-rw-r--r--arch/s390/appldata/appldata_base.c2
-rw-r--r--arch/s390/crypto/aes_s390.c14
-rw-r--r--arch/s390/crypto/des_s390.c42
-rw-r--r--arch/s390/crypto/sha1_s390.c34
-rw-r--r--arch/s390/crypto/sha256_s390.c14
-rw-r--r--arch/s390/kernel/machine_kexec.c4
-rw-r--r--arch/s390/kernel/smp.c2
-rw-r--r--arch/s390/kernel/vtime.c2
-rw-r--r--arch/sh/Makefile4
-rw-r--r--arch/sh/kernel/machine_kexec.c4
-rw-r--r--arch/sh/kernel/setup.c2
-rw-r--r--arch/sh/oprofile/op_model_sh7750.c2
-rw-r--r--arch/sh64/kernel/setup.c2
-rw-r--r--arch/sparc/kernel/of_device.c2
-rw-r--r--arch/sparc/kernel/prom.c106
-rw-r--r--arch/sparc/lib/Makefile2
-rw-r--r--arch/sparc/lib/iomap.c48
-rw-r--r--arch/sparc64/kernel/auxio.c3
-rw-r--r--arch/sparc64/kernel/irq.c61
-rw-r--r--arch/sparc64/kernel/of_device.c3
-rw-r--r--arch/sparc64/kernel/prom.c107
-rw-r--r--arch/sparc64/kernel/setup.c2
-rw-r--r--arch/sparc64/mm/fault.c36
-rw-r--r--arch/sparc64/mm/init.c4
-rw-r--r--arch/um/drivers/ubd_kern.c2
-rw-r--r--arch/x86_64/Kconfig51
-rw-r--r--arch/x86_64/Kconfig.debug18
-rw-r--r--arch/x86_64/Makefile4
-rw-r--r--arch/x86_64/boot/Makefile9
-rw-r--r--arch/x86_64/boot/compressed/misc.c46
-rw-r--r--arch/x86_64/boot/tools/build.c6
-rw-r--r--arch/x86_64/boot/video.S19
-rw-r--r--arch/x86_64/crypto/aes-x86_64-asm.S22
-rw-r--r--arch/x86_64/crypto/aes.c20
-rw-r--r--arch/x86_64/defconfig159
-rw-r--r--arch/x86_64/ia32/fpu32.c1
-rw-r--r--arch/x86_64/ia32/ia32_signal.c2
-rw-r--r--arch/x86_64/ia32/ia32entry.S11
-rw-r--r--arch/x86_64/ia32/ptrace32.c43
-rw-r--r--arch/x86_64/ia32/sys_ia32.c25
-rw-r--r--arch/x86_64/kernel/Makefile8
-rw-r--r--arch/x86_64/kernel/aperture.c26
-rw-r--r--arch/x86_64/kernel/apic.c32
-rw-r--r--arch/x86_64/kernel/asm-offsets.c3
-rw-r--r--arch/x86_64/kernel/crash.c6
-rw-r--r--arch/x86_64/kernel/e820.c2
-rw-r--r--arch/x86_64/kernel/entry.S115
-rw-r--r--arch/x86_64/kernel/genapic_flat.c30
-rw-r--r--arch/x86_64/kernel/head64.c2
-rw-r--r--arch/x86_64/kernel/i8259.c16
-rw-r--r--arch/x86_64/kernel/io_apic.c45
-rw-r--r--arch/x86_64/kernel/irq.c34
-rw-r--r--arch/x86_64/kernel/k8.c118
-rw-r--r--arch/x86_64/kernel/machine_kexec.c4
-rw-r--r--arch/x86_64/kernel/mce.c6
-rw-r--r--arch/x86_64/kernel/mce_amd.c506
-rw-r--r--arch/x86_64/kernel/module.c38
-rw-r--r--arch/x86_64/kernel/nmi.c89
-rw-r--r--arch/x86_64/kernel/pci-calgary.c1018
-rw-r--r--arch/x86_64/kernel/pci-dma.c55
-rw-r--r--arch/x86_64/kernel/pci-gart.c155
-rw-r--r--arch/x86_64/kernel/pci-nommu.c9
-rw-r--r--arch/x86_64/kernel/pci-swiotlb.c2
-rw-r--r--arch/x86_64/kernel/pmtimer.c2
-rw-r--r--arch/x86_64/kernel/process.c16
-rw-r--r--arch/x86_64/kernel/reboot.c1
-rw-r--r--arch/x86_64/kernel/setup.c181
-rw-r--r--arch/x86_64/kernel/setup64.c3
-rw-r--r--arch/x86_64/kernel/signal.c3
-rw-r--r--arch/x86_64/kernel/smp.c16
-rw-r--r--arch/x86_64/kernel/smpboot.c31
-rw-r--r--arch/x86_64/kernel/tce.c202
-rw-r--r--arch/x86_64/kernel/time.c87
-rw-r--r--arch/x86_64/kernel/traps.c83
-rw-r--r--arch/x86_64/kernel/vmlinux.lds.S29
-rw-r--r--arch/x86_64/kernel/vsyscall.c4
-rw-r--r--arch/x86_64/kernel/x8664_ksyms.c114
-rw-r--r--arch/x86_64/lib/csum-partial.c1
-rw-r--r--arch/x86_64/lib/csum-wrappers.c1
-rw-r--r--arch/x86_64/lib/delay.c5
-rw-r--r--arch/x86_64/lib/memmove.c4
-rw-r--r--arch/x86_64/lib/usercopy.c13
-rw-r--r--arch/x86_64/mm/fault.c47
-rw-r--r--arch/x86_64/mm/init.c121
-rw-r--r--arch/x86_64/mm/ioremap.c5
-rw-r--r--arch/x86_64/pci/k8-bus.c10
-rw-r--r--arch/xtensa/Makefile2
-rw-r--r--arch/xtensa/kernel/time.c2
-rw-r--r--arch/xtensa/kernel/traps.c2
-rw-r--r--block/as-iosched.c2
-rw-r--r--block/ll_rw_blk.c10
-rw-r--r--crypto/Kconfig2
-rw-r--r--crypto/aes.c14
-rw-r--r--crypto/anubis.c13
-rw-r--r--crypto/api.c17
-rw-r--r--crypto/arc4.c9
-rw-r--r--crypto/blowfish.c19
-rw-r--r--crypto/cast5.c14
-rw-r--r--crypto/cast6.c15
-rw-r--r--crypto/cipher.c14
-rw-r--r--crypto/compress.c15
-rw-r--r--crypto/crc32c.c19
-rw-r--r--crypto/crypto_null.c17
-rw-r--r--crypto/deflate.c23
-rw-r--r--crypto/des.c27
-rw-r--r--crypto/digest.c51
-rw-r--r--crypto/khazad.c21
-rw-r--r--crypto/md4.c12
-rw-r--r--crypto/md5.c12
-rw-r--r--crypto/michael_mic.c20
-rw-r--r--crypto/serpent.c27
-rw-r--r--crypto/sha1.c18
-rw-r--r--crypto/sha256.c19
-rw-r--r--crypto/sha512.c29
-rw-r--r--crypto/tcrypt.c179
-rw-r--r--crypto/tcrypt.h36
-rw-r--r--crypto/tea.c55
-rw-r--r--crypto/tgr192.c33
-rw-r--r--crypto/twofish.c14
-rw-r--r--crypto/wp512.c24
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/acpi/acpi_memhotplug.c146
-rw-r--r--drivers/acpi/numa.c15
-rw-r--r--drivers/acpi/processor_idle.c21
-rw-r--r--drivers/atm/firestream.c5
-rw-r--r--drivers/base/cpu.c28
-rw-r--r--drivers/base/dmapool.c3
-rw-r--r--drivers/base/memory.c4
-rw-r--r--drivers/base/node.c61
-rw-r--r--drivers/base/power/resume.c6
-rw-r--r--drivers/base/power/suspend.c13
-rw-r--r--drivers/base/topology.c4
-rw-r--r--drivers/block/loop.c24
-rw-r--r--drivers/bluetooth/dtl1_cs.c3
-rw-r--r--drivers/char/Kconfig55
-rw-r--r--drivers/char/Makefile4
-rw-r--r--drivers/char/agp/Kconfig4
-rw-r--r--drivers/char/agp/amd-k7-agp.c2
-rw-r--r--drivers/char/agp/amd64-agp.c81
-rw-r--r--drivers/char/agp/ati-agp.c2
-rw-r--r--drivers/char/agp/efficeon-agp.c2
-rw-r--r--drivers/char/agp/sgi-agp.c5
-rw-r--r--drivers/char/drm/drm_memory_debug.h2
-rw-r--r--drivers/char/drm/via_dmablit.c2
-rw-r--r--drivers/char/epca.c2
-rw-r--r--drivers/char/hangcheck-timer.c4
-rw-r--r--drivers/char/hvcs.c11
-rw-r--r--drivers/char/hw_random.c698
-rw-r--r--drivers/char/hw_random/Kconfig90
-rw-r--r--drivers/char/hw_random/Makefile11
-rw-r--r--drivers/char/hw_random/amd-rng.c152
-rw-r--r--drivers/char/hw_random/core.c354
-rw-r--r--drivers/char/hw_random/geode-rng.c128
-rw-r--r--drivers/char/hw_random/intel-rng.c189
-rw-r--r--drivers/char/hw_random/ixp4xx-rng.c73
-rw-r--r--drivers/char/hw_random/omap-rng.c208
-rw-r--r--drivers/char/hw_random/via-rng.c183
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c16
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c2
-rw-r--r--drivers/char/keyboard.c26
-rw-r--r--drivers/char/moxa.c2
-rw-r--r--drivers/char/nsc_gpio.c142
-rw-r--r--drivers/char/pc8736x_gpio.c340
-rw-r--r--drivers/char/rio/riointr.c2
-rw-r--r--drivers/char/scx200_gpio.c162
-rw-r--r--drivers/char/specialix.c2
-rw-r--r--drivers/char/stallion.c208
-rw-r--r--drivers/char/sx.c2
-rw-r--r--drivers/char/tlclk.c2
-rw-r--r--drivers/char/tty_io.c7
-rw-r--r--drivers/char/vt.c580
-rw-r--r--drivers/clocksource/Makefile3
-rw-r--r--drivers/clocksource/acpi_pm.c177
-rw-r--r--drivers/clocksource/cyclone.c119
-rw-r--r--drivers/clocksource/scx200_hrt.c101
-rw-r--r--drivers/cpufreq/cpufreq.c8
-rw-r--r--drivers/cpufreq/cpufreq_stats.c4
-rw-r--r--drivers/crypto/padlock-aes.c39
-rw-r--r--drivers/dma/ioatdma.c5
-rw-r--r--drivers/hwmon/asb100.c2
-rw-r--r--drivers/hwmon/lm78.c2
-rw-r--r--drivers/hwmon/lm80.c2
-rw-r--r--drivers/hwmon/lm87.c2
-rw-r--r--drivers/hwmon/sis5595.c2
-rw-r--r--drivers/hwmon/smsc47m1.c2
-rw-r--r--drivers/hwmon/w83627hf.c2
-rw-r--r--drivers/hwmon/w83781d.c2
-rw-r--r--drivers/hwmon/w83792d.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c16
-rw-r--r--drivers/ide/ide-disk.c2
-rw-r--r--drivers/ide/ide-io.c4
-rw-r--r--drivers/ide/ide-lib.c4
-rw-r--r--drivers/ide/ide-timing.h8
-rw-r--r--drivers/ide/pci/amd74xx.c7
-rw-r--r--drivers/ide/pci/pdc202xx_old.c40
-rw-r--r--drivers/ide/pci/piix.c12
-rw-r--r--drivers/ieee1394/eth1394.c3
-rw-r--r--drivers/ieee1394/hosts.c2
-rw-r--r--drivers/ieee1394/ieee1394_core.h2
-rw-r--r--drivers/ieee1394/raw1394.c3
-rw-r--r--drivers/infiniband/core/cma.c2
-rw-r--r--drivers/infiniband/core/mad.c9
-rw-r--r--drivers/infiniband/core/mad_rmpp.c3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c6
-rw-r--r--drivers/input/evdev.c10
-rw-r--r--drivers/input/input.c86
-rw-r--r--drivers/input/joydev.c69
-rw-r--r--drivers/input/joystick/a3d.c2
-rw-r--r--drivers/input/joystick/analog.c23
-rw-r--r--drivers/input/joystick/cobra.c3
-rw-r--r--drivers/input/joystick/db9.c3
-rw-r--r--drivers/input/joystick/gamecon.c3
-rw-r--r--drivers/input/joystick/gf2k.c2
-rw-r--r--drivers/input/joystick/grip.c3
-rw-r--r--drivers/input/joystick/guillemot.c2
-rw-r--r--drivers/input/joystick/iforce/iforce-ff.c8
-rw-r--r--drivers/input/joystick/iforce/iforce-main.c6
-rw-r--r--drivers/input/joystick/interact.c2
-rw-r--r--drivers/input/joystick/magellan.c2
-rw-r--r--drivers/input/joystick/sidewinder.c15
-rw-r--r--drivers/input/joystick/spaceball.c2
-rw-r--r--drivers/input/joystick/spaceorb.c2
-rw-r--r--drivers/input/joystick/stinger.c2
-rw-r--r--drivers/input/joystick/twidjoy.c2
-rw-r--r--drivers/input/joystick/warrior.c2
-rw-r--r--drivers/input/keyboard/atkbd.c219
-rw-r--r--drivers/input/keyboard/lkkbd.c9
-rw-r--r--drivers/input/keyboard/newtonkbd.c2
-rw-r--r--drivers/input/keyboard/sunkbd.c2
-rw-r--r--drivers/input/keyboard/xtkbd.c2
-rw-r--r--drivers/input/mouse/alps.c2
-rw-r--r--drivers/input/mouse/psmouse-base.c39
-rw-r--r--drivers/input/mouse/sermouse.c2
-rw-r--r--drivers/input/mouse/vsxxxaa.c22
-rw-r--r--drivers/input/mousedev.c42
-rw-r--r--drivers/input/touchscreen/gunze.c2
-rw-r--r--drivers/input/touchscreen/h3600_ts_input.c2
-rw-r--r--drivers/input/touchscreen/mtouch.c2
-rw-r--r--drivers/input/tsdev.c22
-rw-r--r--drivers/isdn/capi/capi.c54
-rw-r--r--drivers/isdn/divert/isdn_divert.c2
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c298
-rw-r--r--drivers/isdn/gigaset/common.c2
-rw-r--r--drivers/isdn/gigaset/ev-layer.c13
-rw-r--r--drivers/isdn/hisax/q931.c4
-rw-r--r--drivers/isdn/i4l/isdn_common.c2
-rw-r--r--drivers/isdn/i4l/isdn_tty.c2
-rw-r--r--drivers/leds/led-core.c2
-rw-r--r--drivers/leds/led-triggers.c2
-rw-r--r--drivers/macintosh/Makefile2
-rw-r--r--drivers/macintosh/adbhid.c2
-rw-r--r--drivers/macintosh/via-pmu-event.c80
-rw-r--r--drivers/macintosh/via-pmu-event.h8
-rw-r--r--drivers/macintosh/via-pmu.c8
-rw-r--r--drivers/md/Kconfig46
-rw-r--r--drivers/md/Makefile5
-rw-r--r--drivers/md/bitmap.c483
-rw-r--r--drivers/md/dm-crypt.c56
-rw-r--r--drivers/md/dm-emc.c40
-rw-r--r--drivers/md/dm-exception-store.c67
-rw-r--r--drivers/md/dm-ioctl.c109
-rw-r--r--drivers/md/dm-linear.c8
-rw-r--r--drivers/md/dm-log.c157
-rw-r--r--drivers/md/dm-mpath.c43
-rw-r--r--drivers/md/dm-raid1.c97
-rw-r--r--drivers/md/dm-round-robin.c6
-rw-r--r--drivers/md/dm-snap.c16
-rw-r--r--drivers/md/dm-stripe.c25
-rw-r--r--drivers/md/dm-table.c57
-rw-r--r--drivers/md/dm-target.c2
-rw-r--r--drivers/md/dm-zero.c8
-rw-r--r--drivers/md/dm.c184
-rw-r--r--drivers/md/dm.h81
-rw-r--r--drivers/md/kcopyd.c4
-rw-r--r--drivers/md/linear.c74
-rw-r--r--drivers/md/md.c634
-rw-r--r--drivers/md/raid1.c43
-rw-r--r--drivers/md/raid10.c77
-rw-r--r--drivers/md/raid5.c1308
-rw-r--r--drivers/md/raid6main.c2427
-rw-r--r--drivers/media/Kconfig3
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c27
-rw-r--r--drivers/media/dvb/ttpci/av7110.c8
-rw-r--r--drivers/media/dvb/ttpci/av7110_av.c25
-rw-r--r--drivers/media/dvb/ttpci/av7110_v4l.c50
-rw-r--r--drivers/media/video/Kconfig14
-rw-r--r--drivers/media/video/Makefile1
-rw-r--r--drivers/media/video/bt8xx/bttv-cards.c2
-rw-r--r--drivers/media/video/cx2341x.c57
-rw-r--r--drivers/media/video/cx88/Kconfig15
-rw-r--r--drivers/media/video/cx88/Makefile5
-rw-r--r--drivers/media/video/cx88/cx88-blackbird.c30
-rw-r--r--drivers/media/video/cx88/cx88-cards.c5
-rw-r--r--drivers/media/video/cx88/cx88-core.c2
-rw-r--r--drivers/media/video/cx88/cx88-i2c.c1
-rw-r--r--drivers/media/video/cx88/cx88-tvaudio.c2
-rw-r--r--drivers/media/video/cx88/cx88-video.c6
-rw-r--r--drivers/media/video/cx88/cx88.h7
-rw-r--r--drivers/media/video/pvrusb2/Kconfig62
-rw-r--r--drivers/media/video/pvrusb2/Makefile18
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-audio.c204
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-audio.h40
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-context.c230
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-context.h92
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-ctrl.c593
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-ctrl.h123
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c279
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.h53
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-debug.h67
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-debugifc.c478
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-debugifc.h53
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-demod.c126
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-demod.h38
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-eeprom.c164
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-eeprom.h40
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-encoder.c418
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-encoder.h42
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h384
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.c3120
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.h335
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-i2c-chips-v4l2.c115
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-i2c-cmd-v4l2.c232
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-i2c-cmd-v4l2.h47
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-i2c-core.c937
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-i2c-core.h96
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-io.c695
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-io.h102
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-ioread.c513
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-ioread.h50
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-main.c172
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-std.c408
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-std.h60
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-sysfs.c865
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-sysfs.h47
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-tuner.c122
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-tuner.h38
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-util.h63
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-v4l2.c1126
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-v4l2.h40
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-video-v4l.c253
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-video-v4l.h52
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-wm8775.c170
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-wm8775.h53
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2.h43
-rw-r--r--drivers/media/video/saa7134/saa6752hs.c4
-rw-r--r--drivers/media/video/stradis.c4
-rw-r--r--drivers/media/video/tda9887.c31
-rw-r--r--drivers/media/video/tuner-core.c18
-rw-r--r--drivers/media/video/usbvideo/quickcam_messenger.c2
-rw-r--r--drivers/media/video/v4l2-common.c14
-rw-r--r--drivers/message/fusion/mptfc.c6
-rw-r--r--drivers/message/i2o/iop.c1
-rw-r--r--drivers/mfd/ucb1x00-core.c4
-rw-r--r--drivers/misc/ibmasm/module.c2
-rw-r--r--drivers/mtd/Kconfig4
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c1
-rw-r--r--drivers/mtd/chips/jedec.c1
-rw-r--r--drivers/mtd/chips/map_absent.c3
-rw-r--r--drivers/mtd/chips/map_ram.c1
-rw-r--r--drivers/mtd/chips/map_rom.c1
-rw-r--r--drivers/mtd/devices/block2mtd.c1
-rw-r--r--drivers/mtd/devices/ms02-nv.c1
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c1
-rw-r--r--drivers/mtd/devices/phram.c1
-rw-r--r--drivers/mtd/devices/pmc551.c3
-rw-r--r--drivers/mtd/devices/slram.c1
-rw-r--r--drivers/mtd/maps/Kconfig2
-rw-r--r--drivers/mtd/maps/ixp2000.c2
-rw-r--r--drivers/mtd/maps/physmap.c2
-rw-r--r--drivers/mtd/maps/sun_uflash.c195
-rw-r--r--drivers/mtd/mtdchar.c2
-rw-r--r--drivers/mtd/nand/nand_base.c16
-rw-r--r--drivers/mtd/nand/ndfc.c6
-rw-r--r--drivers/mtd/nand/s3c2410.c164
-rw-r--r--drivers/mtd/nand/ts7250.c2
-rw-r--r--drivers/net/3c501.c4
-rw-r--r--drivers/net/3c59x.c79
-rw-r--r--drivers/net/dl2k.h12
-rw-r--r--drivers/net/dm9000.c34
-rw-r--r--drivers/net/eepro100.c5
-rw-r--r--drivers/net/epic100.c20
-rw-r--r--drivers/net/fealnx.c10
-rw-r--r--drivers/net/fec.c310
-rw-r--r--drivers/net/fs_enet/fs_enet-mii.c3
-rw-r--r--drivers/net/hamradio/dmascc.c2
-rw-r--r--drivers/net/irda/irport.c2
-rw-r--r--drivers/net/irda/nsc-ircc.c8
-rw-r--r--drivers/net/natsemi.c100
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c2
-rw-r--r--drivers/net/pcnet32.c6
-rw-r--r--drivers/net/phy/lxt.c8
-rw-r--r--drivers/net/ppp_generic.c3
-rw-r--r--drivers/net/tulip/winbond-840.c26
-rw-r--r--drivers/net/wan/c101.c2
-rw-r--r--drivers/net/wan/n2.c2
-rw-r--r--drivers/net/wan/pci200syn.c2
-rw-r--r--drivers/net/wireless/bcm43xx/Kconfig1
-rw-r--r--drivers/net/wireless/bcm43xx/bcm43xx.h6
-rw-r--r--drivers/net/wireless/bcm43xx/bcm43xx_main.c37
-rw-r--r--drivers/net/wireless/ipw2100.c2
-rw-r--r--drivers/net/wireless/ipw2200.c22
-rw-r--r--drivers/net/yellowfin.c24
-rw-r--r--drivers/parport/parport_gsc.c2
-rw-r--r--drivers/parport/parport_gsc.h2
-rw-r--r--drivers/parport/parport_pc.c2
-rw-r--r--drivers/parport/parport_sunbpp.c2
-rw-r--r--drivers/parport/procfs.c2
-rw-r--r--drivers/pci/msi-apic.c1
-rw-r--r--drivers/pci/pci.c2
-rw-r--r--drivers/pcmcia/m8xx_pcmcia.c4
-rw-r--r--drivers/rapidio/rio-access.c4
-rw-r--r--drivers/rtc/class.c2
-rw-r--r--drivers/rtc/rtc-ds1553.c2
-rw-r--r--drivers/rtc/rtc-sa1100.c2
-rw-r--r--drivers/rtc/rtc-vr41xx.c2
-rw-r--r--drivers/s390/block/dasd_eer.c2
-rw-r--r--drivers/s390/net/lcs.c7
-rw-r--r--drivers/s390/scsi/zfcp_erp.c14
-rw-r--r--drivers/sbus/char/cpwatchdog.c2
-rw-r--r--drivers/sbus/char/openprom.c593
-rw-r--r--drivers/sbus/char/riowatchdog.c4
-rw-r--r--drivers/scsi/Kconfig12
-rw-r--r--drivers/scsi/NCR5380.c2
-rw-r--r--drivers/scsi/advansys.c2
-rw-r--r--drivers/scsi/ahci.c2
-rw-r--r--drivers/scsi/ata_piix.c2
-rw-r--r--drivers/scsi/dc395x.c2
-rw-r--r--drivers/scsi/ibmmca.c8
-rw-r--r--drivers/scsi/imm.c3
-rw-r--r--drivers/scsi/imm.h2
-rw-r--r--drivers/scsi/ips.c4
-rw-r--r--drivers/scsi/libata-core.c153
-rw-r--r--drivers/scsi/libata-eh.c63
-rw-r--r--drivers/scsi/libata-scsi.c4
-rw-r--r--drivers/scsi/libata.h4
-rw-r--r--drivers/scsi/ncr53c8xx.c3
-rw-r--r--drivers/scsi/ppa.c2
-rw-r--r--drivers/scsi/ppa.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c3
-rw-r--r--drivers/scsi/sata_nv.c2
-rw-r--r--drivers/scsi/sata_sil.c31
-rw-r--r--drivers/scsi/sata_sil24.c2
-rw-r--r--drivers/scsi/sata_svw.c2
-rw-r--r--drivers/scsi/sata_uli.c2
-rw-r--r--drivers/scsi/sata_via.c2
-rw-r--r--drivers/scsi/sata_vsc.c12
-rw-r--r--drivers/scsi/st.c2
-rw-r--r--drivers/serial/8250_pnp.c4
-rw-r--r--drivers/sn/ioc3.c2
-rw-r--r--drivers/telephony/ixj.c2
-rw-r--r--drivers/usb/host/hc_crisv10.c3
-rw-r--r--drivers/usb/input/fixp-arith.h15
-rw-r--r--drivers/usb/input/hid-debug.h2
-rw-r--r--drivers/usb/serial/whiteheat.c19
-rw-r--r--drivers/usb/storage/usb.h4
-rw-r--r--drivers/video/Kconfig49
-rw-r--r--drivers/video/Makefile9
-rw-r--r--drivers/video/aty/aty128fb.c36
-rw-r--r--drivers/video/aty/atyfb_base.c43
-rw-r--r--drivers/video/aty/mach64_accel.c10
-rw-r--r--drivers/video/aty/mach64_cursor.c33
-rw-r--r--drivers/video/aty/radeon_base.c2
-rw-r--r--drivers/video/au1100fb.c44
-rw-r--r--drivers/video/backlight/Kconfig12
-rw-r--r--drivers/video/backlight/Makefile2
-rw-r--r--drivers/video/backlight/hp680_bl.c2
-rw-r--r--drivers/video/backlight/locomolcd.c123
-rw-r--r--drivers/video/cfbimgblt.c1
-rw-r--r--drivers/video/cirrusfb.c2
-rw-r--r--drivers/video/console/fbcon.c361
-rw-r--r--drivers/video/console/fbcon.h1
-rw-r--r--drivers/video/console/mdacon.c2
-rw-r--r--drivers/video/console/newport_con.c38
-rw-r--r--drivers/video/console/promcon.c4
-rw-r--r--drivers/video/console/sticon.c2
-rw-r--r--drivers/video/console/vgacon.c28
-rw-r--r--drivers/video/epson1355fb.c29
-rw-r--r--drivers/video/fbcvt.c1
-rw-r--r--drivers/video/fbmem.c58
-rw-r--r--drivers/video/fbmon.c33
-rw-r--r--drivers/video/fbsysfs.c52
-rw-r--r--drivers/video/geode/gx1fb_core.c3
-rw-r--r--drivers/video/geode/gxfb_core.c3
-rw-r--r--drivers/video/i810/i810_main.c3
-rw-r--r--drivers/video/imacfb.c345
-rw-r--r--drivers/video/macmodes.c6
-rw-r--r--drivers/video/macmodes.h7
-rw-r--r--drivers/video/modedb.c11
-rw-r--r--drivers/video/neofb.c32
-rw-r--r--drivers/video/nvidia/nv_hw.c10
-rw-r--r--drivers/video/nvidia/nvidia.c370
-rw-r--r--drivers/video/riva/fbdev.c2
-rw-r--r--drivers/video/s3c2410fb.c17
-rw-r--r--drivers/video/savage/savagefb.h45
-rw-r--r--drivers/video/savage/savagefb_driver.c1475
-rw-r--r--drivers/video/sis/sis_main.c2
-rw-r--r--drivers/video/skeletonfb.c5
-rw-r--r--drivers/video/tgafb.c1
-rw-r--r--drivers/video/vesafb.c59
-rw-r--r--drivers/video/vfb.c29
-rw-r--r--drivers/video/vga16fb.c40
-rw-r--r--fs/9p/mux.c2
-rw-r--r--fs/Kconfig54
-rw-r--r--fs/afs/cell.c3
-rw-r--r--fs/afs/kafsasyncd.c9
-rw-r--r--fs/afs/server.c6
-rw-r--r--fs/afs/vlocation.c6
-rw-r--r--fs/afs/vnode.c3
-rw-r--r--fs/aio.c2
-rw-r--r--fs/autofs4/expire.c3
-rw-r--r--fs/buffer.c3
-rw-r--r--fs/cifs/CHANGES17
-rw-r--r--fs/cifs/Makefile2
-rw-r--r--fs/cifs/README39
-rw-r--r--fs/cifs/asn1.c10
-rw-r--r--fs/cifs/cifs_debug.c134
-rw-r--r--fs/cifs/cifs_debug.h4
-rw-r--r--fs/cifs/cifs_unicode.c1
-rw-r--r--fs/cifs/cifsencrypt.c140
-rw-r--r--fs/cifs/cifsfs.c6
-rw-r--r--fs/cifs/cifsfs.h3
-rw-r--r--fs/cifs/cifsglob.h71
-rw-r--r--fs/cifs/cifspdu.h98
-rw-r--r--fs/cifs/cifsproto.h14
-rw-r--r--fs/cifs/cifssmb.c287
-rw-r--r--fs/cifs/connect.c498
-rw-r--r--fs/cifs/dir.c15
-rw-r--r--fs/cifs/fcntl.c4
-rw-r--r--fs/cifs/file.c52
-rw-r--r--fs/cifs/inode.c39
-rw-r--r--fs/cifs/link.c7
-rw-r--r--fs/cifs/misc.c10
-rw-r--r--fs/cifs/netmisc.c4
-rw-r--r--fs/cifs/ntlmssp.c143
-rw-r--r--fs/cifs/readdir.c184
-rw-r--r--fs/cifs/sess.c538
-rw-r--r--fs/cifs/smbencrypt.c1
-rw-r--r--fs/cifs/transport.c3
-rw-r--r--fs/coda/psdev.c2
-rw-r--r--fs/coda/upcall.c2
-rw-r--r--fs/compat.c16
-rw-r--r--fs/compat_ioctl.c1
-rw-r--r--fs/configfs/dir.c6
-rw-r--r--fs/dcache.c5
-rw-r--r--fs/dquot.c4
-rw-r--r--fs/exec.c147
-rw-r--r--fs/ext3/super.c6
-rw-r--r--fs/jbd/journal.c3
-rw-r--r--fs/jffs2/acl.c2
-rw-r--r--fs/jffs2/erase.c34
-rw-r--r--fs/jffs2/fs.c2
-rw-r--r--fs/jffs2/gc.c6
-rw-r--r--fs/jffs2/jffs2_fs_sb.h3
-rw-r--r--fs/jffs2/malloc.c2
-rw-r--r--fs/jffs2/nodelist.c3
-rw-r--r--fs/jffs2/nodemgmt.c24
-rw-r--r--fs/jffs2/readinode.c1
-rw-r--r--fs/jffs2/scan.c55
-rw-r--r--fs/jffs2/summary.c43
-rw-r--r--fs/jffs2/wbuf.c3
-rw-r--r--fs/jffs2/xattr.c632
-rw-r--r--fs/jffs2/xattr.h21
-rw-r--r--fs/jfs/jfs_extent.c8
-rw-r--r--fs/libfs.c10
-rw-r--r--fs/namespace.c6
-rw-r--r--fs/nfs/direct.c2
-rw-r--r--fs/nfs/inode.c2
-rw-r--r--fs/nfs/internal.h8
-rw-r--r--fs/nfs/pagelist.c2
-rw-r--r--fs/nfs/read.c2
-rw-r--r--fs/nfs/write.c2
-rw-r--r--fs/nfsd/nfs4state.c5
-rw-r--r--fs/nfsd/nfscache.c3
-rw-r--r--fs/ocfs2/cluster/heartbeat.c2
-rw-r--r--fs/ocfs2/cluster/tcp.c2
-rw-r--r--fs/ocfs2/dlm/dlmast.c15
-rw-r--r--fs/ocfs2/dlm/dlmcommon.h63
-rw-r--r--fs/ocfs2/dlm/dlmconvert.c33
-rw-r--r--fs/ocfs2/dlm/dlmdebug.c6
-rw-r--r--fs/ocfs2/dlm/dlmdebug.h30
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c103
-rw-r--r--fs/ocfs2/dlm/dlmfs.c6
-rw-r--r--fs/ocfs2/dlm/dlmlock.c73
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c448
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c593
-rw-r--r--fs/ocfs2/dlm/dlmthread.c74
-rw-r--r--fs/ocfs2/dlm/dlmunlock.c13
-rw-r--r--fs/ocfs2/dlm/userdlm.c2
-rw-r--r--fs/ocfs2/dlmglue.c2
-rw-r--r--fs/ocfs2/journal.c5
-rw-r--r--fs/ocfs2/vote.c8
-rw-r--r--fs/openpromfs/inode.c1158
-rw-r--r--fs/pnode.c9
-rw-r--r--fs/proc/base.c1086
-rw-r--r--fs/proc/inode.c11
-rw-r--r--fs/proc/internal.h22
-rw-r--r--fs/proc/task_mmu.c140
-rw-r--r--fs/proc/task_nommu.c21
-rw-r--r--fs/reiserfs/file.c8
-rw-r--r--fs/reiserfs/journal.c6
-rw-r--r--fs/smbfs/request.c6
-rw-r--r--fs/smbfs/smbiod.c3
-rw-r--r--fs/sysfs/dir.c10
-rw-r--r--fs/ufs/inode.c111
-rw-r--r--fs/xfs/linux-2.6/xfs_iops.c9
-rw-r--r--fs/xfs/linux-2.6/xfs_linux.h2
-rw-r--r--fs/xfs/linux-2.6/xfs_vnode.h2
-rw-r--r--fs/xfs/xfs_behavior.h3
-rw-r--r--fs/xfs/xfs_inode.c4
-rw-r--r--fs/xfs/xfs_log.c4
-rw-r--r--fs/xfs/xfs_log_recover.c2
-rw-r--r--fs/xfs/xfs_mount.c21
-rw-r--r--fs/xfs/xfs_rtalloc.c2
-rw-r--r--fs/xfs/xfs_trans.h4
-rw-r--r--fs/xfs/xfs_vnodeops.c11
-rw-r--r--include/asm-alpha/core_t2.h2
-rw-r--r--include/asm-arm/arch-s3c2410/regs-nand.h48
-rw-r--r--include/asm-arm/assembler.h36
-rw-r--r--include/asm-arm/hardware/locomo.h5
-rw-r--r--include/asm-generic/bug.h6
-rw-r--r--include/asm-i386/alternative.h2
-rw-r--r--include/asm-i386/apic.h12
-rw-r--r--include/asm-i386/cpu.h2
-rw-r--r--include/asm-i386/cpufeature.h1
-rw-r--r--include/asm-i386/delay.h2
-rw-r--r--include/asm-i386/dwarf2.h54
-rw-r--r--include/asm-i386/elf.h53
-rw-r--r--include/asm-i386/fixmap.h10
-rw-r--r--include/asm-i386/hw_irq.h2
-rw-r--r--include/asm-i386/intel_arch_perfmon.h19
-rw-r--r--include/asm-i386/k8.h1
-rw-r--r--include/asm-i386/kdebug.h2
-rw-r--r--include/asm-i386/kprobes.h1
-rw-r--r--include/asm-i386/local.h26
-rw-r--r--include/asm-i386/mach-default/mach_ipi.h7
-rw-r--r--include/asm-i386/mach-default/mach_timer.h4
-rw-r--r--include/asm-i386/mach-summit/mach_mpparse.h3
-rw-r--r--include/asm-i386/mmu.h1
-rw-r--r--include/asm-i386/nmi.h28
-rw-r--r--include/asm-i386/node.h29
-rw-r--r--include/asm-i386/page.h3
-rw-r--r--include/asm-i386/processor.h11
-rw-r--r--include/asm-i386/system.h2
-rw-r--r--include/asm-i386/thread_info.h18
-rw-r--r--include/asm-i386/timer.h57
-rw-r--r--include/asm-i386/timex.h34
-rw-r--r--include/asm-i386/topology.h11
-rw-r--r--include/asm-i386/tsc.h49
-rw-r--r--include/asm-i386/unwind.h98
-rw-r--r--include/asm-ia64/kdebug.h2
-rw-r--r--include/asm-ia64/kprobes.h1
-rw-r--r--include/asm-ia64/nodedata.h12
-rw-r--r--include/asm-ia64/thread_info.h5
-rw-r--r--include/asm-ia64/topology.h1
-rw-r--r--include/asm-m32r/system.h2
-rw-r--r--include/asm-m68knommu/page_offset.h43
-rw-r--r--include/asm-m68knommu/ptrace.h2
-rw-r--r--include/asm-powerpc/kdebug.h2
-rw-r--r--include/asm-powerpc/kprobes.h2
-rw-r--r--include/asm-powerpc/topology.h5
-rw-r--r--include/asm-sparc/io.h16
-rw-r--r--include/asm-sparc/prom.h10
-rw-r--r--include/asm-sparc64/dma-mapping.h43
-rw-r--r--include/asm-sparc64/floppy.h50
-rw-r--r--include/asm-sparc64/kdebug.h2
-rw-r--r--include/asm-sparc64/kprobes.h1
-rw-r--r--include/asm-sparc64/prom.h10
-rw-r--r--include/asm-sparc64/topology.h3
-rw-r--r--include/asm-x86_64/alternative.h146
-rw-r--r--include/asm-x86_64/apic.h26
-rw-r--r--include/asm-x86_64/atomic.h42
-rw-r--r--include/asm-x86_64/bitops.h7
-rw-r--r--include/asm-x86_64/calgary.h66
-rw-r--r--include/asm-x86_64/cpufeature.h3
-rw-r--r--include/asm-x86_64/dma-mapping.h17
-rw-r--r--include/asm-x86_64/dma.h2
-rw-r--r--include/asm-x86_64/gart-mapping.h16
-rw-r--r--include/asm-x86_64/hpet.h2
-rw-r--r--include/asm-x86_64/hw_irq.h4
-rw-r--r--include/asm-x86_64/ia32_unistd.h308
-rw-r--r--include/asm-x86_64/intel_arch_perfmon.h19
-rw-r--r--include/asm-x86_64/k8.h14
-rw-r--r--include/asm-x86_64/kdebug.h2
-rw-r--r--include/asm-x86_64/kprobes.h1
-rw-r--r--include/asm-x86_64/local.h26
-rw-r--r--include/asm-x86_64/mce.h13
-rw-r--r--include/asm-x86_64/mutex.h4
-rw-r--r--include/asm-x86_64/nmi.h30
-rw-r--r--include/asm-x86_64/pci.h4
-rw-r--r--include/asm-x86_64/pgtable.h6
-rw-r--r--include/asm-x86_64/processor.h5
-rw-r--r--include/asm-x86_64/proto.h15
-rw-r--r--include/asm-x86_64/rwlock.h8
-rw-r--r--include/asm-x86_64/semaphore.h8
-rw-r--r--include/asm-x86_64/smp.h2
-rw-r--r--include/asm-x86_64/spinlock.h10
-rw-r--r--include/asm-x86_64/string.h3
-rw-r--r--include/asm-x86_64/system.h86
-rw-r--r--include/asm-x86_64/tce.h47
-rw-r--r--include/asm-x86_64/thread_info.h19
-rw-r--r--include/asm-x86_64/topology.h10
-rw-r--r--include/asm-x86_64/unwind.h106
-rw-r--r--include/keys/user-type.h1
-rw-r--r--include/linux/acpi.h6
-rw-r--r--include/linux/bitmap.h5
-rw-r--r--include/linux/buffer_head.h2
-rw-r--r--include/linux/clocksource.h185
-rw-r--r--include/linux/compat.h2
-rw-r--r--include/linux/compat_ioctl.h5
-rw-r--r--include/linux/console.h4
-rw-r--r--include/linux/cpu.h14
-rw-r--r--include/linux/crypto.h34
-rw-r--r--include/linux/device-mapper.h111
-rw-r--r--include/linux/dm-ioctl.h6
-rw-r--r--include/linux/dmaengine.h2
-rw-r--r--include/linux/fb.h19
-rw-r--r--include/linux/futex.h12
-rw-r--r--include/linux/hw_random.h50
-rw-r--r--include/linux/idr.h1
-rw-r--r--include/linux/init_task.h4
-rw-r--r--include/linux/input.h10
-rw-r--r--include/linux/ioport.h3
-rw-r--r--include/linux/ipmi.h4
-rw-r--r--include/linux/jffs2.h1
-rw-r--r--include/linux/kernel.h7
-rw-r--r--include/linux/key.h13
-rw-r--r--include/linux/libata.h15
-rw-r--r--include/linux/license.h14
-rw-r--r--include/linux/list.h9
-rw-r--r--include/linux/loop.h2
-rw-r--r--include/linux/memory_hotplug.h73
-rw-r--r--include/linux/mm.h13
-rw-r--r--include/linux/module.h3
-rw-r--r--include/linux/netdevice.h1
-rw-r--r--include/linux/netpoll.h1
-rw-r--r--include/linux/node.h17
-rw-r--r--include/linux/nsc_gpio.h42
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/plist.h247
-rw-r--r--include/linux/poison.h58
-rw-r--r--include/linux/proc_fs.h16
-rw-r--r--include/linux/ptrace.h1
-rw-r--r--include/linux/raid/bitmap.h11
-rw-r--r--include/linux/raid/linear.h2
-rw-r--r--include/linux/raid/md.h4
-rw-r--r--include/linux/raid/md_k.h10
-rw-r--r--include/linux/raid/md_p.h5
-rw-r--r--include/linux/raid/raid10.h7
-rw-r--r--include/linux/raid/raid5.h1
-rw-r--r--include/linux/rcupdate.h1
-rw-r--r--include/linux/rtmutex.h117
-rw-r--r--include/linux/sched.h62
-rw-r--r--include/linux/scx200.h7
-rw-r--r--include/linux/scx200_gpio.h21
-rw-r--r--include/linux/security.h11
-rw-r--r--include/linux/sunrpc/gss_api.h2
-rw-r--r--include/linux/swap.h2
-rw-r--r--include/linux/syscalls.h4
-rw-r--r--include/linux/sysctl.h4
-rw-r--r--include/linux/time.h28
-rw-r--r--include/linux/timex.h2
-rw-r--r--include/linux/topology.h3
-rw-r--r--include/linux/unwind.h127
-rw-r--r--include/linux/videodev2.h6
-rw-r--r--include/media/cx2341x.h10
-rw-r--r--include/net/tipc/tipc_bearer.h12
-rw-r--r--init/Kconfig23
-rw-r--r--init/initramfs.c36
-rw-r--r--init/main.c5
-rw-r--r--kernel/Makefile5
-rw-r--r--kernel/acct.c3
-rw-r--r--kernel/audit.c2
-rw-r--r--kernel/auditsc.c10
-rw-r--r--kernel/cpu.c18
-rw-r--r--kernel/cpuset.c26
-rw-r--r--kernel/exit.c16
-rw-r--r--kernel/fork.c34
-rw-r--r--kernel/futex.c1067
-rw-r--r--kernel/futex_compat.c14
-rw-r--r--kernel/hrtimer.c4
-rw-r--r--kernel/kprobes.c58
-rw-r--r--kernel/module.c27
-rw-r--r--kernel/mutex-debug.c17
-rw-r--r--kernel/mutex-debug.h25
-rw-r--r--kernel/mutex.c21
-rw-r--r--kernel/mutex.h6
-rw-r--r--kernel/power/Kconfig13
-rw-r--r--kernel/profile.c2
-rw-r--r--kernel/ptrace.c23
-rw-r--r--kernel/rcupdate.c14
-rw-r--r--kernel/rcutorture.c201
-rw-r--r--kernel/resource.c38
-rw-r--r--kernel/rtmutex-debug.c513
-rw-r--r--kernel/rtmutex-debug.h37
-rw-r--r--kernel/rtmutex-tester.c440
-rw-r--r--kernel/rtmutex.c990
-rw-r--r--kernel/rtmutex.h29
-rw-r--r--kernel/rtmutex_common.h123
-rw-r--r--kernel/sched.c1210
-rw-r--r--kernel/signal.c35
-rw-r--r--kernel/softirq.c4
-rw-r--r--kernel/softlockup.c4
-rw-r--r--kernel/sysctl.c38
-rw-r--r--kernel/time.c2
-rw-r--r--kernel/time/Makefile1
-rw-r--r--kernel/time/clocksource.c349
-rw-r--r--kernel/time/jiffies.c73
-rw-r--r--kernel/timer.c400
-rw-r--r--kernel/unwind.c918
-rw-r--r--kernel/workqueue.c2
-rw-r--r--lib/Kconfig6
-rw-r--r--lib/Kconfig.debug30
-rw-r--r--lib/Makefile1
-rw-r--r--lib/idr.c43
-rw-r--r--lib/kernel_lock.c4
-rw-r--r--lib/plist.c118
-rw-r--r--lib/zlib_inflate/inffast.c6
-rw-r--r--lib/zlib_inflate/inftrees.c18
-rw-r--r--mm/Kconfig1
-rw-r--r--mm/filemap.c18
-rw-r--r--mm/memory_hotplug.c126
-rw-r--r--mm/mempolicy.c6
-rw-r--r--mm/page-writeback.c4
-rw-r--r--mm/page_alloc.c10
-rw-r--r--mm/readahead.c4
-rw-r--r--mm/slab.c21
-rw-r--r--mm/sparse.c2
-rw-r--r--mm/swap.c3
-rw-r--r--mm/vmscan.c39
-rw-r--r--net/atm/mpc.c3
-rw-r--r--net/core/dev.c5
-rw-r--r--net/core/netpoll.c36
-rw-r--r--net/core/skbuff.c5
-rw-r--r--net/ipv4/tcp.c22
-rw-r--r--net/ipv6/route.c2
-rw-r--r--net/netrom/nr_route.c12
-rw-r--r--net/rxrpc/call.c3
-rw-r--r--net/rxrpc/connection.c3
-rw-r--r--net/rxrpc/krxsecd.c3
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c2
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_seal.c2
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_mech.c2
-rw-r--r--net/tipc/bcast.c83
-rw-r--r--net/tipc/bcast.h2
-rw-r--r--net/tipc/bearer.c72
-rw-r--r--net/tipc/cluster.c22
-rw-r--r--net/tipc/config.c87
-rw-r--r--net/tipc/core.c7
-rw-r--r--net/tipc/core.h21
-rw-r--r--net/tipc/dbg.c2
-rw-r--r--net/tipc/discover.c13
-rw-r--r--net/tipc/eth_media.c29
-rw-r--r--net/tipc/handler.c2
-rw-r--r--net/tipc/link.c217
-rw-r--r--net/tipc/name_distr.c30
-rw-r--r--net/tipc/name_table.c207
-rw-r--r--net/tipc/net.c2
-rw-r--r--net/tipc/node.c78
-rw-r--r--net/tipc/node.h2
-rw-r--r--net/tipc/node_subscr.c15
-rw-r--r--net/tipc/port.c45
-rw-r--r--net/tipc/ref.c35
-rw-r--r--net/tipc/socket.c100
-rw-r--r--net/tipc/subscr.c20
-rw-r--r--net/tipc/user_reg.c2
-rw-r--r--net/tipc/zone.c19
-rw-r--r--scripts/Makefile.build14
-rw-r--r--scripts/Makefile.host8
-rw-r--r--scripts/Makefile.modinst2
-rw-r--r--scripts/Makefile.modpost2
-rw-r--r--scripts/basic/Makefile6
-rw-r--r--scripts/basic/split-include.c226
-rw-r--r--scripts/export_report.pl169
-rw-r--r--scripts/genksyms/genksyms.c77
-rw-r--r--scripts/genksyms/genksyms.h1
-rw-r--r--scripts/genksyms/lex.c_shipped2
-rw-r--r--scripts/genksyms/lex.l2
-rw-r--r--scripts/kconfig/conf.c23
-rw-r--r--scripts/kconfig/confdata.c491
-rw-r--r--scripts/kconfig/expr.c53
-rw-r--r--scripts/kconfig/expr.h20
-rw-r--r--scripts/kconfig/gconf.c12
-rw-r--r--scripts/kconfig/lex.zconf.c_shipped91
-rw-r--r--scripts/kconfig/lkc.h10
-rw-r--r--scripts/kconfig/lkc_proto.h5
-rw-r--r--scripts/kconfig/menu.c34
-rw-r--r--scripts/kconfig/qconf.cc1048
-rw-r--r--scripts/kconfig/qconf.h158
-rw-r--r--scripts/kconfig/symbol.c50
-rw-r--r--scripts/kconfig/util.c4
-rw-r--r--scripts/kconfig/zconf.gperf3
-rw-r--r--scripts/kconfig/zconf.hash.c_shipped181
-rw-r--r--scripts/kconfig/zconf.tab.c_shipped930
-rw-r--r--scripts/kconfig/zconf.y33
-rw-r--r--scripts/mod/mk_elfconfig.c6
-rw-r--r--scripts/mod/modpost.c177
-rw-r--r--scripts/mod/modpost.h4
-rwxr-xr-xscripts/package/mkspec5
-rw-r--r--scripts/rt-tester/check-all.sh22
-rw-r--r--scripts/rt-tester/rt-tester.py222
-rw-r--r--scripts/rt-tester/t2-l1-2rt-sameprio.tst99
-rw-r--r--scripts/rt-tester/t2-l1-pi.tst82
-rw-r--r--scripts/rt-tester/t2-l1-signal.tst77
-rw-r--r--scripts/rt-tester/t2-l2-2rt-deadlock.tst89
-rw-r--r--scripts/rt-tester/t3-l1-pi-1rt.tst92
-rw-r--r--scripts/rt-tester/t3-l1-pi-2rt.tst93
-rw-r--r--scripts/rt-tester/t3-l1-pi-3rt.tst92
-rw-r--r--scripts/rt-tester/t3-l1-pi-signal.tst98
-rw-r--r--scripts/rt-tester/t3-l1-pi-steal.tst96
-rw-r--r--scripts/rt-tester/t3-l2-pi.tst92
-rw-r--r--scripts/rt-tester/t4-l2-pi-deboost.tst123
-rw-r--r--scripts/rt-tester/t5-l4-pi-boost-deboost-setsched.tst183
-rw-r--r--scripts/rt-tester/t5-l4-pi-boost-deboost.tst143
-rw-r--r--scripts/setlocalversion4
-rw-r--r--security/Kconfig20
-rw-r--r--security/dummy.c3
-rw-r--r--security/keys/internal.h3
-rw-r--r--security/keys/key.c57
-rw-r--r--security/keys/keyctl.c61
-rw-r--r--security/keys/keyring.c25
-rw-r--r--security/keys/proc.c7
-rw-r--r--security/keys/process_keys.c24
-rw-r--r--security/keys/request_key.c36
-rw-r--r--security/keys/request_key_auth.c2
-rw-r--r--security/keys/user_defined.c25
-rw-r--r--security/selinux/hooks.c56
-rw-r--r--security/selinux/include/av_perm_to_string.h3
-rw-r--r--security/selinux/include/av_permissions.h4
-rw-r--r--security/selinux/include/objsec.h2
-rw-r--r--sound/core/seq/seq_memory.h2
-rw-r--r--sound/oss/Kconfig2
-rw-r--r--sound/oss/sb_ess.c28
-rw-r--r--sound/oss/via82cxxx_audio.c5
-rw-r--r--sound/pci/cs5535audio/cs5535audio_pcm.c2
-rw-r--r--sound/ppc/toonie.c0
-rw-r--r--usr/Makefile3
1119 files changed, 51107 insertions, 19302 deletions
diff --git a/CREDITS b/CREDITS
index 1d35f10ec3..85c7c70b70 100644
--- a/CREDITS
+++ b/CREDITS
@@ -24,6 +24,11 @@ S: C. Negri 6, bl. D3
24S: Iasi 6600 24S: Iasi 6600
25S: Romania 25S: Romania
26 26
27N: Mark Adler
28E: madler@alumni.caltech.edu
29W: http://alumnus.caltech.edu/~madler/
30D: zlib decompression
31
27N: Monalisa Agrawal 32N: Monalisa Agrawal
28E: magrawal@nortelnetworks.com 33E: magrawal@nortelnetworks.com
29D: Basic Interphase 5575 driver with UBR and ABR support. 34D: Basic Interphase 5575 driver with UBR and ABR support.
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl
index 158ffe9bfa..644c3884fa 100644
--- a/Documentation/DocBook/kernel-locking.tmpl
+++ b/Documentation/DocBook/kernel-locking.tmpl
@@ -1590,7 +1590,7 @@ the amount of locking which needs to be done.
1590 <para> 1590 <para>
1591 Our final dilemma is this: when can we actually destroy the 1591 Our final dilemma is this: when can we actually destroy the
1592 removed element? Remember, a reader might be stepping through 1592 removed element? Remember, a reader might be stepping through
1593 this element in the list right now: it we free this element and 1593 this element in the list right now: if we free this element and
1594 the <symbol>next</symbol> pointer changes, the reader will jump 1594 the <symbol>next</symbol> pointer changes, the reader will jump
1595 off into garbage and crash. We need to wait until we know that 1595 off into garbage and crash. We need to wait until we know that
1596 all the readers who were traversing the list when we deleted the 1596 all the readers who were traversing the list when we deleted the
diff --git a/Documentation/RCU/torture.txt b/Documentation/RCU/torture.txt
index e4c38152f7..a494859160 100644
--- a/Documentation/RCU/torture.txt
+++ b/Documentation/RCU/torture.txt
@@ -7,7 +7,7 @@ The CONFIG_RCU_TORTURE_TEST config option is available for all RCU
7implementations. It creates an rcutorture kernel module that can 7implementations. It creates an rcutorture kernel module that can
8be loaded to run a torture test. The test periodically outputs 8be loaded to run a torture test. The test periodically outputs
9status messages via printk(), which can be examined via the dmesg 9status messages via printk(), which can be examined via the dmesg
10command (perhaps grepping for "rcutorture"). The test is started 10command (perhaps grepping for "torture"). The test is started
11when the module is loaded, and stops when the module is unloaded. 11when the module is loaded, and stops when the module is unloaded.
12 12
13However, actually setting this config option to "y" results in the system 13However, actually setting this config option to "y" results in the system
@@ -35,6 +35,19 @@ stat_interval The number of seconds between output of torture
35 be printed -only- when the module is unloaded, and this 35 be printed -only- when the module is unloaded, and this
36 is the default. 36 is the default.
37 37
38shuffle_interval
39 The number of seconds to keep the test threads affinitied
40 to a particular subset of the CPUs. Used in conjunction
41 with test_no_idle_hz.
42
43test_no_idle_hz Whether or not to test the ability of RCU to operate in
44 a kernel that disables the scheduling-clock interrupt to
45 idle CPUs. Boolean parameter, "1" to test, "0" otherwise.
46
47torture_type The type of RCU to test: "rcu" for the rcu_read_lock()
48 API, "rcu_bh" for the rcu_read_lock_bh() API, and "srcu"
49 for the "srcu_read_lock()" API.
50
38verbose Enable debug printk()s. Default is disabled. 51verbose Enable debug printk()s. Default is disabled.
39 52
40 53
@@ -42,14 +55,14 @@ OUTPUT
42 55
43The statistics output is as follows: 56The statistics output is as follows:
44 57
45 rcutorture: --- Start of test: nreaders=16 stat_interval=0 verbose=0 58 rcu-torture: --- Start of test: nreaders=16 stat_interval=0 verbose=0
46 rcutorture: rtc: 0000000000000000 ver: 1916 tfle: 0 rta: 1916 rtaf: 0 rtf: 1915 59 rcu-torture: rtc: 0000000000000000 ver: 1916 tfle: 0 rta: 1916 rtaf: 0 rtf: 1915
47 rcutorture: Reader Pipe: 1466408 9747 0 0 0 0 0 0 0 0 0 60 rcu-torture: Reader Pipe: 1466408 9747 0 0 0 0 0 0 0 0 0
48 rcutorture: Reader Batch: 1464477 11678 0 0 0 0 0 0 0 0 61 rcu-torture: Reader Batch: 1464477 11678 0 0 0 0 0 0 0 0
49 rcutorture: Free-Block Circulation: 1915 1915 1915 1915 1915 1915 1915 1915 1915 1915 0 62 rcu-torture: Free-Block Circulation: 1915 1915 1915 1915 1915 1915 1915 1915 1915 1915 0
50 rcutorture: --- End of test 63 rcu-torture: --- End of test
51 64
52The command "dmesg | grep rcutorture:" will extract this information on 65The command "dmesg | grep torture:" will extract this information on
53most systems. On more esoteric configurations, it may be necessary to 66most systems. On more esoteric configurations, it may be necessary to
54use other commands to access the output of the printk()s used by 67use other commands to access the output of the printk()s used by
55the RCU torture test. The printk()s use KERN_ALERT, so they should 68the RCU torture test. The printk()s use KERN_ALERT, so they should
@@ -115,8 +128,9 @@ The following script may be used to torture RCU:
115 modprobe rcutorture 128 modprobe rcutorture
116 sleep 100 129 sleep 100
117 rmmod rcutorture 130 rmmod rcutorture
118 dmesg | grep rcutorture: 131 dmesg | grep torture:
119 132
120The output can be manually inspected for the error flag of "!!!". 133The output can be manually inspected for the error flag of "!!!".
121One could of course create a more elaborate script that automatically 134One could of course create a more elaborate script that automatically
122checked for such errors. 135checked for such errors. The "rmmod" command forces a "SUCCESS" or
136"FAILURE" indication to be printk()ed.
diff --git a/Documentation/arm/Samsung-S3C24XX/Overview.txt b/Documentation/arm/Samsung-S3C24XX/Overview.txt
index 8c6ee68417..3e46d2a311 100644
--- a/Documentation/arm/Samsung-S3C24XX/Overview.txt
+++ b/Documentation/arm/Samsung-S3C24XX/Overview.txt
@@ -7,11 +7,13 @@ Introduction
7------------ 7------------
8 8
9 The Samsung S3C24XX range of ARM9 System-on-Chip CPUs are supported 9 The Samsung S3C24XX range of ARM9 System-on-Chip CPUs are supported
10 by the 's3c2410' architecture of ARM Linux. Currently the S3C2410 and 10 by the 's3c2410' architecture of ARM Linux. Currently the S3C2410,
11 the S3C2440 are supported CPUs. 11 S3C2440 and S3C2442 devices are supported.
12 12
13 Support for the S3C2400 series is in progress. 13 Support for the S3C2400 series is in progress.
14 14
15 Support for the S3C2412 and S3C2413 CPUs is being merged.
16
15 17
16Configuration 18Configuration
17------------- 19-------------
@@ -43,9 +45,18 @@ Machines
43 45
44 Samsung's own development board, geared for PDA work. 46 Samsung's own development board, geared for PDA work.
45 47
48 Samsung/Aiji SMDK2412
49
50 The S3C2412 version of the SMDK2440.
51
52 Samsung/Aiji SMDK2413
53
54 The S3C2412 version of the SMDK2440.
55
46 Samsung/Meritech SMDK2440 56 Samsung/Meritech SMDK2440
47 57
48 The S3C2440 compatible version of the SMDK2440 58 The S3C2440 compatible version of the SMDK2440, which has the
59 option of an S3C2440 or S3C2442 CPU module.
49 60
50 Thorcom VR1000 61 Thorcom VR1000
51 62
@@ -211,24 +222,6 @@ Port Contributors
211 Lucas Correia Villa Real (S3C2400 port) 222 Lucas Correia Villa Real (S3C2400 port)
212 223
213 224
214Document Changes
215----------------
216
217 05 Sep 2004 - BJD - Added Document Changes section
218 05 Sep 2004 - BJD - Added Klaus Fetscher to list of contributors
219 25 Oct 2004 - BJD - Added Dimitry Andric to list of contributors
220 25 Oct 2004 - BJD - Updated the MTD from the 2.6.9 merge
221 21 Jan 2005 - BJD - Added rx3715, added Shannon to contributors
222 10 Feb 2005 - BJD - Added Guillaume Gourat to contributors
223 02 Mar 2005 - BJD - Added SMDK2440 to list of machines
224 06 Mar 2005 - BJD - Added Christer Weinigel
225 08 Mar 2005 - BJD - Added LCVR to list of people, updated introduction
226 08 Mar 2005 - BJD - Added section on adding machines
227 09 Sep 2005 - BJD - Added section on platform data
228 11 Feb 2006 - BJD - Added I2C, RTC and Watchdog sections
229 11 Feb 2006 - BJD - Added Osiris machine, and S3C2400 information
230
231
232Document Author 225Document Author
233--------------- 226---------------
234 227
diff --git a/Documentation/arm/Samsung-S3C24XX/S3C2412.txt b/Documentation/arm/Samsung-S3C24XX/S3C2412.txt
new file mode 100644
index 0000000000..cb82a7fc79
--- /dev/null
+++ b/Documentation/arm/Samsung-S3C24XX/S3C2412.txt
@@ -0,0 +1,120 @@
1 S3C2412 ARM Linux Overview
2 ==========================
3
4Introduction
5------------
6
7 The S3C2412 is part of the S3C24XX range of ARM9 System-on-Chip CPUs
8 from Samsung. This part has an ARM926-EJS core, capable of running up
9 to 266MHz (see data-sheet for more information)
10
11
12Clock
13-----
14
15 The core clock code provides a set of clocks to the drivers, and allows
16 for source selection and a number of other features.
17
18
19Power
20-----
21
22 No support for suspend/resume to RAM in the current system.
23
24
25DMA
26---
27
28 No current support for DMA.
29
30
31GPIO
32----
33
34 There is support for setting the GPIO to input/output/special function
35 and reading or writing to them.
36
37
38UART
39----
40
41 The UART hardware is similar to the S3C2440, and is supported by the
42 s3c2410 driver in the drivers/serial directory.
43
44
45NAND
46----
47
48 The NAND hardware is similar to the S3C2440, and is supported by the
49 s3c2410 driver in the drivers/mtd/nand directory.
50
51
52USB Host
53--------
54
55 The USB hardware is similar to the S3C2410, with extended clock source
56 control. The OHCI portion is supported by the ohci-s3c2410 driver, and
57 the clock control selection is supported by the core clock code.
58
59
60USB Device
61----------
62
63 No current support in the kernel
64
65
66IRQs
67----
68
69 All the standard, and external interrupt sources are supported. The
70 extra sub-sources are not yet supported.
71
72
73RTC
74---
75
76 The RTC hardware is similar to the S3C2410, and is supported by the
77 s3c2410-rtc driver.
78
79
80Watchdog
81--------
82
83 The watchdog harware is the same as the S3C2410, and is supported by
84 the s3c2410_wdt driver.
85
86
87MMC/SD/SDIO
88-----------
89
90 No current support for the MMC/SD/SDIO block.
91
92IIC
93---
94
95 The IIC hardware is the same as the S3C2410, and is supported by the
96 i2c-s3c24xx driver.
97
98
99IIS
100---
101
102 No current support for the IIS interface.
103
104
105SPI
106---
107
108 No current support for the SPI interfaces.
109
110
111ATA
112---
113
114 No current support for the on-board ATA block.
115
116
117Document Author
118---------------
119
120Ben Dooks, (c) 2006 Simtec Electronics
diff --git a/Documentation/arm/Samsung-S3C24XX/S3C2413.txt b/Documentation/arm/Samsung-S3C24XX/S3C2413.txt
new file mode 100644
index 0000000000..ab2a88858f
--- /dev/null
+++ b/Documentation/arm/Samsung-S3C24XX/S3C2413.txt
@@ -0,0 +1,21 @@
1 S3C2413 ARM Linux Overview
2 ==========================
3
4Introduction
5------------
6
7 The S3C2413 is an extended version of the S3C2412, with an camera
8 interface and mobile DDR memory support. See the S3C2412 support
9 documentation for more information.
10
11
12Camera Interface
13---------------
14
15 This block is currently not supported.
16
17
18Document Author
19---------------
20
21Ben Dooks, (c) 2006 Simtec Electronics
diff --git a/Documentation/atomic_ops.txt b/Documentation/atomic_ops.txt
index 23a1c2402b..2a63d5662a 100644
--- a/Documentation/atomic_ops.txt
+++ b/Documentation/atomic_ops.txt
@@ -157,13 +157,13 @@ For example, smp_mb__before_atomic_dec() can be used like so:
157 smp_mb__before_atomic_dec(); 157 smp_mb__before_atomic_dec();
158 atomic_dec(&obj->ref_count); 158 atomic_dec(&obj->ref_count);
159 159
160It makes sure that all memory operations preceeding the atomic_dec() 160It makes sure that all memory operations preceding the atomic_dec()
161call are strongly ordered with respect to the atomic counter 161call are strongly ordered with respect to the atomic counter
162operation. In the above example, it guarentees that the assignment of 162operation. In the above example, it guarantees that the assignment of
163"1" to obj->dead will be globally visible to other cpus before the 163"1" to obj->dead will be globally visible to other cpus before the
164atomic counter decrement. 164atomic counter decrement.
165 165
166Without the explicitl smp_mb__before_atomic_dec() call, the 166Without the explicit smp_mb__before_atomic_dec() call, the
167implementation could legally allow the atomic counter update visible 167implementation could legally allow the atomic counter update visible
168to other cpus before the "obj->dead = 1;" assignment. 168to other cpus before the "obj->dead = 1;" assignment.
169 169
@@ -173,11 +173,11 @@ ordering with respect to memory operations after an atomic_dec() call
173(smp_mb__{before,after}_atomic_inc()). 173(smp_mb__{before,after}_atomic_inc()).
174 174
175A missing memory barrier in the cases where they are required by the 175A missing memory barrier in the cases where they are required by the
176atomic_t implementation above can have disasterous results. Here is 176atomic_t implementation above can have disastrous results. Here is
177an example, which follows a pattern occuring frequently in the Linux 177an example, which follows a pattern occurring frequently in the Linux
178kernel. It is the use of atomic counters to implement reference 178kernel. It is the use of atomic counters to implement reference
179counting, and it works such that once the counter falls to zero it can 179counting, and it works such that once the counter falls to zero it can
180be guarenteed that no other entity can be accessing the object: 180be guaranteed that no other entity can be accessing the object:
181 181
182static void obj_list_add(struct obj *obj) 182static void obj_list_add(struct obj *obj)
183{ 183{
@@ -291,9 +291,9 @@ to the size of an "unsigned long" C data type, and are least of that
291size. The endianness of the bits within each "unsigned long" are the 291size. The endianness of the bits within each "unsigned long" are the
292native endianness of the cpu. 292native endianness of the cpu.
293 293
294 void set_bit(unsigned long nr, volatils unsigned long *addr); 294 void set_bit(unsigned long nr, volatile unsigned long *addr);
295 void clear_bit(unsigned long nr, volatils unsigned long *addr); 295 void clear_bit(unsigned long nr, volatile unsigned long *addr);
296 void change_bit(unsigned long nr, volatils unsigned long *addr); 296 void change_bit(unsigned long nr, volatile unsigned long *addr);
297 297
298These routines set, clear, and change, respectively, the bit number 298These routines set, clear, and change, respectively, the bit number
299indicated by "nr" on the bit mask pointed to by "ADDR". 299indicated by "nr" on the bit mask pointed to by "ADDR".
@@ -301,9 +301,9 @@ indicated by "nr" on the bit mask pointed to by "ADDR".
301They must execute atomically, yet there are no implicit memory barrier 301They must execute atomically, yet there are no implicit memory barrier
302semantics required of these interfaces. 302semantics required of these interfaces.
303 303
304 int test_and_set_bit(unsigned long nr, volatils unsigned long *addr); 304 int test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
305 int test_and_clear_bit(unsigned long nr, volatils unsigned long *addr); 305 int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
306 int test_and_change_bit(unsigned long nr, volatils unsigned long *addr); 306 int test_and_change_bit(unsigned long nr, volatile unsigned long *addr);
307 307
308Like the above, except that these routines return a boolean which 308Like the above, except that these routines return a boolean which
309indicates whether the changed bit was set _BEFORE_ the atomic bit 309indicates whether the changed bit was set _BEFORE_ the atomic bit
@@ -335,7 +335,7 @@ subsequent memory operation is made visible. For example:
335 /* ... */; 335 /* ... */;
336 obj->killed = 1; 336 obj->killed = 1;
337 337
338The implementation of test_and_set_bit() must guarentee that 338The implementation of test_and_set_bit() must guarantee that
339"obj->dead = 1;" is visible to cpus before the atomic memory operation 339"obj->dead = 1;" is visible to cpus before the atomic memory operation
340done by test_and_set_bit() becomes visible. Likewise, the atomic 340done by test_and_set_bit() becomes visible. Likewise, the atomic
341memory operation done by test_and_set_bit() must become visible before 341memory operation done by test_and_set_bit() must become visible before
@@ -474,7 +474,7 @@ Now, as far as memory barriers go, as long as spin_lock()
474strictly orders all subsequent memory operations (including 474strictly orders all subsequent memory operations (including
475the cas()) with respect to itself, things will be fine. 475the cas()) with respect to itself, things will be fine.
476 476
477Said another way, _atomic_dec_and_lock() must guarentee that 477Said another way, _atomic_dec_and_lock() must guarantee that
478a counter dropping to zero is never made visible before the 478a counter dropping to zero is never made visible before the
479spinlock being acquired. 479spinlock being acquired.
480 480
diff --git a/Documentation/console/console.txt b/Documentation/console/console.txt
new file mode 100644
index 0000000000..d3e1744732
--- /dev/null
+++ b/Documentation/console/console.txt
@@ -0,0 +1,144 @@
1Console Drivers
2===============
3
4The linux kernel has 2 general types of console drivers. The first type is
5assigned by the kernel to all the virtual consoles during the boot process.
6This type will be called 'system driver', and only one system driver is allowed
7to exist. The system driver is persistent and it can never be unloaded, though
8it may become inactive.
9
10The second type has to be explicitly loaded and unloaded. This will be called
11'modular driver' by this document. Multiple modular drivers can coexist at
12any time with each driver sharing the console with other drivers including
13the system driver. However, modular drivers cannot take over the console
14that is currently occupied by another modular driver. (Exception: Drivers that
15call take_over_console() will succeed in the takeover regardless of the type
16of driver occupying the consoles.) They can only take over the console that is
17occupied by the system driver. In the same token, if the modular driver is
18released by the console, the system driver will take over.
19
20Modular drivers, from the programmer's point of view, has to call:
21
22 take_over_console() - load and bind driver to console layer
23 give_up_console() - unbind and unload driver
24
25In newer kernels, the following are also available:
26
27 register_con_driver()
28 unregister_con_driver()
29
30If sysfs is enabled, the contents of /sys/class/vtconsole can be
31examined. This shows the console backends currently registered by the
32system which are named vtcon<n> where <n> is an integer fro 0 to 15. Thus:
33
34 ls /sys/class/vtconsole
35 . .. vtcon0 vtcon1
36
37Each directory in /sys/class/vtconsole has 3 files:
38
39 ls /sys/class/vtconsole/vtcon0
40 . .. bind name uevent
41
42What do these files signify?
43
44 1. bind - this is a read/write file. It shows the status of the driver if
45 read, or acts to bind or unbind the driver to the virtual consoles
46 when written to. The possible values are:
47
48 0 - means the driver is not bound and if echo'ed, commands the driver
49 to unbind
50
51 1 - means the driver is bound and if echo'ed, commands the driver to
52 bind
53
54 2. name - read-only file. Shows the name of the driver in this format:
55
56 cat /sys/class/vtconsole/vtcon0/name
57 (S) VGA+
58
59 '(S)' stands for a (S)ystem driver, ie, it cannot be directly
60 commanded to bind or unbind
61
62 'VGA+' is the name of the driver
63
64 cat /sys/class/vtconsole/vtcon1/name
65 (M) frame buffer device
66
67 In this case, '(M)' stands for a (M)odular driver, one that can be
68 directly commanded to bind or unbind.
69
70 3. uevent - ignore this file
71
72When unbinding, the modular driver is detached first, and then the system
73driver takes over the consoles vacated by the driver. Binding, on the other
74hand, will bind the driver to the consoles that are currently occupied by a
75system driver.
76
77NOTE1: Binding and binding must be selected in Kconfig. It's under:
78
79Device Drivers -> Character devices -> Support for binding and unbinding
80console drivers
81
82NOTE2: If any of the virtual consoles are in KD_GRAPHICS mode, then binding or
83unbinding will not succeed. An example of an application that sets the console
84to KD_GRAPHICS is X.
85
86How useful is this feature? This is very useful for console driver
87developers. By unbinding the driver from the console layer, one can unload the
88driver, make changes, recompile, reload and rebind the driver without any need
89for rebooting the kernel. For regular users who may want to switch from
90framebuffer console to VGA console and vice versa, this feature also makes
91this possible. (NOTE NOTE NOTE: Please read fbcon.txt under Documentation/fb
92for more details).
93
94Notes for developers:
95=====================
96
97take_over_console() is now broken up into:
98
99 register_con_driver()
100 bind_con_driver() - private function
101
102give_up_console() is a wrapper to unregister_con_driver(), and a driver must
103be fully unbound for this call to succeed. con_is_bound() will check if the
104driver is bound or not.
105
106Guidelines for console driver writers:
107=====================================
108
109In order for binding to and unbinding from the console to properly work,
110console drivers must follow these guidelines:
111
1121. All drivers, except system drivers, must call either register_con_driver()
113 or take_over_console(). register_con_driver() will just add the driver to
114 the console's internal list. It won't take over the
115 console. take_over_console(), as it name implies, will also take over (or
116 bind to) the console.
117
1182. All resources allocated during con->con_init() must be released in
119 con->con_deinit().
120
1213. All resources allocated in con->con_startup() must be released when the
122 driver, which was previously bound, becomes unbound. The console layer
123 does not have a complementary call to con->con_startup() so it's up to the
124 driver to check when it's legal to release these resources. Calling
125 con_is_bound() in con->con_deinit() will help. If the call returned
126 false(), then it's safe to release the resources. This balance has to be
127 ensured because con->con_startup() can be called again when a request to
128 rebind the driver to the console arrives.
129
1304. Upon exit of the driver, ensure that the driver is totally unbound. If the
131 condition is satisfied, then the driver must call unregister_con_driver()
132 or give_up_console().
133
1345. unregister_con_driver() can also be called on conditions which make it
135 impossible for the driver to service console requests. This can happen
136 with the framebuffer console that suddenly lost all of its drivers.
137
138The current crop of console drivers should still work correctly, but binding
139and unbinding them may cause problems. With minimal fixes, these drivers can
140be made to work correctly.
141
142==========================
143Antonino Daplas <adaplas@pol.net>
144
diff --git a/Documentation/driver-model/overview.txt b/Documentation/driver-model/overview.txt
index ac4a7a737e..2050c9ffc6 100644
--- a/Documentation/driver-model/overview.txt
+++ b/Documentation/driver-model/overview.txt
@@ -18,7 +18,7 @@ Traditional driver models implemented some sort of tree-like structure
18(sometimes just a list) for the devices they control. There wasn't any 18(sometimes just a list) for the devices they control. There wasn't any
19uniformity across the different bus types. 19uniformity across the different bus types.
20 20
21The current driver model provides a comon, uniform data model for describing 21The current driver model provides a common, uniform data model for describing
22a bus and the devices that can appear under the bus. The unified bus 22a bus and the devices that can appear under the bus. The unified bus
23model includes a set of common attributes which all busses carry, and a set 23model includes a set of common attributes which all busses carry, and a set
24of common callbacks, such as device discovery during bus probing, bus 24of common callbacks, such as device discovery during bus probing, bus
diff --git a/Documentation/fb/fbcon.txt b/Documentation/fb/fbcon.txt
index 08dce0f631..f373df12ed 100644
--- a/Documentation/fb/fbcon.txt
+++ b/Documentation/fb/fbcon.txt
@@ -135,10 +135,10 @@ C. Boot options
135 135
136 The angle can be changed anytime afterwards by 'echoing' the same 136 The angle can be changed anytime afterwards by 'echoing' the same
137 numbers to any one of the 2 attributes found in 137 numbers to any one of the 2 attributes found in
138 /sys/class/graphics/fb{x} 138 /sys/class/graphics/fbcon
139 139
140 con_rotate - rotate the display of the active console 140 rotate - rotate the display of the active console
141 con_rotate_all - rotate the display of all consoles 141 rotate_all - rotate the display of all consoles
142 142
143 Console rotation will only become available if Console Rotation 143 Console rotation will only become available if Console Rotation
144 Support is compiled in your kernel. 144 Support is compiled in your kernel.
@@ -148,5 +148,177 @@ C. Boot options
148 Actually, the underlying fb driver is totally ignorant of console 148 Actually, the underlying fb driver is totally ignorant of console
149 rotation. 149 rotation.
150 150
151--- 151C. Attaching, Detaching and Unloading
152
153Before going on on how to attach, detach and unload the framebuffer console, an
154illustration of the dependencies may help.
155
156The console layer, as with most subsystems, needs a driver that interfaces with
157the hardware. Thus, in a VGA console:
158
159console ---> VGA driver ---> hardware.
160
161Assuming the VGA driver can be unloaded, one must first unbind the VGA driver
162from the console layer before unloading the driver. The VGA driver cannot be
163unloaded if it is still bound to the console layer. (See
164Documentation/console/console.txt for more information).
165
166This is more complicated in the case of the framebuffer console (fbcon),
167because fbcon is an intermediate layer between the console and the drivers:
168
169console ---> fbcon ---> fbdev drivers ---> hardware
170
171The fbdev drivers cannot be unloaded if it's bound to fbcon, and fbcon cannot
172be unloaded if it's bound to the console layer.
173
174So to unload the fbdev drivers, one must first unbind fbcon from the console,
175then unbind the fbdev drivers from fbcon. Fortunately, unbinding fbcon from
176the console layer will automatically unbind framebuffer drivers from
177fbcon. Thus, there is no need to explicitly unbind the fbdev drivers from
178fbcon.
179
180So, how do we unbind fbcon from the console? Part of the answer is in
181Documentation/console/console.txt. To summarize:
182
183Echo a value to the bind file that represents the framebuffer console
184driver. So assuming vtcon1 represents fbcon, then:
185
186echo 1 > /sys/class/vtconsole/vtcon1/bind - attach framebuffer console to
187 console layer
188echo 0 > sys/class/vtconsole/vtcon1/bind - detach framebuffer console from
189 console layer
190
191If fbcon is detached from the console layer, your boot console driver (which is
192usually VGA text mode) will take over. A few drivers (rivafb and i810fb) will
193restore VGA text mode for you. With the rest, before detaching fbcon, you
194must take a few additional steps to make sure that your VGA text mode is
195restored properly. The following is one of the several methods that you can do:
196
1971. Download or install vbetool. This utility is included with most
198 distributions nowadays, and is usually part of the suspend/resume tool.
199
2002. In your kernel configuration, ensure that CONFIG_FRAMEBUFFER_CONSOLE is set
201 to 'y' or 'm'. Enable one or more of your favorite framebuffer drivers.
202
2033. Boot into text mode and as root run:
204
205 vbetool vbestate save > <vga state file>
206
207 The above command saves the register contents of your graphics
208 hardware to <vga state file>. You need to do this step only once as
209 the state file can be reused.
210
2114. If fbcon is compiled as a module, load fbcon by doing:
212
213 modprobe fbcon
214
2155. Now to detach fbcon:
216
217 vbetool vbestate restore < <vga state file> && \
218 echo 0 > /sys/class/vtconsole/vtcon1/bind
219
2206. That's it, you're back to VGA mode. And if you compiled fbcon as a module,
221 you can unload it by 'rmmod fbcon'
222
2237. To reattach fbcon:
224
225 echo 1 > /sys/class/vtconsole/vtcon1/bind
226
2278. Once fbcon is unbound, all drivers registered to the system will also
228become unbound. This means that fbcon and individual framebuffer drivers
229can be unloaded or reloaded at will. Reloading the drivers or fbcon will
230automatically bind the console, fbcon and the drivers together. Unloading
231all the drivers without unloading fbcon will make it impossible for the
232console to bind fbcon.
233
234Notes for vesafb users:
235=======================
236
237Unfortunately, if your bootline includes a vga=xxx parameter that sets the
238hardware in graphics mode, such as when loading vesafb, vgacon will not load.
239Instead, vgacon will replace the default boot console with dummycon, and you
240won't get any display after detaching fbcon. Your machine is still alive, so
241you can reattach vesafb. However, to reattach vesafb, you need to do one of
242the following:
243
244Variation 1:
245
246 a. Before detaching fbcon, do
247
248 vbetool vbemode save > <vesa state file> # do once for each vesafb mode,
249 # the file can be reused
250
251 b. Detach fbcon as in step 5.
252
253 c. Attach fbcon
254
255 vbetool vbestate restore < <vesa state file> && \
256 echo 1 > /sys/class/vtconsole/vtcon1/bind
257
258Variation 2:
259
260 a. Before detaching fbcon, do:
261 echo <ID> > /sys/class/tty/console/bind
262
263
264 vbetool vbemode get
265
266 b. Take note of the mode number
267
268 c. Detach fbcon as in step 5.
269
270 d. Attach fbcon:
271
272 vbetool vbemode set <mode number> && \
273 echo 1 > /sys/class/vtconsole/vtcon1/bind
274
275Samples:
276========
277
278Here are 2 sample bash scripts that you can use to bind or unbind the
279framebuffer console driver if you are in an X86 box:
280
281---------------------------------------------------------------------------
282#!/bin/bash
283# Unbind fbcon
284
285# Change this to where your actual vgastate file is located
286# Or Use VGASTATE=$1 to indicate the state file at runtime
287VGASTATE=/tmp/vgastate
288
289# path to vbetool
290VBETOOL=/usr/local/bin
291
292
293for (( i = 0; i < 16; i++))
294do
295 if test -x /sys/class/vtconsole/vtcon$i; then
296 if [ `cat /sys/class/vtconsole/vtcon$i/name | grep -c "frame buffer"` \
297 = 1 ]; then
298 if test -x $VBETOOL/vbetool; then
299 echo Unbinding vtcon$i
300 $VBETOOL/vbetool vbestate restore < $VGASTATE
301 echo 0 > /sys/class/vtconsole/vtcon$i/bind
302 fi
303 fi
304 fi
305done
306
307---------------------------------------------------------------------------
308#!/bin/bash
309# Bind fbcon
310
311for (( i = 0; i < 16; i++))
312do
313 if test -x /sys/class/vtconsole/vtcon$i; then
314 if [ `cat /sys/class/vtconsole/vtcon$i/name | grep -c "frame buffer"` \
315 = 1 ]; then
316 echo Binding vtcon$i
317 echo 1 > /sys/class/vtconsole/vtcon$i/bind
318 fi
319 fi
320done
321---------------------------------------------------------------------------
322
323--
152Antonino Daplas <adaplas@pol.net> 324Antonino Daplas <adaplas@pol.net>
diff --git a/Documentation/filesystems/ext3.txt b/Documentation/filesystems/ext3.txt
index afb1335c05..4aecc9bdb2 100644
--- a/Documentation/filesystems/ext3.txt
+++ b/Documentation/filesystems/ext3.txt
@@ -113,6 +113,14 @@ noquota
113grpquota 113grpquota
114usrquota 114usrquota
115 115
116bh (*) ext3 associates buffer heads to data pages to
117nobh (a) cache disk block mapping information
118 (b) link pages into transaction to provide
119 ordering guarantees.
120 "bh" option forces use of buffer heads.
121 "nobh" option tries to avoid associating buffer
122 heads (supported only for "writeback" mode).
123
116 124
117Specification 125Specification
118============= 126=============
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index a9c00facdf..14ef3868a3 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -1123,6 +1123,14 @@ The top Makefile exports the following variables:
1123 $(INSTALL_MOD_PATH)/lib/modules/$(KERNELRELEASE). The user may 1123 $(INSTALL_MOD_PATH)/lib/modules/$(KERNELRELEASE). The user may
1124 override this value on the command line if desired. 1124 override this value on the command line if desired.
1125 1125
1126 INSTALL_MOD_STRIP
1127
1128 If this variable is specified, will cause modules to be stripped
1129 after they are installed. If INSTALL_MOD_STRIP is '1', then the
1130 default option --strip-debug will be used. Otherwise,
1131 INSTALL_MOD_STRIP will used as the option(s) to the strip command.
1132
1133
1126=== 8 Makefile language 1134=== 8 Makefile language
1127 1135
1128The kernel Makefiles are designed to run with GNU Make. The Makefiles 1136The kernel Makefiles are designed to run with GNU Make. The Makefiles
diff --git a/Documentation/kdump/gdbmacros.txt b/Documentation/kdump/gdbmacros.txt
index dcf5580380..9b9b454b04 100644
--- a/Documentation/kdump/gdbmacros.txt
+++ b/Documentation/kdump/gdbmacros.txt
@@ -175,7 +175,7 @@ end
175document trapinfo 175document trapinfo
176 Run info threads and lookup pid of thread #1 176 Run info threads and lookup pid of thread #1
177 'trapinfo <pid>' will tell you by which trap & possibly 177 'trapinfo <pid>' will tell you by which trap & possibly
178 addresthe kernel paniced. 178 address the kernel panicked.
179end 179end
180 180
181 181
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index bca6f389da..0d189c93ee 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -61,6 +61,7 @@ parameter is applicable:
61 MTD MTD support is enabled. 61 MTD MTD support is enabled.
62 NET Appropriate network support is enabled. 62 NET Appropriate network support is enabled.
63 NUMA NUMA support is enabled. 63 NUMA NUMA support is enabled.
64 GENERIC_TIME The generic timeofday code is enabled.
64 NFS Appropriate NFS support is enabled. 65 NFS Appropriate NFS support is enabled.
65 OSS OSS sound support is enabled. 66 OSS OSS sound support is enabled.
66 PARIDE The ParIDE subsystem is enabled. 67 PARIDE The ParIDE subsystem is enabled.
@@ -179,6 +180,11 @@ running once the system is up.
179 override platform specific driver. 180 override platform specific driver.
180 See also Documentation/acpi-hotkey.txt. 181 See also Documentation/acpi-hotkey.txt.
181 182
183 acpi_pm_good [IA-32,X86-64]
184 Override the pmtimer bug detection: force the kernel
185 to assume that this machine's pmtimer latches its value
186 and always returns good values.
187
182 enable_timer_pin_1 [i386,x86-64] 188 enable_timer_pin_1 [i386,x86-64]
183 Enable PIN 1 of APIC timer 189 Enable PIN 1 of APIC timer
184 Can be useful to work around chipset bugs 190 Can be useful to work around chipset bugs
@@ -341,10 +347,11 @@ running once the system is up.
341 Value can be changed at runtime via 347 Value can be changed at runtime via
342 /selinux/checkreqprot. 348 /selinux/checkreqprot.
343 349
344 clock= [BUGS=IA-32,HW] gettimeofday timesource override. 350 clock= [BUGS=IA-32, HW] gettimeofday clocksource override.
345 Forces specified timesource (if avaliable) to be used 351 [Deprecated]
346 when calculating gettimeofday(). If specicified 352 Forces specified clocksource (if available) to be used
347 timesource is not avalible, it defaults to PIT. 353 when calculating gettimeofday(). If specified
354 clocksource is not available, it defaults to PIT.
348 Format: { pit | tsc | cyclone | pmtmr } 355 Format: { pit | tsc | cyclone | pmtmr }
349 356
350 disable_8254_timer 357 disable_8254_timer
@@ -1617,6 +1624,10 @@ running once the system is up.
1617 1624
1618 time Show timing data prefixed to each printk message line 1625 time Show timing data prefixed to each printk message line
1619 1626
1627 clocksource= [GENERIC_TIME] Override the default clocksource
1628 Override the default clocksource and use the clocksource
1629 with the name specified.
1630
1620 tipar.timeout= [HW,PPT] 1631 tipar.timeout= [HW,PPT]
1621 Set communications timeout in tenths of a second 1632 Set communications timeout in tenths of a second
1622 (default 15). 1633 (default 15).
@@ -1658,6 +1669,10 @@ running once the system is up.
1658 usbhid.mousepoll= 1669 usbhid.mousepoll=
1659 [USBHID] The interval which mice are to be polled at. 1670 [USBHID] The interval which mice are to be polled at.
1660 1671
1672 vdso= [IA-32]
1673 vdso=1: enable VDSO (default)
1674 vdso=0: disable VDSO mapping
1675
1661 video= [FB] Frame buffer configuration 1676 video= [FB] Frame buffer configuration
1662 See Documentation/fb/modedb.txt. 1677 See Documentation/fb/modedb.txt.
1663 1678
diff --git a/Documentation/keys.txt b/Documentation/keys.txt
index 3bbe157b45..61c0fad2fe 100644
--- a/Documentation/keys.txt
+++ b/Documentation/keys.txt
@@ -241,25 +241,30 @@ The security class "key" has been added to SELinux so that mandatory access
241controls can be applied to keys created within various contexts. This support 241controls can be applied to keys created within various contexts. This support
242is preliminary, and is likely to change quite significantly in the near future. 242is preliminary, and is likely to change quite significantly in the near future.
243Currently, all of the basic permissions explained above are provided in SELinux 243Currently, all of the basic permissions explained above are provided in SELinux
244as well; SE Linux is simply invoked after all basic permission checks have been 244as well; SELinux is simply invoked after all basic permission checks have been
245performed. 245performed.
246 246
247Each key is labeled with the same context as the task to which it belongs. 247The value of the file /proc/self/attr/keycreate influences the labeling of
248Typically, this is the same task that was running when the key was created. 248newly-created keys. If the contents of that file correspond to an SELinux
249The default keyrings are handled differently, but in a way that is very 249security context, then the key will be assigned that context. Otherwise, the
250intuitive: 250key will be assigned the current context of the task that invoked the key
251creation request. Tasks must be granted explicit permission to assign a
252particular context to newly-created keys, using the "create" permission in the
253key security class.
251 254
252 (*) The user and user session keyrings that are created when the user logs in 255The default keyrings associated with users will be labeled with the default
253 are currently labeled with the context of the login manager. 256context of the user if and only if the login programs have been instrumented to
254 257properly initialize keycreate during the login process. Otherwise, they will
255 (*) The keyrings associated with new threads are each labeled with the context 258be labeled with the context of the login program itself.
256 of their associated thread, and both session and process keyrings are
257 handled similarly.
258 259
259Note, however, that the default keyrings associated with the root user are 260Note, however, that the default keyrings associated with the root user are
260labeled with the default kernel context, since they are created early in the 261labeled with the default kernel context, since they are created early in the
261boot process, before root has a chance to log in. 262boot process, before root has a chance to log in.
262 263
264The keyrings associated with new threads are each labeled with the context of
265their associated thread, and both session and process keyrings are handled
266similarly.
267
263 268
264================ 269================
265NEW PROCFS FILES 270NEW PROCFS FILES
@@ -270,9 +275,17 @@ about the status of the key service:
270 275
271 (*) /proc/keys 276 (*) /proc/keys
272 277
273 This lists all the keys on the system, giving information about their 278 This lists the keys that are currently viewable by the task reading the
274 type, description and permissions. The payload of the key is not available 279 file, giving information about their type, description and permissions.
275 this way: 280 It is not possible to view the payload of the key this way, though some
281 information about it may be given.
282
283 The only keys included in the list are those that grant View permission to
284 the reading process whether or not it possesses them. Note that LSM
285 security checks are still performed, and may further filter out keys that
286 the current process is not authorised to view.
287
288 The contents of the file look like this:
276 289
277 SERIAL FLAGS USAGE EXPY PERM UID GID TYPE DESCRIPTION: SUMMARY 290 SERIAL FLAGS USAGE EXPY PERM UID GID TYPE DESCRIPTION: SUMMARY
278 00000001 I----- 39 perm 1f3f0000 0 0 keyring _uid_ses.0: 1/4 291 00000001 I----- 39 perm 1f3f0000 0 0 keyring _uid_ses.0: 1/4
@@ -300,7 +313,7 @@ about the status of the key service:
300 (*) /proc/key-users 313 (*) /proc/key-users
301 314
302 This file lists the tracking data for each user that has at least one key 315 This file lists the tracking data for each user that has at least one key
303 on the system. Such data includes quota information and statistics: 316 on the system. Such data includes quota information and statistics:
304 317
305 [root@andromeda root]# cat /proc/key-users 318 [root@andromeda root]# cat /proc/key-users
306 0: 46 45/45 1/100 13/10000 319 0: 46 45/45 1/100 13/10000
diff --git a/Documentation/md.txt b/Documentation/md.txt
index 03a13c462c..0668f9dc9d 100644
--- a/Documentation/md.txt
+++ b/Documentation/md.txt
@@ -200,6 +200,17 @@ All md devices contain:
200 This can be written only while the array is being assembled, not 200 This can be written only while the array is being assembled, not
201 after it is started. 201 after it is started.
202 202
203 layout
204 The "layout" for the array for the particular level. This is
205 simply a number that is interpreted differently by different
206 levels. It can be written while assembling an array.
207
208 resync_start
209 The point at which resync should start. If no resync is needed,
210 this will be a very large number. At array creation it will
211 default to 0, though starting the array as 'clean' will
212 set it much larger.
213
203 new_dev 214 new_dev
204 This file can be written but not read. The value written should 215 This file can be written but not read. The value written should
205 be a block device number as major:minor. e.g. 8:0 216 be a block device number as major:minor. e.g. 8:0
@@ -207,6 +218,54 @@ All md devices contain:
207 available. It will then appear at md/dev-XXX (depending on the 218 available. It will then appear at md/dev-XXX (depending on the
208 name of the device) and further configuration is then possible. 219 name of the device) and further configuration is then possible.
209 220
221 safe_mode_delay
222 When an md array has seen no write requests for a certain period
223 of time, it will be marked as 'clean'. When another write
224 request arrives, the array is marked as 'dirty' before the write
225 commences. This is known as 'safe_mode'.
226 The 'certain period' is controlled by this file which stores the
227 period as a number of seconds. The default is 200msec (0.200).
228 Writing a value of 0 disables safemode.
229
230 array_state
231 This file contains a single word which describes the current
232 state of the array. In many cases, the state can be set by
233 writing the word for the desired state, however some states
234 cannot be explicitly set, and some transitions are not allowed.
235
236 clear
237 No devices, no size, no level
238 Writing is equivalent to STOP_ARRAY ioctl
239 inactive
240 May have some settings, but array is not active
241 all IO results in error
242 When written, doesn't tear down array, but just stops it
243 suspended (not supported yet)
244 All IO requests will block. The array can be reconfigured.
245 Writing this, if accepted, will block until array is quiescent
246 readonly
247 no resync can happen. no superblocks get written.
248 write requests fail
249 read-auto
250 like readonly, but behaves like 'clean' on a write request.
251
252 clean - no pending writes, but otherwise active.
253 When written to inactive array, starts without resync
254 If a write request arrives then
255 if metadata is known, mark 'dirty' and switch to 'active'.
256 if not known, block and switch to write-pending
257 If written to an active array that has pending writes, then fails.
258 active
259 fully active: IO and resync can be happening.
260 When written to inactive array, starts with resync
261
262 write-pending
263 clean, but writes are blocked waiting for 'active' to be written.
264
265 active-idle
266 like active, but no writes have been seen for a while (safe_mode_delay).
267
268
210 sync_speed_min 269 sync_speed_min
211 sync_speed_max 270 sync_speed_max
212 This are similar to /proc/sys/dev/raid/speed_limit_{min,max} 271 This are similar to /proc/sys/dev/raid/speed_limit_{min,max}
@@ -250,10 +309,18 @@ Each directory contains:
250 faulty - device has been kicked from active use due to 309 faulty - device has been kicked from active use due to
251 a detected fault 310 a detected fault
252 in_sync - device is a fully in-sync member of the array 311 in_sync - device is a fully in-sync member of the array
312 writemostly - device will only be subject to read
313 requests if there are no other options.
314 This applies only to raid1 arrays.
253 spare - device is working, but not a full member. 315 spare - device is working, but not a full member.
254 This includes spares that are in the process 316 This includes spares that are in the process
255 of being recovered to 317
256 This list may grow in future. 318
319 This can be written to.
320 Writing "faulty" simulates a failure on the device.
321 Writing "remove" removes the device from the array.
322 Writing "writemostly" sets the writemostly flag.
323 Writing "-writemostly" clears the writemostly flag.
257 324
258 errors 325 errors
259 An approximate count of read errors that have been detected on 326 An approximate count of read errors that have been detected on
diff --git a/Documentation/pi-futex.txt b/Documentation/pi-futex.txt
new file mode 100644
index 0000000000..5d61dacd21
--- /dev/null
+++ b/Documentation/pi-futex.txt
@@ -0,0 +1,121 @@
1Lightweight PI-futexes
2----------------------
3
4We are calling them lightweight for 3 reasons:
5
6 - in the user-space fastpath a PI-enabled futex involves no kernel work
7 (or any other PI complexity) at all. No registration, no extra kernel
8 calls - just pure fast atomic ops in userspace.
9
10 - even in the slowpath, the system call and scheduling pattern is very
11 similar to normal futexes.
12
13 - the in-kernel PI implementation is streamlined around the mutex
14 abstraction, with strict rules that keep the implementation
15 relatively simple: only a single owner may own a lock (i.e. no
16 read-write lock support), only the owner may unlock a lock, no
17 recursive locking, etc.
18
19Priority Inheritance - why?
20---------------------------
21
22The short reply: user-space PI helps achieving/improving determinism for
23user-space applications. In the best-case, it can help achieve
24determinism and well-bound latencies. Even in the worst-case, PI will
25improve the statistical distribution of locking related application
26delays.
27
28The longer reply:
29-----------------
30
31Firstly, sharing locks between multiple tasks is a common programming
32technique that often cannot be replaced with lockless algorithms. As we
33can see it in the kernel [which is a quite complex program in itself],
34lockless structures are rather the exception than the norm - the current
35ratio of lockless vs. locky code for shared data structures is somewhere
36between 1:10 and 1:100. Lockless is hard, and the complexity of lockless
37algorithms often endangers the ability to do robust reviews of said code.
38I.e. critical RT apps often choose lock structures to protect critical
39data structures, instead of lockless algorithms. Furthermore, there are
40cases (like shared hardware, or other resource limits) where lockless
41access is mathematically impossible.
42
43Media players (such as Jack) are an example of reasonable application
44design with multiple tasks (with multiple priority levels) sharing
45short-held locks: for example, a highprio audio playback thread is
46combined with medium-prio construct-audio-data threads and low-prio
47display-colory-stuff threads. Add video and decoding to the mix and
48we've got even more priority levels.
49
50So once we accept that synchronization objects (locks) are an
51unavoidable fact of life, and once we accept that multi-task userspace
52apps have a very fair expectation of being able to use locks, we've got
53to think about how to offer the option of a deterministic locking
54implementation to user-space.
55
56Most of the technical counter-arguments against doing priority
57inheritance only apply to kernel-space locks. But user-space locks are
58different, there we cannot disable interrupts or make the task
59non-preemptible in a critical section, so the 'use spinlocks' argument
60does not apply (user-space spinlocks have the same priority inversion
61problems as other user-space locking constructs). Fact is, pretty much
62the only technique that currently enables good determinism for userspace
63locks (such as futex-based pthread mutexes) is priority inheritance:
64
65Currently (without PI), if a high-prio and a low-prio task shares a lock
66[this is a quite common scenario for most non-trivial RT applications],
67even if all critical sections are coded carefully to be deterministic
68(i.e. all critical sections are short in duration and only execute a
69limited number of instructions), the kernel cannot guarantee any
70deterministic execution of the high-prio task: any medium-priority task
71could preempt the low-prio task while it holds the shared lock and
72executes the critical section, and could delay it indefinitely.
73
74Implementation:
75---------------
76
77As mentioned before, the userspace fastpath of PI-enabled pthread
78mutexes involves no kernel work at all - they behave quite similarly to
79normal futex-based locks: a 0 value means unlocked, and a value==TID
80means locked. (This is the same method as used by list-based robust
81futexes.) Userspace uses atomic ops to lock/unlock these mutexes without
82entering the kernel.
83
84To handle the slowpath, we have added two new futex ops:
85
86 FUTEX_LOCK_PI
87 FUTEX_UNLOCK_PI
88
89If the lock-acquire fastpath fails, [i.e. an atomic transition from 0 to
90TID fails], then FUTEX_LOCK_PI is called. The kernel does all the
91remaining work: if there is no futex-queue attached to the futex address
92yet then the code looks up the task that owns the futex [it has put its
93own TID into the futex value], and attaches a 'PI state' structure to
94the futex-queue. The pi_state includes an rt-mutex, which is a PI-aware,
95kernel-based synchronization object. The 'other' task is made the owner
96of the rt-mutex, and the FUTEX_WAITERS bit is atomically set in the
97futex value. Then this task tries to lock the rt-mutex, on which it
98blocks. Once it returns, it has the mutex acquired, and it sets the
99futex value to its own TID and returns. Userspace has no other work to
100perform - it now owns the lock, and futex value contains
101FUTEX_WAITERS|TID.
102
103If the unlock side fastpath succeeds, [i.e. userspace manages to do a
104TID -> 0 atomic transition of the futex value], then no kernel work is
105triggered.
106
107If the unlock fastpath fails (because the FUTEX_WAITERS bit is set),
108then FUTEX_UNLOCK_PI is called, and the kernel unlocks the futex on the
109behalf of userspace - and it also unlocks the attached
110pi_state->rt_mutex and thus wakes up any potential waiters.
111
112Note that under this approach, contrary to previous PI-futex approaches,
113there is no prior 'registration' of a PI-futex. [which is not quite
114possible anyway, due to existing ABI properties of pthread mutexes.]
115
116Also, under this scheme, 'robustness' and 'PI' are two orthogonal
117properties of futexes, and all four combinations are possible: futex,
118robust-futex, PI-futex, robust+PI-futex.
119
120More details about priority inheritance can be found in
121Documentation/rt-mutex-design.txt.
diff --git a/Documentation/robust-futexes.txt b/Documentation/robust-futexes.txt
index df82d75245..76e8064b8c 100644
--- a/Documentation/robust-futexes.txt
+++ b/Documentation/robust-futexes.txt
@@ -95,7 +95,7 @@ comparison. If the thread has registered a list, then normally the list
95is empty. If the thread/process crashed or terminated in some incorrect 95is empty. If the thread/process crashed or terminated in some incorrect
96way then the list might be non-empty: in this case the kernel carefully 96way then the list might be non-empty: in this case the kernel carefully
97walks the list [not trusting it], and marks all locks that are owned by 97walks the list [not trusting it], and marks all locks that are owned by
98this thread with the FUTEX_OWNER_DEAD bit, and wakes up one waiter (if 98this thread with the FUTEX_OWNER_DIED bit, and wakes up one waiter (if
99any). 99any).
100 100
101The list is guaranteed to be private and per-thread at do_exit() time, 101The list is guaranteed to be private and per-thread at do_exit() time,
diff --git a/Documentation/rt-mutex-design.txt b/Documentation/rt-mutex-design.txt
new file mode 100644
index 0000000000..c472ffacc2
--- /dev/null
+++ b/Documentation/rt-mutex-design.txt
@@ -0,0 +1,781 @@
1#
2# Copyright (c) 2006 Steven Rostedt
3# Licensed under the GNU Free Documentation License, Version 1.2
4#
5
6RT-mutex implementation design
7------------------------------
8
9This document tries to describe the design of the rtmutex.c implementation.
10It doesn't describe the reasons why rtmutex.c exists. For that please see
11Documentation/rt-mutex.txt. Although this document does explain problems
12that happen without this code, but that is in the concept to understand
13what the code actually is doing.
14
15The goal of this document is to help others understand the priority
16inheritance (PI) algorithm that is used, as well as reasons for the
17decisions that were made to implement PI in the manner that was done.
18
19
20Unbounded Priority Inversion
21----------------------------
22
23Priority inversion is when a lower priority process executes while a higher
24priority process wants to run. This happens for several reasons, and
25most of the time it can't be helped. Anytime a high priority process wants
26to use a resource that a lower priority process has (a mutex for example),
27the high priority process must wait until the lower priority process is done
28with the resource. This is a priority inversion. What we want to prevent
29is something called unbounded priority inversion. That is when the high
30priority process is prevented from running by a lower priority process for
31an undetermined amount of time.
32
33The classic example of unbounded priority inversion is where you have three
34processes, let's call them processes A, B, and C, where A is the highest
35priority process, C is the lowest, and B is in between. A tries to grab a lock
36that C owns and must wait and lets C run to release the lock. But in the
37meantime, B executes, and since B is of a higher priority than C, it preempts C,
38but by doing so, it is in fact preempting A which is a higher priority process.
39Now there's no way of knowing how long A will be sleeping waiting for C
40to release the lock, because for all we know, B is a CPU hog and will
41never give C a chance to release the lock. This is called unbounded priority
42inversion.
43
44Here's a little ASCII art to show the problem.
45
46 grab lock L1 (owned by C)
47 |
48A ---+
49 C preempted by B
50 |
51C +----+
52
53B +-------->
54 B now keeps A from running.
55
56
57Priority Inheritance (PI)
58-------------------------
59
60There are several ways to solve this issue, but other ways are out of scope
61for this document. Here we only discuss PI.
62
63PI is where a process inherits the priority of another process if the other
64process blocks on a lock owned by the current process. To make this easier
65to understand, let's use the previous example, with processes A, B, and C again.
66
67This time, when A blocks on the lock owned by C, C would inherit the priority
68of A. So now if B becomes runnable, it would not preempt C, since C now has
69the high priority of A. As soon as C releases the lock, it loses its
70inherited priority, and A then can continue with the resource that C had.
71
72Terminology
73-----------
74
75Here I explain some terminology that is used in this document to help describe
76the design that is used to implement PI.
77
78PI chain - The PI chain is an ordered series of locks and processes that cause
79 processes to inherit priorities from a previous process that is
80 blocked on one of its locks. This is described in more detail
81 later in this document.
82
83mutex - In this document, to differentiate from locks that implement
84 PI and spin locks that are used in the PI code, from now on
85 the PI locks will be called a mutex.
86
87lock - In this document from now on, I will use the term lock when
88 referring to spin locks that are used to protect parts of the PI
89 algorithm. These locks disable preemption for UP (when
90 CONFIG_PREEMPT is enabled) and on SMP prevents multiple CPUs from
91 entering critical sections simultaneously.
92
93spin lock - Same as lock above.
94
95waiter - A waiter is a struct that is stored on the stack of a blocked
96 process. Since the scope of the waiter is within the code for
97 a process being blocked on the mutex, it is fine to allocate
98 the waiter on the process's stack (local variable). This
99 structure holds a pointer to the task, as well as the mutex that
100 the task is blocked on. It also has the plist node structures to
101 place the task in the waiter_list of a mutex as well as the
102 pi_list of a mutex owner task (described below).
103
104 waiter is sometimes used in reference to the task that is waiting
105 on a mutex. This is the same as waiter->task.
106
107waiters - A list of processes that are blocked on a mutex.
108
109top waiter - The highest priority process waiting on a specific mutex.
110
111top pi waiter - The highest priority process waiting on one of the mutexes
112 that a specific process owns.
113
114Note: task and process are used interchangeably in this document, mostly to
115 differentiate between two processes that are being described together.
116
117
118PI chain
119--------
120
121The PI chain is a list of processes and mutexes that may cause priority
122inheritance to take place. Multiple chains may converge, but a chain
123would never diverge, since a process can't be blocked on more than one
124mutex at a time.
125
126Example:
127
128 Process: A, B, C, D, E
129 Mutexes: L1, L2, L3, L4
130
131 A owns: L1
132 B blocked on L1
133 B owns L2
134 C blocked on L2
135 C owns L3
136 D blocked on L3
137 D owns L4
138 E blocked on L4
139
140The chain would be:
141
142 E->L4->D->L3->C->L2->B->L1->A
143
144To show where two chains merge, we could add another process F and
145another mutex L5 where B owns L5 and F is blocked on mutex L5.
146
147The chain for F would be:
148
149 F->L5->B->L1->A
150
151Since a process may own more than one mutex, but never be blocked on more than
152one, the chains merge.
153
154Here we show both chains:
155
156 E->L4->D->L3->C->L2-+
157 |
158 +->B->L1->A
159 |
160 F->L5-+
161
162For PI to work, the processes at the right end of these chains (or we may
163also call it the Top of the chain) must be equal to or higher in priority
164than the processes to the left or below in the chain.
165
166Also since a mutex may have more than one process blocked on it, we can
167have multiple chains merge at mutexes. If we add another process G that is
168blocked on mutex L2:
169
170 G->L2->B->L1->A
171
172And once again, to show how this can grow I will show the merging chains
173again.
174
175 E->L4->D->L3->C-+
176 +->L2-+
177 | |
178 G-+ +->B->L1->A
179 |
180 F->L5-+
181
182
183Plist
184-----
185
186Before I go further and talk about how the PI chain is stored through lists
187on both mutexes and processes, I'll explain the plist. This is similar to
188the struct list_head functionality that is already in the kernel.
189The implementation of plist is out of scope for this document, but it is
190very important to understand what it does.
191
192There are a few differences between plist and list, the most important one
193being that plist is a priority sorted linked list. This means that the
194priorities of the plist are sorted, such that it takes O(1) to retrieve the
195highest priority item in the list. Obviously this is useful to store processes
196based on their priorities.
197
198Another difference, which is important for implementation, is that, unlike
199list, the head of the list is a different element than the nodes of a list.
200So the head of the list is declared as struct plist_head and nodes that will
201be added to the list are declared as struct plist_node.
202
203
204Mutex Waiter List
205-----------------
206
207Every mutex keeps track of all the waiters that are blocked on itself. The mutex
208has a plist to store these waiters by priority. This list is protected by
209a spin lock that is located in the struct of the mutex. This lock is called
210wait_lock. Since the modification of the waiter list is never done in
211interrupt context, the wait_lock can be taken without disabling interrupts.
212
213
214Task PI List
215------------
216
217To keep track of the PI chains, each process has its own PI list. This is
218a list of all top waiters of the mutexes that are owned by the process.
219Note that this list only holds the top waiters and not all waiters that are
220blocked on mutexes owned by the process.
221
222The top of the task's PI list is always the highest priority task that
223is waiting on a mutex that is owned by the task. So if the task has
224inherited a priority, it will always be the priority of the task that is
225at the top of this list.
226
227This list is stored in the task structure of a process as a plist called
228pi_list. This list is protected by a spin lock also in the task structure,
229called pi_lock. This lock may also be taken in interrupt context, so when
230locking the pi_lock, interrupts must be disabled.
231
232
233Depth of the PI Chain
234---------------------
235
236The maximum depth of the PI chain is not dynamic, and could actually be
237defined. But it is very complex to figure out, since it depends on all
238the nesting of mutexes. Let's look at the example where we have 3 mutexes,
239L1, L2, and L3, and four separate functions func1, func2, func3 and func4.
240The following shows a locking order of L1->L2->L3, but may not actually
241be directly nested that way.
242
243void func1(void)
244{
245 mutex_lock(L1);
246
247 /* do anything */
248
249 mutex_unlock(L1);
250}
251
252void func2(void)
253{
254 mutex_lock(L1);
255 mutex_lock(L2);
256
257 /* do something */
258
259 mutex_unlock(L2);
260 mutex_unlock(L1);
261}
262
263void func3(void)
264{
265 mutex_lock(L2);
266 mutex_lock(L3);
267
268 /* do something else */
269
270 mutex_unlock(L3);
271 mutex_unlock(L2);
272}
273
274void func4(void)
275{
276 mutex_lock(L3);
277
278 /* do something again */
279
280 mutex_unlock(L3);
281}
282
283Now we add 4 processes that run each of these functions separately.
284Processes A, B, C, and D which run functions func1, func2, func3 and func4
285respectively, and such that D runs first and A last. With D being preempted
286in func4 in the "do something again" area, we have a locking that follows:
287
288D owns L3
289 C blocked on L3
290 C owns L2
291 B blocked on L2
292 B owns L1
293 A blocked on L1
294
295And thus we have the chain A->L1->B->L2->C->L3->D.
296
297This gives us a PI depth of 4 (four processes), but looking at any of the
298functions individually, it seems as though they only have at most a locking
299depth of two. So, although the locking depth is defined at compile time,
300it still is very difficult to find the possibilities of that depth.
301
302Now since mutexes can be defined by user-land applications, we don't want a DOS
303type of application that nests large amounts of mutexes to create a large
304PI chain, and have the code holding spin locks while looking at a large
305amount of data. So to prevent this, the implementation not only implements
306a maximum lock depth, but also only holds at most two different locks at a
307time, as it walks the PI chain. More about this below.
308
309
310Mutex owner and flags
311---------------------
312
313The mutex structure contains a pointer to the owner of the mutex. If the
314mutex is not owned, this owner is set to NULL. Since all architectures
315have the task structure on at least a four byte alignment (and if this is
316not true, the rtmutex.c code will be broken!), this allows for the two
317least significant bits to be used as flags. This part is also described
318in Documentation/rt-mutex.txt, but will also be briefly described here.
319
320Bit 0 is used as the "Pending Owner" flag. This is described later.
321Bit 1 is used as the "Has Waiters" flags. This is also described later
322 in more detail, but is set whenever there are waiters on a mutex.
323
324
325cmpxchg Tricks
326--------------
327
328Some architectures implement an atomic cmpxchg (Compare and Exchange). This
329is used (when applicable) to keep the fast path of grabbing and releasing
330mutexes short.
331
332cmpxchg is basically the following function performed atomically:
333
334unsigned long _cmpxchg(unsigned long *A, unsigned long *B, unsigned long *C)
335{
336 unsigned long T = *A;
337 if (*A == *B) {
338 *A = *C;
339 }
340 return T;
341}
342#define cmpxchg(a,b,c) _cmpxchg(&a,&b,&c)
343
344This is really nice to have, since it allows you to only update a variable
345if the variable is what you expect it to be. You know if it succeeded if
346the return value (the old value of A) is equal to B.
347
348The macro rt_mutex_cmpxchg is used to try to lock and unlock mutexes. If
349the architecture does not support CMPXCHG, then this macro is simply set
350to fail every time. But if CMPXCHG is supported, then this will
351help out extremely to keep the fast path short.
352
353The use of rt_mutex_cmpxchg with the flags in the owner field help optimize
354the system for architectures that support it. This will also be explained
355later in this document.
356
357
358Priority adjustments
359--------------------
360
361The implementation of the PI code in rtmutex.c has several places that a
362process must adjust its priority. With the help of the pi_list of a
363process this is rather easy to know what needs to be adjusted.
364
365The functions implementing the task adjustments are rt_mutex_adjust_prio,
366__rt_mutex_adjust_prio (same as the former, but expects the task pi_lock
367to already be taken), rt_mutex_getprio, and rt_mutex_setprio.
368
369rt_mutex_getprio and rt_mutex_setprio are only used in __rt_mutex_adjust_prio.
370
371rt_mutex_getprio returns the priority that the task should have. Either the
372task's own normal priority, or if a process of a higher priority is waiting on
373a mutex owned by the task, then that higher priority should be returned.
374Since the pi_list of a task holds a priority-ordered list of all the top
375waiters of all the mutexes that the task owns, rt_mutex_getprio simply needs
376to compare the top pi waiter to its own normal priority, and return the higher
377priority back.
378
379(Note: if looking at the code, you will notice that the lower number of
380 prio is returned. This is because the prio field in the task structure
381 is an inverse order of the actual priority. So a "prio" of 5 is
382 of higher priority than a "prio" of 10.)
383
384__rt_mutex_adjust_prio examines the result of rt_mutex_getprio, and if the
385result does not equal the task's current priority, then rt_mutex_setprio
386is called to adjust the priority of the task to the new priority.
387Note that rt_mutex_setprio is defined in kernel/sched.c to implement the
388actual change in priority.
389
390It is interesting to note that __rt_mutex_adjust_prio can either increase
391or decrease the priority of the task. In the case that a higher priority
392process has just blocked on a mutex owned by the task, __rt_mutex_adjust_prio
393would increase/boost the task's priority. But if a higher priority task
394were for some reason to leave the mutex (timeout or signal), this same function
395would decrease/unboost the priority of the task. That is because the pi_list
396always contains the highest priority task that is waiting on a mutex owned
397by the task, so we only need to compare the priority of that top pi waiter
398to the normal priority of the given task.
399
400
401High level overview of the PI chain walk
402----------------------------------------
403
404The PI chain walk is implemented by the function rt_mutex_adjust_prio_chain.
405
406The implementation has gone through several iterations, and has ended up
407with what we believe is the best. It walks the PI chain by only grabbing
408at most two locks at a time, and is very efficient.
409
410The rt_mutex_adjust_prio_chain can be used either to boost or lower process
411priorities.
412
413rt_mutex_adjust_prio_chain is called with a task to be checked for PI
414(de)boosting (the owner of a mutex that a process is blocking on), a flag to
415check for deadlocking, the mutex that the task owns, and a pointer to a waiter
416that is the process's waiter struct that is blocked on the mutex (although this
417parameter may be NULL for deboosting).
418
419For this explanation, I will not mention deadlock detection. This explanation
420will try to stay at a high level.
421
422When this function is called, there are no locks held. That also means
423that the state of the owner and lock can change when entered into this function.
424
425Before this function is called, the task has already had rt_mutex_adjust_prio
426performed on it. This means that the task is set to the priority that it
427should be at, but the plist nodes of the task's waiter have not been updated
428with the new priorities, and that this task may not be in the proper locations
429in the pi_lists and wait_lists that the task is blocked on. This function
430solves all that.
431
432A loop is entered, where task is the owner to be checked for PI changes that
433was passed by parameter (for the first iteration). The pi_lock of this task is
434taken to prevent any more changes to the pi_list of the task. This also
435prevents new tasks from completing the blocking on a mutex that is owned by this
436task.
437
438If the task is not blocked on a mutex then the loop is exited. We are at
439the top of the PI chain.
440
441A check is now done to see if the original waiter (the process that is blocked
442on the current mutex) is the top pi waiter of the task. That is, is this
443waiter on the top of the task's pi_list. If it is not, it either means that
444there is another process higher in priority that is blocked on one of the
445mutexes that the task owns, or that the waiter has just woken up via a signal
446or timeout and has left the PI chain. In either case, the loop is exited, since
447we don't need to do any more changes to the priority of the current task, or any
448task that owns a mutex that this current task is waiting on. A priority chain
449walk is only needed when a new top pi waiter is made to a task.
450
451The next check sees if the task's waiter plist node has the priority equal to
452the priority the task is set at. If they are equal, then we are done with
453the loop. Remember that the function started with the priority of the
454task adjusted, but the plist nodes that hold the task in other processes
455pi_lists have not been adjusted.
456
457Next, we look at the mutex that the task is blocked on. The mutex's wait_lock
458is taken. This is done by a spin_trylock, because the locking order of the
459pi_lock and wait_lock goes in the opposite direction. If we fail to grab the
460lock, the pi_lock is released, and we restart the loop.
461
462Now that we have both the pi_lock of the task as well as the wait_lock of
463the mutex the task is blocked on, we update the task's waiter's plist node
464that is located on the mutex's wait_list.
465
466Now we release the pi_lock of the task.
467
468Next the owner of the mutex has its pi_lock taken, so we can update the
469task's entry in the owner's pi_list. If the task is the highest priority
470process on the mutex's wait_list, then we remove the previous top waiter
471from the owner's pi_list, and replace it with the task.
472
473Note: It is possible that the task was the current top waiter on the mutex,
474 in which case the task is not yet on the pi_list of the waiter. This
475 is OK, since plist_del does nothing if the plist node is not on any
476 list.
477
478If the task was not the top waiter of the mutex, but it was before we
479did the priority updates, that means we are deboosting/lowering the
480task. In this case, the task is removed from the pi_list of the owner,
481and the new top waiter is added.
482
483Lastly, we unlock both the pi_lock of the task, as well as the mutex's
484wait_lock, and continue the loop again. On the next iteration of the
485loop, the previous owner of the mutex will be the task that will be
486processed.
487
488Note: One might think that the owner of this mutex might have changed
489 since we just grabbed the mutex's wait_lock. And one could be right.
490 The important thing to remember is that the owner could not have
491 become the task that is being processed in the PI chain, since
492 we have taken that task's pi_lock at the beginning of the loop.
493 So as long as there is an owner of this mutex that is not the same
494 process as the task being worked on, we are OK.
495
496 Looking closely at the code, one might be confused. The check for the
497 end of the PI chain is when the task isn't blocked on anything or the
498 task's waiter structure "task" element is NULL. This check is
499 protected only by the task's pi_lock. But the code to unlock the mutex
500 sets the task's waiter structure "task" element to NULL with only
501 the protection of the mutex's wait_lock, which was not taken yet.
502 Isn't this a race condition if the task becomes the new owner?
503
504 The answer is No! The trick is the spin_trylock of the mutex's
505 wait_lock. If we fail that lock, we release the pi_lock of the
506 task and continue the loop, doing the end of PI chain check again.
507
508 In the code to release the lock, the wait_lock of the mutex is held
509 the entire time, and it is not let go when we grab the pi_lock of the
510 new owner of the mutex. So if the switch of a new owner were to happen
511 after the check for end of the PI chain and the grabbing of the
512 wait_lock, the unlocking code would spin on the new owner's pi_lock
513 but never give up the wait_lock. So the PI chain loop is guaranteed to
514 fail the spin_trylock on the wait_lock, release the pi_lock, and
515 try again.
516
517 If you don't quite understand the above, that's OK. You don't have to,
518 unless you really want to make a proof out of it ;)
519
520
521Pending Owners and Lock stealing
522--------------------------------
523
524One of the flags in the owner field of the mutex structure is "Pending Owner".
525What this means is that an owner was chosen by the process releasing the
526mutex, but that owner has yet to wake up and actually take the mutex.
527
528Why is this important? Why can't we just give the mutex to another process
529and be done with it?
530
531The PI code is to help with real-time processes, and to let the highest
532priority process run as long as possible with little latencies and delays.
533If a high priority process owns a mutex that a lower priority process is
534blocked on, when the mutex is released it would be given to the lower priority
535process. What if the higher priority process wants to take that mutex again.
536The high priority process would fail to take that mutex that it just gave up
537and it would need to boost the lower priority process to run with full
538latency of that critical section (since the low priority process just entered
539it).
540
541There's no reason a high priority process that gives up a mutex should be
542penalized if it tries to take that mutex again. If the new owner of the
543mutex has not woken up yet, there's no reason that the higher priority process
544could not take that mutex away.
545
546To solve this, we introduced Pending Ownership and Lock Stealing. When a
547new process is given a mutex that it was blocked on, it is only given
548pending ownership. This means that it's the new owner, unless a higher
549priority process comes in and tries to grab that mutex. If a higher priority
550process does come along and wants that mutex, we let the higher priority
551process "steal" the mutex from the pending owner (only if it is still pending)
552and continue with the mutex.
553
554
555Taking of a mutex (The walk through)
556------------------------------------
557
558OK, now let's take a look at the detailed walk through of what happens when
559taking a mutex.
560
561The first thing that is tried is the fast taking of the mutex. This is
562done when we have CMPXCHG enabled (otherwise the fast taking automatically
563fails). Only when the owner field of the mutex is NULL can the lock be
564taken with the CMPXCHG and nothing else needs to be done.
565
566If there is contention on the lock, whether it is owned or pending owner
567we go about the slow path (rt_mutex_slowlock).
568
569The slow path function is where the task's waiter structure is created on
570the stack. This is because the waiter structure is only needed for the
571scope of this function. The waiter structure holds the nodes to store
572the task on the wait_list of the mutex, and if need be, the pi_list of
573the owner.
574
575The wait_lock of the mutex is taken since the slow path of unlocking the
576mutex also takes this lock.
577
578We then call try_to_take_rt_mutex. This is where the architecture that
579does not implement CMPXCHG would always grab the lock (if there's no
580contention).
581
582try_to_take_rt_mutex is used every time the task tries to grab a mutex in the
583slow path. The first thing that is done here is an atomic setting of
584the "Has Waiters" flag of the mutex's owner field. Yes, this could really
585be false, because if the mutex has no owner, there are no waiters and
586the current task also won't have any waiters. But we don't have the lock
587yet, so we assume we are going to be a waiter. The reason for this is to
588play nice for those architectures that do have CMPXCHG. By setting this flag
589now, the owner of the mutex can't release the mutex without going into the
590slow unlock path, and it would then need to grab the wait_lock, which this
591code currently holds. So setting the "Has Waiters" flag forces the owner
592to synchronize with this code.
593
594Now that we know that we can't have any races with the owner releasing the
595mutex, we check to see if we can take the ownership. This is done if the
596mutex doesn't have an owner, or if we can steal the mutex from a pending
597owner. Let's look at the situations we have here.
598
599 1) Has owner that is pending
600 ----------------------------
601
602 The mutex has an owner, but it hasn't woken up and the mutex flag
603 "Pending Owner" is set. The first check is to see if the owner isn't the
604 current task. This is because this function is also used for the pending
605 owner to grab the mutex. When a pending owner wakes up, it checks to see
606 if it can take the mutex, and this is done if the owner is already set to
607 itself. If so, we succeed and leave the function, clearing the "Pending
608 Owner" bit.
609
610 If the pending owner is not current, we check to see if the current priority is
611 higher than the pending owner. If not, we fail the function and return.
612
613 There's also something special about a pending owner. That is a pending owner
614 is never blocked on a mutex. So there is no PI chain to worry about. It also
615 means that if the mutex doesn't have any waiters, there's no accounting needed
616 to update the pending owner's pi_list, since we only worry about processes
617 blocked on the current mutex.
618
619 If there are waiters on this mutex, and we just stole the ownership, we need
620 to take the top waiter, remove it from the pi_list of the pending owner, and
621 add it to the current pi_list. Note that at this moment, the pending owner
622 is no longer on the list of waiters. This is fine, since the pending owner
623 would add itself back when it realizes that it had the ownership stolen
624 from itself. When the pending owner tries to grab the mutex, it will fail
625 in try_to_take_rt_mutex if the owner field points to another process.
626
627 2) No owner
628 -----------
629
630 If there is no owner (or we successfully stole the lock), we set the owner
631 of the mutex to current, and set the flag of "Has Waiters" if the current
632 mutex actually has waiters, or we clear the flag if it doesn't. See, it was
633 OK that we set that flag early, since now it is cleared.
634
635 3) Failed to grab ownership
636 ---------------------------
637
638 The most interesting case is when we fail to take ownership. This means that
639 there exists an owner, or there's a pending owner with equal or higher
640 priority than the current task.
641
642We'll continue on the failed case.
643
644If the mutex has a timeout, we set up a timer to go off to break us out
645of this mutex if we failed to get it after a specified amount of time.
646
647Now we enter a loop that will continue to try to take ownership of the mutex, or
648fail from a timeout or signal.
649
650Once again we try to take the mutex. This will usually fail the first time
651in the loop, since it had just failed to get the mutex. But the second time
652in the loop, this would likely succeed, since the task would likely be
653the pending owner.
654
655If the mutex is TASK_INTERRUPTIBLE a check for signals and timeout is done
656here.
657
658The waiter structure has a "task" field that points to the task that is blocked
659on the mutex. This field can be NULL the first time it goes through the loop
660or if the task is a pending owner and had its mutex stolen. If the "task"
661field is NULL then we need to set up the accounting for it.
662
663Task blocks on mutex
664--------------------
665
666The accounting of a mutex and process is done with the waiter structure of
667the process. The "task" field is set to the process, and the "lock" field
668to the mutex. The plist nodes are initialized to the process's current
669priority.
670
671Since the wait_lock was taken at the entry of the slow lock, we can safely
672add the waiter to the wait_list. If the current process is the highest
673priority process currently waiting on this mutex, then we remove the
674previous top waiter process (if it exists) from the pi_list of the owner,
675and add the current process to that list. Since the pi_list of the owner
676has changed, we call rt_mutex_adjust_prio on the owner to see if the owner
677should adjust its priority accordingly.
678
679If the owner is also blocked on a lock, and had its pi_list changed
680(or deadlock checking is on), we unlock the wait_lock of the mutex and go ahead
681and run rt_mutex_adjust_prio_chain on the owner, as described earlier.
682
683Now all locks are released, and if the current process is still blocked on a
684mutex (waiter "task" field is not NULL), then we go to sleep (call schedule).
685
686Waking up in the loop
687---------------------
688
689The schedule can then wake up for a few reasons.
690 1) we were given pending ownership of the mutex.
691 2) we received a signal and were TASK_INTERRUPTIBLE
692 3) we had a timeout and were TASK_INTERRUPTIBLE
693
694In any of these cases, we continue the loop and once again try to grab the
695ownership of the mutex. If we succeed, we exit the loop, otherwise we continue
696and on signal and timeout, will exit the loop, or if we had the mutex stolen
697we just simply add ourselves back on the lists and go back to sleep.
698
699Note: For various reasons, because of timeout and signals, the steal mutex
700 algorithm needs to be careful. This is because the current process is
701 still on the wait_list. And because of dynamic changing of priorities,
702 especially on SCHED_OTHER tasks, the current process can be the
703 highest priority task on the wait_list.
704
705Failed to get mutex on Timeout or Signal
706----------------------------------------
707
708If a timeout or signal occurred, the waiter's "task" field would not be
709NULL and the task needs to be taken off the wait_list of the mutex and perhaps
710pi_list of the owner. If this process was a high priority process, then
711the rt_mutex_adjust_prio_chain needs to be executed again on the owner,
712but this time it will be lowering the priorities.
713
714
715Unlocking the Mutex
716-------------------
717
718The unlocking of a mutex also has a fast path for those architectures with
719CMPXCHG. Since the taking of a mutex on contention always sets the
720"Has Waiters" flag of the mutex's owner, we use this to know if we need to
721take the slow path when unlocking the mutex. If the mutex doesn't have any
722waiters, the owner field of the mutex would equal the current process and
723the mutex can be unlocked by just replacing the owner field with NULL.
724
725If the owner field has the "Has Waiters" bit set (or CMPXCHG is not available),
726the slow unlock path is taken.
727
728The first thing done in the slow unlock path is to take the wait_lock of the
729mutex. This synchronizes the locking and unlocking of the mutex.
730
731A check is made to see if the mutex has waiters or not. On architectures that
732do not have CMPXCHG, this is the location that the owner of the mutex will
733determine if a waiter needs to be awoken or not. On architectures that
734do have CMPXCHG, that check is done in the fast path, but it is still needed
735in the slow path too. If a waiter of a mutex woke up because of a signal
736or timeout between the time the owner failed the fast path CMPXCHG check and
737the grabbing of the wait_lock, the mutex may not have any waiters, thus the
738owner still needs to make this check. If there are no waiters then the mutex
739owner field is set to NULL, the wait_lock is released and nothing more is
740needed.
741
742If there are waiters, then we need to wake one up and give that waiter
743pending ownership.
744
745On the wake up code, the pi_lock of the current owner is taken. The top
746waiter of the lock is found and removed from the wait_list of the mutex
747as well as the pi_list of the current owner. The task field of the new
748pending owner's waiter structure is set to NULL, and the owner field of the
749mutex is set to the new owner with the "Pending Owner" bit set, as well
750as the "Has Waiters" bit if there still are other processes blocked on the
751mutex.
752
753The pi_lock of the previous owner is released, and the new pending owner's
754pi_lock is taken. Remember that this is the trick to prevent the race
755condition in rt_mutex_adjust_prio_chain from adding itself as a waiter
756on the mutex.
757
758We now clear the "pi_blocked_on" field of the new pending owner, and if
759the mutex still has waiters pending, we add the new top waiter to the pi_list
760of the pending owner.
761
762Finally we unlock the pi_lock of the pending owner and wake it up.
763
764
765Contact
766-------
767
768For updates on this document, please email Steven Rostedt <rostedt@goodmis.org>
769
770
771Credits
772-------
773
774Author: Steven Rostedt <rostedt@goodmis.org>
775
776Reviewers: Ingo Molnar, Thomas Gleixner, Thomas Duetsch, and Randy Dunlap
777
778Updates
779-------
780
781This document was originally written for 2.6.17-rc3-mm1
diff --git a/Documentation/rt-mutex.txt b/Documentation/rt-mutex.txt
new file mode 100644
index 0000000000..243393d882
--- /dev/null
+++ b/Documentation/rt-mutex.txt
@@ -0,0 +1,79 @@
1RT-mutex subsystem with PI support
2----------------------------------
3
4RT-mutexes with priority inheritance are used to support PI-futexes,
5which enable pthread_mutex_t priority inheritance attributes
6(PTHREAD_PRIO_INHERIT). [See Documentation/pi-futex.txt for more details
7about PI-futexes.]
8
9This technology was developed in the -rt tree and streamlined for
10pthread_mutex support.
11
12Basic principles:
13-----------------
14
15RT-mutexes extend the semantics of simple mutexes by the priority
16inheritance protocol.
17
18A low priority owner of a rt-mutex inherits the priority of a higher
19priority waiter until the rt-mutex is released. If the temporarily
20boosted owner blocks on a rt-mutex itself it propagates the priority
21boosting to the owner of the other rt_mutex it gets blocked on. The
22priority boosting is immediately removed once the rt_mutex has been
23unlocked.
24
25This approach allows us to shorten the block of high-prio tasks on
26mutexes which protect shared resources. Priority inheritance is not a
27magic bullet for poorly designed applications, but it allows
28well-designed applications to use userspace locks in critical parts of
29a high priority thread, without losing determinism.
30
31The enqueueing of the waiters into the rtmutex waiter list is done in
32priority order. For same priorities FIFO order is chosen. For each
33rtmutex, only the top priority waiter is enqueued into the owner's
34priority waiters list. This list too queues in priority order. Whenever
35the top priority waiter of a task changes (for example it timed out or
36got a signal), the priority of the owner task is readjusted. [The
37priority enqueueing is handled by "plists", see include/linux/plist.h
38for more details.]
39
40RT-mutexes are optimized for fastpath operations and have no internal
41locking overhead when locking an uncontended mutex or unlocking a mutex
42without waiters. The optimized fastpath operations require cmpxchg
43support. [If that is not available then the rt-mutex internal spinlock
44is used]
45
46The state of the rt-mutex is tracked via the owner field of the rt-mutex
47structure:
48
49rt_mutex->owner holds the task_struct pointer of the owner. Bit 0 and 1
50are used to keep track of the "owner is pending" and "rtmutex has
51waiters" state.
52
53 owner bit1 bit0
54 NULL 0 0 mutex is free (fast acquire possible)
55 NULL 0 1 invalid state
56 NULL 1 0 Transitional state*
57 NULL 1 1 invalid state
58 taskpointer 0 0 mutex is held (fast release possible)
59 taskpointer 0 1 task is pending owner
60 taskpointer 1 0 mutex is held and has waiters
61 taskpointer 1 1 task is pending owner and mutex has waiters
62
63Pending-ownership handling is a performance optimization:
64pending-ownership is assigned to the first (highest priority) waiter of
65the mutex, when the mutex is released. The thread is woken up and once
66it starts executing it can acquire the mutex. Until the mutex is taken
67by it (bit 0 is cleared) a competing higher priority thread can "steal"
68the mutex which puts the woken up thread back on the waiters list.
69
70The pending-ownership optimization is especially important for the
71uninterrupted workflow of high-prio tasks which repeatedly
72take/release locks that have lower-prio waiters. Without this
73optimization the higher-prio thread would ping-pong to the lower-prio
74task [because at unlock time we always assign a new owner].
75
76(*) The "mutex has waiters" bit gets set to take the lock. If the lock
77doesn't already have an owner, this bit is quickly cleared if there are
78no waiters. So this is a transitional state to synchronize with looking
79at the owner field of the mutex and the mutex owner releasing the lock.
diff --git a/Documentation/scsi/ppa.txt b/Documentation/scsi/ppa.txt
index 0dac88d86d..5d9223bc1b 100644
--- a/Documentation/scsi/ppa.txt
+++ b/Documentation/scsi/ppa.txt
@@ -12,5 +12,3 @@ http://www.torque.net/parport/
12Email list for Linux Parport 12Email list for Linux Parport
13linux-parport@torque.net 13linux-parport@torque.net
14 14
15Email for problems with ZIP or ZIP Plus drivers
16campbell@torque.net
diff --git a/Documentation/tty.txt b/Documentation/tty.txt
index 8ff7bc2a08..dab5660474 100644
--- a/Documentation/tty.txt
+++ b/Documentation/tty.txt
@@ -80,13 +80,6 @@ receive_buf() - Hand buffers of bytes from the driver to the ldisc
80 for processing. Semantics currently rather 80 for processing. Semantics currently rather
81 mysterious 8( 81 mysterious 8(
82 82
83receive_room() - Can be called by the driver layer at any time when
84 the ldisc is opened. The ldisc must be able to
85 handle the reported amount of data at that instant.
86 Synchronization between active receive_buf and
87 receive_room calls is down to the driver not the
88 ldisc. Must not sleep.
89
90write_wakeup() - May be called at any point between open and close. 83write_wakeup() - May be called at any point between open and close.
91 The TTY_DO_WRITE_WAKEUP flag indicates if a call 84 The TTY_DO_WRITE_WAKEUP flag indicates if a call
92 is needed but always races versus calls. Thus the 85 is needed but always races versus calls. Thus the
diff --git a/Documentation/video4linux/README.pvrusb2 b/Documentation/video4linux/README.pvrusb2
new file mode 100644
index 0000000000..c73a32c345
--- /dev/null
+++ b/Documentation/video4linux/README.pvrusb2
@@ -0,0 +1,212 @@
1
2$Id$
3Mike Isely <isely@pobox.com>
4
5 pvrusb2 driver
6
7Background:
8
9 This driver is intended for the "Hauppauge WinTV PVR USB 2.0", which
10 is a USB 2.0 hosted TV Tuner. This driver is a work in progress.
11 Its history started with the reverse-engineering effort by Björn
12 Danielsson <pvrusb2@dax.nu> whose web page can be found here:
13
14 http://pvrusb2.dax.nu/
15
16 From there Aurelien Alleaume <slts@free.fr> began an effort to
17 create a video4linux compatible driver. I began with Aurelien's
18 last known snapshot and evolved the driver to the state it is in
19 here.
20
21 More information on this driver can be found at:
22
23 http://www.isely.net/pvrusb2.html
24
25
26 This driver has a strong separation of layers. They are very
27 roughly:
28
29 1a. Low level wire-protocol implementation with the device.
30
31 1b. I2C adaptor implementation and corresponding I2C client drivers
32 implemented elsewhere in V4L.
33
34 1c. High level hardware driver implementation which coordinates all
35 activities that ensure correct operation of the device.
36
37 2. A "context" layer which manages instancing of driver, setup,
38 tear-down, arbitration, and interaction with high level
39 interfaces appropriately as devices are hotplugged in the
40 system.
41
42 3. High level interfaces which glue the driver to various published
43 Linux APIs (V4L, sysfs, maybe DVB in the future).
44
45 The most important shearing layer is between the top 2 layers. A
46 lot of work went into the driver to ensure that any kind of
47 conceivable API can be laid on top of the core driver. (Yes, the
48 driver internally leverages V4L to do its work but that really has
49 nothing to do with the API published by the driver to the outside
50 world.) The architecture allows for different APIs to
51 simultaneously access the driver. I have a strong sense of fairness
52 about APIs and also feel that it is a good design principle to keep
53 implementation and interface isolated from each other. Thus while
54 right now the V4L high level interface is the most complete, the
55 sysfs high level interface will work equally well for similar
56 functions, and there's no reason I see right now why it shouldn't be
57 possible to produce a DVB high level interface that can sit right
58 alongside V4L.
59
60 NOTE: Complete documentation on the pvrusb2 driver is contained in
61 the html files within the doc directory; these are exactly the same
62 as what is on the web site at the time. Browse those files
63 (especially the FAQ) before asking questions.
64
65
66Building
67
68 To build these modules essentially amounts to just running "Make",
69 but you need the kernel source tree nearby and you will likely also
70 want to set a few controlling environment variables first in order
71 to link things up with that source tree. Please see the Makefile
72 here for comments that explain how to do that.
73
74
75Source file list / functional overview:
76
77 (Note: The term "module" used below generally refers to loosely
78 defined functional units within the pvrusb2 driver and bears no
79 relation to the Linux kernel's concept of a loadable module.)
80
81 pvrusb2-audio.[ch] - This is glue logic that resides between this
82 driver and the msp3400.ko I2C client driver (which is found
83 elsewhere in V4L).
84
85 pvrusb2-context.[ch] - This module implements the context for an
86 instance of the driver. Everything else eventually ties back to
87 or is otherwise instanced within the data structures implemented
88 here. Hotplugging is ultimately coordinated here. All high level
89 interfaces tie into the driver through this module. This module
90 helps arbitrate each interface's access to the actual driver core,
91 and is designed to allow concurrent access through multiple
92 instances of multiple interfaces (thus you can for example change
93 the tuner's frequency through sysfs while simultaneously streaming
94 video through V4L out to an instance of mplayer).
95
96 pvrusb2-debug.h - This header defines a printk() wrapper and a mask
97 of debugging bit definitions for the various kinds of debug
98 messages that can be enabled within the driver.
99
100 pvrusb2-debugifc.[ch] - This module implements a crude command line
101 oriented debug interface into the driver. Aside from being part
102 of the process for implementing manual firmware extraction (see
103 the pvrusb2 web site mentioned earlier), probably I'm the only one
104 who has ever used this. It is mainly a debugging aid.
105
106 pvrusb2-eeprom.[ch] - This is glue logic that resides between this
107 driver and the tveeprom.ko module, which is itself implemented
108 elsewhere in V4L.
109
110 pvrusb2-encoder.[ch] - This module implements all protocol needed to
111 interact with the Conexant mpeg2 encoder chip within the pvrusb2
112 device. It is a crude echo of corresponding logic in ivtv,
113 however the design goals (strict isolation) and physical layer
114 (proxy through USB instead of PCI) are enough different that this
115 implementation had to be completely different.
116
117 pvrusb2-hdw-internal.h - This header defines the core data structure
118 in the driver used to track ALL internal state related to control
119 of the hardware. Nobody outside of the core hardware-handling
120 modules should have any business using this header. All external
121 access to the driver should be through one of the high level
122 interfaces (e.g. V4L, sysfs, etc), and in fact even those high
123 level interfaces are restricted to the API defined in
124 pvrusb2-hdw.h and NOT this header.
125
126 pvrusb2-hdw.h - This header defines the full internal API for
127 controlling the hardware. High level interfaces (e.g. V4L, sysfs)
128 will work through here.
129
130 pvrusb2-hdw.c - This module implements all the various bits of logic
131 that handle overall control of a specific pvrusb2 device.
132 (Policy, instantiation, and arbitration of pvrusb2 devices fall
133 within the jurisdiction of pvrusb-context not here).
134
135 pvrusb2-i2c-chips-*.c - These modules implement the glue logic to
136 tie together and configure various I2C modules as they attach to
137 the I2C bus. There are two versions of this file. The "v4l2"
138 version is intended to be used in-tree alongside V4L, where we
139 implement just the logic that makes sense for a pure V4L
140 environment. The "all" version is intended for use outside of
141 V4L, where we might encounter other possibly "challenging" modules
142 from ivtv or older kernel snapshots (or even the support modules
143 in the standalone snapshot).
144
145 pvrusb2-i2c-cmd-v4l1.[ch] - This module implements generic V4L1
146 compatible commands to the I2C modules. It is here where state
147 changes inside the pvrusb2 driver are translated into V4L1
148 commands that are in turn sent to the various I2C modules.
149
150 pvrusb2-i2c-cmd-v4l2.[ch] - This module implements generic V4L2
151 compatible commands to the I2C modules. It is here where state
152 changes inside the pvrusb2 driver are translated into V4L2
153 commands that are in turn sent to the various I2C modules.
154
155 pvrusb2-i2c-core.[ch] - This module provides an implementation of a
156 kernel-friendly I2C adaptor driver, through which other external
157 I2C client drivers (e.g. msp3400, tuner, lirc) may connect and
158 operate corresponding chips within the pvrusb2 device. It is
159 through here that other V4L modules can reach into this driver to
160 operate specific pieces (and those modules are in turn driven by
161 glue logic which is coordinated by pvrusb2-hdw, doled out by
162 pvrusb2-context, and then ultimately made available to users
163 through one of the high level interfaces).
164
165 pvrusb2-io.[ch] - This module implements a very low level ring of
166 transfer buffers, required in order to stream data from the
167 device. This module is *very* low level. It only operates the
168 buffers and makes no attempt to define any policy or mechanism for
169 how such buffers might be used.
170
171 pvrusb2-ioread.[ch] - This module layers on top of pvrusb2-io.[ch]
172 to provide a streaming API usable by a read() system call style of
173 I/O. Right now this is the only layer on top of pvrusb2-io.[ch],
174 however the underlying architecture here was intended to allow for
175 other styles of I/O to be implemented with additional modules, like
176 mmap()'ed buffers or something even more exotic.
177
178 pvrusb2-main.c - This is the top level of the driver. Module level
179 and USB core entry points are here. This is our "main".
180
181 pvrusb2-sysfs.[ch] - This is the high level interface which ties the
182 pvrusb2 driver into sysfs. Through this interface you can do
183 everything with the driver except actually stream data.
184
185 pvrusb2-tuner.[ch] - This is glue logic that resides between this
186 driver and the tuner.ko I2C client driver (which is found
187 elsewhere in V4L).
188
189 pvrusb2-util.h - This header defines some common macros used
190 throughout the driver. These macros are not really specific to
191 the driver, but they had to go somewhere.
192
193 pvrusb2-v4l2.[ch] - This is the high level interface which ties the
194 pvrusb2 driver into video4linux. It is through here that V4L
195 applications can open and operate the driver in the usual V4L
196 ways. Note that **ALL** V4L functionality is published only
197 through here and nowhere else.
198
199 pvrusb2-video-*.[ch] - This is glue logic that resides between this
200 driver and the saa711x.ko I2C client driver (which is found
201 elsewhere in V4L). Note that saa711x.ko used to be known as
202 saa7115.ko in ivtv. There are two versions of this; one is
203 selected depending on the particular saa711[5x].ko that is found.
204
205 pvrusb2.h - This header contains compile time tunable parameters
206 (and at the moment the driver has very little that needs to be
207 tuned).
208
209
210 -Mike Isely
211 isely@pobox.com
212
diff --git a/Documentation/x86_64/boot-options.txt b/Documentation/x86_64/boot-options.txt
index f2cd6ef53f..6887d44d26 100644
--- a/Documentation/x86_64/boot-options.txt
+++ b/Documentation/x86_64/boot-options.txt
@@ -205,6 +205,27 @@ IOMMU
205 pages Prereserve that many 128K pages for the software IO bounce buffering. 205 pages Prereserve that many 128K pages for the software IO bounce buffering.
206 force Force all IO through the software TLB. 206 force Force all IO through the software TLB.
207 207
208 calgary=[64k,128k,256k,512k,1M,2M,4M,8M]
209 calgary=[translate_empty_slots]
210 calgary=[disable=<PCI bus number>]
211
212 64k,...,8M - Set the size of each PCI slot's translation table
213 when using the Calgary IOMMU. This is the size of the translation
214 table itself in main memory. The smallest table, 64k, covers an IO
215 space of 32MB; the largest, 8MB table, can cover an IO space of
216 4GB. Normally the kernel will make the right choice by itself.
217
218 translate_empty_slots - Enable translation even on slots that have
219 no devices attached to them, in case a device will be hotplugged
220 in the future.
221
222 disable=<PCI bus number> - Disable translation on a given PHB. For
223 example, the built-in graphics adapter resides on the first bridge
224 (PCI bus number 0); if translation (isolation) is enabled on this
225 bridge, X servers that access the hardware directly from user
226 space might stop working. Use this option if you have devices that
227 are accessed from userspace directly on some PCI host bridge.
228
208Debugging 229Debugging
209 230
210 oops=panic Always panic on oopses. Default is to just kill the process, 231 oops=panic Always panic on oopses. Default is to just kill the process,
diff --git a/MAINTAINERS b/MAINTAINERS
index 4dcd2f1f14..31a13720f2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1118,6 +1118,11 @@ L: lm-sensors@lm-sensors.org
1118W: http://www.lm-sensors.nu/ 1118W: http://www.lm-sensors.nu/
1119S: Maintained 1119S: Maintained
1120 1120
1121HARDWARE RANDOM NUMBER GENERATOR CORE
1122P: Michael Buesch
1123M: mb@bu3sch.de
1124S: Maintained
1125
1121HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER 1126HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER
1122P: Robert Love 1127P: Robert Love
1123M: rlove@rlove.org 1128M: rlove@rlove.org
@@ -1396,7 +1401,8 @@ S: Supported
1396 1401
1397INPUT (KEYBOARD, MOUSE, JOYSTICK) DRIVERS 1402INPUT (KEYBOARD, MOUSE, JOYSTICK) DRIVERS
1398P: Dmitry Torokhov 1403P: Dmitry Torokhov
1399M: dtor_core@ameritech.net 1404M: dmitry.torokhov@gmail.com
1405M: dtor@mail.ru
1400L: linux-input@atrey.karlin.mff.cuni.cz 1406L: linux-input@atrey.karlin.mff.cuni.cz
1401L: linux-joystick@atrey.karlin.mff.cuni.cz 1407L: linux-joystick@atrey.karlin.mff.cuni.cz
1402T: git kernel.org:/pub/scm/linux/kernel/git/dtor/input.git 1408T: git kernel.org:/pub/scm/linux/kernel/git/dtor/input.git
@@ -1436,6 +1442,11 @@ P: Tigran Aivazian
1436M: tigran@veritas.com 1442M: tigran@veritas.com
1437S: Maintained 1443S: Maintained
1438 1444
1445INTEL IXP4XX RANDOM NUMBER GENERATOR SUPPORT
1446P: Deepak Saxena
1447M: dsaxena@plexity.net
1448S: Maintained
1449
1439INTEL PRO/100 ETHERNET SUPPORT 1450INTEL PRO/100 ETHERNET SUPPORT
1440P: John Ronciak 1451P: John Ronciak
1441M: john.ronciak@intel.com 1452M: john.ronciak@intel.com
@@ -2725,6 +2736,11 @@ P: Christoph Hellwig
2725M: hch@infradead.org 2736M: hch@infradead.org
2726S: Maintained 2737S: Maintained
2727 2738
2739TI OMAP RANDOM NUMBER GENERATOR SUPPORT
2740P: Deepak Saxena
2741M: dsaxena@plexity.net
2742S: Maintained
2743
2728TI PARALLEL LINK CABLE DRIVER 2744TI PARALLEL LINK CABLE DRIVER
2729P: Romain Lievin 2745P: Romain Lievin
2730M: roms@lpg.ticalc.org 2746M: roms@lpg.ticalc.org
diff --git a/Makefile b/Makefile
index 1700d3f6ea..e9560c6f81 100644
--- a/Makefile
+++ b/Makefile
@@ -71,7 +71,7 @@ endif
71# In both cases the working directory must be the root of the kernel src. 71# In both cases the working directory must be the root of the kernel src.
72# 1) O= 72# 1) O=
73# Use "make O=dir/to/store/output/files/" 73# Use "make O=dir/to/store/output/files/"
74# 74#
75# 2) Set KBUILD_OUTPUT 75# 2) Set KBUILD_OUTPUT
76# Set the environment variable KBUILD_OUTPUT to point to the directory 76# Set the environment variable KBUILD_OUTPUT to point to the directory
77# where the output files shall be placed. 77# where the output files shall be placed.
@@ -178,18 +178,20 @@ CROSS_COMPILE ?=
178# Architecture as present in compile.h 178# Architecture as present in compile.h
179UTS_MACHINE := $(ARCH) 179UTS_MACHINE := $(ARCH)
180 180
181KCONFIG_CONFIG ?= .config
182
181# SHELL used by kbuild 183# SHELL used by kbuild
182CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \ 184CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
183 else if [ -x /bin/bash ]; then echo /bin/bash; \ 185 else if [ -x /bin/bash ]; then echo /bin/bash; \
184 else echo sh; fi ; fi) 186 else echo sh; fi ; fi)
185 187
186HOSTCC = gcc 188HOSTCC = gcc
187HOSTCXX = g++ 189HOSTCXX = g++
188HOSTCFLAGS = -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer 190HOSTCFLAGS = -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer
189HOSTCXXFLAGS = -O2 191HOSTCXXFLAGS = -O2
190 192
191# Decide whether to build built-in, modular, or both. 193# Decide whether to build built-in, modular, or both.
192# Normally, just do built-in. 194# Normally, just do built-in.
193 195
194KBUILD_MODULES := 196KBUILD_MODULES :=
195KBUILD_BUILTIN := 1 197KBUILD_BUILTIN := 1
@@ -197,7 +199,7 @@ KBUILD_BUILTIN := 1
197# If we have only "make modules", don't compile built-in objects. 199# If we have only "make modules", don't compile built-in objects.
198# When we're building modules with modversions, we need to consider 200# When we're building modules with modversions, we need to consider
199# the built-in objects during the descend as well, in order to 201# the built-in objects during the descend as well, in order to
200# make sure the checksums are uptodate before we record them. 202# make sure the checksums are up to date before we record them.
201 203
202ifeq ($(MAKECMDGOALS),modules) 204ifeq ($(MAKECMDGOALS),modules)
203 KBUILD_BUILTIN := $(if $(CONFIG_MODVERSIONS),1) 205 KBUILD_BUILTIN := $(if $(CONFIG_MODVERSIONS),1)
@@ -230,7 +232,7 @@ export KBUILD_CHECKSRC KBUILD_SRC KBUILD_EXTMOD
230# 232#
231# If $(quiet) is empty, the whole command will be printed. 233# If $(quiet) is empty, the whole command will be printed.
232# If it is set to "quiet_", only the short version will be printed. 234# If it is set to "quiet_", only the short version will be printed.
233# If it is set to "silent_", nothing wil be printed at all, since 235# If it is set to "silent_", nothing will be printed at all, since
234# the variable $(silent_cmd_cc_o_c) doesn't exist. 236# the variable $(silent_cmd_cc_o_c) doesn't exist.
235# 237#
236# A simple variant is to prefix commands with $(Q) - that's useful 238# A simple variant is to prefix commands with $(Q) - that's useful
@@ -265,10 +267,9 @@ MAKEFLAGS += --include-dir=$(srctree)
265# We need some generic definitions 267# We need some generic definitions
266include $(srctree)/scripts/Kbuild.include 268include $(srctree)/scripts/Kbuild.include
267 269
268# For maximum performance (+ possibly random breakage, uncomment 270# Do not use make's built-in rules and variables
269# the following) 271# This increases performance and avoids hard-to-debug behaviour
270 272MAKEFLAGS += -rR
271#MAKEFLAGS += -rR
272 273
273# Make variables (CC, etc...) 274# Make variables (CC, etc...)
274 275
@@ -305,21 +306,21 @@ LINUXINCLUDE := -Iinclude \
305 306
306CPPFLAGS := -D__KERNEL__ $(LINUXINCLUDE) 307CPPFLAGS := -D__KERNEL__ $(LINUXINCLUDE)
307 308
308CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ 309CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
309 -fno-strict-aliasing -fno-common 310 -fno-strict-aliasing -fno-common
310AFLAGS := -D__ASSEMBLY__ 311AFLAGS := -D__ASSEMBLY__
311 312
312# Read KERNELRELEASE from .kernelrelease (if it exists) 313# Read KERNELRELEASE from include/config/kernel.release (if it exists)
313KERNELRELEASE = $(shell cat .kernelrelease 2> /dev/null) 314KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
314KERNELVERSION = $(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION) 315KERNELVERSION = $(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
315 316
316export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION \ 317export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION
317 ARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC \ 318export ARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC
318 CPP AR NM STRIP OBJCOPY OBJDUMP MAKE AWK GENKSYMS PERL UTS_MACHINE \ 319export CPP AR NM STRIP OBJCOPY OBJDUMP MAKE AWK GENKSYMS PERL UTS_MACHINE
319 HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS 320export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
320 321
321export CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS 322export CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
322export CFLAGS CFLAGS_KERNEL CFLAGS_MODULE 323export CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
323export AFLAGS AFLAGS_KERNEL AFLAGS_MODULE 324export AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
324 325
325# When compiling out-of-tree modules, put MODVERDIR in the module 326# When compiling out-of-tree modules, put MODVERDIR in the module
@@ -357,12 +358,13 @@ endif
357# catch them early, and hand them over to scripts/kconfig/Makefile 358# catch them early, and hand them over to scripts/kconfig/Makefile
358# It is allowed to specify more targets when calling make, including 359# It is allowed to specify more targets when calling make, including
359# mixing *config targets and build targets. 360# mixing *config targets and build targets.
360# For example 'make oldconfig all'. 361# For example 'make oldconfig all'.
361# Detect when mixed targets is specified, and make a second invocation 362# Detect when mixed targets is specified, and make a second invocation
362# of make so .config is not included in this case either (for *config). 363# of make so .config is not included in this case either (for *config).
363 364
364no-dot-config-targets := clean mrproper distclean \ 365no-dot-config-targets := clean mrproper distclean \
365 cscope TAGS tags help %docs check% 366 cscope TAGS tags help %docs check% \
367 kernelrelease kernelversion
366 368
367config-targets := 0 369config-targets := 0
368mixed-targets := 0 370mixed-targets := 0
@@ -404,9 +406,8 @@ include $(srctree)/arch/$(ARCH)/Makefile
404export KBUILD_DEFCONFIG 406export KBUILD_DEFCONFIG
405 407
406config %config: scripts_basic outputmakefile FORCE 408config %config: scripts_basic outputmakefile FORCE
407 $(Q)mkdir -p include/linux 409 $(Q)mkdir -p include/linux include/config
408 $(Q)$(MAKE) $(build)=scripts/kconfig $@ 410 $(Q)$(MAKE) $(build)=scripts/kconfig $@
409 $(Q)$(MAKE) -C $(srctree) KBUILD_SRC= .kernelrelease
410 411
411else 412else
412# =========================================================================== 413# ===========================================================================
@@ -416,13 +417,11 @@ else
416ifeq ($(KBUILD_EXTMOD),) 417ifeq ($(KBUILD_EXTMOD),)
417# Additional helpers built in scripts/ 418# Additional helpers built in scripts/
418# Carefully list dependencies so we do not try to build scripts twice 419# Carefully list dependencies so we do not try to build scripts twice
419# in parrallel 420# in parallel
420PHONY += scripts 421PHONY += scripts
421scripts: scripts_basic include/config/MARKER 422scripts: scripts_basic include/config/auto.conf
422 $(Q)$(MAKE) $(build)=$(@) 423 $(Q)$(MAKE) $(build)=$(@)
423 424
424scripts_basic: include/linux/autoconf.h
425
426# Objects we will link into vmlinux / subdirs we need to visit 425# Objects we will link into vmlinux / subdirs we need to visit
427init-y := init/ 426init-y := init/
428drivers-y := drivers/ sound/ 427drivers-y := drivers/ sound/
@@ -436,31 +435,32 @@ ifeq ($(dot-config),1)
436 435
437# Read in dependencies to all Kconfig* files, make sure to run 436# Read in dependencies to all Kconfig* files, make sure to run
438# oldconfig if changes are detected. 437# oldconfig if changes are detected.
439-include .kconfig.d 438-include include/config/auto.conf.cmd
439-include include/config/auto.conf
440 440
441include .config
442
443# If .config needs to be updated, it will be done via the dependency
444# that autoconf has on .config.
445# To avoid any implicit rule to kick in, define an empty command 441# To avoid any implicit rule to kick in, define an empty command
446.config .kconfig.d: ; 442$(KCONFIG_CONFIG) include/config/auto.conf.cmd: ;
447 443
448# If .config is newer than include/linux/autoconf.h, someone tinkered 444# If .config is newer than include/config/auto.conf, someone tinkered
449# with it and forgot to run make oldconfig. 445# with it and forgot to run make oldconfig.
450# If kconfig.d is missing then we are probarly in a cleaned tree so 446# if auto.conf.cmd is missing then we are probably in a cleaned tree so
451# we execute the config step to be sure to catch updated Kconfig files 447# we execute the config step to be sure to catch updated Kconfig files
452include/linux/autoconf.h: .kconfig.d .config 448include/config/auto.conf: $(KCONFIG_CONFIG) include/config/auto.conf.cmd
453 $(Q)mkdir -p include/linux 449ifeq ($(KBUILD_EXTMOD),)
454 $(Q)$(MAKE) -f $(srctree)/Makefile silentoldconfig 450 $(Q)$(MAKE) -f $(srctree)/Makefile silentoldconfig
455else 451else
452 $(error kernel configuration not valid - run 'make prepare' in $(srctree) to update it)
453endif
454
455else
456# Dummy target needed, because used as prerequisite 456# Dummy target needed, because used as prerequisite
457include/linux/autoconf.h: ; 457include/config/auto.conf: ;
458endif 458endif
459 459
460# The all: target is the default when no target is given on the 460# The all: target is the default when no target is given on the
461# command line. 461# command line.
462# This allow a user to issue only 'make' to build a kernel including modules 462# This allow a user to issue only 'make' to build a kernel including modules
463# Defaults vmlinux but it is usually overriden in the arch makefile 463# Defaults vmlinux but it is usually overridden in the arch makefile
464all: vmlinux 464all: vmlinux
465 465
466ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE 466ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
@@ -492,11 +492,11 @@ CHECKFLAGS += $(NOSTDINC_FLAGS)
492# warn about C99 declaration after statement 492# warn about C99 declaration after statement
493CFLAGS += $(call cc-option,-Wdeclaration-after-statement,) 493CFLAGS += $(call cc-option,-Wdeclaration-after-statement,)
494 494
495# disable pointer signedness warnings in gcc 4.0 495# disable pointer signed / unsigned warnings in gcc 4.0
496CFLAGS += $(call cc-option,-Wno-pointer-sign,) 496CFLAGS += $(call cc-option,-Wno-pointer-sign,)
497 497
498# Default kernel image to build when no specific target is given. 498# Default kernel image to build when no specific target is given.
499# KBUILD_IMAGE may be overruled on the commandline or 499# KBUILD_IMAGE may be overruled on the command line or
500# set in the environment 500# set in the environment
501# Also any assignments in arch/$(ARCH)/Makefile take precedence over 501# Also any assignments in arch/$(ARCH)/Makefile take precedence over
502# this default value 502# this default value
@@ -510,12 +510,29 @@ export INSTALL_PATH ?= /boot
510# 510#
511# INSTALL_MOD_PATH specifies a prefix to MODLIB for module directory 511# INSTALL_MOD_PATH specifies a prefix to MODLIB for module directory
512# relocations required by build roots. This is not defined in the 512# relocations required by build roots. This is not defined in the
513# makefile but the arguement can be passed to make if needed. 513# makefile but the argument can be passed to make if needed.
514# 514#
515 515
516MODLIB = $(INSTALL_MOD_PATH)/lib/modules/$(KERNELRELEASE) 516MODLIB = $(INSTALL_MOD_PATH)/lib/modules/$(KERNELRELEASE)
517export MODLIB 517export MODLIB
518 518
519#
520# INSTALL_MOD_STRIP, if defined, will cause modules to be
521# stripped after they are installed. If INSTALL_MOD_STRIP is '1', then
522# the default option --strip-debug will be used. Otherwise,
523# INSTALL_MOD_STRIP will be used as the options to the strip command.
524
525ifdef INSTALL_MOD_STRIP
526ifeq ($(INSTALL_MOD_STRIP),1)
527mod_strip_cmd = $(STRIP) --strip-debug
528else
529mod_strip_cmd = $(STRIP) $(INSTALL_MOD_STRIP)
530endif # INSTALL_MOD_STRIP=1
531else
532mod_strip_cmd = true
533endif # INSTALL_MOD_STRIP
534export mod_strip_cmd
535
519 536
520ifeq ($(KBUILD_EXTMOD),) 537ifeq ($(KBUILD_EXTMOD),)
521core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ 538core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
@@ -539,7 +556,7 @@ libs-y := $(libs-y1) $(libs-y2)
539 556
540# Build vmlinux 557# Build vmlinux
541# --------------------------------------------------------------------------- 558# ---------------------------------------------------------------------------
542# vmlinux is build from the objects selected by $(vmlinux-init) and 559# vmlinux is built from the objects selected by $(vmlinux-init) and
543# $(vmlinux-main). Most are built-in.o files from top-level directories 560# $(vmlinux-main). Most are built-in.o files from top-level directories
544# in the kernel tree, others are specified in arch/$(ARCH)Makefile. 561# in the kernel tree, others are specified in arch/$(ARCH)Makefile.
545# Ordering when linking is important, and $(vmlinux-init) must be first. 562# Ordering when linking is important, and $(vmlinux-init) must be first.
@@ -590,7 +607,7 @@ quiet_cmd_vmlinux_version = GEN .version
590 $(MAKE) $(build)=init 607 $(MAKE) $(build)=init
591 608
592# Generate System.map 609# Generate System.map
593quiet_cmd_sysmap = SYSMAP 610quiet_cmd_sysmap = SYSMAP
594 cmd_sysmap = $(CONFIG_SHELL) $(srctree)/scripts/mksysmap 611 cmd_sysmap = $(CONFIG_SHELL) $(srctree)/scripts/mksysmap
595 612
596# Link of vmlinux 613# Link of vmlinux
@@ -719,7 +736,7 @@ $(vmlinux-dirs): prepare scripts
719 $(Q)$(MAKE) $(build)=$@ 736 $(Q)$(MAKE) $(build)=$@
720 737
721# Build the kernel release string 738# Build the kernel release string
722# The KERNELRELEASE is stored in a file named .kernelrelease 739# The KERNELRELEASE is stored in a file named include/config/kernel.release
723# to be used when executing for example make install or make modules_install 740# to be used when executing for example make install or make modules_install
724# 741#
725# Take the contents of any files called localversion* and the config 742# Take the contents of any files called localversion* and the config
@@ -737,10 +754,10 @@ _localver = $(foreach f, $(__localver), $(if $(findstring ~, $(f)),,$(f)))
737localver = $(subst $(space),, \ 754localver = $(subst $(space),, \
738 $(shell cat /dev/null $(_localver)) \ 755 $(shell cat /dev/null $(_localver)) \
739 $(patsubst "%",%,$(CONFIG_LOCALVERSION))) 756 $(patsubst "%",%,$(CONFIG_LOCALVERSION)))
740 757
741# If CONFIG_LOCALVERSION_AUTO is set scripts/setlocalversion is called 758# If CONFIG_LOCALVERSION_AUTO is set scripts/setlocalversion is called
742# and if the SCM is know a tag from the SCM is appended. 759# and if the SCM is know a tag from the SCM is appended.
743# The appended tag is determinded by the SCM used. 760# The appended tag is determined by the SCM used.
744# 761#
745# Currently, only git is supported. 762# Currently, only git is supported.
746# Other SCMs can edit scripts/setlocalversion and add the appropriate 763# Other SCMs can edit scripts/setlocalversion and add the appropriate
@@ -753,9 +770,9 @@ endif
753 770
754localver-full = $(localver)$(localver-auto) 771localver-full = $(localver)$(localver-auto)
755 772
756# Store (new) KERNELRELASE string in .kernelrelease 773# Store (new) KERNELRELASE string in include/config/kernel.release
757kernelrelease = $(KERNELVERSION)$(localver-full) 774kernelrelease = $(KERNELVERSION)$(localver-full)
758.kernelrelease: FORCE 775include/config/kernel.release: include/config/auto.conf FORCE
759 $(Q)rm -f $@ 776 $(Q)rm -f $@
760 $(Q)echo $(kernelrelease) > $@ 777 $(Q)echo $(kernelrelease) > $@
761 778
@@ -776,10 +793,10 @@ PHONY += prepare-all
776# and if so do: 793# and if so do:
777# 1) Check that make has not been executed in the kernel src $(srctree) 794# 1) Check that make has not been executed in the kernel src $(srctree)
778# 2) Create the include2 directory, used for the second asm symlink 795# 2) Create the include2 directory, used for the second asm symlink
779prepare3: .kernelrelease 796prepare3: include/config/kernel.release
780ifneq ($(KBUILD_SRC),) 797ifneq ($(KBUILD_SRC),)
781 @echo ' Using $(srctree) as source for kernel' 798 @echo ' Using $(srctree) as source for kernel'
782 $(Q)if [ -f $(srctree)/.config ]; then \ 799 $(Q)if [ -f $(srctree)/.config -o -d $(srctree)/include/config ]; then \
783 echo " $(srctree) is not clean, please run 'make mrproper'";\ 800 echo " $(srctree) is not clean, please run 'make mrproper'";\
784 echo " in the '$(srctree)' directory.";\ 801 echo " in the '$(srctree)' directory.";\
785 /bin/false; \ 802 /bin/false; \
@@ -792,7 +809,7 @@ endif
792prepare2: prepare3 outputmakefile 809prepare2: prepare3 outputmakefile
793 810
794prepare1: prepare2 include/linux/version.h include/asm \ 811prepare1: prepare2 include/linux/version.h include/asm \
795 include/config/MARKER 812 include/config/auto.conf
796ifneq ($(KBUILD_MODULES),) 813ifneq ($(KBUILD_MODULES),)
797 $(Q)mkdir -p $(MODVERDIR) 814 $(Q)mkdir -p $(MODVERDIR)
798 $(Q)rm -f $(MODVERDIR)/* 815 $(Q)rm -f $(MODVERDIR)/*
@@ -806,27 +823,20 @@ prepare0: archprepare FORCE
806# All the preparing.. 823# All the preparing..
807prepare prepare-all: prepare0 824prepare prepare-all: prepare0
808 825
809# Leave this as default for preprocessing vmlinux.lds.S, which is now 826# Leave this as default for preprocessing vmlinux.lds.S, which is now
810# done in arch/$(ARCH)/kernel/Makefile 827# done in arch/$(ARCH)/kernel/Makefile
811 828
812export CPPFLAGS_vmlinux.lds += -P -C -U$(ARCH) 829export CPPFLAGS_vmlinux.lds += -P -C -U$(ARCH)
813 830
814# FIXME: The asm symlink changes when $(ARCH) changes. That's 831# FIXME: The asm symlink changes when $(ARCH) changes. That's
815# hard to detect, but I suppose "make mrproper" is a good idea 832# hard to detect, but I suppose "make mrproper" is a good idea
816# before switching between archs anyway. 833# before switching between archs anyway.
817 834
818include/asm: 835include/asm:
819 @echo ' SYMLINK $@ -> include/asm-$(ARCH)' 836 @echo ' SYMLINK $@ -> include/asm-$(ARCH)'
820 $(Q)if [ ! -d include ]; then mkdir -p include; fi; 837 $(Q)if [ ! -d include ]; then mkdir -p include; fi;
821 @ln -fsn asm-$(ARCH) $@ 838 @ln -fsn asm-$(ARCH) $@
822 839
823# Split autoconf.h into include/linux/config/*
824
825include/config/MARKER: scripts/basic/split-include include/linux/autoconf.h
826 @echo ' SPLIT include/linux/autoconf.h -> include/config/*'
827 @scripts/basic/split-include include/linux/autoconf.h include/config
828 @touch $@
829
830# Generate some files 840# Generate some files
831# --------------------------------------------------------------------------- 841# ---------------------------------------------------------------------------
832 842
@@ -846,7 +856,7 @@ define filechk_version.h
846 ) 856 )
847endef 857endef
848 858
849include/linux/version.h: $(srctree)/Makefile .config .kernelrelease FORCE 859include/linux/version.h: $(srctree)/Makefile include/config/kernel.release FORCE
850 $(call filechk,version.h) 860 $(call filechk,version.h)
851 861
852# --------------------------------------------------------------------------- 862# ---------------------------------------------------------------------------
@@ -860,7 +870,7 @@ depend dep:
860 870
861ifdef CONFIG_MODULES 871ifdef CONFIG_MODULES
862 872
863# By default, build modules as well 873# By default, build modules as well
864 874
865all: modules 875all: modules
866 876
@@ -942,7 +952,7 @@ CLEAN_FILES += vmlinux System.map \
942MRPROPER_DIRS += include/config include2 952MRPROPER_DIRS += include/config include2
943MRPROPER_FILES += .config .config.old include/asm .version .old_version \ 953MRPROPER_FILES += .config .config.old include/asm .version .old_version \
944 include/linux/autoconf.h include/linux/version.h \ 954 include/linux/autoconf.h include/linux/version.h \
945 .kernelrelease Module.symvers tags TAGS cscope* 955 Module.symvers tags TAGS cscope*
946 956
947# clean - Delete most, but leave enough to build external modules 957# clean - Delete most, but leave enough to build external modules
948# 958#
@@ -958,8 +968,9 @@ clean: archclean $(clean-dirs)
958 $(call cmd,rmdirs) 968 $(call cmd,rmdirs)
959 $(call cmd,rmfiles) 969 $(call cmd,rmfiles)
960 @find . $(RCS_FIND_IGNORE) \ 970 @find . $(RCS_FIND_IGNORE) \
961 \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \ 971 \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
962 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \) \ 972 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
973 -o -name '*.symtypes' \) \
963 -type f -print | xargs rm -f 974 -type f -print | xargs rm -f
964 975
965# mrproper - Delete all generated files, including .config 976# mrproper - Delete all generated files, including .config
@@ -982,9 +993,9 @@ PHONY += distclean
982 993
983distclean: mrproper 994distclean: mrproper
984 @find $(srctree) $(RCS_FIND_IGNORE) \ 995 @find $(srctree) $(RCS_FIND_IGNORE) \
985 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \ 996 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
986 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \ 997 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
987 -o -name '.*.rej' -o -size 0 \ 998 -o -name '.*.rej' -o -size 0 \
988 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \ 999 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
989 -type f -print | xargs rm -f 1000 -type f -print | xargs rm -f
990 1001
@@ -994,9 +1005,9 @@ distclean: mrproper
994# rpm target kept for backward compatibility 1005# rpm target kept for backward compatibility
995package-dir := $(srctree)/scripts/package 1006package-dir := $(srctree)/scripts/package
996 1007
997%pkg: FORCE 1008%pkg: include/config/kernel.release FORCE
998 $(Q)$(MAKE) $(build)=$(package-dir) $@ 1009 $(Q)$(MAKE) $(build)=$(package-dir) $@
999rpm: FORCE 1010rpm: include/config/kernel.release FORCE
1000 $(Q)$(MAKE) $(build)=$(package-dir) $@ 1011 $(Q)$(MAKE) $(build)=$(package-dir) $@
1001 1012
1002 1013
@@ -1077,7 +1088,7 @@ else # KBUILD_EXTMOD
1077# make M=dir modules Make all modules in specified dir 1088# make M=dir modules Make all modules in specified dir
1078# make M=dir Same as 'make M=dir modules' 1089# make M=dir Same as 'make M=dir modules'
1079# make M=dir modules_install 1090# make M=dir modules_install
1080# Install the modules build in the module directory 1091# Install the modules built in the module directory
1081# Assumes install directory is already created 1092# Assumes install directory is already created
1082 1093
1083# We are always building modules 1094# We are always building modules
@@ -1136,7 +1147,7 @@ clean: rm-dirs := $(MODVERDIR)
1136clean: $(clean-dirs) 1147clean: $(clean-dirs)
1137 $(call cmd,rmdirs) 1148 $(call cmd,rmdirs)
1138 @find $(KBUILD_EXTMOD) $(RCS_FIND_IGNORE) \ 1149 @find $(KBUILD_EXTMOD) $(RCS_FIND_IGNORE) \
1139 \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \ 1150 \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
1140 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \) \ 1151 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \) \
1141 -type f -print | xargs rm -f 1152 -type f -print | xargs rm -f
1142 1153
@@ -1175,31 +1186,41 @@ else
1175ALLINCLUDE_ARCHS := $(ARCH) 1186ALLINCLUDE_ARCHS := $(ARCH)
1176endif 1187endif
1177else 1188else
1178#Allow user to specify only ALLSOURCE_PATHS on the command line, keeping existing behaviour. 1189#Allow user to specify only ALLSOURCE_PATHS on the command line, keeping existing behavour.
1179ALLINCLUDE_ARCHS := $(ALLSOURCE_ARCHS) 1190ALLINCLUDE_ARCHS := $(ALLSOURCE_ARCHS)
1180endif 1191endif
1181 1192
1182ALLSOURCE_ARCHS := $(ARCH) 1193ALLSOURCE_ARCHS := $(ARCH)
1183 1194
1184define all-sources 1195define find-sources
1185 ( find $(__srctree) $(RCS_FIND_IGNORE) \ 1196 ( find $(__srctree) $(RCS_FIND_IGNORE) \
1186 \( -name include -o -name arch \) -prune -o \ 1197 \( -name include -o -name arch \) -prune -o \
1187 -name '*.[chS]' -print; \ 1198 -name $1 -print; \
1188 for ARCH in $(ALLSOURCE_ARCHS) ; do \ 1199 for ARCH in $(ALLSOURCE_ARCHS) ; do \
1189 find $(__srctree)arch/$${ARCH} $(RCS_FIND_IGNORE) \ 1200 find $(__srctree)arch/$${ARCH} $(RCS_FIND_IGNORE) \
1190 -name '*.[chS]' -print; \ 1201 -name $1 -print; \
1191 done ; \ 1202 done ; \
1192 find $(__srctree)security/selinux/include $(RCS_FIND_IGNORE) \ 1203 find $(__srctree)security/selinux/include $(RCS_FIND_IGNORE) \
1193 -name '*.[chS]' -print; \ 1204 -name $1 -print; \
1194 find $(__srctree)include $(RCS_FIND_IGNORE) \ 1205 find $(__srctree)include $(RCS_FIND_IGNORE) \
1195 \( -name config -o -name 'asm-*' \) -prune \ 1206 \( -name config -o -name 'asm-*' \) -prune \
1196 -o -name '*.[chS]' -print; \ 1207 -o -name $1 -print; \
1197 for ARCH in $(ALLINCLUDE_ARCHS) ; do \ 1208 for ARCH in $(ALLINCLUDE_ARCHS) ; do \
1198 find $(__srctree)include/asm-$${ARCH} $(RCS_FIND_IGNORE) \ 1209 find $(__srctree)include/asm-$${ARCH} $(RCS_FIND_IGNORE) \
1199 -name '*.[chS]' -print; \ 1210 -name $1 -print; \
1200 done ; \ 1211 done ; \
1201 find $(__srctree)include/asm-generic $(RCS_FIND_IGNORE) \ 1212 find $(__srctree)include/asm-generic $(RCS_FIND_IGNORE) \
1202 -name '*.[chS]' -print ) 1213 -name $1 -print )
1214endef
1215
1216define all-sources
1217 $(call find-sources,'*.[chS]')
1218endef
1219define all-kconfigs
1220 $(call find-sources,'Kconfig*')
1221endef
1222define all-defconfigs
1223 $(call find-sources,'defconfig')
1203endef 1224endef
1204 1225
1205quiet_cmd_cscope-file = FILELST cscope.files 1226quiet_cmd_cscope-file = FILELST cscope.files
@@ -1219,7 +1240,13 @@ define cmd_TAGS
1219 echo "-I __initdata,__exitdata,__acquires,__releases \ 1240 echo "-I __initdata,__exitdata,__acquires,__releases \
1220 -I EXPORT_SYMBOL,EXPORT_SYMBOL_GPL \ 1241 -I EXPORT_SYMBOL,EXPORT_SYMBOL_GPL \
1221 --extra=+f --c-kinds=+px"`; \ 1242 --extra=+f --c-kinds=+px"`; \
1222 $(all-sources) | xargs etags $$ETAGSF -a 1243 $(all-sources) | xargs etags $$ETAGSF -a; \
1244 if test "x$$ETAGSF" = x; then \
1245 $(all-kconfigs) | xargs etags -a \
1246 --regex='/^config[ \t]+\([a-zA-Z0-9_]+\)/\1/'; \
1247 $(all-defconfigs) | xargs etags -a \
1248 --regex='/^#?[ \t]?\(CONFIG_[a-zA-Z0-9_]+\)/\1/'; \
1249 fi
1223endef 1250endef
1224 1251
1225TAGS: FORCE 1252TAGS: FORCE
@@ -1259,14 +1286,14 @@ namespacecheck:
1259endif #ifeq ($(config-targets),1) 1286endif #ifeq ($(config-targets),1)
1260endif #ifeq ($(mixed-targets),1) 1287endif #ifeq ($(mixed-targets),1)
1261 1288
1262PHONY += checkstack 1289PHONY += checkstack kernelrelease kernelversion
1263checkstack: 1290checkstack:
1264 $(OBJDUMP) -d vmlinux $$(find . -name '*.ko') | \ 1291 $(OBJDUMP) -d vmlinux $$(find . -name '*.ko') | \
1265 $(PERL) $(src)/scripts/checkstack.pl $(ARCH) 1292 $(PERL) $(src)/scripts/checkstack.pl $(ARCH)
1266 1293
1267kernelrelease: 1294kernelrelease:
1268 $(if $(wildcard .kernelrelease), $(Q)echo $(KERNELRELEASE), \ 1295 $(if $(wildcard include/config/kernel.release), $(Q)echo $(KERNELRELEASE), \
1269 $(error kernelrelease not valid - run 'make *config' to update it)) 1296 $(error kernelrelease not valid - run 'make prepare' to update it))
1270kernelversion: 1297kernelversion:
1271 @echo $(KERNELVERSION) 1298 @echo $(KERNELVERSION)
1272 1299
@@ -1301,6 +1328,8 @@ endif
1301 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) 1328 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
1302%.o: %.S prepare scripts FORCE 1329%.o: %.S prepare scripts FORCE
1303 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) 1330 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
1331%.symtypes: %.c prepare scripts FORCE
1332 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
1304 1333
1305# Modules 1334# Modules
1306/ %/: prepare scripts FORCE 1335/ %/: prepare scripts FORCE
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index 558b833685..254c507a60 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -481,7 +481,7 @@ register_cpus(void)
481 struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL); 481 struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
482 if (!p) 482 if (!p)
483 return -ENOMEM; 483 return -ENOMEM;
484 register_cpu(p, i, NULL); 484 register_cpu(p, i);
485 } 485 }
486 return 0; 486 return 0;
487} 487}
diff --git a/arch/alpha/oprofile/common.c b/arch/alpha/oprofile/common.c
index ba788cfdc3..9fc0eeb4f0 100644
--- a/arch/alpha/oprofile/common.c
+++ b/arch/alpha/oprofile/common.c
@@ -112,7 +112,7 @@ op_axp_create_files(struct super_block * sb, struct dentry * root)
112 112
113 for (i = 0; i < model->num_counters; ++i) { 113 for (i = 0; i < model->num_counters; ++i) {
114 struct dentry *dir; 114 struct dentry *dir;
115 char buf[3]; 115 char buf[4];
116 116
117 snprintf(buf, sizeof buf, "%d", i); 117 snprintf(buf, sizeof buf, "%d", i);
118 dir = oprofilefs_mkdir(sb, root, buf); 118 dir = oprofilefs_mkdir(sb, root, buf);
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 1b7e5c2e90..3d1a3fb7d5 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -253,7 +253,7 @@ config ARCH_SA1100
253 Support for StrongARM 11x0 based boards. 253 Support for StrongARM 11x0 based boards.
254 254
255config ARCH_S3C2410 255config ARCH_S3C2410
256 bool "Samsung S3C2410" 256 bool "Samsung S3C2410, S3C2412, S3C2413, S3C2440, S3C2442"
257 help 257 help
258 Samsung S3C2410X CPU based systems, such as the Simtec Electronics 258 Samsung S3C2410X CPU based systems, such as the Simtec Electronics
259 BAST (<http://www.simtec.co.uk/products/EB110ITX/>), the IPAQ 1940 or 259 BAST (<http://www.simtec.co.uk/products/EB110ITX/>), the IPAQ 1940 or
@@ -372,7 +372,7 @@ config ISA_DMA_API
372 bool 372 bool
373 373
374config PCI 374config PCI
375 bool "PCI support" if ARCH_INTEGRATOR_AP || ARCH_VERSATILE_PB 375 bool "PCI support" if ARCH_INTEGRATOR_AP || ARCH_VERSATILE_PB || ARCH_IXP4XX
376 help 376 help
377 Find out whether you have a PCI motherboard. PCI is the name of a 377 Find out whether you have a PCI motherboard. PCI is the name of a
378 bus system, i.e. the way the CPU talks to the other stuff inside 378 bus system, i.e. the way the CPU talks to the other stuff inside
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 282b14e2f4..a3bbaaf480 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -177,7 +177,7 @@ boot := arch/arm/boot
177# them changed. We use .arch to indicate when they were updated 177# them changed. We use .arch to indicate when they were updated
178# last, otherwise make uses the target directory mtime. 178# last, otherwise make uses the target directory mtime.
179 179
180include/asm-arm/.arch: $(wildcard include/config/arch/*.h) include/config/MARKER 180include/asm-arm/.arch: $(wildcard include/config/arch/*.h) include/config/auto.conf
181 @echo ' SYMLINK include/asm-arm/arch -> include/asm-arm/$(INCDIR)' 181 @echo ' SYMLINK include/asm-arm/arch -> include/asm-arm/$(INCDIR)'
182ifneq ($(KBUILD_SRC),) 182ifneq ($(KBUILD_SRC),)
183 $(Q)mkdir -p include/asm-arm 183 $(Q)mkdir -p include/asm-arm
diff --git a/arch/arm/boot/compressed/head-at91rm9200.S b/arch/arm/boot/compressed/head-at91rm9200.S
index 57a3b163b2..d68b9acd82 100644
--- a/arch/arm/boot/compressed/head-at91rm9200.S
+++ b/arch/arm/boot/compressed/head-at91rm9200.S
@@ -61,6 +61,12 @@
61 cmp r7, r3 61 cmp r7, r3
62 beq 99f 62 beq 99f
63 63
64 @ Ajeco 1ARM : 1075
65 mov r3, #(MACH_TYPE_ONEARM & 0xff)
66 orr r3, r3, #(MACH_TYPE_ONEARM & 0xff00)
67 cmp r7, r3
68 beq 99f
69
64 @ Unknown board, use the AT91RM9200DK board 70 @ Unknown board, use the AT91RM9200DK board
65 @ mov r7, #MACH_TYPE_AT91RM9200 71 @ mov r7, #MACH_TYPE_AT91RM9200
66 mov r7, #(MACH_TYPE_AT91RM9200DK & 0xff) 72 mov r7, #(MACH_TYPE_AT91RM9200DK & 0xff)
diff --git a/arch/arm/boot/compressed/ll_char_wr.S b/arch/arm/boot/compressed/ll_char_wr.S
index d7bbd9da2f..8517c8606b 100644
--- a/arch/arm/boot/compressed/ll_char_wr.S
+++ b/arch/arm/boot/compressed/ll_char_wr.S
@@ -77,7 +77,7 @@ Lrow4bpplp:
77 subne r1, r1, #1 77 subne r1, r1, #1
78 ldrneb r7, [r6, r1] 78 ldrneb r7, [r6, r1]
79 bne Lrow4bpplp 79 bne Lrow4bpplp
80 LOADREGS(fd, sp!, {r4 - r7, pc}) 80 ldmfd sp!, {r4 - r7, pc}
81 81
82@ 82@
83@ Smashable regs: {r0 - r3}, [r4], {r5 - r7}, (r8 - fp), [ip], (sp), {lr}, (pc) 83@ Smashable regs: {r0 - r3}, [r4], {r5 - r7}, (r8 - fp), [ip], (sp), {lr}, (pc)
@@ -105,7 +105,7 @@ Lrow8bpplp:
105 subne r1, r1, #1 105 subne r1, r1, #1
106 ldrneb r7, [r6, r1] 106 ldrneb r7, [r6, r1]
107 bne Lrow8bpplp 107 bne Lrow8bpplp
108 LOADREGS(fd, sp!, {r4 - r7, pc}) 108 ldmfd sp!, {r4 - r7, pc}
109 109
110@ 110@
111@ Smashable regs: {r0 - r3}, [r4], {r5, r6}, [r7], (r8 - fp), [ip], (sp), [lr], (pc) 111@ Smashable regs: {r0 - r3}, [r4], {r5, r6}, [r7], (r8 - fp), [ip], (sp), [lr], (pc)
@@ -127,7 +127,7 @@ Lrow1bpp:
127 strb r7, [r0], r5 127 strb r7, [r0], r5
128 mov r7, r7, lsr #8 128 mov r7, r7, lsr #8
129 strb r7, [r0], r5 129 strb r7, [r0], r5
130 LOADREGS(fd, sp!, {r4 - r7, pc}) 130 ldmfd sp!, {r4 - r7, pc}
131 131
132 .bss 132 .bss
133ENTRY(con_charconvtable) 133ENTRY(con_charconvtable)
diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c
index a7dc137069..0dafba3a70 100644
--- a/arch/arm/common/locomo.c
+++ b/arch/arm/common/locomo.c
@@ -629,21 +629,6 @@ static int locomo_resume(struct platform_device *dev)
629#endif 629#endif
630 630
631 631
632#define LCM_ALC_EN 0x8000
633
634void frontlight_set(struct locomo *lchip, int duty, int vr, int bpwf)
635{
636 unsigned long flags;
637
638 spin_lock_irqsave(&lchip->lock, flags);
639 locomo_writel(bpwf, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS);
640 udelay(100);
641 locomo_writel(duty, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD);
642 locomo_writel(bpwf | LCM_ALC_EN, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS);
643 spin_unlock_irqrestore(&lchip->lock, flags);
644}
645
646
647/** 632/**
648 * locomo_probe - probe for a single LoCoMo chip. 633 * locomo_probe - probe for a single LoCoMo chip.
649 * @phys_addr: physical address of device. 634 * @phys_addr: physical address of device.
@@ -698,14 +683,10 @@ __locomo_probe(struct device *me, struct resource *mem, int irq)
698 , lchip->base + LOCOMO_GPD); 683 , lchip->base + LOCOMO_GPD);
699 locomo_writel(0, lchip->base + LOCOMO_GIE); 684 locomo_writel(0, lchip->base + LOCOMO_GIE);
700 685
701 /* FrontLight */ 686 /* Frontlight */
702 locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); 687 locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS);
703 locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD); 688 locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD);
704 689
705 /* Same constants can be used for collie and poodle
706 (depending on CONFIG options in original sharp code)? */
707 frontlight_set(lchip, 163, 0, 148);
708
709 /* Longtime timer */ 690 /* Longtime timer */
710 locomo_writel(0, lchip->base + LOCOMO_LTINT); 691 locomo_writel(0, lchip->base + LOCOMO_LTINT);
711 /* SPI */ 692 /* SPI */
@@ -1063,6 +1044,30 @@ void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int
1063} 1044}
1064 1045
1065/* 1046/*
1047 * Frontlight control
1048 */
1049
1050static struct locomo *locomo_chip_driver(struct locomo_dev *ldev);
1051
1052void locomo_frontlight_set(struct locomo_dev *dev, int duty, int vr, int bpwf)
1053{
1054 unsigned long flags;
1055 struct locomo *lchip = locomo_chip_driver(dev);
1056
1057 if (vr)
1058 locomo_gpio_write(dev, LOCOMO_GPIO_FL_VR, 1);
1059 else
1060 locomo_gpio_write(dev, LOCOMO_GPIO_FL_VR, 0);
1061
1062 spin_lock_irqsave(&lchip->lock, flags);
1063 locomo_writel(bpwf, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS);
1064 udelay(100);
1065 locomo_writel(duty, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD);
1066 locomo_writel(bpwf | LOCOMO_ALC_EN, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS);
1067 spin_unlock_irqrestore(&lchip->lock, flags);
1068}
1069
1070/*
1066 * LoCoMo "Register Access Bus." 1071 * LoCoMo "Register Access Bus."
1067 * 1072 *
1068 * We model this as a regular bus type, and hang devices directly 1073 * We model this as a regular bus type, and hang devices directly
diff --git a/arch/arm/configs/onearm_defconfig b/arch/arm/configs/onearm_defconfig
new file mode 100644
index 0000000000..5401c01cae
--- /dev/null
+++ b/arch/arm/configs/onearm_defconfig
@@ -0,0 +1,1053 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.17-git10
4# Mon Jun 26 13:45:44 2006
5#
6CONFIG_ARM=y
7CONFIG_MMU=y
8CONFIG_RWSEM_GENERIC_SPINLOCK=y
9CONFIG_GENERIC_HWEIGHT=y
10CONFIG_GENERIC_CALIBRATE_DELAY=y
11CONFIG_VECTORS_BASE=0xffff0000
12
13#
14# Code maturity level options
15#
16CONFIG_EXPERIMENTAL=y
17CONFIG_BROKEN_ON_SMP=y
18CONFIG_INIT_ENV_ARG_LIMIT=32
19
20#
21# General setup
22#
23CONFIG_LOCALVERSION=""
24CONFIG_LOCALVERSION_AUTO=y
25# CONFIG_SWAP is not set
26CONFIG_SYSVIPC=y
27# CONFIG_POSIX_MQUEUE is not set
28# CONFIG_BSD_PROCESS_ACCT is not set
29CONFIG_SYSCTL=y
30# CONFIG_AUDIT is not set
31# CONFIG_IKCONFIG is not set
32# CONFIG_RELAY is not set
33CONFIG_INITRAMFS_SOURCE=""
34CONFIG_UID16=y
35CONFIG_CC_OPTIMIZE_FOR_SIZE=y
36CONFIG_EMBEDDED=y
37CONFIG_KALLSYMS=y
38# CONFIG_KALLSYMS_ALL is not set
39# CONFIG_KALLSYMS_EXTRA_PASS is not set
40CONFIG_HOTPLUG=y
41CONFIG_PRINTK=y
42CONFIG_BUG=y
43CONFIG_ELF_CORE=y
44CONFIG_BASE_FULL=y
45CONFIG_FUTEX=y
46CONFIG_EPOLL=y
47CONFIG_SHMEM=y
48CONFIG_SLAB=y
49# CONFIG_TINY_SHMEM is not set
50CONFIG_BASE_SMALL=0
51# CONFIG_SLOB is not set
52
53#
54# Loadable module support
55#
56CONFIG_MODULES=y
57CONFIG_MODULE_UNLOAD=y
58# CONFIG_MODULE_FORCE_UNLOAD is not set
59# CONFIG_MODVERSIONS is not set
60# CONFIG_MODULE_SRCVERSION_ALL is not set
61CONFIG_KMOD=y
62
63#
64# Block layer
65#
66# CONFIG_BLK_DEV_IO_TRACE is not set
67
68#
69# IO Schedulers
70#
71CONFIG_IOSCHED_NOOP=y
72CONFIG_IOSCHED_AS=y
73# CONFIG_IOSCHED_DEADLINE is not set
74# CONFIG_IOSCHED_CFQ is not set
75CONFIG_DEFAULT_AS=y
76# CONFIG_DEFAULT_DEADLINE is not set
77# CONFIG_DEFAULT_CFQ is not set
78# CONFIG_DEFAULT_NOOP is not set
79CONFIG_DEFAULT_IOSCHED="anticipatory"
80
81#
82# System Type
83#
84# CONFIG_ARCH_AAEC2000 is not set
85# CONFIG_ARCH_INTEGRATOR is not set
86# CONFIG_ARCH_REALVIEW is not set
87# CONFIG_ARCH_VERSATILE is not set
88CONFIG_ARCH_AT91RM9200=y
89# CONFIG_ARCH_CLPS7500 is not set
90# CONFIG_ARCH_CLPS711X is not set
91# CONFIG_ARCH_CO285 is not set
92# CONFIG_ARCH_EBSA110 is not set
93# CONFIG_ARCH_EP93XX is not set
94# CONFIG_ARCH_FOOTBRIDGE is not set
95# CONFIG_ARCH_NETX is not set
96# CONFIG_ARCH_H720X is not set
97# CONFIG_ARCH_IMX is not set
98# CONFIG_ARCH_IOP3XX is not set
99# CONFIG_ARCH_IXP4XX is not set
100# CONFIG_ARCH_IXP2000 is not set
101# CONFIG_ARCH_IXP23XX is not set
102# CONFIG_ARCH_L7200 is not set
103# CONFIG_ARCH_PNX4008 is not set
104# CONFIG_ARCH_PXA is not set
105# CONFIG_ARCH_RPC is not set
106# CONFIG_ARCH_SA1100 is not set
107# CONFIG_ARCH_S3C2410 is not set
108# CONFIG_ARCH_SHARK is not set
109# CONFIG_ARCH_LH7A40X is not set
110# CONFIG_ARCH_OMAP is not set
111
112#
113# AT91RM9200 Implementations
114#
115
116#
117# AT91RM9200 Board Type
118#
119CONFIG_MACH_ONEARM=y
120# CONFIG_ARCH_AT91RM9200DK is not set
121# CONFIG_MACH_AT91RM9200EK is not set
122# CONFIG_MACH_CSB337 is not set
123# CONFIG_MACH_CSB637 is not set
124# CONFIG_MACH_CARMEVA is not set
125# CONFIG_MACH_KB9200 is not set
126# CONFIG_MACH_ATEB9200 is not set
127# CONFIG_MACH_KAFA is not set
128
129#
130# AT91RM9200 Feature Selections
131#
132CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
133
134#
135# Processor Type
136#
137CONFIG_CPU_32=y
138CONFIG_CPU_ARM920T=y
139CONFIG_CPU_32v4=y
140CONFIG_CPU_ABRT_EV4T=y
141CONFIG_CPU_CACHE_V4WT=y
142CONFIG_CPU_CACHE_VIVT=y
143CONFIG_CPU_COPY_V4WB=y
144CONFIG_CPU_TLB_V4WBI=y
145
146#
147# Processor Features
148#
149# CONFIG_ARM_THUMB is not set
150# CONFIG_CPU_ICACHE_DISABLE is not set
151# CONFIG_CPU_DCACHE_DISABLE is not set
152# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
153
154#
155# Bus support
156#
157
158#
159# PCCARD (PCMCIA/CardBus) support
160#
161CONFIG_PCCARD=y
162# CONFIG_PCMCIA_DEBUG is not set
163CONFIG_PCMCIA=y
164CONFIG_PCMCIA_LOAD_CIS=y
165CONFIG_PCMCIA_IOCTL=y
166
167#
168# PC-card bridges
169#
170CONFIG_AT91_CF=y
171
172#
173# Kernel Features
174#
175# CONFIG_PREEMPT is not set
176# CONFIG_NO_IDLE_HZ is not set
177CONFIG_HZ=100
178# CONFIG_AEABI is not set
179# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
180CONFIG_SELECT_MEMORY_MODEL=y
181CONFIG_FLATMEM_MANUAL=y
182# CONFIG_DISCONTIGMEM_MANUAL is not set
183# CONFIG_SPARSEMEM_MANUAL is not set
184CONFIG_FLATMEM=y
185CONFIG_FLAT_NODE_MEM_MAP=y
186# CONFIG_SPARSEMEM_STATIC is not set
187CONFIG_SPLIT_PTLOCK_CPUS=4096
188CONFIG_LEDS=y
189CONFIG_LEDS_TIMER=y
190# CONFIG_LEDS_CPU is not set
191CONFIG_ALIGNMENT_TRAP=y
192
193#
194# Boot options
195#
196CONFIG_ZBOOT_ROM_TEXT=0x0
197CONFIG_ZBOOT_ROM_BSS=0x0
198CONFIG_CMDLINE="console=ttyS0,115200 root=/dev/nfs ip=bootp mem=64M"
199# CONFIG_XIP_KERNEL is not set
200
201#
202# Floating point emulation
203#
204
205#
206# At least one emulation must be selected
207#
208CONFIG_FPE_NWFPE=y
209# CONFIG_FPE_NWFPE_XP is not set
210# CONFIG_FPE_FASTFPE is not set
211
212#
213# Userspace binary formats
214#
215CONFIG_BINFMT_ELF=y
216# CONFIG_BINFMT_AOUT is not set
217# CONFIG_BINFMT_MISC is not set
218# CONFIG_ARTHUR is not set
219
220#
221# Power management options
222#
223# CONFIG_PM is not set
224# CONFIG_APM is not set
225
226#
227# Networking
228#
229CONFIG_NET=y
230
231#
232# Networking options
233#
234# CONFIG_NETDEBUG is not set
235CONFIG_PACKET=y
236# CONFIG_PACKET_MMAP is not set
237CONFIG_UNIX=y
238CONFIG_XFRM=y
239# CONFIG_XFRM_USER is not set
240# CONFIG_NET_KEY is not set
241CONFIG_INET=y
242# CONFIG_IP_MULTICAST is not set
243# CONFIG_IP_ADVANCED_ROUTER is not set
244CONFIG_IP_FIB_HASH=y
245CONFIG_IP_PNP=y
246# CONFIG_IP_PNP_DHCP is not set
247CONFIG_IP_PNP_BOOTP=y
248# CONFIG_IP_PNP_RARP is not set
249# CONFIG_NET_IPIP is not set
250# CONFIG_NET_IPGRE is not set
251# CONFIG_ARPD is not set
252# CONFIG_SYN_COOKIES is not set
253# CONFIG_INET_AH is not set
254# CONFIG_INET_ESP is not set
255# CONFIG_INET_IPCOMP is not set
256# CONFIG_INET_XFRM_TUNNEL is not set
257# CONFIG_INET_TUNNEL is not set
258CONFIG_INET_XFRM_MODE_TRANSPORT=y
259CONFIG_INET_XFRM_MODE_TUNNEL=y
260CONFIG_INET_DIAG=y
261CONFIG_INET_TCP_DIAG=y
262# CONFIG_TCP_CONG_ADVANCED is not set
263CONFIG_TCP_CONG_BIC=y
264# CONFIG_IPV6 is not set
265# CONFIG_INET6_XFRM_TUNNEL is not set
266# CONFIG_INET6_TUNNEL is not set
267# CONFIG_NETWORK_SECMARK is not set
268# CONFIG_NETFILTER is not set
269
270#
271# DCCP Configuration (EXPERIMENTAL)
272#
273# CONFIG_IP_DCCP is not set
274
275#
276# SCTP Configuration (EXPERIMENTAL)
277#
278# CONFIG_IP_SCTP is not set
279
280#
281# TIPC Configuration (EXPERIMENTAL)
282#
283# CONFIG_TIPC is not set
284# CONFIG_ATM is not set
285# CONFIG_BRIDGE is not set
286# CONFIG_VLAN_8021Q is not set
287# CONFIG_DECNET is not set
288# CONFIG_LLC2 is not set
289# CONFIG_IPX is not set
290# CONFIG_ATALK is not set
291# CONFIG_X25 is not set
292# CONFIG_LAPB is not set
293# CONFIG_NET_DIVERT is not set
294# CONFIG_ECONET is not set
295# CONFIG_WAN_ROUTER is not set
296
297#
298# QoS and/or fair queueing
299#
300# CONFIG_NET_SCHED is not set
301
302#
303# Network testing
304#
305# CONFIG_NET_PKTGEN is not set
306# CONFIG_HAMRADIO is not set
307# CONFIG_IRDA is not set
308# CONFIG_BT is not set
309# CONFIG_IEEE80211 is not set
310
311#
312# Device Drivers
313#
314
315#
316# Generic Driver Options
317#
318CONFIG_STANDALONE=y
319CONFIG_PREVENT_FIRMWARE_BUILD=y
320CONFIG_FW_LOADER=y
321# CONFIG_DEBUG_DRIVER is not set
322# CONFIG_SYS_HYPERVISOR is not set
323
324#
325# Connector - unified userspace <-> kernelspace linker
326#
327# CONFIG_CONNECTOR is not set
328
329#
330# Memory Technology Devices (MTD)
331#
332CONFIG_MTD=y
333# CONFIG_MTD_DEBUG is not set
334# CONFIG_MTD_CONCAT is not set
335CONFIG_MTD_PARTITIONS=y
336# CONFIG_MTD_REDBOOT_PARTS is not set
337CONFIG_MTD_CMDLINE_PARTS=y
338# CONFIG_MTD_AFS_PARTS is not set
339
340#
341# User Modules And Translation Layers
342#
343CONFIG_MTD_CHAR=y
344CONFIG_MTD_BLOCK=y
345# CONFIG_FTL is not set
346# CONFIG_NFTL is not set
347# CONFIG_INFTL is not set
348# CONFIG_RFD_FTL is not set
349
350#
351# RAM/ROM/Flash chip drivers
352#
353CONFIG_MTD_CFI=y
354CONFIG_MTD_JEDECPROBE=y
355CONFIG_MTD_GEN_PROBE=y
356# CONFIG_MTD_CFI_ADV_OPTIONS is not set
357CONFIG_MTD_MAP_BANK_WIDTH_1=y
358CONFIG_MTD_MAP_BANK_WIDTH_2=y
359CONFIG_MTD_MAP_BANK_WIDTH_4=y
360# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
361# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
362# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
363CONFIG_MTD_CFI_I1=y
364CONFIG_MTD_CFI_I2=y
365# CONFIG_MTD_CFI_I4 is not set
366# CONFIG_MTD_CFI_I8 is not set
367# CONFIG_MTD_CFI_INTELEXT is not set
368CONFIG_MTD_CFI_AMDSTD=y
369# CONFIG_MTD_CFI_STAA is not set
370CONFIG_MTD_CFI_UTIL=y
371# CONFIG_MTD_RAM is not set
372# CONFIG_MTD_ROM is not set
373# CONFIG_MTD_ABSENT is not set
374# CONFIG_MTD_OBSOLETE_CHIPS is not set
375
376#
377# Mapping drivers for chip access
378#
379# CONFIG_MTD_COMPLEX_MAPPINGS is not set
380CONFIG_MTD_PHYSMAP=y
381CONFIG_MTD_PHYSMAP_START=0x0
382CONFIG_MTD_PHYSMAP_LEN=0x0
383CONFIG_MTD_PHYSMAP_BANKWIDTH=0
384# CONFIG_MTD_ARM_INTEGRATOR is not set
385# CONFIG_MTD_IMPA7 is not set
386# CONFIG_MTD_PLATRAM is not set
387
388#
389# Self-contained MTD device drivers
390#
391# CONFIG_MTD_SLRAM is not set
392# CONFIG_MTD_PHRAM is not set
393# CONFIG_MTD_MTDRAM is not set
394# CONFIG_MTD_BLOCK2MTD is not set
395
396#
397# Disk-On-Chip Device Drivers
398#
399# CONFIG_MTD_DOC2000 is not set
400# CONFIG_MTD_DOC2001 is not set
401# CONFIG_MTD_DOC2001PLUS is not set
402
403#
404# NAND Flash Device Drivers
405#
406# CONFIG_MTD_NAND is not set
407
408#
409# OneNAND Flash Device Drivers
410#
411# CONFIG_MTD_ONENAND is not set
412
413#
414# Parallel port support
415#
416# CONFIG_PARPORT is not set
417
418#
419# Plug and Play support
420#
421
422#
423# Block devices
424#
425# CONFIG_BLK_DEV_COW_COMMON is not set
426# CONFIG_BLK_DEV_LOOP is not set
427# CONFIG_BLK_DEV_NBD is not set
428# CONFIG_BLK_DEV_UB is not set
429CONFIG_BLK_DEV_RAM=y
430CONFIG_BLK_DEV_RAM_COUNT=16
431CONFIG_BLK_DEV_RAM_SIZE=8192
432CONFIG_BLK_DEV_INITRD=y
433# CONFIG_CDROM_PKTCDVD is not set
434# CONFIG_ATA_OVER_ETH is not set
435
436#
437# ATA/ATAPI/MFM/RLL support
438#
439# CONFIG_IDE is not set
440
441#
442# SCSI device support
443#
444# CONFIG_RAID_ATTRS is not set
445# CONFIG_SCSI is not set
446
447#
448# Multi-device support (RAID and LVM)
449#
450# CONFIG_MD is not set
451
452#
453# Fusion MPT device support
454#
455# CONFIG_FUSION is not set
456
457#
458# IEEE 1394 (FireWire) support
459#
460
461#
462# I2O device support
463#
464
465#
466# Network device support
467#
468CONFIG_NETDEVICES=y
469# CONFIG_DUMMY is not set
470# CONFIG_BONDING is not set
471# CONFIG_EQUALIZER is not set
472# CONFIG_TUN is not set
473
474#
475# PHY device support
476#
477# CONFIG_PHYLIB is not set
478
479#
480# Ethernet (10 or 100Mbit)
481#
482CONFIG_NET_ETHERNET=y
483CONFIG_MII=y
484CONFIG_ARM_AT91_ETHER=y
485# CONFIG_SMC91X is not set
486# CONFIG_DM9000 is not set
487
488#
489# Ethernet (1000 Mbit)
490#
491
492#
493# Ethernet (10000 Mbit)
494#
495
496#
497# Token Ring devices
498#
499
500#
501# Wireless LAN (non-hamradio)
502#
503# CONFIG_NET_RADIO is not set
504
505#
506# PCMCIA network device support
507#
508# CONFIG_NET_PCMCIA is not set
509
510#
511# Wan interfaces
512#
513# CONFIG_WAN is not set
514# CONFIG_PPP is not set
515# CONFIG_SLIP is not set
516# CONFIG_SHAPER is not set
517# CONFIG_NETCONSOLE is not set
518# CONFIG_NETPOLL is not set
519# CONFIG_NET_POLL_CONTROLLER is not set
520
521#
522# ISDN subsystem
523#
524# CONFIG_ISDN is not set
525
526#
527# Input device support
528#
529CONFIG_INPUT=y
530
531#
532# Userland interfaces
533#
534CONFIG_INPUT_MOUSEDEV=y
535# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
536CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
537CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
538# CONFIG_INPUT_JOYDEV is not set
539# CONFIG_INPUT_TSDEV is not set
540# CONFIG_INPUT_EVDEV is not set
541# CONFIG_INPUT_EVBUG is not set
542
543#
544# Input Device Drivers
545#
546# CONFIG_INPUT_KEYBOARD is not set
547# CONFIG_INPUT_MOUSE is not set
548# CONFIG_INPUT_JOYSTICK is not set
549# CONFIG_INPUT_TOUCHSCREEN is not set
550# CONFIG_INPUT_MISC is not set
551
552#
553# Hardware I/O ports
554#
555# CONFIG_SERIO is not set
556# CONFIG_GAMEPORT is not set
557
558#
559# Character devices
560#
561# CONFIG_VT is not set
562# CONFIG_SERIAL_NONSTANDARD is not set
563
564#
565# Serial drivers
566#
567# CONFIG_SERIAL_8250 is not set
568
569#
570# Non-8250 serial port support
571#
572CONFIG_SERIAL_AT91=y
573CONFIG_SERIAL_AT91_CONSOLE=y
574# CONFIG_SERIAL_AT91_TTYAT is not set
575CONFIG_SERIAL_CORE=y
576CONFIG_SERIAL_CORE_CONSOLE=y
577CONFIG_UNIX98_PTYS=y
578CONFIG_LEGACY_PTYS=y
579CONFIG_LEGACY_PTY_COUNT=256
580
581#
582# IPMI
583#
584# CONFIG_IPMI_HANDLER is not set
585
586#
587# Watchdog Cards
588#
589CONFIG_WATCHDOG=y
590CONFIG_WATCHDOG_NOWAYOUT=y
591
592#
593# Watchdog Device Drivers
594#
595# CONFIG_SOFT_WATCHDOG is not set
596CONFIG_AT91_WATCHDOG=y
597
598#
599# USB-based Watchdog Cards
600#
601# CONFIG_USBPCWATCHDOG is not set
602# CONFIG_NVRAM is not set
603# CONFIG_DTLK is not set
604# CONFIG_R3964 is not set
605
606#
607# Ftape, the floppy tape device driver
608#
609
610#
611# PCMCIA character devices
612#
613# CONFIG_SYNCLINK_CS is not set
614# CONFIG_CARDMAN_4000 is not set
615# CONFIG_CARDMAN_4040 is not set
616# CONFIG_RAW_DRIVER is not set
617
618#
619# TPM devices
620#
621# CONFIG_TCG_TPM is not set
622# CONFIG_TELCLOCK is not set
623
624#
625# I2C support
626#
627CONFIG_I2C=y
628CONFIG_I2C_CHARDEV=y
629
630#
631# I2C Algorithms
632#
633# CONFIG_I2C_ALGOBIT is not set
634# CONFIG_I2C_ALGOPCF is not set
635# CONFIG_I2C_ALGOPCA is not set
636
637#
638# I2C Hardware Bus support
639#
640# CONFIG_I2C_OCORES is not set
641# CONFIG_I2C_PARPORT_LIGHT is not set
642# CONFIG_I2C_STUB is not set
643# CONFIG_I2C_PCA_ISA is not set
644
645#
646# Miscellaneous I2C Chip support
647#
648# CONFIG_SENSORS_DS1337 is not set
649# CONFIG_SENSORS_DS1374 is not set
650# CONFIG_SENSORS_EEPROM is not set
651# CONFIG_SENSORS_PCF8574 is not set
652# CONFIG_SENSORS_PCA9539 is not set
653# CONFIG_SENSORS_PCF8591 is not set
654# CONFIG_SENSORS_MAX6875 is not set
655# CONFIG_I2C_DEBUG_CORE is not set
656# CONFIG_I2C_DEBUG_ALGO is not set
657# CONFIG_I2C_DEBUG_BUS is not set
658# CONFIG_I2C_DEBUG_CHIP is not set
659
660#
661# SPI support
662#
663# CONFIG_SPI is not set
664# CONFIG_SPI_MASTER is not set
665
666#
667# Dallas's 1-wire bus
668#
669
670#
671# Hardware Monitoring support
672#
673CONFIG_HWMON=y
674# CONFIG_HWMON_VID is not set
675# CONFIG_SENSORS_ABITUGURU is not set
676# CONFIG_SENSORS_ADM1021 is not set
677# CONFIG_SENSORS_ADM1025 is not set
678# CONFIG_SENSORS_ADM1026 is not set
679# CONFIG_SENSORS_ADM1031 is not set
680# CONFIG_SENSORS_ADM9240 is not set
681# CONFIG_SENSORS_ASB100 is not set
682# CONFIG_SENSORS_ATXP1 is not set
683# CONFIG_SENSORS_DS1621 is not set
684# CONFIG_SENSORS_F71805F is not set
685# CONFIG_SENSORS_FSCHER is not set
686# CONFIG_SENSORS_FSCPOS is not set
687# CONFIG_SENSORS_GL518SM is not set
688# CONFIG_SENSORS_GL520SM is not set
689# CONFIG_SENSORS_IT87 is not set
690# CONFIG_SENSORS_LM63 is not set
691# CONFIG_SENSORS_LM75 is not set
692# CONFIG_SENSORS_LM77 is not set
693# CONFIG_SENSORS_LM78 is not set
694# CONFIG_SENSORS_LM80 is not set
695# CONFIG_SENSORS_LM83 is not set
696# CONFIG_SENSORS_LM85 is not set
697# CONFIG_SENSORS_LM87 is not set
698# CONFIG_SENSORS_LM90 is not set
699# CONFIG_SENSORS_LM92 is not set
700# CONFIG_SENSORS_MAX1619 is not set
701# CONFIG_SENSORS_PC87360 is not set
702# CONFIG_SENSORS_SMSC47M1 is not set
703# CONFIG_SENSORS_SMSC47M192 is not set
704# CONFIG_SENSORS_SMSC47B397 is not set
705# CONFIG_SENSORS_W83781D is not set
706# CONFIG_SENSORS_W83791D is not set
707# CONFIG_SENSORS_W83792D is not set
708# CONFIG_SENSORS_W83L785TS is not set
709# CONFIG_SENSORS_W83627HF is not set
710# CONFIG_SENSORS_W83627EHF is not set
711# CONFIG_HWMON_DEBUG_CHIP is not set
712
713#
714# Misc devices
715#
716
717#
718# LED devices
719#
720# CONFIG_NEW_LEDS is not set
721
722#
723# LED drivers
724#
725
726#
727# LED Triggers
728#
729
730#
731# Multimedia devices
732#
733# CONFIG_VIDEO_DEV is not set
734CONFIG_VIDEO_V4L2=y
735
736#
737# Digital Video Broadcasting Devices
738#
739# CONFIG_DVB is not set
740# CONFIG_USB_DABUSB is not set
741
742#
743# Graphics support
744#
745# CONFIG_FB is not set
746
747#
748# Sound
749#
750# CONFIG_SOUND is not set
751
752#
753# USB support
754#
755CONFIG_USB_ARCH_HAS_HCD=y
756CONFIG_USB_ARCH_HAS_OHCI=y
757# CONFIG_USB_ARCH_HAS_EHCI is not set
758CONFIG_USB=y
759CONFIG_USB_DEBUG=y
760
761#
762# Miscellaneous USB options
763#
764CONFIG_USB_DEVICEFS=y
765# CONFIG_USB_BANDWIDTH is not set
766# CONFIG_USB_DYNAMIC_MINORS is not set
767# CONFIG_USB_OTG is not set
768
769#
770# USB Host Controller Drivers
771#
772# CONFIG_USB_ISP116X_HCD is not set
773CONFIG_USB_OHCI_HCD=y
774# CONFIG_USB_OHCI_BIG_ENDIAN is not set
775CONFIG_USB_OHCI_LITTLE_ENDIAN=y
776# CONFIG_USB_SL811_HCD is not set
777
778#
779# USB Device Class drivers
780#
781# CONFIG_USB_ACM is not set
782# CONFIG_USB_PRINTER is not set
783
784#
785# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
786#
787
788#
789# may also be needed; see USB_STORAGE Help for more information
790#
791# CONFIG_USB_STORAGE is not set
792# CONFIG_USB_LIBUSUAL is not set
793
794#
795# USB Input Devices
796#
797# CONFIG_USB_HID is not set
798
799#
800# USB HID Boot Protocol drivers
801#
802# CONFIG_USB_KBD is not set
803# CONFIG_USB_MOUSE is not set
804# CONFIG_USB_AIPTEK is not set
805# CONFIG_USB_WACOM is not set
806# CONFIG_USB_ACECAD is not set
807# CONFIG_USB_KBTAB is not set
808# CONFIG_USB_POWERMATE is not set
809# CONFIG_USB_TOUCHSCREEN is not set
810# CONFIG_USB_YEALINK is not set
811# CONFIG_USB_XPAD is not set
812# CONFIG_USB_ATI_REMOTE is not set
813# CONFIG_USB_ATI_REMOTE2 is not set
814# CONFIG_USB_KEYSPAN_REMOTE is not set
815# CONFIG_USB_APPLETOUCH is not set
816
817#
818# USB Imaging devices
819#
820# CONFIG_USB_MDC800 is not set
821
822#
823# USB Network Adapters
824#
825# CONFIG_USB_CATC is not set
826# CONFIG_USB_KAWETH is not set
827# CONFIG_USB_PEGASUS is not set
828# CONFIG_USB_RTL8150 is not set
829# CONFIG_USB_USBNET is not set
830CONFIG_USB_MON=y
831
832#
833# USB port drivers
834#
835
836#
837# USB Serial Converter support
838#
839# CONFIG_USB_SERIAL is not set
840
841#
842# USB Miscellaneous drivers
843#
844# CONFIG_USB_EMI62 is not set
845# CONFIG_USB_EMI26 is not set
846# CONFIG_USB_AUERSWALD is not set
847# CONFIG_USB_RIO500 is not set
848# CONFIG_USB_LEGOTOWER is not set
849# CONFIG_USB_LCD is not set
850# CONFIG_USB_LED is not set
851# CONFIG_USB_CY7C63 is not set
852# CONFIG_USB_CYTHERM is not set
853# CONFIG_USB_PHIDGETKIT is not set
854# CONFIG_USB_PHIDGETSERVO is not set
855# CONFIG_USB_IDMOUSE is not set
856# CONFIG_USB_APPLEDISPLAY is not set
857# CONFIG_USB_LD is not set
858# CONFIG_USB_TEST is not set
859
860#
861# USB DSL modem support
862#
863
864#
865# USB Gadget Support
866#
867CONFIG_USB_GADGET=y
868# CONFIG_USB_GADGET_DEBUG_FILES is not set
869CONFIG_USB_GADGET_SELECTED=y
870# CONFIG_USB_GADGET_NET2280 is not set
871# CONFIG_USB_GADGET_PXA2XX is not set
872# CONFIG_USB_GADGET_GOKU is not set
873# CONFIG_USB_GADGET_LH7A40X is not set
874# CONFIG_USB_GADGET_OMAP is not set
875CONFIG_USB_GADGET_AT91=y
876CONFIG_USB_AT91=y
877# CONFIG_USB_GADGET_DUMMY_HCD is not set
878# CONFIG_USB_GADGET_DUALSPEED is not set
879# CONFIG_USB_ZERO is not set
880# CONFIG_USB_ETH is not set
881# CONFIG_USB_GADGETFS is not set
882# CONFIG_USB_FILE_STORAGE is not set
883# CONFIG_USB_G_SERIAL is not set
884
885#
886# MMC/SD Card support
887#
888CONFIG_MMC=y
889# CONFIG_MMC_DEBUG is not set
890CONFIG_MMC_BLOCK=y
891CONFIG_MMC_AT91RM9200=y
892
893#
894# Real Time Clock
895#
896CONFIG_RTC_LIB=y
897# CONFIG_RTC_CLASS is not set
898
899#
900# File systems
901#
902CONFIG_EXT2_FS=y
903# CONFIG_EXT2_FS_XATTR is not set
904# CONFIG_EXT2_FS_XIP is not set
905# CONFIG_EXT3_FS is not set
906# CONFIG_REISERFS_FS is not set
907# CONFIG_JFS_FS is not set
908CONFIG_FS_POSIX_ACL=y
909# CONFIG_XFS_FS is not set
910# CONFIG_OCFS2_FS is not set
911# CONFIG_MINIX_FS is not set
912# CONFIG_ROMFS_FS is not set
913CONFIG_INOTIFY=y
914CONFIG_INOTIFY_USER=y
915# CONFIG_QUOTA is not set
916CONFIG_DNOTIFY=y
917# CONFIG_AUTOFS_FS is not set
918# CONFIG_AUTOFS4_FS is not set
919# CONFIG_FUSE_FS is not set
920
921#
922# CD-ROM/DVD Filesystems
923#
924# CONFIG_ISO9660_FS is not set
925# CONFIG_UDF_FS is not set
926
927#
928# DOS/FAT/NT Filesystems
929#
930# CONFIG_MSDOS_FS is not set
931# CONFIG_VFAT_FS is not set
932# CONFIG_NTFS_FS is not set
933
934#
935# Pseudo filesystems
936#
937CONFIG_PROC_FS=y
938CONFIG_SYSFS=y
939CONFIG_TMPFS=y
940# CONFIG_HUGETLB_PAGE is not set
941CONFIG_RAMFS=y
942# CONFIG_CONFIGFS_FS is not set
943
944#
945# Miscellaneous filesystems
946#
947# CONFIG_ADFS_FS is not set
948# CONFIG_AFFS_FS is not set
949# CONFIG_HFS_FS is not set
950# CONFIG_HFSPLUS_FS is not set
951# CONFIG_BEFS_FS is not set
952# CONFIG_BFS_FS is not set
953# CONFIG_EFS_FS is not set
954# CONFIG_JFFS_FS is not set
955# CONFIG_JFFS2_FS is not set
956CONFIG_CRAMFS=y
957# CONFIG_VXFS_FS is not set
958# CONFIG_HPFS_FS is not set
959# CONFIG_QNX4FS_FS is not set
960# CONFIG_SYSV_FS is not set
961# CONFIG_UFS_FS is not set
962
963#
964# Network File Systems
965#
966CONFIG_NFS_FS=y
967CONFIG_NFS_V3=y
968CONFIG_NFS_V3_ACL=y
969# CONFIG_NFS_V4 is not set
970# CONFIG_NFS_DIRECTIO is not set
971# CONFIG_NFSD is not set
972CONFIG_ROOT_NFS=y
973CONFIG_LOCKD=y
974CONFIG_LOCKD_V4=y
975CONFIG_NFS_ACL_SUPPORT=y
976CONFIG_NFS_COMMON=y
977CONFIG_SUNRPC=y
978# CONFIG_RPCSEC_GSS_KRB5 is not set
979# CONFIG_RPCSEC_GSS_SPKM3 is not set
980# CONFIG_SMB_FS is not set
981# CONFIG_CIFS is not set
982# CONFIG_NCP_FS is not set
983# CONFIG_CODA_FS is not set
984# CONFIG_AFS_FS is not set
985# CONFIG_9P_FS is not set
986
987#
988# Partition Types
989#
990# CONFIG_PARTITION_ADVANCED is not set
991CONFIG_MSDOS_PARTITION=y
992
993#
994# Native Language Support
995#
996# CONFIG_NLS is not set
997
998#
999# Profiling support
1000#
1001# CONFIG_PROFILING is not set
1002
1003#
1004# Kernel hacking
1005#
1006# CONFIG_PRINTK_TIME is not set
1007# CONFIG_MAGIC_SYSRQ is not set
1008CONFIG_DEBUG_KERNEL=y
1009CONFIG_LOG_BUF_SHIFT=14
1010CONFIG_DETECT_SOFTLOCKUP=y
1011# CONFIG_SCHEDSTATS is not set
1012# CONFIG_DEBUG_SLAB is not set
1013# CONFIG_DEBUG_MUTEXES is not set
1014# CONFIG_DEBUG_SPINLOCK is not set
1015# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1016# CONFIG_DEBUG_KOBJECT is not set
1017CONFIG_DEBUG_BUGVERBOSE=y
1018# CONFIG_DEBUG_INFO is not set
1019# CONFIG_DEBUG_FS is not set
1020# CONFIG_DEBUG_VM is not set
1021CONFIG_FRAME_POINTER=y
1022# CONFIG_UNWIND_INFO is not set
1023CONFIG_FORCED_INLINING=y
1024# CONFIG_RCU_TORTURE_TEST is not set
1025CONFIG_DEBUG_USER=y
1026# CONFIG_DEBUG_WAITQ is not set
1027# CONFIG_DEBUG_ERRORS is not set
1028CONFIG_DEBUG_LL=y
1029# CONFIG_DEBUG_ICEDCC is not set
1030
1031#
1032# Security options
1033#
1034# CONFIG_KEYS is not set
1035# CONFIG_SECURITY is not set
1036
1037#
1038# Cryptographic options
1039#
1040# CONFIG_CRYPTO is not set
1041
1042#
1043# Hardware crypto devices
1044#
1045
1046#
1047# Library routines
1048#
1049# CONFIG_CRC_CCITT is not set
1050# CONFIG_CRC16 is not set
1051CONFIG_CRC32=y
1052# CONFIG_LIBCRC32C is not set
1053CONFIG_ZLIB_INFLATE=y
diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
index e176613800..f20814e6f4 100644
--- a/arch/arm/configs/s3c2410_defconfig
+++ b/arch/arm/configs/s3c2410_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.17 3# Linux kernel version: 2.6.17-git9
4# Tue Jun 20 18:57:01 2006 4# Sun Jun 25 23:56:32 2006
5# 5#
6CONFIG_ARM=y 6CONFIG_ARM=y
7CONFIG_MMU=y 7CONFIG_MMU=y
@@ -49,7 +49,6 @@ CONFIG_SLAB=y
49# CONFIG_TINY_SHMEM is not set 49# CONFIG_TINY_SHMEM is not set
50CONFIG_BASE_SMALL=0 50CONFIG_BASE_SMALL=0
51# CONFIG_SLOB is not set 51# CONFIG_SLOB is not set
52CONFIG_OBSOLETE_INTERMODULE=y
53 52
54# 53#
55# Loadable module support 54# Loadable module support
@@ -81,18 +80,26 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
81# 80#
82# System Type 81# System Type
83# 82#
83# CONFIG_ARCH_AAEC2000 is not set
84# CONFIG_ARCH_INTEGRATOR is not set
85# CONFIG_ARCH_REALVIEW is not set
86# CONFIG_ARCH_VERSATILE is not set
87# CONFIG_ARCH_AT91RM9200 is not set
84# CONFIG_ARCH_CLPS7500 is not set 88# CONFIG_ARCH_CLPS7500 is not set
85# CONFIG_ARCH_CLPS711X is not set 89# CONFIG_ARCH_CLPS711X is not set
86# CONFIG_ARCH_CO285 is not set 90# CONFIG_ARCH_CO285 is not set
87# CONFIG_ARCH_EBSA110 is not set 91# CONFIG_ARCH_EBSA110 is not set
88# CONFIG_ARCH_EP93XX is not set 92# CONFIG_ARCH_EP93XX is not set
89# CONFIG_ARCH_FOOTBRIDGE is not set 93# CONFIG_ARCH_FOOTBRIDGE is not set
90# CONFIG_ARCH_INTEGRATOR is not set 94# CONFIG_ARCH_NETX is not set
95# CONFIG_ARCH_H720X is not set
96# CONFIG_ARCH_IMX is not set
91# CONFIG_ARCH_IOP3XX is not set 97# CONFIG_ARCH_IOP3XX is not set
92# CONFIG_ARCH_IXP4XX is not set 98# CONFIG_ARCH_IXP4XX is not set
93# CONFIG_ARCH_IXP2000 is not set 99# CONFIG_ARCH_IXP2000 is not set
94# CONFIG_ARCH_IXP23XX is not set 100# CONFIG_ARCH_IXP23XX is not set
95# CONFIG_ARCH_L7200 is not set 101# CONFIG_ARCH_L7200 is not set
102# CONFIG_ARCH_PNX4008 is not set
96# CONFIG_ARCH_PXA is not set 103# CONFIG_ARCH_PXA is not set
97# CONFIG_ARCH_RPC is not set 104# CONFIG_ARCH_RPC is not set
98# CONFIG_ARCH_SA1100 is not set 105# CONFIG_ARCH_SA1100 is not set
@@ -100,14 +107,6 @@ CONFIG_ARCH_S3C2410=y
100# CONFIG_ARCH_SHARK is not set 107# CONFIG_ARCH_SHARK is not set
101# CONFIG_ARCH_LH7A40X is not set 108# CONFIG_ARCH_LH7A40X is not set
102# CONFIG_ARCH_OMAP is not set 109# CONFIG_ARCH_OMAP is not set
103# CONFIG_ARCH_VERSATILE is not set
104# CONFIG_ARCH_REALVIEW is not set
105# CONFIG_ARCH_IMX is not set
106# CONFIG_ARCH_H720X is not set
107# CONFIG_ARCH_AAEC2000 is not set
108# CONFIG_ARCH_AT91RM9200 is not set
109# CONFIG_ARCH_PNX4008 is not set
110# CONFIG_ARCH_NETX is not set
111 110
112# 111#
113# S3C24XX Implementations 112# S3C24XX Implementations
@@ -123,11 +122,14 @@ CONFIG_ARCH_SMDK2410=y
123CONFIG_ARCH_S3C2440=y 122CONFIG_ARCH_S3C2440=y
124CONFIG_SMDK2440_CPU2440=y 123CONFIG_SMDK2440_CPU2440=y
125CONFIG_SMDK2440_CPU2442=y 124CONFIG_SMDK2440_CPU2442=y
125CONFIG_MACH_SMDK2413=y
126CONFIG_MACH_VR1000=y 126CONFIG_MACH_VR1000=y
127CONFIG_MACH_RX3715=y 127CONFIG_MACH_RX3715=y
128CONFIG_MACH_OTOM=y 128CONFIG_MACH_OTOM=y
129CONFIG_MACH_NEXCODER_2440=y 129CONFIG_MACH_NEXCODER_2440=y
130CONFIG_S3C2410_CLOCK=y
130CONFIG_CPU_S3C2410=y 131CONFIG_CPU_S3C2410=y
132CONFIG_CPU_S3C2412=y
131CONFIG_CPU_S3C244X=y 133CONFIG_CPU_S3C244X=y
132CONFIG_CPU_S3C2440=y 134CONFIG_CPU_S3C2440=y
133CONFIG_CPU_S3C2442=y 135CONFIG_CPU_S3C2442=y
@@ -153,8 +155,11 @@ CONFIG_S3C2410_LOWLEVEL_UART_PORT=0
153# 155#
154CONFIG_CPU_32=y 156CONFIG_CPU_32=y
155CONFIG_CPU_ARM920T=y 157CONFIG_CPU_ARM920T=y
158CONFIG_CPU_ARM926T=y
156CONFIG_CPU_32v4=y 159CONFIG_CPU_32v4=y
160CONFIG_CPU_32v5=y
157CONFIG_CPU_ABRT_EV4T=y 161CONFIG_CPU_ABRT_EV4T=y
162CONFIG_CPU_ABRT_EV5TJ=y
158CONFIG_CPU_CACHE_V4WT=y 163CONFIG_CPU_CACHE_V4WT=y
159CONFIG_CPU_CACHE_VIVT=y 164CONFIG_CPU_CACHE_VIVT=y
160CONFIG_CPU_COPY_V4WB=y 165CONFIG_CPU_COPY_V4WB=y
@@ -167,6 +172,7 @@ CONFIG_CPU_TLB_V4WBI=y
167# CONFIG_CPU_ICACHE_DISABLE is not set 172# CONFIG_CPU_ICACHE_DISABLE is not set
168# CONFIG_CPU_DCACHE_DISABLE is not set 173# CONFIG_CPU_DCACHE_DISABLE is not set
169# CONFIG_CPU_DCACHE_WRITETHROUGH is not set 174# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
175# CONFIG_CPU_CACHE_ROUND_ROBIN is not set
170 176
171# 177#
172# Bus support 178# Bus support
@@ -214,6 +220,7 @@ CONFIG_CMDLINE="root=/dev/hda1 ro init=/bin/bash console=ttySAC0"
214CONFIG_FPE_NWFPE=y 220CONFIG_FPE_NWFPE=y
215# CONFIG_FPE_NWFPE_XP is not set 221# CONFIG_FPE_NWFPE_XP is not set
216# CONFIG_FPE_FASTFPE is not set 222# CONFIG_FPE_FASTFPE is not set
223# CONFIG_VFP is not set
217 224
218# 225#
219# Userspace binary formats 226# Userspace binary formats
@@ -242,6 +249,8 @@ CONFIG_NET=y
242# CONFIG_NETDEBUG is not set 249# CONFIG_NETDEBUG is not set
243# CONFIG_PACKET is not set 250# CONFIG_PACKET is not set
244CONFIG_UNIX=y 251CONFIG_UNIX=y
252CONFIG_XFRM=y
253# CONFIG_XFRM_USER is not set
245# CONFIG_NET_KEY is not set 254# CONFIG_NET_KEY is not set
246CONFIG_INET=y 255CONFIG_INET=y
247# CONFIG_IP_MULTICAST is not set 256# CONFIG_IP_MULTICAST is not set
@@ -260,6 +269,8 @@ CONFIG_IP_PNP_BOOTP=y
260# CONFIG_INET_IPCOMP is not set 269# CONFIG_INET_IPCOMP is not set
261# CONFIG_INET_XFRM_TUNNEL is not set 270# CONFIG_INET_XFRM_TUNNEL is not set
262# CONFIG_INET_TUNNEL is not set 271# CONFIG_INET_TUNNEL is not set
272CONFIG_INET_XFRM_MODE_TRANSPORT=y
273CONFIG_INET_XFRM_MODE_TUNNEL=y
263CONFIG_INET_DIAG=y 274CONFIG_INET_DIAG=y
264CONFIG_INET_TCP_DIAG=y 275CONFIG_INET_TCP_DIAG=y
265# CONFIG_TCP_CONG_ADVANCED is not set 276# CONFIG_TCP_CONG_ADVANCED is not set
@@ -267,6 +278,7 @@ CONFIG_TCP_CONG_BIC=y
267# CONFIG_IPV6 is not set 278# CONFIG_IPV6 is not set
268# CONFIG_INET6_XFRM_TUNNEL is not set 279# CONFIG_INET6_XFRM_TUNNEL is not set
269# CONFIG_INET6_TUNNEL is not set 280# CONFIG_INET6_TUNNEL is not set
281# CONFIG_NETWORK_SECMARK is not set
270# CONFIG_NETFILTER is not set 282# CONFIG_NETFILTER is not set
271 283
272# 284#
@@ -321,6 +333,7 @@ CONFIG_STANDALONE=y
321CONFIG_PREVENT_FIRMWARE_BUILD=y 333CONFIG_PREVENT_FIRMWARE_BUILD=y
322# CONFIG_FW_LOADER is not set 334# CONFIG_FW_LOADER is not set
323# CONFIG_DEBUG_DRIVER is not set 335# CONFIG_DEBUG_DRIVER is not set
336# CONFIG_SYS_HYPERVISOR is not set
324 337
325# 338#
326# Connector - unified userspace <-> kernelspace linker 339# Connector - unified userspace <-> kernelspace linker
@@ -408,10 +421,12 @@ CONFIG_MTD_BAST_MAXSIZE=4
408# 421#
409CONFIG_MTD_NAND=y 422CONFIG_MTD_NAND=y
410# CONFIG_MTD_NAND_VERIFY_WRITE is not set 423# CONFIG_MTD_NAND_VERIFY_WRITE is not set
424# CONFIG_MTD_NAND_ECC_SMC is not set
411CONFIG_MTD_NAND_IDS=y 425CONFIG_MTD_NAND_IDS=y
412CONFIG_MTD_NAND_S3C2410=y 426CONFIG_MTD_NAND_S3C2410=y
413# CONFIG_MTD_NAND_S3C2410_DEBUG is not set 427# CONFIG_MTD_NAND_S3C2410_DEBUG is not set
414# CONFIG_MTD_NAND_S3C2410_HWECC is not set 428# CONFIG_MTD_NAND_S3C2410_HWECC is not set
429# CONFIG_MTD_NAND_S3C2410_CLKSTOP is not set
415# CONFIG_MTD_NAND_DISKONCHIP is not set 430# CONFIG_MTD_NAND_DISKONCHIP is not set
416# CONFIG_MTD_NAND_NANDSIM is not set 431# CONFIG_MTD_NAND_NANDSIM is not set
417 432
@@ -425,8 +440,8 @@ CONFIG_MTD_NAND_S3C2410=y
425# 440#
426CONFIG_PARPORT=y 441CONFIG_PARPORT=y
427# CONFIG_PARPORT_PC is not set 442# CONFIG_PARPORT_PC is not set
428# CONFIG_PARPORT_ARC is not set
429# CONFIG_PARPORT_GSC is not set 443# CONFIG_PARPORT_GSC is not set
444# CONFIG_PARPORT_AX88796 is not set
430CONFIG_PARPORT_1284=y 445CONFIG_PARPORT_1284=y
431 446
432# 447#
@@ -735,6 +750,7 @@ CONFIG_I2C_ALGOBIT=m
735# 750#
736# CONFIG_I2C_ELEKTOR is not set 751# CONFIG_I2C_ELEKTOR is not set
737CONFIG_I2C_ISA=m 752CONFIG_I2C_ISA=m
753# CONFIG_I2C_OCORES is not set
738# CONFIG_I2C_PARPORT is not set 754# CONFIG_I2C_PARPORT is not set
739# CONFIG_I2C_PARPORT_LIGHT is not set 755# CONFIG_I2C_PARPORT_LIGHT is not set
740CONFIG_I2C_S3C2410=y 756CONFIG_I2C_S3C2410=y
@@ -765,13 +781,13 @@ CONFIG_SENSORS_EEPROM=m
765# 781#
766# Dallas's 1-wire bus 782# Dallas's 1-wire bus
767# 783#
768# CONFIG_W1 is not set
769 784
770# 785#
771# Hardware Monitoring support 786# Hardware Monitoring support
772# 787#
773CONFIG_HWMON=y 788CONFIG_HWMON=y
774CONFIG_HWMON_VID=m 789CONFIG_HWMON_VID=m
790# CONFIG_SENSORS_ABITUGURU is not set
775# CONFIG_SENSORS_ADM1021 is not set 791# CONFIG_SENSORS_ADM1021 is not set
776# CONFIG_SENSORS_ADM1025 is not set 792# CONFIG_SENSORS_ADM1025 is not set
777# CONFIG_SENSORS_ADM1026 is not set 793# CONFIG_SENSORS_ADM1026 is not set
@@ -799,8 +815,10 @@ CONFIG_SENSORS_LM85=m
799# CONFIG_SENSORS_MAX1619 is not set 815# CONFIG_SENSORS_MAX1619 is not set
800# CONFIG_SENSORS_PC87360 is not set 816# CONFIG_SENSORS_PC87360 is not set
801# CONFIG_SENSORS_SMSC47M1 is not set 817# CONFIG_SENSORS_SMSC47M1 is not set
818# CONFIG_SENSORS_SMSC47M192 is not set
802# CONFIG_SENSORS_SMSC47B397 is not set 819# CONFIG_SENSORS_SMSC47B397 is not set
803# CONFIG_SENSORS_W83781D is not set 820# CONFIG_SENSORS_W83781D is not set
821# CONFIG_SENSORS_W83791D is not set
804# CONFIG_SENSORS_W83792D is not set 822# CONFIG_SENSORS_W83792D is not set
805# CONFIG_SENSORS_W83L785TS is not set 823# CONFIG_SENSORS_W83L785TS is not set
806# CONFIG_SENSORS_W83627HF is not set 824# CONFIG_SENSORS_W83627HF is not set
@@ -845,6 +863,7 @@ CONFIG_FB_CFB_COPYAREA=y
845CONFIG_FB_CFB_IMAGEBLIT=y 863CONFIG_FB_CFB_IMAGEBLIT=y
846# CONFIG_FB_MACMODES is not set 864# CONFIG_FB_MACMODES is not set
847CONFIG_FB_FIRMWARE_EDID=y 865CONFIG_FB_FIRMWARE_EDID=y
866# CONFIG_FB_BACKLIGHT is not set
848CONFIG_FB_MODE_HELPERS=y 867CONFIG_FB_MODE_HELPERS=y
849# CONFIG_FB_TILEBLITTING is not set 868# CONFIG_FB_TILEBLITTING is not set
850# CONFIG_FB_S1D13XXX is not set 869# CONFIG_FB_S1D13XXX is not set
@@ -976,10 +995,12 @@ CONFIG_USB_MON=y
976# CONFIG_USB_LEGOTOWER is not set 995# CONFIG_USB_LEGOTOWER is not set
977# CONFIG_USB_LCD is not set 996# CONFIG_USB_LCD is not set
978# CONFIG_USB_LED is not set 997# CONFIG_USB_LED is not set
998# CONFIG_USB_CY7C63 is not set
979# CONFIG_USB_CYTHERM is not set 999# CONFIG_USB_CYTHERM is not set
980# CONFIG_USB_PHIDGETKIT is not set 1000# CONFIG_USB_PHIDGETKIT is not set
981# CONFIG_USB_PHIDGETSERVO is not set 1001# CONFIG_USB_PHIDGETSERVO is not set
982# CONFIG_USB_IDMOUSE is not set 1002# CONFIG_USB_IDMOUSE is not set
1003# CONFIG_USB_APPLEDISPLAY is not set
983# CONFIG_USB_LD is not set 1004# CONFIG_USB_LD is not set
984# CONFIG_USB_TEST is not set 1005# CONFIG_USB_TEST is not set
985 1006
@@ -1024,6 +1045,7 @@ CONFIG_FS_MBCACHE=y
1024# CONFIG_MINIX_FS is not set 1045# CONFIG_MINIX_FS is not set
1025CONFIG_ROMFS_FS=y 1046CONFIG_ROMFS_FS=y
1026CONFIG_INOTIFY=y 1047CONFIG_INOTIFY=y
1048CONFIG_INOTIFY_USER=y
1027# CONFIG_QUOTA is not set 1049# CONFIG_QUOTA is not set
1028CONFIG_DNOTIFY=y 1050CONFIG_DNOTIFY=y
1029# CONFIG_AUTOFS_FS is not set 1051# CONFIG_AUTOFS_FS is not set
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index b5bcebca1c..75af6d6e2f 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -340,7 +340,7 @@ sys_mmap2:
340 streq r5, [sp, #4] 340 streq r5, [sp, #4]
341 beq do_mmap2 341 beq do_mmap2
342 mov r0, #-EINVAL 342 mov r0, #-EINVAL
343 RETINSTR(mov,pc, lr) 343 mov pc, lr
344#else 344#else
345 str r5, [sp, #4] 345 str r5, [sp, #4]
346 b do_mmap2 346 b do_mmap2
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index adf62e5eaa..2af7e44218 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -39,7 +39,7 @@
39 __INIT 39 __INIT
40 .type stext, %function 40 .type stext, %function
41ENTRY(stext) 41ENTRY(stext)
42 msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC @ ensure svc mode 42 msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode
43 @ and irqs disabled 43 @ and irqs disabled
44 mrc p15, 0, r9, c0, c0 @ get processor id 44 mrc p15, 0, r9, c0, c0 @ get processor id
45 bl __lookup_processor_type @ r5=procinfo r9=cpuid 45 bl __lookup_processor_type @ r5=procinfo r9=cpuid
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 04f7344e35..330b9476c3 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -71,7 +71,7 @@
71 __INIT 71 __INIT
72 .type stext, %function 72 .type stext, %function
73ENTRY(stext) 73ENTRY(stext)
74 msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC @ ensure svc mode 74 msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode
75 @ and irqs disabled 75 @ and irqs disabled
76 mrc p15, 0, r9, c0, c0 @ get processor id 76 mrc p15, 0, r9, c0, c0 @ get processor id
77 bl __lookup_processor_type @ r5=procinfo r9=cpuid 77 bl __lookup_processor_type @ r5=procinfo r9=cpuid
@@ -104,7 +104,7 @@ ENTRY(secondary_startup)
104 * the processor type - there is no need to check the machine type 104 * the processor type - there is no need to check the machine type
105 * as it has already been validated by the primary processor. 105 * as it has already been validated by the primary processor.
106 */ 106 */
107 msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC 107 msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
108 mrc p15, 0, r9, c0, c0 @ get processor id 108 mrc p15, 0, r9, c0, c0 @ get processor id
109 bl __lookup_processor_type 109 bl __lookup_processor_type
110 movs r10, r5 @ invalid processor? 110 movs r10, r5 @ invalid processor?
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 9fc9af88c6..093ccba050 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -808,7 +808,7 @@ static int __init topology_init(void)
808 int cpu; 808 int cpu;
809 809
810 for_each_possible_cpu(cpu) 810 for_each_possible_cpu(cpu)
811 register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu, NULL); 811 register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);
812 812
813 return 0; 813 return 0;
814} 814}
diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S
index 16153c86c3..058b80d72a 100644
--- a/arch/arm/lib/backtrace.S
+++ b/arch/arm/lib/backtrace.S
@@ -41,7 +41,7 @@ ENTRY(c_backtrace)
41 movne r0, #0 41 movne r0, #0
42 movs frame, r0 42 movs frame, r0
431: moveq r0, #-2 431: moveq r0, #-2
44 LOADREGS(eqfd, sp!, {r4 - r8, pc}) 44 ldmeqfd sp!, {r4 - r8, pc}
45 45
462: stmfd sp!, {pc} @ calculate offset of PC in STMIA instruction 462: stmfd sp!, {pc} @ calculate offset of PC in STMIA instruction
47 ldr r0, [sp], #4 47 ldr r0, [sp], #4
@@ -85,7 +85,7 @@ ENTRY(c_backtrace)
85 * A zero next framepointer means we're done. 85 * A zero next framepointer means we're done.
86 */ 86 */
87 teq next, #0 87 teq next, #0
88 LOADREGS(eqfd, sp!, {r4 - r8, pc}) 88 ldmeqfd sp!, {r4 - r8, pc}
89 89
90 /* 90 /*
91 * The next framepointer must be above the 91 * The next framepointer must be above the
@@ -104,7 +104,7 @@ ENTRY(c_backtrace)
1041007: ldr r0, =.Lbad 1041007: ldr r0, =.Lbad
105 mov r1, frame 105 mov r1, frame
106 bl printk 106 bl printk
107 LOADREGS(fd, sp!, {r4 - r8, pc}) 107 ldmfd sp!, {r4 - r8, pc}
108 .ltorg 108 .ltorg
109 .previous 109 .previous
110 110
@@ -145,7 +145,7 @@ ENTRY(c_backtrace)
145 adrne r0, .Lcr 145 adrne r0, .Lcr
146 blne printk 146 blne printk
147 mov r0, stack 147 mov r0, stack
148 LOADREGS(fd, sp!, {instr, reg, stack, r7, r8, pc}) 148 ldmfd sp!, {instr, reg, stack, r7, r8, pc}
149 149
150.Lfp: .asciz " r%d = %08X%c" 150.Lfp: .asciz " r%d = %08X%c"
151.Lcr: .asciz "\n" 151.Lcr: .asciz "\n"
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
index 7ff9f831b3..ea435ae2e4 100644
--- a/arch/arm/lib/clear_user.S
+++ b/arch/arm/lib/clear_user.S
@@ -43,10 +43,10 @@ USER( strnebt r2, [r0], #1)
43 tst r1, #1 @ x1 x0 x1 x0 x1 x0 x1 43 tst r1, #1 @ x1 x0 x1 x0 x1 x0 x1
44USER( strnebt r2, [r0], #1) 44USER( strnebt r2, [r0], #1)
45 mov r0, #0 45 mov r0, #0
46 LOADREGS(fd,sp!, {r1, pc}) 46 ldmfd sp!, {r1, pc}
47 47
48 .section .fixup,"ax" 48 .section .fixup,"ax"
49 .align 0 49 .align 0
509001: LOADREGS(fd,sp!, {r0, pc}) 509001: ldmfd sp!, {r0, pc}
51 .previous 51 .previous
52 52
diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
index 6811796848..666c99cc07 100644
--- a/arch/arm/lib/copy_page.S
+++ b/arch/arm/lib/copy_page.S
@@ -43,4 +43,4 @@ ENTRY(copy_page)
43 bgt 1b @ 1 43 bgt 1b @ 1
44 PLD( ldmeqia r1!, {r3, r4, ip, lr} ) 44 PLD( ldmeqia r1!, {r3, r4, ip, lr} )
45 PLD( beq 2b ) 45 PLD( beq 2b )
46 LOADREGS(fd, sp!, {r4, pc}) @ 3 46 ldmfd sp!, {r4, pc} @ 3
diff --git a/arch/arm/lib/csumipv6.S b/arch/arm/lib/csumipv6.S
index 7065a20ee8..9621469bee 100644
--- a/arch/arm/lib/csumipv6.S
+++ b/arch/arm/lib/csumipv6.S
@@ -28,5 +28,5 @@ ENTRY(__csum_ipv6_magic)
28 adcs r0, r0, r3 28 adcs r0, r0, r3
29 adcs r0, r0, r2 29 adcs r0, r0, r2
30 adcs r0, r0, #0 30 adcs r0, r0, #0
31 LOADREGS(fd, sp!, {pc}) 31 ldmfd sp!, {pc}
32 32
diff --git a/arch/arm/lib/delay.S b/arch/arm/lib/delay.S
index 9183b06c0e..930a702592 100644
--- a/arch/arm/lib/delay.S
+++ b/arch/arm/lib/delay.S
@@ -31,7 +31,7 @@ ENTRY(__const_udelay) @ 0 <= r0 <= 0x7fffff06
31 mov r2, r2, lsr #10 @ max = 0x00007fff 31 mov r2, r2, lsr #10 @ max = 0x00007fff
32 mul r0, r2, r0 @ max = 2^32-1 32 mul r0, r2, r0 @ max = 2^32-1
33 movs r0, r0, lsr #6 33 movs r0, r0, lsr #6
34 RETINSTR(moveq,pc,lr) 34 moveq pc, lr
35 35
36/* 36/*
37 * loops = r0 * HZ * loops_per_jiffy / 1000000 37 * loops = r0 * HZ * loops_per_jiffy / 1000000
@@ -43,20 +43,20 @@ ENTRY(__const_udelay) @ 0 <= r0 <= 0x7fffff06
43ENTRY(__delay) 43ENTRY(__delay)
44 subs r0, r0, #1 44 subs r0, r0, #1
45#if 0 45#if 0
46 RETINSTR(movls,pc,lr) 46 movls pc, lr
47 subs r0, r0, #1 47 subs r0, r0, #1
48 RETINSTR(movls,pc,lr) 48 movls pc, lr
49 subs r0, r0, #1 49 subs r0, r0, #1
50 RETINSTR(movls,pc,lr) 50 movls pc, lr
51 subs r0, r0, #1 51 subs r0, r0, #1
52 RETINSTR(movls,pc,lr) 52 movls pc, lr
53 subs r0, r0, #1 53 subs r0, r0, #1
54 RETINSTR(movls,pc,lr) 54 movls pc, lr
55 subs r0, r0, #1 55 subs r0, r0, #1
56 RETINSTR(movls,pc,lr) 56 movls pc, lr
57 subs r0, r0, #1 57 subs r0, r0, #1
58 RETINSTR(movls,pc,lr) 58 movls pc, lr
59 subs r0, r0, #1 59 subs r0, r0, #1
60#endif 60#endif
61 bhi __delay 61 bhi __delay
62 RETINSTR(mov,pc,lr) 62 mov pc, lr
diff --git a/arch/arm/lib/ecard.S b/arch/arm/lib/ecard.S
index fb7b602a6f..c55aaa2a20 100644
--- a/arch/arm/lib/ecard.S
+++ b/arch/arm/lib/ecard.S
@@ -29,7 +29,7 @@ ENTRY(ecard_loader_read)
29 CPSR2SPSR(r0) 29 CPSR2SPSR(r0)
30 mov lr, pc 30 mov lr, pc
31 mov pc, r2 31 mov pc, r2
32 LOADREGS(fd, sp!, {r4 - r12, pc}) 32 ldmfd sp!, {r4 - r12, pc}
33 33
34@ Purpose: call an expansion card loader to reset the card 34@ Purpose: call an expansion card loader to reset the card
35@ Proto : void read_loader(int card_base, char *loader); 35@ Proto : void read_loader(int card_base, char *loader);
@@ -41,5 +41,5 @@ ENTRY(ecard_loader_reset)
41 CPSR2SPSR(r0) 41 CPSR2SPSR(r0)
42 mov lr, pc 42 mov lr, pc
43 add pc, r1, #8 43 add pc, r1, #8
44 LOADREGS(fd, sp!, {r4 - r12, pc}) 44 ldmfd sp!, {r4 - r12, pc}
45 45
diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S
index 6f8e27a58c..a5ca0248aa 100644
--- a/arch/arm/lib/findbit.S
+++ b/arch/arm/lib/findbit.S
@@ -32,7 +32,7 @@ ENTRY(_find_first_zero_bit_le)
322: cmp r2, r1 @ any more? 322: cmp r2, r1 @ any more?
33 blo 1b 33 blo 1b
343: mov r0, r1 @ no free bits 343: mov r0, r1 @ no free bits
35 RETINSTR(mov,pc,lr) 35 mov pc, lr
36 36
37/* 37/*
38 * Purpose : Find next 'zero' bit 38 * Purpose : Find next 'zero' bit
@@ -66,7 +66,7 @@ ENTRY(_find_first_bit_le)
662: cmp r2, r1 @ any more? 662: cmp r2, r1 @ any more?
67 blo 1b 67 blo 1b
683: mov r0, r1 @ no free bits 683: mov r0, r1 @ no free bits
69 RETINSTR(mov,pc,lr) 69 mov pc, lr
70 70
71/* 71/*
72 * Purpose : Find next 'one' bit 72 * Purpose : Find next 'one' bit
@@ -98,7 +98,7 @@ ENTRY(_find_first_zero_bit_be)
982: cmp r2, r1 @ any more? 982: cmp r2, r1 @ any more?
99 blo 1b 99 blo 1b
1003: mov r0, r1 @ no free bits 1003: mov r0, r1 @ no free bits
101 RETINSTR(mov,pc,lr) 101 mov pc, lr
102 102
103ENTRY(_find_next_zero_bit_be) 103ENTRY(_find_next_zero_bit_be)
104 teq r1, #0 104 teq r1, #0
@@ -126,7 +126,7 @@ ENTRY(_find_first_bit_be)
1262: cmp r2, r1 @ any more? 1262: cmp r2, r1 @ any more?
127 blo 1b 127 blo 1b
1283: mov r0, r1 @ no free bits 1283: mov r0, r1 @ no free bits
129 RETINSTR(mov,pc,lr) 129 mov pc, lr
130 130
131ENTRY(_find_next_bit_be) 131ENTRY(_find_next_bit_be)
132 teq r1, #0 132 teq r1, #0
@@ -164,5 +164,5 @@ ENTRY(_find_next_bit_be)
164 addeq r2, r2, #1 164 addeq r2, r2, #1
165 mov r0, r2 165 mov r0, r2
166#endif 166#endif
167 RETINSTR(mov,pc,lr) 167 mov pc, lr
168 168
diff --git a/arch/arm/lib/io-readsb.S b/arch/arm/lib/io-readsb.S
index d3d8de71a2..fb966ad027 100644
--- a/arch/arm/lib/io-readsb.S
+++ b/arch/arm/lib/io-readsb.S
@@ -72,7 +72,7 @@ ENTRY(__raw_readsb)
72 bpl .Linsb_16_lp 72 bpl .Linsb_16_lp
73 73
74 tst r2, #15 74 tst r2, #15
75 LOADREGS(eqfd, sp!, {r4 - r6, pc}) 75 ldmeqfd sp!, {r4 - r6, pc}
76 76
77.Linsb_no_16: tst r2, #8 77.Linsb_no_16: tst r2, #8
78 beq .Linsb_no_8 78 beq .Linsb_no_8
@@ -109,7 +109,7 @@ ENTRY(__raw_readsb)
109 str r3, [r1], #4 109 str r3, [r1], #4
110 110
111.Linsb_no_4: ands r2, r2, #3 111.Linsb_no_4: ands r2, r2, #3
112 LOADREGS(eqfd, sp!, {r4 - r6, pc}) 112 ldmeqfd sp!, {r4 - r6, pc}
113 113
114 cmp r2, #2 114 cmp r2, #2
115 ldrb r3, [r0] 115 ldrb r3, [r0]
@@ -119,4 +119,4 @@ ENTRY(__raw_readsb)
119 ldrgtb r3, [r0] 119 ldrgtb r3, [r0]
120 strgtb r3, [r1] 120 strgtb r3, [r1]
121 121
122 LOADREGS(fd, sp!, {r4 - r6, pc}) 122 ldmfd sp!, {r4 - r6, pc}
diff --git a/arch/arm/lib/io-readsw-armv3.S b/arch/arm/lib/io-readsw-armv3.S
index 146d47c154..4ef9041851 100644
--- a/arch/arm/lib/io-readsw-armv3.S
+++ b/arch/arm/lib/io-readsw-armv3.S
@@ -28,7 +28,7 @@
28 strb r3, [r1], #1 28 strb r3, [r1], #1
29 29
30 subs r2, r2, #1 30 subs r2, r2, #1
31 RETINSTR(moveq, pc, lr) 31 moveq pc, lr
32 32
33ENTRY(__raw_readsw) 33ENTRY(__raw_readsw)
34 teq r2, #0 @ do we have to check for the zero len? 34 teq r2, #0 @ do we have to check for the zero len?
@@ -69,7 +69,7 @@ ENTRY(__raw_readsw)
69 bpl .Linsw_8_lp 69 bpl .Linsw_8_lp
70 70
71 tst r2, #7 71 tst r2, #7
72 LOADREGS(eqfd, sp!, {r4, r5, r6, pc}) 72 ldmeqfd sp!, {r4, r5, r6, pc}
73 73
74.Lno_insw_8: tst r2, #4 74.Lno_insw_8: tst r2, #4
75 beq .Lno_insw_4 75 beq .Lno_insw_4
@@ -102,6 +102,6 @@ ENTRY(__raw_readsw)
102 movne r3, r3, lsr #8 102 movne r3, r3, lsr #8
103 strneb r3, [r1] 103 strneb r3, [r1]
104 104
105 LOADREGS(fd, sp!, {r4, r5, r6, pc}) 105 ldmfd sp!, {r4, r5, r6, pc}
106 106
107 107
diff --git a/arch/arm/lib/io-writesb.S b/arch/arm/lib/io-writesb.S
index 08209fc640..7eba2b6cc6 100644
--- a/arch/arm/lib/io-writesb.S
+++ b/arch/arm/lib/io-writesb.S
@@ -64,7 +64,7 @@ ENTRY(__raw_writesb)
64 bpl .Loutsb_16_lp 64 bpl .Loutsb_16_lp
65 65
66 tst r2, #15 66 tst r2, #15
67 LOADREGS(eqfd, sp!, {r4, r5, pc}) 67 ldmeqfd sp!, {r4, r5, pc}
68 68
69.Loutsb_no_16: tst r2, #8 69.Loutsb_no_16: tst r2, #8
70 beq .Loutsb_no_8 70 beq .Loutsb_no_8
@@ -80,7 +80,7 @@ ENTRY(__raw_writesb)
80 outword r3 80 outword r3
81 81
82.Loutsb_no_4: ands r2, r2, #3 82.Loutsb_no_4: ands r2, r2, #3
83 LOADREGS(eqfd, sp!, {r4, r5, pc}) 83 ldmeqfd sp!, {r4, r5, pc}
84 84
85 cmp r2, #2 85 cmp r2, #2
86 ldrb r3, [r1], #1 86 ldrb r3, [r1], #1
@@ -90,4 +90,4 @@ ENTRY(__raw_writesb)
90 ldrgtb r3, [r1] 90 ldrgtb r3, [r1]
91 strgtb r3, [r0] 91 strgtb r3, [r0]
92 92
93 LOADREGS(fd, sp!, {r4, r5, pc}) 93 ldmfd sp!, {r4, r5, pc}
diff --git a/arch/arm/lib/io-writesw-armv3.S b/arch/arm/lib/io-writesw-armv3.S
index 52d62b4812..1607a29f49 100644
--- a/arch/arm/lib/io-writesw-armv3.S
+++ b/arch/arm/lib/io-writesw-armv3.S
@@ -29,7 +29,7 @@
29 orr r3, r3, r3, lsl #16 29 orr r3, r3, r3, lsl #16
30 str r3, [r0] 30 str r3, [r0]
31 subs r2, r2, #1 31 subs r2, r2, #1
32 RETINSTR(moveq, pc, lr) 32 moveq pc, lr
33 33
34ENTRY(__raw_writesw) 34ENTRY(__raw_writesw)
35 teq r2, #0 @ do we have to check for the zero len? 35 teq r2, #0 @ do we have to check for the zero len?
@@ -80,7 +80,7 @@ ENTRY(__raw_writesw)
80 bpl .Loutsw_8_lp 80 bpl .Loutsw_8_lp
81 81
82 tst r2, #7 82 tst r2, #7
83 LOADREGS(eqfd, sp!, {r4, r5, r6, pc}) 83 ldmeqfd sp!, {r4, r5, r6, pc}
84 84
85.Lno_outsw_8: tst r2, #4 85.Lno_outsw_8: tst r2, #4
86 beq .Lno_outsw_4 86 beq .Lno_outsw_4
@@ -124,4 +124,4 @@ ENTRY(__raw_writesw)
124 orrne ip, ip, ip, lsr #16 124 orrne ip, ip, ip, lsr #16
125 strne ip, [r0] 125 strne ip, [r0]
126 126
127 LOADREGS(fd, sp!, {r4, r5, r6, pc}) 127 ldmfd sp!, {r4, r5, r6, pc}
diff --git a/arch/arm/lib/memchr.S b/arch/arm/lib/memchr.S
index ac34fe55d2..e7ab1ea8eb 100644
--- a/arch/arm/lib/memchr.S
+++ b/arch/arm/lib/memchr.S
@@ -22,4 +22,4 @@ ENTRY(memchr)
22 bne 1b 22 bne 1b
23 sub r0, r0, #1 23 sub r0, r0, #1
242: movne r0, #0 242: movne r0, #0
25 RETINSTR(mov,pc,lr) 25 mov pc, lr
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index a1795f5999..95b110b07a 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -53,7 +53,7 @@ ENTRY(memset)
53 stmgeia r0!, {r1, r3, ip, lr} 53 stmgeia r0!, {r1, r3, ip, lr}
54 stmgeia r0!, {r1, r3, ip, lr} 54 stmgeia r0!, {r1, r3, ip, lr}
55 bgt 2b 55 bgt 2b
56 LOADREGS(eqfd, sp!, {pc}) @ Now <64 bytes to go. 56 ldmeqfd sp!, {pc} @ Now <64 bytes to go.
57/* 57/*
58 * No need to correct the count; we're only testing bits from now on 58 * No need to correct the count; we're only testing bits from now on
59 */ 59 */
@@ -77,4 +77,4 @@ ENTRY(memset)
77 strneb r1, [r0], #1 77 strneb r1, [r0], #1
78 tst r2, #1 78 tst r2, #1
79 strneb r1, [r0], #1 79 strneb r1, [r0], #1
80 RETINSTR(mov,pc,lr) 80 mov pc, lr
diff --git a/arch/arm/lib/memzero.S b/arch/arm/lib/memzero.S
index 51ccc60160..abf2508e82 100644
--- a/arch/arm/lib/memzero.S
+++ b/arch/arm/lib/memzero.S
@@ -53,7 +53,7 @@ ENTRY(__memzero)
53 stmgeia r0!, {r2, r3, ip, lr} @ 4 53 stmgeia r0!, {r2, r3, ip, lr} @ 4
54 stmgeia r0!, {r2, r3, ip, lr} @ 4 54 stmgeia r0!, {r2, r3, ip, lr} @ 4
55 bgt 3b @ 1 55 bgt 3b @ 1
56 LOADREGS(eqfd, sp!, {pc}) @ 1/2 quick exit 56 ldmeqfd sp!, {pc} @ 1/2 quick exit
57/* 57/*
58 * No need to correct the count; we're only testing bits from now on 58 * No need to correct the count; we're only testing bits from now on
59 */ 59 */
@@ -77,4 +77,4 @@ ENTRY(__memzero)
77 strneb r2, [r0], #1 @ 1 77 strneb r2, [r0], #1 @ 1
78 tst r1, #1 @ 1 a byte left over 78 tst r1, #1 @ 1 a byte left over
79 strneb r2, [r0], #1 @ 1 79 strneb r2, [r0], #1 @ 1
80 RETINSTR(mov,pc,lr) @ 1 80 mov pc, lr @ 1
diff --git a/arch/arm/lib/strchr.S b/arch/arm/lib/strchr.S
index 5b9b493733..9f18d6fdee 100644
--- a/arch/arm/lib/strchr.S
+++ b/arch/arm/lib/strchr.S
@@ -23,4 +23,4 @@ ENTRY(strchr)
23 teq r2, r1 23 teq r2, r1
24 movne r0, #0 24 movne r0, #0
25 subeq r0, r0, #1 25 subeq r0, r0, #1
26 RETINSTR(mov,pc,lr) 26 mov pc, lr
diff --git a/arch/arm/lib/strncpy_from_user.S b/arch/arm/lib/strncpy_from_user.S
index 629cc87752..35649f04fc 100644
--- a/arch/arm/lib/strncpy_from_user.S
+++ b/arch/arm/lib/strncpy_from_user.S
@@ -21,7 +21,6 @@
21 * -EFAULT on exception, or "len" if we fill the whole buffer 21 * -EFAULT on exception, or "len" if we fill the whole buffer
22 */ 22 */
23ENTRY(__arch_strncpy_from_user) 23ENTRY(__arch_strncpy_from_user)
24 save_lr
25 mov ip, r1 24 mov ip, r1
261: subs r2, r2, #1 251: subs r2, r2, #1
27USER( ldrplbt r3, [r1], #1) 26USER( ldrplbt r3, [r1], #1)
@@ -31,13 +30,13 @@ USER( ldrplbt r3, [r1], #1)
31 bne 1b 30 bne 1b
32 sub r1, r1, #1 @ take NUL character out of count 31 sub r1, r1, #1 @ take NUL character out of count
332: sub r0, r1, ip 322: sub r0, r1, ip
34 restore_pc 33 mov pc, lr
35 34
36 .section .fixup,"ax" 35 .section .fixup,"ax"
37 .align 0 36 .align 0
389001: mov r3, #0 379001: mov r3, #0
39 strb r3, [r0, #0] @ null terminate 38 strb r3, [r0, #0] @ null terminate
40 mov r0, #-EFAULT 39 mov r0, #-EFAULT
41 restore_pc 40 mov pc, lr
42 .previous 41 .previous
43 42
diff --git a/arch/arm/lib/strnlen_user.S b/arch/arm/lib/strnlen_user.S
index 67bcd82681..3668a15991 100644
--- a/arch/arm/lib/strnlen_user.S
+++ b/arch/arm/lib/strnlen_user.S
@@ -21,7 +21,6 @@
21 * or zero on exception, or n + 1 if too long 21 * or zero on exception, or n + 1 if too long
22 */ 22 */
23ENTRY(__arch_strnlen_user) 23ENTRY(__arch_strnlen_user)
24 save_lr
25 mov r2, r0 24 mov r2, r0
261: 251:
27USER( ldrbt r3, [r0], #1) 26USER( ldrbt r3, [r0], #1)
@@ -31,10 +30,10 @@ USER( ldrbt r3, [r0], #1)
31 bne 1b 30 bne 1b
32 add r0, r0, #1 31 add r0, r0, #1
332: sub r0, r0, r2 322: sub r0, r0, r2
34 restore_pc 33 mov pc, lr
35 34
36 .section .fixup,"ax" 35 .section .fixup,"ax"
37 .align 0 36 .align 0
389001: mov r0, #0 379001: mov r0, #0
39 restore_pc 38 mov pc, lr
40 .previous 39 .previous
diff --git a/arch/arm/lib/strrchr.S b/arch/arm/lib/strrchr.S
index fa923f026f..538df220aa 100644
--- a/arch/arm/lib/strrchr.S
+++ b/arch/arm/lib/strrchr.S
@@ -22,4 +22,4 @@ ENTRY(strrchr)
22 teq r2, #0 22 teq r2, #0
23 bne 1b 23 bne 1b
24 mov r0, r3 24 mov r0, r3
25 RETINSTR(mov,pc,lr) 25 mov pc, lr
diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
index 0cc450f863..1f1545d737 100644
--- a/arch/arm/lib/uaccess.S
+++ b/arch/arm/lib/uaccess.S
@@ -105,7 +105,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
105 movs ip, r2 105 movs ip, r2
106 bne .Lc2u_nowords 106 bne .Lc2u_nowords
107.Lc2u_finished: mov r0, #0 107.Lc2u_finished: mov r0, #0
108 LOADREGS(fd,sp!,{r2, r4 - r7, pc}) 108 ldmfd sp!, {r2, r4 - r7, pc}
109 109
110.Lc2u_src_not_aligned: 110.Lc2u_src_not_aligned:
111 bic r1, r1, #3 111 bic r1, r1, #3
@@ -280,7 +280,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
280 280
281 .section .fixup,"ax" 281 .section .fixup,"ax"
282 .align 0 282 .align 0
2839001: LOADREGS(fd,sp!, {r0, r4 - r7, pc}) 2839001: ldmfd sp!, {r0, r4 - r7, pc}
284 .previous 284 .previous
285 285
286/* Prototype: unsigned long __arch_copy_from_user(void *to,const void *from,unsigned long n); 286/* Prototype: unsigned long __arch_copy_from_user(void *to,const void *from,unsigned long n);
@@ -369,7 +369,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
369 bne .Lcfu_nowords 369 bne .Lcfu_nowords
370.Lcfu_finished: mov r0, #0 370.Lcfu_finished: mov r0, #0
371 add sp, sp, #8 371 add sp, sp, #8
372 LOADREGS(fd,sp!,{r4 - r7, pc}) 372 ldmfd sp!, {r4 - r7, pc}
373 373
374.Lcfu_src_not_aligned: 374.Lcfu_src_not_aligned:
375 bic r1, r1, #3 375 bic r1, r1, #3
@@ -556,6 +556,6 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
556 movne r1, r4 556 movne r1, r4
557 blne __memzero 557 blne __memzero
558 mov r0, r4 558 mov r0, r4
559 LOADREGS(fd,sp!, {r4 - r7, pc}) 559 ldmfd sp!, {r4 - r7, pc}
560 .previous 560 .previous
561 561
diff --git a/arch/arm/mach-at91rm9200/Kconfig b/arch/arm/mach-at91rm9200/Kconfig
index 1ab5b78283..70d402f76c 100644
--- a/arch/arm/mach-at91rm9200/Kconfig
+++ b/arch/arm/mach-at91rm9200/Kconfig
@@ -4,6 +4,12 @@ menu "AT91RM9200 Implementations"
4 4
5comment "AT91RM9200 Board Type" 5comment "AT91RM9200 Board Type"
6 6
7config MACH_ONEARM
8 bool "Ajeco 1ARM Single Board Computer"
9 depends on ARCH_AT91RM9200
10 help
11 Select this if you are using Ajeco's 1ARM Single Board Computer
12
7config ARCH_AT91RM9200DK 13config ARCH_AT91RM9200DK
8 bool "Atmel AT91RM9200-DK Development board" 14 bool "Atmel AT91RM9200-DK Development board"
9 depends on ARCH_AT91RM9200 15 depends on ARCH_AT91RM9200
diff --git a/arch/arm/mach-at91rm9200/Makefile b/arch/arm/mach-at91rm9200/Makefile
index 81ebc6684a..82db957322 100644
--- a/arch/arm/mach-at91rm9200/Makefile
+++ b/arch/arm/mach-at91rm9200/Makefile
@@ -10,6 +10,7 @@ obj- :=
10obj-$(CONFIG_PM) += pm.o 10obj-$(CONFIG_PM) += pm.o
11 11
12# Board-specific support 12# Board-specific support
13obj-$(CONFIG_MACH_ONEARM) += board-1arm.o
13obj-$(CONFIG_ARCH_AT91RM9200DK) += board-dk.o 14obj-$(CONFIG_ARCH_AT91RM9200DK) += board-dk.o
14obj-$(CONFIG_MACH_AT91RM9200EK) += board-ek.o 15obj-$(CONFIG_MACH_AT91RM9200EK) += board-ek.o
15obj-$(CONFIG_MACH_CSB337) += board-csb337.o 16obj-$(CONFIG_MACH_CSB337) += board-csb337.o
diff --git a/arch/arm/mach-at91rm9200/board-1arm.c b/arch/arm/mach-at91rm9200/board-1arm.c
new file mode 100644
index 0000000000..dc79e0992a
--- /dev/null
+++ b/arch/arm/mach-at91rm9200/board-1arm.c
@@ -0,0 +1,109 @@
1/*
2 * linux/arch/arm/mach-at91rm9200/board-1arm.c
3 *
4 * Copyright (C) 2005 SAN People
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/config.h>
22#include <linux/types.h>
23#include <linux/init.h>
24#include <linux/mm.h>
25#include <linux/module.h>
26#include <linux/platform_device.h>
27
28#include <asm/hardware.h>
29#include <asm/setup.h>
30#include <asm/mach-types.h>
31#include <asm/irq.h>
32
33#include <asm/mach/arch.h>
34#include <asm/mach/map.h>
35#include <asm/mach/irq.h>
36
37#include <asm/hardware.h>
38#include <asm/arch/board.h>
39#include <asm/arch/gpio.h>
40
41#include "generic.h"
42
43static void __init onearm_init_irq(void)
44{
45 /* Initialize AIC controller */
46 at91rm9200_init_irq(NULL);
47
48 /* Set up the GPIO interrupts */
49 at91_gpio_irq_setup(PQFP_GPIO_BANKS);
50}
51
52/*
53 * Serial port configuration.
54 * 0 .. 3 = USART0 .. USART3
55 * 4 = DBGU
56 */
57static struct at91_uart_config __initdata onearm_uart_config = {
58 .console_tty = 0, /* ttyS0 */
59 .nr_tty = 3,
60 .tty_map = { 4, 0, 1, -1, -1 }, /* ttyS0, ..., ttyS4 */
61};
62
63static void __init onearm_map_io(void)
64{
65 at91rm9200_map_io();
66
67 /* Initialize clocks: 18.432 MHz crystal */
68 at91_clock_init(18432000);
69
70 /* Setup the serial ports and console */
71 at91_init_serial(&onearm_uart_config);
72}
73
74static struct at91_eth_data __initdata onearm_eth_data = {
75 .phy_irq_pin = AT91_PIN_PC4,
76 .is_rmii = 1,
77};
78
79static struct at91_usbh_data __initdata onearm_usbh_data = {
80 .ports = 1,
81};
82
83static struct at91_udc_data __initdata onearm_udc_data = {
84 .vbus_pin = AT91_PIN_PC2,
85 .pullup_pin = AT91_PIN_PC3,
86};
87
88static void __init onearm_board_init(void)
89{
90 /* Serial */
91 at91_add_device_serial();
92 /* Ethernet */
93 at91_add_device_eth(&onearm_eth_data);
94 /* USB Host */
95 at91_add_device_usbh(&onearm_usbh_data);
96 /* USB Device */
97 at91_add_device_udc(&onearm_udc_data);
98}
99
100MACHINE_START(ONEARM, "Ajeco 1ARM single board computer")
101 /* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */
102 .phys_io = AT91_BASE_SYS,
103 .io_pg_offst = (AT91_VA_BASE_SYS >> 18) & 0xfffc,
104 .boot_params = AT91_SDRAM_BASE + 0x100,
105 .timer = &at91rm9200_timer,
106 .map_io = onearm_map_io,
107 .init_irq = onearm_init_irq,
108 .init_machine = onearm_board_init,
109MACHINE_END
diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig
index 3b23f43cb1..57f23b4653 100644
--- a/arch/arm/mach-ixp4xx/Kconfig
+++ b/arch/arm/mach-ixp4xx/Kconfig
@@ -35,7 +35,6 @@ config ARCH_ADI_COYOTE
35 35
36config ARCH_IXDP425 36config ARCH_IXDP425
37 bool "IXDP425" 37 bool "IXDP425"
38 select PCI
39 help 38 help
40 Say 'Y' here if you want your kernel to support Intel's 39 Say 'Y' here if you want your kernel to support Intel's
41 IXDP425 Development Platform (Also known as Richfield). 40 IXDP425 Development Platform (Also known as Richfield).
@@ -43,7 +42,6 @@ config ARCH_IXDP425
43 42
44config MACH_IXDPG425 43config MACH_IXDPG425
45 bool "IXDPG425" 44 bool "IXDPG425"
46 select PCI
47 help 45 help
48 Say 'Y' here if you want your kernel to support Intel's 46 Say 'Y' here if you want your kernel to support Intel's
49 IXDPG425 Development Platform (Also known as Montajade). 47 IXDPG425 Development Platform (Also known as Montajade).
@@ -51,7 +49,6 @@ config MACH_IXDPG425
51 49
52config MACH_IXDP465 50config MACH_IXDP465
53 bool "IXDP465" 51 bool "IXDP465"
54 select PCI
55 help 52 help
56 Say 'Y' here if you want your kernel to support Intel's 53 Say 'Y' here if you want your kernel to support Intel's
57 IXDP465 Development Platform (Also known as BMP). 54 IXDP465 Development Platform (Also known as BMP).
diff --git a/arch/arm/mach-ixp4xx/Makefile b/arch/arm/mach-ixp4xx/Makefile
index 5a4aaa0e0a..640315d8b9 100644
--- a/arch/arm/mach-ixp4xx/Makefile
+++ b/arch/arm/mach-ixp4xx/Makefile
@@ -2,13 +2,23 @@
2# Makefile for the linux kernel. 2# Makefile for the linux kernel.
3# 3#
4 4
5obj-pci-y :=
6obj-pci-n :=
7
8obj-pci-$(CONFIG_ARCH_IXDP4XX) += ixdp425-pci.o
9obj-pci-$(CONFIG_MACH_IXDPG425) += ixdpg425-pci.o
10obj-pci-$(CONFIG_ARCH_ADI_COYOTE) += coyote-pci.o
11obj-pci-$(CONFIG_MACH_GTWX5715) += gtwx5715-pci.o
12obj-pci-$(CONFIG_MACH_NSLU2) += nslu2-pci.o
13obj-pci-$(CONFIG_MACH_NAS100D) += nas100d-pci.o
14
5obj-y += common.o 15obj-y += common.o
6 16
7obj-$(CONFIG_PCI) += common-pci.o 17obj-$(CONFIG_ARCH_IXDP4XX) += ixdp425-setup.o
8obj-$(CONFIG_ARCH_IXDP4XX) += ixdp425-pci.o ixdp425-setup.o 18obj-$(CONFIG_MACH_IXDPG425) += coyote-setup.o
9obj-$(CONFIG_MACH_IXDPG425) += ixdpg425-pci.o coyote-setup.o 19obj-$(CONFIG_ARCH_ADI_COYOTE) += coyote-setup.o
10obj-$(CONFIG_ARCH_ADI_COYOTE) += coyote-pci.o coyote-setup.o 20obj-$(CONFIG_MACH_GTWX5715) += gtwx5715-setup.o
11obj-$(CONFIG_MACH_GTWX5715) += gtwx5715-pci.o gtwx5715-setup.o 21obj-$(CONFIG_MACH_NSLU2) += nslu2-setup.o nslu2-power.o
12obj-$(CONFIG_MACH_NSLU2) += nslu2-pci.o nslu2-setup.o nslu2-power.o 22obj-$(CONFIG_MACH_NAS100D) += nas100d-setup.o nas100d-power.o
13obj-$(CONFIG_MACH_NAS100D) += nas100d-pci.o nas100d-setup.o nas100d-power.o
14 23
24obj-$(CONFIG_PCI) += $(obj-pci-$(CONFIG_PCI)) common-pci.o
diff --git a/arch/arm/mach-pxa/sleep.S b/arch/arm/mach-pxa/sleep.S
index c9862688ff..0650bed3b9 100644
--- a/arch/arm/mach-pxa/sleep.S
+++ b/arch/arm/mach-pxa/sleep.S
@@ -189,7 +189,7 @@ ENTRY(pxa_cpu_suspend)
189 .data 189 .data
190 .align 5 190 .align 5
191ENTRY(pxa_cpu_resume) 191ENTRY(pxa_cpu_resume)
192 mov r0, #PSR_I_BIT | PSR_F_BIT | MODE_SVC @ set SVC, irqs off 192 mov r0, #PSR_I_BIT | PSR_F_BIT | SVC_MODE @ set SVC, irqs off
193 msr cpsr_c, r0 193 msr cpsr_c, r0
194 194
195 ldr r0, sleep_save_sp @ stack phys addr 195 ldr r0, sleep_save_sp @ stack phys addr
diff --git a/arch/arm/mach-s3c2410/Kconfig b/arch/arm/mach-s3c2410/Kconfig
index f5d9cd498a..b4171dd43d 100644
--- a/arch/arm/mach-s3c2410/Kconfig
+++ b/arch/arm/mach-s3c2410/Kconfig
@@ -71,13 +71,13 @@ config ARCH_S3C2440
71 Say Y here if you are using the SMDK2440. 71 Say Y here if you are using the SMDK2440.
72 72
73config SMDK2440_CPU2440 73config SMDK2440_CPU2440
74 bool "SMDK2440 with S3C2440 cpu module" 74 bool "SMDK2440 with S3C2440 CPU module"
75 depends on ARCH_S3C2440 75 depends on ARCH_S3C2440
76 default y if ARCH_S3C2440 76 default y if ARCH_S3C2440
77 select CPU_S3C2440 77 select CPU_S3C2440
78 78
79config SMDK2440_CPU2442 79config SMDK2440_CPU2442
80 bool "SMDM2440 with S3C2442 cpu module" 80 bool "SMDM2440 with S3C2442 CPU module"
81 depends on ARCH_S3C2440 81 depends on ARCH_S3C2440
82 select CPU_S3C2442 82 select CPU_S3C2442
83 83
diff --git a/arch/arm/mach-s3c2410/sleep.S b/arch/arm/mach-s3c2410/sleep.S
index 5f6761ed96..dc27167f4d 100644
--- a/arch/arm/mach-s3c2410/sleep.S
+++ b/arch/arm/mach-s3c2410/sleep.S
@@ -128,7 +128,7 @@ s3c2410_sleep_save_phys:
128 */ 128 */
129 129
130ENTRY(s3c2410_cpu_resume) 130ENTRY(s3c2410_cpu_resume)
131 mov r0, #PSR_I_BIT | PSR_F_BIT | MODE_SVC 131 mov r0, #PSR_I_BIT | PSR_F_BIT | SVC_MODE
132 msr cpsr_c, r0 132 msr cpsr_c, r0
133 133
134 @@ load UART to allow us to print the two characters for 134 @@ load UART to allow us to print the two characters for
diff --git a/arch/arm/mach-sa1100/sleep.S b/arch/arm/mach-sa1100/sleep.S
index 2fa1e289d1..5a84062f92 100644
--- a/arch/arm/mach-sa1100/sleep.S
+++ b/arch/arm/mach-sa1100/sleep.S
@@ -177,7 +177,7 @@ sa1110_sdram_controller_fix:
177 .data 177 .data
178 .align 5 178 .align 5
179ENTRY(sa1100_cpu_resume) 179ENTRY(sa1100_cpu_resume)
180 mov r0, #PSR_F_BIT | PSR_I_BIT | MODE_SVC 180 mov r0, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
181 msr cpsr_c, r0 @ set SVC, irqs off 181 msr cpsr_c, r0 @ set SVC, irqs off
182 182
183 ldr r0, sleep_save_sp @ stack phys addr 183 ldr r0, sleep_save_sp @ stack phys addr
diff --git a/arch/arm/mm/copypage-v3.S b/arch/arm/mm/copypage-v3.S
index 3c58ebbf03..2ee394b11b 100644
--- a/arch/arm/mm/copypage-v3.S
+++ b/arch/arm/mm/copypage-v3.S
@@ -35,7 +35,7 @@ ENTRY(v3_copy_user_page)
35 stmia r0!, {r3, r4, ip, lr} @ 4 35 stmia r0!, {r3, r4, ip, lr} @ 4
36 ldmneia r1!, {r3, r4, ip, lr} @ 4 36 ldmneia r1!, {r3, r4, ip, lr} @ 4
37 bne 1b @ 1 37 bne 1b @ 1
38 LOADREGS(fd, sp!, {r4, pc}) @ 3 38 ldmfd sp!, {r4, pc} @ 3
39 39
40 .align 5 40 .align 5
41/* 41/*
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index ee6f152987..09b1a41a6d 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -29,38 +29,6 @@
29#define TTB_RGN_WT (2 << 3) 29#define TTB_RGN_WT (2 << 3)
30#define TTB_RGN_WB (3 << 3) 30#define TTB_RGN_WB (3 << 3)
31 31
32 .macro cpsie, flags
33 .ifc \flags, f
34 .long 0xf1080040
35 .exitm
36 .endif
37 .ifc \flags, i
38 .long 0xf1080080
39 .exitm
40 .endif
41 .ifc \flags, if
42 .long 0xf10800c0
43 .exitm
44 .endif
45 .err
46 .endm
47
48 .macro cpsid, flags
49 .ifc \flags, f
50 .long 0xf10c0040
51 .exitm
52 .endif
53 .ifc \flags, i
54 .long 0xf10c0080
55 .exitm
56 .endif
57 .ifc \flags, if
58 .long 0xf10c00c0
59 .exitm
60 .endif
61 .err
62 .endm
63
64ENTRY(cpu_v6_proc_init) 32ENTRY(cpu_v6_proc_init)
65 mov pc, lr 33 mov pc, lr
66 34
diff --git a/arch/arm/nwfpe/entry26.S b/arch/arm/nwfpe/entry26.S
index 51940a96d6..3e6fb5d21d 100644
--- a/arch/arm/nwfpe/entry26.S
+++ b/arch/arm/nwfpe/entry26.S
@@ -26,7 +26,7 @@
26It is called from the kernel with code similar to this: 26It is called from the kernel with code similar to this:
27 27
28 mov fp, #0 28 mov fp, #0
29 teqp pc, #PSR_I_BIT | MODE_SVC 29 teqp pc, #PSR_I_BIT | SVC_MODE
30 ldr r4, .LC2 30 ldr r4, .LC2
31 ldr pc, [r4] @ Call FP module USR entry point 31 ldr pc, [r4] @ Call FP module USR entry point
32 32
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 6d7de9c041..e1372a2531 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -12,7 +12,7 @@
12# 12#
13# http://www.arm.linux.org.uk/developer/machines/?action=new 13# http://www.arm.linux.org.uk/developer/machines/?action=new
14# 14#
15# Last update: Mon May 8 20:11:05 2006 15# Last update: Mon Jun 26 22:26:08 2006
16# 16#
17# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number 17# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
18# 18#
@@ -566,8 +566,8 @@ switchgrass MACH_SWITCHGRASS SWITCHGRASS 549
566ens_cmu MACH_ENS_CMU ENS_CMU 550 566ens_cmu MACH_ENS_CMU ENS_CMU 550
567mm6_sdb MACH_MM6_SDB MM6_SDB 551 567mm6_sdb MACH_MM6_SDB MM6_SDB 551
568saturn MACH_SATURN SATURN 552 568saturn MACH_SATURN SATURN 552
569i30030evb MACH_ARGONPLUSEVB ARGONPLUSEVB 553 569i30030evb MACH_I30030EVB I30030EVB 553
570mxc27530evb MACH_SCMA11EVB SCMA11EVB 554 570mxc27530evb MACH_MXC27530EVB MXC27530EVB 554
571smdk2800 MACH_SMDK2800 SMDK2800 555 571smdk2800 MACH_SMDK2800 SMDK2800 555
572mtwilson MACH_MTWILSON MTWILSON 556 572mtwilson MACH_MTWILSON MTWILSON 556
573ziti MACH_ZITI ZITI 557 573ziti MACH_ZITI ZITI 557
@@ -647,7 +647,7 @@ sendt MACH_SENDT SENDT 630
647mx2jazz MACH_MX2JAZZ MX2JAZZ 631 647mx2jazz MACH_MX2JAZZ MX2JAZZ 631
648multiio MACH_MULTIIO MULTIIO 632 648multiio MACH_MULTIIO MULTIIO 632
649hrdisplay MACH_HRDISPLAY HRDISPLAY 633 649hrdisplay MACH_HRDISPLAY HRDISPLAY 633
650mxc27530ads MACH_SCMA11BB SCMA11BB 634 650mxc27530ads MACH_MXC27530ADS MXC27530ADS 634
651trizeps3 MACH_TRIZEPS3 TRIZEPS3 635 651trizeps3 MACH_TRIZEPS3 TRIZEPS3 635
652zefeerdza MACH_ZEFEERDZA ZEFEERDZA 636 652zefeerdza MACH_ZEFEERDZA ZEFEERDZA 636
653zefeerdzb MACH_ZEFEERDZB ZEFEERDZB 637 653zefeerdzb MACH_ZEFEERDZB ZEFEERDZB 637
@@ -721,7 +721,7 @@ gp32 MACH_GP32 GP32 706
721gem MACH_GEM GEM 707 721gem MACH_GEM GEM 707
722i858 MACH_I858 I858 708 722i858 MACH_I858 I858 708
723hx2750 MACH_HX2750 HX2750 709 723hx2750 MACH_HX2750 HX2750 709
724mxc91131evb MACH_ZEUSEVB ZEUSEVB 710 724mxc91131evb MACH_MXC91131EVB MXC91131EVB 710
725p700 MACH_P700 P700 711 725p700 MACH_P700 P700 711
726cpe MACH_CPE CPE 712 726cpe MACH_CPE CPE 712
727spitz MACH_SPITZ SPITZ 713 727spitz MACH_SPITZ SPITZ 713
@@ -802,7 +802,7 @@ cpuat91 MACH_CPUAT91 CPUAT91 787
802rea9200 MACH_REA9200 REA9200 788 802rea9200 MACH_REA9200 REA9200 788
803acts_pune_sa1110 MACH_ACTS_PUNE_SA1110 ACTS_PUNE_SA1110 789 803acts_pune_sa1110 MACH_ACTS_PUNE_SA1110 ACTS_PUNE_SA1110 789
804ixp425 MACH_IXP425 IXP425 790 804ixp425 MACH_IXP425 IXP425 790
805i30030ads MACH_ARGONPLUSODYSSEY ARGONPLUSODYSSEY 791 805i30030ads MACH_I30030ADS I30030ADS 791
806perch MACH_PERCH PERCH 792 806perch MACH_PERCH PERCH 792
807eis05r1 MACH_EIS05R1 EIS05R1 793 807eis05r1 MACH_EIS05R1 EIS05R1 793
808pepperpad MACH_PEPPERPAD PEPPERPAD 794 808pepperpad MACH_PEPPERPAD PEPPERPAD 794
@@ -930,7 +930,7 @@ netclient MACH_NETCLIENT NETCLIENT 916
930xscale_palmtt5 MACH_XSCALE_PALMTT5 XSCALE_PALMTT5 917 930xscale_palmtt5 MACH_XSCALE_PALMTT5 XSCALE_PALMTT5 917
931xscale_palmtc MACH_OMAP_PALMTC OMAP_PALMTC 918 931xscale_palmtc MACH_OMAP_PALMTC OMAP_PALMTC 918
932omap_apollon MACH_OMAP_APOLLON OMAP_APOLLON 919 932omap_apollon MACH_OMAP_APOLLON OMAP_APOLLON 919
933mxc30030evb MACH_ARGONLVEVB ARGONLVEVB 920 933mxc30030evb MACH_MXC30030EVB MXC30030EVB 920
934rea_2d MACH_REA_2D REA_2D 921 934rea_2d MACH_REA_2D REA_2D 921
935eti3e524 MACH_TI3E524 TI3E524 922 935eti3e524 MACH_TI3E524 TI3E524 922
936ateb9200 MACH_ATEB9200 ATEB9200 923 936ateb9200 MACH_ATEB9200 ATEB9200 923
@@ -986,7 +986,7 @@ redfox MACH_REDFOX REDFOX 972
986mysh_ep9315_1 MACH_MYSH_EP9315_1 MYSH_EP9315_1 973 986mysh_ep9315_1 MACH_MYSH_EP9315_1 MYSH_EP9315_1 973
987tpf106 MACH_TPF106 TPF106 974 987tpf106 MACH_TPF106 TPF106 974
988at91rm9200kg MACH_AT91RM9200KG AT91RM9200KG 975 988at91rm9200kg MACH_AT91RM9200KG AT91RM9200KG 975
989racemt2 MACH_SLEDB SLEDB 976 989rcmt2 MACH_SLEDB SLEDB 976
990ontrack MACH_ONTRACK ONTRACK 977 990ontrack MACH_ONTRACK ONTRACK 977
991pm1200 MACH_PM1200 PM1200 978 991pm1200 MACH_PM1200 PM1200 978
992ess24562 MACH_ESS24XXX ESS24XXX 979 992ess24562 MACH_ESS24XXX ESS24XXX 979
@@ -1022,7 +1022,7 @@ smdk2440 MACH_SMDK2440 SMDK2440 1008
1022smdk2412 MACH_SMDK2412 SMDK2412 1009 1022smdk2412 MACH_SMDK2412 SMDK2412 1009
1023webbox MACH_WEBBOX WEBBOX 1010 1023webbox MACH_WEBBOX WEBBOX 1010
1024cwwndp MACH_CWWNDP CWWNDP 1011 1024cwwndp MACH_CWWNDP CWWNDP 1011
1025dragon MACH_DRAGON DRAGON 1012 1025i839 MACH_DRAGON DRAGON 1012
1026opendo_cpu_board MACH_OPENDO_CPU_BOARD OPENDO_CPU_BOARD 1013 1026opendo_cpu_board MACH_OPENDO_CPU_BOARD OPENDO_CPU_BOARD 1013
1027ccm2200 MACH_CCM2200 CCM2200 1014 1027ccm2200 MACH_CCM2200 CCM2200 1014
1028etwarm MACH_ETWARM ETWARM 1015 1028etwarm MACH_ETWARM ETWARM 1015
@@ -1040,3 +1040,56 @@ edg79524 MACH_EDG79524 EDG79524 1026
1040ai2410 MACH_AI2410 AI2410 1027 1040ai2410 MACH_AI2410 AI2410 1027
1041ixp465 MACH_IXP465 IXP465 1028 1041ixp465 MACH_IXP465 IXP465 1028
1042balloon3 MACH_BALLOON3 BALLOON3 1029 1042balloon3 MACH_BALLOON3 BALLOON3 1029
1043heins MACH_HEINS HEINS 1030
1044mpluseva MACH_MPLUSEVA MPLUSEVA 1031
1045rt042 MACH_RT042 RT042 1032
1046cwiem MACH_CWIEM CWIEM 1033
1047cm_x270 MACH_CM_X270 CM_X270 1034
1048cm_x255 MACH_CM_X255 CM_X255 1035
1049esh_at91 MACH_ESH_AT91 ESH_AT91 1036
1050sandgate3 MACH_SANDGATE3 SANDGATE3 1037
1051primo MACH_PRIMO PRIMO 1038
1052gemstone MACH_GEMSTONE GEMSTONE 1039
1053pronghorn_metro MACH_PRONGHORNMETRO PRONGHORNMETRO 1040
1054sidewinder MACH_SIDEWINDER SIDEWINDER 1041
1055picomod1 MACH_PICOMOD1 PICOMOD1 1042
1056sg590 MACH_SG590 SG590 1043
1057akai9307 MACH_AKAI9307 AKAI9307 1044
1058fontaine MACH_FONTAINE FONTAINE 1045
1059wombat MACH_WOMBAT WOMBAT 1046
1060acq300 MACH_ACQ300 ACQ300 1047
1061mod_270 MACH_MOD_270 MOD_270 1048
1062vmc_vc0820 MACH_VC0820 VC0820 1049
1063ani_aim MACH_ANI_AIM ANI_AIM 1050
1064jellyfish MACH_JELLYFISH JELLYFISH 1051
1065amanita MACH_AMANITA AMANITA 1052
1066vlink MACH_VLINK VLINK 1053
1067dexflex MACH_DEXFLEX DEXFLEX 1054
1068eigen_ttq MACH_EIGEN_TTQ EIGEN_TTQ 1055
1069arcom_titan MACH_ARCOM_TITAN ARCOM_TITAN 1056
1070tabla MACH_TABLA TABLA 1057
1071mdirac3 MACH_MDIRAC3 MDIRAC3 1058
1072mrhfbp2 MACH_MRHFBP2 MRHFBP2 1059
1073at91rm9200rb MACH_AT91RM9200RB AT91RM9200RB 1060
1074ani_apm MACH_ANI_APM ANI_APM 1061
1075ella1 MACH_ELLA1 ELLA1 1062
1076inhand_pxa27x MACH_INHAND_PXA27X INHAND_PXA27X 1063
1077inhand_pxa25x MACH_INHAND_PXA25X INHAND_PXA25X 1064
1078empos_xm MACH_EMPOS_XM EMPOS_XM 1065
1079empos MACH_EMPOS EMPOS 1066
1080empos_tiny MACH_EMPOS_TINY EMPOS_TINY 1067
1081empos_sm MACH_EMPOS_SM EMPOS_SM 1068
1082egret MACH_EGRET EGRET 1069
1083ostrich MACH_OSTRICH OSTRICH 1070
1084n50 MACH_N50 N50 1071
1085ecbat91 MACH_ECBAT91 ECBAT91 1072
1086stareast MACH_STAREAST STAREAST 1073
1087dspg_dw MACH_DSPG_DW DSPG_DW 1074
1088onearm MACH_ONEARM ONEARM 1075
1089mrg110_6 MACH_MRG110_6 MRG110_6 1076
1090wrt300nv2 MACH_WRT300NV2 WRT300NV2 1077
1091xm_bulverde MACH_XM_BULVERDE XM_BULVERDE 1078
1092msm6100 MACH_MSM6100 MSM6100 1079
1093eti_b1 MACH_ETI_B1 ETI_B1 1080
1094za9l_series MACH_ZILOG_ZA9L ZILOG_ZA9L 1081
1095bit2440 MACH_BIT2440 BIT2440 1082
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 1596101cfa..3bb221db16 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -14,6 +14,10 @@ config X86_32
14 486, 586, Pentiums, and various instruction-set-compatible chips by 14 486, 586, Pentiums, and various instruction-set-compatible chips by
15 AMD, Cyrix, and others. 15 AMD, Cyrix, and others.
16 16
17config GENERIC_TIME
18 bool
19 default y
20
17config SEMAPHORE_SLEEPERS 21config SEMAPHORE_SLEEPERS
18 bool 22 bool
19 default y 23 default y
@@ -229,7 +233,7 @@ config NR_CPUS
229 233
230config SCHED_SMT 234config SCHED_SMT
231 bool "SMT (Hyperthreading) scheduler support" 235 bool "SMT (Hyperthreading) scheduler support"
232 depends on SMP 236 depends on X86_HT
233 help 237 help
234 SMT scheduler support improves the CPU scheduler's decision making 238 SMT scheduler support improves the CPU scheduler's decision making
235 when dealing with Intel Pentium 4 chips with HyperThreading at a 239 when dealing with Intel Pentium 4 chips with HyperThreading at a
@@ -238,7 +242,7 @@ config SCHED_SMT
238 242
239config SCHED_MC 243config SCHED_MC
240 bool "Multi-core scheduler support" 244 bool "Multi-core scheduler support"
241 depends on SMP 245 depends on X86_HT
242 default y 246 default y
243 help 247 help
244 Multi-core scheduler support improves the CPU scheduler's decision 248 Multi-core scheduler support improves the CPU scheduler's decision
@@ -324,6 +328,15 @@ config X86_MCE_P4THERMAL
324 Enabling this feature will cause a message to be printed when the P4 328 Enabling this feature will cause a message to be printed when the P4
325 enters thermal throttling. 329 enters thermal throttling.
326 330
331config VM86
332 default y
333 bool "Enable VM86 support" if EMBEDDED
334 help
335 This option is required by programs like DOSEMU to run 16-bit legacy
336 code on X86 processors. It also may be needed by software like
337 XFree86 to initialize some video cards via BIOS. Disabling this
338 option saves about 6k.
339
327config TOSHIBA 340config TOSHIBA
328 tristate "Toshiba Laptop support" 341 tristate "Toshiba Laptop support"
329 ---help--- 342 ---help---
@@ -721,7 +734,7 @@ config KEXEC
721 help 734 help
722 kexec is a system call that implements the ability to shutdown your 735 kexec is a system call that implements the ability to shutdown your
723 current kernel, and to start another kernel. It is like a reboot 736 current kernel, and to start another kernel. It is like a reboot
724 but it is indepedent of the system firmware. And like a reboot 737 but it is independent of the system firmware. And like a reboot
725 you can start any kernel with it, not just Linux. 738 you can start any kernel with it, not just Linux.
726 739
727 The name comes from the similiarity to the exec system call. 740 The name comes from the similiarity to the exec system call.
@@ -767,6 +780,17 @@ config HOTPLUG_CPU
767 enable suspend on SMP systems. CPUs can be controlled through 780 enable suspend on SMP systems. CPUs can be controlled through
768 /sys/devices/system/cpu. 781 /sys/devices/system/cpu.
769 782
783config COMPAT_VDSO
784 bool "Compat VDSO support"
785 default y
786 help
787 Map the VDSO to the predictable old-style address too.
788 ---help---
789 Say N here if you are running a sufficiently recent glibc
790 version (2.3.3 or later), to remove the high-mapped
791 VDSO mapping and to exclusively use the randomized VDSO.
792
793 If unsure, say Y.
770 794
771endmenu 795endmenu
772 796
@@ -1046,13 +1070,27 @@ config SCx200
1046 tristate "NatSemi SCx200 support" 1070 tristate "NatSemi SCx200 support"
1047 depends on !X86_VOYAGER 1071 depends on !X86_VOYAGER
1048 help 1072 help
1049 This provides basic support for the National Semiconductor SCx200 1073 This provides basic support for National Semiconductor's
1050 processor. Right now this is just a driver for the GPIO pins. 1074 (now AMD's) Geode processors. The driver probes for the
1075 PCI-IDs of several on-chip devices, so its a good dependency
1076 for other scx200_* drivers.
1051 1077
1052 If you don't know what to do here, say N. 1078 If compiled as a module, the driver is named scx200.
1053 1079
1054 This support is also available as a module. If compiled as a 1080config SCx200HR_TIMER
1055 module, it will be called scx200. 1081 tristate "NatSemi SCx200 27MHz High-Resolution Timer Support"
1082 depends on SCx200 && GENERIC_TIME
1083 default y
1084 help
1085 This driver provides a clocksource built upon the on-chip
1086 27MHz high-resolution timer. Its also a workaround for
1087 NSC Geode SC-1100's buggy TSC, which loses time when the
1088 processor goes idle (as is done by the scheduler). The
1089 other workaround is idle=poll boot option.
1090
1091config K8_NB
1092 def_bool y
1093 depends on AGP_AMD64
1056 1094
1057source "drivers/pcmcia/Kconfig" 1095source "drivers/pcmcia/Kconfig"
1058 1096
diff --git a/arch/i386/Kconfig.cpu b/arch/i386/Kconfig.cpu
index eb130482ba..21c9a4e711 100644
--- a/arch/i386/Kconfig.cpu
+++ b/arch/i386/Kconfig.cpu
@@ -41,7 +41,7 @@ config M386
41 - "GeodeGX1" for Geode GX1 (Cyrix MediaGX). 41 - "GeodeGX1" for Geode GX1 (Cyrix MediaGX).
42 - "Geode GX/LX" For AMD Geode GX and LX processors. 42 - "Geode GX/LX" For AMD Geode GX and LX processors.
43 - "CyrixIII/VIA C3" for VIA Cyrix III or VIA C3. 43 - "CyrixIII/VIA C3" for VIA Cyrix III or VIA C3.
44 - "VIA C3-2 for VIA C3-2 "Nehemiah" (model 9 and above). 44 - "VIA C3-2" for VIA C3-2 "Nehemiah" (model 9 and above).
45 45
46 If you don't know what to do, choose "386". 46 If you don't know what to do, choose "386".
47 47
diff --git a/arch/i386/boot/Makefile b/arch/i386/boot/Makefile
index 33e5547638..e979466260 100644
--- a/arch/i386/boot/Makefile
+++ b/arch/i386/boot/Makefile
@@ -109,8 +109,13 @@ fdimage288: $(BOOTIMAGE) $(obj)/mtools.conf
109isoimage: $(BOOTIMAGE) 109isoimage: $(BOOTIMAGE)
110 -rm -rf $(obj)/isoimage 110 -rm -rf $(obj)/isoimage
111 mkdir $(obj)/isoimage 111 mkdir $(obj)/isoimage
112 cp `echo /usr/lib*/syslinux/isolinux.bin | awk '{ print $1; }'` \ 112 for i in lib lib64 share end ; do \
113 $(obj)/isoimage 113 if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
114 cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
115 break ; \
116 fi ; \
117 if [ $$i = end ] ; then exit 1 ; fi ; \
118 done
114 cp $(BOOTIMAGE) $(obj)/isoimage/linux 119 cp $(BOOTIMAGE) $(obj)/isoimage/linux
115 echo '$(image_cmdline)' > $(obj)/isoimage/isolinux.cfg 120 echo '$(image_cmdline)' > $(obj)/isoimage/isolinux.cfg
116 if [ -f '$(FDINITRD)' ] ; then \ 121 if [ -f '$(FDINITRD)' ] ; then \
diff --git a/arch/i386/boot/compressed/misc.c b/arch/i386/boot/compressed/misc.c
index f19f3a7492..b2ccd54341 100644
--- a/arch/i386/boot/compressed/misc.c
+++ b/arch/i386/boot/compressed/misc.c
@@ -24,14 +24,6 @@
24 24
25#undef memset 25#undef memset
26#undef memcpy 26#undef memcpy
27
28/*
29 * Why do we do this? Don't ask me..
30 *
31 * Incomprehensible are the ways of bootloaders.
32 */
33static void* memset(void *, int, size_t);
34static void* memcpy(void *, __const void *, size_t);
35#define memzero(s, n) memset ((s), 0, (n)) 27#define memzero(s, n) memset ((s), 0, (n))
36 28
37typedef unsigned char uch; 29typedef unsigned char uch;
@@ -93,7 +85,7 @@ static unsigned char *real_mode; /* Pointer to real-mode data */
93#endif 85#endif
94#define RM_SCREEN_INFO (*(struct screen_info *)(real_mode+0)) 86#define RM_SCREEN_INFO (*(struct screen_info *)(real_mode+0))
95 87
96extern char input_data[]; 88extern unsigned char input_data[];
97extern int input_len; 89extern int input_len;
98 90
99static long bytes_out = 0; 91static long bytes_out = 0;
@@ -103,6 +95,9 @@ static unsigned long output_ptr = 0;
103static void *malloc(int size); 95static void *malloc(int size);
104static void free(void *where); 96static void free(void *where);
105 97
98static void *memset(void *s, int c, unsigned n);
99static void *memcpy(void *dest, const void *src, unsigned n);
100
106static void putstr(const char *); 101static void putstr(const char *);
107 102
108extern int end; 103extern int end;
@@ -205,7 +200,7 @@ static void putstr(const char *s)
205 outb_p(0xff & (pos >> 1), vidport+1); 200 outb_p(0xff & (pos >> 1), vidport+1);
206} 201}
207 202
208static void* memset(void* s, int c, size_t n) 203static void* memset(void* s, int c, unsigned n)
209{ 204{
210 int i; 205 int i;
211 char *ss = (char*)s; 206 char *ss = (char*)s;
@@ -214,14 +209,13 @@ static void* memset(void* s, int c, size_t n)
214 return s; 209 return s;
215} 210}
216 211
217static void* memcpy(void* __dest, __const void* __src, 212static void* memcpy(void* dest, const void* src, unsigned n)
218 size_t __n)
219{ 213{
220 int i; 214 int i;
221 char *d = (char *)__dest, *s = (char *)__src; 215 char *d = (char *)dest, *s = (char *)src;
222 216
223 for (i=0;i<__n;i++) d[i] = s[i]; 217 for (i=0;i<n;i++) d[i] = s[i];
224 return __dest; 218 return dest;
225} 219}
226 220
227/* =========================================================================== 221/* ===========================================================================
@@ -309,7 +303,7 @@ static void setup_normal_output_buffer(void)
309#else 303#else
310 if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < 1024) error("Less than 2MB of memory"); 304 if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < 1024) error("Less than 2MB of memory");
311#endif 305#endif
312 output_data = (char *)__PHYSICAL_START; /* Normally Points to 1M */ 306 output_data = (unsigned char *)__PHYSICAL_START; /* Normally Points to 1M */
313 free_mem_end_ptr = (long)real_mode; 307 free_mem_end_ptr = (long)real_mode;
314} 308}
315 309
@@ -324,11 +318,9 @@ static void setup_output_buffer_if_we_run_high(struct moveparams *mv)
324#ifdef STANDARD_MEMORY_BIOS_CALL 318#ifdef STANDARD_MEMORY_BIOS_CALL
325 if (RM_EXT_MEM_K < (3*1024)) error("Less than 4MB of memory"); 319 if (RM_EXT_MEM_K < (3*1024)) error("Less than 4MB of memory");
326#else 320#else
327 if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < 321 if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < (3*1024)) error("Less than 4MB of memory");
328 (3*1024))
329 error("Less than 4MB of memory");
330#endif 322#endif
331 mv->low_buffer_start = output_data = (char *)LOW_BUFFER_START; 323 mv->low_buffer_start = output_data = (unsigned char *)LOW_BUFFER_START;
332 low_buffer_end = ((unsigned int)real_mode > LOW_BUFFER_MAX 324 low_buffer_end = ((unsigned int)real_mode > LOW_BUFFER_MAX
333 ? LOW_BUFFER_MAX : (unsigned int)real_mode) & ~0xfff; 325 ? LOW_BUFFER_MAX : (unsigned int)real_mode) & ~0xfff;
334 low_buffer_size = low_buffer_end - LOW_BUFFER_START; 326 low_buffer_size = low_buffer_end - LOW_BUFFER_START;
diff --git a/arch/i386/boot/video.S b/arch/i386/boot/video.S
index c9343c3a80..8c2a6faeea 100644
--- a/arch/i386/boot/video.S
+++ b/arch/i386/boot/video.S
@@ -1929,7 +1929,7 @@ skip10: movb %ah, %al
1929 ret 1929 ret
1930 1930
1931store_edid: 1931store_edid:
1932#ifdef CONFIG_FB_FIRMWARE_EDID 1932#ifdef CONFIG_FIRMWARE_EDID
1933 pushw %es # just save all registers 1933 pushw %es # just save all registers
1934 pushw %ax 1934 pushw %ax
1935 pushw %bx 1935 pushw %bx
@@ -1947,6 +1947,22 @@ store_edid:
1947 rep 1947 rep
1948 stosl 1948 stosl
1949 1949
1950 pushw %es # save ES
1951 xorw %di, %di # Report Capability
1952 pushw %di
1953 popw %es # ES:DI must be 0:0
1954 movw $0x4f15, %ax
1955 xorw %bx, %bx
1956 xorw %cx, %cx
1957 int $0x10
1958 popw %es # restore ES
1959
1960 cmpb $0x00, %ah # call successful
1961 jne no_edid
1962
1963 cmpb $0x4f, %al # function supported
1964 jne no_edid
1965
1950 movw $0x4f15, %ax # do VBE/DDC 1966 movw $0x4f15, %ax # do VBE/DDC
1951 movw $0x01, %bx 1967 movw $0x01, %bx
1952 movw $0x00, %cx 1968 movw $0x00, %cx
@@ -1954,6 +1970,7 @@ store_edid:
1954 movw $0x140, %di 1970 movw $0x140, %di
1955 int $0x10 1971 int $0x10
1956 1972
1973no_edid:
1957 popw %di # restore all registers 1974 popw %di # restore all registers
1958 popw %dx 1975 popw %dx
1959 popw %cx 1976 popw %cx
diff --git a/arch/i386/crypto/aes-i586-asm.S b/arch/i386/crypto/aes-i586-asm.S
index 911b15377f..f942f0c8f6 100644
--- a/arch/i386/crypto/aes-i586-asm.S
+++ b/arch/i386/crypto/aes-i586-asm.S
@@ -36,22 +36,19 @@
36.file "aes-i586-asm.S" 36.file "aes-i586-asm.S"
37.text 37.text
38 38
39// aes_rval aes_enc_blk(const unsigned char in_blk[], unsigned char out_blk[], const aes_ctx cx[1])// 39#include <asm/asm-offsets.h>
40// aes_rval aes_dec_blk(const unsigned char in_blk[], unsigned char out_blk[], const aes_ctx cx[1])//
41
42#define tlen 1024 // length of each of 4 'xor' arrays (256 32-bit words)
43 40
44// offsets to parameters with one register pushed onto stack 41#define tlen 1024 // length of each of 4 'xor' arrays (256 32-bit words)
45
46#define in_blk 8 // input byte array address parameter
47#define out_blk 12 // output byte array address parameter
48#define ctx 16 // AES context structure
49 42
50// offsets in context structure 43/* offsets to parameters with one register pushed onto stack */
44#define tfm 8
45#define out_blk 12
46#define in_blk 16
51 47
52#define ekey 0 // encryption key schedule base address 48/* offsets in crypto_tfm structure */
53#define nrnd 256 // number of rounds 49#define ekey (crypto_tfm_ctx_offset + 0)
54#define dkey 260 // decryption key schedule base address 50#define nrnd (crypto_tfm_ctx_offset + 256)
51#define dkey (crypto_tfm_ctx_offset + 260)
55 52
56// register mapping for encrypt and decrypt subroutines 53// register mapping for encrypt and decrypt subroutines
57 54
@@ -220,6 +217,7 @@
220 do_col (table, r5,r0,r1,r4, r2,r3); /* idx=r5 */ 217 do_col (table, r5,r0,r1,r4, r2,r3); /* idx=r5 */
221 218
222// AES (Rijndael) Encryption Subroutine 219// AES (Rijndael) Encryption Subroutine
220/* void aes_enc_blk(struct crypto_tfm *tfm, u8 *out_blk, const u8 *in_blk) */
223 221
224.global aes_enc_blk 222.global aes_enc_blk
225 223
@@ -230,7 +228,7 @@
230 228
231aes_enc_blk: 229aes_enc_blk:
232 push %ebp 230 push %ebp
233 mov ctx(%esp),%ebp // pointer to context 231 mov tfm(%esp),%ebp
234 232
235// CAUTION: the order and the values used in these assigns 233// CAUTION: the order and the values used in these assigns
236// rely on the register mappings 234// rely on the register mappings
@@ -295,6 +293,7 @@ aes_enc_blk:
295 ret 293 ret
296 294
297// AES (Rijndael) Decryption Subroutine 295// AES (Rijndael) Decryption Subroutine
296/* void aes_dec_blk(struct crypto_tfm *tfm, u8 *out_blk, const u8 *in_blk) */
298 297
299.global aes_dec_blk 298.global aes_dec_blk
300 299
@@ -305,7 +304,7 @@ aes_enc_blk:
305 304
306aes_dec_blk: 305aes_dec_blk:
307 push %ebp 306 push %ebp
308 mov ctx(%esp),%ebp // pointer to context 307 mov tfm(%esp),%ebp
309 308
310// CAUTION: the order and the values used in these assigns 309// CAUTION: the order and the values used in these assigns
311// rely on the register mappings 310// rely on the register mappings
diff --git a/arch/i386/crypto/aes.c b/arch/i386/crypto/aes.c
index a50397b1d5..d3806daa3d 100644
--- a/arch/i386/crypto/aes.c
+++ b/arch/i386/crypto/aes.c
@@ -45,8 +45,8 @@
45#include <linux/crypto.h> 45#include <linux/crypto.h>
46#include <linux/linkage.h> 46#include <linux/linkage.h>
47 47
48asmlinkage void aes_enc_blk(const u8 *src, u8 *dst, void *ctx); 48asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
49asmlinkage void aes_dec_blk(const u8 *src, u8 *dst, void *ctx); 49asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
50 50
51#define AES_MIN_KEY_SIZE 16 51#define AES_MIN_KEY_SIZE 16
52#define AES_MAX_KEY_SIZE 32 52#define AES_MAX_KEY_SIZE 32
@@ -378,12 +378,12 @@ static void gen_tabs(void)
378 k[8*(i)+11] = ss[3]; \ 378 k[8*(i)+11] = ss[3]; \
379} 379}
380 380
381static int 381static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
382aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) 382 unsigned int key_len, u32 *flags)
383{ 383{
384 int i; 384 int i;
385 u32 ss[8]; 385 u32 ss[8];
386 struct aes_ctx *ctx = ctx_arg; 386 struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
387 const __le32 *key = (const __le32 *)in_key; 387 const __le32 *key = (const __le32 *)in_key;
388 388
389 /* encryption schedule */ 389 /* encryption schedule */
@@ -464,16 +464,16 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
464 return 0; 464 return 0;
465} 465}
466 466
467static inline void aes_encrypt(void *ctx, u8 *dst, const u8 *src) 467static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
468{ 468{
469 aes_enc_blk(src, dst, ctx); 469 aes_enc_blk(tfm, dst, src);
470} 470}
471static inline void aes_decrypt(void *ctx, u8 *dst, const u8 *src) 471
472static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
472{ 473{
473 aes_dec_blk(src, dst, ctx); 474 aes_dec_blk(tfm, dst, src);
474} 475}
475 476
476
477static struct crypto_alg aes_alg = { 477static struct crypto_alg aes_alg = {
478 .cra_name = "aes", 478 .cra_name = "aes",
479 .cra_driver_name = "aes-i586", 479 .cra_driver_name = "aes-i586",
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index 96fb8a020a..5e70c2fb27 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -7,10 +7,9 @@ extra-y := head.o init_task.o vmlinux.lds
7obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o \ 7obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o \
8 ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \ 8 ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \
9 pci-dma.o i386_ksyms.o i387.o bootflag.o \ 9 pci-dma.o i386_ksyms.o i387.o bootflag.o \
10 quirks.o i8237.o topology.o alternative.o 10 quirks.o i8237.o topology.o alternative.o i8253.o tsc.o
11 11
12obj-y += cpu/ 12obj-y += cpu/
13obj-y += timers/
14obj-y += acpi/ 13obj-y += acpi/
15obj-$(CONFIG_X86_BIOS_REBOOT) += reboot.o 14obj-$(CONFIG_X86_BIOS_REBOOT) += reboot.o
16obj-$(CONFIG_MCA) += mca.o 15obj-$(CONFIG_MCA) += mca.o
@@ -37,6 +36,8 @@ obj-$(CONFIG_EFI) += efi.o efi_stub.o
37obj-$(CONFIG_DOUBLEFAULT) += doublefault.o 36obj-$(CONFIG_DOUBLEFAULT) += doublefault.o
38obj-$(CONFIG_VM86) += vm86.o 37obj-$(CONFIG_VM86) += vm86.o
39obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 38obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
39obj-$(CONFIG_HPET_TIMER) += hpet.o
40obj-$(CONFIG_K8_NB) += k8.o
40 41
41EXTRA_AFLAGS := -traditional 42EXTRA_AFLAGS := -traditional
42 43
@@ -76,3 +77,6 @@ SYSCFLAGS_vsyscall-syms.o = -r
76$(obj)/vsyscall-syms.o: $(src)/vsyscall.lds \ 77$(obj)/vsyscall-syms.o: $(src)/vsyscall.lds \
77 $(obj)/vsyscall-sysenter.o $(obj)/vsyscall-note.o FORCE 78 $(obj)/vsyscall-sysenter.o $(obj)/vsyscall-note.o FORCE
78 $(call if_changed,syscall) 79 $(call if_changed,syscall)
80
81k8-y += ../../x86_64/kernel/k8.o
82
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
index 5cbd6f99fb..50eb0e0377 100644
--- a/arch/i386/kernel/alternative.c
+++ b/arch/i386/kernel/alternative.c
@@ -4,27 +4,41 @@
4#include <asm/alternative.h> 4#include <asm/alternative.h>
5#include <asm/sections.h> 5#include <asm/sections.h>
6 6
7#define DEBUG 0 7static int no_replacement = 0;
8#if DEBUG 8static int smp_alt_once = 0;
9# define DPRINTK(fmt, args...) printk(fmt, args) 9static int debug_alternative = 0;
10#else 10
11# define DPRINTK(fmt, args...) 11static int __init noreplacement_setup(char *s)
12#endif 12{
13 no_replacement = 1;
14 return 1;
15}
16static int __init bootonly(char *str)
17{
18 smp_alt_once = 1;
19 return 1;
20}
21static int __init debug_alt(char *str)
22{
23 debug_alternative = 1;
24 return 1;
25}
13 26
27__setup("noreplacement", noreplacement_setup);
28__setup("smp-alt-boot", bootonly);
29__setup("debug-alternative", debug_alt);
30
31#define DPRINTK(fmt, args...) if (debug_alternative) \
32 printk(KERN_DEBUG fmt, args)
33
34#ifdef GENERIC_NOP1
14/* Use inline assembly to define this because the nops are defined 35/* Use inline assembly to define this because the nops are defined
15 as inline assembly strings in the include files and we cannot 36 as inline assembly strings in the include files and we cannot
16 get them easily into strings. */ 37 get them easily into strings. */
17asm("\t.data\nintelnops: " 38asm("\t.data\nintelnops: "
18 GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6 39 GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
19 GENERIC_NOP7 GENERIC_NOP8); 40 GENERIC_NOP7 GENERIC_NOP8);
20asm("\t.data\nk8nops: " 41extern unsigned char intelnops[];
21 K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
22 K8_NOP7 K8_NOP8);
23asm("\t.data\nk7nops: "
24 K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
25 K7_NOP7 K7_NOP8);
26
27extern unsigned char intelnops[], k8nops[], k7nops[];
28static unsigned char *intel_nops[ASM_NOP_MAX+1] = { 42static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
29 NULL, 43 NULL,
30 intelnops, 44 intelnops,
@@ -36,6 +50,13 @@ static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
36 intelnops + 1 + 2 + 3 + 4 + 5 + 6, 50 intelnops + 1 + 2 + 3 + 4 + 5 + 6,
37 intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7, 51 intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
38}; 52};
53#endif
54
55#ifdef K8_NOP1
56asm("\t.data\nk8nops: "
57 K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
58 K8_NOP7 K8_NOP8);
59extern unsigned char k8nops[];
39static unsigned char *k8_nops[ASM_NOP_MAX+1] = { 60static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
40 NULL, 61 NULL,
41 k8nops, 62 k8nops,
@@ -47,6 +68,13 @@ static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
47 k8nops + 1 + 2 + 3 + 4 + 5 + 6, 68 k8nops + 1 + 2 + 3 + 4 + 5 + 6,
48 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, 69 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
49}; 70};
71#endif
72
73#ifdef K7_NOP1
74asm("\t.data\nk7nops: "
75 K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
76 K7_NOP7 K7_NOP8);
77extern unsigned char k7nops[];
50static unsigned char *k7_nops[ASM_NOP_MAX+1] = { 78static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
51 NULL, 79 NULL,
52 k7nops, 80 k7nops,
@@ -58,6 +86,18 @@ static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
58 k7nops + 1 + 2 + 3 + 4 + 5 + 6, 86 k7nops + 1 + 2 + 3 + 4 + 5 + 6,
59 k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, 87 k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
60}; 88};
89#endif
90
91#ifdef CONFIG_X86_64
92
93extern char __vsyscall_0;
94static inline unsigned char** find_nop_table(void)
95{
96 return k8_nops;
97}
98
99#else /* CONFIG_X86_64 */
100
61static struct nop { 101static struct nop {
62 int cpuid; 102 int cpuid;
63 unsigned char **noptable; 103 unsigned char **noptable;
@@ -67,14 +107,6 @@ static struct nop {
67 { -1, NULL } 107 { -1, NULL }
68}; 108};
69 109
70
71extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
72extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
73extern u8 *__smp_locks[], *__smp_locks_end[];
74
75extern u8 __smp_alt_begin[], __smp_alt_end[];
76
77
78static unsigned char** find_nop_table(void) 110static unsigned char** find_nop_table(void)
79{ 111{
80 unsigned char **noptable = intel_nops; 112 unsigned char **noptable = intel_nops;
@@ -89,6 +121,14 @@ static unsigned char** find_nop_table(void)
89 return noptable; 121 return noptable;
90} 122}
91 123
124#endif /* CONFIG_X86_64 */
125
126extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
127extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
128extern u8 *__smp_locks[], *__smp_locks_end[];
129
130extern u8 __smp_alt_begin[], __smp_alt_end[];
131
92/* Replace instructions with better alternatives for this CPU type. 132/* Replace instructions with better alternatives for this CPU type.
93 This runs before SMP is initialized to avoid SMP problems with 133 This runs before SMP is initialized to avoid SMP problems with
94 self modifying code. This implies that assymetric systems where 134 self modifying code. This implies that assymetric systems where
@@ -99,6 +139,7 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
99{ 139{
100 unsigned char **noptable = find_nop_table(); 140 unsigned char **noptable = find_nop_table();
101 struct alt_instr *a; 141 struct alt_instr *a;
142 u8 *instr;
102 int diff, i, k; 143 int diff, i, k;
103 144
104 DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end); 145 DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
@@ -106,7 +147,16 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
106 BUG_ON(a->replacementlen > a->instrlen); 147 BUG_ON(a->replacementlen > a->instrlen);
107 if (!boot_cpu_has(a->cpuid)) 148 if (!boot_cpu_has(a->cpuid))
108 continue; 149 continue;
109 memcpy(a->instr, a->replacement, a->replacementlen); 150 instr = a->instr;
151#ifdef CONFIG_X86_64
152 /* vsyscall code is not mapped yet. resolve it manually. */
153 if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
154 instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
155 DPRINTK("%s: vsyscall fixup: %p => %p\n",
156 __FUNCTION__, a->instr, instr);
157 }
158#endif
159 memcpy(instr, a->replacement, a->replacementlen);
110 diff = a->instrlen - a->replacementlen; 160 diff = a->instrlen - a->replacementlen;
111 /* Pad the rest with nops */ 161 /* Pad the rest with nops */
112 for (i = a->replacementlen; diff > 0; diff -= k, i += k) { 162 for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
@@ -186,14 +236,6 @@ struct smp_alt_module {
186static LIST_HEAD(smp_alt_modules); 236static LIST_HEAD(smp_alt_modules);
187static DEFINE_SPINLOCK(smp_alt); 237static DEFINE_SPINLOCK(smp_alt);
188 238
189static int smp_alt_once = 0;
190static int __init bootonly(char *str)
191{
192 smp_alt_once = 1;
193 return 1;
194}
195__setup("smp-alt-boot", bootonly);
196
197void alternatives_smp_module_add(struct module *mod, char *name, 239void alternatives_smp_module_add(struct module *mod, char *name,
198 void *locks, void *locks_end, 240 void *locks, void *locks_end,
199 void *text, void *text_end) 241 void *text, void *text_end)
@@ -201,6 +243,9 @@ void alternatives_smp_module_add(struct module *mod, char *name,
201 struct smp_alt_module *smp; 243 struct smp_alt_module *smp;
202 unsigned long flags; 244 unsigned long flags;
203 245
246 if (no_replacement)
247 return;
248
204 if (smp_alt_once) { 249 if (smp_alt_once) {
205 if (boot_cpu_has(X86_FEATURE_UP)) 250 if (boot_cpu_has(X86_FEATURE_UP))
206 alternatives_smp_unlock(locks, locks_end, 251 alternatives_smp_unlock(locks, locks_end,
@@ -235,7 +280,7 @@ void alternatives_smp_module_del(struct module *mod)
235 struct smp_alt_module *item; 280 struct smp_alt_module *item;
236 unsigned long flags; 281 unsigned long flags;
237 282
238 if (smp_alt_once) 283 if (no_replacement || smp_alt_once)
239 return; 284 return;
240 285
241 spin_lock_irqsave(&smp_alt, flags); 286 spin_lock_irqsave(&smp_alt, flags);
@@ -256,7 +301,7 @@ void alternatives_smp_switch(int smp)
256 struct smp_alt_module *mod; 301 struct smp_alt_module *mod;
257 unsigned long flags; 302 unsigned long flags;
258 303
259 if (smp_alt_once) 304 if (no_replacement || smp_alt_once)
260 return; 305 return;
261 BUG_ON(!smp && (num_online_cpus() > 1)); 306 BUG_ON(!smp && (num_online_cpus() > 1));
262 307
@@ -285,6 +330,13 @@ void alternatives_smp_switch(int smp)
285 330
286void __init alternative_instructions(void) 331void __init alternative_instructions(void)
287{ 332{
333 if (no_replacement) {
334 printk(KERN_INFO "(SMP-)alternatives turned off\n");
335 free_init_pages("SMP alternatives",
336 (unsigned long)__smp_alt_begin,
337 (unsigned long)__smp_alt_end);
338 return;
339 }
288 apply_alternatives(__alt_instructions, __alt_instructions_end); 340 apply_alternatives(__alt_instructions, __alt_instructions_end);
289 341
290 /* switch to patch-once-at-boottime-only mode and free the 342 /* switch to patch-once-at-boottime-only mode and free the
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 5ab59c1233..7ce09492fc 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -36,6 +36,7 @@
36#include <asm/arch_hooks.h> 36#include <asm/arch_hooks.h>
37#include <asm/hpet.h> 37#include <asm/hpet.h>
38#include <asm/i8253.h> 38#include <asm/i8253.h>
39#include <asm/nmi.h>
39 40
40#include <mach_apic.h> 41#include <mach_apic.h>
41#include <mach_apicdef.h> 42#include <mach_apicdef.h>
@@ -156,7 +157,7 @@ void clear_local_APIC(void)
156 maxlvt = get_maxlvt(); 157 maxlvt = get_maxlvt();
157 158
158 /* 159 /*
159 * Masking an LVT entry on a P6 can trigger a local APIC error 160 * Masking an LVT entry can trigger a local APIC error
160 * if the vector is zero. Mask LVTERR first to prevent this. 161 * if the vector is zero. Mask LVTERR first to prevent this.
161 */ 162 */
162 if (maxlvt >= 3) { 163 if (maxlvt >= 3) {
@@ -1117,7 +1118,18 @@ void disable_APIC_timer(void)
1117 unsigned long v; 1118 unsigned long v;
1118 1119
1119 v = apic_read(APIC_LVTT); 1120 v = apic_read(APIC_LVTT);
1120 apic_write_around(APIC_LVTT, v | APIC_LVT_MASKED); 1121 /*
1122 * When an illegal vector value (0-15) is written to an LVT
1123 * entry and delivery mode is Fixed, the APIC may signal an
1124 * illegal vector error, with out regard to whether the mask
1125 * bit is set or whether an interrupt is actually seen on input.
1126 *
1127 * Boot sequence might call this function when the LVTT has
1128 * '0' vector value. So make sure vector field is set to
1129 * valid value.
1130 */
1131 v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
1132 apic_write_around(APIC_LVTT, v);
1121 } 1133 }
1122} 1134}
1123 1135
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index 9e819eb682..7c5729d1fd 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -764,9 +764,9 @@ static int apm_do_idle(void)
764 int idled = 0; 764 int idled = 0;
765 int polling; 765 int polling;
766 766
767 polling = test_thread_flag(TIF_POLLING_NRFLAG); 767 polling = !!(current_thread_info()->status & TS_POLLING);
768 if (polling) { 768 if (polling) {
769 clear_thread_flag(TIF_POLLING_NRFLAG); 769 current_thread_info()->status &= ~TS_POLLING;
770 smp_mb__after_clear_bit(); 770 smp_mb__after_clear_bit();
771 } 771 }
772 if (!need_resched()) { 772 if (!need_resched()) {
@@ -774,7 +774,7 @@ static int apm_do_idle(void)
774 ret = apm_bios_call_simple(APM_FUNC_IDLE, 0, 0, &eax); 774 ret = apm_bios_call_simple(APM_FUNC_IDLE, 0, 0, &eax);
775 } 775 }
776 if (polling) 776 if (polling)
777 set_thread_flag(TIF_POLLING_NRFLAG); 777 current_thread_info()->status |= TS_POLLING;
778 778
779 if (!idled) 779 if (!idled)
780 return 0; 780 return 0;
diff --git a/arch/i386/kernel/asm-offsets.c b/arch/i386/kernel/asm-offsets.c
index 36d66e2077..c80271f8f0 100644
--- a/arch/i386/kernel/asm-offsets.c
+++ b/arch/i386/kernel/asm-offsets.c
@@ -4,6 +4,7 @@
4 * to extract and format the required data. 4 * to extract and format the required data.
5 */ 5 */
6 6
7#include <linux/crypto.h>
7#include <linux/sched.h> 8#include <linux/sched.h>
8#include <linux/signal.h> 9#include <linux/signal.h>
9#include <linux/personality.h> 10#include <linux/personality.h>
@@ -13,6 +14,7 @@
13#include <asm/fixmap.h> 14#include <asm/fixmap.h>
14#include <asm/processor.h> 15#include <asm/processor.h>
15#include <asm/thread_info.h> 16#include <asm/thread_info.h>
17#include <asm/elf.h>
16 18
17#define DEFINE(sym, val) \ 19#define DEFINE(sym, val) \
18 asm volatile("\n->" #sym " %0 " #val : : "i" (val)) 20 asm volatile("\n->" #sym " %0 " #val : : "i" (val))
@@ -53,6 +55,7 @@ void foo(void)
53 OFFSET(TI_preempt_count, thread_info, preempt_count); 55 OFFSET(TI_preempt_count, thread_info, preempt_count);
54 OFFSET(TI_addr_limit, thread_info, addr_limit); 56 OFFSET(TI_addr_limit, thread_info, addr_limit);
55 OFFSET(TI_restart_block, thread_info, restart_block); 57 OFFSET(TI_restart_block, thread_info, restart_block);
58 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
56 BLANK(); 59 BLANK();
57 60
58 OFFSET(EXEC_DOMAIN_handler, exec_domain, handler); 61 OFFSET(EXEC_DOMAIN_handler, exec_domain, handler);
@@ -68,5 +71,7 @@ void foo(void)
68 sizeof(struct tss_struct)); 71 sizeof(struct tss_struct));
69 72
70 DEFINE(PAGE_SIZE_asm, PAGE_SIZE); 73 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
71 DEFINE(VSYSCALL_BASE, __fix_to_virt(FIX_VSYSCALL)); 74 DEFINE(VDSO_PRELINK, VDSO_PRELINK);
75
76 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
72} 77}
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index 786d1a5704..e6a2d6b80c 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -224,22 +224,26 @@ static void __init init_amd(struct cpuinfo_x86 *c)
224 224
225#ifdef CONFIG_X86_HT 225#ifdef CONFIG_X86_HT
226 /* 226 /*
227 * On a AMD dual core setup the lower bits of the APIC id 227 * On a AMD multi core setup the lower bits of the APIC id
228 * distingush the cores. Assumes number of cores is a power 228 * distingush the cores.
229 * of two.
230 */ 229 */
231 if (c->x86_max_cores > 1) { 230 if (c->x86_max_cores > 1) {
232 int cpu = smp_processor_id(); 231 int cpu = smp_processor_id();
233 unsigned bits = 0; 232 unsigned bits = (cpuid_ecx(0x80000008) >> 12) & 0xf;
234 while ((1 << bits) < c->x86_max_cores) 233
235 bits++; 234 if (bits == 0) {
236 cpu_core_id[cpu] = phys_proc_id[cpu] & ((1<<bits)-1); 235 while ((1 << bits) < c->x86_max_cores)
237 phys_proc_id[cpu] >>= bits; 236 bits++;
237 }
238 c->cpu_core_id = c->phys_proc_id & ((1<<bits)-1);
239 c->phys_proc_id >>= bits;
238 printk(KERN_INFO "CPU %d(%d) -> Core %d\n", 240 printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
239 cpu, c->x86_max_cores, cpu_core_id[cpu]); 241 cpu, c->x86_max_cores, c->cpu_core_id);
240 } 242 }
241#endif 243#endif
242 244
245 if (cpuid_eax(0x80000000) >= 0x80000006)
246 num_cache_leaves = 3;
243} 247}
244 248
245static unsigned int amd_size_cache(struct cpuinfo_x86 * c, unsigned int size) 249static unsigned int amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 44f2c5f2dd..70c87de582 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -294,7 +294,7 @@ void __cpuinit generic_identify(struct cpuinfo_x86 * c)
294 if (c->x86 >= 0x6) 294 if (c->x86 >= 0x6)
295 c->x86_model += ((tfms >> 16) & 0xF) << 4; 295 c->x86_model += ((tfms >> 16) & 0xF) << 4;
296 c->x86_mask = tfms & 15; 296 c->x86_mask = tfms & 15;
297#ifdef CONFIG_SMP 297#ifdef CONFIG_X86_HT
298 c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0); 298 c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
299#else 299#else
300 c->apicid = (ebx >> 24) & 0xFF; 300 c->apicid = (ebx >> 24) & 0xFF;
@@ -319,7 +319,7 @@ void __cpuinit generic_identify(struct cpuinfo_x86 * c)
319 early_intel_workaround(c); 319 early_intel_workaround(c);
320 320
321#ifdef CONFIG_X86_HT 321#ifdef CONFIG_X86_HT
322 phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff; 322 c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
323#endif 323#endif
324} 324}
325 325
@@ -477,11 +477,9 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
477{ 477{
478 u32 eax, ebx, ecx, edx; 478 u32 eax, ebx, ecx, edx;
479 int index_msb, core_bits; 479 int index_msb, core_bits;
480 int cpu = smp_processor_id();
481 480
482 cpuid(1, &eax, &ebx, &ecx, &edx); 481 cpuid(1, &eax, &ebx, &ecx, &edx);
483 482
484
485 if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY)) 483 if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
486 return; 484 return;
487 485
@@ -492,16 +490,17 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
492 } else if (smp_num_siblings > 1 ) { 490 } else if (smp_num_siblings > 1 ) {
493 491
494 if (smp_num_siblings > NR_CPUS) { 492 if (smp_num_siblings > NR_CPUS) {
495 printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings); 493 printk(KERN_WARNING "CPU: Unsupported number of the "
494 "siblings %d", smp_num_siblings);
496 smp_num_siblings = 1; 495 smp_num_siblings = 1;
497 return; 496 return;
498 } 497 }
499 498
500 index_msb = get_count_order(smp_num_siblings); 499 index_msb = get_count_order(smp_num_siblings);
501 phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb); 500 c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
502 501
503 printk(KERN_INFO "CPU: Physical Processor ID: %d\n", 502 printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
504 phys_proc_id[cpu]); 503 c->phys_proc_id);
505 504
506 smp_num_siblings = smp_num_siblings / c->x86_max_cores; 505 smp_num_siblings = smp_num_siblings / c->x86_max_cores;
507 506
@@ -509,12 +508,12 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
509 508
510 core_bits = get_count_order(c->x86_max_cores); 509 core_bits = get_count_order(c->x86_max_cores);
511 510
512 cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) & 511 c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
513 ((1 << core_bits) - 1); 512 ((1 << core_bits) - 1);
514 513
515 if (c->x86_max_cores > 1) 514 if (c->x86_max_cores > 1)
516 printk(KERN_INFO "CPU: Processor Core ID: %d\n", 515 printk(KERN_INFO "CPU: Processor Core ID: %d\n",
517 cpu_core_id[cpu]); 516 c->cpu_core_id);
518 } 517 }
519} 518}
520#endif 519#endif
@@ -613,6 +612,12 @@ void __cpuinit cpu_init(void)
613 set_in_cr4(X86_CR4_TSD); 612 set_in_cr4(X86_CR4_TSD);
614 } 613 }
615 614
615 /* The CPU hotplug case */
616 if (cpu_gdt_descr->address) {
617 gdt = (struct desc_struct *)cpu_gdt_descr->address;
618 memset(gdt, 0, PAGE_SIZE);
619 goto old_gdt;
620 }
616 /* 621 /*
617 * This is a horrible hack to allocate the GDT. The problem 622 * This is a horrible hack to allocate the GDT. The problem
618 * is that cpu_init() is called really early for the boot CPU 623 * is that cpu_init() is called really early for the boot CPU
@@ -631,7 +636,7 @@ void __cpuinit cpu_init(void)
631 local_irq_enable(); 636 local_irq_enable();
632 } 637 }
633 } 638 }
634 639old_gdt:
635 /* 640 /*
636 * Initialize the per-CPU GDT with the boot GDT, 641 * Initialize the per-CPU GDT with the boot GDT,
637 * and set up the GDT descriptor: 642 * and set up the GDT descriptor:
diff --git a/arch/i386/kernel/cpu/cyrix.c b/arch/i386/kernel/cpu/cyrix.c
index fc32c8028e..f03b7f94c3 100644
--- a/arch/i386/kernel/cpu/cyrix.c
+++ b/arch/i386/kernel/cpu/cyrix.c
@@ -354,7 +354,7 @@ static void __init init_nsc(struct cpuinfo_x86 *c)
354 * This function only handles the GX processor, and kicks every 354 * This function only handles the GX processor, and kicks every
355 * thing else to the Cyrix init function above - that should 355 * thing else to the Cyrix init function above - that should
356 * cover any processors that might have been branded differently 356 * cover any processors that might have been branded differently
357 * after NSC aquired Cyrix. 357 * after NSC acquired Cyrix.
358 * 358 *
359 * If this breaks your GX1 horribly, please e-mail 359 * If this breaks your GX1 horribly, please e-mail
360 * info-linux@ldcmail.amd.com to tell us. 360 * info-linux@ldcmail.amd.com to tell us.
diff --git a/arch/i386/kernel/cpu/intel.c b/arch/i386/kernel/cpu/intel.c
index 5386b29bb5..10afc645c5 100644
--- a/arch/i386/kernel/cpu/intel.c
+++ b/arch/i386/kernel/cpu/intel.c
@@ -122,6 +122,12 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
122 122
123 select_idle_routine(c); 123 select_idle_routine(c);
124 l2 = init_intel_cacheinfo(c); 124 l2 = init_intel_cacheinfo(c);
125 if (c->cpuid_level > 9 ) {
126 unsigned eax = cpuid_eax(10);
127 /* Check for version and the number of counters */
128 if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
129 set_bit(X86_FEATURE_ARCH_PERFMON, c->x86_capability);
130 }
125 131
126 /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */ 132 /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */
127 if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633) 133 if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index c8547a6fa7..e9f0b928b0 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -4,6 +4,7 @@
4 * Changes: 4 * Changes:
5 * Venkatesh Pallipadi : Adding cache identification through cpuid(4) 5 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
6 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure. 6 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
7 * Andi Kleen : CPUID4 emulation on AMD.
7 */ 8 */
8 9
9#include <linux/init.h> 10#include <linux/init.h>
@@ -130,25 +131,111 @@ struct _cpuid4_info {
130 cpumask_t shared_cpu_map; 131 cpumask_t shared_cpu_map;
131}; 132};
132 133
133static unsigned short num_cache_leaves; 134unsigned short num_cache_leaves;
135
136/* AMD doesn't have CPUID4. Emulate it here to report the same
137 information to the user. This makes some assumptions about the machine:
138 No L3, L2 not shared, no SMT etc. that is currently true on AMD CPUs.
139
140 In theory the TLBs could be reported as fake type (they are in "dummy").
141 Maybe later */
142union l1_cache {
143 struct {
144 unsigned line_size : 8;
145 unsigned lines_per_tag : 8;
146 unsigned assoc : 8;
147 unsigned size_in_kb : 8;
148 };
149 unsigned val;
150};
151
152union l2_cache {
153 struct {
154 unsigned line_size : 8;
155 unsigned lines_per_tag : 4;
156 unsigned assoc : 4;
157 unsigned size_in_kb : 16;
158 };
159 unsigned val;
160};
161
162static const unsigned short assocs[] = {
163 [1] = 1, [2] = 2, [4] = 4, [6] = 8,
164 [8] = 16,
165 [0xf] = 0xffff // ??
166 };
167static const unsigned char levels[] = { 1, 1, 2 };
168static const unsigned char types[] = { 1, 2, 3 };
169
170static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
171 union _cpuid4_leaf_ebx *ebx,
172 union _cpuid4_leaf_ecx *ecx)
173{
174 unsigned dummy;
175 unsigned line_size, lines_per_tag, assoc, size_in_kb;
176 union l1_cache l1i, l1d;
177 union l2_cache l2;
178
179 eax->full = 0;
180 ebx->full = 0;
181 ecx->full = 0;
182
183 cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
184 cpuid(0x80000006, &dummy, &dummy, &l2.val, &dummy);
185
186 if (leaf > 2 || !l1d.val || !l1i.val || !l2.val)
187 return;
188
189 eax->split.is_self_initializing = 1;
190 eax->split.type = types[leaf];
191 eax->split.level = levels[leaf];
192 eax->split.num_threads_sharing = 0;
193 eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
194
195 if (leaf <= 1) {
196 union l1_cache *l1 = leaf == 0 ? &l1d : &l1i;
197 assoc = l1->assoc;
198 line_size = l1->line_size;
199 lines_per_tag = l1->lines_per_tag;
200 size_in_kb = l1->size_in_kb;
201 } else {
202 assoc = l2.assoc;
203 line_size = l2.line_size;
204 lines_per_tag = l2.lines_per_tag;
205 /* cpu_data has errata corrections for K7 applied */
206 size_in_kb = current_cpu_data.x86_cache_size;
207 }
208
209 if (assoc == 0xf)
210 eax->split.is_fully_associative = 1;
211 ebx->split.coherency_line_size = line_size - 1;
212 ebx->split.ways_of_associativity = assocs[assoc] - 1;
213 ebx->split.physical_line_partition = lines_per_tag - 1;
214 ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
215 (ebx->split.ways_of_associativity + 1) - 1;
216}
134 217
135static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf) 218static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
136{ 219{
137 unsigned int eax, ebx, ecx, edx; 220 union _cpuid4_leaf_eax eax;
138 union _cpuid4_leaf_eax cache_eax; 221 union _cpuid4_leaf_ebx ebx;
222 union _cpuid4_leaf_ecx ecx;
223 unsigned edx;
139 224
140 cpuid_count(4, index, &eax, &ebx, &ecx, &edx); 225 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
141 cache_eax.full = eax; 226 amd_cpuid4(index, &eax, &ebx, &ecx);
142 if (cache_eax.split.type == CACHE_TYPE_NULL) 227 else
228 cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
229 if (eax.split.type == CACHE_TYPE_NULL)
143 return -EIO; /* better error ? */ 230 return -EIO; /* better error ? */
144 231
145 this_leaf->eax.full = eax; 232 this_leaf->eax = eax;
146 this_leaf->ebx.full = ebx; 233 this_leaf->ebx = ebx;
147 this_leaf->ecx.full = ecx; 234 this_leaf->ecx = ecx;
148 this_leaf->size = (this_leaf->ecx.split.number_of_sets + 1) * 235 this_leaf->size = (ecx.split.number_of_sets + 1) *
149 (this_leaf->ebx.split.coherency_line_size + 1) * 236 (ebx.split.coherency_line_size + 1) *
150 (this_leaf->ebx.split.physical_line_partition + 1) * 237 (ebx.split.physical_line_partition + 1) *
151 (this_leaf->ebx.split.ways_of_associativity + 1); 238 (ebx.split.ways_of_associativity + 1);
152 return 0; 239 return 0;
153} 240}
154 241
@@ -174,7 +261,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
174 unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ 261 unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
175 unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ 262 unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
176 unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb; 263 unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
177#ifdef CONFIG_SMP 264#ifdef CONFIG_X86_HT
178 unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data); 265 unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data);
179#endif 266#endif
180 267
@@ -296,14 +383,14 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
296 383
297 if (new_l2) { 384 if (new_l2) {
298 l2 = new_l2; 385 l2 = new_l2;
299#ifdef CONFIG_SMP 386#ifdef CONFIG_X86_HT
300 cpu_llc_id[cpu] = l2_id; 387 cpu_llc_id[cpu] = l2_id;
301#endif 388#endif
302 } 389 }
303 390
304 if (new_l3) { 391 if (new_l3) {
305 l3 = new_l3; 392 l3 = new_l3;
306#ifdef CONFIG_SMP 393#ifdef CONFIG_X86_HT
307 cpu_llc_id[cpu] = l3_id; 394 cpu_llc_id[cpu] = l3_id;
308#endif 395#endif
309 } 396 }
@@ -642,7 +729,7 @@ static void __cpuexit cache_remove_dev(struct sys_device * sys_dev)
642 return; 729 return;
643} 730}
644 731
645static int cacheinfo_cpu_callback(struct notifier_block *nfb, 732static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
646 unsigned long action, void *hcpu) 733 unsigned long action, void *hcpu)
647{ 734{
648 unsigned int cpu = (unsigned long)hcpu; 735 unsigned int cpu = (unsigned long)hcpu;
@@ -660,7 +747,7 @@ static int cacheinfo_cpu_callback(struct notifier_block *nfb,
660 return NOTIFY_OK; 747 return NOTIFY_OK;
661} 748}
662 749
663static struct notifier_block cacheinfo_cpu_notifier = 750static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
664{ 751{
665 .notifier_call = cacheinfo_cpu_callback, 752 .notifier_call = cacheinfo_cpu_callback,
666}; 753};
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c
index a19fcb262d..f54a15268e 100644
--- a/arch/i386/kernel/cpu/proc.c
+++ b/arch/i386/kernel/cpu/proc.c
@@ -18,7 +18,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
18 * applications want to get the raw CPUID data, they should access 18 * applications want to get the raw CPUID data, they should access
19 * /dev/cpu/<cpu_nr>/cpuid instead. 19 * /dev/cpu/<cpu_nr>/cpuid instead.
20 */ 20 */
21 static char *x86_cap_flags[] = { 21 static const char * const x86_cap_flags[] = {
22 /* Intel-defined */ 22 /* Intel-defined */
23 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", 23 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
24 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov", 24 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
@@ -62,7 +62,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
62 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 62 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
63 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 63 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
64 }; 64 };
65 static char *x86_power_flags[] = { 65 static const char * const x86_power_flags[] = {
66 "ts", /* temperature sensor */ 66 "ts", /* temperature sensor */
67 "fid", /* frequency id control */ 67 "fid", /* frequency id control */
68 "vid", /* voltage id control */ 68 "vid", /* voltage id control */
@@ -109,9 +109,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
109 seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); 109 seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
110#ifdef CONFIG_X86_HT 110#ifdef CONFIG_X86_HT
111 if (c->x86_max_cores * smp_num_siblings > 1) { 111 if (c->x86_max_cores * smp_num_siblings > 1) {
112 seq_printf(m, "physical id\t: %d\n", phys_proc_id[n]); 112 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
113 seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[n])); 113 seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[n]));
114 seq_printf(m, "core id\t\t: %d\n", cpu_core_id[n]); 114 seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
115 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); 115 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
116 } 116 }
117#endif 117#endif
diff --git a/arch/i386/kernel/cpuid.c b/arch/i386/kernel/cpuid.c
index 1d9a4abcdf..f6dfa9fb67 100644
--- a/arch/i386/kernel/cpuid.c
+++ b/arch/i386/kernel/cpuid.c
@@ -183,7 +183,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long ac
183 return NOTIFY_OK; 183 return NOTIFY_OK;
184} 184}
185 185
186static struct notifier_block cpuid_class_cpu_notifier = 186static struct notifier_block __cpuinitdata cpuid_class_cpu_notifier =
187{ 187{
188 .notifier_call = cpuid_class_cpu_callback, 188 .notifier_call = cpuid_class_cpu_callback,
189}; 189};
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c
index 21dc1bbb80..48f0f62f78 100644
--- a/arch/i386/kernel/crash.c
+++ b/arch/i386/kernel/crash.c
@@ -120,14 +120,9 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu)
120 return 1; 120 return 1;
121} 121}
122 122
123/*
124 * By using the NMI code instead of a vector we just sneak thru the
125 * word generator coming out with just what we want. AND it does
126 * not matter if clustered_apic_mode is set or not.
127 */
128static void smp_send_nmi_allbutself(void) 123static void smp_send_nmi_allbutself(void)
129{ 124{
130 send_IPI_allbutself(APIC_DM_NMI); 125 send_IPI_allbutself(NMI_VECTOR);
131} 126}
132 127
133static void nmi_shootdown_cpus(void) 128static void nmi_shootdown_cpus(void)
@@ -163,7 +158,7 @@ static void nmi_shootdown_cpus(void)
163void machine_crash_shutdown(struct pt_regs *regs) 158void machine_crash_shutdown(struct pt_regs *regs)
164{ 159{
165 /* This function is only called after the system 160 /* This function is only called after the system
166 * has paniced or is otherwise in a critical state. 161 * has panicked or is otherwise in a critical state.
167 * The minimum amount of code to allow a kexec'd kernel 162 * The minimum amount of code to allow a kexec'd kernel
168 * to run successfully needs to happen here. 163 * to run successfully needs to happen here.
169 * 164 *
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index cfc683f153..fbdb933251 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -48,6 +48,7 @@
48#include <asm/smp.h> 48#include <asm/smp.h>
49#include <asm/page.h> 49#include <asm/page.h>
50#include <asm/desc.h> 50#include <asm/desc.h>
51#include <asm/dwarf2.h>
51#include "irq_vectors.h" 52#include "irq_vectors.h"
52 53
53#define nr_syscalls ((syscall_table_size)/4) 54#define nr_syscalls ((syscall_table_size)/4)
@@ -82,34 +83,76 @@ VM_MASK = 0x00020000
82#define resume_kernel restore_nocheck 83#define resume_kernel restore_nocheck
83#endif 84#endif
84 85
86#ifdef CONFIG_VM86
87#define resume_userspace_sig check_userspace
88#else
89#define resume_userspace_sig resume_userspace
90#endif
91
85#define SAVE_ALL \ 92#define SAVE_ALL \
86 cld; \ 93 cld; \
87 pushl %es; \ 94 pushl %es; \
95 CFI_ADJUST_CFA_OFFSET 4;\
96 /*CFI_REL_OFFSET es, 0;*/\
88 pushl %ds; \ 97 pushl %ds; \
98 CFI_ADJUST_CFA_OFFSET 4;\
99 /*CFI_REL_OFFSET ds, 0;*/\
89 pushl %eax; \ 100 pushl %eax; \
101 CFI_ADJUST_CFA_OFFSET 4;\
102 CFI_REL_OFFSET eax, 0;\
90 pushl %ebp; \ 103 pushl %ebp; \
104 CFI_ADJUST_CFA_OFFSET 4;\
105 CFI_REL_OFFSET ebp, 0;\
91 pushl %edi; \ 106 pushl %edi; \
107 CFI_ADJUST_CFA_OFFSET 4;\
108 CFI_REL_OFFSET edi, 0;\
92 pushl %esi; \ 109 pushl %esi; \
110 CFI_ADJUST_CFA_OFFSET 4;\
111 CFI_REL_OFFSET esi, 0;\
93 pushl %edx; \ 112 pushl %edx; \
113 CFI_ADJUST_CFA_OFFSET 4;\
114 CFI_REL_OFFSET edx, 0;\
94 pushl %ecx; \ 115 pushl %ecx; \
116 CFI_ADJUST_CFA_OFFSET 4;\
117 CFI_REL_OFFSET ecx, 0;\
95 pushl %ebx; \ 118 pushl %ebx; \
119 CFI_ADJUST_CFA_OFFSET 4;\
120 CFI_REL_OFFSET ebx, 0;\
96 movl $(__USER_DS), %edx; \ 121 movl $(__USER_DS), %edx; \
97 movl %edx, %ds; \ 122 movl %edx, %ds; \
98 movl %edx, %es; 123 movl %edx, %es;
99 124
100#define RESTORE_INT_REGS \ 125#define RESTORE_INT_REGS \
101 popl %ebx; \ 126 popl %ebx; \
127 CFI_ADJUST_CFA_OFFSET -4;\
128 CFI_RESTORE ebx;\
102 popl %ecx; \ 129 popl %ecx; \
130 CFI_ADJUST_CFA_OFFSET -4;\
131 CFI_RESTORE ecx;\
103 popl %edx; \ 132 popl %edx; \
133 CFI_ADJUST_CFA_OFFSET -4;\
134 CFI_RESTORE edx;\
104 popl %esi; \ 135 popl %esi; \
136 CFI_ADJUST_CFA_OFFSET -4;\
137 CFI_RESTORE esi;\
105 popl %edi; \ 138 popl %edi; \
139 CFI_ADJUST_CFA_OFFSET -4;\
140 CFI_RESTORE edi;\
106 popl %ebp; \ 141 popl %ebp; \
107 popl %eax 142 CFI_ADJUST_CFA_OFFSET -4;\
143 CFI_RESTORE ebp;\
144 popl %eax; \
145 CFI_ADJUST_CFA_OFFSET -4;\
146 CFI_RESTORE eax
108 147
109#define RESTORE_REGS \ 148#define RESTORE_REGS \
110 RESTORE_INT_REGS; \ 149 RESTORE_INT_REGS; \
1111: popl %ds; \ 1501: popl %ds; \
151 CFI_ADJUST_CFA_OFFSET -4;\
152 /*CFI_RESTORE ds;*/\
1122: popl %es; \ 1532: popl %es; \
154 CFI_ADJUST_CFA_OFFSET -4;\
155 /*CFI_RESTORE es;*/\
113.section .fixup,"ax"; \ 156.section .fixup,"ax"; \
1143: movl $0,(%esp); \ 1573: movl $0,(%esp); \
115 jmp 1b; \ 158 jmp 1b; \
@@ -122,13 +165,43 @@ VM_MASK = 0x00020000
122 .long 2b,4b; \ 165 .long 2b,4b; \
123.previous 166.previous
124 167
168#define RING0_INT_FRAME \
169 CFI_STARTPROC simple;\
170 CFI_DEF_CFA esp, 3*4;\
171 /*CFI_OFFSET cs, -2*4;*/\
172 CFI_OFFSET eip, -3*4
173
174#define RING0_EC_FRAME \
175 CFI_STARTPROC simple;\
176 CFI_DEF_CFA esp, 4*4;\
177 /*CFI_OFFSET cs, -2*4;*/\
178 CFI_OFFSET eip, -3*4
179
180#define RING0_PTREGS_FRAME \
181 CFI_STARTPROC simple;\
182 CFI_DEF_CFA esp, OLDESP-EBX;\
183 /*CFI_OFFSET cs, CS-OLDESP;*/\
184 CFI_OFFSET eip, EIP-OLDESP;\
185 /*CFI_OFFSET es, ES-OLDESP;*/\
186 /*CFI_OFFSET ds, DS-OLDESP;*/\
187 CFI_OFFSET eax, EAX-OLDESP;\
188 CFI_OFFSET ebp, EBP-OLDESP;\
189 CFI_OFFSET edi, EDI-OLDESP;\
190 CFI_OFFSET esi, ESI-OLDESP;\
191 CFI_OFFSET edx, EDX-OLDESP;\
192 CFI_OFFSET ecx, ECX-OLDESP;\
193 CFI_OFFSET ebx, EBX-OLDESP
125 194
126ENTRY(ret_from_fork) 195ENTRY(ret_from_fork)
196 CFI_STARTPROC
127 pushl %eax 197 pushl %eax
198 CFI_ADJUST_CFA_OFFSET -4
128 call schedule_tail 199 call schedule_tail
129 GET_THREAD_INFO(%ebp) 200 GET_THREAD_INFO(%ebp)
130 popl %eax 201 popl %eax
202 CFI_ADJUST_CFA_OFFSET -4
131 jmp syscall_exit 203 jmp syscall_exit
204 CFI_ENDPROC
132 205
133/* 206/*
134 * Return to user mode is not as complex as all this looks, 207 * Return to user mode is not as complex as all this looks,
@@ -139,10 +212,12 @@ ENTRY(ret_from_fork)
139 212
140 # userspace resumption stub bypassing syscall exit tracing 213 # userspace resumption stub bypassing syscall exit tracing
141 ALIGN 214 ALIGN
215 RING0_PTREGS_FRAME
142ret_from_exception: 216ret_from_exception:
143 preempt_stop 217 preempt_stop
144ret_from_intr: 218ret_from_intr:
145 GET_THREAD_INFO(%ebp) 219 GET_THREAD_INFO(%ebp)
220check_userspace:
146 movl EFLAGS(%esp), %eax # mix EFLAGS and CS 221 movl EFLAGS(%esp), %eax # mix EFLAGS and CS
147 movb CS(%esp), %al 222 movb CS(%esp), %al
148 testl $(VM_MASK | 3), %eax 223 testl $(VM_MASK | 3), %eax
@@ -171,20 +246,38 @@ need_resched:
171 call preempt_schedule_irq 246 call preempt_schedule_irq
172 jmp need_resched 247 jmp need_resched
173#endif 248#endif
249 CFI_ENDPROC
174 250
175/* SYSENTER_RETURN points to after the "sysenter" instruction in 251/* SYSENTER_RETURN points to after the "sysenter" instruction in
176 the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */ 252 the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */
177 253
178 # sysenter call handler stub 254 # sysenter call handler stub
179ENTRY(sysenter_entry) 255ENTRY(sysenter_entry)
256 CFI_STARTPROC simple
257 CFI_DEF_CFA esp, 0
258 CFI_REGISTER esp, ebp
180 movl TSS_sysenter_esp0(%esp),%esp 259 movl TSS_sysenter_esp0(%esp),%esp
181sysenter_past_esp: 260sysenter_past_esp:
182 sti 261 sti
183 pushl $(__USER_DS) 262 pushl $(__USER_DS)
263 CFI_ADJUST_CFA_OFFSET 4
264 /*CFI_REL_OFFSET ss, 0*/
184 pushl %ebp 265 pushl %ebp
266 CFI_ADJUST_CFA_OFFSET 4
267 CFI_REL_OFFSET esp, 0
185 pushfl 268 pushfl
269 CFI_ADJUST_CFA_OFFSET 4
186 pushl $(__USER_CS) 270 pushl $(__USER_CS)
187 pushl $SYSENTER_RETURN 271 CFI_ADJUST_CFA_OFFSET 4
272 /*CFI_REL_OFFSET cs, 0*/
273 /*
274 * Push current_thread_info()->sysenter_return to the stack.
275 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
276 * pushed above; +8 corresponds to copy_thread's esp0 setting.
277 */
278 pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
279 CFI_ADJUST_CFA_OFFSET 4
280 CFI_REL_OFFSET eip, 0
188 281
189/* 282/*
190 * Load the potential sixth argument from user stack. 283 * Load the potential sixth argument from user stack.
@@ -199,6 +292,7 @@ sysenter_past_esp:
199.previous 292.previous
200 293
201 pushl %eax 294 pushl %eax
295 CFI_ADJUST_CFA_OFFSET 4
202 SAVE_ALL 296 SAVE_ALL
203 GET_THREAD_INFO(%ebp) 297 GET_THREAD_INFO(%ebp)
204 298
@@ -219,11 +313,14 @@ sysenter_past_esp:
219 xorl %ebp,%ebp 313 xorl %ebp,%ebp
220 sti 314 sti
221 sysexit 315 sysexit
316 CFI_ENDPROC
222 317
223 318
224 # system call handler stub 319 # system call handler stub
225ENTRY(system_call) 320ENTRY(system_call)
321 RING0_INT_FRAME # can't unwind into user space anyway
226 pushl %eax # save orig_eax 322 pushl %eax # save orig_eax
323 CFI_ADJUST_CFA_OFFSET 4
227 SAVE_ALL 324 SAVE_ALL
228 GET_THREAD_INFO(%ebp) 325 GET_THREAD_INFO(%ebp)
229 testl $TF_MASK,EFLAGS(%esp) 326 testl $TF_MASK,EFLAGS(%esp)
@@ -256,10 +353,12 @@ restore_all:
256 movb CS(%esp), %al 353 movb CS(%esp), %al
257 andl $(VM_MASK | (4 << 8) | 3), %eax 354 andl $(VM_MASK | (4 << 8) | 3), %eax
258 cmpl $((4 << 8) | 3), %eax 355 cmpl $((4 << 8) | 3), %eax
356 CFI_REMEMBER_STATE
259 je ldt_ss # returning to user-space with LDT SS 357 je ldt_ss # returning to user-space with LDT SS
260restore_nocheck: 358restore_nocheck:
261 RESTORE_REGS 359 RESTORE_REGS
262 addl $4, %esp 360 addl $4, %esp
361 CFI_ADJUST_CFA_OFFSET -4
2631: iret 3621: iret
264.section .fixup,"ax" 363.section .fixup,"ax"
265iret_exc: 364iret_exc:
@@ -273,6 +372,7 @@ iret_exc:
273 .long 1b,iret_exc 372 .long 1b,iret_exc
274.previous 373.previous
275 374
375 CFI_RESTORE_STATE
276ldt_ss: 376ldt_ss:
277 larl OLDSS(%esp), %eax 377 larl OLDSS(%esp), %eax
278 jnz restore_nocheck 378 jnz restore_nocheck
@@ -285,11 +385,13 @@ ldt_ss:
285 * CPUs, which we can try to work around to make 385 * CPUs, which we can try to work around to make
286 * dosemu and wine happy. */ 386 * dosemu and wine happy. */
287 subl $8, %esp # reserve space for switch16 pointer 387 subl $8, %esp # reserve space for switch16 pointer
388 CFI_ADJUST_CFA_OFFSET 8
288 cli 389 cli
289 movl %esp, %eax 390 movl %esp, %eax
290 /* Set up the 16bit stack frame with switch32 pointer on top, 391 /* Set up the 16bit stack frame with switch32 pointer on top,
291 * and a switch16 pointer on top of the current frame. */ 392 * and a switch16 pointer on top of the current frame. */
292 call setup_x86_bogus_stack 393 call setup_x86_bogus_stack
394 CFI_ADJUST_CFA_OFFSET -8 # frame has moved
293 RESTORE_REGS 395 RESTORE_REGS
294 lss 20+4(%esp), %esp # switch to 16bit stack 396 lss 20+4(%esp), %esp # switch to 16bit stack
2951: iret 3971: iret
@@ -297,9 +399,11 @@ ldt_ss:
297 .align 4 399 .align 4
298 .long 1b,iret_exc 400 .long 1b,iret_exc
299.previous 401.previous
402 CFI_ENDPROC
300 403
301 # perform work that needs to be done immediately before resumption 404 # perform work that needs to be done immediately before resumption
302 ALIGN 405 ALIGN
406 RING0_PTREGS_FRAME # can't unwind into user space anyway
303work_pending: 407work_pending:
304 testb $_TIF_NEED_RESCHED, %cl 408 testb $_TIF_NEED_RESCHED, %cl
305 jz work_notifysig 409 jz work_notifysig
@@ -323,18 +427,20 @@ work_notifysig: # deal with pending signals and
323 # vm86-space 427 # vm86-space
324 xorl %edx, %edx 428 xorl %edx, %edx
325 call do_notify_resume 429 call do_notify_resume
326 jmp resume_userspace 430 jmp resume_userspace_sig
327 431
328 ALIGN 432 ALIGN
329work_notifysig_v86: 433work_notifysig_v86:
330#ifdef CONFIG_VM86 434#ifdef CONFIG_VM86
331 pushl %ecx # save ti_flags for do_notify_resume 435 pushl %ecx # save ti_flags for do_notify_resume
436 CFI_ADJUST_CFA_OFFSET 4
332 call save_v86_state # %eax contains pt_regs pointer 437 call save_v86_state # %eax contains pt_regs pointer
333 popl %ecx 438 popl %ecx
439 CFI_ADJUST_CFA_OFFSET -4
334 movl %eax, %esp 440 movl %eax, %esp
335 xorl %edx, %edx 441 xorl %edx, %edx
336 call do_notify_resume 442 call do_notify_resume
337 jmp resume_userspace 443 jmp resume_userspace_sig
338#endif 444#endif
339 445
340 # perform syscall exit tracing 446 # perform syscall exit tracing
@@ -363,19 +469,21 @@ syscall_exit_work:
363 movl $1, %edx 469 movl $1, %edx
364 call do_syscall_trace 470 call do_syscall_trace
365 jmp resume_userspace 471 jmp resume_userspace
472 CFI_ENDPROC
366 473
367 ALIGN 474 RING0_INT_FRAME # can't unwind into user space anyway
368syscall_fault: 475syscall_fault:
369 pushl %eax # save orig_eax 476 pushl %eax # save orig_eax
477 CFI_ADJUST_CFA_OFFSET 4
370 SAVE_ALL 478 SAVE_ALL
371 GET_THREAD_INFO(%ebp) 479 GET_THREAD_INFO(%ebp)
372 movl $-EFAULT,EAX(%esp) 480 movl $-EFAULT,EAX(%esp)
373 jmp resume_userspace 481 jmp resume_userspace
374 482
375 ALIGN
376syscall_badsys: 483syscall_badsys:
377 movl $-ENOSYS,EAX(%esp) 484 movl $-ENOSYS,EAX(%esp)
378 jmp resume_userspace 485 jmp resume_userspace
486 CFI_ENDPROC
379 487
380#define FIXUP_ESPFIX_STACK \ 488#define FIXUP_ESPFIX_STACK \
381 movl %esp, %eax; \ 489 movl %esp, %eax; \
@@ -387,16 +495,21 @@ syscall_badsys:
387 movl %eax, %esp; 495 movl %eax, %esp;
388#define UNWIND_ESPFIX_STACK \ 496#define UNWIND_ESPFIX_STACK \
389 pushl %eax; \ 497 pushl %eax; \
498 CFI_ADJUST_CFA_OFFSET 4; \
390 movl %ss, %eax; \ 499 movl %ss, %eax; \
391 /* see if on 16bit stack */ \ 500 /* see if on 16bit stack */ \
392 cmpw $__ESPFIX_SS, %ax; \ 501 cmpw $__ESPFIX_SS, %ax; \
393 jne 28f; \ 502 je 28f; \
394 movl $__KERNEL_DS, %edx; \ 50327: popl %eax; \
395 movl %edx, %ds; \ 504 CFI_ADJUST_CFA_OFFSET -4; \
396 movl %edx, %es; \ 505.section .fixup,"ax"; \
50628: movl $__KERNEL_DS, %eax; \
507 movl %eax, %ds; \
508 movl %eax, %es; \
397 /* switch to 32bit stack */ \ 509 /* switch to 32bit stack */ \
398 FIXUP_ESPFIX_STACK \ 510 FIXUP_ESPFIX_STACK; \
39928: popl %eax; 511 jmp 27b; \
512.previous
400 513
401/* 514/*
402 * Build the entry stubs and pointer table with 515 * Build the entry stubs and pointer table with
@@ -408,9 +521,14 @@ ENTRY(interrupt)
408 521
409vector=0 522vector=0
410ENTRY(irq_entries_start) 523ENTRY(irq_entries_start)
524 RING0_INT_FRAME
411.rept NR_IRQS 525.rept NR_IRQS
412 ALIGN 526 ALIGN
4131: pushl $vector-256 527 .if vector
528 CFI_ADJUST_CFA_OFFSET -4
529 .endif
5301: pushl $~(vector)
531 CFI_ADJUST_CFA_OFFSET 4
414 jmp common_interrupt 532 jmp common_interrupt
415.data 533.data
416 .long 1b 534 .long 1b
@@ -424,60 +542,99 @@ common_interrupt:
424 movl %esp,%eax 542 movl %esp,%eax
425 call do_IRQ 543 call do_IRQ
426 jmp ret_from_intr 544 jmp ret_from_intr
545 CFI_ENDPROC
427 546
428#define BUILD_INTERRUPT(name, nr) \ 547#define BUILD_INTERRUPT(name, nr) \
429ENTRY(name) \ 548ENTRY(name) \
430 pushl $nr-256; \ 549 RING0_INT_FRAME; \
431 SAVE_ALL \ 550 pushl $~(nr); \
551 CFI_ADJUST_CFA_OFFSET 4; \
552 SAVE_ALL; \
432 movl %esp,%eax; \ 553 movl %esp,%eax; \
433 call smp_/**/name; \ 554 call smp_/**/name; \
434 jmp ret_from_intr; 555 jmp ret_from_intr; \
556 CFI_ENDPROC
435 557
436/* The include is where all of the SMP etc. interrupts come from */ 558/* The include is where all of the SMP etc. interrupts come from */
437#include "entry_arch.h" 559#include "entry_arch.h"
438 560
439ENTRY(divide_error) 561ENTRY(divide_error)
562 RING0_INT_FRAME
440 pushl $0 # no error code 563 pushl $0 # no error code
564 CFI_ADJUST_CFA_OFFSET 4
441 pushl $do_divide_error 565 pushl $do_divide_error
566 CFI_ADJUST_CFA_OFFSET 4
442 ALIGN 567 ALIGN
443error_code: 568error_code:
444 pushl %ds 569 pushl %ds
570 CFI_ADJUST_CFA_OFFSET 4
571 /*CFI_REL_OFFSET ds, 0*/
445 pushl %eax 572 pushl %eax
573 CFI_ADJUST_CFA_OFFSET 4
574 CFI_REL_OFFSET eax, 0
446 xorl %eax, %eax 575 xorl %eax, %eax
447 pushl %ebp 576 pushl %ebp
577 CFI_ADJUST_CFA_OFFSET 4
578 CFI_REL_OFFSET ebp, 0
448 pushl %edi 579 pushl %edi
580 CFI_ADJUST_CFA_OFFSET 4
581 CFI_REL_OFFSET edi, 0
449 pushl %esi 582 pushl %esi
583 CFI_ADJUST_CFA_OFFSET 4
584 CFI_REL_OFFSET esi, 0
450 pushl %edx 585 pushl %edx
586 CFI_ADJUST_CFA_OFFSET 4
587 CFI_REL_OFFSET edx, 0
451 decl %eax # eax = -1 588 decl %eax # eax = -1
452 pushl %ecx 589 pushl %ecx
590 CFI_ADJUST_CFA_OFFSET 4
591 CFI_REL_OFFSET ecx, 0
453 pushl %ebx 592 pushl %ebx
593 CFI_ADJUST_CFA_OFFSET 4
594 CFI_REL_OFFSET ebx, 0
454 cld 595 cld
455 pushl %es 596 pushl %es
597 CFI_ADJUST_CFA_OFFSET 4
598 /*CFI_REL_OFFSET es, 0*/
456 UNWIND_ESPFIX_STACK 599 UNWIND_ESPFIX_STACK
457 popl %ecx 600 popl %ecx
601 CFI_ADJUST_CFA_OFFSET -4
602 /*CFI_REGISTER es, ecx*/
458 movl ES(%esp), %edi # get the function address 603 movl ES(%esp), %edi # get the function address
459 movl ORIG_EAX(%esp), %edx # get the error code 604 movl ORIG_EAX(%esp), %edx # get the error code
460 movl %eax, ORIG_EAX(%esp) 605 movl %eax, ORIG_EAX(%esp)
461 movl %ecx, ES(%esp) 606 movl %ecx, ES(%esp)
607 /*CFI_REL_OFFSET es, ES*/
462 movl $(__USER_DS), %ecx 608 movl $(__USER_DS), %ecx
463 movl %ecx, %ds 609 movl %ecx, %ds
464 movl %ecx, %es 610 movl %ecx, %es
465 movl %esp,%eax # pt_regs pointer 611 movl %esp,%eax # pt_regs pointer
466 call *%edi 612 call *%edi
467 jmp ret_from_exception 613 jmp ret_from_exception
614 CFI_ENDPROC
468 615
469ENTRY(coprocessor_error) 616ENTRY(coprocessor_error)
617 RING0_INT_FRAME
470 pushl $0 618 pushl $0
619 CFI_ADJUST_CFA_OFFSET 4
471 pushl $do_coprocessor_error 620 pushl $do_coprocessor_error
621 CFI_ADJUST_CFA_OFFSET 4
472 jmp error_code 622 jmp error_code
623 CFI_ENDPROC
473 624
474ENTRY(simd_coprocessor_error) 625ENTRY(simd_coprocessor_error)
626 RING0_INT_FRAME
475 pushl $0 627 pushl $0
628 CFI_ADJUST_CFA_OFFSET 4
476 pushl $do_simd_coprocessor_error 629 pushl $do_simd_coprocessor_error
630 CFI_ADJUST_CFA_OFFSET 4
477 jmp error_code 631 jmp error_code
632 CFI_ENDPROC
478 633
479ENTRY(device_not_available) 634ENTRY(device_not_available)
635 RING0_INT_FRAME
480 pushl $-1 # mark this as an int 636 pushl $-1 # mark this as an int
637 CFI_ADJUST_CFA_OFFSET 4
481 SAVE_ALL 638 SAVE_ALL
482 movl %cr0, %eax 639 movl %cr0, %eax
483 testl $0x4, %eax # EM (math emulation bit) 640 testl $0x4, %eax # EM (math emulation bit)
@@ -487,9 +644,12 @@ ENTRY(device_not_available)
487 jmp ret_from_exception 644 jmp ret_from_exception
488device_not_available_emulate: 645device_not_available_emulate:
489 pushl $0 # temporary storage for ORIG_EIP 646 pushl $0 # temporary storage for ORIG_EIP
647 CFI_ADJUST_CFA_OFFSET 4
490 call math_emulate 648 call math_emulate
491 addl $4, %esp 649 addl $4, %esp
650 CFI_ADJUST_CFA_OFFSET -4
492 jmp ret_from_exception 651 jmp ret_from_exception
652 CFI_ENDPROC
493 653
494/* 654/*
495 * Debug traps and NMI can happen at the one SYSENTER instruction 655 * Debug traps and NMI can happen at the one SYSENTER instruction
@@ -514,16 +674,19 @@ label: \
514 pushl $sysenter_past_esp 674 pushl $sysenter_past_esp
515 675
516KPROBE_ENTRY(debug) 676KPROBE_ENTRY(debug)
677 RING0_INT_FRAME
517 cmpl $sysenter_entry,(%esp) 678 cmpl $sysenter_entry,(%esp)
518 jne debug_stack_correct 679 jne debug_stack_correct
519 FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn) 680 FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
520debug_stack_correct: 681debug_stack_correct:
521 pushl $-1 # mark this as an int 682 pushl $-1 # mark this as an int
683 CFI_ADJUST_CFA_OFFSET 4
522 SAVE_ALL 684 SAVE_ALL
523 xorl %edx,%edx # error code 0 685 xorl %edx,%edx # error code 0
524 movl %esp,%eax # pt_regs pointer 686 movl %esp,%eax # pt_regs pointer
525 call do_debug 687 call do_debug
526 jmp ret_from_exception 688 jmp ret_from_exception
689 CFI_ENDPROC
527 .previous .text 690 .previous .text
528/* 691/*
529 * NMI is doubly nasty. It can happen _while_ we're handling 692 * NMI is doubly nasty. It can happen _while_ we're handling
@@ -534,14 +697,18 @@ debug_stack_correct:
534 * fault happened on the sysenter path. 697 * fault happened on the sysenter path.
535 */ 698 */
536ENTRY(nmi) 699ENTRY(nmi)
700 RING0_INT_FRAME
537 pushl %eax 701 pushl %eax
702 CFI_ADJUST_CFA_OFFSET 4
538 movl %ss, %eax 703 movl %ss, %eax
539 cmpw $__ESPFIX_SS, %ax 704 cmpw $__ESPFIX_SS, %ax
540 popl %eax 705 popl %eax
706 CFI_ADJUST_CFA_OFFSET -4
541 je nmi_16bit_stack 707 je nmi_16bit_stack
542 cmpl $sysenter_entry,(%esp) 708 cmpl $sysenter_entry,(%esp)
543 je nmi_stack_fixup 709 je nmi_stack_fixup
544 pushl %eax 710 pushl %eax
711 CFI_ADJUST_CFA_OFFSET 4
545 movl %esp,%eax 712 movl %esp,%eax
546 /* Do not access memory above the end of our stack page, 713 /* Do not access memory above the end of our stack page,
547 * it might not exist. 714 * it might not exist.
@@ -549,16 +716,19 @@ ENTRY(nmi)
549 andl $(THREAD_SIZE-1),%eax 716 andl $(THREAD_SIZE-1),%eax
550 cmpl $(THREAD_SIZE-20),%eax 717 cmpl $(THREAD_SIZE-20),%eax
551 popl %eax 718 popl %eax
719 CFI_ADJUST_CFA_OFFSET -4
552 jae nmi_stack_correct 720 jae nmi_stack_correct
553 cmpl $sysenter_entry,12(%esp) 721 cmpl $sysenter_entry,12(%esp)
554 je nmi_debug_stack_check 722 je nmi_debug_stack_check
555nmi_stack_correct: 723nmi_stack_correct:
556 pushl %eax 724 pushl %eax
725 CFI_ADJUST_CFA_OFFSET 4
557 SAVE_ALL 726 SAVE_ALL
558 xorl %edx,%edx # zero error code 727 xorl %edx,%edx # zero error code
559 movl %esp,%eax # pt_regs pointer 728 movl %esp,%eax # pt_regs pointer
560 call do_nmi 729 call do_nmi
561 jmp restore_all 730 jmp restore_all
731 CFI_ENDPROC
562 732
563nmi_stack_fixup: 733nmi_stack_fixup:
564 FIX_STACK(12,nmi_stack_correct, 1) 734 FIX_STACK(12,nmi_stack_correct, 1)
@@ -574,94 +744,177 @@ nmi_debug_stack_check:
574 jmp nmi_stack_correct 744 jmp nmi_stack_correct
575 745
576nmi_16bit_stack: 746nmi_16bit_stack:
747 RING0_INT_FRAME
577 /* create the pointer to lss back */ 748 /* create the pointer to lss back */
578 pushl %ss 749 pushl %ss
750 CFI_ADJUST_CFA_OFFSET 4
579 pushl %esp 751 pushl %esp
752 CFI_ADJUST_CFA_OFFSET 4
580 movzwl %sp, %esp 753 movzwl %sp, %esp
581 addw $4, (%esp) 754 addw $4, (%esp)
582 /* copy the iret frame of 12 bytes */ 755 /* copy the iret frame of 12 bytes */
583 .rept 3 756 .rept 3
584 pushl 16(%esp) 757 pushl 16(%esp)
758 CFI_ADJUST_CFA_OFFSET 4
585 .endr 759 .endr
586 pushl %eax 760 pushl %eax
761 CFI_ADJUST_CFA_OFFSET 4
587 SAVE_ALL 762 SAVE_ALL
588 FIXUP_ESPFIX_STACK # %eax == %esp 763 FIXUP_ESPFIX_STACK # %eax == %esp
764 CFI_ADJUST_CFA_OFFSET -20 # the frame has now moved
589 xorl %edx,%edx # zero error code 765 xorl %edx,%edx # zero error code
590 call do_nmi 766 call do_nmi
591 RESTORE_REGS 767 RESTORE_REGS
592 lss 12+4(%esp), %esp # back to 16bit stack 768 lss 12+4(%esp), %esp # back to 16bit stack
5931: iret 7691: iret
770 CFI_ENDPROC
594.section __ex_table,"a" 771.section __ex_table,"a"
595 .align 4 772 .align 4
596 .long 1b,iret_exc 773 .long 1b,iret_exc
597.previous 774.previous
598 775
599KPROBE_ENTRY(int3) 776KPROBE_ENTRY(int3)
777 RING0_INT_FRAME
600 pushl $-1 # mark this as an int 778 pushl $-1 # mark this as an int
779 CFI_ADJUST_CFA_OFFSET 4
601 SAVE_ALL 780 SAVE_ALL
602 xorl %edx,%edx # zero error code 781 xorl %edx,%edx # zero error code
603 movl %esp,%eax # pt_regs pointer 782 movl %esp,%eax # pt_regs pointer
604 call do_int3 783 call do_int3
605 jmp ret_from_exception 784 jmp ret_from_exception
785 CFI_ENDPROC
606 .previous .text 786 .previous .text
607 787
608ENTRY(overflow) 788ENTRY(overflow)
789 RING0_INT_FRAME
609 pushl $0 790 pushl $0
791 CFI_ADJUST_CFA_OFFSET 4
610 pushl $do_overflow 792 pushl $do_overflow
793 CFI_ADJUST_CFA_OFFSET 4
611 jmp error_code 794 jmp error_code
795 CFI_ENDPROC
612 796
613ENTRY(bounds) 797ENTRY(bounds)
798 RING0_INT_FRAME
614 pushl $0 799 pushl $0
800 CFI_ADJUST_CFA_OFFSET 4
615 pushl $do_bounds 801 pushl $do_bounds
802 CFI_ADJUST_CFA_OFFSET 4
616 jmp error_code 803 jmp error_code
804 CFI_ENDPROC
617 805
618ENTRY(invalid_op) 806ENTRY(invalid_op)
807 RING0_INT_FRAME
619 pushl $0 808 pushl $0
809 CFI_ADJUST_CFA_OFFSET 4
620 pushl $do_invalid_op 810 pushl $do_invalid_op
811 CFI_ADJUST_CFA_OFFSET 4
621 jmp error_code 812 jmp error_code
813 CFI_ENDPROC
622 814
623ENTRY(coprocessor_segment_overrun) 815ENTRY(coprocessor_segment_overrun)
816 RING0_INT_FRAME
624 pushl $0 817 pushl $0
818 CFI_ADJUST_CFA_OFFSET 4
625 pushl $do_coprocessor_segment_overrun 819 pushl $do_coprocessor_segment_overrun
820 CFI_ADJUST_CFA_OFFSET 4
626 jmp error_code 821 jmp error_code
822 CFI_ENDPROC
627 823
628ENTRY(invalid_TSS) 824ENTRY(invalid_TSS)
825 RING0_EC_FRAME
629 pushl $do_invalid_TSS 826 pushl $do_invalid_TSS
827 CFI_ADJUST_CFA_OFFSET 4
630 jmp error_code 828 jmp error_code
829 CFI_ENDPROC
631 830
632ENTRY(segment_not_present) 831ENTRY(segment_not_present)
832 RING0_EC_FRAME
633 pushl $do_segment_not_present 833 pushl $do_segment_not_present
834 CFI_ADJUST_CFA_OFFSET 4
634 jmp error_code 835 jmp error_code
836 CFI_ENDPROC
635 837
636ENTRY(stack_segment) 838ENTRY(stack_segment)
839 RING0_EC_FRAME
637 pushl $do_stack_segment 840 pushl $do_stack_segment
841 CFI_ADJUST_CFA_OFFSET 4
638 jmp error_code 842 jmp error_code
843 CFI_ENDPROC
639 844
640KPROBE_ENTRY(general_protection) 845KPROBE_ENTRY(general_protection)
846 RING0_EC_FRAME
641 pushl $do_general_protection 847 pushl $do_general_protection
848 CFI_ADJUST_CFA_OFFSET 4
642 jmp error_code 849 jmp error_code
850 CFI_ENDPROC
643 .previous .text 851 .previous .text
644 852
645ENTRY(alignment_check) 853ENTRY(alignment_check)
854 RING0_EC_FRAME
646 pushl $do_alignment_check 855 pushl $do_alignment_check
856 CFI_ADJUST_CFA_OFFSET 4
647 jmp error_code 857 jmp error_code
858 CFI_ENDPROC
648 859
649KPROBE_ENTRY(page_fault) 860KPROBE_ENTRY(page_fault)
861 RING0_EC_FRAME
650 pushl $do_page_fault 862 pushl $do_page_fault
863 CFI_ADJUST_CFA_OFFSET 4
651 jmp error_code 864 jmp error_code
865 CFI_ENDPROC
652 .previous .text 866 .previous .text
653 867
654#ifdef CONFIG_X86_MCE 868#ifdef CONFIG_X86_MCE
655ENTRY(machine_check) 869ENTRY(machine_check)
870 RING0_INT_FRAME
656 pushl $0 871 pushl $0
872 CFI_ADJUST_CFA_OFFSET 4
657 pushl machine_check_vector 873 pushl machine_check_vector
874 CFI_ADJUST_CFA_OFFSET 4
658 jmp error_code 875 jmp error_code
876 CFI_ENDPROC
659#endif 877#endif
660 878
661ENTRY(spurious_interrupt_bug) 879ENTRY(spurious_interrupt_bug)
880 RING0_INT_FRAME
662 pushl $0 881 pushl $0
882 CFI_ADJUST_CFA_OFFSET 4
663 pushl $do_spurious_interrupt_bug 883 pushl $do_spurious_interrupt_bug
884 CFI_ADJUST_CFA_OFFSET 4
664 jmp error_code 885 jmp error_code
886 CFI_ENDPROC
887
888#ifdef CONFIG_STACK_UNWIND
889ENTRY(arch_unwind_init_running)
890 CFI_STARTPROC
891 movl 4(%esp), %edx
892 movl (%esp), %ecx
893 leal 4(%esp), %eax
894 movl %ebx, EBX(%edx)
895 xorl %ebx, %ebx
896 movl %ebx, ECX(%edx)
897 movl %ebx, EDX(%edx)
898 movl %esi, ESI(%edx)
899 movl %edi, EDI(%edx)
900 movl %ebp, EBP(%edx)
901 movl %ebx, EAX(%edx)
902 movl $__USER_DS, DS(%edx)
903 movl $__USER_DS, ES(%edx)
904 movl %ebx, ORIG_EAX(%edx)
905 movl %ecx, EIP(%edx)
906 movl 12(%esp), %ecx
907 movl $__KERNEL_CS, CS(%edx)
908 movl %ebx, EFLAGS(%edx)
909 movl %eax, OLDESP(%edx)
910 movl 8(%esp), %eax
911 movl %ecx, 8(%esp)
912 movl EBX(%edx), %ebx
913 movl $__KERNEL_DS, OLDSS(%edx)
914 jmpl *%eax
915 CFI_ENDPROC
916ENDPROC(arch_unwind_init_running)
917#endif
665 918
666.section .rodata,"a" 919.section .rodata,"a"
667#include "syscall_table.S" 920#include "syscall_table.S"
diff --git a/arch/i386/kernel/hpet.c b/arch/i386/kernel/hpet.c
new file mode 100644
index 0000000000..c6737c3581
--- /dev/null
+++ b/arch/i386/kernel/hpet.c
@@ -0,0 +1,67 @@
1#include <linux/clocksource.h>
2#include <linux/errno.h>
3#include <linux/hpet.h>
4#include <linux/init.h>
5
6#include <asm/hpet.h>
7#include <asm/io.h>
8
9#define HPET_MASK CLOCKSOURCE_MASK(32)
10#define HPET_SHIFT 22
11
12/* FSEC = 10^-15 NSEC = 10^-9 */
13#define FSEC_PER_NSEC 1000000
14
15static void *hpet_ptr;
16
17static cycle_t read_hpet(void)
18{
19 return (cycle_t)readl(hpet_ptr);
20}
21
22static struct clocksource clocksource_hpet = {
23 .name = "hpet",
24 .rating = 250,
25 .read = read_hpet,
26 .mask = HPET_MASK,
27 .mult = 0, /* set below */
28 .shift = HPET_SHIFT,
29 .is_continuous = 1,
30};
31
32static int __init init_hpet_clocksource(void)
33{
34 unsigned long hpet_period;
35 void __iomem* hpet_base;
36 u64 tmp;
37
38 if (!hpet_address)
39 return -ENODEV;
40
41 /* calculate the hpet address: */
42 hpet_base =
43 (void __iomem*)ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
44 hpet_ptr = hpet_base + HPET_COUNTER;
45
46 /* calculate the frequency: */
47 hpet_period = readl(hpet_base + HPET_PERIOD);
48
49 /*
50 * hpet period is in femto seconds per cycle
51 * so we need to convert this to ns/cyc units
52 * aproximated by mult/2^shift
53 *
54 * fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift
55 * fsec/cyc * 1ns/1000000fsec * 2^shift = mult
56 * fsec/cyc * 2^shift * 1nsec/1000000fsec = mult
57 * (fsec/cyc << shift)/1000000 = mult
58 * (hpet_period << shift)/FSEC_PER_NSEC = mult
59 */
60 tmp = (u64)hpet_period << HPET_SHIFT;
61 do_div(tmp, FSEC_PER_NSEC);
62 clocksource_hpet.mult = (u32)tmp;
63
64 return clocksource_register(&clocksource_hpet);
65}
66
67module_init(init_hpet_clocksource);
diff --git a/arch/i386/kernel/i8253.c b/arch/i386/kernel/i8253.c
new file mode 100644
index 0000000000..477b24daff
--- /dev/null
+++ b/arch/i386/kernel/i8253.c
@@ -0,0 +1,118 @@
1/*
2 * i8253.c 8253/PIT functions
3 *
4 */
5#include <linux/clocksource.h>
6#include <linux/spinlock.h>
7#include <linux/jiffies.h>
8#include <linux/sysdev.h>
9#include <linux/module.h>
10#include <linux/init.h>
11
12#include <asm/smp.h>
13#include <asm/delay.h>
14#include <asm/i8253.h>
15#include <asm/io.h>
16
17#include "io_ports.h"
18
19DEFINE_SPINLOCK(i8253_lock);
20EXPORT_SYMBOL(i8253_lock);
21
22void setup_pit_timer(void)
23{
24 unsigned long flags;
25
26 spin_lock_irqsave(&i8253_lock, flags);
27 outb_p(0x34,PIT_MODE); /* binary, mode 2, LSB/MSB, ch 0 */
28 udelay(10);
29 outb_p(LATCH & 0xff , PIT_CH0); /* LSB */
30 udelay(10);
31 outb(LATCH >> 8 , PIT_CH0); /* MSB */
32 spin_unlock_irqrestore(&i8253_lock, flags);
33}
34
35/*
36 * Since the PIT overflows every tick, its not very useful
37 * to just read by itself. So use jiffies to emulate a free
38 * running counter:
39 */
40static cycle_t pit_read(void)
41{
42 unsigned long flags;
43 int count;
44 u32 jifs;
45 static int old_count;
46 static u32 old_jifs;
47
48 spin_lock_irqsave(&i8253_lock, flags);
49 /*
50 * Although our caller may have the read side of xtime_lock,
51 * this is now a seqlock, and we are cheating in this routine
52 * by having side effects on state that we cannot undo if
53 * there is a collision on the seqlock and our caller has to
54 * retry. (Namely, old_jifs and old_count.) So we must treat
55 * jiffies as volatile despite the lock. We read jiffies
56 * before latching the timer count to guarantee that although
57 * the jiffies value might be older than the count (that is,
58 * the counter may underflow between the last point where
59 * jiffies was incremented and the point where we latch the
60 * count), it cannot be newer.
61 */
62 jifs = jiffies;
63 outb_p(0x00, PIT_MODE); /* latch the count ASAP */
64 count = inb_p(PIT_CH0); /* read the latched count */
65 count |= inb_p(PIT_CH0) << 8;
66
67 /* VIA686a test code... reset the latch if count > max + 1 */
68 if (count > LATCH) {
69 outb_p(0x34, PIT_MODE);
70 outb_p(LATCH & 0xff, PIT_CH0);
71 outb(LATCH >> 8, PIT_CH0);
72 count = LATCH - 1;
73 }
74
75 /*
76 * It's possible for count to appear to go the wrong way for a
77 * couple of reasons:
78 *
79 * 1. The timer counter underflows, but we haven't handled the
80 * resulting interrupt and incremented jiffies yet.
81 * 2. Hardware problem with the timer, not giving us continuous time,
82 * the counter does small "jumps" upwards on some Pentium systems,
83 * (see c't 95/10 page 335 for Neptun bug.)
84 *
85 * Previous attempts to handle these cases intelligently were
86 * buggy, so we just do the simple thing now.
87 */
88 if (count > old_count && jifs == old_jifs) {
89 count = old_count;
90 }
91 old_count = count;
92 old_jifs = jifs;
93
94 spin_unlock_irqrestore(&i8253_lock, flags);
95
96 count = (LATCH - 1) - count;
97
98 return (cycle_t)(jifs * LATCH) + count;
99}
100
101static struct clocksource clocksource_pit = {
102 .name = "pit",
103 .rating = 110,
104 .read = pit_read,
105 .mask = CLOCKSOURCE_MASK(32),
106 .mult = 0,
107 .shift = 20,
108};
109
110static int __init init_pit_clocksource(void)
111{
112 if (num_possible_cpus() > 4) /* PIT does not scale! */
113 return 0;
114
115 clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE, 20);
116 return clocksource_register(&clocksource_pit);
117}
118module_init(init_pit_clocksource);
diff --git a/arch/i386/kernel/i8259.c b/arch/i386/kernel/i8259.c
index b7636b96e1..c1a42feba2 100644
--- a/arch/i386/kernel/i8259.c
+++ b/arch/i386/kernel/i8259.c
@@ -175,7 +175,7 @@ static void mask_and_ack_8259A(unsigned int irq)
175 * Lightweight spurious IRQ detection. We do not want 175 * Lightweight spurious IRQ detection. We do not want
176 * to overdo spurious IRQ handling - it's usually a sign 176 * to overdo spurious IRQ handling - it's usually a sign
177 * of hardware problems, so we only do the checks we can 177 * of hardware problems, so we only do the checks we can
178 * do without slowing down good hardware unnecesserily. 178 * do without slowing down good hardware unnecessarily.
179 * 179 *
180 * Note that IRQ7 and IRQ15 (the two spurious IRQs 180 * Note that IRQ7 and IRQ15 (the two spurious IRQs
181 * usually resulting from the 8259A-1|2 PICs) occur 181 * usually resulting from the 8259A-1|2 PICs) occur
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index a62df3e764..72ae414e4d 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -38,6 +38,7 @@
38#include <asm/desc.h> 38#include <asm/desc.h>
39#include <asm/timer.h> 39#include <asm/timer.h>
40#include <asm/i8259.h> 40#include <asm/i8259.h>
41#include <asm/nmi.h>
41 42
42#include <mach_apic.h> 43#include <mach_apic.h>
43 44
@@ -50,6 +51,7 @@ atomic_t irq_mis_count;
50static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; 51static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
51 52
52static DEFINE_SPINLOCK(ioapic_lock); 53static DEFINE_SPINLOCK(ioapic_lock);
54static DEFINE_SPINLOCK(vector_lock);
53 55
54int timer_over_8254 __initdata = 1; 56int timer_over_8254 __initdata = 1;
55 57
@@ -1161,10 +1163,17 @@ u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
1161int assign_irq_vector(int irq) 1163int assign_irq_vector(int irq)
1162{ 1164{
1163 static int current_vector = FIRST_DEVICE_VECTOR, offset = 0; 1165 static int current_vector = FIRST_DEVICE_VECTOR, offset = 0;
1166 unsigned long flags;
1167 int vector;
1168
1169 BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
1164 1170
1165 BUG_ON(irq >= NR_IRQ_VECTORS); 1171 spin_lock_irqsave(&vector_lock, flags);
1166 if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) 1172
1173 if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
1174 spin_unlock_irqrestore(&vector_lock, flags);
1167 return IO_APIC_VECTOR(irq); 1175 return IO_APIC_VECTOR(irq);
1176 }
1168next: 1177next:
1169 current_vector += 8; 1178 current_vector += 8;
1170 if (current_vector == SYSCALL_VECTOR) 1179 if (current_vector == SYSCALL_VECTOR)
@@ -1172,16 +1181,21 @@ next:
1172 1181
1173 if (current_vector >= FIRST_SYSTEM_VECTOR) { 1182 if (current_vector >= FIRST_SYSTEM_VECTOR) {
1174 offset++; 1183 offset++;
1175 if (!(offset%8)) 1184 if (!(offset%8)) {
1185 spin_unlock_irqrestore(&vector_lock, flags);
1176 return -ENOSPC; 1186 return -ENOSPC;
1187 }
1177 current_vector = FIRST_DEVICE_VECTOR + offset; 1188 current_vector = FIRST_DEVICE_VECTOR + offset;
1178 } 1189 }
1179 1190
1180 vector_irq[current_vector] = irq; 1191 vector = current_vector;
1192 vector_irq[vector] = irq;
1181 if (irq != AUTO_ASSIGN) 1193 if (irq != AUTO_ASSIGN)
1182 IO_APIC_VECTOR(irq) = current_vector; 1194 IO_APIC_VECTOR(irq) = vector;
1183 1195
1184 return current_vector; 1196 spin_unlock_irqrestore(&vector_lock, flags);
1197
1198 return vector;
1185} 1199}
1186 1200
1187static struct hw_interrupt_type ioapic_level_type; 1201static struct hw_interrupt_type ioapic_level_type;
@@ -1193,21 +1207,14 @@ static struct hw_interrupt_type ioapic_edge_type;
1193 1207
1194static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger) 1208static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger)
1195{ 1209{
1196 if (use_pci_vector() && !platform_legacy_irq(irq)) { 1210 unsigned idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq;
1197 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || 1211
1198 trigger == IOAPIC_LEVEL) 1212 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
1199 irq_desc[vector].handler = &ioapic_level_type; 1213 trigger == IOAPIC_LEVEL)
1200 else 1214 irq_desc[idx].handler = &ioapic_level_type;
1201 irq_desc[vector].handler = &ioapic_edge_type; 1215 else
1202 set_intr_gate(vector, interrupt[vector]); 1216 irq_desc[idx].handler = &ioapic_edge_type;
1203 } else { 1217 set_intr_gate(vector, interrupt[idx]);
1204 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
1205 trigger == IOAPIC_LEVEL)
1206 irq_desc[irq].handler = &ioapic_level_type;
1207 else
1208 irq_desc[irq].handler = &ioapic_edge_type;
1209 set_intr_gate(vector, interrupt[irq]);
1210 }
1211} 1218}
1212 1219
1213static void __init setup_IO_APIC_irqs(void) 1220static void __init setup_IO_APIC_irqs(void)
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index 49ce4c31b7..c703bc7b08 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -53,8 +53,8 @@ static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
53 */ 53 */
54fastcall unsigned int do_IRQ(struct pt_regs *regs) 54fastcall unsigned int do_IRQ(struct pt_regs *regs)
55{ 55{
56 /* high bits used in ret_from_ code */ 56 /* high bit used in ret_from_ code */
57 int irq = regs->orig_eax & 0xff; 57 int irq = ~regs->orig_eax;
58#ifdef CONFIG_4KSTACKS 58#ifdef CONFIG_4KSTACKS
59 union irq_ctx *curctx, *irqctx; 59 union irq_ctx *curctx, *irqctx;
60 u32 *isp; 60 u32 *isp;
@@ -100,8 +100,8 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
100 * softirq checks work in the hardirq context. 100 * softirq checks work in the hardirq context.
101 */ 101 */
102 irqctx->tinfo.preempt_count = 102 irqctx->tinfo.preempt_count =
103 irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK | 103 (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
104 curctx->tinfo.preempt_count & SOFTIRQ_MASK; 104 (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
105 105
106 asm volatile( 106 asm volatile(
107 " xchgl %%ebx,%%esp \n" 107 " xchgl %%ebx,%%esp \n"
@@ -227,7 +227,7 @@ int show_interrupts(struct seq_file *p, void *v)
227 if (i == 0) { 227 if (i == 0) {
228 seq_printf(p, " "); 228 seq_printf(p, " ");
229 for_each_online_cpu(j) 229 for_each_online_cpu(j)
230 seq_printf(p, "CPU%d ",j); 230 seq_printf(p, "CPU%-8d",j);
231 seq_putc(p, '\n'); 231 seq_putc(p, '\n');
232 } 232 }
233 233
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 395a9a6dff..727e419ad7 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -57,34 +57,85 @@ static __always_inline void set_jmp_op(void *from, void *to)
57/* 57/*
58 * returns non-zero if opcodes can be boosted. 58 * returns non-zero if opcodes can be boosted.
59 */ 59 */
60static __always_inline int can_boost(kprobe_opcode_t opcode) 60static __always_inline int can_boost(kprobe_opcode_t *opcodes)
61{ 61{
62 switch (opcode & 0xf0 ) { 62#define W(row,b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,ba,bb,bc,bd,be,bf) \
63 (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \
64 (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) | \
65 (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) | \
66 (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf)) \
67 << (row % 32))
68 /*
69 * Undefined/reserved opcodes, conditional jump, Opcode Extension
70 * Groups, and some special opcodes can not be boost.
71 */
72 static const unsigned long twobyte_is_boostable[256 / 32] = {
73 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
74 /* ------------------------------- */
75 W(0x00, 0,0,1,1,0,0,1,0,1,1,0,0,0,0,0,0)| /* 00 */
76 W(0x10, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 10 */
77 W(0x20, 1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0)| /* 20 */
78 W(0x30, 0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 30 */
79 W(0x40, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 40 */
80 W(0x50, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 50 */
81 W(0x60, 1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1)| /* 60 */
82 W(0x70, 0,0,0,0,1,1,1,1,0,0,0,0,0,0,1,1), /* 70 */
83 W(0x80, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 80 */
84 W(0x90, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1), /* 90 */
85 W(0xa0, 1,1,0,1,1,1,0,0,1,1,0,1,1,1,0,1)| /* a0 */
86 W(0xb0, 1,1,1,1,1,1,1,1,0,0,0,1,1,1,1,1), /* b0 */
87 W(0xc0, 1,1,0,0,0,0,0,0,1,1,1,1,1,1,1,1)| /* c0 */
88 W(0xd0, 0,1,1,1,0,1,0,0,1,1,0,1,1,1,0,1), /* d0 */
89 W(0xe0, 0,1,1,0,0,1,0,0,1,1,0,1,1,1,0,1)| /* e0 */
90 W(0xf0, 0,1,1,1,0,1,0,0,1,1,1,0,1,1,1,0) /* f0 */
91 /* ------------------------------- */
92 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
93 };
94#undef W
95 kprobe_opcode_t opcode;
96 kprobe_opcode_t *orig_opcodes = opcodes;
97retry:
98 if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
99 return 0;
100 opcode = *(opcodes++);
101
102 /* 2nd-byte opcode */
103 if (opcode == 0x0f) {
104 if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
105 return 0;
106 return test_bit(*opcodes, twobyte_is_boostable);
107 }
108
109 switch (opcode & 0xf0) {
110 case 0x60:
111 if (0x63 < opcode && opcode < 0x67)
112 goto retry; /* prefixes */
113 /* can't boost Address-size override and bound */
114 return (opcode != 0x62 && opcode != 0x67);
63 case 0x70: 115 case 0x70:
64 return 0; /* can't boost conditional jump */ 116 return 0; /* can't boost conditional jump */
65 case 0x90:
66 /* can't boost call and pushf */
67 return opcode != 0x9a && opcode != 0x9c;
68 case 0xc0: 117 case 0xc0:
69 /* can't boost undefined opcodes and soft-interruptions */ 118 /* can't boost software-interruptions */
70 return (0xc1 < opcode && opcode < 0xc6) || 119 return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
71 (0xc7 < opcode && opcode < 0xcc) || opcode == 0xcf;
72 case 0xd0: 120 case 0xd0:
73 /* can boost AA* and XLAT */ 121 /* can boost AA* and XLAT */
74 return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7); 122 return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
75 case 0xe0: 123 case 0xe0:
76 /* can boost in/out and (may be) jmps */ 124 /* can boost in/out and absolute jmps */
77 return (0xe3 < opcode && opcode != 0xe8); 125 return ((opcode & 0x04) || opcode == 0xea);
78 case 0xf0: 126 case 0xf0:
127 if ((opcode & 0x0c) == 0 && opcode != 0xf1)
128 goto retry; /* lock/rep(ne) prefix */
79 /* clear and set flags can be boost */ 129 /* clear and set flags can be boost */
80 return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe)); 130 return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
81 default: 131 default:
82 /* currently, can't boost 2 bytes opcodes */ 132 if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
83 return opcode != 0x0f; 133 goto retry; /* prefixes */
134 /* can't boost CS override and call */
135 return (opcode != 0x2e && opcode != 0x9a);
84 } 136 }
85} 137}
86 138
87
88/* 139/*
89 * returns non-zero if opcode modifies the interrupt flag. 140 * returns non-zero if opcode modifies the interrupt flag.
90 */ 141 */
@@ -109,7 +160,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
109 160
110 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); 161 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
111 p->opcode = *p->addr; 162 p->opcode = *p->addr;
112 if (can_boost(p->opcode)) { 163 if (can_boost(p->addr)) {
113 p->ainsn.boostable = 0; 164 p->ainsn.boostable = 0;
114 } else { 165 } else {
115 p->ainsn.boostable = -1; 166 p->ainsn.boostable = -1;
@@ -208,7 +259,9 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
208 struct kprobe_ctlblk *kcb; 259 struct kprobe_ctlblk *kcb;
209#ifdef CONFIG_PREEMPT 260#ifdef CONFIG_PREEMPT
210 unsigned pre_preempt_count = preempt_count(); 261 unsigned pre_preempt_count = preempt_count();
211#endif /* CONFIG_PREEMPT */ 262#else
263 unsigned pre_preempt_count = 1;
264#endif
212 265
213 addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t)); 266 addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t));
214 267
@@ -285,22 +338,14 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
285 /* handler has already set things up, so skip ss setup */ 338 /* handler has already set things up, so skip ss setup */
286 return 1; 339 return 1;
287 340
288 if (p->ainsn.boostable == 1 && 341ss_probe:
289#ifdef CONFIG_PREEMPT 342 if (pre_preempt_count && p->ainsn.boostable == 1 && !p->post_handler){
290 !(pre_preempt_count) && /*
291 * This enables booster when the direct
292 * execution path aren't preempted.
293 */
294#endif /* CONFIG_PREEMPT */
295 !p->post_handler && !p->break_handler ) {
296 /* Boost up -- we can execute copied instructions directly */ 343 /* Boost up -- we can execute copied instructions directly */
297 reset_current_kprobe(); 344 reset_current_kprobe();
298 regs->eip = (unsigned long)p->ainsn.insn; 345 regs->eip = (unsigned long)p->ainsn.insn;
299 preempt_enable_no_resched(); 346 preempt_enable_no_resched();
300 return 1; 347 return 1;
301 } 348 }
302
303ss_probe:
304 prepare_singlestep(p, regs); 349 prepare_singlestep(p, regs);
305 kcb->kprobe_status = KPROBE_HIT_SS; 350 kcb->kprobe_status = KPROBE_HIT_SS;
306 return 1; 351 return 1;
diff --git a/arch/i386/kernel/machine_kexec.c b/arch/i386/kernel/machine_kexec.c
index f73d7374a2..511abe52a9 100644
--- a/arch/i386/kernel/machine_kexec.c
+++ b/arch/i386/kernel/machine_kexec.c
@@ -133,9 +133,9 @@ typedef asmlinkage NORET_TYPE void (*relocate_new_kernel_t)(
133 unsigned long start_address, 133 unsigned long start_address,
134 unsigned int has_pae) ATTRIB_NORET; 134 unsigned int has_pae) ATTRIB_NORET;
135 135
136const extern unsigned char relocate_new_kernel[]; 136extern const unsigned char relocate_new_kernel[];
137extern void relocate_new_kernel_end(void); 137extern void relocate_new_kernel_end(void);
138const extern unsigned int relocate_new_kernel_size; 138extern const unsigned int relocate_new_kernel_size;
139 139
140/* 140/*
141 * A architecture hook called to validate the 141 * A architecture hook called to validate the
diff --git a/arch/i386/kernel/msr.c b/arch/i386/kernel/msr.c
index 7a328230e5..d022cb8fd7 100644
--- a/arch/i386/kernel/msr.c
+++ b/arch/i386/kernel/msr.c
@@ -266,7 +266,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb, unsigned long acti
266 return NOTIFY_OK; 266 return NOTIFY_OK;
267} 267}
268 268
269static struct notifier_block msr_class_cpu_notifier = 269static struct notifier_block __cpuinitdata msr_class_cpu_notifier =
270{ 270{
271 .notifier_call = msr_class_cpu_callback, 271 .notifier_call = msr_class_cpu_callback,
272}; 272};
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index d43b498ec7..a76e931465 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -14,21 +14,17 @@
14 */ 14 */
15 15
16#include <linux/config.h> 16#include <linux/config.h>
17#include <linux/mm.h>
18#include <linux/delay.h> 17#include <linux/delay.h>
19#include <linux/bootmem.h>
20#include <linux/smp_lock.h>
21#include <linux/interrupt.h> 18#include <linux/interrupt.h>
22#include <linux/mc146818rtc.h>
23#include <linux/kernel_stat.h>
24#include <linux/module.h> 19#include <linux/module.h>
25#include <linux/nmi.h> 20#include <linux/nmi.h>
26#include <linux/sysdev.h> 21#include <linux/sysdev.h>
27#include <linux/sysctl.h> 22#include <linux/sysctl.h>
23#include <linux/percpu.h>
28 24
29#include <asm/smp.h> 25#include <asm/smp.h>
30#include <asm/div64.h>
31#include <asm/nmi.h> 26#include <asm/nmi.h>
27#include <asm/intel_arch_perfmon.h>
32 28
33#include "mach_traps.h" 29#include "mach_traps.h"
34 30
@@ -100,6 +96,9 @@ int nmi_active;
100 (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \ 96 (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \
101 P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE) 97 P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)
102 98
99#define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
100#define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
101
103#ifdef CONFIG_SMP 102#ifdef CONFIG_SMP
104/* The performance counters used by NMI_LOCAL_APIC don't trigger when 103/* The performance counters used by NMI_LOCAL_APIC don't trigger when
105 * the CPU is idle. To make sure the NMI watchdog really ticks on all 104 * the CPU is idle. To make sure the NMI watchdog really ticks on all
@@ -212,6 +211,8 @@ static int __init setup_nmi_watchdog(char *str)
212 211
213__setup("nmi_watchdog=", setup_nmi_watchdog); 212__setup("nmi_watchdog=", setup_nmi_watchdog);
214 213
214static void disable_intel_arch_watchdog(void);
215
215static void disable_lapic_nmi_watchdog(void) 216static void disable_lapic_nmi_watchdog(void)
216{ 217{
217 if (nmi_active <= 0) 218 if (nmi_active <= 0)
@@ -221,6 +222,10 @@ static void disable_lapic_nmi_watchdog(void)
221 wrmsr(MSR_K7_EVNTSEL0, 0, 0); 222 wrmsr(MSR_K7_EVNTSEL0, 0, 0);
222 break; 223 break;
223 case X86_VENDOR_INTEL: 224 case X86_VENDOR_INTEL:
225 if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
226 disable_intel_arch_watchdog();
227 break;
228 }
224 switch (boot_cpu_data.x86) { 229 switch (boot_cpu_data.x86) {
225 case 6: 230 case 6:
226 if (boot_cpu_data.x86_model > 0xd) 231 if (boot_cpu_data.x86_model > 0xd)
@@ -449,6 +454,53 @@ static int setup_p4_watchdog(void)
449 return 1; 454 return 1;
450} 455}
451 456
457static void disable_intel_arch_watchdog(void)
458{
459 unsigned ebx;
460
461 /*
462 * Check whether the Architectural PerfMon supports
463 * Unhalted Core Cycles Event or not.
464 * NOTE: Corresponding bit = 0 in ebp indicates event present.
465 */
466 ebx = cpuid_ebx(10);
467 if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
468 wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, 0, 0);
469}
470
471static int setup_intel_arch_watchdog(void)
472{
473 unsigned int evntsel;
474 unsigned ebx;
475
476 /*
477 * Check whether the Architectural PerfMon supports
478 * Unhalted Core Cycles Event or not.
479 * NOTE: Corresponding bit = 0 in ebp indicates event present.
480 */
481 ebx = cpuid_ebx(10);
482 if ((ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
483 return 0;
484
485 nmi_perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
486
487 clear_msr_range(MSR_ARCH_PERFMON_EVENTSEL0, 2);
488 clear_msr_range(MSR_ARCH_PERFMON_PERFCTR0, 2);
489
490 evntsel = ARCH_PERFMON_EVENTSEL_INT
491 | ARCH_PERFMON_EVENTSEL_OS
492 | ARCH_PERFMON_EVENTSEL_USR
493 | ARCH_PERFMON_NMI_EVENT_SEL
494 | ARCH_PERFMON_NMI_EVENT_UMASK;
495
496 wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
497 write_watchdog_counter("INTEL_ARCH_PERFCTR0");
498 apic_write(APIC_LVTPC, APIC_DM_NMI);
499 evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
500 wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
501 return 1;
502}
503
452void setup_apic_nmi_watchdog (void) 504void setup_apic_nmi_watchdog (void)
453{ 505{
454 switch (boot_cpu_data.x86_vendor) { 506 switch (boot_cpu_data.x86_vendor) {
@@ -458,6 +510,11 @@ void setup_apic_nmi_watchdog (void)
458 setup_k7_watchdog(); 510 setup_k7_watchdog();
459 break; 511 break;
460 case X86_VENDOR_INTEL: 512 case X86_VENDOR_INTEL:
513 if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
514 if (!setup_intel_arch_watchdog())
515 return;
516 break;
517 }
461 switch (boot_cpu_data.x86) { 518 switch (boot_cpu_data.x86) {
462 case 6: 519 case 6:
463 if (boot_cpu_data.x86_model > 0xd) 520 if (boot_cpu_data.x86_model > 0xd)
@@ -561,7 +618,8 @@ void nmi_watchdog_tick (struct pt_regs * regs)
561 wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0); 618 wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
562 apic_write(APIC_LVTPC, APIC_DM_NMI); 619 apic_write(APIC_LVTPC, APIC_DM_NMI);
563 } 620 }
564 else if (nmi_perfctr_msr == MSR_P6_PERFCTR0) { 621 else if (nmi_perfctr_msr == MSR_P6_PERFCTR0 ||
622 nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
565 /* Only P6 based Pentium M need to re-unmask 623 /* Only P6 based Pentium M need to re-unmask
566 * the apic vector but it doesn't hurt 624 * the apic vector but it doesn't hurt
567 * other P6 variant */ 625 * other P6 variant */
diff --git a/arch/i386/kernel/numaq.c b/arch/i386/kernel/numaq.c
index 5f5b075f86..0caf14652b 100644
--- a/arch/i386/kernel/numaq.c
+++ b/arch/i386/kernel/numaq.c
@@ -79,10 +79,12 @@ int __init get_memcfg_numaq(void)
79 return 1; 79 return 1;
80} 80}
81 81
82static int __init numaq_dsc_disable(void) 82static int __init numaq_tsc_disable(void)
83{ 83{
84 printk(KERN_DEBUG "NUMAQ: disabling TSC\n"); 84 if (num_online_nodes() > 1) {
85 tsc_disable = 1; 85 printk(KERN_DEBUG "NUMAQ: disabling TSC\n");
86 tsc_disable = 1;
87 }
86 return 0; 88 return 0;
87} 89}
88core_initcall(numaq_dsc_disable); 90arch_initcall(numaq_tsc_disable);
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 6259afea46..6946b06e27 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -102,7 +102,7 @@ void default_idle(void)
102 local_irq_enable(); 102 local_irq_enable();
103 103
104 if (!hlt_counter && boot_cpu_data.hlt_works_ok) { 104 if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
105 clear_thread_flag(TIF_POLLING_NRFLAG); 105 current_thread_info()->status &= ~TS_POLLING;
106 smp_mb__after_clear_bit(); 106 smp_mb__after_clear_bit();
107 while (!need_resched()) { 107 while (!need_resched()) {
108 local_irq_disable(); 108 local_irq_disable();
@@ -111,7 +111,7 @@ void default_idle(void)
111 else 111 else
112 local_irq_enable(); 112 local_irq_enable();
113 } 113 }
114 set_thread_flag(TIF_POLLING_NRFLAG); 114 current_thread_info()->status |= TS_POLLING;
115 } else { 115 } else {
116 while (!need_resched()) 116 while (!need_resched())
117 cpu_relax(); 117 cpu_relax();
@@ -174,7 +174,7 @@ void cpu_idle(void)
174{ 174{
175 int cpu = smp_processor_id(); 175 int cpu = smp_processor_id();
176 176
177 set_thread_flag(TIF_POLLING_NRFLAG); 177 current_thread_info()->status |= TS_POLLING;
178 178
179 /* endless idle loop with no priority at all */ 179 /* endless idle loop with no priority at all */
180 while (1) { 180 while (1) {
@@ -312,7 +312,7 @@ void show_regs(struct pt_regs * regs)
312 cr3 = read_cr3(); 312 cr3 = read_cr3();
313 cr4 = read_cr4_safe(); 313 cr4 = read_cr4_safe();
314 printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4); 314 printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
315 show_trace(NULL, &regs->esp); 315 show_trace(NULL, regs, &regs->esp);
316} 316}
317 317
318/* 318/*
diff --git a/arch/i386/kernel/scx200.c b/arch/i386/kernel/scx200.c
index 321f5fd26e..9bf590cefc 100644
--- a/arch/i386/kernel/scx200.c
+++ b/arch/i386/kernel/scx200.c
@@ -9,6 +9,7 @@
9#include <linux/errno.h> 9#include <linux/errno.h>
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/mutex.h>
12#include <linux/pci.h> 13#include <linux/pci.h>
13 14
14#include <linux/scx200.h> 15#include <linux/scx200.h>
@@ -45,11 +46,19 @@ static struct pci_driver scx200_pci_driver = {
45 .probe = scx200_probe, 46 .probe = scx200_probe,
46}; 47};
47 48
48static DEFINE_SPINLOCK(scx200_gpio_config_lock); 49static DEFINE_MUTEX(scx200_gpio_config_lock);
49 50
50static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 51static void __devinit scx200_init_shadow(void)
51{ 52{
52 int bank; 53 int bank;
54
55 /* read the current values driven on the GPIO signals */
56 for (bank = 0; bank < 2; ++bank)
57 scx200_gpio_shadow[bank] = inl(scx200_gpio_base + 0x10 * bank);
58}
59
60static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
61{
53 unsigned base; 62 unsigned base;
54 63
55 if (pdev->device == PCI_DEVICE_ID_NS_SCx200_BRIDGE || 64 if (pdev->device == PCI_DEVICE_ID_NS_SCx200_BRIDGE ||
@@ -63,10 +72,7 @@ static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_
63 } 72 }
64 73
65 scx200_gpio_base = base; 74 scx200_gpio_base = base;
66 75 scx200_init_shadow();
67 /* read the current values driven on the GPIO signals */
68 for (bank = 0; bank < 2; ++bank)
69 scx200_gpio_shadow[bank] = inl(scx200_gpio_base + 0x10 * bank);
70 76
71 } else { 77 } else {
72 /* find the base of the Configuration Block */ 78 /* find the base of the Configuration Block */
@@ -87,12 +93,11 @@ static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_
87 return 0; 93 return 0;
88} 94}
89 95
90u32 scx200_gpio_configure(int index, u32 mask, u32 bits) 96u32 scx200_gpio_configure(unsigned index, u32 mask, u32 bits)
91{ 97{
92 u32 config, new_config; 98 u32 config, new_config;
93 unsigned long flags;
94 99
95 spin_lock_irqsave(&scx200_gpio_config_lock, flags); 100 mutex_lock(&scx200_gpio_config_lock);
96 101
97 outl(index, scx200_gpio_base + 0x20); 102 outl(index, scx200_gpio_base + 0x20);
98 config = inl(scx200_gpio_base + 0x24); 103 config = inl(scx200_gpio_base + 0x24);
@@ -100,45 +105,11 @@ u32 scx200_gpio_configure(int index, u32 mask, u32 bits)
100 new_config = (config & mask) | bits; 105 new_config = (config & mask) | bits;
101 outl(new_config, scx200_gpio_base + 0x24); 106 outl(new_config, scx200_gpio_base + 0x24);
102 107
103 spin_unlock_irqrestore(&scx200_gpio_config_lock, flags); 108 mutex_unlock(&scx200_gpio_config_lock);
104 109
105 return config; 110 return config;
106} 111}
107 112
108#if 0
109void scx200_gpio_dump(unsigned index)
110{
111 u32 config = scx200_gpio_configure(index, ~0, 0);
112 printk(KERN_DEBUG "GPIO%02u: 0x%08lx", index, (unsigned long)config);
113
114 if (config & 1)
115 printk(" OE"); /* output enabled */
116 else
117 printk(" TS"); /* tristate */
118 if (config & 2)
119 printk(" PP"); /* push pull */
120 else
121 printk(" OD"); /* open drain */
122 if (config & 4)
123 printk(" PUE"); /* pull up enabled */
124 else
125 printk(" PUD"); /* pull up disabled */
126 if (config & 8)
127 printk(" LOCKED"); /* locked */
128 if (config & 16)
129 printk(" LEVEL"); /* level input */
130 else
131 printk(" EDGE"); /* edge input */
132 if (config & 32)
133 printk(" HI"); /* trigger on rising edge */
134 else
135 printk(" LO"); /* trigger on falling edge */
136 if (config & 64)
137 printk(" DEBOUNCE"); /* debounce */
138 printk("\n");
139}
140#endif /* 0 */
141
142static int __init scx200_init(void) 113static int __init scx200_init(void)
143{ 114{
144 printk(KERN_INFO NAME ": NatSemi SCx200 Driver\n"); 115 printk(KERN_INFO NAME ": NatSemi SCx200 Driver\n");
@@ -159,10 +130,3 @@ EXPORT_SYMBOL(scx200_gpio_base);
159EXPORT_SYMBOL(scx200_gpio_shadow); 130EXPORT_SYMBOL(scx200_gpio_shadow);
160EXPORT_SYMBOL(scx200_gpio_configure); 131EXPORT_SYMBOL(scx200_gpio_configure);
161EXPORT_SYMBOL(scx200_cb_base); 132EXPORT_SYMBOL(scx200_cb_base);
162
163/*
164 Local variables:
165 compile-command: "make -k -C ../../.. SUBDIRS=arch/i386/kernel modules"
166 c-basic-offset: 8
167 End:
168*/
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 6bef927373..4a65040cc6 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -1575,6 +1575,7 @@ void __init setup_arch(char **cmdline_p)
1575 conswitchp = &dummy_con; 1575 conswitchp = &dummy_con;
1576#endif 1576#endif
1577#endif 1577#endif
1578 tsc_init();
1578} 1579}
1579 1580
1580static __init int add_pcspkr(void) 1581static __init int add_pcspkr(void)
diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c
index 5c352c3a9e..43002cfb40 100644
--- a/arch/i386/kernel/signal.c
+++ b/arch/i386/kernel/signal.c
@@ -351,7 +351,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
351 goto give_sigsegv; 351 goto give_sigsegv;
352 } 352 }
353 353
354 restorer = &__kernel_sigreturn; 354 restorer = (void *)VDSO_SYM(&__kernel_sigreturn);
355 if (ka->sa.sa_flags & SA_RESTORER) 355 if (ka->sa.sa_flags & SA_RESTORER)
356 restorer = ka->sa.sa_restorer; 356 restorer = ka->sa.sa_restorer;
357 357
@@ -447,7 +447,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
447 goto give_sigsegv; 447 goto give_sigsegv;
448 448
449 /* Set up to return from userspace. */ 449 /* Set up to return from userspace. */
450 restorer = &__kernel_rt_sigreturn; 450 restorer = (void *)VDSO_SYM(&__kernel_rt_sigreturn);
451 if (ka->sa.sa_flags & SA_RESTORER) 451 if (ka->sa.sa_flags & SA_RESTORER)
452 restorer = ka->sa.sa_restorer; 452 restorer = ka->sa.sa_restorer;
453 err |= __put_user(restorer, &frame->pretcode); 453 err |= __put_user(restorer, &frame->pretcode);
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index d134e9643a..c10789d7a9 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -114,7 +114,17 @@ DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_m
114 114
115static inline int __prepare_ICR (unsigned int shortcut, int vector) 115static inline int __prepare_ICR (unsigned int shortcut, int vector)
116{ 116{
117 return APIC_DM_FIXED | shortcut | vector | APIC_DEST_LOGICAL; 117 unsigned int icr = shortcut | APIC_DEST_LOGICAL;
118
119 switch (vector) {
120 default:
121 icr |= APIC_DM_FIXED | vector;
122 break;
123 case NMI_VECTOR:
124 icr |= APIC_DM_NMI;
125 break;
126 }
127 return icr;
118} 128}
119 129
120static inline int __prepare_ICR2 (unsigned int mask) 130static inline int __prepare_ICR2 (unsigned int mask)
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index bd0ca5c9f0..89e7315e53 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -52,6 +52,7 @@
52#include <asm/tlbflush.h> 52#include <asm/tlbflush.h>
53#include <asm/desc.h> 53#include <asm/desc.h>
54#include <asm/arch_hooks.h> 54#include <asm/arch_hooks.h>
55#include <asm/nmi.h>
55 56
56#include <mach_apic.h> 57#include <mach_apic.h>
57#include <mach_wakecpu.h> 58#include <mach_wakecpu.h>
@@ -66,12 +67,6 @@ int smp_num_siblings = 1;
66EXPORT_SYMBOL(smp_num_siblings); 67EXPORT_SYMBOL(smp_num_siblings);
67#endif 68#endif
68 69
69/* Package ID of each logical CPU */
70int phys_proc_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
71
72/* Core ID of each logical CPU */
73int cpu_core_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
74
75/* Last level cache ID of each logical CPU */ 70/* Last level cache ID of each logical CPU */
76int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID}; 71int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
77 72
@@ -453,10 +448,12 @@ cpumask_t cpu_coregroup_map(int cpu)
453 struct cpuinfo_x86 *c = cpu_data + cpu; 448 struct cpuinfo_x86 *c = cpu_data + cpu;
454 /* 449 /*
455 * For perf, we return last level cache shared map. 450 * For perf, we return last level cache shared map.
456 * TBD: when power saving sched policy is added, we will return 451 * And for power savings, we return cpu_core_map
457 * cpu_core_map when power saving policy is enabled
458 */ 452 */
459 return c->llc_shared_map; 453 if (sched_mc_power_savings || sched_smt_power_savings)
454 return cpu_core_map[cpu];
455 else
456 return c->llc_shared_map;
460} 457}
461 458
462/* representing cpus for which sibling maps can be computed */ 459/* representing cpus for which sibling maps can be computed */
@@ -472,8 +469,8 @@ set_cpu_sibling_map(int cpu)
472 469
473 if (smp_num_siblings > 1) { 470 if (smp_num_siblings > 1) {
474 for_each_cpu_mask(i, cpu_sibling_setup_map) { 471 for_each_cpu_mask(i, cpu_sibling_setup_map) {
475 if (phys_proc_id[cpu] == phys_proc_id[i] && 472 if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
476 cpu_core_id[cpu] == cpu_core_id[i]) { 473 c[cpu].cpu_core_id == c[i].cpu_core_id) {
477 cpu_set(i, cpu_sibling_map[cpu]); 474 cpu_set(i, cpu_sibling_map[cpu]);
478 cpu_set(cpu, cpu_sibling_map[i]); 475 cpu_set(cpu, cpu_sibling_map[i]);
479 cpu_set(i, cpu_core_map[cpu]); 476 cpu_set(i, cpu_core_map[cpu]);
@@ -500,7 +497,7 @@ set_cpu_sibling_map(int cpu)
500 cpu_set(i, c[cpu].llc_shared_map); 497 cpu_set(i, c[cpu].llc_shared_map);
501 cpu_set(cpu, c[i].llc_shared_map); 498 cpu_set(cpu, c[i].llc_shared_map);
502 } 499 }
503 if (phys_proc_id[cpu] == phys_proc_id[i]) { 500 if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
504 cpu_set(i, cpu_core_map[cpu]); 501 cpu_set(i, cpu_core_map[cpu]);
505 cpu_set(cpu, cpu_core_map[i]); 502 cpu_set(cpu, cpu_core_map[i]);
506 /* 503 /*
@@ -1055,6 +1052,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
1055 struct warm_boot_cpu_info info; 1052 struct warm_boot_cpu_info info;
1056 struct work_struct task; 1053 struct work_struct task;
1057 int apicid, ret; 1054 int apicid, ret;
1055 struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
1058 1056
1059 apicid = x86_cpu_to_apicid[cpu]; 1057 apicid = x86_cpu_to_apicid[cpu];
1060 if (apicid == BAD_APICID) { 1058 if (apicid == BAD_APICID) {
@@ -1062,6 +1060,18 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
1062 goto exit; 1060 goto exit;
1063 } 1061 }
1064 1062
1063 /*
1064 * the CPU isn't initialized at boot time, allocate gdt table here.
1065 * cpu_init will initialize it
1066 */
1067 if (!cpu_gdt_descr->address) {
1068 cpu_gdt_descr->address = get_zeroed_page(GFP_KERNEL);
1069 if (!cpu_gdt_descr->address)
1070 printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu);
1071 ret = -ENOMEM;
1072 goto exit;
1073 }
1074
1065 info.complete = &done; 1075 info.complete = &done;
1066 info.apicid = apicid; 1076 info.apicid = apicid;
1067 info.cpu = cpu; 1077 info.cpu = cpu;
@@ -1339,8 +1349,8 @@ remove_siblinginfo(int cpu)
1339 cpu_clear(cpu, cpu_sibling_map[sibling]); 1349 cpu_clear(cpu, cpu_sibling_map[sibling]);
1340 cpus_clear(cpu_sibling_map[cpu]); 1350 cpus_clear(cpu_sibling_map[cpu]);
1341 cpus_clear(cpu_core_map[cpu]); 1351 cpus_clear(cpu_core_map[cpu]);
1342 phys_proc_id[cpu] = BAD_APICID; 1352 c[cpu].phys_proc_id = 0;
1343 cpu_core_id[cpu] = BAD_APICID; 1353 c[cpu].cpu_core_id = 0;
1344 cpu_clear(cpu, cpu_sibling_setup_map); 1354 cpu_clear(cpu, cpu_sibling_setup_map);
1345} 1355}
1346 1356
diff --git a/arch/i386/kernel/sysenter.c b/arch/i386/kernel/sysenter.c
index 0bada1870b..c60419dee0 100644
--- a/arch/i386/kernel/sysenter.c
+++ b/arch/i386/kernel/sysenter.c
@@ -2,6 +2,8 @@
2 * linux/arch/i386/kernel/sysenter.c 2 * linux/arch/i386/kernel/sysenter.c
3 * 3 *
4 * (C) Copyright 2002 Linus Torvalds 4 * (C) Copyright 2002 Linus Torvalds
5 * Portions based on the vdso-randomization code from exec-shield:
6 * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
5 * 7 *
6 * This file contains the needed initializations to support sysenter. 8 * This file contains the needed initializations to support sysenter.
7 */ 9 */
@@ -13,12 +15,31 @@
13#include <linux/gfp.h> 15#include <linux/gfp.h>
14#include <linux/string.h> 16#include <linux/string.h>
15#include <linux/elf.h> 17#include <linux/elf.h>
18#include <linux/mm.h>
19#include <linux/module.h>
16 20
17#include <asm/cpufeature.h> 21#include <asm/cpufeature.h>
18#include <asm/msr.h> 22#include <asm/msr.h>
19#include <asm/pgtable.h> 23#include <asm/pgtable.h>
20#include <asm/unistd.h> 24#include <asm/unistd.h>
21 25
26/*
27 * Should the kernel map a VDSO page into processes and pass its
28 * address down to glibc upon exec()?
29 */
30unsigned int __read_mostly vdso_enabled = 1;
31
32EXPORT_SYMBOL_GPL(vdso_enabled);
33
34static int __init vdso_setup(char *s)
35{
36 vdso_enabled = simple_strtoul(s, NULL, 0);
37
38 return 1;
39}
40
41__setup("vdso=", vdso_setup);
42
22extern asmlinkage void sysenter_entry(void); 43extern asmlinkage void sysenter_entry(void);
23 44
24void enable_sep_cpu(void) 45void enable_sep_cpu(void)
@@ -45,23 +66,122 @@ void enable_sep_cpu(void)
45 */ 66 */
46extern const char vsyscall_int80_start, vsyscall_int80_end; 67extern const char vsyscall_int80_start, vsyscall_int80_end;
47extern const char vsyscall_sysenter_start, vsyscall_sysenter_end; 68extern const char vsyscall_sysenter_start, vsyscall_sysenter_end;
69static void *syscall_page;
48 70
49int __init sysenter_setup(void) 71int __init sysenter_setup(void)
50{ 72{
51 void *page = (void *)get_zeroed_page(GFP_ATOMIC); 73 syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
52 74
53 __set_fixmap(FIX_VSYSCALL, __pa(page), PAGE_READONLY_EXEC); 75#ifdef CONFIG_COMPAT_VDSO
76 __set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_READONLY);
77 printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));
78#else
79 /*
80 * In the non-compat case the ELF coredumping code needs the fixmap:
81 */
82 __set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_KERNEL_RO);
83#endif
54 84
55 if (!boot_cpu_has(X86_FEATURE_SEP)) { 85 if (!boot_cpu_has(X86_FEATURE_SEP)) {
56 memcpy(page, 86 memcpy(syscall_page,
57 &vsyscall_int80_start, 87 &vsyscall_int80_start,
58 &vsyscall_int80_end - &vsyscall_int80_start); 88 &vsyscall_int80_end - &vsyscall_int80_start);
59 return 0; 89 return 0;
60 } 90 }
61 91
62 memcpy(page, 92 memcpy(syscall_page,
63 &vsyscall_sysenter_start, 93 &vsyscall_sysenter_start,
64 &vsyscall_sysenter_end - &vsyscall_sysenter_start); 94 &vsyscall_sysenter_end - &vsyscall_sysenter_start);
65 95
66 return 0; 96 return 0;
67} 97}
98
99static struct page *syscall_nopage(struct vm_area_struct *vma,
100 unsigned long adr, int *type)
101{
102 struct page *p = virt_to_page(adr - vma->vm_start + syscall_page);
103 get_page(p);
104 return p;
105}
106
107/* Prevent VMA merging */
108static void syscall_vma_close(struct vm_area_struct *vma)
109{
110}
111
112static struct vm_operations_struct syscall_vm_ops = {
113 .close = syscall_vma_close,
114 .nopage = syscall_nopage,
115};
116
117/* Defined in vsyscall-sysenter.S */
118extern void SYSENTER_RETURN;
119
120/* Setup a VMA at program startup for the vsyscall page */
121int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
122{
123 struct vm_area_struct *vma;
124 struct mm_struct *mm = current->mm;
125 unsigned long addr;
126 int ret;
127
128 down_write(&mm->mmap_sem);
129 addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
130 if (IS_ERR_VALUE(addr)) {
131 ret = addr;
132 goto up_fail;
133 }
134
135 vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
136 if (!vma) {
137 ret = -ENOMEM;
138 goto up_fail;
139 }
140
141 vma->vm_start = addr;
142 vma->vm_end = addr + PAGE_SIZE;
143 /* MAYWRITE to allow gdb to COW and set breakpoints */
144 vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
145 vma->vm_flags |= mm->def_flags;
146 vma->vm_page_prot = protection_map[vma->vm_flags & 7];
147 vma->vm_ops = &syscall_vm_ops;
148 vma->vm_mm = mm;
149
150 ret = insert_vm_struct(mm, vma);
151 if (ret)
152 goto free_vma;
153
154 current->mm->context.vdso = (void *)addr;
155 current_thread_info()->sysenter_return =
156 (void *)VDSO_SYM(&SYSENTER_RETURN);
157 mm->total_vm++;
158up_fail:
159 up_write(&mm->mmap_sem);
160 return ret;
161
162free_vma:
163 kmem_cache_free(vm_area_cachep, vma);
164 return ret;
165}
166
167const char *arch_vma_name(struct vm_area_struct *vma)
168{
169 if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
170 return "[vdso]";
171 return NULL;
172}
173
174struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
175{
176 return NULL;
177}
178
179int in_gate_area(struct task_struct *task, unsigned long addr)
180{
181 return 0;
182}
183
184int in_gate_area_no_task(unsigned long addr)
185{
186 return 0;
187}
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index 9d30747598..5f43d04101 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -82,13 +82,6 @@ extern unsigned long wall_jiffies;
82DEFINE_SPINLOCK(rtc_lock); 82DEFINE_SPINLOCK(rtc_lock);
83EXPORT_SYMBOL(rtc_lock); 83EXPORT_SYMBOL(rtc_lock);
84 84
85#include <asm/i8253.h>
86
87DEFINE_SPINLOCK(i8253_lock);
88EXPORT_SYMBOL(i8253_lock);
89
90struct timer_opts *cur_timer __read_mostly = &timer_none;
91
92/* 85/*
93 * This is a special lock that is owned by the CPU and holds the index 86 * This is a special lock that is owned by the CPU and holds the index
94 * register we are working with. It is required for NMI access to the 87 * register we are working with. It is required for NMI access to the
@@ -118,99 +111,19 @@ void rtc_cmos_write(unsigned char val, unsigned char addr)
118} 111}
119EXPORT_SYMBOL(rtc_cmos_write); 112EXPORT_SYMBOL(rtc_cmos_write);
120 113
121/*
122 * This version of gettimeofday has microsecond resolution
123 * and better than microsecond precision on fast x86 machines with TSC.
124 */
125void do_gettimeofday(struct timeval *tv)
126{
127 unsigned long seq;
128 unsigned long usec, sec;
129 unsigned long max_ntp_tick;
130
131 do {
132 unsigned long lost;
133
134 seq = read_seqbegin(&xtime_lock);
135
136 usec = cur_timer->get_offset();
137 lost = jiffies - wall_jiffies;
138
139 /*
140 * If time_adjust is negative then NTP is slowing the clock
141 * so make sure not to go into next possible interval.
142 * Better to lose some accuracy than have time go backwards..
143 */
144 if (unlikely(time_adjust < 0)) {
145 max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
146 usec = min(usec, max_ntp_tick);
147
148 if (lost)
149 usec += lost * max_ntp_tick;
150 }
151 else if (unlikely(lost))
152 usec += lost * (USEC_PER_SEC / HZ);
153
154 sec = xtime.tv_sec;
155 usec += (xtime.tv_nsec / 1000);
156 } while (read_seqretry(&xtime_lock, seq));
157
158 while (usec >= 1000000) {
159 usec -= 1000000;
160 sec++;
161 }
162
163 tv->tv_sec = sec;
164 tv->tv_usec = usec;
165}
166
167EXPORT_SYMBOL(do_gettimeofday);
168
169int do_settimeofday(struct timespec *tv)
170{
171 time_t wtm_sec, sec = tv->tv_sec;
172 long wtm_nsec, nsec = tv->tv_nsec;
173
174 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
175 return -EINVAL;
176
177 write_seqlock_irq(&xtime_lock);
178 /*
179 * This is revolting. We need to set "xtime" correctly. However, the
180 * value in this location is the value at the most recent update of
181 * wall time. Discover what correction gettimeofday() would have
182 * made, and then undo it!
183 */
184 nsec -= cur_timer->get_offset() * NSEC_PER_USEC;
185 nsec -= (jiffies - wall_jiffies) * TICK_NSEC;
186
187 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
188 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
189
190 set_normalized_timespec(&xtime, sec, nsec);
191 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
192
193 ntp_clear();
194 write_sequnlock_irq(&xtime_lock);
195 clock_was_set();
196 return 0;
197}
198
199EXPORT_SYMBOL(do_settimeofday);
200
201static int set_rtc_mmss(unsigned long nowtime) 114static int set_rtc_mmss(unsigned long nowtime)
202{ 115{
203 int retval; 116 int retval;
204 117 unsigned long flags;
205 WARN_ON(irqs_disabled());
206 118
207 /* gets recalled with irq locally disabled */ 119 /* gets recalled with irq locally disabled */
208 spin_lock_irq(&rtc_lock); 120 /* XXX - does irqsave resolve this? -johnstul */
121 spin_lock_irqsave(&rtc_lock, flags);
209 if (efi_enabled) 122 if (efi_enabled)
210 retval = efi_set_rtc_mmss(nowtime); 123 retval = efi_set_rtc_mmss(nowtime);
211 else 124 else
212 retval = mach_set_rtc_mmss(nowtime); 125 retval = mach_set_rtc_mmss(nowtime);
213 spin_unlock_irq(&rtc_lock); 126 spin_unlock_irqrestore(&rtc_lock, flags);
214 127
215 return retval; 128 return retval;
216} 129}
@@ -218,16 +131,6 @@ static int set_rtc_mmss(unsigned long nowtime)
218 131
219int timer_ack; 132int timer_ack;
220 133
221/* monotonic_clock(): returns # of nanoseconds passed since time_init()
222 * Note: This function is required to return accurate
223 * time even in the absence of multiple timer ticks.
224 */
225unsigned long long monotonic_clock(void)
226{
227 return cur_timer->monotonic_clock();
228}
229EXPORT_SYMBOL(monotonic_clock);
230
231#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER) 134#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
232unsigned long profile_pc(struct pt_regs *regs) 135unsigned long profile_pc(struct pt_regs *regs)
233{ 136{
@@ -242,11 +145,21 @@ EXPORT_SYMBOL(profile_pc);
242#endif 145#endif
243 146
244/* 147/*
245 * timer_interrupt() needs to keep up the real-time clock, 148 * This is the same as the above, except we _also_ save the current
246 * as well as call the "do_timer()" routine every clocktick 149 * Time Stamp Counter value at the time of the timer interrupt, so that
150 * we later on can estimate the time of day more exactly.
247 */ 151 */
248static inline void do_timer_interrupt(int irq, struct pt_regs *regs) 152irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
249{ 153{
154 /*
155 * Here we are in the timer irq handler. We just have irqs locally
156 * disabled but we don't know if the timer_bh is running on the other
157 * CPU. We need to avoid to SMP race with it. NOTE: we don' t need
158 * the irq version of write_lock because as just said we have irq
159 * locally disabled. -arca
160 */
161 write_seqlock(&xtime_lock);
162
250#ifdef CONFIG_X86_IO_APIC 163#ifdef CONFIG_X86_IO_APIC
251 if (timer_ack) { 164 if (timer_ack) {
252 /* 165 /*
@@ -279,27 +192,6 @@ static inline void do_timer_interrupt(int irq, struct pt_regs *regs)
279 irq = inb_p( 0x61 ); /* read the current state */ 192 irq = inb_p( 0x61 ); /* read the current state */
280 outb_p( irq|0x80, 0x61 ); /* reset the IRQ */ 193 outb_p( irq|0x80, 0x61 ); /* reset the IRQ */
281 } 194 }
282}
283
284/*
285 * This is the same as the above, except we _also_ save the current
286 * Time Stamp Counter value at the time of the timer interrupt, so that
287 * we later on can estimate the time of day more exactly.
288 */
289irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
290{
291 /*
292 * Here we are in the timer irq handler. We just have irqs locally
293 * disabled but we don't know if the timer_bh is running on the other
294 * CPU. We need to avoid to SMP race with it. NOTE: we don' t need
295 * the irq version of write_lock because as just said we have irq
296 * locally disabled. -arca
297 */
298 write_seqlock(&xtime_lock);
299
300 cur_timer->mark_offset();
301
302 do_timer_interrupt(irq, regs);
303 195
304 write_sequnlock(&xtime_lock); 196 write_sequnlock(&xtime_lock);
305 197
@@ -380,7 +272,6 @@ void notify_arch_cmos_timer(void)
380 272
381static long clock_cmos_diff, sleep_start; 273static long clock_cmos_diff, sleep_start;
382 274
383static struct timer_opts *last_timer;
384static int timer_suspend(struct sys_device *dev, pm_message_t state) 275static int timer_suspend(struct sys_device *dev, pm_message_t state)
385{ 276{
386 /* 277 /*
@@ -389,10 +280,6 @@ static int timer_suspend(struct sys_device *dev, pm_message_t state)
389 clock_cmos_diff = -get_cmos_time(); 280 clock_cmos_diff = -get_cmos_time();
390 clock_cmos_diff += get_seconds(); 281 clock_cmos_diff += get_seconds();
391 sleep_start = get_cmos_time(); 282 sleep_start = get_cmos_time();
392 last_timer = cur_timer;
393 cur_timer = &timer_none;
394 if (last_timer->suspend)
395 last_timer->suspend(state);
396 return 0; 283 return 0;
397} 284}
398 285
@@ -415,10 +302,6 @@ static int timer_resume(struct sys_device *dev)
415 jiffies_64 += sleep_length; 302 jiffies_64 += sleep_length;
416 wall_jiffies += sleep_length; 303 wall_jiffies += sleep_length;
417 write_sequnlock_irqrestore(&xtime_lock, flags); 304 write_sequnlock_irqrestore(&xtime_lock, flags);
418 if (last_timer->resume)
419 last_timer->resume();
420 cur_timer = last_timer;
421 last_timer = NULL;
422 touch_softlockup_watchdog(); 305 touch_softlockup_watchdog();
423 return 0; 306 return 0;
424} 307}
@@ -460,9 +343,6 @@ static void __init hpet_time_init(void)
460 printk("Using HPET for base-timer\n"); 343 printk("Using HPET for base-timer\n");
461 } 344 }
462 345
463 cur_timer = select_timer();
464 printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
465
466 time_init_hook(); 346 time_init_hook();
467} 347}
468#endif 348#endif
@@ -484,8 +364,5 @@ void __init time_init(void)
484 set_normalized_timespec(&wall_to_monotonic, 364 set_normalized_timespec(&wall_to_monotonic,
485 -xtime.tv_sec, -xtime.tv_nsec); 365 -xtime.tv_sec, -xtime.tv_nsec);
486 366
487 cur_timer = select_timer();
488 printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
489
490 time_init_hook(); 367 time_init_hook();
491} 368}
diff --git a/arch/i386/kernel/timers/Makefile b/arch/i386/kernel/timers/Makefile
deleted file mode 100644
index 8fa12be658..0000000000
--- a/arch/i386/kernel/timers/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
1#
2# Makefile for x86 timers
3#
4
5obj-y := timer.o timer_none.o timer_tsc.o timer_pit.o common.o
6
7obj-$(CONFIG_X86_CYCLONE_TIMER) += timer_cyclone.o
8obj-$(CONFIG_HPET_TIMER) += timer_hpet.o
9obj-$(CONFIG_X86_PM_TIMER) += timer_pm.o
diff --git a/arch/i386/kernel/timers/common.c b/arch/i386/kernel/timers/common.c
deleted file mode 100644
index 8163fe0cf1..0000000000
--- a/arch/i386/kernel/timers/common.c
+++ /dev/null
@@ -1,172 +0,0 @@
1/*
2 * Common functions used across the timers go here
3 */
4
5#include <linux/init.h>
6#include <linux/timex.h>
7#include <linux/errno.h>
8#include <linux/jiffies.h>
9#include <linux/module.h>
10
11#include <asm/io.h>
12#include <asm/timer.h>
13#include <asm/hpet.h>
14
15#include "mach_timer.h"
16
17/* ------ Calibrate the TSC -------
18 * Return 2^32 * (1 / (TSC clocks per usec)) for do_fast_gettimeoffset().
19 * Too much 64-bit arithmetic here to do this cleanly in C, and for
20 * accuracy's sake we want to keep the overhead on the CTC speaker (channel 2)
21 * output busy loop as low as possible. We avoid reading the CTC registers
22 * directly because of the awkward 8-bit access mechanism of the 82C54
23 * device.
24 */
25
26#define CALIBRATE_TIME (5 * 1000020/HZ)
27
28unsigned long calibrate_tsc(void)
29{
30 mach_prepare_counter();
31
32 {
33 unsigned long startlow, starthigh;
34 unsigned long endlow, endhigh;
35 unsigned long count;
36
37 rdtsc(startlow,starthigh);
38 mach_countup(&count);
39 rdtsc(endlow,endhigh);
40
41
42 /* Error: ECTCNEVERSET */
43 if (count <= 1)
44 goto bad_ctc;
45
46 /* 64-bit subtract - gcc just messes up with long longs */
47 __asm__("subl %2,%0\n\t"
48 "sbbl %3,%1"
49 :"=a" (endlow), "=d" (endhigh)
50 :"g" (startlow), "g" (starthigh),
51 "0" (endlow), "1" (endhigh));
52
53 /* Error: ECPUTOOFAST */
54 if (endhigh)
55 goto bad_ctc;
56
57 /* Error: ECPUTOOSLOW */
58 if (endlow <= CALIBRATE_TIME)
59 goto bad_ctc;
60
61 __asm__("divl %2"
62 :"=a" (endlow), "=d" (endhigh)
63 :"r" (endlow), "0" (0), "1" (CALIBRATE_TIME));
64
65 return endlow;
66 }
67
68 /*
69 * The CTC wasn't reliable: we got a hit on the very first read,
70 * or the CPU was so fast/slow that the quotient wouldn't fit in
71 * 32 bits..
72 */
73bad_ctc:
74 return 0;
75}
76
77#ifdef CONFIG_HPET_TIMER
78/* ------ Calibrate the TSC using HPET -------
79 * Return 2^32 * (1 / (TSC clocks per usec)) for getting the CPU freq.
80 * Second output is parameter 1 (when non NULL)
81 * Set 2^32 * (1 / (tsc per HPET clk)) for delay_hpet().
82 * calibrate_tsc() calibrates the processor TSC by comparing
83 * it to the HPET timer of known frequency.
84 * Too much 64-bit arithmetic here to do this cleanly in C
85 */
86#define CALIBRATE_CNT_HPET (5 * hpet_tick)
87#define CALIBRATE_TIME_HPET (5 * KERNEL_TICK_USEC)
88
89unsigned long __devinit calibrate_tsc_hpet(unsigned long *tsc_hpet_quotient_ptr)
90{
91 unsigned long tsc_startlow, tsc_starthigh;
92 unsigned long tsc_endlow, tsc_endhigh;
93 unsigned long hpet_start, hpet_end;
94 unsigned long result, remain;
95
96 hpet_start = hpet_readl(HPET_COUNTER);
97 rdtsc(tsc_startlow, tsc_starthigh);
98 do {
99 hpet_end = hpet_readl(HPET_COUNTER);
100 } while ((hpet_end - hpet_start) < CALIBRATE_CNT_HPET);
101 rdtsc(tsc_endlow, tsc_endhigh);
102
103 /* 64-bit subtract - gcc just messes up with long longs */
104 __asm__("subl %2,%0\n\t"
105 "sbbl %3,%1"
106 :"=a" (tsc_endlow), "=d" (tsc_endhigh)
107 :"g" (tsc_startlow), "g" (tsc_starthigh),
108 "0" (tsc_endlow), "1" (tsc_endhigh));
109
110 /* Error: ECPUTOOFAST */
111 if (tsc_endhigh)
112 goto bad_calibration;
113
114 /* Error: ECPUTOOSLOW */
115 if (tsc_endlow <= CALIBRATE_TIME_HPET)
116 goto bad_calibration;
117
118 ASM_DIV64_REG(result, remain, tsc_endlow, 0, CALIBRATE_TIME_HPET);
119 if (remain > (tsc_endlow >> 1))
120 result++; /* rounding the result */
121
122 if (tsc_hpet_quotient_ptr) {
123 unsigned long tsc_hpet_quotient;
124
125 ASM_DIV64_REG(tsc_hpet_quotient, remain, tsc_endlow, 0,
126 CALIBRATE_CNT_HPET);
127 if (remain > (tsc_endlow >> 1))
128 tsc_hpet_quotient++; /* rounding the result */
129 *tsc_hpet_quotient_ptr = tsc_hpet_quotient;
130 }
131
132 return result;
133bad_calibration:
134 /*
135 * the CPU was so fast/slow that the quotient wouldn't fit in
136 * 32 bits..
137 */
138 return 0;
139}
140#endif
141
142
143unsigned long read_timer_tsc(void)
144{
145 unsigned long retval;
146 rdtscl(retval);
147 return retval;
148}
149
150
151/* calculate cpu_khz */
152void init_cpu_khz(void)
153{
154 if (cpu_has_tsc) {
155 unsigned long tsc_quotient = calibrate_tsc();
156 if (tsc_quotient) {
157 /* report CPU clock rate in Hz.
158 * The formula is (10^6 * 2^32) / (2^32 * 1 / (clocks/us)) =
159 * clock/second. Our precision is about 100 ppm.
160 */
161 { unsigned long eax=0, edx=1000;
162 __asm__("divl %2"
163 :"=a" (cpu_khz), "=d" (edx)
164 :"r" (tsc_quotient),
165 "0" (eax), "1" (edx));
166 printk("Detected %u.%03u MHz processor.\n",
167 cpu_khz / 1000, cpu_khz % 1000);
168 }
169 }
170 }
171}
172
diff --git a/arch/i386/kernel/timers/timer.c b/arch/i386/kernel/timers/timer.c
deleted file mode 100644
index 7e39ed8e33..0000000000
--- a/arch/i386/kernel/timers/timer.c
+++ /dev/null
@@ -1,75 +0,0 @@
1#include <linux/init.h>
2#include <linux/kernel.h>
3#include <linux/string.h>
4#include <asm/timer.h>
5
6#ifdef CONFIG_HPET_TIMER
7/*
8 * HPET memory read is slower than tsc reads, but is more dependable as it
9 * always runs at constant frequency and reduces complexity due to
10 * cpufreq. So, we prefer HPET timer to tsc based one. Also, we cannot use
11 * timer_pit when HPET is active. So, we default to timer_tsc.
12 */
13#endif
14/* list of timers, ordered by preference, NULL terminated */
15static struct init_timer_opts* __initdata timers[] = {
16#ifdef CONFIG_X86_CYCLONE_TIMER
17 &timer_cyclone_init,
18#endif
19#ifdef CONFIG_HPET_TIMER
20 &timer_hpet_init,
21#endif
22#ifdef CONFIG_X86_PM_TIMER
23 &timer_pmtmr_init,
24#endif
25 &timer_tsc_init,
26 &timer_pit_init,
27 NULL,
28};
29
30static char clock_override[10] __initdata;
31
32static int __init clock_setup(char* str)
33{
34 if (str)
35 strlcpy(clock_override, str, sizeof(clock_override));
36 return 1;
37}
38__setup("clock=", clock_setup);
39
40
41/* The chosen timesource has been found to be bad.
42 * Fall back to a known good timesource (the PIT)
43 */
44void clock_fallback(void)
45{
46 cur_timer = &timer_pit;
47}
48
49/* iterates through the list of timers, returning the first
50 * one that initializes successfully.
51 */
52struct timer_opts* __init select_timer(void)
53{
54 int i = 0;
55
56 /* find most preferred working timer */
57 while (timers[i]) {
58 if (timers[i]->init)
59 if (timers[i]->init(clock_override) == 0)
60 return timers[i]->opts;
61 ++i;
62 }
63
64 panic("select_timer: Cannot find a suitable timer\n");
65 return NULL;
66}
67
68int read_current_timer(unsigned long *timer_val)
69{
70 if (cur_timer->read_timer) {
71 *timer_val = cur_timer->read_timer();
72 return 0;
73 }
74 return -1;
75}
diff --git a/arch/i386/kernel/timers/timer_cyclone.c b/arch/i386/kernel/timers/timer_cyclone.c
deleted file mode 100644
index 13892a65c9..0000000000
--- a/arch/i386/kernel/timers/timer_cyclone.c
+++ /dev/null
@@ -1,259 +0,0 @@
1/* Cyclone-timer:
2 * This code implements timer_ops for the cyclone counter found
3 * on IBM x440, x360, and other Summit based systems.
4 *
5 * Copyright (C) 2002 IBM, John Stultz (johnstul@us.ibm.com)
6 */
7
8
9#include <linux/spinlock.h>
10#include <linux/init.h>
11#include <linux/timex.h>
12#include <linux/errno.h>
13#include <linux/string.h>
14#include <linux/jiffies.h>
15
16#include <asm/timer.h>
17#include <asm/io.h>
18#include <asm/pgtable.h>
19#include <asm/fixmap.h>
20#include <asm/i8253.h>
21
22#include "io_ports.h"
23
24/* Number of usecs that the last interrupt was delayed */
25static int delay_at_last_interrupt;
26
27#define CYCLONE_CBAR_ADDR 0xFEB00CD0
28#define CYCLONE_PMCC_OFFSET 0x51A0
29#define CYCLONE_MPMC_OFFSET 0x51D0
30#define CYCLONE_MPCS_OFFSET 0x51A8
31#define CYCLONE_TIMER_FREQ 100000000
32#define CYCLONE_TIMER_MASK (((u64)1<<40)-1) /* 40 bit mask */
33int use_cyclone = 0;
34
35static u32* volatile cyclone_timer; /* Cyclone MPMC0 register */
36static u32 last_cyclone_low;
37static u32 last_cyclone_high;
38static unsigned long long monotonic_base;
39static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
40
41/* helper macro to atomically read both cyclone counter registers */
42#define read_cyclone_counter(low,high) \
43 do{ \
44 high = cyclone_timer[1]; low = cyclone_timer[0]; \
45 } while (high != cyclone_timer[1]);
46
47
48static void mark_offset_cyclone(void)
49{
50 unsigned long lost, delay;
51 unsigned long delta = last_cyclone_low;
52 int count;
53 unsigned long long this_offset, last_offset;
54
55 write_seqlock(&monotonic_lock);
56 last_offset = ((unsigned long long)last_cyclone_high<<32)|last_cyclone_low;
57
58 spin_lock(&i8253_lock);
59 read_cyclone_counter(last_cyclone_low,last_cyclone_high);
60
61 /* read values for delay_at_last_interrupt */
62 outb_p(0x00, 0x43); /* latch the count ASAP */
63
64 count = inb_p(0x40); /* read the latched count */
65 count |= inb(0x40) << 8;
66
67 /*
68 * VIA686a test code... reset the latch if count > max + 1
69 * from timer_pit.c - cjb
70 */
71 if (count > LATCH) {
72 outb_p(0x34, PIT_MODE);
73 outb_p(LATCH & 0xff, PIT_CH0);
74 outb(LATCH >> 8, PIT_CH0);
75 count = LATCH - 1;
76 }
77 spin_unlock(&i8253_lock);
78
79 /* lost tick compensation */
80 delta = last_cyclone_low - delta;
81 delta /= (CYCLONE_TIMER_FREQ/1000000);
82 delta += delay_at_last_interrupt;
83 lost = delta/(1000000/HZ);
84 delay = delta%(1000000/HZ);
85 if (lost >= 2)
86 jiffies_64 += lost-1;
87
88 /* update the monotonic base value */
89 this_offset = ((unsigned long long)last_cyclone_high<<32)|last_cyclone_low;
90 monotonic_base += (this_offset - last_offset) & CYCLONE_TIMER_MASK;
91 write_sequnlock(&monotonic_lock);
92
93 /* calculate delay_at_last_interrupt */
94 count = ((LATCH-1) - count) * TICK_SIZE;
95 delay_at_last_interrupt = (count + LATCH/2) / LATCH;
96
97
98 /* catch corner case where tick rollover occured
99 * between cyclone and pit reads (as noted when
100 * usec delta is > 90% # of usecs/tick)
101 */
102 if (lost && abs(delay - delay_at_last_interrupt) > (900000/HZ))
103 jiffies_64++;
104}
105
106static unsigned long get_offset_cyclone(void)
107{
108 u32 offset;
109
110 if(!cyclone_timer)
111 return delay_at_last_interrupt;
112
113 /* Read the cyclone timer */
114 offset = cyclone_timer[0];
115
116 /* .. relative to previous jiffy */
117 offset = offset - last_cyclone_low;
118
119 /* convert cyclone ticks to microseconds */
120 /* XXX slow, can we speed this up? */
121 offset = offset/(CYCLONE_TIMER_FREQ/1000000);
122
123 /* our adjusted time offset in microseconds */
124 return delay_at_last_interrupt + offset;
125}
126
127static unsigned long long monotonic_clock_cyclone(void)
128{
129 u32 now_low, now_high;
130 unsigned long long last_offset, this_offset, base;
131 unsigned long long ret;
132 unsigned seq;
133
134 /* atomically read monotonic base & last_offset */
135 do {
136 seq = read_seqbegin(&monotonic_lock);
137 last_offset = ((unsigned long long)last_cyclone_high<<32)|last_cyclone_low;
138 base = monotonic_base;
139 } while (read_seqretry(&monotonic_lock, seq));
140
141
142 /* Read the cyclone counter */
143 read_cyclone_counter(now_low,now_high);
144 this_offset = ((unsigned long long)now_high<<32)|now_low;
145
146 /* convert to nanoseconds */
147 ret = base + ((this_offset - last_offset)&CYCLONE_TIMER_MASK);
148 return ret * (1000000000 / CYCLONE_TIMER_FREQ);
149}
150
151static int __init init_cyclone(char* override)
152{
153 u32* reg;
154 u32 base; /* saved cyclone base address */
155 u32 pageaddr; /* page that contains cyclone_timer register */
156 u32 offset; /* offset from pageaddr to cyclone_timer register */
157 int i;
158
159 /* check clock override */
160 if (override[0] && strncmp(override,"cyclone",7))
161 return -ENODEV;
162
163 /*make sure we're on a summit box*/
164 if(!use_cyclone) return -ENODEV;
165
166 printk(KERN_INFO "Summit chipset: Starting Cyclone Counter.\n");
167
168 /* find base address */
169 pageaddr = (CYCLONE_CBAR_ADDR)&PAGE_MASK;
170 offset = (CYCLONE_CBAR_ADDR)&(~PAGE_MASK);
171 set_fixmap_nocache(FIX_CYCLONE_TIMER, pageaddr);
172 reg = (u32*)(fix_to_virt(FIX_CYCLONE_TIMER) + offset);
173 if(!reg){
174 printk(KERN_ERR "Summit chipset: Could not find valid CBAR register.\n");
175 return -ENODEV;
176 }
177 base = *reg;
178 if(!base){
179 printk(KERN_ERR "Summit chipset: Could not find valid CBAR value.\n");
180 return -ENODEV;
181 }
182
183 /* setup PMCC */
184 pageaddr = (base + CYCLONE_PMCC_OFFSET)&PAGE_MASK;
185 offset = (base + CYCLONE_PMCC_OFFSET)&(~PAGE_MASK);
186 set_fixmap_nocache(FIX_CYCLONE_TIMER, pageaddr);
187 reg = (u32*)(fix_to_virt(FIX_CYCLONE_TIMER) + offset);
188 if(!reg){
189 printk(KERN_ERR "Summit chipset: Could not find valid PMCC register.\n");
190 return -ENODEV;
191 }
192 reg[0] = 0x00000001;
193
194 /* setup MPCS */
195 pageaddr = (base + CYCLONE_MPCS_OFFSET)&PAGE_MASK;
196 offset = (base + CYCLONE_MPCS_OFFSET)&(~PAGE_MASK);
197 set_fixmap_nocache(FIX_CYCLONE_TIMER, pageaddr);
198 reg = (u32*)(fix_to_virt(FIX_CYCLONE_TIMER) + offset);
199 if(!reg){
200 printk(KERN_ERR "Summit chipset: Could not find valid MPCS register.\n");
201 return -ENODEV;
202 }
203 reg[0] = 0x00000001;
204
205 /* map in cyclone_timer */
206 pageaddr = (base + CYCLONE_MPMC_OFFSET)&PAGE_MASK;
207 offset = (base + CYCLONE_MPMC_OFFSET)&(~PAGE_MASK);
208 set_fixmap_nocache(FIX_CYCLONE_TIMER, pageaddr);
209 cyclone_timer = (u32*)(fix_to_virt(FIX_CYCLONE_TIMER) + offset);
210 if(!cyclone_timer){
211 printk(KERN_ERR "Summit chipset: Could not find valid MPMC register.\n");
212 return -ENODEV;
213 }
214
215 /*quick test to make sure its ticking*/
216 for(i=0; i<3; i++){
217 u32 old = cyclone_timer[0];
218 int stall = 100;
219 while(stall--) barrier();
220 if(cyclone_timer[0] == old){
221 printk(KERN_ERR "Summit chipset: Counter not counting! DISABLED\n");
222 cyclone_timer = 0;
223 return -ENODEV;
224 }
225 }
226
227 init_cpu_khz();
228
229 /* Everything looks good! */
230 return 0;
231}
232
233
234static void delay_cyclone(unsigned long loops)
235{
236 unsigned long bclock, now;
237 if(!cyclone_timer)
238 return;
239 bclock = cyclone_timer[0];
240 do {
241 rep_nop();
242 now = cyclone_timer[0];
243 } while ((now-bclock) < loops);
244}
245/************************************************************/
246
247/* cyclone timer_opts struct */
248static struct timer_opts timer_cyclone = {
249 .name = "cyclone",
250 .mark_offset = mark_offset_cyclone,
251 .get_offset = get_offset_cyclone,
252 .monotonic_clock = monotonic_clock_cyclone,
253 .delay = delay_cyclone,
254};
255
256struct init_timer_opts __initdata timer_cyclone_init = {
257 .init = init_cyclone,
258 .opts = &timer_cyclone,
259};
diff --git a/arch/i386/kernel/timers/timer_hpet.c b/arch/i386/kernel/timers/timer_hpet.c
deleted file mode 100644
index 17a6fe7166..0000000000
--- a/arch/i386/kernel/timers/timer_hpet.c
+++ /dev/null
@@ -1,217 +0,0 @@
1/*
2 * This code largely moved from arch/i386/kernel/time.c.
3 * See comments there for proper credits.
4 */
5
6#include <linux/spinlock.h>
7#include <linux/init.h>
8#include <linux/timex.h>
9#include <linux/errno.h>
10#include <linux/string.h>
11#include <linux/jiffies.h>
12
13#include <asm/timer.h>
14#include <asm/io.h>
15#include <asm/processor.h>
16
17#include "io_ports.h"
18#include "mach_timer.h"
19#include <asm/hpet.h>
20
21static unsigned long hpet_usec_quotient __read_mostly; /* convert hpet clks to usec */
22static unsigned long tsc_hpet_quotient __read_mostly; /* convert tsc to hpet clks */
23static unsigned long hpet_last; /* hpet counter value at last tick*/
24static unsigned long last_tsc_low; /* lsb 32 bits of Time Stamp Counter */
25static unsigned long last_tsc_high; /* msb 32 bits of Time Stamp Counter */
26static unsigned long long monotonic_base;
27static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
28
29/* convert from cycles(64bits) => nanoseconds (64bits)
30 * basic equation:
31 * ns = cycles / (freq / ns_per_sec)
32 * ns = cycles * (ns_per_sec / freq)
33 * ns = cycles * (10^9 / (cpu_khz * 10^3))
34 * ns = cycles * (10^6 / cpu_khz)
35 *
36 * Then we use scaling math (suggested by george@mvista.com) to get:
37 * ns = cycles * (10^6 * SC / cpu_khz) / SC
38 * ns = cycles * cyc2ns_scale / SC
39 *
40 * And since SC is a constant power of two, we can convert the div
41 * into a shift.
42 *
43 * We can use khz divisor instead of mhz to keep a better percision, since
44 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
45 * (mathieu.desnoyers@polymtl.ca)
46 *
47 * -johnstul@us.ibm.com "math is hard, lets go shopping!"
48 */
49static unsigned long cyc2ns_scale __read_mostly;
50#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
51
52static inline void set_cyc2ns_scale(unsigned long cpu_khz)
53{
54 cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
55}
56
57static inline unsigned long long cycles_2_ns(unsigned long long cyc)
58{
59 return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
60}
61
62static unsigned long long monotonic_clock_hpet(void)
63{
64 unsigned long long last_offset, this_offset, base;
65 unsigned seq;
66
67 /* atomically read monotonic base & last_offset */
68 do {
69 seq = read_seqbegin(&monotonic_lock);
70 last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
71 base = monotonic_base;
72 } while (read_seqretry(&monotonic_lock, seq));
73
74 /* Read the Time Stamp Counter */
75 rdtscll(this_offset);
76
77 /* return the value in ns */
78 return base + cycles_2_ns(this_offset - last_offset);
79}
80
81static unsigned long get_offset_hpet(void)
82{
83 register unsigned long eax, edx;
84
85 eax = hpet_readl(HPET_COUNTER);
86 eax -= hpet_last; /* hpet delta */
87 eax = min(hpet_tick, eax);
88 /*
89 * Time offset = (hpet delta) * ( usecs per HPET clock )
90 * = (hpet delta) * ( usecs per tick / HPET clocks per tick)
91 * = (hpet delta) * ( hpet_usec_quotient ) / (2^32)
92 *
93 * Where,
94 * hpet_usec_quotient = (2^32 * usecs per tick)/HPET clocks per tick
95 *
96 * Using a mull instead of a divl saves some cycles in critical path.
97 */
98 ASM_MUL64_REG(eax, edx, hpet_usec_quotient, eax);
99
100 /* our adjusted time offset in microseconds */
101 return edx;
102}
103
104static void mark_offset_hpet(void)
105{
106 unsigned long long this_offset, last_offset;
107 unsigned long offset;
108
109 write_seqlock(&monotonic_lock);
110 last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
111 rdtsc(last_tsc_low, last_tsc_high);
112
113 if (hpet_use_timer)
114 offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
115 else
116 offset = hpet_readl(HPET_COUNTER);
117 if (unlikely(((offset - hpet_last) >= (2*hpet_tick)) && (hpet_last != 0))) {
118 int lost_ticks = ((offset - hpet_last) / hpet_tick) - 1;
119 jiffies_64 += lost_ticks;
120 }
121 hpet_last = offset;
122
123 /* update the monotonic base value */
124 this_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
125 monotonic_base += cycles_2_ns(this_offset - last_offset);
126 write_sequnlock(&monotonic_lock);
127}
128
129static void delay_hpet(unsigned long loops)
130{
131 unsigned long hpet_start, hpet_end;
132 unsigned long eax;
133
134 /* loops is the number of cpu cycles. Convert it to hpet clocks */
135 ASM_MUL64_REG(eax, loops, tsc_hpet_quotient, loops);
136
137 hpet_start = hpet_readl(HPET_COUNTER);
138 do {
139 rep_nop();
140 hpet_end = hpet_readl(HPET_COUNTER);
141 } while ((hpet_end - hpet_start) < (loops));
142}
143
144static struct timer_opts timer_hpet;
145
146static int __init init_hpet(char* override)
147{
148 unsigned long result, remain;
149
150 /* check clock override */
151 if (override[0] && strncmp(override,"hpet",4))
152 return -ENODEV;
153
154 if (!is_hpet_enabled())
155 return -ENODEV;
156
157 printk("Using HPET for gettimeofday\n");
158 if (cpu_has_tsc) {
159 unsigned long tsc_quotient = calibrate_tsc_hpet(&tsc_hpet_quotient);
160 if (tsc_quotient) {
161 /* report CPU clock rate in Hz.
162 * The formula is (10^6 * 2^32) / (2^32 * 1 / (clocks/us)) =
163 * clock/second. Our precision is about 100 ppm.
164 */
165 { unsigned long eax=0, edx=1000;
166 ASM_DIV64_REG(cpu_khz, edx, tsc_quotient,
167 eax, edx);
168 printk("Detected %u.%03u MHz processor.\n",
169 cpu_khz / 1000, cpu_khz % 1000);
170 }
171 set_cyc2ns_scale(cpu_khz);
172 }
173 /* set this only when cpu_has_tsc */
174 timer_hpet.read_timer = read_timer_tsc;
175 }
176
177 /*
178 * Math to calculate hpet to usec multiplier
179 * Look for the comments at get_offset_hpet()
180 */
181 ASM_DIV64_REG(result, remain, hpet_tick, 0, KERNEL_TICK_USEC);
182 if (remain > (hpet_tick >> 1))
183 result++; /* rounding the result */
184 hpet_usec_quotient = result;
185
186 return 0;
187}
188
189static int hpet_resume(void)
190{
191 write_seqlock(&monotonic_lock);
192 /* Assume this is the last mark offset time */
193 rdtsc(last_tsc_low, last_tsc_high);
194
195 if (hpet_use_timer)
196 hpet_last = hpet_readl(HPET_T0_CMP) - hpet_tick;
197 else
198 hpet_last = hpet_readl(HPET_COUNTER);
199 write_sequnlock(&monotonic_lock);
200 return 0;
201}
202/************************************************************/
203
204/* tsc timer_opts struct */
205static struct timer_opts timer_hpet __read_mostly = {
206 .name = "hpet",
207 .mark_offset = mark_offset_hpet,
208 .get_offset = get_offset_hpet,
209 .monotonic_clock = monotonic_clock_hpet,
210 .delay = delay_hpet,
211 .resume = hpet_resume,
212};
213
214struct init_timer_opts __initdata timer_hpet_init = {
215 .init = init_hpet,
216 .opts = &timer_hpet,
217};
diff --git a/arch/i386/kernel/timers/timer_none.c b/arch/i386/kernel/timers/timer_none.c
deleted file mode 100644
index 4ea2f414db..0000000000
--- a/arch/i386/kernel/timers/timer_none.c
+++ /dev/null
@@ -1,39 +0,0 @@
1#include <linux/init.h>
2#include <asm/timer.h>
3
4static void mark_offset_none(void)
5{
6 /* nothing needed */
7}
8
9static unsigned long get_offset_none(void)
10{
11 return 0;
12}
13
14static unsigned long long monotonic_clock_none(void)
15{
16 return 0;
17}
18
19static void delay_none(unsigned long loops)
20{
21 int d0;
22 __asm__ __volatile__(
23 "\tjmp 1f\n"
24 ".align 16\n"
25 "1:\tjmp 2f\n"
26 ".align 16\n"
27 "2:\tdecl %0\n\tjns 2b"
28 :"=&a" (d0)
29 :"0" (loops));
30}
31
32/* none timer_opts struct */
33struct timer_opts timer_none = {
34 .name = "none",
35 .mark_offset = mark_offset_none,
36 .get_offset = get_offset_none,
37 .monotonic_clock = monotonic_clock_none,
38 .delay = delay_none,
39};
diff --git a/arch/i386/kernel/timers/timer_pit.c b/arch/i386/kernel/timers/timer_pit.c
deleted file mode 100644
index b9b6bd56b9..0000000000
--- a/arch/i386/kernel/timers/timer_pit.c
+++ /dev/null
@@ -1,177 +0,0 @@
1/*
2 * This code largely moved from arch/i386/kernel/time.c.
3 * See comments there for proper credits.
4 */
5
6#include <linux/spinlock.h>
7#include <linux/module.h>
8#include <linux/device.h>
9#include <linux/sysdev.h>
10#include <linux/timex.h>
11#include <asm/delay.h>
12#include <asm/mpspec.h>
13#include <asm/timer.h>
14#include <asm/smp.h>
15#include <asm/io.h>
16#include <asm/arch_hooks.h>
17#include <asm/i8253.h>
18
19#include "do_timer.h"
20#include "io_ports.h"
21
22static int count_p; /* counter in get_offset_pit() */
23
24static int __init init_pit(char* override)
25{
26 /* check clock override */
27 if (override[0] && strncmp(override,"pit",3))
28 printk(KERN_ERR "Warning: clock= override failed. Defaulting "
29 "to PIT\n");
30 init_cpu_khz();
31 count_p = LATCH;
32 return 0;
33}
34
35static void mark_offset_pit(void)
36{
37 /* nothing needed */
38}
39
40static unsigned long long monotonic_clock_pit(void)
41{
42 return 0;
43}
44
45static void delay_pit(unsigned long loops)
46{
47 int d0;
48 __asm__ __volatile__(
49 "\tjmp 1f\n"
50 ".align 16\n"
51 "1:\tjmp 2f\n"
52 ".align 16\n"
53 "2:\tdecl %0\n\tjns 2b"
54 :"=&a" (d0)
55 :"0" (loops));
56}
57
58
59/* This function must be called with xtime_lock held.
60 * It was inspired by Steve McCanne's microtime-i386 for BSD. -- jrs
61 *
62 * However, the pc-audio speaker driver changes the divisor so that
63 * it gets interrupted rather more often - it loads 64 into the
64 * counter rather than 11932! This has an adverse impact on
65 * do_gettimeoffset() -- it stops working! What is also not
66 * good is that the interval that our timer function gets called
67 * is no longer 10.0002 ms, but 9.9767 ms. To get around this
68 * would require using a different timing source. Maybe someone
69 * could use the RTC - I know that this can interrupt at frequencies
70 * ranging from 8192Hz to 2Hz. If I had the energy, I'd somehow fix
71 * it so that at startup, the timer code in sched.c would select
72 * using either the RTC or the 8253 timer. The decision would be
73 * based on whether there was any other device around that needed
74 * to trample on the 8253. I'd set up the RTC to interrupt at 1024 Hz,
75 * and then do some jiggery to have a version of do_timer that
76 * advanced the clock by 1/1024 s. Every time that reached over 1/100
77 * of a second, then do all the old code. If the time was kept correct
78 * then do_gettimeoffset could just return 0 - there is no low order
79 * divider that can be accessed.
80 *
81 * Ideally, you would be able to use the RTC for the speaker driver,
82 * but it appears that the speaker driver really needs interrupt more
83 * often than every 120 us or so.
84 *
85 * Anyway, this needs more thought.... pjsg (1993-08-28)
86 *
87 * If you are really that interested, you should be reading
88 * comp.protocols.time.ntp!
89 */
90
91static unsigned long get_offset_pit(void)
92{
93 int count;
94 unsigned long flags;
95 static unsigned long jiffies_p = 0;
96
97 /*
98 * cache volatile jiffies temporarily; we have xtime_lock.
99 */
100 unsigned long jiffies_t;
101
102 spin_lock_irqsave(&i8253_lock, flags);
103 /* timer count may underflow right here */
104 outb_p(0x00, PIT_MODE); /* latch the count ASAP */
105
106 count = inb_p(PIT_CH0); /* read the latched count */
107
108 /*
109 * We do this guaranteed double memory access instead of a _p
110 * postfix in the previous port access. Wheee, hackady hack
111 */
112 jiffies_t = jiffies;
113
114 count |= inb_p(PIT_CH0) << 8;
115
116 /* VIA686a test code... reset the latch if count > max + 1 */
117 if (count > LATCH) {
118 outb_p(0x34, PIT_MODE);
119 outb_p(LATCH & 0xff, PIT_CH0);
120 outb(LATCH >> 8, PIT_CH0);
121 count = LATCH - 1;
122 }
123
124 /*
125 * avoiding timer inconsistencies (they are rare, but they happen)...
126 * there are two kinds of problems that must be avoided here:
127 * 1. the timer counter underflows
128 * 2. hardware problem with the timer, not giving us continuous time,
129 * the counter does small "jumps" upwards on some Pentium systems,
130 * (see c't 95/10 page 335 for Neptun bug.)
131 */
132
133 if( jiffies_t == jiffies_p ) {
134 if( count > count_p ) {
135 /* the nutcase */
136 count = do_timer_overflow(count);
137 }
138 } else
139 jiffies_p = jiffies_t;
140
141 count_p = count;
142
143 spin_unlock_irqrestore(&i8253_lock, flags);
144
145 count = ((LATCH-1) - count) * TICK_SIZE;
146 count = (count + LATCH/2) / LATCH;
147
148 return count;
149}
150
151
152/* tsc timer_opts struct */
153struct timer_opts timer_pit = {
154 .name = "pit",
155 .mark_offset = mark_offset_pit,
156 .get_offset = get_offset_pit,
157 .monotonic_clock = monotonic_clock_pit,
158 .delay = delay_pit,
159};
160
161struct init_timer_opts __initdata timer_pit_init = {
162 .init = init_pit,
163 .opts = &timer_pit,
164};
165
166void setup_pit_timer(void)
167{
168 unsigned long flags;
169
170 spin_lock_irqsave(&i8253_lock, flags);
171 outb_p(0x34,PIT_MODE); /* binary, mode 2, LSB/MSB, ch 0 */
172 udelay(10);
173 outb_p(LATCH & 0xff , PIT_CH0); /* LSB */
174 udelay(10);
175 outb(LATCH >> 8 , PIT_CH0); /* MSB */
176 spin_unlock_irqrestore(&i8253_lock, flags);
177}
diff --git a/arch/i386/kernel/timers/timer_pm.c b/arch/i386/kernel/timers/timer_pm.c
deleted file mode 100644
index 144e94a049..0000000000
--- a/arch/i386/kernel/timers/timer_pm.c
+++ /dev/null
@@ -1,342 +0,0 @@
1/*
2 * (C) Dominik Brodowski <linux@brodo.de> 2003
3 *
4 * Driver to use the Power Management Timer (PMTMR) available in some
5 * southbridges as primary timing source for the Linux kernel.
6 *
7 * Based on parts of linux/drivers/acpi/hardware/hwtimer.c, timer_pit.c,
8 * timer_hpet.c, and on Arjan van de Ven's implementation for 2.4.
9 *
10 * This file is licensed under the GPL v2.
11 */
12
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/device.h>
17#include <linux/init.h>
18#include <linux/pci.h>
19#include <asm/types.h>
20#include <asm/timer.h>
21#include <asm/smp.h>
22#include <asm/io.h>
23#include <asm/arch_hooks.h>
24
25#include <linux/timex.h>
26#include "mach_timer.h"
27
28/* Number of PMTMR ticks expected during calibration run */
29#define PMTMR_TICKS_PER_SEC 3579545
30#define PMTMR_EXPECTED_RATE \
31 ((CALIBRATE_LATCH * (PMTMR_TICKS_PER_SEC >> 10)) / (CLOCK_TICK_RATE>>10))
32
33
34/* The I/O port the PMTMR resides at.
35 * The location is detected during setup_arch(),
36 * in arch/i386/acpi/boot.c */
37u32 pmtmr_ioport = 0;
38
39
40/* value of the Power timer at last timer interrupt */
41static u32 offset_tick;
42static u32 offset_delay;
43
44static unsigned long long monotonic_base;
45static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
46
47#define ACPI_PM_MASK 0xFFFFFF /* limit it to 24 bits */
48
49static int pmtmr_need_workaround __read_mostly = 1;
50
/*
 * Safely read the ACPI PM timer, returning its 24-bit count.
 * On chipsets with the read erratum the timer latch is bypassed,
 * so a single inl() can return a transiently bogus value.
 */
static inline u32 read_pmtmr(void)
{
	if (pmtmr_need_workaround) {
		u32 v1, v2, v3;

		/* It has been reported that because of various broken
		 * chipsets (ICH4, PIIX4 and PIIX4E) where the ACPI PM time
		 * source is not latched, so you must read it multiple
		 * times to insure a safe value is read.
		 */
		do {
			v1 = inl(pmtmr_ioport);
			v2 = inl(pmtmr_ioport);
			v3 = inl(pmtmr_ioport);
		} while ((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1)
			 || (v3 > v1 && v3 < v2));
		/* Loop exits when the middle sample v2 is monotonically
		 * ordered with its neighbours, i.e. not a glitch read. */

		/* mask the output to 24 bits */
		return v2 & ACPI_PM_MASK;
	}

	return inl(pmtmr_ioport) & ACPI_PM_MASK;
}
75
76
/*
 * Some boards have the PMTMR running way too fast. We check
 * the PMTMR rate against PIT channel 2 to catch these cases.
 * Returns 0 if the rate is plausible, -1 otherwise.
 */
static int verify_pmtmr_rate(void)
{
	u32 value1, value2;
	unsigned long count, delta;

	/* Sample the PM timer immediately before and after a fixed-length
	 * PIT channel 2 countdown (mach_countup busy-waits on it). */
	mach_prepare_counter();
	value1 = read_pmtmr();
	mach_countup(&count);
	value2 = read_pmtmr();
	/* 24-bit counter: the mask makes the subtraction wrap-safe */
	delta = (value2 - value1) & ACPI_PM_MASK;

	/* Check that the PMTMR delta is within 5% of what we expect */
	if (delta < (PMTMR_EXPECTED_RATE * 19) / 20 ||
	    delta > (PMTMR_EXPECTED_RATE * 21) / 20) {
		printk(KERN_INFO "PM-Timer running at invalid rate: %lu%% of normal - aborting.\n", 100UL * delta / PMTMR_EXPECTED_RATE);
		return -1;
	}

	return 0;
}
101
102
103static int init_pmtmr(char* override)
104{
105 u32 value1, value2;
106 unsigned int i;
107
108 if (override[0] && strncmp(override,"pmtmr",5))
109 return -ENODEV;
110
111 if (!pmtmr_ioport)
112 return -ENODEV;
113
114 /* we use the TSC for delay_pmtmr, so make sure it exists */
115 if (!cpu_has_tsc)
116 return -ENODEV;
117
118 /* "verify" this timing source */
119 value1 = read_pmtmr();
120 for (i = 0; i < 10000; i++) {
121 value2 = read_pmtmr();
122 if (value2 == value1)
123 continue;
124 if (value2 > value1)
125 goto pm_good;
126 if ((value2 < value1) && ((value2) < 0xFFF))
127 goto pm_good;
128 printk(KERN_INFO "PM-Timer had inconsistent results: 0x%#x, 0x%#x - aborting.\n", value1, value2);
129 return -EINVAL;
130 }
131 printk(KERN_INFO "PM-Timer had no reasonable result: 0x%#x - aborting.\n", value1);
132 return -ENODEV;
133
134pm_good:
135 if (verify_pmtmr_rate() != 0)
136 return -ENODEV;
137
138 init_cpu_khz();
139 return 0;
140}
141
/*
 * Convert PM-timer ticks to microseconds.
 *
 * The timer runs at 3.579545 MHz, so one tick is 1/3.579545 us
 * =~ 0.27936511 us =~ 286/1024 us (error: 0.024%).  The largest
 * per-tick delta (~35796 ticks at HZ=100) times 286 (0x11E) still
 * fits comfortably in a u32, so no overflow is possible here.
 */
static inline u32 cyc2us(u32 cycles)
{
	return (cycles * 286) >> 10;
}
154
/*
 * this gets called during each timer interrupt
 * - Called while holding the writer xtime_lock
 *
 * Samples the PM timer, folds the elapsed time into monotonic_base,
 * and compensates jiffies for any ticks the interrupt missed.
 */
static void mark_offset_pmtmr(void)
{
	u32 lost, delta, last_offset;
	static int first_run = 1;
	last_offset = offset_tick;

	write_seqlock(&monotonic_lock);

	offset_tick = read_pmtmr();

	/* calculate tick interval (24-bit wrap-safe subtraction) */
	delta = (offset_tick - last_offset) & ACPI_PM_MASK;

	/* convert to usecs */
	delta = cyc2us(delta);

	/* update the monotonic base value */
	monotonic_base += delta * NSEC_PER_USEC;
	write_sequnlock(&monotonic_lock);

	/* convert to ticks: carry in the sub-tick remainder from last time */
	delta += offset_delay;
	lost = delta / (USEC_PER_SEC / HZ);
	offset_delay = delta % (USEC_PER_SEC / HZ);


	/* compensate for lost ticks (the current interrupt accounts for one) */
	if (lost >= 2)
		jiffies_64 += lost - 1;

	/* don't calculate delay for first run,
	   or if we've got less then a tick */
	if (first_run || (lost < 1)) {
		first_run = 0;
		offset_delay = 0;
	}
}
196
/*
 * Resume hook: rebase the timer after suspend, pretending the current
 * PM-timer reading was the last mark_offset sample so no phantom time
 * is accumulated.  Always succeeds.
 */
static int pmtmr_resume(void)
{
	write_seqlock(&monotonic_lock);
	/* Assume this is the last mark offset time */
	offset_tick = read_pmtmr();
	write_sequnlock(&monotonic_lock);
	return 0;
}
205
/*
 * Monotonic clock in nanoseconds: monotonic_base plus the PM-timer
 * ticks elapsed since the last mark_offset_pmtmr(), read consistently
 * via the seqlock retry loop.
 */
static unsigned long long monotonic_clock_pmtmr(void)
{
	u32 last_offset, this_offset;
	unsigned long long base, ret;
	unsigned seq;


	/* atomically read monotonic base & last_offset */
	do {
		seq = read_seqbegin(&monotonic_lock);
		last_offset = offset_tick;
		base = monotonic_base;
	} while (read_seqretry(&monotonic_lock, seq));

	/* Read the pmtmr */
	this_offset = read_pmtmr();

	/* convert to nanoseconds (24-bit wrap-safe delta) */
	ret = (this_offset - last_offset) & ACPI_PM_MASK;
	ret = base + (cyc2us(ret) * NSEC_PER_USEC);
	return ret;
}
228
/*
 * Busy-wait for 'loops' TSC ticks.  Uses the TSC (not the PM timer)
 * because it is much cheaper to read; rep_nop() eases the spin on
 * hyperthreaded CPUs.
 */
static void delay_pmtmr(unsigned long loops)
{
	unsigned long start, cur;

	rdtscl(start);
	do {
		rep_nop();
		rdtscl(cur);
	} while (cur - start < loops);	/* unsigned math: wrap-safe */
}
240
241
/*
 * get the offset (in microseconds) from the last call to mark_offset()
 * - Called holding a reader xtime_lock
 */
static unsigned long get_offset_pmtmr(void)
{
	u32 now, offset, delta = 0;

	offset = offset_tick;
	now = read_pmtmr();
	/* 24-bit wrap-safe tick delta since the last timer interrupt */
	delta = (now - offset)&ACPI_PM_MASK;

	/* include the sub-tick remainder carried by mark_offset_pmtmr() */
	return (unsigned long) offset_delay + cyc2us(delta);
}
256
257
/* acpi (PM timer) timer_opts struct; delay/read_timer use the TSC */
static struct timer_opts timer_pmtmr = {
	.name			= "pmtmr",
	.mark_offset		= mark_offset_pmtmr,
	.get_offset		= get_offset_pmtmr,
	.monotonic_clock	= monotonic_clock_pmtmr,
	.delay			= delay_pmtmr,
	.read_timer		= read_timer_tsc,
	.resume			= pmtmr_resume,
};
268
/* Registration record: probe entry point plus the ops used on success. */
struct init_timer_opts __initdata timer_pmtmr_init = {
	.init = init_pmtmr,
	.opts = &timer_pmtmr,
};
273
#ifdef CONFIG_PCI
/*
 * PIIX4 Errata:
 *
 * The power management timer may return improper results when read.
 * Although the timer value settles properly after incrementing,
 * while incrementing there is a 3 ns window every 69.8 ns where the
 * timer value is indeterminate (a 4.2% chance that the data will be
 * incorrect when read). As a result, the ACPI free running count up
 * timer specification is violated due to erroneous reads.
 */
static int __init pmtmr_bug_check(void)
{
	/* chipsets that MAY have the bug (warn, keep workaround on) */
	static struct pci_device_id gray_list[] __initdata = {
		/* these chipsets may have bug. */
		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL,
			PCI_DEVICE_ID_INTEL_82801DB_0) },
		{ },
	};
	struct pci_dev *dev;
	int pmtmr_has_bug = 0;
	u8 rev;

	/* only relevant when the pmtmr backend is actually in use and
	 * the user hasn't already disabled the workaround (pmtmr_good) */
	if (cur_timer != &timer_pmtmr || !pmtmr_need_workaround)
		return 0;

	/* PIIX4: definitely buggy before revision 3 (PIIX4M) */
	dev = pci_get_device(PCI_VENDOR_ID_INTEL,
		PCI_DEVICE_ID_INTEL_82371AB_3, NULL);
	if (dev) {
		pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
		/* the bug has been fixed in PIIX4M */
		if (rev < 3) {
			printk(KERN_WARNING "* Found PM-Timer Bug on this "
				"chipset. Due to workarounds for a bug,\n"
				"* this time source is slow. Consider trying "
				"other time sources (clock=)\n");
			pmtmr_has_bug = 1;
		}
		pci_dev_put(dev);
	}

	if (pci_dev_present(gray_list)) {
		printk(KERN_WARNING "* This chipset may have PM-Timer Bug. Due"
			" to workarounds for a bug,\n"
			"* this time source is slow. If you are sure your timer"
			" does not have\n"
			"* this bug, please use \"pmtmr_good\" to disable the "
			"workaround\n");
		pmtmr_has_bug = 1;
	}

	/* clean chipset: drop to the fast single-read path */
	if (!pmtmr_has_bug)
		pmtmr_need_workaround = 0;

	return 0;
}
device_initcall(pmtmr_bug_check);
#endif
332
333static int __init pmtr_good_setup(char *__str)
334{
335 pmtmr_need_workaround = 0;
336 return 1;
337}
338__setup("pmtmr_good", pmtr_good_setup);
339
/* Module metadata (also recorded when this code is built in). */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION("Power Management Timer (PMTMR) as primary timing source for x86");
diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c
deleted file mode 100644
index f1187ddb0d..0000000000
--- a/arch/i386/kernel/timers/timer_tsc.c
+++ /dev/null
@@ -1,617 +0,0 @@
1/*
2 * This code largely moved from arch/i386/kernel/time.c.
3 * See comments there for proper credits.
4 *
5 * 2004-06-25 Jesper Juhl
6 * moved mark_offset_tsc below cpufreq_delayed_get to avoid gcc 3.4
7 * failing to inline.
8 */
9
10#include <linux/spinlock.h>
11#include <linux/init.h>
12#include <linux/timex.h>
13#include <linux/errno.h>
14#include <linux/cpufreq.h>
15#include <linux/string.h>
16#include <linux/jiffies.h>
17
18#include <asm/timer.h>
19#include <asm/io.h>
20/* processor.h for distable_tsc flag */
21#include <asm/processor.h>
22
23#include "io_ports.h"
24#include "mach_timer.h"
25
26#include <asm/hpet.h>
27#include <asm/i8253.h>
28
29#ifdef CONFIG_HPET_TIMER
30static unsigned long hpet_usec_quotient;
31static unsigned long hpet_last;
32static struct timer_opts timer_tsc;
33#endif
34
35static inline void cpufreq_delayed_get(void);
36
37int tsc_disable __devinitdata = 0;
38
39static int use_tsc;
40/* Number of usecs that the last interrupt was delayed */
41static int delay_at_last_interrupt;
42
43static unsigned long last_tsc_low; /* lsb 32 bits of Time Stamp Counter */
44static unsigned long last_tsc_high; /* msb 32 bits of Time Stamp Counter */
45static unsigned long long monotonic_base;
46static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
47
48/* Avoid compensating for lost ticks before TSCs are synched */
49static int detect_lost_ticks;
/* Late initcall: enable lost-tick compensation only after boot has
 * progressed far enough (avoids compensating before TSCs are synched). */
static int __init start_lost_tick_compensation(void)
{
	detect_lost_ticks = 1;
	return 0;
}
late_initcall(start_lost_tick_compensation);
56
57/* convert from cycles(64bits) => nanoseconds (64bits)
58 * basic equation:
59 * ns = cycles / (freq / ns_per_sec)
60 * ns = cycles * (ns_per_sec / freq)
61 * ns = cycles * (10^9 / (cpu_khz * 10^3))
62 * ns = cycles * (10^6 / cpu_khz)
63 *
64 * Then we use scaling math (suggested by george@mvista.com) to get:
65 * ns = cycles * (10^6 * SC / cpu_khz) / SC
66 * ns = cycles * cyc2ns_scale / SC
67 *
68 * And since SC is a constant power of two, we can convert the div
69 * into a shift.
70 *
71 * We can use khz divisor instead of mhz to keep a better percision, since
72 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
73 * (mathieu.desnoyers@polymtl.ca)
74 *
75 * -johnstul@us.ibm.com "math is hard, lets go shopping!"
76 */
77static unsigned long cyc2ns_scale __read_mostly;
78#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
79
/* Recompute cyc2ns_scale = 10^6 * 2^CYC2NS_SCALE_FACTOR / cpu_khz
 * (see the derivation in the comment block above). */
static inline void set_cyc2ns_scale(unsigned long cpu_khz)
{
	cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
}
84
/* TSC cycles -> nanoseconds using the precomputed fixed-point scale. */
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}
89
90static int count2; /* counter for mark_offset_tsc() */
91
92/* Cached *multiplier* to convert TSC counts to microseconds.
93 * (see the equation below).
94 * Equal to 2^32 * (1 / (clocks per usec) ).
95 * Initialized in time_init.
96 */
97static unsigned long fast_gettimeoffset_quotient;
98
/*
 * Microseconds elapsed since the last timer interrupt, derived from
 * the low 32 bits of the TSC.  Called with a reader xtime_lock held.
 */
static unsigned long get_offset_tsc(void)
{
	register unsigned long eax, edx;

	/* Read the Time Stamp Counter */

	rdtsc(eax,edx);

	/* .. relative to previous jiffy (32 bits is enough) */
	eax -= last_tsc_low;	/* tsc_low delta */

	/*
	 * Time offset = (tsc_low delta) * fast_gettimeoffset_quotient
	 *             = (tsc_low delta) * (usecs_per_clock)
	 *             = (tsc_low delta) * (usecs_per_jiffy / clocks_per_jiffy)
	 *
	 * Using a mull instead of a divl saves up to 31 clock cycles
	 * in the critical path.
	 */

	/* 32x32->64 multiply; edx receives the high word, i.e. the
	 * product scaled down by 2^32, which is the usec count */
	__asm__("mull %2"
		:"=a" (eax), "=d" (edx)
		:"rm" (fast_gettimeoffset_quotient),
		 "0" (eax));

	/* our adjusted time offset in microseconds */
	return delay_at_last_interrupt + edx;
}
127
/*
 * Monotonic clock in nanoseconds: monotonic_base plus TSC cycles
 * elapsed since the last mark_offset, read consistently under the
 * seqlock retry loop.
 */
static unsigned long long monotonic_clock_tsc(void)
{
	unsigned long long last_offset, this_offset, base;
	unsigned seq;

	/* atomically read monotonic base & last_offset */
	do {
		seq = read_seqbegin(&monotonic_lock);
		last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
		base = monotonic_base;
	} while (read_seqretry(&monotonic_lock, seq));

	/* Read the Time Stamp Counter */
	rdtscll(this_offset);

	/* return the value in ns */
	return base + cycles_2_ns(this_offset - last_offset);
}
146
/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long sched_clock(void)
{
	unsigned long long this_offset;

	/*
	 * In the NUMA case we dont use the TSC as they are not
	 * synchronized across all CPUs.
	 */
#ifndef CONFIG_NUMA
	if (!use_tsc)
#endif
		/* NOTE: on NUMA the #ifndef removes the condition, so this
		 * jiffies-based return is taken unconditionally */
		/* no locking but a rare wrong value is not a big deal */
		return jiffies_64 * (1000000000 / HZ);

	/* Read the Time Stamp Counter */
	rdtscll(this_offset);

	/* return the value in ns */
	return cycles_2_ns(this_offset);
}
170
/*
 * Calibrated busy-wait: spin until 'loops' TSC ticks have elapsed.
 * rep_nop() relaxes the spin for hyperthreaded siblings.
 */
static void delay_tsc(unsigned long loops)
{
	unsigned long tsc_start, tsc_now;

	rdtscl(tsc_start);
	do {
		rep_nop();
		rdtscl(tsc_now);
	} while (tsc_now - tsc_start < loops);	/* unsigned: wrap-safe */
}
182
#ifdef CONFIG_HPET_TIMER
/*
 * Per-tick bookkeeping when the HPET drives the interrupt but the TSC
 * provides fine-grained offsets: snapshot TSC+HPET, compensate lost
 * ticks from the HPET comparator, update monotonic_base, and compute
 * delay_at_last_interrupt for get_offset_tsc().
 */
static void mark_offset_tsc_hpet(void)
{
	unsigned long long this_offset, last_offset;
	unsigned long offset, temp, hpet_current;

	write_seqlock(&monotonic_lock);
	last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
	/*
	 * It is important that these two operations happen almost at
	 * the same time. We do the RDTSC stuff first, since it's
	 * faster. To avoid any inconsistencies, we need interrupts
	 * disabled locally.
	 */
	/*
	 * Interrupts are just disabled locally since the timer irq
	 * has the SA_INTERRUPT flag set. -arca
	 */
	/* read Pentium cycle counter */

	hpet_current = hpet_readl(HPET_COUNTER);
	rdtsc(last_tsc_low, last_tsc_high);

	/* lost tick compensation */
	offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
	if (unlikely(((offset - hpet_last) > hpet_tick) && (hpet_last != 0))
					&& detect_lost_ticks) {
		int lost_ticks = (offset - hpet_last) / hpet_tick;
		jiffies_64 += lost_ticks;
	}
	hpet_last = hpet_current;

	/* update the monotonic base value */
	this_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
	monotonic_base += cycles_2_ns(this_offset - last_offset);
	write_sequnlock(&monotonic_lock);

	/* calculate delay_at_last_interrupt */
	/*
	 * Time offset = (hpet delta) * ( usecs per HPET clock )
	 *             = (hpet delta) * ( usecs per tick / HPET clocks per tick)
	 *             = (hpet delta) * ( hpet_usec_quotient ) / (2^32)
	 * Where,
	 * hpet_usec_quotient = (2^32 * usecs per tick)/HPET clocks per tick
	 */
	delay_at_last_interrupt = hpet_current - offset;
	ASM_MUL64_REG(temp, delay_at_last_interrupt,
			hpet_usec_quotient, delay_at_last_interrupt);
}
#endif
233
234
235#ifdef CONFIG_CPU_FREQ
236#include <linux/workqueue.h>
237
238static unsigned int cpufreq_delayed_issched = 0;
239static unsigned int cpufreq_init = 0;
240static struct work_struct cpufreq_delayed_get_work;
241
242static void handle_cpufreq_delayed_get(void *v)
243{
244 unsigned int cpu;
245 for_each_online_cpu(cpu) {
246 cpufreq_get(cpu);
247 }
248 cpufreq_delayed_issched = 0;
249}
250
251/* if we notice lost ticks, schedule a call to cpufreq_get() as it tries
252 * to verify the CPU frequency the timing core thinks the CPU is running
253 * at is still correct.
254 */
255static inline void cpufreq_delayed_get(void)
256{
257 if (cpufreq_init && !cpufreq_delayed_issched) {
258 cpufreq_delayed_issched = 1;
259 printk(KERN_DEBUG "Losing some ticks... checking if CPU frequency changed.\n");
260 schedule_work(&cpufreq_delayed_get_work);
261 }
262}
263
264/* If the CPU frequency is scaled, TSC-based delays will need a different
265 * loops_per_jiffy value to function properly.
266 */
267
268static unsigned int ref_freq = 0;
269static unsigned long loops_per_jiffy_ref = 0;
270
271#ifndef CONFIG_SMP
272static unsigned long fast_gettimeoffset_ref = 0;
273static unsigned int cpu_khz_ref = 0;
274#endif
275
/*
 * cpufreq transition notifier: rescale loops_per_jiffy, cpu_khz and the
 * TSC quotients whenever the CPU frequency changes.  xtime_lock is taken
 * for real transitions but NOT for suspend/resume notifications, which
 * arrive with interrupts disabled.
 */
static int
time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
		       void *data)
{
	struct cpufreq_freqs *freq = data;

	if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
		write_seqlock_irq(&xtime_lock);
	if (!ref_freq) {
		/* first notification: record the reference frequency and
		 * the values calibrated against it */
		if (!freq->old){
			ref_freq = freq->new;
			goto end;
		}
		ref_freq = freq->old;
		loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
#ifndef CONFIG_SMP
		fast_gettimeoffset_ref = fast_gettimeoffset_quotient;
		cpu_khz_ref = cpu_khz;
#endif
	}

	/* rescale on the notification that precedes a speed-up or follows
	 * a slow-down, so the values never overshoot the real frequency */
	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			cpu_data[freq->cpu].loops_per_jiffy = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
#ifndef CONFIG_SMP
		if (cpu_khz)
			cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
		if (use_tsc) {
			if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
				fast_gettimeoffset_quotient = cpufreq_scale(fast_gettimeoffset_ref, freq->new, ref_freq);
				set_cyc2ns_scale(cpu_khz);
			}
		}
#endif
	}

end:
	if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
		write_sequnlock_irq(&xtime_lock);

	return 0;
}
320
/* Notifier block registered on the cpufreq transition chain. */
static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call	= time_cpufreq_notifier
};
324
325
/* Core initcall: hook the TSC rescaling code into cpufreq and set up
 * the deferred-get work item.  cpufreq_init gates cpufreq_delayed_get(). */
static int __init cpufreq_tsc(void)
{
	int ret;
	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
	ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (!ret)
		cpufreq_init = 1;
	return ret;
}
core_initcall(cpufreq_tsc);
337
338#else /* CONFIG_CPU_FREQ */
339static inline void cpufreq_delayed_get(void) { return; }
340#endif
341
/*
 * Re-measure cpu_khz (UP only) and rescale loops_per_jiffy to match.
 * Returns 0 on success, -ENODEV without a TSC or on SMP, where a
 * single recalibration cannot cover all CPUs.
 */
int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned int cpu_khz_old = cpu_khz;

	if (cpu_has_tsc) {
		/* irqs off: the calibration loop must not be interrupted */
		local_irq_disable();
		init_cpu_khz();
		local_irq_enable();
		cpu_data[0].loops_per_jiffy =
		    cpufreq_scale(cpu_data[0].loops_per_jiffy,
			          cpu_khz_old,
				  cpu_khz);
		return 0;
	} else
		return -ENODEV;
#else
	return -ENODEV;
#endif
}
EXPORT_SYMBOL(recalibrate_cpu_khz);
363
/*
 * Per-tick bookkeeping for the TSC/PIT combination: snapshot the TSC,
 * latch-read the PIT, compensate lost ticks, update monotonic_base and
 * delay_at_last_interrupt.  Called from the timer interrupt while
 * holding the writer xtime_lock; interrupts are off locally.
 */
static void mark_offset_tsc(void)
{
	unsigned long lost,delay;
	unsigned long delta = last_tsc_low;
	int count;
	int countmp;
	static int count1 = 0;
	unsigned long long this_offset, last_offset;
	static int lost_count = 0;

	write_seqlock(&monotonic_lock);
	last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
	/*
	 * It is important that these two operations happen almost at
	 * the same time. We do the RDTSC stuff first, since it's
	 * faster. To avoid any inconsistencies, we need interrupts
	 * disabled locally.
	 */

	/*
	 * Interrupts are just disabled locally since the timer irq
	 * has the SA_INTERRUPT flag set. -arca
	 */

	/* read Pentium cycle counter */

	rdtsc(last_tsc_low, last_tsc_high);

	spin_lock(&i8253_lock);
	outb_p(0x00, PIT_MODE);		/* latch the count ASAP */

	count = inb_p(PIT_CH0);		/* read the latched count */
	count |= inb(PIT_CH0) << 8;

	/*
	 * VIA686a test code... reset the latch if count > max + 1
	 * from timer_pit.c - cjb
	 */
	if (count > LATCH) {
		outb_p(0x34, PIT_MODE);
		outb_p(LATCH & 0xff, PIT_CH0);
		outb(LATCH >> 8, PIT_CH0);
		count = LATCH - 1;
	}

	spin_unlock(&i8253_lock);

	if (pit_latch_buggy) {
		/* get center value of last 3 time lutch */
		/* (median filter over the last three PIT reads to mask
		 * single glitched latches on buggy chipsets) */
		if ((count2 >= count && count >= count1)
		    || (count1 >= count && count >= count2)) {
			count2 = count1; count1 = count;
		} else if ((count1 >= count2 && count2 >= count)
			   || (count >= count2 && count2 >= count1)) {
			countmp = count;count = count2;
			count2 = count1;count1 = countmp;
		} else {
			count2 = count1; count1 = count; count = count1;
		}
	}

	/* lost tick compensation */
	delta = last_tsc_low - delta;
	{
		register unsigned long eax, edx;
		eax = delta;
		/* 32x32->64 multiply; edx = delta scaled to microseconds */
		__asm__("mull %2"
		:"=a" (eax), "=d" (edx)
		:"rm" (fast_gettimeoffset_quotient),
		 "0" (eax));
		delta = edx;
	}
	delta += delay_at_last_interrupt;
	lost = delta/(1000000/HZ);
	delay = delta%(1000000/HZ);
	if (lost >= 2 && detect_lost_ticks) {
		jiffies_64 += lost-1;

		/* sanity check to ensure we're not always losing ticks */
		if (lost_count++ > 100) {
			printk(KERN_WARNING "Losing too many ticks!\n");
			printk(KERN_WARNING "TSC cannot be used as a timesource.  \n");
			printk(KERN_WARNING "Possible reasons for this are:\n");
			printk(KERN_WARNING "  You're running with Speedstep,\n");
			printk(KERN_WARNING "  You don't have DMA enabled for your hard disk (see hdparm),\n");
			printk(KERN_WARNING "  Incorrect TSC synchronization on an SMP system (see dmesg).\n");
			printk(KERN_WARNING "Falling back to a sane timesource now.\n");

			clock_fallback();
		}
		/* ... but give the TSC a fair chance */
		if (lost_count > 25)
			cpufreq_delayed_get();
	} else
		lost_count = 0;
	/* update the monotonic base value */
	this_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
	monotonic_base += cycles_2_ns(this_offset - last_offset);
	write_sequnlock(&monotonic_lock);

	/* calculate delay_at_last_interrupt */
	count = ((LATCH-1) - count) * TICK_SIZE;
	delay_at_last_interrupt = (count + LATCH/2) / LATCH;

	/* catch corner case where tick rollover occured
	 * between tsc and pit reads (as noted when
	 * usec delta is > 90% # of usecs/tick)
	 */
	if (lost && abs(delay - delay_at_last_interrupt) > (900000/HZ))
		jiffies_64++;
}
475
/*
 * Probe entry point for the "tsc" timer backend.
 *
 * @override: value of the clock= boot parameter ("" means auto-select).
 * Calibrates the TSC (against the HPET when available, otherwise the
 * PIT), fills in fast_gettimeoffset_quotient/cpu_khz and returns 0,
 * or -ENODEV when the TSC is absent or calibration fails.
 */
static int __init init_tsc(char* override)
{

	/* check clock override */
	if (override[0] && strncmp(override,"tsc",3)) {
#ifdef CONFIG_HPET_TIMER
		if (is_hpet_enabled()) {
			/* with HPET the tick is driven via the TSC path,
			 * so a different clock= choice cannot be honoured */
			printk(KERN_ERR "Warning: clock= override failed. Defaulting to tsc\n");
		} else
#endif
		{
			return -ENODEV;
		}
	}

	/*
	 * If we have APM enabled or the CPU clock speed is variable
	 * (CPU stops clock on HLT or slows clock to save power)
	 * then the TSC timestamps may diverge by up to 1 jiffy from
	 * 'real time' but nothing will break.
	 * The most frequent case is that the CPU is "woken" from a halt
	 * state by the timer interrupt itself, so we get 0 error. In the
	 * rare cases where a driver would "wake" the CPU and request a
	 * timestamp, the maximum error is < 1 jiffy. But timestamps are
	 * still perfectly ordered.
	 * Note that the TSC counter will be reset if APM suspends
	 * to disk; this won't break the kernel, though, 'cuz we're
	 * smart.  See arch/i386/kernel/apm.c.
	 */
	/*
	 *	Firstly we have to do a CPU check for chips with
	 * 	a potentially buggy TSC. At this point we haven't run
	 *	the ident/bugs checks so we must run this hook as it
	 *	may turn off the TSC flag.
	 *
	 *	NOTE: this doesn't yet handle SMP 486 machines where only
	 *	some CPU's have a TSC. Thats never worked and nobody has
	 *	moaned if you have the only one in the world - you fix it!
	 */

	count2 = LATCH; /* initialize counter for mark_offset_tsc() */

	if (cpu_has_tsc) {
		unsigned long tsc_quotient;
#ifdef CONFIG_HPET_TIMER
		if (is_hpet_enabled() && hpet_use_timer) {
			unsigned long result, remain;
			printk("Using TSC for gettimeofday\n");
			tsc_quotient = calibrate_tsc_hpet(NULL);
			timer_tsc.mark_offset = &mark_offset_tsc_hpet;
			/*
			 * Math to calculate hpet to usec multiplier
			 * Look for the comments at get_offset_tsc_hpet()
			 */
			ASM_DIV64_REG(result, remain, hpet_tick,
					0, KERNEL_TICK_USEC);
			if (remain > (hpet_tick >> 1))
				result++; /* rounding the result */

			hpet_usec_quotient = result;
		} else
#endif
		{
			tsc_quotient = calibrate_tsc();
		}

		if (tsc_quotient) {
			fast_gettimeoffset_quotient = tsc_quotient;
			use_tsc = 1;
			/*
			 *	We could be more selective here I suspect
			 *	and just enable this for the next intel chips ?
			 */
			/* report CPU clock rate in Hz.
			 * The formula is (10^6 * 2^32) / (2^32 * 1 / (clocks/us)) =
			 * clock/second. Our precision is about 100 ppm.
			 */
			{	unsigned long eax=0, edx=1000;
				__asm__("divl %2"
		       		:"=a" (cpu_khz), "=d" (edx)
        	       		:"r" (tsc_quotient),
	                	"0" (eax), "1" (edx));
				printk("Detected %u.%03u MHz processor.\n",
					cpu_khz / 1000, cpu_khz % 1000);
			}
			set_cyc2ns_scale(cpu_khz);
			return 0;
		}
	}
	return -ENODEV;
}
567
/*
 * Resume hook: rebase by treating the current TSC (and HPET counter,
 * if used) as the last mark_offset sample, so suspend time is not
 * misaccounted.  Always succeeds.
 */
static int tsc_resume(void)
{
	write_seqlock(&monotonic_lock);
	/* Assume this is the last mark offset time */
	rdtsc(last_tsc_low, last_tsc_high);
#ifdef CONFIG_HPET_TIMER
	if (is_hpet_enabled() && hpet_use_timer)
		hpet_last = hpet_readl(HPET_COUNTER);
#endif
	write_sequnlock(&monotonic_lock);
	return 0;
}
580
#ifndef CONFIG_X86_TSC
/* disable flag for tsc.  Takes effect by clearing the TSC cpu flag
 * in cpu/common.c */
/* "notsc" boot parameter handler. */
static int __init tsc_setup(char *str)
{
	tsc_disable = 1;
	return 1;	/* parameter consumed */
}
#else
/* With CONFIG_X86_TSC the TSC is assumed present; "notsc" only warns. */
static int __init tsc_setup(char *str)
{
	printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
				"cannot disable TSC.\n");
	return 1;
}
#endif
__setup("notsc", tsc_setup);
598
599
600
601/************************************************************/
602
/* tsc timer_opts struct; mark_offset is repointed to the HPET variant
 * by init_tsc() when the HPET drives the tick */
static struct timer_opts timer_tsc = {
	.name = "tsc",
	.mark_offset = mark_offset_tsc,
	.get_offset = get_offset_tsc,
	.monotonic_clock = monotonic_clock_tsc,
	.delay = delay_tsc,
	.read_timer = read_timer_tsc,
	.resume	= tsc_resume,
};
613
/* Registration record: probe entry point plus the ops used on success. */
struct init_timer_opts __initdata timer_tsc_init = {
	.init = init_tsc,
	.opts = &timer_tsc,
};
diff --git a/arch/i386/kernel/topology.c b/arch/i386/kernel/topology.c
index 296355292c..e2e281d4bc 100644
--- a/arch/i386/kernel/topology.c
+++ b/arch/i386/kernel/topology.c
@@ -32,15 +32,8 @@
32 32
33static struct i386_cpu cpu_devices[NR_CPUS]; 33static struct i386_cpu cpu_devices[NR_CPUS];
34 34
35int arch_register_cpu(int num){ 35int arch_register_cpu(int num)
36 struct node *parent = NULL; 36{
37
38#ifdef CONFIG_NUMA
39 int node = cpu_to_node(num);
40 if (node_online(node))
41 parent = &node_devices[node].node;
42#endif /* CONFIG_NUMA */
43
44 /* 37 /*
45 * CPU0 cannot be offlined due to several 38 * CPU0 cannot be offlined due to several
46 * restrictions and assumptions in kernel. This basically 39 * restrictions and assumptions in kernel. This basically
@@ -50,21 +43,13 @@ int arch_register_cpu(int num){
50 if (!num) 43 if (!num)
51 cpu_devices[num].cpu.no_control = 1; 44 cpu_devices[num].cpu.no_control = 1;
52 45
53 return register_cpu(&cpu_devices[num].cpu, num, parent); 46 return register_cpu(&cpu_devices[num].cpu, num);
54} 47}
55 48
56#ifdef CONFIG_HOTPLUG_CPU 49#ifdef CONFIG_HOTPLUG_CPU
57 50
58void arch_unregister_cpu(int num) { 51void arch_unregister_cpu(int num) {
59 struct node *parent = NULL; 52 return unregister_cpu(&cpu_devices[num].cpu);
60
61#ifdef CONFIG_NUMA
62 int node = cpu_to_node(num);
63 if (node_online(node))
64 parent = &node_devices[node].node;
65#endif /* CONFIG_NUMA */
66
67 return unregister_cpu(&cpu_devices[num].cpu, parent);
68} 53}
69EXPORT_SYMBOL(arch_register_cpu); 54EXPORT_SYMBOL(arch_register_cpu);
70EXPORT_SYMBOL(arch_unregister_cpu); 55EXPORT_SYMBOL(arch_unregister_cpu);
@@ -74,16 +59,13 @@ EXPORT_SYMBOL(arch_unregister_cpu);
74 59
75#ifdef CONFIG_NUMA 60#ifdef CONFIG_NUMA
76#include <linux/mmzone.h> 61#include <linux/mmzone.h>
77#include <asm/node.h>
78
79struct i386_node node_devices[MAX_NUMNODES];
80 62
81static int __init topology_init(void) 63static int __init topology_init(void)
82{ 64{
83 int i; 65 int i;
84 66
85 for_each_online_node(i) 67 for_each_online_node(i)
86 arch_register_node(i); 68 register_one_node(i);
87 69
88 for_each_present_cpu(i) 70 for_each_present_cpu(i)
89 arch_register_cpu(i); 71 arch_register_cpu(i);
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index dcc14477af..7846409747 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -28,6 +28,7 @@
28#include <linux/utsname.h> 28#include <linux/utsname.h>
29#include <linux/kprobes.h> 29#include <linux/kprobes.h>
30#include <linux/kexec.h> 30#include <linux/kexec.h>
31#include <linux/unwind.h>
31 32
32#ifdef CONFIG_EISA 33#ifdef CONFIG_EISA
33#include <linux/ioport.h> 34#include <linux/ioport.h>
@@ -47,7 +48,7 @@
47#include <asm/desc.h> 48#include <asm/desc.h>
48#include <asm/i387.h> 49#include <asm/i387.h>
49#include <asm/nmi.h> 50#include <asm/nmi.h>
50 51#include <asm/unwind.h>
51#include <asm/smp.h> 52#include <asm/smp.h>
52#include <asm/arch_hooks.h> 53#include <asm/arch_hooks.h>
53#include <asm/kdebug.h> 54#include <asm/kdebug.h>
@@ -92,6 +93,7 @@ asmlinkage void spurious_interrupt_bug(void);
92asmlinkage void machine_check(void); 93asmlinkage void machine_check(void);
93 94
94static int kstack_depth_to_print = 24; 95static int kstack_depth_to_print = 24;
96static int call_trace = 1;
95ATOMIC_NOTIFIER_HEAD(i386die_chain); 97ATOMIC_NOTIFIER_HEAD(i386die_chain);
96 98
97int register_die_notifier(struct notifier_block *nb) 99int register_die_notifier(struct notifier_block *nb)
@@ -170,7 +172,23 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo,
170 return ebp; 172 return ebp;
171} 173}
172 174
173static void show_trace_log_lvl(struct task_struct *task, 175static asmlinkage int show_trace_unwind(struct unwind_frame_info *info, void *log_lvl)
176{
177 int n = 0;
178 int printed = 0; /* nr of entries already printed on current line */
179
180 while (unwind(info) == 0 && UNW_PC(info)) {
181 ++n;
182 printed = print_addr_and_symbol(UNW_PC(info), log_lvl, printed);
183 if (arch_unw_user_mode(info))
184 break;
185 }
186 if (printed)
187 printk("\n");
188 return n;
189}
190
191static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
174 unsigned long *stack, char *log_lvl) 192 unsigned long *stack, char *log_lvl)
175{ 193{
176 unsigned long ebp; 194 unsigned long ebp;
@@ -178,6 +196,26 @@ static void show_trace_log_lvl(struct task_struct *task,
178 if (!task) 196 if (!task)
179 task = current; 197 task = current;
180 198
199 if (call_trace >= 0) {
200 int unw_ret = 0;
201 struct unwind_frame_info info;
202
203 if (regs) {
204 if (unwind_init_frame_info(&info, task, regs) == 0)
205 unw_ret = show_trace_unwind(&info, log_lvl);
206 } else if (task == current)
207 unw_ret = unwind_init_running(&info, show_trace_unwind, log_lvl);
208 else {
209 if (unwind_init_blocked(&info, task) == 0)
210 unw_ret = show_trace_unwind(&info, log_lvl);
211 }
212 if (unw_ret > 0) {
213 if (call_trace > 0)
214 return;
215 printk("%sLegacy call trace:\n", log_lvl);
216 }
217 }
218
181 if (task == current) { 219 if (task == current) {
182 /* Grab ebp right from our regs */ 220 /* Grab ebp right from our regs */
183 asm ("movl %%ebp, %0" : "=r" (ebp) : ); 221 asm ("movl %%ebp, %0" : "=r" (ebp) : );
@@ -198,13 +236,13 @@ static void show_trace_log_lvl(struct task_struct *task,
198 } 236 }
199} 237}
200 238
201void show_trace(struct task_struct *task, unsigned long * stack) 239void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long * stack)
202{ 240{
203 show_trace_log_lvl(task, stack, ""); 241 show_trace_log_lvl(task, regs, stack, "");
204} 242}
205 243
206static void show_stack_log_lvl(struct task_struct *task, unsigned long *esp, 244static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
207 char *log_lvl) 245 unsigned long *esp, char *log_lvl)
208{ 246{
209 unsigned long *stack; 247 unsigned long *stack;
210 int i; 248 int i;
@@ -225,13 +263,13 @@ static void show_stack_log_lvl(struct task_struct *task, unsigned long *esp,
225 printk("%08lx ", *stack++); 263 printk("%08lx ", *stack++);
226 } 264 }
227 printk("\n%sCall Trace:\n", log_lvl); 265 printk("\n%sCall Trace:\n", log_lvl);
228 show_trace_log_lvl(task, esp, log_lvl); 266 show_trace_log_lvl(task, regs, esp, log_lvl);
229} 267}
230 268
231void show_stack(struct task_struct *task, unsigned long *esp) 269void show_stack(struct task_struct *task, unsigned long *esp)
232{ 270{
233 printk(" "); 271 printk(" ");
234 show_stack_log_lvl(task, esp, ""); 272 show_stack_log_lvl(task, NULL, esp, "");
235} 273}
236 274
237/* 275/*
@@ -241,7 +279,7 @@ void dump_stack(void)
241{ 279{
242 unsigned long stack; 280 unsigned long stack;
243 281
244 show_trace(current, &stack); 282 show_trace(current, NULL, &stack);
245} 283}
246 284
247EXPORT_SYMBOL(dump_stack); 285EXPORT_SYMBOL(dump_stack);
@@ -285,7 +323,7 @@ void show_registers(struct pt_regs *regs)
285 u8 __user *eip; 323 u8 __user *eip;
286 324
287 printk("\n" KERN_EMERG "Stack: "); 325 printk("\n" KERN_EMERG "Stack: ");
288 show_stack_log_lvl(NULL, (unsigned long *)esp, KERN_EMERG); 326 show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG);
289 327
290 printk(KERN_EMERG "Code: "); 328 printk(KERN_EMERG "Code: ");
291 329
@@ -1215,3 +1253,15 @@ static int __init kstack_setup(char *s)
1215 return 1; 1253 return 1;
1216} 1254}
1217__setup("kstack=", kstack_setup); 1255__setup("kstack=", kstack_setup);
1256
1257static int __init call_trace_setup(char *s)
1258{
1259 if (strcmp(s, "old") == 0)
1260 call_trace = -1;
1261 else if (strcmp(s, "both") == 0)
1262 call_trace = 0;
1263 else if (strcmp(s, "new") == 0)
1264 call_trace = 1;
1265 return 1;
1266}
1267__setup("call_trace=", call_trace_setup);
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
new file mode 100644
index 0000000000..7e0d8dab20
--- /dev/null
+++ b/arch/i386/kernel/tsc.c
@@ -0,0 +1,478 @@
1/*
2 * This code largely moved from arch/i386/kernel/timer/timer_tsc.c
3 * which was originally moved from arch/i386/kernel/time.c.
4 * See comments there for proper credits.
5 */
6
7#include <linux/clocksource.h>
8#include <linux/workqueue.h>
9#include <linux/cpufreq.h>
10#include <linux/jiffies.h>
11#include <linux/init.h>
12#include <linux/dmi.h>
13
14#include <asm/delay.h>
15#include <asm/tsc.h>
16#include <asm/delay.h>
17#include <asm/io.h>
18
19#include "mach_timer.h"
20
21/*
22 * On some systems the TSC frequency does not
23 * change with the cpu frequency. So we need
24 * an extra value to store the TSC freq
25 */
26unsigned int tsc_khz;
27
28int tsc_disable __cpuinitdata = 0;
29
30#ifdef CONFIG_X86_TSC
31static int __init tsc_setup(char *str)
32{
33 printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
34 "cannot disable TSC.\n");
35 return 1;
36}
37#else
38/*
39 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
40 * in cpu/common.c
41 */
42static int __init tsc_setup(char *str)
43{
44 tsc_disable = 1;
45
46 return 1;
47}
48#endif
49
50__setup("notsc", tsc_setup);
51
52/*
53 * code to mark and check if the TSC is unstable
54 * due to cpufreq or due to unsynced TSCs
55 */
56static int tsc_unstable;
57
58static inline int check_tsc_unstable(void)
59{
60 return tsc_unstable;
61}
62
63void mark_tsc_unstable(void)
64{
65 tsc_unstable = 1;
66}
67EXPORT_SYMBOL_GPL(mark_tsc_unstable);
68
69/* Accellerators for sched_clock()
70 * convert from cycles(64bits) => nanoseconds (64bits)
71 * basic equation:
72 * ns = cycles / (freq / ns_per_sec)
73 * ns = cycles * (ns_per_sec / freq)
74 * ns = cycles * (10^9 / (cpu_khz * 10^3))
75 * ns = cycles * (10^6 / cpu_khz)
76 *
77 * Then we use scaling math (suggested by george@mvista.com) to get:
78 * ns = cycles * (10^6 * SC / cpu_khz) / SC
79 * ns = cycles * cyc2ns_scale / SC
80 *
81 * And since SC is a constant power of two, we can convert the div
82 * into a shift.
83 *
84 * We can use khz divisor instead of mhz to keep a better percision, since
85 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
86 * (mathieu.desnoyers@polymtl.ca)
87 *
88 * -johnstul@us.ibm.com "math is hard, lets go shopping!"
89 */
90static unsigned long cyc2ns_scale __read_mostly;
91
92#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
93
94static inline void set_cyc2ns_scale(unsigned long cpu_khz)
95{
96 cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
97}
98
99static inline unsigned long long cycles_2_ns(unsigned long long cyc)
100{
101 return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
102}
103
104/*
105 * Scheduler clock - returns current time in nanosec units.
106 */
107unsigned long long sched_clock(void)
108{
109 unsigned long long this_offset;
110
111 /*
112 * in the NUMA case we dont use the TSC as they are not
113 * synchronized across all CPUs.
114 */
115#ifndef CONFIG_NUMA
116 if (!cpu_khz || check_tsc_unstable())
117#endif
118 /* no locking but a rare wrong value is not a big deal */
119 return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
120
121 /* read the Time Stamp Counter: */
122 rdtscll(this_offset);
123
124 /* return the value in ns */
125 return cycles_2_ns(this_offset);
126}
127
128static unsigned long calculate_cpu_khz(void)
129{
130 unsigned long long start, end;
131 unsigned long count;
132 u64 delta64;
133 int i;
134 unsigned long flags;
135
136 local_irq_save(flags);
137
138 /* run 3 times to ensure the cache is warm */
139 for (i = 0; i < 3; i++) {
140 mach_prepare_counter();
141 rdtscll(start);
142 mach_countup(&count);
143 rdtscll(end);
144 }
145 /*
146 * Error: ECTCNEVERSET
147 * The CTC wasn't reliable: we got a hit on the very first read,
148 * or the CPU was so fast/slow that the quotient wouldn't fit in
149 * 32 bits..
150 */
151 if (count <= 1)
152 goto err;
153
154 delta64 = end - start;
155
156 /* cpu freq too fast: */
157 if (delta64 > (1ULL<<32))
158 goto err;
159
160 /* cpu freq too slow: */
161 if (delta64 <= CALIBRATE_TIME_MSEC)
162 goto err;
163
164 delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */
165 do_div(delta64,CALIBRATE_TIME_MSEC);
166
167 local_irq_restore(flags);
168 return (unsigned long)delta64;
169err:
170 local_irq_restore(flags);
171 return 0;
172}
173
174int recalibrate_cpu_khz(void)
175{
176#ifndef CONFIG_SMP
177 unsigned long cpu_khz_old = cpu_khz;
178
179 if (cpu_has_tsc) {
180 cpu_khz = calculate_cpu_khz();
181 tsc_khz = cpu_khz;
182 cpu_data[0].loops_per_jiffy =
183 cpufreq_scale(cpu_data[0].loops_per_jiffy,
184 cpu_khz_old, cpu_khz);
185 return 0;
186 } else
187 return -ENODEV;
188#else
189 return -ENODEV;
190#endif
191}
192
193EXPORT_SYMBOL(recalibrate_cpu_khz);
194
195void tsc_init(void)
196{
197 if (!cpu_has_tsc || tsc_disable)
198 return;
199
200 cpu_khz = calculate_cpu_khz();
201 tsc_khz = cpu_khz;
202
203 if (!cpu_khz)
204 return;
205
206 printk("Detected %lu.%03lu MHz processor.\n",
207 (unsigned long)cpu_khz / 1000,
208 (unsigned long)cpu_khz % 1000);
209
210 set_cyc2ns_scale(cpu_khz);
211 use_tsc_delay();
212}
213
214#ifdef CONFIG_CPU_FREQ
215
216static unsigned int cpufreq_delayed_issched = 0;
217static unsigned int cpufreq_init = 0;
218static struct work_struct cpufreq_delayed_get_work;
219
220static void handle_cpufreq_delayed_get(void *v)
221{
222 unsigned int cpu;
223
224 for_each_online_cpu(cpu)
225 cpufreq_get(cpu);
226
227 cpufreq_delayed_issched = 0;
228}
229
230/*
231 * if we notice cpufreq oddness, schedule a call to cpufreq_get() as it tries
232 * to verify the CPU frequency the timing core thinks the CPU is running
233 * at is still correct.
234 */
235static inline void cpufreq_delayed_get(void)
236{
237 if (cpufreq_init && !cpufreq_delayed_issched) {
238 cpufreq_delayed_issched = 1;
239 printk(KERN_DEBUG "Checking if CPU frequency changed.\n");
240 schedule_work(&cpufreq_delayed_get_work);
241 }
242}
243
244/*
245 * if the CPU frequency is scaled, TSC-based delays will need a different
246 * loops_per_jiffy value to function properly.
247 */
248static unsigned int ref_freq = 0;
249static unsigned long loops_per_jiffy_ref = 0;
250static unsigned long cpu_khz_ref = 0;
251
252static int
253time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
254{
255 struct cpufreq_freqs *freq = data;
256
257 if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
258 write_seqlock_irq(&xtime_lock);
259
260 if (!ref_freq) {
261 if (!freq->old){
262 ref_freq = freq->new;
263 goto end;
264 }
265 ref_freq = freq->old;
266 loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
267 cpu_khz_ref = cpu_khz;
268 }
269
270 if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
271 (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
272 (val == CPUFREQ_RESUMECHANGE)) {
273 if (!(freq->flags & CPUFREQ_CONST_LOOPS))
274 cpu_data[freq->cpu].loops_per_jiffy =
275 cpufreq_scale(loops_per_jiffy_ref,
276 ref_freq, freq->new);
277
278 if (cpu_khz) {
279
280 if (num_online_cpus() == 1)
281 cpu_khz = cpufreq_scale(cpu_khz_ref,
282 ref_freq, freq->new);
283 if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
284 tsc_khz = cpu_khz;
285 set_cyc2ns_scale(cpu_khz);
286 /*
287 * TSC based sched_clock turns
288 * to junk w/ cpufreq
289 */
290 mark_tsc_unstable();
291 }
292 }
293 }
294end:
295 if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
296 write_sequnlock_irq(&xtime_lock);
297
298 return 0;
299}
300
301static struct notifier_block time_cpufreq_notifier_block = {
302 .notifier_call = time_cpufreq_notifier
303};
304
305static int __init cpufreq_tsc(void)
306{
307 int ret;
308
309 INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
310 ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
311 CPUFREQ_TRANSITION_NOTIFIER);
312 if (!ret)
313 cpufreq_init = 1;
314
315 return ret;
316}
317
318core_initcall(cpufreq_tsc);
319
320#endif
321
322/* clock source code */
323
324static unsigned long current_tsc_khz = 0;
325static int tsc_update_callback(void);
326
327static cycle_t read_tsc(void)
328{
329 cycle_t ret;
330
331 rdtscll(ret);
332
333 return ret;
334}
335
336static struct clocksource clocksource_tsc = {
337 .name = "tsc",
338 .rating = 300,
339 .read = read_tsc,
340 .mask = CLOCKSOURCE_MASK(64),
341 .mult = 0, /* to be set */
342 .shift = 22,
343 .update_callback = tsc_update_callback,
344 .is_continuous = 1,
345};
346
347static int tsc_update_callback(void)
348{
349 int change = 0;
350
351 /* check to see if we should switch to the safe clocksource: */
352 if (clocksource_tsc.rating != 50 && check_tsc_unstable()) {
353 clocksource_tsc.rating = 50;
354 clocksource_reselect();
355 change = 1;
356 }
357
358 /* only update if tsc_khz has changed: */
359 if (current_tsc_khz != tsc_khz) {
360 current_tsc_khz = tsc_khz;
361 clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
362 clocksource_tsc.shift);
363 change = 1;
364 }
365
366 return change;
367}
368
369static int __init dmi_mark_tsc_unstable(struct dmi_system_id *d)
370{
371 printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
372 d->ident);
373 mark_tsc_unstable();
374 return 0;
375}
376
377/* List of systems that have known TSC problems */
378static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
379 {
380 .callback = dmi_mark_tsc_unstable,
381 .ident = "IBM Thinkpad 380XD",
382 .matches = {
383 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
384 DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
385 },
386 },
387 {}
388};
389
390#define TSC_FREQ_CHECK_INTERVAL (10*MSEC_PER_SEC) /* 10sec in MS */
391static struct timer_list verify_tsc_freq_timer;
392
393/* XXX - Probably should add locking */
394static void verify_tsc_freq(unsigned long unused)
395{
396 static u64 last_tsc;
397 static unsigned long last_jiffies;
398
399 u64 now_tsc, interval_tsc;
400 unsigned long now_jiffies, interval_jiffies;
401
402
403 if (check_tsc_unstable())
404 return;
405
406 rdtscll(now_tsc);
407 now_jiffies = jiffies;
408
409 if (!last_jiffies) {
410 goto out;
411 }
412
413 interval_jiffies = now_jiffies - last_jiffies;
414 interval_tsc = now_tsc - last_tsc;
415 interval_tsc *= HZ;
416 do_div(interval_tsc, cpu_khz*1000);
417
418 if (interval_tsc < (interval_jiffies * 3 / 4)) {
419 printk("TSC appears to be running slowly. "
420 "Marking it as unstable\n");
421 mark_tsc_unstable();
422 return;
423 }
424
425out:
426 last_tsc = now_tsc;
427 last_jiffies = now_jiffies;
428 /* set us up to go off on the next interval: */
429 mod_timer(&verify_tsc_freq_timer,
430 jiffies + msecs_to_jiffies(TSC_FREQ_CHECK_INTERVAL));
431}
432
433/*
434 * Make an educated guess if the TSC is trustworthy and synchronized
435 * over all CPUs.
436 */
437static __init int unsynchronized_tsc(void)
438{
439 /*
440 * Intel systems are normally all synchronized.
441 * Exceptions must mark TSC as unstable:
442 */
443 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
444 return 0;
445
446 /* assume multi socket systems are not synchronized: */
447 return num_possible_cpus() > 1;
448}
449
450static int __init init_tsc_clocksource(void)
451{
452
453 if (cpu_has_tsc && tsc_khz && !tsc_disable) {
454 /* check blacklist */
455 dmi_check_system(bad_tsc_dmi_table);
456
457 if (unsynchronized_tsc()) /* mark unstable if unsynced */
458 mark_tsc_unstable();
459 current_tsc_khz = tsc_khz;
460 clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
461 clocksource_tsc.shift);
462 /* lower the rating if we already know its unstable: */
463 if (check_tsc_unstable())
464 clocksource_tsc.rating = 50;
465
466 init_timer(&verify_tsc_freq_timer);
467 verify_tsc_freq_timer.function = verify_tsc_freq;
468 verify_tsc_freq_timer.expires =
469 jiffies + msecs_to_jiffies(TSC_FREQ_CHECK_INTERVAL);
470 add_timer(&verify_tsc_freq_timer);
471
472 return clocksource_register(&clocksource_tsc);
473 }
474
475 return 0;
476}
477
478module_init(init_tsc_clocksource);
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
index 7512f39c9f..2d4f1386e2 100644
--- a/arch/i386/kernel/vmlinux.lds.S
+++ b/arch/i386/kernel/vmlinux.lds.S
@@ -71,6 +71,15 @@ SECTIONS
71 .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) { *(.data.read_mostly) } 71 .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) { *(.data.read_mostly) }
72 _edata = .; /* End of data section */ 72 _edata = .; /* End of data section */
73 73
74#ifdef CONFIG_STACK_UNWIND
75 . = ALIGN(4);
76 .eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) {
77 __start_unwind = .;
78 *(.eh_frame)
79 __end_unwind = .;
80 }
81#endif
82
74 . = ALIGN(THREAD_SIZE); /* init_task */ 83 . = ALIGN(THREAD_SIZE); /* init_task */
75 .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) { 84 .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
76 *(.data.init_task) 85 *(.data.init_task)
diff --git a/arch/i386/kernel/vsyscall-sysenter.S b/arch/i386/kernel/vsyscall-sysenter.S
index 3b62baa6a3..1a36d26e15 100644
--- a/arch/i386/kernel/vsyscall-sysenter.S
+++ b/arch/i386/kernel/vsyscall-sysenter.S
@@ -42,10 +42,10 @@ __kernel_vsyscall:
42 /* 7: align return point with nop's to make disassembly easier */ 42 /* 7: align return point with nop's to make disassembly easier */
43 .space 7,0x90 43 .space 7,0x90
44 44
45 /* 14: System call restart point is here! (SYSENTER_RETURN - 2) */ 45 /* 14: System call restart point is here! (SYSENTER_RETURN-2) */
46 jmp .Lenter_kernel 46 jmp .Lenter_kernel
47 /* 16: System call normal return point is here! */ 47 /* 16: System call normal return point is here! */
48 .globl SYSENTER_RETURN /* Symbol used by entry.S. */ 48 .globl SYSENTER_RETURN /* Symbol used by sysenter.c */
49SYSENTER_RETURN: 49SYSENTER_RETURN:
50 pop %ebp 50 pop %ebp
51.Lpop_ebp: 51.Lpop_ebp:
diff --git a/arch/i386/kernel/vsyscall.lds.S b/arch/i386/kernel/vsyscall.lds.S
index 98699ca6e5..e26975fc68 100644
--- a/arch/i386/kernel/vsyscall.lds.S
+++ b/arch/i386/kernel/vsyscall.lds.S
@@ -7,7 +7,7 @@
7 7
8SECTIONS 8SECTIONS
9{ 9{
10 . = VSYSCALL_BASE + SIZEOF_HEADERS; 10 . = VDSO_PRELINK + SIZEOF_HEADERS;
11 11
12 .hash : { *(.hash) } :text 12 .hash : { *(.hash) } :text
13 .dynsym : { *(.dynsym) } 13 .dynsym : { *(.dynsym) }
@@ -20,7 +20,7 @@ SECTIONS
20 For the layouts to match, we need to skip more than enough 20 For the layouts to match, we need to skip more than enough
21 space for the dynamic symbol table et al. If this amount 21 space for the dynamic symbol table et al. If this amount
22 is insufficient, ld -shared will barf. Just increase it here. */ 22 is insufficient, ld -shared will barf. Just increase it here. */
23 . = VSYSCALL_BASE + 0x400; 23 . = VDSO_PRELINK + 0x400;
24 24
25 .text : { *(.text) } :text =0x90909090 25 .text : { *(.text) } :text =0x90909090
26 .note : { *(.note.*) } :text :note 26 .note : { *(.note.*) } :text :note
diff --git a/arch/i386/lib/delay.c b/arch/i386/lib/delay.c
index c49a6acbee..3c0714c4b6 100644
--- a/arch/i386/lib/delay.c
+++ b/arch/i386/lib/delay.c
@@ -10,43 +10,92 @@
10 * we have to worry about. 10 * we have to worry about.
11 */ 11 */
12 12
13#include <linux/module.h>
13#include <linux/config.h> 14#include <linux/config.h>
14#include <linux/sched.h> 15#include <linux/sched.h>
15#include <linux/delay.h> 16#include <linux/delay.h>
16#include <linux/module.h> 17
17#include <asm/processor.h> 18#include <asm/processor.h>
18#include <asm/delay.h> 19#include <asm/delay.h>
19#include <asm/timer.h> 20#include <asm/timer.h>
20 21
21#ifdef CONFIG_SMP 22#ifdef CONFIG_SMP
22#include <asm/smp.h> 23# include <asm/smp.h>
23#endif 24#endif
24 25
25extern struct timer_opts* timer; 26/* simple loop based delay: */
27static void delay_loop(unsigned long loops)
28{
29 int d0;
30
31 __asm__ __volatile__(
32 "\tjmp 1f\n"
33 ".align 16\n"
34 "1:\tjmp 2f\n"
35 ".align 16\n"
36 "2:\tdecl %0\n\tjns 2b"
37 :"=&a" (d0)
38 :"0" (loops));
39}
40
41/* TSC based delay: */
42static void delay_tsc(unsigned long loops)
43{
44 unsigned long bclock, now;
45
46 rdtscl(bclock);
47 do {
48 rep_nop();
49 rdtscl(now);
50 } while ((now-bclock) < loops);
51}
52
53/*
54 * Since we calibrate only once at boot, this
55 * function should be set once at boot and not changed
56 */
57static void (*delay_fn)(unsigned long) = delay_loop;
58
59void use_tsc_delay(void)
60{
61 delay_fn = delay_tsc;
62}
63
64int read_current_timer(unsigned long *timer_val)
65{
66 if (delay_fn == delay_tsc) {
67 rdtscl(*timer_val);
68 return 0;
69 }
70 return -1;
71}
26 72
27void __delay(unsigned long loops) 73void __delay(unsigned long loops)
28{ 74{
29 cur_timer->delay(loops); 75 delay_fn(loops);
30} 76}
31 77
32inline void __const_udelay(unsigned long xloops) 78inline void __const_udelay(unsigned long xloops)
33{ 79{
34 int d0; 80 int d0;
81
35 xloops *= 4; 82 xloops *= 4;
36 __asm__("mull %0" 83 __asm__("mull %0"
37 :"=d" (xloops), "=&a" (d0) 84 :"=d" (xloops), "=&a" (d0)
38 :"1" (xloops),"0" (cpu_data[raw_smp_processor_id()].loops_per_jiffy * (HZ/4))); 85 :"1" (xloops), "0"
39 __delay(++xloops); 86 (cpu_data[raw_smp_processor_id()].loops_per_jiffy * (HZ/4)));
87
88 __delay(++xloops);
40} 89}
41 90
42void __udelay(unsigned long usecs) 91void __udelay(unsigned long usecs)
43{ 92{
44 __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */ 93 __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
45} 94}
46 95
47void __ndelay(unsigned long nsecs) 96void __ndelay(unsigned long nsecs)
48{ 97{
49 __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */ 98 __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
50} 99}
51 100
52EXPORT_SYMBOL(__delay); 101EXPORT_SYMBOL(__delay);
diff --git a/arch/i386/mach-voyager/setup.c b/arch/i386/mach-voyager/setup.c
index 0e225054e2..defc6ebbd5 100644
--- a/arch/i386/mach-voyager/setup.c
+++ b/arch/i386/mach-voyager/setup.c
@@ -5,10 +5,10 @@
5#include <linux/config.h> 5#include <linux/config.h>
6#include <linux/init.h> 6#include <linux/init.h>
7#include <linux/interrupt.h> 7#include <linux/interrupt.h>
8#include <asm/acpi.h>
9#include <asm/arch_hooks.h> 8#include <asm/arch_hooks.h>
10#include <asm/voyager.h> 9#include <asm/voyager.h>
11#include <asm/e820.h> 10#include <asm/e820.h>
11#include <asm/io.h>
12#include <asm/setup.h> 12#include <asm/setup.h>
13 13
14void __init pre_intr_init_hook(void) 14void __init pre_intr_init_hook(void)
@@ -27,8 +27,7 @@ void __init intr_init_hook(void)
27 smp_intr_init(); 27 smp_intr_init();
28#endif 28#endif
29 29
30 if (!acpi_ioapic) 30 setup_irq(2, &irq2);
31 setup_irq(2, &irq2);
32} 31}
33 32
34void __init pre_setup_arch_hook(void) 33void __init pre_setup_arch_hook(void)
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c
index 70e560a1b7..8242af9ebc 100644
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -661,6 +661,7 @@ do_boot_cpu(__u8 cpu)
661 print_cpu_info(&cpu_data[cpu]); 661 print_cpu_info(&cpu_data[cpu]);
662 wmb(); 662 wmb();
663 cpu_set(cpu, cpu_callout_map); 663 cpu_set(cpu, cpu_callout_map);
664 cpu_set(cpu, cpu_present_map);
664 } 665 }
665 else { 666 else {
666 printk("CPU%d FAILED TO BOOT: ", cpu); 667 printk("CPU%d FAILED TO BOOT: ", cpu);
@@ -1912,6 +1913,7 @@ void __devinit smp_prepare_boot_cpu(void)
1912 cpu_set(smp_processor_id(), cpu_online_map); 1913 cpu_set(smp_processor_id(), cpu_online_map);
1913 cpu_set(smp_processor_id(), cpu_callout_map); 1914 cpu_set(smp_processor_id(), cpu_callout_map);
1914 cpu_set(smp_processor_id(), cpu_possible_map); 1915 cpu_set(smp_processor_id(), cpu_possible_map);
1916 cpu_set(smp_processor_id(), cpu_present_map);
1915} 1917}
1916 1918
1917int __devinit 1919int __devinit
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index bd6fe96cc1..6ee7faaf2c 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -30,6 +30,40 @@
30 30
31extern void die(const char *,struct pt_regs *,long); 31extern void die(const char *,struct pt_regs *,long);
32 32
33#ifdef CONFIG_KPROBES
34ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
35int register_page_fault_notifier(struct notifier_block *nb)
36{
37 vmalloc_sync_all();
38 return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
39}
40
41int unregister_page_fault_notifier(struct notifier_block *nb)
42{
43 return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
44}
45
46static inline int notify_page_fault(enum die_val val, const char *str,
47 struct pt_regs *regs, long err, int trap, int sig)
48{
49 struct die_args args = {
50 .regs = regs,
51 .str = str,
52 .err = err,
53 .trapnr = trap,
54 .signr = sig
55 };
56 return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
57}
58#else
59static inline int notify_page_fault(enum die_val val, const char *str,
60 struct pt_regs *regs, long err, int trap, int sig)
61{
62 return NOTIFY_DONE;
63}
64#endif
65
66
33/* 67/*
34 * Unlock any spinlocks which will prevent us from getting the 68 * Unlock any spinlocks which will prevent us from getting the
35 * message out 69 * message out
@@ -324,7 +358,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
324 if (unlikely(address >= TASK_SIZE)) { 358 if (unlikely(address >= TASK_SIZE)) {
325 if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0) 359 if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
326 return; 360 return;
327 if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14, 361 if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
328 SIGSEGV) == NOTIFY_STOP) 362 SIGSEGV) == NOTIFY_STOP)
329 return; 363 return;
330 /* 364 /*
@@ -334,7 +368,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
334 goto bad_area_nosemaphore; 368 goto bad_area_nosemaphore;
335 } 369 }
336 370
337 if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14, 371 if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
338 SIGSEGV) == NOTIFY_STOP) 372 SIGSEGV) == NOTIFY_STOP)
339 return; 373 return;
340 374
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index bf19513f0c..f84b16e007 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -23,6 +23,7 @@
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/highmem.h> 24#include <linux/highmem.h>
25#include <linux/pagemap.h> 25#include <linux/pagemap.h>
26#include <linux/poison.h>
26#include <linux/bootmem.h> 27#include <linux/bootmem.h>
27#include <linux/slab.h> 28#include <linux/slab.h>
28#include <linux/proc_fs.h> 29#include <linux/proc_fs.h>
@@ -654,7 +655,7 @@ void __init mem_init(void)
654 */ 655 */
655#ifdef CONFIG_MEMORY_HOTPLUG 656#ifdef CONFIG_MEMORY_HOTPLUG
656#ifndef CONFIG_NEED_MULTIPLE_NODES 657#ifndef CONFIG_NEED_MULTIPLE_NODES
657int add_memory(u64 start, u64 size) 658int arch_add_memory(int nid, u64 start, u64 size)
658{ 659{
659 struct pglist_data *pgdata = &contig_page_data; 660 struct pglist_data *pgdata = &contig_page_data;
660 struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1; 661 struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
@@ -753,7 +754,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
753 for (addr = begin; addr < end; addr += PAGE_SIZE) { 754 for (addr = begin; addr < end; addr += PAGE_SIZE) {
754 ClearPageReserved(virt_to_page(addr)); 755 ClearPageReserved(virt_to_page(addr));
755 init_page_count(virt_to_page(addr)); 756 init_page_count(virt_to_page(addr));
756 memset((void *)addr, 0xcc, PAGE_SIZE); 757 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
757 free_page(addr); 758 free_page(addr);
758 totalram_pages++; 759 totalram_pages++;
759 } 760 }
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index 0887b34bc5..353a836ed6 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -229,8 +229,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
229 if (PageHighMem(page)) 229 if (PageHighMem(page))
230 return; 230 return;
231 if (!enable) 231 if (!enable)
232 mutex_debug_check_no_locks_freed(page_address(page), 232 debug_check_no_locks_freed(page_address(page),
233 numpages * PAGE_SIZE); 233 numpages * PAGE_SIZE);
234 234
235 /* the return value is ignored - the calls cannot fail, 235 /* the return value is ignored - the calls cannot fail,
236 * large pages are disabled at boot time. 236 * large pages are disabled at boot time.
diff --git a/arch/i386/oprofile/nmi_int.c b/arch/i386/oprofile/nmi_int.c
index ec0fd3cfa7..fa8a37bcb3 100644
--- a/arch/i386/oprofile/nmi_int.c
+++ b/arch/i386/oprofile/nmi_int.c
@@ -281,9 +281,9 @@ static int nmi_create_files(struct super_block * sb, struct dentry * root)
281 281
282 for (i = 0; i < model->num_counters; ++i) { 282 for (i = 0; i < model->num_counters; ++i) {
283 struct dentry * dir; 283 struct dentry * dir;
284 char buf[2]; 284 char buf[4];
285 285
286 snprintf(buf, 2, "%d", i); 286 snprintf(buf, sizeof(buf), "%d", i);
287 dir = oprofilefs_mkdir(sb, root, buf); 287 dir = oprofilefs_mkdir(sb, root, buf);
288 oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled); 288 oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
289 oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event); 289 oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
diff --git a/arch/i386/oprofile/op_model_athlon.c b/arch/i386/oprofile/op_model_athlon.c
index 3ad9a72a50..693bdea4a5 100644
--- a/arch/i386/oprofile/op_model_athlon.c
+++ b/arch/i386/oprofile/op_model_athlon.c
@@ -13,6 +13,7 @@
13#include <linux/oprofile.h> 13#include <linux/oprofile.h>
14#include <asm/ptrace.h> 14#include <asm/ptrace.h>
15#include <asm/msr.h> 15#include <asm/msr.h>
16#include <asm/nmi.h>
16 17
17#include "op_x86_model.h" 18#include "op_x86_model.h"
18#include "op_counter.h" 19#include "op_counter.h"
diff --git a/arch/i386/oprofile/op_model_p4.c b/arch/i386/oprofile/op_model_p4.c
index ac8a066035..7c61d357b8 100644
--- a/arch/i386/oprofile/op_model_p4.c
+++ b/arch/i386/oprofile/op_model_p4.c
@@ -14,6 +14,7 @@
14#include <asm/ptrace.h> 14#include <asm/ptrace.h>
15#include <asm/fixmap.h> 15#include <asm/fixmap.h>
16#include <asm/apic.h> 16#include <asm/apic.h>
17#include <asm/nmi.h>
17 18
18#include "op_x86_model.h" 19#include "op_x86_model.h"
19#include "op_counter.h" 20#include "op_counter.h"
diff --git a/arch/i386/oprofile/op_model_ppro.c b/arch/i386/oprofile/op_model_ppro.c
index d719015fc0..5c3ab4b027 100644
--- a/arch/i386/oprofile/op_model_ppro.c
+++ b/arch/i386/oprofile/op_model_ppro.c
@@ -14,6 +14,7 @@
14#include <asm/ptrace.h> 14#include <asm/ptrace.h>
15#include <asm/msr.h> 15#include <asm/msr.h>
16#include <asm/apic.h> 16#include <asm/apic.h>
17#include <asm/nmi.h>
17 18
18#include "op_x86_model.h" 19#include "op_x86_model.h"
19#include "op_counter.h" 20#include "op_counter.h"
diff --git a/arch/i386/pci/pcbios.c b/arch/i386/pci/pcbios.c
index 1eec0868f4..ed1512a175 100644
--- a/arch/i386/pci/pcbios.c
+++ b/arch/i386/pci/pcbios.c
@@ -371,8 +371,7 @@ void __devinit pcibios_sort(void)
371 list_for_each(ln, &pci_devices) { 371 list_for_each(ln, &pci_devices) {
372 d = pci_dev_g(ln); 372 d = pci_dev_g(ln);
373 if (d->bus->number == bus && d->devfn == devfn) { 373 if (d->bus->number == bus && d->devfn == devfn) {
374 list_del(&d->global_list); 374 list_move_tail(&d->global_list, &sorted_devices);
375 list_add_tail(&d->global_list, &sorted_devices);
376 if (d == dev) 375 if (d == dev)
377 found = 1; 376 found = 1;
378 break; 377 break;
@@ -390,8 +389,7 @@ void __devinit pcibios_sort(void)
390 if (!found) { 389 if (!found) {
391 printk(KERN_WARNING "PCI: Device %s not found by BIOS\n", 390 printk(KERN_WARNING "PCI: Device %s not found by BIOS\n",
392 pci_name(dev)); 391 pci_name(dev));
393 list_del(&dev->global_list); 392 list_move_tail(&dev->global_list, &sorted_devices);
394 list_add_tail(&dev->global_list, &sorted_devices);
395 } 393 }
396 } 394 }
397 list_splice(&sorted_devices, &pci_devices); 395 list_splice(&sorted_devices, &pci_devices);
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 1831874988..a56df7bf02 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -374,6 +374,10 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
374 def_bool y 374 def_bool y
375 depends on NEED_MULTIPLE_NODES 375 depends on NEED_MULTIPLE_NODES
376 376
377config HAVE_ARCH_NODEDATA_EXTENSION
378 def_bool y
379 depends on NUMA
380
377config IA32_SUPPORT 381config IA32_SUPPORT
378 bool "Support for Linux/x86 binaries" 382 bool "Support for Linux/x86 binaries"
379 help 383 help
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 859fb37ff4..303a9afcf2 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -959,7 +959,7 @@ remove_palinfo_proc_entries(unsigned int hcpu)
959 } 959 }
960} 960}
961 961
962static int palinfo_cpu_callback(struct notifier_block *nfb, 962static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
963 unsigned long action, 963 unsigned long action,
964 void *hcpu) 964 void *hcpu)
965{ 965{
@@ -978,7 +978,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
978 return NOTIFY_OK; 978 return NOTIFY_OK;
979} 979}
980 980
981static struct notifier_block palinfo_cpu_notifier = 981static struct notifier_block __cpuinitdata palinfo_cpu_notifier =
982{ 982{
983 .notifier_call = palinfo_cpu_callback, 983 .notifier_call = palinfo_cpu_callback,
984 .priority = 0, 984 .priority = 0,
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 355d57970b..b045c27913 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -272,9 +272,9 @@ cpu_idle (void)
272 /* endless idle loop with no priority at all */ 272 /* endless idle loop with no priority at all */
273 while (1) { 273 while (1) {
274 if (can_do_pal_halt) 274 if (can_do_pal_halt)
275 clear_thread_flag(TIF_POLLING_NRFLAG); 275 current_thread_info()->status &= ~TS_POLLING;
276 else 276 else
277 set_thread_flag(TIF_POLLING_NRFLAG); 277 current_thread_info()->status |= TS_POLLING;
278 278
279 if (!need_resched()) { 279 if (!need_resched()) {
280 void (*idle)(void); 280 void (*idle)(void);
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 663a186ad1..9065f0f01b 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -572,7 +572,7 @@ static struct file_operations salinfo_data_fops = {
572}; 572};
573 573
574#ifdef CONFIG_HOTPLUG_CPU 574#ifdef CONFIG_HOTPLUG_CPU
575static int 575static int __devinit
576salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) 576salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
577{ 577{
578 unsigned int i, cpu = (unsigned long)hcpu; 578 unsigned int i, cpu = (unsigned long)hcpu;
@@ -673,9 +673,7 @@ salinfo_init(void)
673 salinfo_timer.function = &salinfo_timeout; 673 salinfo_timer.function = &salinfo_timeout;
674 add_timer(&salinfo_timer); 674 add_timer(&salinfo_timer);
675 675
676#ifdef CONFIG_HOTPLUG_CPU 676 register_hotcpu_notifier(&salinfo_cpu_notifier);
677 register_cpu_notifier(&salinfo_cpu_notifier);
678#endif
679 677
680 return 0; 678 return 0;
681} 679}
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 879edb51d1..5511d9c6c7 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -26,19 +26,10 @@
26#include <asm/numa.h> 26#include <asm/numa.h>
27#include <asm/cpu.h> 27#include <asm/cpu.h>
28 28
29#ifdef CONFIG_NUMA
30static struct node *sysfs_nodes;
31#endif
32static struct ia64_cpu *sysfs_cpus; 29static struct ia64_cpu *sysfs_cpus;
33 30
34int arch_register_cpu(int num) 31int arch_register_cpu(int num)
35{ 32{
36 struct node *parent = NULL;
37
38#ifdef CONFIG_NUMA
39 parent = &sysfs_nodes[cpu_to_node(num)];
40#endif /* CONFIG_NUMA */
41
42#if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU) 33#if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU)
43 /* 34 /*
44 * If CPEI cannot be re-targetted, and this is 35 * If CPEI cannot be re-targetted, and this is
@@ -48,21 +39,14 @@ int arch_register_cpu(int num)
48 sysfs_cpus[num].cpu.no_control = 1; 39 sysfs_cpus[num].cpu.no_control = 1;
49#endif 40#endif
50 41
51 return register_cpu(&sysfs_cpus[num].cpu, num, parent); 42 return register_cpu(&sysfs_cpus[num].cpu, num);
52} 43}
53 44
54#ifdef CONFIG_HOTPLUG_CPU 45#ifdef CONFIG_HOTPLUG_CPU
55 46
56void arch_unregister_cpu(int num) 47void arch_unregister_cpu(int num)
57{ 48{
58 struct node *parent = NULL; 49 return unregister_cpu(&sysfs_cpus[num].cpu);
59
60#ifdef CONFIG_NUMA
61 int node = cpu_to_node(num);
62 parent = &sysfs_nodes[node];
63#endif /* CONFIG_NUMA */
64
65 return unregister_cpu(&sysfs_cpus[num].cpu, parent);
66} 50}
67EXPORT_SYMBOL(arch_register_cpu); 51EXPORT_SYMBOL(arch_register_cpu);
68EXPORT_SYMBOL(arch_unregister_cpu); 52EXPORT_SYMBOL(arch_unregister_cpu);
@@ -74,17 +58,11 @@ static int __init topology_init(void)
74 int i, err = 0; 58 int i, err = 0;
75 59
76#ifdef CONFIG_NUMA 60#ifdef CONFIG_NUMA
77 sysfs_nodes = kzalloc(sizeof(struct node) * MAX_NUMNODES, GFP_KERNEL);
78 if (!sysfs_nodes) {
79 err = -ENOMEM;
80 goto out;
81 }
82
83 /* 61 /*
84 * MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes? 62 * MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes?
85 */ 63 */
86 for_each_online_node(i) { 64 for_each_online_node(i) {
87 if ((err = register_node(&sysfs_nodes[i], i, 0))) 65 if ((err = register_one_node(i)))
88 goto out; 66 goto out;
89 } 67 }
90#endif 68#endif
@@ -426,7 +404,7 @@ static int __cpuinit cache_remove_dev(struct sys_device * sys_dev)
426 * When a cpu is hot-plugged, do a check and initiate 404 * When a cpu is hot-plugged, do a check and initiate
427 * cache kobject if necessary 405 * cache kobject if necessary
428 */ 406 */
429static int cache_cpu_callback(struct notifier_block *nfb, 407static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
430 unsigned long action, void *hcpu) 408 unsigned long action, void *hcpu)
431{ 409{
432 unsigned int cpu = (unsigned long)hcpu; 410 unsigned int cpu = (unsigned long)hcpu;
@@ -444,7 +422,7 @@ static int cache_cpu_callback(struct notifier_block *nfb,
444 return NOTIFY_OK; 422 return NOTIFY_OK;
445} 423}
446 424
447static struct notifier_block cache_cpu_notifier = 425static struct notifier_block __cpuinitdata cache_cpu_notifier =
448{ 426{
449 .notifier_call = cache_cpu_callback 427 .notifier_call = cache_cpu_callback
450}; 428};
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index b6bcc9fa36..525b082eb6 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -33,7 +33,6 @@
33 */ 33 */
34struct early_node_data { 34struct early_node_data {
35 struct ia64_node_data *node_data; 35 struct ia64_node_data *node_data;
36 pg_data_t *pgdat;
37 unsigned long pernode_addr; 36 unsigned long pernode_addr;
38 unsigned long pernode_size; 37 unsigned long pernode_size;
39 struct bootmem_data bootmem_data; 38 struct bootmem_data bootmem_data;
@@ -46,6 +45,8 @@ struct early_node_data {
46static struct early_node_data mem_data[MAX_NUMNODES] __initdata; 45static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
47static nodemask_t memory_less_mask __initdata; 46static nodemask_t memory_less_mask __initdata;
48 47
48static pg_data_t *pgdat_list[MAX_NUMNODES];
49
49/* 50/*
50 * To prevent cache aliasing effects, align per-node structures so that they 51 * To prevent cache aliasing effects, align per-node structures so that they
51 * start at addresses that are strided by node number. 52 * start at addresses that are strided by node number.
@@ -99,7 +100,7 @@ static int __init build_node_maps(unsigned long start, unsigned long len,
99 * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been 100 * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
100 * called yet. Note that node 0 will also count all non-existent cpus. 101 * called yet. Note that node 0 will also count all non-existent cpus.
101 */ 102 */
102static int __init early_nr_cpus_node(int node) 103static int __meminit early_nr_cpus_node(int node)
103{ 104{
104 int cpu, n = 0; 105 int cpu, n = 0;
105 106
@@ -114,7 +115,7 @@ static int __init early_nr_cpus_node(int node)
114 * compute_pernodesize - compute size of pernode data 115 * compute_pernodesize - compute size of pernode data
115 * @node: the node id. 116 * @node: the node id.
116 */ 117 */
117static unsigned long __init compute_pernodesize(int node) 118static unsigned long __meminit compute_pernodesize(int node)
118{ 119{
119 unsigned long pernodesize = 0, cpus; 120 unsigned long pernodesize = 0, cpus;
120 121
@@ -175,13 +176,13 @@ static void __init fill_pernode(int node, unsigned long pernode,
175 pernode += PERCPU_PAGE_SIZE * cpus; 176 pernode += PERCPU_PAGE_SIZE * cpus;
176 pernode += node * L1_CACHE_BYTES; 177 pernode += node * L1_CACHE_BYTES;
177 178
178 mem_data[node].pgdat = __va(pernode); 179 pgdat_list[node] = __va(pernode);
179 pernode += L1_CACHE_ALIGN(sizeof(pg_data_t)); 180 pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
180 181
181 mem_data[node].node_data = __va(pernode); 182 mem_data[node].node_data = __va(pernode);
182 pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data)); 183 pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
183 184
184 mem_data[node].pgdat->bdata = bdp; 185 pgdat_list[node]->bdata = bdp;
185 pernode += L1_CACHE_ALIGN(sizeof(pg_data_t)); 186 pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
186 187
187 cpu_data = per_cpu_node_setup(cpu_data, node); 188 cpu_data = per_cpu_node_setup(cpu_data, node);
@@ -268,7 +269,7 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
268static int __init free_node_bootmem(unsigned long start, unsigned long len, 269static int __init free_node_bootmem(unsigned long start, unsigned long len,
269 int node) 270 int node)
270{ 271{
271 free_bootmem_node(mem_data[node].pgdat, start, len); 272 free_bootmem_node(pgdat_list[node], start, len);
272 273
273 return 0; 274 return 0;
274} 275}
@@ -287,7 +288,7 @@ static void __init reserve_pernode_space(void)
287 int node; 288 int node;
288 289
289 for_each_online_node(node) { 290 for_each_online_node(node) {
290 pg_data_t *pdp = mem_data[node].pgdat; 291 pg_data_t *pdp = pgdat_list[node];
291 292
292 if (node_isset(node, memory_less_mask)) 293 if (node_isset(node, memory_less_mask))
293 continue; 294 continue;
@@ -307,6 +308,17 @@ static void __init reserve_pernode_space(void)
307 } 308 }
308} 309}
309 310
311static void __meminit scatter_node_data(void)
312{
313 pg_data_t **dst;
314 int node;
315
316 for_each_online_node(node) {
317 dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs;
318 memcpy(dst, pgdat_list, sizeof(pgdat_list));
319 }
320}
321
310/** 322/**
311 * initialize_pernode_data - fixup per-cpu & per-node pointers 323 * initialize_pernode_data - fixup per-cpu & per-node pointers
312 * 324 *
@@ -317,17 +329,10 @@ static void __init reserve_pernode_space(void)
317 */ 329 */
318static void __init initialize_pernode_data(void) 330static void __init initialize_pernode_data(void)
319{ 331{
320 pg_data_t *pgdat_list[MAX_NUMNODES];
321 int cpu, node; 332 int cpu, node;
322 333
323 for_each_online_node(node) 334 scatter_node_data();
324 pgdat_list[node] = mem_data[node].pgdat;
325 335
326 /* Copy the pg_data_t list to each node and init the node field */
327 for_each_online_node(node) {
328 memcpy(mem_data[node].node_data->pg_data_ptrs, pgdat_list,
329 sizeof(pgdat_list));
330 }
331#ifdef CONFIG_SMP 336#ifdef CONFIG_SMP
332 /* Set the node_data pointer for each per-cpu struct */ 337 /* Set the node_data pointer for each per-cpu struct */
333 for (cpu = 0; cpu < NR_CPUS; cpu++) { 338 for (cpu = 0; cpu < NR_CPUS; cpu++) {
@@ -372,7 +377,7 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
372 if (bestnode == -1) 377 if (bestnode == -1)
373 bestnode = anynode; 378 bestnode = anynode;
374 379
375 ptr = __alloc_bootmem_node(mem_data[bestnode].pgdat, pernodesize, 380 ptr = __alloc_bootmem_node(pgdat_list[bestnode], pernodesize,
376 PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); 381 PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
377 382
378 return ptr; 383 return ptr;
@@ -476,7 +481,7 @@ void __init find_memory(void)
476 pernodesize = mem_data[node].pernode_size; 481 pernodesize = mem_data[node].pernode_size;
477 map = pernode + pernodesize; 482 map = pernode + pernodesize;
478 483
479 init_bootmem_node(mem_data[node].pgdat, 484 init_bootmem_node(pgdat_list[node],
480 map>>PAGE_SHIFT, 485 map>>PAGE_SHIFT,
481 bdp->node_boot_start>>PAGE_SHIFT, 486 bdp->node_boot_start>>PAGE_SHIFT,
482 bdp->node_low_pfn); 487 bdp->node_low_pfn);
@@ -786,3 +791,21 @@ void __init paging_init(void)
786 791
787 zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page)); 792 zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
788} 793}
794
795pg_data_t *arch_alloc_nodedata(int nid)
796{
797 unsigned long size = compute_pernodesize(nid);
798
799 return kzalloc(size, GFP_KERNEL);
800}
801
802void arch_free_nodedata(pg_data_t *pgdat)
803{
804 kfree(pgdat);
805}
806
807void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
808{
809 pgdat_list[update_node] = update_pgdat;
810 scatter_node_data();
811}
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index d98ec49570..14ef7cceb2 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -19,6 +19,40 @@
19 19
20extern void die (char *, struct pt_regs *, long); 20extern void die (char *, struct pt_regs *, long);
21 21
22#ifdef CONFIG_KPROBES
23ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
24
25/* Hook to register for page fault notifications */
26int register_page_fault_notifier(struct notifier_block *nb)
27{
28 return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
29}
30
31int unregister_page_fault_notifier(struct notifier_block *nb)
32{
33 return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
34}
35
36static inline int notify_page_fault(enum die_val val, const char *str,
37 struct pt_regs *regs, long err, int trap, int sig)
38{
39 struct die_args args = {
40 .regs = regs,
41 .str = str,
42 .err = err,
43 .trapnr = trap,
44 .signr = sig
45 };
46 return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
47}
48#else
49static inline int notify_page_fault(enum die_val val, const char *str,
50 struct pt_regs *regs, long err, int trap, int sig)
51{
52 return NOTIFY_DONE;
53}
54#endif
55
22/* 56/*
23 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment 57 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
24 * (inside region 5, on ia64) and that page is present. 58 * (inside region 5, on ia64) and that page is present.
@@ -84,7 +118,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
84 /* 118 /*
85 * This is to handle the kprobes on user space access instructions 119 * This is to handle the kprobes on user space access instructions
86 */ 120 */
87 if (notify_die(DIE_PAGE_FAULT, "page fault", regs, code, TRAP_BRKPT, 121 if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, code, TRAP_BRKPT,
88 SIGSEGV) == NOTIFY_STOP) 122 SIGSEGV) == NOTIFY_STOP)
89 return; 123 return;
90 124
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 11f08001f8..38306e98f0 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -652,7 +652,7 @@ void online_page(struct page *page)
652 num_physpages++; 652 num_physpages++;
653} 653}
654 654
655int add_memory(u64 start, u64 size) 655int arch_add_memory(int nid, u64 start, u64 size)
656{ 656{
657 pg_data_t *pgdat; 657 pg_data_t *pgdat;
658 struct zone *zone; 658 struct zone *zone;
@@ -660,7 +660,7 @@ int add_memory(u64 start, u64 size)
660 unsigned long nr_pages = size >> PAGE_SHIFT; 660 unsigned long nr_pages = size >> PAGE_SHIFT;
661 int ret; 661 int ret;
662 662
663 pgdat = NODE_DATA(0); 663 pgdat = NODE_DATA(nid);
664 664
665 zone = pgdat->node_zones + ZONE_NORMAL; 665 zone = pgdat->node_zones + ZONE_NORMAL;
666 ret = __add_pages(zone, start_pfn, nr_pages); 666 ret = __add_pages(zone, start_pfn, nr_pages);
@@ -671,7 +671,6 @@ int add_memory(u64 start, u64 size)
671 671
672 return ret; 672 return ret;
673} 673}
674EXPORT_SYMBOL_GPL(add_memory);
675 674
676int remove_memory(u64 start, u64 size) 675int remove_memory(u64 start, u64 size)
677{ 676{
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index dc8e2b6967..677c6c0fd6 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -27,7 +27,7 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
27int sn_force_interrupt_flag = 1; 27int sn_force_interrupt_flag = 1;
28extern int sn_ioif_inited; 28extern int sn_ioif_inited;
29struct list_head **sn_irq_lh; 29struct list_head **sn_irq_lh;
30static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */ 30static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */
31 31
32u64 sn_intr_alloc(nasid_t local_nasid, int local_widget, 32u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
33 struct sn_irq_info *sn_irq_info, 33 struct sn_irq_info *sn_irq_info,
diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c
index 3cd3c2988a..1ff483c8a4 100644
--- a/arch/m32r/kernel/setup.c
+++ b/arch/m32r/kernel/setup.c
@@ -275,7 +275,7 @@ static int __init topology_init(void)
275 int i; 275 int i;
276 276
277 for_each_present_cpu(i) 277 for_each_present_cpu(i)
278 register_cpu(&cpu_devices[i], i, NULL); 278 register_cpu(&cpu_devices[i], i);
279 279
280 return 0; 280 return 0;
281} 281}
diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c
index d6d582a5ab..a226668f20 100644
--- a/arch/m68k/mm/memory.c
+++ b/arch/m68k/mm/memory.c
@@ -94,8 +94,7 @@ pmd_t *get_pointer_table (void)
94 PD_MARKBITS(dp) = mask & ~tmp; 94 PD_MARKBITS(dp) = mask & ~tmp;
95 if (!PD_MARKBITS(dp)) { 95 if (!PD_MARKBITS(dp)) {
96 /* move to end of list */ 96 /* move to end of list */
97 list_del(dp); 97 list_move_tail(dp, &ptable_list);
98 list_add_tail(dp, &ptable_list);
99 } 98 }
100 return (pmd_t *) (page_address(PD_PAGE(dp)) + off); 99 return (pmd_t *) (page_address(PD_PAGE(dp)) + off);
101} 100}
@@ -123,8 +122,7 @@ int free_pointer_table (pmd_t *ptable)
123 * move this descriptor to the front of the list, since 122 * move this descriptor to the front of the list, since
124 * it has one or more free tables. 123 * it has one or more free tables.
125 */ 124 */
126 list_del(dp); 125 list_move(dp, &ptable_list);
127 list_add(dp, &ptable_list);
128 } 126 }
129 return 0; 127 return 0;
130} 128}
diff --git a/arch/m68k/sun3/sun3dvma.c b/arch/m68k/sun3/sun3dvma.c
index f04a1d25f1..97c7bfde8a 100644
--- a/arch/m68k/sun3/sun3dvma.c
+++ b/arch/m68k/sun3/sun3dvma.c
@@ -119,8 +119,7 @@ static inline int refill(void)
119 if(hole->end == prev->start) { 119 if(hole->end == prev->start) {
120 hole->size += prev->size; 120 hole->size += prev->size;
121 hole->end = prev->end; 121 hole->end = prev->end;
122 list_del(&(prev->list)); 122 list_move(&(prev->list), &hole_cache);
123 list_add(&(prev->list), &hole_cache);
124 ret++; 123 ret++;
125 } 124 }
126 125
@@ -182,8 +181,7 @@ static inline unsigned long get_baddr(int len, unsigned long align)
182#endif 181#endif
183 return hole->end; 182 return hole->end;
184 } else if(hole->size == newlen) { 183 } else if(hole->size == newlen) {
185 list_del(&(hole->list)); 184 list_move(&(hole->list), &hole_cache);
186 list_add(&(hole->list), &hole_cache);
187 dvma_entry_use(hole->start) = newlen; 185 dvma_entry_use(hole->start) = newlen;
188#ifdef DVMA_DEBUG 186#ifdef DVMA_DEBUG
189 dvma_allocs++; 187 dvma_allocs++;
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index 6c6980b9b6..8b6e723eb8 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -472,38 +472,46 @@ config 4KSTACKS
472 running more threads on a system and also reduces the pressure 472 running more threads on a system and also reduces the pressure
473 on the VM subsystem for higher order allocations. 473 on the VM subsystem for higher order allocations.
474 474
475choice 475comment "RAM configuration"
476 prompt "RAM size" 476
477 default AUTO 477config RAMBASE
478 478 hex "Address of the base of RAM"
479config RAMAUTO 479 default "0"
480 bool "AUTO" 480 help
481 ---help--- 481 Define the address that RAM starts at. On many platforms this is
482 Configure the RAM size on your platform. Many platforms can auto 482 0, the base of the address space. And this is the default. Some
483 detect this, on those choose the AUTO option. Otherwise set the 483 platforms choose to setup their RAM at other addresses within the
484 RAM size you intend using. 484 processor address space.
485 485
486config RAM4MB 486config RAMSIZE
487 bool "4MiB" 487 hex "Size of RAM (in bytes)"
488 help 488 default "0x400000"
489 Set RAM size to be 4MiB. 489 help
490 490 Define the size of the system RAM. If you select 0 then the
491config RAM8MB 491 kernel will try to probe the RAM size at runtime. This is not
492 bool "8MiB" 492 supported on all CPU types.
493 help 493
494 Set RAM size to be 8MiB. 494config VECTORBASE
495 495 hex "Address of the base of system vectors"
496config RAM16MB 496 default "0"
497 bool "16MiB" 497 help
498 help 498 Define the address of the the system vectors. Commonly this is
499 Set RAM size to be 16MiB. 499 put at the start of RAM, but it doesn't have to be. On ColdFire
500 500 platforms this address is programmed into the VBR register, thus
501config RAM32MB 501 actually setting the address to use.
502 bool "32MiB" 502
503 help 503config KERNELBASE
504 Set RAM size to be 32MiB. 504 hex "Address of the base of kernel code"
505 505 default "0x400"
506endchoice 506 help
507 Typically on m68k systems the kernel will not start at the base
508 of RAM, but usually some small offset from it. Define the start
509 address of the kernel here. The most common setup will have the
510 processor vectors at the base of RAM and then the start of the
511 kernel. On some platforms some RAM is reserved for boot loaders
512 and the kernel starts after that. The 0x400 default was based on
513 a system with the RAM based at address 0, and leaving enough room
514 for the theoretical maximum number of 256 vectors.
507 515
508choice 516choice
509 prompt "RAM bus width" 517 prompt "RAM bus width"
@@ -511,7 +519,7 @@ choice
511 519
512config RAMAUTOBIT 520config RAMAUTOBIT
513 bool "AUTO" 521 bool "AUTO"
514 ---help--- 522 help
515 Select the physical RAM data bus size. Not needed on most platforms, 523 Select the physical RAM data bus size. Not needed on most platforms,
516 so you can generally choose AUTO. 524 so you can generally choose AUTO.
517 525
@@ -545,7 +553,9 @@ config RAMKERNEL
545config ROMKERNEL 553config ROMKERNEL
546 bool "ROM" 554 bool "ROM"
547 help 555 help
548 The kernel will be resident in FLASH/ROM when running. 556 The kernel will be resident in FLASH/ROM when running. This is
557 often referred to as Execute-in-Place (XIP), since the kernel
558 code executes from the position it is stored in the FLASH/ROM.
549 559
550endchoice 560endchoice
551 561
diff --git a/arch/m68knommu/Makefile b/arch/m68knommu/Makefile
index 6f880cbff1..8951793fd8 100644
--- a/arch/m68knommu/Makefile
+++ b/arch/m68knommu/Makefile
@@ -21,6 +21,7 @@ platform-$(CONFIG_M527x) := 527x
21platform-$(CONFIG_M5272) := 5272 21platform-$(CONFIG_M5272) := 5272
22platform-$(CONFIG_M528x) := 528x 22platform-$(CONFIG_M528x) := 528x
23platform-$(CONFIG_M5307) := 5307 23platform-$(CONFIG_M5307) := 5307
24platform-$(CONFIG_M532x) := 532x
24platform-$(CONFIG_M5407) := 5407 25platform-$(CONFIG_M5407) := 5407
25PLATFORM := $(platform-y) 26PLATFORM := $(platform-y)
26 27
@@ -44,6 +45,7 @@ board-$(CONFIG_senTec) := senTec
44board-$(CONFIG_SNEHA) := SNEHA 45board-$(CONFIG_SNEHA) := SNEHA
45board-$(CONFIG_M5208EVB) := M5208EVB 46board-$(CONFIG_M5208EVB) := M5208EVB
46board-$(CONFIG_MOD5272) := MOD5272 47board-$(CONFIG_MOD5272) := MOD5272
48board-$(CONFIG_AVNET) := AVNET
47BOARD := $(board-y) 49BOARD := $(board-y)
48 50
49model-$(CONFIG_RAMKERNEL) := ram 51model-$(CONFIG_RAMKERNEL) := ram
@@ -65,6 +67,7 @@ cpuclass-$(CONFIG_M527x) := 5307
65cpuclass-$(CONFIG_M5272) := 5307 67cpuclass-$(CONFIG_M5272) := 5307
66cpuclass-$(CONFIG_M528x) := 5307 68cpuclass-$(CONFIG_M528x) := 5307
67cpuclass-$(CONFIG_M5307) := 5307 69cpuclass-$(CONFIG_M5307) := 5307
70cpuclass-$(CONFIG_M532x) := 5307
68cpuclass-$(CONFIG_M5407) := 5307 71cpuclass-$(CONFIG_M5407) := 5307
69cpuclass-$(CONFIG_M68328) := 68328 72cpuclass-$(CONFIG_M68328) := 68328
70cpuclass-$(CONFIG_M68EZ328) := 68328 73cpuclass-$(CONFIG_M68EZ328) := 68328
@@ -81,16 +84,17 @@ export PLATFORM BOARD MODEL CPUCLASS
81# 84#
82# Some CFLAG additions based on specific CPU type. 85# Some CFLAG additions based on specific CPU type.
83# 86#
84cflags-$(CONFIG_M5206) := -m5200 -Wa,-S -Wa,-m5200 87cflags-$(CONFIG_M5206) := -m5200
85cflags-$(CONFIG_M5206e) := -m5200 -Wa,-S -Wa,-m5200 88cflags-$(CONFIG_M5206e) := -m5200
86cflags-$(CONFIG_M520x) := -m5307 -Wa,-S -Wa,-m5307 89cflags-$(CONFIG_M520x) := -m5307
87cflags-$(CONFIG_M523x) := -m5307 -Wa,-S -Wa,-m5307 90cflags-$(CONFIG_M523x) := -m5307
88cflags-$(CONFIG_M5249) := -m5200 -Wa,-S -Wa,-m5200 91cflags-$(CONFIG_M5249) := -m5200
89cflags-$(CONFIG_M527x) := -m5307 -Wa,-S -Wa,-m5307 92cflags-$(CONFIG_M527x) := -m5307
90cflags-$(CONFIG_M5272) := -m5307 -Wa,-S -Wa,-m5307 93cflags-$(CONFIG_M5272) := -m5307
91cflags-$(CONFIG_M528x) := -m5307 -Wa,-S -Wa,-m5307 94cflags-$(CONFIG_M528x) := -m5307
92cflags-$(CONFIG_M5307) := -m5307 -Wa,-S -Wa,-m5307 95cflags-$(CONFIG_M5307) := -m5307
93cflags-$(CONFIG_M5407) := -m5200 -Wa,-S -Wa,-m5200 96cflags-$(CONFIG_M532x) := -m5307
97cflags-$(CONFIG_M5407) := -m5200
94cflags-$(CONFIG_M68328) := -m68000 98cflags-$(CONFIG_M68328) := -m68000
95cflags-$(CONFIG_M68EZ328) := -m68000 99cflags-$(CONFIG_M68EZ328) := -m68000
96cflags-$(CONFIG_M68VZ328) := -m68000 100cflags-$(CONFIG_M68VZ328) := -m68000
diff --git a/arch/m68knommu/defconfig b/arch/m68knommu/defconfig
index 2d59ba1a79..3891de09ac 100644
--- a/arch/m68knommu/defconfig
+++ b/arch/m68knommu/defconfig
@@ -1,21 +1,22 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.13-uc0 3# Linux kernel version: 2.6.17
4# Wed Aug 31 15:03:26 2005 4# Tue Jun 27 12:57:06 2006
5# 5#
6CONFIG_M68KNOMMU=y 6CONFIG_M68K=y
7# CONFIG_MMU is not set 7# CONFIG_MMU is not set
8# CONFIG_FPU is not set 8# CONFIG_FPU is not set
9CONFIG_UID16=y
10CONFIG_RWSEM_GENERIC_SPINLOCK=y 9CONFIG_RWSEM_GENERIC_SPINLOCK=y
11# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set 10# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
11CONFIG_GENERIC_FIND_NEXT_BIT=y
12CONFIG_GENERIC_HWEIGHT=y
12CONFIG_GENERIC_CALIBRATE_DELAY=y 13CONFIG_GENERIC_CALIBRATE_DELAY=y
14CONFIG_TIME_LOW_RES=y
13 15
14# 16#
15# Code maturity level options 17# Code maturity level options
16# 18#
17CONFIG_EXPERIMENTAL=y 19CONFIG_EXPERIMENTAL=y
18CONFIG_CLEAN_COMPILE=y
19CONFIG_BROKEN_ON_SMP=y 20CONFIG_BROKEN_ON_SMP=y
20CONFIG_INIT_ENV_ARG_LIMIT=32 21CONFIG_INIT_ENV_ARG_LIMIT=32
21 22
@@ -23,26 +24,30 @@ CONFIG_INIT_ENV_ARG_LIMIT=32
23# General setup 24# General setup
24# 25#
25CONFIG_LOCALVERSION="" 26CONFIG_LOCALVERSION=""
27CONFIG_LOCALVERSION_AUTO=y
28# CONFIG_SYSVIPC is not set
26# CONFIG_POSIX_MQUEUE is not set 29# CONFIG_POSIX_MQUEUE is not set
27# CONFIG_BSD_PROCESS_ACCT is not set 30# CONFIG_BSD_PROCESS_ACCT is not set
28# CONFIG_SYSCTL is not set 31# CONFIG_SYSCTL is not set
29# CONFIG_AUDIT is not set 32# CONFIG_AUDIT is not set
30# CONFIG_HOTPLUG is not set
31# CONFIG_KOBJECT_UEVENT is not set
32# CONFIG_IKCONFIG is not set 33# CONFIG_IKCONFIG is not set
34# CONFIG_RELAY is not set
35CONFIG_INITRAMFS_SOURCE=""
36CONFIG_UID16=y
37# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
33CONFIG_EMBEDDED=y 38CONFIG_EMBEDDED=y
34# CONFIG_KALLSYMS is not set 39# CONFIG_KALLSYMS is not set
40# CONFIG_HOTPLUG is not set
35CONFIG_PRINTK=y 41CONFIG_PRINTK=y
36CONFIG_BUG=y 42CONFIG_BUG=y
43CONFIG_ELF_CORE=y
37CONFIG_BASE_FULL=y 44CONFIG_BASE_FULL=y
38# CONFIG_FUTEX is not set 45# CONFIG_FUTEX is not set
39# CONFIG_EPOLL is not set 46# CONFIG_EPOLL is not set
40# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 47CONFIG_SLAB=y
41CONFIG_CC_ALIGN_FUNCTIONS=0 48CONFIG_TINY_SHMEM=y
42CONFIG_CC_ALIGN_LABELS=0
43CONFIG_CC_ALIGN_LOOPS=0
44CONFIG_CC_ALIGN_JUMPS=0
45CONFIG_BASE_SMALL=0 49CONFIG_BASE_SMALL=0
50# CONFIG_SLOB is not set
46 51
47# 52#
48# Loadable module support 53# Loadable module support
@@ -50,6 +55,24 @@ CONFIG_BASE_SMALL=0
50# CONFIG_MODULES is not set 55# CONFIG_MODULES is not set
51 56
52# 57#
58# Block layer
59#
60# CONFIG_BLK_DEV_IO_TRACE is not set
61
62#
63# IO Schedulers
64#
65CONFIG_IOSCHED_NOOP=y
66# CONFIG_IOSCHED_AS is not set
67# CONFIG_IOSCHED_DEADLINE is not set
68# CONFIG_IOSCHED_CFQ is not set
69# CONFIG_DEFAULT_AS is not set
70# CONFIG_DEFAULT_DEADLINE is not set
71# CONFIG_DEFAULT_CFQ is not set
72CONFIG_DEFAULT_NOOP=y
73CONFIG_DEFAULT_IOSCHED="noop"
74
75#
53# Processor type and features 76# Processor type and features
54# 77#
55# CONFIG_M68328 is not set 78# CONFIG_M68328 is not set
@@ -58,6 +81,7 @@ CONFIG_BASE_SMALL=0
58# CONFIG_M68360 is not set 81# CONFIG_M68360 is not set
59# CONFIG_M5206 is not set 82# CONFIG_M5206 is not set
60# CONFIG_M5206e is not set 83# CONFIG_M5206e is not set
84# CONFIG_M520x is not set
61# CONFIG_M523x is not set 85# CONFIG_M523x is not set
62# CONFIG_M5249 is not set 86# CONFIG_M5249 is not set
63# CONFIG_M5271 is not set 87# CONFIG_M5271 is not set
@@ -65,29 +89,12 @@ CONFIG_M5272=y
65# CONFIG_M5275 is not set 89# CONFIG_M5275 is not set
66# CONFIG_M528x is not set 90# CONFIG_M528x is not set
67# CONFIG_M5307 is not set 91# CONFIG_M5307 is not set
92# CONFIG_M532x is not set
68# CONFIG_M5407 is not set 93# CONFIG_M5407 is not set
69CONFIG_COLDFIRE=y 94CONFIG_COLDFIRE=y
70# CONFIG_CLOCK_AUTO is not set 95CONFIG_CLOCK_SET=y
71# CONFIG_CLOCK_11MHz is not set 96CONFIG_CLOCK_FREQ=66666666
72# CONFIG_CLOCK_16MHz is not set 97CONFIG_CLOCK_DIV=1
73# CONFIG_CLOCK_20MHz is not set
74# CONFIG_CLOCK_24MHz is not set
75# CONFIG_CLOCK_25MHz is not set
76# CONFIG_CLOCK_33MHz is not set
77# CONFIG_CLOCK_40MHz is not set
78# CONFIG_CLOCK_45MHz is not set
79# CONFIG_CLOCK_48MHz is not set
80# CONFIG_CLOCK_50MHz is not set
81# CONFIG_CLOCK_54MHz is not set
82# CONFIG_CLOCK_60MHz is not set
83# CONFIG_CLOCK_62_5MHz is not set
84# CONFIG_CLOCK_64MHz is not set
85CONFIG_CLOCK_66MHz=y
86# CONFIG_CLOCK_70MHz is not set
87# CONFIG_CLOCK_100MHz is not set
88# CONFIG_CLOCK_140MHz is not set
89# CONFIG_CLOCK_150MHz is not set
90# CONFIG_CLOCK_166MHz is not set
91 98
92# 99#
93# Platform 100# Platform
@@ -102,11 +109,14 @@ CONFIG_M5272C3=y
102CONFIG_FREESCALE=y 109CONFIG_FREESCALE=y
103# CONFIG_LARGE_ALLOCS is not set 110# CONFIG_LARGE_ALLOCS is not set
104CONFIG_4KSTACKS=y 111CONFIG_4KSTACKS=y
105CONFIG_RAMAUTO=y 112
106# CONFIG_RAM4MB is not set 113#
107# CONFIG_RAM8MB is not set 114# RAM configuration
108# CONFIG_RAM16MB is not set 115#
109# CONFIG_RAM32MB is not set 116CONFIG_RAMBASE=0x0
117CONFIG_RAMSIZE=0x800000
118CONFIG_VECTORBASE=0x0
119CONFIG_KERNELBASE=0x20000
110CONFIG_RAMAUTOBIT=y 120CONFIG_RAMAUTOBIT=y
111# CONFIG_RAM8BIT is not set 121# CONFIG_RAM8BIT is not set
112# CONFIG_RAM16BIT is not set 122# CONFIG_RAM16BIT is not set
@@ -119,6 +129,8 @@ CONFIG_FLATMEM_MANUAL=y
119# CONFIG_SPARSEMEM_MANUAL is not set 129# CONFIG_SPARSEMEM_MANUAL is not set
120CONFIG_FLATMEM=y 130CONFIG_FLATMEM=y
121CONFIG_FLAT_NODE_MEM_MAP=y 131CONFIG_FLAT_NODE_MEM_MAP=y
132# CONFIG_SPARSEMEM_STATIC is not set
133CONFIG_SPLIT_PTLOCK_CPUS=4
122 134
123# 135#
124# Bus options (PCI, PCMCIA, EISA, MCA, ISA) 136# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
@@ -140,6 +152,7 @@ CONFIG_FLAT_NODE_MEM_MAP=y
140CONFIG_BINFMT_FLAT=y 152CONFIG_BINFMT_FLAT=y
141# CONFIG_BINFMT_ZFLAT is not set 153# CONFIG_BINFMT_ZFLAT is not set
142# CONFIG_BINFMT_SHARED_FLAT is not set 154# CONFIG_BINFMT_SHARED_FLAT is not set
155# CONFIG_BINFMT_AOUT is not set
143# CONFIG_BINFMT_MISC is not set 156# CONFIG_BINFMT_MISC is not set
144 157
145# 158#
@@ -155,6 +168,7 @@ CONFIG_NET=y
155# 168#
156# Networking options 169# Networking options
157# 170#
171# CONFIG_NETDEBUG is not set
158CONFIG_PACKET=y 172CONFIG_PACKET=y
159# CONFIG_PACKET_MMAP is not set 173# CONFIG_PACKET_MMAP is not set
160CONFIG_UNIX=y 174CONFIG_UNIX=y
@@ -171,18 +185,30 @@ CONFIG_IP_FIB_HASH=y
171# CONFIG_INET_AH is not set 185# CONFIG_INET_AH is not set
172# CONFIG_INET_ESP is not set 186# CONFIG_INET_ESP is not set
173# CONFIG_INET_IPCOMP is not set 187# CONFIG_INET_IPCOMP is not set
188# CONFIG_INET_XFRM_TUNNEL is not set
174# CONFIG_INET_TUNNEL is not set 189# CONFIG_INET_TUNNEL is not set
175# CONFIG_IP_TCPDIAG is not set 190# CONFIG_INET_DIAG is not set
176# CONFIG_IP_TCPDIAG_IPV6 is not set
177# CONFIG_TCP_CONG_ADVANCED is not set 191# CONFIG_TCP_CONG_ADVANCED is not set
178CONFIG_TCP_CONG_BIC=y 192CONFIG_TCP_CONG_BIC=y
179# CONFIG_IPV6 is not set 193# CONFIG_IPV6 is not set
194# CONFIG_INET6_XFRM_TUNNEL is not set
195# CONFIG_INET6_TUNNEL is not set
180# CONFIG_NETFILTER is not set 196# CONFIG_NETFILTER is not set
181 197
182# 198#
199# DCCP Configuration (EXPERIMENTAL)
200#
201# CONFIG_IP_DCCP is not set
202
203#
183# SCTP Configuration (EXPERIMENTAL) 204# SCTP Configuration (EXPERIMENTAL)
184# 205#
185# CONFIG_IP_SCTP is not set 206# CONFIG_IP_SCTP is not set
207
208#
209# TIPC Configuration (EXPERIMENTAL)
210#
211# CONFIG_TIPC is not set
186# CONFIG_ATM is not set 212# CONFIG_ATM is not set
187# CONFIG_BRIDGE is not set 213# CONFIG_BRIDGE is not set
188# CONFIG_VLAN_8021Q is not set 214# CONFIG_VLAN_8021Q is not set
@@ -195,8 +221,11 @@ CONFIG_TCP_CONG_BIC=y
195# CONFIG_NET_DIVERT is not set 221# CONFIG_NET_DIVERT is not set
196# CONFIG_ECONET is not set 222# CONFIG_ECONET is not set
197# CONFIG_WAN_ROUTER is not set 223# CONFIG_WAN_ROUTER is not set
224
225#
226# QoS and/or fair queueing
227#
198# CONFIG_NET_SCHED is not set 228# CONFIG_NET_SCHED is not set
199# CONFIG_NET_CLS_ROUTE is not set
200 229
201# 230#
202# Network testing 231# Network testing
@@ -205,6 +234,7 @@ CONFIG_TCP_CONG_BIC=y
205# CONFIG_HAMRADIO is not set 234# CONFIG_HAMRADIO is not set
206# CONFIG_IRDA is not set 235# CONFIG_IRDA is not set
207# CONFIG_BT is not set 236# CONFIG_BT is not set
237# CONFIG_IEEE80211 is not set
208 238
209# 239#
210# Device Drivers 240# Device Drivers
@@ -218,6 +248,11 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
218# CONFIG_FW_LOADER is not set 248# CONFIG_FW_LOADER is not set
219 249
220# 250#
251# Connector - unified userspace <-> kernelspace linker
252#
253# CONFIG_CONNECTOR is not set
254
255#
221# Memory Technology Devices (MTD) 256# Memory Technology Devices (MTD)
222# 257#
223CONFIG_MTD=y 258CONFIG_MTD=y
@@ -235,6 +270,7 @@ CONFIG_MTD_BLOCK=y
235# CONFIG_FTL is not set 270# CONFIG_FTL is not set
236# CONFIG_NFTL is not set 271# CONFIG_NFTL is not set
237# CONFIG_INFTL is not set 272# CONFIG_INFTL is not set
273# CONFIG_RFD_FTL is not set
238 274
239# 275#
240# RAM/ROM/Flash chip drivers 276# RAM/ROM/Flash chip drivers
@@ -254,13 +290,13 @@ CONFIG_MTD_CFI_I2=y
254CONFIG_MTD_RAM=y 290CONFIG_MTD_RAM=y
255# CONFIG_MTD_ROM is not set 291# CONFIG_MTD_ROM is not set
256# CONFIG_MTD_ABSENT is not set 292# CONFIG_MTD_ABSENT is not set
293# CONFIG_MTD_OBSOLETE_CHIPS is not set
257 294
258# 295#
259# Mapping drivers for chip access 296# Mapping drivers for chip access
260# 297#
261# CONFIG_MTD_COMPLEX_MAPPINGS is not set 298# CONFIG_MTD_COMPLEX_MAPPINGS is not set
262CONFIG_MTD_UCLINUX=y 299CONFIG_MTD_UCLINUX=y
263# CONFIG_MTD_SNAPGEARuC is not set
264# CONFIG_MTD_PLATRAM is not set 300# CONFIG_MTD_PLATRAM is not set
265 301
266# 302#
@@ -269,7 +305,6 @@ CONFIG_MTD_UCLINUX=y
269# CONFIG_MTD_SLRAM is not set 305# CONFIG_MTD_SLRAM is not set
270# CONFIG_MTD_PHRAM is not set 306# CONFIG_MTD_PHRAM is not set
271# CONFIG_MTD_MTDRAM is not set 307# CONFIG_MTD_MTDRAM is not set
272# CONFIG_MTD_BLKMTD is not set
273# CONFIG_MTD_BLOCK2MTD is not set 308# CONFIG_MTD_BLOCK2MTD is not set
274 309
275# 310#
@@ -285,6 +320,11 @@ CONFIG_MTD_UCLINUX=y
285# CONFIG_MTD_NAND is not set 320# CONFIG_MTD_NAND is not set
286 321
287# 322#
323# OneNAND Flash Device Drivers
324#
325# CONFIG_MTD_ONENAND is not set
326
327#
288# Parallel port support 328# Parallel port support
289# 329#
290# CONFIG_PARPORT is not set 330# CONFIG_PARPORT is not set
@@ -296,7 +336,6 @@ CONFIG_MTD_UCLINUX=y
296# 336#
297# Block devices 337# Block devices
298# 338#
299# CONFIG_BLK_DEV_FD is not set
300# CONFIG_BLK_DEV_COW_COMMON is not set 339# CONFIG_BLK_DEV_COW_COMMON is not set
301# CONFIG_BLK_DEV_LOOP is not set 340# CONFIG_BLK_DEV_LOOP is not set
302# CONFIG_BLK_DEV_NBD is not set 341# CONFIG_BLK_DEV_NBD is not set
@@ -304,16 +343,7 @@ CONFIG_BLK_DEV_RAM=y
304CONFIG_BLK_DEV_RAM_COUNT=16 343CONFIG_BLK_DEV_RAM_COUNT=16
305CONFIG_BLK_DEV_RAM_SIZE=4096 344CONFIG_BLK_DEV_RAM_SIZE=4096
306# CONFIG_BLK_DEV_INITRD is not set 345# CONFIG_BLK_DEV_INITRD is not set
307CONFIG_INITRAMFS_SOURCE=""
308# CONFIG_CDROM_PKTCDVD is not set 346# CONFIG_CDROM_PKTCDVD is not set
309
310#
311# IO Schedulers
312#
313CONFIG_IOSCHED_NOOP=y
314# CONFIG_IOSCHED_AS is not set
315# CONFIG_IOSCHED_DEADLINE is not set
316# CONFIG_IOSCHED_CFQ is not set
317# CONFIG_ATA_OVER_ETH is not set 347# CONFIG_ATA_OVER_ETH is not set
318 348
319# 349#
@@ -324,6 +354,7 @@ CONFIG_IOSCHED_NOOP=y
324# 354#
325# SCSI device support 355# SCSI device support
326# 356#
357# CONFIG_RAID_ATTRS is not set
327# CONFIG_SCSI is not set 358# CONFIG_SCSI is not set
328 359
329# 360#
@@ -354,13 +385,15 @@ CONFIG_NETDEVICES=y
354# CONFIG_TUN is not set 385# CONFIG_TUN is not set
355 386
356# 387#
388# PHY device support
389#
390# CONFIG_PHYLIB is not set
391
392#
357# Ethernet (10 or 100Mbit) 393# Ethernet (10 or 100Mbit)
358# 394#
359CONFIG_NET_ETHERNET=y 395CONFIG_NET_ETHERNET=y
360# CONFIG_MII is not set 396# CONFIG_MII is not set
361# CONFIG_NET_VENDOR_SMC is not set
362# CONFIG_NE2000 is not set
363# CONFIG_NET_PCI is not set
364CONFIG_FEC=y 397CONFIG_FEC=y
365# CONFIG_FEC2 is not set 398# CONFIG_FEC2 is not set
366 399
@@ -392,6 +425,7 @@ CONFIG_PPP=y
392# CONFIG_PPP_SYNC_TTY is not set 425# CONFIG_PPP_SYNC_TTY is not set
393# CONFIG_PPP_DEFLATE is not set 426# CONFIG_PPP_DEFLATE is not set
394# CONFIG_PPP_BSDCOMP is not set 427# CONFIG_PPP_BSDCOMP is not set
428# CONFIG_PPP_MPPE is not set
395# CONFIG_PPPOE is not set 429# CONFIG_PPPOE is not set
396# CONFIG_SLIP is not set 430# CONFIG_SLIP is not set
397# CONFIG_SHAPER is not set 431# CONFIG_SHAPER is not set
@@ -425,8 +459,6 @@ CONFIG_PPP=y
425# 459#
426# CONFIG_VT is not set 460# CONFIG_VT is not set
427# CONFIG_SERIAL_NONSTANDARD is not set 461# CONFIG_SERIAL_NONSTANDARD is not set
428# CONFIG_LEDMAN is not set
429# CONFIG_RESETSWITCH is not set
430 462
431# 463#
432# Serial drivers 464# Serial drivers
@@ -450,8 +482,6 @@ CONFIG_LEGACY_PTY_COUNT=256
450# Watchdog Cards 482# Watchdog Cards
451# 483#
452# CONFIG_WATCHDOG is not set 484# CONFIG_WATCHDOG is not set
453# CONFIG_MCFWATCHDOG is not set
454# CONFIG_RTC is not set
455# CONFIG_GEN_RTC is not set 485# CONFIG_GEN_RTC is not set
456# CONFIG_DTLK is not set 486# CONFIG_DTLK is not set
457# CONFIG_R3964 is not set 487# CONFIG_R3964 is not set
@@ -464,14 +494,19 @@ CONFIG_LEGACY_PTY_COUNT=256
464# 494#
465# TPM devices 495# TPM devices
466# 496#
467# CONFIG_MCF_QSPI is not set 497# CONFIG_TCG_TPM is not set
468# CONFIG_M41T11M6 is not set 498# CONFIG_TELCLOCK is not set
469 499
470# 500#
471# I2C support 501# I2C support
472# 502#
473# CONFIG_I2C is not set 503# CONFIG_I2C is not set
474# CONFIG_I2C_SENSOR is not set 504
505#
506# SPI support
507#
508# CONFIG_SPI is not set
509# CONFIG_SPI_MASTER is not set
475 510
476# 511#
477# Dallas's 1-wire bus 512# Dallas's 1-wire bus
@@ -482,6 +517,7 @@ CONFIG_LEGACY_PTY_COUNT=256
482# Hardware Monitoring support 517# Hardware Monitoring support
483# 518#
484# CONFIG_HWMON is not set 519# CONFIG_HWMON is not set
520# CONFIG_HWMON_VID is not set
485 521
486# 522#
487# Misc devices 523# Misc devices
@@ -491,6 +527,7 @@ CONFIG_LEGACY_PTY_COUNT=256
491# Multimedia devices 527# Multimedia devices
492# 528#
493# CONFIG_VIDEO_DEV is not set 529# CONFIG_VIDEO_DEV is not set
530CONFIG_VIDEO_V4L2=y
494 531
495# 532#
496# Digital Video Broadcasting Devices 533# Digital Video Broadcasting Devices
@@ -503,11 +540,6 @@ CONFIG_LEGACY_PTY_COUNT=256
503# CONFIG_FB is not set 540# CONFIG_FB is not set
504 541
505# 542#
506# SPI support
507#
508# CONFIG_SPI is not set
509
510#
511# Sound 543# Sound
512# 544#
513# CONFIG_SOUND is not set 545# CONFIG_SOUND is not set
@@ -517,6 +549,11 @@ CONFIG_LEGACY_PTY_COUNT=256
517# 549#
518# CONFIG_USB_ARCH_HAS_HCD is not set 550# CONFIG_USB_ARCH_HAS_HCD is not set
519# CONFIG_USB_ARCH_HAS_OHCI is not set 551# CONFIG_USB_ARCH_HAS_OHCI is not set
552# CONFIG_USB_ARCH_HAS_EHCI is not set
553
554#
555# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
556#
520 557
521# 558#
522# USB Gadget Support 559# USB Gadget Support
@@ -529,29 +566,43 @@ CONFIG_LEGACY_PTY_COUNT=256
529# CONFIG_MMC is not set 566# CONFIG_MMC is not set
530 567
531# 568#
569# LED devices
570#
571# CONFIG_NEW_LEDS is not set
572
573#
574# LED drivers
575#
576
577#
578# LED Triggers
579#
580
581#
532# InfiniBand support 582# InfiniBand support
533# 583#
534 584
535# 585#
536# SN Devices 586# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
537# 587#
538 588
539# 589#
590# Real Time Clock
591#
592# CONFIG_RTC_CLASS is not set
593
594#
540# File systems 595# File systems
541# 596#
542CONFIG_EXT2_FS=y 597CONFIG_EXT2_FS=y
543# CONFIG_EXT2_FS_XATTR is not set 598# CONFIG_EXT2_FS_XATTR is not set
544# CONFIG_EXT2_FS_XIP is not set 599# CONFIG_EXT2_FS_XIP is not set
545# CONFIG_EXT3_FS is not set 600# CONFIG_EXT3_FS is not set
546# CONFIG_JBD is not set
547# CONFIG_REISERFS_FS is not set 601# CONFIG_REISERFS_FS is not set
548# CONFIG_JFS_FS is not set 602# CONFIG_JFS_FS is not set
549# CONFIG_FS_POSIX_ACL is not set 603# CONFIG_FS_POSIX_ACL is not set
550
551#
552# XFS support
553#
554# CONFIG_XFS_FS is not set 604# CONFIG_XFS_FS is not set
605# CONFIG_OCFS2_FS is not set
555# CONFIG_MINIX_FS is not set 606# CONFIG_MINIX_FS is not set
556CONFIG_ROMFS_FS=y 607CONFIG_ROMFS_FS=y
557# CONFIG_INOTIFY is not set 608# CONFIG_INOTIFY is not set
@@ -559,6 +610,7 @@ CONFIG_ROMFS_FS=y
559# CONFIG_DNOTIFY is not set 610# CONFIG_DNOTIFY is not set
560# CONFIG_AUTOFS_FS is not set 611# CONFIG_AUTOFS_FS is not set
561# CONFIG_AUTOFS4_FS is not set 612# CONFIG_AUTOFS4_FS is not set
613# CONFIG_FUSE_FS is not set
562 614
563# 615#
564# CD-ROM/DVD Filesystems 616# CD-ROM/DVD Filesystems
@@ -581,6 +633,7 @@ CONFIG_SYSFS=y
581# CONFIG_TMPFS is not set 633# CONFIG_TMPFS is not set
582# CONFIG_HUGETLB_PAGE is not set 634# CONFIG_HUGETLB_PAGE is not set
583CONFIG_RAMFS=y 635CONFIG_RAMFS=y
636# CONFIG_CONFIGFS_FS is not set
584 637
585# 638#
586# Miscellaneous filesystems 639# Miscellaneous filesystems
@@ -611,6 +664,7 @@ CONFIG_RAMFS=y
611# CONFIG_NCP_FS is not set 664# CONFIG_NCP_FS is not set
612# CONFIG_CODA_FS is not set 665# CONFIG_CODA_FS is not set
613# CONFIG_AFS_FS is not set 666# CONFIG_AFS_FS is not set
667# CONFIG_9P_FS is not set
614 668
615# 669#
616# Partition Types 670# Partition Types
@@ -627,8 +681,12 @@ CONFIG_MSDOS_PARTITION=y
627# Kernel hacking 681# Kernel hacking
628# 682#
629# CONFIG_PRINTK_TIME is not set 683# CONFIG_PRINTK_TIME is not set
684# CONFIG_MAGIC_SYSRQ is not set
630# CONFIG_DEBUG_KERNEL is not set 685# CONFIG_DEBUG_KERNEL is not set
631CONFIG_LOG_BUF_SHIFT=14 686CONFIG_LOG_BUF_SHIFT=14
687# CONFIG_DEBUG_BUGVERBOSE is not set
688# CONFIG_DEBUG_FS is not set
689# CONFIG_UNWIND_INFO is not set
632# CONFIG_FULLDEBUG is not set 690# CONFIG_FULLDEBUG is not set
633# CONFIG_HIGHPROFILE is not set 691# CONFIG_HIGHPROFILE is not set
634# CONFIG_BOOTPARAM is not set 692# CONFIG_BOOTPARAM is not set
@@ -655,5 +713,6 @@ CONFIG_LOG_BUF_SHIFT=14
655# Library routines 713# Library routines
656# 714#
657# CONFIG_CRC_CCITT is not set 715# CONFIG_CRC_CCITT is not set
716# CONFIG_CRC16 is not set
658# CONFIG_CRC32 is not set 717# CONFIG_CRC32 is not set
659# CONFIG_LIBCRC32C is not set 718# CONFIG_LIBCRC32C is not set
diff --git a/arch/m68knommu/kernel/vmlinux.lds.S b/arch/m68knommu/kernel/vmlinux.lds.S
index a331cc9079..6a2f0c6932 100644
--- a/arch/m68knommu/kernel/vmlinux.lds.S
+++ b/arch/m68knommu/kernel/vmlinux.lds.S
@@ -1,7 +1,7 @@
1/* 1/*
2 * vmlinux.lds.S -- master linker script for m68knommu arch 2 * vmlinux.lds.S -- master linker script for m68knommu arch
3 * 3 *
4 * (C) Copyright 2002-2004, Greg Ungerer <gerg@snapgear.com> 4 * (C) Copyright 2002-2006, Greg Ungerer <gerg@snapgear.com>
5 * 5 *
6 * This ends up looking compilcated, because of the number of 6 * This ends up looking compilcated, because of the number of
7 * address variations for ram and rom/flash layouts. The real 7 * address variations for ram and rom/flash layouts. The real
@@ -22,13 +22,7 @@
22#define ROM_START 0x10c10400 22#define ROM_START 0x10c10400
23#define ROM_LENGTH 0xfec00 23#define ROM_LENGTH 0xfec00
24#define ROM_END 0x10d00000 24#define ROM_END 0x10d00000
25#define RAMVEC_START 0x00000000 25#define DATA_ADDR CONFIG_KERNELBASE
26#define RAMVEC_LENGTH 0x400
27#define RAM_START 0x10000400
28#define RAM_LENGTH 0xffc00
29#define RAM_END 0x10100000
30#define _ramend _ram_end_notused
31#define DATA_ADDR RAM_START
32#endif 26#endif
33 27
34/* 28/*
@@ -41,11 +35,6 @@
41#define ROM_START 0x10c10400 35#define ROM_START 0x10c10400
42#define ROM_LENGTH 0x1efc00 36#define ROM_LENGTH 0x1efc00
43#define ROM_END 0x10e00000 37#define ROM_END 0x10e00000
44#define RAMVEC_START 0x00000000
45#define RAMVEC_LENGTH 0x400
46#define RAM_START 0x00020400
47#define RAM_LENGTH 0x7dfc00
48#define RAM_END 0x00800000
49#endif 38#endif
50#ifdef CONFIG_ROMKERNEL 39#ifdef CONFIG_ROMKERNEL
51#define ROMVEC_START 0x10c10000 40#define ROMVEC_START 0x10c10000
@@ -53,11 +42,6 @@
53#define ROM_START 0x10c10400 42#define ROM_START 0x10c10400
54#define ROM_LENGTH 0x1efc00 43#define ROM_LENGTH 0x1efc00
55#define ROM_END 0x10e00000 44#define ROM_END 0x10e00000
56#define RAMVEC_START 0x00000000
57#define RAMVEC_LENGTH 0x400
58#define RAM_START 0x00020000
59#define RAM_LENGTH 0x600000
60#define RAM_END 0x00800000
61#endif 45#endif
62#ifdef CONFIG_HIMEMKERNEL 46#ifdef CONFIG_HIMEMKERNEL
63#define ROMVEC_START 0x00600000 47#define ROMVEC_START 0x00600000
@@ -65,141 +49,28 @@
65#define ROM_START 0x00600400 49#define ROM_START 0x00600400
66#define ROM_LENGTH 0x1efc00 50#define ROM_LENGTH 0x1efc00
67#define ROM_END 0x007f0000 51#define ROM_END 0x007f0000
68#define RAMVEC_START 0x00000000
69#define RAMVEC_LENGTH 0x400
70#define RAM_START 0x00020000
71#define RAM_LENGTH 0x5e0000
72#define RAM_END 0x00600000
73#endif 52#endif
74#endif 53#endif
75 54
76#ifdef CONFIG_DRAGEN2
77#define RAM_START 0x10000
78#define RAM_LENGTH 0x7f0000
79#endif
80
81#ifdef CONFIG_UCQUICC 55#ifdef CONFIG_UCQUICC
82#define ROMVEC_START 0x00000000 56#define ROMVEC_START 0x00000000
83#define ROMVEC_LENGTH 0x404 57#define ROMVEC_LENGTH 0x404
84#define ROM_START 0x00000404 58#define ROM_START 0x00000404
85#define ROM_LENGTH 0x1ff6fc 59#define ROM_LENGTH 0x1ff6fc
86#define ROM_END 0x00200000 60#define ROM_END 0x00200000
87#define RAMVEC_START 0x00200000
88#define RAMVEC_LENGTH 0x404
89#define RAM_START 0x00200404
90#define RAM_LENGTH 0x1ff6fc
91#define RAM_END 0x00400000
92#endif
93
94/*
95 * The standard Arnewsh 5206 board only has 1MiB of ram. Not normally
96 * enough to be useful. Assume the user has fitted something larger,
97 * at least 4MiB in size. No point in not letting the kernel completely
98 * link, it will be obvious if it is too big when they go to load it.
99 */
100#if defined(CONFIG_ARN5206)
101#define RAM_START 0x10000
102#define RAM_LENGTH 0x3f0000
103#endif
104
105/*
106 * The Motorola 5206eLITE board only has 1MiB of static RAM.
107 */
108#if defined(CONFIG_ELITE)
109#define RAM_START 0x30020000
110#define RAM_LENGTH 0xe0000
111#endif
112
113/*
114 * All the Motorola eval boards have the same basic arrangement.
115 * The end of RAM will vary depending on how much ram is fitted,
116 * but this isn't important here, we assume at least 4MiB.
117 */
118#if defined(CONFIG_M5206eC3) || defined(CONFIG_M5249C3) || \
119 defined(CONFIG_M5272C3) || defined(CONFIG_M5307C3) || \
120 defined(CONFIG_ARN5307) || defined(CONFIG_M5407C3) || \
121 defined(CONFIG_M5271EVB) || defined(CONFIG_M5275EVB) || \
122 defined(CONFIG_M5235EVB)
123#define RAM_START 0x20000
124#define RAM_LENGTH 0x3e0000
125#endif
126
127/*
128 * The Freescale 5208EVB board has 32MB of RAM.
129 */
130#if defined(CONFIG_M5208EVB)
131#define RAM_START 0x40020000
132#define RAM_LENGTH 0x01fe0000
133#endif
134
135/*
136 * The senTec COBRA5272 board has nearly the same memory layout as
137 * the M5272C3. We assume 16MiB ram.
138 */
139#if defined(CONFIG_COBRA5272)
140#define RAM_START 0x20000
141#define RAM_LENGTH 0xfe0000
142#endif
143
144#if defined(CONFIG_M5282EVB)
145#define RAM_START 0x10000
146#define RAM_LENGTH 0x3f0000
147#endif
148
149/*
150 * The senTec COBRA5282 board has the same memory layout as the M5282EVB.
151 */
152#if defined(CONFIG_COBRA5282)
153#define RAM_START 0x10000
154#define RAM_LENGTH 0x3f0000
155#endif
156
157
158/*
159 * The EMAC SoM-5282EM module.
160 */
161#if defined(CONFIG_SOM5282EM)
162#define RAM_START 0x10000
163#define RAM_LENGTH 0xff0000
164#endif
165
166
167/*
168 * These flash boot boards use all of ram for operation. Again the
169 * actual memory size is not important here, assume at least 4MiB.
170 * They currently have no support for running in flash.
171 */
172#if defined(CONFIG_NETtel) || defined(CONFIG_eLIA) || \
173 defined(CONFIG_DISKtel) || defined(CONFIG_SECUREEDGEMP3) || \
174 defined(CONFIG_HW_FEITH)
175#define RAM_START 0x400
176#define RAM_LENGTH 0x3ffc00
177#endif
178
179/*
180 * Sneha Boards mimimun memory
181 * The end of RAM will vary depending on how much ram is fitted,
182 * but this isn't important here, we assume at least 4MiB.
183 */
184#if defined(CONFIG_CPU16B)
185#define RAM_START 0x20000
186#define RAM_LENGTH 0x3e0000
187#endif
188
189#if defined(CONFIG_MOD5272)
190#define RAM_START 0x02000000
191#define RAM_LENGTH 0x00800000
192#define RAMVEC_START 0x20000000
193#define RAMVEC_LENGTH 0x00000400
194#endif 61#endif
195 62
196#if defined(CONFIG_RAMKERNEL) 63#if defined(CONFIG_RAMKERNEL)
64#define RAM_START CONFIG_KERNELBASE
65#define RAM_LENGTH (CONFIG_RAMBASE + CONFIG_RAMSIZE - CONFIG_KERNELBASE)
197#define TEXT ram 66#define TEXT ram
198#define DATA ram 67#define DATA ram
199#define INIT ram 68#define INIT ram
200#define BSS ram 69#define BSS ram
201#endif 70#endif
202#if defined(CONFIG_ROMKERNEL) || defined(CONFIG_HIMEMKERNEL) 71#if defined(CONFIG_ROMKERNEL) || defined(CONFIG_HIMEMKERNEL)
72#define RAM_START CONFIG_RAMBASE
73#define RAM_LENGTH CONFIG_RAMSIZE
203#define TEXT rom 74#define TEXT rom
204#define DATA ram 75#define DATA ram
205#define INIT ram 76#define INIT ram
@@ -215,13 +86,7 @@ OUTPUT_ARCH(m68k)
215ENTRY(_start) 86ENTRY(_start)
216 87
217MEMORY { 88MEMORY {
218#ifdef RAMVEC_START
219 ramvec : ORIGIN = RAMVEC_START, LENGTH = RAMVEC_LENGTH
220#endif
221 ram : ORIGIN = RAM_START, LENGTH = RAM_LENGTH 89 ram : ORIGIN = RAM_START, LENGTH = RAM_LENGTH
222#ifdef RAM_END
223 eram : ORIGIN = RAM_END, LENGTH = 0
224#endif
225#ifdef ROM_START 90#ifdef ROM_START
226 romvec : ORIGIN = ROMVEC_START, LENGTH = ROMVEC_LENGTH 91 romvec : ORIGIN = ROMVEC_START, LENGTH = ROMVEC_LENGTH
227 rom : ORIGIN = ROM_START, LENGTH = ROM_LENGTH 92 rom : ORIGIN = ROM_START, LENGTH = ROM_LENGTH
@@ -308,12 +173,6 @@ SECTIONS {
308 __rom_end = . ; 173 __rom_end = . ;
309 } > erom 174 } > erom
310#endif 175#endif
311#ifdef RAMVEC_START
312 . = RAMVEC_START ;
313 .ramvec : {
314 __ramvec = .;
315 } > ramvec
316#endif
317 176
318 .data DATA_ADDR : { 177 .data DATA_ADDR : {
319 . = ALIGN(4); 178 . = ALIGN(4);
@@ -373,12 +232,5 @@ SECTIONS {
373 _ebss = . ; 232 _ebss = . ;
374 } > BSS 233 } > BSS
375 234
376#ifdef RAM_END
377 . = RAM_END ;
378 .eram : {
379 __ramend = . ;
380 _ramend = . ;
381 } > eram
382#endif
383} 235}
384 236
diff --git a/arch/m68knommu/platform/5307/head.S b/arch/m68knommu/platform/5307/head.S
index c30c462b99..1d9eb301d7 100644
--- a/arch/m68knommu/platform/5307/head.S
+++ b/arch/m68knommu/platform/5307/head.S
@@ -3,7 +3,7 @@
3/* 3/*
4 * head.S -- common startup code for ColdFire CPUs. 4 * head.S -- common startup code for ColdFire CPUs.
5 * 5 *
6 * (C) Copyright 1999-2004, Greg Ungerer (gerg@snapgear.com). 6 * (C) Copyright 1999-2006, Greg Ungerer <gerg@snapgear.com>.
7 */ 7 */
8 8
9/*****************************************************************************/ 9/*****************************************************************************/
@@ -19,47 +19,15 @@
19/*****************************************************************************/ 19/*****************************************************************************/
20 20
21/* 21/*
22 * Define fixed memory sizes. Configuration of a fixed memory size 22 * If we don't have a fixed memory size, then lets build in code
23 * overrides everything else. If the user defined a size we just
24 * blindly use it (they know what they are doing right :-)
25 */
26#if defined(CONFIG_RAM32MB)
27#define MEM_SIZE 0x02000000 /* memory size 32Mb */
28#elif defined(CONFIG_RAM16MB)
29#define MEM_SIZE 0x01000000 /* memory size 16Mb */
30#elif defined(CONFIG_RAM8MB)
31#define MEM_SIZE 0x00800000 /* memory size 8Mb */
32#elif defined(CONFIG_RAM4MB)
33#define MEM_SIZE 0x00400000 /* memory size 4Mb */
34#elif defined(CONFIG_RAM1MB)
35#define MEM_SIZE 0x00100000 /* memory size 1Mb */
36#endif
37
38/*
39 * Memory size exceptions for special cases. Some boards may be set
40 * for auto memory sizing, but we can't do it that way for some reason.
41 * For example the 5206eLITE board has static RAM, and auto-detecting
42 * the SDRAM will do you no good at all. Same goes for the MOD5272.
43 */
44#ifdef CONFIG_RAMAUTO
45#if defined(CONFIG_M5206eLITE)
46#define MEM_SIZE 0x00100000 /* 1MiB default memory */
47#endif
48#if defined(CONFIG_MOD5272)
49#define MEM_SIZE 0x00800000 /* 8MiB default memory */
50#endif
51#endif /* CONFIG_RAMAUTO */
52
53
54/*
55 * If we don't have a fixed memory size now, then lets build in code
56 * to auto detect the DRAM size. Obviously this is the prefered 23 * to auto detect the DRAM size. Obviously this is the prefered
57 * method, and should work for most boards (it won't work for those 24 * method, and should work for most boards. It won't work for those
58 * that do not have their RAM starting at address 0). 25 * that do not have their RAM starting at address 0, and it only
26 * works on SDRAM (not boards fitted with SRAM).
59 */ 27 */
60#if defined(MEM_SIZE) 28#if CONFIG_RAMSIZE != 0
61.macro GET_MEM_SIZE 29.macro GET_MEM_SIZE
62 movel #MEM_SIZE,%d0 /* hard coded memory size */ 30 movel #CONFIG_RAMSIZE,%d0 /* hard coded memory size */
63.endm 31.endm
64 32
65#elif defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \ 33#elif defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \
@@ -98,37 +66,7 @@
98.endm 66.endm
99 67
100#else 68#else
101#error "ERROR: I don't know how to determine your boards memory size?" 69#error "ERROR: I don't know how to probe your boards memory size?"
102#endif
103
104
105/*
106 * Most ColdFire boards have their DRAM starting at address 0.
107 * Notable exception is the 5206eLITE board, another is the MOD5272.
108 */
109#if defined(CONFIG_M5206eLITE)
110#define MEM_BASE 0x30000000
111#endif
112#if defined(CONFIG_MOD5272)
113#define MEM_BASE 0x02000000
114#define VBR_BASE 0x20000000 /* vectors in SRAM */
115#endif
116#if defined(CONFIG_M5208EVB)
117#define MEM_BASE 0x40000000
118#endif
119
120#ifndef MEM_BASE
121#define MEM_BASE 0x00000000 /* memory base at address 0 */
122#endif
123
124/*
125 * The default location for the vectors is at the base of RAM.
126 * Some boards might like to use internal SRAM or something like
127 * that. If no board specific header defines an alternative then
128 * use the base of RAM.
129 */
130#ifndef VBR_BASE
131#define VBR_BASE MEM_BASE /* vector address */
132#endif 70#endif
133 71
134/*****************************************************************************/ 72/*****************************************************************************/
@@ -191,11 +129,11 @@ _start:
191 * Create basic memory configuration. Set VBR accordingly, 129 * Create basic memory configuration. Set VBR accordingly,
192 * and size memory. 130 * and size memory.
193 */ 131 */
194 movel #VBR_BASE,%a7 132 movel #CONFIG_VECTORBASE,%a7
195 movec %a7,%VBR /* set vectors addr */ 133 movec %a7,%VBR /* set vectors addr */
196 movel %a7,_ramvec 134 movel %a7,_ramvec
197 135
198 movel #MEM_BASE,%a7 /* mark the base of RAM */ 136 movel #CONFIG_RAMBASE,%a7 /* mark the base of RAM */
199 movel %a7,_rambase 137 movel %a7,_rambase
200 138
201 GET_MEM_SIZE /* macro code determines size */ 139 GET_MEM_SIZE /* macro code determines size */
diff --git a/arch/m68knommu/platform/68328/head-pilot.S b/arch/m68knommu/platform/68328/head-pilot.S
index c46775fe04..46b3604f99 100644
--- a/arch/m68knommu/platform/68328/head-pilot.S
+++ b/arch/m68knommu/platform/68328/head-pilot.S
@@ -21,7 +21,6 @@
21.global _start 21.global _start
22 22
23.global _rambase 23.global _rambase
24.global __ramvec
25.global _ramvec 24.global _ramvec
26.global _ramstart 25.global _ramstart
27.global _ramend 26.global _ramend
@@ -121,7 +120,7 @@ L0:
121 DBG_PUTC('B') 120 DBG_PUTC('B')
122 121
123 /* Copy command line from beginning of RAM (+16) to end of bss */ 122 /* Copy command line from beginning of RAM (+16) to end of bss */
124 movel #__ramvec, %d7 123 movel #CONFIG_VECTORBASE, %d7
125 addl #16, %d7 124 addl #16, %d7
126 moveal %d7, %a0 125 moveal %d7, %a0
127 moveal #_ebss, %a1 126 moveal #_ebss, %a1
diff --git a/arch/m68knommu/platform/68328/head-ram.S b/arch/m68knommu/platform/68328/head-ram.S
index 6bdc9bce43..e8dc9241ff 100644
--- a/arch/m68knommu/platform/68328/head-ram.S
+++ b/arch/m68knommu/platform/68328/head-ram.S
@@ -1,10 +1,7 @@
1#include <linux/config.h> 1#include <linux/config.h>
2 2
3 .global __main 3 .global __main
4 .global __ram_start
5 .global __ram_end
6 .global __rom_start 4 .global __rom_start
7 .global __rom_end
8 5
9 .global _rambase 6 .global _rambase
10 .global _ramstart 7 .global _ramstart
@@ -12,6 +9,7 @@
12 .global splash_bits 9 .global splash_bits
13 .global _start 10 .global _start
14 .global _stext 11 .global _stext
12 .global _edata
15 13
16#define DEBUG 14#define DEBUG
17#define ROM_OFFSET 0x10C00000 15#define ROM_OFFSET 0x10C00000
@@ -73,7 +71,7 @@ pclp1:
73#ifdef CONFIG_RELOCATE 71#ifdef CONFIG_RELOCATE
74 /* Copy me to RAM */ 72 /* Copy me to RAM */
75 moveal #__rom_start, %a0 73 moveal #__rom_start, %a0
76 moveal #__ram_start, %a1 74 moveal #_stext, %a1
77 moveal #_edata, %a2 75 moveal #_edata, %a2
78 76
79 /* Copy %a0 to %a1 until %a1 == %a2 */ 77 /* Copy %a0 to %a1 until %a1 == %a2 */
diff --git a/arch/m68knommu/platform/68328/head-rom.S b/arch/m68knommu/platform/68328/head-rom.S
index 2b448a2970..234430b955 100644
--- a/arch/m68knommu/platform/68328/head-rom.S
+++ b/arch/m68knommu/platform/68328/head-rom.S
@@ -28,6 +28,8 @@ _ramstart:
28_ramend: 28_ramend:
29.long 0 29.long 0
30 30
31#define RAMEND (CONFIG_RAMBASE + CONFIG_RAMSIZE)
32
31#ifdef CONFIG_INIT_LCD 33#ifdef CONFIG_INIT_LCD
32splash_bits: 34splash_bits:
33#include "bootlogo.rh" 35#include "bootlogo.rh"
@@ -48,7 +50,7 @@ _stext: movew #0x2700,%sr
48 moveb #0x81, 0xfffffA27 /* LCKCON */ 50 moveb #0x81, 0xfffffA27 /* LCKCON */
49 movew #0xff00, 0xfffff412 /* LCD pins */ 51 movew #0xff00, 0xfffff412 /* LCD pins */
50#endif 52#endif
51 moveal #__ramend-CONFIG_MEMORY_RESERVE*0x100000 - 0x10, %sp 53 moveal #RAMEND-CONFIG_MEMORY_RESERVE*0x100000 - 0x10, %sp
52 movew #32767, %d0 /* PLL settle wait loop */ 54 movew #32767, %d0 /* PLL settle wait loop */
531: subq #1, %d0 551: subq #1, %d0
54 bne 1b 56 bne 1b
@@ -73,13 +75,13 @@ _stext: movew #0x2700,%sr
73 bhi 1b 75 bhi 1b
74 76
75 movel #_sdata, %d0 77 movel #_sdata, %d0
76 movel %d0, _rambase 78 movel %d0, _rambase
77 movel #_ebss, %d0 79 movel #_ebss, %d0
78 movel %d0, _ramstart 80 movel %d0, _ramstart
79 movel #__ramend-CONFIG_MEMORY_RESERVE*0x100000, %d0 81 movel #RAMEND-CONFIG_MEMORY_RESERVE*0x100000, %d0
80 movel %d0, _ramend 82 movel %d0, _ramend
81 movel #__ramvec, %d0 83 movel #CONFIG_VECTORBASE, %d0
82 movel %d0, _ramvec 84 movel %d0, _ramvec
83 85
84/* 86/*
85 * load the current task pointer and stack 87 * load the current task pointer and stack
diff --git a/arch/m68knommu/platform/68360/head-ram.S b/arch/m68knommu/platform/68360/head-ram.S
index a5c639a51e..f497713a4e 100644
--- a/arch/m68knommu/platform/68360/head-ram.S
+++ b/arch/m68knommu/platform/68360/head-ram.S
@@ -18,7 +18,6 @@
18.global _start 18.global _start
19 19
20.global _rambase 20.global _rambase
21.global __ramvec
22.global _ramvec 21.global _ramvec
23.global _ramstart 22.global _ramstart
24.global _ramend 23.global _ramend
@@ -26,6 +25,8 @@
26.global _quicc_base 25.global _quicc_base
27.global _periph_base 26.global _periph_base
28 27
28#define RAMEND (CONFIG_RAMBASE + CONFIG_RAMSIZE)
29
29#define REGB 0x1000 30#define REGB 0x1000
30#define PEPAR (_dprbase + REGB + 0x0016) 31#define PEPAR (_dprbase + REGB + 0x0016)
31#define GMR (_dprbase + REGB + 0x0040) 32#define GMR (_dprbase + REGB + 0x0040)
@@ -103,7 +104,7 @@ _stext:
103 nop 104 nop
104 ori.w #MCU_DISABLE_INTRPTS, %sr /* disable interrupts: */ 105 ori.w #MCU_DISABLE_INTRPTS, %sr /* disable interrupts: */
105 /* We should not need to setup the boot stack the reset should do it. */ 106 /* We should not need to setup the boot stack the reset should do it. */
106 movea.l #__ramend, %sp /*set up stack at the end of DRAM:*/ 107 movea.l #RAMEND, %sp /*set up stack at the end of DRAM:*/
107 108
108set_mbar_register: 109set_mbar_register:
109 moveq.l #0x07, %d1 /* Setup MBAR */ 110 moveq.l #0x07, %d1 /* Setup MBAR */
@@ -163,7 +164,7 @@ configure_memory_controller:
163 move.l %d0, GMR 164 move.l %d0, GMR
164 165
165configure_chip_select_0: 166configure_chip_select_0:
166 move.l #__ramend, %d0 167 move.l #RAMEND, %d0
167 subi.l #__ramstart, %d0 168 subi.l #__ramstart, %d0
168 subq.l #0x01, %d0 169 subq.l #0x01, %d0
169 eori.l #SIM_OR_MASK, %d0 170 eori.l #SIM_OR_MASK, %d0
@@ -234,16 +235,10 @@ store_ram_size:
234 /* Set ram size information */ 235 /* Set ram size information */
235 move.l #_sdata, _rambase 236 move.l #_sdata, _rambase
236 move.l #_ebss, _ramstart 237 move.l #_ebss, _ramstart
237 move.l #__ramend, %d0 238 move.l #RAMEND, %d0
238 sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/ 239 sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/
239 move.l %d0, _ramend /* Different from __ramend.*/ 240 move.l %d0, _ramend /* Different from RAMEND.*/
240 241
241store_flash_size:
242 /* Set rom size information */
243 move.l #__rom_end, %d0
244 sub.l #__rom_start, %d0
245 move.l %d0, rom_length
246
247 pea 0 242 pea 0
248 pea env 243 pea env
249 pea %sp@(4) 244 pea %sp@(4)
@@ -286,7 +281,7 @@ _dprbase:
286 */ 281 */
287 282
288.section ".data.initvect","awx" 283.section ".data.initvect","awx"
289 .long __ramend /* Reset: Initial Stack Pointer - 0. */ 284 .long RAMEND /* Reset: Initial Stack Pointer - 0. */
290 .long _start /* Reset: Initial Program Counter - 1. */ 285 .long _start /* Reset: Initial Program Counter - 1. */
291 .long buserr /* Bus Error - 2. */ 286 .long buserr /* Bus Error - 2. */
292 .long trap /* Address Error - 3. */ 287 .long trap /* Address Error - 3. */
diff --git a/arch/m68knommu/platform/68360/head-rom.S b/arch/m68knommu/platform/68360/head-rom.S
index 0da357a4cf..2d28c3e19a 100644
--- a/arch/m68knommu/platform/68360/head-rom.S
+++ b/arch/m68knommu/platform/68360/head-rom.S
@@ -18,7 +18,6 @@
18.global _start 18.global _start
19 19
20.global _rambase 20.global _rambase
21.global __ramvec
22.global _ramvec 21.global _ramvec
23.global _ramstart 22.global _ramstart
24.global _ramend 23.global _ramend
@@ -26,6 +25,8 @@
26.global _quicc_base 25.global _quicc_base
27.global _periph_base 26.global _periph_base
28 27
28#define RAMEND (CONFIG_RAMBASE + CONFIG_RAMSIZE)
29
29#define REGB 0x1000 30#define REGB 0x1000
30#define PEPAR (_dprbase + REGB + 0x0016) 31#define PEPAR (_dprbase + REGB + 0x0016)
31#define GMR (_dprbase + REGB + 0x0040) 32#define GMR (_dprbase + REGB + 0x0040)
@@ -115,7 +116,7 @@ _stext:
115 nop 116 nop
116 ori.w #MCU_DISABLE_INTRPTS, %sr /* disable interrupts: */ 117 ori.w #MCU_DISABLE_INTRPTS, %sr /* disable interrupts: */
117 /* We should not need to setup the boot stack the reset should do it. */ 118 /* We should not need to setup the boot stack the reset should do it. */
118 movea.l #__ramend, %sp /* set up stack at the end of DRAM:*/ 119 movea.l #RAMEND, %sp /* set up stack at the end of DRAM:*/
119 120
120 121
121set_mbar_register: 122set_mbar_register:
@@ -245,16 +246,10 @@ store_ram_size:
245 /* Set ram size information */ 246 /* Set ram size information */
246 move.l #_sdata, _rambase 247 move.l #_sdata, _rambase
247 move.l #_ebss, _ramstart 248 move.l #_ebss, _ramstart
248 move.l #__ramend, %d0 249 move.l #RAMEND, %d0
249 sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/ 250 sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/
250 move.l %d0, _ramend /* Different from __ramend.*/ 251 move.l %d0, _ramend /* Different from RAMEND.*/
251 252
252store_flash_size:
253 /* Set rom size information */
254 move.l #__rom_end, %d0
255 sub.l #__rom_start, %d0
256 move.l %d0, rom_length
257
258 pea 0 253 pea 0
259 pea env 254 pea env
260 pea %sp@(4) 255 pea %sp@(4)
@@ -298,7 +293,7 @@ _dprbase:
298 */ 293 */
299 294
300.section ".data.initvect","awx" 295.section ".data.initvect","awx"
301 .long __ramend /* Reset: Initial Stack Pointer - 0. */ 296 .long RAMEND /* Reset: Initial Stack Pointer - 0. */
302 .long _start /* Reset: Initial Program Counter - 1. */ 297 .long _start /* Reset: Initial Program Counter - 1. */
303 .long buserr /* Bus Error - 2. */ 298 .long buserr /* Bus Error - 2. */
304 .long trap /* Address Error - 3. */ 299 .long trap /* Address Error - 3. */
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 298f82fe84..9096a5ea42 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -446,7 +446,7 @@ static int __init topology_init(void)
446 int ret; 446 int ret;
447 447
448 for_each_present_cpu(cpu) { 448 for_each_present_cpu(cpu) {
449 ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL); 449 ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu);
450 if (ret) 450 if (ret)
451 printk(KERN_WARNING "topology_init: register_cpu %d " 451 printk(KERN_WARNING "topology_init: register_cpu %d "
452 "failed (%d)\n", cpu, ret); 452 "failed (%d)\n", cpu, ret);
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 2e8e52c135..70cf09afdf 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -367,7 +367,7 @@ void mipsmt_prepare_cpus(void)
367 dvpe(); 367 dvpe();
368 dmt(); 368 dmt();
369 369
370 freeIPIq.lock = SPIN_LOCK_UNLOCKED; 370 spin_lock_init(&freeIPIq.lock);
371 371
372 /* 372 /*
373 * We probably don't have as many VPEs as we do SMP "CPUs", 373 * We probably don't have as many VPEs as we do SMP "CPUs",
@@ -375,7 +375,7 @@ void mipsmt_prepare_cpus(void)
375 */ 375 */
376 for (i=0; i<NR_CPUS; i++) { 376 for (i=0; i<NR_CPUS; i++) {
377 IPIQ[i].head = IPIQ[i].tail = NULL; 377 IPIQ[i].head = IPIQ[i].tail = NULL;
378 IPIQ[i].lock = SPIN_LOCK_UNLOCKED; 378 spin_lock_init(&IPIQ[i].lock);
379 IPIQ[i].depth = 0; 379 IPIQ[i].depth = 0;
380 ipi_timer_latch[i] = 0; 380 ipi_timer_latch[i] = 0;
381 } 381 }
diff --git a/arch/mips/momentum/ocelot_g/gt-irq.c b/arch/mips/momentum/ocelot_g/gt-irq.c
index e5eceed1be..8bd9b844fa 100644
--- a/arch/mips/momentum/ocelot_g/gt-irq.c
+++ b/arch/mips/momentum/ocelot_g/gt-irq.c
@@ -59,7 +59,7 @@ void hook_irq_handler(int int_cause, int bit_num, void *isr_ptr)
59 * bit_num - Indicates which bit number in the cause register 59 * bit_num - Indicates which bit number in the cause register
60 * 60 *
61 * Outputs : 61 * Outputs :
62 * 1 if succesful, 0 if failure 62 * 1 if successful, 0 if failure
63 */ 63 */
64int enable_galileo_irq(int int_cause, int bit_num) 64int enable_galileo_irq(int int_cause, int bit_num)
65{ 65{
@@ -83,7 +83,7 @@ int enable_galileo_irq(int int_cause, int bit_num)
83 * bit_num - Indicates which bit number in the cause register 83 * bit_num - Indicates which bit number in the cause register
84 * 84 *
85 * Outputs : 85 * Outputs :
86 * 1 if succesful, 0 if failure 86 * 1 if successful, 0 if failure
87 */ 87 */
88int disable_galileo_irq(int int_cause, int bit_num) 88int disable_galileo_irq(int int_cause, int bit_num)
89{ 89{
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index c31e4cff64..65eb55400d 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -38,7 +38,7 @@ static int op_mips_create_files(struct super_block * sb, struct dentry * root)
38 38
39 for (i = 0; i < model->num_counters; ++i) { 39 for (i = 0; i < model->num_counters; ++i) {
40 struct dentry *dir; 40 struct dentry *dir;
41 char buf[3]; 41 char buf[4];
42 42
43 snprintf(buf, sizeof buf, "%d", i); 43 snprintf(buf, sizeof buf, "%d", i);
44 dir = oprofilefs_mkdir(sb, root, buf); 44 dir = oprofilefs_mkdir(sb, root, buf);
diff --git a/arch/mips/sgi-ip22/ip22-reset.c b/arch/mips/sgi-ip22/ip22-reset.c
index a9c58e067b..8134220ed6 100644
--- a/arch/mips/sgi-ip22/ip22-reset.c
+++ b/arch/mips/sgi-ip22/ip22-reset.c
@@ -34,7 +34,7 @@
34#define POWERDOWN_TIMEOUT 120 34#define POWERDOWN_TIMEOUT 120
35 35
36/* 36/*
37 * Blink frequency during reboot grace period and when paniced. 37 * Blink frequency during reboot grace period and when panicked.
38 */ 38 */
39#define POWERDOWN_FREQ (HZ / 4) 39#define POWERDOWN_FREQ (HZ / 4)
40#define PANIC_FREQ (HZ / 8) 40#define PANIC_FREQ (HZ / 8)
diff --git a/arch/mips/sgi-ip32/ip32-reset.c b/arch/mips/sgi-ip32/ip32-reset.c
index ab9d9cef08..79ddb46056 100644
--- a/arch/mips/sgi-ip32/ip32-reset.c
+++ b/arch/mips/sgi-ip32/ip32-reset.c
@@ -28,13 +28,13 @@
28 28
29#define POWERDOWN_TIMEOUT 120 29#define POWERDOWN_TIMEOUT 120
30/* 30/*
31 * Blink frequency during reboot grace period and when paniced. 31 * Blink frequency during reboot grace period and when panicked.
32 */ 32 */
33#define POWERDOWN_FREQ (HZ / 4) 33#define POWERDOWN_FREQ (HZ / 4)
34#define PANIC_FREQ (HZ / 8) 34#define PANIC_FREQ (HZ / 8)
35 35
36static struct timer_list power_timer, blink_timer, debounce_timer; 36static struct timer_list power_timer, blink_timer, debounce_timer;
37static int has_paniced, shuting_down; 37static int has_panicked, shuting_down;
38 38
39static void ip32_machine_restart(char *command) __attribute__((noreturn)); 39static void ip32_machine_restart(char *command) __attribute__((noreturn));
40static void ip32_machine_halt(void) __attribute__((noreturn)); 40static void ip32_machine_halt(void) __attribute__((noreturn));
@@ -109,7 +109,7 @@ static void debounce(unsigned long data)
109 } 109 }
110 CMOS_WRITE(reg_a & ~DS_REGA_DV0, RTC_REG_A); 110 CMOS_WRITE(reg_a & ~DS_REGA_DV0, RTC_REG_A);
111 111
112 if (has_paniced) 112 if (has_panicked)
113 ip32_machine_restart(NULL); 113 ip32_machine_restart(NULL);
114 114
115 enable_irq(MACEISA_RTC_IRQ); 115 enable_irq(MACEISA_RTC_IRQ);
@@ -117,7 +117,7 @@ static void debounce(unsigned long data)
117 117
118static inline void ip32_power_button(void) 118static inline void ip32_power_button(void)
119{ 119{
120 if (has_paniced) 120 if (has_panicked)
121 return; 121 return;
122 122
123 if (shuting_down || kill_proc(1, SIGINT, 1)) { 123 if (shuting_down || kill_proc(1, SIGINT, 1)) {
@@ -161,9 +161,9 @@ static int panic_event(struct notifier_block *this, unsigned long event,
161{ 161{
162 unsigned long led; 162 unsigned long led;
163 163
164 if (has_paniced) 164 if (has_panicked)
165 return NOTIFY_DONE; 165 return NOTIFY_DONE;
166 has_paniced = 1; 166 has_panicked = 1;
167 167
168 /* turn off the green LED */ 168 /* turn off the green LED */
169 led = mace->perif.ctrl.misc | MACEISA_LED_GREEN; 169 led = mace->perif.ctrl.misc | MACEISA_LED_GREEN;
diff --git a/arch/parisc/kernel/topology.c b/arch/parisc/kernel/topology.c
index 3ba040050e..068b20d822 100644
--- a/arch/parisc/kernel/topology.c
+++ b/arch/parisc/kernel/topology.c
@@ -26,11 +26,10 @@ static struct cpu cpu_devices[NR_CPUS] __read_mostly;
26 26
27static int __init topology_init(void) 27static int __init topology_init(void)
28{ 28{
29 struct node *parent = NULL;
30 int num; 29 int num;
31 30
32 for_each_present_cpu(num) { 31 for_each_present_cpu(num) {
33 register_cpu(&cpu_devices[num], num, parent); 32 register_cpu(&cpu_devices[num], num);
34 } 33 }
35 return 0; 34 return 0;
36} 35}
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index dbcb85994f..e253a45dcf 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -179,7 +179,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
179 179
180 /* 180 /*
181 * This function is only called after the system 181 * This function is only called after the system
182 * has paniced or is otherwise in a critical state. 182 * has panicked or is otherwise in a critical state.
183 * The minimum amount of code to allow a kexec'd kernel 183 * The minimum amount of code to allow a kexec'd kernel
184 * to run successfully needs to happen here. 184 * to run successfully needs to happen here.
185 * 185 *
diff --git a/arch/powerpc/kernel/machine_kexec_32.c b/arch/powerpc/kernel/machine_kexec_32.c
index 443606134d..cbaa341967 100644
--- a/arch/powerpc/kernel/machine_kexec_32.c
+++ b/arch/powerpc/kernel/machine_kexec_32.c
@@ -30,8 +30,8 @@ typedef NORET_TYPE void (*relocate_new_kernel_t)(
30 */ 30 */
31void default_machine_kexec(struct kimage *image) 31void default_machine_kexec(struct kimage *image)
32{ 32{
33 const extern unsigned char relocate_new_kernel[]; 33 extern const unsigned char relocate_new_kernel[];
34 const extern unsigned int relocate_new_kernel_size; 34 extern const unsigned int relocate_new_kernel_size;
35 unsigned long page_list; 35 unsigned long page_list;
36 unsigned long reboot_code_buffer, reboot_code_buffer_phys; 36 unsigned long reboot_code_buffer, reboot_code_buffer_phys;
37 relocate_new_kernel_t rnk; 37 relocate_new_kernel_t rnk;
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index e5a4481244..0932a62a1c 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -215,7 +215,7 @@ int __init ppc_init(void)
215 215
216 /* register CPU devices */ 216 /* register CPU devices */
217 for_each_possible_cpu(i) 217 for_each_possible_cpu(i)
218 register_cpu(&cpu_devices[i], i, NULL); 218 register_cpu(&cpu_devices[i], i);
219 219
220 /* call platform init */ 220 /* call platform init */
221 if (ppc_md.init != NULL) { 221 if (ppc_md.init != NULL) {
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 5bc2585c80..4662b580ef 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -279,7 +279,7 @@ static void unregister_cpu_online(unsigned int cpu)
279} 279}
280#endif /* CONFIG_HOTPLUG_CPU */ 280#endif /* CONFIG_HOTPLUG_CPU */
281 281
282static int sysfs_cpu_notify(struct notifier_block *self, 282static int __devinit sysfs_cpu_notify(struct notifier_block *self,
283 unsigned long action, void *hcpu) 283 unsigned long action, void *hcpu)
284{ 284{
285 unsigned int cpu = (unsigned int)(long)hcpu; 285 unsigned int cpu = (unsigned int)(long)hcpu;
@@ -297,30 +297,19 @@ static int sysfs_cpu_notify(struct notifier_block *self,
297 return NOTIFY_OK; 297 return NOTIFY_OK;
298} 298}
299 299
300static struct notifier_block sysfs_cpu_nb = { 300static struct notifier_block __devinitdata sysfs_cpu_nb = {
301 .notifier_call = sysfs_cpu_notify, 301 .notifier_call = sysfs_cpu_notify,
302}; 302};
303 303
304/* NUMA stuff */ 304/* NUMA stuff */
305 305
306#ifdef CONFIG_NUMA 306#ifdef CONFIG_NUMA
307static struct node node_devices[MAX_NUMNODES];
308
309static void register_nodes(void) 307static void register_nodes(void)
310{ 308{
311 int i; 309 int i;
312 310
313 for (i = 0; i < MAX_NUMNODES; i++) { 311 for (i = 0; i < MAX_NUMNODES; i++)
314 if (node_online(i)) { 312 register_one_node(i);
315 int p_node = parent_node(i);
316 struct node *parent = NULL;
317
318 if (p_node != i)
319 parent = &node_devices[p_node];
320
321 register_node(&node_devices[i], i, parent);
322 }
323 }
324} 313}
325 314
326int sysfs_add_device_to_node(struct sys_device *dev, int nid) 315int sysfs_add_device_to_node(struct sys_device *dev, int nid)
@@ -359,23 +348,13 @@ static SYSDEV_ATTR(physical_id, 0444, show_physical_id, NULL);
359static int __init topology_init(void) 348static int __init topology_init(void)
360{ 349{
361 int cpu; 350 int cpu;
362 struct node *parent = NULL;
363 351
364 register_nodes(); 352 register_nodes();
365
366 register_cpu_notifier(&sysfs_cpu_nb); 353 register_cpu_notifier(&sysfs_cpu_nb);
367 354
368 for_each_possible_cpu(cpu) { 355 for_each_possible_cpu(cpu) {
369 struct cpu *c = &per_cpu(cpu_devices, cpu); 356 struct cpu *c = &per_cpu(cpu_devices, cpu);
370 357
371#ifdef CONFIG_NUMA
372 /* The node to which a cpu belongs can't be known
373 * until the cpu is made present.
374 */
375 parent = NULL;
376 if (cpu_present(cpu))
377 parent = &node_devices[cpu_to_node(cpu)];
378#endif
379 /* 358 /*
380 * For now, we just see if the system supports making 359 * For now, we just see if the system supports making
381 * the RTAS calls for CPU hotplug. But, there may be a 360 * the RTAS calls for CPU hotplug. But, there may be a
@@ -387,7 +366,7 @@ static int __init topology_init(void)
387 c->no_control = 1; 366 c->no_control = 1;
388 367
389 if (cpu_online(cpu) || (c->no_control == 0)) { 368 if (cpu_online(cpu) || (c->no_control == 0)) {
390 register_cpu(c, cpu, parent); 369 register_cpu(c, cpu);
391 370
392 sysdev_create_file(&c->sysdev, &attr_physical_id); 371 sysdev_create_file(&c->sysdev, &attr_physical_id);
393 } 372 }
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index d20907561f..7dd5dab789 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -102,7 +102,7 @@ EXPORT_SYMBOL(tb_ticks_per_sec); /* for cputime_t conversions */
102u64 tb_to_xs; 102u64 tb_to_xs;
103unsigned tb_to_us; 103unsigned tb_to_us;
104 104
105#define TICKLEN_SCALE (SHIFT_SCALE - 10) 105#define TICKLEN_SCALE TICK_LENGTH_SHIFT
106u64 last_tick_len; /* units are ns / 2^TICKLEN_SCALE */ 106u64 last_tick_len; /* units are ns / 2^TICKLEN_SCALE */
107u64 ticklen_to_xs; /* 0.64 fraction */ 107u64 ticklen_to_xs; /* 0.64 fraction */
108 108
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index fdbba4206d..a0a9e1e006 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -40,6 +40,40 @@
40#include <asm/kdebug.h> 40#include <asm/kdebug.h>
41#include <asm/siginfo.h> 41#include <asm/siginfo.h>
42 42
43#ifdef CONFIG_KPROBES
44ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
45
46/* Hook to register for page fault notifications */
47int register_page_fault_notifier(struct notifier_block *nb)
48{
49 return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
50}
51
52int unregister_page_fault_notifier(struct notifier_block *nb)
53{
54 return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
55}
56
57static inline int notify_page_fault(enum die_val val, const char *str,
58 struct pt_regs *regs, long err, int trap, int sig)
59{
60 struct die_args args = {
61 .regs = regs,
62 .str = str,
63 .err = err,
64 .trapnr = trap,
65 .signr = sig
66 };
67 return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
68}
69#else
70static inline int notify_page_fault(enum die_val val, const char *str,
71 struct pt_regs *regs, long err, int trap, int sig)
72{
73 return NOTIFY_DONE;
74}
75#endif
76
43/* 77/*
44 * Check whether the instruction at regs->nip is a store using 78 * Check whether the instruction at regs->nip is a store using
45 * an update addressing form which will update r1. 79 * an update addressing form which will update r1.
@@ -142,7 +176,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
142 is_write = error_code & ESR_DST; 176 is_write = error_code & ESR_DST;
143#endif /* CONFIG_4xx || CONFIG_BOOKE */ 177#endif /* CONFIG_4xx || CONFIG_BOOKE */
144 178
145 if (notify_die(DIE_PAGE_FAULT, "page_fault", regs, error_code, 179 if (notify_page_fault(DIE_PAGE_FAULT, "page_fault", regs, error_code,
146 11, SIGSEGV) == NOTIFY_STOP) 180 11, SIGSEGV) == NOTIFY_STOP)
147 return 0; 181 return 0;
148 182
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 9e30f968c1..d454caada2 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -41,6 +41,7 @@
41#include <linux/idr.h> 41#include <linux/idr.h>
42#include <linux/nodemask.h> 42#include <linux/nodemask.h>
43#include <linux/module.h> 43#include <linux/module.h>
44#include <linux/poison.h>
44 45
45#include <asm/pgalloc.h> 46#include <asm/pgalloc.h>
46#include <asm/page.h> 47#include <asm/page.h>
@@ -90,7 +91,7 @@ void free_initmem(void)
90 91
91 addr = (unsigned long)__init_begin; 92 addr = (unsigned long)__init_begin;
92 for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) { 93 for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
93 memset((void *)addr, 0xcc, PAGE_SIZE); 94 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
94 ClearPageReserved(virt_to_page(addr)); 95 ClearPageReserved(virt_to_page(addr));
95 init_page_count(virt_to_page(addr)); 96 init_page_count(virt_to_page(addr));
96 free_page(addr); 97 free_page(addr);
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 69f3b9a20b..089d939a0b 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -114,15 +114,20 @@ void online_page(struct page *page)
114 num_physpages++; 114 num_physpages++;
115} 115}
116 116
117int __devinit add_memory(u64 start, u64 size) 117#ifdef CONFIG_NUMA
118int memory_add_physaddr_to_nid(u64 start)
119{
120 return hot_add_scn_to_nid(start);
121}
122#endif
123
124int __devinit arch_add_memory(int nid, u64 start, u64 size)
118{ 125{
119 struct pglist_data *pgdata; 126 struct pglist_data *pgdata;
120 struct zone *zone; 127 struct zone *zone;
121 int nid;
122 unsigned long start_pfn = start >> PAGE_SHIFT; 128 unsigned long start_pfn = start >> PAGE_SHIFT;
123 unsigned long nr_pages = size >> PAGE_SHIFT; 129 unsigned long nr_pages = size >> PAGE_SHIFT;
124 130
125 nid = hot_add_scn_to_nid(start);
126 pgdata = NODE_DATA(nid); 131 pgdata = NODE_DATA(nid);
127 132
128 start = (unsigned long)__va(start); 133 start = (unsigned long)__va(start);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index aa98cb3b59..fbe23933f7 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -334,7 +334,7 @@ out:
334 return nid; 334 return nid;
335} 335}
336 336
337static int cpu_numa_callback(struct notifier_block *nfb, 337static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
338 unsigned long action, 338 unsigned long action,
339 void *hcpu) 339 void *hcpu)
340{ 340{
@@ -609,14 +609,15 @@ static void __init *careful_allocation(int nid, unsigned long size,
609 return (void *)ret; 609 return (void *)ret;
610} 610}
611 611
612static struct notifier_block __cpuinitdata ppc64_numa_nb = {
613 .notifier_call = cpu_numa_callback,
614 .priority = 1 /* Must run before sched domains notifier. */
615};
616
612void __init do_init_bootmem(void) 617void __init do_init_bootmem(void)
613{ 618{
614 int nid; 619 int nid;
615 unsigned int i; 620 unsigned int i;
616 static struct notifier_block ppc64_numa_nb = {
617 .notifier_call = cpu_numa_callback,
618 .priority = 1 /* Must run before sched domains notifier. */
619 };
620 621
621 min_low_pfn = 0; 622 min_low_pfn = 0;
622 max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT; 623 max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c
index 27ad56bd22..fd0bbbe7a4 100644
--- a/arch/powerpc/oprofile/common.c
+++ b/arch/powerpc/oprofile/common.c
@@ -94,7 +94,7 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
94 94
95 for (i = 0; i < model->num_counters; ++i) { 95 for (i = 0; i < model->num_counters; ++i) {
96 struct dentry *dir; 96 struct dentry *dir;
97 char buf[3]; 97 char buf[4];
98 98
99 snprintf(buf, sizeof buf, "%d", i); 99 snprintf(buf, sizeof buf, "%d", i);
100 dir = oprofilefs_mkdir(sb, root, buf); 100 dir = oprofilefs_mkdir(sb, root, buf);
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index b30e55dab8..a656d810a4 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -2100,7 +2100,7 @@ EXPORT_SYMBOL_GPL(spu_save);
2100 * @spu: pointer to SPU iomem structure. 2100 * @spu: pointer to SPU iomem structure.
2101 * 2101 *
2102 * Perform harvest + restore, as we may not be coming 2102 * Perform harvest + restore, as we may not be coming
2103 * from a previous succesful save operation, and the 2103 * from a previous successful save operation, and the
2104 * hardware state is unknown. 2104 * hardware state is unknown.
2105 */ 2105 */
2106int spu_restore(struct spu_state *new, struct spu *spu) 2106int spu_restore(struct spu_state *new, struct spu *spu)
@@ -2203,7 +2203,7 @@ void spu_init_csa(struct spu_state *csa)
2203 2203
2204 memset(lscsa, 0, sizeof(struct spu_lscsa)); 2204 memset(lscsa, 0, sizeof(struct spu_lscsa));
2205 csa->lscsa = lscsa; 2205 csa->lscsa = lscsa;
2206 csa->register_lock = SPIN_LOCK_UNLOCKED; 2206 spin_lock_init(&csa->register_lock);
2207 2207
2208 /* Set LS pages reserved to allow for user-space mapping. */ 2208 /* Set LS pages reserved to allow for user-space mapping. */
2209 for (p = lscsa->ls; p < lscsa->ls + LS_SIZE; p += PAGE_SIZE) 2209 for (p = lscsa->ls; p < lscsa->ls + LS_SIZE; p += PAGE_SIZE)
diff --git a/arch/powerpc/platforms/powermac/pfunc_core.c b/arch/powerpc/platforms/powermac/pfunc_core.c
index 047f954a89..93e7505deb 100644
--- a/arch/powerpc/platforms/powermac/pfunc_core.c
+++ b/arch/powerpc/platforms/powermac/pfunc_core.c
@@ -546,7 +546,7 @@ struct pmf_device {
546}; 546};
547 547
548static LIST_HEAD(pmf_devices); 548static LIST_HEAD(pmf_devices);
549static spinlock_t pmf_lock = SPIN_LOCK_UNLOCKED; 549static DEFINE_SPINLOCK(pmf_lock);
550static DEFINE_MUTEX(pmf_irq_mutex); 550static DEFINE_MUTEX(pmf_irq_mutex);
551 551
552static void pmf_release_device(struct kref *kref) 552static void pmf_release_device(struct kref *kref)
diff --git a/arch/powerpc/platforms/pseries/eeh_cache.c b/arch/powerpc/platforms/pseries/eeh_cache.c
index 98c23aec85..c37a8497c6 100644
--- a/arch/powerpc/platforms/pseries/eeh_cache.c
+++ b/arch/powerpc/platforms/pseries/eeh_cache.c
@@ -287,7 +287,7 @@ void pci_addr_cache_remove_device(struct pci_dev *dev)
287 * find the pci device that corresponds to a given address. 287 * find the pci device that corresponds to a given address.
288 * This routine scans all pci busses to build the cache. 288 * This routine scans all pci busses to build the cache.
289 * Must be run late in boot process, after the pci controllers 289 * Must be run late in boot process, after the pci controllers
290 * have been scaned for devices (after all device resources are known). 290 * have been scanned for devices (after all device resources are known).
291 */ 291 */
292void __init pci_addr_cache_build(void) 292void __init pci_addr_cache_build(void)
293{ 293{
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
index 8f2d12935b..45ccc687e5 100644
--- a/arch/powerpc/platforms/pseries/eeh_event.c
+++ b/arch/powerpc/platforms/pseries/eeh_event.c
@@ -35,7 +35,7 @@
35 */ 35 */
36 36
37/* EEH event workqueue setup. */ 37/* EEH event workqueue setup. */
38static spinlock_t eeh_eventlist_lock = SPIN_LOCK_UNLOCKED; 38static DEFINE_SPINLOCK(eeh_eventlist_lock);
39LIST_HEAD(eeh_eventlist); 39LIST_HEAD(eeh_eventlist);
40static void eeh_thread_launcher(void *); 40static void eeh_thread_launcher(void *);
41DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL); 41DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL);
diff --git a/arch/powerpc/sysdev/mmio_nvram.c b/arch/powerpc/sysdev/mmio_nvram.c
index 74e0d31a35..615350d46b 100644
--- a/arch/powerpc/sysdev/mmio_nvram.c
+++ b/arch/powerpc/sysdev/mmio_nvram.c
@@ -32,7 +32,7 @@
32 32
33static void __iomem *mmio_nvram_start; 33static void __iomem *mmio_nvram_start;
34static long mmio_nvram_len; 34static long mmio_nvram_len;
35static spinlock_t mmio_nvram_lock = SPIN_LOCK_UNLOCKED; 35static DEFINE_SPINLOCK(mmio_nvram_lock);
36 36
37static ssize_t mmio_nvram_read(char *buf, size_t count, loff_t *index) 37static ssize_t mmio_nvram_read(char *buf, size_t count, loff_t *index)
38{ 38{
diff --git a/arch/ppc/kernel/machine_kexec.c b/arch/ppc/kernel/machine_kexec.c
index 84d65a8719..a469ba438c 100644
--- a/arch/ppc/kernel/machine_kexec.c
+++ b/arch/ppc/kernel/machine_kexec.c
@@ -25,8 +25,8 @@ typedef NORET_TYPE void (*relocate_new_kernel_t)(
25 unsigned long reboot_code_buffer, 25 unsigned long reboot_code_buffer,
26 unsigned long start_address) ATTRIB_NORET; 26 unsigned long start_address) ATTRIB_NORET;
27 27
28const extern unsigned char relocate_new_kernel[]; 28extern const unsigned char relocate_new_kernel[];
29const extern unsigned int relocate_new_kernel_size; 29extern const unsigned int relocate_new_kernel_size;
30 30
31void machine_shutdown(void) 31void machine_shutdown(void)
32{ 32{
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
index 1f79e84ab4..4b4607d89b 100644
--- a/arch/ppc/kernel/setup.c
+++ b/arch/ppc/kernel/setup.c
@@ -475,7 +475,7 @@ int __init ppc_init(void)
475 475
476 /* register CPU devices */ 476 /* register CPU devices */
477 for_each_possible_cpu(i) 477 for_each_possible_cpu(i)
478 register_cpu(&cpu_devices[i], i, NULL); 478 register_cpu(&cpu_devices[i], i);
479 479
480 /* call platform init */ 480 /* call platform init */
481 if (ppc_md.init != NULL) { 481 if (ppc_md.init != NULL) {
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 9a22434a58..54d35c1309 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -652,7 +652,7 @@ appldata_cpu_notify(struct notifier_block *self,
652 return NOTIFY_OK; 652 return NOTIFY_OK;
653} 653}
654 654
655static struct notifier_block appldata_nb = { 655static struct notifier_block __devinitdata appldata_nb = {
656 .notifier_call = appldata_cpu_notify, 656 .notifier_call = appldata_cpu_notify,
657}; 657};
658 658
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index c5ca2dc5d4..5713c7e5bd 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -37,10 +37,10 @@ struct s390_aes_ctx {
37 int key_len; 37 int key_len;
38}; 38};
39 39
40static int aes_set_key(void *ctx, const u8 *in_key, unsigned int key_len, 40static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
41 u32 *flags) 41 unsigned int key_len, u32 *flags)
42{ 42{
43 struct s390_aes_ctx *sctx = ctx; 43 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
44 44
45 switch (key_len) { 45 switch (key_len) {
46 case 16: 46 case 16:
@@ -70,9 +70,9 @@ fail:
70 return -EINVAL; 70 return -EINVAL;
71} 71}
72 72
73static void aes_encrypt(void *ctx, u8 *out, const u8 *in) 73static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
74{ 74{
75 const struct s390_aes_ctx *sctx = ctx; 75 const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
76 76
77 switch (sctx->key_len) { 77 switch (sctx->key_len) {
78 case 16: 78 case 16:
@@ -90,9 +90,9 @@ static void aes_encrypt(void *ctx, u8 *out, const u8 *in)
90 } 90 }
91} 91}
92 92
93static void aes_decrypt(void *ctx, u8 *out, const u8 *in) 93static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
94{ 94{
95 const struct s390_aes_ctx *sctx = ctx; 95 const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
96 96
97 switch (sctx->key_len) { 97 switch (sctx->key_len) {
98 case 16: 98 case 16:
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index e3c37aa0a1..b3f7496a79 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -44,10 +44,10 @@ struct crypt_s390_des3_192_ctx {
44 u8 key[DES3_192_KEY_SIZE]; 44 u8 key[DES3_192_KEY_SIZE];
45}; 45};
46 46
47static int des_setkey(void *ctx, const u8 *key, unsigned int keylen, 47static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
48 u32 *flags) 48 unsigned int keylen, u32 *flags)
49{ 49{
50 struct crypt_s390_des_ctx *dctx = ctx; 50 struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm);
51 int ret; 51 int ret;
52 52
53 /* test if key is valid (not a weak key) */ 53 /* test if key is valid (not a weak key) */
@@ -57,16 +57,16 @@ static int des_setkey(void *ctx, const u8 *key, unsigned int keylen,
57 return ret; 57 return ret;
58} 58}
59 59
60static void des_encrypt(void *ctx, u8 *out, const u8 *in) 60static void des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
61{ 61{
62 struct crypt_s390_des_ctx *dctx = ctx; 62 struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm);
63 63
64 crypt_s390_km(KM_DEA_ENCRYPT, dctx->key, out, in, DES_BLOCK_SIZE); 64 crypt_s390_km(KM_DEA_ENCRYPT, dctx->key, out, in, DES_BLOCK_SIZE);
65} 65}
66 66
67static void des_decrypt(void *ctx, u8 *out, const u8 *in) 67static void des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
68{ 68{
69 struct crypt_s390_des_ctx *dctx = ctx; 69 struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm);
70 70
71 crypt_s390_km(KM_DEA_DECRYPT, dctx->key, out, in, DES_BLOCK_SIZE); 71 crypt_s390_km(KM_DEA_DECRYPT, dctx->key, out, in, DES_BLOCK_SIZE);
72} 72}
@@ -166,11 +166,11 @@ static struct crypto_alg des_alg = {
166 * Implementers MUST reject keys that exhibit this property. 166 * Implementers MUST reject keys that exhibit this property.
167 * 167 *
168 */ 168 */
169static int des3_128_setkey(void *ctx, const u8 *key, unsigned int keylen, 169static int des3_128_setkey(struct crypto_tfm *tfm, const u8 *key,
170 u32 *flags) 170 unsigned int keylen, u32 *flags)
171{ 171{
172 int i, ret; 172 int i, ret;
173 struct crypt_s390_des3_128_ctx *dctx = ctx; 173 struct crypt_s390_des3_128_ctx *dctx = crypto_tfm_ctx(tfm);
174 const u8* temp_key = key; 174 const u8* temp_key = key;
175 175
176 if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE))) { 176 if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE))) {
@@ -186,17 +186,17 @@ static int des3_128_setkey(void *ctx, const u8 *key, unsigned int keylen,
186 return 0; 186 return 0;
187} 187}
188 188
189static void des3_128_encrypt(void *ctx, u8 *dst, const u8 *src) 189static void des3_128_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
190{ 190{
191 struct crypt_s390_des3_128_ctx *dctx = ctx; 191 struct crypt_s390_des3_128_ctx *dctx = crypto_tfm_ctx(tfm);
192 192
193 crypt_s390_km(KM_TDEA_128_ENCRYPT, dctx->key, dst, (void*)src, 193 crypt_s390_km(KM_TDEA_128_ENCRYPT, dctx->key, dst, (void*)src,
194 DES3_128_BLOCK_SIZE); 194 DES3_128_BLOCK_SIZE);
195} 195}
196 196
197static void des3_128_decrypt(void *ctx, u8 *dst, const u8 *src) 197static void des3_128_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
198{ 198{
199 struct crypt_s390_des3_128_ctx *dctx = ctx; 199 struct crypt_s390_des3_128_ctx *dctx = crypto_tfm_ctx(tfm);
200 200
201 crypt_s390_km(KM_TDEA_128_DECRYPT, dctx->key, dst, (void*)src, 201 crypt_s390_km(KM_TDEA_128_DECRYPT, dctx->key, dst, (void*)src,
202 DES3_128_BLOCK_SIZE); 202 DES3_128_BLOCK_SIZE);
@@ -302,11 +302,11 @@ static struct crypto_alg des3_128_alg = {
302 * property. 302 * property.
303 * 303 *
304 */ 304 */
305static int des3_192_setkey(void *ctx, const u8 *key, unsigned int keylen, 305static int des3_192_setkey(struct crypto_tfm *tfm, const u8 *key,
306 u32 *flags) 306 unsigned int keylen, u32 *flags)
307{ 307{
308 int i, ret; 308 int i, ret;
309 struct crypt_s390_des3_192_ctx *dctx = ctx; 309 struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm);
310 const u8* temp_key = key; 310 const u8* temp_key = key;
311 311
312 if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) && 312 if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
@@ -325,17 +325,17 @@ static int des3_192_setkey(void *ctx, const u8 *key, unsigned int keylen,
325 return 0; 325 return 0;
326} 326}
327 327
328static void des3_192_encrypt(void *ctx, u8 *dst, const u8 *src) 328static void des3_192_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
329{ 329{
330 struct crypt_s390_des3_192_ctx *dctx = ctx; 330 struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm);
331 331
332 crypt_s390_km(KM_TDEA_192_ENCRYPT, dctx->key, dst, (void*)src, 332 crypt_s390_km(KM_TDEA_192_ENCRYPT, dctx->key, dst, (void*)src,
333 DES3_192_BLOCK_SIZE); 333 DES3_192_BLOCK_SIZE);
334} 334}
335 335
336static void des3_192_decrypt(void *ctx, u8 *dst, const u8 *src) 336static void des3_192_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
337{ 337{
338 struct crypt_s390_des3_192_ctx *dctx = ctx; 338 struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm);
339 339
340 crypt_s390_km(KM_TDEA_192_DECRYPT, dctx->key, dst, (void*)src, 340 crypt_s390_km(KM_TDEA_192_DECRYPT, dctx->key, dst, (void*)src,
341 DES3_192_BLOCK_SIZE); 341 DES3_192_BLOCK_SIZE);
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index 98c896b86d..9d34a35b1a 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -40,28 +40,29 @@ struct crypt_s390_sha1_ctx {
40 u8 buffer[2 * SHA1_BLOCK_SIZE]; 40 u8 buffer[2 * SHA1_BLOCK_SIZE];
41}; 41};
42 42
43static void 43static void sha1_init(struct crypto_tfm *tfm)
44sha1_init(void *ctx)
45{ 44{
46 static const struct crypt_s390_sha1_ctx initstate = { 45 struct crypt_s390_sha1_ctx *ctx = crypto_tfm_ctx(tfm);
47 .state = { 46 static const u32 initstate[5] = {
48 0x67452301, 47 0x67452301,
49 0xEFCDAB89, 48 0xEFCDAB89,
50 0x98BADCFE, 49 0x98BADCFE,
51 0x10325476, 50 0x10325476,
52 0xC3D2E1F0 51 0xC3D2E1F0
53 },
54 }; 52 };
55 memcpy(ctx, &initstate, sizeof(initstate)); 53
54 ctx->count = 0;
55 memcpy(ctx->state, &initstate, sizeof(initstate));
56 ctx->buf_len = 0;
56} 57}
57 58
58static void 59static void sha1_update(struct crypto_tfm *tfm, const u8 *data,
59sha1_update(void *ctx, const u8 *data, unsigned int len) 60 unsigned int len)
60{ 61{
61 struct crypt_s390_sha1_ctx *sctx; 62 struct crypt_s390_sha1_ctx *sctx;
62 long imd_len; 63 long imd_len;
63 64
64 sctx = ctx; 65 sctx = crypto_tfm_ctx(tfm);
65 sctx->count += len * 8; //message bit length 66 sctx->count += len * 8; //message bit length
66 67
67 //anything in buffer yet? -> must be completed 68 //anything in buffer yet? -> must be completed
@@ -110,10 +111,9 @@ pad_message(struct crypt_s390_sha1_ctx* sctx)
110} 111}
111 112
112/* Add padding and return the message digest. */ 113/* Add padding and return the message digest. */
113static void 114static void sha1_final(struct crypto_tfm *tfm, u8 *out)
114sha1_final(void* ctx, u8 *out)
115{ 115{
116 struct crypt_s390_sha1_ctx *sctx = ctx; 116 struct crypt_s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
117 117
118 //must perform manual padding 118 //must perform manual padding
119 pad_message(sctx); 119 pad_message(sctx);
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 1ec5e92b34..f573df30f3 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -31,9 +31,9 @@ struct s390_sha256_ctx {
31 u8 buf[2 * SHA256_BLOCK_SIZE]; 31 u8 buf[2 * SHA256_BLOCK_SIZE];
32}; 32};
33 33
34static void sha256_init(void *ctx) 34static void sha256_init(struct crypto_tfm *tfm)
35{ 35{
36 struct s390_sha256_ctx *sctx = ctx; 36 struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm);
37 37
38 sctx->state[0] = 0x6a09e667; 38 sctx->state[0] = 0x6a09e667;
39 sctx->state[1] = 0xbb67ae85; 39 sctx->state[1] = 0xbb67ae85;
@@ -44,12 +44,12 @@ static void sha256_init(void *ctx)
44 sctx->state[6] = 0x1f83d9ab; 44 sctx->state[6] = 0x1f83d9ab;
45 sctx->state[7] = 0x5be0cd19; 45 sctx->state[7] = 0x5be0cd19;
46 sctx->count = 0; 46 sctx->count = 0;
47 memset(sctx->buf, 0, sizeof(sctx->buf));
48} 47}
49 48
50static void sha256_update(void *ctx, const u8 *data, unsigned int len) 49static void sha256_update(struct crypto_tfm *tfm, const u8 *data,
50 unsigned int len)
51{ 51{
52 struct s390_sha256_ctx *sctx = ctx; 52 struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm);
53 unsigned int index; 53 unsigned int index;
54 int ret; 54 int ret;
55 55
@@ -108,9 +108,9 @@ static void pad_message(struct s390_sha256_ctx* sctx)
108} 108}
109 109
110/* Add padding and return the message digest */ 110/* Add padding and return the message digest */
111static void sha256_final(void* ctx, u8 *out) 111static void sha256_final(struct crypto_tfm *tfm, u8 *out)
112{ 112{
113 struct s390_sha256_ctx *sctx = ctx; 113 struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm);
114 114
115 /* must perform manual padding */ 115 /* must perform manual padding */
116 pad_message(sctx); 116 pad_message(sctx);
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index bad81b5832..fbde6a9152 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -27,8 +27,8 @@ static void kexec_halt_all_cpus(void *);
27 27
28typedef void (*relocate_kernel_t) (kimage_entry_t *, unsigned long); 28typedef void (*relocate_kernel_t) (kimage_entry_t *, unsigned long);
29 29
30const extern unsigned char relocate_kernel[]; 30extern const unsigned char relocate_kernel[];
31const extern unsigned long long relocate_kernel_len; 31extern const unsigned long long relocate_kernel_len;
32 32
33int 33int
34machine_kexec_prepare(struct kimage *image) 34machine_kexec_prepare(struct kimage *image)
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 343120c922..8e03219eea 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -869,7 +869,7 @@ static int __init topology_init(void)
869 int ret; 869 int ret;
870 870
871 for_each_possible_cpu(cpu) { 871 for_each_possible_cpu(cpu) {
872 ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL); 872 ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu);
873 if (ret) 873 if (ret)
874 printk(KERN_WARNING "topology_init: register_cpu %d " 874 printk(KERN_WARNING "topology_init: register_cpu %d "
875 "failed (%d)\n", cpu, ret); 875 "failed (%d)\n", cpu, ret);
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index dfe6f08566..1f0439dc24 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -356,7 +356,7 @@ static void internal_add_vtimer(struct vtimer_list *timer)
356 356
357 set_vtimer(event->expires); 357 set_vtimer(event->expires);
358 spin_unlock_irqrestore(&vt_list->lock, flags); 358 spin_unlock_irqrestore(&vt_list->lock, flags);
359 /* release CPU aquired in prepare_vtimer or mod_virt_timer() */ 359 /* release CPU acquired in prepare_vtimer or mod_virt_timer() */
360 put_cpu(); 360 put_cpu();
361} 361}
362 362
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index c72e17a96e..e467a45066 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -147,7 +147,7 @@ endif
147# them changed. We use .arch and .mach to indicate when they were 147# them changed. We use .arch and .mach to indicate when they were
148# updated last, otherwise make uses the target directory mtime. 148# updated last, otherwise make uses the target directory mtime.
149 149
150include/asm-sh/.cpu: $(wildcard include/config/cpu/*.h) include/config/MARKER 150include/asm-sh/.cpu: $(wildcard include/config/cpu/*.h) include/config/auto.conf
151 @echo ' SYMLINK include/asm-sh/cpu -> include/asm-sh/$(cpuincdir-y)' 151 @echo ' SYMLINK include/asm-sh/cpu -> include/asm-sh/$(cpuincdir-y)'
152 $(Q)if [ ! -d include/asm-sh ]; then mkdir -p include/asm-sh; fi 152 $(Q)if [ ! -d include/asm-sh ]; then mkdir -p include/asm-sh; fi
153 $(Q)ln -fsn $(incdir-prefix)$(cpuincdir-y) include/asm-sh/cpu 153 $(Q)ln -fsn $(incdir-prefix)$(cpuincdir-y) include/asm-sh/cpu
@@ -157,7 +157,7 @@ include/asm-sh/.cpu: $(wildcard include/config/cpu/*.h) include/config/MARKER
157# don't, just reference the parent directory so the semantics are 157# don't, just reference the parent directory so the semantics are
158# kept roughly the same. 158# kept roughly the same.
159 159
160include/asm-sh/.mach: $(wildcard include/config/sh/*.h) include/config/MARKER 160include/asm-sh/.mach: $(wildcard include/config/sh/*.h) include/config/auto.conf
161 @echo -n ' SYMLINK include/asm-sh/mach -> ' 161 @echo -n ' SYMLINK include/asm-sh/mach -> '
162 $(Q)if [ ! -d include/asm-sh ]; then mkdir -p include/asm-sh; fi 162 $(Q)if [ ! -d include/asm-sh ]; then mkdir -p include/asm-sh; fi
163 $(Q)if [ -d $(incdir-prefix)$(incdir-y) ]; then \ 163 $(Q)if [ -d $(incdir-prefix)$(incdir-y) ]; then \
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index 43546525f2..6bcd8d9239 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -25,8 +25,8 @@ typedef NORET_TYPE void (*relocate_new_kernel_t)(
25 unsigned long start_address, 25 unsigned long start_address,
26 unsigned long vbr_reg) ATTRIB_NORET; 26 unsigned long vbr_reg) ATTRIB_NORET;
27 27
28const extern unsigned char relocate_new_kernel[]; 28extern const unsigned char relocate_new_kernel[];
29const extern unsigned int relocate_new_kernel_size; 29extern const unsigned int relocate_new_kernel_size;
30extern void *gdb_vbr_vector; 30extern void *gdb_vbr_vector;
31 31
32/* 32/*
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index bb229ef030..9af22116c9 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -402,7 +402,7 @@ static int __init topology_init(void)
402 int cpu_id; 402 int cpu_id;
403 403
404 for_each_possible_cpu(cpu_id) 404 for_each_possible_cpu(cpu_id)
405 register_cpu(&cpu[cpu_id], cpu_id, NULL); 405 register_cpu(&cpu[cpu_id], cpu_id);
406 406
407 return 0; 407 return 0;
408} 408}
diff --git a/arch/sh/oprofile/op_model_sh7750.c b/arch/sh/oprofile/op_model_sh7750.c
index 5ec9ddcc4b..c265185b22 100644
--- a/arch/sh/oprofile/op_model_sh7750.c
+++ b/arch/sh/oprofile/op_model_sh7750.c
@@ -198,7 +198,7 @@ static int sh7750_perf_counter_create_files(struct super_block *sb, struct dentr
198 198
199 for (i = 0; i < NR_CNTRS; i++) { 199 for (i = 0; i < NR_CNTRS; i++) {
200 struct dentry *dir; 200 struct dentry *dir;
201 char buf[3]; 201 char buf[4];
202 202
203 snprintf(buf, sizeof(buf), "%d", i); 203 snprintf(buf, sizeof(buf), "%d", i);
204 dir = oprofilefs_mkdir(sb, root, buf); 204 dir = oprofilefs_mkdir(sb, root, buf);
diff --git a/arch/sh64/kernel/setup.c b/arch/sh64/kernel/setup.c
index d2711c9c9d..da98d8dbcf 100644
--- a/arch/sh64/kernel/setup.c
+++ b/arch/sh64/kernel/setup.c
@@ -309,7 +309,7 @@ static struct cpu cpu[1];
309 309
310static int __init topology_init(void) 310static int __init topology_init(void)
311{ 311{
312 return register_cpu(cpu, 0, NULL); 312 return register_cpu(cpu, 0);
313} 313}
314 314
315subsys_initcall(topology_init); 315subsys_initcall(topology_init);
diff --git a/arch/sparc/kernel/of_device.c b/arch/sparc/kernel/of_device.c
index 001b8673b4..80a8094787 100644
--- a/arch/sparc/kernel/of_device.c
+++ b/arch/sparc/kernel/of_device.c
@@ -138,6 +138,7 @@ struct bus_type ebus_bus_type = {
138 .suspend = of_device_suspend, 138 .suspend = of_device_suspend,
139 .resume = of_device_resume, 139 .resume = of_device_resume,
140}; 140};
141EXPORT_SYMBOL(ebus_bus_type);
141#endif 142#endif
142 143
143#ifdef CONFIG_SBUS 144#ifdef CONFIG_SBUS
@@ -149,6 +150,7 @@ struct bus_type sbus_bus_type = {
149 .suspend = of_device_suspend, 150 .suspend = of_device_suspend,
150 .resume = of_device_resume, 151 .resume = of_device_resume,
151}; 152};
153EXPORT_SYMBOL(sbus_bus_type);
152#endif 154#endif
153 155
154static int __init of_bus_driver_init(void) 156static int __init of_bus_driver_init(void)
diff --git a/arch/sparc/kernel/prom.c b/arch/sparc/kernel/prom.c
index 63b2b9bd77..946ce6d158 100644
--- a/arch/sparc/kernel/prom.c
+++ b/arch/sparc/kernel/prom.c
@@ -27,6 +27,11 @@
27 27
28static struct device_node *allnodes; 28static struct device_node *allnodes;
29 29
30/* use when traversing tree through the allnext, child, sibling,
31 * or parent members of struct device_node.
32 */
33static DEFINE_RWLOCK(devtree_lock);
34
30int of_device_is_compatible(struct device_node *device, const char *compat) 35int of_device_is_compatible(struct device_node *device, const char *compat)
31{ 36{
32 const char* cp; 37 const char* cp;
@@ -185,6 +190,54 @@ int of_getintprop_default(struct device_node *np, const char *name, int def)
185} 190}
186EXPORT_SYMBOL(of_getintprop_default); 191EXPORT_SYMBOL(of_getintprop_default);
187 192
193int of_set_property(struct device_node *dp, const char *name, void *val, int len)
194{
195 struct property **prevp;
196 void *new_val;
197 int err;
198
199 new_val = kmalloc(len, GFP_KERNEL);
200 if (!new_val)
201 return -ENOMEM;
202
203 memcpy(new_val, val, len);
204
205 err = -ENODEV;
206
207 write_lock(&devtree_lock);
208 prevp = &dp->properties;
209 while (*prevp) {
210 struct property *prop = *prevp;
211
212 if (!strcmp(prop->name, name)) {
213 void *old_val = prop->value;
214 int ret;
215
216 ret = prom_setprop(dp->node, name, val, len);
217 err = -EINVAL;
218 if (ret >= 0) {
219 prop->value = new_val;
220 prop->length = len;
221
222 if (OF_IS_DYNAMIC(prop))
223 kfree(old_val);
224
225 OF_MARK_DYNAMIC(prop);
226
227 err = 0;
228 }
229 break;
230 }
231 prevp = &(*prevp)->next;
232 }
233 write_unlock(&devtree_lock);
234
235 /* XXX Upate procfs if necessary... */
236
237 return err;
238}
239EXPORT_SYMBOL(of_set_property);
240
188static unsigned int prom_early_allocated; 241static unsigned int prom_early_allocated;
189 242
190static void * __init prom_early_alloc(unsigned long size) 243static void * __init prom_early_alloc(unsigned long size)
@@ -354,7 +407,9 @@ static char * __init build_full_name(struct device_node *dp)
354 return n; 407 return n;
355} 408}
356 409
357static struct property * __init build_one_prop(phandle node, char *prev) 410static unsigned int unique_id;
411
412static struct property * __init build_one_prop(phandle node, char *prev, char *special_name, void *special_val, int special_len)
358{ 413{
359 static struct property *tmp = NULL; 414 static struct property *tmp = NULL;
360 struct property *p; 415 struct property *p;
@@ -364,25 +419,34 @@ static struct property * __init build_one_prop(phandle node, char *prev)
364 p = tmp; 419 p = tmp;
365 memset(p, 0, sizeof(*p) + 32); 420 memset(p, 0, sizeof(*p) + 32);
366 tmp = NULL; 421 tmp = NULL;
367 } else 422 } else {
368 p = prom_early_alloc(sizeof(struct property) + 32); 423 p = prom_early_alloc(sizeof(struct property) + 32);
424 p->unique_id = unique_id++;
425 }
369 426
370 p->name = (char *) (p + 1); 427 p->name = (char *) (p + 1);
371 if (prev == NULL) { 428 if (special_name) {
372 prom_firstprop(node, p->name); 429 p->length = special_len;
430 p->value = prom_early_alloc(special_len);
431 memcpy(p->value, special_val, special_len);
373 } else { 432 } else {
374 prom_nextprop(node, prev, p->name); 433 if (prev == NULL) {
375 } 434 prom_firstprop(node, p->name);
376 if (strlen(p->name) == 0) { 435 } else {
377 tmp = p; 436 prom_nextprop(node, prev, p->name);
378 return NULL; 437 }
379 } 438 if (strlen(p->name) == 0) {
380 p->length = prom_getproplen(node, p->name); 439 tmp = p;
381 if (p->length <= 0) { 440 return NULL;
382 p->length = 0; 441 }
383 } else { 442 p->length = prom_getproplen(node, p->name);
384 p->value = prom_early_alloc(p->length); 443 if (p->length <= 0) {
385 len = prom_getproperty(node, p->name, p->value, p->length); 444 p->length = 0;
445 } else {
446 p->value = prom_early_alloc(p->length + 1);
447 prom_getproperty(node, p->name, p->value, p->length);
448 ((unsigned char *)p->value)[p->length] = '\0';
449 }
386 } 450 }
387 return p; 451 return p;
388} 452}
@@ -391,9 +455,14 @@ static struct property * __init build_prop_list(phandle node)
391{ 455{
392 struct property *head, *tail; 456 struct property *head, *tail;
393 457
394 head = tail = build_one_prop(node, NULL); 458 head = tail = build_one_prop(node, NULL,
459 ".node", &node, sizeof(node));
460
461 tail->next = build_one_prop(node, NULL, NULL, NULL, 0);
462 tail = tail->next;
395 while(tail) { 463 while(tail) {
396 tail->next = build_one_prop(node, tail->name); 464 tail->next = build_one_prop(node, tail->name,
465 NULL, NULL, 0);
397 tail = tail->next; 466 tail = tail->next;
398 } 467 }
399 468
@@ -422,6 +491,7 @@ static struct device_node * __init create_node(phandle node)
422 return NULL; 491 return NULL;
423 492
424 dp = prom_early_alloc(sizeof(*dp)); 493 dp = prom_early_alloc(sizeof(*dp));
494 dp->unique_id = unique_id++;
425 495
426 kref_init(&dp->kref); 496 kref_init(&dp->kref);
427 497
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index fa50069460..5db7e1d853 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -9,3 +9,5 @@ lib-y := mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o memcpy.o memset.o \
9 strncpy_from_user.o divdi3.o udivdi3.o strlen_user.o \ 9 strncpy_from_user.o divdi3.o udivdi3.o strlen_user.o \
10 copy_user.o locks.o atomic.o atomic32.o bitops.o \ 10 copy_user.o locks.o atomic.o atomic32.o bitops.o \
11 lshrdi3.o ashldi3.o rwsem.o muldi3.o bitext.o 11 lshrdi3.o ashldi3.o rwsem.o muldi3.o bitext.o
12
13obj-y += iomap.o
diff --git a/arch/sparc/lib/iomap.c b/arch/sparc/lib/iomap.c
new file mode 100644
index 0000000000..54501c1ca7
--- /dev/null
+++ b/arch/sparc/lib/iomap.c
@@ -0,0 +1,48 @@
1/*
2 * Implement the sparc iomap interfaces
3 */
4#include <linux/pci.h>
5#include <linux/module.h>
6#include <asm/io.h>
7
8/* Create a virtual mapping cookie for an IO port range */
9void __iomem *ioport_map(unsigned long port, unsigned int nr)
10{
11 return (void __iomem *) (unsigned long) port;
12}
13
14void ioport_unmap(void __iomem *addr)
15{
16 /* Nothing to do */
17}
18EXPORT_SYMBOL(ioport_map);
19EXPORT_SYMBOL(ioport_unmap);
20
21/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
22void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
23{
24 unsigned long start = pci_resource_start(dev, bar);
25 unsigned long len = pci_resource_len(dev, bar);
26 unsigned long flags = pci_resource_flags(dev, bar);
27
28 if (!len || !start)
29 return NULL;
30 if (maxlen && len > maxlen)
31 len = maxlen;
32 if (flags & IORESOURCE_IO)
33 return ioport_map(start, len);
34 if (flags & IORESOURCE_MEM) {
35 if (flags & IORESOURCE_CACHEABLE)
36 return ioremap(start, len);
37 return ioremap_nocache(start, len);
38 }
39 /* What? */
40 return NULL;
41}
42
43void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
44{
45 /* nothing to do */
46}
47EXPORT_SYMBOL(pci_iomap);
48EXPORT_SYMBOL(pci_iounmap);
diff --git a/arch/sparc64/kernel/auxio.c b/arch/sparc64/kernel/auxio.c
index 2c42894b18..c2c69c167d 100644
--- a/arch/sparc64/kernel/auxio.c
+++ b/arch/sparc64/kernel/auxio.c
@@ -6,6 +6,7 @@
6 */ 6 */
7 7
8#include <linux/config.h> 8#include <linux/config.h>
9#include <linux/module.h>
9#include <linux/kernel.h> 10#include <linux/kernel.h>
10#include <linux/init.h> 11#include <linux/init.h>
11#include <linux/ioport.h> 12#include <linux/ioport.h>
@@ -16,8 +17,8 @@
16#include <asm/ebus.h> 17#include <asm/ebus.h>
17#include <asm/auxio.h> 18#include <asm/auxio.h>
18 19
19/* This cannot be static, as it is referenced in irq.c */
20void __iomem *auxio_register = NULL; 20void __iomem *auxio_register = NULL;
21EXPORT_SYMBOL(auxio_register);
21 22
22enum auxio_type { 23enum auxio_type {
23 AUXIO_TYPE_NODEV, 24 AUXIO_TYPE_NODEV,
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 31e0fbb0d8..cc89b06d01 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -563,67 +563,6 @@ void handler_irq(int irq, struct pt_regs *regs)
563 irq_exit(); 563 irq_exit();
564} 564}
565 565
566#ifdef CONFIG_BLK_DEV_FD
567extern irqreturn_t floppy_interrupt(int, void *, struct pt_regs *);
568
569/* XXX No easy way to include asm/floppy.h XXX */
570extern unsigned char *pdma_vaddr;
571extern unsigned long pdma_size;
572extern volatile int doing_pdma;
573extern unsigned long fdc_status;
574
575irqreturn_t sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
576{
577 if (likely(doing_pdma)) {
578 void __iomem *stat = (void __iomem *) fdc_status;
579 unsigned char *vaddr = pdma_vaddr;
580 unsigned long size = pdma_size;
581 u8 val;
582
583 while (size) {
584 val = readb(stat);
585 if (unlikely(!(val & 0x80))) {
586 pdma_vaddr = vaddr;
587 pdma_size = size;
588 return IRQ_HANDLED;
589 }
590 if (unlikely(!(val & 0x20))) {
591 pdma_vaddr = vaddr;
592 pdma_size = size;
593 doing_pdma = 0;
594 goto main_interrupt;
595 }
596 if (val & 0x40) {
597 /* read */
598 *vaddr++ = readb(stat + 1);
599 } else {
600 unsigned char data = *vaddr++;
601
602 /* write */
603 writeb(data, stat + 1);
604 }
605 size--;
606 }
607
608 pdma_vaddr = vaddr;
609 pdma_size = size;
610
611 /* Send Terminal Count pulse to floppy controller. */
612 val = readb(auxio_register);
613 val |= AUXIO_AUX1_FTCNT;
614 writeb(val, auxio_register);
615 val &= ~AUXIO_AUX1_FTCNT;
616 writeb(val, auxio_register);
617
618 doing_pdma = 0;
619 }
620
621main_interrupt:
622 return floppy_interrupt(irq, dev_cookie, regs);
623}
624EXPORT_SYMBOL(sparc_floppy_irq);
625#endif
626
627struct sun5_timer { 566struct sun5_timer {
628 u64 count0; 567 u64 count0;
629 u64 limit0; 568 u64 limit0;
diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c
index 566aa343aa..768475bbce 100644
--- a/arch/sparc64/kernel/of_device.c
+++ b/arch/sparc64/kernel/of_device.c
@@ -138,6 +138,7 @@ struct bus_type isa_bus_type = {
138 .suspend = of_device_suspend, 138 .suspend = of_device_suspend,
139 .resume = of_device_resume, 139 .resume = of_device_resume,
140}; 140};
141EXPORT_SYMBOL(isa_bus_type);
141 142
142struct bus_type ebus_bus_type = { 143struct bus_type ebus_bus_type = {
143 .name = "ebus", 144 .name = "ebus",
@@ -147,6 +148,7 @@ struct bus_type ebus_bus_type = {
147 .suspend = of_device_suspend, 148 .suspend = of_device_suspend,
148 .resume = of_device_resume, 149 .resume = of_device_resume,
149}; 150};
151EXPORT_SYMBOL(ebus_bus_type);
150#endif 152#endif
151 153
152#ifdef CONFIG_SBUS 154#ifdef CONFIG_SBUS
@@ -158,6 +160,7 @@ struct bus_type sbus_bus_type = {
158 .suspend = of_device_suspend, 160 .suspend = of_device_suspend,
159 .resume = of_device_resume, 161 .resume = of_device_resume,
160}; 162};
163EXPORT_SYMBOL(sbus_bus_type);
161#endif 164#endif
162 165
163static int __init of_bus_driver_init(void) 166static int __init of_bus_driver_init(void)
diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c
index e9d703eea8..8e87e7ea03 100644
--- a/arch/sparc64/kernel/prom.c
+++ b/arch/sparc64/kernel/prom.c
@@ -27,6 +27,11 @@
27 27
28static struct device_node *allnodes; 28static struct device_node *allnodes;
29 29
30/* use when traversing tree through the allnext, child, sibling,
31 * or parent members of struct device_node.
32 */
33static DEFINE_RWLOCK(devtree_lock);
34
30int of_device_is_compatible(struct device_node *device, const char *compat) 35int of_device_is_compatible(struct device_node *device, const char *compat)
31{ 36{
32 const char* cp; 37 const char* cp;
@@ -185,6 +190,54 @@ int of_getintprop_default(struct device_node *np, const char *name, int def)
185} 190}
186EXPORT_SYMBOL(of_getintprop_default); 191EXPORT_SYMBOL(of_getintprop_default);
187 192
193int of_set_property(struct device_node *dp, const char *name, void *val, int len)
194{
195 struct property **prevp;
196 void *new_val;
197 int err;
198
199 new_val = kmalloc(len, GFP_KERNEL);
200 if (!new_val)
201 return -ENOMEM;
202
203 memcpy(new_val, val, len);
204
205 err = -ENODEV;
206
207 write_lock(&devtree_lock);
208 prevp = &dp->properties;
209 while (*prevp) {
210 struct property *prop = *prevp;
211
212 if (!strcmp(prop->name, name)) {
213 void *old_val = prop->value;
214 int ret;
215
216 ret = prom_setprop(dp->node, name, val, len);
217 err = -EINVAL;
218 if (ret >= 0) {
219 prop->value = new_val;
220 prop->length = len;
221
222 if (OF_IS_DYNAMIC(prop))
223 kfree(old_val);
224
225 OF_MARK_DYNAMIC(prop);
226
227 err = 0;
228 }
229 break;
230 }
231 prevp = &(*prevp)->next;
232 }
233 write_unlock(&devtree_lock);
234
235 /* XXX Upate procfs if necessary... */
236
237 return err;
238}
239EXPORT_SYMBOL(of_set_property);
240
188static unsigned int prom_early_allocated; 241static unsigned int prom_early_allocated;
189 242
190static void * __init prom_early_alloc(unsigned long size) 243static void * __init prom_early_alloc(unsigned long size)
@@ -531,7 +584,9 @@ static char * __init build_full_name(struct device_node *dp)
531 return n; 584 return n;
532} 585}
533 586
534static struct property * __init build_one_prop(phandle node, char *prev) 587static unsigned int unique_id;
588
589static struct property * __init build_one_prop(phandle node, char *prev, char *special_name, void *special_val, int special_len)
535{ 590{
536 static struct property *tmp = NULL; 591 static struct property *tmp = NULL;
537 struct property *p; 592 struct property *p;
@@ -540,25 +595,35 @@ static struct property * __init build_one_prop(phandle node, char *prev)
540 p = tmp; 595 p = tmp;
541 memset(p, 0, sizeof(*p) + 32); 596 memset(p, 0, sizeof(*p) + 32);
542 tmp = NULL; 597 tmp = NULL;
543 } else 598 } else {
544 p = prom_early_alloc(sizeof(struct property) + 32); 599 p = prom_early_alloc(sizeof(struct property) + 32);
600 p->unique_id = unique_id++;
601 }
545 602
546 p->name = (char *) (p + 1); 603 p->name = (char *) (p + 1);
547 if (prev == NULL) { 604 if (special_name) {
548 prom_firstprop(node, p->name); 605 strcpy(p->name, special_name);
606 p->length = special_len;
607 p->value = prom_early_alloc(special_len);
608 memcpy(p->value, special_val, special_len);
549 } else { 609 } else {
550 prom_nextprop(node, prev, p->name); 610 if (prev == NULL) {
551 } 611 prom_firstprop(node, p->name);
552 if (strlen(p->name) == 0) { 612 } else {
553 tmp = p; 613 prom_nextprop(node, prev, p->name);
554 return NULL; 614 }
555 } 615 if (strlen(p->name) == 0) {
556 p->length = prom_getproplen(node, p->name); 616 tmp = p;
557 if (p->length <= 0) { 617 return NULL;
558 p->length = 0; 618 }
559 } else { 619 p->length = prom_getproplen(node, p->name);
560 p->value = prom_early_alloc(p->length); 620 if (p->length <= 0) {
561 prom_getproperty(node, p->name, p->value, p->length); 621 p->length = 0;
622 } else {
623 p->value = prom_early_alloc(p->length + 1);
624 prom_getproperty(node, p->name, p->value, p->length);
625 ((unsigned char *)p->value)[p->length] = '\0';
626 }
562 } 627 }
563 return p; 628 return p;
564} 629}
@@ -567,9 +632,14 @@ static struct property * __init build_prop_list(phandle node)
567{ 632{
568 struct property *head, *tail; 633 struct property *head, *tail;
569 634
570 head = tail = build_one_prop(node, NULL); 635 head = tail = build_one_prop(node, NULL,
636 ".node", &node, sizeof(node));
637
638 tail->next = build_one_prop(node, NULL, NULL, NULL, 0);
639 tail = tail->next;
571 while(tail) { 640 while(tail) {
572 tail->next = build_one_prop(node, tail->name); 641 tail->next = build_one_prop(node, tail->name,
642 NULL, NULL, 0);
573 tail = tail->next; 643 tail = tail->next;
574 } 644 }
575 645
@@ -598,6 +668,7 @@ static struct device_node * __init create_node(phandle node)
598 return NULL; 668 return NULL;
599 669
600 dp = prom_early_alloc(sizeof(*dp)); 670 dp = prom_early_alloc(sizeof(*dp));
671 dp->unique_id = unique_id++;
601 672
602 kref_init(&dp->kref); 673 kref_init(&dp->kref);
603 674
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index a6a7d81683..116d963200 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -537,7 +537,7 @@ static int __init topology_init(void)
537 for_each_possible_cpu(i) { 537 for_each_possible_cpu(i) {
538 struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL); 538 struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
539 if (p) { 539 if (p) {
540 register_cpu(p, i, NULL); 540 register_cpu(p, i);
541 err = 0; 541 err = 0;
542 } 542 }
543 } 543 }
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index 6e002aacb9..1605967cce 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -31,6 +31,40 @@
31#include <asm/kdebug.h> 31#include <asm/kdebug.h>
32#include <asm/mmu_context.h> 32#include <asm/mmu_context.h>
33 33
34#ifdef CONFIG_KPROBES
35ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
36
37/* Hook to register for page fault notifications */
38int register_page_fault_notifier(struct notifier_block *nb)
39{
40 return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
41}
42
43int unregister_page_fault_notifier(struct notifier_block *nb)
44{
45 return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
46}
47
48static inline int notify_page_fault(enum die_val val, const char *str,
49 struct pt_regs *regs, long err, int trap, int sig)
50{
51 struct die_args args = {
52 .regs = regs,
53 .str = str,
54 .err = err,
55 .trapnr = trap,
56 .signr = sig
57 };
58 return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
59}
60#else
61static inline int notify_page_fault(enum die_val val, const char *str,
62 struct pt_regs *regs, long err, int trap, int sig)
63{
64 return NOTIFY_DONE;
65}
66#endif
67
34/* 68/*
35 * To debug kernel to catch accesses to certain virtual/physical addresses. 69 * To debug kernel to catch accesses to certain virtual/physical addresses.
36 * Mode = 0 selects physical watchpoints, mode = 1 selects virtual watchpoints. 70 * Mode = 0 selects physical watchpoints, mode = 1 selects virtual watchpoints.
@@ -263,7 +297,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
263 297
264 fault_code = get_thread_fault_code(); 298 fault_code = get_thread_fault_code();
265 299
266 if (notify_die(DIE_PAGE_FAULT, "page_fault", regs, 300 if (notify_page_fault(DIE_PAGE_FAULT, "page_fault", regs,
267 fault_code, 0, SIGSEGV) == NOTIFY_STOP) 301 fault_code, 0, SIGSEGV) == NOTIFY_STOP)
268 return; 302 return;
269 303
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 5139934147..cb75a27adb 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -18,6 +18,7 @@
18#include <linux/initrd.h> 18#include <linux/initrd.h>
19#include <linux/swap.h> 19#include <linux/swap.h>
20#include <linux/pagemap.h> 20#include <linux/pagemap.h>
21#include <linux/poison.h>
21#include <linux/fs.h> 22#include <linux/fs.h>
22#include <linux/seq_file.h> 23#include <linux/seq_file.h>
23#include <linux/kprobes.h> 24#include <linux/kprobes.h>
@@ -1520,7 +1521,7 @@ void free_initmem(void)
1520 page = (addr + 1521 page = (addr +
1521 ((unsigned long) __va(kern_base)) - 1522 ((unsigned long) __va(kern_base)) -
1522 ((unsigned long) KERNBASE)); 1523 ((unsigned long) KERNBASE));
1523 memset((void *)addr, 0xcc, PAGE_SIZE); 1524 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
1524 p = virt_to_page(page); 1525 p = virt_to_page(page);
1525 1526
1526 ClearPageReserved(p); 1527 ClearPageReserved(p);
@@ -1568,6 +1569,7 @@ pgprot_t PAGE_EXEC __read_mostly;
1568unsigned long pg_iobits __read_mostly; 1569unsigned long pg_iobits __read_mostly;
1569 1570
1570unsigned long _PAGE_IE __read_mostly; 1571unsigned long _PAGE_IE __read_mostly;
1572EXPORT_SYMBOL(_PAGE_IE);
1571 1573
1572unsigned long _PAGE_E __read_mostly; 1574unsigned long _PAGE_E __read_mostly;
1573EXPORT_SYMBOL(_PAGE_E); 1575EXPORT_SYMBOL(_PAGE_E);
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 0897852b09..290cec6d69 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -1222,7 +1222,7 @@ int open_ubd_file(char *file, struct openflags *openflags, int shared,
1222 } 1222 }
1223 } 1223 }
1224 1224
1225 /* Succesful return case! */ 1225 /* Successful return case! */
1226 if(backing_file_out == NULL) 1226 if(backing_file_out == NULL)
1227 return(fd); 1227 return(fd);
1228 1228
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index af44130f0d..ccc4a7fb97 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -386,24 +386,45 @@ config HPET_EMULATE_RTC
386 bool "Provide RTC interrupt" 386 bool "Provide RTC interrupt"
387 depends on HPET_TIMER && RTC=y 387 depends on HPET_TIMER && RTC=y
388 388
389config GART_IOMMU 389# Mark as embedded because too many people got it wrong.
390 bool "K8 GART IOMMU support" 390# The code disables itself when not needed.
391config IOMMU
392 bool "IOMMU support" if EMBEDDED
391 default y 393 default y
392 select SWIOTLB 394 select SWIOTLB
393 select AGP 395 select AGP
394 depends on PCI 396 depends on PCI
395 help 397 help
396 Support for hardware IOMMU in AMD's Opteron/Athlon64 Processors 398 Support for full DMA access of devices with 32bit memory access only
397 and for the bounce buffering software IOMMU. 399 on systems with more than 3GB. This is usually needed for USB,
398 Needed to run systems with more than 3GB of memory properly with 400 sound, many IDE/SATA chipsets and some other devices.
399 32-bit PCI devices that do not support DAC (Double Address Cycle). 401 Provides a driver for the AMD Athlon64/Opteron/Turion/Sempron GART
400 The IOMMU can be turned off at runtime with the iommu=off parameter. 402 based IOMMU and a software bounce buffer based IOMMU used on Intel
401 Normally the kernel will take the right choice by itself. 403 systems and as fallback.
402 This option includes a driver for the AMD Opteron/Athlon64 IOMMU 404 The code is only active when needed (enough memory and limited
403 northbridge and a software emulation used on other systems without 405 device) unless CONFIG_IOMMU_DEBUG or iommu=force is specified
404 hardware IOMMU. If unsure, say Y. 406 too.
405 407
406# need this always selected by GART_IOMMU for the VIA workaround 408config CALGARY_IOMMU
409 bool "IBM Calgary IOMMU support"
410 default y
411 select SWIOTLB
412 depends on PCI && EXPERIMENTAL
413 help
414 Support for hardware IOMMUs in IBM's xSeries x366 and x460
415 systems. Needed to run systems with more than 3GB of memory
416 properly with 32-bit PCI devices that do not support DAC
417 (Double Address Cycle). Calgary also supports bus level
418 isolation, where all DMAs pass through the IOMMU. This
419 prevents them from going anywhere except their intended
420 destination. This catches hard-to-find kernel bugs and
421 mis-behaving drivers and devices that do not use the DMA-API
422 properly to set up their DMA buffers. The IOMMU can be
423 turned off at boot time with the iommu=off parameter.
424 Normally the kernel will make the right choice by itself.
425 If unsure, say Y.
426
427# need this always selected by IOMMU for the VIA workaround
407config SWIOTLB 428config SWIOTLB
408 bool 429 bool
409 430
@@ -501,6 +522,10 @@ config REORDER
501 optimal TLB usage. If you have pretty much any version of binutils, 522 optimal TLB usage. If you have pretty much any version of binutils,
502 this can increase your kernel build time by roughly one minute. 523 this can increase your kernel build time by roughly one minute.
503 524
525config K8_NB
526 def_bool y
527 depends on AGP_AMD64 || IOMMU || (PCI && NUMA)
528
504endmenu 529endmenu
505 530
506# 531#
diff --git a/arch/x86_64/Kconfig.debug b/arch/x86_64/Kconfig.debug
index ea31b4c621..1d92ab56c0 100644
--- a/arch/x86_64/Kconfig.debug
+++ b/arch/x86_64/Kconfig.debug
@@ -13,7 +13,7 @@ config DEBUG_RODATA
13 If in doubt, say "N". 13 If in doubt, say "N".
14 14
15config IOMMU_DEBUG 15config IOMMU_DEBUG
16 depends on GART_IOMMU && DEBUG_KERNEL 16 depends on IOMMU && DEBUG_KERNEL
17 bool "Enable IOMMU debugging" 17 bool "Enable IOMMU debugging"
18 help 18 help
19 Force the IOMMU to on even when you have less than 4GB of 19 Force the IOMMU to on even when you have less than 4GB of
@@ -35,6 +35,22 @@ config IOMMU_LEAK
35 Add a simple leak tracer to the IOMMU code. This is useful when you 35 Add a simple leak tracer to the IOMMU code. This is useful when you
36 are debugging a buggy device driver that leaks IOMMU mappings. 36 are debugging a buggy device driver that leaks IOMMU mappings.
37 37
38config DEBUG_STACKOVERFLOW
39 bool "Check for stack overflows"
40 depends on DEBUG_KERNEL
41 help
42 This option will cause messages to be printed if free stack space
43 drops below a certain limit.
44
45config DEBUG_STACK_USAGE
46 bool "Stack utilization instrumentation"
47 depends on DEBUG_KERNEL
48 help
49 Enables the display of the minimum amount of free stack which each
50 task has ever had available in the sysrq-T and sysrq-P debug output.
51
52 This option will slow down process creation somewhat.
53
38#config X86_REMOTE_DEBUG 54#config X86_REMOTE_DEBUG
39# bool "kgdb debugging stub" 55# bool "kgdb debugging stub"
40 56
diff --git a/arch/x86_64/Makefile b/arch/x86_64/Makefile
index e573e2ab55..431bb4bc36 100644
--- a/arch/x86_64/Makefile
+++ b/arch/x86_64/Makefile
@@ -27,6 +27,7 @@ LDFLAGS_vmlinux :=
27CHECKFLAGS += -D__x86_64__ -m64 27CHECKFLAGS += -D__x86_64__ -m64
28 28
29cflags-y := 29cflags-y :=
30cflags-kernel-y :=
30cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8) 31cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
31cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona) 32cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
32cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic) 33cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
@@ -35,7 +36,7 @@ cflags-y += -m64
35cflags-y += -mno-red-zone 36cflags-y += -mno-red-zone
36cflags-y += -mcmodel=kernel 37cflags-y += -mcmodel=kernel
37cflags-y += -pipe 38cflags-y += -pipe
38cflags-$(CONFIG_REORDER) += -ffunction-sections 39cflags-kernel-$(CONFIG_REORDER) += -ffunction-sections
39# this makes reading assembly source easier, but produces worse code 40# this makes reading assembly source easier, but produces worse code
40# actually it makes the kernel smaller too. 41# actually it makes the kernel smaller too.
41cflags-y += -fno-reorder-blocks 42cflags-y += -fno-reorder-blocks
@@ -55,6 +56,7 @@ cflags-y += $(call cc-option,-funit-at-a-time)
55cflags-y += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,) 56cflags-y += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,)
56 57
57CFLAGS += $(cflags-y) 58CFLAGS += $(cflags-y)
59CFLAGS_KERNEL += $(cflags-kernel-y)
58AFLAGS += -m64 60AFLAGS += -m64
59 61
60head-y := arch/x86_64/kernel/head.o arch/x86_64/kernel/head64.o arch/x86_64/kernel/init_task.o 62head-y := arch/x86_64/kernel/head.o arch/x86_64/kernel/head64.o arch/x86_64/kernel/init_task.o
diff --git a/arch/x86_64/boot/Makefile b/arch/x86_64/boot/Makefile
index 43ee6c50c2..deb063e776 100644
--- a/arch/x86_64/boot/Makefile
+++ b/arch/x86_64/boot/Makefile
@@ -107,8 +107,13 @@ fdimage288: $(BOOTIMAGE) $(obj)/mtools.conf
107isoimage: $(BOOTIMAGE) 107isoimage: $(BOOTIMAGE)
108 -rm -rf $(obj)/isoimage 108 -rm -rf $(obj)/isoimage
109 mkdir $(obj)/isoimage 109 mkdir $(obj)/isoimage
110 cp `echo /usr/lib*/syslinux/isolinux.bin | awk '{ print $1; }'` \ 110 for i in lib lib64 share end ; do \
111 $(obj)/isoimage 111 if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
112 cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
113 break ; \
114 fi ; \
115 if [ $$i = end ] ; then exit 1 ; fi ; \
116 done
112 cp $(BOOTIMAGE) $(obj)/isoimage/linux 117 cp $(BOOTIMAGE) $(obj)/isoimage/linux
113 echo '$(image_cmdline)' > $(obj)/isoimage/isolinux.cfg 118 echo '$(image_cmdline)' > $(obj)/isoimage/isolinux.cfg
114 if [ -f '$(FDINITRD)' ] ; then \ 119 if [ -f '$(FDINITRD)' ] ; then \
diff --git a/arch/x86_64/boot/compressed/misc.c b/arch/x86_64/boot/compressed/misc.c
index cf4b88c416..3755b2e394 100644
--- a/arch/x86_64/boot/compressed/misc.c
+++ b/arch/x86_64/boot/compressed/misc.c
@@ -77,11 +77,11 @@ static void gzip_release(void **);
77 */ 77 */
78static unsigned char *real_mode; /* Pointer to real-mode data */ 78static unsigned char *real_mode; /* Pointer to real-mode data */
79 79
80#define EXT_MEM_K (*(unsigned short *)(real_mode + 0x2)) 80#define RM_EXT_MEM_K (*(unsigned short *)(real_mode + 0x2))
81#ifndef STANDARD_MEMORY_BIOS_CALL 81#ifndef STANDARD_MEMORY_BIOS_CALL
82#define ALT_MEM_K (*(unsigned long *)(real_mode + 0x1e0)) 82#define RM_ALT_MEM_K (*(unsigned long *)(real_mode + 0x1e0))
83#endif 83#endif
84#define SCREEN_INFO (*(struct screen_info *)(real_mode+0)) 84#define RM_SCREEN_INFO (*(struct screen_info *)(real_mode+0))
85 85
86extern unsigned char input_data[]; 86extern unsigned char input_data[];
87extern int input_len; 87extern int input_len;
@@ -92,9 +92,9 @@ static unsigned long output_ptr = 0;
92 92
93static void *malloc(int size); 93static void *malloc(int size);
94static void free(void *where); 94static void free(void *where);
95 95
96void* memset(void* s, int c, unsigned n); 96static void *memset(void *s, int c, unsigned n);
97void* memcpy(void* dest, const void* src, unsigned n); 97static void *memcpy(void *dest, const void *src, unsigned n);
98 98
99static void putstr(const char *); 99static void putstr(const char *);
100 100
@@ -162,8 +162,8 @@ static void putstr(const char *s)
162 int x,y,pos; 162 int x,y,pos;
163 char c; 163 char c;
164 164
165 x = SCREEN_INFO.orig_x; 165 x = RM_SCREEN_INFO.orig_x;
166 y = SCREEN_INFO.orig_y; 166 y = RM_SCREEN_INFO.orig_y;
167 167
168 while ( ( c = *s++ ) != '\0' ) { 168 while ( ( c = *s++ ) != '\0' ) {
169 if ( c == '\n' ) { 169 if ( c == '\n' ) {
@@ -184,8 +184,8 @@ static void putstr(const char *s)
184 } 184 }
185 } 185 }
186 186
187 SCREEN_INFO.orig_x = x; 187 RM_SCREEN_INFO.orig_x = x;
188 SCREEN_INFO.orig_y = y; 188 RM_SCREEN_INFO.orig_y = y;
189 189
190 pos = (x + cols * y) * 2; /* Update cursor position */ 190 pos = (x + cols * y) * 2; /* Update cursor position */
191 outb_p(14, vidport); 191 outb_p(14, vidport);
@@ -194,7 +194,7 @@ static void putstr(const char *s)
194 outb_p(0xff & (pos >> 1), vidport+1); 194 outb_p(0xff & (pos >> 1), vidport+1);
195} 195}
196 196
197void* memset(void* s, int c, unsigned n) 197static void* memset(void* s, int c, unsigned n)
198{ 198{
199 int i; 199 int i;
200 char *ss = (char*)s; 200 char *ss = (char*)s;
@@ -203,7 +203,7 @@ void* memset(void* s, int c, unsigned n)
203 return s; 203 return s;
204} 204}
205 205
206void* memcpy(void* dest, const void* src, unsigned n) 206static void* memcpy(void* dest, const void* src, unsigned n)
207{ 207{
208 int i; 208 int i;
209 char *d = (char *)dest, *s = (char *)src; 209 char *d = (char *)dest, *s = (char *)src;
@@ -278,15 +278,15 @@ static void error(char *x)
278 putstr(x); 278 putstr(x);
279 putstr("\n\n -- System halted"); 279 putstr("\n\n -- System halted");
280 280
281 while(1); 281 while(1); /* Halt */
282} 282}
283 283
284void setup_normal_output_buffer(void) 284static void setup_normal_output_buffer(void)
285{ 285{
286#ifdef STANDARD_MEMORY_BIOS_CALL 286#ifdef STANDARD_MEMORY_BIOS_CALL
287 if (EXT_MEM_K < 1024) error("Less than 2MB of memory"); 287 if (RM_EXT_MEM_K < 1024) error("Less than 2MB of memory");
288#else 288#else
289 if ((ALT_MEM_K > EXT_MEM_K ? ALT_MEM_K : EXT_MEM_K) < 1024) error("Less than 2MB of memory"); 289 if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < 1024) error("Less than 2MB of memory");
290#endif 290#endif
291 output_data = (unsigned char *)__PHYSICAL_START; /* Normally Points to 1M */ 291 output_data = (unsigned char *)__PHYSICAL_START; /* Normally Points to 1M */
292 free_mem_end_ptr = (long)real_mode; 292 free_mem_end_ptr = (long)real_mode;
@@ -297,13 +297,13 @@ struct moveparams {
297 uch *high_buffer_start; int hcount; 297 uch *high_buffer_start; int hcount;
298}; 298};
299 299
300void setup_output_buffer_if_we_run_high(struct moveparams *mv) 300static void setup_output_buffer_if_we_run_high(struct moveparams *mv)
301{ 301{
302 high_buffer_start = (uch *)(((ulg)&end) + HEAP_SIZE); 302 high_buffer_start = (uch *)(((ulg)&end) + HEAP_SIZE);
303#ifdef STANDARD_MEMORY_BIOS_CALL 303#ifdef STANDARD_MEMORY_BIOS_CALL
304 if (EXT_MEM_K < (3*1024)) error("Less than 4MB of memory"); 304 if (RM_EXT_MEM_K < (3*1024)) error("Less than 4MB of memory");
305#else 305#else
306 if ((ALT_MEM_K > EXT_MEM_K ? ALT_MEM_K : EXT_MEM_K) < (3*1024)) error("Less than 4MB of memory"); 306 if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < (3*1024)) error("Less than 4MB of memory");
307#endif 307#endif
308 mv->low_buffer_start = output_data = (unsigned char *)LOW_BUFFER_START; 308 mv->low_buffer_start = output_data = (unsigned char *)LOW_BUFFER_START;
309 low_buffer_end = ((unsigned int)real_mode > LOW_BUFFER_MAX 309 low_buffer_end = ((unsigned int)real_mode > LOW_BUFFER_MAX
@@ -319,7 +319,7 @@ void setup_output_buffer_if_we_run_high(struct moveparams *mv)
319 mv->high_buffer_start = high_buffer_start; 319 mv->high_buffer_start = high_buffer_start;
320} 320}
321 321
322void close_output_buffer_if_we_run_high(struct moveparams *mv) 322static void close_output_buffer_if_we_run_high(struct moveparams *mv)
323{ 323{
324 if (bytes_out > low_buffer_size) { 324 if (bytes_out > low_buffer_size) {
325 mv->lcount = low_buffer_size; 325 mv->lcount = low_buffer_size;
@@ -335,7 +335,7 @@ int decompress_kernel(struct moveparams *mv, void *rmode)
335{ 335{
336 real_mode = rmode; 336 real_mode = rmode;
337 337
338 if (SCREEN_INFO.orig_video_mode == 7) { 338 if (RM_SCREEN_INFO.orig_video_mode == 7) {
339 vidmem = (char *) 0xb0000; 339 vidmem = (char *) 0xb0000;
340 vidport = 0x3b4; 340 vidport = 0x3b4;
341 } else { 341 } else {
@@ -343,8 +343,8 @@ int decompress_kernel(struct moveparams *mv, void *rmode)
343 vidport = 0x3d4; 343 vidport = 0x3d4;
344 } 344 }
345 345
346 lines = SCREEN_INFO.orig_video_lines; 346 lines = RM_SCREEN_INFO.orig_video_lines;
347 cols = SCREEN_INFO.orig_video_cols; 347 cols = RM_SCREEN_INFO.orig_video_cols;
348 348
349 if (free_mem_ptr < 0x100000) setup_normal_output_buffer(); 349 if (free_mem_ptr < 0x100000) setup_normal_output_buffer();
350 else setup_output_buffer_if_we_run_high(mv); 350 else setup_output_buffer_if_we_run_high(mv);
diff --git a/arch/x86_64/boot/tools/build.c b/arch/x86_64/boot/tools/build.c
index c44f5e2ec1..eae8669170 100644
--- a/arch/x86_64/boot/tools/build.c
+++ b/arch/x86_64/boot/tools/build.c
@@ -149,10 +149,8 @@ int main(int argc, char ** argv)
149 sz = sb.st_size; 149 sz = sb.st_size;
150 fprintf (stderr, "System is %d kB\n", sz/1024); 150 fprintf (stderr, "System is %d kB\n", sz/1024);
151 sys_size = (sz + 15) / 16; 151 sys_size = (sz + 15) / 16;
152 /* 0x40000*16 = 4.0 MB, reasonable estimate for the current maximum */ 152 if (!is_big_kernel && sys_size > DEF_SYSSIZE)
153 if (sys_size > (is_big_kernel ? 0x40000 : DEF_SYSSIZE)) 153 die("System is too big. Try using bzImage or modules.");
154 die("System is too big. Try using %smodules.",
155 is_big_kernel ? "" : "bzImage or ");
156 while (sz > 0) { 154 while (sz > 0) {
157 int l, n; 155 int l, n;
158 156
diff --git a/arch/x86_64/boot/video.S b/arch/x86_64/boot/video.S
index 32327bb37a..2aa565c136 100644
--- a/arch/x86_64/boot/video.S
+++ b/arch/x86_64/boot/video.S
@@ -1929,6 +1929,7 @@ skip10: movb %ah, %al
1929 ret 1929 ret
1930 1930
1931store_edid: 1931store_edid:
1932#ifdef CONFIG_FIRMWARE_EDID
1932 pushw %es # just save all registers 1933 pushw %es # just save all registers
1933 pushw %ax 1934 pushw %ax
1934 pushw %bx 1935 pushw %bx
@@ -1946,6 +1947,22 @@ store_edid:
1946 rep 1947 rep
1947 stosl 1948 stosl
1948 1949
1950 pushw %es # save ES
1951 xorw %di, %di # Report Capability
1952 pushw %di
1953 popw %es # ES:DI must be 0:0
1954 movw $0x4f15, %ax
1955 xorw %bx, %bx
1956 xorw %cx, %cx
1957 int $0x10
1958 popw %es # restore ES
1959
1960 cmpb $0x00, %ah # call successful
1961 jne no_edid
1962
1963 cmpb $0x4f, %al # function supported
1964 jne no_edid
1965
1949 movw $0x4f15, %ax # do VBE/DDC 1966 movw $0x4f15, %ax # do VBE/DDC
1950 movw $0x01, %bx 1967 movw $0x01, %bx
1951 movw $0x00, %cx 1968 movw $0x00, %cx
@@ -1953,12 +1970,14 @@ store_edid:
1953 movw $0x140, %di 1970 movw $0x140, %di
1954 int $0x10 1971 int $0x10
1955 1972
1973no_edid:
1956 popw %di # restore all registers 1974 popw %di # restore all registers
1957 popw %dx 1975 popw %dx
1958 popw %cx 1976 popw %cx
1959 popw %bx 1977 popw %bx
1960 popw %ax 1978 popw %ax
1961 popw %es 1979 popw %es
1980#endif
1962 ret 1981 ret
1963 1982
1964# VIDEO_SELECT-only variables 1983# VIDEO_SELECT-only variables
diff --git a/arch/x86_64/crypto/aes-x86_64-asm.S b/arch/x86_64/crypto/aes-x86_64-asm.S
index 483cbb23ab..26b40de4d0 100644
--- a/arch/x86_64/crypto/aes-x86_64-asm.S
+++ b/arch/x86_64/crypto/aes-x86_64-asm.S
@@ -15,6 +15,10 @@
15 15
16.text 16.text
17 17
18#include <asm/asm-offsets.h>
19
20#define BASE crypto_tfm_ctx_offset
21
18#define R1 %rax 22#define R1 %rax
19#define R1E %eax 23#define R1E %eax
20#define R1X %ax 24#define R1X %ax
@@ -46,19 +50,19 @@
46#define R10 %r10 50#define R10 %r10
47#define R11 %r11 51#define R11 %r11
48 52
49#define prologue(FUNC,BASE,B128,B192,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11) \ 53#define prologue(FUNC,KEY,B128,B192,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11) \
50 .global FUNC; \ 54 .global FUNC; \
51 .type FUNC,@function; \ 55 .type FUNC,@function; \
52 .align 8; \ 56 .align 8; \
53FUNC: movq r1,r2; \ 57FUNC: movq r1,r2; \
54 movq r3,r4; \ 58 movq r3,r4; \
55 leaq BASE+52(r8),r9; \ 59 leaq BASE+KEY+52(r8),r9; \
56 movq r10,r11; \ 60 movq r10,r11; \
57 movl (r7),r5 ## E; \ 61 movl (r7),r5 ## E; \
58 movl 4(r7),r1 ## E; \ 62 movl 4(r7),r1 ## E; \
59 movl 8(r7),r6 ## E; \ 63 movl 8(r7),r6 ## E; \
60 movl 12(r7),r7 ## E; \ 64 movl 12(r7),r7 ## E; \
61 movl (r8),r10 ## E; \ 65 movl BASE(r8),r10 ## E; \
62 xorl -48(r9),r5 ## E; \ 66 xorl -48(r9),r5 ## E; \
63 xorl -44(r9),r1 ## E; \ 67 xorl -44(r9),r1 ## E; \
64 xorl -40(r9),r6 ## E; \ 68 xorl -40(r9),r6 ## E; \
@@ -128,8 +132,8 @@ FUNC: movq r1,r2; \
128 movl r3 ## E,r1 ## E; \ 132 movl r3 ## E,r1 ## E; \
129 movl r4 ## E,r2 ## E; 133 movl r4 ## E,r2 ## E;
130 134
131#define entry(FUNC,BASE,B128,B192) \ 135#define entry(FUNC,KEY,B128,B192) \
132 prologue(FUNC,BASE,B128,B192,R2,R8,R7,R9,R1,R3,R4,R6,R10,R5,R11) 136 prologue(FUNC,KEY,B128,B192,R2,R8,R7,R9,R1,R3,R4,R6,R10,R5,R11)
133 137
134#define return epilogue(R8,R2,R9,R7,R5,R6,R3,R4,R11) 138#define return epilogue(R8,R2,R9,R7,R5,R6,R3,R4,R11)
135 139
@@ -147,9 +151,9 @@ FUNC: movq r1,r2; \
147#define decrypt_final(TAB,OFFSET) \ 151#define decrypt_final(TAB,OFFSET) \
148 round(TAB,OFFSET,R2,R1,R4,R3,R6,R5,R7,R10,R5,R6,R3,R4) 152 round(TAB,OFFSET,R2,R1,R4,R3,R6,R5,R7,R10,R5,R6,R3,R4)
149 153
150/* void aes_encrypt(void *ctx, u8 *out, const u8 *in) */ 154/* void aes_enc_blk(stuct crypto_tfm *tfm, u8 *out, const u8 *in) */
151 155
152 entry(aes_encrypt,0,enc128,enc192) 156 entry(aes_enc_blk,0,enc128,enc192)
153 encrypt_round(aes_ft_tab,-96) 157 encrypt_round(aes_ft_tab,-96)
154 encrypt_round(aes_ft_tab,-80) 158 encrypt_round(aes_ft_tab,-80)
155enc192: encrypt_round(aes_ft_tab,-64) 159enc192: encrypt_round(aes_ft_tab,-64)
@@ -166,9 +170,9 @@ enc128: encrypt_round(aes_ft_tab,-32)
166 encrypt_final(aes_fl_tab,112) 170 encrypt_final(aes_fl_tab,112)
167 return 171 return
168 172
169/* void aes_decrypt(void *ctx, u8 *out, const u8 *in) */ 173/* void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in) */
170 174
171 entry(aes_decrypt,240,dec128,dec192) 175 entry(aes_dec_blk,240,dec128,dec192)
172 decrypt_round(aes_it_tab,-96) 176 decrypt_round(aes_it_tab,-96)
173 decrypt_round(aes_it_tab,-80) 177 decrypt_round(aes_it_tab,-80)
174dec192: decrypt_round(aes_it_tab,-64) 178dec192: decrypt_round(aes_it_tab,-64)
diff --git a/arch/x86_64/crypto/aes.c b/arch/x86_64/crypto/aes.c
index 6f77e7700d..68866fab37 100644
--- a/arch/x86_64/crypto/aes.c
+++ b/arch/x86_64/crypto/aes.c
@@ -227,10 +227,10 @@ static void __init gen_tabs(void)
227 t ^= E_KEY[8 * i + 7]; E_KEY[8 * i + 15] = t; \ 227 t ^= E_KEY[8 * i + 7]; E_KEY[8 * i + 15] = t; \
228} 228}
229 229
230static int aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, 230static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
231 u32 *flags) 231 unsigned int key_len, u32 *flags)
232{ 232{
233 struct aes_ctx *ctx = ctx_arg; 233 struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
234 const __le32 *key = (const __le32 *)in_key; 234 const __le32 *key = (const __le32 *)in_key;
235 u32 i, j, t, u, v, w; 235 u32 i, j, t, u, v, w;
236 236
@@ -283,8 +283,18 @@ static int aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len,
283 return 0; 283 return 0;
284} 284}
285 285
286extern void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in); 286asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in);
287extern void aes_decrypt(void *ctx_arg, u8 *out, const u8 *in); 287asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in);
288
289static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
290{
291 aes_enc_blk(tfm, dst, src);
292}
293
294static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
295{
296 aes_dec_blk(tfm, dst, src);
297}
288 298
289static struct crypto_alg aes_alg = { 299static struct crypto_alg aes_alg = {
290 .cra_name = "aes", 300 .cra_name = "aes",
diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig
index 69db0c0721..e69d403949 100644
--- a/arch/x86_64/defconfig
+++ b/arch/x86_64/defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.17-rc1-git11 3# Linux kernel version: 2.6.17-git6
4# Sun Apr 16 07:22:36 2006 4# Sat Jun 24 00:52:28 2006
5# 5#
6CONFIG_X86_64=y 6CONFIG_X86_64=y
7CONFIG_64BIT=y 7CONFIG_64BIT=y
@@ -42,7 +42,6 @@ CONFIG_IKCONFIG_PROC=y
42# CONFIG_RELAY is not set 42# CONFIG_RELAY is not set
43CONFIG_INITRAMFS_SOURCE="" 43CONFIG_INITRAMFS_SOURCE=""
44CONFIG_UID16=y 44CONFIG_UID16=y
45CONFIG_VM86=y
46CONFIG_CC_OPTIMIZE_FOR_SIZE=y 45CONFIG_CC_OPTIMIZE_FOR_SIZE=y
47# CONFIG_EMBEDDED is not set 46# CONFIG_EMBEDDED is not set
48CONFIG_KALLSYMS=y 47CONFIG_KALLSYMS=y
@@ -57,7 +56,6 @@ CONFIG_FUTEX=y
57CONFIG_EPOLL=y 56CONFIG_EPOLL=y
58CONFIG_SHMEM=y 57CONFIG_SHMEM=y
59CONFIG_SLAB=y 58CONFIG_SLAB=y
60CONFIG_DOUBLEFAULT=y
61# CONFIG_TINY_SHMEM is not set 59# CONFIG_TINY_SHMEM is not set
62CONFIG_BASE_SMALL=0 60CONFIG_BASE_SMALL=0
63# CONFIG_SLOB is not set 61# CONFIG_SLOB is not set
@@ -144,7 +142,8 @@ CONFIG_NR_CPUS=32
144CONFIG_HOTPLUG_CPU=y 142CONFIG_HOTPLUG_CPU=y
145CONFIG_HPET_TIMER=y 143CONFIG_HPET_TIMER=y
146CONFIG_HPET_EMULATE_RTC=y 144CONFIG_HPET_EMULATE_RTC=y
147CONFIG_GART_IOMMU=y 145CONFIG_IOMMU=y
146# CONFIG_CALGARY_IOMMU is not set
148CONFIG_SWIOTLB=y 147CONFIG_SWIOTLB=y
149CONFIG_X86_MCE=y 148CONFIG_X86_MCE=y
150CONFIG_X86_MCE_INTEL=y 149CONFIG_X86_MCE_INTEL=y
@@ -158,6 +157,7 @@ CONFIG_HZ_250=y
158# CONFIG_HZ_1000 is not set 157# CONFIG_HZ_1000 is not set
159CONFIG_HZ=250 158CONFIG_HZ=250
160# CONFIG_REORDER is not set 159# CONFIG_REORDER is not set
160CONFIG_K8_NB=y
161CONFIG_GENERIC_HARDIRQS=y 161CONFIG_GENERIC_HARDIRQS=y
162CONFIG_GENERIC_IRQ_PROBE=y 162CONFIG_GENERIC_IRQ_PROBE=y
163CONFIG_ISA_DMA_API=y 163CONFIG_ISA_DMA_API=y
@@ -293,6 +293,8 @@ CONFIG_IP_PNP_DHCP=y
293# CONFIG_INET_IPCOMP is not set 293# CONFIG_INET_IPCOMP is not set
294# CONFIG_INET_XFRM_TUNNEL is not set 294# CONFIG_INET_XFRM_TUNNEL is not set
295# CONFIG_INET_TUNNEL is not set 295# CONFIG_INET_TUNNEL is not set
296# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
297# CONFIG_INET_XFRM_MODE_TUNNEL is not set
296CONFIG_INET_DIAG=y 298CONFIG_INET_DIAG=y
297CONFIG_INET_TCP_DIAG=y 299CONFIG_INET_TCP_DIAG=y
298# CONFIG_TCP_CONG_ADVANCED is not set 300# CONFIG_TCP_CONG_ADVANCED is not set
@@ -305,7 +307,10 @@ CONFIG_IPV6=y
305# CONFIG_INET6_IPCOMP is not set 307# CONFIG_INET6_IPCOMP is not set
306# CONFIG_INET6_XFRM_TUNNEL is not set 308# CONFIG_INET6_XFRM_TUNNEL is not set
307# CONFIG_INET6_TUNNEL is not set 309# CONFIG_INET6_TUNNEL is not set
310# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
311# CONFIG_INET6_XFRM_MODE_TUNNEL is not set
308# CONFIG_IPV6_TUNNEL is not set 312# CONFIG_IPV6_TUNNEL is not set
313# CONFIG_NETWORK_SECMARK is not set
309# CONFIG_NETFILTER is not set 314# CONFIG_NETFILTER is not set
310 315
311# 316#
@@ -344,6 +349,7 @@ CONFIG_IPV6=y
344# Network testing 349# Network testing
345# 350#
346# CONFIG_NET_PKTGEN is not set 351# CONFIG_NET_PKTGEN is not set
352# CONFIG_NET_TCPPROBE is not set
347# CONFIG_HAMRADIO is not set 353# CONFIG_HAMRADIO is not set
348# CONFIG_IRDA is not set 354# CONFIG_IRDA is not set
349# CONFIG_BT is not set 355# CONFIG_BT is not set
@@ -360,6 +366,7 @@ CONFIG_STANDALONE=y
360CONFIG_PREVENT_FIRMWARE_BUILD=y 366CONFIG_PREVENT_FIRMWARE_BUILD=y
361CONFIG_FW_LOADER=y 367CONFIG_FW_LOADER=y
362# CONFIG_DEBUG_DRIVER is not set 368# CONFIG_DEBUG_DRIVER is not set
369# CONFIG_SYS_HYPERVISOR is not set
363 370
364# 371#
365# Connector - unified userspace <-> kernelspace linker 372# Connector - unified userspace <-> kernelspace linker
@@ -526,6 +533,7 @@ CONFIG_SCSI_ATA_PIIX=y
526# CONFIG_SCSI_SATA_MV is not set 533# CONFIG_SCSI_SATA_MV is not set
527CONFIG_SCSI_SATA_NV=y 534CONFIG_SCSI_SATA_NV=y
528# CONFIG_SCSI_PDC_ADMA is not set 535# CONFIG_SCSI_PDC_ADMA is not set
536# CONFIG_SCSI_HPTIOP is not set
529# CONFIG_SCSI_SATA_QSTOR is not set 537# CONFIG_SCSI_SATA_QSTOR is not set
530# CONFIG_SCSI_SATA_PROMISE is not set 538# CONFIG_SCSI_SATA_PROMISE is not set
531# CONFIG_SCSI_SATA_SX4 is not set 539# CONFIG_SCSI_SATA_SX4 is not set
@@ -591,10 +599,7 @@ CONFIG_IEEE1394=y
591# 599#
592# Device Drivers 600# Device Drivers
593# 601#
594 602# CONFIG_IEEE1394_PCILYNX is not set
595#
596# Texas Instruments PCILynx requires I2C
597#
598CONFIG_IEEE1394_OHCI1394=y 603CONFIG_IEEE1394_OHCI1394=y
599 604
600# 605#
@@ -645,7 +650,16 @@ CONFIG_VORTEX=y
645# 650#
646# Tulip family network device support 651# Tulip family network device support
647# 652#
648# CONFIG_NET_TULIP is not set 653CONFIG_NET_TULIP=y
654# CONFIG_DE2104X is not set
655CONFIG_TULIP=y
656# CONFIG_TULIP_MWI is not set
657# CONFIG_TULIP_MMIO is not set
658# CONFIG_TULIP_NAPI is not set
659# CONFIG_DE4X5 is not set
660# CONFIG_WINBOND_840 is not set
661# CONFIG_DM9102 is not set
662# CONFIG_ULI526X is not set
649# CONFIG_HP100 is not set 663# CONFIG_HP100 is not set
650CONFIG_NET_PCI=y 664CONFIG_NET_PCI=y
651# CONFIG_PCNET32 is not set 665# CONFIG_PCNET32 is not set
@@ -697,6 +711,7 @@ CONFIG_TIGON3=y
697# CONFIG_IXGB is not set 711# CONFIG_IXGB is not set
698CONFIG_S2IO=m 712CONFIG_S2IO=m
699# CONFIG_S2IO_NAPI is not set 713# CONFIG_S2IO_NAPI is not set
714# CONFIG_MYRI10GE is not set
700 715
701# 716#
702# Token Ring devices 717# Token Ring devices
@@ -887,7 +902,56 @@ CONFIG_HPET_MMAP=y
887# 902#
888# I2C support 903# I2C support
889# 904#
890# CONFIG_I2C is not set 905CONFIG_I2C=m
906CONFIG_I2C_CHARDEV=m
907
908#
909# I2C Algorithms
910#
911# CONFIG_I2C_ALGOBIT is not set
912# CONFIG_I2C_ALGOPCF is not set
913# CONFIG_I2C_ALGOPCA is not set
914
915#
916# I2C Hardware Bus support
917#
918# CONFIG_I2C_ALI1535 is not set
919# CONFIG_I2C_ALI1563 is not set
920# CONFIG_I2C_ALI15X3 is not set
921# CONFIG_I2C_AMD756 is not set
922# CONFIG_I2C_AMD8111 is not set
923# CONFIG_I2C_I801 is not set
924# CONFIG_I2C_I810 is not set
925# CONFIG_I2C_PIIX4 is not set
926CONFIG_I2C_ISA=m
927# CONFIG_I2C_NFORCE2 is not set
928# CONFIG_I2C_OCORES is not set
929# CONFIG_I2C_PARPORT_LIGHT is not set
930# CONFIG_I2C_PROSAVAGE is not set
931# CONFIG_I2C_SAVAGE4 is not set
932# CONFIG_I2C_SIS5595 is not set
933# CONFIG_I2C_SIS630 is not set
934# CONFIG_I2C_SIS96X is not set
935# CONFIG_I2C_STUB is not set
936# CONFIG_I2C_VIA is not set
937# CONFIG_I2C_VIAPRO is not set
938# CONFIG_I2C_VOODOO3 is not set
939# CONFIG_I2C_PCA_ISA is not set
940
941#
942# Miscellaneous I2C Chip support
943#
944# CONFIG_SENSORS_DS1337 is not set
945# CONFIG_SENSORS_DS1374 is not set
946# CONFIG_SENSORS_EEPROM is not set
947# CONFIG_SENSORS_PCF8574 is not set
948# CONFIG_SENSORS_PCA9539 is not set
949# CONFIG_SENSORS_PCF8591 is not set
950# CONFIG_SENSORS_MAX6875 is not set
951# CONFIG_I2C_DEBUG_CORE is not set
952# CONFIG_I2C_DEBUG_ALGO is not set
953# CONFIG_I2C_DEBUG_BUS is not set
954# CONFIG_I2C_DEBUG_CHIP is not set
891 955
892# 956#
893# SPI support 957# SPI support
@@ -898,14 +962,51 @@ CONFIG_HPET_MMAP=y
898# 962#
899# Dallas's 1-wire bus 963# Dallas's 1-wire bus
900# 964#
901# CONFIG_W1 is not set
902 965
903# 966#
904# Hardware Monitoring support 967# Hardware Monitoring support
905# 968#
906CONFIG_HWMON=y 969CONFIG_HWMON=y
907# CONFIG_HWMON_VID is not set 970# CONFIG_HWMON_VID is not set
971# CONFIG_SENSORS_ABITUGURU is not set
972# CONFIG_SENSORS_ADM1021 is not set
973# CONFIG_SENSORS_ADM1025 is not set
974# CONFIG_SENSORS_ADM1026 is not set
975# CONFIG_SENSORS_ADM1031 is not set
976# CONFIG_SENSORS_ADM9240 is not set
977# CONFIG_SENSORS_ASB100 is not set
978# CONFIG_SENSORS_ATXP1 is not set
979# CONFIG_SENSORS_DS1621 is not set
908# CONFIG_SENSORS_F71805F is not set 980# CONFIG_SENSORS_F71805F is not set
981# CONFIG_SENSORS_FSCHER is not set
982# CONFIG_SENSORS_FSCPOS is not set
983# CONFIG_SENSORS_GL518SM is not set
984# CONFIG_SENSORS_GL520SM is not set
985# CONFIG_SENSORS_IT87 is not set
986# CONFIG_SENSORS_LM63 is not set
987# CONFIG_SENSORS_LM75 is not set
988# CONFIG_SENSORS_LM77 is not set
989# CONFIG_SENSORS_LM78 is not set
990# CONFIG_SENSORS_LM80 is not set
991# CONFIG_SENSORS_LM83 is not set
992# CONFIG_SENSORS_LM85 is not set
993# CONFIG_SENSORS_LM87 is not set
994# CONFIG_SENSORS_LM90 is not set
995# CONFIG_SENSORS_LM92 is not set
996# CONFIG_SENSORS_MAX1619 is not set
997# CONFIG_SENSORS_PC87360 is not set
998# CONFIG_SENSORS_SIS5595 is not set
999# CONFIG_SENSORS_SMSC47M1 is not set
1000# CONFIG_SENSORS_SMSC47M192 is not set
1001CONFIG_SENSORS_SMSC47B397=m
1002# CONFIG_SENSORS_VIA686A is not set
1003# CONFIG_SENSORS_VT8231 is not set
1004# CONFIG_SENSORS_W83781D is not set
1005# CONFIG_SENSORS_W83791D is not set
1006# CONFIG_SENSORS_W83792D is not set
1007# CONFIG_SENSORS_W83L785TS is not set
1008# CONFIG_SENSORS_W83627HF is not set
1009# CONFIG_SENSORS_W83627EHF is not set
909# CONFIG_SENSORS_HDAPS is not set 1010# CONFIG_SENSORS_HDAPS is not set
910# CONFIG_HWMON_DEBUG_CHIP is not set 1011# CONFIG_HWMON_DEBUG_CHIP is not set
911 1012
@@ -918,6 +1019,7 @@ CONFIG_HWMON=y
918# Multimedia devices 1019# Multimedia devices
919# 1020#
920# CONFIG_VIDEO_DEV is not set 1021# CONFIG_VIDEO_DEV is not set
1022CONFIG_VIDEO_V4L2=y
921 1023
922# 1024#
923# Digital Video Broadcasting Devices 1025# Digital Video Broadcasting Devices
@@ -953,28 +1055,17 @@ CONFIG_SOUND=y
953# Open Sound System 1055# Open Sound System
954# 1056#
955CONFIG_SOUND_PRIME=y 1057CONFIG_SOUND_PRIME=y
956CONFIG_OBSOLETE_OSS_DRIVER=y
957# CONFIG_SOUND_BT878 is not set 1058# CONFIG_SOUND_BT878 is not set
958# CONFIG_SOUND_CMPCI is not set
959# CONFIG_SOUND_EMU10K1 is not set 1059# CONFIG_SOUND_EMU10K1 is not set
960# CONFIG_SOUND_FUSION is not set 1060# CONFIG_SOUND_FUSION is not set
961# CONFIG_SOUND_CS4281 is not set
962# CONFIG_SOUND_ES1370 is not set
963# CONFIG_SOUND_ES1371 is not set 1061# CONFIG_SOUND_ES1371 is not set
964# CONFIG_SOUND_ESSSOLO1 is not set
965# CONFIG_SOUND_MAESTRO is not set
966# CONFIG_SOUND_MAESTRO3 is not set
967CONFIG_SOUND_ICH=y 1062CONFIG_SOUND_ICH=y
968# CONFIG_SOUND_SONICVIBES is not set
969# CONFIG_SOUND_TRIDENT is not set 1063# CONFIG_SOUND_TRIDENT is not set
970# CONFIG_SOUND_MSNDCLAS is not set 1064# CONFIG_SOUND_MSNDCLAS is not set
971# CONFIG_SOUND_MSNDPIN is not set 1065# CONFIG_SOUND_MSNDPIN is not set
972# CONFIG_SOUND_VIA82CXXX is not set 1066# CONFIG_SOUND_VIA82CXXX is not set
973# CONFIG_SOUND_OSS is not set 1067# CONFIG_SOUND_OSS is not set
974# CONFIG_SOUND_ALI5455 is not set 1068# CONFIG_SOUND_TVMIXER is not set
975# CONFIG_SOUND_FORTE is not set
976# CONFIG_SOUND_RME96XX is not set
977# CONFIG_SOUND_AD1980 is not set
978 1069
979# 1070#
980# USB support 1071# USB support
@@ -1000,6 +1091,7 @@ CONFIG_USB_DEVICEFS=y
1000CONFIG_USB_EHCI_HCD=y 1091CONFIG_USB_EHCI_HCD=y
1001# CONFIG_USB_EHCI_SPLIT_ISO is not set 1092# CONFIG_USB_EHCI_SPLIT_ISO is not set
1002# CONFIG_USB_EHCI_ROOT_HUB_TT is not set 1093# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
1094# CONFIG_USB_EHCI_TT_NEWSCHED is not set
1003# CONFIG_USB_ISP116X_HCD is not set 1095# CONFIG_USB_ISP116X_HCD is not set
1004CONFIG_USB_OHCI_HCD=y 1096CONFIG_USB_OHCI_HCD=y
1005# CONFIG_USB_OHCI_BIG_ENDIAN is not set 1097# CONFIG_USB_OHCI_BIG_ENDIAN is not set
@@ -1089,10 +1181,12 @@ CONFIG_USB_MON=y
1089# CONFIG_USB_LEGOTOWER is not set 1181# CONFIG_USB_LEGOTOWER is not set
1090# CONFIG_USB_LCD is not set 1182# CONFIG_USB_LCD is not set
1091# CONFIG_USB_LED is not set 1183# CONFIG_USB_LED is not set
1184# CONFIG_USB_CY7C63 is not set
1092# CONFIG_USB_CYTHERM is not set 1185# CONFIG_USB_CYTHERM is not set
1093# CONFIG_USB_PHIDGETKIT is not set 1186# CONFIG_USB_PHIDGETKIT is not set
1094# CONFIG_USB_PHIDGETSERVO is not set 1187# CONFIG_USB_PHIDGETSERVO is not set
1095# CONFIG_USB_IDMOUSE is not set 1188# CONFIG_USB_IDMOUSE is not set
1189# CONFIG_USB_APPLEDISPLAY is not set
1096# CONFIG_USB_SISUSBVGA is not set 1190# CONFIG_USB_SISUSBVGA is not set
1097# CONFIG_USB_LD is not set 1191# CONFIG_USB_LD is not set
1098# CONFIG_USB_TEST is not set 1192# CONFIG_USB_TEST is not set
@@ -1141,6 +1235,19 @@ CONFIG_USB_MON=y
1141# CONFIG_RTC_CLASS is not set 1235# CONFIG_RTC_CLASS is not set
1142 1236
1143# 1237#
1238# DMA Engine support
1239#
1240# CONFIG_DMA_ENGINE is not set
1241
1242#
1243# DMA Clients
1244#
1245
1246#
1247# DMA Devices
1248#
1249
1250#
1144# Firmware Drivers 1251# Firmware Drivers
1145# 1252#
1146# CONFIG_EDD is not set 1253# CONFIG_EDD is not set
@@ -1175,6 +1282,7 @@ CONFIG_FS_POSIX_ACL=y
1175# CONFIG_MINIX_FS is not set 1282# CONFIG_MINIX_FS is not set
1176# CONFIG_ROMFS_FS is not set 1283# CONFIG_ROMFS_FS is not set
1177CONFIG_INOTIFY=y 1284CONFIG_INOTIFY=y
1285CONFIG_INOTIFY_USER=y
1178# CONFIG_QUOTA is not set 1286# CONFIG_QUOTA is not set
1179CONFIG_DNOTIFY=y 1287CONFIG_DNOTIFY=y
1180CONFIG_AUTOFS_FS=y 1288CONFIG_AUTOFS_FS=y
@@ -1331,7 +1439,8 @@ CONFIG_DETECT_SOFTLOCKUP=y
1331CONFIG_DEBUG_FS=y 1439CONFIG_DEBUG_FS=y
1332# CONFIG_DEBUG_VM is not set 1440# CONFIG_DEBUG_VM is not set
1333# CONFIG_FRAME_POINTER is not set 1441# CONFIG_FRAME_POINTER is not set
1334# CONFIG_UNWIND_INFO is not set 1442CONFIG_UNWIND_INFO=y
1443CONFIG_STACK_UNWIND=y
1335# CONFIG_FORCED_INLINING is not set 1444# CONFIG_FORCED_INLINING is not set
1336# CONFIG_RCU_TORTURE_TEST is not set 1445# CONFIG_RCU_TORTURE_TEST is not set
1337# CONFIG_DEBUG_RODATA is not set 1446# CONFIG_DEBUG_RODATA is not set
diff --git a/arch/x86_64/ia32/fpu32.c b/arch/x86_64/ia32/fpu32.c
index 1c23095f18..2c8209a360 100644
--- a/arch/x86_64/ia32/fpu32.c
+++ b/arch/x86_64/ia32/fpu32.c
@@ -2,7 +2,6 @@
2 * Copyright 2002 Andi Kleen, SuSE Labs. 2 * Copyright 2002 Andi Kleen, SuSE Labs.
3 * FXSAVE<->i387 conversion support. Based on code by Gareth Hughes. 3 * FXSAVE<->i387 conversion support. Based on code by Gareth Hughes.
4 * This is used for ptrace, signals and coredumps in 32bit emulation. 4 * This is used for ptrace, signals and coredumps in 32bit emulation.
5 * $Id: fpu32.c,v 1.1 2002/03/21 14:16:32 ak Exp $
6 */ 5 */
7 6
8#include <linux/sched.h> 7#include <linux/sched.h>
diff --git a/arch/x86_64/ia32/ia32_signal.c b/arch/x86_64/ia32/ia32_signal.c
index e0a92439f6..25e5ca2220 100644
--- a/arch/x86_64/ia32/ia32_signal.c
+++ b/arch/x86_64/ia32/ia32_signal.c
@@ -6,8 +6,6 @@
6 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson 6 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
7 * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes 7 * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes
8 * 2000-12-* x86-64 compatibility mode signal handling by Andi Kleen 8 * 2000-12-* x86-64 compatibility mode signal handling by Andi Kleen
9 *
10 * $Id: ia32_signal.c,v 1.22 2002/07/29 10:34:03 ak Exp $
11 */ 9 */
12 10
13#include <linux/sched.h> 11#include <linux/sched.h>
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index 4ec594ab1a..c536fa98ea 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -155,6 +155,7 @@ sysenter_tracesys:
155 .previous 155 .previous
156 jmp sysenter_do_call 156 jmp sysenter_do_call
157 CFI_ENDPROC 157 CFI_ENDPROC
158ENDPROC(ia32_sysenter_target)
158 159
159/* 160/*
160 * 32bit SYSCALL instruction entry. 161 * 32bit SYSCALL instruction entry.
@@ -178,7 +179,7 @@ sysenter_tracesys:
178 */ 179 */
179ENTRY(ia32_cstar_target) 180ENTRY(ia32_cstar_target)
180 CFI_STARTPROC32 simple 181 CFI_STARTPROC32 simple
181 CFI_DEF_CFA rsp,0 182 CFI_DEF_CFA rsp,PDA_STACKOFFSET
182 CFI_REGISTER rip,rcx 183 CFI_REGISTER rip,rcx
183 /*CFI_REGISTER rflags,r11*/ 184 /*CFI_REGISTER rflags,r11*/
184 swapgs 185 swapgs
@@ -249,6 +250,7 @@ cstar_tracesys:
249 .quad 1b,ia32_badarg 250 .quad 1b,ia32_badarg
250 .previous 251 .previous
251 jmp cstar_do_call 252 jmp cstar_do_call
253END(ia32_cstar_target)
252 254
253ia32_badarg: 255ia32_badarg:
254 movq $-EFAULT,%rax 256 movq $-EFAULT,%rax
@@ -314,16 +316,13 @@ ia32_tracesys:
314 LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */ 316 LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
315 RESTORE_REST 317 RESTORE_REST
316 jmp ia32_do_syscall 318 jmp ia32_do_syscall
319END(ia32_syscall)
317 320
318ia32_badsys: 321ia32_badsys:
319 movq $0,ORIG_RAX-ARGOFFSET(%rsp) 322 movq $0,ORIG_RAX-ARGOFFSET(%rsp)
320 movq $-ENOSYS,RAX-ARGOFFSET(%rsp) 323 movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
321 jmp int_ret_from_sys_call 324 jmp int_ret_from_sys_call
322 325
323ni_syscall:
324 movq %rax,%rdi
325 jmp sys32_ni_syscall
326
327quiet_ni_syscall: 326quiet_ni_syscall:
328 movq $-ENOSYS,%rax 327 movq $-ENOSYS,%rax
329 ret 328 ret
@@ -370,10 +369,10 @@ ENTRY(ia32_ptregs_common)
370 RESTORE_REST 369 RESTORE_REST
371 jmp ia32_sysret /* misbalances the return cache */ 370 jmp ia32_sysret /* misbalances the return cache */
372 CFI_ENDPROC 371 CFI_ENDPROC
372END(ia32_ptregs_common)
373 373
374 .section .rodata,"a" 374 .section .rodata,"a"
375 .align 8 375 .align 8
376 .globl ia32_sys_call_table
377ia32_sys_call_table: 376ia32_sys_call_table:
378 .quad sys_restart_syscall 377 .quad sys_restart_syscall
379 .quad sys_exit 378 .quad sys_exit
diff --git a/arch/x86_64/ia32/ptrace32.c b/arch/x86_64/ia32/ptrace32.c
index 23a4515a73..a590b7a0d9 100644
--- a/arch/x86_64/ia32/ptrace32.c
+++ b/arch/x86_64/ia32/ptrace32.c
@@ -7,8 +7,6 @@
7 * 7 *
8 * This allows to access 64bit processes too; but there is no way to see the extended 8 * This allows to access 64bit processes too; but there is no way to see the extended
9 * register contents. 9 * register contents.
10 *
11 * $Id: ptrace32.c,v 1.16 2003/03/14 16:06:35 ak Exp $
12 */ 10 */
13 11
14#include <linux/kernel.h> 12#include <linux/kernel.h>
@@ -27,6 +25,7 @@
27#include <asm/debugreg.h> 25#include <asm/debugreg.h>
28#include <asm/i387.h> 26#include <asm/i387.h>
29#include <asm/fpu32.h> 27#include <asm/fpu32.h>
28#include <asm/ia32.h>
30 29
31/* 30/*
32 * Determines which flags the user has access to [1 = access, 0 = no access]. 31 * Determines which flags the user has access to [1 = access, 0 = no access].
@@ -199,6 +198,24 @@ static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
199 198
200#undef R32 199#undef R32
201 200
201static long ptrace32_siginfo(unsigned request, u32 pid, u32 addr, u32 data)
202{
203 int ret;
204 compat_siginfo_t *si32 = (compat_siginfo_t *)compat_ptr(data);
205 siginfo_t *si = compat_alloc_user_space(sizeof(siginfo_t));
206 if (request == PTRACE_SETSIGINFO) {
207 ret = copy_siginfo_from_user32(si, si32);
208 if (ret)
209 return ret;
210 }
211 ret = sys_ptrace(request, pid, addr, (unsigned long)si);
212 if (ret)
213 return ret;
214 if (request == PTRACE_GETSIGINFO)
215 ret = copy_siginfo_to_user32(si32, si);
216 return ret;
217}
218
202asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data) 219asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
203{ 220{
204 struct task_struct *child; 221 struct task_struct *child;
@@ -208,9 +225,19 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
208 __u32 val; 225 __u32 val;
209 226
210 switch (request) { 227 switch (request) {
211 default: 228 case PTRACE_TRACEME:
229 case PTRACE_ATTACH:
230 case PTRACE_KILL:
231 case PTRACE_CONT:
232 case PTRACE_SINGLESTEP:
233 case PTRACE_DETACH:
234 case PTRACE_SYSCALL:
235 case PTRACE_SETOPTIONS:
212 return sys_ptrace(request, pid, addr, data); 236 return sys_ptrace(request, pid, addr, data);
213 237
238 default:
239 return -EINVAL;
240
214 case PTRACE_PEEKTEXT: 241 case PTRACE_PEEKTEXT:
215 case PTRACE_PEEKDATA: 242 case PTRACE_PEEKDATA:
216 case PTRACE_POKEDATA: 243 case PTRACE_POKEDATA:
@@ -225,10 +252,11 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
225 case PTRACE_GETFPXREGS: 252 case PTRACE_GETFPXREGS:
226 case PTRACE_GETEVENTMSG: 253 case PTRACE_GETEVENTMSG:
227 break; 254 break;
228 }
229 255
230 if (request == PTRACE_TRACEME) 256 case PTRACE_SETSIGINFO:
231 return ptrace_traceme(); 257 case PTRACE_GETSIGINFO:
258 return ptrace32_siginfo(request, pid, addr, data);
259 }
232 260
233 child = ptrace_get_task_struct(pid); 261 child = ptrace_get_task_struct(pid);
234 if (IS_ERR(child)) 262 if (IS_ERR(child))
@@ -349,8 +377,7 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
349 break; 377 break;
350 378
351 default: 379 default:
352 ret = -EINVAL; 380 BUG();
353 break;
354 } 381 }
355 382
356 out: 383 out:
diff --git a/arch/x86_64/ia32/sys_ia32.c b/arch/x86_64/ia32/sys_ia32.c
index f182b20858..dc88154c41 100644
--- a/arch/x86_64/ia32/sys_ia32.c
+++ b/arch/x86_64/ia32/sys_ia32.c
@@ -508,19 +508,6 @@ sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr, int options)
508 return compat_sys_wait4(pid, stat_addr, options, NULL); 508 return compat_sys_wait4(pid, stat_addr, options, NULL);
509} 509}
510 510
511int sys32_ni_syscall(int call)
512{
513 struct task_struct *me = current;
514 static char lastcomm[sizeof(me->comm)];
515
516 if (strncmp(lastcomm, me->comm, sizeof(lastcomm))) {
517 printk(KERN_INFO "IA32 syscall %d from %s not implemented\n",
518 call, me->comm);
519 strncpy(lastcomm, me->comm, sizeof(lastcomm));
520 }
521 return -ENOSYS;
522}
523
524/* 32-bit timeval and related flotsam. */ 511/* 32-bit timeval and related flotsam. */
525 512
526asmlinkage long 513asmlinkage long
@@ -916,7 +903,7 @@ long sys32_vm86_warning(void)
916 struct task_struct *me = current; 903 struct task_struct *me = current;
917 static char lastcomm[sizeof(me->comm)]; 904 static char lastcomm[sizeof(me->comm)];
918 if (strncmp(lastcomm, me->comm, sizeof(lastcomm))) { 905 if (strncmp(lastcomm, me->comm, sizeof(lastcomm))) {
919 printk(KERN_INFO "%s: vm86 mode not supported on 64 bit kernel\n", 906 compat_printk(KERN_INFO "%s: vm86 mode not supported on 64 bit kernel\n",
920 me->comm); 907 me->comm);
921 strncpy(lastcomm, me->comm, sizeof(lastcomm)); 908 strncpy(lastcomm, me->comm, sizeof(lastcomm));
922 } 909 }
@@ -929,13 +916,3 @@ long sys32_lookup_dcookie(u32 addr_low, u32 addr_high,
929 return sys_lookup_dcookie(((u64)addr_high << 32) | addr_low, buf, len); 916 return sys_lookup_dcookie(((u64)addr_high << 32) | addr_low, buf, len);
930} 917}
931 918
932static int __init ia32_init (void)
933{
934 printk("IA32 emulation $Id: sys_ia32.c,v 1.32 2002/03/24 13:02:28 ak Exp $\n");
935 return 0;
936}
937
938__initcall(ia32_init);
939
940extern unsigned long ia32_sys_call_table[];
941EXPORT_SYMBOL(ia32_sys_call_table);
diff --git a/arch/x86_64/kernel/Makefile b/arch/x86_64/kernel/Makefile
index 059c88313f..aeb9c560be 100644
--- a/arch/x86_64/kernel/Makefile
+++ b/arch/x86_64/kernel/Makefile
@@ -8,7 +8,7 @@ obj-y := process.o signal.o entry.o traps.o irq.o \
8 ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_x86_64.o \ 8 ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_x86_64.o \
9 x8664_ksyms.o i387.o syscall.o vsyscall.o \ 9 x8664_ksyms.o i387.o syscall.o vsyscall.o \
10 setup64.o bootflag.o e820.o reboot.o quirks.o i8237.o \ 10 setup64.o bootflag.o e820.o reboot.o quirks.o i8237.o \
11 pci-dma.o pci-nommu.o 11 pci-dma.o pci-nommu.o alternative.o
12 12
13obj-$(CONFIG_X86_MCE) += mce.o 13obj-$(CONFIG_X86_MCE) += mce.o
14obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o 14obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o
@@ -28,11 +28,13 @@ obj-$(CONFIG_PM) += suspend.o
28obj-$(CONFIG_SOFTWARE_SUSPEND) += suspend_asm.o 28obj-$(CONFIG_SOFTWARE_SUSPEND) += suspend_asm.o
29obj-$(CONFIG_CPU_FREQ) += cpufreq/ 29obj-$(CONFIG_CPU_FREQ) += cpufreq/
30obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 30obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
31obj-$(CONFIG_GART_IOMMU) += pci-gart.o aperture.o 31obj-$(CONFIG_IOMMU) += pci-gart.o aperture.o
32obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary.o tce.o
32obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o 33obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
33obj-$(CONFIG_KPROBES) += kprobes.o 34obj-$(CONFIG_KPROBES) += kprobes.o
34obj-$(CONFIG_X86_PM_TIMER) += pmtimer.o 35obj-$(CONFIG_X86_PM_TIMER) += pmtimer.o
35obj-$(CONFIG_X86_VSMP) += vsmp.o 36obj-$(CONFIG_X86_VSMP) += vsmp.o
37obj-$(CONFIG_K8_NB) += k8.o
36 38
37obj-$(CONFIG_MODULES) += module.o 39obj-$(CONFIG_MODULES) += module.o
38 40
@@ -49,3 +51,5 @@ intel_cacheinfo-y += ../../i386/kernel/cpu/intel_cacheinfo.o
49quirks-y += ../../i386/kernel/quirks.o 51quirks-y += ../../i386/kernel/quirks.o
50i8237-y += ../../i386/kernel/i8237.o 52i8237-y += ../../i386/kernel/i8237.o
51msr-$(subst m,y,$(CONFIG_X86_MSR)) += ../../i386/kernel/msr.o 53msr-$(subst m,y,$(CONFIG_X86_MSR)) += ../../i386/kernel/msr.o
54alternative-y += ../../i386/kernel/alternative.o
55
diff --git a/arch/x86_64/kernel/aperture.c b/arch/x86_64/kernel/aperture.c
index 70b9d21ed6..a195ef06ec 100644
--- a/arch/x86_64/kernel/aperture.c
+++ b/arch/x86_64/kernel/aperture.c
@@ -8,7 +8,6 @@
8 * because only the bootmem allocator can allocate 32+MB. 8 * because only the bootmem allocator can allocate 32+MB.
9 * 9 *
10 * Copyright 2002 Andi Kleen, SuSE Labs. 10 * Copyright 2002 Andi Kleen, SuSE Labs.
11 * $Id: aperture.c,v 1.7 2003/08/01 03:36:18 ak Exp $
12 */ 11 */
13#include <linux/config.h> 12#include <linux/config.h>
14#include <linux/kernel.h> 13#include <linux/kernel.h>
@@ -24,6 +23,7 @@
24#include <asm/proto.h> 23#include <asm/proto.h>
25#include <asm/pci-direct.h> 24#include <asm/pci-direct.h>
26#include <asm/dma.h> 25#include <asm/dma.h>
26#include <asm/k8.h>
27 27
28int iommu_aperture; 28int iommu_aperture;
29int iommu_aperture_disabled __initdata = 0; 29int iommu_aperture_disabled __initdata = 0;
@@ -37,8 +37,6 @@ int fix_aperture __initdata = 1;
37/* This code runs before the PCI subsystem is initialized, so just 37/* This code runs before the PCI subsystem is initialized, so just
38 access the northbridge directly. */ 38 access the northbridge directly. */
39 39
40#define NB_ID_3 (PCI_VENDOR_ID_AMD | (0x1103<<16))
41
42static u32 __init allocate_aperture(void) 40static u32 __init allocate_aperture(void)
43{ 41{
44 pg_data_t *nd0 = NODE_DATA(0); 42 pg_data_t *nd0 = NODE_DATA(0);
@@ -68,20 +66,20 @@ static u32 __init allocate_aperture(void)
68 return (u32)__pa(p); 66 return (u32)__pa(p);
69} 67}
70 68
71static int __init aperture_valid(char *name, u64 aper_base, u32 aper_size) 69static int __init aperture_valid(u64 aper_base, u32 aper_size)
72{ 70{
73 if (!aper_base) 71 if (!aper_base)
74 return 0; 72 return 0;
75 if (aper_size < 64*1024*1024) { 73 if (aper_size < 64*1024*1024) {
76 printk("Aperture from %s too small (%d MB)\n", name, aper_size>>20); 74 printk("Aperture too small (%d MB)\n", aper_size>>20);
77 return 0; 75 return 0;
78 } 76 }
79 if (aper_base + aper_size >= 0xffffffff) { 77 if (aper_base + aper_size >= 0xffffffff) {
80 printk("Aperture from %s beyond 4GB. Ignoring.\n",name); 78 printk("Aperture beyond 4GB. Ignoring.\n");
81 return 0; 79 return 0;
82 } 80 }
83 if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) { 81 if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) {
84 printk("Aperture from %s pointing to e820 RAM. Ignoring.\n",name); 82 printk("Aperture pointing to e820 RAM. Ignoring.\n");
85 return 0; 83 return 0;
86 } 84 }
87 return 1; 85 return 1;
@@ -140,7 +138,7 @@ static __u32 __init read_agp(int num, int slot, int func, int cap, u32 *order)
140 printk("Aperture from AGP @ %Lx size %u MB (APSIZE %x)\n", 138 printk("Aperture from AGP @ %Lx size %u MB (APSIZE %x)\n",
141 aper, 32 << *order, apsizereg); 139 aper, 32 << *order, apsizereg);
142 140
143 if (!aperture_valid("AGP bridge", aper, (32*1024*1024) << *order)) 141 if (!aperture_valid(aper, (32*1024*1024) << *order))
144 return 0; 142 return 0;
145 return (u32)aper; 143 return (u32)aper;
146} 144}
@@ -208,10 +206,10 @@ void __init iommu_hole_init(void)
208 206
209 fix = 0; 207 fix = 0;
210 for (num = 24; num < 32; num++) { 208 for (num = 24; num < 32; num++) {
211 char name[30]; 209 if (!early_is_k8_nb(read_pci_config(0, num, 3, 0x00)))
212 if (read_pci_config(0, num, 3, 0x00) != NB_ID_3) 210 continue;
213 continue;
214 211
212 iommu_detected = 1;
215 iommu_aperture = 1; 213 iommu_aperture = 1;
216 214
217 aper_order = (read_pci_config(0, num, 3, 0x90) >> 1) & 7; 215 aper_order = (read_pci_config(0, num, 3, 0x90) >> 1) & 7;
@@ -222,9 +220,7 @@ void __init iommu_hole_init(void)
222 printk("CPU %d: aperture @ %Lx size %u MB\n", num-24, 220 printk("CPU %d: aperture @ %Lx size %u MB\n", num-24,
223 aper_base, aper_size>>20); 221 aper_base, aper_size>>20);
224 222
225 sprintf(name, "northbridge cpu %d", num-24); 223 if (!aperture_valid(aper_base, aper_size)) {
226
227 if (!aperture_valid(name, aper_base, aper_size)) {
228 fix = 1; 224 fix = 1;
229 break; 225 break;
230 } 226 }
@@ -273,7 +269,7 @@ void __init iommu_hole_init(void)
273 269
274 /* Fix up the north bridges */ 270 /* Fix up the north bridges */
275 for (num = 24; num < 32; num++) { 271 for (num = 24; num < 32; num++) {
276 if (read_pci_config(0, num, 3, 0x00) != NB_ID_3) 272 if (!early_is_k8_nb(read_pci_config(0, num, 3, 0x00)))
277 continue; 273 continue;
278 274
279 /* Don't enable translation yet. That is done later. 275 /* Don't enable translation yet. That is done later.
diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c
index 29ef99001e..b2ead91df2 100644
--- a/arch/x86_64/kernel/apic.c
+++ b/arch/x86_64/kernel/apic.c
@@ -100,7 +100,7 @@ void clear_local_APIC(void)
100 maxlvt = get_maxlvt(); 100 maxlvt = get_maxlvt();
101 101
102 /* 102 /*
103 * Masking an LVT entry on a P6 can trigger a local APIC error 103 * Masking an LVT entry can trigger a local APIC error
104 * if the vector is zero. Mask LVTERR first to prevent this. 104 * if the vector is zero. Mask LVTERR first to prevent this.
105 */ 105 */
106 if (maxlvt >= 3) { 106 if (maxlvt >= 3) {
@@ -851,7 +851,18 @@ void disable_APIC_timer(void)
851 unsigned long v; 851 unsigned long v;
852 852
853 v = apic_read(APIC_LVTT); 853 v = apic_read(APIC_LVTT);
854 apic_write(APIC_LVTT, v | APIC_LVT_MASKED); 854 /*
855 * When an illegal vector value (0-15) is written to an LVT
856 * entry and delivery mode is Fixed, the APIC may signal an
857 * illegal vector error, with out regard to whether the mask
858 * bit is set or whether an interrupt is actually seen on input.
859 *
860 * Boot sequence might call this function when the LVTT has
861 * '0' vector value. So make sure vector field is set to
862 * valid value.
863 */
864 v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
865 apic_write(APIC_LVTT, v);
855 } 866 }
856} 867}
857 868
@@ -909,15 +920,13 @@ int setup_profiling_timer(unsigned int multiplier)
909 return -EINVAL; 920 return -EINVAL;
910} 921}
911 922
912#ifdef CONFIG_X86_MCE_AMD 923void setup_APIC_extened_lvt(unsigned char lvt_off, unsigned char vector,
913void setup_threshold_lvt(unsigned long lvt_off) 924 unsigned char msg_type, unsigned char mask)
914{ 925{
915 unsigned int v = 0; 926 unsigned long reg = (lvt_off << 4) + K8_APIC_EXT_LVT_BASE;
916 unsigned long reg = (lvt_off << 4) + 0x500; 927 unsigned int v = (mask << 16) | (msg_type << 8) | vector;
917 v |= THRESHOLD_APIC_VECTOR;
918 apic_write(reg, v); 928 apic_write(reg, v);
919} 929}
920#endif /* CONFIG_X86_MCE_AMD */
921 930
922#undef APIC_DIVISOR 931#undef APIC_DIVISOR
923 932
@@ -983,7 +992,7 @@ void smp_apic_timer_interrupt(struct pt_regs *regs)
983} 992}
984 993
985/* 994/*
986 * oem_force_hpet_timer -- force HPET mode for some boxes. 995 * apic_is_clustered_box() -- Check if we can expect good TSC
987 * 996 *
988 * Thus far, the major user of this is IBM's Summit2 series: 997 * Thus far, the major user of this is IBM's Summit2 series:
989 * 998 *
@@ -991,7 +1000,7 @@ void smp_apic_timer_interrupt(struct pt_regs *regs)
991 * multi-chassis. Use available data to take a good guess. 1000 * multi-chassis. Use available data to take a good guess.
992 * If in doubt, go HPET. 1001 * If in doubt, go HPET.
993 */ 1002 */
994__cpuinit int oem_force_hpet_timer(void) 1003__cpuinit int apic_is_clustered_box(void)
995{ 1004{
996 int i, clusters, zeros; 1005 int i, clusters, zeros;
997 unsigned id; 1006 unsigned id;
@@ -1022,8 +1031,7 @@ __cpuinit int oem_force_hpet_timer(void)
1022 } 1031 }
1023 1032
1024 /* 1033 /*
1025 * If clusters > 2, then should be multi-chassis. Return 1 for HPET. 1034 * If clusters > 2, then should be multi-chassis.
1026 * Else return 0 to use TSC.
1027 * May have to revisit this when multi-core + hyperthreaded CPUs come 1035 * May have to revisit this when multi-core + hyperthreaded CPUs come
1028 * out, but AFAIK this will work even for them. 1036 * out, but AFAIK this will work even for them.
1029 */ 1037 */
diff --git a/arch/x86_64/kernel/asm-offsets.c b/arch/x86_64/kernel/asm-offsets.c
index 38834bbbae..96687e2beb 100644
--- a/arch/x86_64/kernel/asm-offsets.c
+++ b/arch/x86_64/kernel/asm-offsets.c
@@ -4,6 +4,7 @@
4 * and format the required data. 4 * and format the required data.
5 */ 5 */
6 6
7#include <linux/crypto.h>
7#include <linux/sched.h> 8#include <linux/sched.h>
8#include <linux/stddef.h> 9#include <linux/stddef.h>
9#include <linux/errno.h> 10#include <linux/errno.h>
@@ -68,5 +69,7 @@ int main(void)
68 DEFINE(pbe_next, offsetof(struct pbe, next)); 69 DEFINE(pbe_next, offsetof(struct pbe, next));
69 BLANK(); 70 BLANK();
70 DEFINE(TSS_ist, offsetof(struct tss_struct, ist)); 71 DEFINE(TSS_ist, offsetof(struct tss_struct, ist));
72 BLANK();
73 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
71 return 0; 74 return 0;
72} 75}
diff --git a/arch/x86_64/kernel/crash.c b/arch/x86_64/kernel/crash.c
index 4e6c3b729e..d8d5750d61 100644
--- a/arch/x86_64/kernel/crash.c
+++ b/arch/x86_64/kernel/crash.c
@@ -111,14 +111,14 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu)
111 atomic_dec(&waiting_for_crash_ipi); 111 atomic_dec(&waiting_for_crash_ipi);
112 /* Assume hlt works */ 112 /* Assume hlt works */
113 for(;;) 113 for(;;)
114 asm("hlt"); 114 halt();
115 115
116 return 1; 116 return 1;
117} 117}
118 118
119static void smp_send_nmi_allbutself(void) 119static void smp_send_nmi_allbutself(void)
120{ 120{
121 send_IPI_allbutself(APIC_DM_NMI); 121 send_IPI_allbutself(NMI_VECTOR);
122} 122}
123 123
124/* 124/*
@@ -161,7 +161,7 @@ void machine_crash_shutdown(struct pt_regs *regs)
161{ 161{
162 /* 162 /*
163 * This function is only called after the system 163 * This function is only called after the system
164 * has paniced or is otherwise in a critical state. 164 * has panicked or is otherwise in a critical state.
165 * The minimum amount of code to allow a kexec'd kernel 165 * The minimum amount of code to allow a kexec'd kernel
166 * to run successfully needs to happen here. 166 * to run successfully needs to happen here.
167 * 167 *
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index 1ef6028f72..9e94d83462 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -1,7 +1,6 @@
1/* 1/*
2 * Handle the memory map. 2 * Handle the memory map.
3 * The functions here do the job until bootmem takes over. 3 * The functions here do the job until bootmem takes over.
4 * $Id: e820.c,v 1.4 2002/09/19 19:25:32 ak Exp $
5 * 4 *
6 * Getting sanitize_e820_map() in sync with i386 version by applying change: 5 * Getting sanitize_e820_map() in sync with i386 version by applying change:
7 * - Provisions for empty E820 memory regions (reported by certain BIOSes). 6 * - Provisions for empty E820 memory regions (reported by certain BIOSes).
@@ -621,6 +620,7 @@ void __init parse_memmapopt(char *p, char **from)
621} 620}
622 621
623unsigned long pci_mem_start = 0xaeedbabe; 622unsigned long pci_mem_start = 0xaeedbabe;
623EXPORT_SYMBOL(pci_mem_start);
624 624
625/* 625/*
626 * Search for the biggest gap in the low 32 bits of the e820 626 * Search for the biggest gap in the low 32 bits of the e820
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index 586b34c00c..22cac4487b 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -154,6 +154,7 @@ rff_trace:
154 GET_THREAD_INFO(%rcx) 154 GET_THREAD_INFO(%rcx)
155 jmp rff_action 155 jmp rff_action
156 CFI_ENDPROC 156 CFI_ENDPROC
157END(ret_from_fork)
157 158
158/* 159/*
159 * System call entry. Upto 6 arguments in registers are supported. 160 * System call entry. Upto 6 arguments in registers are supported.
@@ -188,7 +189,7 @@ rff_trace:
188 189
189ENTRY(system_call) 190ENTRY(system_call)
190 CFI_STARTPROC simple 191 CFI_STARTPROC simple
191 CFI_DEF_CFA rsp,0 192 CFI_DEF_CFA rsp,PDA_STACKOFFSET
192 CFI_REGISTER rip,rcx 193 CFI_REGISTER rip,rcx
193 /*CFI_REGISTER rflags,r11*/ 194 /*CFI_REGISTER rflags,r11*/
194 swapgs 195 swapgs
@@ -285,6 +286,7 @@ tracesys:
285 /* Use IRET because user could have changed frame */ 286 /* Use IRET because user could have changed frame */
286 jmp int_ret_from_sys_call 287 jmp int_ret_from_sys_call
287 CFI_ENDPROC 288 CFI_ENDPROC
289END(system_call)
288 290
289/* 291/*
290 * Syscall return path ending with IRET. 292 * Syscall return path ending with IRET.
@@ -364,6 +366,7 @@ int_restore_rest:
364 cli 366 cli
365 jmp int_with_check 367 jmp int_with_check
366 CFI_ENDPROC 368 CFI_ENDPROC
369END(int_ret_from_sys_call)
367 370
368/* 371/*
369 * Certain special system calls that need to save a complete full stack frame. 372 * Certain special system calls that need to save a complete full stack frame.
@@ -375,6 +378,7 @@ int_restore_rest:
375 leaq \func(%rip),%rax 378 leaq \func(%rip),%rax
376 leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */ 379 leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
377 jmp ptregscall_common 380 jmp ptregscall_common
381END(\label)
378 .endm 382 .endm
379 383
380 CFI_STARTPROC 384 CFI_STARTPROC
@@ -404,6 +408,7 @@ ENTRY(ptregscall_common)
404 CFI_REL_OFFSET rip, 0 408 CFI_REL_OFFSET rip, 0
405 ret 409 ret
406 CFI_ENDPROC 410 CFI_ENDPROC
411END(ptregscall_common)
407 412
408ENTRY(stub_execve) 413ENTRY(stub_execve)
409 CFI_STARTPROC 414 CFI_STARTPROC
@@ -418,6 +423,7 @@ ENTRY(stub_execve)
418 RESTORE_REST 423 RESTORE_REST
419 jmp int_ret_from_sys_call 424 jmp int_ret_from_sys_call
420 CFI_ENDPROC 425 CFI_ENDPROC
426END(stub_execve)
421 427
422/* 428/*
423 * sigreturn is special because it needs to restore all registers on return. 429 * sigreturn is special because it needs to restore all registers on return.
@@ -435,6 +441,7 @@ ENTRY(stub_rt_sigreturn)
435 RESTORE_REST 441 RESTORE_REST
436 jmp int_ret_from_sys_call 442 jmp int_ret_from_sys_call
437 CFI_ENDPROC 443 CFI_ENDPROC
444END(stub_rt_sigreturn)
438 445
439/* 446/*
440 * initial frame state for interrupts and exceptions 447 * initial frame state for interrupts and exceptions
@@ -466,29 +473,18 @@ ENTRY(stub_rt_sigreturn)
466/* 0(%rsp): interrupt number */ 473/* 0(%rsp): interrupt number */
467 .macro interrupt func 474 .macro interrupt func
468 cld 475 cld
469#ifdef CONFIG_DEBUG_INFO
470 SAVE_ALL
471 movq %rsp,%rdi
472 /*
473 * Setup a stack frame pointer. This allows gdb to trace
474 * back to the original stack.
475 */
476 movq %rsp,%rbp
477 CFI_DEF_CFA_REGISTER rbp
478#else
479 SAVE_ARGS 476 SAVE_ARGS
480 leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler 477 leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler
481#endif 478 pushq %rbp
479 CFI_ADJUST_CFA_OFFSET 8
480 CFI_REL_OFFSET rbp, 0
481 movq %rsp,%rbp
482 CFI_DEF_CFA_REGISTER rbp
482 testl $3,CS(%rdi) 483 testl $3,CS(%rdi)
483 je 1f 484 je 1f
484 swapgs 485 swapgs
4851: incl %gs:pda_irqcount # RED-PEN should check preempt count 4861: incl %gs:pda_irqcount # RED-PEN should check preempt count
486 movq %gs:pda_irqstackptr,%rax 487 cmoveq %gs:pda_irqstackptr,%rsp
487 cmoveq %rax,%rsp /*todo This needs CFI annotation! */
488 pushq %rdi # save old stack
489#ifndef CONFIG_DEBUG_INFO
490 CFI_ADJUST_CFA_OFFSET 8
491#endif
492 call \func 488 call \func
493 .endm 489 .endm
494 490
@@ -497,17 +493,11 @@ ENTRY(common_interrupt)
497 interrupt do_IRQ 493 interrupt do_IRQ
498 /* 0(%rsp): oldrsp-ARGOFFSET */ 494 /* 0(%rsp): oldrsp-ARGOFFSET */
499ret_from_intr: 495ret_from_intr:
500 popq %rdi
501#ifndef CONFIG_DEBUG_INFO
502 CFI_ADJUST_CFA_OFFSET -8
503#endif
504 cli 496 cli
505 decl %gs:pda_irqcount 497 decl %gs:pda_irqcount
506#ifdef CONFIG_DEBUG_INFO 498 leaveq
507 movq RBP(%rdi),%rbp
508 CFI_DEF_CFA_REGISTER rsp 499 CFI_DEF_CFA_REGISTER rsp
509#endif 500 CFI_ADJUST_CFA_OFFSET -8
510 leaq ARGOFFSET(%rdi),%rsp /*todo This needs CFI annotation! */
511exit_intr: 501exit_intr:
512 GET_THREAD_INFO(%rcx) 502 GET_THREAD_INFO(%rcx)
513 testl $3,CS-ARGOFFSET(%rsp) 503 testl $3,CS-ARGOFFSET(%rsp)
@@ -589,14 +579,16 @@ retint_kernel:
589 call preempt_schedule_irq 579 call preempt_schedule_irq
590 jmp exit_intr 580 jmp exit_intr
591#endif 581#endif
582
592 CFI_ENDPROC 583 CFI_ENDPROC
584END(common_interrupt)
593 585
594/* 586/*
595 * APIC interrupts. 587 * APIC interrupts.
596 */ 588 */
597 .macro apicinterrupt num,func 589 .macro apicinterrupt num,func
598 INTR_FRAME 590 INTR_FRAME
599 pushq $\num-256 591 pushq $~(\num)
600 CFI_ADJUST_CFA_OFFSET 8 592 CFI_ADJUST_CFA_OFFSET 8
601 interrupt \func 593 interrupt \func
602 jmp ret_from_intr 594 jmp ret_from_intr
@@ -605,17 +597,21 @@ retint_kernel:
605 597
606ENTRY(thermal_interrupt) 598ENTRY(thermal_interrupt)
607 apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt 599 apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
600END(thermal_interrupt)
608 601
609ENTRY(threshold_interrupt) 602ENTRY(threshold_interrupt)
610 apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt 603 apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
604END(threshold_interrupt)
611 605
612#ifdef CONFIG_SMP 606#ifdef CONFIG_SMP
613ENTRY(reschedule_interrupt) 607ENTRY(reschedule_interrupt)
614 apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt 608 apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
609END(reschedule_interrupt)
615 610
616 .macro INVALIDATE_ENTRY num 611 .macro INVALIDATE_ENTRY num
617ENTRY(invalidate_interrupt\num) 612ENTRY(invalidate_interrupt\num)
618 apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt 613 apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
614END(invalidate_interrupt\num)
619 .endm 615 .endm
620 616
621 INVALIDATE_ENTRY 0 617 INVALIDATE_ENTRY 0
@@ -629,17 +625,21 @@ ENTRY(invalidate_interrupt\num)
629 625
630ENTRY(call_function_interrupt) 626ENTRY(call_function_interrupt)
631 apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt 627 apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
628END(call_function_interrupt)
632#endif 629#endif
633 630
634#ifdef CONFIG_X86_LOCAL_APIC 631#ifdef CONFIG_X86_LOCAL_APIC
635ENTRY(apic_timer_interrupt) 632ENTRY(apic_timer_interrupt)
636 apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt 633 apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
634END(apic_timer_interrupt)
637 635
638ENTRY(error_interrupt) 636ENTRY(error_interrupt)
639 apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt 637 apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
638END(error_interrupt)
640 639
641ENTRY(spurious_interrupt) 640ENTRY(spurious_interrupt)
642 apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt 641 apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
642END(spurious_interrupt)
643#endif 643#endif
644 644
645/* 645/*
@@ -777,6 +777,7 @@ error_kernelspace:
777 cmpq $gs_change,RIP(%rsp) 777 cmpq $gs_change,RIP(%rsp)
778 je error_swapgs 778 je error_swapgs
779 jmp error_sti 779 jmp error_sti
780END(error_entry)
780 781
781 /* Reload gs selector with exception handling */ 782 /* Reload gs selector with exception handling */
782 /* edi: new selector */ 783 /* edi: new selector */
@@ -794,6 +795,7 @@ gs_change:
794 CFI_ADJUST_CFA_OFFSET -8 795 CFI_ADJUST_CFA_OFFSET -8
795 ret 796 ret
796 CFI_ENDPROC 797 CFI_ENDPROC
798ENDPROC(load_gs_index)
797 799
798 .section __ex_table,"a" 800 .section __ex_table,"a"
799 .align 8 801 .align 8
@@ -847,7 +849,7 @@ ENTRY(kernel_thread)
847 UNFAKE_STACK_FRAME 849 UNFAKE_STACK_FRAME
848 ret 850 ret
849 CFI_ENDPROC 851 CFI_ENDPROC
850 852ENDPROC(kernel_thread)
851 853
852child_rip: 854child_rip:
853 /* 855 /*
@@ -860,6 +862,7 @@ child_rip:
860 # exit 862 # exit
861 xorl %edi, %edi 863 xorl %edi, %edi
862 call do_exit 864 call do_exit
865ENDPROC(child_rip)
863 866
864/* 867/*
865 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly. 868 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
@@ -889,19 +892,24 @@ ENTRY(execve)
889 UNFAKE_STACK_FRAME 892 UNFAKE_STACK_FRAME
890 ret 893 ret
891 CFI_ENDPROC 894 CFI_ENDPROC
895ENDPROC(execve)
892 896
893KPROBE_ENTRY(page_fault) 897KPROBE_ENTRY(page_fault)
894 errorentry do_page_fault 898 errorentry do_page_fault
899END(page_fault)
895 .previous .text 900 .previous .text
896 901
897ENTRY(coprocessor_error) 902ENTRY(coprocessor_error)
898 zeroentry do_coprocessor_error 903 zeroentry do_coprocessor_error
904END(coprocessor_error)
899 905
900ENTRY(simd_coprocessor_error) 906ENTRY(simd_coprocessor_error)
901 zeroentry do_simd_coprocessor_error 907 zeroentry do_simd_coprocessor_error
908END(simd_coprocessor_error)
902 909
903ENTRY(device_not_available) 910ENTRY(device_not_available)
904 zeroentry math_state_restore 911 zeroentry math_state_restore
912END(device_not_available)
905 913
906 /* runs on exception stack */ 914 /* runs on exception stack */
907KPROBE_ENTRY(debug) 915KPROBE_ENTRY(debug)
@@ -911,6 +919,7 @@ KPROBE_ENTRY(debug)
911 paranoidentry do_debug, DEBUG_STACK 919 paranoidentry do_debug, DEBUG_STACK
912 jmp paranoid_exit 920 jmp paranoid_exit
913 CFI_ENDPROC 921 CFI_ENDPROC
922END(debug)
914 .previous .text 923 .previous .text
915 924
916 /* runs on exception stack */ 925 /* runs on exception stack */
@@ -961,6 +970,7 @@ paranoid_schedule:
961 cli 970 cli
962 jmp paranoid_userspace 971 jmp paranoid_userspace
963 CFI_ENDPROC 972 CFI_ENDPROC
973END(nmi)
964 .previous .text 974 .previous .text
965 975
966KPROBE_ENTRY(int3) 976KPROBE_ENTRY(int3)
@@ -970,22 +980,28 @@ KPROBE_ENTRY(int3)
970 paranoidentry do_int3, DEBUG_STACK 980 paranoidentry do_int3, DEBUG_STACK
971 jmp paranoid_exit 981 jmp paranoid_exit
972 CFI_ENDPROC 982 CFI_ENDPROC
983END(int3)
973 .previous .text 984 .previous .text
974 985
975ENTRY(overflow) 986ENTRY(overflow)
976 zeroentry do_overflow 987 zeroentry do_overflow
988END(overflow)
977 989
978ENTRY(bounds) 990ENTRY(bounds)
979 zeroentry do_bounds 991 zeroentry do_bounds
992END(bounds)
980 993
981ENTRY(invalid_op) 994ENTRY(invalid_op)
982 zeroentry do_invalid_op 995 zeroentry do_invalid_op
996END(invalid_op)
983 997
984ENTRY(coprocessor_segment_overrun) 998ENTRY(coprocessor_segment_overrun)
985 zeroentry do_coprocessor_segment_overrun 999 zeroentry do_coprocessor_segment_overrun
1000END(coprocessor_segment_overrun)
986 1001
987ENTRY(reserved) 1002ENTRY(reserved)
988 zeroentry do_reserved 1003 zeroentry do_reserved
1004END(reserved)
989 1005
990 /* runs on exception stack */ 1006 /* runs on exception stack */
991ENTRY(double_fault) 1007ENTRY(double_fault)
@@ -993,12 +1009,15 @@ ENTRY(double_fault)
993 paranoidentry do_double_fault 1009 paranoidentry do_double_fault
994 jmp paranoid_exit 1010 jmp paranoid_exit
995 CFI_ENDPROC 1011 CFI_ENDPROC
1012END(double_fault)
996 1013
997ENTRY(invalid_TSS) 1014ENTRY(invalid_TSS)
998 errorentry do_invalid_TSS 1015 errorentry do_invalid_TSS
1016END(invalid_TSS)
999 1017
1000ENTRY(segment_not_present) 1018ENTRY(segment_not_present)
1001 errorentry do_segment_not_present 1019 errorentry do_segment_not_present
1020END(segment_not_present)
1002 1021
1003 /* runs on exception stack */ 1022 /* runs on exception stack */
1004ENTRY(stack_segment) 1023ENTRY(stack_segment)
@@ -1006,19 +1025,24 @@ ENTRY(stack_segment)
1006 paranoidentry do_stack_segment 1025 paranoidentry do_stack_segment
1007 jmp paranoid_exit 1026 jmp paranoid_exit
1008 CFI_ENDPROC 1027 CFI_ENDPROC
1028END(stack_segment)
1009 1029
1010KPROBE_ENTRY(general_protection) 1030KPROBE_ENTRY(general_protection)
1011 errorentry do_general_protection 1031 errorentry do_general_protection
1032END(general_protection)
1012 .previous .text 1033 .previous .text
1013 1034
1014ENTRY(alignment_check) 1035ENTRY(alignment_check)
1015 errorentry do_alignment_check 1036 errorentry do_alignment_check
1037END(alignment_check)
1016 1038
1017ENTRY(divide_error) 1039ENTRY(divide_error)
1018 zeroentry do_divide_error 1040 zeroentry do_divide_error
1041END(divide_error)
1019 1042
1020ENTRY(spurious_interrupt_bug) 1043ENTRY(spurious_interrupt_bug)
1021 zeroentry do_spurious_interrupt_bug 1044 zeroentry do_spurious_interrupt_bug
1045END(spurious_interrupt_bug)
1022 1046
1023#ifdef CONFIG_X86_MCE 1047#ifdef CONFIG_X86_MCE
1024 /* runs on exception stack */ 1048 /* runs on exception stack */
@@ -1029,6 +1053,7 @@ ENTRY(machine_check)
1029 paranoidentry do_machine_check 1053 paranoidentry do_machine_check
1030 jmp paranoid_exit 1054 jmp paranoid_exit
1031 CFI_ENDPROC 1055 CFI_ENDPROC
1056END(machine_check)
1032#endif 1057#endif
1033 1058
1034ENTRY(call_softirq) 1059ENTRY(call_softirq)
@@ -1046,3 +1071,37 @@ ENTRY(call_softirq)
1046 decl %gs:pda_irqcount 1071 decl %gs:pda_irqcount
1047 ret 1072 ret
1048 CFI_ENDPROC 1073 CFI_ENDPROC
1074ENDPROC(call_softirq)
1075
1076#ifdef CONFIG_STACK_UNWIND
1077ENTRY(arch_unwind_init_running)
1078 CFI_STARTPROC
1079 movq %r15, R15(%rdi)
1080 movq %r14, R14(%rdi)
1081 xchgq %rsi, %rdx
1082 movq %r13, R13(%rdi)
1083 movq %r12, R12(%rdi)
1084 xorl %eax, %eax
1085 movq %rbp, RBP(%rdi)
1086 movq %rbx, RBX(%rdi)
1087 movq (%rsp), %rcx
1088 movq %rax, R11(%rdi)
1089 movq %rax, R10(%rdi)
1090 movq %rax, R9(%rdi)
1091 movq %rax, R8(%rdi)
1092 movq %rax, RAX(%rdi)
1093 movq %rax, RCX(%rdi)
1094 movq %rax, RDX(%rdi)
1095 movq %rax, RSI(%rdi)
1096 movq %rax, RDI(%rdi)
1097 movq %rax, ORIG_RAX(%rdi)
1098 movq %rcx, RIP(%rdi)
1099 leaq 8(%rsp), %rcx
1100 movq $__KERNEL_CS, CS(%rdi)
1101 movq %rax, EFLAGS(%rdi)
1102 movq %rcx, RSP(%rdi)
1103 movq $__KERNEL_DS, SS(%rdi)
1104 jmpq *%rdx
1105 CFI_ENDPROC
1106ENDPROC(arch_unwind_init_running)
1107#endif
diff --git a/arch/x86_64/kernel/genapic_flat.c b/arch/x86_64/kernel/genapic_flat.c
index 1a2ab825be..21c7066e23 100644
--- a/arch/x86_64/kernel/genapic_flat.c
+++ b/arch/x86_64/kernel/genapic_flat.c
@@ -78,22 +78,29 @@ static void flat_send_IPI_mask(cpumask_t cpumask, int vector)
78 78
79static void flat_send_IPI_allbutself(int vector) 79static void flat_send_IPI_allbutself(int vector)
80{ 80{
81#ifndef CONFIG_HOTPLUG_CPU 81#ifdef CONFIG_HOTPLUG_CPU
82 if (((num_online_cpus()) - 1) >= 1) 82 int hotplug = 1;
83 __send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL);
84#else 83#else
85 cpumask_t allbutme = cpu_online_map; 84 int hotplug = 0;
85#endif
86 if (hotplug || vector == NMI_VECTOR) {
87 cpumask_t allbutme = cpu_online_map;
86 88
87 cpu_clear(smp_processor_id(), allbutme); 89 cpu_clear(smp_processor_id(), allbutme);
88 90
89 if (!cpus_empty(allbutme)) 91 if (!cpus_empty(allbutme))
90 flat_send_IPI_mask(allbutme, vector); 92 flat_send_IPI_mask(allbutme, vector);
91#endif 93 } else if (num_online_cpus() > 1) {
94 __send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL);
95 }
92} 96}
93 97
94static void flat_send_IPI_all(int vector) 98static void flat_send_IPI_all(int vector)
95{ 99{
96 __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL); 100 if (vector == NMI_VECTOR)
101 flat_send_IPI_mask(cpu_online_map, vector);
102 else
103 __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
97} 104}
98 105
99static int flat_apic_id_registered(void) 106static int flat_apic_id_registered(void)
@@ -108,10 +115,7 @@ static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask)
108 115
109static unsigned int phys_pkg_id(int index_msb) 116static unsigned int phys_pkg_id(int index_msb)
110{ 117{
111 u32 ebx; 118 return hard_smp_processor_id() >> index_msb;
112
113 ebx = cpuid_ebx(1);
114 return ((ebx >> 24) & 0xFF) >> index_msb;
115} 119}
116 120
117struct genapic apic_flat = { 121struct genapic apic_flat = {
diff --git a/arch/x86_64/kernel/head64.c b/arch/x86_64/kernel/head64.c
index cea20a66c1..e6a71c9556 100644
--- a/arch/x86_64/kernel/head64.c
+++ b/arch/x86_64/kernel/head64.c
@@ -2,8 +2,6 @@
2 * linux/arch/x86_64/kernel/head64.c -- prepare to run common code 2 * linux/arch/x86_64/kernel/head64.c -- prepare to run common code
3 * 3 *
4 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE 4 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
5 *
6 * $Id: head64.c,v 1.22 2001/07/06 14:28:20 ak Exp $
7 */ 5 */
8 6
9#include <linux/init.h> 7#include <linux/init.h>
diff --git a/arch/x86_64/kernel/i8259.c b/arch/x86_64/kernel/i8259.c
index 5ecd34ab8c..86b2c1e197 100644
--- a/arch/x86_64/kernel/i8259.c
+++ b/arch/x86_64/kernel/i8259.c
@@ -44,11 +44,11 @@
44 BI(x,8) BI(x,9) BI(x,a) BI(x,b) \ 44 BI(x,8) BI(x,9) BI(x,a) BI(x,b) \
45 BI(x,c) BI(x,d) BI(x,e) BI(x,f) 45 BI(x,c) BI(x,d) BI(x,e) BI(x,f)
46 46
47#define BUILD_14_IRQS(x) \ 47#define BUILD_15_IRQS(x) \
48 BI(x,0) BI(x,1) BI(x,2) BI(x,3) \ 48 BI(x,0) BI(x,1) BI(x,2) BI(x,3) \
49 BI(x,4) BI(x,5) BI(x,6) BI(x,7) \ 49 BI(x,4) BI(x,5) BI(x,6) BI(x,7) \
50 BI(x,8) BI(x,9) BI(x,a) BI(x,b) \ 50 BI(x,8) BI(x,9) BI(x,a) BI(x,b) \
51 BI(x,c) BI(x,d) 51 BI(x,c) BI(x,d) BI(x,e)
52 52
53/* 53/*
54 * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts: 54 * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
@@ -73,13 +73,13 @@ BUILD_16_IRQS(0x8) BUILD_16_IRQS(0x9) BUILD_16_IRQS(0xa) BUILD_16_IRQS(0xb)
73BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd) 73BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd)
74 74
75#ifdef CONFIG_PCI_MSI 75#ifdef CONFIG_PCI_MSI
76 BUILD_14_IRQS(0xe) 76 BUILD_15_IRQS(0xe)
77#endif 77#endif
78 78
79#endif 79#endif
80 80
81#undef BUILD_16_IRQS 81#undef BUILD_16_IRQS
82#undef BUILD_14_IRQS 82#undef BUILD_15_IRQS
83#undef BI 83#undef BI
84 84
85 85
@@ -92,11 +92,11 @@ BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd)
92 IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \ 92 IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \
93 IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f) 93 IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f)
94 94
95#define IRQLIST_14(x) \ 95#define IRQLIST_15(x) \
96 IRQ(x,0), IRQ(x,1), IRQ(x,2), IRQ(x,3), \ 96 IRQ(x,0), IRQ(x,1), IRQ(x,2), IRQ(x,3), \
97 IRQ(x,4), IRQ(x,5), IRQ(x,6), IRQ(x,7), \ 97 IRQ(x,4), IRQ(x,5), IRQ(x,6), IRQ(x,7), \
98 IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \ 98 IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \
99 IRQ(x,c), IRQ(x,d) 99 IRQ(x,c), IRQ(x,d), IRQ(x,e)
100 100
101void (*interrupt[NR_IRQS])(void) = { 101void (*interrupt[NR_IRQS])(void) = {
102 IRQLIST_16(0x0), 102 IRQLIST_16(0x0),
@@ -108,7 +108,7 @@ void (*interrupt[NR_IRQS])(void) = {
108 IRQLIST_16(0xc), IRQLIST_16(0xd) 108 IRQLIST_16(0xc), IRQLIST_16(0xd)
109 109
110#ifdef CONFIG_PCI_MSI 110#ifdef CONFIG_PCI_MSI
111 , IRQLIST_14(0xe) 111 , IRQLIST_15(0xe)
112#endif 112#endif
113 113
114#endif 114#endif
@@ -278,7 +278,7 @@ static void mask_and_ack_8259A(unsigned int irq)
278 * Lightweight spurious IRQ detection. We do not want 278 * Lightweight spurious IRQ detection. We do not want
279 * to overdo spurious IRQ handling - it's usually a sign 279 * to overdo spurious IRQ handling - it's usually a sign
280 * of hardware problems, so we only do the checks we can 280 * of hardware problems, so we only do the checks we can
281 * do without slowing down good hardware unnecesserily. 281 * do without slowing down good hardware unnecessarily.
282 * 282 *
283 * Note that IRQ7 and IRQ15 (the two spurious IRQs 283 * Note that IRQ7 and IRQ15 (the two spurious IRQs
284 * usually resulting from the 8259A-1|2 PICs) occur 284 * usually resulting from the 8259A-1|2 PICs) occur
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 9cc7031b71..c768d8a036 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -41,6 +41,7 @@
41#include <asm/mach_apic.h> 41#include <asm/mach_apic.h>
42#include <asm/acpi.h> 42#include <asm/acpi.h>
43#include <asm/dma.h> 43#include <asm/dma.h>
44#include <asm/nmi.h>
44 45
45#define __apicdebuginit __init 46#define __apicdebuginit __init
46 47
@@ -56,6 +57,7 @@ int timer_over_8254 __initdata = 0;
56static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; 57static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
57 58
58static DEFINE_SPINLOCK(ioapic_lock); 59static DEFINE_SPINLOCK(ioapic_lock);
60static DEFINE_SPINLOCK(vector_lock);
59 61
60/* 62/*
61 * # of IRQ routing registers 63 * # of IRQ routing registers
@@ -317,7 +319,7 @@ void __init check_ioapic(void)
317 vendor &= 0xffff; 319 vendor &= 0xffff;
318 switch (vendor) { 320 switch (vendor) {
319 case PCI_VENDOR_ID_VIA: 321 case PCI_VENDOR_ID_VIA:
320#ifdef CONFIG_GART_IOMMU 322#ifdef CONFIG_IOMMU
321 if ((end_pfn > MAX_DMA32_PFN || 323 if ((end_pfn > MAX_DMA32_PFN ||
322 force_iommu) && 324 force_iommu) &&
323 !iommu_aperture_allowed) { 325 !iommu_aperture_allowed) {
@@ -834,10 +836,17 @@ u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
834int assign_irq_vector(int irq) 836int assign_irq_vector(int irq)
835{ 837{
836 static int current_vector = FIRST_DEVICE_VECTOR, offset = 0; 838 static int current_vector = FIRST_DEVICE_VECTOR, offset = 0;
839 unsigned long flags;
840 int vector;
837 841
838 BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS); 842 BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
839 if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) 843
844 spin_lock_irqsave(&vector_lock, flags);
845
846 if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
847 spin_unlock_irqrestore(&vector_lock, flags);
840 return IO_APIC_VECTOR(irq); 848 return IO_APIC_VECTOR(irq);
849 }
841next: 850next:
842 current_vector += 8; 851 current_vector += 8;
843 if (current_vector == IA32_SYSCALL_VECTOR) 852 if (current_vector == IA32_SYSCALL_VECTOR)
@@ -849,11 +858,14 @@ next:
849 current_vector = FIRST_DEVICE_VECTOR + offset; 858 current_vector = FIRST_DEVICE_VECTOR + offset;
850 } 859 }
851 860
852 vector_irq[current_vector] = irq; 861 vector = current_vector;
862 vector_irq[vector] = irq;
853 if (irq != AUTO_ASSIGN) 863 if (irq != AUTO_ASSIGN)
854 IO_APIC_VECTOR(irq) = current_vector; 864 IO_APIC_VECTOR(irq) = vector;
865
866 spin_unlock_irqrestore(&vector_lock, flags);
855 867
856 return current_vector; 868 return vector;
857} 869}
858 870
859extern void (*interrupt[NR_IRQS])(void); 871extern void (*interrupt[NR_IRQS])(void);
@@ -866,21 +878,14 @@ static struct hw_interrupt_type ioapic_edge_type;
866 878
867static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger) 879static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger)
868{ 880{
869 if (use_pci_vector() && !platform_legacy_irq(irq)) { 881 unsigned idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq;
870 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || 882
871 trigger == IOAPIC_LEVEL) 883 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
872 irq_desc[vector].handler = &ioapic_level_type; 884 trigger == IOAPIC_LEVEL)
873 else 885 irq_desc[idx].handler = &ioapic_level_type;
874 irq_desc[vector].handler = &ioapic_edge_type; 886 else
875 set_intr_gate(vector, interrupt[vector]); 887 irq_desc[idx].handler = &ioapic_edge_type;
876 } else { 888 set_intr_gate(vector, interrupt[idx]);
877 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
878 trigger == IOAPIC_LEVEL)
879 irq_desc[irq].handler = &ioapic_level_type;
880 else
881 irq_desc[irq].handler = &ioapic_edge_type;
882 set_intr_gate(vector, interrupt[irq]);
883 }
884} 889}
885 890
886static void __init setup_IO_APIC_irqs(void) 891static void __init setup_IO_APIC_irqs(void)
diff --git a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c
index d8bd0b345b..3be0a7e4bf 100644
--- a/arch/x86_64/kernel/irq.c
+++ b/arch/x86_64/kernel/irq.c
@@ -26,6 +26,30 @@ atomic_t irq_mis_count;
26#endif 26#endif
27#endif 27#endif
28 28
29#ifdef CONFIG_DEBUG_STACKOVERFLOW
30/*
31 * Probabilistic stack overflow check:
32 *
33 * Only check the stack in process context, because everything else
34 * runs on the big interrupt stacks. Checking reliably is too expensive,
35 * so we just check from interrupts.
36 */
37static inline void stack_overflow_check(struct pt_regs *regs)
38{
39 u64 curbase = (u64) current->thread_info;
40 static unsigned long warned = -60*HZ;
41
42 if (regs->rsp >= curbase && regs->rsp <= curbase + THREAD_SIZE &&
43 regs->rsp < curbase + sizeof(struct thread_info) + 128 &&
44 time_after(jiffies, warned + 60*HZ)) {
45 printk("do_IRQ: %s near stack overflow (cur:%Lx,rsp:%lx)\n",
46 current->comm, curbase, regs->rsp);
47 show_stack(NULL,NULL);
48 warned = jiffies;
49 }
50}
51#endif
52
29/* 53/*
30 * Generic, controller-independent functions: 54 * Generic, controller-independent functions:
31 */ 55 */
@@ -39,7 +63,7 @@ int show_interrupts(struct seq_file *p, void *v)
39 if (i == 0) { 63 if (i == 0) {
40 seq_printf(p, " "); 64 seq_printf(p, " ");
41 for_each_online_cpu(j) 65 for_each_online_cpu(j)
42 seq_printf(p, "CPU%d ",j); 66 seq_printf(p, "CPU%-8d",j);
43 seq_putc(p, '\n'); 67 seq_putc(p, '\n');
44 } 68 }
45 69
@@ -91,12 +115,14 @@ skip:
91 */ 115 */
92asmlinkage unsigned int do_IRQ(struct pt_regs *regs) 116asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
93{ 117{
94 /* high bits used in ret_from_ code */ 118 /* high bit used in ret_from_ code */
95 unsigned irq = regs->orig_rax & 0xff; 119 unsigned irq = ~regs->orig_rax;
96 120
97 exit_idle(); 121 exit_idle();
98 irq_enter(); 122 irq_enter();
99 123#ifdef CONFIG_DEBUG_STACKOVERFLOW
124 stack_overflow_check(regs);
125#endif
100 __do_IRQ(irq, regs); 126 __do_IRQ(irq, regs);
101 irq_exit(); 127 irq_exit();
102 128
diff --git a/arch/x86_64/kernel/k8.c b/arch/x86_64/kernel/k8.c
new file mode 100644
index 0000000000..6416682d33
--- /dev/null
+++ b/arch/x86_64/kernel/k8.c
@@ -0,0 +1,118 @@
1/*
2 * Shared support code for AMD K8 northbridges and derivates.
3 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
4 */
5#include <linux/gfp.h>
6#include <linux/types.h>
7#include <linux/init.h>
8#include <linux/errno.h>
9#include <linux/module.h>
10#include <linux/spinlock.h>
11#include <asm/k8.h>
12
13int num_k8_northbridges;
14EXPORT_SYMBOL(num_k8_northbridges);
15
16static u32 *flush_words;
17
18struct pci_device_id k8_nb_ids[] = {
19 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
20 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) },
21 {}
22};
23EXPORT_SYMBOL(k8_nb_ids);
24
25struct pci_dev **k8_northbridges;
26EXPORT_SYMBOL(k8_northbridges);
27
28static struct pci_dev *next_k8_northbridge(struct pci_dev *dev)
29{
30 do {
31 dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
32 if (!dev)
33 break;
34 } while (!pci_match_id(&k8_nb_ids[0], dev));
35 return dev;
36}
37
38int cache_k8_northbridges(void)
39{
40 int i;
41 struct pci_dev *dev;
42 if (num_k8_northbridges)
43 return 0;
44
45 num_k8_northbridges = 0;
46 dev = NULL;
47 while ((dev = next_k8_northbridge(dev)) != NULL)
48 num_k8_northbridges++;
49
50 k8_northbridges = kmalloc((num_k8_northbridges + 1) * sizeof(void *),
51 GFP_KERNEL);
52 if (!k8_northbridges)
53 return -ENOMEM;
54
55 flush_words = kmalloc(num_k8_northbridges * sizeof(u32), GFP_KERNEL);
56 if (!flush_words) {
57 kfree(k8_northbridges);
58 return -ENOMEM;
59 }
60
61 dev = NULL;
62 i = 0;
63 while ((dev = next_k8_northbridge(dev)) != NULL) {
64 k8_northbridges[i++] = dev;
65 pci_read_config_dword(dev, 0x9c, &flush_words[i]);
66 }
67 k8_northbridges[i] = NULL;
68 return 0;
69}
70EXPORT_SYMBOL_GPL(cache_k8_northbridges);
71
72/* Ignores subdevice/subvendor but as far as I can figure out
73 they're useless anyways */
74int __init early_is_k8_nb(u32 device)
75{
76 struct pci_device_id *id;
77 u32 vendor = device & 0xffff;
78 device >>= 16;
79 for (id = k8_nb_ids; id->vendor; id++)
80 if (vendor == id->vendor && device == id->device)
81 return 1;
82 return 0;
83}
84
85void k8_flush_garts(void)
86{
87 int flushed, i;
88 unsigned long flags;
89 static DEFINE_SPINLOCK(gart_lock);
90
91 /* Avoid races between AGP and IOMMU. In theory it's not needed
92 but I'm not sure if the hardware won't lose flush requests
93 when another is pending. This whole thing is so expensive anyways
94 that it doesn't matter to serialize more. -AK */
95 spin_lock_irqsave(&gart_lock, flags);
96 flushed = 0;
97 for (i = 0; i < num_k8_northbridges; i++) {
98 pci_write_config_dword(k8_northbridges[i], 0x9c,
99 flush_words[i]|1);
100 flushed++;
101 }
102 for (i = 0; i < num_k8_northbridges; i++) {
103 u32 w;
104 /* Make sure the hardware actually executed the flush*/
105 for (;;) {
106 pci_read_config_dword(k8_northbridges[i],
107 0x9c, &w);
108 if (!(w & 1))
109 break;
110 cpu_relax();
111 }
112 }
113 spin_unlock_irqrestore(&gart_lock, flags);
114 if (!flushed)
115 printk("nothing to flush?\n");
116}
117EXPORT_SYMBOL_GPL(k8_flush_garts);
118
diff --git a/arch/x86_64/kernel/machine_kexec.c b/arch/x86_64/kernel/machine_kexec.c
index 25ac8a3faa..83fb24a028 100644
--- a/arch/x86_64/kernel/machine_kexec.c
+++ b/arch/x86_64/kernel/machine_kexec.c
@@ -149,8 +149,8 @@ typedef NORET_TYPE void (*relocate_new_kernel_t)(unsigned long indirection_page,
149 unsigned long start_address, 149 unsigned long start_address,
150 unsigned long pgtable) ATTRIB_NORET; 150 unsigned long pgtable) ATTRIB_NORET;
151 151
152const extern unsigned char relocate_new_kernel[]; 152extern const unsigned char relocate_new_kernel[];
153const extern unsigned long relocate_new_kernel_size; 153extern const unsigned long relocate_new_kernel_size;
154 154
155int machine_kexec_prepare(struct kimage *image) 155int machine_kexec_prepare(struct kimage *image)
156{ 156{
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index c69fc43cee..88845674c6 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -562,7 +562,7 @@ static struct sysdev_class mce_sysclass = {
562 set_kset_name("machinecheck"), 562 set_kset_name("machinecheck"),
563}; 563};
564 564
565static DEFINE_PER_CPU(struct sys_device, device_mce); 565DEFINE_PER_CPU(struct sys_device, device_mce);
566 566
567/* Why are there no generic functions for this? */ 567/* Why are there no generic functions for this? */
568#define ACCESSOR(name, var, start) \ 568#define ACCESSOR(name, var, start) \
@@ -629,7 +629,7 @@ static __cpuinit void mce_remove_device(unsigned int cpu)
629#endif 629#endif
630 630
631/* Get notified when a cpu comes on/off. Be hotplug friendly. */ 631/* Get notified when a cpu comes on/off. Be hotplug friendly. */
632static int 632static __cpuinit int
633mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) 633mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
634{ 634{
635 unsigned int cpu = (unsigned long)hcpu; 635 unsigned int cpu = (unsigned long)hcpu;
@@ -647,7 +647,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
647 return NOTIFY_OK; 647 return NOTIFY_OK;
648} 648}
649 649
650static struct notifier_block mce_cpu_notifier = { 650static struct notifier_block __cpuinitdata mce_cpu_notifier = {
651 .notifier_call = mce_cpu_callback, 651 .notifier_call = mce_cpu_callback,
652}; 652};
653 653
diff --git a/arch/x86_64/kernel/mce_amd.c b/arch/x86_64/kernel/mce_amd.c
index d13b241ad0..335200aa27 100644
--- a/arch/x86_64/kernel/mce_amd.c
+++ b/arch/x86_64/kernel/mce_amd.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * (c) 2005 Advanced Micro Devices, Inc. 2 * (c) 2005, 2006 Advanced Micro Devices, Inc.
3 * Your use of this code is subject to the terms and conditions of the 3 * Your use of this code is subject to the terms and conditions of the
4 * GNU general public license version 2. See "COPYING" or 4 * GNU general public license version 2. See "COPYING" or
5 * http://www.gnu.org/licenses/gpl.html 5 * http://www.gnu.org/licenses/gpl.html
@@ -8,9 +8,10 @@
8 * 8 *
9 * Support : jacob.shin@amd.com 9 * Support : jacob.shin@amd.com
10 * 10 *
11 * MC4_MISC0 DRAM ECC Error Threshold available under AMD K8 Rev F. 11 * April 2006
12 * MC4_MISC0 exists per physical processor. 12 * - added support for AMD Family 0x10 processors
13 * 13 *
14 * All MC4_MISCi registers are shared between multi-cores
14 */ 15 */
15 16
16#include <linux/cpu.h> 17#include <linux/cpu.h>
@@ -29,32 +30,45 @@
29#include <asm/percpu.h> 30#include <asm/percpu.h>
30#include <asm/idle.h> 31#include <asm/idle.h>
31 32
32#define PFX "mce_threshold: " 33#define PFX "mce_threshold: "
33#define VERSION "version 1.00.9" 34#define VERSION "version 1.1.1"
34#define NR_BANKS 5 35#define NR_BANKS 6
35#define THRESHOLD_MAX 0xFFF 36#define NR_BLOCKS 9
36#define INT_TYPE_APIC 0x00020000 37#define THRESHOLD_MAX 0xFFF
37#define MASK_VALID_HI 0x80000000 38#define INT_TYPE_APIC 0x00020000
38#define MASK_LVTOFF_HI 0x00F00000 39#define MASK_VALID_HI 0x80000000
39#define MASK_COUNT_EN_HI 0x00080000 40#define MASK_LVTOFF_HI 0x00F00000
40#define MASK_INT_TYPE_HI 0x00060000 41#define MASK_COUNT_EN_HI 0x00080000
41#define MASK_OVERFLOW_HI 0x00010000 42#define MASK_INT_TYPE_HI 0x00060000
43#define MASK_OVERFLOW_HI 0x00010000
42#define MASK_ERR_COUNT_HI 0x00000FFF 44#define MASK_ERR_COUNT_HI 0x00000FFF
43#define MASK_OVERFLOW 0x0001000000000000L 45#define MASK_BLKPTR_LO 0xFF000000
46#define MCG_XBLK_ADDR 0xC0000400
44 47
45struct threshold_bank { 48struct threshold_block {
49 unsigned int block;
50 unsigned int bank;
46 unsigned int cpu; 51 unsigned int cpu;
47 u8 bank; 52 u32 address;
48 u8 interrupt_enable; 53 u16 interrupt_enable;
49 u16 threshold_limit; 54 u16 threshold_limit;
50 struct kobject kobj; 55 struct kobject kobj;
56 struct list_head miscj;
51}; 57};
52 58
53static struct threshold_bank threshold_defaults = { 59/* defaults used early on boot */
60static struct threshold_block threshold_defaults = {
54 .interrupt_enable = 0, 61 .interrupt_enable = 0,
55 .threshold_limit = THRESHOLD_MAX, 62 .threshold_limit = THRESHOLD_MAX,
56}; 63};
57 64
65struct threshold_bank {
66 struct kobject kobj;
67 struct threshold_block *blocks;
68 cpumask_t cpus;
69};
70static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
71
58#ifdef CONFIG_SMP 72#ifdef CONFIG_SMP
59static unsigned char shared_bank[NR_BANKS] = { 73static unsigned char shared_bank[NR_BANKS] = {
60 0, 0, 0, 0, 1 74 0, 0, 0, 0, 1
@@ -68,12 +82,12 @@ static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */
68 */ 82 */
69 83
70/* must be called with correct cpu affinity */ 84/* must be called with correct cpu affinity */
71static void threshold_restart_bank(struct threshold_bank *b, 85static void threshold_restart_bank(struct threshold_block *b,
72 int reset, u16 old_limit) 86 int reset, u16 old_limit)
73{ 87{
74 u32 mci_misc_hi, mci_misc_lo; 88 u32 mci_misc_hi, mci_misc_lo;
75 89
76 rdmsr(MSR_IA32_MC0_MISC + b->bank * 4, mci_misc_lo, mci_misc_hi); 90 rdmsr(b->address, mci_misc_lo, mci_misc_hi);
77 91
78 if (b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX)) 92 if (b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX))
79 reset = 1; /* limit cannot be lower than err count */ 93 reset = 1; /* limit cannot be lower than err count */
@@ -94,35 +108,57 @@ static void threshold_restart_bank(struct threshold_bank *b,
94 (mci_misc_hi &= ~MASK_INT_TYPE_HI); 108 (mci_misc_hi &= ~MASK_INT_TYPE_HI);
95 109
96 mci_misc_hi |= MASK_COUNT_EN_HI; 110 mci_misc_hi |= MASK_COUNT_EN_HI;
97 wrmsr(MSR_IA32_MC0_MISC + b->bank * 4, mci_misc_lo, mci_misc_hi); 111 wrmsr(b->address, mci_misc_lo, mci_misc_hi);
98} 112}
99 113
114/* cpu init entry point, called from mce.c with preempt off */
100void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c) 115void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
101{ 116{
102 int bank; 117 unsigned int bank, block;
103 u32 mci_misc_lo, mci_misc_hi;
104 unsigned int cpu = smp_processor_id(); 118 unsigned int cpu = smp_processor_id();
119 u32 low = 0, high = 0, address = 0;
105 120
106 for (bank = 0; bank < NR_BANKS; ++bank) { 121 for (bank = 0; bank < NR_BANKS; ++bank) {
107 rdmsr(MSR_IA32_MC0_MISC + bank * 4, mci_misc_lo, mci_misc_hi); 122 for (block = 0; block < NR_BLOCKS; ++block) {
123 if (block == 0)
124 address = MSR_IA32_MC0_MISC + bank * 4;
125 else if (block == 1)
126 address = MCG_XBLK_ADDR
127 + ((low & MASK_BLKPTR_LO) >> 21);
128 else
129 ++address;
130
131 if (rdmsr_safe(address, &low, &high))
132 continue;
108 133
109 /* !valid, !counter present, bios locked */ 134 if (!(high & MASK_VALID_HI)) {
110 if (!(mci_misc_hi & MASK_VALID_HI) || 135 if (block)
111 !(mci_misc_hi & MASK_VALID_HI >> 1) || 136 continue;
112 (mci_misc_hi & MASK_VALID_HI >> 2)) 137 else
113 continue; 138 break;
139 }
114 140
115 per_cpu(bank_map, cpu) |= (1 << bank); 141 if (!(high & MASK_VALID_HI >> 1) ||
142 (high & MASK_VALID_HI >> 2))
143 continue;
116 144
145 if (!block)
146 per_cpu(bank_map, cpu) |= (1 << bank);
117#ifdef CONFIG_SMP 147#ifdef CONFIG_SMP
118 if (shared_bank[bank] && cpu_core_id[cpu]) 148 if (shared_bank[bank] && c->cpu_core_id)
119 continue; 149 break;
120#endif 150#endif
151 high &= ~MASK_LVTOFF_HI;
152 high |= K8_APIC_EXT_LVT_ENTRY_THRESHOLD << 20;
153 wrmsr(address, low, high);
121 154
122 setup_threshold_lvt((mci_misc_hi & MASK_LVTOFF_HI) >> 20); 155 setup_APIC_extened_lvt(K8_APIC_EXT_LVT_ENTRY_THRESHOLD,
123 threshold_defaults.cpu = cpu; 156 THRESHOLD_APIC_VECTOR,
124 threshold_defaults.bank = bank; 157 K8_APIC_EXT_INT_MSG_FIX, 0);
125 threshold_restart_bank(&threshold_defaults, 0, 0); 158
159 threshold_defaults.address = address;
160 threshold_restart_bank(&threshold_defaults, 0, 0);
161 }
126 } 162 }
127} 163}
128 164
@@ -137,8 +173,9 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
137 */ 173 */
138asmlinkage void mce_threshold_interrupt(void) 174asmlinkage void mce_threshold_interrupt(void)
139{ 175{
140 int bank; 176 unsigned int bank, block;
141 struct mce m; 177 struct mce m;
178 u32 low = 0, high = 0, address = 0;
142 179
143 ack_APIC_irq(); 180 ack_APIC_irq();
144 exit_idle(); 181 exit_idle();
@@ -150,15 +187,42 @@ asmlinkage void mce_threshold_interrupt(void)
150 187
151 /* assume first bank caused it */ 188 /* assume first bank caused it */
152 for (bank = 0; bank < NR_BANKS; ++bank) { 189 for (bank = 0; bank < NR_BANKS; ++bank) {
153 m.bank = MCE_THRESHOLD_BASE + bank; 190 for (block = 0; block < NR_BLOCKS; ++block) {
154 rdmsrl(MSR_IA32_MC0_MISC + bank * 4, m.misc); 191 if (block == 0)
192 address = MSR_IA32_MC0_MISC + bank * 4;
193 else if (block == 1)
194 address = MCG_XBLK_ADDR
195 + ((low & MASK_BLKPTR_LO) >> 21);
196 else
197 ++address;
198
199 if (rdmsr_safe(address, &low, &high))
200 continue;
155 201
156 if (m.misc & MASK_OVERFLOW) { 202 if (!(high & MASK_VALID_HI)) {
157 mce_log(&m); 203 if (block)
158 goto out; 204 continue;
205 else
206 break;
207 }
208
209 if (!(high & MASK_VALID_HI >> 1) ||
210 (high & MASK_VALID_HI >> 2))
211 continue;
212
213 if (high & MASK_OVERFLOW_HI) {
214 rdmsrl(address, m.misc);
215 rdmsrl(MSR_IA32_MC0_STATUS + bank * 4,
216 m.status);
217 m.bank = K8_MCE_THRESHOLD_BASE
218 + bank * NR_BLOCKS
219 + block;
220 mce_log(&m);
221 goto out;
222 }
159 } 223 }
160 } 224 }
161 out: 225out:
162 irq_exit(); 226 irq_exit();
163} 227}
164 228
@@ -166,20 +230,12 @@ asmlinkage void mce_threshold_interrupt(void)
166 * Sysfs Interface 230 * Sysfs Interface
167 */ 231 */
168 232
169static struct sysdev_class threshold_sysclass = {
170 set_kset_name("threshold"),
171};
172
173static DEFINE_PER_CPU(struct sys_device, device_threshold);
174
175struct threshold_attr { 233struct threshold_attr {
176 struct attribute attr; 234 struct attribute attr;
177 ssize_t(*show) (struct threshold_bank *, char *); 235 ssize_t(*show) (struct threshold_block *, char *);
178 ssize_t(*store) (struct threshold_bank *, const char *, size_t count); 236 ssize_t(*store) (struct threshold_block *, const char *, size_t count);
179}; 237};
180 238
181static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
182
183static cpumask_t affinity_set(unsigned int cpu) 239static cpumask_t affinity_set(unsigned int cpu)
184{ 240{
185 cpumask_t oldmask = current->cpus_allowed; 241 cpumask_t oldmask = current->cpus_allowed;
@@ -194,15 +250,15 @@ static void affinity_restore(cpumask_t oldmask)
194 set_cpus_allowed(current, oldmask); 250 set_cpus_allowed(current, oldmask);
195} 251}
196 252
197#define SHOW_FIELDS(name) \ 253#define SHOW_FIELDS(name) \
198 static ssize_t show_ ## name(struct threshold_bank * b, char *buf) \ 254static ssize_t show_ ## name(struct threshold_block * b, char *buf) \
199 { \ 255{ \
200 return sprintf(buf, "%lx\n", (unsigned long) b->name); \ 256 return sprintf(buf, "%lx\n", (unsigned long) b->name); \
201 } 257}
202SHOW_FIELDS(interrupt_enable) 258SHOW_FIELDS(interrupt_enable)
203SHOW_FIELDS(threshold_limit) 259SHOW_FIELDS(threshold_limit)
204 260
205static ssize_t store_interrupt_enable(struct threshold_bank *b, 261static ssize_t store_interrupt_enable(struct threshold_block *b,
206 const char *buf, size_t count) 262 const char *buf, size_t count)
207{ 263{
208 char *end; 264 char *end;
@@ -219,7 +275,7 @@ static ssize_t store_interrupt_enable(struct threshold_bank *b,
219 return end - buf; 275 return end - buf;
220} 276}
221 277
222static ssize_t store_threshold_limit(struct threshold_bank *b, 278static ssize_t store_threshold_limit(struct threshold_block *b,
223 const char *buf, size_t count) 279 const char *buf, size_t count)
224{ 280{
225 char *end; 281 char *end;
@@ -242,18 +298,18 @@ static ssize_t store_threshold_limit(struct threshold_bank *b,
242 return end - buf; 298 return end - buf;
243} 299}
244 300
245static ssize_t show_error_count(struct threshold_bank *b, char *buf) 301static ssize_t show_error_count(struct threshold_block *b, char *buf)
246{ 302{
247 u32 high, low; 303 u32 high, low;
248 cpumask_t oldmask; 304 cpumask_t oldmask;
249 oldmask = affinity_set(b->cpu); 305 oldmask = affinity_set(b->cpu);
250 rdmsr(MSR_IA32_MC0_MISC + b->bank * 4, low, high); /* ignore low 32 */ 306 rdmsr(b->address, low, high);
251 affinity_restore(oldmask); 307 affinity_restore(oldmask);
252 return sprintf(buf, "%x\n", 308 return sprintf(buf, "%x\n",
253 (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit)); 309 (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit));
254} 310}
255 311
256static ssize_t store_error_count(struct threshold_bank *b, 312static ssize_t store_error_count(struct threshold_block *b,
257 const char *buf, size_t count) 313 const char *buf, size_t count)
258{ 314{
259 cpumask_t oldmask; 315 cpumask_t oldmask;
@@ -269,13 +325,13 @@ static ssize_t store_error_count(struct threshold_bank *b,
269 .store = _store, \ 325 .store = _store, \
270}; 326};
271 327
272#define ATTR_FIELDS(name) \ 328#define RW_ATTR(name) \
273 static struct threshold_attr name = \ 329static struct threshold_attr name = \
274 THRESHOLD_ATTR(name, 0644, show_## name, store_## name) 330 THRESHOLD_ATTR(name, 0644, show_## name, store_## name)
275 331
276ATTR_FIELDS(interrupt_enable); 332RW_ATTR(interrupt_enable);
277ATTR_FIELDS(threshold_limit); 333RW_ATTR(threshold_limit);
278ATTR_FIELDS(error_count); 334RW_ATTR(error_count);
279 335
280static struct attribute *default_attrs[] = { 336static struct attribute *default_attrs[] = {
281 &interrupt_enable.attr, 337 &interrupt_enable.attr,
@@ -284,12 +340,12 @@ static struct attribute *default_attrs[] = {
284 NULL 340 NULL
285}; 341};
286 342
287#define to_bank(k) container_of(k,struct threshold_bank,kobj) 343#define to_block(k) container_of(k, struct threshold_block, kobj)
288#define to_attr(a) container_of(a,struct threshold_attr,attr) 344#define to_attr(a) container_of(a, struct threshold_attr, attr)
289 345
290static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) 346static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
291{ 347{
292 struct threshold_bank *b = to_bank(kobj); 348 struct threshold_block *b = to_block(kobj);
293 struct threshold_attr *a = to_attr(attr); 349 struct threshold_attr *a = to_attr(attr);
294 ssize_t ret; 350 ssize_t ret;
295 ret = a->show ? a->show(b, buf) : -EIO; 351 ret = a->show ? a->show(b, buf) : -EIO;
@@ -299,7 +355,7 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
299static ssize_t store(struct kobject *kobj, struct attribute *attr, 355static ssize_t store(struct kobject *kobj, struct attribute *attr,
300 const char *buf, size_t count) 356 const char *buf, size_t count)
301{ 357{
302 struct threshold_bank *b = to_bank(kobj); 358 struct threshold_block *b = to_block(kobj);
303 struct threshold_attr *a = to_attr(attr); 359 struct threshold_attr *a = to_attr(attr);
304 ssize_t ret; 360 ssize_t ret;
305 ret = a->store ? a->store(b, buf, count) : -EIO; 361 ret = a->store ? a->store(b, buf, count) : -EIO;
@@ -316,69 +372,174 @@ static struct kobj_type threshold_ktype = {
316 .default_attrs = default_attrs, 372 .default_attrs = default_attrs,
317}; 373};
318 374
375static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
376 unsigned int bank,
377 unsigned int block,
378 u32 address)
379{
380 int err;
381 u32 low, high;
382 struct threshold_block *b = NULL;
383
384 if ((bank >= NR_BANKS) || (block >= NR_BLOCKS))
385 return 0;
386
387 if (rdmsr_safe(address, &low, &high))
388 goto recurse;
389
390 if (!(high & MASK_VALID_HI)) {
391 if (block)
392 goto recurse;
393 else
394 return 0;
395 }
396
397 if (!(high & MASK_VALID_HI >> 1) ||
398 (high & MASK_VALID_HI >> 2))
399 goto recurse;
400
401 b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
402 if (!b)
403 return -ENOMEM;
404 memset(b, 0, sizeof(struct threshold_block));
405
406 b->block = block;
407 b->bank = bank;
408 b->cpu = cpu;
409 b->address = address;
410 b->interrupt_enable = 0;
411 b->threshold_limit = THRESHOLD_MAX;
412
413 INIT_LIST_HEAD(&b->miscj);
414
415 if (per_cpu(threshold_banks, cpu)[bank]->blocks)
416 list_add(&b->miscj,
417 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
418 else
419 per_cpu(threshold_banks, cpu)[bank]->blocks = b;
420
421 kobject_set_name(&b->kobj, "misc%i", block);
422 b->kobj.parent = &per_cpu(threshold_banks, cpu)[bank]->kobj;
423 b->kobj.ktype = &threshold_ktype;
424 err = kobject_register(&b->kobj);
425 if (err)
426 goto out_free;
427recurse:
428 if (!block) {
429 address = (low & MASK_BLKPTR_LO) >> 21;
430 if (!address)
431 return 0;
432 address += MCG_XBLK_ADDR;
433 } else
434 ++address;
435
436 err = allocate_threshold_blocks(cpu, bank, ++block, address);
437 if (err)
438 goto out_free;
439
440 return err;
441
442out_free:
443 if (b) {
444 kobject_unregister(&b->kobj);
445 kfree(b);
446 }
447 return err;
448}
449
319/* symlinks sibling shared banks to first core. first core owns dir/files. */ 450/* symlinks sibling shared banks to first core. first core owns dir/files. */
320static __cpuinit int threshold_create_bank(unsigned int cpu, int bank) 451static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
321{ 452{
322 int err = 0; 453 int i, err = 0;
323 struct threshold_bank *b = NULL; 454 struct threshold_bank *b = NULL;
455 cpumask_t oldmask = CPU_MASK_NONE;
456 char name[32];
457
458 sprintf(name, "threshold_bank%i", bank);
324 459
325#ifdef CONFIG_SMP 460#ifdef CONFIG_SMP
326 if (cpu_core_id[cpu] && shared_bank[bank]) { /* symlink */ 461 if (cpu_data[cpu].cpu_core_id && shared_bank[bank]) { /* symlink */
327 char name[16]; 462 i = first_cpu(cpu_core_map[cpu]);
328 unsigned lcpu = first_cpu(cpu_core_map[cpu]); 463
329 if (cpu_core_id[lcpu]) 464 /* first core not up yet */
330 goto out; /* first core not up yet */ 465 if (cpu_data[i].cpu_core_id)
466 goto out;
467
468 /* already linked */
469 if (per_cpu(threshold_banks, cpu)[bank])
470 goto out;
471
472 b = per_cpu(threshold_banks, i)[bank];
331 473
332 b = per_cpu(threshold_banks, lcpu)[bank];
333 if (!b) 474 if (!b)
334 goto out; 475 goto out;
335 sprintf(name, "bank%i", bank); 476
336 err = sysfs_create_link(&per_cpu(device_threshold, cpu).kobj, 477 err = sysfs_create_link(&per_cpu(device_mce, cpu).kobj,
337 &b->kobj, name); 478 &b->kobj, name);
338 if (err) 479 if (err)
339 goto out; 480 goto out;
481
482 b->cpus = cpu_core_map[cpu];
340 per_cpu(threshold_banks, cpu)[bank] = b; 483 per_cpu(threshold_banks, cpu)[bank] = b;
341 goto out; 484 goto out;
342 } 485 }
343#endif 486#endif
344 487
345 b = kmalloc(sizeof(struct threshold_bank), GFP_KERNEL); 488 b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
346 if (!b) { 489 if (!b) {
347 err = -ENOMEM; 490 err = -ENOMEM;
348 goto out; 491 goto out;
349 } 492 }
350 memset(b, 0, sizeof(struct threshold_bank)); 493 memset(b, 0, sizeof(struct threshold_bank));
351 494
352 b->cpu = cpu; 495 kobject_set_name(&b->kobj, "threshold_bank%i", bank);
353 b->bank = bank; 496 b->kobj.parent = &per_cpu(device_mce, cpu).kobj;
354 b->interrupt_enable = 0; 497#ifndef CONFIG_SMP
355 b->threshold_limit = THRESHOLD_MAX; 498 b->cpus = CPU_MASK_ALL;
356 kobject_set_name(&b->kobj, "bank%i", bank); 499#else
357 b->kobj.parent = &per_cpu(device_threshold, cpu).kobj; 500 b->cpus = cpu_core_map[cpu];
358 b->kobj.ktype = &threshold_ktype; 501#endif
359
360 err = kobject_register(&b->kobj); 502 err = kobject_register(&b->kobj);
361 if (err) { 503 if (err)
362 kfree(b); 504 goto out_free;
363 goto out; 505
364 }
365 per_cpu(threshold_banks, cpu)[bank] = b; 506 per_cpu(threshold_banks, cpu)[bank] = b;
366 out: 507
508 oldmask = affinity_set(cpu);
509 err = allocate_threshold_blocks(cpu, bank, 0,
510 MSR_IA32_MC0_MISC + bank * 4);
511 affinity_restore(oldmask);
512
513 if (err)
514 goto out_free;
515
516 for_each_cpu_mask(i, b->cpus) {
517 if (i == cpu)
518 continue;
519
520 err = sysfs_create_link(&per_cpu(device_mce, i).kobj,
521 &b->kobj, name);
522 if (err)
523 goto out;
524
525 per_cpu(threshold_banks, i)[bank] = b;
526 }
527
528 goto out;
529
530out_free:
531 per_cpu(threshold_banks, cpu)[bank] = NULL;
532 kfree(b);
533out:
367 return err; 534 return err;
368} 535}
369 536
370/* create dir/files for all valid threshold banks */ 537/* create dir/files for all valid threshold banks */
371static __cpuinit int threshold_create_device(unsigned int cpu) 538static __cpuinit int threshold_create_device(unsigned int cpu)
372{ 539{
373 int bank; 540 unsigned int bank;
374 int err = 0; 541 int err = 0;
375 542
376 per_cpu(device_threshold, cpu).id = cpu;
377 per_cpu(device_threshold, cpu).cls = &threshold_sysclass;
378 err = sysdev_register(&per_cpu(device_threshold, cpu));
379 if (err)
380 goto out;
381
382 for (bank = 0; bank < NR_BANKS; ++bank) { 543 for (bank = 0; bank < NR_BANKS; ++bank) {
383 if (!(per_cpu(bank_map, cpu) & 1 << bank)) 544 if (!(per_cpu(bank_map, cpu) & 1 << bank))
384 continue; 545 continue;
@@ -386,7 +547,7 @@ static __cpuinit int threshold_create_device(unsigned int cpu)
386 if (err) 547 if (err)
387 goto out; 548 goto out;
388 } 549 }
389 out: 550out:
390 return err; 551 return err;
391} 552}
392 553
@@ -397,92 +558,85 @@ static __cpuinit int threshold_create_device(unsigned int cpu)
397 * of shared sysfs dir/files, and rest of the cores will be symlinked to it. 558 * of shared sysfs dir/files, and rest of the cores will be symlinked to it.
398 */ 559 */
399 560
400/* cpu hotplug call removes all symlinks before first core dies */ 561static __cpuinit void deallocate_threshold_block(unsigned int cpu,
562 unsigned int bank)
563{
564 struct threshold_block *pos = NULL;
565 struct threshold_block *tmp = NULL;
566 struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];
567
568 if (!head)
569 return;
570
571 list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
572 kobject_unregister(&pos->kobj);
573 list_del(&pos->miscj);
574 kfree(pos);
575 }
576
577 kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
578 per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
579}
580
401static __cpuinit void threshold_remove_bank(unsigned int cpu, int bank) 581static __cpuinit void threshold_remove_bank(unsigned int cpu, int bank)
402{ 582{
583 int i = 0;
403 struct threshold_bank *b; 584 struct threshold_bank *b;
404 char name[16]; 585 char name[32];
405 586
406 b = per_cpu(threshold_banks, cpu)[bank]; 587 b = per_cpu(threshold_banks, cpu)[bank];
588
407 if (!b) 589 if (!b)
408 return; 590 return;
409 if (shared_bank[bank] && atomic_read(&b->kobj.kref.refcount) > 2) { 591
410 sprintf(name, "bank%i", bank); 592 if (!b->blocks)
411 sysfs_remove_link(&per_cpu(device_threshold, cpu).kobj, name); 593 goto free_out;
412 per_cpu(threshold_banks, cpu)[bank] = NULL; 594
413 } else { 595 sprintf(name, "threshold_bank%i", bank);
414 kobject_unregister(&b->kobj); 596
415 kfree(per_cpu(threshold_banks, cpu)[bank]); 597 /* sibling symlink */
598 if (shared_bank[bank] && b->blocks->cpu != cpu) {
599 sysfs_remove_link(&per_cpu(device_mce, cpu).kobj, name);
600 per_cpu(threshold_banks, i)[bank] = NULL;
601 return;
602 }
603
604 /* remove all sibling symlinks before unregistering */
605 for_each_cpu_mask(i, b->cpus) {
606 if (i == cpu)
607 continue;
608
609 sysfs_remove_link(&per_cpu(device_mce, i).kobj, name);
610 per_cpu(threshold_banks, i)[bank] = NULL;
416 } 611 }
612
613 deallocate_threshold_block(cpu, bank);
614
615free_out:
616 kobject_unregister(&b->kobj);
617 kfree(b);
618 per_cpu(threshold_banks, cpu)[bank] = NULL;
417} 619}
418 620
419static __cpuinit void threshold_remove_device(unsigned int cpu) 621static __cpuinit void threshold_remove_device(unsigned int cpu)
420{ 622{
421 int bank; 623 unsigned int bank;
422 624
423 for (bank = 0; bank < NR_BANKS; ++bank) { 625 for (bank = 0; bank < NR_BANKS; ++bank) {
424 if (!(per_cpu(bank_map, cpu) & 1 << bank)) 626 if (!(per_cpu(bank_map, cpu) & 1 << bank))
425 continue; 627 continue;
426 threshold_remove_bank(cpu, bank); 628 threshold_remove_bank(cpu, bank);
427 } 629 }
428 sysdev_unregister(&per_cpu(device_threshold, cpu));
429} 630}
430 631
431/* link all existing siblings when first core comes up */
432static __cpuinit int threshold_create_symlinks(unsigned int cpu)
433{
434 int bank, err = 0;
435 unsigned int lcpu = 0;
436
437 if (cpu_core_id[cpu])
438 return 0;
439 for_each_cpu_mask(lcpu, cpu_core_map[cpu]) {
440 if (lcpu == cpu)
441 continue;
442 for (bank = 0; bank < NR_BANKS; ++bank) {
443 if (!(per_cpu(bank_map, cpu) & 1 << bank))
444 continue;
445 if (!shared_bank[bank])
446 continue;
447 err = threshold_create_bank(lcpu, bank);
448 }
449 }
450 return err;
451}
452
453/* remove all symlinks before first core dies. */
454static __cpuinit void threshold_remove_symlinks(unsigned int cpu)
455{
456 int bank;
457 unsigned int lcpu = 0;
458 if (cpu_core_id[cpu])
459 return;
460 for_each_cpu_mask(lcpu, cpu_core_map[cpu]) {
461 if (lcpu == cpu)
462 continue;
463 for (bank = 0; bank < NR_BANKS; ++bank) {
464 if (!(per_cpu(bank_map, cpu) & 1 << bank))
465 continue;
466 if (!shared_bank[bank])
467 continue;
468 threshold_remove_bank(lcpu, bank);
469 }
470 }
471}
472#else /* !CONFIG_HOTPLUG_CPU */ 632#else /* !CONFIG_HOTPLUG_CPU */
473static __cpuinit void threshold_create_symlinks(unsigned int cpu)
474{
475}
476static __cpuinit void threshold_remove_symlinks(unsigned int cpu)
477{
478}
479static void threshold_remove_device(unsigned int cpu) 633static void threshold_remove_device(unsigned int cpu)
480{ 634{
481} 635}
482#endif 636#endif
483 637
484/* get notified when a cpu comes on/off */ 638/* get notified when a cpu comes on/off */
485static int threshold_cpu_callback(struct notifier_block *nfb, 639static int __cpuinit threshold_cpu_callback(struct notifier_block *nfb,
486 unsigned long action, void *hcpu) 640 unsigned long action, void *hcpu)
487{ 641{
488 /* cpu was unsigned int to begin with */ 642 /* cpu was unsigned int to begin with */
@@ -494,13 +648,6 @@ static int threshold_cpu_callback(struct notifier_block *nfb,
494 switch (action) { 648 switch (action) {
495 case CPU_ONLINE: 649 case CPU_ONLINE:
496 threshold_create_device(cpu); 650 threshold_create_device(cpu);
497 threshold_create_symlinks(cpu);
498 break;
499 case CPU_DOWN_PREPARE:
500 threshold_remove_symlinks(cpu);
501 break;
502 case CPU_DOWN_FAILED:
503 threshold_create_symlinks(cpu);
504 break; 651 break;
505 case CPU_DEAD: 652 case CPU_DEAD:
506 threshold_remove_device(cpu); 653 threshold_remove_device(cpu);
@@ -512,29 +659,22 @@ static int threshold_cpu_callback(struct notifier_block *nfb,
512 return NOTIFY_OK; 659 return NOTIFY_OK;
513} 660}
514 661
515static struct notifier_block threshold_cpu_notifier = { 662static struct notifier_block threshold_cpu_notifier __cpuinitdata = {
516 .notifier_call = threshold_cpu_callback, 663 .notifier_call = threshold_cpu_callback,
517}; 664};
518 665
519static __init int threshold_init_device(void) 666static __init int threshold_init_device(void)
520{ 667{
521 int err; 668 unsigned lcpu = 0;
522 int lcpu = 0;
523
524 err = sysdev_class_register(&threshold_sysclass);
525 if (err)
526 goto out;
527 669
528 /* to hit CPUs online before the notifier is up */ 670 /* to hit CPUs online before the notifier is up */
529 for_each_online_cpu(lcpu) { 671 for_each_online_cpu(lcpu) {
530 err = threshold_create_device(lcpu); 672 int err = threshold_create_device(lcpu);
531 if (err) 673 if (err)
532 goto out; 674 return err;
533 } 675 }
534 register_cpu_notifier(&threshold_cpu_notifier); 676 register_cpu_notifier(&threshold_cpu_notifier);
535 677 return 0;
536 out:
537 return err;
538} 678}
539 679
540device_initcall(threshold_init_device); 680device_initcall(threshold_init_device);
diff --git a/arch/x86_64/kernel/module.c b/arch/x86_64/kernel/module.c
index bac195c74b..9d0958ff54 100644
--- a/arch/x86_64/kernel/module.c
+++ b/arch/x86_64/kernel/module.c
@@ -145,26 +145,38 @@ int apply_relocate(Elf_Shdr *sechdrs,
145 return -ENOSYS; 145 return -ENOSYS;
146} 146}
147 147
148extern void apply_alternatives(void *start, void *end);
149
150int module_finalize(const Elf_Ehdr *hdr, 148int module_finalize(const Elf_Ehdr *hdr,
151 const Elf_Shdr *sechdrs, 149 const Elf_Shdr *sechdrs,
152 struct module *me) 150 struct module *me)
153{ 151{
154 const Elf_Shdr *s; 152 const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL;
155 char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; 153 char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
156 154
157 /* look for .altinstructions to patch */ 155 for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
158 for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { 156 if (!strcmp(".text", secstrings + s->sh_name))
159 void *seg; 157 text = s;
160 if (strcmp(".altinstructions", secstrings + s->sh_name)) 158 if (!strcmp(".altinstructions", secstrings + s->sh_name))
161 continue; 159 alt = s;
162 seg = (void *)s->sh_addr; 160 if (!strcmp(".smp_locks", secstrings + s->sh_name))
163 apply_alternatives(seg, seg + s->sh_size); 161 locks= s;
164 } 162 }
163
164 if (alt) {
165 /* patch .altinstructions */
166 void *aseg = (void *)alt->sh_addr;
167 apply_alternatives(aseg, aseg + alt->sh_size);
168 }
169 if (locks && text) {
170 void *lseg = (void *)locks->sh_addr;
171 void *tseg = (void *)text->sh_addr;
172 alternatives_smp_module_add(me, me->name,
173 lseg, lseg + locks->sh_size,
174 tseg, tseg + text->sh_size);
175 }
165 return 0; 176 return 0;
166} 177}
167 178
168void module_arch_cleanup(struct module *mod) 179void module_arch_cleanup(struct module *mod)
169{ 180{
181 alternatives_smp_module_del(mod);
170} 182}
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 4e6357fe0e..399489c931 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -15,11 +15,7 @@
15#include <linux/config.h> 15#include <linux/config.h>
16#include <linux/mm.h> 16#include <linux/mm.h>
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <linux/bootmem.h>
19#include <linux/smp_lock.h>
20#include <linux/interrupt.h> 18#include <linux/interrupt.h>
21#include <linux/mc146818rtc.h>
22#include <linux/kernel_stat.h>
23#include <linux/module.h> 19#include <linux/module.h>
24#include <linux/sysdev.h> 20#include <linux/sysdev.h>
25#include <linux/nmi.h> 21#include <linux/nmi.h>
@@ -27,14 +23,11 @@
27#include <linux/kprobes.h> 23#include <linux/kprobes.h>
28 24
29#include <asm/smp.h> 25#include <asm/smp.h>
30#include <asm/mtrr.h>
31#include <asm/mpspec.h>
32#include <asm/nmi.h> 26#include <asm/nmi.h>
33#include <asm/msr.h>
34#include <asm/proto.h> 27#include <asm/proto.h>
35#include <asm/kdebug.h> 28#include <asm/kdebug.h>
36#include <asm/local.h>
37#include <asm/mce.h> 29#include <asm/mce.h>
30#include <asm/intel_arch_perfmon.h>
38 31
39/* 32/*
40 * lapic_nmi_owner tracks the ownership of the lapic NMI hardware: 33 * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
@@ -74,6 +67,9 @@ static unsigned int nmi_p4_cccr_val;
74#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING 0x76 67#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING 0x76
75#define K7_NMI_EVENT K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING 68#define K7_NMI_EVENT K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
76 69
70#define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
71#define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
72
77#define MSR_P4_MISC_ENABLE 0x1A0 73#define MSR_P4_MISC_ENABLE 0x1A0
78#define MSR_P4_MISC_ENABLE_PERF_AVAIL (1<<7) 74#define MSR_P4_MISC_ENABLE_PERF_AVAIL (1<<7)
79#define MSR_P4_MISC_ENABLE_PEBS_UNAVAIL (1<<12) 75#define MSR_P4_MISC_ENABLE_PEBS_UNAVAIL (1<<12)
@@ -105,7 +101,10 @@ static __cpuinit inline int nmi_known_cpu(void)
105 case X86_VENDOR_AMD: 101 case X86_VENDOR_AMD:
106 return boot_cpu_data.x86 == 15; 102 return boot_cpu_data.x86 == 15;
107 case X86_VENDOR_INTEL: 103 case X86_VENDOR_INTEL:
108 return boot_cpu_data.x86 == 15; 104 if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
105 return 1;
106 else
107 return (boot_cpu_data.x86 == 15);
109 } 108 }
110 return 0; 109 return 0;
111} 110}
@@ -211,6 +210,8 @@ int __init setup_nmi_watchdog(char *str)
211 210
212__setup("nmi_watchdog=", setup_nmi_watchdog); 211__setup("nmi_watchdog=", setup_nmi_watchdog);
213 212
213static void disable_intel_arch_watchdog(void);
214
214static void disable_lapic_nmi_watchdog(void) 215static void disable_lapic_nmi_watchdog(void)
215{ 216{
216 if (nmi_active <= 0) 217 if (nmi_active <= 0)
@@ -223,6 +224,8 @@ static void disable_lapic_nmi_watchdog(void)
223 if (boot_cpu_data.x86 == 15) { 224 if (boot_cpu_data.x86 == 15) {
224 wrmsr(MSR_P4_IQ_CCCR0, 0, 0); 225 wrmsr(MSR_P4_IQ_CCCR0, 0, 0);
225 wrmsr(MSR_P4_CRU_ESCR0, 0, 0); 226 wrmsr(MSR_P4_CRU_ESCR0, 0, 0);
227 } else if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
228 disable_intel_arch_watchdog();
226 } 229 }
227 break; 230 break;
228 } 231 }
@@ -375,6 +378,53 @@ static void setup_k7_watchdog(void)
375 wrmsr(MSR_K7_EVNTSEL0, evntsel, 0); 378 wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
376} 379}
377 380
381static void disable_intel_arch_watchdog(void)
382{
383 unsigned ebx;
384
385 /*
386 * Check whether the Architectural PerfMon supports
387 * Unhalted Core Cycles Event or not.
388 * NOTE: Corresponding bit = 0 in ebp indicates event present.
389 */
390 ebx = cpuid_ebx(10);
391 if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
392 wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, 0, 0);
393}
394
395static int setup_intel_arch_watchdog(void)
396{
397 unsigned int evntsel;
398 unsigned ebx;
399
400 /*
401 * Check whether the Architectural PerfMon supports
402 * Unhalted Core Cycles Event or not.
403 * NOTE: Corresponding bit = 0 in ebp indicates event present.
404 */
405 ebx = cpuid_ebx(10);
406 if ((ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
407 return 0;
408
409 nmi_perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
410
411 clear_msr_range(MSR_ARCH_PERFMON_EVENTSEL0, 2);
412 clear_msr_range(MSR_ARCH_PERFMON_PERFCTR0, 2);
413
414 evntsel = ARCH_PERFMON_EVENTSEL_INT
415 | ARCH_PERFMON_EVENTSEL_OS
416 | ARCH_PERFMON_EVENTSEL_USR
417 | ARCH_PERFMON_NMI_EVENT_SEL
418 | ARCH_PERFMON_NMI_EVENT_UMASK;
419
420 wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
421 wrmsrl(MSR_ARCH_PERFMON_PERFCTR0, -((u64)cpu_khz * 1000 / nmi_hz));
422 apic_write(APIC_LVTPC, APIC_DM_NMI);
423 evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
424 wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
425 return 1;
426}
427
378 428
379static int setup_p4_watchdog(void) 429static int setup_p4_watchdog(void)
380{ 430{
@@ -428,10 +478,16 @@ void setup_apic_nmi_watchdog(void)
428 setup_k7_watchdog(); 478 setup_k7_watchdog();
429 break; 479 break;
430 case X86_VENDOR_INTEL: 480 case X86_VENDOR_INTEL:
431 if (boot_cpu_data.x86 != 15) 481 if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
432 return; 482 if (!setup_intel_arch_watchdog())
433 if (!setup_p4_watchdog()) 483 return;
484 } else if (boot_cpu_data.x86 == 15) {
485 if (!setup_p4_watchdog())
486 return;
487 } else {
434 return; 488 return;
489 }
490
435 break; 491 break;
436 492
437 default: 493 default:
@@ -516,7 +572,14 @@ void __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
516 */ 572 */
517 wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0); 573 wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
518 apic_write(APIC_LVTPC, APIC_DM_NMI); 574 apic_write(APIC_LVTPC, APIC_DM_NMI);
519 } 575 } else if (nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
576 /*
577 * For Intel based architectural perfmon
578 * - LVTPC is masked on interrupt and must be
579 * unmasked by the LVTPC handler.
580 */
581 apic_write(APIC_LVTPC, APIC_DM_NMI);
582 }
520 wrmsrl(nmi_perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz)); 583 wrmsrl(nmi_perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
521 } 584 }
522} 585}
diff --git a/arch/x86_64/kernel/pci-calgary.c b/arch/x86_64/kernel/pci-calgary.c
new file mode 100644
index 0000000000..d91cb843f5
--- /dev/null
+++ b/arch/x86_64/kernel/pci-calgary.c
@@ -0,0 +1,1018 @@
1/*
2 * Derived from arch/powerpc/kernel/iommu.c
3 *
4 * Copyright (C) 2006 Jon Mason <jdmason@us.ibm.com>, IBM Corporation
5 * Copyright (C) 2006 Muli Ben-Yehuda <muli@il.ibm.com>, IBM Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <linux/config.h>
23#include <linux/kernel.h>
24#include <linux/init.h>
25#include <linux/types.h>
26#include <linux/slab.h>
27#include <linux/mm.h>
28#include <linux/spinlock.h>
29#include <linux/string.h>
30#include <linux/dma-mapping.h>
31#include <linux/init.h>
32#include <linux/bitops.h>
33#include <linux/pci_ids.h>
34#include <linux/pci.h>
35#include <linux/delay.h>
36#include <asm/proto.h>
37#include <asm/calgary.h>
38#include <asm/tce.h>
39#include <asm/pci-direct.h>
40#include <asm/system.h>
41#include <asm/dma.h>
42
/* Calgary is IBM's IOMMU bridge chip; probe key is vendor+device dword */
#define PCI_DEVICE_ID_IBM_CALGARY 0x02a1
#define PCI_VENDOR_DEVICE_ID_CALGARY \
	(PCI_VENDOR_ID_IBM | PCI_DEVICE_ID_IBM_CALGARY << 16)

/* we need these for register space address calculation */
#define START_ADDRESS 0xfe000000
#define CHASSIS_BASE 0
#define ONE_BASED_CHASSIS_NUM 1

/* register offsets inside the host bridge space */
#define PHB_CSR_OFFSET 0x0110
#define PHB_PLSSR_OFFSET 0x0120
#define PHB_CONFIG_RW_OFFSET 0x0160
#define PHB_IOBASE_BAR_LOW 0x0170
#define PHB_IOBASE_BAR_HIGH 0x0180
#define PHB_MEM_1_LOW 0x0190
#define PHB_MEM_1_HIGH 0x01A0
#define PHB_IO_ADDR_SIZE 0x01B0
#define PHB_MEM_1_SIZE 0x01C0
#define PHB_MEM_ST_OFFSET 0x01D0
#define PHB_AER_OFFSET 0x0200
#define PHB_CONFIG_0_HIGH 0x0220
#define PHB_CONFIG_0_LOW 0x0230
#define PHB_CONFIG_0_END 0x0240
#define PHB_MEM_2_LOW 0x02B0
#define PHB_MEM_2_HIGH 0x02C0
#define PHB_MEM_2_SIZE_HIGH 0x02D0
#define PHB_MEM_2_SIZE_LOW 0x02E0
#define PHB_DOSHOLE_OFFSET 0x08E0

/* PHB_CONFIG_RW */
#define PHB_TCE_ENABLE 0x20000000
#define PHB_SLOT_DISABLE 0x1C000000
#define PHB_DAC_DISABLE 0x01000000
#define PHB_MEM2_ENABLE 0x00400000
#define PHB_MCSR_ENABLE 0x00100000
/* TAR (Table Address Register) */
#define TAR_SW_BITS 0x0000ffffffff800fUL
#define TAR_VALID 0x0000000000000008UL
/* CSR (Channel/DMA Status Register) */
#define CSR_AGENT_MASK 0xffe0ffff

#define MAX_NUM_OF_PHBS 8 /* how many PHBs in total? */
#define MAX_PHB_BUS_NUM (MAX_NUM_OF_PHBS * 2) /* max dev->bus->number */
#define PHBS_PER_CALGARY 4

/* register offsets in Calgary's internal register space */
/* one TAR per PHB of a Calgary chip; indexed by busno_to_phbid() */
static const unsigned long tar_offsets[] = {
	0x0580 /* TAR0 */,
	0x0588 /* TAR1 */,
	0x0590 /* TAR2 */,
	0x0598 /* TAR3 */
};

/* split queue status registers, polled to quiesce DMA in tce_cache_blast() */
static const unsigned long split_queue_offsets[] = {
	0x4870 /* SPLIT QUEUE 0 */,
	0x5870 /* SPLIT QUEUE 1 */,
	0x6870 /* SPLIT QUEUE 2 */,
	0x7870 /* SPLIT QUEUE 3 */
};

/* base of each PHB's register block within the chip's 1MB window */
static const unsigned long phb_offsets[] = {
	0x8000 /* PHB0 */,
	0x9000 /* PHB1 */,
	0xA000 /* PHB2 */,
	0xB000 /* PHB3 */
};

/* per-PHB TCE table kernel virtual addresses, filled by detect_calgary() */
void* tce_table_kva[MAX_NUM_OF_PHBS * MAX_NUMNODES];
/* requested TCE table size; set from the calgary= option or RAM size */
unsigned int specified_table_size = TCE_TABLE_SIZE_UNSPECIFIED;
/* give empty PCI slots a TCE table anyway (calgary=translate_empty_slots) */
static int translate_empty_slots __read_mostly = 0;
/* set once detect_calgary() finds the hardware */
static int calgary_detected __read_mostly = 0;

/*
 * the bitmap of PHBs the user requested that we disable
 * translation on.
 */
static DECLARE_BITMAP(translation_disabled, MAX_NUMNODES * MAX_PHB_BUS_NUM);

static void tce_cache_blast(struct iommu_table *tbl);
123
/* enable this to stress test the chip's TCE cache */
#ifdef CONFIG_IOMMU_DEBUG
/* debug build: flush the TCE cache on every unmap to stress the chip */
static inline void tce_cache_blast_stress(struct iommu_table *tbl)
{
	tce_cache_blast(tbl);
}
#else
/* production build: no extra flushing on unmap */
static inline void tce_cache_blast_stress(struct iommu_table *tbl)
{
}
#endif /* CONFIG_IOMMU_DEBUG */
135
136static inline unsigned int num_dma_pages(unsigned long dma, unsigned int dmalen)
137{
138 unsigned int npages;
139
140 npages = PAGE_ALIGN(dma + dmalen) - (dma & PAGE_MASK);
141 npages >>= PAGE_SHIFT;
142
143 return npages;
144}
145
146static inline int translate_phb(struct pci_dev* dev)
147{
148 int disabled = test_bit(dev->bus->number, translation_disabled);
149 return !disabled;
150}
151
152static void iommu_range_reserve(struct iommu_table *tbl,
153 unsigned long start_addr, unsigned int npages)
154{
155 unsigned long index;
156 unsigned long end;
157
158 index = start_addr >> PAGE_SHIFT;
159
160 /* bail out if we're asked to reserve a region we don't cover */
161 if (index >= tbl->it_size)
162 return;
163
164 end = index + npages;
165 if (end > tbl->it_size) /* don't go off the table */
166 end = tbl->it_size;
167
168 while (index < end) {
169 if (test_bit(index, tbl->it_map))
170 printk(KERN_ERR "Calgary: entry already allocated at "
171 "0x%lx tbl %p dma 0x%lx npages %u\n",
172 index, tbl, start_addr, npages);
173 ++index;
174 }
175 set_bit_string(tbl->it_map, start_addr >> PAGE_SHIFT, npages);
176}
177
/*
 * Allocate a run of npages contiguous free entries from the table
 * bitmap, searching from it_hint first.  If that fails, the chip's TCE
 * cache is flushed once and the whole table rescanned from 0 before
 * giving up with bad_dma_address (or panicking if panic_on_overflow).
 * Caller must hold tbl->it_lock.  Returns the first entry index.
 */
static unsigned long iommu_range_alloc(struct iommu_table *tbl,
	unsigned int npages)
{
	unsigned long offset;

	BUG_ON(npages == 0);

	offset = find_next_zero_string(tbl->it_map, tbl->it_hint,
				       tbl->it_size, npages);
	if (offset == ~0UL) {
		/* nothing free above it_hint: flush and retry from 0 */
		tce_cache_blast(tbl);
		offset = find_next_zero_string(tbl->it_map, 0,
					       tbl->it_size, npages);
		if (offset == ~0UL) {
			printk(KERN_WARNING "Calgary: IOMMU full.\n");
			if (panic_on_overflow)
				panic("Calgary: fix the allocator.\n");
			else
				return bad_dma_address;
		}
	}

	set_bit_string(tbl->it_map, offset, npages);
	/* remember where to start the next search */
	tbl->it_hint = offset + npages;
	BUG_ON(tbl->it_hint > tbl->it_size);

	return offset;
}
206
/*
 * Map npages starting at vaddr into the IOMMU and return the resulting
 * DMA address (page-aligned entry plus the sub-page offset of vaddr).
 * Takes tbl->it_lock.  Returns bad_dma_address on allocation failure.
 */
static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *vaddr,
	unsigned int npages, int direction)
{
	unsigned long entry, flags;
	dma_addr_t ret = bad_dma_address;

	spin_lock_irqsave(&tbl->it_lock, flags);

	entry = iommu_range_alloc(tbl, npages);

	if (unlikely(entry == bad_dma_address))
		goto error;

	/* set the return dma address */
	ret = (entry << PAGE_SHIFT) | ((unsigned long)vaddr & ~PAGE_MASK);

	/* put the TCEs in the HW table */
	tce_build(tbl, entry, npages, (unsigned long)vaddr & PAGE_MASK,
		  direction);

	spin_unlock_irqrestore(&tbl->it_lock, flags);

	return ret;

error:
	spin_unlock_irqrestore(&tbl->it_lock, flags);
	printk(KERN_WARNING "Calgary: failed to allocate %u pages in "
	       "iommu %p\n", npages, tbl);
	return bad_dma_address;
}
237
/*
 * Tear down npages of TCEs starting at dma_addr and release the
 * corresponding bitmap entries.  Lockless flavor: caller must hold
 * tbl->it_lock.  Warns about bitmap bits that were already clear
 * (a sign of a double free or corrupted bookkeeping).
 */
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
	unsigned int npages)
{
	unsigned long entry;
	unsigned long i;

	entry = dma_addr >> PAGE_SHIFT;

	BUG_ON(entry + npages > tbl->it_size);

	/* invalidate the HW entries before releasing the bitmap range */
	tce_free(tbl, entry, npages);

	for (i = 0; i < npages; ++i) {
		if (!test_bit(entry + i, tbl->it_map))
			printk(KERN_ERR "Calgary: bit is off at 0x%lx "
			       "tbl %p dma 0x%Lx entry 0x%lx npages %u\n",
			       entry + i, tbl, dma_addr, entry, npages);
	}

	__clear_bit_string(tbl->it_map, entry, npages);

	/* CONFIG_IOMMU_DEBUG only: flush the TCE cache on every free */
	tce_cache_blast_stress(tbl);
}
261
262static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
263 unsigned int npages)
264{
265 unsigned long flags;
266
267 spin_lock_irqsave(&tbl->it_lock, flags);
268
269 __iommu_free(tbl, dma_addr, npages);
270
271 spin_unlock_irqrestore(&tbl->it_lock, flags);
272}
273
274static void __calgary_unmap_sg(struct iommu_table *tbl,
275 struct scatterlist *sglist, int nelems, int direction)
276{
277 while (nelems--) {
278 unsigned int npages;
279 dma_addr_t dma = sglist->dma_address;
280 unsigned int dmalen = sglist->dma_length;
281
282 if (dmalen == 0)
283 break;
284
285 npages = num_dma_pages(dma, dmalen);
286 __iommu_free(tbl, dma, npages);
287 sglist++;
288 }
289}
290
/*
 * dma_mapping_ops.unmap_sg: release a scatterlist mapping made by
 * calgary_map_sg().  No-op on PHBs with translation disabled (those
 * mappings were plain bus addresses).  The iommu_table lives in the
 * PHB root device's sysdata.
 */
void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist,
	int nelems, int direction)
{
	unsigned long flags;
	struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata;

	if (!translate_phb(to_pci_dev(dev)))
		return;

	spin_lock_irqsave(&tbl->it_lock, flags);

	__calgary_unmap_sg(tbl, sglist, nelems, direction);

	spin_unlock_irqrestore(&tbl->it_lock, flags);
}
306
307static int calgary_nontranslate_map_sg(struct device* dev,
308 struct scatterlist *sg, int nelems, int direction)
309{
310 int i;
311
312 for (i = 0; i < nelems; i++ ) {
313 struct scatterlist *s = &sg[i];
314 BUG_ON(!s->page);
315 s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
316 s->dma_length = s->length;
317 }
318 return nelems;
319}
320
/*
 * dma_mapping_ops.map_sg: map each scatterlist segment through the
 * IOMMU.  Returns the number of mapped entries, or 0 on failure after
 * unwinding every mapping made so far.  Falls back to untranslated
 * bus addresses when the PHB has translation disabled.
 */
int calgary_map_sg(struct device *dev, struct scatterlist *sg,
	int nelems, int direction)
{
	struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata;
	unsigned long flags;
	unsigned long vaddr;
	unsigned int npages;
	unsigned long entry;
	int i;

	if (!translate_phb(to_pci_dev(dev)))
		return calgary_nontranslate_map_sg(dev, sg, nelems, direction);

	spin_lock_irqsave(&tbl->it_lock, flags);

	for (i = 0; i < nelems; i++ ) {
		struct scatterlist *s = &sg[i];
		BUG_ON(!s->page);

		vaddr = (unsigned long)page_address(s->page) + s->offset;
		npages = num_dma_pages(vaddr, s->length);

		entry = iommu_range_alloc(tbl, npages);
		if (entry == bad_dma_address) {
			/* makes sure unmap knows to stop */
			s->dma_length = 0;
			goto error;
		}

		/* DMA address = allocated entry + sub-page offset */
		s->dma_address = (entry << PAGE_SHIFT) | s->offset;

		/* insert into HW table */
		tce_build(tbl, entry, npages, vaddr & PAGE_MASK,
			  direction);

		s->dma_length = s->length;
	}

	spin_unlock_irqrestore(&tbl->it_lock, flags);

	return nelems;
error:
	/* unwind: __calgary_unmap_sg stops at the zero dma_length above */
	__calgary_unmap_sg(tbl, sg, nelems, direction);
	for (i = 0; i < nelems; i++) {
		sg[i].dma_address = bad_dma_address;
		sg[i].dma_length = 0;
	}
	spin_unlock_irqrestore(&tbl->it_lock, flags);
	return 0;
}
371
372dma_addr_t calgary_map_single(struct device *dev, void *vaddr,
373 size_t size, int direction)
374{
375 dma_addr_t dma_handle = bad_dma_address;
376 unsigned long uaddr;
377 unsigned int npages;
378 struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata;
379
380 uaddr = (unsigned long)vaddr;
381 npages = num_dma_pages(uaddr, size);
382
383 if (translate_phb(to_pci_dev(dev)))
384 dma_handle = iommu_alloc(tbl, vaddr, npages, direction);
385 else
386 dma_handle = virt_to_bus(vaddr);
387
388 return dma_handle;
389}
390
391void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle,
392 size_t size, int direction)
393{
394 struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata;
395 unsigned int npages;
396
397 if (!translate_phb(to_pci_dev(dev)))
398 return;
399
400 npages = num_dma_pages(dma_handle, size);
401 iommu_free(tbl, dma_handle, npages);
402}
403
/*
 * dma_mapping_ops.alloc_coherent: allocate zeroed, DMA-able memory and
 * return its kernel virtual address, with the matching bus address in
 * *dma_handle.  Returns NULL on failure.  Caller frees through the
 * corresponding free_coherent path.
 */
void* calgary_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int npages, order;
	struct iommu_table *tbl;

	tbl = to_pci_dev(dev)->bus->self->sysdata;

	size = PAGE_ALIGN(size); /* size rounded up to full pages */
	npages = size >> PAGE_SHIFT;
	order = get_order(size);

	/* alloc enough pages (and possibly more) */
	ret = (void *)__get_free_pages(flag, order);
	if (!ret)
		goto error;
	memset(ret, 0, size);

	if (translate_phb(to_pci_dev(dev))) {
		/* set up tces to cover the allocated range */
		mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL);
		if (mapping == bad_dma_address)
			goto free;

		*dma_handle = mapping;
	} else /* non translated slot */
		*dma_handle = virt_to_bus(ret);

	return ret;

free:
	/* IOMMU mapping failed: give the pages back */
	free_pages((unsigned long)ret, get_order(size));
	ret = NULL;
error:
	return ret;
}
442
/* DMA operations installed as the global dma_ops by calgary_iommu_init() */
static struct dma_mapping_ops calgary_dma_ops = {
	.alloc_coherent = calgary_alloc_coherent,
	.map_single = calgary_map_single,
	.unmap_single = calgary_unmap_single,
	.map_sg = calgary_map_sg,
	.unmap_sg = calgary_unmap_sg,
};
450
451static inline int busno_to_phbid(unsigned char num)
452{
453 return bus_to_phb(num) % PHBS_PER_CALGARY;
454}
455
456static inline unsigned long split_queue_offset(unsigned char num)
457{
458 size_t idx = busno_to_phbid(num);
459
460 return split_queue_offsets[idx];
461}
462
463static inline unsigned long tar_offset(unsigned char num)
464{
465 size_t idx = busno_to_phbid(num);
466
467 return tar_offsets[idx];
468}
469
470static inline unsigned long phb_offset(unsigned char num)
471{
472 size_t idx = busno_to_phbid(num);
473
474 return phb_offsets[idx];
475}
476
/*
 * Form a register address inside the mapped 1MB window.  Uses OR rather
 * than addition: correct because bar is 1MB-aligned (ioremap of a 1MB
 * region at a 1MB-aligned physical address) and offset < 1MB.
 */
static inline void __iomem* calgary_reg(void __iomem *bar, unsigned long offset)
{
	unsigned long target = ((unsigned long)bar) | offset;
	return (void __iomem*)target;
}
482
/*
 * Invalidate the chip's TCE cache for this table's PHB.  The sequence
 * is order-critical: disable bus arbitration, wait for in-flight DMA
 * to drain through the split queues, rewrite the TAR (which flushes
 * the cache), then re-enable arbitration.
 */
static void tce_cache_blast(struct iommu_table *tbl)
{
	u64 val;
	u32 aer;
	int i = 0;
	void __iomem *bbar = tbl->bbar;
	void __iomem *target;

	/* disable arbitration on the bus */
	target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_AER_OFFSET);
	aer = readl(target);
	writel(0, target);

	/* read plssr to ensure it got there */
	target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_PLSSR_OFFSET);
	val = readl(target);

	/* poll split queues until all DMA activity is done */
	target = calgary_reg(bbar, split_queue_offset(tbl->it_busno));
	do {
		val = readq(target);
		i++;
	} while ((val & 0xff) != 0xff && i < 100);
	if (i == 100)
		printk(KERN_WARNING "Calgary: PCI bus not quiesced, "
		       "continuing anyway\n");

	/* invalidate TCE cache */
	target = calgary_reg(bbar, tar_offset(tbl->it_busno));
	writeq(tbl->tar_val, target);

	/* enable arbitration */
	target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_AER_OFFSET);
	writel(aer, target);
	(void)readl(target); /* flush */
}
519
520static void __init calgary_reserve_mem_region(struct pci_dev *dev, u64 start,
521 u64 limit)
522{
523 unsigned int numpages;
524
525 limit = limit | 0xfffff;
526 limit++;
527
528 numpages = ((limit - start) >> PAGE_SHIFT);
529 iommu_range_reserve(dev->sysdata, start, numpages);
530}
531
/*
 * Read the PHB's MEM_1 peripheral memory window (base high/low and
 * size registers, big-endian MMIO) and reserve it in the IOMMU bitmap.
 */
static void __init calgary_reserve_peripheral_mem_1(struct pci_dev *dev)
{
	void __iomem *target;
	u64 low, high, sizelow;
	u64 start, limit;
	struct iommu_table *tbl = dev->sysdata;
	unsigned char busnum = dev->bus->number;
	void __iomem *bbar = tbl->bbar;

	/* peripheral MEM_1 region */
	target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_1_LOW);
	low = be32_to_cpu(readl(target));
	target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_1_HIGH);
	high = be32_to_cpu(readl(target));
	target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_1_SIZE);
	sizelow = be32_to_cpu(readl(target));

	start = (high << 32) | low;
	/* MEM_1 size register is 32 bits; used directly as the limit */
	limit = sizelow;

	calgary_reserve_mem_region(dev, start, limit);
}
554
/*
 * Like calgary_reserve_peripheral_mem_1() but for the optional MEM_2
 * window; skipped entirely when MEM_2 is not enabled in the PHB config
 * register.  MEM_2 has a 64-bit size (high/low registers).
 */
static void __init calgary_reserve_peripheral_mem_2(struct pci_dev *dev)
{
	void __iomem *target;
	u32 val32;
	u64 low, high, sizelow, sizehigh;
	u64 start, limit;
	struct iommu_table *tbl = dev->sysdata;
	unsigned char busnum = dev->bus->number;
	void __iomem *bbar = tbl->bbar;

	/* is it enabled? */
	target = calgary_reg(bbar, phb_offset(busnum) | PHB_CONFIG_RW_OFFSET);
	val32 = be32_to_cpu(readl(target));
	if (!(val32 & PHB_MEM2_ENABLE))
		return;

	target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_LOW);
	low = be32_to_cpu(readl(target));
	target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_HIGH);
	high = be32_to_cpu(readl(target));
	target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_SIZE_LOW);
	sizelow = be32_to_cpu(readl(target));
	target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_SIZE_HIGH);
	sizehigh = be32_to_cpu(readl(target));

	start = (high << 32) | low;
	limit = (sizehigh << 32) | sizelow;

	calgary_reserve_mem_region(dev, start, limit);
}
585
/*
 * some regions of the IO address space do not get translated, so we
 * must not give devices IO addresses in those regions. The regions
 * are the 640KB-1MB region and the two PCI peripheral memory holes.
 * Reserve all of them in the IOMMU bitmap to avoid giving them out
 * later.
 */
static void __init calgary_reserve_regions(struct pci_dev *dev)
{
	unsigned int npages;
	void __iomem *bbar;
	unsigned char busnum;
	u64 start;
	struct iommu_table *tbl = dev->sysdata;

	bbar = tbl->bbar;
	busnum = dev->bus->number;

	/* reserve bad_dma_address in case it's a legal address */
	iommu_range_reserve(tbl, bad_dma_address, 1);

	/* avoid the BIOS/VGA first 640KB-1MB region */
	start = (640 * 1024);
	npages = ((1024 - 640) * 1024) >> PAGE_SHIFT;
	iommu_range_reserve(tbl, start, npages);

	/* reserve the two PCI peripheral memory regions in IO space */
	calgary_reserve_peripheral_mem_1(dev);
	calgary_reserve_peripheral_mem_2(dev);
}
616
/*
 * Build the TCE table for this PHB and point the hardware's Table
 * Address Register at it (table physical address plus the size code in
 * the software-controlled TAR bits).  Returns 0 or build_tce_table()'s
 * error.  The TAR value is cached in tbl->tar_val for tce_cache_blast().
 */
static int __init calgary_setup_tar(struct pci_dev *dev, void __iomem *bbar)
{
	u64 val64;
	u64 table_phys;
	void __iomem *target;
	int ret;
	struct iommu_table *tbl;

	/* build TCE tables for each PHB */
	ret = build_tce_table(dev, bbar);
	if (ret)
		return ret;

	calgary_reserve_regions(dev);

	/* set TARs for each PHB */
	target = calgary_reg(bbar, tar_offset(dev->bus->number));
	val64 = be64_to_cpu(readq(target));

	/* zero out all TAR bits under sw control */
	val64 &= ~TAR_SW_BITS;

	tbl = dev->sysdata;
	table_phys = (u64)__pa(tbl->it_base);
	val64 |= table_phys;

	BUG_ON(specified_table_size > TCE_TABLE_SIZE_8M);
	val64 |= (u64) specified_table_size;

	tbl->tar_val = cpu_to_be64(val64);
	writeq(tbl->tar_val, target);
	readq(target); /* flush */

	return 0;
}
652
/*
 * Undo calgary_setup_tar(): clear the software-controlled TAR bits so
 * the hardware no longer points at our TCE table, then free the
 * iommu_table.  dev->sysdata is NULLed to mark the PHB uninitialized.
 */
static void __init calgary_free_tar(struct pci_dev *dev)
{
	u64 val64;
	struct iommu_table *tbl = dev->sysdata;
	void __iomem *target;

	target = calgary_reg(tbl->bbar, tar_offset(dev->bus->number));
	val64 = be64_to_cpu(readq(target));
	val64 &= ~TAR_SW_BITS;
	writeq(cpu_to_be64(val64), target);
	readq(target); /* flush */

	kfree(tbl);
	dev->sysdata = NULL;
}
668
/*
 * Timer callback (every 2s, armed by calgary_enable_translation):
 * check the PHB's CSR for a DMA error.  On error, log it, clear the
 * CSR, and permanently disable the offending bus (the timer is NOT
 * re-armed in that case); otherwise just re-arm.
 */
static void calgary_watchdog(unsigned long data)
{
	struct pci_dev *dev = (struct pci_dev *)data;
	struct iommu_table *tbl = dev->sysdata;
	void __iomem *bbar = tbl->bbar;
	u32 val32;
	void __iomem *target;

	target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_CSR_OFFSET);
	val32 = be32_to_cpu(readl(target));

	/* If no error, the agent ID in the CSR is not valid */
	if (val32 & CSR_AGENT_MASK) {
		printk(KERN_EMERG "calgary_watchdog: DMA error on bus %d, "
		       "CSR = %#x\n", dev->bus->number, val32);
		writel(0, target);

		/* Disable bus that caused the error */
		target = calgary_reg(bbar, phb_offset(tbl->it_busno) |
				     PHB_CONFIG_RW_OFFSET);
		val32 = be32_to_cpu(readl(target));
		val32 |= PHB_SLOT_DISABLE;
		writel(cpu_to_be32(val32), target);
		readl(target); /* flush */
	} else {
		/* Reset the timer */
		mod_timer(&tbl->watchdog_timer, jiffies + 2 * HZ);
	}
}
698
/*
 * Turn on TCE translation for this PHB (and disable DAC, enable MCSR)
 * via the PHB config register, then start the per-PHB DMA-error
 * watchdog timer.
 */
static void __init calgary_enable_translation(struct pci_dev *dev)
{
	u32 val32;
	unsigned char busnum;
	void __iomem *target;
	void __iomem *bbar;
	struct iommu_table *tbl;

	busnum = dev->bus->number;
	tbl = dev->sysdata;
	bbar = tbl->bbar;

	/* enable TCE in PHB Config Register */
	target = calgary_reg(bbar, phb_offset(busnum) | PHB_CONFIG_RW_OFFSET);
	val32 = be32_to_cpu(readl(target));
	val32 |= PHB_TCE_ENABLE | PHB_DAC_DISABLE | PHB_MCSR_ENABLE;

	printk(KERN_INFO "Calgary: enabling translation on PHB %d\n", busnum);
	printk(KERN_INFO "Calgary: errant DMAs will now be prevented on this "
	       "bus.\n");

	writel(cpu_to_be32(val32), target);
	readl(target); /* flush */

	/* fire immediately; calgary_watchdog() re-arms itself */
	init_timer(&tbl->watchdog_timer);
	tbl->watchdog_timer.function = &calgary_watchdog;
	tbl->watchdog_timer.data = (unsigned long)dev;
	mod_timer(&tbl->watchdog_timer, jiffies);
}
728
/*
 * Inverse of calgary_enable_translation(): clear the TCE/DAC/MCSR bits
 * in the PHB config register and stop the watchdog timer.
 */
static void __init calgary_disable_translation(struct pci_dev *dev)
{
	u32 val32;
	unsigned char busnum;
	void __iomem *target;
	void __iomem *bbar;
	struct iommu_table *tbl;

	busnum = dev->bus->number;
	tbl = dev->sysdata;
	bbar = tbl->bbar;

	/* disable TCE in PHB Config Register */
	target = calgary_reg(bbar, phb_offset(busnum) | PHB_CONFIG_RW_OFFSET);
	val32 = be32_to_cpu(readl(target));
	val32 &= ~(PHB_TCE_ENABLE | PHB_DAC_DISABLE | PHB_MCSR_ENABLE);

	printk(KERN_INFO "Calgary: disabling translation on PHB %d!\n", busnum);
	writel(cpu_to_be32(val32), target);
	readl(target); /* flush */

	del_timer_sync(&tbl->watchdog_timer);
}
752
/*
 * Compute the physical base address of this Calgary chip's 1MB register
 * window from the device's bus number.
 * NOTE(review): the chassis/RIO-node math below encodes IBM xSeries
 * x366/x260/x460 topology (15 buses per chassis, RIO node 2 or 3) --
 * confirm against the platform spec before reusing elsewhere.
 */
static inline unsigned int __init locate_register_space(struct pci_dev *dev)
{
	int rionodeid;
	u32 address;

	/* buses 0-4 of a chassis hang off RIO node 2, buses 5+ off node 3 */
	rionodeid = (dev->bus->number % 15 > 4) ? 3 : 2;
	/*
	 * register space address calculation as follows:
	 * FE0MB-8MB*OneBasedChassisNumber+1MB*(RioNodeId-ChassisBase)
	 * ChassisBase is always zero for x366/x260/x460
	 * RioNodeId is 2 for first Calgary, 3 for second Calgary
	 */
	address = START_ADDRESS -
		(0x800000 * (ONE_BASED_CHASSIS_NUM + dev->bus->number / 15)) +
		(0x100000) * (rionodeid - CHASSIS_BASE);
	return address;
}
770
/*
 * Minimal setup for a PHB the user disabled translation on: no
 * iommu_table, but dev->bus->self must still point at the PHB root so
 * the dma ops' sysdata lookups work.  Always returns 0.
 * (Name keeps the original "nontraslated" typo; callers use it.)
 */
static int __init calgary_init_one_nontraslated(struct pci_dev *dev)
{
	dev->sysdata = NULL;
	dev->bus->self = dev;

	return 0;
}
778
/*
 * Full bring-up of one translated Calgary PHB: map its 1MB register
 * window, build and install the TCE table, and enable translation.
 * Returns 0 on success or a negative errno (unmapping on failure).
 */
static int __init calgary_init_one(struct pci_dev *dev)
{
	u32 address;
	void __iomem *bbar;
	int ret;

	address = locate_register_space(dev);
	/* map entire 1MB of Calgary config space */
	bbar = ioremap_nocache(address, 1024 * 1024);
	if (!bbar) {
		ret = -ENODATA;
		goto done;
	}

	ret = calgary_setup_tar(dev, bbar);
	if (ret)
		goto iounmap;

	dev->bus->self = dev;
	calgary_enable_translation(dev);

	return 0;

iounmap:
	iounmap(bbar);
done:
	return ret;
}
807
/*
 * Find every Calgary PCI function and initialize translation on the
 * PHBs that want it.  Returns 0 if at least one PHB initialized,
 * -ENODEV if no device was found, or the first per-device error after
 * unwinding the PHBs already set up.
 */
static int __init calgary_init(void)
{
	int i, ret = -ENODEV;
	struct pci_dev *dev = NULL;

	/*
	 * NOTE(review): '<=' runs one iteration past the last index; if
	 * num_online_nodes() == MAX_NUMNODES the tce_table_kva[i] read
	 * below would be one past the array -- confirm intended bound.
	 */
	for (i = 0; i <= num_online_nodes() * MAX_NUM_OF_PHBS; i++) {
		dev = pci_get_device(PCI_VENDOR_ID_IBM,
				     PCI_DEVICE_ID_IBM_CALGARY,
				     dev);
		if (!dev)
			break;
		if (!translate_phb(dev)) {
			calgary_init_one_nontraslated(dev);
			continue;
		}
		/* no table was allocated for this slot in detect_calgary() */
		if (!tce_table_kva[i] && !translate_empty_slots) {
			pci_dev_put(dev);
			continue;
		}
		ret = calgary_init_one(dev);
		if (ret)
			goto error;
	}

	return ret;

error:
	/* walk back over the devices we already enabled and tear down */
	for (i--; i >= 0; i--) {
		dev = pci_find_device_reverse(PCI_VENDOR_ID_IBM,
					      PCI_DEVICE_ID_IBM_CALGARY,
					      dev);
		if (!translate_phb(dev)) {
			pci_dev_put(dev);
			continue;
		}
		if (!tce_table_kva[i] && !translate_empty_slots)
			continue;
		calgary_disable_translation(dev);
		calgary_free_tar(dev);
		pci_dev_put(dev);
	}

	return ret;
}
852
853static inline int __init determine_tce_table_size(u64 ram)
854{
855 int ret;
856
857 if (specified_table_size != TCE_TABLE_SIZE_UNSPECIFIED)
858 return specified_table_size;
859
860 /*
861 * Table sizes are from 0 to 7 (TCE_TABLE_SIZE_64K to
862 * TCE_TABLE_SIZE_8M). Table size 0 has 8K entries and each
863 * larger table size has twice as many entries, so shift the
864 * max ram address by 13 to divide by 8K and then look at the
865 * order of the result to choose between 0-7.
866 */
867 ret = get_order(ram >> 13);
868 if (ret > TCE_TABLE_SIZE_8M)
869 ret = TCE_TABLE_SIZE_8M;
870
871 return ret;
872}
873
/*
 * Early-boot probe (before the PCI subsystem is up, hence raw config
 * reads): scan the possible bus numbers for Calgary bridges and
 * pre-allocate a TCE table per populated, enabled PHB.  Sets
 * calgary_detected/iommu_detected on success; frees everything on
 * allocation failure.
 */
void __init detect_calgary(void)
{
	u32 val;
	int bus, table_idx;
	void *tbl;
	int detected = 0;

	/*
	 * if the user specified iommu=off or iommu=soft or we found
	 * another HW IOMMU already, bail out.
	 */
	if (swiotlb || no_iommu || iommu_detected)
		return;

	specified_table_size = determine_tce_table_size(end_pfn * PAGE_SIZE);

	for (bus = 0, table_idx = 0;
	     bus <= num_online_nodes() * MAX_PHB_BUS_NUM;
	     bus++) {
		BUG_ON(bus > MAX_NUMNODES * MAX_PHB_BUS_NUM);
		/* a Calgary PHB identifies itself at devfn 0 */
		if (read_pci_config(bus, 0, 0, 0) != PCI_VENDOR_DEVICE_ID_CALGARY)
			continue;
		if (test_bit(bus, translation_disabled)) {
			printk(KERN_INFO "Calgary: translation is disabled for "
			       "PHB 0x%x\n", bus);
			/* skip this phb, don't allocate a tbl for it */
			tce_table_kva[table_idx] = NULL;
			table_idx++;
			continue;
		}
		/*
		 * scan the first slot of the PCI bus to see if there
		 * are any devices present
		 */
		val = read_pci_config(bus, 1, 0, 0);
		if (val != 0xffffffff || translate_empty_slots) {
			tbl = alloc_tce_table();
			if (!tbl)
				goto cleanup;
			detected = 1;
		} else
			tbl = NULL;

		tce_table_kva[table_idx] = tbl;
		table_idx++;
	}

	if (detected) {
		iommu_detected = 1;
		calgary_detected = 1;
		printk(KERN_INFO "PCI-DMA: Calgary IOMMU detected. "
		       "TCE table spec is %d.\n", specified_table_size);
	}
	return;

cleanup:
	/* free every table allocated before the failure */
	for (--table_idx; table_idx >= 0; --table_idx)
		if (tce_table_kva[table_idx])
			free_tce_table(tce_table_kva[table_idx]);
}
934
/*
 * Late init: if detect_calgary() found the hardware (and no software
 * IOMMU was forced), bring up all PHBs and install calgary_dma_ops as
 * the system DMA operations.  Returns 0 or a negative errno, in which
 * case the caller falls back to no_iommu.
 */
int __init calgary_iommu_init(void)
{
	int ret;

	if (no_iommu || swiotlb)
		return -ENODEV;

	if (!calgary_detected)
		return -ENODEV;

	/* ok, we're trying to use Calgary - let's roll */
	printk(KERN_INFO "PCI-DMA: Using Calgary IOMMU\n");

	ret = calgary_init();
	if (ret) {
		printk(KERN_ERR "PCI-DMA: Calgary init failed %d, "
		       "falling back to no_iommu\n", ret);
		/* without an IOMMU, >4GB systems can't address all RAM */
		if (end_pfn > MAX_DMA32_PFN)
			printk(KERN_ERR "WARNING more than 4GB of memory, "
			       "32bit PCI may malfunction.\n");
		return ret;
	}

	force_iommu = 1;
	dma_ops = &calgary_dma_ops;

	return 0;
}
963
/*
 * Parse the "calgary=" boot option: a comma-separated list of a table
 * size (64k..8M), "translate_empty_slots", and/or "disable=<busnum>"
 * entries.  Runs before detect_calgary().  Always returns 1
 * (option consumed).
 */
static int __init calgary_parse_options(char *p)
{
	unsigned int bridge;
	size_t len;
	char* endp;

	while (*p) {
		if (!strncmp(p, "64k", 3))
			specified_table_size = TCE_TABLE_SIZE_64K;
		else if (!strncmp(p, "128k", 4))
			specified_table_size = TCE_TABLE_SIZE_128K;
		else if (!strncmp(p, "256k", 4))
			specified_table_size = TCE_TABLE_SIZE_256K;
		else if (!strncmp(p, "512k", 4))
			specified_table_size = TCE_TABLE_SIZE_512K;
		else if (!strncmp(p, "1M", 2))
			specified_table_size = TCE_TABLE_SIZE_1M;
		else if (!strncmp(p, "2M", 2))
			specified_table_size = TCE_TABLE_SIZE_2M;
		else if (!strncmp(p, "4M", 2))
			specified_table_size = TCE_TABLE_SIZE_4M;
		else if (!strncmp(p, "8M", 2))
			specified_table_size = TCE_TABLE_SIZE_8M;

		len = strlen("translate_empty_slots");
		if (!strncmp(p, "translate_empty_slots", len))
			translate_empty_slots = 1;

		len = strlen("disable");
		if (!strncmp(p, "disable", len)) {
			p += len;
			/* accept both "disable=<n>" and "disable<n>" */
			if (*p == '=')
				++p;
			if (*p == '\0')
				break;
			bridge = simple_strtol(p, &endp, 0);
			if (p == endp)
				break;

			if (bridge <= (num_online_nodes() * MAX_PHB_BUS_NUM)) {
				printk(KERN_INFO "Calgary: disabling "
				       "translation for PHB 0x%x\n", bridge);
				set_bit(bridge, translation_disabled);
			}
		}

		/* advance to the next comma-separated token, if any */
		p = strpbrk(p, ",");
		if (!p)
			break;

		p++; /* skip ',' */
	}
	return 1;
}
__setup("calgary=", calgary_parse_options);
diff --git a/arch/x86_64/kernel/pci-dma.c b/arch/x86_64/kernel/pci-dma.c
index a9275c9557..9c44f4f243 100644
--- a/arch/x86_64/kernel/pci-dma.c
+++ b/arch/x86_64/kernel/pci-dma.c
@@ -9,6 +9,7 @@
9#include <linux/module.h> 9#include <linux/module.h>
10#include <asm/io.h> 10#include <asm/io.h>
11#include <asm/proto.h> 11#include <asm/proto.h>
12#include <asm/calgary.h>
12 13
13int iommu_merge __read_mostly = 0; 14int iommu_merge __read_mostly = 0;
14EXPORT_SYMBOL(iommu_merge); 15EXPORT_SYMBOL(iommu_merge);
@@ -33,12 +34,15 @@ int panic_on_overflow __read_mostly = 0;
33int force_iommu __read_mostly= 0; 34int force_iommu __read_mostly= 0;
34#endif 35#endif
35 36
37/* Set this to 1 if there is a HW IOMMU in the system */
38int iommu_detected __read_mostly = 0;
39
36/* Dummy device used for NULL arguments (normally ISA). Better would 40/* Dummy device used for NULL arguments (normally ISA). Better would
37 be probably a smaller DMA mask, but this is bug-to-bug compatible 41 be probably a smaller DMA mask, but this is bug-to-bug compatible
38 to i386. */ 42 to i386. */
39struct device fallback_dev = { 43struct device fallback_dev = {
40 .bus_id = "fallback device", 44 .bus_id = "fallback device",
41 .coherent_dma_mask = 0xffffffff, 45 .coherent_dma_mask = DMA_32BIT_MASK,
42 .dma_mask = &fallback_dev.coherent_dma_mask, 46 .dma_mask = &fallback_dev.coherent_dma_mask,
43}; 47};
44 48
@@ -77,7 +81,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
77 dev = &fallback_dev; 81 dev = &fallback_dev;
78 dma_mask = dev->coherent_dma_mask; 82 dma_mask = dev->coherent_dma_mask;
79 if (dma_mask == 0) 83 if (dma_mask == 0)
80 dma_mask = 0xffffffff; 84 dma_mask = DMA_32BIT_MASK;
81 85
82 /* Don't invoke OOM killer */ 86 /* Don't invoke OOM killer */
83 gfp |= __GFP_NORETRY; 87 gfp |= __GFP_NORETRY;
@@ -90,7 +94,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
90 larger than 16MB and in this case we have a chance of 94 larger than 16MB and in this case we have a chance of
91 finding fitting memory in the next higher zone first. If 95 finding fitting memory in the next higher zone first. If
92 not retry with true GFP_DMA. -AK */ 96 not retry with true GFP_DMA. -AK */
93 if (dma_mask <= 0xffffffff) 97 if (dma_mask <= DMA_32BIT_MASK)
94 gfp |= GFP_DMA32; 98 gfp |= GFP_DMA32;
95 99
96 again: 100 again:
@@ -111,7 +115,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
111 115
112 /* Don't use the 16MB ZONE_DMA unless absolutely 116 /* Don't use the 16MB ZONE_DMA unless absolutely
113 needed. It's better to use remapping first. */ 117 needed. It's better to use remapping first. */
114 if (dma_mask < 0xffffffff && !(gfp & GFP_DMA)) { 118 if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
115 gfp = (gfp & ~GFP_DMA32) | GFP_DMA; 119 gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
116 goto again; 120 goto again;
117 } 121 }
@@ -174,7 +178,7 @@ int dma_supported(struct device *dev, u64 mask)
174 /* Copied from i386. Doesn't make much sense, because it will 178 /* Copied from i386. Doesn't make much sense, because it will
175 only work for pci_alloc_coherent. 179 only work for pci_alloc_coherent.
176 The caller just has to use GFP_DMA in this case. */ 180 The caller just has to use GFP_DMA in this case. */
177 if (mask < 0x00ffffff) 181 if (mask < DMA_24BIT_MASK)
178 return 0; 182 return 0;
179 183
180 /* Tell the device to use SAC when IOMMU force is on. This 184 /* Tell the device to use SAC when IOMMU force is on. This
@@ -189,7 +193,7 @@ int dma_supported(struct device *dev, u64 mask)
189 SAC for these. Assume all masks <= 40 bits are of this 193 SAC for these. Assume all masks <= 40 bits are of this
190 type. Normally this doesn't make any difference, but gives 194 type. Normally this doesn't make any difference, but gives
191 more gentle handling of IOMMU overflow. */ 195 more gentle handling of IOMMU overflow. */
192 if (iommu_sac_force && (mask >= 0xffffffffffULL)) { 196 if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
193 printk(KERN_INFO "%s: Force SAC with mask %Lx\n", dev->bus_id,mask); 197 printk(KERN_INFO "%s: Force SAC with mask %Lx\n", dev->bus_id,mask);
194 return 0; 198 return 0;
195 } 199 }
@@ -266,7 +270,7 @@ __init int iommu_setup(char *p)
266 swiotlb = 1; 270 swiotlb = 1;
267#endif 271#endif
268 272
269#ifdef CONFIG_GART_IOMMU 273#ifdef CONFIG_IOMMU
270 gart_parse_options(p); 274 gart_parse_options(p);
271#endif 275#endif
272 276
@@ -276,3 +280,40 @@ __init int iommu_setup(char *p)
276 } 280 }
277 return 1; 281 return 1;
278} 282}
283__setup("iommu=", iommu_setup);
284
285void __init pci_iommu_alloc(void)
286{
287 /*
288 * The order of these functions is important for
289 * fall-back/fail-over reasons
290 */
291#ifdef CONFIG_IOMMU
292 iommu_hole_init();
293#endif
294
295#ifdef CONFIG_CALGARY_IOMMU
296 detect_calgary();
297#endif
298
299#ifdef CONFIG_SWIOTLB
300 pci_swiotlb_init();
301#endif
302}
303
304static int __init pci_iommu_init(void)
305{
306#ifdef CONFIG_CALGARY_IOMMU
307 calgary_iommu_init();
308#endif
309
310#ifdef CONFIG_IOMMU
311 gart_iommu_init();
312#endif
313
314 no_iommu_init();
315 return 0;
316}
317
318/* Must execute after PCI subsystem */
319fs_initcall(pci_iommu_init);
diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c
index 82a7c9bfdf..4ca674d16b 100644
--- a/arch/x86_64/kernel/pci-gart.c
+++ b/arch/x86_64/kernel/pci-gart.c
@@ -32,6 +32,7 @@
32#include <asm/kdebug.h> 32#include <asm/kdebug.h>
33#include <asm/swiotlb.h> 33#include <asm/swiotlb.h>
34#include <asm/dma.h> 34#include <asm/dma.h>
35#include <asm/k8.h>
35 36
36unsigned long iommu_bus_base; /* GART remapping area (physical) */ 37unsigned long iommu_bus_base; /* GART remapping area (physical) */
37static unsigned long iommu_size; /* size of remapping area bytes */ 38static unsigned long iommu_size; /* size of remapping area bytes */
@@ -46,8 +47,6 @@ u32 *iommu_gatt_base; /* Remapping table */
46 also seen with Qlogic at least). */ 47 also seen with Qlogic at least). */
47int iommu_fullflush = 1; 48int iommu_fullflush = 1;
48 49
49#define MAX_NB 8
50
51/* Allocation bitmap for the remapping area */ 50/* Allocation bitmap for the remapping area */
52static DEFINE_SPINLOCK(iommu_bitmap_lock); 51static DEFINE_SPINLOCK(iommu_bitmap_lock);
53static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */ 52static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */
@@ -63,13 +62,6 @@ static u32 gart_unmapped_entry;
63#define to_pages(addr,size) \ 62#define to_pages(addr,size) \
64 (round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT) 63 (round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
65 64
66#define for_all_nb(dev) \
67 dev = NULL; \
68 while ((dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1103, dev))!=NULL)
69
70static struct pci_dev *northbridges[MAX_NB];
71static u32 northbridge_flush_word[MAX_NB];
72
73#define EMERGENCY_PAGES 32 /* = 128KB */ 65#define EMERGENCY_PAGES 32 /* = 128KB */
74 66
75#ifdef CONFIG_AGP 67#ifdef CONFIG_AGP
@@ -93,7 +85,7 @@ static unsigned long alloc_iommu(int size)
93 offset = find_next_zero_string(iommu_gart_bitmap,next_bit,iommu_pages,size); 85 offset = find_next_zero_string(iommu_gart_bitmap,next_bit,iommu_pages,size);
94 if (offset == -1) { 86 if (offset == -1) {
95 need_flush = 1; 87 need_flush = 1;
96 offset = find_next_zero_string(iommu_gart_bitmap,0,next_bit,size); 88 offset = find_next_zero_string(iommu_gart_bitmap,0,iommu_pages,size);
97 } 89 }
98 if (offset != -1) { 90 if (offset != -1) {
99 set_bit_string(iommu_gart_bitmap, offset, size); 91 set_bit_string(iommu_gart_bitmap, offset, size);
@@ -120,44 +112,17 @@ static void free_iommu(unsigned long offset, int size)
120/* 112/*
121 * Use global flush state to avoid races with multiple flushers. 113 * Use global flush state to avoid races with multiple flushers.
122 */ 114 */
123static void flush_gart(struct device *dev) 115static void flush_gart(void)
124{ 116{
125 unsigned long flags; 117 unsigned long flags;
126 int flushed = 0;
127 int i, max;
128
129 spin_lock_irqsave(&iommu_bitmap_lock, flags); 118 spin_lock_irqsave(&iommu_bitmap_lock, flags);
130 if (need_flush) { 119 if (need_flush) {
131 max = 0; 120 k8_flush_garts();
132 for (i = 0; i < MAX_NB; i++) {
133 if (!northbridges[i])
134 continue;
135 pci_write_config_dword(northbridges[i], 0x9c,
136 northbridge_flush_word[i] | 1);
137 flushed++;
138 max = i;
139 }
140 for (i = 0; i <= max; i++) {
141 u32 w;
142 if (!northbridges[i])
143 continue;
144 /* Make sure the hardware actually executed the flush. */
145 for (;;) {
146 pci_read_config_dword(northbridges[i], 0x9c, &w);
147 if (!(w & 1))
148 break;
149 cpu_relax();
150 }
151 }
152 if (!flushed)
153 printk("nothing to flush?\n");
154 need_flush = 0; 121 need_flush = 0;
155 } 122 }
156 spin_unlock_irqrestore(&iommu_bitmap_lock, flags); 123 spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
157} 124}
158 125
159
160
161#ifdef CONFIG_IOMMU_LEAK 126#ifdef CONFIG_IOMMU_LEAK
162 127
163#define SET_LEAK(x) if (iommu_leak_tab) \ 128#define SET_LEAK(x) if (iommu_leak_tab) \
@@ -266,7 +231,7 @@ static dma_addr_t gart_map_simple(struct device *dev, char *buf,
266 size_t size, int dir) 231 size_t size, int dir)
267{ 232{
268 dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir); 233 dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);
269 flush_gart(dev); 234 flush_gart();
270 return map; 235 return map;
271} 236}
272 237
@@ -289,6 +254,28 @@ dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir)
289} 254}
290 255
291/* 256/*
257 * Free a DMA mapping.
258 */
259void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
260 size_t size, int direction)
261{
262 unsigned long iommu_page;
263 int npages;
264 int i;
265
266 if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
267 dma_addr >= iommu_bus_base + iommu_size)
268 return;
269 iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
270 npages = to_pages(dma_addr, size);
271 for (i = 0; i < npages; i++) {
272 iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
273 CLEAR_LEAK(iommu_page + i);
274 }
275 free_iommu(iommu_page, npages);
276}
277
278/*
292 * Wrapper for pci_unmap_single working with scatterlists. 279 * Wrapper for pci_unmap_single working with scatterlists.
293 */ 280 */
294void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) 281void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
@@ -299,7 +286,7 @@ void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int di
299 struct scatterlist *s = &sg[i]; 286 struct scatterlist *s = &sg[i];
300 if (!s->dma_length || !s->length) 287 if (!s->dma_length || !s->length)
301 break; 288 break;
302 dma_unmap_single(dev, s->dma_address, s->dma_length, dir); 289 gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
303 } 290 }
304} 291}
305 292
@@ -329,7 +316,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
329 s->dma_address = addr; 316 s->dma_address = addr;
330 s->dma_length = s->length; 317 s->dma_length = s->length;
331 } 318 }
332 flush_gart(dev); 319 flush_gart();
333 return nents; 320 return nents;
334} 321}
335 322
@@ -436,13 +423,13 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
436 if (dma_map_cont(sg, start, i, sg+out, pages, need) < 0) 423 if (dma_map_cont(sg, start, i, sg+out, pages, need) < 0)
437 goto error; 424 goto error;
438 out++; 425 out++;
439 flush_gart(dev); 426 flush_gart();
440 if (out < nents) 427 if (out < nents)
441 sg[out].dma_length = 0; 428 sg[out].dma_length = 0;
442 return out; 429 return out;
443 430
444error: 431error:
445 flush_gart(NULL); 432 flush_gart();
446 gart_unmap_sg(dev, sg, nents, dir); 433 gart_unmap_sg(dev, sg, nents, dir);
447 /* When it was forced or merged try again in a dumb way */ 434 /* When it was forced or merged try again in a dumb way */
448 if (force_iommu || iommu_merge) { 435 if (force_iommu || iommu_merge) {
@@ -458,28 +445,6 @@ error:
458 return 0; 445 return 0;
459} 446}
460 447
461/*
462 * Free a DMA mapping.
463 */
464void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
465 size_t size, int direction)
466{
467 unsigned long iommu_page;
468 int npages;
469 int i;
470
471 if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
472 dma_addr >= iommu_bus_base + iommu_size)
473 return;
474 iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
475 npages = to_pages(dma_addr, size);
476 for (i = 0; i < npages; i++) {
477 iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
478 CLEAR_LEAK(iommu_page + i);
479 }
480 free_iommu(iommu_page, npages);
481}
482
483static int no_agp; 448static int no_agp;
484 449
485static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size) 450static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
@@ -532,10 +497,13 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
532 void *gatt; 497 void *gatt;
533 unsigned aper_base, new_aper_base; 498 unsigned aper_base, new_aper_base;
534 unsigned aper_size, gatt_size, new_aper_size; 499 unsigned aper_size, gatt_size, new_aper_size;
535 500 int i;
501
536 printk(KERN_INFO "PCI-DMA: Disabling AGP.\n"); 502 printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
537 aper_size = aper_base = info->aper_size = 0; 503 aper_size = aper_base = info->aper_size = 0;
538 for_all_nb(dev) { 504 dev = NULL;
505 for (i = 0; i < num_k8_northbridges; i++) {
506 dev = k8_northbridges[i];
539 new_aper_base = read_aperture(dev, &new_aper_size); 507 new_aper_base = read_aperture(dev, &new_aper_size);
540 if (!new_aper_base) 508 if (!new_aper_base)
541 goto nommu; 509 goto nommu;
@@ -558,11 +526,12 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
558 panic("Cannot allocate GATT table"); 526 panic("Cannot allocate GATT table");
559 memset(gatt, 0, gatt_size); 527 memset(gatt, 0, gatt_size);
560 agp_gatt_table = gatt; 528 agp_gatt_table = gatt;
561 529
562 for_all_nb(dev) { 530 for (i = 0; i < num_k8_northbridges; i++) {
563 u32 ctl; 531 u32 ctl;
564 u32 gatt_reg; 532 u32 gatt_reg;
565 533
534 dev = k8_northbridges[i];
566 gatt_reg = __pa(gatt) >> 12; 535 gatt_reg = __pa(gatt) >> 12;
567 gatt_reg <<= 4; 536 gatt_reg <<= 4;
568 pci_write_config_dword(dev, 0x98, gatt_reg); 537 pci_write_config_dword(dev, 0x98, gatt_reg);
@@ -573,7 +542,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
573 542
574 pci_write_config_dword(dev, 0x90, ctl); 543 pci_write_config_dword(dev, 0x90, ctl);
575 } 544 }
576 flush_gart(NULL); 545 flush_gart();
577 546
578 printk("PCI-DMA: aperture base @ %x size %u KB\n",aper_base, aper_size>>10); 547 printk("PCI-DMA: aperture base @ %x size %u KB\n",aper_base, aper_size>>10);
579 return 0; 548 return 0;
@@ -602,15 +571,19 @@ static struct dma_mapping_ops gart_dma_ops = {
602 .unmap_sg = gart_unmap_sg, 571 .unmap_sg = gart_unmap_sg,
603}; 572};
604 573
605static int __init pci_iommu_init(void) 574void __init gart_iommu_init(void)
606{ 575{
607 struct agp_kern_info info; 576 struct agp_kern_info info;
608 unsigned long aper_size; 577 unsigned long aper_size;
609 unsigned long iommu_start; 578 unsigned long iommu_start;
610 struct pci_dev *dev;
611 unsigned long scratch; 579 unsigned long scratch;
612 long i; 580 long i;
613 581
582 if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
583 printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
584 return;
585 }
586
614#ifndef CONFIG_AGP_AMD64 587#ifndef CONFIG_AGP_AMD64
615 no_agp = 1; 588 no_agp = 1;
616#else 589#else
@@ -622,7 +595,11 @@ static int __init pci_iommu_init(void)
622#endif 595#endif
623 596
624 if (swiotlb) 597 if (swiotlb)
625 return -1; 598 return;
599
600 /* Did we detect a different HW IOMMU? */
601 if (iommu_detected && !iommu_aperture)
602 return;
626 603
627 if (no_iommu || 604 if (no_iommu ||
628 (!force_iommu && end_pfn <= MAX_DMA32_PFN) || 605 (!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
@@ -634,15 +611,7 @@ static int __init pci_iommu_init(void)
634 "but IOMMU not available.\n" 611 "but IOMMU not available.\n"
635 KERN_ERR "WARNING 32bit PCI may malfunction.\n"); 612 KERN_ERR "WARNING 32bit PCI may malfunction.\n");
636 } 613 }
637 return -1; 614 return;
638 }
639
640 i = 0;
641 for_all_nb(dev)
642 i++;
643 if (i > MAX_NB) {
644 printk(KERN_ERR "PCI-GART: Too many northbridges (%ld). Disabled\n", i);
645 return -1;
646 } 615 }
647 616
648 printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n"); 617 printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
@@ -707,26 +676,10 @@ static int __init pci_iommu_init(void)
707 for (i = EMERGENCY_PAGES; i < iommu_pages; i++) 676 for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
708 iommu_gatt_base[i] = gart_unmapped_entry; 677 iommu_gatt_base[i] = gart_unmapped_entry;
709 678
710 for_all_nb(dev) { 679 flush_gart();
711 u32 flag;
712 int cpu = PCI_SLOT(dev->devfn) - 24;
713 if (cpu >= MAX_NB)
714 continue;
715 northbridges[cpu] = dev;
716 pci_read_config_dword(dev, 0x9c, &flag); /* cache flush word */
717 northbridge_flush_word[cpu] = flag;
718 }
719
720 flush_gart(NULL);
721
722 dma_ops = &gart_dma_ops; 680 dma_ops = &gart_dma_ops;
723
724 return 0;
725} 681}
726 682
727/* Must execute after PCI subsystem */
728fs_initcall(pci_iommu_init);
729
730void gart_parse_options(char *p) 683void gart_parse_options(char *p)
731{ 684{
732 int arg; 685 int arg;
diff --git a/arch/x86_64/kernel/pci-nommu.c b/arch/x86_64/kernel/pci-nommu.c
index 1f6ecc6206..c4c3cc36ac 100644
--- a/arch/x86_64/kernel/pci-nommu.c
+++ b/arch/x86_64/kernel/pci-nommu.c
@@ -4,6 +4,8 @@
4#include <linux/init.h> 4#include <linux/init.h>
5#include <linux/pci.h> 5#include <linux/pci.h>
6#include <linux/string.h> 6#include <linux/string.h>
7#include <linux/dma-mapping.h>
8
7#include <asm/proto.h> 9#include <asm/proto.h>
8#include <asm/processor.h> 10#include <asm/processor.h>
9#include <asm/dma.h> 11#include <asm/dma.h>
@@ -12,10 +14,11 @@ static int
12check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) 14check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
13{ 15{
14 if (hwdev && bus + size > *hwdev->dma_mask) { 16 if (hwdev && bus + size > *hwdev->dma_mask) {
15 if (*hwdev->dma_mask >= 0xffffffffULL) 17 if (*hwdev->dma_mask >= DMA_32BIT_MASK)
16 printk(KERN_ERR 18 printk(KERN_ERR
17 "nommu_%s: overflow %Lx+%lu of device mask %Lx\n", 19 "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
18 name, (long long)bus, size, (long long)*hwdev->dma_mask); 20 name, (long long)bus, size,
21 (long long)*hwdev->dma_mask);
19 return 0; 22 return 0;
20 } 23 }
21 return 1; 24 return 1;
diff --git a/arch/x86_64/kernel/pci-swiotlb.c b/arch/x86_64/kernel/pci-swiotlb.c
index 990ed67896..ebdb77fe20 100644
--- a/arch/x86_64/kernel/pci-swiotlb.c
+++ b/arch/x86_64/kernel/pci-swiotlb.c
@@ -31,7 +31,7 @@ struct dma_mapping_ops swiotlb_dma_ops = {
31void pci_swiotlb_init(void) 31void pci_swiotlb_init(void)
32{ 32{
33 /* don't initialize swiotlb if iommu=off (no_iommu=1) */ 33 /* don't initialize swiotlb if iommu=off (no_iommu=1) */
34 if (!iommu_aperture && !no_iommu && 34 if (!iommu_detected && !no_iommu &&
35 (end_pfn > MAX_DMA32_PFN || force_iommu)) 35 (end_pfn > MAX_DMA32_PFN || force_iommu))
36 swiotlb = 1; 36 swiotlb = 1;
37 if (swiotlb) { 37 if (swiotlb) {
diff --git a/arch/x86_64/kernel/pmtimer.c b/arch/x86_64/kernel/pmtimer.c
index bf421ed268..7554458dc9 100644
--- a/arch/x86_64/kernel/pmtimer.c
+++ b/arch/x86_64/kernel/pmtimer.c
@@ -27,7 +27,7 @@
27/* The I/O port the PMTMR resides at. 27/* The I/O port the PMTMR resides at.
28 * The location is detected during setup_arch(), 28 * The location is detected during setup_arch(),
29 * in arch/i386/kernel/acpi/boot.c */ 29 * in arch/i386/kernel/acpi/boot.c */
30u32 pmtmr_ioport; 30u32 pmtmr_ioport __read_mostly;
31 31
32/* value of the Power timer at last timer interrupt */ 32/* value of the Power timer at last timer interrupt */
33static u32 offset_delay; 33static u32 offset_delay;
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index fb903e65e0..ca56e19b8b 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -10,7 +10,6 @@
10 * Andi Kleen. 10 * Andi Kleen.
11 * 11 *
12 * CPU hotplug support - ashok.raj@intel.com 12 * CPU hotplug support - ashok.raj@intel.com
13 * $Id: process.c,v 1.38 2002/01/15 10:08:03 ak Exp $
14 */ 13 */
15 14
16/* 15/*
@@ -64,6 +63,7 @@ EXPORT_SYMBOL(boot_option_idle_override);
64 * Powermanagement idle function, if any.. 63 * Powermanagement idle function, if any..
65 */ 64 */
66void (*pm_idle)(void); 65void (*pm_idle)(void);
66EXPORT_SYMBOL(pm_idle);
67static DEFINE_PER_CPU(unsigned int, cpu_idle_state); 67static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
68 68
69static ATOMIC_NOTIFIER_HEAD(idle_notifier); 69static ATOMIC_NOTIFIER_HEAD(idle_notifier);
@@ -111,7 +111,7 @@ static void default_idle(void)
111{ 111{
112 local_irq_enable(); 112 local_irq_enable();
113 113
114 clear_thread_flag(TIF_POLLING_NRFLAG); 114 current_thread_info()->status &= ~TS_POLLING;
115 smp_mb__after_clear_bit(); 115 smp_mb__after_clear_bit();
116 while (!need_resched()) { 116 while (!need_resched()) {
117 local_irq_disable(); 117 local_irq_disable();
@@ -120,7 +120,7 @@ static void default_idle(void)
120 else 120 else
121 local_irq_enable(); 121 local_irq_enable();
122 } 122 }
123 set_thread_flag(TIF_POLLING_NRFLAG); 123 current_thread_info()->status |= TS_POLLING;
124} 124}
125 125
126/* 126/*
@@ -203,8 +203,7 @@ static inline void play_dead(void)
203 */ 203 */
204void cpu_idle (void) 204void cpu_idle (void)
205{ 205{
206 set_thread_flag(TIF_POLLING_NRFLAG); 206 current_thread_info()->status |= TS_POLLING;
207
208 /* endless idle loop with no priority at all */ 207 /* endless idle loop with no priority at all */
209 while (1) { 208 while (1) {
210 while (!need_resched()) { 209 while (!need_resched()) {
@@ -335,7 +334,7 @@ void show_regs(struct pt_regs *regs)
335{ 334{
336 printk("CPU %d:", smp_processor_id()); 335 printk("CPU %d:", smp_processor_id());
337 __show_regs(regs); 336 __show_regs(regs);
338 show_trace(&regs->rsp); 337 show_trace(NULL, regs, (void *)(regs + 1));
339} 338}
340 339
341/* 340/*
@@ -365,8 +364,11 @@ void flush_thread(void)
365 struct task_struct *tsk = current; 364 struct task_struct *tsk = current;
366 struct thread_info *t = current_thread_info(); 365 struct thread_info *t = current_thread_info();
367 366
368 if (t->flags & _TIF_ABI_PENDING) 367 if (t->flags & _TIF_ABI_PENDING) {
369 t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32); 368 t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);
369 if (t->flags & _TIF_IA32)
370 current_thread_info()->status |= TS_COMPAT;
371 }
370 372
371 tsk->thread.debugreg0 = 0; 373 tsk->thread.debugreg0 = 0;
372 tsk->thread.debugreg1 = 0; 374 tsk->thread.debugreg1 = 0;
diff --git a/arch/x86_64/kernel/reboot.c b/arch/x86_64/kernel/reboot.c
index 57117b8beb..2d67698474 100644
--- a/arch/x86_64/kernel/reboot.c
+++ b/arch/x86_64/kernel/reboot.c
@@ -20,6 +20,7 @@
20 * Power off function, if any 20 * Power off function, if any
21 */ 21 */
22void (*pm_power_off)(void); 22void (*pm_power_off)(void);
23EXPORT_SYMBOL(pm_power_off);
23 24
24static long no_idt[3]; 25static long no_idt[3];
25static enum { 26static enum {
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index fb850b52b4..1129918ede 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * Nov 2001 Dave Jones <davej@suse.de> 6 * Nov 2001 Dave Jones <davej@suse.de>
7 * Forked from i386 setup code. 7 * Forked from i386 setup code.
8 *
9 * $Id$
10 */ 8 */
11 9
12/* 10/*
@@ -65,9 +63,7 @@
65#include <asm/setup.h> 63#include <asm/setup.h>
66#include <asm/mach_apic.h> 64#include <asm/mach_apic.h>
67#include <asm/numa.h> 65#include <asm/numa.h>
68#include <asm/swiotlb.h>
69#include <asm/sections.h> 66#include <asm/sections.h>
70#include <asm/gart-mapping.h>
71#include <asm/dmi.h> 67#include <asm/dmi.h>
72 68
73/* 69/*
@@ -75,6 +71,7 @@
75 */ 71 */
76 72
77struct cpuinfo_x86 boot_cpu_data __read_mostly; 73struct cpuinfo_x86 boot_cpu_data __read_mostly;
74EXPORT_SYMBOL(boot_cpu_data);
78 75
79unsigned long mmu_cr4_features; 76unsigned long mmu_cr4_features;
80 77
@@ -103,12 +100,14 @@ char dmi_alloc_data[DMI_MAX_DATA];
103 * Setup options 100 * Setup options
104 */ 101 */
105struct screen_info screen_info; 102struct screen_info screen_info;
103EXPORT_SYMBOL(screen_info);
106struct sys_desc_table_struct { 104struct sys_desc_table_struct {
107 unsigned short length; 105 unsigned short length;
108 unsigned char table[0]; 106 unsigned char table[0];
109}; 107};
110 108
111struct edid_info edid_info; 109struct edid_info edid_info;
110EXPORT_SYMBOL_GPL(edid_info);
112struct e820map e820; 111struct e820map e820;
113 112
114extern int root_mountflags; 113extern int root_mountflags;
@@ -473,80 +472,6 @@ contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
473} 472}
474#endif 473#endif
475 474
476/* Use inline assembly to define this because the nops are defined
477 as inline assembly strings in the include files and we cannot
478 get them easily into strings. */
479asm("\t.data\nk8nops: "
480 K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
481 K8_NOP7 K8_NOP8);
482
483extern unsigned char k8nops[];
484static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
485 NULL,
486 k8nops,
487 k8nops + 1,
488 k8nops + 1 + 2,
489 k8nops + 1 + 2 + 3,
490 k8nops + 1 + 2 + 3 + 4,
491 k8nops + 1 + 2 + 3 + 4 + 5,
492 k8nops + 1 + 2 + 3 + 4 + 5 + 6,
493 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
494};
495
496extern char __vsyscall_0;
497
498/* Replace instructions with better alternatives for this CPU type.
499
500 This runs before SMP is initialized to avoid SMP problems with
501 self modifying code. This implies that assymetric systems where
502 APs have less capabilities than the boot processor are not handled.
503 In this case boot with "noreplacement". */
504void apply_alternatives(void *start, void *end)
505{
506 struct alt_instr *a;
507 int diff, i, k;
508 for (a = start; (void *)a < end; a++) {
509 u8 *instr;
510
511 if (!boot_cpu_has(a->cpuid))
512 continue;
513
514 BUG_ON(a->replacementlen > a->instrlen);
515 instr = a->instr;
516 /* vsyscall code is not mapped yet. resolve it manually. */
517 if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END)
518 instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
519 __inline_memcpy(instr, a->replacement, a->replacementlen);
520 diff = a->instrlen - a->replacementlen;
521
522 /* Pad the rest with nops */
523 for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
524 k = diff;
525 if (k > ASM_NOP_MAX)
526 k = ASM_NOP_MAX;
527 __inline_memcpy(instr + i, k8_nops[k], k);
528 }
529 }
530}
531
532static int no_replacement __initdata = 0;
533
534void __init alternative_instructions(void)
535{
536 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
537 if (no_replacement)
538 return;
539 apply_alternatives(__alt_instructions, __alt_instructions_end);
540}
541
542static int __init noreplacement_setup(char *s)
543{
544 no_replacement = 1;
545 return 1;
546}
547
548__setup("noreplacement", noreplacement_setup);
549
550#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE) 475#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
551struct edd edd; 476struct edd edd;
552#ifdef CONFIG_EDD_MODULE 477#ifdef CONFIG_EDD_MODULE
@@ -779,10 +704,6 @@ void __init setup_arch(char **cmdline_p)
779 704
780 e820_setup_gap(); 705 e820_setup_gap();
781 706
782#ifdef CONFIG_GART_IOMMU
783 iommu_hole_init();
784#endif
785
786#ifdef CONFIG_VT 707#ifdef CONFIG_VT
787#if defined(CONFIG_VGA_CONSOLE) 708#if defined(CONFIG_VGA_CONSOLE)
788 conswitchp = &vga_con; 709 conswitchp = &vga_con;
@@ -867,24 +788,32 @@ static int nearby_node(int apicid)
867static void __init amd_detect_cmp(struct cpuinfo_x86 *c) 788static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
868{ 789{
869#ifdef CONFIG_SMP 790#ifdef CONFIG_SMP
870 int cpu = smp_processor_id();
871 unsigned bits; 791 unsigned bits;
872#ifdef CONFIG_NUMA 792#ifdef CONFIG_NUMA
793 int cpu = smp_processor_id();
873 int node = 0; 794 int node = 0;
874 unsigned apicid = hard_smp_processor_id(); 795 unsigned apicid = hard_smp_processor_id();
875#endif 796#endif
797 unsigned ecx = cpuid_ecx(0x80000008);
798
799 c->x86_max_cores = (ecx & 0xff) + 1;
876 800
877 bits = 0; 801 /* CPU telling us the core id bits shift? */
878 while ((1 << bits) < c->x86_max_cores) 802 bits = (ecx >> 12) & 0xF;
879 bits++; 803
804 /* Otherwise recompute */
805 if (bits == 0) {
806 while ((1 << bits) < c->x86_max_cores)
807 bits++;
808 }
880 809
881 /* Low order bits define the core id (index of core in socket) */ 810 /* Low order bits define the core id (index of core in socket) */
882 cpu_core_id[cpu] = phys_proc_id[cpu] & ((1 << bits)-1); 811 c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
883 /* Convert the APIC ID into the socket ID */ 812 /* Convert the APIC ID into the socket ID */
884 phys_proc_id[cpu] = phys_pkg_id(bits); 813 c->phys_proc_id = phys_pkg_id(bits);
885 814
886#ifdef CONFIG_NUMA 815#ifdef CONFIG_NUMA
887 node = phys_proc_id[cpu]; 816 node = c->phys_proc_id;
888 if (apicid_to_node[apicid] != NUMA_NO_NODE) 817 if (apicid_to_node[apicid] != NUMA_NO_NODE)
889 node = apicid_to_node[apicid]; 818 node = apicid_to_node[apicid];
890 if (!node_online(node)) { 819 if (!node_online(node)) {
@@ -897,7 +826,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
897 but in the same order as the HT nodeids. 826 but in the same order as the HT nodeids.
898 If that doesn't result in a usable node fall back to the 827 If that doesn't result in a usable node fall back to the
899 path for the previous case. */ 828 path for the previous case. */
900 int ht_nodeid = apicid - (phys_proc_id[0] << bits); 829 int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits);
901 if (ht_nodeid >= 0 && 830 if (ht_nodeid >= 0 &&
902 apicid_to_node[ht_nodeid] != NUMA_NO_NODE) 831 apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
903 node = apicid_to_node[ht_nodeid]; 832 node = apicid_to_node[ht_nodeid];
@@ -907,15 +836,13 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
907 } 836 }
908 numa_set_node(cpu, node); 837 numa_set_node(cpu, node);
909 838
910 printk(KERN_INFO "CPU %d/%x(%d) -> Node %d -> Core %d\n", 839 printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
911 cpu, apicid, c->x86_max_cores, node, cpu_core_id[cpu]);
912#endif 840#endif
913#endif 841#endif
914} 842}
915 843
916static int __init init_amd(struct cpuinfo_x86 *c) 844static void __init init_amd(struct cpuinfo_x86 *c)
917{ 845{
918 int r;
919 unsigned level; 846 unsigned level;
920 847
921#ifdef CONFIG_SMP 848#ifdef CONFIG_SMP
@@ -948,8 +875,8 @@ static int __init init_amd(struct cpuinfo_x86 *c)
948 if (c->x86 >= 6) 875 if (c->x86 >= 6)
949 set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability); 876 set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);
950 877
951 r = get_model_name(c); 878 level = get_model_name(c);
952 if (!r) { 879 if (!level) {
953 switch (c->x86) { 880 switch (c->x86) {
954 case 15: 881 case 15:
955 /* Should distinguish Models here, but this is only 882 /* Should distinguish Models here, but this is only
@@ -964,13 +891,12 @@ static int __init init_amd(struct cpuinfo_x86 *c)
964 if (c->x86_power & (1<<8)) 891 if (c->x86_power & (1<<8))
965 set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability); 892 set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
966 893
967 if (c->extended_cpuid_level >= 0x80000008) { 894 /* Multi core CPU? */
968 c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1; 895 if (c->extended_cpuid_level >= 0x80000008)
969
970 amd_detect_cmp(c); 896 amd_detect_cmp(c);
971 }
972 897
973 return r; 898 /* Fix cpuid4 emulation for more */
899 num_cache_leaves = 3;
974} 900}
975 901
976static void __cpuinit detect_ht(struct cpuinfo_x86 *c) 902static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
@@ -978,13 +904,14 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
978#ifdef CONFIG_SMP 904#ifdef CONFIG_SMP
979 u32 eax, ebx, ecx, edx; 905 u32 eax, ebx, ecx, edx;
980 int index_msb, core_bits; 906 int index_msb, core_bits;
981 int cpu = smp_processor_id();
982 907
983 cpuid(1, &eax, &ebx, &ecx, &edx); 908 cpuid(1, &eax, &ebx, &ecx, &edx);
984 909
985 910
986 if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY)) 911 if (!cpu_has(c, X86_FEATURE_HT))
987 return; 912 return;
913 if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
914 goto out;
988 915
989 smp_num_siblings = (ebx & 0xff0000) >> 16; 916 smp_num_siblings = (ebx & 0xff0000) >> 16;
990 917
@@ -999,10 +926,7 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
999 } 926 }
1000 927
1001 index_msb = get_count_order(smp_num_siblings); 928 index_msb = get_count_order(smp_num_siblings);
1002 phys_proc_id[cpu] = phys_pkg_id(index_msb); 929 c->phys_proc_id = phys_pkg_id(index_msb);
1003
1004 printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
1005 phys_proc_id[cpu]);
1006 930
1007 smp_num_siblings = smp_num_siblings / c->x86_max_cores; 931 smp_num_siblings = smp_num_siblings / c->x86_max_cores;
1008 932
@@ -1010,13 +934,15 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
1010 934
1011 core_bits = get_count_order(c->x86_max_cores); 935 core_bits = get_count_order(c->x86_max_cores);
1012 936
1013 cpu_core_id[cpu] = phys_pkg_id(index_msb) & 937 c->cpu_core_id = phys_pkg_id(index_msb) &
1014 ((1 << core_bits) - 1); 938 ((1 << core_bits) - 1);
1015
1016 if (c->x86_max_cores > 1)
1017 printk(KERN_INFO "CPU: Processor Core ID: %d\n",
1018 cpu_core_id[cpu]);
1019 } 939 }
940out:
941 if ((c->x86_max_cores * smp_num_siblings) > 1) {
942 printk(KERN_INFO "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
943 printk(KERN_INFO "CPU: Processor Core ID: %d\n", c->cpu_core_id);
944 }
945
1020#endif 946#endif
1021} 947}
1022 948
@@ -1025,15 +951,12 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
1025 */ 951 */
1026static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c) 952static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
1027{ 953{
1028 unsigned int eax; 954 unsigned int eax, t;
1029 955
1030 if (c->cpuid_level < 4) 956 if (c->cpuid_level < 4)
1031 return 1; 957 return 1;
1032 958
1033 __asm__("cpuid" 959 cpuid_count(4, 0, &eax, &t, &t, &t);
1034 : "=a" (eax)
1035 : "0" (4), "c" (0)
1036 : "bx", "dx");
1037 960
1038 if (eax & 0x1f) 961 if (eax & 0x1f)
1039 return ((eax >> 26) + 1); 962 return ((eax >> 26) + 1);
@@ -1046,16 +969,17 @@ static void srat_detect_node(void)
1046#ifdef CONFIG_NUMA 969#ifdef CONFIG_NUMA
1047 unsigned node; 970 unsigned node;
1048 int cpu = smp_processor_id(); 971 int cpu = smp_processor_id();
972 int apicid = hard_smp_processor_id();
1049 973
1050 /* Don't do the funky fallback heuristics the AMD version employs 974 /* Don't do the funky fallback heuristics the AMD version employs
1051 for now. */ 975 for now. */
1052 node = apicid_to_node[hard_smp_processor_id()]; 976 node = apicid_to_node[apicid];
1053 if (node == NUMA_NO_NODE) 977 if (node == NUMA_NO_NODE)
1054 node = first_node(node_online_map); 978 node = first_node(node_online_map);
1055 numa_set_node(cpu, node); 979 numa_set_node(cpu, node);
1056 980
1057 if (acpi_numa > 0) 981 if (acpi_numa > 0)
1058 printk(KERN_INFO "CPU %d -> Node %d\n", cpu, node); 982 printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
1059#endif 983#endif
1060} 984}
1061 985
@@ -1065,6 +989,13 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
1065 unsigned n; 989 unsigned n;
1066 990
1067 init_intel_cacheinfo(c); 991 init_intel_cacheinfo(c);
992 if (c->cpuid_level > 9 ) {
993 unsigned eax = cpuid_eax(10);
994 /* Check for version and the number of counters */
995 if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
996 set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
997 }
998
1068 n = c->extended_cpuid_level; 999 n = c->extended_cpuid_level;
1069 if (n >= 0x80000008) { 1000 if (n >= 0x80000008) {
1070 unsigned eax = cpuid_eax(0x80000008); 1001 unsigned eax = cpuid_eax(0x80000008);
@@ -1156,7 +1087,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
1156 } 1087 }
1157 1088
1158#ifdef CONFIG_SMP 1089#ifdef CONFIG_SMP
1159 phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff; 1090 c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
1160#endif 1091#endif
1161} 1092}
1162 1093
@@ -1283,7 +1214,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1283 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1214 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1284 NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL, 1215 NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
1285 NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL, 1216 NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
1286 NULL, "fxsr_opt", "rdtscp", NULL, NULL, "lm", "3dnowext", "3dnow", 1217 NULL, "fxsr_opt", NULL, "rdtscp", NULL, "lm", "3dnowext", "3dnow",
1287 1218
1288 /* Transmeta-defined */ 1219 /* Transmeta-defined */
1289 "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL, 1220 "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
@@ -1294,7 +1225,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1294 /* Other (Linux-defined) */ 1225 /* Other (Linux-defined) */
1295 "cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL, 1226 "cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
1296 "constant_tsc", NULL, NULL, 1227 "constant_tsc", NULL, NULL,
1297 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1228 "up", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1298 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1229 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1299 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1230 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1300 1231
@@ -1364,9 +1295,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1364#ifdef CONFIG_SMP 1295#ifdef CONFIG_SMP
1365 if (smp_num_siblings * c->x86_max_cores > 1) { 1296 if (smp_num_siblings * c->x86_max_cores > 1) {
1366 int cpu = c - cpu_data; 1297 int cpu = c - cpu_data;
1367 seq_printf(m, "physical id\t: %d\n", phys_proc_id[cpu]); 1298 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
1368 seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu])); 1299 seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
1369 seq_printf(m, "core id\t\t: %d\n", cpu_core_id[cpu]); 1300 seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
1370 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); 1301 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
1371 } 1302 }
1372#endif 1303#endif
@@ -1440,7 +1371,7 @@ struct seq_operations cpuinfo_op = {
1440 .show = show_cpuinfo, 1371 .show = show_cpuinfo,
1441}; 1372};
1442 1373
1443#ifdef CONFIG_INPUT_PCSPKR 1374#if defined(CONFIG_INPUT_PCSPKR) || defined(CONFIG_INPUT_PCSPKR_MODULE)
1444#include <linux/platform_device.h> 1375#include <linux/platform_device.h>
1445static __init int add_pcspkr(void) 1376static __init int add_pcspkr(void)
1446{ 1377{
diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c
index 8a691fa6d3..f5934cb4a2 100644
--- a/arch/x86_64/kernel/setup64.c
+++ b/arch/x86_64/kernel/setup64.c
@@ -3,7 +3,6 @@
3 * Copyright (C) 1995 Linus Torvalds 3 * Copyright (C) 1995 Linus Torvalds
4 * Copyright 2001, 2002, 2003 SuSE Labs / Andi Kleen. 4 * Copyright 2001, 2002, 2003 SuSE Labs / Andi Kleen.
5 * See setup.c for older changelog. 5 * See setup.c for older changelog.
6 * $Id: setup64.c,v 1.12 2002/03/21 10:09:17 ak Exp $
7 */ 6 */
8#include <linux/config.h> 7#include <linux/config.h>
9#include <linux/init.h> 8#include <linux/init.h>
@@ -31,6 +30,7 @@ char x86_boot_params[BOOT_PARAM_SIZE] __initdata = {0,};
31cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE; 30cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
32 31
33struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly; 32struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly;
33EXPORT_SYMBOL(_cpu_pda);
34struct x8664_pda boot_cpu_pda[NR_CPUS] __cacheline_aligned; 34struct x8664_pda boot_cpu_pda[NR_CPUS] __cacheline_aligned;
35 35
36struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; 36struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
@@ -38,6 +38,7 @@ struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
38char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned"))); 38char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));
39 39
40unsigned long __supported_pte_mask __read_mostly = ~0UL; 40unsigned long __supported_pte_mask __read_mostly = ~0UL;
41EXPORT_SYMBOL(__supported_pte_mask);
41static int do_not_nx __cpuinitdata = 0; 42static int do_not_nx __cpuinitdata = 0;
42 43
43/* noexec=on|off 44/* noexec=on|off
diff --git a/arch/x86_64/kernel/signal.c b/arch/x86_64/kernel/signal.c
index e5f5ce7909..28161170fb 100644
--- a/arch/x86_64/kernel/signal.c
+++ b/arch/x86_64/kernel/signal.c
@@ -7,8 +7,6 @@
7 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson 7 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
8 * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes 8 * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes
9 * 2000-2002 x86-64 support by Andi Kleen 9 * 2000-2002 x86-64 support by Andi Kleen
10 *
11 * $Id: signal.c,v 1.18 2001/10/17 22:30:37 ak Exp $
12 */ 10 */
13 11
14#include <linux/sched.h> 12#include <linux/sched.h>
@@ -239,7 +237,6 @@ get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size)
239 rsp = regs->rsp - 128; 237 rsp = regs->rsp - 128;
240 238
241 /* This is the X/Open sanctioned signal stack switching. */ 239 /* This is the X/Open sanctioned signal stack switching. */
242 /* RED-PEN: redzone on that stack? */
243 if (ka->sa.sa_flags & SA_ONSTACK) { 240 if (ka->sa.sa_flags & SA_ONSTACK) {
244 if (sas_ss_flags(rsp) == 0) 241 if (sas_ss_flags(rsp) == 0)
245 rsp = current->sas_ss_sp + current->sas_ss_size; 242 rsp = current->sas_ss_sp + current->sas_ss_size;
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index 4a6628b14d..5a1c0a3bf8 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -135,10 +135,10 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
135 135
136 cpu = smp_processor_id(); 136 cpu = smp_processor_id();
137 /* 137 /*
138 * orig_rax contains the interrupt vector - 256. 138 * orig_rax contains the negated interrupt vector.
139 * Use that to determine where the sender put the data. 139 * Use that to determine where the sender put the data.
140 */ 140 */
141 sender = regs->orig_rax + 256 - INVALIDATE_TLB_VECTOR_START; 141 sender = ~regs->orig_rax - INVALIDATE_TLB_VECTOR_START;
142 f = &per_cpu(flush_state, sender); 142 f = &per_cpu(flush_state, sender);
143 143
144 if (!cpu_isset(cpu, f->flush_cpumask)) 144 if (!cpu_isset(cpu, f->flush_cpumask))
@@ -224,6 +224,7 @@ void flush_tlb_current_task(void)
224 flush_tlb_others(cpu_mask, mm, FLUSH_ALL); 224 flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
225 preempt_enable(); 225 preempt_enable();
226} 226}
227EXPORT_SYMBOL(flush_tlb_current_task);
227 228
228void flush_tlb_mm (struct mm_struct * mm) 229void flush_tlb_mm (struct mm_struct * mm)
229{ 230{
@@ -244,6 +245,7 @@ void flush_tlb_mm (struct mm_struct * mm)
244 245
245 preempt_enable(); 246 preempt_enable();
246} 247}
248EXPORT_SYMBOL(flush_tlb_mm);
247 249
248void flush_tlb_page(struct vm_area_struct * vma, unsigned long va) 250void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
249{ 251{
@@ -266,6 +268,7 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
266 268
267 preempt_enable(); 269 preempt_enable();
268} 270}
271EXPORT_SYMBOL(flush_tlb_page);
269 272
270static void do_flush_tlb_all(void* info) 273static void do_flush_tlb_all(void* info)
271{ 274{
@@ -443,6 +446,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
443 spin_unlock(&call_lock); 446 spin_unlock(&call_lock);
444 return 0; 447 return 0;
445} 448}
449EXPORT_SYMBOL(smp_call_function);
446 450
447void smp_stop_cpu(void) 451void smp_stop_cpu(void)
448{ 452{
@@ -460,7 +464,7 @@ static void smp_really_stop_cpu(void *dummy)
460{ 464{
461 smp_stop_cpu(); 465 smp_stop_cpu();
462 for (;;) 466 for (;;)
463 asm("hlt"); 467 halt();
464} 468}
465 469
466void smp_send_stop(void) 470void smp_send_stop(void)
@@ -470,7 +474,7 @@ void smp_send_stop(void)
470 return; 474 return;
471 /* Don't deadlock on the call lock in panic */ 475 /* Don't deadlock on the call lock in panic */
472 if (!spin_trylock(&call_lock)) { 476 if (!spin_trylock(&call_lock)) {
473 /* ignore locking because we have paniced anyways */ 477 /* ignore locking because we have panicked anyways */
474 nolock = 1; 478 nolock = 1;
475 } 479 }
476 __smp_call_function(smp_really_stop_cpu, NULL, 0, 0); 480 __smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
@@ -520,13 +524,13 @@ asmlinkage void smp_call_function_interrupt(void)
520 524
521int safe_smp_processor_id(void) 525int safe_smp_processor_id(void)
522{ 526{
523 int apicid, i; 527 unsigned apicid, i;
524 528
525 if (disable_apic) 529 if (disable_apic)
526 return 0; 530 return 0;
527 531
528 apicid = hard_smp_processor_id(); 532 apicid = hard_smp_processor_id();
529 if (x86_cpu_to_apicid[apicid] == apicid) 533 if (apicid < NR_CPUS && x86_cpu_to_apicid[apicid] == apicid)
530 return apicid; 534 return apicid;
531 535
532 for (i = 0; i < NR_CPUS; ++i) { 536 for (i = 0; i < NR_CPUS; ++i) {
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 71a7222cf9..540c0ccbcc 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -63,13 +63,11 @@
63 63
64/* Number of siblings per CPU package */ 64/* Number of siblings per CPU package */
65int smp_num_siblings = 1; 65int smp_num_siblings = 1;
66/* Package ID of each logical CPU */ 66EXPORT_SYMBOL(smp_num_siblings);
67u8 phys_proc_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
68/* core ID of each logical CPU */
69u8 cpu_core_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
70 67
71/* Last level cache ID of each logical CPU */ 68/* Last level cache ID of each logical CPU */
72u8 cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID}; 69u8 cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
70EXPORT_SYMBOL(cpu_llc_id);
73 71
74/* Bitmask of currently online CPUs */ 72/* Bitmask of currently online CPUs */
75cpumask_t cpu_online_map __read_mostly; 73cpumask_t cpu_online_map __read_mostly;
@@ -82,18 +80,21 @@ EXPORT_SYMBOL(cpu_online_map);
82 */ 80 */
83cpumask_t cpu_callin_map; 81cpumask_t cpu_callin_map;
84cpumask_t cpu_callout_map; 82cpumask_t cpu_callout_map;
83EXPORT_SYMBOL(cpu_callout_map);
85 84
86cpumask_t cpu_possible_map; 85cpumask_t cpu_possible_map;
87EXPORT_SYMBOL(cpu_possible_map); 86EXPORT_SYMBOL(cpu_possible_map);
88 87
89/* Per CPU bogomips and other parameters */ 88/* Per CPU bogomips and other parameters */
90struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned; 89struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
90EXPORT_SYMBOL(cpu_data);
91 91
92/* Set when the idlers are all forked */ 92/* Set when the idlers are all forked */
93int smp_threads_ready; 93int smp_threads_ready;
94 94
95/* representing HT siblings of each logical CPU */ 95/* representing HT siblings of each logical CPU */
96cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly; 96cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
97EXPORT_SYMBOL(cpu_sibling_map);
97 98
98/* representing HT and core siblings of each logical CPU */ 99/* representing HT and core siblings of each logical CPU */
99cpumask_t cpu_core_map[NR_CPUS] __read_mostly; 100cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
@@ -454,10 +455,12 @@ cpumask_t cpu_coregroup_map(int cpu)
454 struct cpuinfo_x86 *c = cpu_data + cpu; 455 struct cpuinfo_x86 *c = cpu_data + cpu;
455 /* 456 /*
456 * For perf, we return last level cache shared map. 457 * For perf, we return last level cache shared map.
457 * TBD: when power saving sched policy is added, we will return 458 * And for power savings, we return cpu_core_map
458 * cpu_core_map when power saving policy is enabled
459 */ 459 */
460 return c->llc_shared_map; 460 if (sched_mc_power_savings || sched_smt_power_savings)
461 return cpu_core_map[cpu];
462 else
463 return c->llc_shared_map;
461} 464}
462 465
463/* representing cpus for which sibling maps can be computed */ 466/* representing cpus for which sibling maps can be computed */
@@ -472,8 +475,8 @@ static inline void set_cpu_sibling_map(int cpu)
472 475
473 if (smp_num_siblings > 1) { 476 if (smp_num_siblings > 1) {
474 for_each_cpu_mask(i, cpu_sibling_setup_map) { 477 for_each_cpu_mask(i, cpu_sibling_setup_map) {
475 if (phys_proc_id[cpu] == phys_proc_id[i] && 478 if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
476 cpu_core_id[cpu] == cpu_core_id[i]) { 479 c[cpu].cpu_core_id == c[i].cpu_core_id) {
477 cpu_set(i, cpu_sibling_map[cpu]); 480 cpu_set(i, cpu_sibling_map[cpu]);
478 cpu_set(cpu, cpu_sibling_map[i]); 481 cpu_set(cpu, cpu_sibling_map[i]);
479 cpu_set(i, cpu_core_map[cpu]); 482 cpu_set(i, cpu_core_map[cpu]);
@@ -500,7 +503,7 @@ static inline void set_cpu_sibling_map(int cpu)
500 cpu_set(i, c[cpu].llc_shared_map); 503 cpu_set(i, c[cpu].llc_shared_map);
501 cpu_set(cpu, c[i].llc_shared_map); 504 cpu_set(cpu, c[i].llc_shared_map);
502 } 505 }
503 if (phys_proc_id[cpu] == phys_proc_id[i]) { 506 if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
504 cpu_set(i, cpu_core_map[cpu]); 507 cpu_set(i, cpu_core_map[cpu]);
505 cpu_set(cpu, cpu_core_map[i]); 508 cpu_set(cpu, cpu_core_map[i]);
506 /* 509 /*
@@ -797,6 +800,8 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
797 } 800 }
798 801
799 802
803 alternatives_smp_switch(1);
804
800 c_idle.idle = get_idle_for_cpu(cpu); 805 c_idle.idle = get_idle_for_cpu(cpu);
801 806
802 if (c_idle.idle) { 807 if (c_idle.idle) {
@@ -1199,8 +1204,8 @@ static void remove_siblinginfo(int cpu)
1199 cpu_clear(cpu, cpu_sibling_map[sibling]); 1204 cpu_clear(cpu, cpu_sibling_map[sibling]);
1200 cpus_clear(cpu_sibling_map[cpu]); 1205 cpus_clear(cpu_sibling_map[cpu]);
1201 cpus_clear(cpu_core_map[cpu]); 1206 cpus_clear(cpu_core_map[cpu]);
1202 phys_proc_id[cpu] = BAD_APICID; 1207 c[cpu].phys_proc_id = 0;
1203 cpu_core_id[cpu] = BAD_APICID; 1208 c[cpu].cpu_core_id = 0;
1204 cpu_clear(cpu, cpu_sibling_setup_map); 1209 cpu_clear(cpu, cpu_sibling_setup_map);
1205} 1210}
1206 1211
@@ -1259,6 +1264,8 @@ void __cpu_die(unsigned int cpu)
1259 /* They ack this in play_dead by setting CPU_DEAD */ 1264 /* They ack this in play_dead by setting CPU_DEAD */
1260 if (per_cpu(cpu_state, cpu) == CPU_DEAD) { 1265 if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
1261 printk ("CPU %d is now offline\n", cpu); 1266 printk ("CPU %d is now offline\n", cpu);
1267 if (1 == num_online_cpus())
1268 alternatives_smp_switch(0);
1262 return; 1269 return;
1263 } 1270 }
1264 msleep(100); 1271 msleep(100);
diff --git a/arch/x86_64/kernel/tce.c b/arch/x86_64/kernel/tce.c
new file mode 100644
index 0000000000..8d4c67f61b
--- /dev/null
+++ b/arch/x86_64/kernel/tce.c
@@ -0,0 +1,202 @@
1/*
2 * Derived from arch/powerpc/platforms/pseries/iommu.c
3 *
4 * Copyright (C) 2006 Jon Mason <jdmason@us.ibm.com>, IBM Corporation
5 * Copyright (C) 2006 Muli Ben-Yehuda <muli@il.ibm.com>, IBM Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <linux/config.h>
23#include <linux/types.h>
24#include <linux/slab.h>
25#include <linux/mm.h>
26#include <linux/spinlock.h>
27#include <linux/string.h>
28#include <linux/pci.h>
29#include <linux/dma-mapping.h>
30#include <linux/bootmem.h>
31#include <asm/tce.h>
32#include <asm/calgary.h>
33#include <asm/proto.h>
34
35/* flush a tce at 'tceaddr' to main memory */
36static inline void flush_tce(void* tceaddr)
37{
38 /* a single tce can't cross a cache line */
39 if (cpu_has_clflush)
40 asm volatile("clflush (%0)" :: "r" (tceaddr));
41 else
42 asm volatile("wbinvd":::"memory");
43}
44
45void tce_build(struct iommu_table *tbl, unsigned long index,
46 unsigned int npages, unsigned long uaddr, int direction)
47{
48 u64* tp;
49 u64 t;
50 u64 rpn;
51
52 t = (1 << TCE_READ_SHIFT);
53 if (direction != DMA_TO_DEVICE)
54 t |= (1 << TCE_WRITE_SHIFT);
55
56 tp = ((u64*)tbl->it_base) + index;
57
58 while (npages--) {
59 rpn = (virt_to_bus((void*)uaddr)) >> PAGE_SHIFT;
60 t &= ~TCE_RPN_MASK;
61 t |= (rpn << TCE_RPN_SHIFT);
62
63 *tp = cpu_to_be64(t);
64 flush_tce(tp);
65
66 uaddr += PAGE_SIZE;
67 tp++;
68 }
69}
70
71void tce_free(struct iommu_table *tbl, long index, unsigned int npages)
72{
73 u64* tp;
74
75 tp = ((u64*)tbl->it_base) + index;
76
77 while (npages--) {
78 *tp = cpu_to_be64(0);
79 flush_tce(tp);
80 tp++;
81 }
82}
83
84static inline unsigned int table_size_to_number_of_entries(unsigned char size)
85{
86 /*
87 * size is the order of the table, 0-7
88 * smallest table is 8K entries, so shift result by 13 to
89 * multiply by 8K
90 */
91 return (1 << size) << 13;
92}
93
94static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl)
95{
96 unsigned int bitmapsz;
97 unsigned int tce_table_index;
98 unsigned long bmppages;
99 int ret;
100
101 tbl->it_busno = dev->bus->number;
102
103 /* set the tce table size - measured in entries */
104 tbl->it_size = table_size_to_number_of_entries(specified_table_size);
105
106 tce_table_index = bus_to_phb(tbl->it_busno);
107 tbl->it_base = (unsigned long)tce_table_kva[tce_table_index];
108 if (!tbl->it_base) {
109 printk(KERN_ERR "Calgary: iommu_table_setparms: "
110 "no table allocated?!\n");
111 ret = -ENOMEM;
112 goto done;
113 }
114
115 /*
116 * number of bytes needed for the bitmap size in number of
117 * entries; we need one bit per entry
118 */
119 bitmapsz = tbl->it_size / BITS_PER_BYTE;
120 bmppages = __get_free_pages(GFP_KERNEL, get_order(bitmapsz));
121 if (!bmppages) {
122 printk(KERN_ERR "Calgary: cannot allocate bitmap\n");
123 ret = -ENOMEM;
124 goto done;
125 }
126
127 tbl->it_map = (unsigned long*)bmppages;
128
129 memset(tbl->it_map, 0, bitmapsz);
130
131 tbl->it_hint = 0;
132
133 spin_lock_init(&tbl->it_lock);
134
135 return 0;
136
137done:
138 return ret;
139}
140
141int build_tce_table(struct pci_dev *dev, void __iomem *bbar)
142{
143 struct iommu_table *tbl;
144 int ret;
145
146 if (dev->sysdata) {
147 printk(KERN_ERR "Calgary: dev %p has sysdata %p\n",
148 dev, dev->sysdata);
149 BUG();
150 }
151
152 tbl = kzalloc(sizeof(struct iommu_table), GFP_KERNEL);
153 if (!tbl) {
154 printk(KERN_ERR "Calgary: error allocating iommu_table\n");
155 ret = -ENOMEM;
156 goto done;
157 }
158
159 ret = tce_table_setparms(dev, tbl);
160 if (ret)
161 goto free_tbl;
162
163 tce_free(tbl, 0, tbl->it_size);
164
165 tbl->bbar = bbar;
166
167 /*
168 * NUMA is already using the bus's sysdata pointer, so we use
169 * the bus's pci_dev's sysdata instead.
170 */
171 dev->sysdata = tbl;
172
173 return 0;
174
175free_tbl:
176 kfree(tbl);
177done:
178 return ret;
179}
180
181void* alloc_tce_table(void)
182{
183 unsigned int size;
184
185 size = table_size_to_number_of_entries(specified_table_size);
186 size *= TCE_ENTRY_SIZE;
187
188 return __alloc_bootmem_low(size, size, 0);
189}
190
191void free_tce_table(void *tbl)
192{
193 unsigned int size;
194
195 if (!tbl)
196 return;
197
198 size = table_size_to_number_of_entries(specified_table_size);
199 size *= TCE_ENTRY_SIZE;
200
201 free_bootmem(__pa(tbl), size);
202}
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 7392570f97..ebbee6f59f 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -8,7 +8,7 @@
8 * Copyright (c) 1995 Markus Kuhn 8 * Copyright (c) 1995 Markus Kuhn
9 * Copyright (c) 1996 Ingo Molnar 9 * Copyright (c) 1996 Ingo Molnar
10 * Copyright (c) 1998 Andrea Arcangeli 10 * Copyright (c) 1998 Andrea Arcangeli
11 * Copyright (c) 2002 Vojtech Pavlik 11 * Copyright (c) 2002,2006 Vojtech Pavlik
12 * Copyright (c) 2003 Andi Kleen 12 * Copyright (c) 2003 Andi Kleen
13 * RTC support code taken from arch/i386/kernel/timers/time_hpet.c 13 * RTC support code taken from arch/i386/kernel/timers/time_hpet.c
14 */ 14 */
@@ -51,14 +51,21 @@ extern int using_apic_timer;
51static char *time_init_gtod(void); 51static char *time_init_gtod(void);
52 52
53DEFINE_SPINLOCK(rtc_lock); 53DEFINE_SPINLOCK(rtc_lock);
54EXPORT_SYMBOL(rtc_lock);
54DEFINE_SPINLOCK(i8253_lock); 55DEFINE_SPINLOCK(i8253_lock);
55 56
56int nohpet __initdata = 0; 57int nohpet __initdata = 0;
57static int notsc __initdata = 0; 58static int notsc __initdata = 0;
58 59
59#undef HPET_HACK_ENABLE_DANGEROUS 60#define USEC_PER_TICK (USEC_PER_SEC / HZ)
61#define NSEC_PER_TICK (NSEC_PER_SEC / HZ)
62#define FSEC_PER_TICK (FSEC_PER_SEC / HZ)
63
64#define NS_SCALE 10 /* 2^10, carefully chosen */
65#define US_SCALE 32 /* 2^32, arbitralrily chosen */
60 66
61unsigned int cpu_khz; /* TSC clocks / usec, not used here */ 67unsigned int cpu_khz; /* TSC clocks / usec, not used here */
68EXPORT_SYMBOL(cpu_khz);
62static unsigned long hpet_period; /* fsecs / HPET clock */ 69static unsigned long hpet_period; /* fsecs / HPET clock */
63unsigned long hpet_tick; /* HPET clocks / interrupt */ 70unsigned long hpet_tick; /* HPET clocks / interrupt */
64int hpet_use_timer; /* Use counter of hpet for time keeping, otherwise PIT */ 71int hpet_use_timer; /* Use counter of hpet for time keeping, otherwise PIT */
@@ -90,7 +97,7 @@ static inline unsigned int do_gettimeoffset_tsc(void)
90 t = get_cycles_sync(); 97 t = get_cycles_sync();
91 if (t < vxtime.last_tsc) 98 if (t < vxtime.last_tsc)
92 t = vxtime.last_tsc; /* hack */ 99 t = vxtime.last_tsc; /* hack */
93 x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> 32; 100 x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> US_SCALE;
94 return x; 101 return x;
95} 102}
96 103
@@ -98,7 +105,7 @@ static inline unsigned int do_gettimeoffset_hpet(void)
98{ 105{
99 /* cap counter read to one tick to avoid inconsistencies */ 106 /* cap counter read to one tick to avoid inconsistencies */
100 unsigned long counter = hpet_readl(HPET_COUNTER) - vxtime.last; 107 unsigned long counter = hpet_readl(HPET_COUNTER) - vxtime.last;
101 return (min(counter,hpet_tick) * vxtime.quot) >> 32; 108 return (min(counter,hpet_tick) * vxtime.quot) >> US_SCALE;
102} 109}
103 110
104unsigned int (*do_gettimeoffset)(void) = do_gettimeoffset_tsc; 111unsigned int (*do_gettimeoffset)(void) = do_gettimeoffset_tsc;
@@ -118,7 +125,7 @@ void do_gettimeofday(struct timeval *tv)
118 seq = read_seqbegin(&xtime_lock); 125 seq = read_seqbegin(&xtime_lock);
119 126
120 sec = xtime.tv_sec; 127 sec = xtime.tv_sec;
121 usec = xtime.tv_nsec / 1000; 128 usec = xtime.tv_nsec / NSEC_PER_USEC;
122 129
123 /* i386 does some correction here to keep the clock 130 /* i386 does some correction here to keep the clock
124 monotonous even when ntpd is fixing drift. 131 monotonous even when ntpd is fixing drift.
@@ -129,14 +136,14 @@ void do_gettimeofday(struct timeval *tv)
129 in arch/x86_64/kernel/vsyscall.c and export all needed 136 in arch/x86_64/kernel/vsyscall.c and export all needed
130 variables in vmlinux.lds. -AK */ 137 variables in vmlinux.lds. -AK */
131 138
132 t = (jiffies - wall_jiffies) * (1000000L / HZ) + 139 t = (jiffies - wall_jiffies) * USEC_PER_TICK +
133 do_gettimeoffset(); 140 do_gettimeoffset();
134 usec += t; 141 usec += t;
135 142
136 } while (read_seqretry(&xtime_lock, seq)); 143 } while (read_seqretry(&xtime_lock, seq));
137 144
138 tv->tv_sec = sec + usec / 1000000; 145 tv->tv_sec = sec + usec / USEC_PER_SEC;
139 tv->tv_usec = usec % 1000000; 146 tv->tv_usec = usec % USEC_PER_SEC;
140} 147}
141 148
142EXPORT_SYMBOL(do_gettimeofday); 149EXPORT_SYMBOL(do_gettimeofday);
@@ -157,8 +164,8 @@ int do_settimeofday(struct timespec *tv)
157 164
158 write_seqlock_irq(&xtime_lock); 165 write_seqlock_irq(&xtime_lock);
159 166
160 nsec -= do_gettimeoffset() * 1000 + 167 nsec -= do_gettimeoffset() * NSEC_PER_USEC +
161 (jiffies - wall_jiffies) * (NSEC_PER_SEC/HZ); 168 (jiffies - wall_jiffies) * NSEC_PER_TICK;
162 169
163 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); 170 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
164 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); 171 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
@@ -288,7 +295,7 @@ unsigned long long monotonic_clock(void)
288 this_offset = hpet_readl(HPET_COUNTER); 295 this_offset = hpet_readl(HPET_COUNTER);
289 } while (read_seqretry(&xtime_lock, seq)); 296 } while (read_seqretry(&xtime_lock, seq));
290 offset = (this_offset - last_offset); 297 offset = (this_offset - last_offset);
291 offset *= (NSEC_PER_SEC/HZ) / hpet_tick; 298 offset *= NSEC_PER_TICK / hpet_tick;
292 } else { 299 } else {
293 do { 300 do {
294 seq = read_seqbegin(&xtime_lock); 301 seq = read_seqbegin(&xtime_lock);
@@ -297,7 +304,8 @@ unsigned long long monotonic_clock(void)
297 base = monotonic_base; 304 base = monotonic_base;
298 } while (read_seqretry(&xtime_lock, seq)); 305 } while (read_seqretry(&xtime_lock, seq));
299 this_offset = get_cycles_sync(); 306 this_offset = get_cycles_sync();
300 offset = (this_offset - last_offset)*1000 / cpu_khz; 307 /* FIXME: 1000 or 1000000? */
308 offset = (this_offset - last_offset)*1000 / cpu_khz;
301 } 309 }
302 return base + offset; 310 return base + offset;
303} 311}
@@ -382,7 +390,7 @@ void main_timer_handler(struct pt_regs *regs)
382 } 390 }
383 391
384 monotonic_base += 392 monotonic_base +=
385 (offset - vxtime.last)*(NSEC_PER_SEC/HZ) / hpet_tick; 393 (offset - vxtime.last) * NSEC_PER_TICK / hpet_tick;
386 394
387 vxtime.last = offset; 395 vxtime.last = offset;
388#ifdef CONFIG_X86_PM_TIMER 396#ifdef CONFIG_X86_PM_TIMER
@@ -391,24 +399,25 @@ void main_timer_handler(struct pt_regs *regs)
391#endif 399#endif
392 } else { 400 } else {
393 offset = (((tsc - vxtime.last_tsc) * 401 offset = (((tsc - vxtime.last_tsc) *
394 vxtime.tsc_quot) >> 32) - (USEC_PER_SEC / HZ); 402 vxtime.tsc_quot) >> US_SCALE) - USEC_PER_TICK;
395 403
396 if (offset < 0) 404 if (offset < 0)
397 offset = 0; 405 offset = 0;
398 406
399 if (offset > (USEC_PER_SEC / HZ)) { 407 if (offset > USEC_PER_TICK) {
400 lost = offset / (USEC_PER_SEC / HZ); 408 lost = offset / USEC_PER_TICK;
401 offset %= (USEC_PER_SEC / HZ); 409 offset %= USEC_PER_TICK;
402 } 410 }
403 411
404 monotonic_base += (tsc - vxtime.last_tsc)*1000000/cpu_khz ; 412 /* FIXME: 1000 or 1000000? */
413 monotonic_base += (tsc - vxtime.last_tsc) * 1000000 / cpu_khz;
405 414
406 vxtime.last_tsc = tsc - vxtime.quot * delay / vxtime.tsc_quot; 415 vxtime.last_tsc = tsc - vxtime.quot * delay / vxtime.tsc_quot;
407 416
408 if ((((tsc - vxtime.last_tsc) * 417 if ((((tsc - vxtime.last_tsc) *
409 vxtime.tsc_quot) >> 32) < offset) 418 vxtime.tsc_quot) >> US_SCALE) < offset)
410 vxtime.last_tsc = tsc - 419 vxtime.last_tsc = tsc -
411 (((long) offset << 32) / vxtime.tsc_quot) - 1; 420 (((long) offset << US_SCALE) / vxtime.tsc_quot) - 1;
412 } 421 }
413 422
414 if (lost > 0) { 423 if (lost > 0) {
@@ -468,16 +477,15 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
468} 477}
469 478
470static unsigned int cyc2ns_scale __read_mostly; 479static unsigned int cyc2ns_scale __read_mostly;
471#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
472 480
473static inline void set_cyc2ns_scale(unsigned long cpu_khz) 481static inline void set_cyc2ns_scale(unsigned long cpu_khz)
474{ 482{
475 cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz; 483 cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / cpu_khz;
476} 484}
477 485
478static inline unsigned long long cycles_2_ns(unsigned long long cyc) 486static inline unsigned long long cycles_2_ns(unsigned long long cyc)
479{ 487{
480 return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR; 488 return (cyc * cyc2ns_scale) >> NS_SCALE;
481} 489}
482 490
483unsigned long long sched_clock(void) 491unsigned long long sched_clock(void)
@@ -490,7 +498,7 @@ unsigned long long sched_clock(void)
490 Disadvantage is a small drift between CPUs in some configurations, 498 Disadvantage is a small drift between CPUs in some configurations,
491 but that should be tolerable. */ 499 but that should be tolerable. */
492 if (__vxtime.mode == VXTIME_HPET) 500 if (__vxtime.mode == VXTIME_HPET)
493 return (hpet_readl(HPET_COUNTER) * vxtime.quot) >> 32; 501 return (hpet_readl(HPET_COUNTER) * vxtime.quot) >> US_SCALE;
494#endif 502#endif
495 503
496 /* Could do CPU core sync here. Opteron can execute rdtsc speculatively, 504 /* Could do CPU core sync here. Opteron can execute rdtsc speculatively,
@@ -633,7 +641,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
633 641
634 cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new); 642 cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
635 if (!(freq->flags & CPUFREQ_CONST_LOOPS)) 643 if (!(freq->flags & CPUFREQ_CONST_LOOPS))
636 vxtime.tsc_quot = (1000L << 32) / cpu_khz; 644 vxtime.tsc_quot = (USEC_PER_MSEC << US_SCALE) / cpu_khz;
637 } 645 }
638 646
639 set_cyc2ns_scale(cpu_khz_ref); 647 set_cyc2ns_scale(cpu_khz_ref);
@@ -789,8 +797,8 @@ static int hpet_timer_stop_set_go(unsigned long tick)
789 if (hpet_use_timer) { 797 if (hpet_use_timer) {
790 hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL | 798 hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
791 HPET_TN_32BIT, HPET_T0_CFG); 799 HPET_TN_32BIT, HPET_T0_CFG);
792 hpet_writel(hpet_tick, HPET_T0_CMP); 800 hpet_writel(hpet_tick, HPET_T0_CMP); /* next interrupt */
793 hpet_writel(hpet_tick, HPET_T0_CMP); /* AK: why twice? */ 801 hpet_writel(hpet_tick, HPET_T0_CMP); /* period */
794 cfg |= HPET_CFG_LEGACY; 802 cfg |= HPET_CFG_LEGACY;
795 } 803 }
796/* 804/*
@@ -825,8 +833,7 @@ static int hpet_init(void)
825 if (hpet_period < 100000 || hpet_period > 100000000) 833 if (hpet_period < 100000 || hpet_period > 100000000)
826 return -1; 834 return -1;
827 835
828 hpet_tick = (1000000000L * (USEC_PER_SEC / HZ) + hpet_period / 2) / 836 hpet_tick = (FSEC_PER_TICK + hpet_period / 2) / hpet_period;
829 hpet_period;
830 837
831 hpet_use_timer = (id & HPET_ID_LEGSUP); 838 hpet_use_timer = (id & HPET_ID_LEGSUP);
832 839
@@ -890,18 +897,6 @@ void __init time_init(void)
890 char *timename; 897 char *timename;
891 char *gtod; 898 char *gtod;
892 899
893#ifdef HPET_HACK_ENABLE_DANGEROUS
894 if (!vxtime.hpet_address) {
895 printk(KERN_WARNING "time.c: WARNING: Enabling HPET base "
896 "manually!\n");
897 outl(0x800038a0, 0xcf8);
898 outl(0xff000001, 0xcfc);
899 outl(0x800038a0, 0xcf8);
900 vxtime.hpet_address = inl(0xcfc) & 0xfffffffe;
901 printk(KERN_WARNING "time.c: WARNING: Enabled HPET "
902 "at %#lx.\n", vxtime.hpet_address);
903 }
904#endif
905 if (nohpet) 900 if (nohpet)
906 vxtime.hpet_address = 0; 901 vxtime.hpet_address = 0;
907 902
@@ -912,7 +907,7 @@ void __init time_init(void)
912 -xtime.tv_sec, -xtime.tv_nsec); 907 -xtime.tv_sec, -xtime.tv_nsec);
913 908
914 if (!hpet_init()) 909 if (!hpet_init())
915 vxtime_hz = (1000000000000000L + hpet_period / 2) / hpet_period; 910 vxtime_hz = (FSEC_PER_SEC + hpet_period / 2) / hpet_period;
916 else 911 else
917 vxtime.hpet_address = 0; 912 vxtime.hpet_address = 0;
918 913
@@ -941,8 +936,8 @@ void __init time_init(void)
941 vxtime_hz / 1000000, vxtime_hz % 1000000, timename, gtod); 936 vxtime_hz / 1000000, vxtime_hz % 1000000, timename, gtod);
942 printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n", 937 printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
943 cpu_khz / 1000, cpu_khz % 1000); 938 cpu_khz / 1000, cpu_khz % 1000);
944 vxtime.quot = (1000000L << 32) / vxtime_hz; 939 vxtime.quot = (USEC_PER_SEC << US_SCALE) / vxtime_hz;
945 vxtime.tsc_quot = (1000L << 32) / cpu_khz; 940 vxtime.tsc_quot = (USEC_PER_MSEC << US_SCALE) / cpu_khz;
946 vxtime.last_tsc = get_cycles_sync(); 941 vxtime.last_tsc = get_cycles_sync();
947 setup_irq(0, &irq0); 942 setup_irq(0, &irq0);
948 943
@@ -956,10 +951,10 @@ void __init time_init(void)
956__cpuinit int unsynchronized_tsc(void) 951__cpuinit int unsynchronized_tsc(void)
957{ 952{
958#ifdef CONFIG_SMP 953#ifdef CONFIG_SMP
959 if (oem_force_hpet_timer()) 954 if (apic_is_clustered_box())
960 return 1; 955 return 1;
961 /* Intel systems are normally all synchronized. Exceptions 956 /* Intel systems are normally all synchronized. Exceptions
962 are handled in the OEM check above. */ 957 are handled in the check above. */
963 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) 958 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
964 return 0; 959 return 0;
965#endif 960#endif
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index cea335e874..3d11a2fe45 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -6,8 +6,6 @@
6 * 6 *
7 * Pentium III FXSR, SSE support 7 * Pentium III FXSR, SSE support
8 * Gareth Hughes <gareth@valinux.com>, May 2000 8 * Gareth Hughes <gareth@valinux.com>, May 2000
9 *
10 * $Id: traps.c,v 1.36 2002/03/24 11:09:10 ak Exp $
11 */ 9 */
12 10
13/* 11/*
@@ -31,6 +29,7 @@
31#include <linux/nmi.h> 29#include <linux/nmi.h>
32#include <linux/kprobes.h> 30#include <linux/kprobes.h>
33#include <linux/kexec.h> 31#include <linux/kexec.h>
32#include <linux/unwind.h>
34 33
35#include <asm/system.h> 34#include <asm/system.h>
36#include <asm/uaccess.h> 35#include <asm/uaccess.h>
@@ -41,7 +40,7 @@
41#include <asm/i387.h> 40#include <asm/i387.h>
42#include <asm/kdebug.h> 41#include <asm/kdebug.h>
43#include <asm/processor.h> 42#include <asm/processor.h>
44 43#include <asm/unwind.h>
45#include <asm/smp.h> 44#include <asm/smp.h>
46#include <asm/pgalloc.h> 45#include <asm/pgalloc.h>
47#include <asm/pda.h> 46#include <asm/pda.h>
@@ -71,6 +70,7 @@ asmlinkage void machine_check(void);
71asmlinkage void spurious_interrupt_bug(void); 70asmlinkage void spurious_interrupt_bug(void);
72 71
73ATOMIC_NOTIFIER_HEAD(die_chain); 72ATOMIC_NOTIFIER_HEAD(die_chain);
73EXPORT_SYMBOL(die_chain);
74 74
75int register_die_notifier(struct notifier_block *nb) 75int register_die_notifier(struct notifier_block *nb)
76{ 76{
@@ -107,7 +107,8 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
107 preempt_enable_no_resched(); 107 preempt_enable_no_resched();
108} 108}
109 109
110static int kstack_depth_to_print = 10; 110static int kstack_depth_to_print = 12;
111static int call_trace = 1;
111 112
112#ifdef CONFIG_KALLSYMS 113#ifdef CONFIG_KALLSYMS
113#include <linux/kallsyms.h> 114#include <linux/kallsyms.h>
@@ -191,6 +192,25 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
191 return NULL; 192 return NULL;
192} 193}
193 194
195static int show_trace_unwind(struct unwind_frame_info *info, void *context)
196{
197 int i = 11, n = 0;
198
199 while (unwind(info) == 0 && UNW_PC(info)) {
200 ++n;
201 if (i > 50) {
202 printk("\n ");
203 i = 7;
204 } else
205 i += printk(" ");
206 i += printk_address(UNW_PC(info));
207 if (arch_unw_user_mode(info))
208 break;
209 }
210 printk("\n");
211 return n;
212}
213
194/* 214/*
195 * x86-64 can have upto three kernel stacks: 215 * x86-64 can have upto three kernel stacks:
196 * process stack 216 * process stack
@@ -198,15 +218,39 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
198 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack 218 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
199 */ 219 */
200 220
201void show_trace(unsigned long *stack) 221void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * stack)
202{ 222{
203 const unsigned cpu = safe_smp_processor_id(); 223 const unsigned cpu = safe_smp_processor_id();
204 unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr; 224 unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
205 int i; 225 int i = 11;
206 unsigned used = 0; 226 unsigned used = 0;
207 227
208 printk("\nCall Trace:"); 228 printk("\nCall Trace:");
209 229
230 if (!tsk)
231 tsk = current;
232
233 if (call_trace >= 0) {
234 int unw_ret = 0;
235 struct unwind_frame_info info;
236
237 if (regs) {
238 if (unwind_init_frame_info(&info, tsk, regs) == 0)
239 unw_ret = show_trace_unwind(&info, NULL);
240 } else if (tsk == current)
241 unw_ret = unwind_init_running(&info, show_trace_unwind, NULL);
242 else {
243 if (unwind_init_blocked(&info, tsk) == 0)
244 unw_ret = show_trace_unwind(&info, NULL);
245 }
246 if (unw_ret > 0) {
247 if (call_trace > 0)
248 return;
249 printk("Legacy call trace:");
250 i = 18;
251 }
252 }
253
210#define HANDLE_STACK(cond) \ 254#define HANDLE_STACK(cond) \
211 do while (cond) { \ 255 do while (cond) { \
212 unsigned long addr = *stack++; \ 256 unsigned long addr = *stack++; \
@@ -229,7 +273,7 @@ void show_trace(unsigned long *stack)
229 } \ 273 } \
230 } while (0) 274 } while (0)
231 275
232 for(i = 11; ; ) { 276 for(; ; ) {
233 const char *id; 277 const char *id;
234 unsigned long *estack_end; 278 unsigned long *estack_end;
235 estack_end = in_exception_stack(cpu, (unsigned long)stack, 279 estack_end = in_exception_stack(cpu, (unsigned long)stack,
@@ -264,7 +308,7 @@ void show_trace(unsigned long *stack)
264 printk("\n"); 308 printk("\n");
265} 309}
266 310
267void show_stack(struct task_struct *tsk, unsigned long * rsp) 311static void _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long * rsp)
268{ 312{
269 unsigned long *stack; 313 unsigned long *stack;
270 int i; 314 int i;
@@ -298,7 +342,12 @@ void show_stack(struct task_struct *tsk, unsigned long * rsp)
298 printk("%016lx ", *stack++); 342 printk("%016lx ", *stack++);
299 touch_nmi_watchdog(); 343 touch_nmi_watchdog();
300 } 344 }
301 show_trace((unsigned long *)rsp); 345 show_trace(tsk, regs, rsp);
346}
347
348void show_stack(struct task_struct *tsk, unsigned long * rsp)
349{
350 _show_stack(tsk, NULL, rsp);
302} 351}
303 352
304/* 353/*
@@ -307,7 +356,7 @@ void show_stack(struct task_struct *tsk, unsigned long * rsp)
307void dump_stack(void) 356void dump_stack(void)
308{ 357{
309 unsigned long dummy; 358 unsigned long dummy;
310 show_trace(&dummy); 359 show_trace(NULL, NULL, &dummy);
311} 360}
312 361
313EXPORT_SYMBOL(dump_stack); 362EXPORT_SYMBOL(dump_stack);
@@ -334,7 +383,7 @@ void show_registers(struct pt_regs *regs)
334 if (in_kernel) { 383 if (in_kernel) {
335 384
336 printk("Stack: "); 385 printk("Stack: ");
337 show_stack(NULL, (unsigned long*)rsp); 386 _show_stack(NULL, regs, (unsigned long*)rsp);
338 387
339 printk("\nCode: "); 388 printk("\nCode: ");
340 if (regs->rip < PAGE_OFFSET) 389 if (regs->rip < PAGE_OFFSET)
@@ -383,6 +432,7 @@ void out_of_line_bug(void)
383{ 432{
384 BUG(); 433 BUG();
385} 434}
435EXPORT_SYMBOL(out_of_line_bug);
386#endif 436#endif
387 437
388static DEFINE_SPINLOCK(die_lock); 438static DEFINE_SPINLOCK(die_lock);
@@ -1012,3 +1062,14 @@ static int __init kstack_setup(char *s)
1012} 1062}
1013__setup("kstack=", kstack_setup); 1063__setup("kstack=", kstack_setup);
1014 1064
1065static int __init call_trace_setup(char *s)
1066{
1067 if (strcmp(s, "old") == 0)
1068 call_trace = -1;
1069 else if (strcmp(s, "both") == 0)
1070 call_trace = 0;
1071 else if (strcmp(s, "new") == 0)
1072 call_trace = 1;
1073 return 1;
1074}
1075__setup("call_trace=", call_trace_setup);
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index b81f473c4a..1c6a5f3229 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -45,6 +45,15 @@ SECTIONS
45 45
46 RODATA 46 RODATA
47 47
48#ifdef CONFIG_STACK_UNWIND
49 . = ALIGN(8);
50 .eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) {
51 __start_unwind = .;
52 *(.eh_frame)
53 __end_unwind = .;
54 }
55#endif
56
48 /* Data */ 57 /* Data */
49 .data : AT(ADDR(.data) - LOAD_OFFSET) { 58 .data : AT(ADDR(.data) - LOAD_OFFSET) {
50 *(.data) 59 *(.data)
@@ -131,6 +140,26 @@ SECTIONS
131 *(.data.page_aligned) 140 *(.data.page_aligned)
132 } 141 }
133 142
143 /* might get freed after init */
144 . = ALIGN(4096);
145 __smp_alt_begin = .;
146 __smp_alt_instructions = .;
147 .smp_altinstructions : AT(ADDR(.smp_altinstructions) - LOAD_OFFSET) {
148 *(.smp_altinstructions)
149 }
150 __smp_alt_instructions_end = .;
151 . = ALIGN(8);
152 __smp_locks = .;
153 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
154 *(.smp_locks)
155 }
156 __smp_locks_end = .;
157 .smp_altinstr_replacement : AT(ADDR(.smp_altinstr_replacement) - LOAD_OFFSET) {
158 *(.smp_altinstr_replacement)
159 }
160 . = ALIGN(4096);
161 __smp_alt_end = .;
162
134 . = ALIGN(4096); /* Init code and data */ 163 . = ALIGN(4096); /* Init code and data */
135 __init_begin = .; 164 __init_begin = .;
136 .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { 165 .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
diff --git a/arch/x86_64/kernel/vsyscall.c b/arch/x86_64/kernel/vsyscall.c
index 9468fb20b0..f603037df1 100644
--- a/arch/x86_64/kernel/vsyscall.c
+++ b/arch/x86_64/kernel/vsyscall.c
@@ -107,7 +107,7 @@ static __always_inline long time_syscall(long *t)
107 107
108int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz) 108int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
109{ 109{
110 if (unlikely(!__sysctl_vsyscall)) 110 if (!__sysctl_vsyscall)
111 return gettimeofday(tv,tz); 111 return gettimeofday(tv,tz);
112 if (tv) 112 if (tv)
113 do_vgettimeofday(tv); 113 do_vgettimeofday(tv);
@@ -120,7 +120,7 @@ int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
120 * unlikely */ 120 * unlikely */
121time_t __vsyscall(1) vtime(time_t *t) 121time_t __vsyscall(1) vtime(time_t *t)
122{ 122{
123 if (unlikely(!__sysctl_vsyscall)) 123 if (!__sysctl_vsyscall)
124 return time_syscall(t); 124 return time_syscall(t);
125 else if (t) 125 else if (t)
126 *t = __xtime.tv_sec; 126 *t = __xtime.tv_sec;
diff --git a/arch/x86_64/kernel/x8664_ksyms.c b/arch/x86_64/kernel/x8664_ksyms.c
index 1def21c9f7..370952c4ff 100644
--- a/arch/x86_64/kernel/x8664_ksyms.c
+++ b/arch/x86_64/kernel/x8664_ksyms.c
@@ -1,66 +1,21 @@
1/* Exports for assembly files.
2 All C exports should go in the respective C files. */
3
1#include <linux/config.h> 4#include <linux/config.h>
2#include <linux/module.h> 5#include <linux/module.h>
3#include <linux/smp.h> 6#include <linux/smp.h>
4#include <linux/user.h>
5#include <linux/sched.h>
6#include <linux/in6.h>
7#include <linux/interrupt.h>
8#include <linux/smp_lock.h>
9#include <linux/pm.h>
10#include <linux/pci.h>
11#include <linux/apm_bios.h>
12#include <linux/kernel.h>
13#include <linux/string.h>
14#include <linux/syscalls.h>
15#include <linux/tty.h>
16 7
17#include <asm/semaphore.h> 8#include <asm/semaphore.h>
18#include <asm/processor.h> 9#include <asm/processor.h>
19#include <asm/i387.h>
20#include <asm/uaccess.h> 10#include <asm/uaccess.h>
21#include <asm/checksum.h>
22#include <asm/io.h>
23#include <asm/delay.h>
24#include <asm/irq.h>
25#include <asm/mmx.h>
26#include <asm/desc.h>
27#include <asm/pgtable.h> 11#include <asm/pgtable.h>
28#include <asm/pgalloc.h>
29#include <asm/nmi.h>
30#include <asm/kdebug.h>
31#include <asm/unistd.h>
32#include <asm/tlbflush.h>
33#include <asm/kdebug.h>
34
35extern spinlock_t rtc_lock;
36 12
37#ifdef CONFIG_SMP
38extern void __write_lock_failed(rwlock_t *rw);
39extern void __read_lock_failed(rwlock_t *rw);
40#endif
41
42/* platform dependent support */
43EXPORT_SYMBOL(boot_cpu_data);
44//EXPORT_SYMBOL(dump_fpu);
45EXPORT_SYMBOL(__ioremap);
46EXPORT_SYMBOL(ioremap_nocache);
47EXPORT_SYMBOL(iounmap);
48EXPORT_SYMBOL(kernel_thread); 13EXPORT_SYMBOL(kernel_thread);
49EXPORT_SYMBOL(pm_idle);
50EXPORT_SYMBOL(pm_power_off);
51 14
52EXPORT_SYMBOL(__down_failed); 15EXPORT_SYMBOL(__down_failed);
53EXPORT_SYMBOL(__down_failed_interruptible); 16EXPORT_SYMBOL(__down_failed_interruptible);
54EXPORT_SYMBOL(__down_failed_trylock); 17EXPORT_SYMBOL(__down_failed_trylock);
55EXPORT_SYMBOL(__up_wakeup); 18EXPORT_SYMBOL(__up_wakeup);
56/* Networking helper routines. */
57EXPORT_SYMBOL(csum_partial_copy_nocheck);
58EXPORT_SYMBOL(ip_compute_csum);
59/* Delay loops */
60EXPORT_SYMBOL(__udelay);
61EXPORT_SYMBOL(__ndelay);
62EXPORT_SYMBOL(__delay);
63EXPORT_SYMBOL(__const_udelay);
64 19
65EXPORT_SYMBOL(__get_user_1); 20EXPORT_SYMBOL(__get_user_1);
66EXPORT_SYMBOL(__get_user_2); 21EXPORT_SYMBOL(__get_user_2);
@@ -71,42 +26,20 @@ EXPORT_SYMBOL(__put_user_2);
71EXPORT_SYMBOL(__put_user_4); 26EXPORT_SYMBOL(__put_user_4);
72EXPORT_SYMBOL(__put_user_8); 27EXPORT_SYMBOL(__put_user_8);
73 28
74EXPORT_SYMBOL(strncpy_from_user);
75EXPORT_SYMBOL(__strncpy_from_user);
76EXPORT_SYMBOL(clear_user);
77EXPORT_SYMBOL(__clear_user);
78EXPORT_SYMBOL(copy_user_generic); 29EXPORT_SYMBOL(copy_user_generic);
79EXPORT_SYMBOL(copy_from_user); 30EXPORT_SYMBOL(copy_from_user);
80EXPORT_SYMBOL(copy_to_user); 31EXPORT_SYMBOL(copy_to_user);
81EXPORT_SYMBOL(copy_in_user);
82EXPORT_SYMBOL(strnlen_user);
83
84#ifdef CONFIG_PCI
85EXPORT_SYMBOL(pci_mem_start);
86#endif
87 32
88EXPORT_SYMBOL(copy_page); 33EXPORT_SYMBOL(copy_page);
89EXPORT_SYMBOL(clear_page); 34EXPORT_SYMBOL(clear_page);
90 35
91EXPORT_SYMBOL(_cpu_pda);
92#ifdef CONFIG_SMP 36#ifdef CONFIG_SMP
93EXPORT_SYMBOL(cpu_data); 37extern void FASTCALL( __write_lock_failed(rwlock_t *rw));
38extern void FASTCALL( __read_lock_failed(rwlock_t *rw));
94EXPORT_SYMBOL(__write_lock_failed); 39EXPORT_SYMBOL(__write_lock_failed);
95EXPORT_SYMBOL(__read_lock_failed); 40EXPORT_SYMBOL(__read_lock_failed);
96
97EXPORT_SYMBOL(smp_call_function);
98EXPORT_SYMBOL(cpu_callout_map);
99#endif
100
101#ifdef CONFIG_VT
102EXPORT_SYMBOL(screen_info);
103#endif 41#endif
104 42
105EXPORT_SYMBOL(rtc_lock);
106
107EXPORT_SYMBOL_GPL(set_nmi_callback);
108EXPORT_SYMBOL_GPL(unset_nmi_callback);
109
110/* Export string functions. We normally rely on gcc builtin for most of these, 43/* Export string functions. We normally rely on gcc builtin for most of these,
111 but gcc sometimes decides not to inline them. */ 44 but gcc sometimes decides not to inline them. */
112#undef memcpy 45#undef memcpy
@@ -114,51 +47,14 @@ EXPORT_SYMBOL_GPL(unset_nmi_callback);
114#undef memmove 47#undef memmove
115 48
116extern void * memset(void *,int,__kernel_size_t); 49extern void * memset(void *,int,__kernel_size_t);
117extern size_t strlen(const char *);
118extern void * memmove(void * dest,const void *src,size_t count);
119extern void * memcpy(void *,const void *,__kernel_size_t); 50extern void * memcpy(void *,const void *,__kernel_size_t);
120extern void * __memcpy(void *,const void *,__kernel_size_t); 51extern void * __memcpy(void *,const void *,__kernel_size_t);
121 52
122EXPORT_SYMBOL(memset); 53EXPORT_SYMBOL(memset);
123EXPORT_SYMBOL(memmove);
124EXPORT_SYMBOL(memcpy); 54EXPORT_SYMBOL(memcpy);
125EXPORT_SYMBOL(__memcpy); 55EXPORT_SYMBOL(__memcpy);
126 56
127#ifdef CONFIG_RWSEM_XCHGADD_ALGORITHM
128/* prototypes are wrong, these are assembly with custom calling functions */
129extern void rwsem_down_read_failed_thunk(void);
130extern void rwsem_wake_thunk(void);
131extern void rwsem_downgrade_thunk(void);
132extern void rwsem_down_write_failed_thunk(void);
133EXPORT_SYMBOL(rwsem_down_read_failed_thunk);
134EXPORT_SYMBOL(rwsem_wake_thunk);
135EXPORT_SYMBOL(rwsem_downgrade_thunk);
136EXPORT_SYMBOL(rwsem_down_write_failed_thunk);
137#endif
138
139EXPORT_SYMBOL(empty_zero_page); 57EXPORT_SYMBOL(empty_zero_page);
140
141EXPORT_SYMBOL(die_chain);
142
143#ifdef CONFIG_SMP
144EXPORT_SYMBOL(cpu_sibling_map);
145EXPORT_SYMBOL(smp_num_siblings);
146#endif
147
148#ifdef CONFIG_BUG
149EXPORT_SYMBOL(out_of_line_bug);
150#endif
151
152EXPORT_SYMBOL(init_level4_pgt); 58EXPORT_SYMBOL(init_level4_pgt);
153
154extern unsigned long __supported_pte_mask;
155EXPORT_SYMBOL(__supported_pte_mask);
156
157#ifdef CONFIG_SMP
158EXPORT_SYMBOL(flush_tlb_page);
159#endif
160
161EXPORT_SYMBOL(cpu_khz);
162
163EXPORT_SYMBOL(load_gs_index); 59EXPORT_SYMBOL(load_gs_index);
164 60
diff --git a/arch/x86_64/lib/csum-partial.c b/arch/x86_64/lib/csum-partial.c
index 5384e227cd..c493735218 100644
--- a/arch/x86_64/lib/csum-partial.c
+++ b/arch/x86_64/lib/csum-partial.c
@@ -147,4 +147,5 @@ unsigned short ip_compute_csum(unsigned char * buff, int len)
147{ 147{
148 return csum_fold(csum_partial(buff,len,0)); 148 return csum_fold(csum_partial(buff,len,0));
149} 149}
150EXPORT_SYMBOL(ip_compute_csum);
150 151
diff --git a/arch/x86_64/lib/csum-wrappers.c b/arch/x86_64/lib/csum-wrappers.c
index 94323f2081..b1320ec584 100644
--- a/arch/x86_64/lib/csum-wrappers.c
+++ b/arch/x86_64/lib/csum-wrappers.c
@@ -109,6 +109,7 @@ csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst, int len,
109{ 109{
110 return csum_partial_copy_generic(src,dst,len,sum,NULL,NULL); 110 return csum_partial_copy_generic(src,dst,len,sum,NULL,NULL);
111} 111}
112EXPORT_SYMBOL(csum_partial_copy_nocheck);
112 113
113unsigned short csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr, 114unsigned short csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
114 __u32 len, unsigned short proto, unsigned int sum) 115 __u32 len, unsigned short proto, unsigned int sum)
diff --git a/arch/x86_64/lib/delay.c b/arch/x86_64/lib/delay.c
index 03c460cbdd..b6cd3cca2f 100644
--- a/arch/x86_64/lib/delay.c
+++ b/arch/x86_64/lib/delay.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#include <linux/config.h> 11#include <linux/config.h>
12#include <linux/module.h>
12#include <linux/sched.h> 13#include <linux/sched.h>
13#include <linux/delay.h> 14#include <linux/delay.h>
14#include <asm/delay.h> 15#include <asm/delay.h>
@@ -36,18 +37,22 @@ void __delay(unsigned long loops)
36 } 37 }
37 while((now-bclock) < loops); 38 while((now-bclock) < loops);
38} 39}
40EXPORT_SYMBOL(__delay);
39 41
40inline void __const_udelay(unsigned long xloops) 42inline void __const_udelay(unsigned long xloops)
41{ 43{
42 __delay((xloops * HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy) >> 32); 44 __delay((xloops * HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy) >> 32);
43} 45}
46EXPORT_SYMBOL(__const_udelay);
44 47
45void __udelay(unsigned long usecs) 48void __udelay(unsigned long usecs)
46{ 49{
47 __const_udelay(usecs * 0x000010c6); /* 2**32 / 1000000 */ 50 __const_udelay(usecs * 0x000010c6); /* 2**32 / 1000000 */
48} 51}
52EXPORT_SYMBOL(__udelay);
49 53
50void __ndelay(unsigned long nsecs) 54void __ndelay(unsigned long nsecs)
51{ 55{
52 __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */ 56 __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
53} 57}
58EXPORT_SYMBOL(__ndelay);
diff --git a/arch/x86_64/lib/memmove.c b/arch/x86_64/lib/memmove.c
index e93d5255fd..751ebae8ec 100644
--- a/arch/x86_64/lib/memmove.c
+++ b/arch/x86_64/lib/memmove.c
@@ -3,12 +3,13 @@
3 */ 3 */
4#define _STRING_C 4#define _STRING_C
5#include <linux/string.h> 5#include <linux/string.h>
6#include <linux/module.h>
6 7
7#undef memmove 8#undef memmove
8void *memmove(void * dest,const void *src,size_t count) 9void *memmove(void * dest,const void *src,size_t count)
9{ 10{
10 if (dest < src) { 11 if (dest < src) {
11 __inline_memcpy(dest,src,count); 12 return memcpy(dest,src,count);
12 } else { 13 } else {
13 char *p = (char *) dest + count; 14 char *p = (char *) dest + count;
14 char *s = (char *) src + count; 15 char *s = (char *) src + count;
@@ -17,3 +18,4 @@ void *memmove(void * dest,const void *src,size_t count)
17 } 18 }
18 return dest; 19 return dest;
19} 20}
21EXPORT_SYMBOL(memmove);
diff --git a/arch/x86_64/lib/usercopy.c b/arch/x86_64/lib/usercopy.c
index 9bc2c29581..893d43f838 100644
--- a/arch/x86_64/lib/usercopy.c
+++ b/arch/x86_64/lib/usercopy.c
@@ -5,6 +5,7 @@
5 * Copyright 1997 Linus Torvalds 5 * Copyright 1997 Linus Torvalds
6 * Copyright 2002 Andi Kleen <ak@suse.de> 6 * Copyright 2002 Andi Kleen <ak@suse.de>
7 */ 7 */
8#include <linux/module.h>
8#include <asm/uaccess.h> 9#include <asm/uaccess.h>
9 10
10/* 11/*
@@ -47,15 +48,17 @@ __strncpy_from_user(char *dst, const char __user *src, long count)
47 __do_strncpy_from_user(dst, src, count, res); 48 __do_strncpy_from_user(dst, src, count, res);
48 return res; 49 return res;
49} 50}
51EXPORT_SYMBOL(__strncpy_from_user);
50 52
51long 53long
52strncpy_from_user(char *dst, const char __user *src, long count) 54strncpy_from_user(char *dst, const char __user *src, long count)
53{ 55{
54 long res = -EFAULT; 56 long res = -EFAULT;
55 if (access_ok(VERIFY_READ, src, 1)) 57 if (access_ok(VERIFY_READ, src, 1))
56 __do_strncpy_from_user(dst, src, count, res); 58 return __strncpy_from_user(dst, src, count);
57 return res; 59 return res;
58} 60}
61EXPORT_SYMBOL(strncpy_from_user);
59 62
60/* 63/*
61 * Zero Userspace 64 * Zero Userspace
@@ -94,7 +97,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
94 [zero] "r" (0UL), [eight] "r" (8UL)); 97 [zero] "r" (0UL), [eight] "r" (8UL));
95 return size; 98 return size;
96} 99}
97 100EXPORT_SYMBOL(__clear_user);
98 101
99unsigned long clear_user(void __user *to, unsigned long n) 102unsigned long clear_user(void __user *to, unsigned long n)
100{ 103{
@@ -102,6 +105,7 @@ unsigned long clear_user(void __user *to, unsigned long n)
102 return __clear_user(to, n); 105 return __clear_user(to, n);
103 return n; 106 return n;
104} 107}
108EXPORT_SYMBOL(clear_user);
105 109
106/* 110/*
107 * Return the size of a string (including the ending 0) 111 * Return the size of a string (including the ending 0)
@@ -125,6 +129,7 @@ long __strnlen_user(const char __user *s, long n)
125 s++; 129 s++;
126 } 130 }
127} 131}
132EXPORT_SYMBOL(__strnlen_user);
128 133
129long strnlen_user(const char __user *s, long n) 134long strnlen_user(const char __user *s, long n)
130{ 135{
@@ -132,6 +137,7 @@ long strnlen_user(const char __user *s, long n)
132 return 0; 137 return 0;
133 return __strnlen_user(s, n); 138 return __strnlen_user(s, n);
134} 139}
140EXPORT_SYMBOL(strnlen_user);
135 141
136long strlen_user(const char __user *s) 142long strlen_user(const char __user *s)
137{ 143{
@@ -147,6 +153,7 @@ long strlen_user(const char __user *s)
147 s++; 153 s++;
148 } 154 }
149} 155}
156EXPORT_SYMBOL(strlen_user);
150 157
151unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len) 158unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
152{ 159{
@@ -155,3 +162,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from, unsigned le
155 } 162 }
156 return len; 163 return len;
157} 164}
165EXPORT_SYMBOL(copy_in_user);
166
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 55250593d8..08dc696f54 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -41,6 +41,41 @@
41#define PF_RSVD (1<<3) 41#define PF_RSVD (1<<3)
42#define PF_INSTR (1<<4) 42#define PF_INSTR (1<<4)
43 43
44#ifdef CONFIG_KPROBES
45ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
46
47/* Hook to register for page fault notifications */
48int register_page_fault_notifier(struct notifier_block *nb)
49{
50 vmalloc_sync_all();
51 return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
52}
53
54int unregister_page_fault_notifier(struct notifier_block *nb)
55{
56 return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
57}
58
59static inline int notify_page_fault(enum die_val val, const char *str,
60 struct pt_regs *regs, long err, int trap, int sig)
61{
62 struct die_args args = {
63 .regs = regs,
64 .str = str,
65 .err = err,
66 .trapnr = trap,
67 .signr = sig
68 };
69 return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
70}
71#else
72static inline int notify_page_fault(enum die_val val, const char *str,
73 struct pt_regs *regs, long err, int trap, int sig)
74{
75 return NOTIFY_DONE;
76}
77#endif
78
44void bust_spinlocks(int yes) 79void bust_spinlocks(int yes)
45{ 80{
46 int loglevel_save = console_loglevel; 81 int loglevel_save = console_loglevel;
@@ -160,7 +195,7 @@ void dump_pagetable(unsigned long address)
160 printk("PGD %lx ", pgd_val(*pgd)); 195 printk("PGD %lx ", pgd_val(*pgd));
161 if (!pgd_present(*pgd)) goto ret; 196 if (!pgd_present(*pgd)) goto ret;
162 197
163 pud = __pud_offset_k((pud_t *)pgd_page(*pgd), address); 198 pud = pud_offset(pgd, address);
164 if (bad_address(pud)) goto bad; 199 if (bad_address(pud)) goto bad;
165 printk("PUD %lx ", pud_val(*pud)); 200 printk("PUD %lx ", pud_val(*pud));
166 if (!pud_present(*pud)) goto ret; 201 if (!pud_present(*pud)) goto ret;
@@ -348,7 +383,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
348 if (vmalloc_fault(address) >= 0) 383 if (vmalloc_fault(address) >= 0)
349 return; 384 return;
350 } 385 }
351 if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14, 386 if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
352 SIGSEGV) == NOTIFY_STOP) 387 SIGSEGV) == NOTIFY_STOP)
353 return; 388 return;
354 /* 389 /*
@@ -358,7 +393,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
358 goto bad_area_nosemaphore; 393 goto bad_area_nosemaphore;
359 } 394 }
360 395
361 if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14, 396 if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
362 SIGSEGV) == NOTIFY_STOP) 397 SIGSEGV) == NOTIFY_STOP)
363 return; 398 return;
364 399
@@ -410,8 +445,10 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
410 if (!(vma->vm_flags & VM_GROWSDOWN)) 445 if (!(vma->vm_flags & VM_GROWSDOWN))
411 goto bad_area; 446 goto bad_area;
412 if (error_code & 4) { 447 if (error_code & 4) {
413 // XXX: align red zone size with ABI 448 /* Allow userspace just enough access below the stack pointer
414 if (address + 128 < regs->rsp) 449 * to let the 'enter' instruction work.
450 */
451 if (address + 65536 + 32 * sizeof(unsigned long) < regs->rsp)
415 goto bad_area; 452 goto bad_area;
416 } 453 }
417 if (expand_stack(vma, address)) 454 if (expand_stack(vma, address))
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 4ba34e95d8..95bd232ff0 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -23,6 +23,7 @@
23#include <linux/bootmem.h> 23#include <linux/bootmem.h>
24#include <linux/proc_fs.h> 24#include <linux/proc_fs.h>
25#include <linux/pci.h> 25#include <linux/pci.h>
26#include <linux/poison.h>
26#include <linux/dma-mapping.h> 27#include <linux/dma-mapping.h>
27#include <linux/module.h> 28#include <linux/module.h>
28#include <linux/memory_hotplug.h> 29#include <linux/memory_hotplug.h>
@@ -41,8 +42,6 @@
41#include <asm/proto.h> 42#include <asm/proto.h>
42#include <asm/smp.h> 43#include <asm/smp.h>
43#include <asm/sections.h> 44#include <asm/sections.h>
44#include <asm/dma-mapping.h>
45#include <asm/swiotlb.h>
46 45
47#ifndef Dprintk 46#ifndef Dprintk
48#define Dprintk(x...) 47#define Dprintk(x...)
@@ -90,8 +89,6 @@ void show_mem(void)
90 printk(KERN_INFO "%lu pages swap cached\n",cached); 89 printk(KERN_INFO "%lu pages swap cached\n",cached);
91} 90}
92 91
93/* References to section boundaries */
94
95int after_bootmem; 92int after_bootmem;
96 93
97static __init void *spp_getpage(void) 94static __init void *spp_getpage(void)
@@ -261,9 +258,10 @@ phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
261 for (i = 0; i < PTRS_PER_PMD; pmd++, i++, address += PMD_SIZE) { 258 for (i = 0; i < PTRS_PER_PMD; pmd++, i++, address += PMD_SIZE) {
262 unsigned long entry; 259 unsigned long entry;
263 260
264 if (address > end) { 261 if (address >= end) {
265 for (; i < PTRS_PER_PMD; i++, pmd++) 262 if (!after_bootmem)
266 set_pmd(pmd, __pmd(0)); 263 for (; i < PTRS_PER_PMD; i++, pmd++)
264 set_pmd(pmd, __pmd(0));
267 break; 265 break;
268 } 266 }
269 entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address; 267 entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
@@ -341,7 +339,8 @@ static void __init find_early_table_space(unsigned long end)
341 table_end = table_start; 339 table_end = table_start;
342 340
343 early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n", 341 early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
344 end, table_start << PAGE_SHIFT, table_end << PAGE_SHIFT); 342 end, table_start << PAGE_SHIFT,
343 (table_start << PAGE_SHIFT) + tables);
345} 344}
346 345
347/* Setup the direct mapping of the physical memory at PAGE_OFFSET. 346/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
@@ -372,7 +371,7 @@ void __meminit init_memory_mapping(unsigned long start, unsigned long end)
372 pud_t *pud; 371 pud_t *pud;
373 372
374 if (after_bootmem) 373 if (after_bootmem)
375 pud = pud_offset_k(pgd, start & PGDIR_MASK); 374 pud = pud_offset(pgd, start & PGDIR_MASK);
376 else 375 else
377 pud = alloc_low_page(&map, &pud_phys); 376 pud = alloc_low_page(&map, &pud_phys);
378 377
@@ -508,8 +507,6 @@ void __init clear_kernel_mapping(unsigned long address, unsigned long size)
508/* 507/*
509 * Memory hotplug specific functions 508 * Memory hotplug specific functions
510 */ 509 */
511#if defined(CONFIG_ACPI_HOTPLUG_MEMORY) || defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)
512
513void online_page(struct page *page) 510void online_page(struct page *page)
514{ 511{
515 ClearPageReserved(page); 512 ClearPageReserved(page);
@@ -519,31 +516,17 @@ void online_page(struct page *page)
519 num_physpages++; 516 num_physpages++;
520} 517}
521 518
522#ifndef CONFIG_MEMORY_HOTPLUG 519#ifdef CONFIG_MEMORY_HOTPLUG
523/* 520/*
524 * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance, 521 * XXX: memory_add_physaddr_to_nid() is to find node id from physical address
525 * just online the pages. 522 * via probe interface of sysfs. If acpi notifies hot-add event, then it
523 * can tell node id by searching dsdt. But, probe interface doesn't have
524 * node id. So, return 0 as node id at this time.
526 */ 525 */
527int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages) 526#ifdef CONFIG_NUMA
527int memory_add_physaddr_to_nid(u64 start)
528{ 528{
529 int err = -EIO; 529 return 0;
530 unsigned long pfn;
531 unsigned long total = 0, mem = 0;
532 for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
533 if (pfn_valid(pfn)) {
534 online_page(pfn_to_page(pfn));
535 err = 0;
536 mem++;
537 }
538 total++;
539 }
540 if (!err) {
541 z->spanned_pages += total;
542 z->present_pages += mem;
543 z->zone_pgdat->node_spanned_pages += total;
544 z->zone_pgdat->node_present_pages += mem;
545 }
546 return err;
547} 530}
548#endif 531#endif
549 532
@@ -551,9 +534,9 @@ int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
551 * Memory is added always to NORMAL zone. This means you will never get 534 * Memory is added always to NORMAL zone. This means you will never get
552 * additional DMA/DMA32 memory. 535 * additional DMA/DMA32 memory.
553 */ 536 */
554int add_memory(u64 start, u64 size) 537int arch_add_memory(int nid, u64 start, u64 size)
555{ 538{
556 struct pglist_data *pgdat = NODE_DATA(0); 539 struct pglist_data *pgdat = NODE_DATA(nid);
557 struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2; 540 struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
558 unsigned long start_pfn = start >> PAGE_SHIFT; 541 unsigned long start_pfn = start >> PAGE_SHIFT;
559 unsigned long nr_pages = size >> PAGE_SHIFT; 542 unsigned long nr_pages = size >> PAGE_SHIFT;
@@ -570,7 +553,7 @@ error:
570 printk("%s: Problem encountered in __add_pages!\n", __func__); 553 printk("%s: Problem encountered in __add_pages!\n", __func__);
571 return ret; 554 return ret;
572} 555}
573EXPORT_SYMBOL_GPL(add_memory); 556EXPORT_SYMBOL_GPL(arch_add_memory);
574 557
575int remove_memory(u64 start, u64 size) 558int remove_memory(u64 start, u64 size)
576{ 559{
@@ -578,7 +561,33 @@ int remove_memory(u64 start, u64 size)
578} 561}
579EXPORT_SYMBOL_GPL(remove_memory); 562EXPORT_SYMBOL_GPL(remove_memory);
580 563
581#endif 564#else /* CONFIG_MEMORY_HOTPLUG */
565/*
566 * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
567 * just online the pages.
568 */
569int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
570{
571 int err = -EIO;
572 unsigned long pfn;
573 unsigned long total = 0, mem = 0;
574 for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
575 if (pfn_valid(pfn)) {
576 online_page(pfn_to_page(pfn));
577 err = 0;
578 mem++;
579 }
580 total++;
581 }
582 if (!err) {
583 z->spanned_pages += total;
584 z->present_pages += mem;
585 z->zone_pgdat->node_spanned_pages += total;
586 z->zone_pgdat->node_present_pages += mem;
587 }
588 return err;
589}
590#endif /* CONFIG_MEMORY_HOTPLUG */
582 591
583static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules, 592static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
584 kcore_vsyscall; 593 kcore_vsyscall;
@@ -587,10 +596,7 @@ void __init mem_init(void)
587{ 596{
588 long codesize, reservedpages, datasize, initsize; 597 long codesize, reservedpages, datasize, initsize;
589 598
590#ifdef CONFIG_SWIOTLB 599 pci_iommu_alloc();
591 pci_swiotlb_init();
592#endif
593 no_iommu_init();
594 600
595 /* How many end-of-memory variables you have, grandma! */ 601 /* How many end-of-memory variables you have, grandma! */
596 max_low_pfn = end_pfn; 602 max_low_pfn = end_pfn;
@@ -644,20 +650,31 @@ void __init mem_init(void)
644#endif 650#endif
645} 651}
646 652
647void free_initmem(void) 653void free_init_pages(char *what, unsigned long begin, unsigned long end)
648{ 654{
649 unsigned long addr; 655 unsigned long addr;
650 656
651 addr = (unsigned long)(&__init_begin); 657 if (begin >= end)
652 for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { 658 return;
659
660 printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
661 for (addr = begin; addr < end; addr += PAGE_SIZE) {
653 ClearPageReserved(virt_to_page(addr)); 662 ClearPageReserved(virt_to_page(addr));
654 init_page_count(virt_to_page(addr)); 663 init_page_count(virt_to_page(addr));
655 memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE); 664 memset((void *)(addr & ~(PAGE_SIZE-1)),
665 POISON_FREE_INITMEM, PAGE_SIZE);
656 free_page(addr); 666 free_page(addr);
657 totalram_pages++; 667 totalram_pages++;
658 } 668 }
659 memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin); 669}
660 printk ("Freeing unused kernel memory: %luk freed\n", (__init_end - __init_begin) >> 10); 670
671void free_initmem(void)
672{
673 memset(__initdata_begin, POISON_FREE_INITDATA,
674 __initdata_end - __initdata_begin);
675 free_init_pages("unused kernel memory",
676 (unsigned long)(&__init_begin),
677 (unsigned long)(&__init_end));
661} 678}
662 679
663#ifdef CONFIG_DEBUG_RODATA 680#ifdef CONFIG_DEBUG_RODATA
@@ -686,15 +703,7 @@ void mark_rodata_ro(void)
686#ifdef CONFIG_BLK_DEV_INITRD 703#ifdef CONFIG_BLK_DEV_INITRD
687void free_initrd_mem(unsigned long start, unsigned long end) 704void free_initrd_mem(unsigned long start, unsigned long end)
688{ 705{
689 if (start >= end) 706 free_init_pages("initrd memory", start, end);
690 return;
691 printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
692 for (; start < end; start += PAGE_SIZE) {
693 ClearPageReserved(virt_to_page(start));
694 init_page_count(virt_to_page(start));
695 free_page(start);
696 totalram_pages++;
697 }
698} 707}
699#endif 708#endif
700 709
diff --git a/arch/x86_64/mm/ioremap.c b/arch/x86_64/mm/ioremap.c
index ae20706420..45d7d823c3 100644
--- a/arch/x86_64/mm/ioremap.c
+++ b/arch/x86_64/mm/ioremap.c
@@ -11,6 +11,7 @@
11#include <linux/vmalloc.h> 11#include <linux/vmalloc.h>
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/module.h>
14#include <asm/io.h> 15#include <asm/io.h>
15#include <asm/pgalloc.h> 16#include <asm/pgalloc.h>
16#include <asm/fixmap.h> 17#include <asm/fixmap.h>
@@ -219,6 +220,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
219 } 220 }
220 return (__force void __iomem *) (offset + (char *)addr); 221 return (__force void __iomem *) (offset + (char *)addr);
221} 222}
223EXPORT_SYMBOL(__ioremap);
222 224
223/** 225/**
224 * ioremap_nocache - map bus memory into CPU space 226 * ioremap_nocache - map bus memory into CPU space
@@ -246,6 +248,7 @@ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
246{ 248{
247 return __ioremap(phys_addr, size, _PAGE_PCD); 249 return __ioremap(phys_addr, size, _PAGE_PCD);
248} 250}
251EXPORT_SYMBOL(ioremap_nocache);
249 252
250/** 253/**
251 * iounmap - Free a IO remapping 254 * iounmap - Free a IO remapping
@@ -291,3 +294,5 @@ void iounmap(volatile void __iomem *addr)
291 BUG_ON(p != o || o == NULL); 294 BUG_ON(p != o || o == NULL);
292 kfree(p); 295 kfree(p);
293} 296}
297EXPORT_SYMBOL(iounmap);
298
diff --git a/arch/x86_64/pci/k8-bus.c b/arch/x86_64/pci/k8-bus.c
index 3acf60ded2..b50a7c7c47 100644
--- a/arch/x86_64/pci/k8-bus.c
+++ b/arch/x86_64/pci/k8-bus.c
@@ -2,6 +2,7 @@
2#include <linux/pci.h> 2#include <linux/pci.h>
3#include <asm/mpspec.h> 3#include <asm/mpspec.h>
4#include <linux/cpumask.h> 4#include <linux/cpumask.h>
5#include <asm/k8.h>
5 6
6/* 7/*
7 * This discovers the pcibus <-> node mapping on AMD K8. 8 * This discovers the pcibus <-> node mapping on AMD K8.
@@ -18,7 +19,6 @@
18#define NR_LDT_BUS_NUMBER_REGISTERS 3 19#define NR_LDT_BUS_NUMBER_REGISTERS 3
19#define SECONDARY_LDT_BUS_NUMBER(dword) ((dword >> 8) & 0xFF) 20#define SECONDARY_LDT_BUS_NUMBER(dword) ((dword >> 8) & 0xFF)
20#define SUBORDINATE_LDT_BUS_NUMBER(dword) ((dword >> 16) & 0xFF) 21#define SUBORDINATE_LDT_BUS_NUMBER(dword) ((dword >> 16) & 0xFF)
21#define PCI_DEVICE_ID_K8HTCONFIG 0x1100
22 22
23/** 23/**
24 * fill_mp_bus_to_cpumask() 24 * fill_mp_bus_to_cpumask()
@@ -28,8 +28,7 @@
28__init static int 28__init static int
29fill_mp_bus_to_cpumask(void) 29fill_mp_bus_to_cpumask(void)
30{ 30{
31 struct pci_dev *nb_dev = NULL; 31 int i, j, k;
32 int i, j;
33 u32 ldtbus, nid; 32 u32 ldtbus, nid;
34 static int lbnr[3] = { 33 static int lbnr[3] = {
35 LDT_BUS_NUMBER_REGISTER_0, 34 LDT_BUS_NUMBER_REGISTER_0,
@@ -37,8 +36,9 @@ fill_mp_bus_to_cpumask(void)
37 LDT_BUS_NUMBER_REGISTER_2 36 LDT_BUS_NUMBER_REGISTER_2
38 }; 37 };
39 38
40 while ((nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 39 cache_k8_northbridges();
41 PCI_DEVICE_ID_K8HTCONFIG, nb_dev))) { 40 for (k = 0; k < num_k8_northbridges; k++) {
41 struct pci_dev *nb_dev = k8_northbridges[k];
42 pci_read_config_dword(nb_dev, NODE_ID_REGISTER, &nid); 42 pci_read_config_dword(nb_dev, NODE_ID_REGISTER, &nid);
43 43
44 for (i = 0; i < NR_LDT_BUS_NUMBER_REGISTERS; i++) { 44 for (i = 0; i < NR_LDT_BUS_NUMBER_REGISTERS; i++) {
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index 98fac8489a..3a3a4c66ef 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -71,7 +71,7 @@ archprepare: $(archinc)/.platform
71# Update machine cpu and platform symlinks if something which affects 71# Update machine cpu and platform symlinks if something which affects
72# them changed. 72# them changed.
73 73
74$(archinc)/.platform: $(wildcard include/config/arch/*.h) include/config/MARKER 74$(archinc)/.platform: $(wildcard include/config/arch/*.h) include/config/auto.conf
75 @echo ' SYMLINK $(archinc)/xtensa/config -> $(archinc)/xtensa/config-$(CPU)' 75 @echo ' SYMLINK $(archinc)/xtensa/config -> $(archinc)/xtensa/config-$(CPU)'
76 $(Q)mkdir -p $(archinc) 76 $(Q)mkdir -p $(archinc)
77 $(Q)mkdir -p $(archinc)/xtensa 77 $(Q)mkdir -p $(archinc)/xtensa
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index 937d81f62f..fe14909f45 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -29,7 +29,7 @@
29 29
30extern volatile unsigned long wall_jiffies; 30extern volatile unsigned long wall_jiffies;
31 31
32spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED; 32DEFINE_SPINLOCK(rtc_lock);
33EXPORT_SYMBOL(rtc_lock); 33EXPORT_SYMBOL(rtc_lock);
34 34
35 35
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 225d64d73f..27e409089a 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -461,7 +461,7 @@ void show_code(unsigned int *pc)
461 } 461 }
462} 462}
463 463
464spinlock_t die_lock = SPIN_LOCK_UNLOCKED; 464DEFINE_SPINLOCK(die_lock);
465 465
466void die(const char * str, struct pt_regs * regs, long err) 466void die(const char * str, struct pt_regs * regs, long err)
467{ 467{
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 1ec5df4667..3af31ed49a 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -892,7 +892,7 @@ static int as_can_break_anticipation(struct as_data *ad, struct as_rq *arq)
892} 892}
893 893
894/* 894/*
895 * as_can_anticipate indicates weather we should either run arq 895 * as_can_anticipate indicates whether we should either run arq
896 * or keep anticipating a better request. 896 * or keep anticipating a better request.
897 */ 897 */
898static int as_can_anticipate(struct as_data *ad, struct as_rq *arq) 898static int as_can_anticipate(struct as_data *ad, struct as_rq *arq)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 0603ab2f36..eee03a3876 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -2745,7 +2745,7 @@ static int attempt_merge(request_queue_t *q, struct request *req,
2745 return 0; 2745 return 0;
2746 2746
2747 /* 2747 /*
2748 * not contigious 2748 * not contiguous
2749 */ 2749 */
2750 if (req->sector + req->nr_sectors != next->sector) 2750 if (req->sector + req->nr_sectors != next->sector)
2751 return 0; 2751 return 0;
@@ -3403,7 +3403,7 @@ static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
3403} 3403}
3404 3404
3405 3405
3406static struct notifier_block blk_cpu_notifier = { 3406static struct notifier_block __devinitdata blk_cpu_notifier = {
3407 .notifier_call = blk_cpu_notify, 3407 .notifier_call = blk_cpu_notify,
3408}; 3408};
3409 3409
@@ -3415,7 +3415,7 @@ static struct notifier_block blk_cpu_notifier = {
3415 * 3415 *
3416 * Description: 3416 * Description:
3417 * Ends all I/O on a request. It does not handle partial completions, 3417 * Ends all I/O on a request. It does not handle partial completions,
3418 * unless the driver actually implements this in its completionc callback 3418 * unless the driver actually implements this in its completion callback
3419 * through requeueing. Theh actual completion happens out-of-order, 3419 * through requeueing. Theh actual completion happens out-of-order,
3420 * through a softirq handler. The user must have registered a completion 3420 * through a softirq handler. The user must have registered a completion
3421 * callback through blk_queue_softirq_done(). 3421 * callback through blk_queue_softirq_done().
@@ -3541,9 +3541,7 @@ int __init blk_dev_init(void)
3541 INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i)); 3541 INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
3542 3542
3543 open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL); 3543 open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
3544#ifdef CONFIG_HOTPLUG_CPU 3544 register_hotcpu_notifier(&blk_cpu_notifier);
3545 register_cpu_notifier(&blk_cpu_notifier);
3546#endif
3547 3545
3548 blk_max_low_pfn = max_low_pfn; 3546 blk_max_low_pfn = max_low_pfn;
3549 blk_max_pfn = max_pfn; 3547 blk_max_pfn = max_pfn;
diff --git a/crypto/Kconfig b/crypto/Kconfig
index c442f2e7ce..ba133d5570 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -337,7 +337,7 @@ config CRYPTO_CRC32C
337 337
338config CRYPTO_TEST 338config CRYPTO_TEST
339 tristate "Testing module" 339 tristate "Testing module"
340 depends on CRYPTO 340 depends on CRYPTO && m
341 help 341 help
342 Quick & dirty crypto test module. 342 Quick & dirty crypto test module.
343 343
diff --git a/crypto/aes.c b/crypto/aes.c
index a5017292e0..a038711831 100644
--- a/crypto/aes.c
+++ b/crypto/aes.c
@@ -248,10 +248,10 @@ gen_tabs (void)
248 t ^= E_KEY[8 * i + 7]; E_KEY[8 * i + 15] = t; \ 248 t ^= E_KEY[8 * i + 7]; E_KEY[8 * i + 15] = t; \
249} 249}
250 250
251static int 251static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
252aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) 252 unsigned int key_len, u32 *flags)
253{ 253{
254 struct aes_ctx *ctx = ctx_arg; 254 struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
255 const __le32 *key = (const __le32 *)in_key; 255 const __le32 *key = (const __le32 *)in_key;
256 u32 i, t, u, v, w; 256 u32 i, t, u, v, w;
257 257
@@ -318,9 +318,9 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
318 f_rl(bo, bi, 2, k); \ 318 f_rl(bo, bi, 2, k); \
319 f_rl(bo, bi, 3, k) 319 f_rl(bo, bi, 3, k)
320 320
321static void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in) 321static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
322{ 322{
323 const struct aes_ctx *ctx = ctx_arg; 323 const struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
324 const __le32 *src = (const __le32 *)in; 324 const __le32 *src = (const __le32 *)in;
325 __le32 *dst = (__le32 *)out; 325 __le32 *dst = (__le32 *)out;
326 u32 b0[4], b1[4]; 326 u32 b0[4], b1[4];
@@ -373,9 +373,9 @@ static void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in)
373 i_rl(bo, bi, 2, k); \ 373 i_rl(bo, bi, 2, k); \
374 i_rl(bo, bi, 3, k) 374 i_rl(bo, bi, 3, k)
375 375
376static void aes_decrypt(void *ctx_arg, u8 *out, const u8 *in) 376static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
377{ 377{
378 const struct aes_ctx *ctx = ctx_arg; 378 const struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
379 const __le32 *src = (const __le32 *)in; 379 const __le32 *src = (const __le32 *)in;
380 __le32 *dst = (__le32 *)out; 380 __le32 *dst = (__le32 *)out;
381 u32 b0[4], b1[4]; 381 u32 b0[4], b1[4];
diff --git a/crypto/anubis.c b/crypto/anubis.c
index 2c796bdb91..7e2e1a2980 100644
--- a/crypto/anubis.c
+++ b/crypto/anubis.c
@@ -460,16 +460,15 @@ static const u32 rc[] = {
460 0xf726ffedU, 0xe89d6f8eU, 0x19a0f089U, 460 0xf726ffedU, 0xe89d6f8eU, 0x19a0f089U,
461}; 461};
462 462
463static int anubis_setkey(void *ctx_arg, const u8 *in_key, 463static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key,
464 unsigned int key_len, u32 *flags) 464 unsigned int key_len, u32 *flags)
465{ 465{
466 struct anubis_ctx *ctx = crypto_tfm_ctx(tfm);
466 const __be32 *key = (const __be32 *)in_key; 467 const __be32 *key = (const __be32 *)in_key;
467 int N, R, i, r; 468 int N, R, i, r;
468 u32 kappa[ANUBIS_MAX_N]; 469 u32 kappa[ANUBIS_MAX_N];
469 u32 inter[ANUBIS_MAX_N]; 470 u32 inter[ANUBIS_MAX_N];
470 471
471 struct anubis_ctx *ctx = ctx_arg;
472
473 switch (key_len) 472 switch (key_len)
474 { 473 {
475 case 16: case 20: case 24: case 28: 474 case 16: case 20: case 24: case 28:
@@ -660,15 +659,15 @@ static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4],
660 dst[i] = cpu_to_be32(inter[i]); 659 dst[i] = cpu_to_be32(inter[i]);
661} 660}
662 661
663static void anubis_encrypt(void *ctx_arg, u8 *dst, const u8 *src) 662static void anubis_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
664{ 663{
665 struct anubis_ctx *ctx = ctx_arg; 664 struct anubis_ctx *ctx = crypto_tfm_ctx(tfm);
666 anubis_crypt(ctx->E, dst, src, ctx->R); 665 anubis_crypt(ctx->E, dst, src, ctx->R);
667} 666}
668 667
669static void anubis_decrypt(void *ctx_arg, u8 *dst, const u8 *src) 668static void anubis_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
670{ 669{
671 struct anubis_ctx *ctx = ctx_arg; 670 struct anubis_ctx *ctx = crypto_tfm_ctx(tfm);
672 anubis_crypt(ctx->D, dst, src, ctx->R); 671 anubis_crypt(ctx->D, dst, src, ctx->R);
673} 672}
674 673
diff --git a/crypto/api.c b/crypto/api.c
index 80bba637fb..c11ec1fd4f 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -188,13 +188,16 @@ struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags)
188 if (crypto_init_flags(tfm, flags)) 188 if (crypto_init_flags(tfm, flags))
189 goto out_free_tfm; 189 goto out_free_tfm;
190 190
191 if (crypto_init_ops(tfm)) { 191 if (crypto_init_ops(tfm))
192 crypto_exit_ops(tfm);
193 goto out_free_tfm; 192 goto out_free_tfm;
194 } 193
194 if (alg->cra_init && alg->cra_init(tfm))
195 goto cra_init_failed;
195 196
196 goto out; 197 goto out;
197 198
199cra_init_failed:
200 crypto_exit_ops(tfm);
198out_free_tfm: 201out_free_tfm:
199 kfree(tfm); 202 kfree(tfm);
200 tfm = NULL; 203 tfm = NULL;
@@ -215,6 +218,8 @@ void crypto_free_tfm(struct crypto_tfm *tfm)
215 alg = tfm->__crt_alg; 218 alg = tfm->__crt_alg;
216 size = sizeof(*tfm) + alg->cra_ctxsize; 219 size = sizeof(*tfm) + alg->cra_ctxsize;
217 220
221 if (alg->cra_exit)
222 alg->cra_exit(tfm);
218 crypto_exit_ops(tfm); 223 crypto_exit_ops(tfm);
219 crypto_alg_put(alg); 224 crypto_alg_put(alg);
220 memset(tfm, 0, size); 225 memset(tfm, 0, size);
@@ -224,7 +229,7 @@ void crypto_free_tfm(struct crypto_tfm *tfm)
224static inline int crypto_set_driver_name(struct crypto_alg *alg) 229static inline int crypto_set_driver_name(struct crypto_alg *alg)
225{ 230{
226 static const char suffix[] = "-generic"; 231 static const char suffix[] = "-generic";
227 char *driver_name = (char *)alg->cra_driver_name; 232 char *driver_name = alg->cra_driver_name;
228 int len; 233 int len;
229 234
230 if (*driver_name) 235 if (*driver_name)
@@ -262,13 +267,13 @@ int crypto_register_alg(struct crypto_alg *alg)
262 down_write(&crypto_alg_sem); 267 down_write(&crypto_alg_sem);
263 268
264 list_for_each_entry(q, &crypto_alg_list, cra_list) { 269 list_for_each_entry(q, &crypto_alg_list, cra_list) {
265 if (!strcmp(q->cra_driver_name, alg->cra_driver_name)) { 270 if (q == alg) {
266 ret = -EEXIST; 271 ret = -EEXIST;
267 goto out; 272 goto out;
268 } 273 }
269 } 274 }
270 275
271 list_add_tail(&alg->cra_list, &crypto_alg_list); 276 list_add(&alg->cra_list, &crypto_alg_list);
272out: 277out:
273 up_write(&crypto_alg_sem); 278 up_write(&crypto_alg_sem);
274 return ret; 279 return ret;
diff --git a/crypto/arc4.c b/crypto/arc4.c
index 9efbcaae88..5edc6a65b9 100644
--- a/crypto/arc4.c
+++ b/crypto/arc4.c
@@ -24,9 +24,10 @@ struct arc4_ctx {
24 u8 x, y; 24 u8 x, y;
25}; 25};
26 26
27static int arc4_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) 27static int arc4_set_key(struct crypto_tfm *tfm, const u8 *in_key,
28 unsigned int key_len, u32 *flags)
28{ 29{
29 struct arc4_ctx *ctx = ctx_arg; 30 struct arc4_ctx *ctx = crypto_tfm_ctx(tfm);
30 int i, j = 0, k = 0; 31 int i, j = 0, k = 0;
31 32
32 ctx->x = 1; 33 ctx->x = 1;
@@ -48,9 +49,9 @@ static int arc4_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u
48 return 0; 49 return 0;
49} 50}
50 51
51static void arc4_crypt(void *ctx_arg, u8 *out, const u8 *in) 52static void arc4_crypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
52{ 53{
53 struct arc4_ctx *ctx = ctx_arg; 54 struct arc4_ctx *ctx = crypto_tfm_ctx(tfm);
54 55
55 u8 *const S = ctx->S; 56 u8 *const S = ctx->S;
56 u8 x = ctx->x; 57 u8 x = ctx->x;
diff --git a/crypto/blowfish.c b/crypto/blowfish.c
index 7f710b201f..490265f42b 100644
--- a/crypto/blowfish.c
+++ b/crypto/blowfish.c
@@ -349,7 +349,7 @@ static void encrypt_block(struct bf_ctx *bctx, u32 *dst, u32 *src)
349 dst[1] = yl; 349 dst[1] = yl;
350} 350}
351 351
352static void bf_encrypt(void *ctx, u8 *dst, const u8 *src) 352static void bf_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
353{ 353{
354 const __be32 *in_blk = (const __be32 *)src; 354 const __be32 *in_blk = (const __be32 *)src;
355 __be32 *const out_blk = (__be32 *)dst; 355 __be32 *const out_blk = (__be32 *)dst;
@@ -357,17 +357,18 @@ static void bf_encrypt(void *ctx, u8 *dst, const u8 *src)
357 357
358 in32[0] = be32_to_cpu(in_blk[0]); 358 in32[0] = be32_to_cpu(in_blk[0]);
359 in32[1] = be32_to_cpu(in_blk[1]); 359 in32[1] = be32_to_cpu(in_blk[1]);
360 encrypt_block(ctx, out32, in32); 360 encrypt_block(crypto_tfm_ctx(tfm), out32, in32);
361 out_blk[0] = cpu_to_be32(out32[0]); 361 out_blk[0] = cpu_to_be32(out32[0]);
362 out_blk[1] = cpu_to_be32(out32[1]); 362 out_blk[1] = cpu_to_be32(out32[1]);
363} 363}
364 364
365static void bf_decrypt(void *ctx, u8 *dst, const u8 *src) 365static void bf_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
366{ 366{
367 struct bf_ctx *ctx = crypto_tfm_ctx(tfm);
367 const __be32 *in_blk = (const __be32 *)src; 368 const __be32 *in_blk = (const __be32 *)src;
368 __be32 *const out_blk = (__be32 *)dst; 369 __be32 *const out_blk = (__be32 *)dst;
369 const u32 *P = ((struct bf_ctx *)ctx)->p; 370 const u32 *P = ctx->p;
370 const u32 *S = ((struct bf_ctx *)ctx)->s; 371 const u32 *S = ctx->s;
371 u32 yl = be32_to_cpu(in_blk[0]); 372 u32 yl = be32_to_cpu(in_blk[0]);
372 u32 yr = be32_to_cpu(in_blk[1]); 373 u32 yr = be32_to_cpu(in_blk[1]);
373 374
@@ -398,12 +399,14 @@ static void bf_decrypt(void *ctx, u8 *dst, const u8 *src)
398/* 399/*
399 * Calculates the blowfish S and P boxes for encryption and decryption. 400 * Calculates the blowfish S and P boxes for encryption and decryption.
400 */ 401 */
401static int bf_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags) 402static int bf_setkey(struct crypto_tfm *tfm, const u8 *key,
403 unsigned int keylen, u32 *flags)
402{ 404{
405 struct bf_ctx *ctx = crypto_tfm_ctx(tfm);
406 u32 *P = ctx->p;
407 u32 *S = ctx->s;
403 short i, j, count; 408 short i, j, count;
404 u32 data[2], temp; 409 u32 data[2], temp;
405 u32 *P = ((struct bf_ctx *)ctx)->p;
406 u32 *S = ((struct bf_ctx *)ctx)->s;
407 410
408 /* Copy the initialization s-boxes */ 411 /* Copy the initialization s-boxes */
409 for (i = 0, count = 0; i < 256; i++) 412 for (i = 0, count = 0; i < 256; i++)
diff --git a/crypto/cast5.c b/crypto/cast5.c
index 8834c8580c..08eef58c1d 100644
--- a/crypto/cast5.c
+++ b/crypto/cast5.c
@@ -577,9 +577,9 @@ static const u32 sb8[256] = {
577 (((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]) ) 577 (((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]) )
578 578
579 579
580static void cast5_encrypt(void *ctx, u8 * outbuf, const u8 * inbuf) 580static void cast5_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
581{ 581{
582 struct cast5_ctx *c = (struct cast5_ctx *) ctx; 582 struct cast5_ctx *c = crypto_tfm_ctx(tfm);
583 const __be32 *src = (const __be32 *)inbuf; 583 const __be32 *src = (const __be32 *)inbuf;
584 __be32 *dst = (__be32 *)outbuf; 584 __be32 *dst = (__be32 *)outbuf;
585 u32 l, r, t; 585 u32 l, r, t;
@@ -642,9 +642,9 @@ static void cast5_encrypt(void *ctx, u8 * outbuf, const u8 * inbuf)
642 dst[1] = cpu_to_be32(l); 642 dst[1] = cpu_to_be32(l);
643} 643}
644 644
645static void cast5_decrypt(void *ctx, u8 * outbuf, const u8 * inbuf) 645static void cast5_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
646{ 646{
647 struct cast5_ctx *c = (struct cast5_ctx *) ctx; 647 struct cast5_ctx *c = crypto_tfm_ctx(tfm);
648 const __be32 *src = (const __be32 *)inbuf; 648 const __be32 *src = (const __be32 *)inbuf;
649 __be32 *dst = (__be32 *)outbuf; 649 __be32 *dst = (__be32 *)outbuf;
650 u32 l, r, t; 650 u32 l, r, t;
@@ -769,15 +769,15 @@ static void key_schedule(u32 * x, u32 * z, u32 * k)
769} 769}
770 770
771 771
772static int 772static int cast5_setkey(struct crypto_tfm *tfm, const u8 *key,
773cast5_setkey(void *ctx, const u8 * key, unsigned key_len, u32 * flags) 773 unsigned key_len, u32 *flags)
774{ 774{
775 struct cast5_ctx *c = crypto_tfm_ctx(tfm);
775 int i; 776 int i;
776 u32 x[4]; 777 u32 x[4];
777 u32 z[4]; 778 u32 z[4];
778 u32 k[16]; 779 u32 k[16];
779 __be32 p_key[4]; 780 __be32 p_key[4];
780 struct cast5_ctx *c = (struct cast5_ctx *) ctx;
781 781
782 if (key_len < 5 || key_len > 16) { 782 if (key_len < 5 || key_len > 16) {
783 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 783 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
diff --git a/crypto/cast6.c b/crypto/cast6.c
index 9e28740ba7..08e33bfc3a 100644
--- a/crypto/cast6.c
+++ b/crypto/cast6.c
@@ -381,13 +381,13 @@ static inline void W(u32 *key, unsigned int i) {
381 key[7] ^= F2(key[0], Tr[i % 4][7], Tm[i][7]); 381 key[7] ^= F2(key[0], Tr[i % 4][7], Tm[i][7]);
382} 382}
383 383
384static int 384static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key,
385cast6_setkey(void *ctx, const u8 * in_key, unsigned key_len, u32 * flags) 385 unsigned key_len, u32 *flags)
386{ 386{
387 int i; 387 int i;
388 u32 key[8]; 388 u32 key[8];
389 __be32 p_key[8]; /* padded key */ 389 __be32 p_key[8]; /* padded key */
390 struct cast6_ctx *c = (struct cast6_ctx *) ctx; 390 struct cast6_ctx *c = crypto_tfm_ctx(tfm);
391 391
392 if (key_len < 16 || key_len > 32 || key_len % 4 != 0) { 392 if (key_len < 16 || key_len > 32 || key_len % 4 != 0) {
393 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 393 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
@@ -444,8 +444,9 @@ static inline void QBAR (u32 * block, u8 * Kr, u32 * Km) {
444 block[2] ^= F1(block[3], Kr[0], Km[0]); 444 block[2] ^= F1(block[3], Kr[0], Km[0]);
445} 445}
446 446
447static void cast6_encrypt (void * ctx, u8 * outbuf, const u8 * inbuf) { 447static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
448 struct cast6_ctx * c = (struct cast6_ctx *)ctx; 448{
449 struct cast6_ctx *c = crypto_tfm_ctx(tfm);
449 const __be32 *src = (const __be32 *)inbuf; 450 const __be32 *src = (const __be32 *)inbuf;
450 __be32 *dst = (__be32 *)outbuf; 451 __be32 *dst = (__be32 *)outbuf;
451 u32 block[4]; 452 u32 block[4];
@@ -476,8 +477,8 @@ static void cast6_encrypt (void * ctx, u8 * outbuf, const u8 * inbuf) {
476 dst[3] = cpu_to_be32(block[3]); 477 dst[3] = cpu_to_be32(block[3]);
477} 478}
478 479
479static void cast6_decrypt (void * ctx, u8 * outbuf, const u8 * inbuf) { 480static void cast6_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) {
480 struct cast6_ctx * c = (struct cast6_ctx *)ctx; 481 struct cast6_ctx * c = crypto_tfm_ctx(tfm);
481 const __be32 *src = (const __be32 *)inbuf; 482 const __be32 *src = (const __be32 *)inbuf;
482 __be32 *dst = (__be32 *)outbuf; 483 __be32 *dst = (__be32 *)outbuf;
483 u32 block[4]; 484 u32 block[4];
diff --git a/crypto/cipher.c b/crypto/cipher.c
index 65bcea0cd1..b899eb97ab 100644
--- a/crypto/cipher.c
+++ b/crypto/cipher.c
@@ -187,7 +187,7 @@ static unsigned int cbc_process_encrypt(const struct cipher_desc *desc,
187 void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block; 187 void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block;
188 int bsize = crypto_tfm_alg_blocksize(tfm); 188 int bsize = crypto_tfm_alg_blocksize(tfm);
189 189
190 void (*fn)(void *, u8 *, const u8 *) = desc->crfn; 190 void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn;
191 u8 *iv = desc->info; 191 u8 *iv = desc->info;
192 unsigned int done = 0; 192 unsigned int done = 0;
193 193
@@ -195,7 +195,7 @@ static unsigned int cbc_process_encrypt(const struct cipher_desc *desc,
195 195
196 do { 196 do {
197 xor(iv, src); 197 xor(iv, src);
198 fn(crypto_tfm_ctx(tfm), dst, iv); 198 fn(tfm, dst, iv);
199 memcpy(iv, dst, bsize); 199 memcpy(iv, dst, bsize);
200 200
201 src += bsize; 201 src += bsize;
@@ -218,7 +218,7 @@ static unsigned int cbc_process_decrypt(const struct cipher_desc *desc,
218 u8 *buf = (u8 *)ALIGN((unsigned long)stack, alignmask + 1); 218 u8 *buf = (u8 *)ALIGN((unsigned long)stack, alignmask + 1);
219 u8 **dst_p = src == dst ? &buf : &dst; 219 u8 **dst_p = src == dst ? &buf : &dst;
220 220
221 void (*fn)(void *, u8 *, const u8 *) = desc->crfn; 221 void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn;
222 u8 *iv = desc->info; 222 u8 *iv = desc->info;
223 unsigned int done = 0; 223 unsigned int done = 0;
224 224
@@ -227,7 +227,7 @@ static unsigned int cbc_process_decrypt(const struct cipher_desc *desc,
227 do { 227 do {
228 u8 *tmp_dst = *dst_p; 228 u8 *tmp_dst = *dst_p;
229 229
230 fn(crypto_tfm_ctx(tfm), tmp_dst, src); 230 fn(tfm, tmp_dst, src);
231 xor(tmp_dst, iv); 231 xor(tmp_dst, iv);
232 memcpy(iv, src, bsize); 232 memcpy(iv, src, bsize);
233 if (tmp_dst != dst) 233 if (tmp_dst != dst)
@@ -245,13 +245,13 @@ static unsigned int ecb_process(const struct cipher_desc *desc, u8 *dst,
245{ 245{
246 struct crypto_tfm *tfm = desc->tfm; 246 struct crypto_tfm *tfm = desc->tfm;
247 int bsize = crypto_tfm_alg_blocksize(tfm); 247 int bsize = crypto_tfm_alg_blocksize(tfm);
248 void (*fn)(void *, u8 *, const u8 *) = desc->crfn; 248 void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn;
249 unsigned int done = 0; 249 unsigned int done = 0;
250 250
251 nbytes -= bsize; 251 nbytes -= bsize;
252 252
253 do { 253 do {
254 fn(crypto_tfm_ctx(tfm), dst, src); 254 fn(tfm, dst, src);
255 255
256 src += bsize; 256 src += bsize;
257 dst += bsize; 257 dst += bsize;
@@ -268,7 +268,7 @@ static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
268 tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 268 tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
269 return -EINVAL; 269 return -EINVAL;
270 } else 270 } else
271 return cia->cia_setkey(crypto_tfm_ctx(tfm), key, keylen, 271 return cia->cia_setkey(tfm, key, keylen,
272 &tfm->crt_flags); 272 &tfm->crt_flags);
273} 273}
274 274
diff --git a/crypto/compress.c b/crypto/compress.c
index eb36d9364d..eca182aa33 100644
--- a/crypto/compress.c
+++ b/crypto/compress.c
@@ -22,8 +22,7 @@ static int crypto_compress(struct crypto_tfm *tfm,
22 const u8 *src, unsigned int slen, 22 const u8 *src, unsigned int slen,
23 u8 *dst, unsigned int *dlen) 23 u8 *dst, unsigned int *dlen)
24{ 24{
25 return tfm->__crt_alg->cra_compress.coa_compress(crypto_tfm_ctx(tfm), 25 return tfm->__crt_alg->cra_compress.coa_compress(tfm, src, slen, dst,
26 src, slen, dst,
27 dlen); 26 dlen);
28} 27}
29 28
@@ -31,8 +30,7 @@ static int crypto_decompress(struct crypto_tfm *tfm,
31 const u8 *src, unsigned int slen, 30 const u8 *src, unsigned int slen,
32 u8 *dst, unsigned int *dlen) 31 u8 *dst, unsigned int *dlen)
33{ 32{
34 return tfm->__crt_alg->cra_compress.coa_decompress(crypto_tfm_ctx(tfm), 33 return tfm->__crt_alg->cra_compress.coa_decompress(tfm, src, slen, dst,
35 src, slen, dst,
36 dlen); 34 dlen);
37} 35}
38 36
@@ -43,21 +41,14 @@ int crypto_init_compress_flags(struct crypto_tfm *tfm, u32 flags)
43 41
44int crypto_init_compress_ops(struct crypto_tfm *tfm) 42int crypto_init_compress_ops(struct crypto_tfm *tfm)
45{ 43{
46 int ret = 0;
47 struct compress_tfm *ops = &tfm->crt_compress; 44 struct compress_tfm *ops = &tfm->crt_compress;
48
49 ret = tfm->__crt_alg->cra_compress.coa_init(crypto_tfm_ctx(tfm));
50 if (ret)
51 goto out;
52 45
53 ops->cot_compress = crypto_compress; 46 ops->cot_compress = crypto_compress;
54 ops->cot_decompress = crypto_decompress; 47 ops->cot_decompress = crypto_decompress;
55 48
56out: 49 return 0;
57 return ret;
58} 50}
59 51
60void crypto_exit_compress_ops(struct crypto_tfm *tfm) 52void crypto_exit_compress_ops(struct crypto_tfm *tfm)
61{ 53{
62 tfm->__crt_alg->cra_compress.coa_exit(crypto_tfm_ctx(tfm));
63} 54}
diff --git a/crypto/crc32c.c b/crypto/crc32c.c
index 953362423a..f2660123ae 100644
--- a/crypto/crc32c.c
+++ b/crypto/crc32c.c
@@ -31,9 +31,9 @@ struct chksum_ctx {
31 * crc using table. 31 * crc using table.
32 */ 32 */
33 33
34static void chksum_init(void *ctx) 34static void chksum_init(struct crypto_tfm *tfm)
35{ 35{
36 struct chksum_ctx *mctx = ctx; 36 struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
37 37
38 mctx->crc = ~(u32)0; /* common usage */ 38 mctx->crc = ~(u32)0; /* common usage */
39} 39}
@@ -43,10 +43,10 @@ static void chksum_init(void *ctx)
43 * If your algorithm starts with ~0, then XOR with ~0 before you set 43 * If your algorithm starts with ~0, then XOR with ~0 before you set
44 * the seed. 44 * the seed.
45 */ 45 */
46static int chksum_setkey(void *ctx, const u8 *key, unsigned int keylen, 46static int chksum_setkey(struct crypto_tfm *tfm, const u8 *key,
47 u32 *flags) 47 unsigned int keylen, u32 *flags)
48{ 48{
49 struct chksum_ctx *mctx = ctx; 49 struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
50 50
51 if (keylen != sizeof(mctx->crc)) { 51 if (keylen != sizeof(mctx->crc)) {
52 if (flags) 52 if (flags)
@@ -57,9 +57,10 @@ static int chksum_setkey(void *ctx, const u8 *key, unsigned int keylen,
57 return 0; 57 return 0;
58} 58}
59 59
60static void chksum_update(void *ctx, const u8 *data, unsigned int length) 60static void chksum_update(struct crypto_tfm *tfm, const u8 *data,
61 unsigned int length)
61{ 62{
62 struct chksum_ctx *mctx = ctx; 63 struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
63 u32 mcrc; 64 u32 mcrc;
64 65
65 mcrc = crc32c(mctx->crc, data, (size_t)length); 66 mcrc = crc32c(mctx->crc, data, (size_t)length);
@@ -67,9 +68,9 @@ static void chksum_update(void *ctx, const u8 *data, unsigned int length)
67 mctx->crc = mcrc; 68 mctx->crc = mcrc;
68} 69}
69 70
70static void chksum_final(void *ctx, u8 *out) 71static void chksum_final(struct crypto_tfm *tfm, u8 *out)
71{ 72{
72 struct chksum_ctx *mctx = ctx; 73 struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
73 u32 mcrc = (mctx->crc ^ ~(u32)0); 74 u32 mcrc = (mctx->crc ^ ~(u32)0);
74 75
75 *(u32 *)out = __le32_to_cpu(mcrc); 76 *(u32 *)out = __le32_to_cpu(mcrc);
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index 3fcf6e887e..a0d956b529 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -27,8 +27,8 @@
27#define NULL_BLOCK_SIZE 1 27#define NULL_BLOCK_SIZE 1
28#define NULL_DIGEST_SIZE 0 28#define NULL_DIGEST_SIZE 0
29 29
30static int null_compress(void *ctx, const u8 *src, unsigned int slen, 30static int null_compress(struct crypto_tfm *tfm, const u8 *src,
31 u8 *dst, unsigned int *dlen) 31 unsigned int slen, u8 *dst, unsigned int *dlen)
32{ 32{
33 if (slen > *dlen) 33 if (slen > *dlen)
34 return -EINVAL; 34 return -EINVAL;
@@ -37,20 +37,21 @@ static int null_compress(void *ctx, const u8 *src, unsigned int slen,
37 return 0; 37 return 0;
38} 38}
39 39
40static void null_init(void *ctx) 40static void null_init(struct crypto_tfm *tfm)
41{ } 41{ }
42 42
43static void null_update(void *ctx, const u8 *data, unsigned int len) 43static void null_update(struct crypto_tfm *tfm, const u8 *data,
44 unsigned int len)
44{ } 45{ }
45 46
46static void null_final(void *ctx, u8 *out) 47static void null_final(struct crypto_tfm *tfm, u8 *out)
47{ } 48{ }
48 49
49static int null_setkey(void *ctx, const u8 *key, 50static int null_setkey(struct crypto_tfm *tfm, const u8 *key,
50 unsigned int keylen, u32 *flags) 51 unsigned int keylen, u32 *flags)
51{ return 0; } 52{ return 0; }
52 53
53static void null_crypt(void *ctx, u8 *dst, const u8 *src) 54static void null_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
54{ 55{
55 memcpy(dst, src, NULL_BLOCK_SIZE); 56 memcpy(dst, src, NULL_BLOCK_SIZE);
56} 57}
diff --git a/crypto/deflate.c b/crypto/deflate.c
index f209368d62..6588bbf82e 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -102,8 +102,9 @@ static void deflate_decomp_exit(struct deflate_ctx *ctx)
102 kfree(ctx->decomp_stream.workspace); 102 kfree(ctx->decomp_stream.workspace);
103} 103}
104 104
105static int deflate_init(void *ctx) 105static int deflate_init(struct crypto_tfm *tfm)
106{ 106{
107 struct deflate_ctx *ctx = crypto_tfm_ctx(tfm);
107 int ret; 108 int ret;
108 109
109 ret = deflate_comp_init(ctx); 110 ret = deflate_comp_init(ctx);
@@ -116,17 +117,19 @@ out:
116 return ret; 117 return ret;
117} 118}
118 119
119static void deflate_exit(void *ctx) 120static void deflate_exit(struct crypto_tfm *tfm)
120{ 121{
122 struct deflate_ctx *ctx = crypto_tfm_ctx(tfm);
123
121 deflate_comp_exit(ctx); 124 deflate_comp_exit(ctx);
122 deflate_decomp_exit(ctx); 125 deflate_decomp_exit(ctx);
123} 126}
124 127
125static int deflate_compress(void *ctx, const u8 *src, unsigned int slen, 128static int deflate_compress(struct crypto_tfm *tfm, const u8 *src,
126 u8 *dst, unsigned int *dlen) 129 unsigned int slen, u8 *dst, unsigned int *dlen)
127{ 130{
128 int ret = 0; 131 int ret = 0;
129 struct deflate_ctx *dctx = ctx; 132 struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
130 struct z_stream_s *stream = &dctx->comp_stream; 133 struct z_stream_s *stream = &dctx->comp_stream;
131 134
132 ret = zlib_deflateReset(stream); 135 ret = zlib_deflateReset(stream);
@@ -151,12 +154,12 @@ out:
151 return ret; 154 return ret;
152} 155}
153 156
154static int deflate_decompress(void *ctx, const u8 *src, unsigned int slen, 157static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src,
155 u8 *dst, unsigned int *dlen) 158 unsigned int slen, u8 *dst, unsigned int *dlen)
156{ 159{
157 160
158 int ret = 0; 161 int ret = 0;
159 struct deflate_ctx *dctx = ctx; 162 struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
160 struct z_stream_s *stream = &dctx->decomp_stream; 163 struct z_stream_s *stream = &dctx->decomp_stream;
161 164
162 ret = zlib_inflateReset(stream); 165 ret = zlib_inflateReset(stream);
@@ -198,9 +201,9 @@ static struct crypto_alg alg = {
198 .cra_ctxsize = sizeof(struct deflate_ctx), 201 .cra_ctxsize = sizeof(struct deflate_ctx),
199 .cra_module = THIS_MODULE, 202 .cra_module = THIS_MODULE,
200 .cra_list = LIST_HEAD_INIT(alg.cra_list), 203 .cra_list = LIST_HEAD_INIT(alg.cra_list),
204 .cra_init = deflate_init,
205 .cra_exit = deflate_exit,
201 .cra_u = { .compress = { 206 .cra_u = { .compress = {
202 .coa_init = deflate_init,
203 .coa_exit = deflate_exit,
204 .coa_compress = deflate_compress, 207 .coa_compress = deflate_compress,
205 .coa_decompress = deflate_decompress } } 208 .coa_decompress = deflate_decompress } }
206}; 209};
diff --git a/crypto/des.c b/crypto/des.c
index 2d74cab40c..a9d3c235a6 100644
--- a/crypto/des.c
+++ b/crypto/des.c
@@ -783,9 +783,10 @@ static void dkey(u32 *pe, const u8 *k)
783 } 783 }
784} 784}
785 785
786static int des_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags) 786static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
787 unsigned int keylen, u32 *flags)
787{ 788{
788 struct des_ctx *dctx = ctx; 789 struct des_ctx *dctx = crypto_tfm_ctx(tfm);
789 u32 tmp[DES_EXPKEY_WORDS]; 790 u32 tmp[DES_EXPKEY_WORDS];
790 int ret; 791 int ret;
791 792
@@ -803,9 +804,10 @@ static int des_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags)
803 return 0; 804 return 0;
804} 805}
805 806
806static void des_encrypt(void *ctx, u8 *dst, const u8 *src) 807static void des_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
807{ 808{
808 const u32 *K = ((struct des_ctx *)ctx)->expkey; 809 struct des_ctx *ctx = crypto_tfm_ctx(tfm);
810 const u32 *K = ctx->expkey;
809 const __le32 *s = (const __le32 *)src; 811 const __le32 *s = (const __le32 *)src;
810 __le32 *d = (__le32 *)dst; 812 __le32 *d = (__le32 *)dst;
811 u32 L, R, A, B; 813 u32 L, R, A, B;
@@ -825,9 +827,10 @@ static void des_encrypt(void *ctx, u8 *dst, const u8 *src)
825 d[1] = cpu_to_le32(L); 827 d[1] = cpu_to_le32(L);
826} 828}
827 829
828static void des_decrypt(void *ctx, u8 *dst, const u8 *src) 830static void des_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
829{ 831{
830 const u32 *K = ((struct des_ctx *)ctx)->expkey + DES_EXPKEY_WORDS - 2; 832 struct des_ctx *ctx = crypto_tfm_ctx(tfm);
833 const u32 *K = ctx->expkey + DES_EXPKEY_WORDS - 2;
831 const __le32 *s = (const __le32 *)src; 834 const __le32 *s = (const __le32 *)src;
832 __le32 *d = (__le32 *)dst; 835 __le32 *d = (__le32 *)dst;
833 u32 L, R, A, B; 836 u32 L, R, A, B;
@@ -860,11 +863,11 @@ static void des_decrypt(void *ctx, u8 *dst, const u8 *src)
860 * property. 863 * property.
861 * 864 *
862 */ 865 */
863static int des3_ede_setkey(void *ctx, const u8 *key, 866static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key,
864 unsigned int keylen, u32 *flags) 867 unsigned int keylen, u32 *flags)
865{ 868{
866 const u32 *K = (const u32 *)key; 869 const u32 *K = (const u32 *)key;
867 struct des3_ede_ctx *dctx = ctx; 870 struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm);
868 u32 *expkey = dctx->expkey; 871 u32 *expkey = dctx->expkey;
869 872
870 if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || 873 if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
@@ -881,9 +884,9 @@ static int des3_ede_setkey(void *ctx, const u8 *key,
881 return 0; 884 return 0;
882} 885}
883 886
884static void des3_ede_encrypt(void *ctx, u8 *dst, const u8 *src) 887static void des3_ede_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
885{ 888{
886 struct des3_ede_ctx *dctx = ctx; 889 struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm);
887 const u32 *K = dctx->expkey; 890 const u32 *K = dctx->expkey;
888 const __le32 *s = (const __le32 *)src; 891 const __le32 *s = (const __le32 *)src;
889 __le32 *d = (__le32 *)dst; 892 __le32 *d = (__le32 *)dst;
@@ -912,9 +915,9 @@ static void des3_ede_encrypt(void *ctx, u8 *dst, const u8 *src)
912 d[1] = cpu_to_le32(L); 915 d[1] = cpu_to_le32(L);
913} 916}
914 917
915static void des3_ede_decrypt(void *ctx, u8 *dst, const u8 *src) 918static void des3_ede_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
916{ 919{
917 struct des3_ede_ctx *dctx = ctx; 920 struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm);
918 const u32 *K = dctx->expkey + DES3_EDE_EXPKEY_WORDS - 2; 921 const u32 *K = dctx->expkey + DES3_EDE_EXPKEY_WORDS - 2;
919 const __le32 *s = (const __le32 *)src; 922 const __le32 *s = (const __le32 *)src;
920 __le32 *d = (__le32 *)dst; 923 __le32 *d = (__le32 *)dst;
diff --git a/crypto/digest.c b/crypto/digest.c
index d9b6ac9dbf..603006a7be 100644
--- a/crypto/digest.c
+++ b/crypto/digest.c
@@ -20,13 +20,14 @@
20 20
21static void init(struct crypto_tfm *tfm) 21static void init(struct crypto_tfm *tfm)
22{ 22{
23 tfm->__crt_alg->cra_digest.dia_init(crypto_tfm_ctx(tfm)); 23 tfm->__crt_alg->cra_digest.dia_init(tfm);
24} 24}
25 25
26static void update(struct crypto_tfm *tfm, 26static void update(struct crypto_tfm *tfm,
27 struct scatterlist *sg, unsigned int nsg) 27 struct scatterlist *sg, unsigned int nsg)
28{ 28{
29 unsigned int i; 29 unsigned int i;
30 unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);
30 31
31 for (i = 0; i < nsg; i++) { 32 for (i = 0; i < nsg; i++) {
32 33
@@ -38,12 +39,22 @@ static void update(struct crypto_tfm *tfm,
38 unsigned int bytes_from_page = min(l, ((unsigned int) 39 unsigned int bytes_from_page = min(l, ((unsigned int)
39 (PAGE_SIZE)) - 40 (PAGE_SIZE)) -
40 offset); 41 offset);
41 char *p = crypto_kmap(pg, 0) + offset; 42 char *src = crypto_kmap(pg, 0);
43 char *p = src + offset;
42 44
43 tfm->__crt_alg->cra_digest.dia_update 45 if (unlikely(offset & alignmask)) {
44 (crypto_tfm_ctx(tfm), p, 46 unsigned int bytes =
45 bytes_from_page); 47 alignmask + 1 - (offset & alignmask);
46 crypto_kunmap(p, 0); 48 bytes = min(bytes, bytes_from_page);
49 tfm->__crt_alg->cra_digest.dia_update(tfm, p,
50 bytes);
51 p += bytes;
52 bytes_from_page -= bytes;
53 l -= bytes;
54 }
55 tfm->__crt_alg->cra_digest.dia_update(tfm, p,
56 bytes_from_page);
57 crypto_kunmap(src, 0);
47 crypto_yield(tfm); 58 crypto_yield(tfm);
48 offset = 0; 59 offset = 0;
49 pg++; 60 pg++;
@@ -54,7 +65,15 @@ static void update(struct crypto_tfm *tfm,
54 65
55static void final(struct crypto_tfm *tfm, u8 *out) 66static void final(struct crypto_tfm *tfm, u8 *out)
56{ 67{
57 tfm->__crt_alg->cra_digest.dia_final(crypto_tfm_ctx(tfm), out); 68 unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
69 if (unlikely((unsigned long)out & alignmask)) {
70 unsigned int size = crypto_tfm_alg_digestsize(tfm);
71 u8 buffer[size + alignmask];
72 u8 *dst = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
73 tfm->__crt_alg->cra_digest.dia_final(tfm, dst);
74 memcpy(out, dst, size);
75 } else
76 tfm->__crt_alg->cra_digest.dia_final(tfm, out);
58} 77}
59 78
60static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) 79static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
@@ -62,25 +81,15 @@ static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
62 u32 flags; 81 u32 flags;
63 if (tfm->__crt_alg->cra_digest.dia_setkey == NULL) 82 if (tfm->__crt_alg->cra_digest.dia_setkey == NULL)
64 return -ENOSYS; 83 return -ENOSYS;
65 return tfm->__crt_alg->cra_digest.dia_setkey(crypto_tfm_ctx(tfm), 84 return tfm->__crt_alg->cra_digest.dia_setkey(tfm, key, keylen, &flags);
66 key, keylen, &flags);
67} 85}
68 86
69static void digest(struct crypto_tfm *tfm, 87static void digest(struct crypto_tfm *tfm,
70 struct scatterlist *sg, unsigned int nsg, u8 *out) 88 struct scatterlist *sg, unsigned int nsg, u8 *out)
71{ 89{
72 unsigned int i; 90 init(tfm);
73 91 update(tfm, sg, nsg);
74 tfm->crt_digest.dit_init(tfm); 92 final(tfm, out);
75
76 for (i = 0; i < nsg; i++) {
77 char *p = crypto_kmap(sg[i].page, 0) + sg[i].offset;
78 tfm->__crt_alg->cra_digest.dia_update(crypto_tfm_ctx(tfm),
79 p, sg[i].length);
80 crypto_kunmap(p, 0);
81 crypto_yield(tfm);
82 }
83 crypto_digest_final(tfm, out);
84} 93}
85 94
86int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags) 95int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags)
diff --git a/crypto/khazad.c b/crypto/khazad.c
index 807f2bf4ea..d4c9d3657b 100644
--- a/crypto/khazad.c
+++ b/crypto/khazad.c
@@ -754,11 +754,11 @@ static const u64 c[KHAZAD_ROUNDS + 1] = {
754 0xccc41d14c363da5dULL, 0x5fdc7dcd7f5a6c5cULL, 0xf726ffede89d6f8eULL 754 0xccc41d14c363da5dULL, 0x5fdc7dcd7f5a6c5cULL, 0xf726ffede89d6f8eULL
755}; 755};
756 756
757static int khazad_setkey(void *ctx_arg, const u8 *in_key, 757static int khazad_setkey(struct crypto_tfm *tfm, const u8 *in_key,
758 unsigned int key_len, u32 *flags) 758 unsigned int key_len, u32 *flags)
759{ 759{
760 struct khazad_ctx *ctx = ctx_arg; 760 struct khazad_ctx *ctx = crypto_tfm_ctx(tfm);
761 const __be64 *key = (const __be64 *)in_key; 761 const __be32 *key = (const __be32 *)in_key;
762 int r; 762 int r;
763 const u64 *S = T7; 763 const u64 *S = T7;
764 u64 K2, K1; 764 u64 K2, K1;
@@ -769,8 +769,9 @@ static int khazad_setkey(void *ctx_arg, const u8 *in_key,
769 return -EINVAL; 769 return -EINVAL;
770 } 770 }
771 771
772 K2 = be64_to_cpu(key[0]); 772 /* key is supposed to be 32-bit aligned */
773 K1 = be64_to_cpu(key[1]); 773 K2 = ((u64)be32_to_cpu(key[0]) << 32) | be32_to_cpu(key[1]);
774 K1 = ((u64)be32_to_cpu(key[2]) << 32) | be32_to_cpu(key[3]);
774 775
775 /* setup the encrypt key */ 776 /* setup the encrypt key */
776 for (r = 0; r <= KHAZAD_ROUNDS; r++) { 777 for (r = 0; r <= KHAZAD_ROUNDS; r++) {
@@ -840,15 +841,15 @@ static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1],
840 *dst = cpu_to_be64(state); 841 *dst = cpu_to_be64(state);
841} 842}
842 843
843static void khazad_encrypt(void *ctx_arg, u8 *dst, const u8 *src) 844static void khazad_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
844{ 845{
845 struct khazad_ctx *ctx = ctx_arg; 846 struct khazad_ctx *ctx = crypto_tfm_ctx(tfm);
846 khazad_crypt(ctx->E, dst, src); 847 khazad_crypt(ctx->E, dst, src);
847} 848}
848 849
849static void khazad_decrypt(void *ctx_arg, u8 *dst, const u8 *src) 850static void khazad_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
850{ 851{
851 struct khazad_ctx *ctx = ctx_arg; 852 struct khazad_ctx *ctx = crypto_tfm_ctx(tfm);
852 khazad_crypt(ctx->D, dst, src); 853 khazad_crypt(ctx->D, dst, src);
853} 854}
854 855
diff --git a/crypto/md4.c b/crypto/md4.c
index a2d6df5c0f..c1bc71bdc1 100644
--- a/crypto/md4.c
+++ b/crypto/md4.c
@@ -152,9 +152,9 @@ static inline void md4_transform_helper(struct md4_ctx *ctx)
152 md4_transform(ctx->hash, ctx->block); 152 md4_transform(ctx->hash, ctx->block);
153} 153}
154 154
155static void md4_init(void *ctx) 155static void md4_init(struct crypto_tfm *tfm)
156{ 156{
157 struct md4_ctx *mctx = ctx; 157 struct md4_ctx *mctx = crypto_tfm_ctx(tfm);
158 158
159 mctx->hash[0] = 0x67452301; 159 mctx->hash[0] = 0x67452301;
160 mctx->hash[1] = 0xefcdab89; 160 mctx->hash[1] = 0xefcdab89;
@@ -163,9 +163,9 @@ static void md4_init(void *ctx)
163 mctx->byte_count = 0; 163 mctx->byte_count = 0;
164} 164}
165 165
166static void md4_update(void *ctx, const u8 *data, unsigned int len) 166static void md4_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len)
167{ 167{
168 struct md4_ctx *mctx = ctx; 168 struct md4_ctx *mctx = crypto_tfm_ctx(tfm);
169 const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f); 169 const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
170 170
171 mctx->byte_count += len; 171 mctx->byte_count += len;
@@ -193,9 +193,9 @@ static void md4_update(void *ctx, const u8 *data, unsigned int len)
193 memcpy(mctx->block, data, len); 193 memcpy(mctx->block, data, len);
194} 194}
195 195
196static void md4_final(void *ctx, u8 *out) 196static void md4_final(struct crypto_tfm *tfm, u8 *out)
197{ 197{
198 struct md4_ctx *mctx = ctx; 198 struct md4_ctx *mctx = crypto_tfm_ctx(tfm);
199 const unsigned int offset = mctx->byte_count & 0x3f; 199 const unsigned int offset = mctx->byte_count & 0x3f;
200 char *p = (char *)mctx->block + offset; 200 char *p = (char *)mctx->block + offset;
201 int padding = 56 - (offset + 1); 201 int padding = 56 - (offset + 1);
diff --git a/crypto/md5.c b/crypto/md5.c
index 7f041aef5d..93d18e8b3d 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -147,9 +147,9 @@ static inline void md5_transform_helper(struct md5_ctx *ctx)
147 md5_transform(ctx->hash, ctx->block); 147 md5_transform(ctx->hash, ctx->block);
148} 148}
149 149
150static void md5_init(void *ctx) 150static void md5_init(struct crypto_tfm *tfm)
151{ 151{
152 struct md5_ctx *mctx = ctx; 152 struct md5_ctx *mctx = crypto_tfm_ctx(tfm);
153 153
154 mctx->hash[0] = 0x67452301; 154 mctx->hash[0] = 0x67452301;
155 mctx->hash[1] = 0xefcdab89; 155 mctx->hash[1] = 0xefcdab89;
@@ -158,9 +158,9 @@ static void md5_init(void *ctx)
158 mctx->byte_count = 0; 158 mctx->byte_count = 0;
159} 159}
160 160
161static void md5_update(void *ctx, const u8 *data, unsigned int len) 161static void md5_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len)
162{ 162{
163 struct md5_ctx *mctx = ctx; 163 struct md5_ctx *mctx = crypto_tfm_ctx(tfm);
164 const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f); 164 const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
165 165
166 mctx->byte_count += len; 166 mctx->byte_count += len;
@@ -188,9 +188,9 @@ static void md5_update(void *ctx, const u8 *data, unsigned int len)
188 memcpy(mctx->block, data, len); 188 memcpy(mctx->block, data, len);
189} 189}
190 190
191static void md5_final(void *ctx, u8 *out) 191static void md5_final(struct crypto_tfm *tfm, u8 *out)
192{ 192{
193 struct md5_ctx *mctx = ctx; 193 struct md5_ctx *mctx = crypto_tfm_ctx(tfm);
194 const unsigned int offset = mctx->byte_count & 0x3f; 194 const unsigned int offset = mctx->byte_count & 0x3f;
195 char *p = (char *)mctx->block + offset; 195 char *p = (char *)mctx->block + offset;
196 int padding = 56 - (offset + 1); 196 int padding = 56 - (offset + 1);
diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c
index 4f6ab23e14..d061da21cf 100644
--- a/crypto/michael_mic.c
+++ b/crypto/michael_mic.c
@@ -45,16 +45,17 @@ do { \
45} while (0) 45} while (0)
46 46
47 47
48static void michael_init(void *ctx) 48static void michael_init(struct crypto_tfm *tfm)
49{ 49{
50 struct michael_mic_ctx *mctx = ctx; 50 struct michael_mic_ctx *mctx = crypto_tfm_ctx(tfm);
51 mctx->pending_len = 0; 51 mctx->pending_len = 0;
52} 52}
53 53
54 54
55static void michael_update(void *ctx, const u8 *data, unsigned int len) 55static void michael_update(struct crypto_tfm *tfm, const u8 *data,
56 unsigned int len)
56{ 57{
57 struct michael_mic_ctx *mctx = ctx; 58 struct michael_mic_ctx *mctx = crypto_tfm_ctx(tfm);
58 const __le32 *src; 59 const __le32 *src;
59 60
60 if (mctx->pending_len) { 61 if (mctx->pending_len) {
@@ -90,9 +91,9 @@ static void michael_update(void *ctx, const u8 *data, unsigned int len)
90} 91}
91 92
92 93
93static void michael_final(void *ctx, u8 *out) 94static void michael_final(struct crypto_tfm *tfm, u8 *out)
94{ 95{
95 struct michael_mic_ctx *mctx = ctx; 96 struct michael_mic_ctx *mctx = crypto_tfm_ctx(tfm);
96 u8 *data = mctx->pending; 97 u8 *data = mctx->pending;
97 __le32 *dst = (__le32 *)out; 98 __le32 *dst = (__le32 *)out;
98 99
@@ -121,10 +122,10 @@ static void michael_final(void *ctx, u8 *out)
121} 122}
122 123
123 124
124static int michael_setkey(void *ctx, const u8 *key, unsigned int keylen, 125static int michael_setkey(struct crypto_tfm *tfm, const u8 *key,
125 u32 *flags) 126 unsigned int keylen, u32 *flags)
126{ 127{
127 struct michael_mic_ctx *mctx = ctx; 128 struct michael_mic_ctx *mctx = crypto_tfm_ctx(tfm);
128 const __le32 *data = (const __le32 *)key; 129 const __le32 *data = (const __le32 *)key;
129 130
130 if (keylen != 8) { 131 if (keylen != 8) {
@@ -145,6 +146,7 @@ static struct crypto_alg michael_mic_alg = {
145 .cra_blocksize = 8, 146 .cra_blocksize = 8,
146 .cra_ctxsize = sizeof(struct michael_mic_ctx), 147 .cra_ctxsize = sizeof(struct michael_mic_ctx),
147 .cra_module = THIS_MODULE, 148 .cra_module = THIS_MODULE,
149 .cra_alignmask = 3,
148 .cra_list = LIST_HEAD_INIT(michael_mic_alg.cra_list), 150 .cra_list = LIST_HEAD_INIT(michael_mic_alg.cra_list),
149 .cra_u = { .digest = { 151 .cra_u = { .digest = {
150 .dia_digestsize = 8, 152 .dia_digestsize = 8,
diff --git a/crypto/serpent.c b/crypto/serpent.c
index e366406ab4..de60cdddbf 100644
--- a/crypto/serpent.c
+++ b/crypto/serpent.c
@@ -215,9 +215,11 @@ struct serpent_ctx {
215}; 215};
216 216
217 217
218static int serpent_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags) 218static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
219 unsigned int keylen, u32 *flags)
219{ 220{
220 u32 *k = ((struct serpent_ctx *)ctx)->expkey; 221 struct serpent_ctx *ctx = crypto_tfm_ctx(tfm);
222 u32 *k = ctx->expkey;
221 u8 *k8 = (u8 *)k; 223 u8 *k8 = (u8 *)k;
222 u32 r0,r1,r2,r3,r4; 224 u32 r0,r1,r2,r3,r4;
223 int i; 225 int i;
@@ -365,10 +367,11 @@ static int serpent_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *fl
365 return 0; 367 return 0;
366} 368}
367 369
368static void serpent_encrypt(void *ctx, u8 *dst, const u8 *src) 370static void serpent_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
369{ 371{
372 struct serpent_ctx *ctx = crypto_tfm_ctx(tfm);
370 const u32 373 const u32
371 *k = ((struct serpent_ctx *)ctx)->expkey, 374 *k = ctx->expkey,
372 *s = (const u32 *)src; 375 *s = (const u32 *)src;
373 u32 *d = (u32 *)dst, 376 u32 *d = (u32 *)dst,
374 r0, r1, r2, r3, r4; 377 r0, r1, r2, r3, r4;
@@ -423,8 +426,9 @@ static void serpent_encrypt(void *ctx, u8 *dst, const u8 *src)
423 d[3] = cpu_to_le32(r3); 426 d[3] = cpu_to_le32(r3);
424} 427}
425 428
426static void serpent_decrypt(void *ctx, u8 *dst, const u8 *src) 429static void serpent_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
427{ 430{
431 struct serpent_ctx *ctx = crypto_tfm_ctx(tfm);
428 const u32 432 const u32
429 *k = ((struct serpent_ctx *)ctx)->expkey, 433 *k = ((struct serpent_ctx *)ctx)->expkey,
430 *s = (const u32 *)src; 434 *s = (const u32 *)src;
@@ -492,7 +496,8 @@ static struct crypto_alg serpent_alg = {
492 .cia_decrypt = serpent_decrypt } } 496 .cia_decrypt = serpent_decrypt } }
493}; 497};
494 498
495static int tnepres_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags) 499static int tnepres_setkey(struct crypto_tfm *tfm, const u8 *key,
500 unsigned int keylen, u32 *flags)
496{ 501{
497 u8 rev_key[SERPENT_MAX_KEY_SIZE]; 502 u8 rev_key[SERPENT_MAX_KEY_SIZE];
498 int i; 503 int i;
@@ -506,10 +511,10 @@ static int tnepres_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *fl
506 for (i = 0; i < keylen; ++i) 511 for (i = 0; i < keylen; ++i)
507 rev_key[keylen - i - 1] = key[i]; 512 rev_key[keylen - i - 1] = key[i];
508 513
509 return serpent_setkey(ctx, rev_key, keylen, flags); 514 return serpent_setkey(tfm, rev_key, keylen, flags);
510} 515}
511 516
512static void tnepres_encrypt(void *ctx, u8 *dst, const u8 *src) 517static void tnepres_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
513{ 518{
514 const u32 * const s = (const u32 * const)src; 519 const u32 * const s = (const u32 * const)src;
515 u32 * const d = (u32 * const)dst; 520 u32 * const d = (u32 * const)dst;
@@ -521,7 +526,7 @@ static void tnepres_encrypt(void *ctx, u8 *dst, const u8 *src)
521 rs[2] = swab32(s[1]); 526 rs[2] = swab32(s[1]);
522 rs[3] = swab32(s[0]); 527 rs[3] = swab32(s[0]);
523 528
524 serpent_encrypt(ctx, (u8 *)rd, (u8 *)rs); 529 serpent_encrypt(tfm, (u8 *)rd, (u8 *)rs);
525 530
526 d[0] = swab32(rd[3]); 531 d[0] = swab32(rd[3]);
527 d[1] = swab32(rd[2]); 532 d[1] = swab32(rd[2]);
@@ -529,7 +534,7 @@ static void tnepres_encrypt(void *ctx, u8 *dst, const u8 *src)
529 d[3] = swab32(rd[0]); 534 d[3] = swab32(rd[0]);
530} 535}
531 536
532static void tnepres_decrypt(void *ctx, u8 *dst, const u8 *src) 537static void tnepres_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
533{ 538{
534 const u32 * const s = (const u32 * const)src; 539 const u32 * const s = (const u32 * const)src;
535 u32 * const d = (u32 * const)dst; 540 u32 * const d = (u32 * const)dst;
@@ -541,7 +546,7 @@ static void tnepres_decrypt(void *ctx, u8 *dst, const u8 *src)
541 rs[2] = swab32(s[1]); 546 rs[2] = swab32(s[1]);
542 rs[3] = swab32(s[0]); 547 rs[3] = swab32(s[0]);
543 548
544 serpent_decrypt(ctx, (u8 *)rd, (u8 *)rs); 549 serpent_decrypt(tfm, (u8 *)rd, (u8 *)rs);
545 550
546 d[0] = swab32(rd[3]); 551 d[0] = swab32(rd[3]);
547 d[1] = swab32(rd[2]); 552 d[1] = swab32(rd[2]);
diff --git a/crypto/sha1.c b/crypto/sha1.c
index 21571ed35b..6c77b689f8 100644
--- a/crypto/sha1.c
+++ b/crypto/sha1.c
@@ -34,9 +34,9 @@ struct sha1_ctx {
34 u8 buffer[64]; 34 u8 buffer[64];
35}; 35};
36 36
37static void sha1_init(void *ctx) 37static void sha1_init(struct crypto_tfm *tfm)
38{ 38{
39 struct sha1_ctx *sctx = ctx; 39 struct sha1_ctx *sctx = crypto_tfm_ctx(tfm);
40 static const struct sha1_ctx initstate = { 40 static const struct sha1_ctx initstate = {
41 0, 41 0,
42 { 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0 }, 42 { 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0 },
@@ -46,9 +46,10 @@ static void sha1_init(void *ctx)
46 *sctx = initstate; 46 *sctx = initstate;
47} 47}
48 48
49static void sha1_update(void *ctx, const u8 *data, unsigned int len) 49static void sha1_update(struct crypto_tfm *tfm, const u8 *data,
50 unsigned int len)
50{ 51{
51 struct sha1_ctx *sctx = ctx; 52 struct sha1_ctx *sctx = crypto_tfm_ctx(tfm);
52 unsigned int partial, done; 53 unsigned int partial, done;
53 const u8 *src; 54 const u8 *src;
54 55
@@ -80,9 +81,9 @@ static void sha1_update(void *ctx, const u8 *data, unsigned int len)
80 81
81 82
82/* Add padding and return the message digest. */ 83/* Add padding and return the message digest. */
83static void sha1_final(void* ctx, u8 *out) 84static void sha1_final(struct crypto_tfm *tfm, u8 *out)
84{ 85{
85 struct sha1_ctx *sctx = ctx; 86 struct sha1_ctx *sctx = crypto_tfm_ctx(tfm);
86 __be32 *dst = (__be32 *)out; 87 __be32 *dst = (__be32 *)out;
87 u32 i, index, padlen; 88 u32 i, index, padlen;
88 __be64 bits; 89 __be64 bits;
@@ -93,10 +94,10 @@ static void sha1_final(void* ctx, u8 *out)
93 /* Pad out to 56 mod 64 */ 94 /* Pad out to 56 mod 64 */
94 index = sctx->count & 0x3f; 95 index = sctx->count & 0x3f;
95 padlen = (index < 56) ? (56 - index) : ((64+56) - index); 96 padlen = (index < 56) ? (56 - index) : ((64+56) - index);
96 sha1_update(sctx, padding, padlen); 97 sha1_update(tfm, padding, padlen);
97 98
98 /* Append length */ 99 /* Append length */
99 sha1_update(sctx, (const u8 *)&bits, sizeof(bits)); 100 sha1_update(tfm, (const u8 *)&bits, sizeof(bits));
100 101
101 /* Store state in digest */ 102 /* Store state in digest */
102 for (i = 0; i < 5; i++) 103 for (i = 0; i < 5; i++)
@@ -112,6 +113,7 @@ static struct crypto_alg alg = {
112 .cra_blocksize = SHA1_HMAC_BLOCK_SIZE, 113 .cra_blocksize = SHA1_HMAC_BLOCK_SIZE,
113 .cra_ctxsize = sizeof(struct sha1_ctx), 114 .cra_ctxsize = sizeof(struct sha1_ctx),
114 .cra_module = THIS_MODULE, 115 .cra_module = THIS_MODULE,
116 .cra_alignmask = 3,
115 .cra_list = LIST_HEAD_INIT(alg.cra_list), 117 .cra_list = LIST_HEAD_INIT(alg.cra_list),
116 .cra_u = { .digest = { 118 .cra_u = { .digest = {
117 .dia_digestsize = SHA1_DIGEST_SIZE, 119 .dia_digestsize = SHA1_DIGEST_SIZE,
diff --git a/crypto/sha256.c b/crypto/sha256.c
index 9d5ef674d6..bc71d85a7d 100644
--- a/crypto/sha256.c
+++ b/crypto/sha256.c
@@ -230,9 +230,9 @@ static void sha256_transform(u32 *state, const u8 *input)
230 memset(W, 0, 64 * sizeof(u32)); 230 memset(W, 0, 64 * sizeof(u32));
231} 231}
232 232
233static void sha256_init(void *ctx) 233static void sha256_init(struct crypto_tfm *tfm)
234{ 234{
235 struct sha256_ctx *sctx = ctx; 235 struct sha256_ctx *sctx = crypto_tfm_ctx(tfm);
236 sctx->state[0] = H0; 236 sctx->state[0] = H0;
237 sctx->state[1] = H1; 237 sctx->state[1] = H1;
238 sctx->state[2] = H2; 238 sctx->state[2] = H2;
@@ -242,12 +242,12 @@ static void sha256_init(void *ctx)
242 sctx->state[6] = H6; 242 sctx->state[6] = H6;
243 sctx->state[7] = H7; 243 sctx->state[7] = H7;
244 sctx->count[0] = sctx->count[1] = 0; 244 sctx->count[0] = sctx->count[1] = 0;
245 memset(sctx->buf, 0, sizeof(sctx->buf));
246} 245}
247 246
248static void sha256_update(void *ctx, const u8 *data, unsigned int len) 247static void sha256_update(struct crypto_tfm *tfm, const u8 *data,
248 unsigned int len)
249{ 249{
250 struct sha256_ctx *sctx = ctx; 250 struct sha256_ctx *sctx = crypto_tfm_ctx(tfm);
251 unsigned int i, index, part_len; 251 unsigned int i, index, part_len;
252 252
253 /* Compute number of bytes mod 128 */ 253 /* Compute number of bytes mod 128 */
@@ -277,9 +277,9 @@ static void sha256_update(void *ctx, const u8 *data, unsigned int len)
277 memcpy(&sctx->buf[index], &data[i], len-i); 277 memcpy(&sctx->buf[index], &data[i], len-i);
278} 278}
279 279
280static void sha256_final(void* ctx, u8 *out) 280static void sha256_final(struct crypto_tfm *tfm, u8 *out)
281{ 281{
282 struct sha256_ctx *sctx = ctx; 282 struct sha256_ctx *sctx = crypto_tfm_ctx(tfm);
283 __be32 *dst = (__be32 *)out; 283 __be32 *dst = (__be32 *)out;
284 __be32 bits[2]; 284 __be32 bits[2];
285 unsigned int index, pad_len; 285 unsigned int index, pad_len;
@@ -293,10 +293,10 @@ static void sha256_final(void* ctx, u8 *out)
293 /* Pad out to 56 mod 64. */ 293 /* Pad out to 56 mod 64. */
294 index = (sctx->count[0] >> 3) & 0x3f; 294 index = (sctx->count[0] >> 3) & 0x3f;
295 pad_len = (index < 56) ? (56 - index) : ((64+56) - index); 295 pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
296 sha256_update(sctx, padding, pad_len); 296 sha256_update(tfm, padding, pad_len);
297 297
298 /* Append length (before padding) */ 298 /* Append length (before padding) */
299 sha256_update(sctx, (const u8 *)bits, sizeof(bits)); 299 sha256_update(tfm, (const u8 *)bits, sizeof(bits));
300 300
301 /* Store state in digest */ 301 /* Store state in digest */
302 for (i = 0; i < 8; i++) 302 for (i = 0; i < 8; i++)
@@ -313,6 +313,7 @@ static struct crypto_alg alg = {
313 .cra_blocksize = SHA256_HMAC_BLOCK_SIZE, 313 .cra_blocksize = SHA256_HMAC_BLOCK_SIZE,
314 .cra_ctxsize = sizeof(struct sha256_ctx), 314 .cra_ctxsize = sizeof(struct sha256_ctx),
315 .cra_module = THIS_MODULE, 315 .cra_module = THIS_MODULE,
316 .cra_alignmask = 3,
316 .cra_list = LIST_HEAD_INIT(alg.cra_list), 317 .cra_list = LIST_HEAD_INIT(alg.cra_list),
317 .cra_u = { .digest = { 318 .cra_u = { .digest = {
318 .dia_digestsize = SHA256_DIGEST_SIZE, 319 .dia_digestsize = SHA256_DIGEST_SIZE,
diff --git a/crypto/sha512.c b/crypto/sha512.c
index 3e6e939231..2dfe7f170b 100644
--- a/crypto/sha512.c
+++ b/crypto/sha512.c
@@ -161,9 +161,9 @@ sha512_transform(u64 *state, u64 *W, const u8 *input)
161} 161}
162 162
163static void 163static void
164sha512_init(void *ctx) 164sha512_init(struct crypto_tfm *tfm)
165{ 165{
166 struct sha512_ctx *sctx = ctx; 166 struct sha512_ctx *sctx = crypto_tfm_ctx(tfm);
167 sctx->state[0] = H0; 167 sctx->state[0] = H0;
168 sctx->state[1] = H1; 168 sctx->state[1] = H1;
169 sctx->state[2] = H2; 169 sctx->state[2] = H2;
@@ -173,13 +173,12 @@ sha512_init(void *ctx)
173 sctx->state[6] = H6; 173 sctx->state[6] = H6;
174 sctx->state[7] = H7; 174 sctx->state[7] = H7;
175 sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0; 175 sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0;
176 memset(sctx->buf, 0, sizeof(sctx->buf));
177} 176}
178 177
179static void 178static void
180sha384_init(void *ctx) 179sha384_init(struct crypto_tfm *tfm)
181{ 180{
182 struct sha512_ctx *sctx = ctx; 181 struct sha512_ctx *sctx = crypto_tfm_ctx(tfm);
183 sctx->state[0] = HP0; 182 sctx->state[0] = HP0;
184 sctx->state[1] = HP1; 183 sctx->state[1] = HP1;
185 sctx->state[2] = HP2; 184 sctx->state[2] = HP2;
@@ -189,13 +188,12 @@ sha384_init(void *ctx)
189 sctx->state[6] = HP6; 188 sctx->state[6] = HP6;
190 sctx->state[7] = HP7; 189 sctx->state[7] = HP7;
191 sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0; 190 sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0;
192 memset(sctx->buf, 0, sizeof(sctx->buf));
193} 191}
194 192
195static void 193static void
196sha512_update(void *ctx, const u8 *data, unsigned int len) 194sha512_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len)
197{ 195{
198 struct sha512_ctx *sctx = ctx; 196 struct sha512_ctx *sctx = crypto_tfm_ctx(tfm);
199 197
200 unsigned int i, index, part_len; 198 unsigned int i, index, part_len;
201 199
@@ -233,9 +231,9 @@ sha512_update(void *ctx, const u8 *data, unsigned int len)
233} 231}
234 232
235static void 233static void
236sha512_final(void *ctx, u8 *hash) 234sha512_final(struct crypto_tfm *tfm, u8 *hash)
237{ 235{
238 struct sha512_ctx *sctx = ctx; 236 struct sha512_ctx *sctx = crypto_tfm_ctx(tfm);
239 static u8 padding[128] = { 0x80, }; 237 static u8 padding[128] = { 0x80, };
240 __be64 *dst = (__be64 *)hash; 238 __be64 *dst = (__be64 *)hash;
241 __be32 bits[4]; 239 __be32 bits[4];
@@ -251,10 +249,10 @@ sha512_final(void *ctx, u8 *hash)
251 /* Pad out to 112 mod 128. */ 249 /* Pad out to 112 mod 128. */
252 index = (sctx->count[0] >> 3) & 0x7f; 250 index = (sctx->count[0] >> 3) & 0x7f;
253 pad_len = (index < 112) ? (112 - index) : ((128+112) - index); 251 pad_len = (index < 112) ? (112 - index) : ((128+112) - index);
254 sha512_update(sctx, padding, pad_len); 252 sha512_update(tfm, padding, pad_len);
255 253
256 /* Append length (before padding) */ 254 /* Append length (before padding) */
257 sha512_update(sctx, (const u8 *)bits, sizeof(bits)); 255 sha512_update(tfm, (const u8 *)bits, sizeof(bits));
258 256
259 /* Store state in digest */ 257 /* Store state in digest */
260 for (i = 0; i < 8; i++) 258 for (i = 0; i < 8; i++)
@@ -264,12 +262,11 @@ sha512_final(void *ctx, u8 *hash)
264 memset(sctx, 0, sizeof(struct sha512_ctx)); 262 memset(sctx, 0, sizeof(struct sha512_ctx));
265} 263}
266 264
267static void sha384_final(void *ctx, u8 *hash) 265static void sha384_final(struct crypto_tfm *tfm, u8 *hash)
268{ 266{
269 struct sha512_ctx *sctx = ctx;
270 u8 D[64]; 267 u8 D[64];
271 268
272 sha512_final(sctx, D); 269 sha512_final(tfm, D);
273 270
274 memcpy(hash, D, 48); 271 memcpy(hash, D, 48);
275 memset(D, 0, 64); 272 memset(D, 0, 64);
@@ -281,6 +278,7 @@ static struct crypto_alg sha512 = {
281 .cra_blocksize = SHA512_HMAC_BLOCK_SIZE, 278 .cra_blocksize = SHA512_HMAC_BLOCK_SIZE,
282 .cra_ctxsize = sizeof(struct sha512_ctx), 279 .cra_ctxsize = sizeof(struct sha512_ctx),
283 .cra_module = THIS_MODULE, 280 .cra_module = THIS_MODULE,
281 .cra_alignmask = 3,
284 .cra_list = LIST_HEAD_INIT(sha512.cra_list), 282 .cra_list = LIST_HEAD_INIT(sha512.cra_list),
285 .cra_u = { .digest = { 283 .cra_u = { .digest = {
286 .dia_digestsize = SHA512_DIGEST_SIZE, 284 .dia_digestsize = SHA512_DIGEST_SIZE,
@@ -295,6 +293,7 @@ static struct crypto_alg sha384 = {
295 .cra_flags = CRYPTO_ALG_TYPE_DIGEST, 293 .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
296 .cra_blocksize = SHA384_HMAC_BLOCK_SIZE, 294 .cra_blocksize = SHA384_HMAC_BLOCK_SIZE,
297 .cra_ctxsize = sizeof(struct sha512_ctx), 295 .cra_ctxsize = sizeof(struct sha512_ctx),
296 .cra_alignmask = 3,
298 .cra_module = THIS_MODULE, 297 .cra_module = THIS_MODULE,
299 .cra_list = LIST_HEAD_INIT(sha384.cra_list), 298 .cra_list = LIST_HEAD_INIT(sha384.cra_list),
300 .cra_u = { .digest = { 299 .cra_u = { .digest = {
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 49e344f008..e52f56c5bd 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -570,6 +570,122 @@ out:
570 crypto_free_tfm(tfm); 570 crypto_free_tfm(tfm);
571} 571}
572 572
573static void test_digest_jiffies(struct crypto_tfm *tfm, char *p, int blen,
574 int plen, char *out, int sec)
575{
576 struct scatterlist sg[1];
577 unsigned long start, end;
578 int bcount, pcount;
579
580 for (start = jiffies, end = start + sec * HZ, bcount = 0;
581 time_before(jiffies, end); bcount++) {
582 crypto_digest_init(tfm);
583 for (pcount = 0; pcount < blen; pcount += plen) {
584 sg_set_buf(sg, p + pcount, plen);
585 crypto_digest_update(tfm, sg, 1);
586 }
587 /* we assume there is enough space in 'out' for the result */
588 crypto_digest_final(tfm, out);
589 }
590
591 printk("%6u opers/sec, %9lu bytes/sec\n",
592 bcount / sec, ((long)bcount * blen) / sec);
593
594 return;
595}
596
597static void test_digest_cycles(struct crypto_tfm *tfm, char *p, int blen,
598 int plen, char *out)
599{
600 struct scatterlist sg[1];
601 unsigned long cycles = 0;
602 int i, pcount;
603
604 local_bh_disable();
605 local_irq_disable();
606
607 /* Warm-up run. */
608 for (i = 0; i < 4; i++) {
609 crypto_digest_init(tfm);
610 for (pcount = 0; pcount < blen; pcount += plen) {
611 sg_set_buf(sg, p + pcount, plen);
612 crypto_digest_update(tfm, sg, 1);
613 }
614 crypto_digest_final(tfm, out);
615 }
616
617 /* The real thing. */
618 for (i = 0; i < 8; i++) {
619 cycles_t start, end;
620
621 crypto_digest_init(tfm);
622
623 start = get_cycles();
624
625 for (pcount = 0; pcount < blen; pcount += plen) {
626 sg_set_buf(sg, p + pcount, plen);
627 crypto_digest_update(tfm, sg, 1);
628 }
629 crypto_digest_final(tfm, out);
630
631 end = get_cycles();
632
633 cycles += end - start;
634 }
635
636 local_irq_enable();
637 local_bh_enable();
638
639 printk("%6lu cycles/operation, %4lu cycles/byte\n",
640 cycles / 8, cycles / (8 * blen));
641
642 return;
643}
644
645static void test_digest_speed(char *algo, unsigned int sec,
646 struct digest_speed *speed)
647{
648 struct crypto_tfm *tfm;
649 char output[1024];
650 int i;
651
652 printk("\ntesting speed of %s\n", algo);
653
654 tfm = crypto_alloc_tfm(algo, 0);
655
656 if (tfm == NULL) {
657 printk("failed to load transform for %s\n", algo);
658 return;
659 }
660
661 if (crypto_tfm_alg_digestsize(tfm) > sizeof(output)) {
662 printk("digestsize(%u) > outputbuffer(%zu)\n",
663 crypto_tfm_alg_digestsize(tfm), sizeof(output));
664 goto out;
665 }
666
667 for (i = 0; speed[i].blen != 0; i++) {
668 if (speed[i].blen > TVMEMSIZE) {
669 printk("template (%u) too big for tvmem (%u)\n",
670 speed[i].blen, TVMEMSIZE);
671 goto out;
672 }
673
674 printk("test%3u (%5u byte blocks,%5u bytes per update,%4u updates): ",
675 i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
676
677 memset(tvmem, 0xff, speed[i].blen);
678
679 if (sec)
680 test_digest_jiffies(tfm, tvmem, speed[i].blen, speed[i].plen, output, sec);
681 else
682 test_digest_cycles(tfm, tvmem, speed[i].blen, speed[i].plen, output);
683 }
684
685out:
686 crypto_free_tfm(tfm);
687}
688
573static void test_deflate(void) 689static void test_deflate(void)
574{ 690{
575 unsigned int i; 691 unsigned int i;
@@ -1086,6 +1202,60 @@ static void do_test(void)
1086 des_speed_template); 1202 des_speed_template);
1087 break; 1203 break;
1088 1204
1205 case 300:
1206 /* fall through */
1207
1208 case 301:
1209 test_digest_speed("md4", sec, generic_digest_speed_template);
1210 if (mode > 300 && mode < 400) break;
1211
1212 case 302:
1213 test_digest_speed("md5", sec, generic_digest_speed_template);
1214 if (mode > 300 && mode < 400) break;
1215
1216 case 303:
1217 test_digest_speed("sha1", sec, generic_digest_speed_template);
1218 if (mode > 300 && mode < 400) break;
1219
1220 case 304:
1221 test_digest_speed("sha256", sec, generic_digest_speed_template);
1222 if (mode > 300 && mode < 400) break;
1223
1224 case 305:
1225 test_digest_speed("sha384", sec, generic_digest_speed_template);
1226 if (mode > 300 && mode < 400) break;
1227
1228 case 306:
1229 test_digest_speed("sha512", sec, generic_digest_speed_template);
1230 if (mode > 300 && mode < 400) break;
1231
1232 case 307:
1233 test_digest_speed("wp256", sec, generic_digest_speed_template);
1234 if (mode > 300 && mode < 400) break;
1235
1236 case 308:
1237 test_digest_speed("wp384", sec, generic_digest_speed_template);
1238 if (mode > 300 && mode < 400) break;
1239
1240 case 309:
1241 test_digest_speed("wp512", sec, generic_digest_speed_template);
1242 if (mode > 300 && mode < 400) break;
1243
1244 case 310:
1245 test_digest_speed("tgr128", sec, generic_digest_speed_template);
1246 if (mode > 300 && mode < 400) break;
1247
1248 case 311:
1249 test_digest_speed("tgr160", sec, generic_digest_speed_template);
1250 if (mode > 300 && mode < 400) break;
1251
1252 case 312:
1253 test_digest_speed("tgr192", sec, generic_digest_speed_template);
1254 if (mode > 300 && mode < 400) break;
1255
1256 case 399:
1257 break;
1258
1089 case 1000: 1259 case 1000:
1090 test_available(); 1260 test_available();
1091 break; 1261 break;
@@ -1113,7 +1283,14 @@ static int __init init(void)
1113 1283
1114 kfree(xbuf); 1284 kfree(xbuf);
1115 kfree(tvmem); 1285 kfree(tvmem);
1116 return 0; 1286
1287 /* We intentionaly return -EAGAIN to prevent keeping
1288 * the module. It does all its work from init()
1289 * and doesn't offer any runtime functionality
1290 * => we don't need it in the memory, do we?
1291 * -- mludvig
1292 */
1293 return -EAGAIN;
1117} 1294}
1118 1295
1119/* 1296/*
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index 1f683ba794..1fac5602f6 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -65,6 +65,11 @@ struct cipher_speed {
65 unsigned int blen; 65 unsigned int blen;
66}; 66};
67 67
68struct digest_speed {
69 unsigned int blen; /* buffer length */
70 unsigned int plen; /* per-update length */
71};
72
68/* 73/*
69 * MD4 test vectors from RFC1320 74 * MD4 test vectors from RFC1320
70 */ 75 */
@@ -2975,4 +2980,35 @@ static struct cipher_speed des_speed_template[] = {
2975 { .klen = 0, .blen = 0, } 2980 { .klen = 0, .blen = 0, }
2976}; 2981};
2977 2982
2983/*
2984 * Digest speed tests
2985 */
2986static struct digest_speed generic_digest_speed_template[] = {
2987 { .blen = 16, .plen = 16, },
2988 { .blen = 64, .plen = 16, },
2989 { .blen = 64, .plen = 64, },
2990 { .blen = 256, .plen = 16, },
2991 { .blen = 256, .plen = 64, },
2992 { .blen = 256, .plen = 256, },
2993 { .blen = 1024, .plen = 16, },
2994 { .blen = 1024, .plen = 256, },
2995 { .blen = 1024, .plen = 1024, },
2996 { .blen = 2048, .plen = 16, },
2997 { .blen = 2048, .plen = 256, },
2998 { .blen = 2048, .plen = 1024, },
2999 { .blen = 2048, .plen = 2048, },
3000 { .blen = 4096, .plen = 16, },
3001 { .blen = 4096, .plen = 256, },
3002 { .blen = 4096, .plen = 1024, },
3003 { .blen = 4096, .plen = 4096, },
3004 { .blen = 8192, .plen = 16, },
3005 { .blen = 8192, .plen = 256, },
3006 { .blen = 8192, .plen = 1024, },
3007 { .blen = 8192, .plen = 4096, },
3008 { .blen = 8192, .plen = 8192, },
3009
3010 /* End marker */
3011 { .blen = 0, .plen = 0, }
3012};
3013
2978#endif /* _CRYPTO_TCRYPT_H */ 3014#endif /* _CRYPTO_TCRYPT_H */
diff --git a/crypto/tea.c b/crypto/tea.c
index a6a02b30e4..5367adc82f 100644
--- a/crypto/tea.c
+++ b/crypto/tea.c
@@ -45,10 +45,10 @@ struct xtea_ctx {
45 u32 KEY[4]; 45 u32 KEY[4];
46}; 46};
47 47
48static int tea_setkey(void *ctx_arg, const u8 *in_key, 48static int tea_setkey(struct crypto_tfm *tfm, const u8 *in_key,
49 unsigned int key_len, u32 *flags) 49 unsigned int key_len, u32 *flags)
50{ 50{
51 struct tea_ctx *ctx = ctx_arg; 51 struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
52 const __le32 *key = (const __le32 *)in_key; 52 const __le32 *key = (const __le32 *)in_key;
53 53
54 if (key_len != 16) 54 if (key_len != 16)
@@ -66,12 +66,11 @@ static int tea_setkey(void *ctx_arg, const u8 *in_key,
66 66
67} 67}
68 68
69static void tea_encrypt(void *ctx_arg, u8 *dst, const u8 *src) 69static void tea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
70{ 70{
71 u32 y, z, n, sum = 0; 71 u32 y, z, n, sum = 0;
72 u32 k0, k1, k2, k3; 72 u32 k0, k1, k2, k3;
73 73 struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
74 struct tea_ctx *ctx = ctx_arg;
75 const __le32 *in = (const __le32 *)src; 74 const __le32 *in = (const __le32 *)src;
76 __le32 *out = (__le32 *)dst; 75 __le32 *out = (__le32 *)dst;
77 76
@@ -95,11 +94,11 @@ static void tea_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
95 out[1] = cpu_to_le32(z); 94 out[1] = cpu_to_le32(z);
96} 95}
97 96
98static void tea_decrypt(void *ctx_arg, u8 *dst, const u8 *src) 97static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
99{ 98{
100 u32 y, z, n, sum; 99 u32 y, z, n, sum;
101 u32 k0, k1, k2, k3; 100 u32 k0, k1, k2, k3;
102 struct tea_ctx *ctx = ctx_arg; 101 struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
103 const __le32 *in = (const __le32 *)src; 102 const __le32 *in = (const __le32 *)src;
104 __le32 *out = (__le32 *)dst; 103 __le32 *out = (__le32 *)dst;
105 104
@@ -125,10 +124,10 @@ static void tea_decrypt(void *ctx_arg, u8 *dst, const u8 *src)
125 out[1] = cpu_to_le32(z); 124 out[1] = cpu_to_le32(z);
126} 125}
127 126
128static int xtea_setkey(void *ctx_arg, const u8 *in_key, 127static int xtea_setkey(struct crypto_tfm *tfm, const u8 *in_key,
129 unsigned int key_len, u32 *flags) 128 unsigned int key_len, u32 *flags)
130{ 129{
131 struct xtea_ctx *ctx = ctx_arg; 130 struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
132 const __le32 *key = (const __le32 *)in_key; 131 const __le32 *key = (const __le32 *)in_key;
133 132
134 if (key_len != 16) 133 if (key_len != 16)
@@ -146,12 +145,11 @@ static int xtea_setkey(void *ctx_arg, const u8 *in_key,
146 145
147} 146}
148 147
149static void xtea_encrypt(void *ctx_arg, u8 *dst, const u8 *src) 148static void xtea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
150{ 149{
151 u32 y, z, sum = 0; 150 u32 y, z, sum = 0;
152 u32 limit = XTEA_DELTA * XTEA_ROUNDS; 151 u32 limit = XTEA_DELTA * XTEA_ROUNDS;
153 152 struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
154 struct xtea_ctx *ctx = ctx_arg;
155 const __le32 *in = (const __le32 *)src; 153 const __le32 *in = (const __le32 *)src;
156 __le32 *out = (__le32 *)dst; 154 __le32 *out = (__le32 *)dst;
157 155
@@ -168,10 +166,10 @@ static void xtea_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
168 out[1] = cpu_to_le32(z); 166 out[1] = cpu_to_le32(z);
169} 167}
170 168
171static void xtea_decrypt(void *ctx_arg, u8 *dst, const u8 *src) 169static void xtea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
172{ 170{
173 u32 y, z, sum; 171 u32 y, z, sum;
174 struct tea_ctx *ctx = ctx_arg; 172 struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
175 const __le32 *in = (const __le32 *)src; 173 const __le32 *in = (const __le32 *)src;
176 __le32 *out = (__le32 *)dst; 174 __le32 *out = (__le32 *)dst;
177 175
@@ -191,12 +189,11 @@ static void xtea_decrypt(void *ctx_arg, u8 *dst, const u8 *src)
191} 189}
192 190
193 191
194static void xeta_encrypt(void *ctx_arg, u8 *dst, const u8 *src) 192static void xeta_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
195{ 193{
196 u32 y, z, sum = 0; 194 u32 y, z, sum = 0;
197 u32 limit = XTEA_DELTA * XTEA_ROUNDS; 195 u32 limit = XTEA_DELTA * XTEA_ROUNDS;
198 196 struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
199 struct xtea_ctx *ctx = ctx_arg;
200 const __le32 *in = (const __le32 *)src; 197 const __le32 *in = (const __le32 *)src;
201 __le32 *out = (__le32 *)dst; 198 __le32 *out = (__le32 *)dst;
202 199
@@ -213,10 +210,10 @@ static void xeta_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
213 out[1] = cpu_to_le32(z); 210 out[1] = cpu_to_le32(z);
214} 211}
215 212
216static void xeta_decrypt(void *ctx_arg, u8 *dst, const u8 *src) 213static void xeta_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
217{ 214{
218 u32 y, z, sum; 215 u32 y, z, sum;
219 struct tea_ctx *ctx = ctx_arg; 216 struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
220 const __le32 *in = (const __le32 *)src; 217 const __le32 *in = (const __le32 *)src;
221 __le32 *out = (__le32 *)dst; 218 __le32 *out = (__le32 *)dst;
222 219
diff --git a/crypto/tgr192.c b/crypto/tgr192.c
index 2d8e44f6fb..a0fadf3dd3 100644
--- a/crypto/tgr192.c
+++ b/crypto/tgr192.c
@@ -496,11 +496,10 @@ static void tgr192_transform(struct tgr192_ctx *tctx, const u8 * data)
496 tctx->c = c; 496 tctx->c = c;
497} 497}
498 498
499static void tgr192_init(void *ctx) 499static void tgr192_init(struct crypto_tfm *tfm)
500{ 500{
501 struct tgr192_ctx *tctx = ctx; 501 struct tgr192_ctx *tctx = crypto_tfm_ctx(tfm);
502 502
503 memset (tctx->hash, 0, 64);
504 tctx->a = 0x0123456789abcdefULL; 503 tctx->a = 0x0123456789abcdefULL;
505 tctx->b = 0xfedcba9876543210ULL; 504 tctx->b = 0xfedcba9876543210ULL;
506 tctx->c = 0xf096a5b4c3b2e187ULL; 505 tctx->c = 0xf096a5b4c3b2e187ULL;
@@ -511,9 +510,10 @@ static void tgr192_init(void *ctx)
511 510
512/* Update the message digest with the contents 511/* Update the message digest with the contents
513 * of INBUF with length INLEN. */ 512 * of INBUF with length INLEN. */
514static void tgr192_update(void *ctx, const u8 * inbuf, unsigned int len) 513static void tgr192_update(struct crypto_tfm *tfm, const u8 *inbuf,
514 unsigned int len)
515{ 515{
516 struct tgr192_ctx *tctx = ctx; 516 struct tgr192_ctx *tctx = crypto_tfm_ctx(tfm);
517 517
518 if (tctx->count == 64) { /* flush the buffer */ 518 if (tctx->count == 64) { /* flush the buffer */
519 tgr192_transform(tctx, tctx->hash); 519 tgr192_transform(tctx, tctx->hash);
@@ -527,7 +527,7 @@ static void tgr192_update(void *ctx, const u8 * inbuf, unsigned int len)
527 for (; len && tctx->count < 64; len--) { 527 for (; len && tctx->count < 64; len--) {
528 tctx->hash[tctx->count++] = *inbuf++; 528 tctx->hash[tctx->count++] = *inbuf++;
529 } 529 }
530 tgr192_update(tctx, NULL, 0); 530 tgr192_update(tfm, NULL, 0);
531 if (!len) { 531 if (!len) {
532 return; 532 return;
533 } 533 }
@@ -549,15 +549,15 @@ static void tgr192_update(void *ctx, const u8 * inbuf, unsigned int len)
549 549
550 550
551/* The routine terminates the computation */ 551/* The routine terminates the computation */
552static void tgr192_final(void *ctx, u8 * out) 552static void tgr192_final(struct crypto_tfm *tfm, u8 * out)
553{ 553{
554 struct tgr192_ctx *tctx = ctx; 554 struct tgr192_ctx *tctx = crypto_tfm_ctx(tfm);
555 __be64 *dst = (__be64 *)out; 555 __be64 *dst = (__be64 *)out;
556 __be64 *be64p; 556 __be64 *be64p;
557 __le32 *le32p; 557 __le32 *le32p;
558 u32 t, msb, lsb; 558 u32 t, msb, lsb;
559 559
560 tgr192_update(tctx, NULL, 0); /* flush */ ; 560 tgr192_update(tfm, NULL, 0); /* flush */ ;
561 561
562 msb = 0; 562 msb = 0;
563 t = tctx->nblocks; 563 t = tctx->nblocks;
@@ -585,7 +585,7 @@ static void tgr192_final(void *ctx, u8 * out)
585 while (tctx->count < 64) { 585 while (tctx->count < 64) {
586 tctx->hash[tctx->count++] = 0; 586 tctx->hash[tctx->count++] = 0;
587 } 587 }
588 tgr192_update(tctx, NULL, 0); /* flush */ ; 588 tgr192_update(tfm, NULL, 0); /* flush */ ;
589 memset(tctx->hash, 0, 56); /* fill next block with zeroes */ 589 memset(tctx->hash, 0, 56); /* fill next block with zeroes */
590 } 590 }
591 /* append the 64 bit count */ 591 /* append the 64 bit count */
@@ -601,22 +601,20 @@ static void tgr192_final(void *ctx, u8 * out)
601 dst[2] = be64p[2] = cpu_to_be64(tctx->c); 601 dst[2] = be64p[2] = cpu_to_be64(tctx->c);
602} 602}
603 603
604static void tgr160_final(void *ctx, u8 * out) 604static void tgr160_final(struct crypto_tfm *tfm, u8 * out)
605{ 605{
606 struct tgr192_ctx *wctx = ctx;
607 u8 D[64]; 606 u8 D[64];
608 607
609 tgr192_final(wctx, D); 608 tgr192_final(tfm, D);
610 memcpy(out, D, TGR160_DIGEST_SIZE); 609 memcpy(out, D, TGR160_DIGEST_SIZE);
611 memset(D, 0, TGR192_DIGEST_SIZE); 610 memset(D, 0, TGR192_DIGEST_SIZE);
612} 611}
613 612
614static void tgr128_final(void *ctx, u8 * out) 613static void tgr128_final(struct crypto_tfm *tfm, u8 * out)
615{ 614{
616 struct tgr192_ctx *wctx = ctx;
617 u8 D[64]; 615 u8 D[64];
618 616
619 tgr192_final(wctx, D); 617 tgr192_final(tfm, D);
620 memcpy(out, D, TGR128_DIGEST_SIZE); 618 memcpy(out, D, TGR128_DIGEST_SIZE);
621 memset(D, 0, TGR192_DIGEST_SIZE); 619 memset(D, 0, TGR192_DIGEST_SIZE);
622} 620}
@@ -627,6 +625,7 @@ static struct crypto_alg tgr192 = {
627 .cra_blocksize = TGR192_BLOCK_SIZE, 625 .cra_blocksize = TGR192_BLOCK_SIZE,
628 .cra_ctxsize = sizeof(struct tgr192_ctx), 626 .cra_ctxsize = sizeof(struct tgr192_ctx),
629 .cra_module = THIS_MODULE, 627 .cra_module = THIS_MODULE,
628 .cra_alignmask = 7,
630 .cra_list = LIST_HEAD_INIT(tgr192.cra_list), 629 .cra_list = LIST_HEAD_INIT(tgr192.cra_list),
631 .cra_u = {.digest = { 630 .cra_u = {.digest = {
632 .dia_digestsize = TGR192_DIGEST_SIZE, 631 .dia_digestsize = TGR192_DIGEST_SIZE,
@@ -641,6 +640,7 @@ static struct crypto_alg tgr160 = {
641 .cra_blocksize = TGR192_BLOCK_SIZE, 640 .cra_blocksize = TGR192_BLOCK_SIZE,
642 .cra_ctxsize = sizeof(struct tgr192_ctx), 641 .cra_ctxsize = sizeof(struct tgr192_ctx),
643 .cra_module = THIS_MODULE, 642 .cra_module = THIS_MODULE,
643 .cra_alignmask = 7,
644 .cra_list = LIST_HEAD_INIT(tgr160.cra_list), 644 .cra_list = LIST_HEAD_INIT(tgr160.cra_list),
645 .cra_u = {.digest = { 645 .cra_u = {.digest = {
646 .dia_digestsize = TGR160_DIGEST_SIZE, 646 .dia_digestsize = TGR160_DIGEST_SIZE,
@@ -655,6 +655,7 @@ static struct crypto_alg tgr128 = {
655 .cra_blocksize = TGR192_BLOCK_SIZE, 655 .cra_blocksize = TGR192_BLOCK_SIZE,
656 .cra_ctxsize = sizeof(struct tgr192_ctx), 656 .cra_ctxsize = sizeof(struct tgr192_ctx),
657 .cra_module = THIS_MODULE, 657 .cra_module = THIS_MODULE,
658 .cra_alignmask = 7,
658 .cra_list = LIST_HEAD_INIT(tgr128.cra_list), 659 .cra_list = LIST_HEAD_INIT(tgr128.cra_list),
659 .cra_u = {.digest = { 660 .cra_u = {.digest = {
660 .dia_digestsize = TGR128_DIGEST_SIZE, 661 .dia_digestsize = TGR128_DIGEST_SIZE,
diff --git a/crypto/twofish.c b/crypto/twofish.c
index ddfd5a3fcc..ec2488242e 100644
--- a/crypto/twofish.c
+++ b/crypto/twofish.c
@@ -643,11 +643,11 @@ struct twofish_ctx {
643}; 643};
644 644
645/* Perform the key setup. */ 645/* Perform the key setup. */
646static int twofish_setkey(void *cx, const u8 *key, 646static int twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
647 unsigned int key_len, u32 *flags) 647 unsigned int key_len, u32 *flags)
648{ 648{
649 649
650 struct twofish_ctx *ctx = cx; 650 struct twofish_ctx *ctx = crypto_tfm_ctx(tfm);
651 651
652 int i, j, k; 652 int i, j, k;
653 653
@@ -802,9 +802,9 @@ static int twofish_setkey(void *cx, const u8 *key,
802} 802}
803 803
804/* Encrypt one block. in and out may be the same. */ 804/* Encrypt one block. in and out may be the same. */
805static void twofish_encrypt(void *cx, u8 *out, const u8 *in) 805static void twofish_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
806{ 806{
807 struct twofish_ctx *ctx = cx; 807 struct twofish_ctx *ctx = crypto_tfm_ctx(tfm);
808 const __le32 *src = (const __le32 *)in; 808 const __le32 *src = (const __le32 *)in;
809 __le32 *dst = (__le32 *)out; 809 __le32 *dst = (__le32 *)out;
810 810
@@ -839,9 +839,9 @@ static void twofish_encrypt(void *cx, u8 *out, const u8 *in)
839} 839}
840 840
841/* Decrypt one block. in and out may be the same. */ 841/* Decrypt one block. in and out may be the same. */
842static void twofish_decrypt(void *cx, u8 *out, const u8 *in) 842static void twofish_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
843{ 843{
844 struct twofish_ctx *ctx = cx; 844 struct twofish_ctx *ctx = crypto_tfm_ctx(tfm);
845 const __le32 *src = (const __le32 *)in; 845 const __le32 *src = (const __le32 *)in;
846 __le32 *dst = (__le32 *)out; 846 __le32 *dst = (__le32 *)out;
847 847
diff --git a/crypto/wp512.c b/crypto/wp512.c
index b226a126cf..727d05a19f 100644
--- a/crypto/wp512.c
+++ b/crypto/wp512.c
@@ -981,9 +981,9 @@ static void wp512_process_buffer(struct wp512_ctx *wctx) {
981 981
982} 982}
983 983
984static void wp512_init (void *ctx) { 984static void wp512_init(struct crypto_tfm *tfm) {
985 struct wp512_ctx *wctx = crypto_tfm_ctx(tfm);
985 int i; 986 int i;
986 struct wp512_ctx *wctx = ctx;
987 987
988 memset(wctx->bitLength, 0, 32); 988 memset(wctx->bitLength, 0, 32);
989 wctx->bufferBits = wctx->bufferPos = 0; 989 wctx->bufferBits = wctx->bufferPos = 0;
@@ -993,10 +993,10 @@ static void wp512_init (void *ctx) {
993 } 993 }
994} 994}
995 995
996static void wp512_update(void *ctx, const u8 *source, unsigned int len) 996static void wp512_update(struct crypto_tfm *tfm, const u8 *source,
997 unsigned int len)
997{ 998{
998 999 struct wp512_ctx *wctx = crypto_tfm_ctx(tfm);
999 struct wp512_ctx *wctx = ctx;
1000 int sourcePos = 0; 1000 int sourcePos = 0;
1001 unsigned int bits_len = len * 8; // convert to number of bits 1001 unsigned int bits_len = len * 8; // convert to number of bits
1002 int sourceGap = (8 - ((int)bits_len & 7)) & 7; 1002 int sourceGap = (8 - ((int)bits_len & 7)) & 7;
@@ -1054,9 +1054,9 @@ static void wp512_update(void *ctx, const u8 *source, unsigned int len)
1054 1054
1055} 1055}
1056 1056
1057static void wp512_final(void *ctx, u8 *out) 1057static void wp512_final(struct crypto_tfm *tfm, u8 *out)
1058{ 1058{
1059 struct wp512_ctx *wctx = ctx; 1059 struct wp512_ctx *wctx = crypto_tfm_ctx(tfm);
1060 int i; 1060 int i;
1061 u8 *buffer = wctx->buffer; 1061 u8 *buffer = wctx->buffer;
1062 u8 *bitLength = wctx->bitLength; 1062 u8 *bitLength = wctx->bitLength;
@@ -1087,22 +1087,20 @@ static void wp512_final(void *ctx, u8 *out)
1087 wctx->bufferPos = bufferPos; 1087 wctx->bufferPos = bufferPos;
1088} 1088}
1089 1089
1090static void wp384_final(void *ctx, u8 *out) 1090static void wp384_final(struct crypto_tfm *tfm, u8 *out)
1091{ 1091{
1092 struct wp512_ctx *wctx = ctx;
1093 u8 D[64]; 1092 u8 D[64];
1094 1093
1095 wp512_final (wctx, D); 1094 wp512_final(tfm, D);
1096 memcpy (out, D, WP384_DIGEST_SIZE); 1095 memcpy (out, D, WP384_DIGEST_SIZE);
1097 memset (D, 0, WP512_DIGEST_SIZE); 1096 memset (D, 0, WP512_DIGEST_SIZE);
1098} 1097}
1099 1098
1100static void wp256_final(void *ctx, u8 *out) 1099static void wp256_final(struct crypto_tfm *tfm, u8 *out)
1101{ 1100{
1102 struct wp512_ctx *wctx = ctx;
1103 u8 D[64]; 1101 u8 D[64];
1104 1102
1105 wp512_final (wctx, D); 1103 wp512_final(tfm, D);
1106 memcpy (out, D, WP256_DIGEST_SIZE); 1104 memcpy (out, D, WP256_DIGEST_SIZE);
1107 memset (D, 0, WP512_DIGEST_SIZE); 1105 memset (D, 0, WP512_DIGEST_SIZE);
1108} 1106}
diff --git a/drivers/Makefile b/drivers/Makefile
index 3c5170310b..fc2d744a4e 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -74,4 +74,5 @@ obj-$(CONFIG_SGI_SN) += sn/
74obj-y += firmware/ 74obj-y += firmware/
75obj-$(CONFIG_CRYPTO) += crypto/ 75obj-$(CONFIG_CRYPTO) += crypto/
76obj-$(CONFIG_SUPERH) += sh/ 76obj-$(CONFIG_SUPERH) += sh/
77obj-$(CONFIG_GENERIC_TIME) += clocksource/
77obj-$(CONFIG_DMA_ENGINE) += dma/ 78obj-$(CONFIG_DMA_ENGINE) += dma/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 94b8d820c5..610d2cc02c 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -328,7 +328,7 @@ config ACPI_CONTAINER
328config ACPI_HOTPLUG_MEMORY 328config ACPI_HOTPLUG_MEMORY
329 tristate "Memory Hotplug" 329 tristate "Memory Hotplug"
330 depends on ACPI 330 depends on ACPI
331 depends on MEMORY_HOTPLUG || X86_64 331 depends on MEMORY_HOTPLUG
332 default n 332 default n
333 help 333 help
334 This driver adds supports for ACPI Memory Hotplug. This driver 334 This driver adds supports for ACPI Memory Hotplug. This driver
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index e0a95ba723..1012284ff4 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -57,6 +57,7 @@ MODULE_LICENSE("GPL");
57 57
58static int acpi_memory_device_add(struct acpi_device *device); 58static int acpi_memory_device_add(struct acpi_device *device);
59static int acpi_memory_device_remove(struct acpi_device *device, int type); 59static int acpi_memory_device_remove(struct acpi_device *device, int type);
60static int acpi_memory_device_start(struct acpi_device *device);
60 61
61static struct acpi_driver acpi_memory_device_driver = { 62static struct acpi_driver acpi_memory_device_driver = {
62 .name = ACPI_MEMORY_DEVICE_DRIVER_NAME, 63 .name = ACPI_MEMORY_DEVICE_DRIVER_NAME,
@@ -65,48 +66,79 @@ static struct acpi_driver acpi_memory_device_driver = {
65 .ops = { 66 .ops = {
66 .add = acpi_memory_device_add, 67 .add = acpi_memory_device_add,
67 .remove = acpi_memory_device_remove, 68 .remove = acpi_memory_device_remove,
69 .start = acpi_memory_device_start,
68 }, 70 },
69}; 71};
70 72
73struct acpi_memory_info {
74 struct list_head list;
75 u64 start_addr; /* Memory Range start physical addr */
76 u64 length; /* Memory Range length */
77 unsigned short caching; /* memory cache attribute */
78 unsigned short write_protect; /* memory read/write attribute */
79 unsigned int enabled:1;
80};
81
71struct acpi_memory_device { 82struct acpi_memory_device {
72 acpi_handle handle; 83 acpi_handle handle;
73 unsigned int state; /* State of the memory device */ 84 unsigned int state; /* State of the memory device */
74 unsigned short caching; /* memory cache attribute */ 85 struct list_head res_list;
75 unsigned short write_protect; /* memory read/write attribute */
76 u64 start_addr; /* Memory Range start physical addr */
77 u64 length; /* Memory Range length */
78}; 86};
79 87
88static acpi_status
89acpi_memory_get_resource(struct acpi_resource *resource, void *context)
90{
91 struct acpi_memory_device *mem_device = context;
92 struct acpi_resource_address64 address64;
93 struct acpi_memory_info *info, *new;
94 acpi_status status;
95
96 status = acpi_resource_to_address64(resource, &address64);
97 if (ACPI_FAILURE(status) ||
98 (address64.resource_type != ACPI_MEMORY_RANGE))
99 return AE_OK;
100
101 list_for_each_entry(info, &mem_device->res_list, list) {
102 /* Can we combine the resource range information? */
103 if ((info->caching == address64.info.mem.caching) &&
104 (info->write_protect == address64.info.mem.write_protect) &&
105 (info->start_addr + info->length == address64.minimum)) {
106 info->length += address64.address_length;
107 return AE_OK;
108 }
109 }
110
111 new = kzalloc(sizeof(struct acpi_memory_info), GFP_KERNEL);
112 if (!new)
113 return AE_ERROR;
114
115 INIT_LIST_HEAD(&new->list);
116 new->caching = address64.info.mem.caching;
117 new->write_protect = address64.info.mem.write_protect;
118 new->start_addr = address64.minimum;
119 new->length = address64.address_length;
120 list_add_tail(&new->list, &mem_device->res_list);
121
122 return AE_OK;
123}
124
80static int 125static int
81acpi_memory_get_device_resources(struct acpi_memory_device *mem_device) 126acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
82{ 127{
83 acpi_status status; 128 acpi_status status;
84 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 129 struct acpi_memory_info *info, *n;
85 struct acpi_resource *resource = NULL;
86 struct acpi_resource_address64 address64;
87 130
88 ACPI_FUNCTION_TRACE("acpi_memory_get_device_resources"); 131 ACPI_FUNCTION_TRACE("acpi_memory_get_device_resources");
89 132
90 /* Get the range from the _CRS */ 133 status = acpi_walk_resources(mem_device->handle, METHOD_NAME__CRS,
91 status = acpi_get_current_resources(mem_device->handle, &buffer); 134 acpi_memory_get_resource, mem_device);
92 if (ACPI_FAILURE(status)) 135 if (ACPI_FAILURE(status)) {
93 return_VALUE(-EINVAL); 136 list_for_each_entry_safe(info, n, &mem_device->res_list, list)
94 137 kfree(info);
95 resource = (struct acpi_resource *)buffer.pointer; 138 return -EINVAL;
96 status = acpi_resource_to_address64(resource, &address64);
97 if (ACPI_SUCCESS(status)) {
98 if (address64.resource_type == ACPI_MEMORY_RANGE) {
99 /* Populate the structure */
100 mem_device->caching = address64.info.mem.caching;
101 mem_device->write_protect =
102 address64.info.mem.write_protect;
103 mem_device->start_addr = address64.minimum;
104 mem_device->length = address64.address_length;
105 }
106 } 139 }
107 140
108 acpi_os_free(buffer.pointer); 141 return 0;
109 return_VALUE(0);
110} 142}
111 143
112static int 144static int
@@ -181,7 +213,9 @@ static int acpi_memory_check_device(struct acpi_memory_device *mem_device)
181 213
182static int acpi_memory_enable_device(struct acpi_memory_device *mem_device) 214static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
183{ 215{
184 int result; 216 int result, num_enabled = 0;
217 struct acpi_memory_info *info;
218 int node;
185 219
186 ACPI_FUNCTION_TRACE("acpi_memory_enable_device"); 220 ACPI_FUNCTION_TRACE("acpi_memory_enable_device");
187 221
@@ -194,15 +228,35 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
194 return result; 228 return result;
195 } 229 }
196 230
231 node = acpi_get_node(mem_device->handle);
197 /* 232 /*
198 * Tell the VM there is more memory here... 233 * Tell the VM there is more memory here...
199 * Note: Assume that this function returns zero on success 234 * Note: Assume that this function returns zero on success
235 * We don't have memory-hot-add rollback function,now.
236 * (i.e. memory-hot-remove function)
200 */ 237 */
201 result = add_memory(mem_device->start_addr, mem_device->length); 238 list_for_each_entry(info, &mem_device->res_list, list) {
202 if (result) { 239 u64 start_pfn, end_pfn;
240
241 start_pfn = info->start_addr >> PAGE_SHIFT;
242 end_pfn = (info->start_addr + info->length - 1) >> PAGE_SHIFT;
243
244 if (pfn_valid(start_pfn) || pfn_valid(end_pfn)) {
245 /* already enabled. try next area */
246 num_enabled++;
247 continue;
248 }
249
250 result = add_memory(node, info->start_addr, info->length);
251 if (result)
252 continue;
253 info->enabled = 1;
254 num_enabled++;
255 }
256 if (!num_enabled) {
203 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "\nadd_memory failed\n")); 257 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "\nadd_memory failed\n"));
204 mem_device->state = MEMORY_INVALID_STATE; 258 mem_device->state = MEMORY_INVALID_STATE;
205 return result; 259 return -EINVAL;
206 } 260 }
207 261
208 return result; 262 return result;
@@ -246,8 +300,7 @@ static int acpi_memory_powerdown_device(struct acpi_memory_device *mem_device)
246static int acpi_memory_disable_device(struct acpi_memory_device *mem_device) 300static int acpi_memory_disable_device(struct acpi_memory_device *mem_device)
247{ 301{
248 int result; 302 int result;
249 u64 start = mem_device->start_addr; 303 struct acpi_memory_info *info, *n;
250 u64 len = mem_device->length;
251 304
252 ACPI_FUNCTION_TRACE("acpi_memory_disable_device"); 305 ACPI_FUNCTION_TRACE("acpi_memory_disable_device");
253 306
@@ -255,10 +308,13 @@ static int acpi_memory_disable_device(struct acpi_memory_device *mem_device)
255 * Ask the VM to offline this memory range. 308 * Ask the VM to offline this memory range.
256 * Note: Assume that this function returns zero on success 309 * Note: Assume that this function returns zero on success
257 */ 310 */
258 result = remove_memory(start, len); 311 list_for_each_entry_safe(info, n, &mem_device->res_list, list) {
259 if (result) { 312 if (info->enabled) {
260 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Hot-Remove failed.\n")); 313 result = remove_memory(info->start_addr, info->length);
261 return_VALUE(result); 314 if (result)
315 return result;
316 }
317 kfree(info);
262 } 318 }
263 319
264 /* Power-off and eject the device */ 320 /* Power-off and eject the device */
@@ -356,6 +412,7 @@ static int acpi_memory_device_add(struct acpi_device *device)
356 return_VALUE(-ENOMEM); 412 return_VALUE(-ENOMEM);
357 memset(mem_device, 0, sizeof(struct acpi_memory_device)); 413 memset(mem_device, 0, sizeof(struct acpi_memory_device));
358 414
415 INIT_LIST_HEAD(&mem_device->res_list);
359 mem_device->handle = device->handle; 416 mem_device->handle = device->handle;
360 sprintf(acpi_device_name(device), "%s", ACPI_MEMORY_DEVICE_NAME); 417 sprintf(acpi_device_name(device), "%s", ACPI_MEMORY_DEVICE_NAME);
361 sprintf(acpi_device_class(device), "%s", ACPI_MEMORY_DEVICE_CLASS); 418 sprintf(acpi_device_class(device), "%s", ACPI_MEMORY_DEVICE_CLASS);
@@ -391,6 +448,25 @@ static int acpi_memory_device_remove(struct acpi_device *device, int type)
391 return_VALUE(0); 448 return_VALUE(0);
392} 449}
393 450
451static int acpi_memory_device_start (struct acpi_device *device)
452{
453 struct acpi_memory_device *mem_device;
454 int result = 0;
455
456 ACPI_FUNCTION_TRACE("acpi_memory_device_start");
457
458 mem_device = acpi_driver_data(device);
459
460 if (!acpi_memory_check_device(mem_device)) {
461 /* call add_memory func */
462 result = acpi_memory_enable_device(mem_device);
463 if (result)
464 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
465 "Error in acpi_memory_enable_device\n"));
466 }
467 return_VALUE(result);
468}
469
394/* 470/*
395 * Helper function to check for memory device 471 * Helper function to check for memory device
396 */ 472 */
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index e2c1a16078..13d6d5bdea 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -254,5 +254,18 @@ int acpi_get_pxm(acpi_handle h)
254 } while (ACPI_SUCCESS(status)); 254 } while (ACPI_SUCCESS(status));
255 return -1; 255 return -1;
256} 256}
257
258EXPORT_SYMBOL(acpi_get_pxm); 257EXPORT_SYMBOL(acpi_get_pxm);
258
259int acpi_get_node(acpi_handle *handle)
260{
261 int pxm, node = -1;
262
263 ACPI_FUNCTION_TRACE("acpi_get_node");
264
265 pxm = acpi_get_pxm(handle);
266 if (pxm >= 0)
267 node = acpi_map_pxm_to_node(pxm);
268
269 return_VALUE(node);
270}
271EXPORT_SYMBOL(acpi_get_node);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 3b97a5eae9..8a74bf3efd 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -206,11 +206,11 @@ acpi_processor_power_activate(struct acpi_processor *pr,
206 206
207static void acpi_safe_halt(void) 207static void acpi_safe_halt(void)
208{ 208{
209 clear_thread_flag(TIF_POLLING_NRFLAG); 209 current_thread_info()->status &= ~TS_POLLING;
210 smp_mb__after_clear_bit(); 210 smp_mb__after_clear_bit();
211 if (!need_resched()) 211 if (!need_resched())
212 safe_halt(); 212 safe_halt();
213 set_thread_flag(TIF_POLLING_NRFLAG); 213 current_thread_info()->status |= TS_POLLING;
214} 214}
215 215
216static atomic_t c3_cpu_count; 216static atomic_t c3_cpu_count;
@@ -330,10 +330,10 @@ static void acpi_processor_idle(void)
330 * Invoke the current Cx state to put the processor to sleep. 330 * Invoke the current Cx state to put the processor to sleep.
331 */ 331 */
332 if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) { 332 if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
333 clear_thread_flag(TIF_POLLING_NRFLAG); 333 current_thread_info()->status &= ~TS_POLLING;
334 smp_mb__after_clear_bit(); 334 smp_mb__after_clear_bit();
335 if (need_resched()) { 335 if (need_resched()) {
336 set_thread_flag(TIF_POLLING_NRFLAG); 336 current_thread_info()->status |= TS_POLLING;
337 local_irq_enable(); 337 local_irq_enable();
338 return; 338 return;
339 } 339 }
@@ -369,9 +369,14 @@ static void acpi_processor_idle(void)
369 t2 = inl(acpi_fadt.xpm_tmr_blk.address); 369 t2 = inl(acpi_fadt.xpm_tmr_blk.address);
370 /* Get end time (ticks) */ 370 /* Get end time (ticks) */
371 t2 = inl(acpi_fadt.xpm_tmr_blk.address); 371 t2 = inl(acpi_fadt.xpm_tmr_blk.address);
372
373#ifdef CONFIG_GENERIC_TIME
374 /* TSC halts in C2, so notify users */
375 mark_tsc_unstable();
376#endif
372 /* Re-enable interrupts */ 377 /* Re-enable interrupts */
373 local_irq_enable(); 378 local_irq_enable();
374 set_thread_flag(TIF_POLLING_NRFLAG); 379 current_thread_info()->status |= TS_POLLING;
375 /* Compute time (ticks) that we were actually asleep */ 380 /* Compute time (ticks) that we were actually asleep */
376 sleep_ticks = 381 sleep_ticks =
377 ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD; 382 ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD;
@@ -409,9 +414,13 @@ static void acpi_processor_idle(void)
409 ACPI_MTX_DO_NOT_LOCK); 414 ACPI_MTX_DO_NOT_LOCK);
410 } 415 }
411 416
417#ifdef CONFIG_GENERIC_TIME
418 /* TSC halts in C3, so notify users */
419 mark_tsc_unstable();
420#endif
412 /* Re-enable interrupts */ 421 /* Re-enable interrupts */
413 local_irq_enable(); 422 local_irq_enable();
414 set_thread_flag(TIF_POLLING_NRFLAG); 423 current_thread_info()->status |= TS_POLLING;
415 /* Compute time (ticks) that we were actually asleep */ 424 /* Compute time (ticks) that we were actually asleep */
416 sleep_ticks = 425 sleep_ticks =
417 ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD; 426 ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD;
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 7f7ec28882..1bca86edf5 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -33,6 +33,7 @@
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/mm.h> 34#include <linux/mm.h>
35#include <linux/pci.h> 35#include <linux/pci.h>
36#include <linux/poison.h>
36#include <linux/errno.h> 37#include <linux/errno.h>
37#include <linux/atm.h> 38#include <linux/atm.h>
38#include <linux/atmdev.h> 39#include <linux/atmdev.h>
@@ -754,7 +755,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
754 fs_kfree_skb (skb); 755 fs_kfree_skb (skb);
755 756
756 fs_dprintk (FS_DEBUG_ALLOC, "Free trans-d: %p\n", td); 757 fs_dprintk (FS_DEBUG_ALLOC, "Free trans-d: %p\n", td);
757 memset (td, 0x12, sizeof (struct FS_BPENTRY)); 758 memset (td, ATM_POISON_FREE, sizeof(struct FS_BPENTRY));
758 kfree (td); 759 kfree (td);
759 break; 760 break;
760 default: 761 default:
@@ -951,7 +952,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
951 it most likely that the chip will notice it. It also prevents us 952 it most likely that the chip will notice it. It also prevents us
952 from having to wait for completion. On the other hand, we may 953 from having to wait for completion. On the other hand, we may
953 need to wait for completion anyway, to see if it completed 954 need to wait for completion anyway, to see if it completed
954 succesfully. */ 955 successfully. */
955 956
956 switch (atm_vcc->qos.aal) { 957 switch (atm_vcc->qos.aal) {
957 case ATM_AAL2: 958 case ATM_AAL2:
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index dd712b24ec..4bef76a2f3 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -8,6 +8,7 @@
8#include <linux/cpu.h> 8#include <linux/cpu.h>
9#include <linux/topology.h> 9#include <linux/topology.h>
10#include <linux/device.h> 10#include <linux/device.h>
11#include <linux/node.h>
11 12
12#include "base.h" 13#include "base.h"
13 14
@@ -57,13 +58,12 @@ static void __devinit register_cpu_control(struct cpu *cpu)
57{ 58{
58 sysdev_create_file(&cpu->sysdev, &attr_online); 59 sysdev_create_file(&cpu->sysdev, &attr_online);
59} 60}
60void unregister_cpu(struct cpu *cpu, struct node *root) 61void unregister_cpu(struct cpu *cpu)
61{ 62{
62 int logical_cpu = cpu->sysdev.id; 63 int logical_cpu = cpu->sysdev.id;
63 64
64 if (root) 65 unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));
65 sysfs_remove_link(&root->sysdev.kobj, 66
66 kobject_name(&cpu->sysdev.kobj));
67 sysdev_remove_file(&cpu->sysdev, &attr_online); 67 sysdev_remove_file(&cpu->sysdev, &attr_online);
68 68
69 sysdev_unregister(&cpu->sysdev); 69 sysdev_unregister(&cpu->sysdev);
@@ -109,23 +109,21 @@ static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL);
109 * 109 *
110 * Initialize and register the CPU device. 110 * Initialize and register the CPU device.
111 */ 111 */
112int __devinit register_cpu(struct cpu *cpu, int num, struct node *root) 112int __devinit register_cpu(struct cpu *cpu, int num)
113{ 113{
114 int error; 114 int error;
115
116 cpu->node_id = cpu_to_node(num); 115 cpu->node_id = cpu_to_node(num);
117 cpu->sysdev.id = num; 116 cpu->sysdev.id = num;
118 cpu->sysdev.cls = &cpu_sysdev_class; 117 cpu->sysdev.cls = &cpu_sysdev_class;
119 118
120 error = sysdev_register(&cpu->sysdev); 119 error = sysdev_register(&cpu->sysdev);
121 if (!error && root) 120
122 error = sysfs_create_link(&root->sysdev.kobj,
123 &cpu->sysdev.kobj,
124 kobject_name(&cpu->sysdev.kobj));
125 if (!error && !cpu->no_control) 121 if (!error && !cpu->no_control)
126 register_cpu_control(cpu); 122 register_cpu_control(cpu);
127 if (!error) 123 if (!error)
128 cpu_sys_devices[num] = &cpu->sysdev; 124 cpu_sys_devices[num] = &cpu->sysdev;
125 if (!error)
126 register_cpu_under_node(num, cpu_to_node(num));
129 127
130#ifdef CONFIG_KEXEC 128#ifdef CONFIG_KEXEC
131 if (!error) 129 if (!error)
@@ -145,5 +143,13 @@ EXPORT_SYMBOL_GPL(get_cpu_sysdev);
145 143
146int __init cpu_dev_init(void) 144int __init cpu_dev_init(void)
147{ 145{
148 return sysdev_class_register(&cpu_sysdev_class); 146 int err;
147
148 err = sysdev_class_register(&cpu_sysdev_class);
149#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
150 if (!err)
151 err = sched_create_sysfs_power_savings_entries(&cpu_sysdev_class);
152#endif
153
154 return err;
149} 155}
diff --git a/drivers/base/dmapool.c b/drivers/base/dmapool.c
index e2f64f91ed..33c5cce156 100644
--- a/drivers/base/dmapool.c
+++ b/drivers/base/dmapool.c
@@ -7,6 +7,7 @@
7#include <linux/dmapool.h> 7#include <linux/dmapool.h>
8#include <linux/slab.h> 8#include <linux/slab.h>
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/poison.h>
10 11
11/* 12/*
12 * Pool allocator ... wraps the dma_alloc_coherent page allocator, so 13 * Pool allocator ... wraps the dma_alloc_coherent page allocator, so
@@ -35,8 +36,6 @@ struct dma_page { /* cacheable header for 'allocation' bytes */
35}; 36};
36 37
37#define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000) 38#define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000)
38#define POOL_POISON_FREED 0xa7 /* !inuse */
39#define POOL_POISON_ALLOCATED 0xa9 /* !initted */
40 39
41static DECLARE_MUTEX (pools_lock); 40static DECLARE_MUTEX (pools_lock);
42 41
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index dd547af468..c6b7d9c4b6 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -306,11 +306,13 @@ static ssize_t
306memory_probe_store(struct class *class, const char *buf, size_t count) 306memory_probe_store(struct class *class, const char *buf, size_t count)
307{ 307{
308 u64 phys_addr; 308 u64 phys_addr;
309 int nid;
309 int ret; 310 int ret;
310 311
311 phys_addr = simple_strtoull(buf, NULL, 0); 312 phys_addr = simple_strtoull(buf, NULL, 0);
312 313
313 ret = add_memory(phys_addr, PAGES_PER_SECTION << PAGE_SHIFT); 314 nid = memory_add_physaddr_to_nid(phys_addr);
315 ret = add_memory(nid, phys_addr, PAGES_PER_SECTION << PAGE_SHIFT);
314 316
315 if (ret) 317 if (ret)
316 count = ret; 318 count = ret;
diff --git a/drivers/base/node.c b/drivers/base/node.c
index c80c3aeed0..eae2bdc183 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -11,6 +11,7 @@
11#include <linux/cpumask.h> 11#include <linux/cpumask.h>
12#include <linux/topology.h> 12#include <linux/topology.h>
13#include <linux/nodemask.h> 13#include <linux/nodemask.h>
14#include <linux/cpu.h>
14 15
15static struct sysdev_class node_class = { 16static struct sysdev_class node_class = {
16 set_kset_name("node"), 17 set_kset_name("node"),
@@ -190,6 +191,66 @@ void unregister_node(struct node *node)
190 sysdev_unregister(&node->sysdev); 191 sysdev_unregister(&node->sysdev);
191} 192}
192 193
194struct node node_devices[MAX_NUMNODES];
195
196/*
197 * register cpu under node
198 */
199int register_cpu_under_node(unsigned int cpu, unsigned int nid)
200{
201 if (node_online(nid)) {
202 struct sys_device *obj = get_cpu_sysdev(cpu);
203 if (!obj)
204 return 0;
205 return sysfs_create_link(&node_devices[nid].sysdev.kobj,
206 &obj->kobj,
207 kobject_name(&obj->kobj));
208 }
209
210 return 0;
211}
212
213int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
214{
215 if (node_online(nid)) {
216 struct sys_device *obj = get_cpu_sysdev(cpu);
217 if (obj)
218 sysfs_remove_link(&node_devices[nid].sysdev.kobj,
219 kobject_name(&obj->kobj));
220 }
221 return 0;
222}
223
224int register_one_node(int nid)
225{
226 int error = 0;
227 int cpu;
228
229 if (node_online(nid)) {
230 int p_node = parent_node(nid);
231 struct node *parent = NULL;
232
233 if (p_node != nid)
234 parent = &node_devices[p_node];
235
236 error = register_node(&node_devices[nid], nid, parent);
237
238 /* link cpu under this node */
239 for_each_present_cpu(cpu) {
240 if (cpu_to_node(cpu) == nid)
241 register_cpu_under_node(cpu, nid);
242 }
243 }
244
245 return error;
246
247}
248
249void unregister_one_node(int nid)
250{
251 unregister_node(&node_devices[nid]);
252}
253
193static int __init register_node_type(void) 254static int __init register_node_type(void)
194{ 255{
195 return sysdev_class_register(&node_class); 256 return sysdev_class_register(&node_class);
diff --git a/drivers/base/power/resume.c b/drivers/base/power/resume.c
index 520679ce53..826093ef4c 100644
--- a/drivers/base/power/resume.c
+++ b/drivers/base/power/resume.c
@@ -53,8 +53,7 @@ void dpm_resume(void)
53 struct device * dev = to_device(entry); 53 struct device * dev = to_device(entry);
54 54
55 get_device(dev); 55 get_device(dev);
56 list_del_init(entry); 56 list_move_tail(entry, &dpm_active);
57 list_add_tail(entry, &dpm_active);
58 57
59 up(&dpm_list_sem); 58 up(&dpm_list_sem);
60 if (!dev->power.prev_state.event) 59 if (!dev->power.prev_state.event)
@@ -101,8 +100,7 @@ void dpm_power_up(void)
101 struct device * dev = to_device(entry); 100 struct device * dev = to_device(entry);
102 101
103 get_device(dev); 102 get_device(dev);
104 list_del_init(entry); 103 list_move_tail(entry, &dpm_active);
105 list_add_tail(entry, &dpm_active);
106 resume_device(dev); 104 resume_device(dev);
107 put_device(dev); 105 put_device(dev);
108 } 106 }
diff --git a/drivers/base/power/suspend.c b/drivers/base/power/suspend.c
index 1a1fe43a30..69509e02f7 100644
--- a/drivers/base/power/suspend.c
+++ b/drivers/base/power/suspend.c
@@ -116,12 +116,10 @@ int device_suspend(pm_message_t state)
116 /* Check if the device got removed */ 116 /* Check if the device got removed */
117 if (!list_empty(&dev->power.entry)) { 117 if (!list_empty(&dev->power.entry)) {
118 /* Move it to the dpm_off or dpm_off_irq list */ 118 /* Move it to the dpm_off or dpm_off_irq list */
119 if (!error) { 119 if (!error)
120 list_del(&dev->power.entry); 120 list_move(&dev->power.entry, &dpm_off);
121 list_add(&dev->power.entry, &dpm_off); 121 else if (error == -EAGAIN) {
122 } else if (error == -EAGAIN) { 122 list_move(&dev->power.entry, &dpm_off_irq);
123 list_del(&dev->power.entry);
124 list_add(&dev->power.entry, &dpm_off_irq);
125 error = 0; 123 error = 0;
126 } 124 }
127 } 125 }
@@ -139,8 +137,7 @@ int device_suspend(pm_message_t state)
139 */ 137 */
140 while (!list_empty(&dpm_off_irq)) { 138 while (!list_empty(&dpm_off_irq)) {
141 struct list_head * entry = dpm_off_irq.next; 139 struct list_head * entry = dpm_off_irq.next;
142 list_del(entry); 140 list_move(entry, &dpm_off);
143 list_add(entry, &dpm_off);
144 } 141 }
145 dpm_resume(); 142 dpm_resume();
146 } 143 }
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 8c52421cbc..c2d6216323 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -107,7 +107,7 @@ static int __cpuinit topology_remove_dev(struct sys_device * sys_dev)
107 return 0; 107 return 0;
108} 108}
109 109
110static int topology_cpu_callback(struct notifier_block *nfb, 110static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
111 unsigned long action, void *hcpu) 111 unsigned long action, void *hcpu)
112{ 112{
113 unsigned int cpu = (unsigned long)hcpu; 113 unsigned int cpu = (unsigned long)hcpu;
@@ -125,7 +125,7 @@ static int topology_cpu_callback(struct notifier_block *nfb,
125 return NOTIFY_OK; 125 return NOTIFY_OK;
126} 126}
127 127
128static struct notifier_block topology_cpu_notifier = 128static struct notifier_block __cpuinitdata topology_cpu_notifier =
129{ 129{
130 .notifier_call = topology_cpu_callback, 130 .notifier_call = topology_cpu_callback,
131}; 131};
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 9dc294a749..3c74ea729f 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -74,7 +74,6 @@
74#include <linux/completion.h> 74#include <linux/completion.h>
75#include <linux/highmem.h> 75#include <linux/highmem.h>
76#include <linux/gfp.h> 76#include <linux/gfp.h>
77#include <linux/kthread.h>
78 77
79#include <asm/uaccess.h> 78#include <asm/uaccess.h>
80 79
@@ -579,6 +578,8 @@ static int loop_thread(void *data)
579 struct loop_device *lo = data; 578 struct loop_device *lo = data;
580 struct bio *bio; 579 struct bio *bio;
581 580
581 daemonize("loop%d", lo->lo_number);
582
582 /* 583 /*
583 * loop can be used in an encrypted device, 584 * loop can be used in an encrypted device,
584 * hence, it mustn't be stopped at all 585 * hence, it mustn't be stopped at all
@@ -591,6 +592,11 @@ static int loop_thread(void *data)
591 lo->lo_state = Lo_bound; 592 lo->lo_state = Lo_bound;
592 lo->lo_pending = 1; 593 lo->lo_pending = 1;
593 594
595 /*
596 * complete it, we are running
597 */
598 complete(&lo->lo_done);
599
594 for (;;) { 600 for (;;) {
595 int pending; 601 int pending;
596 602
@@ -623,6 +629,7 @@ static int loop_thread(void *data)
623 break; 629 break;
624 } 630 }
625 631
632 complete(&lo->lo_done);
626 return 0; 633 return 0;
627} 634}
628 635
@@ -739,7 +746,6 @@ static int loop_set_fd(struct loop_device *lo, struct file *lo_file,
739 unsigned lo_blocksize; 746 unsigned lo_blocksize;
740 int lo_flags = 0; 747 int lo_flags = 0;
741 int error; 748 int error;
742 struct task_struct *tsk;
743 loff_t size; 749 loff_t size;
744 750
745 /* This is safe, since we have a reference from open(). */ 751 /* This is safe, since we have a reference from open(). */
@@ -833,11 +839,10 @@ static int loop_set_fd(struct loop_device *lo, struct file *lo_file,
833 839
834 set_blocksize(bdev, lo_blocksize); 840 set_blocksize(bdev, lo_blocksize);
835 841
836 tsk = kthread_run(loop_thread, lo, "loop%d", lo->lo_number); 842 error = kernel_thread(loop_thread, lo, CLONE_KERNEL);
837 if (IS_ERR(tsk)) { 843 if (error < 0)
838 error = PTR_ERR(tsk);
839 goto out_putf; 844 goto out_putf;
840 } 845 wait_for_completion(&lo->lo_done);
841 return 0; 846 return 0;
842 847
843 out_putf: 848 out_putf:
@@ -893,9 +898,6 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
893 if (lo->lo_state != Lo_bound) 898 if (lo->lo_state != Lo_bound)
894 return -ENXIO; 899 return -ENXIO;
895 900
896 if (!lo->lo_thread)
897 return -EINVAL;
898
899 if (lo->lo_refcnt > 1) /* we needed one fd for the ioctl */ 901 if (lo->lo_refcnt > 1) /* we needed one fd for the ioctl */
900 return -EBUSY; 902 return -EBUSY;
901 903
@@ -909,7 +911,7 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
909 complete(&lo->lo_bh_done); 911 complete(&lo->lo_bh_done);
910 spin_unlock_irq(&lo->lo_lock); 912 spin_unlock_irq(&lo->lo_lock);
911 913
912 kthread_stop(lo->lo_thread); 914 wait_for_completion(&lo->lo_done);
913 915
914 lo->lo_backing_file = NULL; 916 lo->lo_backing_file = NULL;
915 917
@@ -922,7 +924,6 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
922 lo->lo_sizelimit = 0; 924 lo->lo_sizelimit = 0;
923 lo->lo_encrypt_key_size = 0; 925 lo->lo_encrypt_key_size = 0;
924 lo->lo_flags = 0; 926 lo->lo_flags = 0;
925 lo->lo_thread = NULL;
926 memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE); 927 memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
927 memset(lo->lo_crypt_name, 0, LO_NAME_SIZE); 928 memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
928 memset(lo->lo_file_name, 0, LO_NAME_SIZE); 929 memset(lo->lo_file_name, 0, LO_NAME_SIZE);
@@ -1287,6 +1288,7 @@ static int __init loop_init(void)
1287 if (!lo->lo_queue) 1288 if (!lo->lo_queue)
1288 goto out_mem4; 1289 goto out_mem4;
1289 mutex_init(&lo->lo_ctl_mutex); 1290 mutex_init(&lo->lo_ctl_mutex);
1291 init_completion(&lo->lo_done);
1290 init_completion(&lo->lo_bh_done); 1292 init_completion(&lo->lo_bh_done);
1291 lo->lo_number = i; 1293 lo->lo_number = i;
1292 spin_lock_init(&lo->lo_lock); 1294 spin_lock_init(&lo->lo_lock);
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index a71a240611..ed8dca84ff 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -423,6 +423,9 @@ static int dtl1_hci_send_frame(struct sk_buff *skb)
423 nsh.len = skb->len; 423 nsh.len = skb->len;
424 424
425 s = bt_skb_alloc(NSHL + skb->len + 1, GFP_ATOMIC); 425 s = bt_skb_alloc(NSHL + skb->len + 1, GFP_ATOMIC);
426 if (!s)
427 return -ENOMEM;
428
426 skb_reserve(s, NSHL); 429 skb_reserve(s, NSHL);
427 memcpy(skb_put(s, skb->len), skb->data, skb->len); 430 memcpy(skb_put(s, skb->len), skb->data, skb->len);
428 if (skb->len & 0x0001) 431 if (skb->len & 0x0001)
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 63f28d169b..410d70cb76 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -62,6 +62,23 @@ config HW_CONSOLE
62 depends on VT && !S390 && !UML 62 depends on VT && !S390 && !UML
63 default y 63 default y
64 64
65config VT_HW_CONSOLE_BINDING
66 bool "Support for binding and unbinding console drivers"
67 depends on HW_CONSOLE
68 default n
69 ---help---
70 The virtual terminal is the device that interacts with the physical
71 terminal through console drivers. On these systems, at least one
72 console driver is loaded. In other configurations, additional console
73 drivers may be enabled, such as the framebuffer console. If more than
74 1 console driver is enabled, setting this to 'y' will allow you to
75 select the console driver that will serve as the backend for the
76 virtual terminals.
77
78 See <file:Documentation/console/console.txt> for more
79 information. For framebuffer console users, please refer to
80 <file:Documentation/fb/fbcon.txt>.
81
65config SERIAL_NONSTANDARD 82config SERIAL_NONSTANDARD
66 bool "Non-standard serial port support" 83 bool "Non-standard serial port support"
67 ---help--- 84 ---help---
@@ -670,20 +687,7 @@ config NWFLASH
670 687
671 If you're not sure, say N. 688 If you're not sure, say N.
672 689
673config HW_RANDOM 690source "drivers/char/hw_random/Kconfig"
674 tristate "Intel/AMD/VIA HW Random Number Generator support"
675 depends on (X86 || IA64) && PCI
676 ---help---
677 This driver provides kernel-side support for the Random Number
678 Generator hardware found on Intel i8xx-based motherboards,
679 AMD 76x-based motherboards, and Via Nehemiah CPUs.
680
681 Provides a character driver, used to read() entropy data.
682
683 To compile this driver as a module, choose M here: the
684 module will be called hw_random.
685
686 If unsure, say N.
687 691
688config NVRAM 692config NVRAM
689 tristate "/dev/nvram support" 693 tristate "/dev/nvram support"
@@ -935,12 +939,35 @@ config MWAVE
935config SCx200_GPIO 939config SCx200_GPIO
936 tristate "NatSemi SCx200 GPIO Support" 940 tristate "NatSemi SCx200 GPIO Support"
937 depends on SCx200 941 depends on SCx200
942 select NSC_GPIO
938 help 943 help
939 Give userspace access to the GPIO pins on the National 944 Give userspace access to the GPIO pins on the National
940 Semiconductor SCx200 processors. 945 Semiconductor SCx200 processors.
941 946
942 If compiled as a module, it will be called scx200_gpio. 947 If compiled as a module, it will be called scx200_gpio.
943 948
949config PC8736x_GPIO
950 tristate "NatSemi PC8736x GPIO Support"
951 depends on X86
952 default SCx200_GPIO # mostly N
953 select NSC_GPIO # needed for support routines
954 help
955 Give userspace access to the GPIO pins on the National
956 Semiconductor PC-8736x (x=[03456]) SuperIO chip. The chip
957 has multiple functional units, inc several managed by
958 hwmon/pc87360 driver. Tested with PC-87366
959
960 If compiled as a module, it will be called pc8736x_gpio.
961
962config NSC_GPIO
963 tristate "NatSemi Base GPIO Support"
964 # selected by SCx200_GPIO and PC8736x_GPIO
965 # what about 2 selectors differing: m != y
966 help
967 Common support used (and needed) by scx200_gpio and
968 pc8736x_gpio drivers. If those drivers are built as
969 modules, this one will be too, named nsc_gpio
970
944config CS5535_GPIO 971config CS5535_GPIO
945 tristate "AMD CS5535/CS5536 GPIO (Geode Companion Device)" 972 tristate "AMD CS5535/CS5536 GPIO (Geode Companion Device)"
946 depends on X86_32 973 depends on X86_32
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index fb919bfb28..6e0f4469d8 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -75,13 +75,15 @@ endif
75obj-$(CONFIG_TOSHIBA) += toshiba.o 75obj-$(CONFIG_TOSHIBA) += toshiba.o
76obj-$(CONFIG_I8K) += i8k.o 76obj-$(CONFIG_I8K) += i8k.o
77obj-$(CONFIG_DS1620) += ds1620.o 77obj-$(CONFIG_DS1620) += ds1620.o
78obj-$(CONFIG_HW_RANDOM) += hw_random.o 78obj-$(CONFIG_HW_RANDOM) += hw_random/
79obj-$(CONFIG_FTAPE) += ftape/ 79obj-$(CONFIG_FTAPE) += ftape/
80obj-$(CONFIG_COBALT_LCD) += lcd.o 80obj-$(CONFIG_COBALT_LCD) += lcd.o
81obj-$(CONFIG_PPDEV) += ppdev.o 81obj-$(CONFIG_PPDEV) += ppdev.o
82obj-$(CONFIG_NWBUTTON) += nwbutton.o 82obj-$(CONFIG_NWBUTTON) += nwbutton.o
83obj-$(CONFIG_NWFLASH) += nwflash.o 83obj-$(CONFIG_NWFLASH) += nwflash.o
84obj-$(CONFIG_SCx200_GPIO) += scx200_gpio.o 84obj-$(CONFIG_SCx200_GPIO) += scx200_gpio.o
85obj-$(CONFIG_PC8736x_GPIO) += pc8736x_gpio.o
86obj-$(CONFIG_NSC_GPIO) += nsc_gpio.o
85obj-$(CONFIG_CS5535_GPIO) += cs5535_gpio.o 87obj-$(CONFIG_CS5535_GPIO) += cs5535_gpio.o
86obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o 88obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o
87obj-$(CONFIG_TANBAC_TB0219) += tb0219.o 89obj-$(CONFIG_TANBAC_TB0219) += tb0219.o
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index 46685a5407..9826a399fa 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -55,9 +55,9 @@ config AGP_AMD
55 X on AMD Irongate, 761, and 762 chipsets. 55 X on AMD Irongate, 761, and 762 chipsets.
56 56
57config AGP_AMD64 57config AGP_AMD64
58 tristate "AMD Opteron/Athlon64 on-CPU GART support" if !GART_IOMMU 58 tristate "AMD Opteron/Athlon64 on-CPU GART support" if !IOMMU
59 depends on AGP && X86 59 depends on AGP && X86
60 default y if GART_IOMMU 60 default y if IOMMU
61 help 61 help
62 This option gives you AGP support for the GLX component of 62 This option gives you AGP support for the GLX component of
63 X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs. 63 X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs.
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index 1f776651ac..51d0d562d0 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -118,7 +118,7 @@ static int amd_create_gatt_pages(int nr_tables)
118 return retval; 118 return retval;
119} 119}
120 120
121/* Since we don't need contigious memory we just try 121/* Since we don't need contiguous memory we just try
122 * to get the gatt table once 122 * to get the gatt table once
123 */ 123 */
124 124
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index ac3c33a2e3..f690ee8cb7 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -15,11 +15,9 @@
15#include <linux/agp_backend.h> 15#include <linux/agp_backend.h>
16#include <linux/mmzone.h> 16#include <linux/mmzone.h>
17#include <asm/page.h> /* PAGE_SIZE */ 17#include <asm/page.h> /* PAGE_SIZE */
18#include <asm/k8.h>
18#include "agp.h" 19#include "agp.h"
19 20
20/* Will need to be increased if AMD64 ever goes >8-way. */
21#define MAX_HAMMER_GARTS 8
22
23/* PTE bits. */ 21/* PTE bits. */
24#define GPTE_VALID 1 22#define GPTE_VALID 1
25#define GPTE_COHERENT 2 23#define GPTE_COHERENT 2
@@ -53,28 +51,12 @@
53#define ULI_X86_64_HTT_FEA_REG 0x50 51#define ULI_X86_64_HTT_FEA_REG 0x50
54#define ULI_X86_64_ENU_SCR_REG 0x54 52#define ULI_X86_64_ENU_SCR_REG 0x54
55 53
56static int nr_garts;
57static struct pci_dev * hammers[MAX_HAMMER_GARTS];
58
59static struct resource *aperture_resource; 54static struct resource *aperture_resource;
60static int __initdata agp_try_unsupported = 1; 55static int __initdata agp_try_unsupported = 1;
61 56
62#define for_each_nb() for(gart_iterator=0;gart_iterator<nr_garts;gart_iterator++)
63
64static void flush_amd64_tlb(struct pci_dev *dev)
65{
66 u32 tmp;
67
68 pci_read_config_dword (dev, AMD64_GARTCACHECTL, &tmp);
69 tmp |= INVGART;
70 pci_write_config_dword (dev, AMD64_GARTCACHECTL, tmp);
71}
72
73static void amd64_tlbflush(struct agp_memory *temp) 57static void amd64_tlbflush(struct agp_memory *temp)
74{ 58{
75 int gart_iterator; 59 k8_flush_garts();
76 for_each_nb()
77 flush_amd64_tlb(hammers[gart_iterator]);
78} 60}
79 61
80static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type) 62static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
@@ -153,7 +135,7 @@ static int amd64_fetch_size(void)
153 u32 temp; 135 u32 temp;
154 struct aper_size_info_32 *values; 136 struct aper_size_info_32 *values;
155 137
156 dev = hammers[0]; 138 dev = k8_northbridges[0];
157 if (dev==NULL) 139 if (dev==NULL)
158 return 0; 140 return 0;
159 141
@@ -201,9 +183,6 @@ static u64 amd64_configure (struct pci_dev *hammer, u64 gatt_table)
201 tmp &= ~(DISGARTCPU | DISGARTIO); 183 tmp &= ~(DISGARTCPU | DISGARTIO);
202 pci_write_config_dword(hammer, AMD64_GARTAPERTURECTL, tmp); 184 pci_write_config_dword(hammer, AMD64_GARTAPERTURECTL, tmp);
203 185
204 /* keep CPU's coherent. */
205 flush_amd64_tlb (hammer);
206
207 return aper_base; 186 return aper_base;
208} 187}
209 188
@@ -222,13 +201,14 @@ static struct aper_size_info_32 amd_8151_sizes[7] =
222static int amd_8151_configure(void) 201static int amd_8151_configure(void)
223{ 202{
224 unsigned long gatt_bus = virt_to_gart(agp_bridge->gatt_table_real); 203 unsigned long gatt_bus = virt_to_gart(agp_bridge->gatt_table_real);
225 int gart_iterator; 204 int i;
226 205
227 /* Configure AGP regs in each x86-64 host bridge. */ 206 /* Configure AGP regs in each x86-64 host bridge. */
228 for_each_nb() { 207 for (i = 0; i < num_k8_northbridges; i++) {
229 agp_bridge->gart_bus_addr = 208 agp_bridge->gart_bus_addr =
230 amd64_configure(hammers[gart_iterator],gatt_bus); 209 amd64_configure(k8_northbridges[i], gatt_bus);
231 } 210 }
211 k8_flush_garts();
232 return 0; 212 return 0;
233} 213}
234 214
@@ -236,12 +216,13 @@ static int amd_8151_configure(void)
236static void amd64_cleanup(void) 216static void amd64_cleanup(void)
237{ 217{
238 u32 tmp; 218 u32 tmp;
239 int gart_iterator; 219 int i;
240 for_each_nb() { 220 for (i = 0; i < num_k8_northbridges; i++) {
221 struct pci_dev *dev = k8_northbridges[i];
241 /* disable gart translation */ 222 /* disable gart translation */
242 pci_read_config_dword (hammers[gart_iterator], AMD64_GARTAPERTURECTL, &tmp); 223 pci_read_config_dword (dev, AMD64_GARTAPERTURECTL, &tmp);
243 tmp &= ~AMD64_GARTEN; 224 tmp &= ~AMD64_GARTEN;
244 pci_write_config_dword (hammers[gart_iterator], AMD64_GARTAPERTURECTL, tmp); 225 pci_write_config_dword (dev, AMD64_GARTAPERTURECTL, tmp);
245 } 226 }
246} 227}
247 228
@@ -311,7 +292,7 @@ static int __devinit aperture_valid(u64 aper, u32 size)
311/* 292/*
312 * W*s centric BIOS sometimes only set up the aperture in the AGP 293 * W*s centric BIOS sometimes only set up the aperture in the AGP
313 * bridge, not the northbridge. On AMD64 this is handled early 294 * bridge, not the northbridge. On AMD64 this is handled early
314 * in aperture.c, but when GART_IOMMU is not enabled or we run 295 * in aperture.c, but when IOMMU is not enabled or we run
315 * on a 32bit kernel this needs to be redone. 296 * on a 32bit kernel this needs to be redone.
316 * Unfortunately it is impossible to fix the aperture here because it's too late 297 * Unfortunately it is impossible to fix the aperture here because it's too late
317 * to allocate that much memory. But at least error out cleanly instead of 298 * to allocate that much memory. But at least error out cleanly instead of
@@ -361,17 +342,15 @@ static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp,
361 342
362static __devinit int cache_nbs (struct pci_dev *pdev, u32 cap_ptr) 343static __devinit int cache_nbs (struct pci_dev *pdev, u32 cap_ptr)
363{ 344{
364 struct pci_dev *loop_dev = NULL; 345 int i;
365 int i = 0; 346
366 347 if (cache_k8_northbridges() < 0)
367 /* cache pci_devs of northbridges. */ 348 return -ENODEV;
368 while ((loop_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1103, loop_dev)) 349
369 != NULL) { 350 i = 0;
370 if (i == MAX_HAMMER_GARTS) { 351 for (i = 0; i < num_k8_northbridges; i++) {
371 printk(KERN_ERR PFX "Too many northbridges for AGP\n"); 352 struct pci_dev *dev = k8_northbridges[i];
372 return -1; 353 if (fix_northbridge(dev, pdev, cap_ptr) < 0) {
373 }
374 if (fix_northbridge(loop_dev, pdev, cap_ptr) < 0) {
375 printk(KERN_ERR PFX "No usable aperture found.\n"); 354 printk(KERN_ERR PFX "No usable aperture found.\n");
376#ifdef __x86_64__ 355#ifdef __x86_64__
377 /* should port this to i386 */ 356 /* should port this to i386 */
@@ -379,10 +358,8 @@ static __devinit int cache_nbs (struct pci_dev *pdev, u32 cap_ptr)
379#endif 358#endif
380 return -1; 359 return -1;
381 } 360 }
382 hammers[i++] = loop_dev;
383 } 361 }
384 nr_garts = i; 362 return 0;
385 return i == 0 ? -1 : 0;
386} 363}
387 364
388/* Handle AMD 8151 quirks */ 365/* Handle AMD 8151 quirks */
@@ -450,7 +427,7 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
450 } 427 }
451 428
452 /* shadow x86-64 registers into ULi registers */ 429 /* shadow x86-64 registers into ULi registers */
453 pci_read_config_dword (hammers[0], AMD64_GARTAPERTUREBASE, &httfea); 430 pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &httfea);
454 431
455 /* if x86-64 aperture base is beyond 4G, exit here */ 432 /* if x86-64 aperture base is beyond 4G, exit here */
456 if ((httfea & 0x7fff) >> (32 - 25)) 433 if ((httfea & 0x7fff) >> (32 - 25))
@@ -513,7 +490,7 @@ static int __devinit nforce3_agp_init(struct pci_dev *pdev)
513 pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp); 490 pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp);
514 491
515 /* shadow x86-64 registers into NVIDIA registers */ 492 /* shadow x86-64 registers into NVIDIA registers */
516 pci_read_config_dword (hammers[0], AMD64_GARTAPERTUREBASE, &apbase); 493 pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &apbase);
517 494
518 /* if x86-64 aperture base is beyond 4G, exit here */ 495 /* if x86-64 aperture base is beyond 4G, exit here */
519 if ( (apbase & 0x7fff) >> (32 - 25) ) { 496 if ( (apbase & 0x7fff) >> (32 - 25) ) {
@@ -754,10 +731,6 @@ static struct pci_driver agp_amd64_pci_driver = {
754int __init agp_amd64_init(void) 731int __init agp_amd64_init(void)
755{ 732{
756 int err = 0; 733 int err = 0;
757 static struct pci_device_id amd64nb[] = {
758 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
759 { },
760 };
761 734
762 if (agp_off) 735 if (agp_off)
763 return -EINVAL; 736 return -EINVAL;
@@ -774,7 +747,7 @@ int __init agp_amd64_init(void)
774 } 747 }
775 748
776 /* First check that we have at least one AMD64 NB */ 749 /* First check that we have at least one AMD64 NB */
777 if (!pci_dev_present(amd64nb)) 750 if (!pci_dev_present(k8_nb_ids))
778 return -ENODEV; 751 return -ENODEV;
779 752
780 /* Look for any AGP bridge */ 753 /* Look for any AGP bridge */
@@ -802,7 +775,7 @@ static void __exit agp_amd64_cleanup(void)
802 775
803/* On AMD64 the PCI driver needs to initialize this driver early 776/* On AMD64 the PCI driver needs to initialize this driver early
804 for the IOMMU, so it has to be called via a backdoor. */ 777 for the IOMMU, so it has to be called via a backdoor. */
805#ifndef CONFIG_GART_IOMMU 778#ifndef CONFIG_IOMMU
806module_init(agp_amd64_init); 779module_init(agp_amd64_init);
807module_exit(agp_amd64_cleanup); 780module_exit(agp_amd64_cleanup);
808#endif 781#endif
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 06fd10ba0c..1605643459 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -261,7 +261,7 @@ static int agp_ati_suspend(struct pci_dev *dev, pm_message_t state)
261#endif 261#endif
262 262
263/* 263/*
264 *Since we don't need contigious memory we just try 264 *Since we don't need contiguous memory we just try
265 * to get the gatt table once 265 * to get the gatt table once
266 */ 266 */
267 267
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
index 86a966b652..b788b0a3bb 100644
--- a/drivers/char/agp/efficeon-agp.c
+++ b/drivers/char/agp/efficeon-agp.c
@@ -177,7 +177,7 @@ static int efficeon_free_gatt_table(struct agp_bridge_data *bridge)
177 177
178 178
179/* 179/*
180 * Since we don't need contigious memory we just try 180 * Since we don't need contiguous memory we just try
181 * to get the gatt table once 181 * to get the gatt table once
182 */ 182 */
183 183
diff --git a/drivers/char/agp/sgi-agp.c b/drivers/char/agp/sgi-agp.c
index cfa7922cb4..d73be4c2db 100644
--- a/drivers/char/agp/sgi-agp.c
+++ b/drivers/char/agp/sgi-agp.c
@@ -329,9 +329,8 @@ static int __devinit agp_sgi_init(void)
329 329
330static void __devexit agp_sgi_cleanup(void) 330static void __devexit agp_sgi_cleanup(void)
331{ 331{
332 if (sgi_tioca_agp_bridges) 332 kfree(sgi_tioca_agp_bridges);
333 kfree(sgi_tioca_agp_bridges); 333 sgi_tioca_agp_bridges = NULL;
334 sgi_tioca_agp_bridges=NULL;
335} 334}
336 335
337module_init(agp_sgi_init); 336module_init(agp_sgi_init);
diff --git a/drivers/char/drm/drm_memory_debug.h b/drivers/char/drm/drm_memory_debug.h
index 6543b9a14c..d117cc9971 100644
--- a/drivers/char/drm/drm_memory_debug.h
+++ b/drivers/char/drm/drm_memory_debug.h
@@ -43,7 +43,7 @@ typedef struct drm_mem_stats {
43 unsigned long bytes_freed; 43 unsigned long bytes_freed;
44} drm_mem_stats_t; 44} drm_mem_stats_t;
45 45
46static spinlock_t drm_mem_lock = SPIN_LOCK_UNLOCKED; 46static DEFINE_SPINLOCK(drm_mem_lock);
47static unsigned long drm_ram_available = 0; /* In pages */ 47static unsigned long drm_ram_available = 0; /* In pages */
48static unsigned long drm_ram_used = 0; 48static unsigned long drm_ram_used = 0;
49static drm_mem_stats_t drm_mem_stats[] = 49static drm_mem_stats_t drm_mem_stats[] =
diff --git a/drivers/char/drm/via_dmablit.c b/drivers/char/drm/via_dmablit.c
index b7f17457b4..78a81a4a99 100644
--- a/drivers/char/drm/via_dmablit.c
+++ b/drivers/char/drm/via_dmablit.c
@@ -557,7 +557,7 @@ via_init_dmablit(drm_device_t *dev)
557 blitq->num_outstanding = 0; 557 blitq->num_outstanding = 0;
558 blitq->is_active = 0; 558 blitq->is_active = 0;
559 blitq->aborting = 0; 559 blitq->aborting = 0;
560 blitq->blit_lock = SPIN_LOCK_UNLOCKED; 560 spin_lock_init(&blitq->blit_lock);
561 for (j=0; j<VIA_NUM_BLIT_SLOTS; ++j) { 561 for (j=0; j<VIA_NUM_BLIT_SLOTS; ++j) {
562 DRM_INIT_WAITQUEUE(blitq->blit_queue + j); 562 DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
563 } 563 }
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index 9cad8501d6..dc0602ae85 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -80,7 +80,7 @@ static int invalid_lilo_config;
80/* The ISA boards do window flipping into the same spaces so its only sane 80/* The ISA boards do window flipping into the same spaces so its only sane
81 with a single lock. It's still pretty efficient */ 81 with a single lock. It's still pretty efficient */
82 82
83static spinlock_t epca_lock = SPIN_LOCK_UNLOCKED; 83static DEFINE_SPINLOCK(epca_lock);
84 84
85/* ----------------------------------------------------------------------- 85/* -----------------------------------------------------------------------
86 MAXBOARDS is typically 12, but ISA and EISA cards are restricted to 86 MAXBOARDS is typically 12, but ISA and EISA cards are restricted to
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index ac626418b3..d69f2ad9a6 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -117,12 +117,12 @@ __setup("hcheck_reboot", hangcheck_parse_reboot);
117__setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks); 117__setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks);
118#endif /* not MODULE */ 118#endif /* not MODULE */
119 119
120#if defined(CONFIG_X86) || defined(CONFIG_S390) 120#if defined(CONFIG_X86_64) || defined(CONFIG_S390)
121# define HAVE_MONOTONIC 121# define HAVE_MONOTONIC
122# define TIMER_FREQ 1000000000ULL 122# define TIMER_FREQ 1000000000ULL
123#elif defined(CONFIG_IA64) 123#elif defined(CONFIG_IA64)
124# define TIMER_FREQ ((unsigned long long)local_cpu_data->itc_freq) 124# define TIMER_FREQ ((unsigned long long)local_cpu_data->itc_freq)
125#elif defined(CONFIG_PPC64) 125#else
126# define TIMER_FREQ (HZ*loops_per_jiffy) 126# define TIMER_FREQ (HZ*loops_per_jiffy)
127#endif 127#endif
128 128
diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
index 8d97b39112..afa26b65da 100644
--- a/drivers/char/hvcs.c
+++ b/drivers/char/hvcs.c
@@ -1320,11 +1320,12 @@ static struct tty_operations hvcs_ops = {
1320static int hvcs_alloc_index_list(int n) 1320static int hvcs_alloc_index_list(int n)
1321{ 1321{
1322 int i; 1322 int i;
1323
1323 hvcs_index_list = kmalloc(n * sizeof(hvcs_index_count),GFP_KERNEL); 1324 hvcs_index_list = kmalloc(n * sizeof(hvcs_index_count),GFP_KERNEL);
1324 if (!hvcs_index_list) 1325 if (!hvcs_index_list)
1325 return -ENOMEM; 1326 return -ENOMEM;
1326 hvcs_index_count = n; 1327 hvcs_index_count = n;
1327 for(i = 0; i < hvcs_index_count; i++) 1328 for (i = 0; i < hvcs_index_count; i++)
1328 hvcs_index_list[i] = -1; 1329 hvcs_index_list[i] = -1;
1329 return 0; 1330 return 0;
1330} 1331}
@@ -1332,11 +1333,9 @@ static int hvcs_alloc_index_list(int n)
1332static void hvcs_free_index_list(void) 1333static void hvcs_free_index_list(void)
1333{ 1334{
1334 /* Paranoia check to be thorough. */ 1335 /* Paranoia check to be thorough. */
1335 if (hvcs_index_list) { 1336 kfree(hvcs_index_list);
1336 kfree(hvcs_index_list); 1337 hvcs_index_list = NULL;
1337 hvcs_index_list = NULL; 1338 hvcs_index_count = 0;
1338 hvcs_index_count = 0;
1339 }
1340} 1339}
1341 1340
1342static int __init hvcs_module_init(void) 1341static int __init hvcs_module_init(void)
diff --git a/drivers/char/hw_random.c b/drivers/char/hw_random.c
deleted file mode 100644
index 29dc87e590..0000000000
--- a/drivers/char/hw_random.c
+++ /dev/null
@@ -1,698 +0,0 @@
1/*
2 Added support for the AMD Geode LX RNG
3 (c) Copyright 2004-2005 Advanced Micro Devices, Inc.
4
5 derived from
6
7 Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
8 (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
9
10 derived from
11
12 Hardware driver for the AMD 768 Random Number Generator (RNG)
13 (c) Copyright 2001 Red Hat Inc <alan@redhat.com>
14
15 derived from
16
17 Hardware driver for Intel i810 Random Number Generator (RNG)
18 Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
19 Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
20
21 Please read Documentation/hw_random.txt for details on use.
22
23 ----------------------------------------------------------
24 This software may be used and distributed according to the terms
25 of the GNU General Public License, incorporated herein by reference.
26
27 */
28
29
30#include <linux/module.h>
31#include <linux/kernel.h>
32#include <linux/fs.h>
33#include <linux/init.h>
34#include <linux/pci.h>
35#include <linux/interrupt.h>
36#include <linux/spinlock.h>
37#include <linux/random.h>
38#include <linux/miscdevice.h>
39#include <linux/smp_lock.h>
40#include <linux/mm.h>
41#include <linux/delay.h>
42
43#ifdef __i386__
44#include <asm/msr.h>
45#include <asm/cpufeature.h>
46#endif
47
48#include <asm/io.h>
49#include <asm/uaccess.h>
50
51
52/*
53 * core module and version information
54 */
55#define RNG_VERSION "1.0.0"
56#define RNG_MODULE_NAME "hw_random"
57#define RNG_DRIVER_NAME RNG_MODULE_NAME " hardware driver " RNG_VERSION
58#define PFX RNG_MODULE_NAME ": "
59
60
61/*
62 * debugging macros
63 */
64
65/* pr_debug() collapses to a no-op if DEBUG is not defined */
66#define DPRINTK(fmt, args...) pr_debug(PFX "%s: " fmt, __FUNCTION__ , ## args)
67
68
69#undef RNG_NDEBUG /* define to enable lightweight runtime checks */
70#ifdef RNG_NDEBUG
71#define assert(expr) \
72 if(!(expr)) { \
73 printk(KERN_DEBUG PFX "Assertion failed! %s,%s,%s," \
74 "line=%d\n", #expr, __FILE__, __FUNCTION__, __LINE__); \
75 }
76#else
77#define assert(expr)
78#endif
79
80#define RNG_MISCDEV_MINOR 183 /* official */
81
82static int rng_dev_open (struct inode *inode, struct file *filp);
83static ssize_t rng_dev_read (struct file *filp, char __user *buf, size_t size,
84 loff_t * offp);
85
86static int __init intel_init (struct pci_dev *dev);
87static void intel_cleanup(void);
88static unsigned int intel_data_present (void);
89static u32 intel_data_read (void);
90
91static int __init amd_init (struct pci_dev *dev);
92static void amd_cleanup(void);
93static unsigned int amd_data_present (void);
94static u32 amd_data_read (void);
95
96#ifdef __i386__
97static int __init via_init(struct pci_dev *dev);
98static void via_cleanup(void);
99static unsigned int via_data_present (void);
100static u32 via_data_read (void);
101#endif
102
103static int __init geode_init(struct pci_dev *dev);
104static void geode_cleanup(void);
105static unsigned int geode_data_present (void);
106static u32 geode_data_read (void);
107
108struct rng_operations {
109 int (*init) (struct pci_dev *dev);
110 void (*cleanup) (void);
111 unsigned int (*data_present) (void);
112 u32 (*data_read) (void);
113 unsigned int n_bytes; /* number of bytes per ->data_read */
114};
115static struct rng_operations *rng_ops;
116
117static struct file_operations rng_chrdev_ops = {
118 .owner = THIS_MODULE,
119 .open = rng_dev_open,
120 .read = rng_dev_read,
121};
122
123
124static struct miscdevice rng_miscdev = {
125 RNG_MISCDEV_MINOR,
126 RNG_MODULE_NAME,
127 &rng_chrdev_ops,
128};
129
130enum {
131 rng_hw_none,
132 rng_hw_intel,
133 rng_hw_amd,
134#ifdef __i386__
135 rng_hw_via,
136#endif
137 rng_hw_geode,
138};
139
140static struct rng_operations rng_vendor_ops[] = {
141 /* rng_hw_none */
142 { },
143
144 /* rng_hw_intel */
145 { intel_init, intel_cleanup, intel_data_present,
146 intel_data_read, 1 },
147
148 /* rng_hw_amd */
149 { amd_init, amd_cleanup, amd_data_present, amd_data_read, 4 },
150
151#ifdef __i386__
152 /* rng_hw_via */
153 { via_init, via_cleanup, via_data_present, via_data_read, 1 },
154#endif
155
156 /* rng_hw_geode */
157 { geode_init, geode_cleanup, geode_data_present, geode_data_read, 4 }
158};
159
160/*
161 * Data for PCI driver interface
162 *
163 * This data only exists for exporting the supported
164 * PCI ids via MODULE_DEVICE_TABLE. We do not actually
165 * register a pci_driver, because someone else might one day
166 * want to register another driver on the same PCI id.
167 */
168static struct pci_device_id rng_pci_tbl[] = {
169 { 0x1022, 0x7443, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_amd },
170 { 0x1022, 0x746b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_amd },
171
172 { 0x8086, 0x2418, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel },
173 { 0x8086, 0x2428, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel },
174 { 0x8086, 0x2430, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel },
175 { 0x8086, 0x2448, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel },
176 { 0x8086, 0x244e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel },
177 { 0x8086, 0x245e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel },
178
179 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LX_AES,
180 PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_geode },
181
182 { 0, }, /* terminate list */
183};
184MODULE_DEVICE_TABLE (pci, rng_pci_tbl);
185
186
187/***********************************************************************
188 *
189 * Intel RNG operations
190 *
191 */
192
193/*
194 * RNG registers (offsets from rng_mem)
195 */
196#define INTEL_RNG_HW_STATUS 0
197#define INTEL_RNG_PRESENT 0x40
198#define INTEL_RNG_ENABLED 0x01
199#define INTEL_RNG_STATUS 1
200#define INTEL_RNG_DATA_PRESENT 0x01
201#define INTEL_RNG_DATA 2
202
203/*
204 * Magic address at which Intel PCI bridges locate the RNG
205 */
206#define INTEL_RNG_ADDR 0xFFBC015F
207#define INTEL_RNG_ADDR_LEN 3
208
209/* token to our ioremap'd RNG register area */
210static void __iomem *rng_mem;
211
212static inline u8 intel_hwstatus (void)
213{
214 assert (rng_mem != NULL);
215 return readb (rng_mem + INTEL_RNG_HW_STATUS);
216}
217
218static inline u8 intel_hwstatus_set (u8 hw_status)
219{
220 assert (rng_mem != NULL);
221 writeb (hw_status, rng_mem + INTEL_RNG_HW_STATUS);
222 return intel_hwstatus ();
223}
224
225static unsigned int intel_data_present(void)
226{
227 assert (rng_mem != NULL);
228
229 return (readb (rng_mem + INTEL_RNG_STATUS) & INTEL_RNG_DATA_PRESENT) ?
230 1 : 0;
231}
232
233static u32 intel_data_read(void)
234{
235 assert (rng_mem != NULL);
236
237 return readb (rng_mem + INTEL_RNG_DATA);
238}
239
240static int __init intel_init (struct pci_dev *dev)
241{
242 int rc;
243 u8 hw_status;
244
245 DPRINTK ("ENTER\n");
246
247 rng_mem = ioremap (INTEL_RNG_ADDR, INTEL_RNG_ADDR_LEN);
248 if (rng_mem == NULL) {
249 printk (KERN_ERR PFX "cannot ioremap RNG Memory\n");
250 rc = -EBUSY;
251 goto err_out;
252 }
253
254 /* Check for Intel 82802 */
255 hw_status = intel_hwstatus ();
256 if ((hw_status & INTEL_RNG_PRESENT) == 0) {
257 printk (KERN_ERR PFX "RNG not detected\n");
258 rc = -ENODEV;
259 goto err_out_free_map;
260 }
261
262 /* turn RNG h/w on, if it's off */
263 if ((hw_status & INTEL_RNG_ENABLED) == 0)
264 hw_status = intel_hwstatus_set (hw_status | INTEL_RNG_ENABLED);
265 if ((hw_status & INTEL_RNG_ENABLED) == 0) {
266 printk (KERN_ERR PFX "cannot enable RNG, aborting\n");
267 rc = -EIO;
268 goto err_out_free_map;
269 }
270
271 DPRINTK ("EXIT, returning 0\n");
272 return 0;
273
274err_out_free_map:
275 iounmap (rng_mem);
276 rng_mem = NULL;
277err_out:
278 DPRINTK ("EXIT, returning %d\n", rc);
279 return rc;
280}
281
282static void intel_cleanup(void)
283{
284 u8 hw_status;
285
286 hw_status = intel_hwstatus ();
287 if (hw_status & INTEL_RNG_ENABLED)
288 intel_hwstatus_set (hw_status & ~INTEL_RNG_ENABLED);
289 else
290 printk(KERN_WARNING PFX "unusual: RNG already disabled\n");
291 iounmap(rng_mem);
292 rng_mem = NULL;
293}
294
295/***********************************************************************
296 *
297 * AMD RNG operations
298 *
299 */
300
301static u32 pmbase; /* PMxx I/O base */
302static struct pci_dev *amd_dev;
303
304static unsigned int amd_data_present (void)
305{
306 return inl(pmbase + 0xF4) & 1;
307}
308
309
310static u32 amd_data_read (void)
311{
312 return inl(pmbase + 0xF0);
313}
314
315static int __init amd_init (struct pci_dev *dev)
316{
317 int rc;
318 u8 rnen;
319
320 DPRINTK ("ENTER\n");
321
322 pci_read_config_dword(dev, 0x58, &pmbase);
323
324 pmbase &= 0x0000FF00;
325
326 if (pmbase == 0)
327 {
328 printk (KERN_ERR PFX "power management base not set\n");
329 rc = -EIO;
330 goto err_out;
331 }
332
333 pci_read_config_byte(dev, 0x40, &rnen);
334 rnen |= (1 << 7); /* RNG on */
335 pci_write_config_byte(dev, 0x40, rnen);
336
337 pci_read_config_byte(dev, 0x41, &rnen);
338 rnen |= (1 << 7); /* PMIO enable */
339 pci_write_config_byte(dev, 0x41, rnen);
340
341 pr_info( PFX "AMD768 system management I/O registers at 0x%X.\n",
342 pmbase);
343
344 amd_dev = dev;
345
346 DPRINTK ("EXIT, returning 0\n");
347 return 0;
348
349err_out:
350 DPRINTK ("EXIT, returning %d\n", rc);
351 return rc;
352}
353
354static void amd_cleanup(void)
355{
356 u8 rnen;
357
358 pci_read_config_byte(amd_dev, 0x40, &rnen);
359 rnen &= ~(1 << 7); /* RNG off */
360 pci_write_config_byte(amd_dev, 0x40, rnen);
361
362 /* FIXME: twiddle pmio, also? */
363}
364
365#ifdef __i386__
366/***********************************************************************
367 *
368 * VIA RNG operations
369 *
370 */
371
372enum {
373 VIA_STRFILT_CNT_SHIFT = 16,
374 VIA_STRFILT_FAIL = (1 << 15),
375 VIA_STRFILT_ENABLE = (1 << 14),
376 VIA_RAWBITS_ENABLE = (1 << 13),
377 VIA_RNG_ENABLE = (1 << 6),
378 VIA_XSTORE_CNT_MASK = 0x0F,
379
380 VIA_RNG_CHUNK_8 = 0x00, /* 64 rand bits, 64 stored bits */
381 VIA_RNG_CHUNK_4 = 0x01, /* 32 rand bits, 32 stored bits */
382 VIA_RNG_CHUNK_4_MASK = 0xFFFFFFFF,
383 VIA_RNG_CHUNK_2 = 0x02, /* 16 rand bits, 32 stored bits */
384 VIA_RNG_CHUNK_2_MASK = 0xFFFF,
385 VIA_RNG_CHUNK_1 = 0x03, /* 8 rand bits, 32 stored bits */
386 VIA_RNG_CHUNK_1_MASK = 0xFF,
387};
388
389static u32 via_rng_datum;
390
391/*
392 * Investigate using the 'rep' prefix to obtain 32 bits of random data
393 * in one insn. The upside is potentially better performance. The
394 * downside is that the instruction becomes no longer atomic. Due to
395 * this, just like familiar issues with /dev/random itself, the worst
396 * case of a 'rep xstore' could potentially pause a cpu for an
397 * unreasonably long time. In practice, this condition would likely
398 * only occur when the hardware is failing. (or so we hope :))
399 *
400 * Another possible performance boost may come from simply buffering
401 * until we have 4 bytes, thus returning a u32 at a time,
402 * instead of the current u8-at-a-time.
403 */
404
405static inline u32 xstore(u32 *addr, u32 edx_in)
406{
407 u32 eax_out;
408
409 asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */"
410 :"=m"(*addr), "=a"(eax_out)
411 :"D"(addr), "d"(edx_in));
412
413 return eax_out;
414}
415
416static unsigned int via_data_present(void)
417{
418 u32 bytes_out;
419
420 /* We choose the recommended 1-byte-per-instruction RNG rate,
421 * for greater randomness at the expense of speed. Larger
422 * values 2, 4, or 8 bytes-per-instruction yield greater
423 * speed at lesser randomness.
424 *
425 * If you change this to another VIA_CHUNK_n, you must also
426 * change the ->n_bytes values in rng_vendor_ops[] tables.
427 * VIA_CHUNK_8 requires further code changes.
428 *
429 * A copy of MSR_VIA_RNG is placed in eax_out when xstore
430 * completes.
431 */
432 via_rng_datum = 0; /* paranoia, not really necessary */
433 bytes_out = xstore(&via_rng_datum, VIA_RNG_CHUNK_1) & VIA_XSTORE_CNT_MASK;
434 if (bytes_out == 0)
435 return 0;
436
437 return 1;
438}
439
440static u32 via_data_read(void)
441{
442 return via_rng_datum;
443}
444
445static int __init via_init(struct pci_dev *dev)
446{
447 u32 lo, hi, old_lo;
448
449 /* Control the RNG via MSR. Tread lightly and pay very close
450 * close attention to values written, as the reserved fields
451 * are documented to be "undefined and unpredictable"; but it
452 * does not say to write them as zero, so I make a guess that
453 * we restore the values we find in the register.
454 */
455 rdmsr(MSR_VIA_RNG, lo, hi);
456
457 old_lo = lo;
458 lo &= ~(0x7f << VIA_STRFILT_CNT_SHIFT);
459 lo &= ~VIA_XSTORE_CNT_MASK;
460 lo &= ~(VIA_STRFILT_ENABLE | VIA_STRFILT_FAIL | VIA_RAWBITS_ENABLE);
461 lo |= VIA_RNG_ENABLE;
462
463 if (lo != old_lo)
464 wrmsr(MSR_VIA_RNG, lo, hi);
465
466 /* perhaps-unnecessary sanity check; remove after testing if
467 unneeded */
468 rdmsr(MSR_VIA_RNG, lo, hi);
469 if ((lo & VIA_RNG_ENABLE) == 0) {
470 printk(KERN_ERR PFX "cannot enable VIA C3 RNG, aborting\n");
471 return -ENODEV;
472 }
473
474 return 0;
475}
476
477static void via_cleanup(void)
478{
479 /* do nothing */
480}
481#endif
482
483/***********************************************************************
484 *
485 * AMD Geode RNG operations
486 *
487 */
488
489static void __iomem *geode_rng_base = NULL;
490
491#define GEODE_RNG_DATA_REG 0x50
492#define GEODE_RNG_STATUS_REG 0x54
493
494static u32 geode_data_read(void)
495{
496 u32 val;
497
498 assert(geode_rng_base != NULL);
499 val = readl(geode_rng_base + GEODE_RNG_DATA_REG);
500 return val;
501}
502
503static unsigned int geode_data_present(void)
504{
505 u32 val;
506
507 assert(geode_rng_base != NULL);
508 val = readl(geode_rng_base + GEODE_RNG_STATUS_REG);
509 return val;
510}
511
512static void geode_cleanup(void)
513{
514 iounmap(geode_rng_base);
515 geode_rng_base = NULL;
516}
517
518static int geode_init(struct pci_dev *dev)
519{
520 unsigned long rng_base = pci_resource_start(dev, 0);
521
522 if (rng_base == 0)
523 return 1;
524
525 geode_rng_base = ioremap(rng_base, 0x58);
526
527 if (geode_rng_base == NULL) {
528 printk(KERN_ERR PFX "Cannot ioremap RNG memory\n");
529 return -EBUSY;
530 }
531
532 return 0;
533}
534
535/***********************************************************************
536 *
537 * /dev/hwrandom character device handling (major 10, minor 183)
538 *
539 */
540
541static int rng_dev_open (struct inode *inode, struct file *filp)
542{
543 /* enforce read-only access to this chrdev */
544 if ((filp->f_mode & FMODE_READ) == 0)
545 return -EINVAL;
546 if (filp->f_mode & FMODE_WRITE)
547 return -EINVAL;
548
549 return 0;
550}
551
552
/* Read random bytes from the current RNG into the user buffer.
 * Harvests one hardware word at a time under a spinlock, copies it out
 * LSB-first, and either returns immediately (O_NONBLOCK) or paces itself
 * while waiting for fresh entropy.  Returns bytes copied, or a negative
 * errno only if nothing was copied. */
static ssize_t rng_dev_read (struct file *filp, char __user *buf, size_t size,
			     loff_t * offp)
{
	/* Serializes hardware access across concurrent readers. */
	static DEFINE_SPINLOCK(rng_lock);
	unsigned int have_data;
	u32 data = 0;
	ssize_t ret = 0;

	while (size) {
		spin_lock(&rng_lock);

		have_data = 0;
		if (rng_ops->data_present()) {
			data = rng_ops->data_read();
			have_data = rng_ops->n_bytes;
		}

		spin_unlock (&rng_lock);

		/* Copy the harvested word to userspace, low byte first. */
		while (have_data && size) {
			if (put_user((u8)data, buf++)) {
				/* report -EFAULT only if nothing was copied */
				ret = ret ? : -EFAULT;
				break;
			}
			size--;
			ret++;
			have_data--;
			data>>=8;
		}

		if (filp->f_flags & O_NONBLOCK)
			return ret ? : -EAGAIN;

		/* Give the hardware time to gather new entropy before retrying. */
		if(need_resched())
			schedule_timeout_interruptible(1);
		else
			udelay(200); /* FIXME: We could poll for 250uS ?? */

		if (signal_pending (current))
			return ret ? : -ERESTARTSYS;
	}
	return ret;
}
596
597
598
599/*
600 * rng_init_one - look for and attempt to init a single RNG
601 */
static int __init rng_init_one (struct pci_dev *dev)
{
	int rc;

	DPRINTK ("ENTER\n");

	/* rng_ops was selected by the probe loop in rng_init(). */
	assert(rng_ops != NULL);

	/* Bring up the hardware first... */
	rc = rng_ops->init(dev);
	if (rc)
		goto err_out;

	/* ...then expose it as the misc chrdev (major 10, minor 183). */
	rc = misc_register (&rng_miscdev);
	if (rc) {
		printk (KERN_ERR PFX "misc device register failed\n");
		goto err_out_cleanup_hw;
	}

	DPRINTK ("EXIT, returning 0\n");
	return 0;

err_out_cleanup_hw:
	/* Undo the hardware init if chrdev registration failed. */
	rng_ops->cleanup();
err_out:
	DPRINTK ("EXIT, returning %d\n", rc);
	return rc;
}
629
630
631
632MODULE_AUTHOR("The Linux Kernel team");
633MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
634MODULE_LICENSE("GPL");
635
636
637/*
638 * rng_init - initialize RNG module
639 */
static int __init rng_init (void)
{
	int rc;
	struct pci_dev *pdev = NULL;
	const struct pci_device_id *ent;

	DPRINTK ("ENTER\n");

	/* Probe for Intel, AMD, Geode RNGs: first PCI id match wins and
	 * selects the corresponding vendor ops table. */
	for_each_pci_dev(pdev) {
		ent = pci_match_id(rng_pci_tbl, pdev);
		if (ent) {
			rng_ops = &rng_vendor_ops[ent->driver_data];
			goto match;
		}
	}

#ifdef __i386__
	/* Probe for VIA RNG: detected via the xstore CPU feature, not PCI,
	 * so no pci_dev is passed to the init hook. */
	if (cpu_has_xstore) {
		rng_ops = &rng_vendor_ops[rng_hw_via];
		pdev = NULL;
		goto match;
	}
#endif

	DPRINTK ("EXIT, returning -ENODEV\n");
	return -ENODEV;

match:
	rc = rng_init_one (pdev);
	if (rc)
		return rc;

	pr_info( RNG_DRIVER_NAME " loaded\n");

	DPRINTK ("EXIT, returning 0\n");
	return 0;
}
679
680
681/*
682 * rng_cleanup - shutdown RNG module
683 */
static void __exit rng_cleanup (void)
{
	DPRINTK ("ENTER\n");

	/* Remove the chrdev first so no new readers can race the
	 * hardware teardown below. */
	misc_deregister (&rng_miscdev);

	/* cleanup hook is optional (NULL for some vendors). */
	if (rng_ops->cleanup)
		rng_ops->cleanup();

	DPRINTK ("EXIT\n");
}
695
696
697module_init (rng_init);
698module_exit (rng_cleanup);
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
new file mode 100644
index 0000000000..9f7635f751
--- /dev/null
+++ b/drivers/char/hw_random/Kconfig
@@ -0,0 +1,90 @@
1#
2# Hardware Random Number Generator (RNG) configuration
3#
4
5config HW_RANDOM
6 bool "Hardware Random Number Generator Core support"
7 default y
8 ---help---
9 Hardware Random Number Generator Core infrastructure.
10
11 If unsure, say Y.
12
13config HW_RANDOM_INTEL
14 tristate "Intel HW Random Number Generator support"
15 depends on HW_RANDOM && (X86 || IA64) && PCI
16 default y
17 ---help---
18 This driver provides kernel-side support for the Random Number
19 Generator hardware found on Intel i8xx-based motherboards.
20
21 To compile this driver as a module, choose M here: the
22 module will be called intel-rng.
23
24 If unsure, say Y.
25
26config HW_RANDOM_AMD
27 tristate "AMD HW Random Number Generator support"
28 depends on HW_RANDOM && X86 && PCI
29 default y
30 ---help---
31 This driver provides kernel-side support for the Random Number
32 Generator hardware found on AMD 76x-based motherboards.
33
34 To compile this driver as a module, choose M here: the
35 module will be called amd-rng.
36
37 If unsure, say Y.
38
39config HW_RANDOM_GEODE
40 tristate "AMD Geode HW Random Number Generator support"
41 depends on HW_RANDOM && X86 && PCI
42 default y
43 ---help---
44 This driver provides kernel-side support for the Random Number
45 Generator hardware found on the AMD Geode LX.
46
47 To compile this driver as a module, choose M here: the
48 module will be called geode-rng.
49
50 If unsure, say Y.
51
52config HW_RANDOM_VIA
53 tristate "VIA HW Random Number Generator support"
54 depends on HW_RANDOM && X86_32
55 default y
56 ---help---
57 This driver provides kernel-side support for the Random Number
58 Generator hardware found on VIA based motherboards.
59
60 To compile this driver as a module, choose M here: the
61 module will be called via-rng.
62
63 If unsure, say Y.
64
65config HW_RANDOM_IXP4XX
66 tristate "Intel IXP4xx NPU HW Random Number Generator support"
67 depends on HW_RANDOM && ARCH_IXP4XX
68 default y
69 ---help---
70 This driver provides kernel-side support for the Random
71 Number Generator hardware found on the Intel IXP4xx NPU.
72
73 To compile this driver as a module, choose M here: the
74 module will be called ixp4xx-rng.
75
76 If unsure, say Y.
77
78config HW_RANDOM_OMAP
79 tristate "OMAP Random Number Generator support"
80 depends on HW_RANDOM && (ARCH_OMAP16XX || ARCH_OMAP24XX)
81 default y
82 ---help---
83 This driver provides kernel-side support for the Random Number
84 Generator hardware found on OMAP16xx and OMAP24xx multimedia
85 processors.
86
87 To compile this driver as a module, choose M here: the
88 module will be called omap-rng.
89
90 If unsure, say Y.
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
new file mode 100644
index 0000000000..e263ae96f9
--- /dev/null
+++ b/drivers/char/hw_random/Makefile
@@ -0,0 +1,11 @@
1#
2# Makefile for HW Random Number Generator (RNG) device drivers.
3#
4
5obj-$(CONFIG_HW_RANDOM) += core.o
6obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
7obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
8obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
9obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o
10obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o
11obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
new file mode 100644
index 0000000000..71e4e0f3fd
--- /dev/null
+++ b/drivers/char/hw_random/amd-rng.c
@@ -0,0 +1,152 @@
1/*
2 * RNG driver for AMD RNGs
3 *
4 * Copyright 2005 (c) MontaVista Software, Inc.
5 *
6 * with the majority of the code coming from:
7 *
8 * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
9 * (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
10 *
11 * derived from
12 *
13 * Hardware driver for the AMD 768 Random Number Generator (RNG)
14 * (c) Copyright 2001 Red Hat Inc <alan@redhat.com>
15 *
16 * derived from
17 *
18 * Hardware driver for Intel i810 Random Number Generator (RNG)
19 * Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
20 * Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
21 *
22 * This file is licensed under the terms of the GNU General Public
23 * License version 2. This program is licensed "as is" without any
24 * warranty of any kind, whether express or implied.
25 */
26
27#include <linux/module.h>
28#include <linux/kernel.h>
29#include <linux/pci.h>
30#include <linux/hw_random.h>
31#include <asm/io.h>
32
33
34#define PFX KBUILD_MODNAME ": "
35
36
37/*
38 * Data for PCI driver interface
39 *
40 * This data only exists for exporting the supported
41 * PCI ids via MODULE_DEVICE_TABLE. We do not actually
42 * register a pci_driver, because someone else might one day
43 * want to register another driver on the same PCI id.
44 */
45static const struct pci_device_id pci_tbl[] = {
46 { 0x1022, 0x7443, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
47 { 0x1022, 0x746b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
48 { 0, }, /* terminate list */
49};
50MODULE_DEVICE_TABLE(pci, pci_tbl);
51
52static struct pci_dev *amd_pdev;
53
54
55static int amd_rng_data_present(struct hwrng *rng)
56{
57 u32 pmbase = (u32)rng->priv;
58
59 return !!(inl(pmbase + 0xF4) & 1);
60}
61
62static int amd_rng_data_read(struct hwrng *rng, u32 *data)
63{
64 u32 pmbase = (u32)rng->priv;
65
66 *data = inl(pmbase + 0xF0);
67
68 return 4;
69}
70
/* Enable the AMD 76x RNG via PCI config space. */
static int amd_rng_init(struct hwrng *rng)
{
	u8 rnen;

	pci_read_config_byte(amd_pdev, 0x40, &rnen);
	rnen |= (1 << 7);	/* RNG on */
	pci_write_config_byte(amd_pdev, 0x40, rnen);

	/* The RNG registers live in PM I/O space; make sure it's decoded. */
	pci_read_config_byte(amd_pdev, 0x41, &rnen);
	rnen |= (1 << 7);	/* PMIO enable */
	pci_write_config_byte(amd_pdev, 0x41, rnen);

	return 0;
}

/* Disable the RNG again; PMIO decode is left enabled since other
 * PM functions may depend on it. */
static void amd_rng_cleanup(struct hwrng *rng)
{
	u8 rnen;

	pci_read_config_byte(amd_pdev, 0x40, &rnen);
	rnen &= ~(1 << 7);	/* RNG off */
	pci_write_config_byte(amd_pdev, 0x40, rnen);
}
94
95
/* hwrng backend descriptor registered with the hw_random core. */
static struct hwrng amd_rng = {
	.name		= "amd",
	.init		= amd_rng_init,
	.cleanup	= amd_rng_cleanup,
	.data_present	= amd_rng_data_present,
	.data_read	= amd_rng_data_read,
};
103
104
/* Locate a supported AMD chipset, extract its PM I/O base from config
 * register 0x58, and register the hwrng backend. */
static int __init mod_init(void)
{
	int err = -ENODEV;
	struct pci_dev *pdev = NULL;
	const struct pci_device_id *ent;
	u32 pmbase;

	for_each_pci_dev(pdev) {
		ent = pci_match_id(pci_tbl, pdev);
		if (ent)
			goto found;
	}
	/* Device not found. */
	goto out;

found:
	/* NOTE(review): on failure this is a positive PCIBIOS_* code, not a
	 * negative errno — confirm callers tolerate that. */
	err = pci_read_config_dword(pdev, 0x58, &pmbase);
	if (err)
		goto out;
	err = -EIO;
	/* PM base is in bits 15:8 of config dword 0x58. */
	pmbase &= 0x0000FF00;
	if (pmbase == 0)
		goto out;
	amd_rng.priv = (unsigned long)pmbase;
	amd_pdev = pdev;

	printk(KERN_INFO "AMD768 RNG detected\n");
	err = hwrng_register(&amd_rng);
	if (err) {
		printk(KERN_ERR PFX "RNG registering failed (%d)\n",
		       err);
		goto out;
	}
out:
	return err;
}
141
/* Module teardown: deregister from the hw_random core, which invokes
 * amd_rng_cleanup() if this RNG is the current one. */
static void __exit mod_exit(void)
{
	hwrng_unregister(&amd_rng);
}
146
147subsys_initcall(mod_init);
148module_exit(mod_exit);
149
150MODULE_AUTHOR("The Linux Kernel team");
151MODULE_DESCRIPTION("H/W RNG driver for AMD chipsets");
152MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
new file mode 100644
index 0000000000..88b026639f
--- /dev/null
+++ b/drivers/char/hw_random/core.c
@@ -0,0 +1,354 @@
1/*
2 Added support for the AMD Geode LX RNG
3 (c) Copyright 2004-2005 Advanced Micro Devices, Inc.
4
5 derived from
6
7 Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
8 (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
9
10 derived from
11
12 Hardware driver for the AMD 768 Random Number Generator (RNG)
13 (c) Copyright 2001 Red Hat Inc <alan@redhat.com>
14
15 derived from
16
17 Hardware driver for Intel i810 Random Number Generator (RNG)
18 Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
19 Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
20
21 Added generic RNG API
22 Copyright 2006 Michael Buesch <mbuesch@freenet.de>
23 Copyright 2005 (c) MontaVista Software, Inc.
24
25 Please read Documentation/hw_random.txt for details on use.
26
27 ----------------------------------------------------------
28 This software may be used and distributed according to the terms
29 of the GNU General Public License, incorporated herein by reference.
30
31 */
32
33
34#include <linux/device.h>
35#include <linux/hw_random.h>
36#include <linux/module.h>
37#include <linux/kernel.h>
38#include <linux/fs.h>
39#include <linux/init.h>
40#include <linux/miscdevice.h>
41#include <linux/delay.h>
42#include <asm/uaccess.h>
43
44
45#define RNG_MODULE_NAME "hw_random"
46#define PFX RNG_MODULE_NAME ": "
47#define RNG_MISCDEV_MINOR 183 /* official */
48
49
50static struct hwrng *current_rng;
51static LIST_HEAD(rng_list);
52static DEFINE_MUTEX(rng_mutex);
53
54
55static inline int hwrng_init(struct hwrng *rng)
56{
57 if (!rng->init)
58 return 0;
59 return rng->init(rng);
60}
61
62static inline void hwrng_cleanup(struct hwrng *rng)
63{
64 if (rng && rng->cleanup)
65 rng->cleanup(rng);
66}
67
68static inline int hwrng_data_present(struct hwrng *rng)
69{
70 if (!rng->data_present)
71 return 1;
72 return rng->data_present(rng);
73}
74
75static inline int hwrng_data_read(struct hwrng *rng, u32 *data)
76{
77 return rng->data_read(rng, data);
78}
79
80
81static int rng_dev_open(struct inode *inode, struct file *filp)
82{
83 /* enforce read-only access to this chrdev */
84 if ((filp->f_mode & FMODE_READ) == 0)
85 return -EINVAL;
86 if (filp->f_mode & FMODE_WRITE)
87 return -EINVAL;
88 return 0;
89}
90
/* Read random bytes from the currently selected RNG.
 * Each iteration takes rng_mutex, polls for data (bounded poll unless
 * O_NONBLOCK), reads one hardware word, and copies it out LSB-first.
 * Returns bytes copied, or a negative errno only if nothing was copied. */
static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
{
	u32 data;
	ssize_t ret = 0;
	int i, err = 0;
	int data_present;
	int bytes_read;

	while (size) {
		err = -ERESTARTSYS;
		if (mutex_lock_interruptible(&rng_mutex))
			goto out;
		/* The current RNG may have been unregistered under us. */
		if (!current_rng) {
			mutex_unlock(&rng_mutex);
			err = -ENODEV;
			goto out;
		}
		if (filp->f_flags & O_NONBLOCK) {
			data_present = hwrng_data_present(current_rng);
		} else {
			/* Some RNG require some time between data_reads to gather
			 * new entropy. Poll it.
			 */
			for (i = 0; i < 20; i++) {
				data_present = hwrng_data_present(current_rng);
				if (data_present)
					break;
				udelay(10);
			}
		}
		bytes_read = 0;
		if (data_present)
			bytes_read = hwrng_data_read(current_rng, &data);
		mutex_unlock(&rng_mutex);

		err = -EAGAIN;
		if (!bytes_read && (filp->f_flags & O_NONBLOCK))
			goto out;

		/* Copy the harvested word to userspace, low byte first. */
		err = -EFAULT;
		while (bytes_read && size) {
			if (put_user((u8)data, buf++))
				goto out;
			size--;
			ret++;
			bytes_read--;
			data >>= 8;
		}

		if (need_resched())
			schedule_timeout_interruptible(1);
		err = -ERESTARTSYS;
		if (signal_pending(current))
			goto out;
	}
out:
	/* Partial reads succeed; err only matters when ret == 0. */
	return ret ? : err;
}
150
151
/* File operations for /dev/hw_random (read-only; no seek/ioctl). */
static struct file_operations rng_chrdev_ops = {
	.owner		= THIS_MODULE,
	.open		= rng_dev_open,
	.read		= rng_dev_read,
};

/* Misc character device: official minor 183. */
static struct miscdevice rng_miscdev = {
	.minor		= RNG_MISCDEV_MINOR,
	.name		= RNG_MODULE_NAME,
	.fops		= &rng_chrdev_ops,
};
163
164
/* sysfs store for "rng_current": switch the active RNG to the one whose
 * name matches buf.  Initializes the new RNG before tearing down the old
 * one, so a failed init leaves the previous RNG in place. */
static ssize_t hwrng_attr_current_store(struct class_device *class,
					const char *buf, size_t len)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	err = -ENODEV;
	list_for_each_entry(rng, &rng_list, list) {
		if (strcmp(rng->name, buf) == 0) {
			/* Already current: nothing to do. */
			if (rng == current_rng) {
				err = 0;
				break;
			}
			err = hwrng_init(rng);
			if (err)
				break;
			hwrng_cleanup(current_rng);
			current_rng = rng;
			err = 0;
			break;
		}
	}
	mutex_unlock(&rng_mutex);

	/* Per sysfs convention, report the full length on success. */
	return err ? : len;
}
194
195static ssize_t hwrng_attr_current_show(struct class_device *class,
196 char *buf)
197{
198 int err;
199 ssize_t ret;
200 const char *name = "none";
201
202 err = mutex_lock_interruptible(&rng_mutex);
203 if (err)
204 return -ERESTARTSYS;
205 if (current_rng)
206 name = current_rng->name;
207 ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
208 mutex_unlock(&rng_mutex);
209
210 return ret;
211}
212
/* sysfs show for "rng_available": space-separated names of all
 * registered RNGs, newline-terminated.  ret tracks the logical length
 * so the strncat bounds shrink as the buffer fills. */
static ssize_t hwrng_attr_available_show(struct class_device *class,
					 char *buf)
{
	int err;
	ssize_t ret = 0;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	buf[0] = '\0';
	list_for_each_entry(rng, &rng_list, list) {
		strncat(buf, rng->name, PAGE_SIZE - ret - 1);
		ret += strlen(rng->name);
		strncat(buf, " ", PAGE_SIZE - ret - 1);
		ret++;
	}
	strncat(buf, "\n", PAGE_SIZE - ret - 1);
	ret++;
	mutex_unlock(&rng_mutex);

	return ret;
}
236
/* sysfs attributes on the misc device's class device:
 * rng_current (rw) — name of the RNG backing /dev/hw_random;
 * rng_available (ro) — list of registered RNG names. */
static CLASS_DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
			 hwrng_attr_current_show,
			 hwrng_attr_current_store);
static CLASS_DEVICE_ATTR(rng_available, S_IRUGO,
			 hwrng_attr_available_show,
			 NULL);
243
244
/* Remove the sysfs attributes, then the misc device itself. */
static void unregister_miscdev(void)
{
	class_device_remove_file(rng_miscdev.class,
				 &class_device_attr_rng_available);
	class_device_remove_file(rng_miscdev.class,
				 &class_device_attr_rng_current);
	misc_deregister(&rng_miscdev);
}

/* Create the misc device plus its two sysfs attributes, unwinding in
 * reverse order on failure. */
static int register_miscdev(void)
{
	int err;

	err = misc_register(&rng_miscdev);
	if (err)
		goto out;
	err = class_device_create_file(rng_miscdev.class,
				       &class_device_attr_rng_current);
	if (err)
		goto err_misc_dereg;
	err = class_device_create_file(rng_miscdev.class,
				       &class_device_attr_rng_available);
	if (err)
		goto err_remove_current;
out:
	return err;

err_remove_current:
	class_device_remove_file(rng_miscdev.class,
				 &class_device_attr_rng_current);
err_misc_dereg:
	misc_deregister(&rng_miscdev);
	goto out;
}
279
/* Register a hardware RNG backend with the core.
 * The first registered RNG becomes current and creates /dev/hw_random.
 * Returns 0, -EINVAL (missing name/data_read), -EEXIST (duplicate name),
 * or an init/registration error. */
int hwrng_register(struct hwrng *rng)
{
	int must_register_misc;
	int err = -EINVAL;
	struct hwrng *old_rng, *tmp;

	/* A usable backend needs at least a name and a data_read hook. */
	if (rng->name == NULL ||
	    rng->data_read == NULL)
		goto out;

	mutex_lock(&rng_mutex);

	/* Must not register two RNGs with the same name. */
	err = -EEXIST;
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)
			goto out_unlock;
	}

	/* First RNG: initialize it, make it current, create the chrdev. */
	must_register_misc = (current_rng == NULL);
	old_rng = current_rng;
	if (!old_rng) {
		err = hwrng_init(rng);
		if (err)
			goto out_unlock;
		current_rng = rng;
	}
	err = 0;
	if (must_register_misc) {
		err = register_miscdev();
		if (err) {
			/* Roll back: this RNG never became visible. */
			if (!old_rng) {
				hwrng_cleanup(rng);
				current_rng = NULL;
			}
			goto out_unlock;
		}
	}
	INIT_LIST_HEAD(&rng->list);
	list_add_tail(&rng->list, &rng_list);
out_unlock:
	mutex_unlock(&rng_mutex);
out:
	return err;
}
325EXPORT_SYMBOL_GPL(hwrng_register);
326
/* Unregister an RNG backend.  If it was current, fall back to the most
 * recently registered remaining RNG; when the list empties, tear down
 * the chrdev and sysfs files as well. */
void hwrng_unregister(struct hwrng *rng)
{
	int err;

	mutex_lock(&rng_mutex);

	list_del(&rng->list);
	if (current_rng == rng) {
		hwrng_cleanup(rng);
		if (list_empty(&rng_list)) {
			current_rng = NULL;
		} else {
			/* Switch to the newest remaining backend. */
			current_rng = list_entry(rng_list.prev, struct hwrng, list);
			err = hwrng_init(current_rng);
			if (err)
				current_rng = NULL;
		}
	}
	if (list_empty(&rng_list))
		unregister_miscdev();

	mutex_unlock(&rng_mutex);
}
350EXPORT_SYMBOL_GPL(hwrng_unregister);
351
352
353MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
354MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
new file mode 100644
index 0000000000..be61f22ee7
--- /dev/null
+++ b/drivers/char/hw_random/geode-rng.c
@@ -0,0 +1,128 @@
1/*
2 * RNG driver for AMD Geode RNGs
3 *
4 * Copyright 2005 (c) MontaVista Software, Inc.
5 *
6 * with the majority of the code coming from:
7 *
8 * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
9 * (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
10 *
11 * derived from
12 *
13 * Hardware driver for the AMD 768 Random Number Generator (RNG)
14 * (c) Copyright 2001 Red Hat Inc <alan@redhat.com>
15 *
16 * derived from
17 *
18 * Hardware driver for Intel i810 Random Number Generator (RNG)
19 * Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
20 * Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
21 *
22 * This file is licensed under the terms of the GNU General Public
23 * License version 2. This program is licensed "as is" without any
24 * warranty of any kind, whether express or implied.
25 */
26
27#include <linux/module.h>
28#include <linux/kernel.h>
29#include <linux/pci.h>
30#include <linux/hw_random.h>
31#include <asm/io.h>
32
33
34#define PFX KBUILD_MODNAME ": "
35
36#define GEODE_RNG_DATA_REG 0x50
37#define GEODE_RNG_STATUS_REG 0x54
38
39/*
40 * Data for PCI driver interface
41 *
42 * This data only exists for exporting the supported
43 * PCI ids via MODULE_DEVICE_TABLE. We do not actually
44 * register a pci_driver, because someone else might one day
45 * want to register another driver on the same PCI id.
46 */
47static const struct pci_device_id pci_tbl[] = {
48 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LX_AES,
49 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
50 { 0, }, /* terminate list */
51};
52MODULE_DEVICE_TABLE(pci, pci_tbl);
53
54
/* Read one 32-bit random word; always yields 4 valid bytes. */
static int geode_rng_data_read(struct hwrng *rng, u32 *data)
{
	void __iomem *mem = (void __iomem *)rng->priv;

	*data = readl(mem + GEODE_RNG_DATA_REG);

	return 4;
}

/* Non-zero status register value means a fresh word is available. */
static int geode_rng_data_present(struct hwrng *rng)
{
	void __iomem *mem = (void __iomem *)rng->priv;

	return !!(readl(mem + GEODE_RNG_STATUS_REG));
}


/* hwrng backend descriptor; no init/cleanup hooks — the hardware needs
 * no enabling beyond the MMIO mapping done in mod_init(). */
static struct hwrng geode_rng = {
	.name		= "geode",
	.data_present	= geode_rng_data_present,
	.data_read	= geode_rng_data_read,
};
77
78
79static int __init mod_init(void)
80{
81 int err = -ENODEV;
82 struct pci_dev *pdev = NULL;
83 const struct pci_device_id *ent;
84 void __iomem *mem;
85 unsigned long rng_base;
86
87 for_each_pci_dev(pdev) {
88 ent = pci_match_id(pci_tbl, pdev);
89 if (ent)
90 goto found;
91 }
92 /* Device not found. */
93 goto out;
94
95found:
96 rng_base = pci_resource_start(pdev, 0);
97 if (rng_base == 0)
98 goto out;
99 err = -ENOMEM;
100 mem = ioremap(rng_base, 0x58);
101 if (!mem)
102 goto out;
103 geode_rng.priv = (unsigned long)mem;
104
105 printk(KERN_INFO "AMD Geode RNG detected\n");
106 err = hwrng_register(&geode_rng);
107 if (err) {
108 printk(KERN_ERR PFX "RNG registering failed (%d)\n",
109 err);
110 goto out;
111 }
112out:
113 return err;
114}
115
/* Module teardown: unregister from the core, then drop the MMIO mapping. */
static void __exit mod_exit(void)
{
	void __iomem *mem = (void __iomem *)geode_rng.priv;

	hwrng_unregister(&geode_rng);
	iounmap(mem);
}
123
124subsys_initcall(mod_init);
125module_exit(mod_exit);
126
127MODULE_DESCRIPTION("H/W RNG driver for AMD Geode LX CPUs");
128MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
new file mode 100644
index 0000000000..6594bd5645
--- /dev/null
+++ b/drivers/char/hw_random/intel-rng.c
@@ -0,0 +1,189 @@
1/*
2 * RNG driver for Intel RNGs
3 *
4 * Copyright 2005 (c) MontaVista Software, Inc.
5 *
6 * with the majority of the code coming from:
7 *
8 * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
9 * (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
10 *
11 * derived from
12 *
13 * Hardware driver for the AMD 768 Random Number Generator (RNG)
14 * (c) Copyright 2001 Red Hat Inc <alan@redhat.com>
15 *
16 * derived from
17 *
18 * Hardware driver for Intel i810 Random Number Generator (RNG)
19 * Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
20 * Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
21 *
22 * This file is licensed under the terms of the GNU General Public
23 * License version 2. This program is licensed "as is" without any
24 * warranty of any kind, whether express or implied.
25 */
26
27#include <linux/module.h>
28#include <linux/kernel.h>
29#include <linux/pci.h>
30#include <linux/hw_random.h>
31#include <asm/io.h>
32
33
34#define PFX KBUILD_MODNAME ": "
35
36/*
37 * RNG registers
38 */
39#define INTEL_RNG_HW_STATUS 0
40#define INTEL_RNG_PRESENT 0x40
41#define INTEL_RNG_ENABLED 0x01
42#define INTEL_RNG_STATUS 1
43#define INTEL_RNG_DATA_PRESENT 0x01
44#define INTEL_RNG_DATA 2
45
46/*
47 * Magic address at which Intel PCI bridges locate the RNG
48 */
49#define INTEL_RNG_ADDR 0xFFBC015F
50#define INTEL_RNG_ADDR_LEN 3
51
52/*
53 * Data for PCI driver interface
54 *
55 * This data only exists for exporting the supported
56 * PCI ids via MODULE_DEVICE_TABLE. We do not actually
57 * register a pci_driver, because someone else might one day
58 * want to register another driver on the same PCI id.
59 */
60static const struct pci_device_id pci_tbl[] = {
61 { 0x8086, 0x2418, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
62 { 0x8086, 0x2428, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
63 { 0x8086, 0x2430, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
64 { 0x8086, 0x2448, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
65 { 0x8086, 0x244e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
66 { 0x8086, 0x245e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
67 { 0, }, /* terminate list */
68};
69MODULE_DEVICE_TABLE(pci, pci_tbl);
70
71
72static inline u8 hwstatus_get(void __iomem *mem)
73{
74 return readb(mem + INTEL_RNG_HW_STATUS);
75}
76
77static inline u8 hwstatus_set(void __iomem *mem,
78 u8 hw_status)
79{
80 writeb(hw_status, mem + INTEL_RNG_HW_STATUS);
81 return hwstatus_get(mem);
82}
83
84static int intel_rng_data_present(struct hwrng *rng)
85{
86 void __iomem *mem = (void __iomem *)rng->priv;
87
88 return !!(readb(mem + INTEL_RNG_STATUS) & INTEL_RNG_DATA_PRESENT);
89}
90
91static int intel_rng_data_read(struct hwrng *rng, u32 *data)
92{
93 void __iomem *mem = (void __iomem *)rng->priv;
94
95 *data = readb(mem + INTEL_RNG_DATA);
96
97 return 1;
98}
99
/* Enable the RNG if it is off, and verify the enable bit stuck. */
static int intel_rng_init(struct hwrng *rng)
{
	void __iomem *mem = (void __iomem *)rng->priv;
	u8 hw_status;
	int err = -EIO;

	hw_status = hwstatus_get(mem);
	/* turn RNG h/w on, if it's off */
	if ((hw_status & INTEL_RNG_ENABLED) == 0)
		hw_status = hwstatus_set(mem, hw_status | INTEL_RNG_ENABLED);
	/* hwstatus_set() reads back, so this detects a refused enable. */
	if ((hw_status & INTEL_RNG_ENABLED) == 0) {
		printk(KERN_ERR PFX "cannot enable RNG, aborting\n");
		goto out;
	}
	err = 0;
out:
	return err;
}

/* Disable the RNG; warn if it was somehow already off. */
static void intel_rng_cleanup(struct hwrng *rng)
{
	void __iomem *mem = (void __iomem *)rng->priv;
	u8 hw_status;

	hw_status = hwstatus_get(mem);
	if (hw_status & INTEL_RNG_ENABLED)
		hwstatus_set(mem, hw_status & ~INTEL_RNG_ENABLED);
	else
		printk(KERN_WARNING PFX "unusual: RNG already disabled\n");
}
130
131
/* hwrng backend descriptor registered with the hw_random core. */
static struct hwrng intel_rng = {
	.name		= "intel",
	.init		= intel_rng_init,
	.cleanup	= intel_rng_cleanup,
	.data_present	= intel_rng_data_present,
	.data_read	= intel_rng_data_read,
};
139
140
141static int __init mod_init(void)
142{
143 int err = -ENODEV;
144 void __iomem *mem;
145 u8 hw_status;
146
147 if (!pci_dev_present(pci_tbl))
148 goto out; /* Device not found. */
149
150 err = -ENOMEM;
151 mem = ioremap(INTEL_RNG_ADDR, INTEL_RNG_ADDR_LEN);
152 if (!mem)
153 goto out;
154 intel_rng.priv = (unsigned long)mem;
155
156 /* Check for Intel 82802 */
157 err = -ENODEV;
158 hw_status = hwstatus_get(mem);
159 if ((hw_status & INTEL_RNG_PRESENT) == 0)
160 goto err_unmap;
161
162 printk(KERN_INFO "Intel 82802 RNG detected\n");
163 err = hwrng_register(&intel_rng);
164 if (err) {
165 printk(KERN_ERR PFX "RNG registering failed (%d)\n",
166 err);
167 goto out;
168 }
169out:
170 return err;
171
172err_unmap:
173 iounmap(mem);
174 goto out;
175}
176
/* Module teardown: unregister from the core, then drop the MMIO mapping. */
static void __exit mod_exit(void)
{
	void __iomem *mem = (void __iomem *)intel_rng.priv;

	hwrng_unregister(&intel_rng);
	iounmap(mem);
}
184
185subsys_initcall(mod_init);
186module_exit(mod_exit);
187
188MODULE_DESCRIPTION("H/W RNG driver for Intel chipsets");
189MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/ixp4xx-rng.c b/drivers/char/hw_random/ixp4xx-rng.c
new file mode 100644
index 0000000000..ef71022423
--- /dev/null
+++ b/drivers/char/hw_random/ixp4xx-rng.c
@@ -0,0 +1,73 @@
1/*
2 * drivers/char/rng/ixp4xx-rng.c
3 *
4 * RNG driver for Intel IXP4xx family of NPUs
5 *
6 * Author: Deepak Saxena <dsaxena@plexity.net>
7 *
8 * Copyright 2005 (c) MontaVista Software, Inc.
9 *
10 * Fixes by Michael Buesch
11 *
12 * This file is licensed under the terms of the GNU General Public
13 * License version 2. This program is licensed "as is" without any
14 * warranty of any kind, whether express or implied.
15 */
16
17#include <linux/kernel.h>
18#include <linux/config.h>
19#include <linux/types.h>
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/init.h>
23#include <linux/bitops.h>
24#include <linux/hw_random.h>
25
26#include <asm/io.h>
27#include <asm/hardware.h>
28
29
/* Read one 32-bit word from the IXP4xx RNG; always yields 4 bytes. */
static int ixp4xx_rng_data_read(struct hwrng *rng, u32 *buffer)
{
	void __iomem * rng_base = (void __iomem *)rng->priv;

	*buffer = __raw_readl(rng_base);

	return 4;
}

/* hwrng backend descriptor; the NPU RNG needs no init/cleanup and has
 * no status register, so only data_read is provided. */
static struct hwrng ixp4xx_rng_ops = {
	.name		= "ixp4xx",
	.data_read	= ixp4xx_rng_data_read,
};
43
/* Map the RNG register (fixed physical address 0x70002100, one word)
 * and register the backend; unmap again if registration fails. */
static int __init ixp4xx_rng_init(void)
{
	void __iomem * rng_base;
	int err;

	rng_base = ioremap(0x70002100, 4);
	if (!rng_base)
		return -ENOMEM;
	ixp4xx_rng_ops.priv = (unsigned long)rng_base;
	err = hwrng_register(&ixp4xx_rng_ops);
	if (err)
		iounmap(rng_base);

	return err;
}

/* Module teardown: unregister, then drop the MMIO mapping. */
static void __exit ixp4xx_rng_exit(void)
{
	void __iomem * rng_base = (void __iomem *)ixp4xx_rng_ops.priv;

	hwrng_unregister(&ixp4xx_rng_ops);
	iounmap(rng_base);
}
67
68subsys_initcall(ixp4xx_rng_init);
69module_exit(ixp4xx_rng_exit);
70
71MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
72MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver for IXP4xx");
73MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
new file mode 100644
index 0000000000..819516b35a
--- /dev/null
+++ b/drivers/char/hw_random/omap-rng.c
@@ -0,0 +1,208 @@
1/*
2 * driver/char/hw_random/omap-rng.c
3 *
4 * RNG driver for TI OMAP CPU family
5 *
6 * Author: Deepak Saxena <dsaxena@plexity.net>
7 *
8 * Copyright 2005 (c) MontaVista Software, Inc.
9 *
10 * Mostly based on original driver:
11 *
12 * Copyright (C) 2005 Nokia Corporation
 13 * Author: Juha Yrjölä <juha.yrjola@nokia.com>
14 *
15 * This file is licensed under the terms of the GNU General Public
16 * License version 2. This program is licensed "as is" without any
17 * warranty of any kind, whether express or implied.
18 *
19 * TODO:
20 *
21 * - Make status updated be interrupt driven so we don't poll
22 *
23 */
24
25#include <linux/module.h>
26#include <linux/init.h>
27#include <linux/random.h>
28#include <linux/err.h>
29#include <linux/device.h>
30#include <linux/hw_random.h>
31
32#include <asm/io.h>
33#include <asm/hardware/clock.h>
34
35#define RNG_OUT_REG 0x00 /* Output register */
36#define RNG_STAT_REG 0x04 /* Status register
37 [0] = STAT_BUSY */
38#define RNG_ALARM_REG 0x24 /* Alarm register
39 [7:0] = ALARM_COUNTER */
40#define RNG_CONFIG_REG 0x28 /* Configuration register
41 [11:6] = RESET_COUNT
42 [5:3] = RING2_DELAY
43 [2:0] = RING1_DELAY */
44#define RNG_REV_REG 0x3c /* Revision register
45 [7:0] = REV_NB */
46#define RNG_MASK_REG 0x40 /* Mask and reset register
47 [2] = IT_EN
48 [1] = SOFTRESET
49 [0] = AUTOIDLE */
50#define RNG_SYSSTATUS 0x44 /* System status
51 [0] = RESETDONE */
52
53static void __iomem *rng_base;
54static struct clk *rng_ick;
55static struct device *rng_dev;
56
57static u32 omap_rng_read_reg(int reg)
58{
59 return __raw_readl(rng_base + reg);
60}
61
62static void omap_rng_write_reg(int reg, u32 val)
63{
64 __raw_writel(val, rng_base + reg);
65}
66
67/* REVISIT: Does the status bit really work on 16xx? */
68static int omap_rng_data_present(struct hwrng *rng)
69{
70 return omap_rng_read_reg(RNG_STAT_REG) ? 0 : 1;
71}
72
73static int omap_rng_data_read(struct hwrng *rng, u32 *data)
74{
75 *data = omap_rng_read_reg(RNG_OUT_REG);
76
77 return 4;
78}
79
80static struct hwrng omap_rng_ops = {
81 .name = "omap",
82 .data_present = omap_rng_data_present,
83 .data_read = omap_rng_data_read,
84};
85
86static int __init omap_rng_probe(struct device *dev)
87{
88 struct platform_device *pdev = to_platform_device(dev);
89 struct resource *res, *mem;
90 int ret;
91
92 /*
93 * A bit ugly, and it will never actually happen but there can
94 * be only one RNG and this catches any bork
95 */
96 BUG_ON(rng_dev);
97
98 if (cpu_is_omap24xx()) {
99 rng_ick = clk_get(NULL, "rng_ick");
100 if (IS_ERR(rng_ick)) {
101 dev_err(dev, "Could not get rng_ick\n");
102 ret = PTR_ERR(rng_ick);
103 return ret;
104 }
105 else {
106 clk_use(rng_ick);
107 }
108 }
109
110 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
111
112 if (!res)
113 return -ENOENT;
114
115 mem = request_mem_region(res->start, res->end - res->start + 1,
116 pdev->name);
117 if (mem == NULL)
118 return -EBUSY;
119
120 dev_set_drvdata(dev, mem);
121 rng_base = (u32 __iomem *)io_p2v(res->start);
122
123 ret = hwrng_register(&omap_rng_ops);
124 if (ret) {
125 release_resource(mem);
126 rng_base = NULL;
127 return ret;
128 }
129
130 dev_info(dev, "OMAP Random Number Generator ver. %02x\n",
131 omap_rng_read_reg(RNG_REV_REG));
132 omap_rng_write_reg(RNG_MASK_REG, 0x1);
133
134 rng_dev = dev;
135
136 return 0;
137}
138
139static int __exit omap_rng_remove(struct device *dev)
140{
141 struct resource *mem = dev_get_drvdata(dev);
142
143 hwrng_unregister(&omap_rng_ops);
144
145 omap_rng_write_reg(RNG_MASK_REG, 0x0);
146
147 if (cpu_is_omap24xx()) {
148 clk_unuse(rng_ick);
149 clk_put(rng_ick);
150 }
151
152 release_resource(mem);
153 rng_base = NULL;
154
155 return 0;
156}
157
158#ifdef CONFIG_PM
159
160static int omap_rng_suspend(struct device *dev, pm_message_t message, u32 level)
161{
162 omap_rng_write_reg(RNG_MASK_REG, 0x0);
163
164 return 0;
165}
166
167static int omap_rng_resume(struct device *dev, pm_message_t message, u32 level)
168{
169 omap_rng_write_reg(RNG_MASK_REG, 0x1);
170
171 return 1;
172}
173
174#else
175
176#define omap_rng_suspend NULL
177#define omap_rng_resume NULL
178
179#endif
180
181
182static struct device_driver omap_rng_driver = {
183 .name = "omap_rng",
184 .bus = &platform_bus_type,
185 .probe = omap_rng_probe,
186 .remove = __exit_p(omap_rng_remove),
187 .suspend = omap_rng_suspend,
188 .resume = omap_rng_resume
189};
190
191static int __init omap_rng_init(void)
192{
193 if (!cpu_is_omap16xx() && !cpu_is_omap24xx())
194 return -ENODEV;
195
196 return driver_register(&omap_rng_driver);
197}
198
199static void __exit omap_rng_exit(void)
200{
201 driver_unregister(&omap_rng_driver);
202}
203
204module_init(omap_rng_init);
205module_exit(omap_rng_exit);
206
207MODULE_AUTHOR("Deepak Saxena (and others)");
208MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
new file mode 100644
index 0000000000..0e786b617b
--- /dev/null
+++ b/drivers/char/hw_random/via-rng.c
@@ -0,0 +1,183 @@
1/*
2 * RNG driver for VIA RNGs
3 *
4 * Copyright 2005 (c) MontaVista Software, Inc.
5 *
6 * with the majority of the code coming from:
7 *
8 * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
9 * (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
10 *
11 * derived from
12 *
13 * Hardware driver for the AMD 768 Random Number Generator (RNG)
14 * (c) Copyright 2001 Red Hat Inc <alan@redhat.com>
15 *
16 * derived from
17 *
18 * Hardware driver for Intel i810 Random Number Generator (RNG)
19 * Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
20 * Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
21 *
22 * This file is licensed under the terms of the GNU General Public
23 * License version 2. This program is licensed "as is" without any
24 * warranty of any kind, whether express or implied.
25 */
26
27#include <linux/module.h>
28#include <linux/kernel.h>
29#include <linux/pci.h>
30#include <linux/hw_random.h>
31#include <asm/io.h>
32#include <asm/msr.h>
33#include <asm/cpufeature.h>
34
35
36#define PFX KBUILD_MODNAME ": "
37
38
39enum {
40 VIA_STRFILT_CNT_SHIFT = 16,
41 VIA_STRFILT_FAIL = (1 << 15),
42 VIA_STRFILT_ENABLE = (1 << 14),
43 VIA_RAWBITS_ENABLE = (1 << 13),
44 VIA_RNG_ENABLE = (1 << 6),
45 VIA_XSTORE_CNT_MASK = 0x0F,
46
47 VIA_RNG_CHUNK_8 = 0x00, /* 64 rand bits, 64 stored bits */
48 VIA_RNG_CHUNK_4 = 0x01, /* 32 rand bits, 32 stored bits */
49 VIA_RNG_CHUNK_4_MASK = 0xFFFFFFFF,
50 VIA_RNG_CHUNK_2 = 0x02, /* 16 rand bits, 32 stored bits */
51 VIA_RNG_CHUNK_2_MASK = 0xFFFF,
52 VIA_RNG_CHUNK_1 = 0x03, /* 8 rand bits, 32 stored bits */
53 VIA_RNG_CHUNK_1_MASK = 0xFF,
54};
55
56/*
57 * Investigate using the 'rep' prefix to obtain 32 bits of random data
58 * in one insn. The upside is potentially better performance. The
59 * downside is that the instruction becomes no longer atomic. Due to
60 * this, just like familiar issues with /dev/random itself, the worst
61 * case of a 'rep xstore' could potentially pause a cpu for an
62 * unreasonably long time. In practice, this condition would likely
63 * only occur when the hardware is failing. (or so we hope :))
64 *
65 * Another possible performance boost may come from simply buffering
66 * until we have 4 bytes, thus returning a u32 at a time,
67 * instead of the current u8-at-a-time.
68 */
69
70static inline u32 xstore(u32 *addr, u32 edx_in)
71{
72 u32 eax_out;
73
74 asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */"
75 :"=m"(*addr), "=a"(eax_out)
76 :"D"(addr), "d"(edx_in));
77
78 return eax_out;
79}
80
81static int via_rng_data_present(struct hwrng *rng)
82{
83 u32 bytes_out;
84 u32 *via_rng_datum = (u32 *)(&rng->priv);
85
86 /* We choose the recommended 1-byte-per-instruction RNG rate,
87 * for greater randomness at the expense of speed. Larger
88 * values 2, 4, or 8 bytes-per-instruction yield greater
89 * speed at lesser randomness.
90 *
91 * If you change this to another VIA_CHUNK_n, you must also
92 * change the ->n_bytes values in rng_vendor_ops[] tables.
93 * VIA_CHUNK_8 requires further code changes.
94 *
95 * A copy of MSR_VIA_RNG is placed in eax_out when xstore
96 * completes.
97 */
98
99 *via_rng_datum = 0; /* paranoia, not really necessary */
100 bytes_out = xstore(via_rng_datum, VIA_RNG_CHUNK_1);
101 bytes_out &= VIA_XSTORE_CNT_MASK;
102 if (bytes_out == 0)
103 return 0;
104 return 1;
105}
106
107static int via_rng_data_read(struct hwrng *rng, u32 *data)
108{
109 u32 via_rng_datum = (u32)rng->priv;
110
111 *data = via_rng_datum;
112
113 return 1;
114}
115
116static int via_rng_init(struct hwrng *rng)
117{
118 u32 lo, hi, old_lo;
119
120 /* Control the RNG via MSR. Tread lightly and pay very close
121 * close attention to values written, as the reserved fields
122 * are documented to be "undefined and unpredictable"; but it
123 * does not say to write them as zero, so I make a guess that
124 * we restore the values we find in the register.
125 */
126 rdmsr(MSR_VIA_RNG, lo, hi);
127
128 old_lo = lo;
129 lo &= ~(0x7f << VIA_STRFILT_CNT_SHIFT);
130 lo &= ~VIA_XSTORE_CNT_MASK;
131 lo &= ~(VIA_STRFILT_ENABLE | VIA_STRFILT_FAIL | VIA_RAWBITS_ENABLE);
132 lo |= VIA_RNG_ENABLE;
133
134 if (lo != old_lo)
135 wrmsr(MSR_VIA_RNG, lo, hi);
136
137 /* perhaps-unnecessary sanity check; remove after testing if
138 unneeded */
139 rdmsr(MSR_VIA_RNG, lo, hi);
140 if ((lo & VIA_RNG_ENABLE) == 0) {
141 printk(KERN_ERR PFX "cannot enable VIA C3 RNG, aborting\n");
142 return -ENODEV;
143 }
144
145 return 0;
146}
147
148
149static struct hwrng via_rng = {
150 .name = "via",
151 .init = via_rng_init,
152 .data_present = via_rng_data_present,
153 .data_read = via_rng_data_read,
154};
155
156
157static int __init mod_init(void)
158{
159 int err;
160
161 if (!cpu_has_xstore)
162 return -ENODEV;
163 printk(KERN_INFO "VIA RNG detected\n");
164 err = hwrng_register(&via_rng);
165 if (err) {
166 printk(KERN_ERR PFX "RNG registering failed (%d)\n",
167 err);
168 goto out;
169 }
170out:
171 return err;
172}
173
174static void __exit mod_exit(void)
175{
176 hwrng_unregister(&via_rng);
177}
178
179subsys_initcall(mod_init);
180module_exit(mod_exit);
181
182MODULE_DESCRIPTION("H/W RNG driver for VIA chipsets");
183MODULE_LICENSE("GPL");
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 9f2f8fdec6..83ed6ae466 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -57,8 +57,7 @@ static int ipmi_init_msghandler(void);
57static int initialized = 0; 57static int initialized = 0;
58 58
59#ifdef CONFIG_PROC_FS 59#ifdef CONFIG_PROC_FS
60struct proc_dir_entry *proc_ipmi_root = NULL; 60static struct proc_dir_entry *proc_ipmi_root = NULL;
61EXPORT_SYMBOL(proc_ipmi_root);
62#endif /* CONFIG_PROC_FS */ 61#endif /* CONFIG_PROC_FS */
63 62
64#define MAX_EVENTS_IN_QUEUE 25 63#define MAX_EVENTS_IN_QUEUE 25
@@ -936,11 +935,8 @@ int ipmi_set_gets_events(ipmi_user_t user, int val)
936 935
937 if (val) { 936 if (val) {
938 /* Deliver any queued events. */ 937 /* Deliver any queued events. */
939 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, 938 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
940 link) { 939 list_move_tail(&msg->link, &msgs);
941 list_del(&msg->link);
942 list_add_tail(&msg->link, &msgs);
943 }
944 intf->waiting_events_count = 0; 940 intf->waiting_events_count = 0;
945 } 941 }
946 942
@@ -3677,7 +3673,7 @@ static void send_panic_events(char *str)
3677} 3673}
3678#endif /* CONFIG_IPMI_PANIC_EVENT */ 3674#endif /* CONFIG_IPMI_PANIC_EVENT */
3679 3675
3680static int has_paniced = 0; 3676static int has_panicked = 0;
3681 3677
3682static int panic_event(struct notifier_block *this, 3678static int panic_event(struct notifier_block *this,
3683 unsigned long event, 3679 unsigned long event,
@@ -3686,9 +3682,9 @@ static int panic_event(struct notifier_block *this,
3686 int i; 3682 int i;
3687 ipmi_smi_t intf; 3683 ipmi_smi_t intf;
3688 3684
3689 if (has_paniced) 3685 if (has_panicked)
3690 return NOTIFY_DONE; 3686 return NOTIFY_DONE;
3691 has_paniced = 1; 3687 has_panicked = 1;
3692 3688
3693 /* For every registered interface, set it to run to completion. */ 3689 /* For every registered interface, set it to run to completion. */
3694 for (i = 0; i < MAX_IPMI_INTERFACES; i++) { 3690 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 02a7dd7a8a..101c14b9b2 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -809,7 +809,7 @@ static int ipmi_thread(void *data)
809 /* do nothing */ 809 /* do nothing */
810 } 810 }
811 else if (smi_result == SI_SM_CALL_WITH_DELAY) 811 else if (smi_result == SI_SM_CALL_WITH_DELAY)
812 udelay(1); 812 schedule();
813 else 813 else
814 schedule_timeout_interruptible(1); 814 schedule_timeout_interruptible(1);
815 } 815 }
diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
index edd996f6fb..4bb3d22726 100644
--- a/drivers/char/keyboard.c
+++ b/drivers/char/keyboard.c
@@ -151,6 +151,7 @@ unsigned char kbd_sysrq_xlate[KEY_MAX + 1] =
151 "230\177\000\000\213\214\000\000\000\000\000\000\000\000\000\000" /* 0x50 - 0x5f */ 151 "230\177\000\000\213\214\000\000\000\000\000\000\000\000\000\000" /* 0x50 - 0x5f */
152 "\r\000/"; /* 0x60 - 0x6f */ 152 "\r\000/"; /* 0x60 - 0x6f */
153static int sysrq_down; 153static int sysrq_down;
154static int sysrq_alt_use;
154#endif 155#endif
155static int sysrq_alt; 156static int sysrq_alt;
156 157
@@ -673,7 +674,7 @@ static void k_dead2(struct vc_data *vc, unsigned char value, char up_flag, struc
673 */ 674 */
674static void k_dead(struct vc_data *vc, unsigned char value, char up_flag, struct pt_regs *regs) 675static void k_dead(struct vc_data *vc, unsigned char value, char up_flag, struct pt_regs *regs)
675{ 676{
676 static unsigned char ret_diacr[NR_DEAD] = {'`', '\'', '^', '~', '"', ',' }; 677 static const unsigned char ret_diacr[NR_DEAD] = {'`', '\'', '^', '~', '"', ',' };
677 value = ret_diacr[value]; 678 value = ret_diacr[value];
678 k_deadunicode(vc, value, up_flag, regs); 679 k_deadunicode(vc, value, up_flag, regs);
679} 680}
@@ -710,8 +711,8 @@ static void k_cur(struct vc_data *vc, unsigned char value, char up_flag, struct
710 711
711static void k_pad(struct vc_data *vc, unsigned char value, char up_flag, struct pt_regs *regs) 712static void k_pad(struct vc_data *vc, unsigned char value, char up_flag, struct pt_regs *regs)
712{ 713{
713 static const char *pad_chars = "0123456789+-*/\015,.?()#"; 714 static const char pad_chars[] = "0123456789+-*/\015,.?()#";
714 static const char *app_map = "pqrstuvwxylSRQMnnmPQS"; 715 static const char app_map[] = "pqrstuvwxylSRQMnnmPQS";
715 716
716 if (up_flag) 717 if (up_flag)
717 return; /* no action, if this is a key release */ 718 return; /* no action, if this is a key release */
@@ -1036,7 +1037,7 @@ static void kbd_refresh_leds(struct input_handle *handle)
1036#define HW_RAW(dev) (test_bit(EV_MSC, dev->evbit) && test_bit(MSC_RAW, dev->mscbit) &&\ 1037#define HW_RAW(dev) (test_bit(EV_MSC, dev->evbit) && test_bit(MSC_RAW, dev->mscbit) &&\
1037 ((dev)->id.bustype == BUS_I8042) && ((dev)->id.vendor == 0x0001) && ((dev)->id.product == 0x0001)) 1038 ((dev)->id.bustype == BUS_I8042) && ((dev)->id.vendor == 0x0001) && ((dev)->id.product == 0x0001))
1038 1039
1039static unsigned short x86_keycodes[256] = 1040static const unsigned short x86_keycodes[256] =
1040 { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1041 { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
1041 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 1042 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
1042 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 1043 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
@@ -1074,11 +1075,13 @@ static int emulate_raw(struct vc_data *vc, unsigned int keycode,
1074 put_queue(vc, 0x1d | up_flag); 1075 put_queue(vc, 0x1d | up_flag);
1075 put_queue(vc, 0x45 | up_flag); 1076 put_queue(vc, 0x45 | up_flag);
1076 return 0; 1077 return 0;
1077 case KEY_HANGUEL: 1078 case KEY_HANGEUL:
1078 if (!up_flag) put_queue(vc, 0xf1); 1079 if (!up_flag)
1080 put_queue(vc, 0xf2);
1079 return 0; 1081 return 0;
1080 case KEY_HANJA: 1082 case KEY_HANJA:
1081 if (!up_flag) put_queue(vc, 0xf2); 1083 if (!up_flag)
1084 put_queue(vc, 0xf1);
1082 return 0; 1085 return 0;
1083 } 1086 }
1084 1087
@@ -1143,7 +1146,7 @@ static void kbd_keycode(unsigned int keycode, int down,
1143 kbd = kbd_table + fg_console; 1146 kbd = kbd_table + fg_console;
1144 1147
1145 if (keycode == KEY_LEFTALT || keycode == KEY_RIGHTALT) 1148 if (keycode == KEY_LEFTALT || keycode == KEY_RIGHTALT)
1146 sysrq_alt = down; 1149 sysrq_alt = down ? keycode : 0;
1147#ifdef CONFIG_SPARC 1150#ifdef CONFIG_SPARC
1148 if (keycode == KEY_STOP) 1151 if (keycode == KEY_STOP)
1149 sparc_l1_a_state = down; 1152 sparc_l1_a_state = down;
@@ -1163,9 +1166,14 @@ static void kbd_keycode(unsigned int keycode, int down,
1163 1166
1164#ifdef CONFIG_MAGIC_SYSRQ /* Handle the SysRq Hack */ 1167#ifdef CONFIG_MAGIC_SYSRQ /* Handle the SysRq Hack */
1165 if (keycode == KEY_SYSRQ && (sysrq_down || (down == 1 && sysrq_alt))) { 1168 if (keycode == KEY_SYSRQ && (sysrq_down || (down == 1 && sysrq_alt))) {
1166 sysrq_down = down; 1169 if (!sysrq_down) {
1170 sysrq_down = down;
1171 sysrq_alt_use = sysrq_alt;
1172 }
1167 return; 1173 return;
1168 } 1174 }
1175 if (sysrq_down && !down && keycode == sysrq_alt_use)
1176 sysrq_down = 0;
1169 if (sysrq_down && down && !rep) { 1177 if (sysrq_down && down && !rep) {
1170 handle_sysrq(kbd_sysrq_xlate[keycode], regs, tty); 1178 handle_sysrq(kbd_sysrq_xlate[keycode], regs, tty);
1171 return; 1179 return;
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index f43c2e04ea..01247cccb8 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -301,7 +301,7 @@ static struct tty_operations moxa_ops = {
301 .tiocmset = moxa_tiocmset, 301 .tiocmset = moxa_tiocmset,
302}; 302};
303 303
304static spinlock_t moxa_lock = SPIN_LOCK_UNLOCKED; 304static DEFINE_SPINLOCK(moxa_lock);
305 305
306#ifdef CONFIG_PCI 306#ifdef CONFIG_PCI
307static int moxa_get_PCI_conf(struct pci_dev *p, int board_type, moxa_board_conf * board) 307static int moxa_get_PCI_conf(struct pci_dev *p, int board_type, moxa_board_conf * board)
diff --git a/drivers/char/nsc_gpio.c b/drivers/char/nsc_gpio.c
new file mode 100644
index 0000000000..5b91e4e256
--- /dev/null
+++ b/drivers/char/nsc_gpio.c
@@ -0,0 +1,142 @@
1/* linux/drivers/char/nsc_gpio.c
2
3 National Semiconductor common GPIO device-file/VFS methods.
4 Allows a user space process to control the GPIO pins.
5
6 Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>
7 Copyright (c) 2005 Jim Cromie <jim.cromie@gmail.com>
8*/
9
10#include <linux/config.h>
11#include <linux/fs.h>
12#include <linux/module.h>
13#include <linux/errno.h>
14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <linux/nsc_gpio.h>
17#include <linux/platform_device.h>
18#include <asm/uaccess.h>
19#include <asm/io.h>
20
21#define NAME "nsc_gpio"
22
23void nsc_gpio_dump(struct nsc_gpio_ops *amp, unsigned index)
24{
25 /* retrieve current config w/o changing it */
26 u32 config = amp->gpio_config(index, ~0, 0);
27
28 /* user requested via 'v' command, so its INFO */
29 dev_info(amp->dev, "io%02u: 0x%04x %s %s %s %s %s %s %s\tio:%d/%d\n",
30 index, config,
31 (config & 1) ? "OE" : "TS", /* output-enabled/tristate */
32 (config & 2) ? "PP" : "OD", /* push pull / open drain */
33 (config & 4) ? "PUE" : "PUD", /* pull up enabled/disabled */
34 (config & 8) ? "LOCKED" : "", /* locked / unlocked */
35 (config & 16) ? "LEVEL" : "EDGE",/* level/edge input */
36 (config & 32) ? "HI" : "LO", /* trigger on rise/fall edge */
37 (config & 64) ? "DEBOUNCE" : "", /* debounce */
38
39 amp->gpio_get(index), amp->gpio_current(index));
40}
41
42ssize_t nsc_gpio_write(struct file *file, const char __user *data,
43 size_t len, loff_t *ppos)
44{
45 unsigned m = iminor(file->f_dentry->d_inode);
46 struct nsc_gpio_ops *amp = file->private_data;
47 struct device *dev = amp->dev;
48 size_t i;
49 int err = 0;
50
51 for (i = 0; i < len; ++i) {
52 char c;
53 if (get_user(c, data + i))
54 return -EFAULT;
55 switch (c) {
56 case '0':
57 amp->gpio_set(m, 0);
58 break;
59 case '1':
60 amp->gpio_set(m, 1);
61 break;
62 case 'O':
63 dev_dbg(dev, "GPIO%d output enabled\n", m);
64 amp->gpio_config(m, ~1, 1);
65 break;
66 case 'o':
67 dev_dbg(dev, "GPIO%d output disabled\n", m);
68 amp->gpio_config(m, ~1, 0);
69 break;
70 case 'T':
71 dev_dbg(dev, "GPIO%d output is push pull\n",
72 m);
73 amp->gpio_config(m, ~2, 2);
74 break;
75 case 't':
76 dev_dbg(dev, "GPIO%d output is open drain\n",
77 m);
78 amp->gpio_config(m, ~2, 0);
79 break;
80 case 'P':
81 dev_dbg(dev, "GPIO%d pull up enabled\n", m);
82 amp->gpio_config(m, ~4, 4);
83 break;
84 case 'p':
85 dev_dbg(dev, "GPIO%d pull up disabled\n", m);
86 amp->gpio_config(m, ~4, 0);
87 break;
88 case 'v':
89 /* View Current pin settings */
90 amp->gpio_dump(amp, m);
91 break;
92 case '\n':
93 /* end of settings string, do nothing */
94 break;
95 default:
96 dev_err(dev, "io%2d bad setting: chr<0x%2x>\n",
97 m, (int)c);
98 err++;
99 }
100 }
101 if (err)
102 return -EINVAL; /* full string handled, report error */
103
104 return len;
105}
106
107ssize_t nsc_gpio_read(struct file *file, char __user * buf,
108 size_t len, loff_t * ppos)
109{
110 unsigned m = iminor(file->f_dentry->d_inode);
111 int value;
112 struct nsc_gpio_ops *amp = file->private_data;
113
114 value = amp->gpio_get(m);
115 if (put_user(value ? '1' : '0', buf))
116 return -EFAULT;
117
118 return 1;
119}
120
121/* common file-ops routines for both scx200_gpio and pc87360_gpio */
122EXPORT_SYMBOL(nsc_gpio_write);
123EXPORT_SYMBOL(nsc_gpio_read);
124EXPORT_SYMBOL(nsc_gpio_dump);
125
126static int __init nsc_gpio_init(void)
127{
128 printk(KERN_DEBUG NAME " initializing\n");
129 return 0;
130}
131
132static void __exit nsc_gpio_cleanup(void)
133{
134 printk(KERN_DEBUG NAME " cleanup\n");
135}
136
137module_init(nsc_gpio_init);
138module_exit(nsc_gpio_cleanup);
139
140MODULE_AUTHOR("Jim Cromie <jim.cromie@gmail.com>");
141MODULE_DESCRIPTION("NatSemi GPIO Common Methods");
142MODULE_LICENSE("GPL");
diff --git a/drivers/char/pc8736x_gpio.c b/drivers/char/pc8736x_gpio.c
new file mode 100644
index 0000000000..1c706ccfdb
--- /dev/null
+++ b/drivers/char/pc8736x_gpio.c
@@ -0,0 +1,340 @@
1/* linux/drivers/char/pc8736x_gpio.c
2
3 National Semiconductor PC8736x GPIO driver. Allows a user space
4 process to play with the GPIO pins.
5
6 Copyright (c) 2005 Jim Cromie <jim.cromie@gmail.com>
7
8 adapted from linux/drivers/char/scx200_gpio.c
9 Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>,
10*/
11
12#include <linux/config.h>
13#include <linux/fs.h>
14#include <linux/module.h>
15#include <linux/errno.h>
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/io.h>
19#include <linux/ioport.h>
20#include <linux/mutex.h>
21#include <linux/nsc_gpio.h>
22#include <linux/platform_device.h>
23#include <asm/uaccess.h>
24
25#define DEVNAME "pc8736x_gpio"
26
27MODULE_AUTHOR("Jim Cromie <jim.cromie@gmail.com>");
28MODULE_DESCRIPTION("NatSemi PC-8736x GPIO Pin Driver");
29MODULE_LICENSE("GPL");
30
31static int major; /* default to dynamic major */
32module_param(major, int, 0);
33MODULE_PARM_DESC(major, "Major device number");
34
35static DEFINE_MUTEX(pc8736x_gpio_config_lock);
36static unsigned pc8736x_gpio_base;
37static u8 pc8736x_gpio_shadow[4];
38
39#define SIO_BASE1 0x2E /* 1st command-reg to check */
40#define SIO_BASE2 0x4E /* alt command-reg to check */
41#define SIO_BASE_OFFSET 0x20
42
43#define SIO_SID 0x20 /* SuperI/O ID Register */
44#define SIO_SID_VALUE 0xe9 /* Expected value in SuperI/O ID Register */
45
46#define SIO_CF1 0x21 /* chip config, bit0 is chip enable */
47
48#define PC8736X_GPIO_SIZE 16
49
50#define SIO_UNIT_SEL 0x7 /* unit select reg */
51#define SIO_UNIT_ACT 0x30 /* unit enable */
52#define SIO_GPIO_UNIT 0x7 /* unit number of GPIO */
53#define SIO_VLM_UNIT 0x0D
54#define SIO_TMS_UNIT 0x0E
55
56/* config-space addrs to read/write each unit's runtime addr */
57#define SIO_BASE_HADDR 0x60
58#define SIO_BASE_LADDR 0x61
59
60/* GPIO config-space pin-control addresses */
61#define SIO_GPIO_PIN_SELECT 0xF0
62#define SIO_GPIO_PIN_CONFIG 0xF1
63#define SIO_GPIO_PIN_EVENT 0xF2
64
65static unsigned char superio_cmd = 0;
66static unsigned char selected_device = 0xFF; /* bogus start val */
67
68/* GPIO port runtime access, functionality */
69static int port_offset[] = { 0, 4, 8, 10 }; /* non-uniform offsets ! */
70/* static int event_capable[] = { 1, 1, 0, 0 }; ports 2,3 are hobbled */
71
72#define PORT_OUT 0
73#define PORT_IN 1
74#define PORT_EVT_EN 2
75#define PORT_EVT_STST 3
76
77static struct platform_device *pdev; /* use in dev_*() */
78
79static inline void superio_outb(int addr, int val)
80{
81 outb_p(addr, superio_cmd);
82 outb_p(val, superio_cmd + 1);
83}
84
85static inline int superio_inb(int addr)
86{
87 outb_p(addr, superio_cmd);
88 return inb_p(superio_cmd + 1);
89}
90
91static int pc8736x_superio_present(void)
92{
93 /* try the 2 possible values, read a hardware reg to verify */
94 superio_cmd = SIO_BASE1;
95 if (superio_inb(SIO_SID) == SIO_SID_VALUE)
96 return superio_cmd;
97
98 superio_cmd = SIO_BASE2;
99 if (superio_inb(SIO_SID) == SIO_SID_VALUE)
100 return superio_cmd;
101
102 return 0;
103}
104
105static void device_select(unsigned devldn)
106{
107 superio_outb(SIO_UNIT_SEL, devldn);
108 selected_device = devldn;
109}
110
111static void select_pin(unsigned iminor)
112{
113 /* select GPIO port/pin from device minor number */
114 device_select(SIO_GPIO_UNIT);
115 superio_outb(SIO_GPIO_PIN_SELECT,
116 ((iminor << 1) & 0xF0) | (iminor & 0x7));
117}
118
119static inline u32 pc8736x_gpio_configure_fn(unsigned index, u32 mask, u32 bits,
120 u32 func_slct)
121{
122 u32 config, new_config;
123
124 mutex_lock(&pc8736x_gpio_config_lock);
125
126 device_select(SIO_GPIO_UNIT);
127 select_pin(index);
128
129 /* read current config value */
130 config = superio_inb(func_slct);
131
132 /* set new config */
133 new_config = (config & mask) | bits;
134 superio_outb(func_slct, new_config);
135
136 mutex_unlock(&pc8736x_gpio_config_lock);
137
138 return config;
139}
140
141static u32 pc8736x_gpio_configure(unsigned index, u32 mask, u32 bits)
142{
143 return pc8736x_gpio_configure_fn(index, mask, bits,
144 SIO_GPIO_PIN_CONFIG);
145}
146
147static int pc8736x_gpio_get(unsigned minor)
148{
149 int port, bit, val;
150
151 port = minor >> 3;
152 bit = minor & 7;
153 val = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_IN);
154 val >>= bit;
155 val &= 1;
156
157 dev_dbg(&pdev->dev, "_gpio_get(%d from %x bit %d) == val %d\n",
158 minor, pc8736x_gpio_base + port_offset[port] + PORT_IN, bit,
159 val);
160
161 return val;
162}
163
164static void pc8736x_gpio_set(unsigned minor, int val)
165{
166 int port, bit, curval;
167
168 minor &= 0x1f;
169 port = minor >> 3;
170 bit = minor & 7;
171 curval = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_OUT);
172
173 dev_dbg(&pdev->dev, "addr:%x cur:%x bit-pos:%d cur-bit:%x + new:%d -> bit-new:%d\n",
174 pc8736x_gpio_base + port_offset[port] + PORT_OUT,
175 curval, bit, (curval & ~(1 << bit)), val, (val << bit));
176
177 val = (curval & ~(1 << bit)) | (val << bit);
178
179 dev_dbg(&pdev->dev, "gpio_set(minor:%d port:%d bit:%d)"
180 " %2x -> %2x\n", minor, port, bit, curval, val);
181
182 outb_p(val, pc8736x_gpio_base + port_offset[port] + PORT_OUT);
183
184 curval = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_OUT);
185 val = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_IN);
186
187 dev_dbg(&pdev->dev, "wrote %x, read: %x\n", curval, val);
188 pc8736x_gpio_shadow[port] = val;
189}
190
191static void pc8736x_gpio_set_high(unsigned index)
192{
193 pc8736x_gpio_set(index, 1);
194}
195
196static void pc8736x_gpio_set_low(unsigned index)
197{
198 pc8736x_gpio_set(index, 0);
199}
200
201static int pc8736x_gpio_current(unsigned minor)
202{
203 int port, bit;
204 minor &= 0x1f;
205 port = minor >> 3;
206 bit = minor & 7;
207 return ((pc8736x_gpio_shadow[port] >> bit) & 0x01);
208}
209
210static void pc8736x_gpio_change(unsigned index)
211{
212 pc8736x_gpio_set(index, !pc8736x_gpio_current(index));
213}
214
215static struct nsc_gpio_ops pc8736x_access = {
216 .owner = THIS_MODULE,
217 .gpio_config = pc8736x_gpio_configure,
218 .gpio_dump = nsc_gpio_dump,
219 .gpio_get = pc8736x_gpio_get,
220 .gpio_set = pc8736x_gpio_set,
221 .gpio_set_high = pc8736x_gpio_set_high,
222 .gpio_set_low = pc8736x_gpio_set_low,
223 .gpio_change = pc8736x_gpio_change,
224 .gpio_current = pc8736x_gpio_current
225};
226
227static int pc8736x_gpio_open(struct inode *inode, struct file *file)
228{
229 unsigned m = iminor(inode);
230 file->private_data = &pc8736x_access;
231
232 dev_dbg(&pdev->dev, "open %d\n", m);
233
234 if (m > 63)
235 return -EINVAL;
236 return nonseekable_open(inode, file);
237}
238
239static struct file_operations pc8736x_gpio_fops = {
240 .owner = THIS_MODULE,
241 .open = pc8736x_gpio_open,
242 .write = nsc_gpio_write,
243 .read = nsc_gpio_read,
244};
245
246static void __init pc8736x_init_shadow(void)
247{
248 int port;
249
250 /* read the current values driven on the GPIO signals */
251 for (port = 0; port < 4; ++port)
252 pc8736x_gpio_shadow[port]
253 = inb_p(pc8736x_gpio_base + port_offset[port]
254 + PORT_OUT);
255
256}
257
258static int __init pc8736x_gpio_init(void)
259{
260 int rc = 0;
261
262 pdev = platform_device_alloc(DEVNAME, 0);
263 if (!pdev)
264 return -ENOMEM;
265
266 rc = platform_device_add(pdev);
267 if (rc) {
268 rc = -ENODEV;
269 goto undo_platform_dev_alloc;
270 }
271 dev_info(&pdev->dev, "NatSemi pc8736x GPIO Driver Initializing\n");
272
273 if (!pc8736x_superio_present()) {
274 rc = -ENODEV;
275 dev_err(&pdev->dev, "no device found\n");
276 goto undo_platform_dev_add;
277 }
278 pc8736x_access.dev = &pdev->dev;
279
280 /* Verify that chip and it's GPIO unit are both enabled.
281 My BIOS does this, so I take minimum action here
282 */
283 rc = superio_inb(SIO_CF1);
284 if (!(rc & 0x01)) {
285 rc = -ENODEV;
286 dev_err(&pdev->dev, "device not enabled\n");
287 goto undo_platform_dev_add;
288 }
289 device_select(SIO_GPIO_UNIT);
290 if (!superio_inb(SIO_UNIT_ACT)) {
291 rc = -ENODEV;
292 dev_err(&pdev->dev, "GPIO unit not enabled\n");
293 goto undo_platform_dev_add;
294 }
295
296 /* read the GPIO unit base addr that chip responds to */
297 pc8736x_gpio_base = (superio_inb(SIO_BASE_HADDR) << 8
298 | superio_inb(SIO_BASE_LADDR));
299
300 if (!request_region(pc8736x_gpio_base, 16, DEVNAME)) {
301 rc = -ENODEV;
302 dev_err(&pdev->dev, "GPIO ioport %x busy\n",
303 pc8736x_gpio_base);
304 goto undo_platform_dev_add;
305 }
306 dev_info(&pdev->dev, "GPIO ioport %x reserved\n", pc8736x_gpio_base);
307
308 rc = register_chrdev(major, DEVNAME, &pc8736x_gpio_fops);
309 if (rc < 0) {
310 dev_err(&pdev->dev, "register-chrdev failed: %d\n", rc);
311 goto undo_platform_dev_add;
312 }
313 if (!major) {
314 major = rc;
315 dev_dbg(&pdev->dev, "got dynamic major %d\n", major);
316 }
317
318 pc8736x_init_shadow();
319 return 0;
320
321undo_platform_dev_add:
322 platform_device_put(pdev);
323undo_platform_dev_alloc:
324 kfree(pdev);
325 return rc;
326}
327
328static void __exit pc8736x_gpio_cleanup(void)
329{
330 dev_dbg(&pdev->dev, " cleanup\n");
331
332 release_region(pc8736x_gpio_base, 16);
333
334 unregister_chrdev(major, DEVNAME);
335}
336
337EXPORT_SYMBOL(pc8736x_access);
338
339module_init(pc8736x_gpio_init);
340module_exit(pc8736x_gpio_cleanup);
diff --git a/drivers/char/rio/riointr.c b/drivers/char/rio/riointr.c
index eec1fea0cb..0bd09040a5 100644
--- a/drivers/char/rio/riointr.c
+++ b/drivers/char/rio/riointr.c
@@ -546,7 +546,7 @@ static void RIOReceive(struct rio_info *p, struct Port *PortP)
546 ** run out of space it will be set to the offset of the 546 ** run out of space it will be set to the offset of the
547 ** next byte to copy from the packet data area. The packet 547 ** next byte to copy from the packet data area. The packet
548 ** length field is decremented by the number of bytes that 548 ** length field is decremented by the number of bytes that
549 ** we succesfully removed from the packet. When this reaches 549 ** we successfully removed from the packet. When this reaches
550 ** zero, we reset the offset pointer to be zero, and free 550 ** zero, we reset the offset pointer to be zero, and free
551 ** the packet from the front of the queue. 551 ** the packet from the front of the queue.
552 */ 552 */
diff --git a/drivers/char/scx200_gpio.c b/drivers/char/scx200_gpio.c
index 664a6e97eb..5a280a3304 100644
--- a/drivers/char/scx200_gpio.c
+++ b/drivers/char/scx200_gpio.c
@@ -1,4 +1,4 @@
1/* linux/drivers/char/scx200_gpio.c 1/* linux/drivers/char/scx200_gpio.c
2 2
3 National Semiconductor SCx200 GPIO driver. Allows a user space 3 National Semiconductor SCx200 GPIO driver. Allows a user space
4 process to play with the GPIO pins. 4 process to play with the GPIO pins.
@@ -6,17 +6,26 @@
6 Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> */ 6 Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> */
7 7
8#include <linux/config.h> 8#include <linux/config.h>
9#include <linux/device.h>
9#include <linux/fs.h> 10#include <linux/fs.h>
10#include <linux/module.h> 11#include <linux/module.h>
11#include <linux/errno.h> 12#include <linux/errno.h>
12#include <linux/kernel.h> 13#include <linux/kernel.h>
13#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/platform_device.h>
14#include <asm/uaccess.h> 16#include <asm/uaccess.h>
15#include <asm/io.h> 17#include <asm/io.h>
16 18
19#include <linux/types.h>
20#include <linux/cdev.h>
21
17#include <linux/scx200_gpio.h> 22#include <linux/scx200_gpio.h>
23#include <linux/nsc_gpio.h>
18 24
19#define NAME "scx200_gpio" 25#define NAME "scx200_gpio"
26#define DEVNAME NAME
27
28static struct platform_device *pdev;
20 29
21MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>"); 30MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>");
22MODULE_DESCRIPTION("NatSemi SCx200 GPIO Pin Driver"); 31MODULE_DESCRIPTION("NatSemi SCx200 GPIO Pin Driver");
@@ -26,70 +35,23 @@ static int major = 0; /* default to dynamic major */
26module_param(major, int, 0); 35module_param(major, int, 0);
27MODULE_PARM_DESC(major, "Major device number"); 36MODULE_PARM_DESC(major, "Major device number");
28 37
29static ssize_t scx200_gpio_write(struct file *file, const char __user *data, 38struct nsc_gpio_ops scx200_access = {
30 size_t len, loff_t *ppos) 39 .owner = THIS_MODULE,
31{ 40 .gpio_config = scx200_gpio_configure,
32 unsigned m = iminor(file->f_dentry->d_inode); 41 .gpio_dump = nsc_gpio_dump,
33 size_t i; 42 .gpio_get = scx200_gpio_get,
34 43 .gpio_set = scx200_gpio_set,
35 for (i = 0; i < len; ++i) { 44 .gpio_set_high = scx200_gpio_set_high,
36 char c; 45 .gpio_set_low = scx200_gpio_set_low,
37 if (get_user(c, data+i)) 46 .gpio_change = scx200_gpio_change,
38 return -EFAULT; 47 .gpio_current = scx200_gpio_current
39 switch (c) 48};
40 {
41 case '0':
42 scx200_gpio_set(m, 0);
43 break;
44 case '1':
45 scx200_gpio_set(m, 1);
46 break;
47 case 'O':
48 printk(KERN_INFO NAME ": GPIO%d output enabled\n", m);
49 scx200_gpio_configure(m, ~1, 1);
50 break;
51 case 'o':
52 printk(KERN_INFO NAME ": GPIO%d output disabled\n", m);
53 scx200_gpio_configure(m, ~1, 0);
54 break;
55 case 'T':
56 printk(KERN_INFO NAME ": GPIO%d output is push pull\n", m);
57 scx200_gpio_configure(m, ~2, 2);
58 break;
59 case 't':
60 printk(KERN_INFO NAME ": GPIO%d output is open drain\n", m);
61 scx200_gpio_configure(m, ~2, 0);
62 break;
63 case 'P':
64 printk(KERN_INFO NAME ": GPIO%d pull up enabled\n", m);
65 scx200_gpio_configure(m, ~4, 4);
66 break;
67 case 'p':
68 printk(KERN_INFO NAME ": GPIO%d pull up disabled\n", m);
69 scx200_gpio_configure(m, ~4, 0);
70 break;
71 }
72 }
73
74 return len;
75}
76
77static ssize_t scx200_gpio_read(struct file *file, char __user *buf,
78 size_t len, loff_t *ppos)
79{
80 unsigned m = iminor(file->f_dentry->d_inode);
81 int value;
82
83 value = scx200_gpio_get(m);
84 if (put_user(value ? '1' : '0', buf))
85 return -EFAULT;
86
87 return 1;
88}
89 49
90static int scx200_gpio_open(struct inode *inode, struct file *file) 50static int scx200_gpio_open(struct inode *inode, struct file *file)
91{ 51{
92 unsigned m = iminor(inode); 52 unsigned m = iminor(inode);
53 file->private_data = &scx200_access;
54
93 if (m > 63) 55 if (m > 63)
94 return -EINVAL; 56 return -EINVAL;
95 return nonseekable_open(inode, file); 57 return nonseekable_open(inode, file);
@@ -103,47 +65,81 @@ static int scx200_gpio_release(struct inode *inode, struct file *file)
103 65
104static struct file_operations scx200_gpio_fops = { 66static struct file_operations scx200_gpio_fops = {
105 .owner = THIS_MODULE, 67 .owner = THIS_MODULE,
106 .write = scx200_gpio_write, 68 .write = nsc_gpio_write,
107 .read = scx200_gpio_read, 69 .read = nsc_gpio_read,
108 .open = scx200_gpio_open, 70 .open = scx200_gpio_open,
109 .release = scx200_gpio_release, 71 .release = scx200_gpio_release,
110}; 72};
111 73
74struct cdev *scx200_devices;
75static int num_pins = 32;
76
112static int __init scx200_gpio_init(void) 77static int __init scx200_gpio_init(void)
113{ 78{
114 int r; 79 int rc, i;
115 80 dev_t dev = MKDEV(major, 0);
116 printk(KERN_DEBUG NAME ": NatSemi SCx200 GPIO Driver\n");
117 81
118 if (!scx200_gpio_present()) { 82 if (!scx200_gpio_present()) {
119 printk(KERN_ERR NAME ": no SCx200 gpio pins available\n"); 83 printk(KERN_ERR NAME ": no SCx200 gpio present\n");
120 return -ENODEV; 84 return -ENODEV;
121 } 85 }
122 86
123 r = register_chrdev(major, NAME, &scx200_gpio_fops); 87 /* support dev_dbg() with pdev->dev */
124 if (r < 0) { 88 pdev = platform_device_alloc(DEVNAME, 0);
125 printk(KERN_ERR NAME ": unable to register character device\n"); 89 if (!pdev)
126 return r; 90 return -ENOMEM;
91
92 rc = platform_device_add(pdev);
93 if (rc)
94 goto undo_malloc;
95
96 /* nsc_gpio uses dev_dbg(), so needs this */
97 scx200_access.dev = &pdev->dev;
98
99 if (major)
100 rc = register_chrdev_region(dev, num_pins, "scx200_gpio");
101 else {
102 rc = alloc_chrdev_region(&dev, 0, num_pins, "scx200_gpio");
103 major = MAJOR(dev);
127 } 104 }
128 if (!major) { 105 if (rc < 0) {
129 major = r; 106 dev_err(&pdev->dev, "SCx200 chrdev_region err: %d\n", rc);
130 printk(KERN_DEBUG NAME ": got dynamic major %d\n", major); 107 goto undo_platform_device_add;
108 }
109 scx200_devices = kzalloc(num_pins * sizeof(struct cdev), GFP_KERNEL);
110 if (!scx200_devices) {
111 rc = -ENOMEM;
112 goto undo_chrdev_region;
113 }
114 for (i = 0; i < num_pins; i++) {
115 struct cdev *cdev = &scx200_devices[i];
116 cdev_init(cdev, &scx200_gpio_fops);
117 cdev->owner = THIS_MODULE;
118 rc = cdev_add(cdev, MKDEV(major, i), 1);
119 /* tolerate 'minor' errors */
120 if (rc)
121 dev_err(&pdev->dev, "Error %d on minor %d", rc, i);
131 } 122 }
132 123
133 return 0; 124 return 0; /* succeed */
125
126undo_chrdev_region:
127 unregister_chrdev_region(dev, num_pins);
128undo_platform_device_add:
129 platform_device_put(pdev);
130undo_malloc:
131 kfree(pdev);
132 return rc;
134} 133}
135 134
136static void __exit scx200_gpio_cleanup(void) 135static void __exit scx200_gpio_cleanup(void)
137{ 136{
138 unregister_chrdev(major, NAME); 137 kfree(scx200_devices);
138 unregister_chrdev_region(MKDEV(major, 0), num_pins);
139 platform_device_put(pdev);
140 platform_device_unregister(pdev);
141 /* kfree(pdev); */
139} 142}
140 143
141module_init(scx200_gpio_init); 144module_init(scx200_gpio_init);
142module_exit(scx200_gpio_cleanup); 145module_exit(scx200_gpio_cleanup);
143
144/*
145 Local variables:
146 compile-command: "make -k -C ../.. SUBDIRS=drivers/char modules"
147 c-basic-offset: 8
148 End:
149*/
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c
index 1b5330299e..d2d6b01dcd 100644
--- a/drivers/char/specialix.c
+++ b/drivers/char/specialix.c
@@ -2477,7 +2477,7 @@ static int __init specialix_init(void)
2477#endif 2477#endif
2478 2478
2479 for (i = 0; i < SX_NBOARD; i++) 2479 for (i = 0; i < SX_NBOARD; i++)
2480 sx_board[i].lock = SPIN_LOCK_UNLOCKED; 2480 spin_lock_init(&sx_board[i].lock);
2481 2481
2482 if (sx_init_drivers()) { 2482 if (sx_init_drivers()) {
2483 func_exit(); 2483 func_exit();
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index a9c5a7230f..bf361a5ba7 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -141,15 +141,6 @@ static char *stl_drvversion = "5.6.0";
141static struct tty_driver *stl_serial; 141static struct tty_driver *stl_serial;
142 142
143/* 143/*
144 * We will need to allocate a temporary write buffer for chars that
145 * come direct from user space. The problem is that a copy from user
146 * space might cause a page fault (typically on a system that is
147 * swapping!). All ports will share one buffer - since if the system
148 * is already swapping a shared buffer won't make things any worse.
149 */
150static char *stl_tmpwritebuf;
151
152/*
153 * Define a local default termios struct. All ports will be created 144 * Define a local default termios struct. All ports will be created
154 * with this termios initially. Basically all it defines is a raw port 145 * with this termios initially. Basically all it defines is a raw port
155 * at 9600, 8 data bits, 1 stop bit. 146 * at 9600, 8 data bits, 1 stop bit.
@@ -363,6 +354,14 @@ static unsigned char stl_vecmap[] = {
363}; 354};
364 355
365/* 356/*
357 * Lock ordering is that you may not take stallion_lock holding
358 * brd_lock.
359 */
360
361static spinlock_t brd_lock; /* Guard the board mapping */
362static spinlock_t stallion_lock; /* Guard the tty driver */
363
364/*
366 * Set up enable and disable macros for the ECH boards. They require 365 * Set up enable and disable macros for the ECH boards. They require
367 * the secondary io address space to be activated and deactivated. 366 * the secondary io address space to be activated and deactivated.
368 * This way all ECH boards can share their secondary io region. 367 * This way all ECH boards can share their secondary io region.
@@ -725,17 +724,7 @@ static struct class *stallion_class;
725 724
726static int __init stallion_module_init(void) 725static int __init stallion_module_init(void)
727{ 726{
728 unsigned long flags;
729
730#ifdef DEBUG
731 printk("init_module()\n");
732#endif
733
734 save_flags(flags);
735 cli();
736 stl_init(); 727 stl_init();
737 restore_flags(flags);
738
739 return 0; 728 return 0;
740} 729}
741 730
@@ -746,7 +735,6 @@ static void __exit stallion_module_exit(void)
746 stlbrd_t *brdp; 735 stlbrd_t *brdp;
747 stlpanel_t *panelp; 736 stlpanel_t *panelp;
748 stlport_t *portp; 737 stlport_t *portp;
749 unsigned long flags;
750 int i, j, k; 738 int i, j, k;
751 739
752#ifdef DEBUG 740#ifdef DEBUG
@@ -756,9 +744,6 @@ static void __exit stallion_module_exit(void)
756 printk(KERN_INFO "Unloading %s: version %s\n", stl_drvtitle, 744 printk(KERN_INFO "Unloading %s: version %s\n", stl_drvtitle,
757 stl_drvversion); 745 stl_drvversion);
758 746
759 save_flags(flags);
760 cli();
761
762/* 747/*
763 * Free up all allocated resources used by the ports. This includes 748 * Free up all allocated resources used by the ports. This includes
764 * memory and interrupts. As part of this process we will also do 749 * memory and interrupts. As part of this process we will also do
@@ -770,7 +755,6 @@ static void __exit stallion_module_exit(void)
770 if (i) { 755 if (i) {
771 printk("STALLION: failed to un-register tty driver, " 756 printk("STALLION: failed to un-register tty driver, "
772 "errno=%d\n", -i); 757 "errno=%d\n", -i);
773 restore_flags(flags);
774 return; 758 return;
775 } 759 }
776 for (i = 0; i < 4; i++) { 760 for (i = 0; i < 4; i++) {
@@ -783,8 +767,6 @@ static void __exit stallion_module_exit(void)
783 "errno=%d\n", -i); 767 "errno=%d\n", -i);
784 class_destroy(stallion_class); 768 class_destroy(stallion_class);
785 769
786 kfree(stl_tmpwritebuf);
787
788 for (i = 0; (i < stl_nrbrds); i++) { 770 for (i = 0; (i < stl_nrbrds); i++) {
789 if ((brdp = stl_brds[i]) == (stlbrd_t *) NULL) 771 if ((brdp = stl_brds[i]) == (stlbrd_t *) NULL)
790 continue; 772 continue;
@@ -814,8 +796,6 @@ static void __exit stallion_module_exit(void)
814 kfree(brdp); 796 kfree(brdp);
815 stl_brds[i] = (stlbrd_t *) NULL; 797 stl_brds[i] = (stlbrd_t *) NULL;
816 } 798 }
817
818 restore_flags(flags);
819} 799}
820 800
821module_init(stallion_module_init); 801module_init(stallion_module_init);
@@ -948,7 +928,7 @@ static stlbrd_t *stl_allocbrd(void)
948 928
949 brdp = kzalloc(sizeof(stlbrd_t), GFP_KERNEL); 929 brdp = kzalloc(sizeof(stlbrd_t), GFP_KERNEL);
950 if (!brdp) { 930 if (!brdp) {
951 printk("STALLION: failed to allocate memory (size=%d)\n", 931 printk("STALLION: failed to allocate memory (size=%Zd)\n",
952 sizeof(stlbrd_t)); 932 sizeof(stlbrd_t));
953 return NULL; 933 return NULL;
954 } 934 }
@@ -1066,16 +1046,17 @@ static int stl_waitcarrier(stlport_t *portp, struct file *filp)
1066 rc = 0; 1046 rc = 0;
1067 doclocal = 0; 1047 doclocal = 0;
1068 1048
1049 spin_lock_irqsave(&stallion_lock, flags);
1050
1069 if (portp->tty->termios->c_cflag & CLOCAL) 1051 if (portp->tty->termios->c_cflag & CLOCAL)
1070 doclocal++; 1052 doclocal++;
1071 1053
1072 save_flags(flags);
1073 cli();
1074 portp->openwaitcnt++; 1054 portp->openwaitcnt++;
1075 if (! tty_hung_up_p(filp)) 1055 if (! tty_hung_up_p(filp))
1076 portp->refcount--; 1056 portp->refcount--;
1077 1057
1078 for (;;) { 1058 for (;;) {
1059 /* Takes brd_lock internally */
1079 stl_setsignals(portp, 1, 1); 1060 stl_setsignals(portp, 1, 1);
1080 if (tty_hung_up_p(filp) || 1061 if (tty_hung_up_p(filp) ||
1081 ((portp->flags & ASYNC_INITIALIZED) == 0)) { 1062 ((portp->flags & ASYNC_INITIALIZED) == 0)) {
@@ -1093,13 +1074,14 @@ static int stl_waitcarrier(stlport_t *portp, struct file *filp)
1093 rc = -ERESTARTSYS; 1074 rc = -ERESTARTSYS;
1094 break; 1075 break;
1095 } 1076 }
1077 /* FIXME */
1096 interruptible_sleep_on(&portp->open_wait); 1078 interruptible_sleep_on(&portp->open_wait);
1097 } 1079 }
1098 1080
1099 if (! tty_hung_up_p(filp)) 1081 if (! tty_hung_up_p(filp))
1100 portp->refcount++; 1082 portp->refcount++;
1101 portp->openwaitcnt--; 1083 portp->openwaitcnt--;
1102 restore_flags(flags); 1084 spin_unlock_irqrestore(&stallion_lock, flags);
1103 1085
1104 return rc; 1086 return rc;
1105} 1087}
@@ -1119,16 +1101,15 @@ static void stl_close(struct tty_struct *tty, struct file *filp)
1119 if (portp == (stlport_t *) NULL) 1101 if (portp == (stlport_t *) NULL)
1120 return; 1102 return;
1121 1103
1122 save_flags(flags); 1104 spin_lock_irqsave(&stallion_lock, flags);
1123 cli();
1124 if (tty_hung_up_p(filp)) { 1105 if (tty_hung_up_p(filp)) {
1125 restore_flags(flags); 1106 spin_unlock_irqrestore(&stallion_lock, flags);
1126 return; 1107 return;
1127 } 1108 }
1128 if ((tty->count == 1) && (portp->refcount != 1)) 1109 if ((tty->count == 1) && (portp->refcount != 1))
1129 portp->refcount = 1; 1110 portp->refcount = 1;
1130 if (portp->refcount-- > 1) { 1111 if (portp->refcount-- > 1) {
1131 restore_flags(flags); 1112 spin_unlock_irqrestore(&stallion_lock, flags);
1132 return; 1113 return;
1133 } 1114 }
1134 1115
@@ -1142,11 +1123,18 @@ static void stl_close(struct tty_struct *tty, struct file *filp)
1142 * (The sc26198 has no "end-of-data" interrupt only empty FIFO) 1123 * (The sc26198 has no "end-of-data" interrupt only empty FIFO)
1143 */ 1124 */
1144 tty->closing = 1; 1125 tty->closing = 1;
1126
1127 spin_unlock_irqrestore(&stallion_lock, flags);
1128
1145 if (portp->closing_wait != ASYNC_CLOSING_WAIT_NONE) 1129 if (portp->closing_wait != ASYNC_CLOSING_WAIT_NONE)
1146 tty_wait_until_sent(tty, portp->closing_wait); 1130 tty_wait_until_sent(tty, portp->closing_wait);
1147 stl_waituntilsent(tty, (HZ / 2)); 1131 stl_waituntilsent(tty, (HZ / 2));
1148 1132
1133
1134 spin_lock_irqsave(&stallion_lock, flags);
1149 portp->flags &= ~ASYNC_INITIALIZED; 1135 portp->flags &= ~ASYNC_INITIALIZED;
1136 spin_unlock_irqrestore(&stallion_lock, flags);
1137
1150 stl_disableintrs(portp); 1138 stl_disableintrs(portp);
1151 if (tty->termios->c_cflag & HUPCL) 1139 if (tty->termios->c_cflag & HUPCL)
1152 stl_setsignals(portp, 0, 0); 1140 stl_setsignals(portp, 0, 0);
@@ -1173,7 +1161,6 @@ static void stl_close(struct tty_struct *tty, struct file *filp)
1173 1161
1174 portp->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING); 1162 portp->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
1175 wake_up_interruptible(&portp->close_wait); 1163 wake_up_interruptible(&portp->close_wait);
1176 restore_flags(flags);
1177} 1164}
1178 1165
1179/*****************************************************************************/ 1166/*****************************************************************************/
@@ -1195,9 +1182,6 @@ static int stl_write(struct tty_struct *tty, const unsigned char *buf, int count
1195 (int) tty, (int) buf, count); 1182 (int) tty, (int) buf, count);
1196#endif 1183#endif
1197 1184
1198 if ((tty == (struct tty_struct *) NULL) ||
1199 (stl_tmpwritebuf == (char *) NULL))
1200 return 0;
1201 portp = tty->driver_data; 1185 portp = tty->driver_data;
1202 if (portp == (stlport_t *) NULL) 1186 if (portp == (stlport_t *) NULL)
1203 return 0; 1187 return 0;
@@ -1302,11 +1286,6 @@ static void stl_flushchars(struct tty_struct *tty)
1302 if (portp->tx.buf == (char *) NULL) 1286 if (portp->tx.buf == (char *) NULL)
1303 return; 1287 return;
1304 1288
1305#if 0
1306 if (tty->stopped || tty->hw_stopped ||
1307 (portp->tx.head == portp->tx.tail))
1308 return;
1309#endif
1310 stl_startrxtx(portp, -1, 1); 1289 stl_startrxtx(portp, -1, 1);
1311} 1290}
1312 1291
@@ -1977,12 +1956,14 @@ static int stl_eiointr(stlbrd_t *brdp)
1977 unsigned int iobase; 1956 unsigned int iobase;
1978 int handled = 0; 1957 int handled = 0;
1979 1958
1959 spin_lock(&brd_lock);
1980 panelp = brdp->panels[0]; 1960 panelp = brdp->panels[0];
1981 iobase = panelp->iobase; 1961 iobase = panelp->iobase;
1982 while (inb(brdp->iostatus) & EIO_INTRPEND) { 1962 while (inb(brdp->iostatus) & EIO_INTRPEND) {
1983 handled = 1; 1963 handled = 1;
1984 (* panelp->isr)(panelp, iobase); 1964 (* panelp->isr)(panelp, iobase);
1985 } 1965 }
1966 spin_unlock(&brd_lock);
1986 return handled; 1967 return handled;
1987} 1968}
1988 1969
@@ -2168,7 +2149,7 @@ static int __init stl_initports(stlbrd_t *brdp, stlpanel_t *panelp)
2168 portp = kzalloc(sizeof(stlport_t), GFP_KERNEL); 2149 portp = kzalloc(sizeof(stlport_t), GFP_KERNEL);
2169 if (!portp) { 2150 if (!portp) {
2170 printk("STALLION: failed to allocate memory " 2151 printk("STALLION: failed to allocate memory "
2171 "(size=%d)\n", sizeof(stlport_t)); 2152 "(size=%Zd)\n", sizeof(stlport_t));
2172 break; 2153 break;
2173 } 2154 }
2174 2155
@@ -2304,7 +2285,7 @@ static inline int stl_initeio(stlbrd_t *brdp)
2304 panelp = kzalloc(sizeof(stlpanel_t), GFP_KERNEL); 2285 panelp = kzalloc(sizeof(stlpanel_t), GFP_KERNEL);
2305 if (!panelp) { 2286 if (!panelp) {
2306 printk(KERN_WARNING "STALLION: failed to allocate memory " 2287 printk(KERN_WARNING "STALLION: failed to allocate memory "
2307 "(size=%d)\n", sizeof(stlpanel_t)); 2288 "(size=%Zd)\n", sizeof(stlpanel_t));
2308 return -ENOMEM; 2289 return -ENOMEM;
2309 } 2290 }
2310 2291
@@ -2478,7 +2459,7 @@ static inline int stl_initech(stlbrd_t *brdp)
2478 panelp = kzalloc(sizeof(stlpanel_t), GFP_KERNEL); 2459 panelp = kzalloc(sizeof(stlpanel_t), GFP_KERNEL);
2479 if (!panelp) { 2460 if (!panelp) {
2480 printk("STALLION: failed to allocate memory " 2461 printk("STALLION: failed to allocate memory "
2481 "(size=%d)\n", sizeof(stlpanel_t)); 2462 "(size=%Zd)\n", sizeof(stlpanel_t));
2482 break; 2463 break;
2483 } 2464 }
2484 panelp->magic = STL_PANELMAGIC; 2465 panelp->magic = STL_PANELMAGIC;
@@ -2879,8 +2860,7 @@ static int stl_getportstats(stlport_t *portp, comstats_t __user *cp)
2879 portp->stats.lflags = 0; 2860 portp->stats.lflags = 0;
2880 portp->stats.rxbuffered = 0; 2861 portp->stats.rxbuffered = 0;
2881 2862
2882 save_flags(flags); 2863 spin_lock_irqsave(&stallion_lock, flags);
2883 cli();
2884 if (portp->tty != (struct tty_struct *) NULL) { 2864 if (portp->tty != (struct tty_struct *) NULL) {
2885 if (portp->tty->driver_data == portp) { 2865 if (portp->tty->driver_data == portp) {
2886 portp->stats.ttystate = portp->tty->flags; 2866 portp->stats.ttystate = portp->tty->flags;
@@ -2894,7 +2874,7 @@ static int stl_getportstats(stlport_t *portp, comstats_t __user *cp)
2894 } 2874 }
2895 } 2875 }
2896 } 2876 }
2897 restore_flags(flags); 2877 spin_unlock_irqrestore(&stallion_lock, flags);
2898 2878
2899 head = portp->tx.head; 2879 head = portp->tx.head;
2900 tail = portp->tx.tail; 2880 tail = portp->tx.tail;
@@ -3056,14 +3036,6 @@ static int __init stl_init(void)
3056 return -1; 3036 return -1;
3057 3037
3058/* 3038/*
3059 * Allocate a temporary write buffer.
3060 */
3061 stl_tmpwritebuf = kmalloc(STL_TXBUFSIZE, GFP_KERNEL);
3062 if (!stl_tmpwritebuf)
3063 printk("STALLION: failed to allocate memory (size=%d)\n",
3064 STL_TXBUFSIZE);
3065
3066/*
3067 * Set up a character driver for per board stuff. This is mainly used 3039 * Set up a character driver for per board stuff. This is mainly used
3068 * to do stats ioctls on the ports. 3040 * to do stats ioctls on the ports.
3069 */ 3041 */
@@ -3147,11 +3119,13 @@ static int stl_cd1400panelinit(stlbrd_t *brdp, stlpanel_t *panelp)
3147 unsigned int gfrcr; 3119 unsigned int gfrcr;
3148 int chipmask, i, j; 3120 int chipmask, i, j;
3149 int nrchips, uartaddr, ioaddr; 3121 int nrchips, uartaddr, ioaddr;
3122 unsigned long flags;
3150 3123
3151#ifdef DEBUG 3124#ifdef DEBUG
3152 printk("stl_panelinit(brdp=%x,panelp=%x)\n", (int) brdp, (int) panelp); 3125 printk("stl_panelinit(brdp=%x,panelp=%x)\n", (int) brdp, (int) panelp);
3153#endif 3126#endif
3154 3127
3128 spin_lock_irqsave(&brd_lock, flags);
3155 BRDENABLE(panelp->brdnr, panelp->pagenr); 3129 BRDENABLE(panelp->brdnr, panelp->pagenr);
3156 3130
3157/* 3131/*
@@ -3189,6 +3163,7 @@ static int stl_cd1400panelinit(stlbrd_t *brdp, stlpanel_t *panelp)
3189 } 3163 }
3190 3164
3191 BRDDISABLE(panelp->brdnr); 3165 BRDDISABLE(panelp->brdnr);
3166 spin_unlock_irqrestore(&brd_lock, flags);
3192 return chipmask; 3167 return chipmask;
3193} 3168}
3194 3169
@@ -3200,6 +3175,7 @@ static int stl_cd1400panelinit(stlbrd_t *brdp, stlpanel_t *panelp)
3200 3175
3201static void stl_cd1400portinit(stlbrd_t *brdp, stlpanel_t *panelp, stlport_t *portp) 3176static void stl_cd1400portinit(stlbrd_t *brdp, stlpanel_t *panelp, stlport_t *portp)
3202{ 3177{
3178 unsigned long flags;
3203#ifdef DEBUG 3179#ifdef DEBUG
3204 printk("stl_cd1400portinit(brdp=%x,panelp=%x,portp=%x)\n", 3180 printk("stl_cd1400portinit(brdp=%x,panelp=%x,portp=%x)\n",
3205 (int) brdp, (int) panelp, (int) portp); 3181 (int) brdp, (int) panelp, (int) portp);
@@ -3209,6 +3185,7 @@ static void stl_cd1400portinit(stlbrd_t *brdp, stlpanel_t *panelp, stlport_t *po
3209 (portp == (stlport_t *) NULL)) 3185 (portp == (stlport_t *) NULL))
3210 return; 3186 return;
3211 3187
3188 spin_lock_irqsave(&brd_lock, flags);
3212 portp->ioaddr = panelp->iobase + (((brdp->brdtype == BRD_ECHPCI) || 3189 portp->ioaddr = panelp->iobase + (((brdp->brdtype == BRD_ECHPCI) ||
3213 (portp->portnr < 8)) ? 0 : EREG_BANKSIZE); 3190 (portp->portnr < 8)) ? 0 : EREG_BANKSIZE);
3214 portp->uartaddr = (portp->portnr & 0x04) << 5; 3191 portp->uartaddr = (portp->portnr & 0x04) << 5;
@@ -3219,6 +3196,7 @@ static void stl_cd1400portinit(stlbrd_t *brdp, stlpanel_t *panelp, stlport_t *po
3219 stl_cd1400setreg(portp, LIVR, (portp->portnr << 3)); 3196 stl_cd1400setreg(portp, LIVR, (portp->portnr << 3));
3220 portp->hwid = stl_cd1400getreg(portp, GFRCR); 3197 portp->hwid = stl_cd1400getreg(portp, GFRCR);
3221 BRDDISABLE(portp->brdnr); 3198 BRDDISABLE(portp->brdnr);
3199 spin_unlock_irqrestore(&brd_lock, flags);
3222} 3200}
3223 3201
3224/*****************************************************************************/ 3202/*****************************************************************************/
@@ -3428,8 +3406,7 @@ static void stl_cd1400setport(stlport_t *portp, struct termios *tiosp)
3428 tiosp->c_cc[VSTART], tiosp->c_cc[VSTOP]); 3406 tiosp->c_cc[VSTART], tiosp->c_cc[VSTOP]);
3429#endif 3407#endif
3430 3408
3431 save_flags(flags); 3409 spin_lock_irqsave(&brd_lock, flags);
3432 cli();
3433 BRDENABLE(portp->brdnr, portp->pagenr); 3410 BRDENABLE(portp->brdnr, portp->pagenr);
3434 stl_cd1400setreg(portp, CAR, (portp->portnr & 0x3)); 3411 stl_cd1400setreg(portp, CAR, (portp->portnr & 0x3));
3435 srer = stl_cd1400getreg(portp, SRER); 3412 srer = stl_cd1400getreg(portp, SRER);
@@ -3466,7 +3443,7 @@ static void stl_cd1400setport(stlport_t *portp, struct termios *tiosp)
3466 portp->sigs &= ~TIOCM_CD; 3443 portp->sigs &= ~TIOCM_CD;
3467 stl_cd1400setreg(portp, SRER, ((srer & ~sreroff) | sreron)); 3444 stl_cd1400setreg(portp, SRER, ((srer & ~sreroff) | sreron));
3468 BRDDISABLE(portp->brdnr); 3445 BRDDISABLE(portp->brdnr);
3469 restore_flags(flags); 3446 spin_unlock_irqrestore(&brd_lock, flags);
3470} 3447}
3471 3448
3472/*****************************************************************************/ 3449/*****************************************************************************/
@@ -3492,8 +3469,7 @@ static void stl_cd1400setsignals(stlport_t *portp, int dtr, int rts)
3492 if (rts > 0) 3469 if (rts > 0)
3493 msvr2 = MSVR2_RTS; 3470 msvr2 = MSVR2_RTS;
3494 3471
3495 save_flags(flags); 3472 spin_lock_irqsave(&brd_lock, flags);
3496 cli();
3497 BRDENABLE(portp->brdnr, portp->pagenr); 3473 BRDENABLE(portp->brdnr, portp->pagenr);
3498 stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03)); 3474 stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
3499 if (rts >= 0) 3475 if (rts >= 0)
@@ -3501,7 +3477,7 @@ static void stl_cd1400setsignals(stlport_t *portp, int dtr, int rts)
3501 if (dtr >= 0) 3477 if (dtr >= 0)
3502 stl_cd1400setreg(portp, MSVR1, msvr1); 3478 stl_cd1400setreg(portp, MSVR1, msvr1);
3503 BRDDISABLE(portp->brdnr); 3479 BRDDISABLE(portp->brdnr);
3504 restore_flags(flags); 3480 spin_unlock_irqrestore(&brd_lock, flags);
3505} 3481}
3506 3482
3507/*****************************************************************************/ 3483/*****************************************************************************/
@@ -3520,14 +3496,13 @@ static int stl_cd1400getsignals(stlport_t *portp)
3520 printk("stl_cd1400getsignals(portp=%x)\n", (int) portp); 3496 printk("stl_cd1400getsignals(portp=%x)\n", (int) portp);
3521#endif 3497#endif
3522 3498
3523 save_flags(flags); 3499 spin_lock_irqsave(&brd_lock, flags);
3524 cli();
3525 BRDENABLE(portp->brdnr, portp->pagenr); 3500 BRDENABLE(portp->brdnr, portp->pagenr);
3526 stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03)); 3501 stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
3527 msvr1 = stl_cd1400getreg(portp, MSVR1); 3502 msvr1 = stl_cd1400getreg(portp, MSVR1);
3528 msvr2 = stl_cd1400getreg(portp, MSVR2); 3503 msvr2 = stl_cd1400getreg(portp, MSVR2);
3529 BRDDISABLE(portp->brdnr); 3504 BRDDISABLE(portp->brdnr);
3530 restore_flags(flags); 3505 spin_unlock_irqrestore(&brd_lock, flags);
3531 3506
3532 sigs = 0; 3507 sigs = 0;
3533 sigs |= (msvr1 & MSVR1_DCD) ? TIOCM_CD : 0; 3508 sigs |= (msvr1 & MSVR1_DCD) ? TIOCM_CD : 0;
@@ -3569,15 +3544,14 @@ static void stl_cd1400enablerxtx(stlport_t *portp, int rx, int tx)
3569 else if (rx > 0) 3544 else if (rx > 0)
3570 ccr |= CCR_RXENABLE; 3545 ccr |= CCR_RXENABLE;
3571 3546
3572 save_flags(flags); 3547 spin_lock_irqsave(&brd_lock, flags);
3573 cli();
3574 BRDENABLE(portp->brdnr, portp->pagenr); 3548 BRDENABLE(portp->brdnr, portp->pagenr);
3575 stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03)); 3549 stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
3576 stl_cd1400ccrwait(portp); 3550 stl_cd1400ccrwait(portp);
3577 stl_cd1400setreg(portp, CCR, ccr); 3551 stl_cd1400setreg(portp, CCR, ccr);
3578 stl_cd1400ccrwait(portp); 3552 stl_cd1400ccrwait(portp);
3579 BRDDISABLE(portp->brdnr); 3553 BRDDISABLE(portp->brdnr);
3580 restore_flags(flags); 3554 spin_unlock_irqrestore(&brd_lock, flags);
3581} 3555}
3582 3556
3583/*****************************************************************************/ 3557/*****************************************************************************/
@@ -3609,8 +3583,7 @@ static void stl_cd1400startrxtx(stlport_t *portp, int rx, int tx)
3609 else if (rx > 0) 3583 else if (rx > 0)
3610 sreron |= SRER_RXDATA; 3584 sreron |= SRER_RXDATA;
3611 3585
3612 save_flags(flags); 3586 spin_lock_irqsave(&brd_lock, flags);
3613 cli();
3614 BRDENABLE(portp->brdnr, portp->pagenr); 3587 BRDENABLE(portp->brdnr, portp->pagenr);
3615 stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03)); 3588 stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
3616 stl_cd1400setreg(portp, SRER, 3589 stl_cd1400setreg(portp, SRER,
@@ -3618,7 +3591,7 @@ static void stl_cd1400startrxtx(stlport_t *portp, int rx, int tx)
3618 BRDDISABLE(portp->brdnr); 3591 BRDDISABLE(portp->brdnr);
3619 if (tx > 0) 3592 if (tx > 0)
3620 set_bit(ASYI_TXBUSY, &portp->istate); 3593 set_bit(ASYI_TXBUSY, &portp->istate);
3621 restore_flags(flags); 3594 spin_unlock_irqrestore(&brd_lock, flags);
3622} 3595}
3623 3596
3624/*****************************************************************************/ 3597/*****************************************************************************/
@@ -3634,13 +3607,12 @@ static void stl_cd1400disableintrs(stlport_t *portp)
3634#ifdef DEBUG 3607#ifdef DEBUG
3635 printk("stl_cd1400disableintrs(portp=%x)\n", (int) portp); 3608 printk("stl_cd1400disableintrs(portp=%x)\n", (int) portp);
3636#endif 3609#endif
3637 save_flags(flags); 3610 spin_lock_irqsave(&brd_lock, flags);
3638 cli();
3639 BRDENABLE(portp->brdnr, portp->pagenr); 3611 BRDENABLE(portp->brdnr, portp->pagenr);
3640 stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03)); 3612 stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
3641 stl_cd1400setreg(portp, SRER, 0); 3613 stl_cd1400setreg(portp, SRER, 0);
3642 BRDDISABLE(portp->brdnr); 3614 BRDDISABLE(portp->brdnr);
3643 restore_flags(flags); 3615 spin_unlock_irqrestore(&brd_lock, flags);
3644} 3616}
3645 3617
3646/*****************************************************************************/ 3618/*****************************************************************************/
@@ -3653,8 +3625,7 @@ static void stl_cd1400sendbreak(stlport_t *portp, int len)
3653 printk("stl_cd1400sendbreak(portp=%x,len=%d)\n", (int) portp, len); 3625 printk("stl_cd1400sendbreak(portp=%x,len=%d)\n", (int) portp, len);
3654#endif 3626#endif
3655 3627
3656 save_flags(flags); 3628 spin_lock_irqsave(&brd_lock, flags);
3657 cli();
3658 BRDENABLE(portp->brdnr, portp->pagenr); 3629 BRDENABLE(portp->brdnr, portp->pagenr);
3659 stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03)); 3630 stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
3660 stl_cd1400setreg(portp, SRER, 3631 stl_cd1400setreg(portp, SRER,
@@ -3664,7 +3635,7 @@ static void stl_cd1400sendbreak(stlport_t *portp, int len)
3664 portp->brklen = len; 3635 portp->brklen = len;
3665 if (len == 1) 3636 if (len == 1)
3666 portp->stats.txbreaks++; 3637 portp->stats.txbreaks++;
3667 restore_flags(flags); 3638 spin_unlock_irqrestore(&brd_lock, flags);
3668} 3639}
3669 3640
3670/*****************************************************************************/ 3641/*****************************************************************************/
@@ -3688,8 +3659,7 @@ static void stl_cd1400flowctrl(stlport_t *portp, int state)
3688 if (tty == (struct tty_struct *) NULL) 3659 if (tty == (struct tty_struct *) NULL)
3689 return; 3660 return;
3690 3661
3691 save_flags(flags); 3662 spin_lock_irqsave(&brd_lock, flags);
3692 cli();
3693 BRDENABLE(portp->brdnr, portp->pagenr); 3663 BRDENABLE(portp->brdnr, portp->pagenr);
3694 stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03)); 3664 stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
3695 3665
@@ -3729,7 +3699,7 @@ static void stl_cd1400flowctrl(stlport_t *portp, int state)
3729 } 3699 }
3730 3700
3731 BRDDISABLE(portp->brdnr); 3701 BRDDISABLE(portp->brdnr);
3732 restore_flags(flags); 3702 spin_unlock_irqrestore(&brd_lock, flags);
3733} 3703}
3734 3704
3735/*****************************************************************************/ 3705/*****************************************************************************/
@@ -3753,8 +3723,7 @@ static void stl_cd1400sendflow(stlport_t *portp, int state)
3753 if (tty == (struct tty_struct *) NULL) 3723 if (tty == (struct tty_struct *) NULL)
3754 return; 3724 return;
3755 3725
3756 save_flags(flags); 3726 spin_lock_irqsave(&brd_lock, flags);
3757 cli();
3758 BRDENABLE(portp->brdnr, portp->pagenr); 3727 BRDENABLE(portp->brdnr, portp->pagenr);
3759 stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03)); 3728 stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
3760 if (state) { 3729 if (state) {
@@ -3769,7 +3738,7 @@ static void stl_cd1400sendflow(stlport_t *portp, int state)
3769 stl_cd1400ccrwait(portp); 3738 stl_cd1400ccrwait(portp);
3770 } 3739 }
3771 BRDDISABLE(portp->brdnr); 3740 BRDDISABLE(portp->brdnr);
3772 restore_flags(flags); 3741 spin_unlock_irqrestore(&brd_lock, flags);
3773} 3742}
3774 3743
3775/*****************************************************************************/ 3744/*****************************************************************************/
@@ -3785,8 +3754,7 @@ static void stl_cd1400flush(stlport_t *portp)
3785 if (portp == (stlport_t *) NULL) 3754 if (portp == (stlport_t *) NULL)
3786 return; 3755 return;
3787 3756
3788 save_flags(flags); 3757 spin_lock_irqsave(&brd_lock, flags);
3789 cli();
3790 BRDENABLE(portp->brdnr, portp->pagenr); 3758 BRDENABLE(portp->brdnr, portp->pagenr);
3791 stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03)); 3759 stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
3792 stl_cd1400ccrwait(portp); 3760 stl_cd1400ccrwait(portp);
@@ -3794,7 +3762,7 @@ static void stl_cd1400flush(stlport_t *portp)
3794 stl_cd1400ccrwait(portp); 3762 stl_cd1400ccrwait(portp);
3795 portp->tx.tail = portp->tx.head; 3763 portp->tx.tail = portp->tx.head;
3796 BRDDISABLE(portp->brdnr); 3764 BRDDISABLE(portp->brdnr);
3797 restore_flags(flags); 3765 spin_unlock_irqrestore(&brd_lock, flags);
3798} 3766}
3799 3767
3800/*****************************************************************************/ 3768/*****************************************************************************/
@@ -3833,6 +3801,7 @@ static void stl_cd1400eiointr(stlpanel_t *panelp, unsigned int iobase)
3833 (int) panelp, iobase); 3801 (int) panelp, iobase);
3834#endif 3802#endif
3835 3803
3804 spin_lock(&brd_lock);
3836 outb(SVRR, iobase); 3805 outb(SVRR, iobase);
3837 svrtype = inb(iobase + EREG_DATA); 3806 svrtype = inb(iobase + EREG_DATA);
3838 if (panelp->nrports > 4) { 3807 if (panelp->nrports > 4) {
@@ -3846,6 +3815,8 @@ static void stl_cd1400eiointr(stlpanel_t *panelp, unsigned int iobase)
3846 stl_cd1400txisr(panelp, iobase); 3815 stl_cd1400txisr(panelp, iobase);
3847 else if (svrtype & SVRR_MDM) 3816 else if (svrtype & SVRR_MDM)
3848 stl_cd1400mdmisr(panelp, iobase); 3817 stl_cd1400mdmisr(panelp, iobase);
3818
3819 spin_unlock(&brd_lock);
3849} 3820}
3850 3821
3851/*****************************************************************************/ 3822/*****************************************************************************/
@@ -4433,8 +4404,7 @@ static void stl_sc26198setport(stlport_t *portp, struct termios *tiosp)
4433 tiosp->c_cc[VSTART], tiosp->c_cc[VSTOP]); 4404 tiosp->c_cc[VSTART], tiosp->c_cc[VSTOP]);
4434#endif 4405#endif
4435 4406
4436 save_flags(flags); 4407 spin_lock_irqsave(&brd_lock, flags);
4437 cli();
4438 BRDENABLE(portp->brdnr, portp->pagenr); 4408 BRDENABLE(portp->brdnr, portp->pagenr);
4439 stl_sc26198setreg(portp, IMR, 0); 4409 stl_sc26198setreg(portp, IMR, 0);
4440 stl_sc26198updatereg(portp, MR0, mr0); 4410 stl_sc26198updatereg(portp, MR0, mr0);
@@ -4461,7 +4431,7 @@ static void stl_sc26198setport(stlport_t *portp, struct termios *tiosp)
4461 portp->imr = (portp->imr & ~imroff) | imron; 4431 portp->imr = (portp->imr & ~imroff) | imron;
4462 stl_sc26198setreg(portp, IMR, portp->imr); 4432 stl_sc26198setreg(portp, IMR, portp->imr);
4463 BRDDISABLE(portp->brdnr); 4433 BRDDISABLE(portp->brdnr);
4464 restore_flags(flags); 4434 spin_unlock_irqrestore(&brd_lock, flags);
4465} 4435}
4466 4436
4467/*****************************************************************************/ 4437/*****************************************************************************/
@@ -4491,13 +4461,12 @@ static void stl_sc26198setsignals(stlport_t *portp, int dtr, int rts)
4491 else if (rts > 0) 4461 else if (rts > 0)
4492 iopioron |= IPR_RTS; 4462 iopioron |= IPR_RTS;
4493 4463
4494 save_flags(flags); 4464 spin_lock_irqsave(&brd_lock, flags);
4495 cli();
4496 BRDENABLE(portp->brdnr, portp->pagenr); 4465 BRDENABLE(portp->brdnr, portp->pagenr);
4497 stl_sc26198setreg(portp, IOPIOR, 4466 stl_sc26198setreg(portp, IOPIOR,
4498 ((stl_sc26198getreg(portp, IOPIOR) & ~iopioroff) | iopioron)); 4467 ((stl_sc26198getreg(portp, IOPIOR) & ~iopioroff) | iopioron));
4499 BRDDISABLE(portp->brdnr); 4468 BRDDISABLE(portp->brdnr);
4500 restore_flags(flags); 4469 spin_unlock_irqrestore(&brd_lock, flags);
4501} 4470}
4502 4471
4503/*****************************************************************************/ 4472/*****************************************************************************/
@@ -4516,12 +4485,11 @@ static int stl_sc26198getsignals(stlport_t *portp)
4516 printk("stl_sc26198getsignals(portp=%x)\n", (int) portp); 4485 printk("stl_sc26198getsignals(portp=%x)\n", (int) portp);
4517#endif 4486#endif
4518 4487
4519 save_flags(flags); 4488 spin_lock_irqsave(&brd_lock, flags);
4520 cli();
4521 BRDENABLE(portp->brdnr, portp->pagenr); 4489 BRDENABLE(portp->brdnr, portp->pagenr);
4522 ipr = stl_sc26198getreg(portp, IPR); 4490 ipr = stl_sc26198getreg(portp, IPR);
4523 BRDDISABLE(portp->brdnr); 4491 BRDDISABLE(portp->brdnr);
4524 restore_flags(flags); 4492 spin_unlock_irqrestore(&brd_lock, flags);
4525 4493
4526 sigs = 0; 4494 sigs = 0;
4527 sigs |= (ipr & IPR_DCD) ? 0 : TIOCM_CD; 4495 sigs |= (ipr & IPR_DCD) ? 0 : TIOCM_CD;
@@ -4558,13 +4526,12 @@ static void stl_sc26198enablerxtx(stlport_t *portp, int rx, int tx)
4558 else if (rx > 0) 4526 else if (rx > 0)
4559 ccr |= CR_RXENABLE; 4527 ccr |= CR_RXENABLE;
4560 4528
4561 save_flags(flags); 4529 spin_lock_irqsave(&brd_lock, flags);
4562 cli();
4563 BRDENABLE(portp->brdnr, portp->pagenr); 4530 BRDENABLE(portp->brdnr, portp->pagenr);
4564 stl_sc26198setreg(portp, SCCR, ccr); 4531 stl_sc26198setreg(portp, SCCR, ccr);
4565 BRDDISABLE(portp->brdnr); 4532 BRDDISABLE(portp->brdnr);
4566 portp->crenable = ccr; 4533 portp->crenable = ccr;
4567 restore_flags(flags); 4534 spin_unlock_irqrestore(&brd_lock, flags);
4568} 4535}
4569 4536
4570/*****************************************************************************/ 4537/*****************************************************************************/
@@ -4593,15 +4560,14 @@ static void stl_sc26198startrxtx(stlport_t *portp, int rx, int tx)
4593 else if (rx > 0) 4560 else if (rx > 0)
4594 imr |= IR_RXRDY | IR_RXBREAK | IR_RXWATCHDOG; 4561 imr |= IR_RXRDY | IR_RXBREAK | IR_RXWATCHDOG;
4595 4562
4596 save_flags(flags); 4563 spin_lock_irqsave(&brd_lock, flags);
4597 cli();
4598 BRDENABLE(portp->brdnr, portp->pagenr); 4564 BRDENABLE(portp->brdnr, portp->pagenr);
4599 stl_sc26198setreg(portp, IMR, imr); 4565 stl_sc26198setreg(portp, IMR, imr);
4600 BRDDISABLE(portp->brdnr); 4566 BRDDISABLE(portp->brdnr);
4601 portp->imr = imr; 4567 portp->imr = imr;
4602 if (tx > 0) 4568 if (tx > 0)
4603 set_bit(ASYI_TXBUSY, &portp->istate); 4569 set_bit(ASYI_TXBUSY, &portp->istate);
4604 restore_flags(flags); 4570 spin_unlock_irqrestore(&brd_lock, flags);
4605} 4571}
4606 4572
4607/*****************************************************************************/ 4573/*****************************************************************************/
@@ -4618,13 +4584,12 @@ static void stl_sc26198disableintrs(stlport_t *portp)
4618 printk("stl_sc26198disableintrs(portp=%x)\n", (int) portp); 4584 printk("stl_sc26198disableintrs(portp=%x)\n", (int) portp);
4619#endif 4585#endif
4620 4586
4621 save_flags(flags); 4587 spin_lock_irqsave(&brd_lock, flags);
4622 cli();
4623 BRDENABLE(portp->brdnr, portp->pagenr); 4588 BRDENABLE(portp->brdnr, portp->pagenr);
4624 portp->imr = 0; 4589 portp->imr = 0;
4625 stl_sc26198setreg(portp, IMR, 0); 4590 stl_sc26198setreg(portp, IMR, 0);
4626 BRDDISABLE(portp->brdnr); 4591 BRDDISABLE(portp->brdnr);
4627 restore_flags(flags); 4592 spin_unlock_irqrestore(&brd_lock, flags);
4628} 4593}
4629 4594
4630/*****************************************************************************/ 4595/*****************************************************************************/
@@ -4637,8 +4602,7 @@ static void stl_sc26198sendbreak(stlport_t *portp, int len)
4637 printk("stl_sc26198sendbreak(portp=%x,len=%d)\n", (int) portp, len); 4602 printk("stl_sc26198sendbreak(portp=%x,len=%d)\n", (int) portp, len);
4638#endif 4603#endif
4639 4604
4640 save_flags(flags); 4605 spin_lock_irqsave(&brd_lock, flags);
4641 cli();
4642 BRDENABLE(portp->brdnr, portp->pagenr); 4606 BRDENABLE(portp->brdnr, portp->pagenr);
4643 if (len == 1) { 4607 if (len == 1) {
4644 stl_sc26198setreg(portp, SCCR, CR_TXSTARTBREAK); 4608 stl_sc26198setreg(portp, SCCR, CR_TXSTARTBREAK);
@@ -4647,7 +4611,7 @@ static void stl_sc26198sendbreak(stlport_t *portp, int len)
4647 stl_sc26198setreg(portp, SCCR, CR_TXSTOPBREAK); 4611 stl_sc26198setreg(portp, SCCR, CR_TXSTOPBREAK);
4648 } 4612 }
4649 BRDDISABLE(portp->brdnr); 4613 BRDDISABLE(portp->brdnr);
4650 restore_flags(flags); 4614 spin_unlock_irqrestore(&brd_lock, flags);
4651} 4615}
4652 4616
4653/*****************************************************************************/ 4617/*****************************************************************************/
@@ -4672,8 +4636,7 @@ static void stl_sc26198flowctrl(stlport_t *portp, int state)
4672 if (tty == (struct tty_struct *) NULL) 4636 if (tty == (struct tty_struct *) NULL)
4673 return; 4637 return;
4674 4638
4675 save_flags(flags); 4639 spin_lock_irqsave(&brd_lock, flags);
4676 cli();
4677 BRDENABLE(portp->brdnr, portp->pagenr); 4640 BRDENABLE(portp->brdnr, portp->pagenr);
4678 4641
4679 if (state) { 4642 if (state) {
@@ -4719,7 +4682,7 @@ static void stl_sc26198flowctrl(stlport_t *portp, int state)
4719 } 4682 }
4720 4683
4721 BRDDISABLE(portp->brdnr); 4684 BRDDISABLE(portp->brdnr);
4722 restore_flags(flags); 4685 spin_unlock_irqrestore(&brd_lock, flags);
4723} 4686}
4724 4687
4725/*****************************************************************************/ 4688/*****************************************************************************/
@@ -4744,8 +4707,7 @@ static void stl_sc26198sendflow(stlport_t *portp, int state)
4744 if (tty == (struct tty_struct *) NULL) 4707 if (tty == (struct tty_struct *) NULL)
4745 return; 4708 return;
4746 4709
4747 save_flags(flags); 4710 spin_lock_irqsave(&brd_lock, flags);
4748 cli();
4749 BRDENABLE(portp->brdnr, portp->pagenr); 4711 BRDENABLE(portp->brdnr, portp->pagenr);
4750 if (state) { 4712 if (state) {
4751 mr0 = stl_sc26198getreg(portp, MR0); 4713 mr0 = stl_sc26198getreg(portp, MR0);
@@ -4765,7 +4727,7 @@ static void stl_sc26198sendflow(stlport_t *portp, int state)
4765 stl_sc26198setreg(portp, MR0, mr0); 4727 stl_sc26198setreg(portp, MR0, mr0);
4766 } 4728 }
4767 BRDDISABLE(portp->brdnr); 4729 BRDDISABLE(portp->brdnr);
4768 restore_flags(flags); 4730 spin_unlock_irqrestore(&brd_lock, flags);
4769} 4731}
4770 4732
4771/*****************************************************************************/ 4733/*****************************************************************************/
@@ -4781,14 +4743,13 @@ static void stl_sc26198flush(stlport_t *portp)
4781 if (portp == (stlport_t *) NULL) 4743 if (portp == (stlport_t *) NULL)
4782 return; 4744 return;
4783 4745
4784 save_flags(flags); 4746 spin_lock_irqsave(&brd_lock, flags);
4785 cli();
4786 BRDENABLE(portp->brdnr, portp->pagenr); 4747 BRDENABLE(portp->brdnr, portp->pagenr);
4787 stl_sc26198setreg(portp, SCCR, CR_TXRESET); 4748 stl_sc26198setreg(portp, SCCR, CR_TXRESET);
4788 stl_sc26198setreg(portp, SCCR, portp->crenable); 4749 stl_sc26198setreg(portp, SCCR, portp->crenable);
4789 BRDDISABLE(portp->brdnr); 4750 BRDDISABLE(portp->brdnr);
4790 portp->tx.tail = portp->tx.head; 4751 portp->tx.tail = portp->tx.head;
4791 restore_flags(flags); 4752 spin_unlock_irqrestore(&brd_lock, flags);
4792} 4753}
4793 4754
4794/*****************************************************************************/ 4755/*****************************************************************************/
@@ -4815,12 +4776,11 @@ static int stl_sc26198datastate(stlport_t *portp)
4815 if (test_bit(ASYI_TXBUSY, &portp->istate)) 4776 if (test_bit(ASYI_TXBUSY, &portp->istate))
4816 return 1; 4777 return 1;
4817 4778
4818 save_flags(flags); 4779 spin_lock_irqsave(&brd_lock, flags);
4819 cli();
4820 BRDENABLE(portp->brdnr, portp->pagenr); 4780 BRDENABLE(portp->brdnr, portp->pagenr);
4821 sr = stl_sc26198getreg(portp, SR); 4781 sr = stl_sc26198getreg(portp, SR);
4822 BRDDISABLE(portp->brdnr); 4782 BRDDISABLE(portp->brdnr);
4823 restore_flags(flags); 4783 spin_unlock_irqrestore(&brd_lock, flags);
4824 4784
4825 return (sr & SR_TXEMPTY) ? 0 : 1; 4785 return (sr & SR_TXEMPTY) ? 0 : 1;
4826} 4786}
@@ -4878,6 +4838,8 @@ static void stl_sc26198intr(stlpanel_t *panelp, unsigned int iobase)
4878 stlport_t *portp; 4838 stlport_t *portp;
4879 unsigned int iack; 4839 unsigned int iack;
4880 4840
4841 spin_lock(&brd_lock);
4842
4881/* 4843/*
4882 * Work around bug in sc26198 chip... Cannot have A6 address 4844 * Work around bug in sc26198 chip... Cannot have A6 address
4883 * line of UART high, else iack will be returned as 0. 4845 * line of UART high, else iack will be returned as 0.
@@ -4893,6 +4855,8 @@ static void stl_sc26198intr(stlpanel_t *panelp, unsigned int iobase)
4893 stl_sc26198txisr(portp); 4855 stl_sc26198txisr(portp);
4894 else 4856 else
4895 stl_sc26198otherisr(portp, iack); 4857 stl_sc26198otherisr(portp, iack);
4858
4859 spin_unlock(&brd_lock);
4896} 4860}
4897 4861
4898/*****************************************************************************/ 4862/*****************************************************************************/
diff --git a/drivers/char/sx.c b/drivers/char/sx.c
index 3b47472302..76b9107f7f 100644
--- a/drivers/char/sx.c
+++ b/drivers/char/sx.c
@@ -2320,7 +2320,7 @@ static int sx_init_portstructs (int nboards, int nports)
2320#ifdef NEW_WRITE_LOCKING 2320#ifdef NEW_WRITE_LOCKING
2321 port->gs.port_write_mutex = MUTEX; 2321 port->gs.port_write_mutex = MUTEX;
2322#endif 2322#endif
2323 port->gs.driver_lock = SPIN_LOCK_UNLOCKED; 2323 spin_lock_init(&port->gs.driver_lock);
2324 /* 2324 /*
2325 * Initializing wait queue 2325 * Initializing wait queue
2326 */ 2326 */
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index f58ad7f682..ef68d152d3 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -343,7 +343,7 @@ static ssize_t store_received_ref_clk3b(struct device *d,
343 343
344 val = (unsigned char)tmp; 344 val = (unsigned char)tmp;
345 spin_lock_irqsave(&event_lock, flags); 345 spin_lock_irqsave(&event_lock, flags);
346 SET_PORT_BITS(TLCLK_REG1, 0xef, val << 1); 346 SET_PORT_BITS(TLCLK_REG1, 0xdf, val << 1);
347 spin_unlock_irqrestore(&event_lock, flags); 347 spin_unlock_irqrestore(&event_lock, flags);
348 348
349 return strnlen(buf, count); 349 return strnlen(buf, count);
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 8b2a599698..bd74e82d8a 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -2621,10 +2621,9 @@ int tty_ioctl(struct inode * inode, struct file * file,
2621 tty->driver->break_ctl(tty, 0); 2621 tty->driver->break_ctl(tty, 0);
2622 return 0; 2622 return 0;
2623 case TCSBRK: /* SVID version: non-zero arg --> no break */ 2623 case TCSBRK: /* SVID version: non-zero arg --> no break */
2624 /* 2624 /* non-zero arg means wait for all output data
2625 * XXX is the above comment correct, or the 2625 * to be sent (performed above) but don't send break.
2626 * code below correct? Is this ioctl used at 2626 * This is used by the tcdrain() termios function.
2627 * all by anyone?
2628 */ 2627 */
2629 if (!arg) 2628 if (!arg)
2630 return send_break(tty, 250); 2629 return send_break(tty, 250);
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 6c94879e0b..714d95ff2f 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -98,7 +98,22 @@
98#include <asm/system.h> 98#include <asm/system.h>
99#include <asm/uaccess.h> 99#include <asm/uaccess.h>
100 100
101#define MAX_NR_CON_DRIVER 16
101 102
103#define CON_DRIVER_FLAG_MODULE 1
104#define CON_DRIVER_FLAG_INIT 2
105
106struct con_driver {
107 const struct consw *con;
108 const char *desc;
109 struct class_device *class_dev;
110 int node;
111 int first;
112 int last;
113 int flag;
114};
115
116static struct con_driver registered_con_driver[MAX_NR_CON_DRIVER];
102const struct consw *conswitchp; 117const struct consw *conswitchp;
103 118
104/* A bitmap for codes <32. A bit of 1 indicates that the code 119/* A bitmap for codes <32. A bit of 1 indicates that the code
@@ -2557,7 +2572,7 @@ static int __init con_init(void)
2557{ 2572{
2558 const char *display_desc = NULL; 2573 const char *display_desc = NULL;
2559 struct vc_data *vc; 2574 struct vc_data *vc;
2560 unsigned int currcons = 0; 2575 unsigned int currcons = 0, i;
2561 2576
2562 acquire_console_sem(); 2577 acquire_console_sem();
2563 2578
@@ -2569,6 +2584,22 @@ static int __init con_init(void)
2569 return 0; 2584 return 0;
2570 } 2585 }
2571 2586
2587 for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
2588 struct con_driver *con_driver = &registered_con_driver[i];
2589
2590 if (con_driver->con == NULL) {
2591 con_driver->con = conswitchp;
2592 con_driver->desc = display_desc;
2593 con_driver->flag = CON_DRIVER_FLAG_INIT;
2594 con_driver->first = 0;
2595 con_driver->last = MAX_NR_CONSOLES - 1;
2596 break;
2597 }
2598 }
2599
2600 for (i = 0; i < MAX_NR_CONSOLES; i++)
2601 con_driver_map[i] = conswitchp;
2602
2572 init_timer(&console_timer); 2603 init_timer(&console_timer);
2573 console_timer.function = blank_screen_t; 2604 console_timer.function = blank_screen_t;
2574 if (blankinterval) { 2605 if (blankinterval) {
@@ -2656,38 +2687,53 @@ int __init vty_init(void)
2656} 2687}
2657 2688
2658#ifndef VT_SINGLE_DRIVER 2689#ifndef VT_SINGLE_DRIVER
2690#include <linux/device.h>
2659 2691
2660/* 2692static struct class *vtconsole_class;
2661 * If we support more console drivers, this function is used
2662 * when a driver wants to take over some existing consoles
2663 * and become default driver for newly opened ones.
2664 */
2665 2693
2666int take_over_console(const struct consw *csw, int first, int last, int deflt) 2694static int bind_con_driver(const struct consw *csw, int first, int last,
2695 int deflt)
2667{ 2696{
2668 int i, j = -1; 2697 struct module *owner = csw->owner;
2669 const char *desc; 2698 const char *desc = NULL;
2670 struct module *owner; 2699 struct con_driver *con_driver;
2700 int i, j = -1, k = -1, retval = -ENODEV;
2671 2701
2672 owner = csw->owner;
2673 if (!try_module_get(owner)) 2702 if (!try_module_get(owner))
2674 return -ENODEV; 2703 return -ENODEV;
2675 2704
2676 acquire_console_sem(); 2705 acquire_console_sem();
2677 2706
2678 desc = csw->con_startup(); 2707 /* check if driver is registered */
2679 if (!desc) { 2708 for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
2680 release_console_sem(); 2709 con_driver = &registered_con_driver[i];
2681 module_put(owner); 2710
2682 return -ENODEV; 2711 if (con_driver->con == csw) {
2712 desc = con_driver->desc;
2713 retval = 0;
2714 break;
2715 }
2716 }
2717
2718 if (retval)
2719 goto err;
2720
2721 if (!(con_driver->flag & CON_DRIVER_FLAG_INIT)) {
2722 csw->con_startup();
2723 con_driver->flag |= CON_DRIVER_FLAG_INIT;
2683 } 2724 }
2725
2684 if (deflt) { 2726 if (deflt) {
2685 if (conswitchp) 2727 if (conswitchp)
2686 module_put(conswitchp->owner); 2728 module_put(conswitchp->owner);
2729
2687 __module_get(owner); 2730 __module_get(owner);
2688 conswitchp = csw; 2731 conswitchp = csw;
2689 } 2732 }
2690 2733
2734 first = max(first, con_driver->first);
2735 last = min(last, con_driver->last);
2736
2691 for (i = first; i <= last; i++) { 2737 for (i = first; i <= last; i++) {
2692 int old_was_color; 2738 int old_was_color;
2693 struct vc_data *vc = vc_cons[i].d; 2739 struct vc_data *vc = vc_cons[i].d;
@@ -2701,15 +2747,17 @@ int take_over_console(const struct consw *csw, int first, int last, int deflt)
2701 continue; 2747 continue;
2702 2748
2703 j = i; 2749 j = i;
2704 if (CON_IS_VISIBLE(vc)) 2750
2751 if (CON_IS_VISIBLE(vc)) {
2752 k = i;
2705 save_screen(vc); 2753 save_screen(vc);
2754 }
2755
2706 old_was_color = vc->vc_can_do_color; 2756 old_was_color = vc->vc_can_do_color;
2707 vc->vc_sw->con_deinit(vc); 2757 vc->vc_sw->con_deinit(vc);
2708 vc->vc_origin = (unsigned long)vc->vc_screenbuf; 2758 vc->vc_origin = (unsigned long)vc->vc_screenbuf;
2709 vc->vc_visible_origin = vc->vc_origin;
2710 vc->vc_scr_end = vc->vc_origin + vc->vc_screenbuf_size;
2711 vc->vc_pos = vc->vc_origin + vc->vc_size_row * vc->vc_y + 2 * vc->vc_x;
2712 visual_init(vc, i, 0); 2759 visual_init(vc, i, 0);
2760 set_origin(vc);
2713 update_attr(vc); 2761 update_attr(vc);
2714 2762
2715 /* If the console changed between mono <-> color, then 2763 /* If the console changed between mono <-> color, then
@@ -2718,36 +2766,506 @@ int take_over_console(const struct consw *csw, int first, int last, int deflt)
2718 */ 2766 */
2719 if (old_was_color != vc->vc_can_do_color) 2767 if (old_was_color != vc->vc_can_do_color)
2720 clear_buffer_attributes(vc); 2768 clear_buffer_attributes(vc);
2721
2722 if (CON_IS_VISIBLE(vc))
2723 update_screen(vc);
2724 } 2769 }
2770
2725 printk("Console: switching "); 2771 printk("Console: switching ");
2726 if (!deflt) 2772 if (!deflt)
2727 printk("consoles %d-%d ", first+1, last+1); 2773 printk("consoles %d-%d ", first+1, last+1);
2728 if (j >= 0) 2774 if (j >= 0) {
2775 struct vc_data *vc = vc_cons[j].d;
2776
2729 printk("to %s %s %dx%d\n", 2777 printk("to %s %s %dx%d\n",
2730 vc_cons[j].d->vc_can_do_color ? "colour" : "mono", 2778 vc->vc_can_do_color ? "colour" : "mono",
2731 desc, vc_cons[j].d->vc_cols, vc_cons[j].d->vc_rows); 2779 desc, vc->vc_cols, vc->vc_rows);
2732 else 2780
2781 if (k >= 0) {
2782 vc = vc_cons[k].d;
2783 update_screen(vc);
2784 }
2785 } else
2733 printk("to %s\n", desc); 2786 printk("to %s\n", desc);
2734 2787
2788 retval = 0;
2789err:
2735 release_console_sem(); 2790 release_console_sem();
2791 module_put(owner);
2792 return retval;
2793};
2794
2795#ifdef CONFIG_VT_HW_CONSOLE_BINDING
2796static int con_is_graphics(const struct consw *csw, int first, int last)
2797{
2798 int i, retval = 0;
2799
2800 for (i = first; i <= last; i++) {
2801 struct vc_data *vc = vc_cons[i].d;
2802
2803 if (vc && vc->vc_mode == KD_GRAPHICS) {
2804 retval = 1;
2805 break;
2806 }
2807 }
2808
2809 return retval;
2810}
2811
2812static int unbind_con_driver(const struct consw *csw, int first, int last,
2813 int deflt)
2814{
2815 struct module *owner = csw->owner;
2816 const struct consw *defcsw = NULL;
2817 struct con_driver *con_driver = NULL, *con_back = NULL;
2818 int i, retval = -ENODEV;
2819
2820 if (!try_module_get(owner))
2821 return -ENODEV;
2822
2823 acquire_console_sem();
2824
2825 /* check if driver is registered and if it is unbindable */
2826 for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
2827 con_driver = &registered_con_driver[i];
2828
2829 if (con_driver->con == csw &&
2830 con_driver->flag & CON_DRIVER_FLAG_MODULE) {
2831 retval = 0;
2832 break;
2833 }
2834 }
2835
2836 if (retval) {
2837 release_console_sem();
2838 goto err;
2839 }
2840
2841 retval = -ENODEV;
2842
2843 /* check if backup driver exists */
2844 for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
2845 con_back = &registered_con_driver[i];
2846
2847 if (con_back->con &&
2848 !(con_back->flag & CON_DRIVER_FLAG_MODULE)) {
2849 defcsw = con_back->con;
2850 retval = 0;
2851 break;
2852 }
2853 }
2854
2855 if (retval) {
2856 release_console_sem();
2857 goto err;
2858 }
2859
2860 if (!con_is_bound(csw)) {
2861 release_console_sem();
2862 goto err;
2863 }
2864
2865 first = max(first, con_driver->first);
2866 last = min(last, con_driver->last);
2867
2868 for (i = first; i <= last; i++) {
2869 if (con_driver_map[i] == csw) {
2870 module_put(csw->owner);
2871 con_driver_map[i] = NULL;
2872 }
2873 }
2874
2875 if (!con_is_bound(defcsw)) {
2876 const struct consw *defconsw = conswitchp;
2877
2878 defcsw->con_startup();
2879 con_back->flag |= CON_DRIVER_FLAG_INIT;
2880 /*
2881 * vgacon may change the default driver to point
2882 * to dummycon, we restore it here...
2883 */
2884 conswitchp = defconsw;
2885 }
2886
2887 if (!con_is_bound(csw))
2888 con_driver->flag &= ~CON_DRIVER_FLAG_INIT;
2736 2889
2890 release_console_sem();
2891 /* ignore return value, binding should not fail */
2892 bind_con_driver(defcsw, first, last, deflt);
2893err:
2737 module_put(owner); 2894 module_put(owner);
2895 return retval;
2896
2897}
2898
2899static int vt_bind(struct con_driver *con)
2900{
2901 const struct consw *defcsw = NULL, *csw = NULL;
2902 int i, more = 1, first = -1, last = -1, deflt = 0;
2903
2904 if (!con->con || !(con->flag & CON_DRIVER_FLAG_MODULE) ||
2905 con_is_graphics(con->con, con->first, con->last))
2906 goto err;
2907
2908 csw = con->con;
2909
2910 for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
2911 struct con_driver *con = &registered_con_driver[i];
2912
2913 if (con->con && !(con->flag & CON_DRIVER_FLAG_MODULE)) {
2914 defcsw = con->con;
2915 break;
2916 }
2917 }
2918
2919 if (!defcsw)
2920 goto err;
2921
2922 while (more) {
2923 more = 0;
2924
2925 for (i = con->first; i <= con->last; i++) {
2926 if (con_driver_map[i] == defcsw) {
2927 if (first == -1)
2928 first = i;
2929 last = i;
2930 more = 1;
2931 } else if (first != -1)
2932 break;
2933 }
2934
2935 if (first == 0 && last == MAX_NR_CONSOLES -1)
2936 deflt = 1;
2937
2938 if (first != -1)
2939 bind_con_driver(csw, first, last, deflt);
2940
2941 first = -1;
2942 last = -1;
2943 deflt = 0;
2944 }
2945
2946err:
2738 return 0; 2947 return 0;
2739} 2948}
2740 2949
2741void give_up_console(const struct consw *csw) 2950static int vt_unbind(struct con_driver *con)
2951{
2952 const struct consw *csw = NULL;
2953 int i, more = 1, first = -1, last = -1, deflt = 0;
2954
2955 if (!con->con || !(con->flag & CON_DRIVER_FLAG_MODULE) ||
2956 con_is_graphics(con->con, con->first, con->last))
2957 goto err;
2958
2959 csw = con->con;
2960
2961 while (more) {
2962 more = 0;
2963
2964 for (i = con->first; i <= con->last; i++) {
2965 if (con_driver_map[i] == csw) {
2966 if (first == -1)
2967 first = i;
2968 last = i;
2969 more = 1;
2970 } else if (first != -1)
2971 break;
2972 }
2973
2974 if (first == 0 && last == MAX_NR_CONSOLES -1)
2975 deflt = 1;
2976
2977 if (first != -1)
2978 unbind_con_driver(csw, first, last, deflt);
2979
2980 first = -1;
2981 last = -1;
2982 deflt = 0;
2983 }
2984
2985err:
2986 return 0;
2987}
2988#else
2989static inline int vt_bind(struct con_driver *con)
2990{
2991 return 0;
2992}
2993static inline int vt_unbind(struct con_driver *con)
2994{
2995 return 0;
2996}
2997#endif /* CONFIG_VT_HW_CONSOLE_BINDING */
2998
2999static ssize_t store_bind(struct class_device *class_device,
3000 const char *buf, size_t count)
3001{
3002 struct con_driver *con = class_get_devdata(class_device);
3003 int bind = simple_strtoul(buf, NULL, 0);
3004
3005 if (bind)
3006 vt_bind(con);
3007 else
3008 vt_unbind(con);
3009
3010 return count;
3011}
3012
3013static ssize_t show_bind(struct class_device *class_device, char *buf)
3014{
3015 struct con_driver *con = class_get_devdata(class_device);
3016 int bind = con_is_bound(con->con);
3017
3018 return snprintf(buf, PAGE_SIZE, "%i\n", bind);
3019}
3020
3021static ssize_t show_name(struct class_device *class_device, char *buf)
3022{
3023 struct con_driver *con = class_get_devdata(class_device);
3024
3025 return snprintf(buf, PAGE_SIZE, "%s %s\n",
3026 (con->flag & CON_DRIVER_FLAG_MODULE) ? "(M)" : "(S)",
3027 con->desc);
3028
3029}
3030
3031static struct class_device_attribute class_device_attrs[] = {
3032 __ATTR(bind, S_IRUGO|S_IWUSR, show_bind, store_bind),
3033 __ATTR(name, S_IRUGO, show_name, NULL),
3034};
3035
3036static int vtconsole_init_class_device(struct con_driver *con)
3037{
3038 int i;
3039
3040 class_set_devdata(con->class_dev, con);
3041 for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++)
3042 class_device_create_file(con->class_dev,
3043 &class_device_attrs[i]);
3044
3045 return 0;
3046}
3047
3048static void vtconsole_deinit_class_device(struct con_driver *con)
2742{ 3049{
2743 int i; 3050 int i;
2744 3051
2745 for(i = 0; i < MAX_NR_CONSOLES; i++) 3052 for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++)
3053 class_device_remove_file(con->class_dev,
3054 &class_device_attrs[i]);
3055}
3056
3057/**
3058 * con_is_bound - checks if driver is bound to the console
3059 * @csw: console driver
3060 *
3061 * RETURNS: zero if unbound, nonzero if bound
3062 *
3063 * Drivers can call this and if zero, they should release
3064 * all resources allocated on con_startup()
3065 */
3066int con_is_bound(const struct consw *csw)
3067{
3068 int i, bound = 0;
3069
3070 for (i = 0; i < MAX_NR_CONSOLES; i++) {
2746 if (con_driver_map[i] == csw) { 3071 if (con_driver_map[i] == csw) {
2747 module_put(csw->owner); 3072 bound = 1;
2748 con_driver_map[i] = NULL; 3073 break;
3074 }
3075 }
3076
3077 return bound;
3078}
3079EXPORT_SYMBOL(con_is_bound);
3080
3081/**
3082 * register_con_driver - register console driver to console layer
3083 * @csw: console driver
3084 * @first: the first console to take over, minimum value is 0
3085 * @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1
3086 *
3087 * DESCRIPTION: This function registers a console driver which can later
3088 * bind to a range of consoles specified by @first and @last. It will
3089 * also initialize the console driver by calling con_startup().
3090 */
3091int register_con_driver(const struct consw *csw, int first, int last)
3092{
3093 struct module *owner = csw->owner;
3094 struct con_driver *con_driver;
3095 const char *desc;
3096 int i, retval = 0;
3097
3098 if (!try_module_get(owner))
3099 return -ENODEV;
3100
3101 acquire_console_sem();
3102
3103 for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
3104 con_driver = &registered_con_driver[i];
3105
3106 /* already registered */
3107 if (con_driver->con == csw)
3108 retval = -EINVAL;
3109 }
3110
3111 if (retval)
3112 goto err;
3113
3114 desc = csw->con_startup();
3115
3116 if (!desc)
3117 goto err;
3118
3119 retval = -EINVAL;
3120
3121 for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
3122 con_driver = &registered_con_driver[i];
3123
3124 if (con_driver->con == NULL) {
3125 con_driver->con = csw;
3126 con_driver->desc = desc;
3127 con_driver->node = i;
3128 con_driver->flag = CON_DRIVER_FLAG_MODULE |
3129 CON_DRIVER_FLAG_INIT;
3130 con_driver->first = first;
3131 con_driver->last = last;
3132 retval = 0;
3133 break;
3134 }
3135 }
3136
3137 if (retval)
3138 goto err;
3139
3140 con_driver->class_dev = class_device_create(vtconsole_class, NULL,
3141 MKDEV(0, con_driver->node),
3142 NULL, "vtcon%i",
3143 con_driver->node);
3144
3145 if (IS_ERR(con_driver->class_dev)) {
3146 printk(KERN_WARNING "Unable to create class_device for %s; "
3147 "errno = %ld\n", con_driver->desc,
3148 PTR_ERR(con_driver->class_dev));
3149 con_driver->class_dev = NULL;
3150 } else {
3151 vtconsole_init_class_device(con_driver);
3152 }
3153err:
3154 release_console_sem();
3155 module_put(owner);
3156 return retval;
3157}
3158EXPORT_SYMBOL(register_con_driver);
3159
3160/**
3161 * unregister_con_driver - unregister console driver from console layer
3162 * @csw: console driver
3163 *
3164 * DESCRIPTION: All drivers that registers to the console layer must
3165 * call this function upon exit, or if the console driver is in a state
3166 * where it won't be able to handle console services, such as the
3167 * framebuffer console without loaded framebuffer drivers.
3168 *
3169 * The driver must unbind first prior to unregistration.
3170 */
3171int unregister_con_driver(const struct consw *csw)
3172{
3173 int i, retval = -ENODEV;
3174
3175 acquire_console_sem();
3176
3177 /* cannot unregister a bound driver */
3178 if (con_is_bound(csw))
3179 goto err;
3180
3181 for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
3182 struct con_driver *con_driver = &registered_con_driver[i];
3183
3184 if (con_driver->con == csw &&
3185 con_driver->flag & CON_DRIVER_FLAG_MODULE) {
3186 vtconsole_deinit_class_device(con_driver);
3187 class_device_destroy(vtconsole_class,
3188 MKDEV(0, con_driver->node));
3189 con_driver->con = NULL;
3190 con_driver->desc = NULL;
3191 con_driver->class_dev = NULL;
3192 con_driver->node = 0;
3193 con_driver->flag = 0;
3194 con_driver->first = 0;
3195 con_driver->last = 0;
3196 retval = 0;
3197 break;
3198 }
3199 }
3200err:
3201 release_console_sem();
3202 return retval;
3203}
3204EXPORT_SYMBOL(unregister_con_driver);
3205
3206/*
3207 * If we support more console drivers, this function is used
3208 * when a driver wants to take over some existing consoles
3209 * and become default driver for newly opened ones.
3210 *
3211 * take_over_console is basically a register followed by unbind
3212 */
3213int take_over_console(const struct consw *csw, int first, int last, int deflt)
3214{
3215 int err;
3216
3217 err = register_con_driver(csw, first, last);
3218
3219 if (!err)
3220 bind_con_driver(csw, first, last, deflt);
3221
3222 return err;
3223}
3224
3225/*
3226 * give_up_console is a wrapper to unregister_con_driver. It will only
3227 * work if driver is fully unbound.
3228 */
3229void give_up_console(const struct consw *csw)
3230{
3231 unregister_con_driver(csw);
3232}
3233
3234static int __init vtconsole_class_init(void)
3235{
3236 int i;
3237
3238 vtconsole_class = class_create(THIS_MODULE, "vtconsole");
3239 if (IS_ERR(vtconsole_class)) {
3240 printk(KERN_WARNING "Unable to create vt console class; "
3241 "errno = %ld\n", PTR_ERR(vtconsole_class));
3242 vtconsole_class = NULL;
3243 }
3244
3245 /* Add system drivers to sysfs */
3246 for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
3247 struct con_driver *con = &registered_con_driver[i];
3248
3249 if (con->con && !con->class_dev) {
3250 con->class_dev =
3251 class_device_create(vtconsole_class, NULL,
3252 MKDEV(0, con->node), NULL,
3253 "vtcon%i", con->node);
3254
3255 if (IS_ERR(con->class_dev)) {
3256 printk(KERN_WARNING "Unable to create "
3257 "class_device for %s; errno = %ld\n",
3258 con->desc, PTR_ERR(con->class_dev));
3259 con->class_dev = NULL;
3260 } else {
3261 vtconsole_init_class_device(con);
3262 }
2749 } 3263 }
3264 }
3265
3266 return 0;
2750} 3267}
3268postcore_initcall(vtconsole_class_init);
2751 3269
2752#endif 3270#endif
2753 3271
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
new file mode 100644
index 0000000000..a522254702
--- /dev/null
+++ b/drivers/clocksource/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_X86_CYCLONE_TIMER) += cyclone.o
2obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
3obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
new file mode 100644
index 0000000000..7ad3be8c0f
--- /dev/null
+++ b/drivers/clocksource/acpi_pm.c
@@ -0,0 +1,177 @@
1/*
2 * linux/drivers/clocksource/acpi_pm.c
3 *
4 * This file contains the ACPI PM based clocksource.
5 *
6 * This code was largely moved from the i386 timer_pm.c file
7 * which was (C) Dominik Brodowski <linux@brodo.de> 2003
8 * and contained the following comments:
9 *
10 * Driver to use the Power Management Timer (PMTMR) available in some
11 * southbridges as primary timing source for the Linux kernel.
12 *
13 * Based on parts of linux/drivers/acpi/hardware/hwtimer.c, timer_pit.c,
14 * timer_hpet.c, and on Arjan van de Ven's implementation for 2.4.
15 *
16 * This file is licensed under the GPL v2.
17 */
18
19#include <linux/clocksource.h>
20#include <linux/errno.h>
21#include <linux/init.h>
22#include <linux/pci.h>
23#include <asm/io.h>
24
25/* Number of PMTMR ticks expected during calibration run */
26#define PMTMR_TICKS_PER_SEC 3579545
27
28/*
29 * The I/O port the PMTMR resides at.
30 * The location is detected during setup_arch(),
31 * in arch/i386/acpi/boot.c
32 */
33u32 pmtmr_ioport __read_mostly;
34
35#define ACPI_PM_MASK CLOCKSOURCE_MASK(24) /* limit it to 24 bits */
36
37static inline u32 read_pmtmr(void)
38{
39 /* mask the output to 24 bits */
40 return inl(pmtmr_ioport) & ACPI_PM_MASK;
41}
42
43static cycle_t acpi_pm_read_verified(void)
44{
45 u32 v1 = 0, v2 = 0, v3 = 0;
46
47 /*
48 * It has been reported that because of various broken
49 * chipsets (ICH4, PIIX4 and PIIX4E) where the ACPI PM clock
50 * source is not latched, you must read it multiple
51 * times to ensure a safe value is read:
52 */
53 do {
54 v1 = read_pmtmr();
55 v2 = read_pmtmr();
56 v3 = read_pmtmr();
57 } while ((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1)
58 || (v3 > v1 && v3 < v2));
59
60 return (cycle_t)v2;
61}
62
63static cycle_t acpi_pm_read(void)
64{
65 return (cycle_t)read_pmtmr();
66}
67
68static struct clocksource clocksource_acpi_pm = {
69 .name = "acpi_pm",
70 .rating = 200,
71 .read = acpi_pm_read,
72 .mask = (cycle_t)ACPI_PM_MASK,
73 .mult = 0, /*to be caluclated*/
74 .shift = 22,
75 .is_continuous = 1,
76};
77
78
79#ifdef CONFIG_PCI
80static int acpi_pm_good;
81static int __init acpi_pm_good_setup(char *__str)
82{
83 acpi_pm_good = 1;
84 return 1;
85}
86__setup("acpi_pm_good", acpi_pm_good_setup);
87
88static inline void acpi_pm_need_workaround(void)
89{
90 clocksource_acpi_pm.read = acpi_pm_read_verified;
91 clocksource_acpi_pm.rating = 110;
92}
93
94/*
95 * PIIX4 Errata:
96 *
97 * The power management timer may return improper results when read.
98 * Although the timer value settles properly after incrementing,
99 * while incrementing there is a 3 ns window every 69.8 ns where the
100 * timer value is indeterminate (a 4.2% chance that the data will be
101 * incorrect when read). As a result, the ACPI free running count up
102 * timer specification is violated due to erroneous reads.
103 */
104static void __devinit acpi_pm_check_blacklist(struct pci_dev *dev)
105{
106 u8 rev;
107
108 if (acpi_pm_good)
109 return;
110
111 pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
112 /* the bug has been fixed in PIIX4M */
113 if (rev < 3) {
114 printk(KERN_WARNING "* Found PM-Timer Bug on the chipset."
115 " Due to workarounds for a bug,\n"
116 "* this clock source is slow. Consider trying"
117 " other clock sources\n");
118
119 acpi_pm_need_workaround();
120 }
121}
122DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3,
123 acpi_pm_check_blacklist);
124
125static void __devinit acpi_pm_check_graylist(struct pci_dev *dev)
126{
127 if (acpi_pm_good)
128 return;
129
130 printk(KERN_WARNING "* The chipset may have PM-Timer Bug. Due to"
131 " workarounds for a bug,\n"
132 "* this clock source is slow. If you are sure your timer"
133 " does not have\n"
134 "* this bug, please use \"acpi_pm_good\" to disable the"
135 " workaround\n");
136
137 acpi_pm_need_workaround();
138}
139DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
140 acpi_pm_check_graylist);
141#endif
142
143
144static int __init init_acpi_pm_clocksource(void)
145{
146 u32 value1, value2;
147 unsigned int i;
148
149 if (!pmtmr_ioport)
150 return -ENODEV;
151
152 clocksource_acpi_pm.mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC,
153 clocksource_acpi_pm.shift);
154
155 /* "verify" this timing source: */
156 value1 = read_pmtmr();
157 for (i = 0; i < 10000; i++) {
158 value2 = read_pmtmr();
159 if (value2 == value1)
160 continue;
161 if (value2 > value1)
162 goto pm_good;
163 if ((value2 < value1) && ((value2) < 0xFFF))
164 goto pm_good;
165 printk(KERN_INFO "PM-Timer had inconsistent results:"
166 " 0x%#x, 0x%#x - aborting.\n", value1, value2);
167 return -EINVAL;
168 }
169 printk(KERN_INFO "PM-Timer had no reasonable result:"
170 " 0x%#x - aborting.\n", value1);
171 return -ENODEV;
172
173pm_good:
174 return clocksource_register(&clocksource_acpi_pm);
175}
176
177module_init(init_acpi_pm_clocksource);
diff --git a/drivers/clocksource/cyclone.c b/drivers/clocksource/cyclone.c
new file mode 100644
index 0000000000..bf4d3d50d1
--- /dev/null
+++ b/drivers/clocksource/cyclone.c
@@ -0,0 +1,119 @@
1#include <linux/clocksource.h>
2#include <linux/string.h>
3#include <linux/errno.h>
4#include <linux/timex.h>
5#include <linux/init.h>
6
7#include <asm/pgtable.h>
8#include <asm/io.h>
9
10#include "mach_timer.h"
11
12#define CYCLONE_CBAR_ADDR 0xFEB00CD0 /* base address ptr */
13#define CYCLONE_PMCC_OFFSET 0x51A0 /* offset to control register */
14#define CYCLONE_MPCS_OFFSET 0x51A8 /* offset to select register */
15#define CYCLONE_MPMC_OFFSET 0x51D0 /* offset to count register */
16#define CYCLONE_TIMER_FREQ 99780000 /* 100Mhz, but not really */
17#define CYCLONE_TIMER_MASK CLOCKSOURCE_MASK(32) /* 32 bit mask */
18
19int use_cyclone = 0;
20static void __iomem *cyclone_ptr;
21
22static cycle_t read_cyclone(void)
23{
24 return (cycle_t)readl(cyclone_ptr);
25}
26
27static struct clocksource clocksource_cyclone = {
28 .name = "cyclone",
29 .rating = 250,
30 .read = read_cyclone,
31 .mask = CYCLONE_TIMER_MASK,
32 .mult = 10,
33 .shift = 0,
34 .is_continuous = 1,
35};
36
37static int __init init_cyclone_clocksource(void)
38{
39 unsigned long base; /* saved value from CBAR */
40 unsigned long offset;
41 u32 __iomem* volatile cyclone_timer; /* Cyclone MPMC0 register */
42 u32 __iomem* reg;
43 int i;
44
45 /* make sure we're on a summit box: */
46 if (!use_cyclone)
47 return -ENODEV;
48
49 printk(KERN_INFO "Summit chipset: Starting Cyclone Counter.\n");
50
51 /* find base address: */
52 offset = CYCLONE_CBAR_ADDR;
53 reg = ioremap_nocache(offset, sizeof(reg));
54 if (!reg) {
55 printk(KERN_ERR "Summit chipset: Could not find valid CBAR register.\n");
56 return -ENODEV;
57 }
58 /* even on 64bit systems, this is only 32bits: */
59 base = readl(reg);
60 if (!base) {
61 printk(KERN_ERR "Summit chipset: Could not find valid CBAR value.\n");
62 return -ENODEV;
63 }
64 iounmap(reg);
65
66 /* setup PMCC: */
67 offset = base + CYCLONE_PMCC_OFFSET;
68 reg = ioremap_nocache(offset, sizeof(reg));
69 if (!reg) {
70 printk(KERN_ERR "Summit chipset: Could not find valid PMCC register.\n");
71 return -ENODEV;
72 }
73 writel(0x00000001,reg);
74 iounmap(reg);
75
76 /* setup MPCS: */
77 offset = base + CYCLONE_MPCS_OFFSET;
78 reg = ioremap_nocache(offset, sizeof(reg));
79 if (!reg) {
80 printk(KERN_ERR "Summit chipset: Could not find valid MPCS register.\n");
81 return -ENODEV;
82 }
83 writel(0x00000001,reg);
84 iounmap(reg);
85
86 /* map in cyclone_timer: */
87 offset = base + CYCLONE_MPMC_OFFSET;
88 cyclone_timer = ioremap_nocache(offset, sizeof(u64));
89 if (!cyclone_timer) {
90 printk(KERN_ERR "Summit chipset: Could not find valid MPMC register.\n");
91 return -ENODEV;
92 }
93
94 /* quick test to make sure its ticking: */
95 for (i = 0; i < 3; i++){
96 u32 old = readl(cyclone_timer);
97 int stall = 100;
98
99 while (stall--)
100 barrier();
101
102 if (readl(cyclone_timer) == old) {
103 printk(KERN_ERR "Summit chipset: Counter not counting! DISABLED\n");
104 iounmap(cyclone_timer);
105 cyclone_timer = NULL;
106 return -ENODEV;
107 }
108 }
109 cyclone_ptr = cyclone_timer;
110
111 /* sort out mult/shift values: */
112 clocksource_cyclone.shift = 22;
113 clocksource_cyclone.mult = clocksource_hz2mult(CYCLONE_TIMER_FREQ,
114 clocksource_cyclone.shift);
115
116 return clocksource_register(&clocksource_cyclone);
117}
118
119module_init(init_cyclone_clocksource);
diff --git a/drivers/clocksource/scx200_hrt.c b/drivers/clocksource/scx200_hrt.c
new file mode 100644
index 0000000000..d418b82972
--- /dev/null
+++ b/drivers/clocksource/scx200_hrt.c
@@ -0,0 +1,101 @@
1/*
2 * Copyright (C) 2006 Jim Cromie
3 *
4 * This is a clocksource driver for the Geode SCx200's 1 or 27 MHz
5 * high-resolution timer. The Geode SC-1100 (at least) has a buggy
6 * time stamp counter (TSC), which loses time unless 'idle=poll' is
7 * given as a boot-arg. In its absence, the Generic Timekeeping code
8 * will detect and de-rate the bad TSC, allowing this timer to take
9 * over timekeeping duties.
10 *
11 * Based on work by John Stultz, and Ted Phelps (in a 2.6.12-rc6 patch)
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License as
15 * published by the Free Software Foundation; either version 2 of the
16 * License, or (at your option) any later version.
17 */
18
19#include <linux/clocksource.h>
20#include <linux/init.h>
21#include <linux/module.h>
22#include <linux/ioport.h>
23#include <linux/scx200.h>
24
25#define NAME "scx200_hrt"
26
27static int mhz27;
28module_param(mhz27, int, 0); /* load time only */
29MODULE_PARM_DESC(mhz27, "count at 27.0 MHz (default is 1.0 MHz)");
30
31static int ppm;
32module_param(ppm, int, 0); /* load time only */
33MODULE_PARM_DESC(ppm, "+-adjust to actual XO freq (ppm)");
34
35/* HiRes Timer configuration register address */
36#define SCx200_TMCNFG_OFFSET (SCx200_TIMER_OFFSET + 5)
37
38/* and config settings */
39#define HR_TMEN (1 << 0) /* timer interrupt enable */
40#define HR_TMCLKSEL (1 << 1) /* 1|0 counts at 27|1 MHz */
41#define HR_TM27MPD (1 << 2) /* 1 turns off input clock (power-down) */
42
43/* The base timer frequency, * 27 if selected */
44#define HRT_FREQ 1000000
45
46static cycle_t read_hrt(void)
47{
48 /* Read the timer value */
49 return (cycle_t) inl(scx200_cb_base + SCx200_TIMER_OFFSET);
50}
51
52#define HRT_SHIFT_1 22
53#define HRT_SHIFT_27 26
54
55static struct clocksource cs_hrt = {
56 .name = "scx200_hrt",
57 .rating = 250,
58 .read = read_hrt,
59 .mask = CLOCKSOURCE_MASK(32),
60 .is_continuous = 1,
61 /* mult, shift are set based on mhz27 flag */
62};
63
64static int __init init_hrt_clocksource(void)
65{
66 /* Make sure scx200 has initializedd the configuration block */
67 if (!scx200_cb_present())
68 return -ENODEV;
69
70 /* Reserve the timer's ISA io-region for ourselves */
71 if (!request_region(scx200_cb_base + SCx200_TIMER_OFFSET,
72 SCx200_TIMER_SIZE,
73 "NatSemi SCx200 High-Resolution Timer")) {
74 printk(KERN_WARNING NAME ": unable to lock timer region\n");
75 return -ENODEV;
76 }
77
78 /* write timer config */
79 outb(HR_TMEN | (mhz27) ? HR_TMCLKSEL : 0,
80 scx200_cb_base + SCx200_TMCNFG_OFFSET);
81
82 if (mhz27) {
83 cs_hrt.shift = HRT_SHIFT_27;
84 cs_hrt.mult = clocksource_hz2mult((HRT_FREQ + ppm) * 27,
85 cs_hrt.shift);
86 } else {
87 cs_hrt.shift = HRT_SHIFT_1;
88 cs_hrt.mult = clocksource_hz2mult(HRT_FREQ + ppm,
89 cs_hrt.shift);
90 }
91 printk(KERN_INFO "enabling scx200 high-res timer (%s MHz +%d ppm)\n",
92 mhz27 ? "27":"1", ppm);
93
94 return clocksource_register(&cs_hrt);
95}
96
97module_init(init_hrt_clocksource);
98
99MODULE_AUTHOR("Jim Cromie <jim.cromie@gmail.com>");
100MODULE_DESCRIPTION("clocksource on SCx200 HiRes Timer");
101MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 44d1eca83a..35e0b9ceec 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1497,6 +1497,7 @@ int cpufreq_update_policy(unsigned int cpu)
1497} 1497}
1498EXPORT_SYMBOL(cpufreq_update_policy); 1498EXPORT_SYMBOL(cpufreq_update_policy);
1499 1499
1500#ifdef CONFIG_HOTPLUG_CPU
1500static int cpufreq_cpu_callback(struct notifier_block *nfb, 1501static int cpufreq_cpu_callback(struct notifier_block *nfb,
1501 unsigned long action, void *hcpu) 1502 unsigned long action, void *hcpu)
1502{ 1503{
@@ -1532,10 +1533,11 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
1532 return NOTIFY_OK; 1533 return NOTIFY_OK;
1533} 1534}
1534 1535
1535static struct notifier_block cpufreq_cpu_notifier = 1536static struct notifier_block __cpuinitdata cpufreq_cpu_notifier =
1536{ 1537{
1537 .notifier_call = cpufreq_cpu_callback, 1538 .notifier_call = cpufreq_cpu_callback,
1538}; 1539};
1540#endif /* CONFIG_HOTPLUG_CPU */
1539 1541
1540/********************************************************************* 1542/*********************************************************************
1541 * REGISTER / UNREGISTER CPUFREQ DRIVER * 1543 * REGISTER / UNREGISTER CPUFREQ DRIVER *
@@ -1596,7 +1598,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1596 } 1598 }
1597 1599
1598 if (!ret) { 1600 if (!ret) {
1599 register_cpu_notifier(&cpufreq_cpu_notifier); 1601 register_hotcpu_notifier(&cpufreq_cpu_notifier);
1600 dprintk("driver %s up and running\n", driver_data->name); 1602 dprintk("driver %s up and running\n", driver_data->name);
1601 cpufreq_debug_enable_ratelimit(); 1603 cpufreq_debug_enable_ratelimit();
1602 } 1604 }
@@ -1628,7 +1630,7 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1628 dprintk("unregistering driver %s\n", driver->name); 1630 dprintk("unregistering driver %s\n", driver->name);
1629 1631
1630 sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver); 1632 sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
1631 unregister_cpu_notifier(&cpufreq_cpu_notifier); 1633 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1632 1634
1633 spin_lock_irqsave(&cpufreq_driver_lock, flags); 1635 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1634 cpufreq_driver = NULL; 1636 cpufreq_driver = NULL;
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index c576c0b3f4..145061b847 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -350,7 +350,7 @@ __init cpufreq_stats_init(void)
350 return ret; 350 return ret;
351 } 351 }
352 352
353 register_cpu_notifier(&cpufreq_stat_cpu_notifier); 353 register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
354 lock_cpu_hotplug(); 354 lock_cpu_hotplug();
355 for_each_online_cpu(cpu) { 355 for_each_online_cpu(cpu) {
356 cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_ONLINE, 356 cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_ONLINE,
@@ -368,7 +368,7 @@ __exit cpufreq_stats_exit(void)
368 CPUFREQ_POLICY_NOTIFIER); 368 CPUFREQ_POLICY_NOTIFIER);
369 cpufreq_unregister_notifier(&notifier_trans_block, 369 cpufreq_unregister_notifier(&notifier_trans_block,
370 CPUFREQ_TRANSITION_NOTIFIER); 370 CPUFREQ_TRANSITION_NOTIFIER);
371 unregister_cpu_notifier(&cpufreq_stat_cpu_notifier); 371 unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
372 lock_cpu_hotplug(); 372 lock_cpu_hotplug();
373 for_each_online_cpu(cpu) { 373 for_each_online_cpu(cpu) {
374 cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_DEAD, 374 cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_DEAD,
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 5158a9db4b..17ee684144 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -60,15 +60,14 @@
60#define AES_EXTENDED_KEY_SIZE_B (AES_EXTENDED_KEY_SIZE * sizeof(uint32_t)) 60#define AES_EXTENDED_KEY_SIZE_B (AES_EXTENDED_KEY_SIZE * sizeof(uint32_t))
61 61
62struct aes_ctx { 62struct aes_ctx {
63 uint32_t e_data[AES_EXTENDED_KEY_SIZE];
64 uint32_t d_data[AES_EXTENDED_KEY_SIZE];
65 struct { 63 struct {
66 struct cword encrypt; 64 struct cword encrypt;
67 struct cword decrypt; 65 struct cword decrypt;
68 } cword; 66 } cword;
69 uint32_t *E; 67 u32 *D;
70 uint32_t *D;
71 int key_length; 68 int key_length;
69 u32 E[AES_EXTENDED_KEY_SIZE];
70 u32 d_data[AES_EXTENDED_KEY_SIZE];
72}; 71};
73 72
74/* ====== Key management routines ====== */ 73/* ====== Key management routines ====== */
@@ -282,19 +281,20 @@ aes_hw_extkey_available(uint8_t key_len)
282 return 0; 281 return 0;
283} 282}
284 283
285static inline struct aes_ctx *aes_ctx(void *ctx) 284static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
286{ 285{
286 unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm);
287 unsigned long align = PADLOCK_ALIGNMENT; 287 unsigned long align = PADLOCK_ALIGNMENT;
288 288
289 if (align <= crypto_tfm_ctx_alignment()) 289 if (align <= crypto_tfm_ctx_alignment())
290 align = 1; 290 align = 1;
291 return (struct aes_ctx *)ALIGN((unsigned long)ctx, align); 291 return (struct aes_ctx *)ALIGN(addr, align);
292} 292}
293 293
294static int 294static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
295aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t *flags) 295 unsigned int key_len, u32 *flags)
296{ 296{
297 struct aes_ctx *ctx = aes_ctx(ctx_arg); 297 struct aes_ctx *ctx = aes_ctx(tfm);
298 const __le32 *key = (const __le32 *)in_key; 298 const __le32 *key = (const __le32 *)in_key;
299 uint32_t i, t, u, v, w; 299 uint32_t i, t, u, v, w;
300 uint32_t P[AES_EXTENDED_KEY_SIZE]; 300 uint32_t P[AES_EXTENDED_KEY_SIZE];
@@ -312,8 +312,7 @@ aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t
312 * itself we must supply the plain key for both encryption 312 * itself we must supply the plain key for both encryption
313 * and decryption. 313 * and decryption.
314 */ 314 */
315 ctx->E = ctx->e_data; 315 ctx->D = ctx->E;
316 ctx->D = ctx->e_data;
317 316
318 E_KEY[0] = le32_to_cpu(key[0]); 317 E_KEY[0] = le32_to_cpu(key[0]);
319 E_KEY[1] = le32_to_cpu(key[1]); 318 E_KEY[1] = le32_to_cpu(key[1]);
@@ -414,24 +413,22 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
414 return iv; 413 return iv;
415} 414}
416 415
417static void 416static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
418aes_encrypt(void *ctx_arg, uint8_t *out, const uint8_t *in)
419{ 417{
420 struct aes_ctx *ctx = aes_ctx(ctx_arg); 418 struct aes_ctx *ctx = aes_ctx(tfm);
421 padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt, 1); 419 padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt, 1);
422} 420}
423 421
424static void 422static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
425aes_decrypt(void *ctx_arg, uint8_t *out, const uint8_t *in)
426{ 423{
427 struct aes_ctx *ctx = aes_ctx(ctx_arg); 424 struct aes_ctx *ctx = aes_ctx(tfm);
428 padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt, 1); 425 padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt, 1);
429} 426}
430 427
431static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out, 428static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out,
432 const u8 *in, unsigned int nbytes) 429 const u8 *in, unsigned int nbytes)
433{ 430{
434 struct aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(desc->tfm)); 431 struct aes_ctx *ctx = aes_ctx(desc->tfm);
435 padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt, 432 padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt,
436 nbytes / AES_BLOCK_SIZE); 433 nbytes / AES_BLOCK_SIZE);
437 return nbytes & ~(AES_BLOCK_SIZE - 1); 434 return nbytes & ~(AES_BLOCK_SIZE - 1);
@@ -440,7 +437,7 @@ static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out,
440static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out, 437static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out,
441 const u8 *in, unsigned int nbytes) 438 const u8 *in, unsigned int nbytes)
442{ 439{
443 struct aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(desc->tfm)); 440 struct aes_ctx *ctx = aes_ctx(desc->tfm);
444 padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt, 441 padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt,
445 nbytes / AES_BLOCK_SIZE); 442 nbytes / AES_BLOCK_SIZE);
446 return nbytes & ~(AES_BLOCK_SIZE - 1); 443 return nbytes & ~(AES_BLOCK_SIZE - 1);
@@ -449,7 +446,7 @@ static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out,
449static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out, 446static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out,
450 const u8 *in, unsigned int nbytes) 447 const u8 *in, unsigned int nbytes)
451{ 448{
452 struct aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(desc->tfm)); 449 struct aes_ctx *ctx = aes_ctx(desc->tfm);
453 u8 *iv; 450 u8 *iv;
454 451
455 iv = padlock_xcrypt_cbc(in, out, ctx->E, desc->info, 452 iv = padlock_xcrypt_cbc(in, out, ctx->E, desc->info,
@@ -462,7 +459,7 @@ static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out,
462static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out, 459static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out,
463 const u8 *in, unsigned int nbytes) 460 const u8 *in, unsigned int nbytes)
464{ 461{
465 struct aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(desc->tfm)); 462 struct aes_ctx *ctx = aes_ctx(desc->tfm);
466 padlock_xcrypt_cbc(in, out, ctx->D, desc->info, &ctx->cword.decrypt, 463 padlock_xcrypt_cbc(in, out, ctx->D, desc->info, &ctx->cword.decrypt,
467 nbytes / AES_BLOCK_SIZE); 464 nbytes / AES_BLOCK_SIZE);
468 return nbytes & ~(AES_BLOCK_SIZE - 1); 465 return nbytes & ~(AES_BLOCK_SIZE - 1);
diff --git a/drivers/dma/ioatdma.c b/drivers/dma/ioatdma.c
index 0fdf7fbd64..2801d14a5e 100644
--- a/drivers/dma/ioatdma.c
+++ b/drivers/dma/ioatdma.c
@@ -824,10 +824,9 @@ static int __init ioat_init_module(void)
824{ 824{
825 /* it's currently unsafe to unload this module */ 825 /* it's currently unsafe to unload this module */
826 /* if forced, worst case is that rmmod hangs */ 826 /* if forced, worst case is that rmmod hangs */
827 if (THIS_MODULE != NULL) 827 __unsafe(THIS_MODULE);
828 THIS_MODULE->unsafe = 1;
829 828
830 return pci_module_init(&ioat_pci_drv); 829 pci_module_init(&ioat_pci_drv);
831} 830}
832 831
833module_init(ioat_init_module); 832module_init(ioat_init_module);
diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c
index 65b2709f75..facc1ccb83 100644
--- a/drivers/hwmon/asb100.c
+++ b/drivers/hwmon/asb100.c
@@ -341,7 +341,7 @@ static ssize_t set_fan_min(struct device *dev, const char *buf,
341 341
342/* Note: we save and restore the fan minimum here, because its value is 342/* Note: we save and restore the fan minimum here, because its value is
343 determined in part by the fan divisor. This follows the principle of 343 determined in part by the fan divisor. This follows the principle of
344 least suprise; the user doesn't expect the fan minimum to change just 344 least surprise; the user doesn't expect the fan minimum to change just
345 because the divisor changed. */ 345 because the divisor changed. */
346static ssize_t set_fan_div(struct device *dev, const char *buf, 346static ssize_t set_fan_div(struct device *dev, const char *buf,
347 size_t count, int nr) 347 size_t count, int nr)
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 94be3d797e..a6ce7abf86 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -358,7 +358,7 @@ static ssize_t show_fan_div(struct device *dev, char *buf, int nr)
358 358
359/* Note: we save and restore the fan minimum here, because its value is 359/* Note: we save and restore the fan minimum here, because its value is
360 determined in part by the fan divisor. This follows the principle of 360 determined in part by the fan divisor. This follows the principle of
361 least suprise; the user doesn't expect the fan minimum to change just 361 least surprise; the user doesn't expect the fan minimum to change just
362 because the divisor changed. */ 362 because the divisor changed. */
363static ssize_t set_fan_div(struct device *dev, const char *buf, 363static ssize_t set_fan_div(struct device *dev, const char *buf,
364 size_t count, int nr) 364 size_t count, int nr)
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index f72120d08c..b4ccdfc012 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -253,7 +253,7 @@ set_fan(min2, fan_min[1], LM80_REG_FAN_MIN(2), fan_div[1]);
253 253
254/* Note: we save and restore the fan minimum here, because its value is 254/* Note: we save and restore the fan minimum here, because its value is
255 determined in part by the fan divisor. This follows the principle of 255 determined in part by the fan divisor. This follows the principle of
256 least suprise; the user doesn't expect the fan minimum to change just 256 least surprise; the user doesn't expect the fan minimum to change just
257 because the divisor changed. */ 257 because the divisor changed. */
258static ssize_t set_fan_div(struct device *dev, const char *buf, 258static ssize_t set_fan_div(struct device *dev, const char *buf,
259 size_t count, int nr) 259 size_t count, int nr)
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index e229daf666..e6c1b638c9 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -421,7 +421,7 @@ static void set_fan_min(struct device *dev, const char *buf, int nr)
421 421
422/* Note: we save and restore the fan minimum here, because its value is 422/* Note: we save and restore the fan minimum here, because its value is
423 determined in part by the fan clock divider. This follows the principle 423 determined in part by the fan clock divider. This follows the principle
424 of least suprise; the user doesn't expect the fan minimum to change just 424 of least surprise; the user doesn't expect the fan minimum to change just
425 because the divider changed. */ 425 because the divider changed. */
426static ssize_t set_fan_div(struct device *dev, const char *buf, 426static ssize_t set_fan_div(struct device *dev, const char *buf,
427 size_t count, int nr) 427 size_t count, int nr)
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 6f3fda73f7..063f71c5f0 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -380,7 +380,7 @@ static ssize_t show_fan_div(struct device *dev, char *buf, int nr)
380 380
381/* Note: we save and restore the fan minimum here, because its value is 381/* Note: we save and restore the fan minimum here, because its value is
382 determined in part by the fan divisor. This follows the principle of 382 determined in part by the fan divisor. This follows the principle of
383 least suprise; the user doesn't expect the fan minimum to change just 383 least surprise; the user doesn't expect the fan minimum to change just
384 because the divisor changed. */ 384 because the divisor changed. */
385static ssize_t set_fan_div(struct device *dev, const char *buf, 385static ssize_t set_fan_div(struct device *dev, const char *buf,
386 size_t count, int nr) 386 size_t count, int nr)
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index 7732aec545..825e8f7269 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -207,7 +207,7 @@ static ssize_t set_fan_min(struct device *dev, const char *buf,
207 207
208/* Note: we save and restore the fan minimum here, because its value is 208/* Note: we save and restore the fan minimum here, because its value is
209 determined in part by the fan clock divider. This follows the principle 209 determined in part by the fan clock divider. This follows the principle
210 of least suprise; the user doesn't expect the fan minimum to change just 210 of least surprise; the user doesn't expect the fan minimum to change just
211 because the divider changed. */ 211 because the divider changed. */
212static ssize_t set_fan_div(struct device *dev, const char *buf, 212static ssize_t set_fan_div(struct device *dev, const char *buf,
213 size_t count, int nr) 213 size_t count, int nr)
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index 71fb7f1af8..79368d53c3 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -781,7 +781,7 @@ show_fan_div_reg(struct device *dev, char *buf, int nr)
781 781
782/* Note: we save and restore the fan minimum here, because its value is 782/* Note: we save and restore the fan minimum here, because its value is
783 determined in part by the fan divisor. This follows the principle of 783 determined in part by the fan divisor. This follows the principle of
784 least suprise; the user doesn't expect the fan minimum to change just 784 least surprise; the user doesn't expect the fan minimum to change just
785 because the divisor changed. */ 785 because the divisor changed. */
786static ssize_t 786static ssize_t
787store_fan_div_reg(struct device *dev, const char *buf, size_t count, int nr) 787store_fan_div_reg(struct device *dev, const char *buf, size_t count, int nr)
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index e4c700356c..7be469ed0f 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -630,7 +630,7 @@ show_fan_div_reg(struct device *dev, char *buf, int nr)
630 630
631/* Note: we save and restore the fan minimum here, because its value is 631/* Note: we save and restore the fan minimum here, because its value is
632 determined in part by the fan divisor. This follows the principle of 632 determined in part by the fan divisor. This follows the principle of
633 least suprise; the user doesn't expect the fan minimum to change just 633 least surprise; the user doesn't expect the fan minimum to change just
634 because the divisor changed. */ 634 because the divisor changed. */
635static ssize_t 635static ssize_t
636store_fan_div_reg(struct device *dev, const char *buf, size_t count, int nr) 636store_fan_div_reg(struct device *dev, const char *buf, size_t count, int nr)
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index 4ef884c216..e407c74bda 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -463,7 +463,7 @@ show_fan_div(struct device *dev, struct device_attribute *attr,
463 463
464/* Note: we save and restore the fan minimum here, because its value is 464/* Note: we save and restore the fan minimum here, because its value is
465 determined in part by the fan divisor. This follows the principle of 465 determined in part by the fan divisor. This follows the principle of
466 least suprise; the user doesn't expect the fan minimum to change just 466 least surprise; the user doesn't expect the fan minimum to change just
467 because the divisor changed. */ 467 because the divisor changed. */
468static ssize_t 468static ssize_t
469store_fan_div(struct device *dev, struct device_attribute *attr, 469store_fan_div(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 3e0d04d5a8..8b46ef7d9f 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -488,7 +488,7 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
488 dev_err(&dev->dev, "SMBus base address uninitialized, " 488 dev_err(&dev->dev, "SMBus base address uninitialized, "
489 "upgrade BIOS\n"); 489 "upgrade BIOS\n");
490 err = -ENODEV; 490 err = -ENODEV;
491 goto exit_disable; 491 goto exit;
492 } 492 }
493 493
494 err = pci_request_region(dev, SMBBAR, i801_driver.name); 494 err = pci_request_region(dev, SMBBAR, i801_driver.name);
@@ -496,7 +496,7 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
496 dev_err(&dev->dev, "Failed to request SMBus region " 496 dev_err(&dev->dev, "Failed to request SMBus region "
497 "0x%lx-0x%lx\n", i801_smba, 497 "0x%lx-0x%lx\n", i801_smba,
498 pci_resource_end(dev, SMBBAR)); 498 pci_resource_end(dev, SMBBAR));
499 goto exit_disable; 499 goto exit;
500 } 500 }
501 501
502 pci_read_config_byte(I801_dev, SMBHSTCFG, &temp); 502 pci_read_config_byte(I801_dev, SMBHSTCFG, &temp);
@@ -520,11 +520,12 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
520 err = i2c_add_adapter(&i801_adapter); 520 err = i2c_add_adapter(&i801_adapter);
521 if (err) { 521 if (err) {
522 dev_err(&dev->dev, "Failed to add SMBus adapter\n"); 522 dev_err(&dev->dev, "Failed to add SMBus adapter\n");
523 goto exit_disable; 523 goto exit_release;
524 } 524 }
525 return 0;
525 526
526exit_disable: 527exit_release:
527 pci_disable_device(dev); 528 pci_release_region(dev, SMBBAR);
528exit: 529exit:
529 return err; 530 return err;
530} 531}
@@ -533,7 +534,10 @@ static void __devexit i801_remove(struct pci_dev *dev)
533{ 534{
534 i2c_del_adapter(&i801_adapter); 535 i2c_del_adapter(&i801_adapter);
535 pci_release_region(dev, SMBBAR); 536 pci_release_region(dev, SMBBAR);
536 pci_disable_device(dev); 537 /*
538 * do not call pci_disable_device(dev) since it can cause hard hangs on
539 * some systems during power-off (eg. Fujitsu-Siemens Lifebook E8010)
540 */
537} 541}
538 542
539static struct pci_driver i801_driver = { 543static struct pci_driver i801_driver = {
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index a5017de72d..f033d732f3 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -37,7 +37,7 @@
37 * Version 1.15 convert all calls to ide_raw_taskfile 37 * Version 1.15 convert all calls to ide_raw_taskfile
38 * since args will return register content. 38 * since args will return register content.
39 * Version 1.16 added suspend-resume-checkpower 39 * Version 1.16 added suspend-resume-checkpower
40 * Version 1.17 do flush on standy, do flush on ATA < ATA6 40 * Version 1.17 do flush on standby, do flush on ATA < ATA6
41 * fix wcache setup. 41 * fix wcache setup.
42 */ 42 */
43 43
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 622a55c72f..935cb25837 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -959,7 +959,7 @@ static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
959 printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name); 959 printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
960 SELECT_DRIVE(drive); 960 SELECT_DRIVE(drive);
961 HWIF(drive)->OUTB(8, HWIF(drive)->io_ports[IDE_CONTROL_OFFSET]); 961 HWIF(drive)->OUTB(8, HWIF(drive)->io_ports[IDE_CONTROL_OFFSET]);
962 rc = ide_wait_not_busy(HWIF(drive), 10000); 962 rc = ide_wait_not_busy(HWIF(drive), 100000);
963 if (rc) 963 if (rc)
964 printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name); 964 printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
965 } 965 }
@@ -1665,7 +1665,7 @@ irqreturn_t ide_intr (int irq, void *dev_id, struct pt_regs *regs)
1665 * Initialize a request before we fill it in and send it down to 1665 * Initialize a request before we fill it in and send it down to
1666 * ide_do_drive_cmd. Commands must be set up by this function. Right 1666 * ide_do_drive_cmd. Commands must be set up by this function. Right
1667 * now it doesn't do a lot, but if that changes abusers will have a 1667 * now it doesn't do a lot, but if that changes abusers will have a
1668 * nasty suprise. 1668 * nasty surprise.
1669 */ 1669 */
1670 1670
1671void ide_init_drive_cmd (struct request *rq) 1671void ide_init_drive_cmd (struct request *rq)
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 16a143133f..7ddb118287 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -485,7 +485,7 @@ static u8 ide_dump_ata_status(ide_drive_t *drive, const char *msg, u8 stat)
485 unsigned long flags; 485 unsigned long flags;
486 u8 err = 0; 486 u8 err = 0;
487 487
488 local_irq_set(flags); 488 local_irq_save(flags);
489 printk("%s: %s: status=0x%02x { ", drive->name, msg, stat); 489 printk("%s: %s: status=0x%02x { ", drive->name, msg, stat);
490 if (stat & BUSY_STAT) 490 if (stat & BUSY_STAT)
491 printk("Busy "); 491 printk("Busy ");
@@ -567,7 +567,7 @@ static u8 ide_dump_atapi_status(ide_drive_t *drive, const char *msg, u8 stat)
567 567
568 status.all = stat; 568 status.all = stat;
569 error.all = 0; 569 error.all = 0;
570 local_irq_set(flags); 570 local_irq_save(flags);
571 printk("%s: %s: status=0x%02x { ", drive->name, msg, stat); 571 printk("%s: %s: status=0x%02x { ", drive->name, msg, stat);
572 if (status.b.bsy) 572 if (status.b.bsy)
573 printk("Busy "); 573 printk("Busy ");
diff --git a/drivers/ide/ide-timing.h b/drivers/ide/ide-timing.h
index 2fcfac6e96..c0864b1e92 100644
--- a/drivers/ide/ide-timing.h
+++ b/drivers/ide/ide-timing.h
@@ -220,6 +220,12 @@ static int ide_timing_compute(ide_drive_t *drive, short speed, struct ide_timing
220 return -EINVAL; 220 return -EINVAL;
221 221
222/* 222/*
223 * Copy the timing from the table.
224 */
225
226 *t = *s;
227
228/*
223 * If the drive is an EIDE drive, it can tell us it needs extended 229 * If the drive is an EIDE drive, it can tell us it needs extended
224 * PIO/MWDMA cycle timing. 230 * PIO/MWDMA cycle timing.
225 */ 231 */
@@ -247,7 +253,7 @@ static int ide_timing_compute(ide_drive_t *drive, short speed, struct ide_timing
247 * Convert the timing to bus clock counts. 253 * Convert the timing to bus clock counts.
248 */ 254 */
249 255
250 ide_timing_quantize(s, t, T, UT); 256 ide_timing_quantize(t, t, T, UT);
251 257
252/* 258/*
253 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, S.M.A.R.T 259 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, S.M.A.R.T
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c
index 6e9dbf4d80..85007cb12c 100644
--- a/drivers/ide/pci/amd74xx.c
+++ b/drivers/ide/pci/amd74xx.c
@@ -75,6 +75,7 @@ static struct amd_ide_chip {
75 { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, 0x50, AMD_UDMA_133 }, 75 { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, 0x50, AMD_UDMA_133 },
76 { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE, 0x50, AMD_UDMA_133 }, 76 { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE, 0x50, AMD_UDMA_133 },
77 { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE, 0x50, AMD_UDMA_133 }, 77 { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE, 0x50, AMD_UDMA_133 },
78 { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE, 0x50, AMD_UDMA_133 },
78 { PCI_DEVICE_ID_AMD_CS5536_IDE, 0x40, AMD_UDMA_100 }, 79 { PCI_DEVICE_ID_AMD_CS5536_IDE, 0x40, AMD_UDMA_100 },
79 { 0 } 80 { 0 }
80}; 81};
@@ -490,7 +491,8 @@ static ide_pci_device_t amd74xx_chipsets[] __devinitdata = {
490 /* 15 */ DECLARE_NV_DEV("NFORCE-MCP51"), 491 /* 15 */ DECLARE_NV_DEV("NFORCE-MCP51"),
491 /* 16 */ DECLARE_NV_DEV("NFORCE-MCP55"), 492 /* 16 */ DECLARE_NV_DEV("NFORCE-MCP55"),
492 /* 17 */ DECLARE_NV_DEV("NFORCE-MCP61"), 493 /* 17 */ DECLARE_NV_DEV("NFORCE-MCP61"),
493 /* 18 */ DECLARE_AMD_DEV("AMD5536"), 494 /* 18 */ DECLARE_NV_DEV("NFORCE-MCP65"),
495 /* 19 */ DECLARE_AMD_DEV("AMD5536"),
494}; 496};
495 497
496static int __devinit amd74xx_probe(struct pci_dev *dev, const struct pci_device_id *id) 498static int __devinit amd74xx_probe(struct pci_dev *dev, const struct pci_device_id *id)
@@ -528,7 +530,8 @@ static struct pci_device_id amd74xx_pci_tbl[] = {
528 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 15 }, 530 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 15 },
529 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 16 }, 531 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 16 },
530 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 17 }, 532 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 17 },
531 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 18 }, 533 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 18 },
534 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 19 },
532 { 0, }, 535 { 0, },
533}; 536};
534MODULE_DEVICE_TABLE(pci, amd74xx_pci_tbl); 537MODULE_DEVICE_TABLE(pci, amd74xx_pci_tbl);
diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c
index 7ce5bf7836..22d17548ec 100644
--- a/drivers/ide/pci/pdc202xx_old.c
+++ b/drivers/ide/pci/pdc202xx_old.c
@@ -370,7 +370,6 @@ chipset_is_set:
370 if (!(speed)) { 370 if (!(speed)) {
371 /* restore original pci-config space */ 371 /* restore original pci-config space */
372 pci_write_config_dword(dev, drive_pci, drive_conf); 372 pci_write_config_dword(dev, drive_pci, drive_conf);
373 hwif->tuneproc(drive, 5);
374 return 0; 373 return 0;
375 } 374 }
376 375
@@ -415,8 +414,6 @@ static void pdc202xx_old_ide_dma_start(ide_drive_t *drive)
415 if (drive->addressing == 1) { 414 if (drive->addressing == 1) {
416 struct request *rq = HWGROUP(drive)->rq; 415 struct request *rq = HWGROUP(drive)->rq;
417 ide_hwif_t *hwif = HWIF(drive); 416 ide_hwif_t *hwif = HWIF(drive);
418// struct pci_dev *dev = hwif->pci_dev;
419// unsgned long high_16 = pci_resource_start(dev, 4);
420 unsigned long high_16 = hwif->dma_master; 417 unsigned long high_16 = hwif->dma_master;
421 unsigned long atapi_reg = high_16 + (hwif->channel ? 0x24 : 0x20); 418 unsigned long atapi_reg = high_16 + (hwif->channel ? 0x24 : 0x20);
422 u32 word_count = 0; 419 u32 word_count = 0;
@@ -436,7 +433,6 @@ static int pdc202xx_old_ide_dma_end(ide_drive_t *drive)
436{ 433{
437 if (drive->addressing == 1) { 434 if (drive->addressing == 1) {
438 ide_hwif_t *hwif = HWIF(drive); 435 ide_hwif_t *hwif = HWIF(drive);
439// unsigned long high_16 = pci_resource_start(hwif->pci_dev, 4);
440 unsigned long high_16 = hwif->dma_master; 436 unsigned long high_16 = hwif->dma_master;
441 unsigned long atapi_reg = high_16 + (hwif->channel ? 0x24 : 0x20); 437 unsigned long atapi_reg = high_16 + (hwif->channel ? 0x24 : 0x20);
442 u8 clock = 0; 438 u8 clock = 0;
@@ -453,8 +449,6 @@ static int pdc202xx_old_ide_dma_end(ide_drive_t *drive)
453static int pdc202xx_old_ide_dma_test_irq(ide_drive_t *drive) 449static int pdc202xx_old_ide_dma_test_irq(ide_drive_t *drive)
454{ 450{
455 ide_hwif_t *hwif = HWIF(drive); 451 ide_hwif_t *hwif = HWIF(drive);
456// struct pci_dev *dev = hwif->pci_dev;
457// unsigned long high_16 = pci_resource_start(dev, 4);
458 unsigned long high_16 = hwif->dma_master; 452 unsigned long high_16 = hwif->dma_master;
459 u8 dma_stat = hwif->INB(hwif->dma_status); 453 u8 dma_stat = hwif->INB(hwif->dma_status);
460 u8 sc1d = hwif->INB((high_16 + 0x001d)); 454 u8 sc1d = hwif->INB((high_16 + 0x001d));
@@ -492,12 +486,7 @@ static int pdc202xx_ide_dma_timeout(ide_drive_t *drive)
492 486
493static void pdc202xx_reset_host (ide_hwif_t *hwif) 487static void pdc202xx_reset_host (ide_hwif_t *hwif)
494{ 488{
495#ifdef CONFIG_BLK_DEV_IDEDMA
496// unsigned long high_16 = hwif->dma_base - (8*(hwif->channel));
497 unsigned long high_16 = hwif->dma_master; 489 unsigned long high_16 = hwif->dma_master;
498#else /* !CONFIG_BLK_DEV_IDEDMA */
499 unsigned long high_16 = pci_resource_start(hwif->pci_dev, 4);
500#endif /* CONFIG_BLK_DEV_IDEDMA */
501 u8 udma_speed_flag = hwif->INB(high_16|0x001f); 490 u8 udma_speed_flag = hwif->INB(high_16|0x001f);
502 491
503 hwif->OUTB((udma_speed_flag | 0x10), (high_16|0x001f)); 492 hwif->OUTB((udma_speed_flag | 0x10), (high_16|0x001f));
@@ -550,31 +539,6 @@ static void pdc202xx_reset (ide_drive_t *drive)
550#endif 539#endif
551} 540}
552 541
553/*
554 * Since SUN Cobalt is attempting to do this operation, I should disclose
555 * this has been a long time ago Thu Jul 27 16:40:57 2000 was the patch date
556 * HOTSWAP ATA Infrastructure.
557 */
558static int pdc202xx_tristate (ide_drive_t * drive, int state)
559{
560 ide_hwif_t *hwif = HWIF(drive);
561// unsigned long high_16 = hwif->dma_base - (8*(hwif->channel));
562 unsigned long high_16 = hwif->dma_master;
563 u8 sc1f = hwif->INB(high_16|0x001f);
564
565 if (!hwif)
566 return -EINVAL;
567
568// hwif->bus_state = state;
569
570 if (state) {
571 hwif->OUTB(sc1f | 0x08, (high_16|0x001f));
572 } else {
573 hwif->OUTB(sc1f & ~0x08, (high_16|0x001f));
574 }
575 return 0;
576}
577
578static unsigned int __devinit init_chipset_pdc202xx(struct pci_dev *dev, const char *name) 542static unsigned int __devinit init_chipset_pdc202xx(struct pci_dev *dev, const char *name)
579{ 543{
580 if (dev->resource[PCI_ROM_RESOURCE].start) { 544 if (dev->resource[PCI_ROM_RESOURCE].start) {
@@ -624,10 +588,8 @@ static void __devinit init_hwif_pdc202xx(ide_hwif_t *hwif)
624 hwif->tuneproc = &config_chipset_for_pio; 588 hwif->tuneproc = &config_chipset_for_pio;
625 hwif->quirkproc = &pdc202xx_quirkproc; 589 hwif->quirkproc = &pdc202xx_quirkproc;
626 590
627 if (hwif->pci_dev->device != PCI_DEVICE_ID_PROMISE_20246) { 591 if (hwif->pci_dev->device != PCI_DEVICE_ID_PROMISE_20246)
628 hwif->busproc = &pdc202xx_tristate;
629 hwif->resetproc = &pdc202xx_reset; 592 hwif->resetproc = &pdc202xx_reset;
630 }
631 593
632 hwif->speedproc = &pdc202xx_tune_chipset; 594 hwif->speedproc = &pdc202xx_tune_chipset;
633 595
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c
index e9b83e1a30..7fac6f57b5 100644
--- a/drivers/ide/pci/piix.c
+++ b/drivers/ide/pci/piix.c
@@ -222,6 +222,8 @@ static void piix_tune_drive (ide_drive_t *drive, u8 pio)
222 unsigned long flags; 222 unsigned long flags;
223 u16 master_data; 223 u16 master_data;
224 u8 slave_data; 224 u8 slave_data;
225 static DEFINE_SPINLOCK(tune_lock);
226
225 /* ISP RTC */ 227 /* ISP RTC */
226 u8 timings[][2] = { { 0, 0 }, 228 u8 timings[][2] = { { 0, 0 },
227 { 0, 0 }, 229 { 0, 0 },
@@ -230,7 +232,13 @@ static void piix_tune_drive (ide_drive_t *drive, u8 pio)
230 { 2, 3 }, }; 232 { 2, 3 }, };
231 233
232 pio = ide_get_best_pio_mode(drive, pio, 5, NULL); 234 pio = ide_get_best_pio_mode(drive, pio, 5, NULL);
233 spin_lock_irqsave(&ide_lock, flags); 235
236 /*
237 * Master vs slave is synchronized above us but the slave register is
238 * shared by the two hwifs so the corner case of two slave timeouts in
239 * parallel must be locked.
240 */
241 spin_lock_irqsave(&tune_lock, flags);
234 pci_read_config_word(dev, master_port, &master_data); 242 pci_read_config_word(dev, master_port, &master_data);
235 if (is_slave) { 243 if (is_slave) {
236 master_data = master_data | 0x4000; 244 master_data = master_data | 0x4000;
@@ -250,7 +258,7 @@ static void piix_tune_drive (ide_drive_t *drive, u8 pio)
250 pci_write_config_word(dev, master_port, master_data); 258 pci_write_config_word(dev, master_port, master_data);
251 if (is_slave) 259 if (is_slave)
252 pci_write_config_byte(dev, slave_port, slave_data); 260 pci_write_config_byte(dev, slave_port, slave_data);
253 spin_unlock_irqrestore(&ide_lock, flags); 261 spin_unlock_irqrestore(&tune_lock, flags);
254} 262}
255 263
256/** 264/**
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index 5bda15904a..2d5b57be98 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -1074,8 +1074,7 @@ static inline int update_partial_datagram(struct list_head *pdgl, struct list_he
1074 1074
1075 /* Move list entry to beginnig of list so that oldest partial 1075 /* Move list entry to beginnig of list so that oldest partial
1076 * datagrams percolate to the end of the list */ 1076 * datagrams percolate to the end of the list */
1077 list_del(lh); 1077 list_move(lh, pdgl);
1078 list_add(lh, pdgl);
1079 1078
1080 return 0; 1079 return 0;
1081} 1080}
diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
index 2d47b11777..ad49c040b6 100644
--- a/drivers/ieee1394/hosts.c
+++ b/drivers/ieee1394/hosts.c
@@ -103,7 +103,7 @@ static int alloc_hostnum_cb(struct hpsb_host *host, void *__data)
103 * driver specific parts, enable the controller and make it available 103 * driver specific parts, enable the controller and make it available
104 * to the general subsystem using hpsb_add_host(). 104 * to the general subsystem using hpsb_add_host().
105 * 105 *
106 * Return Value: a pointer to the &hpsb_host if succesful, %NULL if 106 * Return Value: a pointer to the &hpsb_host if successful, %NULL if
107 * no memory was available. 107 * no memory was available.
108 */ 108 */
109static DEFINE_MUTEX(host_num_alloc); 109static DEFINE_MUTEX(host_num_alloc);
diff --git a/drivers/ieee1394/ieee1394_core.h b/drivers/ieee1394/ieee1394_core.h
index e7b55e895f..0ecbf335c6 100644
--- a/drivers/ieee1394/ieee1394_core.h
+++ b/drivers/ieee1394/ieee1394_core.h
@@ -139,7 +139,7 @@ int hpsb_bus_reset(struct hpsb_host *host);
139 139
140/* 140/*
141 * Hand over received selfid packet to the core. Complement check (second 141 * Hand over received selfid packet to the core. Complement check (second
142 * quadlet is complement of first) is expected to be done and succesful. 142 * quadlet is complement of first) is expected to be done and successful.
143 */ 143 */
144void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid); 144void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid);
145 145
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index 20ce539580..571ea68c0c 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -132,8 +132,7 @@ static void free_pending_request(struct pending_request *req)
132static void __queue_complete_req(struct pending_request *req) 132static void __queue_complete_req(struct pending_request *req)
133{ 133{
134 struct file_info *fi = req->file_info; 134 struct file_info *fi = req->file_info;
135 list_del(&req->list); 135 list_move_tail(&req->list, &fi->req_complete);
136 list_add_tail(&req->list, &fi->req_complete);
137 136
138 up(&fi->complete_sem); 137 up(&fi->complete_sem);
139 wake_up_interruptible(&fi->poll_wait_complete); 138 wake_up_interruptible(&fi->poll_wait_complete);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index a76834edf6..863f64befc 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -476,7 +476,7 @@ static inline int cma_zero_addr(struct sockaddr *addr)
476 else { 476 else {
477 ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr; 477 ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
478 return (ip6->s6_addr32[0] | ip6->s6_addr32[1] | 478 return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
479 ip6->s6_addr32[3] | ip6->s6_addr32[4]) == 0; 479 ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
480 } 480 }
481} 481}
482 482
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index b38e02a5db..5ed4dab52a 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1775,11 +1775,9 @@ ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
1775void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr) 1775void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1776{ 1776{
1777 mad_send_wr->timeout = 0; 1777 mad_send_wr->timeout = 0;
1778 if (mad_send_wr->refcount == 1) { 1778 if (mad_send_wr->refcount == 1)
1779 list_del(&mad_send_wr->agent_list); 1779 list_move_tail(&mad_send_wr->agent_list,
1780 list_add_tail(&mad_send_wr->agent_list,
1781 &mad_send_wr->mad_agent_priv->done_list); 1780 &mad_send_wr->mad_agent_priv->done_list);
1782 }
1783} 1781}
1784 1782
1785static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, 1783static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
@@ -2098,8 +2096,7 @@ retry:
2098 queued_send_wr = container_of(mad_list, 2096 queued_send_wr = container_of(mad_list,
2099 struct ib_mad_send_wr_private, 2097 struct ib_mad_send_wr_private,
2100 mad_list); 2098 mad_list);
2101 list_del(&mad_list->list); 2099 list_move_tail(&mad_list->list, &send_queue->list);
2102 list_add_tail(&mad_list->list, &send_queue->list);
2103 } 2100 }
2104 spin_unlock_irqrestore(&send_queue->lock, flags); 2101 spin_unlock_irqrestore(&send_queue->lock, flags);
2105 2102
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index d4704e054e..ebcd5b1817 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -665,8 +665,7 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
665 goto out; 665 goto out;
666 666
667 mad_send_wr->refcount++; 667 mad_send_wr->refcount++;
668 list_del(&mad_send_wr->agent_list); 668 list_move_tail(&mad_send_wr->agent_list,
669 list_add_tail(&mad_send_wr->agent_list,
670 &mad_send_wr->mad_agent_priv->send_list); 669 &mad_send_wr->mad_agent_priv->send_list);
671 } 670 }
672out: 671out:
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 216471fa01..ab40488182 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -864,8 +864,7 @@ void ipoib_mcast_restart_task(void *dev_ptr)
864 864
865 if (mcast) { 865 if (mcast) {
866 /* Destroy the send only entry */ 866 /* Destroy the send only entry */
867 list_del(&mcast->list); 867 list_move_tail(&mcast->list, &remove_list);
868 list_add_tail(&mcast->list, &remove_list);
869 868
870 rb_replace_node(&mcast->rb_node, 869 rb_replace_node(&mcast->rb_node,
871 &nmcast->rb_node, 870 &nmcast->rb_node,
@@ -890,8 +889,7 @@ void ipoib_mcast_restart_task(void *dev_ptr)
890 rb_erase(&mcast->rb_node, &priv->multicast_tree); 889 rb_erase(&mcast->rb_node, &priv->multicast_tree);
891 890
892 /* Move to the remove list */ 891 /* Move to the remove list */
893 list_del(&mcast->list); 892 list_move_tail(&mcast->list, &remove_list);
894 list_add_tail(&mcast->list, &remove_list);
895 } 893 }
896 } 894 }
897 895
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 5f561fce32..a29d5ceb00 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -78,14 +78,19 @@ static int evdev_fasync(int fd, struct file *file, int on)
78{ 78{
79 int retval; 79 int retval;
80 struct evdev_list *list = file->private_data; 80 struct evdev_list *list = file->private_data;
81
81 retval = fasync_helper(fd, file, on, &list->fasync); 82 retval = fasync_helper(fd, file, on, &list->fasync);
83
82 return retval < 0 ? retval : 0; 84 return retval < 0 ? retval : 0;
83} 85}
84 86
85static int evdev_flush(struct file * file, fl_owner_t id) 87static int evdev_flush(struct file *file, fl_owner_t id)
86{ 88{
87 struct evdev_list *list = file->private_data; 89 struct evdev_list *list = file->private_data;
88 if (!list->evdev->exist) return -ENODEV; 90
91 if (!list->evdev->exist)
92 return -ENODEV;
93
89 return input_flush_device(&list->evdev->handle, file); 94 return input_flush_device(&list->evdev->handle, file);
90} 95}
91 96
@@ -300,6 +305,7 @@ static ssize_t evdev_read(struct file * file, char __user * buffer, size_t count
300static unsigned int evdev_poll(struct file *file, poll_table *wait) 305static unsigned int evdev_poll(struct file *file, poll_table *wait)
301{ 306{
302 struct evdev_list *list = file->private_data; 307 struct evdev_list *list = file->private_data;
308
303 poll_wait(file, &list->evdev->wait, wait); 309 poll_wait(file, &list->evdev->wait, wait);
304 return ((list->head == list->tail) ? 0 : (POLLIN | POLLRDNORM)) | 310 return ((list->head == list->tail) ? 0 : (POLLIN | POLLRDNORM)) |
305 (list->evdev->exist ? 0 : (POLLHUP | POLLERR)); 311 (list->evdev->exist ? 0 : (POLLHUP | POLLERR));
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 3038c26891..a90486f5e4 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -28,20 +28,6 @@ MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
28MODULE_DESCRIPTION("Input core"); 28MODULE_DESCRIPTION("Input core");
29MODULE_LICENSE("GPL"); 29MODULE_LICENSE("GPL");
30 30
31EXPORT_SYMBOL(input_allocate_device);
32EXPORT_SYMBOL(input_register_device);
33EXPORT_SYMBOL(input_unregister_device);
34EXPORT_SYMBOL(input_register_handler);
35EXPORT_SYMBOL(input_unregister_handler);
36EXPORT_SYMBOL(input_grab_device);
37EXPORT_SYMBOL(input_release_device);
38EXPORT_SYMBOL(input_open_device);
39EXPORT_SYMBOL(input_close_device);
40EXPORT_SYMBOL(input_accept_process);
41EXPORT_SYMBOL(input_flush_device);
42EXPORT_SYMBOL(input_event);
43EXPORT_SYMBOL_GPL(input_class);
44
45#define INPUT_DEVICES 256 31#define INPUT_DEVICES 256
46 32
47static LIST_HEAD(input_dev_list); 33static LIST_HEAD(input_dev_list);
@@ -63,11 +49,13 @@ void input_event(struct input_dev *dev, unsigned int type, unsigned int code, in
63 case EV_SYN: 49 case EV_SYN:
64 switch (code) { 50 switch (code) {
65 case SYN_CONFIG: 51 case SYN_CONFIG:
66 if (dev->event) dev->event(dev, type, code, value); 52 if (dev->event)
53 dev->event(dev, type, code, value);
67 break; 54 break;
68 55
69 case SYN_REPORT: 56 case SYN_REPORT:
70 if (dev->sync) return; 57 if (dev->sync)
58 return;
71 dev->sync = 1; 59 dev->sync = 1;
72 break; 60 break;
73 } 61 }
@@ -136,7 +124,8 @@ void input_event(struct input_dev *dev, unsigned int type, unsigned int code, in
136 if (code > MSC_MAX || !test_bit(code, dev->mscbit)) 124 if (code > MSC_MAX || !test_bit(code, dev->mscbit))
137 return; 125 return;
138 126
139 if (dev->event) dev->event(dev, type, code, value); 127 if (dev->event)
128 dev->event(dev, type, code, value);
140 129
141 break; 130 break;
142 131
@@ -146,7 +135,9 @@ void input_event(struct input_dev *dev, unsigned int type, unsigned int code, in
146 return; 135 return;
147 136
148 change_bit(code, dev->led); 137 change_bit(code, dev->led);
149 if (dev->event) dev->event(dev, type, code, value); 138
139 if (dev->event)
140 dev->event(dev, type, code, value);
150 141
151 break; 142 break;
152 143
@@ -158,21 +149,25 @@ void input_event(struct input_dev *dev, unsigned int type, unsigned int code, in
158 if (!!test_bit(code, dev->snd) != !!value) 149 if (!!test_bit(code, dev->snd) != !!value)
159 change_bit(code, dev->snd); 150 change_bit(code, dev->snd);
160 151
161 if (dev->event) dev->event(dev, type, code, value); 152 if (dev->event)
153 dev->event(dev, type, code, value);
162 154
163 break; 155 break;
164 156
165 case EV_REP: 157 case EV_REP:
166 158
167 if (code > REP_MAX || value < 0 || dev->rep[code] == value) return; 159 if (code > REP_MAX || value < 0 || dev->rep[code] == value)
160 return;
168 161
169 dev->rep[code] = value; 162 dev->rep[code] = value;
170 if (dev->event) dev->event(dev, type, code, value); 163 if (dev->event)
164 dev->event(dev, type, code, value);
171 165
172 break; 166 break;
173 167
174 case EV_FF: 168 case EV_FF:
175 if (dev->event) dev->event(dev, type, code, value); 169 if (dev->event)
170 dev->event(dev, type, code, value);
176 break; 171 break;
177 } 172 }
178 173
@@ -186,6 +181,7 @@ void input_event(struct input_dev *dev, unsigned int type, unsigned int code, in
186 if (handle->open) 181 if (handle->open)
187 handle->handler->event(handle, type, code, value); 182 handle->handler->event(handle, type, code, value);
188} 183}
184EXPORT_SYMBOL(input_event);
189 185
190static void input_repeat_key(unsigned long data) 186static void input_repeat_key(unsigned long data)
191{ 187{
@@ -208,6 +204,7 @@ int input_accept_process(struct input_handle *handle, struct file *file)
208 204
209 return 0; 205 return 0;
210} 206}
207EXPORT_SYMBOL(input_accept_process);
211 208
212int input_grab_device(struct input_handle *handle) 209int input_grab_device(struct input_handle *handle)
213{ 210{
@@ -217,12 +214,14 @@ int input_grab_device(struct input_handle *handle)
217 handle->dev->grab = handle; 214 handle->dev->grab = handle;
218 return 0; 215 return 0;
219} 216}
217EXPORT_SYMBOL(input_grab_device);
220 218
221void input_release_device(struct input_handle *handle) 219void input_release_device(struct input_handle *handle)
222{ 220{
223 if (handle->dev->grab == handle) 221 if (handle->dev->grab == handle)
224 handle->dev->grab = NULL; 222 handle->dev->grab = NULL;
225} 223}
224EXPORT_SYMBOL(input_release_device);
226 225
227int input_open_device(struct input_handle *handle) 226int input_open_device(struct input_handle *handle)
228{ 227{
@@ -245,6 +244,7 @@ int input_open_device(struct input_handle *handle)
245 244
246 return err; 245 return err;
247} 246}
247EXPORT_SYMBOL(input_open_device);
248 248
249int input_flush_device(struct input_handle* handle, struct file* file) 249int input_flush_device(struct input_handle* handle, struct file* file)
250{ 250{
@@ -253,6 +253,7 @@ int input_flush_device(struct input_handle* handle, struct file* file)
253 253
254 return 0; 254 return 0;
255} 255}
256EXPORT_SYMBOL(input_flush_device);
256 257
257void input_close_device(struct input_handle *handle) 258void input_close_device(struct input_handle *handle)
258{ 259{
@@ -268,6 +269,7 @@ void input_close_device(struct input_handle *handle)
268 269
269 mutex_unlock(&dev->mutex); 270 mutex_unlock(&dev->mutex);
270} 271}
272EXPORT_SYMBOL(input_close_device);
271 273
272static void input_link_handle(struct input_handle *handle) 274static void input_link_handle(struct input_handle *handle)
273{ 275{
@@ -335,9 +337,11 @@ static inline void input_wakeup_procfs_readers(void)
335static unsigned int input_proc_devices_poll(struct file *file, poll_table *wait) 337static unsigned int input_proc_devices_poll(struct file *file, poll_table *wait)
336{ 338{
337 int state = input_devices_state; 339 int state = input_devices_state;
340
338 poll_wait(file, &input_devices_poll_wait, wait); 341 poll_wait(file, &input_devices_poll_wait, wait);
339 if (state != input_devices_state) 342 if (state != input_devices_state)
340 return POLLIN | POLLRDNORM; 343 return POLLIN | POLLRDNORM;
344
341 return 0; 345 return 0;
342} 346}
343 347
@@ -629,7 +633,7 @@ static ssize_t input_dev_show_modalias(struct class_device *dev, char *buf)
629 633
630 len = input_print_modalias(buf, PAGE_SIZE, id, 1); 634 len = input_print_modalias(buf, PAGE_SIZE, id, 1);
631 635
632 return max_t(int, len, PAGE_SIZE); 636 return min_t(int, len, PAGE_SIZE);
633} 637}
634static CLASS_DEVICE_ATTR(modalias, S_IRUGO, input_dev_show_modalias, NULL); 638static CLASS_DEVICE_ATTR(modalias, S_IRUGO, input_dev_show_modalias, NULL);
635 639
@@ -862,6 +866,7 @@ struct class input_class = {
862 .release = input_dev_release, 866 .release = input_dev_release,
863 .uevent = input_dev_uevent, 867 .uevent = input_dev_uevent,
864}; 868};
869EXPORT_SYMBOL_GPL(input_class);
865 870
866struct input_dev *input_allocate_device(void) 871struct input_dev *input_allocate_device(void)
867{ 872{
@@ -872,12 +877,27 @@ struct input_dev *input_allocate_device(void)
872 dev->dynalloc = 1; 877 dev->dynalloc = 1;
873 dev->cdev.class = &input_class; 878 dev->cdev.class = &input_class;
874 class_device_initialize(&dev->cdev); 879 class_device_initialize(&dev->cdev);
880 mutex_init(&dev->mutex);
875 INIT_LIST_HEAD(&dev->h_list); 881 INIT_LIST_HEAD(&dev->h_list);
876 INIT_LIST_HEAD(&dev->node); 882 INIT_LIST_HEAD(&dev->node);
877 } 883 }
878 884
879 return dev; 885 return dev;
880} 886}
887EXPORT_SYMBOL(input_allocate_device);
888
889void input_free_device(struct input_dev *dev)
890{
891 if (dev) {
892
893 mutex_lock(&dev->mutex);
894 dev->name = dev->phys = dev->uniq = NULL;
895 mutex_unlock(&dev->mutex);
896
897 input_put_device(dev);
898 }
899}
900EXPORT_SYMBOL(input_free_device);
881 901
882int input_register_device(struct input_dev *dev) 902int input_register_device(struct input_dev *dev)
883{ 903{
@@ -895,7 +915,6 @@ int input_register_device(struct input_dev *dev)
895 return -EINVAL; 915 return -EINVAL;
896 } 916 }
897 917
898 mutex_init(&dev->mutex);
899 set_bit(EV_SYN, dev->evbit); 918 set_bit(EV_SYN, dev->evbit);
900 919
901 /* 920 /*
@@ -956,12 +975,14 @@ int input_register_device(struct input_dev *dev)
956 fail1: class_device_del(&dev->cdev); 975 fail1: class_device_del(&dev->cdev);
957 return error; 976 return error;
958} 977}
978EXPORT_SYMBOL(input_register_device);
959 979
960void input_unregister_device(struct input_dev *dev) 980void input_unregister_device(struct input_dev *dev)
961{ 981{
962 struct list_head * node, * next; 982 struct list_head *node, *next;
963 983
964 if (!dev) return; 984 if (!dev)
985 return;
965 986
966 del_timer_sync(&dev->timer); 987 del_timer_sync(&dev->timer);
967 988
@@ -977,10 +998,16 @@ void input_unregister_device(struct input_dev *dev)
977 sysfs_remove_group(&dev->cdev.kobj, &input_dev_caps_attr_group); 998 sysfs_remove_group(&dev->cdev.kobj, &input_dev_caps_attr_group);
978 sysfs_remove_group(&dev->cdev.kobj, &input_dev_id_attr_group); 999 sysfs_remove_group(&dev->cdev.kobj, &input_dev_id_attr_group);
979 sysfs_remove_group(&dev->cdev.kobj, &input_dev_attr_group); 1000 sysfs_remove_group(&dev->cdev.kobj, &input_dev_attr_group);
1001
1002 mutex_lock(&dev->mutex);
1003 dev->name = dev->phys = dev->uniq = NULL;
1004 mutex_unlock(&dev->mutex);
1005
980 class_device_unregister(&dev->cdev); 1006 class_device_unregister(&dev->cdev);
981 1007
982 input_wakeup_procfs_readers(); 1008 input_wakeup_procfs_readers();
983} 1009}
1010EXPORT_SYMBOL(input_unregister_device);
984 1011
985void input_register_handler(struct input_handler *handler) 1012void input_register_handler(struct input_handler *handler)
986{ 1013{
@@ -988,7 +1015,8 @@ void input_register_handler(struct input_handler *handler)
988 struct input_handle *handle; 1015 struct input_handle *handle;
989 struct input_device_id *id; 1016 struct input_device_id *id;
990 1017
991 if (!handler) return; 1018 if (!handler)
1019 return;
992 1020
993 INIT_LIST_HEAD(&handler->h_list); 1021 INIT_LIST_HEAD(&handler->h_list);
994 1022
@@ -1005,10 +1033,11 @@ void input_register_handler(struct input_handler *handler)
1005 1033
1006 input_wakeup_procfs_readers(); 1034 input_wakeup_procfs_readers();
1007} 1035}
1036EXPORT_SYMBOL(input_register_handler);
1008 1037
1009void input_unregister_handler(struct input_handler *handler) 1038void input_unregister_handler(struct input_handler *handler)
1010{ 1039{
1011 struct list_head * node, * next; 1040 struct list_head *node, *next;
1012 1041
1013 list_for_each_safe(node, next, &handler->h_list) { 1042 list_for_each_safe(node, next, &handler->h_list) {
1014 struct input_handle * handle = to_handle_h(node); 1043 struct input_handle * handle = to_handle_h(node);
@@ -1024,6 +1053,7 @@ void input_unregister_handler(struct input_handler *handler)
1024 1053
1025 input_wakeup_procfs_readers(); 1054 input_wakeup_procfs_readers();
1026} 1055}
1056EXPORT_SYMBOL(input_unregister_handler);
1027 1057
1028static int input_open_file(struct inode *inode, struct file *file) 1058static int input_open_file(struct inode *inode, struct file *file)
1029{ 1059{
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 949bdcef8c..d67157513b 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -81,10 +81,7 @@ static int joydev_correct(int value, struct js_corr *corr)
81 return 0; 81 return 0;
82 } 82 }
83 83
84 if (value < -32767) return -32767; 84 return value < -32767 ? -32767 : (value > 32767 ? 32767 : value);
85 if (value > 32767) return 32767;
86
87 return value;
88} 85}
89 86
90static void joydev_event(struct input_handle *handle, unsigned int type, unsigned int code, int value) 87static void joydev_event(struct input_handle *handle, unsigned int type, unsigned int code, int value)
@@ -96,7 +93,8 @@ static void joydev_event(struct input_handle *handle, unsigned int type, unsigne
96 switch (type) { 93 switch (type) {
97 94
98 case EV_KEY: 95 case EV_KEY:
99 if (code < BTN_MISC || value == 2) return; 96 if (code < BTN_MISC || value == 2)
97 return;
100 event.type = JS_EVENT_BUTTON; 98 event.type = JS_EVENT_BUTTON;
101 event.number = joydev->keymap[code - BTN_MISC]; 99 event.number = joydev->keymap[code - BTN_MISC];
102 event.value = value; 100 event.value = value;
@@ -106,7 +104,8 @@ static void joydev_event(struct input_handle *handle, unsigned int type, unsigne
106 event.type = JS_EVENT_AXIS; 104 event.type = JS_EVENT_AXIS;
107 event.number = joydev->absmap[code]; 105 event.number = joydev->absmap[code];
108 event.value = joydev_correct(value, joydev->corr + event.number); 106 event.value = joydev_correct(value, joydev->corr + event.number);
109 if (event.value == joydev->abs[event.number]) return; 107 if (event.value == joydev->abs[event.number])
108 return;
110 joydev->abs[event.number] = event.value; 109 joydev->abs[event.number] = event.value;
111 break; 110 break;
112 111
@@ -134,7 +133,9 @@ static int joydev_fasync(int fd, struct file *file, int on)
134{ 133{
135 int retval; 134 int retval;
136 struct joydev_list *list = file->private_data; 135 struct joydev_list *list = file->private_data;
136
137 retval = fasync_helper(fd, file, on, &list->fasync); 137 retval = fasync_helper(fd, file, on, &list->fasync);
138
138 return retval < 0 ? retval : 0; 139 return retval < 0 ? retval : 0;
139} 140}
140 141
@@ -222,12 +223,12 @@ static ssize_t joydev_read(struct file *file, char __user *buf, size_t count, lo
222 return sizeof(struct JS_DATA_TYPE); 223 return sizeof(struct JS_DATA_TYPE);
223 } 224 }
224 225
225 if (list->startup == joydev->nabs + joydev->nkey 226 if (list->startup == joydev->nabs + joydev->nkey &&
226 && list->head == list->tail && (file->f_flags & O_NONBLOCK)) 227 list->head == list->tail && (file->f_flags & O_NONBLOCK))
227 return -EAGAIN; 228 return -EAGAIN;
228 229
229 retval = wait_event_interruptible(list->joydev->wait, 230 retval = wait_event_interruptible(list->joydev->wait,
230 !list->joydev->exist || 231 !list->joydev->exist ||
231 list->startup < joydev->nabs + joydev->nkey || 232 list->startup < joydev->nabs + joydev->nkey ||
232 list->head != list->tail); 233 list->head != list->tail);
233 234
@@ -276,8 +277,9 @@ static ssize_t joydev_read(struct file *file, char __user *buf, size_t count, lo
276static unsigned int joydev_poll(struct file *file, poll_table *wait) 277static unsigned int joydev_poll(struct file *file, poll_table *wait)
277{ 278{
278 struct joydev_list *list = file->private_data; 279 struct joydev_list *list = file->private_data;
280
279 poll_wait(file, &list->joydev->wait, wait); 281 poll_wait(file, &list->joydev->wait, wait);
280 return ((list->head != list->tail || list->startup < list->joydev->nabs + list->joydev->nkey) ? 282 return ((list->head != list->tail || list->startup < list->joydev->nabs + list->joydev->nkey) ?
281 (POLLIN | POLLRDNORM) : 0) | (list->joydev->exist ? 0 : (POLLHUP | POLLERR)); 283 (POLLIN | POLLRDNORM) : 0) | (list->joydev->exist ? 0 : (POLLHUP | POLLERR));
282} 284}
283 285
@@ -291,20 +293,26 @@ static int joydev_ioctl_common(struct joydev *joydev, unsigned int cmd, void __u
291 case JS_SET_CAL: 293 case JS_SET_CAL:
292 return copy_from_user(&joydev->glue.JS_CORR, argp, 294 return copy_from_user(&joydev->glue.JS_CORR, argp,
293 sizeof(joydev->glue.JS_CORR)) ? -EFAULT : 0; 295 sizeof(joydev->glue.JS_CORR)) ? -EFAULT : 0;
296
294 case JS_GET_CAL: 297 case JS_GET_CAL:
295 return copy_to_user(argp, &joydev->glue.JS_CORR, 298 return copy_to_user(argp, &joydev->glue.JS_CORR,
296 sizeof(joydev->glue.JS_CORR)) ? -EFAULT : 0; 299 sizeof(joydev->glue.JS_CORR)) ? -EFAULT : 0;
300
297 case JS_SET_TIMEOUT: 301 case JS_SET_TIMEOUT:
298 return get_user(joydev->glue.JS_TIMEOUT, (s32 __user *) argp); 302 return get_user(joydev->glue.JS_TIMEOUT, (s32 __user *) argp);
303
299 case JS_GET_TIMEOUT: 304 case JS_GET_TIMEOUT:
300 return put_user(joydev->glue.JS_TIMEOUT, (s32 __user *) argp); 305 return put_user(joydev->glue.JS_TIMEOUT, (s32 __user *) argp);
301 306
302 case JSIOCGVERSION: 307 case JSIOCGVERSION:
303 return put_user(JS_VERSION, (__u32 __user *) argp); 308 return put_user(JS_VERSION, (__u32 __user *) argp);
309
304 case JSIOCGAXES: 310 case JSIOCGAXES:
305 return put_user(joydev->nabs, (__u8 __user *) argp); 311 return put_user(joydev->nabs, (__u8 __user *) argp);
312
306 case JSIOCGBUTTONS: 313 case JSIOCGBUTTONS:
307 return put_user(joydev->nkey, (__u8 __user *) argp); 314 return put_user(joydev->nkey, (__u8 __user *) argp);
315
308 case JSIOCSCORR: 316 case JSIOCSCORR:
309 if (copy_from_user(joydev->corr, argp, 317 if (copy_from_user(joydev->corr, argp,
310 sizeof(joydev->corr[0]) * joydev->nabs)) 318 sizeof(joydev->corr[0]) * joydev->nabs))
@@ -314,38 +322,49 @@ static int joydev_ioctl_common(struct joydev *joydev, unsigned int cmd, void __u
314 joydev->abs[i] = joydev_correct(dev->abs[j], joydev->corr + i); 322 joydev->abs[i] = joydev_correct(dev->abs[j], joydev->corr + i);
315 } 323 }
316 return 0; 324 return 0;
325
317 case JSIOCGCORR: 326 case JSIOCGCORR:
318 return copy_to_user(argp, joydev->corr, 327 return copy_to_user(argp, joydev->corr,
319 sizeof(joydev->corr[0]) * joydev->nabs) ? -EFAULT : 0; 328 sizeof(joydev->corr[0]) * joydev->nabs) ? -EFAULT : 0;
329
320 case JSIOCSAXMAP: 330 case JSIOCSAXMAP:
321 if (copy_from_user(joydev->abspam, argp, sizeof(__u8) * (ABS_MAX + 1))) 331 if (copy_from_user(joydev->abspam, argp, sizeof(__u8) * (ABS_MAX + 1)))
322 return -EFAULT; 332 return -EFAULT;
323 for (i = 0; i < joydev->nabs; i++) { 333 for (i = 0; i < joydev->nabs; i++) {
324 if (joydev->abspam[i] > ABS_MAX) return -EINVAL; 334 if (joydev->abspam[i] > ABS_MAX)
335 return -EINVAL;
325 joydev->absmap[joydev->abspam[i]] = i; 336 joydev->absmap[joydev->abspam[i]] = i;
326 } 337 }
327 return 0; 338 return 0;
339
328 case JSIOCGAXMAP: 340 case JSIOCGAXMAP:
329 return copy_to_user(argp, joydev->abspam, 341 return copy_to_user(argp, joydev->abspam,
330 sizeof(__u8) * (ABS_MAX + 1)) ? -EFAULT : 0; 342 sizeof(__u8) * (ABS_MAX + 1)) ? -EFAULT : 0;
343
331 case JSIOCSBTNMAP: 344 case JSIOCSBTNMAP:
332 if (copy_from_user(joydev->keypam, argp, sizeof(__u16) * (KEY_MAX - BTN_MISC + 1))) 345 if (copy_from_user(joydev->keypam, argp, sizeof(__u16) * (KEY_MAX - BTN_MISC + 1)))
333 return -EFAULT; 346 return -EFAULT;
334 for (i = 0; i < joydev->nkey; i++) { 347 for (i = 0; i < joydev->nkey; i++) {
335 if (joydev->keypam[i] > KEY_MAX || joydev->keypam[i] < BTN_MISC) return -EINVAL; 348 if (joydev->keypam[i] > KEY_MAX || joydev->keypam[i] < BTN_MISC)
349 return -EINVAL;
336 joydev->keymap[joydev->keypam[i] - BTN_MISC] = i; 350 joydev->keymap[joydev->keypam[i] - BTN_MISC] = i;
337 } 351 }
338 return 0; 352 return 0;
353
339 case JSIOCGBTNMAP: 354 case JSIOCGBTNMAP:
340 return copy_to_user(argp, joydev->keypam, 355 return copy_to_user(argp, joydev->keypam,
341 sizeof(__u16) * (KEY_MAX - BTN_MISC + 1)) ? -EFAULT : 0; 356 sizeof(__u16) * (KEY_MAX - BTN_MISC + 1)) ? -EFAULT : 0;
357
342 default: 358 default:
343 if ((cmd & ~(_IOC_SIZEMASK << _IOC_SIZESHIFT)) == JSIOCGNAME(0)) { 359 if ((cmd & ~(_IOC_SIZEMASK << _IOC_SIZESHIFT)) == JSIOCGNAME(0)) {
344 int len; 360 int len;
345 if (!dev->name) return 0; 361 if (!dev->name)
362 return 0;
346 len = strlen(dev->name) + 1; 363 len = strlen(dev->name) + 1;
347 if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd); 364 if (len > _IOC_SIZE(cmd))
348 if (copy_to_user(argp, dev->name, len)) return -EFAULT; 365 len = _IOC_SIZE(cmd);
366 if (copy_to_user(argp, dev->name, len))
367 return -EFAULT;
349 return len; 368 return len;
350 } 369 }
351 } 370 }
@@ -362,7 +381,9 @@ static long joydev_compat_ioctl(struct file *file, unsigned int cmd, unsigned lo
362 struct JS_DATA_SAVE_TYPE_32 ds32; 381 struct JS_DATA_SAVE_TYPE_32 ds32;
363 int err; 382 int err;
364 383
365 if (!joydev->exist) return -ENODEV; 384 if (!joydev->exist)
385 return -ENODEV;
386
366 switch(cmd) { 387 switch(cmd) {
367 case JS_SET_TIMELIMIT: 388 case JS_SET_TIMELIMIT:
368 err = get_user(tmp32, (s32 __user *) arg); 389 err = get_user(tmp32, (s32 __user *) arg);
@@ -395,8 +416,7 @@ static long joydev_compat_ioctl(struct file *file, unsigned int cmd, unsigned lo
395 ds32.JS_SAVE = joydev->glue.JS_SAVE; 416 ds32.JS_SAVE = joydev->glue.JS_SAVE;
396 ds32.JS_CORR = joydev->glue.JS_CORR; 417 ds32.JS_CORR = joydev->glue.JS_CORR;
397 418
398 err = copy_to_user(argp, &ds32, 419 err = copy_to_user(argp, &ds32, sizeof(ds32)) ? -EFAULT : 0;
399 sizeof(ds32)) ? -EFAULT : 0;
400 break; 420 break;
401 421
402 default: 422 default:
@@ -412,7 +432,8 @@ static int joydev_ioctl(struct inode *inode, struct file *file, unsigned int cmd
412 struct joydev *joydev = list->joydev; 432 struct joydev *joydev = list->joydev;
413 void __user *argp = (void __user *)arg; 433 void __user *argp = (void __user *)arg;
414 434
415 if (!joydev->exist) return -ENODEV; 435 if (!joydev->exist)
436 return -ENODEV;
416 437
417 switch(cmd) { 438 switch(cmd) {
418 case JS_SET_TIMELIMIT: 439 case JS_SET_TIMELIMIT:
@@ -546,8 +567,8 @@ static struct input_device_id joydev_blacklist[] = {
546 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, 567 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
547 .evbit = { BIT(EV_KEY) }, 568 .evbit = { BIT(EV_KEY) },
548 .keybit = { [LONG(BTN_TOUCH)] = BIT(BTN_TOUCH) }, 569 .keybit = { [LONG(BTN_TOUCH)] = BIT(BTN_TOUCH) },
549 }, /* Avoid itouchpads, touchscreens and tablets */ 570 }, /* Avoid itouchpads, touchscreens and tablets */
550 { }, /* Terminating entry */ 571 { } /* Terminating entry */
551}; 572};
552 573
553static struct input_device_id joydev_ids[] = { 574static struct input_device_id joydev_ids[] = {
@@ -566,7 +587,7 @@ static struct input_device_id joydev_ids[] = {
566 .evbit = { BIT(EV_ABS) }, 587 .evbit = { BIT(EV_ABS) },
567 .absbit = { BIT(ABS_THROTTLE) }, 588 .absbit = { BIT(ABS_THROTTLE) },
568 }, 589 },
569 { }, /* Terminating entry */ 590 { } /* Terminating entry */
570}; 591};
571 592
572MODULE_DEVICE_TABLE(input, joydev_ids); 593MODULE_DEVICE_TABLE(input, joydev_ids);
@@ -579,7 +600,7 @@ static struct input_handler joydev_handler = {
579 .minor = JOYDEV_MINOR_BASE, 600 .minor = JOYDEV_MINOR_BASE,
580 .name = "joydev", 601 .name = "joydev",
581 .id_table = joydev_ids, 602 .id_table = joydev_ids,
582 .blacklist = joydev_blacklist, 603 .blacklist = joydev_blacklist,
583}; 604};
584 605
585static int __init joydev_init(void) 606static int __init joydev_init(void)
diff --git a/drivers/input/joystick/a3d.c b/drivers/input/joystick/a3d.c
index 4612d13ea7..b11a4bbc84 100644
--- a/drivers/input/joystick/a3d.c
+++ b/drivers/input/joystick/a3d.c
@@ -306,7 +306,7 @@ static int a3d_connect(struct gameport *gameport, struct gameport_driver *drv)
306 gameport_set_poll_handler(gameport, a3d_poll); 306 gameport_set_poll_handler(gameport, a3d_poll);
307 gameport_set_poll_interval(gameport, 20); 307 gameport_set_poll_interval(gameport, 20);
308 308
309 sprintf(a3d->phys, "%s/input0", gameport->phys); 309 snprintf(a3d->phys, sizeof(a3d->phys), "%s/input0", gameport->phys);
310 310
311 input_dev->name = a3d_names[a3d->mode]; 311 input_dev->name = a3d_names[a3d->mode];
312 input_dev->phys = a3d->phys; 312 input_dev->phys = a3d->phys;
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 3121961e3e..01dc0b195d 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -408,21 +408,23 @@ static void analog_calibrate_timer(struct analog_port *port)
408 408
409static void analog_name(struct analog *analog) 409static void analog_name(struct analog *analog)
410{ 410{
411 sprintf(analog->name, "Analog %d-axis %d-button", 411 snprintf(analog->name, sizeof(analog->name), "Analog %d-axis %d-button",
412 hweight8(analog->mask & ANALOG_AXES_STD), 412 hweight8(analog->mask & ANALOG_AXES_STD),
413 hweight8(analog->mask & ANALOG_BTNS_STD) + !!(analog->mask & ANALOG_BTNS_CHF) * 2 + 413 hweight8(analog->mask & ANALOG_BTNS_STD) + !!(analog->mask & ANALOG_BTNS_CHF) * 2 +
414 hweight16(analog->mask & ANALOG_BTNS_GAMEPAD) + !!(analog->mask & ANALOG_HBTN_CHF) * 4); 414 hweight16(analog->mask & ANALOG_BTNS_GAMEPAD) + !!(analog->mask & ANALOG_HBTN_CHF) * 4);
415 415
416 if (analog->mask & ANALOG_HATS_ALL) 416 if (analog->mask & ANALOG_HATS_ALL)
417 sprintf(analog->name, "%s %d-hat", 417 snprintf(analog->name, sizeof(analog->name), "%s %d-hat",
418 analog->name, hweight16(analog->mask & ANALOG_HATS_ALL)); 418 analog->name, hweight16(analog->mask & ANALOG_HATS_ALL));
419 419
420 if (analog->mask & ANALOG_HAT_FCS) 420 if (analog->mask & ANALOG_HAT_FCS)
421 strcat(analog->name, " FCS"); 421 strlcat(analog->name, " FCS", sizeof(analog->name));
422 if (analog->mask & ANALOG_ANY_CHF) 422 if (analog->mask & ANALOG_ANY_CHF)
423 strcat(analog->name, (analog->mask & ANALOG_SAITEK) ? " Saitek" : " CHF"); 423 strlcat(analog->name, (analog->mask & ANALOG_SAITEK) ? " Saitek" : " CHF",
424 sizeof(analog->name));
424 425
425 strcat(analog->name, (analog->mask & ANALOG_GAMEPAD) ? " gamepad": " joystick"); 426 strlcat(analog->name, (analog->mask & ANALOG_GAMEPAD) ? " gamepad": " joystick",
427 sizeof(analog->name));
426} 428}
427 429
428/* 430/*
@@ -435,7 +437,8 @@ static int analog_init_device(struct analog_port *port, struct analog *analog, i
435 int i, j, t, v, w, x, y, z; 437 int i, j, t, v, w, x, y, z;
436 438
437 analog_name(analog); 439 analog_name(analog);
438 sprintf(analog->phys, "%s/input%d", port->gameport->phys, index); 440 snprintf(analog->phys, sizeof(analog->phys),
441 "%s/input%d", port->gameport->phys, index);
439 analog->buttons = (analog->mask & ANALOG_GAMEPAD) ? analog_pad_btn : analog_joy_btn; 442 analog->buttons = (analog->mask & ANALOG_GAMEPAD) ? analog_pad_btn : analog_joy_btn;
440 443
441 analog->dev = input_dev = input_allocate_device(); 444 analog->dev = input_dev = input_allocate_device();
diff --git a/drivers/input/joystick/cobra.c b/drivers/input/joystick/cobra.c
index 1909f7ef34..d5e42eb88a 100644
--- a/drivers/input/joystick/cobra.c
+++ b/drivers/input/joystick/cobra.c
@@ -202,7 +202,8 @@ static int cobra_connect(struct gameport *gameport, struct gameport_driver *drv)
202 goto fail3; 202 goto fail3;
203 } 203 }
204 204
205 sprintf(cobra->phys[i], "%s/input%d", gameport->phys, i); 205 snprintf(cobra->phys[i], sizeof(cobra->phys[i]),
206 "%s/input%d", gameport->phys, i);
206 207
207 input_dev->name = "Creative Labs Blaster GamePad Cobra"; 208 input_dev->name = "Creative Labs Blaster GamePad Cobra";
208 input_dev->phys = cobra->phys[i]; 209 input_dev->phys = cobra->phys[i];
diff --git a/drivers/input/joystick/db9.c b/drivers/input/joystick/db9.c
index e61894685c..6f31f054d1 100644
--- a/drivers/input/joystick/db9.c
+++ b/drivers/input/joystick/db9.c
@@ -620,7 +620,8 @@ static struct db9 __init *db9_probe(int parport, int mode)
620 goto err_unreg_devs; 620 goto err_unreg_devs;
621 } 621 }
622 622
623 sprintf(db9->phys[i], "%s/input%d", db9->pd->port->name, i); 623 snprintf(db9->phys[i], sizeof(db9->phys[i]),
624 "%s/input%d", db9->pd->port->name, i);
624 625
625 input_dev->name = db9_mode->name; 626 input_dev->name = db9_mode->name;
626 input_dev->phys = db9->phys[i]; 627 input_dev->phys = db9->phys[i];
diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c
index ecbdb6b9bb..fe12aa3739 100644
--- a/drivers/input/joystick/gamecon.c
+++ b/drivers/input/joystick/gamecon.c
@@ -761,7 +761,8 @@ static struct gc __init *gc_probe(int parport, int *pads, int n_pads)
761 if (!pads[i]) 761 if (!pads[i])
762 continue; 762 continue;
763 763
764 sprintf(gc->phys[i], "%s/input%d", gc->pd->port->name, i); 764 snprintf(gc->phys[i], sizeof(gc->phys[i]),
765 "%s/input%d", gc->pd->port->name, i);
765 err = gc_setup_pad(gc, i, pads[i]); 766 err = gc_setup_pad(gc, i, pads[i]);
766 if (err) 767 if (err)
767 goto err_unreg_devs; 768 goto err_unreg_devs;
diff --git a/drivers/input/joystick/gf2k.c b/drivers/input/joystick/gf2k.c
index 8a3ad455eb..e4a699f6ec 100644
--- a/drivers/input/joystick/gf2k.c
+++ b/drivers/input/joystick/gf2k.c
@@ -298,7 +298,7 @@ static int gf2k_connect(struct gameport *gameport, struct gameport_driver *drv)
298 gameport_set_poll_handler(gameport, gf2k_poll); 298 gameport_set_poll_handler(gameport, gf2k_poll);
299 gameport_set_poll_interval(gameport, 20); 299 gameport_set_poll_interval(gameport, 20);
300 300
301 sprintf(gf2k->phys, "%s/input0", gameport->phys); 301 snprintf(gf2k->phys, sizeof(gf2k->phys), "%s/input0", gameport->phys);
302 302
303 gf2k->length = gf2k_lens[gf2k->id]; 303 gf2k->length = gf2k_lens[gf2k->id];
304 304
diff --git a/drivers/input/joystick/grip.c b/drivers/input/joystick/grip.c
index 20cb98ac2d..17a90c436d 100644
--- a/drivers/input/joystick/grip.c
+++ b/drivers/input/joystick/grip.c
@@ -354,7 +354,8 @@ static int grip_connect(struct gameport *gameport, struct gameport_driver *drv)
354 goto fail3; 354 goto fail3;
355 } 355 }
356 356
357 sprintf(grip->phys[i], "%s/input%d", gameport->phys, i); 357 snprintf(grip->phys[i], sizeof(grip->phys[i]),
358 "%s/input%d", gameport->phys, i);
358 359
359 input_dev->name = grip_name[grip->mode[i]]; 360 input_dev->name = grip_name[grip->mode[i]];
360 input_dev->phys = grip->phys[i]; 361 input_dev->phys = grip->phys[i];
diff --git a/drivers/input/joystick/guillemot.c b/drivers/input/joystick/guillemot.c
index 6e2c721c26..840ed9b512 100644
--- a/drivers/input/joystick/guillemot.c
+++ b/drivers/input/joystick/guillemot.c
@@ -222,7 +222,7 @@ static int guillemot_connect(struct gameport *gameport, struct gameport_driver *
222 gameport_set_poll_handler(gameport, guillemot_poll); 222 gameport_set_poll_handler(gameport, guillemot_poll);
223 gameport_set_poll_interval(gameport, 20); 223 gameport_set_poll_interval(gameport, 20);
224 224
225 sprintf(guillemot->phys, "%s/input0", gameport->phys); 225 snprintf(guillemot->phys, sizeof(guillemot->phys), "%s/input0", gameport->phys);
226 guillemot->type = guillemot_type + i; 226 guillemot->type = guillemot_type + i;
227 227
228 input_dev->name = guillemot_type[i].name; 228 input_dev->name = guillemot_type[i].name;
diff --git a/drivers/input/joystick/iforce/iforce-ff.c b/drivers/input/joystick/iforce/iforce-ff.c
index 2b8e8456c9..50c90765ae 100644
--- a/drivers/input/joystick/iforce/iforce-ff.c
+++ b/drivers/input/joystick/iforce/iforce-ff.c
@@ -47,7 +47,7 @@ static int make_magnitude_modifier(struct iforce* iforce,
47 iforce->device_memory.start, iforce->device_memory.end, 2L, 47 iforce->device_memory.start, iforce->device_memory.end, 2L,
48 NULL, NULL)) { 48 NULL, NULL)) {
49 mutex_unlock(&iforce->mem_mutex); 49 mutex_unlock(&iforce->mem_mutex);
50 return -ENOMEM; 50 return -ENOSPC;
51 } 51 }
52 mutex_unlock(&iforce->mem_mutex); 52 mutex_unlock(&iforce->mem_mutex);
53 } 53 }
@@ -80,7 +80,7 @@ static int make_period_modifier(struct iforce* iforce,
80 iforce->device_memory.start, iforce->device_memory.end, 2L, 80 iforce->device_memory.start, iforce->device_memory.end, 2L,
81 NULL, NULL)) { 81 NULL, NULL)) {
82 mutex_unlock(&iforce->mem_mutex); 82 mutex_unlock(&iforce->mem_mutex);
83 return -ENOMEM; 83 return -ENOSPC;
84 } 84 }
85 mutex_unlock(&iforce->mem_mutex); 85 mutex_unlock(&iforce->mem_mutex);
86 } 86 }
@@ -120,7 +120,7 @@ static int make_envelope_modifier(struct iforce* iforce,
120 iforce->device_memory.start, iforce->device_memory.end, 2L, 120 iforce->device_memory.start, iforce->device_memory.end, 2L,
121 NULL, NULL)) { 121 NULL, NULL)) {
122 mutex_unlock(&iforce->mem_mutex); 122 mutex_unlock(&iforce->mem_mutex);
123 return -ENOMEM; 123 return -ENOSPC;
124 } 124 }
125 mutex_unlock(&iforce->mem_mutex); 125 mutex_unlock(&iforce->mem_mutex);
126 } 126 }
@@ -157,7 +157,7 @@ static int make_condition_modifier(struct iforce* iforce,
157 iforce->device_memory.start, iforce->device_memory.end, 2L, 157 iforce->device_memory.start, iforce->device_memory.end, 2L,
158 NULL, NULL)) { 158 NULL, NULL)) {
159 mutex_unlock(&iforce->mem_mutex); 159 mutex_unlock(&iforce->mem_mutex);
160 return -ENOMEM; 160 return -ENOSPC;
161 } 161 }
162 mutex_unlock(&iforce->mem_mutex); 162 mutex_unlock(&iforce->mem_mutex);
163 } 163 }
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
index ab0a26b924..6d99e3c378 100644
--- a/drivers/input/joystick/iforce/iforce-main.c
+++ b/drivers/input/joystick/iforce/iforce-main.c
@@ -86,7 +86,7 @@ static struct iforce_device iforce_device[] = {
86 86
87static int iforce_input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) 87static int iforce_input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
88{ 88{
89 struct iforce* iforce = (struct iforce*)(dev->private); 89 struct iforce* iforce = dev->private;
90 unsigned char data[3]; 90 unsigned char data[3];
91 91
92 if (type != EV_FF) 92 if (type != EV_FF)
@@ -138,7 +138,7 @@ static int iforce_input_event(struct input_dev *dev, unsigned int type, unsigned
138 */ 138 */
139static int iforce_upload_effect(struct input_dev *dev, struct ff_effect *effect) 139static int iforce_upload_effect(struct input_dev *dev, struct ff_effect *effect)
140{ 140{
141 struct iforce* iforce = (struct iforce*)(dev->private); 141 struct iforce* iforce = dev->private;
142 int id; 142 int id;
143 int ret; 143 int ret;
144 int is_update; 144 int is_update;
@@ -218,7 +218,7 @@ static int iforce_upload_effect(struct input_dev *dev, struct ff_effect *effect)
218 */ 218 */
219static int iforce_erase_effect(struct input_dev *dev, int effect_id) 219static int iforce_erase_effect(struct input_dev *dev, int effect_id)
220{ 220{
221 struct iforce* iforce = (struct iforce*)(dev->private); 221 struct iforce* iforce = dev->private;
222 int err = 0; 222 int err = 0;
223 struct iforce_core_effect* core_effect; 223 struct iforce_core_effect* core_effect;
224 224
diff --git a/drivers/input/joystick/interact.c b/drivers/input/joystick/interact.c
index c4ed017582..bbfeb9c59b 100644
--- a/drivers/input/joystick/interact.c
+++ b/drivers/input/joystick/interact.c
@@ -251,7 +251,7 @@ static int interact_connect(struct gameport *gameport, struct gameport_driver *d
251 gameport_set_poll_handler(gameport, interact_poll); 251 gameport_set_poll_handler(gameport, interact_poll);
252 gameport_set_poll_interval(gameport, 20); 252 gameport_set_poll_interval(gameport, 20);
253 253
254 sprintf(interact->phys, "%s/input0", gameport->phys); 254 snprintf(interact->phys, sizeof(interact->phys), "%s/input0", gameport->phys);
255 255
256 interact->type = i; 256 interact->type = i;
257 interact->length = interact_type[i].length; 257 interact->length = interact_type[i].length;
diff --git a/drivers/input/joystick/magellan.c b/drivers/input/joystick/magellan.c
index ca3cc2319d..168b1061a0 100644
--- a/drivers/input/joystick/magellan.c
+++ b/drivers/input/joystick/magellan.c
@@ -162,7 +162,7 @@ static int magellan_connect(struct serio *serio, struct serio_driver *drv)
162 goto fail; 162 goto fail;
163 163
164 magellan->dev = input_dev; 164 magellan->dev = input_dev;
165 sprintf(magellan->phys, "%s/input0", serio->phys); 165 snprintf(magellan->phys, sizeof(magellan->phys), "%s/input0", serio->phys);
166 166
167 input_dev->name = "LogiCad3D Magellan / SpaceMouse"; 167 input_dev->name = "LogiCad3D Magellan / SpaceMouse";
168 input_dev->phys = magellan->phys; 168 input_dev->phys = magellan->phys;
diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
index 95c0de7964..e58b22c018 100644
--- a/drivers/input/joystick/sidewinder.c
+++ b/drivers/input/joystick/sidewinder.c
@@ -541,7 +541,7 @@ static void sw_print_packet(char *name, int length, unsigned char *buf, char bit
541 * Unfortunately I don't know how to do this for the other SW types. 541 * Unfortunately I don't know how to do this for the other SW types.
542 */ 542 */
543 543
544static void sw_3dp_id(unsigned char *buf, char *comment) 544static void sw_3dp_id(unsigned char *buf, char *comment, size_t size)
545{ 545{
546 int i; 546 int i;
547 char pnp[8], rev[9]; 547 char pnp[8], rev[9];
@@ -554,7 +554,7 @@ static void sw_3dp_id(unsigned char *buf, char *comment)
554 554
555 pnp[7] = rev[8] = 0; 555 pnp[7] = rev[8] = 0;
556 556
557 sprintf(comment, " [PnP %d.%02d id %s rev %s]", 557 snprintf(comment, size, " [PnP %d.%02d id %s rev %s]",
558 (int) ((sw_get_bits(buf, 8, 6, 1) << 6) | /* Two 6-bit values */ 558 (int) ((sw_get_bits(buf, 8, 6, 1) << 6) | /* Two 6-bit values */
559 sw_get_bits(buf, 16, 6, 1)) / 100, 559 sw_get_bits(buf, 16, 6, 1)) / 100,
560 (int) ((sw_get_bits(buf, 8, 6, 1) << 6) | 560 (int) ((sw_get_bits(buf, 8, 6, 1) << 6) |
@@ -695,7 +695,7 @@ static int sw_connect(struct gameport *gameport, struct gameport_driver *drv)
695 sw->type = SW_ID_FFP; 695 sw->type = SW_ID_FFP;
696 sprintf(comment, " [AC %s]", sw_get_bits(idbuf,38,1,3) ? "off" : "on"); 696 sprintf(comment, " [AC %s]", sw_get_bits(idbuf,38,1,3) ? "off" : "on");
697 } else 697 } else
698 sw->type = SW_ID_PP; 698 sw->type = SW_ID_PP;
699 break; 699 break;
700 case 66: 700 case 66:
701 sw->bits = 3; 701 sw->bits = 3;
@@ -703,7 +703,8 @@ static int sw_connect(struct gameport *gameport, struct gameport_driver *drv)
703 sw->length = 22; 703 sw->length = 22;
704 case 64: 704 case 64:
705 sw->type = SW_ID_3DP; 705 sw->type = SW_ID_3DP;
706 if (j == 160) sw_3dp_id(idbuf, comment); 706 if (j == 160)
707 sw_3dp_id(idbuf, comment, sizeof(comment));
707 break; 708 break;
708 } 709 }
709 } 710 }
@@ -733,8 +734,10 @@ static int sw_connect(struct gameport *gameport, struct gameport_driver *drv)
733 for (i = 0; i < sw->number; i++) { 734 for (i = 0; i < sw->number; i++) {
734 int bits, code; 735 int bits, code;
735 736
736 sprintf(sw->name, "Microsoft SideWinder %s", sw_name[sw->type]); 737 snprintf(sw->name, sizeof(sw->name),
737 sprintf(sw->phys[i], "%s/input%d", gameport->phys, i); 738 "Microsoft SideWinder %s", sw_name[sw->type]);
739 snprintf(sw->phys[i], sizeof(sw->phys[i]),
740 "%s/input%d", gameport->phys, i);
738 741
739 sw->dev[i] = input_dev = input_allocate_device(); 742 sw->dev[i] = input_dev = input_allocate_device();
740 if (!input_dev) { 743 if (!input_dev) {
diff --git a/drivers/input/joystick/spaceball.c b/drivers/input/joystick/spaceball.c
index d6f8db8ec3..75eb5ca599 100644
--- a/drivers/input/joystick/spaceball.c
+++ b/drivers/input/joystick/spaceball.c
@@ -220,7 +220,7 @@ static int spaceball_connect(struct serio *serio, struct serio_driver *drv)
220 goto fail; 220 goto fail;
221 221
222 spaceball->dev = input_dev; 222 spaceball->dev = input_dev;
223 sprintf(spaceball->phys, "%s/input0", serio->phys); 223 snprintf(spaceball->phys, sizeof(spaceball->phys), "%s/input0", serio->phys);
224 224
225 input_dev->name = spaceball_names[id]; 225 input_dev->name = spaceball_names[id];
226 input_dev->phys = spaceball->phys; 226 input_dev->phys = spaceball->phys;
diff --git a/drivers/input/joystick/spaceorb.c b/drivers/input/joystick/spaceorb.c
index 7c123a01c5..3e2782e798 100644
--- a/drivers/input/joystick/spaceorb.c
+++ b/drivers/input/joystick/spaceorb.c
@@ -177,7 +177,7 @@ static int spaceorb_connect(struct serio *serio, struct serio_driver *drv)
177 goto fail; 177 goto fail;
178 178
179 spaceorb->dev = input_dev; 179 spaceorb->dev = input_dev;
180 sprintf(spaceorb->phys, "%s/input0", serio->phys); 180 snprintf(spaceorb->phys, sizeof(spaceorb->phys), "%s/input0", serio->phys);
181 181
182 input_dev->name = "SpaceTec SpaceOrb 360 / Avenger"; 182 input_dev->name = "SpaceTec SpaceOrb 360 / Avenger";
183 input_dev->phys = spaceorb->phys; 183 input_dev->phys = spaceorb->phys;
diff --git a/drivers/input/joystick/stinger.c b/drivers/input/joystick/stinger.c
index 0a9ed1d306..011ec4858e 100644
--- a/drivers/input/joystick/stinger.c
+++ b/drivers/input/joystick/stinger.c
@@ -148,7 +148,7 @@ static int stinger_connect(struct serio *serio, struct serio_driver *drv)
148 goto fail; 148 goto fail;
149 149
150 stinger->dev = input_dev; 150 stinger->dev = input_dev;
151 sprintf(stinger->phys, "%s/serio0", serio->phys); 151 snprintf(stinger->phys, sizeof(stinger->phys), "%s/serio0", serio->phys);
152 152
153 input_dev->name = "Gravis Stinger"; 153 input_dev->name = "Gravis Stinger";
154 input_dev->phys = stinger->phys; 154 input_dev->phys = stinger->phys;
diff --git a/drivers/input/joystick/twidjoy.c b/drivers/input/joystick/twidjoy.c
index 7f8b0093c5..076f237d96 100644
--- a/drivers/input/joystick/twidjoy.c
+++ b/drivers/input/joystick/twidjoy.c
@@ -199,7 +199,7 @@ static int twidjoy_connect(struct serio *serio, struct serio_driver *drv)
199 goto fail; 199 goto fail;
200 200
201 twidjoy->dev = input_dev; 201 twidjoy->dev = input_dev;
202 sprintf(twidjoy->phys, "%s/input0", serio->phys); 202 snprintf(twidjoy->phys, sizeof(twidjoy->phys), "%s/input0", serio->phys);
203 203
204 input_dev->name = "Handykey Twiddler"; 204 input_dev->name = "Handykey Twiddler";
205 input_dev->phys = twidjoy->phys; 205 input_dev->phys = twidjoy->phys;
diff --git a/drivers/input/joystick/warrior.c b/drivers/input/joystick/warrior.c
index 1849b176cf..f9c1a03214 100644
--- a/drivers/input/joystick/warrior.c
+++ b/drivers/input/joystick/warrior.c
@@ -154,7 +154,7 @@ static int warrior_connect(struct serio *serio, struct serio_driver *drv)
154 goto fail; 154 goto fail;
155 155
156 warrior->dev = input_dev; 156 warrior->dev = input_dev;
157 sprintf(warrior->phys, "%s/input0", serio->phys); 157 snprintf(warrior->phys, sizeof(warrior->phys), "%s/input0", serio->phys);
158 158
159 input_dev->name = "Logitech WingMan Warrior"; 159 input_dev->name = "Logitech WingMan Warrior";
160 input_dev->phys = warrior->phys; 160 input_dev->phys = warrior->phys;
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index fad04b66d2..ffde8f86e0 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -55,7 +55,7 @@ static int atkbd_softraw = 1;
55module_param_named(softraw, atkbd_softraw, bool, 0); 55module_param_named(softraw, atkbd_softraw, bool, 0);
56MODULE_PARM_DESC(softraw, "Use software generated rawmode"); 56MODULE_PARM_DESC(softraw, "Use software generated rawmode");
57 57
58static int atkbd_scroll = 0; 58static int atkbd_scroll;
59module_param_named(scroll, atkbd_scroll, bool, 0); 59module_param_named(scroll, atkbd_scroll, bool, 0);
60MODULE_PARM_DESC(scroll, "Enable scroll-wheel on MS Office and similar keyboards"); 60MODULE_PARM_DESC(scroll, "Enable scroll-wheel on MS Office and similar keyboards");
61 61
@@ -150,8 +150,8 @@ static unsigned char atkbd_unxlate_table[128] = {
150#define ATKBD_RET_EMUL0 0xe0 150#define ATKBD_RET_EMUL0 0xe0
151#define ATKBD_RET_EMUL1 0xe1 151#define ATKBD_RET_EMUL1 0xe1
152#define ATKBD_RET_RELEASE 0xf0 152#define ATKBD_RET_RELEASE 0xf0
153#define ATKBD_RET_HANGUEL 0xf1 153#define ATKBD_RET_HANJA 0xf1
154#define ATKBD_RET_HANJA 0xf2 154#define ATKBD_RET_HANGEUL 0xf2
155#define ATKBD_RET_ERR 0xff 155#define ATKBD_RET_ERR 0xff
156 156
157#define ATKBD_KEY_UNKNOWN 0 157#define ATKBD_KEY_UNKNOWN 0
@@ -170,6 +170,13 @@ static unsigned char atkbd_unxlate_table[128] = {
170#define ATKBD_LED_EVENT_BIT 0 170#define ATKBD_LED_EVENT_BIT 0
171#define ATKBD_REP_EVENT_BIT 1 171#define ATKBD_REP_EVENT_BIT 1
172 172
173#define ATKBD_XL_ERR 0x01
174#define ATKBD_XL_BAT 0x02
175#define ATKBD_XL_ACK 0x04
176#define ATKBD_XL_NAK 0x08
177#define ATKBD_XL_HANGEUL 0x10
178#define ATKBD_XL_HANJA 0x20
179
173static struct { 180static struct {
174 unsigned char keycode; 181 unsigned char keycode;
175 unsigned char set2; 182 unsigned char set2;
@@ -211,8 +218,7 @@ struct atkbd {
211 unsigned char emul; 218 unsigned char emul;
212 unsigned char resend; 219 unsigned char resend;
213 unsigned char release; 220 unsigned char release;
214 unsigned char bat_xl; 221 unsigned long xl_bit;
215 unsigned char err_xl;
216 unsigned int last; 222 unsigned int last;
217 unsigned long time; 223 unsigned long time;
218 224
@@ -245,17 +251,65 @@ ATKBD_DEFINE_ATTR(set);
245ATKBD_DEFINE_ATTR(softrepeat); 251ATKBD_DEFINE_ATTR(softrepeat);
246ATKBD_DEFINE_ATTR(softraw); 252ATKBD_DEFINE_ATTR(softraw);
247 253
254static const unsigned int xl_table[] = {
255 ATKBD_RET_BAT, ATKBD_RET_ERR, ATKBD_RET_ACK,
256 ATKBD_RET_NAK, ATKBD_RET_HANJA, ATKBD_RET_HANGEUL,
257};
248 258
249static void atkbd_report_key(struct input_dev *dev, struct pt_regs *regs, int code, int value) 259/*
260 * Checks if we should mangle the scancode to extract 'release' bit
261 * in translated mode.
262 */
263static int atkbd_need_xlate(unsigned long xl_bit, unsigned char code)
250{ 264{
251 input_regs(dev, regs); 265 int i;
252 if (value == 3) { 266
253 input_report_key(dev, code, 1); 267 if (code == ATKBD_RET_EMUL0 || code == ATKBD_RET_EMUL1)
254 input_sync(dev); 268 return 0;
255 input_report_key(dev, code, 0); 269
256 } else 270 for (i = 0; i < ARRAY_SIZE(xl_table); i++)
257 input_event(dev, EV_KEY, code, value); 271 if (code == xl_table[i])
258 input_sync(dev); 272 return test_bit(i, &xl_bit);
273
274 return 1;
275}
276
277/*
278 * Calculates new value of xl_bit so the driver can distinguish
279 * between make/break pair of scancodes for select keys and PS/2
280 * protocol responses.
281 */
282static void atkbd_calculate_xl_bit(struct atkbd *atkbd, unsigned char code)
283{
284 int i;
285
286 for (i = 0; i < ARRAY_SIZE(xl_table); i++) {
287 if (!((code ^ xl_table[i]) & 0x7f)) {
288 if (code & 0x80)
289 __clear_bit(i, &atkbd->xl_bit);
290 else
291 __set_bit(i, &atkbd->xl_bit);
292 break;
293 }
294 }
295}
296
297/*
298 * Encode the scancode, 0xe0 prefix, and high bit into a single integer,
299 * keeping kernel 2.4 compatibility for set 2
300 */
301static unsigned int atkbd_compat_scancode(struct atkbd *atkbd, unsigned int code)
302{
303 if (atkbd->set == 3) {
304 if (atkbd->emul == 1)
305 code |= 0x100;
306 } else {
307 code = (code & 0x7f) | ((code & 0x80) << 1);
308 if (atkbd->emul == 1)
309 code |= 0x80;
310 }
311
312 return code;
259} 313}
260 314
261/* 315/*
@@ -267,9 +321,11 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
267 unsigned int flags, struct pt_regs *regs) 321 unsigned int flags, struct pt_regs *regs)
268{ 322{
269 struct atkbd *atkbd = serio_get_drvdata(serio); 323 struct atkbd *atkbd = serio_get_drvdata(serio);
324 struct input_dev *dev = atkbd->dev;
270 unsigned int code = data; 325 unsigned int code = data;
271 int scroll = 0, hscroll = 0, click = -1; 326 int scroll = 0, hscroll = 0, click = -1, add_release_event = 0;
272 int value; 327 int value;
328 unsigned char keycode;
273 329
274#ifdef ATKBD_DEBUG 330#ifdef ATKBD_DEBUG
275 printk(KERN_DEBUG "atkbd.c: Received %02x flags %02x\n", data, flags); 331 printk(KERN_DEBUG "atkbd.c: Received %02x flags %02x\n", data, flags);
@@ -298,25 +354,17 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
298 if (!atkbd->enabled) 354 if (!atkbd->enabled)
299 goto out; 355 goto out;
300 356
301 input_event(atkbd->dev, EV_MSC, MSC_RAW, code); 357 input_event(dev, EV_MSC, MSC_RAW, code);
302 358
303 if (atkbd->translated) { 359 if (atkbd->translated) {
304 360
305 if (atkbd->emul || 361 if (atkbd->emul || atkbd_need_xlate(atkbd->xl_bit, code)) {
306 (code != ATKBD_RET_EMUL0 && code != ATKBD_RET_EMUL1 &&
307 code != ATKBD_RET_HANGUEL && code != ATKBD_RET_HANJA &&
308 (code != ATKBD_RET_ERR || atkbd->err_xl) &&
309 (code != ATKBD_RET_BAT || atkbd->bat_xl))) {
310 atkbd->release = code >> 7; 362 atkbd->release = code >> 7;
311 code &= 0x7f; 363 code &= 0x7f;
312 } 364 }
313 365
314 if (!atkbd->emul) { 366 if (!atkbd->emul)
315 if ((code & 0x7f) == (ATKBD_RET_BAT & 0x7f)) 367 atkbd_calculate_xl_bit(atkbd, data);
316 atkbd->bat_xl = !(data >> 7);
317 if ((code & 0x7f) == (ATKBD_RET_ERR & 0x7f))
318 atkbd->err_xl = !(data >> 7);
319 }
320 } 368 }
321 369
322 switch (code) { 370 switch (code) {
@@ -333,47 +381,48 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
333 case ATKBD_RET_RELEASE: 381 case ATKBD_RET_RELEASE:
334 atkbd->release = 1; 382 atkbd->release = 1;
335 goto out; 383 goto out;
336 case ATKBD_RET_HANGUEL: 384 case ATKBD_RET_ACK:
337 atkbd_report_key(atkbd->dev, regs, KEY_HANGUEL, 3); 385 case ATKBD_RET_NAK:
386 printk(KERN_WARNING "atkbd.c: Spurious %s on %s. "
387 "Some program might be trying access hardware directly.\n",
388 data == ATKBD_RET_ACK ? "ACK" : "NAK", serio->phys);
338 goto out; 389 goto out;
390 case ATKBD_RET_HANGEUL:
339 case ATKBD_RET_HANJA: 391 case ATKBD_RET_HANJA:
340 atkbd_report_key(atkbd->dev, regs, KEY_HANJA, 3); 392 /*
341 goto out; 393 * These keys do not report release and thus need to be
394 * flagged properly
395 */
396 add_release_event = 1;
397 break;
342 case ATKBD_RET_ERR: 398 case ATKBD_RET_ERR:
343 printk(KERN_DEBUG "atkbd.c: Keyboard on %s reports too many keys pressed.\n", serio->phys); 399 printk(KERN_DEBUG "atkbd.c: Keyboard on %s reports too many keys pressed.\n", serio->phys);
344 goto out; 400 goto out;
345 } 401 }
346 402
347 if (atkbd->set != 3) 403 code = atkbd_compat_scancode(atkbd, code);
348 code = (code & 0x7f) | ((code & 0x80) << 1); 404
349 if (atkbd->emul) { 405 if (atkbd->emul && --atkbd->emul)
350 if (--atkbd->emul) 406 goto out;
351 goto out;
352 code |= (atkbd->set != 3) ? 0x80 : 0x100;
353 }
354 407
355 if (atkbd->keycode[code] != ATKBD_KEY_NULL) 408 keycode = atkbd->keycode[code];
356 input_event(atkbd->dev, EV_MSC, MSC_SCAN, code);
357 409
358 switch (atkbd->keycode[code]) { 410 if (keycode != ATKBD_KEY_NULL)
411 input_event(dev, EV_MSC, MSC_SCAN, code);
412
413 switch (keycode) {
359 case ATKBD_KEY_NULL: 414 case ATKBD_KEY_NULL:
360 break; 415 break;
361 case ATKBD_KEY_UNKNOWN: 416 case ATKBD_KEY_UNKNOWN:
362 if (data == ATKBD_RET_ACK || data == ATKBD_RET_NAK) { 417 printk(KERN_WARNING
363 printk(KERN_WARNING "atkbd.c: Spurious %s on %s. Some program, " 418 "atkbd.c: Unknown key %s (%s set %d, code %#x on %s).\n",
364 "like XFree86, might be trying access hardware directly.\n", 419 atkbd->release ? "released" : "pressed",
365 data == ATKBD_RET_ACK ? "ACK" : "NAK", serio->phys); 420 atkbd->translated ? "translated" : "raw",
366 } else { 421 atkbd->set, code, serio->phys);
367 printk(KERN_WARNING "atkbd.c: Unknown key %s " 422 printk(KERN_WARNING
368 "(%s set %d, code %#x on %s).\n", 423 "atkbd.c: Use 'setkeycodes %s%02x <keycode>' to make it known.\n",
369 atkbd->release ? "released" : "pressed", 424 code & 0x80 ? "e0" : "", code & 0x7f);
370 atkbd->translated ? "translated" : "raw", 425 input_sync(dev);
371 atkbd->set, code, serio->phys);
372 printk(KERN_WARNING "atkbd.c: Use 'setkeycodes %s%02x <keycode>' "
373 "to make it known.\n",
374 code & 0x80 ? "e0" : "", code & 0x7f);
375 }
376 input_sync(atkbd->dev);
377 break; 426 break;
378 case ATKBD_SCR_1: 427 case ATKBD_SCR_1:
379 scroll = 1 - atkbd->release * 2; 428 scroll = 1 - atkbd->release * 2;
@@ -397,33 +446,35 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
397 hscroll = 1; 446 hscroll = 1;
398 break; 447 break;
399 default: 448 default:
400 value = atkbd->release ? 0 : 449 if (atkbd->release) {
401 (1 + (!atkbd->softrepeat && test_bit(atkbd->keycode[code], atkbd->dev->key))); 450 value = 0;
402 451 atkbd->last = 0;
403 switch (value) { /* Workaround Toshiba laptop multiple keypress */ 452 } else if (!atkbd->softrepeat && test_bit(keycode, dev->key)) {
404 case 0: 453 /* Workaround Toshiba laptop multiple keypress */
405 atkbd->last = 0; 454 value = time_before(jiffies, atkbd->time) && atkbd->last == code ? 1 : 2;
406 break; 455 } else {
407 case 1: 456 value = 1;
408 atkbd->last = code; 457 atkbd->last = code;
409 atkbd->time = jiffies + msecs_to_jiffies(atkbd->dev->rep[REP_DELAY]) / 2; 458 atkbd->time = jiffies + msecs_to_jiffies(dev->rep[REP_DELAY]) / 2;
410 break;
411 case 2:
412 if (!time_after(jiffies, atkbd->time) && atkbd->last == code)
413 value = 1;
414 break;
415 } 459 }
416 460
417 atkbd_report_key(atkbd->dev, regs, atkbd->keycode[code], value); 461 input_regs(dev, regs);
462 input_report_key(dev, keycode, value);
463 input_sync(dev);
464
465 if (value && add_release_event) {
466 input_report_key(dev, keycode, 0);
467 input_sync(dev);
468 }
418 } 469 }
419 470
420 if (atkbd->scroll) { 471 if (atkbd->scroll) {
421 input_regs(atkbd->dev, regs); 472 input_regs(dev, regs);
422 if (click != -1) 473 if (click != -1)
423 input_report_key(atkbd->dev, BTN_MIDDLE, click); 474 input_report_key(dev, BTN_MIDDLE, click);
424 input_report_rel(atkbd->dev, REL_WHEEL, scroll); 475 input_report_rel(dev, REL_WHEEL, scroll);
425 input_report_rel(atkbd->dev, REL_HWHEEL, hscroll); 476 input_report_rel(dev, REL_HWHEEL, hscroll);
426 input_sync(atkbd->dev); 477 input_sync(dev);
427 } 478 }
428 479
429 atkbd->release = 0; 480 atkbd->release = 0;
@@ -764,6 +815,9 @@ static void atkbd_set_keycode_table(struct atkbd *atkbd)
764 for (i = 0; i < ARRAY_SIZE(atkbd_scroll_keys); i++) 815 for (i = 0; i < ARRAY_SIZE(atkbd_scroll_keys); i++)
765 atkbd->keycode[atkbd_scroll_keys[i].set2] = atkbd_scroll_keys[i].keycode; 816 atkbd->keycode[atkbd_scroll_keys[i].set2] = atkbd_scroll_keys[i].keycode;
766 } 817 }
818
819 atkbd->keycode[atkbd_compat_scancode(atkbd, ATKBD_RET_HANGEUL)] = KEY_HANGUEL;
820 atkbd->keycode[atkbd_compat_scancode(atkbd, ATKBD_RET_HANJA)] = KEY_HANJA;
767} 821}
768 822
769/* 823/*
@@ -776,12 +830,15 @@ static void atkbd_set_device_attrs(struct atkbd *atkbd)
776 int i; 830 int i;
777 831
778 if (atkbd->extra) 832 if (atkbd->extra)
779 sprintf(atkbd->name, "AT Set 2 Extra keyboard"); 833 snprintf(atkbd->name, sizeof(atkbd->name),
834 "AT Set 2 Extra keyboard");
780 else 835 else
781 sprintf(atkbd->name, "AT %s Set %d keyboard", 836 snprintf(atkbd->name, sizeof(atkbd->name),
782 atkbd->translated ? "Translated" : "Raw", atkbd->set); 837 "AT %s Set %d keyboard",
838 atkbd->translated ? "Translated" : "Raw", atkbd->set);
783 839
784 sprintf(atkbd->phys, "%s/input0", atkbd->ps2dev.serio->phys); 840 snprintf(atkbd->phys, sizeof(atkbd->phys),
841 "%s/input0", atkbd->ps2dev.serio->phys);
785 842
786 input_dev->name = atkbd->name; 843 input_dev->name = atkbd->name;
787 input_dev->phys = atkbd->phys; 844 input_dev->phys = atkbd->phys;
diff --git a/drivers/input/keyboard/lkkbd.c b/drivers/input/keyboard/lkkbd.c
index 77c4d9669a..5174224cad 100644
--- a/drivers/input/keyboard/lkkbd.c
+++ b/drivers/input/keyboard/lkkbd.c
@@ -384,18 +384,21 @@ lkkbd_detection_done (struct lkkbd *lk)
384 */ 384 */
385 switch (lk->id[4]) { 385 switch (lk->id[4]) {
386 case 1: 386 case 1:
387 sprintf (lk->name, "DEC LK201 keyboard"); 387 strlcpy (lk->name, "DEC LK201 keyboard",
388 sizeof (lk->name));
388 389
389 if (lk201_compose_is_alt) 390 if (lk201_compose_is_alt)
390 lk->keycode[0xb1] = KEY_LEFTALT; 391 lk->keycode[0xb1] = KEY_LEFTALT;
391 break; 392 break;
392 393
393 case 2: 394 case 2:
394 sprintf (lk->name, "DEC LK401 keyboard"); 395 strlcpy (lk->name, "DEC LK401 keyboard",
396 sizeof (lk->name));
395 break; 397 break;
396 398
397 default: 399 default:
398 sprintf (lk->name, "Unknown DEC keyboard"); 400 strlcpy (lk->name, "Unknown DEC keyboard",
401 sizeof (lk->name));
399 printk (KERN_ERR "lkkbd: keyboard on %s is unknown, " 402 printk (KERN_ERR "lkkbd: keyboard on %s is unknown, "
400 "please report to Jan-Benedict Glaw " 403 "please report to Jan-Benedict Glaw "
401 "<jbglaw@lug-owl.de>\n", lk->phys); 404 "<jbglaw@lug-owl.de>\n", lk->phys);
diff --git a/drivers/input/keyboard/newtonkbd.c b/drivers/input/keyboard/newtonkbd.c
index d10983c521..40a3f55124 100644
--- a/drivers/input/keyboard/newtonkbd.c
+++ b/drivers/input/keyboard/newtonkbd.c
@@ -96,7 +96,7 @@ static int nkbd_connect(struct serio *serio, struct serio_driver *drv)
96 96
97 nkbd->serio = serio; 97 nkbd->serio = serio;
98 nkbd->dev = input_dev; 98 nkbd->dev = input_dev;
99 sprintf(nkbd->phys, "%s/input0", serio->phys); 99 snprintf(nkbd->phys, sizeof(nkbd->phys), "%s/input0", serio->phys);
100 memcpy(nkbd->keycode, nkbd_keycode, sizeof(nkbd->keycode)); 100 memcpy(nkbd->keycode, nkbd_keycode, sizeof(nkbd->keycode));
101 101
102 input_dev->name = "Newton Keyboard"; 102 input_dev->name = "Newton Keyboard";
diff --git a/drivers/input/keyboard/sunkbd.c b/drivers/input/keyboard/sunkbd.c
index b15b6d8d4f..9dbd7b8568 100644
--- a/drivers/input/keyboard/sunkbd.c
+++ b/drivers/input/keyboard/sunkbd.c
@@ -263,7 +263,7 @@ static int sunkbd_connect(struct serio *serio, struct serio_driver *drv)
263 goto fail; 263 goto fail;
264 } 264 }
265 265
266 sprintf(sunkbd->name, "Sun Type %d keyboard", sunkbd->type); 266 snprintf(sunkbd->name, sizeof(sunkbd->name), "Sun Type %d keyboard", sunkbd->type);
267 memcpy(sunkbd->keycode, sunkbd_keycode, sizeof(sunkbd->keycode)); 267 memcpy(sunkbd->keycode, sunkbd_keycode, sizeof(sunkbd->keycode));
268 268
269 input_dev->name = sunkbd->name; 269 input_dev->name = sunkbd->name;
diff --git a/drivers/input/keyboard/xtkbd.c b/drivers/input/keyboard/xtkbd.c
index 4135e3e16c..0821d53cf0 100644
--- a/drivers/input/keyboard/xtkbd.c
+++ b/drivers/input/keyboard/xtkbd.c
@@ -100,7 +100,7 @@ static int xtkbd_connect(struct serio *serio, struct serio_driver *drv)
100 100
101 xtkbd->serio = serio; 101 xtkbd->serio = serio;
102 xtkbd->dev = input_dev; 102 xtkbd->dev = input_dev;
103 sprintf(xtkbd->phys, "%s/input0", serio->phys); 103 snprintf(xtkbd->phys, sizeof(xtkbd->phys), "%s/input0", serio->phys);
104 memcpy(xtkbd->keycode, xtkbd_keycode, sizeof(xtkbd->keycode)); 104 memcpy(xtkbd->keycode, xtkbd_keycode, sizeof(xtkbd->keycode));
105 105
106 input_dev->name = "XT Keyboard"; 106 input_dev->name = "XT Keyboard";
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index a0e2e797c6..070d75330a 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -470,7 +470,7 @@ int alps_init(struct psmouse *psmouse)
470 dev1->keybit[LONG(BTN_BACK)] |= BIT(BTN_BACK); 470 dev1->keybit[LONG(BTN_BACK)] |= BIT(BTN_BACK);
471 } 471 }
472 472
473 sprintf(priv->phys, "%s/input1", psmouse->ps2dev.serio->phys); 473 snprintf(priv->phys, sizeof(priv->phys), "%s/input1", psmouse->ps2dev.serio->phys);
474 dev2->phys = priv->phys; 474 dev2->phys = priv->phys;
475 dev2->name = (priv->i->flags & ALPS_DUALPOINT) ? "DualPoint Stick" : "PS/2 Mouse"; 475 dev2->name = (priv->i->flags & ALPS_DUALPOINT) ? "DualPoint Stick" : "PS/2 Mouse";
476 dev2->id.bustype = BUS_I8042; 476 dev2->id.bustype = BUS_I8042;
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 136321a2cf..8bc9f51ae6 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -150,9 +150,20 @@ static psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse, struct pt_reg
150 */ 150 */
151 151
152 if (psmouse->type == PSMOUSE_IMEX) { 152 if (psmouse->type == PSMOUSE_IMEX) {
153 input_report_rel(dev, REL_WHEEL, (int) (packet[3] & 8) - (int) (packet[3] & 7)); 153 switch (packet[3] & 0xC0) {
154 input_report_key(dev, BTN_SIDE, (packet[3] >> 4) & 1); 154 case 0x80: /* vertical scroll on IntelliMouse Explorer 4.0 */
155 input_report_key(dev, BTN_EXTRA, (packet[3] >> 5) & 1); 155 input_report_rel(dev, REL_WHEEL, (int) (packet[3] & 32) - (int) (packet[3] & 31));
156 break;
157 case 0x40: /* horizontal scroll on IntelliMouse Explorer 4.0 */
158 input_report_rel(dev, REL_HWHEEL, (int) (packet[3] & 32) - (int) (packet[3] & 31));
159 break;
160 case 0x00:
161 case 0xC0:
162 input_report_rel(dev, REL_WHEEL, (int) (packet[3] & 8) - (int) (packet[3] & 7));
163 input_report_key(dev, BTN_SIDE, (packet[3] >> 4) & 1);
164 input_report_key(dev, BTN_EXTRA, (packet[3] >> 5) & 1);
165 break;
166 }
156 } 167 }
157 168
158/* 169/*
@@ -466,9 +477,25 @@ static int im_explorer_detect(struct psmouse *psmouse, int set_properties)
466 if (param[0] != 4) 477 if (param[0] != 4)
467 return -1; 478 return -1;
468 479
480/* Magic to enable horizontal scrolling on IntelliMouse 4.0 */
481 param[0] = 200;
482 ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
483 param[0] = 80;
484 ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
485 param[0] = 40;
486 ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
487
488 param[0] = 200;
489 ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
490 param[0] = 200;
491 ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
492 param[0] = 60;
493 ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
494
469 if (set_properties) { 495 if (set_properties) {
470 set_bit(BTN_MIDDLE, psmouse->dev->keybit); 496 set_bit(BTN_MIDDLE, psmouse->dev->keybit);
471 set_bit(REL_WHEEL, psmouse->dev->relbit); 497 set_bit(REL_WHEEL, psmouse->dev->relbit);
498 set_bit(REL_HWHEEL, psmouse->dev->relbit);
472 set_bit(BTN_SIDE, psmouse->dev->keybit); 499 set_bit(BTN_SIDE, psmouse->dev->keybit);
473 set_bit(BTN_EXTRA, psmouse->dev->keybit); 500 set_bit(BTN_EXTRA, psmouse->dev->keybit);
474 501
@@ -1057,8 +1084,8 @@ static int psmouse_switch_protocol(struct psmouse *psmouse, struct psmouse_proto
1057 if (psmouse->resync_time && psmouse->poll(psmouse)) 1084 if (psmouse->resync_time && psmouse->poll(psmouse))
1058 psmouse->resync_time = 0; 1085 psmouse->resync_time = 0;
1059 1086
1060 sprintf(psmouse->devname, "%s %s %s", 1087 snprintf(psmouse->devname, sizeof(psmouse->devname), "%s %s %s",
1061 psmouse_protocol_by_type(psmouse->type)->name, psmouse->vendor, psmouse->name); 1088 psmouse_protocol_by_type(psmouse->type)->name, psmouse->vendor, psmouse->name);
1062 1089
1063 input_dev->name = psmouse->devname; 1090 input_dev->name = psmouse->devname;
1064 input_dev->phys = psmouse->phys; 1091 input_dev->phys = psmouse->phys;
@@ -1099,7 +1126,7 @@ static int psmouse_connect(struct serio *serio, struct serio_driver *drv)
1099 ps2_init(&psmouse->ps2dev, serio); 1126 ps2_init(&psmouse->ps2dev, serio);
1100 INIT_WORK(&psmouse->resync_work, psmouse_resync, psmouse); 1127 INIT_WORK(&psmouse->resync_work, psmouse_resync, psmouse);
1101 psmouse->dev = input_dev; 1128 psmouse->dev = input_dev;
1102 sprintf(psmouse->phys, "%s/input0", serio->phys); 1129 snprintf(psmouse->phys, sizeof(psmouse->phys), "%s/input0", serio->phys);
1103 1130
1104 psmouse_set_state(psmouse, PSMOUSE_INITIALIZING); 1131 psmouse_set_state(psmouse, PSMOUSE_INITIALIZING);
1105 1132
diff --git a/drivers/input/mouse/sermouse.c b/drivers/input/mouse/sermouse.c
index 2f9a04ae72..a897424317 100644
--- a/drivers/input/mouse/sermouse.c
+++ b/drivers/input/mouse/sermouse.c
@@ -254,7 +254,7 @@ static int sermouse_connect(struct serio *serio, struct serio_driver *drv)
254 goto fail; 254 goto fail;
255 255
256 sermouse->dev = input_dev; 256 sermouse->dev = input_dev;
257 sprintf(sermouse->phys, "%s/input0", serio->phys); 257 snprintf(sermouse->phys, sizeof(sermouse->phys), "%s/input0", serio->phys);
258 sermouse->type = serio->id.proto; 258 sermouse->type = serio->id.proto;
259 259
260 input_dev->name = sermouse_protocols[sermouse->type]; 260 input_dev->name = sermouse_protocols[sermouse->type];
diff --git a/drivers/input/mouse/vsxxxaa.c b/drivers/input/mouse/vsxxxaa.c
index 36e9442a16..7b85bc21ae 100644
--- a/drivers/input/mouse/vsxxxaa.c
+++ b/drivers/input/mouse/vsxxxaa.c
@@ -153,22 +153,25 @@ vsxxxaa_detection_done (struct vsxxxaa *mouse)
153{ 153{
154 switch (mouse->type) { 154 switch (mouse->type) {
155 case 0x02: 155 case 0x02:
156 sprintf (mouse->name, "DEC VSXXX-AA/-GA mouse"); 156 strlcpy (mouse->name, "DEC VSXXX-AA/-GA mouse",
157 sizeof (mouse->name));
157 break; 158 break;
158 159
159 case 0x04: 160 case 0x04:
160 sprintf (mouse->name, "DEC VSXXX-AB digitizer"); 161 strlcpy (mouse->name, "DEC VSXXX-AB digitizer",
162 sizeof (mouse->name));
161 break; 163 break;
162 164
163 default: 165 default:
164 sprintf (mouse->name, "unknown DEC pointer device " 166 snprintf (mouse->name, sizeof (mouse->name),
165 "(type = 0x%02x)", mouse->type); 167 "unknown DEC pointer device (type = 0x%02x)",
168 mouse->type);
166 break; 169 break;
167 } 170 }
168 171
169 printk (KERN_INFO "Found %s version 0x%02x from country 0x%02x " 172 printk (KERN_INFO
170 "on port %s\n", mouse->name, mouse->version, 173 "Found %s version 0x%02x from country 0x%02x on port %s\n",
171 mouse->country, mouse->phys); 174 mouse->name, mouse->version, mouse->country, mouse->phys);
172} 175}
173 176
174/* 177/*
@@ -503,8 +506,9 @@ vsxxxaa_connect (struct serio *serio, struct serio_driver *drv)
503 506
504 mouse->dev = input_dev; 507 mouse->dev = input_dev;
505 mouse->serio = serio; 508 mouse->serio = serio;
506 sprintf (mouse->name, "DEC VSXXX-AA/-GA mouse or VSXXX-AB digitizer"); 509 strlcat (mouse->name, "DEC VSXXX-AA/-GA mouse or VSXXX-AB digitizer",
507 sprintf (mouse->phys, "%s/input0", serio->phys); 510 sizeof (mouse->name));
511 snprintf (mouse->phys, sizeof (mouse->phys), "%s/input0", serio->phys);
508 512
509 input_dev->name = mouse->name; 513 input_dev->name = mouse->name;
510 input_dev->phys = mouse->phys; 514 input_dev->phys = mouse->phys;
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index b685a50795..eb721b11ff 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -123,7 +123,9 @@ static void mousedev_touchpad_event(struct input_dev *dev, struct mousedev *mous
123 123
124 if (mousedev->touch) { 124 if (mousedev->touch) {
125 size = dev->absmax[ABS_X] - dev->absmin[ABS_X]; 125 size = dev->absmax[ABS_X] - dev->absmin[ABS_X];
126 if (size == 0) size = 256 * 2; 126 if (size == 0)
127 size = 256 * 2;
128
127 switch (code) { 129 switch (code) {
128 case ABS_X: 130 case ABS_X:
129 fx(0) = value; 131 fx(0) = value;
@@ -155,18 +157,24 @@ static void mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev,
155 switch (code) { 157 switch (code) {
156 case ABS_X: 158 case ABS_X:
157 size = dev->absmax[ABS_X] - dev->absmin[ABS_X]; 159 size = dev->absmax[ABS_X] - dev->absmin[ABS_X];
158 if (size == 0) size = xres ? : 1; 160 if (size == 0)
159 if (value > dev->absmax[ABS_X]) value = dev->absmax[ABS_X]; 161 size = xres ? : 1;
160 if (value < dev->absmin[ABS_X]) value = dev->absmin[ABS_X]; 162 if (value > dev->absmax[ABS_X])
163 value = dev->absmax[ABS_X];
164 if (value < dev->absmin[ABS_X])
165 value = dev->absmin[ABS_X];
161 mousedev->packet.x = ((value - dev->absmin[ABS_X]) * xres) / size; 166 mousedev->packet.x = ((value - dev->absmin[ABS_X]) * xres) / size;
162 mousedev->packet.abs_event = 1; 167 mousedev->packet.abs_event = 1;
163 break; 168 break;
164 169
165 case ABS_Y: 170 case ABS_Y:
166 size = dev->absmax[ABS_Y] - dev->absmin[ABS_Y]; 171 size = dev->absmax[ABS_Y] - dev->absmin[ABS_Y];
167 if (size == 0) size = yres ? : 1; 172 if (size == 0)
168 if (value > dev->absmax[ABS_Y]) value = dev->absmax[ABS_Y]; 173 size = yres ? : 1;
169 if (value < dev->absmin[ABS_Y]) value = dev->absmin[ABS_Y]; 174 if (value > dev->absmax[ABS_Y])
175 value = dev->absmax[ABS_Y];
176 if (value < dev->absmin[ABS_Y])
177 value = dev->absmin[ABS_Y];
170 mousedev->packet.y = yres - ((value - dev->absmin[ABS_Y]) * yres) / size; 178 mousedev->packet.y = yres - ((value - dev->absmin[ABS_Y]) * yres) / size;
171 mousedev->packet.abs_event = 1; 179 mousedev->packet.abs_event = 1;
172 break; 180 break;
@@ -202,7 +210,7 @@ static void mousedev_key_event(struct mousedev *mousedev, unsigned int code, int
202 case BTN_SIDE: index = 3; break; 210 case BTN_SIDE: index = 3; break;
203 case BTN_4: 211 case BTN_4:
204 case BTN_EXTRA: index = 4; break; 212 case BTN_EXTRA: index = 4; break;
205 default: return; 213 default: return;
206 } 214 }
207 215
208 if (value) { 216 if (value) {
@@ -285,10 +293,9 @@ static void mousedev_touchpad_touch(struct mousedev *mousedev, int value)
285 mousedev->touch = mousedev->pkt_count = 0; 293 mousedev->touch = mousedev->pkt_count = 0;
286 mousedev->frac_dx = 0; 294 mousedev->frac_dx = 0;
287 mousedev->frac_dy = 0; 295 mousedev->frac_dy = 0;
288 } 296
289 else 297 } else if (!mousedev->touch)
290 if (!mousedev->touch) 298 mousedev->touch = jiffies;
291 mousedev->touch = jiffies;
292} 299}
293 300
294static void mousedev_event(struct input_handle *handle, unsigned int type, unsigned int code, int value) 301static void mousedev_event(struct input_handle *handle, unsigned int type, unsigned int code, int value)
@@ -327,7 +334,7 @@ static void mousedev_event(struct input_handle *handle, unsigned int type, unsig
327 mousedev->pkt_count++; 334 mousedev->pkt_count++;
328 /* Input system eats duplicate events, but we need all of them 335 /* Input system eats duplicate events, but we need all of them
329 * to do correct averaging so apply present one forward 336 * to do correct averaging so apply present one forward
330 */ 337 */
331 fx(0) = fx(1); 338 fx(0) = fx(1);
332 fy(0) = fy(1); 339 fy(0) = fy(1);
333 } 340 }
@@ -346,7 +353,9 @@ static int mousedev_fasync(int fd, struct file *file, int on)
346{ 353{
347 int retval; 354 int retval;
348 struct mousedev_list *list = file->private_data; 355 struct mousedev_list *list = file->private_data;
356
349 retval = fasync_helper(fd, file, on, &list->fasync); 357 retval = fasync_helper(fd, file, on, &list->fasync);
358
350 return retval < 0 ? retval : 0; 359 return retval < 0 ? retval : 0;
351} 360}
352 361
@@ -507,14 +516,16 @@ static ssize_t mousedev_write(struct file * file, const char __user * buffer, si
507 list->imexseq = 0; 516 list->imexseq = 0;
508 list->mode = MOUSEDEV_EMUL_EXPS; 517 list->mode = MOUSEDEV_EMUL_EXPS;
509 } 518 }
510 } else list->imexseq = 0; 519 } else
520 list->imexseq = 0;
511 521
512 if (c == mousedev_imps_seq[list->impsseq]) { 522 if (c == mousedev_imps_seq[list->impsseq]) {
513 if (++list->impsseq == MOUSEDEV_SEQ_LEN) { 523 if (++list->impsseq == MOUSEDEV_SEQ_LEN) {
514 list->impsseq = 0; 524 list->impsseq = 0;
515 list->mode = MOUSEDEV_EMUL_IMPS; 525 list->mode = MOUSEDEV_EMUL_IMPS;
516 } 526 }
517 } else list->impsseq = 0; 527 } else
528 list->impsseq = 0;
518 529
519 list->ps2[0] = 0xfa; 530 list->ps2[0] = 0xfa;
520 531
@@ -598,6 +609,7 @@ static ssize_t mousedev_read(struct file * file, char __user * buffer, size_t co
598static unsigned int mousedev_poll(struct file *file, poll_table *wait) 609static unsigned int mousedev_poll(struct file *file, poll_table *wait)
599{ 610{
600 struct mousedev_list *list = file->private_data; 611 struct mousedev_list *list = file->private_data;
612
601 poll_wait(file, &list->mousedev->wait, wait); 613 poll_wait(file, &list->mousedev->wait, wait);
602 return ((list->ready || list->buffer) ? (POLLIN | POLLRDNORM) : 0) | 614 return ((list->ready || list->buffer) ? (POLLIN | POLLRDNORM) : 0) |
603 (list->mousedev->exist ? 0 : (POLLHUP | POLLERR)); 615 (list->mousedev->exist ? 0 : (POLLHUP | POLLERR));
diff --git a/drivers/input/touchscreen/gunze.c b/drivers/input/touchscreen/gunze.c
index 466da190ce..b769b21973 100644
--- a/drivers/input/touchscreen/gunze.c
+++ b/drivers/input/touchscreen/gunze.c
@@ -129,7 +129,7 @@ static int gunze_connect(struct serio *serio, struct serio_driver *drv)
129 129
130 gunze->serio = serio; 130 gunze->serio = serio;
131 gunze->dev = input_dev; 131 gunze->dev = input_dev;
132 sprintf(gunze->phys, "%s/input0", serio->phys); 132 snprintf(gunze->phys, sizeof(serio->phys), "%s/input0", serio->phys);
133 133
134 input_dev->private = gunze; 134 input_dev->private = gunze;
135 input_dev->name = "Gunze AHL-51S TouchScreen"; 135 input_dev->name = "Gunze AHL-51S TouchScreen";
diff --git a/drivers/input/touchscreen/h3600_ts_input.c b/drivers/input/touchscreen/h3600_ts_input.c
index a595d38631..2de2139f2f 100644
--- a/drivers/input/touchscreen/h3600_ts_input.c
+++ b/drivers/input/touchscreen/h3600_ts_input.c
@@ -363,7 +363,7 @@ static int h3600ts_connect(struct serio *serio, struct serio_driver *drv)
363 363
364 ts->serio = serio; 364 ts->serio = serio;
365 ts->dev = input_dev; 365 ts->dev = input_dev;
366 sprintf(ts->phys, "%s/input0", serio->phys); 366 snprintf(ts->phys, sizeof(ts->phys), "%s/input0", serio->phys);
367 367
368 input_dev->name = "H3600 TouchScreen"; 368 input_dev->name = "H3600 TouchScreen";
369 input_dev->phys = ts->phys; 369 input_dev->phys = ts->phys;
diff --git a/drivers/input/touchscreen/mtouch.c b/drivers/input/touchscreen/mtouch.c
index 1d0d37eeef..8647a905df 100644
--- a/drivers/input/touchscreen/mtouch.c
+++ b/drivers/input/touchscreen/mtouch.c
@@ -143,7 +143,7 @@ static int mtouch_connect(struct serio *serio, struct serio_driver *drv)
143 143
144 mtouch->serio = serio; 144 mtouch->serio = serio;
145 mtouch->dev = input_dev; 145 mtouch->dev = input_dev;
146 sprintf(mtouch->phys, "%s/input0", serio->phys); 146 snprintf(mtouch->phys, sizeof(mtouch->phys), "%s/input0", serio->phys);
147 147
148 input_dev->private = mtouch; 148 input_dev->private = mtouch;
149 input_dev->name = "MicroTouch Serial TouchScreen"; 149 input_dev->name = "MicroTouch Serial TouchScreen";
diff --git a/drivers/input/tsdev.c b/drivers/input/tsdev.c
index d678d144bb..5f9ecad2ca 100644
--- a/drivers/input/tsdev.c
+++ b/drivers/input/tsdev.c
@@ -35,7 +35,7 @@
35 * e-mail - mail your message to <jsimmons@infradead.org>. 35 * e-mail - mail your message to <jsimmons@infradead.org>.
36 */ 36 */
37 37
38#define TSDEV_MINOR_BASE 128 38#define TSDEV_MINOR_BASE 128
39#define TSDEV_MINORS 32 39#define TSDEV_MINORS 32
40/* First 16 devices are h3600_ts compatible; second 16 are h3600_tsraw */ 40/* First 16 devices are h3600_ts compatible; second 16 are h3600_tsraw */
41#define TSDEV_MINOR_MASK 15 41#define TSDEV_MINOR_MASK 15
@@ -230,6 +230,7 @@ static ssize_t tsdev_read(struct file *file, char __user *buffer, size_t count,
230static unsigned int tsdev_poll(struct file *file, poll_table * wait) 230static unsigned int tsdev_poll(struct file *file, poll_table * wait)
231{ 231{
232 struct tsdev_list *list = file->private_data; 232 struct tsdev_list *list = file->private_data;
233
233 poll_wait(file, &list->tsdev->wait, wait); 234 poll_wait(file, &list->tsdev->wait, wait);
234 return ((list->head == list->tail) ? 0 : (POLLIN | POLLRDNORM)) | 235 return ((list->head == list->tail) ? 0 : (POLLIN | POLLRDNORM)) |
235 (list->tsdev->exist ? 0 : (POLLHUP | POLLERR)); 236 (list->tsdev->exist ? 0 : (POLLHUP | POLLERR));
@@ -248,11 +249,13 @@ static int tsdev_ioctl(struct inode *inode, struct file *file,
248 sizeof (struct ts_calibration))) 249 sizeof (struct ts_calibration)))
249 retval = -EFAULT; 250 retval = -EFAULT;
250 break; 251 break;
252
251 case TS_SET_CAL: 253 case TS_SET_CAL:
252 if (copy_from_user (&tsdev->cal, (void __user *)arg, 254 if (copy_from_user (&tsdev->cal, (void __user *)arg,
253 sizeof (struct ts_calibration))) 255 sizeof (struct ts_calibration)))
254 retval = -EFAULT; 256 retval = -EFAULT;
255 break; 257 break;
258
256 default: 259 default:
257 retval = -EINVAL; 260 retval = -EINVAL;
258 break; 261 break;
@@ -284,9 +287,11 @@ static void tsdev_event(struct input_handle *handle, unsigned int type,
284 case ABS_X: 287 case ABS_X:
285 tsdev->x = value; 288 tsdev->x = value;
286 break; 289 break;
290
287 case ABS_Y: 291 case ABS_Y:
288 tsdev->y = value; 292 tsdev->y = value;
289 break; 293 break;
294
290 case ABS_PRESSURE: 295 case ABS_PRESSURE:
291 if (value > handle->dev->absmax[ABS_PRESSURE]) 296 if (value > handle->dev->absmax[ABS_PRESSURE])
292 value = handle->dev->absmax[ABS_PRESSURE]; 297 value = handle->dev->absmax[ABS_PRESSURE];
@@ -307,6 +312,7 @@ static void tsdev_event(struct input_handle *handle, unsigned int type,
307 else if (tsdev->x > xres) 312 else if (tsdev->x > xres)
308 tsdev->x = xres; 313 tsdev->x = xres;
309 break; 314 break;
315
310 case REL_Y: 316 case REL_Y:
311 tsdev->y += value; 317 tsdev->y += value;
312 if (tsdev->y < 0) 318 if (tsdev->y < 0)
@@ -323,6 +329,7 @@ static void tsdev_event(struct input_handle *handle, unsigned int type,
323 case 0: 329 case 0:
324 tsdev->pressure = 0; 330 tsdev->pressure = 0;
325 break; 331 break;
332
326 case 1: 333 case 1:
327 if (!tsdev->pressure) 334 if (!tsdev->pressure)
328 tsdev->pressure = 1; 335 tsdev->pressure = 1;
@@ -370,9 +377,8 @@ static struct input_handle *tsdev_connect(struct input_handler *handler,
370 struct class_device *cdev; 377 struct class_device *cdev;
371 int minor, delta; 378 int minor, delta;
372 379
373 for (minor = 0; minor < TSDEV_MINORS/2 && tsdev_table[minor]; 380 for (minor = 0; minor < TSDEV_MINORS / 2 && tsdev_table[minor]; minor++);
374 minor++); 381 if (minor >= TSDEV_MINORS / 2) {
375 if (minor >= TSDEV_MINORS/2) {
376 printk(KERN_ERR 382 printk(KERN_ERR
377 "tsdev: You have way too many touchscreens\n"); 383 "tsdev: You have way too many touchscreens\n");
378 return NULL; 384 return NULL;
@@ -444,22 +450,22 @@ static struct input_device_id tsdev_ids[] = {
444 .evbit = { BIT(EV_KEY) | BIT(EV_REL) }, 450 .evbit = { BIT(EV_KEY) | BIT(EV_REL) },
445 .keybit = { [LONG(BTN_LEFT)] = BIT(BTN_LEFT) }, 451 .keybit = { [LONG(BTN_LEFT)] = BIT(BTN_LEFT) },
446 .relbit = { BIT(REL_X) | BIT(REL_Y) }, 452 .relbit = { BIT(REL_X) | BIT(REL_Y) },
447 },/* A mouse like device, at least one button, two relative axes */ 453 }, /* A mouse like device, at least one button, two relative axes */
448 454
449 { 455 {
450 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT | INPUT_DEVICE_ID_MATCH_ABSBIT, 456 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT | INPUT_DEVICE_ID_MATCH_ABSBIT,
451 .evbit = { BIT(EV_KEY) | BIT(EV_ABS) }, 457 .evbit = { BIT(EV_KEY) | BIT(EV_ABS) },
452 .keybit = { [LONG(BTN_TOUCH)] = BIT(BTN_TOUCH) }, 458 .keybit = { [LONG(BTN_TOUCH)] = BIT(BTN_TOUCH) },
453 .absbit = { BIT(ABS_X) | BIT(ABS_Y) }, 459 .absbit = { BIT(ABS_X) | BIT(ABS_Y) },
454 },/* A tablet like device, at least touch detection, two absolute axes */ 460 }, /* A tablet like device, at least touch detection, two absolute axes */
455 461
456 { 462 {
457 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_ABSBIT, 463 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_ABSBIT,
458 .evbit = { BIT(EV_ABS) }, 464 .evbit = { BIT(EV_ABS) },
459 .absbit = { BIT(ABS_X) | BIT(ABS_Y) | BIT(ABS_PRESSURE) }, 465 .absbit = { BIT(ABS_X) | BIT(ABS_Y) | BIT(ABS_PRESSURE) },
460 },/* A tablet like device with several gradations of pressure */ 466 }, /* A tablet like device with several gradations of pressure */
461 467
462 {},/* Terminating entry */ 468 {} /* Terminating entry */
463}; 469};
464 470
465MODULE_DEVICE_TABLE(input, tsdev_ids); 471MODULE_DEVICE_TABLE(input, tsdev_ids);
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 173c899a1f..2e541fa020 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -87,6 +87,11 @@ struct capincci;
87#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE 87#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
88struct capiminor; 88struct capiminor;
89 89
90struct datahandle_queue {
91 struct list_head list;
92 u16 datahandle;
93};
94
90struct capiminor { 95struct capiminor {
91 struct list_head list; 96 struct list_head list;
92 struct capincci *nccip; 97 struct capincci *nccip;
@@ -109,12 +114,9 @@ struct capiminor {
109 int outbytes; 114 int outbytes;
110 115
111 /* transmit path */ 116 /* transmit path */
112 struct datahandle_queue { 117 struct list_head ackqueue;
113 struct datahandle_queue *next;
114 u16 datahandle;
115 } *ackqueue;
116 int nack; 118 int nack;
117 119 spinlock_t ackqlock;
118}; 120};
119#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */ 121#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
120 122
@@ -156,48 +158,54 @@ static LIST_HEAD(capiminor_list);
156 158
157static int capincci_add_ack(struct capiminor *mp, u16 datahandle) 159static int capincci_add_ack(struct capiminor *mp, u16 datahandle)
158{ 160{
159 struct datahandle_queue *n, **pp; 161 struct datahandle_queue *n;
162 unsigned long flags;
160 163
161 n = kmalloc(sizeof(*n), GFP_ATOMIC); 164 n = kmalloc(sizeof(*n), GFP_ATOMIC);
162 if (!n) { 165 if (unlikely(!n)) {
163 printk(KERN_ERR "capi: alloc datahandle failed\n"); 166 printk(KERN_ERR "capi: alloc datahandle failed\n");
164 return -1; 167 return -1;
165 } 168 }
166 n->next = NULL;
167 n->datahandle = datahandle; 169 n->datahandle = datahandle;
168 for (pp = &mp->ackqueue; *pp; pp = &(*pp)->next) ; 170 INIT_LIST_HEAD(&n->list);
169 *pp = n; 171 spin_lock_irqsave(&mp->ackqlock, flags);
172 list_add_tail(&n->list, &mp->ackqueue);
170 mp->nack++; 173 mp->nack++;
174 spin_unlock_irqrestore(&mp->ackqlock, flags);
171 return 0; 175 return 0;
172} 176}
173 177
174static int capiminor_del_ack(struct capiminor *mp, u16 datahandle) 178static int capiminor_del_ack(struct capiminor *mp, u16 datahandle)
175{ 179{
176 struct datahandle_queue **pp, *p; 180 struct datahandle_queue *p, *tmp;
181 unsigned long flags;
177 182
178 for (pp = &mp->ackqueue; *pp; pp = &(*pp)->next) { 183 spin_lock_irqsave(&mp->ackqlock, flags);
179 if ((*pp)->datahandle == datahandle) { 184 list_for_each_entry_safe(p, tmp, &mp->ackqueue, list) {
180 p = *pp; 185 if (p->datahandle == datahandle) {
181 *pp = (*pp)->next; 186 list_del(&p->list);
182 kfree(p); 187 kfree(p);
183 mp->nack--; 188 mp->nack--;
189 spin_unlock_irqrestore(&mp->ackqlock, flags);
184 return 0; 190 return 0;
185 } 191 }
186 } 192 }
193 spin_unlock_irqrestore(&mp->ackqlock, flags);
187 return -1; 194 return -1;
188} 195}
189 196
190static void capiminor_del_all_ack(struct capiminor *mp) 197static void capiminor_del_all_ack(struct capiminor *mp)
191{ 198{
192 struct datahandle_queue **pp, *p; 199 struct datahandle_queue *p, *tmp;
200 unsigned long flags;
193 201
194 pp = &mp->ackqueue; 202 spin_lock_irqsave(&mp->ackqlock, flags);
195 while (*pp) { 203 list_for_each_entry_safe(p, tmp, &mp->ackqueue, list) {
196 p = *pp; 204 list_del(&p->list);
197 *pp = (*pp)->next;
198 kfree(p); 205 kfree(p);
199 mp->nack--; 206 mp->nack--;
200 } 207 }
208 spin_unlock_irqrestore(&mp->ackqlock, flags);
201} 209}
202 210
203 211
@@ -220,6 +228,8 @@ static struct capiminor *capiminor_alloc(struct capi20_appl *ap, u32 ncci)
220 mp->ncci = ncci; 228 mp->ncci = ncci;
221 mp->msgid = 0; 229 mp->msgid = 0;
222 atomic_set(&mp->ttyopencount,0); 230 atomic_set(&mp->ttyopencount,0);
231 INIT_LIST_HEAD(&mp->ackqueue);
232 spin_lock_init(&mp->ackqlock);
223 233
224 skb_queue_head_init(&mp->inqueue); 234 skb_queue_head_init(&mp->inqueue);
225 skb_queue_head_init(&mp->outqueue); 235 skb_queue_head_init(&mp->outqueue);
diff --git a/drivers/isdn/divert/isdn_divert.c b/drivers/isdn/divert/isdn_divert.c
index f1a1f9a9b8..1f5ebe9ee7 100644
--- a/drivers/isdn/divert/isdn_divert.c
+++ b/drivers/isdn/divert/isdn_divert.c
@@ -592,7 +592,7 @@ static int put_address(char *st, u_char *p, int len)
592} /* put_address */ 592} /* put_address */
593 593
594/*************************************/ 594/*************************************/
595/* report a succesfull interrogation */ 595/* report a successful interrogation */
596/*************************************/ 596/*************************************/
597static int interrogate_success(isdn_ctrl *ic, struct call_struc *cs) 597static int interrogate_success(isdn_ctrl *ic, struct call_struc *cs)
598{ char *src = ic->parm.dss1_io.data; 598{ char *src = ic->parm.dss1_io.data;
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index eb41aba3dd..8a45715dd4 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -65,23 +65,22 @@ static struct usb_device_id gigaset_table [] = {
65 65
66MODULE_DEVICE_TABLE(usb, gigaset_table); 66MODULE_DEVICE_TABLE(usb, gigaset_table);
67 67
68/*======================= local function prototypes =============================*/ 68/*======================= local function prototypes ==========================*/
69 69
70/* This function is called if a new device is connected to the USB port. It 70/* function called if a new device belonging to this driver is connected */
71 * checks whether this new device belongs to this driver.
72 */
73static int gigaset_probe(struct usb_interface *interface, 71static int gigaset_probe(struct usb_interface *interface,
74 const struct usb_device_id *id); 72 const struct usb_device_id *id);
75 73
76/* Function will be called if the device is unplugged */ 74/* Function will be called if the device is unplugged */
77static void gigaset_disconnect(struct usb_interface *interface); 75static void gigaset_disconnect(struct usb_interface *interface);
78 76
79static void read_ctrl_callback(struct urb *, struct pt_regs *); 77static int atread_submit(struct cardstate *, int);
80static void stopurbs(struct bas_bc_state *); 78static void stopurbs(struct bas_bc_state *);
79static int req_submit(struct bc_state *, int, int, int);
81static int atwrite_submit(struct cardstate *, unsigned char *, int); 80static int atwrite_submit(struct cardstate *, unsigned char *, int);
82static int start_cbsend(struct cardstate *); 81static int start_cbsend(struct cardstate *);
83 82
84/*==============================================================================*/ 83/*============================================================================*/
85 84
86struct bas_cardstate { 85struct bas_cardstate {
87 struct usb_device *udev; /* USB device pointer */ 86 struct usb_device *udev; /* USB device pointer */
@@ -91,6 +90,7 @@ struct bas_cardstate {
91 struct urb *urb_ctrl; /* control pipe default URB */ 90 struct urb *urb_ctrl; /* control pipe default URB */
92 struct usb_ctrlrequest dr_ctrl; 91 struct usb_ctrlrequest dr_ctrl;
93 struct timer_list timer_ctrl; /* control request timeout */ 92 struct timer_list timer_ctrl; /* control request timeout */
93 int retry_ctrl;
94 94
95 struct timer_list timer_atrdy; /* AT command ready timeout */ 95 struct timer_list timer_atrdy; /* AT command ready timeout */
96 struct urb *urb_cmd_out; /* for sending AT commands */ 96 struct urb *urb_cmd_out; /* for sending AT commands */
@@ -307,6 +307,7 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
307 * hang up any existing connection because of an unrecoverable error 307 * hang up any existing connection because of an unrecoverable error
308 * This function may be called from any context and takes care of scheduling 308 * This function may be called from any context and takes care of scheduling
309 * the necessary actions for execution outside of interrupt context. 309 * the necessary actions for execution outside of interrupt context.
310 * cs->lock must not be held.
310 * argument: 311 * argument:
311 * B channel control structure 312 * B channel control structure
312 */ 313 */
@@ -325,14 +326,17 @@ static inline void error_hangup(struct bc_state *bcs)
325 326
326/* error_reset 327/* error_reset
327 * reset Gigaset device because of an unrecoverable error 328 * reset Gigaset device because of an unrecoverable error
328 * This function may be called from any context, and should take care of 329 * This function may be called from any context, and takes care of
329 * scheduling the necessary actions for execution outside of interrupt context. 330 * scheduling the necessary actions for execution outside of interrupt context.
330 * Right now, it just generates a kernel message calling for help. 331 * cs->lock must not be held.
331 * argument: 332 * argument:
332 * controller state structure 333 * controller state structure
333 */ 334 */
334static inline void error_reset(struct cardstate *cs) 335static inline void error_reset(struct cardstate *cs)
335{ 336{
337 /* close AT command channel to recover (ignore errors) */
338 req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT);
339
336 //FIXME try to recover without bothering the user 340 //FIXME try to recover without bothering the user
337 dev_err(cs->dev, 341 dev_err(cs->dev,
338 "unrecoverable error - please disconnect Gigaset base to reset\n"); 342 "unrecoverable error - please disconnect Gigaset base to reset\n");
@@ -403,14 +407,30 @@ static void cmd_in_timeout(unsigned long data)
403{ 407{
404 struct cardstate *cs = (struct cardstate *) data; 408 struct cardstate *cs = (struct cardstate *) data;
405 struct bas_cardstate *ucs = cs->hw.bas; 409 struct bas_cardstate *ucs = cs->hw.bas;
410 int rc;
406 411
407 if (!ucs->rcvbuf_size) { 412 if (!ucs->rcvbuf_size) {
408 gig_dbg(DEBUG_USBREQ, "%s: no receive in progress", __func__); 413 gig_dbg(DEBUG_USBREQ, "%s: no receive in progress", __func__);
409 return; 414 return;
410 } 415 }
411 416
412 dev_err(cs->dev, "timeout reading AT response\n"); 417 if (ucs->retry_cmd_in++ < BAS_RETRY) {
413 error_reset(cs); //FIXME retry? 418 dev_notice(cs->dev, "control read: timeout, retry %d\n",
419 ucs->retry_cmd_in);
420 rc = atread_submit(cs, BAS_TIMEOUT);
421 if (rc >= 0 || rc == -ENODEV)
422 /* resubmitted or disconnected */
423 /* - bypass regular exit block */
424 return;
425 } else {
426 dev_err(cs->dev,
427 "control read: timeout, giving up after %d tries\n",
428 ucs->retry_cmd_in);
429 }
430 kfree(ucs->rcvbuf);
431 ucs->rcvbuf = NULL;
432 ucs->rcvbuf_size = 0;
433 error_reset(cs);
414} 434}
415 435
416/* set/clear bits in base connection state, return previous state 436/* set/clear bits in base connection state, return previous state
@@ -428,6 +448,96 @@ inline static int update_basstate(struct bas_cardstate *ucs,
428 return state; 448 return state;
429} 449}
430 450
451/* read_ctrl_callback
452 * USB completion handler for control pipe input
453 * called by the USB subsystem in interrupt context
454 * parameter:
455 * urb USB request block
456 * urb->context = inbuf structure for controller state
457 */
458static void read_ctrl_callback(struct urb *urb, struct pt_regs *regs)
459{
460 struct inbuf_t *inbuf = urb->context;
461 struct cardstate *cs = inbuf->cs;
462 struct bas_cardstate *ucs = cs->hw.bas;
463 int have_data = 0;
464 unsigned numbytes;
465 int rc;
466
467 update_basstate(ucs, 0, BS_ATRDPEND);
468
469 if (!ucs->rcvbuf_size) {
470 dev_warn(cs->dev, "%s: no receive in progress\n", __func__);
471 return;
472 }
473
474 del_timer(&ucs->timer_cmd_in);
475
476 switch (urb->status) {
477 case 0: /* normal completion */
478 numbytes = urb->actual_length;
479 if (unlikely(numbytes != ucs->rcvbuf_size)) {
480 dev_warn(cs->dev,
481 "control read: received %d chars, expected %d\n",
482 numbytes, ucs->rcvbuf_size);
483 if (numbytes > ucs->rcvbuf_size)
484 numbytes = ucs->rcvbuf_size;
485 }
486
487 /* copy received bytes to inbuf */
488 have_data = gigaset_fill_inbuf(inbuf, ucs->rcvbuf, numbytes);
489
490 if (unlikely(numbytes < ucs->rcvbuf_size)) {
491 /* incomplete - resubmit for remaining bytes */
492 ucs->rcvbuf_size -= numbytes;
493 ucs->retry_cmd_in = 0;
494 rc = atread_submit(cs, BAS_TIMEOUT);
495 if (rc >= 0 || rc == -ENODEV)
496 /* resubmitted or disconnected */
497 /* - bypass regular exit block */
498 return;
499 error_reset(cs);
500 }
501 break;
502
503 case -ENOENT: /* cancelled */
504 case -ECONNRESET: /* cancelled (async) */
505 case -EINPROGRESS: /* pending */
506 case -ENODEV: /* device removed */
507 case -ESHUTDOWN: /* device shut down */
508 /* no action necessary */
509 gig_dbg(DEBUG_USBREQ, "%s: %s",
510 __func__, get_usb_statmsg(urb->status));
511 break;
512
513 default: /* severe trouble */
514 dev_warn(cs->dev, "control read: %s\n",
515 get_usb_statmsg(urb->status));
516 if (ucs->retry_cmd_in++ < BAS_RETRY) {
517 dev_notice(cs->dev, "control read: retry %d\n",
518 ucs->retry_cmd_in);
519 rc = atread_submit(cs, BAS_TIMEOUT);
520 if (rc >= 0 || rc == -ENODEV)
521 /* resubmitted or disconnected */
522 /* - bypass regular exit block */
523 return;
524 } else {
525 dev_err(cs->dev,
526 "control read: giving up after %d tries\n",
527 ucs->retry_cmd_in);
528 }
529 error_reset(cs);
530 }
531
532 kfree(ucs->rcvbuf);
533 ucs->rcvbuf = NULL;
534 ucs->rcvbuf_size = 0;
535 if (have_data) {
536 gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
537 gigaset_schedule_event(cs);
538 }
539}
540
431/* atread_submit 541/* atread_submit
432 * submit an HD_READ_ATMESSAGE command URB and optionally start a timeout 542 * submit an HD_READ_ATMESSAGE command URB and optionally start a timeout
433 * parameters: 543 * parameters:
@@ -466,7 +576,7 @@ static int atread_submit(struct cardstate *cs, int timeout)
466 if ((ret = usb_submit_urb(ucs->urb_cmd_in, SLAB_ATOMIC)) != 0) { 576 if ((ret = usb_submit_urb(ucs->urb_cmd_in, SLAB_ATOMIC)) != 0) {
467 update_basstate(ucs, 0, BS_ATRDPEND); 577 update_basstate(ucs, 0, BS_ATRDPEND);
468 dev_err(cs->dev, "could not submit HD_READ_ATMESSAGE: %s\n", 578 dev_err(cs->dev, "could not submit HD_READ_ATMESSAGE: %s\n",
469 get_usb_statmsg(ret)); 579 get_usb_rcmsg(ret));
470 return ret; 580 return ret;
471 } 581 }
472 582
@@ -611,9 +721,12 @@ static void read_int_callback(struct urb *urb, struct pt_regs *regs)
611 kfree(ucs->rcvbuf); 721 kfree(ucs->rcvbuf);
612 ucs->rcvbuf = NULL; 722 ucs->rcvbuf = NULL;
613 ucs->rcvbuf_size = 0; 723 ucs->rcvbuf_size = 0;
614 if (rc != -ENODEV) 724 if (rc != -ENODEV) {
615 //FIXME corrective action? 725 //FIXME corrective action?
726 spin_unlock_irqrestore(&cs->lock, flags);
616 error_reset(cs); 727 error_reset(cs);
728 break;
729 }
617 } 730 }
618 spin_unlock_irqrestore(&cs->lock, flags); 731 spin_unlock_irqrestore(&cs->lock, flags);
619 break; 732 break;
@@ -643,97 +756,6 @@ resubmit:
643 } 756 }
644} 757}
645 758
646/* read_ctrl_callback
647 * USB completion handler for control pipe input
648 * called by the USB subsystem in interrupt context
649 * parameter:
650 * urb USB request block
651 * urb->context = inbuf structure for controller state
652 */
653static void read_ctrl_callback(struct urb *urb, struct pt_regs *regs)
654{
655 struct inbuf_t *inbuf = urb->context;
656 struct cardstate *cs = inbuf->cs;
657 struct bas_cardstate *ucs = cs->hw.bas;
658 int have_data = 0;
659 unsigned numbytes;
660 int rc;
661
662 update_basstate(ucs, 0, BS_ATRDPEND);
663
664 if (!ucs->rcvbuf_size) {
665 dev_warn(cs->dev, "%s: no receive in progress\n", __func__);
666 return;
667 }
668
669 del_timer(&ucs->timer_cmd_in);
670
671 switch (urb->status) {
672 case 0: /* normal completion */
673 numbytes = urb->actual_length;
674 if (unlikely(numbytes == 0)) {
675 dev_warn(cs->dev,
676 "control read: empty block received\n");
677 goto retry;
678 }
679 if (unlikely(numbytes != ucs->rcvbuf_size)) {
680 dev_warn(cs->dev,
681 "control read: received %d chars, expected %d\n",
682 numbytes, ucs->rcvbuf_size);
683 if (numbytes > ucs->rcvbuf_size)
684 numbytes = ucs->rcvbuf_size;
685 }
686
687 /* copy received bytes to inbuf */
688 have_data = gigaset_fill_inbuf(inbuf, ucs->rcvbuf, numbytes);
689
690 if (unlikely(numbytes < ucs->rcvbuf_size)) {
691 /* incomplete - resubmit for remaining bytes */
692 ucs->rcvbuf_size -= numbytes;
693 ucs->retry_cmd_in = 0;
694 goto retry;
695 }
696 break;
697
698 case -ENOENT: /* cancelled */
699 case -ECONNRESET: /* cancelled (async) */
700 case -EINPROGRESS: /* pending */
701 case -ENODEV: /* device removed */
702 case -ESHUTDOWN: /* device shut down */
703 /* no action necessary */
704 gig_dbg(DEBUG_USBREQ, "%s: %s",
705 __func__, get_usb_statmsg(urb->status));
706 break;
707
708 default: /* severe trouble */
709 dev_warn(cs->dev, "control read: %s\n",
710 get_usb_statmsg(urb->status));
711 retry:
712 if (ucs->retry_cmd_in++ < BAS_RETRY) {
713 dev_notice(cs->dev, "control read: retry %d\n",
714 ucs->retry_cmd_in);
715 rc = atread_submit(cs, BAS_TIMEOUT);
716 if (rc >= 0 || rc == -ENODEV)
717 /* resubmitted or disconnected */
718 /* - bypass regular exit block */
719 return;
720 } else {
721 dev_err(cs->dev,
722 "control read: giving up after %d tries\n",
723 ucs->retry_cmd_in);
724 }
725 error_reset(cs);
726 }
727
728 kfree(ucs->rcvbuf);
729 ucs->rcvbuf = NULL;
730 ucs->rcvbuf_size = 0;
731 if (have_data) {
732 gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
733 gigaset_schedule_event(cs);
734 }
735}
736
737/* read_iso_callback 759/* read_iso_callback
738 * USB completion handler for B channel isochronous input 760 * USB completion handler for B channel isochronous input
739 * called by the USB subsystem in interrupt context 761 * called by the USB subsystem in interrupt context
@@ -1378,6 +1400,7 @@ static void req_timeout(unsigned long data)
1378 case HD_CLOSE_B1CHANNEL: 1400 case HD_CLOSE_B1CHANNEL:
1379 dev_err(bcs->cs->dev, "timeout closing channel %d\n", 1401 dev_err(bcs->cs->dev, "timeout closing channel %d\n",
1380 bcs->channel + 1); 1402 bcs->channel + 1);
1403 error_reset(bcs->cs);
1381 break; 1404 break;
1382 1405
1383 default: 1406 default:
@@ -1396,22 +1419,61 @@ static void req_timeout(unsigned long data)
1396static void write_ctrl_callback(struct urb *urb, struct pt_regs *regs) 1419static void write_ctrl_callback(struct urb *urb, struct pt_regs *regs)
1397{ 1420{
1398 struct bas_cardstate *ucs = urb->context; 1421 struct bas_cardstate *ucs = urb->context;
1422 int rc;
1399 unsigned long flags; 1423 unsigned long flags;
1400 1424
1401 spin_lock_irqsave(&ucs->lock, flags); 1425 /* check status */
1402 if (urb->status && ucs->pending) { 1426 switch (urb->status) {
1403 dev_err(&ucs->interface->dev, 1427 case 0: /* normal completion */
1404 "control request 0x%02x failed: %s\n", 1428 spin_lock_irqsave(&ucs->lock, flags);
1405 ucs->pending, get_usb_statmsg(urb->status)); 1429 switch (ucs->pending) {
1406 del_timer(&ucs->timer_ctrl); 1430 case HD_DEVICE_INIT_ACK: /* no reply expected */
1407 ucs->pending = 0; 1431 del_timer(&ucs->timer_ctrl);
1408 } 1432 ucs->pending = 0;
1409 /* individual handling of specific request types */ 1433 break;
1410 switch (ucs->pending) { 1434 }
1411 case HD_DEVICE_INIT_ACK: /* no reply expected */ 1435 spin_unlock_irqrestore(&ucs->lock, flags);
1412 ucs->pending = 0; 1436 return;
1437
1438 case -ENOENT: /* cancelled */
1439 case -ECONNRESET: /* cancelled (async) */
1440 case -EINPROGRESS: /* pending */
1441 case -ENODEV: /* device removed */
1442 case -ESHUTDOWN: /* device shut down */
1443 /* ignore silently */
1444 gig_dbg(DEBUG_USBREQ, "%s: %s",
1445 __func__, get_usb_statmsg(urb->status));
1413 break; 1446 break;
1447
1448 default: /* any failure */
1449 if (++ucs->retry_ctrl > BAS_RETRY) {
1450 dev_err(&ucs->interface->dev,
1451 "control request 0x%02x failed: %s\n",
1452 ucs->dr_ctrl.bRequest,
1453 get_usb_statmsg(urb->status));
1454 break; /* give up */
1455 }
1456 dev_notice(&ucs->interface->dev,
1457 "control request 0x%02x: %s, retry %d\n",
1458 ucs->dr_ctrl.bRequest, get_usb_statmsg(urb->status),
1459 ucs->retry_ctrl);
1460 /* urb->dev is clobbered by USB subsystem */
1461 urb->dev = ucs->udev;
1462 rc = usb_submit_urb(urb, SLAB_ATOMIC);
1463 if (unlikely(rc)) {
1464 dev_err(&ucs->interface->dev,
1465 "could not resubmit request 0x%02x: %s\n",
1466 ucs->dr_ctrl.bRequest, get_usb_rcmsg(rc));
1467 break;
1468 }
1469 /* resubmitted */
1470 return;
1414 } 1471 }
1472
1473 /* failed, clear pending request */
1474 spin_lock_irqsave(&ucs->lock, flags);
1475 del_timer(&ucs->timer_ctrl);
1476 ucs->pending = 0;
1415 spin_unlock_irqrestore(&ucs->lock, flags); 1477 spin_unlock_irqrestore(&ucs->lock, flags);
1416} 1478}
1417 1479
@@ -1455,9 +1517,11 @@ static int req_submit(struct bc_state *bcs, int req, int val, int timeout)
1455 usb_sndctrlpipe(ucs->udev, 0), 1517 usb_sndctrlpipe(ucs->udev, 0),
1456 (unsigned char*) &ucs->dr_ctrl, NULL, 0, 1518 (unsigned char*) &ucs->dr_ctrl, NULL, 0,
1457 write_ctrl_callback, ucs); 1519 write_ctrl_callback, ucs);
1458 if ((ret = usb_submit_urb(ucs->urb_ctrl, SLAB_ATOMIC)) != 0) { 1520 ucs->retry_ctrl = 0;
1521 ret = usb_submit_urb(ucs->urb_ctrl, SLAB_ATOMIC);
1522 if (unlikely(ret)) {
1459 dev_err(bcs->cs->dev, "could not submit request 0x%02x: %s\n", 1523 dev_err(bcs->cs->dev, "could not submit request 0x%02x: %s\n",
1460 req, get_usb_statmsg(ret)); 1524 req, get_usb_rcmsg(ret));
1461 spin_unlock_irqrestore(&ucs->lock, flags); 1525 spin_unlock_irqrestore(&ucs->lock, flags);
1462 return ret; 1526 return ret;
1463 } 1527 }
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index acb7e26567..2a56bf33a6 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -981,7 +981,7 @@ exit:
981EXPORT_SYMBOL_GPL(gigaset_stop); 981EXPORT_SYMBOL_GPL(gigaset_stop);
982 982
983static LIST_HEAD(drivers); 983static LIST_HEAD(drivers);
984static spinlock_t driver_lock = SPIN_LOCK_UNLOCKED; 984static DEFINE_SPINLOCK(driver_lock);
985 985
986struct cardstate *gigaset_get_cs_by_id(int id) 986struct cardstate *gigaset_get_cs_by_id(int id)
987{ 987{
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
index 18e05c09b7..44f02dbd11 100644
--- a/drivers/isdn/gigaset/ev-layer.c
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -1262,7 +1262,8 @@ static void do_action(int action, struct cardstate *cs,
1262 break; 1262 break;
1263 case ACT_HUPMODEM: 1263 case ACT_HUPMODEM:
1264 /* send "+++" (hangup in unimodem mode) */ 1264 /* send "+++" (hangup in unimodem mode) */
1265 cs->ops->write_cmd(cs, "+++", 3, NULL); 1265 if (cs->connected)
1266 cs->ops->write_cmd(cs, "+++", 3, NULL);
1266 break; 1267 break;
1267 case ACT_RING: 1268 case ACT_RING:
1268 /* get fresh AT state structure for new CID */ 1269 /* get fresh AT state structure for new CID */
@@ -1294,7 +1295,6 @@ static void do_action(int action, struct cardstate *cs,
1294 break; 1295 break;
1295 case ACT_ICALL: 1296 case ACT_ICALL:
1296 handle_icall(cs, bcs, p_at_state); 1297 handle_icall(cs, bcs, p_at_state);
1297 at_state = *p_at_state;
1298 break; 1298 break;
1299 case ACT_FAILSDOWN: 1299 case ACT_FAILSDOWN:
1300 dev_warn(cs->dev, "Could not shut down the device.\n"); 1300 dev_warn(cs->dev, "Could not shut down the device.\n");
@@ -1334,10 +1334,8 @@ static void do_action(int action, struct cardstate *cs,
1334 */ 1334 */
1335 at_state->pending_commands |= PC_DLE0; 1335 at_state->pending_commands |= PC_DLE0;
1336 atomic_set(&cs->commands_pending, 1); 1336 atomic_set(&cs->commands_pending, 1);
1337 } else { 1337 } else
1338 disconnect(p_at_state); 1338 disconnect(p_at_state);
1339 at_state = *p_at_state;
1340 }
1341 break; 1339 break;
1342 case ACT_FAKEDLE0: 1340 case ACT_FAKEDLE0:
1343 at_state->int_var[VAR_ZDLE] = 0; 1341 at_state->int_var[VAR_ZDLE] = 0;
@@ -1354,10 +1352,8 @@ static void do_action(int action, struct cardstate *cs,
1354 at_state->cid = -1; 1352 at_state->cid = -1;
1355 if (bcs && cs->onechannel) 1353 if (bcs && cs->onechannel)
1356 at_state->pending_commands |= PC_DLE0; 1354 at_state->pending_commands |= PC_DLE0;
1357 else { 1355 else
1358 disconnect(p_at_state); 1356 disconnect(p_at_state);
1359 at_state = *p_at_state;
1360 }
1361 schedule_init(cs, MS_RECOVER); 1357 schedule_init(cs, MS_RECOVER);
1362 break; 1358 break;
1363 case ACT_FAILDLE0: 1359 case ACT_FAILDLE0:
@@ -1410,7 +1406,6 @@ static void do_action(int action, struct cardstate *cs,
1410 1406
1411 case ACT_ABORTACCEPT: /* hangup/error/timeout during ICALL processing */ 1407 case ACT_ABORTACCEPT: /* hangup/error/timeout during ICALL processing */
1412 disconnect(p_at_state); 1408 disconnect(p_at_state);
1413 at_state = *p_at_state;
1414 break; 1409 break;
1415 1410
1416 case ACT_ABORTDIAL: /* error/timeout during dial preparation */ 1411 case ACT_ABORTDIAL: /* error/timeout during dial preparation */
diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c
index abecabf8c2..aacbf0d14b 100644
--- a/drivers/isdn/hisax/q931.c
+++ b/drivers/isdn/hisax/q931.c
@@ -1402,12 +1402,12 @@ dlogframe(struct IsdnCardState *cs, struct sk_buff *skb, int dir)
1402 } 1402 }
1403 /* No, locate it in the table */ 1403 /* No, locate it in the table */
1404 if (cset == 0) { 1404 if (cset == 0) {
1405 for (i = 0; i < IESIZE; i++) 1405 for (i = 0; i < IESIZE_NI1; i++)
1406 if (*buf == ielist_ni1[i].nr) 1406 if (*buf == ielist_ni1[i].nr)
1407 break; 1407 break;
1408 1408
1409 /* When not found, give appropriate msg */ 1409 /* When not found, give appropriate msg */
1410 if (i != IESIZE) { 1410 if (i != IESIZE_NI1) {
1411 dp += sprintf(dp, " %s\n", ielist_ni1[i].descr); 1411 dp += sprintf(dp, " %s\n", ielist_ni1[i].descr);
1412 dp += ielist_ni1[i].f(dp, buf); 1412 dp += ielist_ni1[i].f(dp, buf);
1413 } else 1413 } else
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 81accdf351..b26e509ec7 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -933,7 +933,7 @@ isdn_readbchan_tty(int di, int channel, struct tty_struct *tty, int cisco_hack)
933 count_put = count_pull; 933 count_put = count_pull;
934 if(count_put > 1) 934 if(count_put > 1)
935 tty_insert_flip_string(tty, skb->data, count_put - 1); 935 tty_insert_flip_string(tty, skb->data, count_put - 1);
936 last = skb->data[count_put] - 1; 936 last = skb->data[count_put - 1];
937 len -= count_put; 937 len -= count_put;
938#ifdef CONFIG_ISDN_AUDIO 938#ifdef CONFIG_ISDN_AUDIO
939 } 939 }
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index 2ac90242d2..433389daed 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -82,7 +82,7 @@ isdn_tty_try_read(modem_info * info, struct sk_buff *skb)
82 int l = skb->len; 82 int l = skb->len;
83 unsigned char *dp = skb->data; 83 unsigned char *dp = skb->data;
84 while (--l) { 84 while (--l) {
85 if (*skb->data == DLE) 85 if (*dp == DLE)
86 tty_insert_flip_char(tty, DLE, 0); 86 tty_insert_flip_char(tty, DLE, 0);
87 tty_insert_flip_char(tty, *dp++, 0); 87 tty_insert_flip_char(tty, *dp++, 0);
88 } 88 }
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index fe6541326c..9b015f9af3 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -18,7 +18,7 @@
18#include <linux/leds.h> 18#include <linux/leds.h>
19#include "leds.h" 19#include "leds.h"
20 20
21rwlock_t leds_list_lock = RW_LOCK_UNLOCKED; 21DEFINE_RWLOCK(leds_list_lock);
22LIST_HEAD(leds_list); 22LIST_HEAD(leds_list);
23 23
24EXPORT_SYMBOL_GPL(leds_list); 24EXPORT_SYMBOL_GPL(leds_list);
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 5e2cd8be11..1b1ce65239 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -26,7 +26,7 @@
26/* 26/*
27 * Nests outside led_cdev->trigger_lock 27 * Nests outside led_cdev->trigger_lock
28 */ 28 */
29static rwlock_t triggers_list_lock = RW_LOCK_UNLOCKED; 29static DEFINE_RWLOCK(triggers_list_lock);
30static LIST_HEAD(trigger_list); 30static LIST_HEAD(trigger_list);
31 31
32ssize_t led_trigger_store(struct class_device *dev, const char *buf, 32ssize_t led_trigger_store(struct class_device *dev, const char *buf,
diff --git a/drivers/macintosh/Makefile b/drivers/macintosh/Makefile
index 8972e53d2d..45a268f804 100644
--- a/drivers/macintosh/Makefile
+++ b/drivers/macintosh/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_MAC_EMUMOUSEBTN) += mac_hid.o
11obj-$(CONFIG_INPUT_ADBHID) += adbhid.o 11obj-$(CONFIG_INPUT_ADBHID) += adbhid.o
12obj-$(CONFIG_ANSLCD) += ans-lcd.o 12obj-$(CONFIG_ANSLCD) += ans-lcd.o
13 13
14obj-$(CONFIG_ADB_PMU) += via-pmu.o 14obj-$(CONFIG_ADB_PMU) += via-pmu.o via-pmu-event.o
15obj-$(CONFIG_PMAC_BACKLIGHT) += via-pmu-backlight.o 15obj-$(CONFIG_PMAC_BACKLIGHT) += via-pmu-backlight.o
16obj-$(CONFIG_ADB_CUDA) += via-cuda.o 16obj-$(CONFIG_ADB_CUDA) += via-cuda.o
17obj-$(CONFIG_PMAC_APM_EMU) += apm_emu.o 17obj-$(CONFIG_PMAC_APM_EMU) += apm_emu.o
diff --git a/drivers/macintosh/adbhid.c b/drivers/macintosh/adbhid.c
index c26e1236b2..cbfbbe2b15 100644
--- a/drivers/macintosh/adbhid.c
+++ b/drivers/macintosh/adbhid.c
@@ -179,7 +179,7 @@ u8 adb_to_linux_keycodes[128] = {
179 /* 0x65 */ KEY_F9, /* 67 */ 179 /* 0x65 */ KEY_F9, /* 67 */
180 /* 0x66 */ KEY_HANJA, /* 123 */ 180 /* 0x66 */ KEY_HANJA, /* 123 */
181 /* 0x67 */ KEY_F11, /* 87 */ 181 /* 0x67 */ KEY_F11, /* 87 */
182 /* 0x68 */ KEY_HANGUEL, /* 122 */ 182 /* 0x68 */ KEY_HANGEUL, /* 122 */
183 /* 0x69 */ KEY_SYSRQ, /* 99 */ 183 /* 0x69 */ KEY_SYSRQ, /* 99 */
184 /* 0x6a */ 0, 184 /* 0x6a */ 0,
185 /* 0x6b */ KEY_SCROLLLOCK, /* 70 */ 185 /* 0x6b */ KEY_SCROLLLOCK, /* 70 */
diff --git a/drivers/macintosh/via-pmu-event.c b/drivers/macintosh/via-pmu-event.c
new file mode 100644
index 0000000000..25cd565423
--- /dev/null
+++ b/drivers/macintosh/via-pmu-event.c
@@ -0,0 +1,80 @@
1/*
2 * via-pmu event device for reporting some events that come through the PMU
3 *
4 * Copyright 2006 Johannes Berg <johannes@sipsolutions.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
15 * details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 *
21 */
22
23#include <linux/input.h>
24#include <linux/adb.h>
25#include <linux/pmu.h>
26#include "via-pmu-event.h"
27
28static struct input_dev *pmu_input_dev;
29
30static int __init via_pmu_event_init(void)
31{
32 int err;
33
34 /* do other models report button/lid status? */
35 if (pmu_get_model() != PMU_KEYLARGO_BASED)
36 return -ENODEV;
37
38 pmu_input_dev = input_allocate_device();
39 if (!pmu_input_dev)
40 return -ENOMEM;
41
42 pmu_input_dev->name = "PMU";
43 pmu_input_dev->id.bustype = BUS_HOST;
44 pmu_input_dev->id.vendor = 0x0001;
45 pmu_input_dev->id.product = 0x0001;
46 pmu_input_dev->id.version = 0x0100;
47
48 set_bit(EV_KEY, pmu_input_dev->evbit);
49 set_bit(EV_SW, pmu_input_dev->evbit);
50 set_bit(KEY_POWER, pmu_input_dev->keybit);
51 set_bit(SW_LID, pmu_input_dev->swbit);
52
53 err = input_register_device(pmu_input_dev);
54 if (err)
55 input_free_device(pmu_input_dev);
56 return err;
57}
58
59void via_pmu_event(int key, int down)
60{
61
62 if (unlikely(!pmu_input_dev))
63 return;
64
65 switch (key) {
66 case PMU_EVT_POWER:
67 input_report_key(pmu_input_dev, KEY_POWER, down);
68 break;
69 case PMU_EVT_LID:
70 input_report_switch(pmu_input_dev, SW_LID, down);
71 break;
72 default:
73 /* no such key handled */
74 return;
75 }
76
77 input_sync(pmu_input_dev);
78}
79
80late_initcall(via_pmu_event_init);
diff --git a/drivers/macintosh/via-pmu-event.h b/drivers/macintosh/via-pmu-event.h
new file mode 100644
index 0000000000..72c54de408
--- /dev/null
+++ b/drivers/macintosh/via-pmu-event.h
@@ -0,0 +1,8 @@
1#ifndef __VIA_PMU_EVENT_H
2#define __VIA_PMU_EVENT_H
3
4#define PMU_EVT_POWER 0
5#define PMU_EVT_LID 1
6extern void via_pmu_event(int key, int down);
7
8#endif /* __VIA_PMU_EVENT_H */
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 2a355ae595..1ab4f16c08 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -69,6 +69,8 @@
69#include <asm/open_pic.h> 69#include <asm/open_pic.h>
70#endif 70#endif
71 71
72#include "via-pmu-event.h"
73
72/* Some compile options */ 74/* Some compile options */
73#undef SUSPEND_USES_PMU 75#undef SUSPEND_USES_PMU
74#define DEBUG_SLEEP 76#define DEBUG_SLEEP
@@ -1427,6 +1429,12 @@ next:
1427 if (pmu_battery_count) 1429 if (pmu_battery_count)
1428 query_battery_state(); 1430 query_battery_state();
1429 pmu_pass_intr(data, len); 1431 pmu_pass_intr(data, len);
1432 /* len == 6 is probably a bad check. But how do I
1433 * know what PMU versions send what events here? */
1434 if (len == 6) {
1435 via_pmu_event(PMU_EVT_POWER, !!(data[1]&8));
1436 via_pmu_event(PMU_EVT_LID, data[1]&1);
1437 }
1430 } else { 1438 } else {
1431 pmu_pass_intr(data, len); 1439 pmu_pass_intr(data, len);
1432 } 1440 }
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index ac25a48362..bf869ed03e 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -90,7 +90,7 @@ config MD_RAID10
90 depends on BLK_DEV_MD && EXPERIMENTAL 90 depends on BLK_DEV_MD && EXPERIMENTAL
91 ---help--- 91 ---help---
92 RAID-10 provides a combination of striping (RAID-0) and 92 RAID-10 provides a combination of striping (RAID-0) and
93 mirroring (RAID-1) with easier configuration and more flexable 93 mirroring (RAID-1) with easier configuration and more flexible
94 layout. 94 layout.
95 Unlike RAID-0, but like RAID-1, RAID-10 requires all devices to 95 Unlike RAID-0, but like RAID-1, RAID-10 requires all devices to
96 be the same size (or at least, only as much as the smallest device 96 be the same size (or at least, only as much as the smallest device
@@ -104,8 +104,8 @@ config MD_RAID10
104 104
105 If unsure, say Y. 105 If unsure, say Y.
106 106
107config MD_RAID5 107config MD_RAID456
108 tristate "RAID-4/RAID-5 mode" 108 tristate "RAID-4/RAID-5/RAID-6 mode"
109 depends on BLK_DEV_MD 109 depends on BLK_DEV_MD
110 ---help--- 110 ---help---
111 A RAID-5 set of N drives with a capacity of C MB per drive provides 111 A RAID-5 set of N drives with a capacity of C MB per drive provides
@@ -116,20 +116,28 @@ config MD_RAID5
116 while a RAID-5 set distributes the parity across the drives in one 116 while a RAID-5 set distributes the parity across the drives in one
117 of the available parity distribution methods. 117 of the available parity distribution methods.
118 118
119 A RAID-6 set of N drives with a capacity of C MB per drive
120 provides the capacity of C * (N - 2) MB, and protects
121 against a failure of any two drives. For a given sector
122 (row) number, (N - 2) drives contain data sectors, and two
123 drives contains two independent redundancy syndromes. Like
124 RAID-5, RAID-6 distributes the syndromes across the drives
125 in one of the available parity distribution methods.
126
119 Information about Software RAID on Linux is contained in the 127 Information about Software RAID on Linux is contained in the
120 Software-RAID mini-HOWTO, available from 128 Software-RAID mini-HOWTO, available from
121 <http://www.tldp.org/docs.html#howto>. There you will also 129 <http://www.tldp.org/docs.html#howto>. There you will also
122 learn where to get the supporting user space utilities raidtools. 130 learn where to get the supporting user space utilities raidtools.
123 131
124 If you want to use such a RAID-4/RAID-5 set, say Y. To 132 If you want to use such a RAID-4/RAID-5/RAID-6 set, say Y. To
125 compile this code as a module, choose M here: the module 133 compile this code as a module, choose M here: the module
126 will be called raid5. 134 will be called raid456.
127 135
128 If unsure, say Y. 136 If unsure, say Y.
129 137
130config MD_RAID5_RESHAPE 138config MD_RAID5_RESHAPE
131 bool "Support adding drives to a raid-5 array (experimental)" 139 bool "Support adding drives to a raid-5 array (experimental)"
132 depends on MD_RAID5 && EXPERIMENTAL 140 depends on MD_RAID456 && EXPERIMENTAL
133 ---help--- 141 ---help---
134 A RAID-5 set can be expanded by adding extra drives. This 142 A RAID-5 set can be expanded by adding extra drives. This
135 requires "restriping" the array which means (almost) every 143 requires "restriping" the array which means (almost) every
@@ -139,7 +147,7 @@ config MD_RAID5_RESHAPE
139 is online. However it is still EXPERIMENTAL code. It should 147 is online. However it is still EXPERIMENTAL code. It should
140 work, but please be sure that you have backups. 148 work, but please be sure that you have backups.
141 149
142 You will need mdadm verion 2.4.1 or later to use this 150 You will need mdadm version 2.4.1 or later to use this
143 feature safely. During the early stage of reshape there is 151 feature safely. During the early stage of reshape there is
144 a critical section where live data is being over-written. A 152 a critical section where live data is being over-written. A
145 crash during this time needs extra care for recovery. The 153 crash during this time needs extra care for recovery. The
@@ -154,28 +162,6 @@ config MD_RAID5_RESHAPE
154 There should be enough spares already present to make the new 162 There should be enough spares already present to make the new
155 array workable. 163 array workable.
156 164
157config MD_RAID6
158 tristate "RAID-6 mode"
159 depends on BLK_DEV_MD
160 ---help---
161 A RAID-6 set of N drives with a capacity of C MB per drive
162 provides the capacity of C * (N - 2) MB, and protects
163 against a failure of any two drives. For a given sector
164 (row) number, (N - 2) drives contain data sectors, and two
165 drives contains two independent redundancy syndromes. Like
166 RAID-5, RAID-6 distributes the syndromes across the drives
167 in one of the available parity distribution methods.
168
169 RAID-6 requires mdadm-1.5.0 or later, available at:
170
171 ftp://ftp.kernel.org/pub/linux/utils/raid/mdadm/
172
173 If you want to use such a RAID-6 set, say Y. To compile
174 this code as a module, choose M here: the module will be
175 called raid6.
176
177 If unsure, say Y.
178
179config MD_MULTIPATH 165config MD_MULTIPATH
180 tristate "Multipath I/O support" 166 tristate "Multipath I/O support"
181 depends on BLK_DEV_MD 167 depends on BLK_DEV_MD
@@ -235,7 +221,7 @@ config DM_SNAPSHOT
235 tristate "Snapshot target (EXPERIMENTAL)" 221 tristate "Snapshot target (EXPERIMENTAL)"
236 depends on BLK_DEV_DM && EXPERIMENTAL 222 depends on BLK_DEV_DM && EXPERIMENTAL
237 ---help--- 223 ---help---
238 Allow volume managers to take writeable snapshots of a device. 224 Allow volume managers to take writable snapshots of a device.
239 225
240config DM_MIRROR 226config DM_MIRROR
241 tristate "Mirror target (EXPERIMENTAL)" 227 tristate "Mirror target (EXPERIMENTAL)"
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index d3efedf6a6..34957a68d9 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -8,7 +8,7 @@ dm-multipath-objs := dm-hw-handler.o dm-path-selector.o dm-mpath.o
8dm-snapshot-objs := dm-snap.o dm-exception-store.o 8dm-snapshot-objs := dm-snap.o dm-exception-store.o
9dm-mirror-objs := dm-log.o dm-raid1.o 9dm-mirror-objs := dm-log.o dm-raid1.o
10md-mod-objs := md.o bitmap.o 10md-mod-objs := md.o bitmap.o
11raid6-objs := raid6main.o raid6algos.o raid6recov.o raid6tables.o \ 11raid456-objs := raid5.o raid6algos.o raid6recov.o raid6tables.o \
12 raid6int1.o raid6int2.o raid6int4.o \ 12 raid6int1.o raid6int2.o raid6int4.o \
13 raid6int8.o raid6int16.o raid6int32.o \ 13 raid6int8.o raid6int16.o raid6int32.o \
14 raid6altivec1.o raid6altivec2.o raid6altivec4.o \ 14 raid6altivec1.o raid6altivec2.o raid6altivec4.o \
@@ -25,8 +25,7 @@ obj-$(CONFIG_MD_LINEAR) += linear.o
25obj-$(CONFIG_MD_RAID0) += raid0.o 25obj-$(CONFIG_MD_RAID0) += raid0.o
26obj-$(CONFIG_MD_RAID1) += raid1.o 26obj-$(CONFIG_MD_RAID1) += raid1.o
27obj-$(CONFIG_MD_RAID10) += raid10.o 27obj-$(CONFIG_MD_RAID10) += raid10.o
28obj-$(CONFIG_MD_RAID5) += raid5.o xor.o 28obj-$(CONFIG_MD_RAID456) += raid456.o xor.o
29obj-$(CONFIG_MD_RAID6) += raid6.o xor.o
30obj-$(CONFIG_MD_MULTIPATH) += multipath.o 29obj-$(CONFIG_MD_MULTIPATH) += multipath.o
31obj-$(CONFIG_MD_FAULTY) += faulty.o 30obj-$(CONFIG_MD_FAULTY) += faulty.o
32obj-$(CONFIG_BLK_DEV_MD) += md-mod.o 31obj-$(CONFIG_BLK_DEV_MD) += md-mod.o
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index f8ffaee20f..ebbd2d8562 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -7,7 +7,6 @@
7 * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.: 7 * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.:
8 * - added disk storage for bitmap 8 * - added disk storage for bitmap
9 * - changes to allow various bitmap chunk sizes 9 * - changes to allow various bitmap chunk sizes
10 * - added bitmap daemon (to asynchronously clear bitmap bits from disk)
11 */ 10 */
12 11
13/* 12/*
@@ -15,9 +14,6 @@
15 * 14 *
16 * flush after percent set rather than just time based. (maybe both). 15 * flush after percent set rather than just time based. (maybe both).
17 * wait if count gets too high, wake when it drops to half. 16 * wait if count gets too high, wake when it drops to half.
18 * allow bitmap to be mirrored with superblock (before or after...)
19 * allow hot-add to re-instate a current device.
20 * allow hot-add of bitmap after quiessing device
21 */ 17 */
22 18
23#include <linux/module.h> 19#include <linux/module.h>
@@ -73,24 +69,6 @@ static inline char * bmname(struct bitmap *bitmap)
73 69
74 70
75/* 71/*
76 * test if the bitmap is active
77 */
78int bitmap_active(struct bitmap *bitmap)
79{
80 unsigned long flags;
81 int res = 0;
82
83 if (!bitmap)
84 return res;
85 spin_lock_irqsave(&bitmap->lock, flags);
86 res = bitmap->flags & BITMAP_ACTIVE;
87 spin_unlock_irqrestore(&bitmap->lock, flags);
88 return res;
89}
90
91#define WRITE_POOL_SIZE 256
92
93/*
94 * just a placeholder - calls kmalloc for bitmap pages 72 * just a placeholder - calls kmalloc for bitmap pages
95 */ 73 */
96static unsigned char *bitmap_alloc_page(struct bitmap *bitmap) 74static unsigned char *bitmap_alloc_page(struct bitmap *bitmap)
@@ -269,6 +247,8 @@ static struct page *read_sb_page(mddev_t *mddev, long offset, unsigned long inde
269 247
270 if (sync_page_io(rdev->bdev, target, PAGE_SIZE, page, READ)) { 248 if (sync_page_io(rdev->bdev, target, PAGE_SIZE, page, READ)) {
271 page->index = index; 249 page->index = index;
250 attach_page_buffers(page, NULL); /* so that free_buffer will
251 * quietly no-op */
272 return page; 252 return page;
273 } 253 }
274 } 254 }
@@ -300,77 +280,132 @@ static int write_sb_page(mddev_t *mddev, long offset, struct page *page, int wai
300 */ 280 */
301static int write_page(struct bitmap *bitmap, struct page *page, int wait) 281static int write_page(struct bitmap *bitmap, struct page *page, int wait)
302{ 282{
303 int ret = -ENOMEM; 283 struct buffer_head *bh;
304 284
305 if (bitmap->file == NULL) 285 if (bitmap->file == NULL)
306 return write_sb_page(bitmap->mddev, bitmap->offset, page, wait); 286 return write_sb_page(bitmap->mddev, bitmap->offset, page, wait);
307 287
308 flush_dcache_page(page); /* make sure visible to anyone reading the file */ 288 bh = page_buffers(page);
309 289
310 if (wait) 290 while (bh && bh->b_blocknr) {
311 lock_page(page); 291 atomic_inc(&bitmap->pending_writes);
312 else { 292 set_buffer_locked(bh);
313 if (TestSetPageLocked(page)) 293 set_buffer_mapped(bh);
314 return -EAGAIN; /* already locked */ 294 submit_bh(WRITE, bh);
315 if (PageWriteback(page)) { 295 bh = bh->b_this_page;
316 unlock_page(page);
317 return -EAGAIN;
318 }
319 } 296 }
320 297
321 ret = page->mapping->a_ops->prepare_write(bitmap->file, page, 0, PAGE_SIZE); 298 if (wait) {
322 if (!ret) 299 wait_event(bitmap->write_wait,
323 ret = page->mapping->a_ops->commit_write(bitmap->file, page, 0, 300 atomic_read(&bitmap->pending_writes)==0);
324 PAGE_SIZE); 301 return (bitmap->flags & BITMAP_WRITE_ERROR) ? -EIO : 0;
325 if (ret) {
326 unlock_page(page);
327 return ret;
328 } 302 }
303 return 0;
304}
329 305
330 set_page_dirty(page); /* force it to be written out */ 306static void end_bitmap_write(struct buffer_head *bh, int uptodate)
331 307{
332 if (!wait) { 308 struct bitmap *bitmap = bh->b_private;
333 /* add to list to be waited for by daemon */ 309 unsigned long flags;
334 struct page_list *item = mempool_alloc(bitmap->write_pool, GFP_NOIO); 310
335 item->page = page; 311 if (!uptodate) {
336 get_page(page); 312 spin_lock_irqsave(&bitmap->lock, flags);
337 spin_lock(&bitmap->write_lock); 313 bitmap->flags |= BITMAP_WRITE_ERROR;
338 list_add(&item->list, &bitmap->complete_pages); 314 spin_unlock_irqrestore(&bitmap->lock, flags);
339 spin_unlock(&bitmap->write_lock); 315 }
340 md_wakeup_thread(bitmap->writeback_daemon); 316 if (atomic_dec_and_test(&bitmap->pending_writes))
317 wake_up(&bitmap->write_wait);
318}
319
320/* copied from buffer.c */
321static void
322__clear_page_buffers(struct page *page)
323{
324 ClearPagePrivate(page);
325 set_page_private(page, 0);
326 page_cache_release(page);
327}
328static void free_buffers(struct page *page)
329{
330 struct buffer_head *bh = page_buffers(page);
331
332 while (bh) {
333 struct buffer_head *next = bh->b_this_page;
334 free_buffer_head(bh);
335 bh = next;
341 } 336 }
342 return write_one_page(page, wait); 337 __clear_page_buffers(page);
338 put_page(page);
343} 339}
344 340
345/* read a page from a file, pinning it into cache, and return bytes_read */ 341/* read a page from a file.
342 * We both read the page, and attach buffers to the page to record the
343 * address of each block (using bmap). These addresses will be used
344 * to write the block later, completely bypassing the filesystem.
345 * This usage is similar to how swap files are handled, and allows us
346 * to write to a file with no concerns of memory allocation failing.
347 */
346static struct page *read_page(struct file *file, unsigned long index, 348static struct page *read_page(struct file *file, unsigned long index,
347 unsigned long *bytes_read) 349 struct bitmap *bitmap,
350 unsigned long count)
348{ 351{
349 struct inode *inode = file->f_mapping->host;
350 struct page *page = NULL; 352 struct page *page = NULL;
351 loff_t isize = i_size_read(inode); 353 struct inode *inode = file->f_dentry->d_inode;
352 unsigned long end_index = isize >> PAGE_SHIFT; 354 struct buffer_head *bh;
355 sector_t block;
353 356
354 PRINTK("read bitmap file (%dB @ %Lu)\n", (int)PAGE_SIZE, 357 PRINTK("read bitmap file (%dB @ %Lu)\n", (int)PAGE_SIZE,
355 (unsigned long long)index << PAGE_SHIFT); 358 (unsigned long long)index << PAGE_SHIFT);
356 359
357 page = read_cache_page(inode->i_mapping, index, 360 page = alloc_page(GFP_KERNEL);
358 (filler_t *)inode->i_mapping->a_ops->readpage, file); 361 if (!page)
362 page = ERR_PTR(-ENOMEM);
359 if (IS_ERR(page)) 363 if (IS_ERR(page))
360 goto out; 364 goto out;
361 wait_on_page_locked(page); 365
362 if (!PageUptodate(page) || PageError(page)) { 366 bh = alloc_page_buffers(page, 1<<inode->i_blkbits, 0);
367 if (!bh) {
363 put_page(page); 368 put_page(page);
364 page = ERR_PTR(-EIO); 369 page = ERR_PTR(-ENOMEM);
365 goto out; 370 goto out;
366 } 371 }
372 attach_page_buffers(page, bh);
373 block = index << (PAGE_SHIFT - inode->i_blkbits);
374 while (bh) {
375 if (count == 0)
376 bh->b_blocknr = 0;
377 else {
378 bh->b_blocknr = bmap(inode, block);
379 if (bh->b_blocknr == 0) {
380 /* Cannot use this file! */
381 free_buffers(page);
382 page = ERR_PTR(-EINVAL);
383 goto out;
384 }
385 bh->b_bdev = inode->i_sb->s_bdev;
386 if (count < (1<<inode->i_blkbits))
387 count = 0;
388 else
389 count -= (1<<inode->i_blkbits);
390
391 bh->b_end_io = end_bitmap_write;
392 bh->b_private = bitmap;
393 atomic_inc(&bitmap->pending_writes);
394 set_buffer_locked(bh);
395 set_buffer_mapped(bh);
396 submit_bh(READ, bh);
397 }
398 block++;
399 bh = bh->b_this_page;
400 }
401 page->index = index;
367 402
368 if (index > end_index) /* we have read beyond EOF */ 403 wait_event(bitmap->write_wait,
369 *bytes_read = 0; 404 atomic_read(&bitmap->pending_writes)==0);
370 else if (index == end_index) /* possible short read */ 405 if (bitmap->flags & BITMAP_WRITE_ERROR) {
371 *bytes_read = isize & ~PAGE_MASK; 406 free_buffers(page);
372 else 407 page = ERR_PTR(-EIO);
373 *bytes_read = PAGE_SIZE; /* got a full page */ 408 }
374out: 409out:
375 if (IS_ERR(page)) 410 if (IS_ERR(page))
376 printk(KERN_ALERT "md: bitmap read error: (%dB @ %Lu): %ld\n", 411 printk(KERN_ALERT "md: bitmap read error: (%dB @ %Lu): %ld\n",
@@ -441,16 +476,14 @@ static int bitmap_read_sb(struct bitmap *bitmap)
441 char *reason = NULL; 476 char *reason = NULL;
442 bitmap_super_t *sb; 477 bitmap_super_t *sb;
443 unsigned long chunksize, daemon_sleep, write_behind; 478 unsigned long chunksize, daemon_sleep, write_behind;
444 unsigned long bytes_read;
445 unsigned long long events; 479 unsigned long long events;
446 int err = -EINVAL; 480 int err = -EINVAL;
447 481
448 /* page 0 is the superblock, read it... */ 482 /* page 0 is the superblock, read it... */
449 if (bitmap->file) 483 if (bitmap->file)
450 bitmap->sb_page = read_page(bitmap->file, 0, &bytes_read); 484 bitmap->sb_page = read_page(bitmap->file, 0, bitmap, PAGE_SIZE);
451 else { 485 else {
452 bitmap->sb_page = read_sb_page(bitmap->mddev, bitmap->offset, 0); 486 bitmap->sb_page = read_sb_page(bitmap->mddev, bitmap->offset, 0);
453 bytes_read = PAGE_SIZE;
454 } 487 }
455 if (IS_ERR(bitmap->sb_page)) { 488 if (IS_ERR(bitmap->sb_page)) {
456 err = PTR_ERR(bitmap->sb_page); 489 err = PTR_ERR(bitmap->sb_page);
@@ -460,13 +493,6 @@ static int bitmap_read_sb(struct bitmap *bitmap)
460 493
461 sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0); 494 sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
462 495
463 if (bytes_read < sizeof(*sb)) { /* short read */
464 printk(KERN_INFO "%s: bitmap file superblock truncated\n",
465 bmname(bitmap));
466 err = -ENOSPC;
467 goto out;
468 }
469
470 chunksize = le32_to_cpu(sb->chunksize); 496 chunksize = le32_to_cpu(sb->chunksize);
471 daemon_sleep = le32_to_cpu(sb->daemon_sleep); 497 daemon_sleep = le32_to_cpu(sb->daemon_sleep);
472 write_behind = le32_to_cpu(sb->write_behind); 498 write_behind = le32_to_cpu(sb->write_behind);
@@ -550,7 +576,6 @@ static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
550 spin_unlock_irqrestore(&bitmap->lock, flags); 576 spin_unlock_irqrestore(&bitmap->lock, flags);
551 return; 577 return;
552 } 578 }
553 get_page(bitmap->sb_page);
554 spin_unlock_irqrestore(&bitmap->lock, flags); 579 spin_unlock_irqrestore(&bitmap->lock, flags);
555 sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0); 580 sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
556 switch (op) { 581 switch (op) {
@@ -561,7 +586,6 @@ static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
561 default: BUG(); 586 default: BUG();
562 } 587 }
563 kunmap_atomic(sb, KM_USER0); 588 kunmap_atomic(sb, KM_USER0);
564 put_page(bitmap->sb_page);
565} 589}
566 590
567/* 591/*
@@ -614,48 +638,17 @@ static void bitmap_file_unmap(struct bitmap *bitmap)
614 638
615 while (pages--) 639 while (pages--)
616 if (map[pages]->index != 0) /* 0 is sb_page, release it below */ 640 if (map[pages]->index != 0) /* 0 is sb_page, release it below */
617 put_page(map[pages]); 641 free_buffers(map[pages]);
618 kfree(map); 642 kfree(map);
619 kfree(attr); 643 kfree(attr);
620 644
621 safe_put_page(sb_page); 645 if (sb_page)
622} 646 free_buffers(sb_page);
623
624static void bitmap_stop_daemon(struct bitmap *bitmap);
625
626/* dequeue the next item in a page list -- don't call from irq context */
627static struct page_list *dequeue_page(struct bitmap *bitmap)
628{
629 struct page_list *item = NULL;
630 struct list_head *head = &bitmap->complete_pages;
631
632 spin_lock(&bitmap->write_lock);
633 if (list_empty(head))
634 goto out;
635 item = list_entry(head->prev, struct page_list, list);
636 list_del(head->prev);
637out:
638 spin_unlock(&bitmap->write_lock);
639 return item;
640}
641
642static void drain_write_queues(struct bitmap *bitmap)
643{
644 struct page_list *item;
645
646 while ((item = dequeue_page(bitmap))) {
647 /* don't bother to wait */
648 put_page(item->page);
649 mempool_free(item, bitmap->write_pool);
650 }
651
652 wake_up(&bitmap->write_wait);
653} 647}
654 648
655static void bitmap_file_put(struct bitmap *bitmap) 649static void bitmap_file_put(struct bitmap *bitmap)
656{ 650{
657 struct file *file; 651 struct file *file;
658 struct inode *inode;
659 unsigned long flags; 652 unsigned long flags;
660 653
661 spin_lock_irqsave(&bitmap->lock, flags); 654 spin_lock_irqsave(&bitmap->lock, flags);
@@ -663,17 +656,14 @@ static void bitmap_file_put(struct bitmap *bitmap)
663 bitmap->file = NULL; 656 bitmap->file = NULL;
664 spin_unlock_irqrestore(&bitmap->lock, flags); 657 spin_unlock_irqrestore(&bitmap->lock, flags);
665 658
666 bitmap_stop_daemon(bitmap); 659 if (file)
667 660 wait_event(bitmap->write_wait,
668 drain_write_queues(bitmap); 661 atomic_read(&bitmap->pending_writes)==0);
669
670 bitmap_file_unmap(bitmap); 662 bitmap_file_unmap(bitmap);
671 663
672 if (file) { 664 if (file) {
673 inode = file->f_mapping->host; 665 struct inode *inode = file->f_dentry->d_inode;
674 spin_lock(&inode->i_lock); 666 invalidate_inode_pages(inode->i_mapping);
675 atomic_set(&inode->i_writecount, 1); /* allow writes again */
676 spin_unlock(&inode->i_lock);
677 fput(file); 667 fput(file);
678 } 668 }
679} 669}
@@ -708,26 +698,27 @@ static void bitmap_file_kick(struct bitmap *bitmap)
708} 698}
709 699
710enum bitmap_page_attr { 700enum bitmap_page_attr {
711 BITMAP_PAGE_DIRTY = 1, // there are set bits that need to be synced 701 BITMAP_PAGE_DIRTY = 0, // there are set bits that need to be synced
712 BITMAP_PAGE_CLEAN = 2, // there are bits that might need to be cleared 702 BITMAP_PAGE_CLEAN = 1, // there are bits that might need to be cleared
713 BITMAP_PAGE_NEEDWRITE=4, // there are cleared bits that need to be synced 703 BITMAP_PAGE_NEEDWRITE=2, // there are cleared bits that need to be synced
714}; 704};
715 705
716static inline void set_page_attr(struct bitmap *bitmap, struct page *page, 706static inline void set_page_attr(struct bitmap *bitmap, struct page *page,
717 enum bitmap_page_attr attr) 707 enum bitmap_page_attr attr)
718{ 708{
719 bitmap->filemap_attr[page->index] |= attr; 709 __set_bit((page->index<<2) + attr, bitmap->filemap_attr);
720} 710}
721 711
722static inline void clear_page_attr(struct bitmap *bitmap, struct page *page, 712static inline void clear_page_attr(struct bitmap *bitmap, struct page *page,
723 enum bitmap_page_attr attr) 713 enum bitmap_page_attr attr)
724{ 714{
725 bitmap->filemap_attr[page->index] &= ~attr; 715 __clear_bit((page->index<<2) + attr, bitmap->filemap_attr);
726} 716}
727 717
728static inline unsigned long get_page_attr(struct bitmap *bitmap, struct page *page) 718static inline unsigned long test_page_attr(struct bitmap *bitmap, struct page *page,
719 enum bitmap_page_attr attr)
729{ 720{
730 return bitmap->filemap_attr[page->index]; 721 return test_bit((page->index<<2) + attr, bitmap->filemap_attr);
731} 722}
732 723
733/* 724/*
@@ -751,11 +742,6 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
751 page = filemap_get_page(bitmap, chunk); 742 page = filemap_get_page(bitmap, chunk);
752 bit = file_page_offset(chunk); 743 bit = file_page_offset(chunk);
753 744
754
755 /* make sure the page stays cached until it gets written out */
756 if (! (get_page_attr(bitmap, page) & BITMAP_PAGE_DIRTY))
757 get_page(page);
758
759 /* set the bit */ 745 /* set the bit */
760 kaddr = kmap_atomic(page, KM_USER0); 746 kaddr = kmap_atomic(page, KM_USER0);
761 if (bitmap->flags & BITMAP_HOSTENDIAN) 747 if (bitmap->flags & BITMAP_HOSTENDIAN)
@@ -775,7 +761,8 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
775 * sync the dirty pages of the bitmap file to disk */ 761 * sync the dirty pages of the bitmap file to disk */
776int bitmap_unplug(struct bitmap *bitmap) 762int bitmap_unplug(struct bitmap *bitmap)
777{ 763{
778 unsigned long i, attr, flags; 764 unsigned long i, flags;
765 int dirty, need_write;
779 struct page *page; 766 struct page *page;
780 int wait = 0; 767 int wait = 0;
781 int err; 768 int err;
@@ -792,35 +779,26 @@ int bitmap_unplug(struct bitmap *bitmap)
792 return 0; 779 return 0;
793 } 780 }
794 page = bitmap->filemap[i]; 781 page = bitmap->filemap[i];
795 attr = get_page_attr(bitmap, page); 782 dirty = test_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
783 need_write = test_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
796 clear_page_attr(bitmap, page, BITMAP_PAGE_DIRTY); 784 clear_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
797 clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE); 785 clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
798 if ((attr & BITMAP_PAGE_DIRTY)) 786 if (dirty)
799 wait = 1; 787 wait = 1;
800 spin_unlock_irqrestore(&bitmap->lock, flags); 788 spin_unlock_irqrestore(&bitmap->lock, flags);
801 789
802 if (attr & (BITMAP_PAGE_DIRTY | BITMAP_PAGE_NEEDWRITE)) { 790 if (dirty | need_write)
803 err = write_page(bitmap, page, 0); 791 err = write_page(bitmap, page, 0);
804 if (err == -EAGAIN) {
805 if (attr & BITMAP_PAGE_DIRTY)
806 err = write_page(bitmap, page, 1);
807 else
808 err = 0;
809 }
810 if (err)
811 return 1;
812 }
813 } 792 }
814 if (wait) { /* if any writes were performed, we need to wait on them */ 793 if (wait) { /* if any writes were performed, we need to wait on them */
815 if (bitmap->file) { 794 if (bitmap->file)
816 spin_lock_irq(&bitmap->write_lock); 795 wait_event(bitmap->write_wait,
817 wait_event_lock_irq(bitmap->write_wait, 796 atomic_read(&bitmap->pending_writes)==0);
818 list_empty(&bitmap->complete_pages), bitmap->write_lock, 797 else
819 wake_up_process(bitmap->writeback_daemon->tsk));
820 spin_unlock_irq(&bitmap->write_lock);
821 } else
822 md_super_wait(bitmap->mddev); 798 md_super_wait(bitmap->mddev);
823 } 799 }
800 if (bitmap->flags & BITMAP_WRITE_ERROR)
801 bitmap_file_kick(bitmap);
824 return 0; 802 return 0;
825} 803}
826 804
@@ -842,7 +820,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
842 struct page *page = NULL, *oldpage = NULL; 820 struct page *page = NULL, *oldpage = NULL;
843 unsigned long num_pages, bit_cnt = 0; 821 unsigned long num_pages, bit_cnt = 0;
844 struct file *file; 822 struct file *file;
845 unsigned long bytes, offset, dummy; 823 unsigned long bytes, offset;
846 int outofdate; 824 int outofdate;
847 int ret = -ENOSPC; 825 int ret = -ENOSPC;
848 void *paddr; 826 void *paddr;
@@ -879,7 +857,12 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
879 if (!bitmap->filemap) 857 if (!bitmap->filemap)
880 goto out; 858 goto out;
881 859
882 bitmap->filemap_attr = kzalloc(sizeof(long) * num_pages, GFP_KERNEL); 860 /* We need 4 bits per page, rounded up to a multiple of sizeof(unsigned long) */
861 bitmap->filemap_attr = kzalloc(
862 (((num_pages*4/8)+sizeof(unsigned long)-1)
863 /sizeof(unsigned long))
864 *sizeof(unsigned long),
865 GFP_KERNEL);
883 if (!bitmap->filemap_attr) 866 if (!bitmap->filemap_attr)
884 goto out; 867 goto out;
885 868
@@ -890,7 +873,12 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
890 index = file_page_index(i); 873 index = file_page_index(i);
891 bit = file_page_offset(i); 874 bit = file_page_offset(i);
892 if (index != oldindex) { /* this is a new page, read it in */ 875 if (index != oldindex) { /* this is a new page, read it in */
876 int count;
893 /* unmap the old page, we're done with it */ 877 /* unmap the old page, we're done with it */
878 if (index == num_pages-1)
879 count = bytes - index * PAGE_SIZE;
880 else
881 count = PAGE_SIZE;
894 if (index == 0) { 882 if (index == 0) {
895 /* 883 /*
896 * if we're here then the superblock page 884 * if we're here then the superblock page
@@ -900,7 +888,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
900 page = bitmap->sb_page; 888 page = bitmap->sb_page;
901 offset = sizeof(bitmap_super_t); 889 offset = sizeof(bitmap_super_t);
902 } else if (file) { 890 } else if (file) {
903 page = read_page(file, index, &dummy); 891 page = read_page(file, index, bitmap, count);
904 offset = 0; 892 offset = 0;
905 } else { 893 } else {
906 page = read_sb_page(bitmap->mddev, bitmap->offset, index); 894 page = read_sb_page(bitmap->mddev, bitmap->offset, index);
@@ -971,12 +959,11 @@ void bitmap_write_all(struct bitmap *bitmap)
971 /* We don't actually write all bitmap blocks here, 959 /* We don't actually write all bitmap blocks here,
972 * just flag them as needing to be written 960 * just flag them as needing to be written
973 */ 961 */
962 int i;
974 963
975 unsigned long chunks = bitmap->chunks; 964 for (i=0; i < bitmap->file_pages; i++)
976 unsigned long bytes = (chunks+7)/8 + sizeof(bitmap_super_t); 965 set_page_attr(bitmap, bitmap->filemap[i],
977 unsigned long num_pages = (bytes + PAGE_SIZE-1) / PAGE_SIZE; 966 BITMAP_PAGE_NEEDWRITE);
978 while (num_pages--)
979 bitmap->filemap_attr[num_pages] |= BITMAP_PAGE_NEEDWRITE;
980} 967}
981 968
982 969
@@ -1007,7 +994,6 @@ int bitmap_daemon_work(struct bitmap *bitmap)
1007 struct page *page = NULL, *lastpage = NULL; 994 struct page *page = NULL, *lastpage = NULL;
1008 int err = 0; 995 int err = 0;
1009 int blocks; 996 int blocks;
1010 int attr;
1011 void *paddr; 997 void *paddr;
1012 998
1013 if (bitmap == NULL) 999 if (bitmap == NULL)
@@ -1029,43 +1015,34 @@ int bitmap_daemon_work(struct bitmap *bitmap)
1029 1015
1030 if (page != lastpage) { 1016 if (page != lastpage) {
1031 /* skip this page unless it's marked as needing cleaning */ 1017 /* skip this page unless it's marked as needing cleaning */
1032 if (!((attr=get_page_attr(bitmap, page)) & BITMAP_PAGE_CLEAN)) { 1018 if (!test_page_attr(bitmap, page, BITMAP_PAGE_CLEAN)) {
1033 if (attr & BITMAP_PAGE_NEEDWRITE) { 1019 int need_write = test_page_attr(bitmap, page,
1034 get_page(page); 1020 BITMAP_PAGE_NEEDWRITE);
1021 if (need_write)
1035 clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE); 1022 clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
1036 } 1023
1037 spin_unlock_irqrestore(&bitmap->lock, flags); 1024 spin_unlock_irqrestore(&bitmap->lock, flags);
1038 if (attr & BITMAP_PAGE_NEEDWRITE) { 1025 if (need_write) {
1039 switch (write_page(bitmap, page, 0)) { 1026 switch (write_page(bitmap, page, 0)) {
1040 case -EAGAIN:
1041 set_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
1042 break;
1043 case 0: 1027 case 0:
1044 break; 1028 break;
1045 default: 1029 default:
1046 bitmap_file_kick(bitmap); 1030 bitmap_file_kick(bitmap);
1047 } 1031 }
1048 put_page(page);
1049 } 1032 }
1050 continue; 1033 continue;
1051 } 1034 }
1052 1035
1053 /* grab the new page, sync and release the old */ 1036 /* grab the new page, sync and release the old */
1054 get_page(page);
1055 if (lastpage != NULL) { 1037 if (lastpage != NULL) {
1056 if (get_page_attr(bitmap, lastpage) & BITMAP_PAGE_NEEDWRITE) { 1038 if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) {
1057 clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); 1039 clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
1058 spin_unlock_irqrestore(&bitmap->lock, flags); 1040 spin_unlock_irqrestore(&bitmap->lock, flags);
1059 err = write_page(bitmap, lastpage, 0); 1041 err = write_page(bitmap, lastpage, 0);
1060 if (err == -EAGAIN) {
1061 err = 0;
1062 set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
1063 }
1064 } else { 1042 } else {
1065 set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); 1043 set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
1066 spin_unlock_irqrestore(&bitmap->lock, flags); 1044 spin_unlock_irqrestore(&bitmap->lock, flags);
1067 } 1045 }
1068 put_page(lastpage);
1069 if (err) 1046 if (err)
1070 bitmap_file_kick(bitmap); 1047 bitmap_file_kick(bitmap);
1071 } else 1048 } else
@@ -1107,131 +1084,19 @@ int bitmap_daemon_work(struct bitmap *bitmap)
1107 /* now sync the final page */ 1084 /* now sync the final page */
1108 if (lastpage != NULL) { 1085 if (lastpage != NULL) {
1109 spin_lock_irqsave(&bitmap->lock, flags); 1086 spin_lock_irqsave(&bitmap->lock, flags);
1110 if (get_page_attr(bitmap, lastpage) &BITMAP_PAGE_NEEDWRITE) { 1087 if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) {
1111 clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); 1088 clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
1112 spin_unlock_irqrestore(&bitmap->lock, flags); 1089 spin_unlock_irqrestore(&bitmap->lock, flags);
1113 err = write_page(bitmap, lastpage, 0); 1090 err = write_page(bitmap, lastpage, 0);
1114 if (err == -EAGAIN) {
1115 set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
1116 err = 0;
1117 }
1118 } else { 1091 } else {
1119 set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); 1092 set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
1120 spin_unlock_irqrestore(&bitmap->lock, flags); 1093 spin_unlock_irqrestore(&bitmap->lock, flags);
1121 } 1094 }
1122
1123 put_page(lastpage);
1124 } 1095 }
1125 1096
1126 return err; 1097 return err;
1127} 1098}
1128 1099
1129static void daemon_exit(struct bitmap *bitmap, mdk_thread_t **daemon)
1130{
1131 mdk_thread_t *dmn;
1132 unsigned long flags;
1133
1134 /* if no one is waiting on us, we'll free the md thread struct
1135 * and exit, otherwise we let the waiter clean things up */
1136 spin_lock_irqsave(&bitmap->lock, flags);
1137 if ((dmn = *daemon)) { /* no one is waiting, cleanup and exit */
1138 *daemon = NULL;
1139 spin_unlock_irqrestore(&bitmap->lock, flags);
1140 kfree(dmn);
1141 complete_and_exit(NULL, 0); /* do_exit not exported */
1142 }
1143 spin_unlock_irqrestore(&bitmap->lock, flags);
1144}
1145
1146static void bitmap_writeback_daemon(mddev_t *mddev)
1147{
1148 struct bitmap *bitmap = mddev->bitmap;
1149 struct page *page;
1150 struct page_list *item;
1151 int err = 0;
1152
1153 if (signal_pending(current)) {
1154 printk(KERN_INFO
1155 "%s: bitmap writeback daemon got signal, exiting...\n",
1156 bmname(bitmap));
1157 err = -EINTR;
1158 goto out;
1159 }
1160 if (bitmap == NULL)
1161 /* about to be stopped. */
1162 return;
1163
1164 PRINTK("%s: bitmap writeback daemon woke up...\n", bmname(bitmap));
1165 /* wait on bitmap page writebacks */
1166 while ((item = dequeue_page(bitmap))) {
1167 page = item->page;
1168 mempool_free(item, bitmap->write_pool);
1169 PRINTK("wait on page writeback: %p\n", page);
1170 wait_on_page_writeback(page);
1171 PRINTK("finished page writeback: %p\n", page);
1172
1173 err = PageError(page);
1174 put_page(page);
1175 if (err) {
1176 printk(KERN_WARNING "%s: bitmap file writeback "
1177 "failed (page %lu): %d\n",
1178 bmname(bitmap), page->index, err);
1179 bitmap_file_kick(bitmap);
1180 goto out;
1181 }
1182 }
1183 out:
1184 wake_up(&bitmap->write_wait);
1185 if (err) {
1186 printk(KERN_INFO "%s: bitmap writeback daemon exiting (%d)\n",
1187 bmname(bitmap), err);
1188 daemon_exit(bitmap, &bitmap->writeback_daemon);
1189 }
1190}
1191
1192static mdk_thread_t *bitmap_start_daemon(struct bitmap *bitmap,
1193 void (*func)(mddev_t *), char *name)
1194{
1195 mdk_thread_t *daemon;
1196 char namebuf[32];
1197
1198#ifdef INJECT_FATAL_FAULT_2
1199 daemon = NULL;
1200#else
1201 sprintf(namebuf, "%%s_%s", name);
1202 daemon = md_register_thread(func, bitmap->mddev, namebuf);
1203#endif
1204 if (!daemon) {
1205 printk(KERN_ERR "%s: failed to start bitmap daemon\n",
1206 bmname(bitmap));
1207 return ERR_PTR(-ECHILD);
1208 }
1209
1210 md_wakeup_thread(daemon); /* start it running */
1211
1212 PRINTK("%s: %s daemon (pid %d) started...\n",
1213 bmname(bitmap), name, daemon->tsk->pid);
1214
1215 return daemon;
1216}
1217
1218static void bitmap_stop_daemon(struct bitmap *bitmap)
1219{
1220 /* the daemon can't stop itself... it'll just exit instead... */
1221 if (bitmap->writeback_daemon && ! IS_ERR(bitmap->writeback_daemon) &&
1222 current->pid != bitmap->writeback_daemon->tsk->pid) {
1223 mdk_thread_t *daemon;
1224 unsigned long flags;
1225
1226 spin_lock_irqsave(&bitmap->lock, flags);
1227 daemon = bitmap->writeback_daemon;
1228 bitmap->writeback_daemon = NULL;
1229 spin_unlock_irqrestore(&bitmap->lock, flags);
1230 if (daemon && ! IS_ERR(daemon))
1231 md_unregister_thread(daemon); /* destroy the thread */
1232 }
1233}
1234
1235static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap, 1100static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
1236 sector_t offset, int *blocks, 1101 sector_t offset, int *blocks,
1237 int create) 1102 int create)
@@ -1500,8 +1365,6 @@ static void bitmap_free(struct bitmap *bitmap)
1500 1365
1501 /* free all allocated memory */ 1366 /* free all allocated memory */
1502 1367
1503 mempool_destroy(bitmap->write_pool);
1504
1505 if (bp) /* deallocate the page memory */ 1368 if (bp) /* deallocate the page memory */
1506 for (k = 0; k < pages; k++) 1369 for (k = 0; k < pages; k++)
1507 if (bp[k].map && !bp[k].hijacked) 1370 if (bp[k].map && !bp[k].hijacked)
@@ -1549,20 +1412,20 @@ int bitmap_create(mddev_t *mddev)
1549 return -ENOMEM; 1412 return -ENOMEM;
1550 1413
1551 spin_lock_init(&bitmap->lock); 1414 spin_lock_init(&bitmap->lock);
1552 bitmap->mddev = mddev; 1415 atomic_set(&bitmap->pending_writes, 0);
1553
1554 spin_lock_init(&bitmap->write_lock);
1555 INIT_LIST_HEAD(&bitmap->complete_pages);
1556 init_waitqueue_head(&bitmap->write_wait); 1416 init_waitqueue_head(&bitmap->write_wait);
1557 bitmap->write_pool = mempool_create_kmalloc_pool(WRITE_POOL_SIZE, 1417
1558 sizeof(struct page_list)); 1418 bitmap->mddev = mddev;
1559 err = -ENOMEM;
1560 if (!bitmap->write_pool)
1561 goto error;
1562 1419
1563 bitmap->file = file; 1420 bitmap->file = file;
1564 bitmap->offset = mddev->bitmap_offset; 1421 bitmap->offset = mddev->bitmap_offset;
1565 if (file) get_file(file); 1422 if (file) {
1423 get_file(file);
1424 do_sync_file_range(file, 0, LLONG_MAX,
1425 SYNC_FILE_RANGE_WAIT_BEFORE |
1426 SYNC_FILE_RANGE_WRITE |
1427 SYNC_FILE_RANGE_WAIT_AFTER);
1428 }
1566 /* read superblock from bitmap file (this sets bitmap->chunksize) */ 1429 /* read superblock from bitmap file (this sets bitmap->chunksize) */
1567 err = bitmap_read_sb(bitmap); 1430 err = bitmap_read_sb(bitmap);
1568 if (err) 1431 if (err)
@@ -1594,8 +1457,6 @@ int bitmap_create(mddev_t *mddev)
1594 if (!bitmap->bp) 1457 if (!bitmap->bp)
1595 goto error; 1458 goto error;
1596 1459
1597 bitmap->flags |= BITMAP_ACTIVE;
1598
1599 /* now that we have some pages available, initialize the in-memory 1460 /* now that we have some pages available, initialize the in-memory
1600 * bitmap from the on-disk bitmap */ 1461 * bitmap from the on-disk bitmap */
1601 start = 0; 1462 start = 0;
@@ -1613,15 +1474,6 @@ int bitmap_create(mddev_t *mddev)
1613 1474
1614 mddev->bitmap = bitmap; 1475 mddev->bitmap = bitmap;
1615 1476
1616 if (file)
1617 /* kick off the bitmap writeback daemon */
1618 bitmap->writeback_daemon =
1619 bitmap_start_daemon(bitmap,
1620 bitmap_writeback_daemon,
1621 "bitmap_wb");
1622
1623 if (IS_ERR(bitmap->writeback_daemon))
1624 return PTR_ERR(bitmap->writeback_daemon);
1625 mddev->thread->timeout = bitmap->daemon_sleep * HZ; 1477 mddev->thread->timeout = bitmap->daemon_sleep * HZ;
1626 1478
1627 return bitmap_update_sb(bitmap); 1479 return bitmap_update_sb(bitmap);
@@ -1638,4 +1490,3 @@ EXPORT_SYMBOL(bitmap_start_sync);
1638EXPORT_SYMBOL(bitmap_end_sync); 1490EXPORT_SYMBOL(bitmap_end_sync);
1639EXPORT_SYMBOL(bitmap_unplug); 1491EXPORT_SYMBOL(bitmap_unplug);
1640EXPORT_SYMBOL(bitmap_close_sync); 1492EXPORT_SYMBOL(bitmap_close_sync);
1641EXPORT_SYMBOL(bitmap_daemon_work);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 61a590bb62..6022ed12a7 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -20,7 +20,7 @@
20 20
21#include "dm.h" 21#include "dm.h"
22 22
23#define PFX "crypt: " 23#define DM_MSG_PREFIX "crypt"
24 24
25/* 25/*
26 * per bio private data 26 * per bio private data
@@ -125,19 +125,19 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
125 u8 *salt; 125 u8 *salt;
126 126
127 if (opts == NULL) { 127 if (opts == NULL) {
128 ti->error = PFX "Digest algorithm missing for ESSIV mode"; 128 ti->error = "Digest algorithm missing for ESSIV mode";
129 return -EINVAL; 129 return -EINVAL;
130 } 130 }
131 131
132 /* Hash the cipher key with the given hash algorithm */ 132 /* Hash the cipher key with the given hash algorithm */
133 hash_tfm = crypto_alloc_tfm(opts, CRYPTO_TFM_REQ_MAY_SLEEP); 133 hash_tfm = crypto_alloc_tfm(opts, CRYPTO_TFM_REQ_MAY_SLEEP);
134 if (hash_tfm == NULL) { 134 if (hash_tfm == NULL) {
135 ti->error = PFX "Error initializing ESSIV hash"; 135 ti->error = "Error initializing ESSIV hash";
136 return -EINVAL; 136 return -EINVAL;
137 } 137 }
138 138
139 if (crypto_tfm_alg_type(hash_tfm) != CRYPTO_ALG_TYPE_DIGEST) { 139 if (crypto_tfm_alg_type(hash_tfm) != CRYPTO_ALG_TYPE_DIGEST) {
140 ti->error = PFX "Expected digest algorithm for ESSIV hash"; 140 ti->error = "Expected digest algorithm for ESSIV hash";
141 crypto_free_tfm(hash_tfm); 141 crypto_free_tfm(hash_tfm);
142 return -EINVAL; 142 return -EINVAL;
143 } 143 }
@@ -145,7 +145,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
145 saltsize = crypto_tfm_alg_digestsize(hash_tfm); 145 saltsize = crypto_tfm_alg_digestsize(hash_tfm);
146 salt = kmalloc(saltsize, GFP_KERNEL); 146 salt = kmalloc(saltsize, GFP_KERNEL);
147 if (salt == NULL) { 147 if (salt == NULL) {
148 ti->error = PFX "Error kmallocing salt storage in ESSIV"; 148 ti->error = "Error kmallocing salt storage in ESSIV";
149 crypto_free_tfm(hash_tfm); 149 crypto_free_tfm(hash_tfm);
150 return -ENOMEM; 150 return -ENOMEM;
151 } 151 }
@@ -159,20 +159,20 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
159 CRYPTO_TFM_MODE_ECB | 159 CRYPTO_TFM_MODE_ECB |
160 CRYPTO_TFM_REQ_MAY_SLEEP); 160 CRYPTO_TFM_REQ_MAY_SLEEP);
161 if (essiv_tfm == NULL) { 161 if (essiv_tfm == NULL) {
162 ti->error = PFX "Error allocating crypto tfm for ESSIV"; 162 ti->error = "Error allocating crypto tfm for ESSIV";
163 kfree(salt); 163 kfree(salt);
164 return -EINVAL; 164 return -EINVAL;
165 } 165 }
166 if (crypto_tfm_alg_blocksize(essiv_tfm) 166 if (crypto_tfm_alg_blocksize(essiv_tfm)
167 != crypto_tfm_alg_ivsize(cc->tfm)) { 167 != crypto_tfm_alg_ivsize(cc->tfm)) {
168 ti->error = PFX "Block size of ESSIV cipher does " 168 ti->error = "Block size of ESSIV cipher does "
169 "not match IV size of block cipher"; 169 "not match IV size of block cipher";
170 crypto_free_tfm(essiv_tfm); 170 crypto_free_tfm(essiv_tfm);
171 kfree(salt); 171 kfree(salt);
172 return -EINVAL; 172 return -EINVAL;
173 } 173 }
174 if (crypto_cipher_setkey(essiv_tfm, salt, saltsize) < 0) { 174 if (crypto_cipher_setkey(essiv_tfm, salt, saltsize) < 0) {
175 ti->error = PFX "Failed to set key for ESSIV cipher"; 175 ti->error = "Failed to set key for ESSIV cipher";
176 crypto_free_tfm(essiv_tfm); 176 crypto_free_tfm(essiv_tfm);
177 kfree(salt); 177 kfree(salt);
178 return -EINVAL; 178 return -EINVAL;
@@ -521,7 +521,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
521 unsigned long long tmpll; 521 unsigned long long tmpll;
522 522
523 if (argc != 5) { 523 if (argc != 5) {
524 ti->error = PFX "Not enough arguments"; 524 ti->error = "Not enough arguments";
525 return -EINVAL; 525 return -EINVAL;
526 } 526 }
527 527
@@ -532,21 +532,21 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
532 ivmode = strsep(&ivopts, ":"); 532 ivmode = strsep(&ivopts, ":");
533 533
534 if (tmp) 534 if (tmp)
535 DMWARN(PFX "Unexpected additional cipher options"); 535 DMWARN("Unexpected additional cipher options");
536 536
537 key_size = strlen(argv[1]) >> 1; 537 key_size = strlen(argv[1]) >> 1;
538 538
539 cc = kmalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL); 539 cc = kmalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
540 if (cc == NULL) { 540 if (cc == NULL) {
541 ti->error = 541 ti->error =
542 PFX "Cannot allocate transparent encryption context"; 542 "Cannot allocate transparent encryption context";
543 return -ENOMEM; 543 return -ENOMEM;
544 } 544 }
545 545
546 cc->key_size = key_size; 546 cc->key_size = key_size;
547 if ((!key_size && strcmp(argv[1], "-") != 0) || 547 if ((!key_size && strcmp(argv[1], "-") != 0) ||
548 (key_size && crypt_decode_key(cc->key, argv[1], key_size) < 0)) { 548 (key_size && crypt_decode_key(cc->key, argv[1], key_size) < 0)) {
549 ti->error = PFX "Error decoding key"; 549 ti->error = "Error decoding key";
550 goto bad1; 550 goto bad1;
551 } 551 }
552 552
@@ -562,22 +562,22 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
562 else if (strcmp(chainmode, "ecb") == 0) 562 else if (strcmp(chainmode, "ecb") == 0)
563 crypto_flags = CRYPTO_TFM_MODE_ECB; 563 crypto_flags = CRYPTO_TFM_MODE_ECB;
564 else { 564 else {
565 ti->error = PFX "Unknown chaining mode"; 565 ti->error = "Unknown chaining mode";
566 goto bad1; 566 goto bad1;
567 } 567 }
568 568
569 if (crypto_flags != CRYPTO_TFM_MODE_ECB && !ivmode) { 569 if (crypto_flags != CRYPTO_TFM_MODE_ECB && !ivmode) {
570 ti->error = PFX "This chaining mode requires an IV mechanism"; 570 ti->error = "This chaining mode requires an IV mechanism";
571 goto bad1; 571 goto bad1;
572 } 572 }
573 573
574 tfm = crypto_alloc_tfm(cipher, crypto_flags | CRYPTO_TFM_REQ_MAY_SLEEP); 574 tfm = crypto_alloc_tfm(cipher, crypto_flags | CRYPTO_TFM_REQ_MAY_SLEEP);
575 if (!tfm) { 575 if (!tfm) {
576 ti->error = PFX "Error allocating crypto tfm"; 576 ti->error = "Error allocating crypto tfm";
577 goto bad1; 577 goto bad1;
578 } 578 }
579 if (crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER) { 579 if (crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER) {
580 ti->error = PFX "Expected cipher algorithm"; 580 ti->error = "Expected cipher algorithm";
581 goto bad2; 581 goto bad2;
582 } 582 }
583 583
@@ -595,7 +595,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
595 else if (strcmp(ivmode, "essiv") == 0) 595 else if (strcmp(ivmode, "essiv") == 0)
596 cc->iv_gen_ops = &crypt_iv_essiv_ops; 596 cc->iv_gen_ops = &crypt_iv_essiv_ops;
597 else { 597 else {
598 ti->error = PFX "Invalid IV mode"; 598 ti->error = "Invalid IV mode";
599 goto bad2; 599 goto bad2;
600 } 600 }
601 601
@@ -610,7 +610,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
610 else { 610 else {
611 cc->iv_size = 0; 611 cc->iv_size = 0;
612 if (cc->iv_gen_ops) { 612 if (cc->iv_gen_ops) {
613 DMWARN(PFX "Selected cipher does not support IVs"); 613 DMWARN("Selected cipher does not support IVs");
614 if (cc->iv_gen_ops->dtr) 614 if (cc->iv_gen_ops->dtr)
615 cc->iv_gen_ops->dtr(cc); 615 cc->iv_gen_ops->dtr(cc);
616 cc->iv_gen_ops = NULL; 616 cc->iv_gen_ops = NULL;
@@ -619,36 +619,36 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
619 619
620 cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool); 620 cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
621 if (!cc->io_pool) { 621 if (!cc->io_pool) {
622 ti->error = PFX "Cannot allocate crypt io mempool"; 622 ti->error = "Cannot allocate crypt io mempool";
623 goto bad3; 623 goto bad3;
624 } 624 }
625 625
626 cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0); 626 cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
627 if (!cc->page_pool) { 627 if (!cc->page_pool) {
628 ti->error = PFX "Cannot allocate page mempool"; 628 ti->error = "Cannot allocate page mempool";
629 goto bad4; 629 goto bad4;
630 } 630 }
631 631
632 if (tfm->crt_cipher.cit_setkey(tfm, cc->key, key_size) < 0) { 632 if (tfm->crt_cipher.cit_setkey(tfm, cc->key, key_size) < 0) {
633 ti->error = PFX "Error setting key"; 633 ti->error = "Error setting key";
634 goto bad5; 634 goto bad5;
635 } 635 }
636 636
637 if (sscanf(argv[2], "%llu", &tmpll) != 1) { 637 if (sscanf(argv[2], "%llu", &tmpll) != 1) {
638 ti->error = PFX "Invalid iv_offset sector"; 638 ti->error = "Invalid iv_offset sector";
639 goto bad5; 639 goto bad5;
640 } 640 }
641 cc->iv_offset = tmpll; 641 cc->iv_offset = tmpll;
642 642
643 if (sscanf(argv[4], "%llu", &tmpll) != 1) { 643 if (sscanf(argv[4], "%llu", &tmpll) != 1) {
644 ti->error = PFX "Invalid device sector"; 644 ti->error = "Invalid device sector";
645 goto bad5; 645 goto bad5;
646 } 646 }
647 cc->start = tmpll; 647 cc->start = tmpll;
648 648
649 if (dm_get_device(ti, argv[3], cc->start, ti->len, 649 if (dm_get_device(ti, argv[3], cc->start, ti->len,
650 dm_table_get_mode(ti->table), &cc->dev)) { 650 dm_table_get_mode(ti->table), &cc->dev)) {
651 ti->error = PFX "Device lookup failed"; 651 ti->error = "Device lookup failed";
652 goto bad5; 652 goto bad5;
653 } 653 }
654 654
@@ -657,7 +657,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
657 *(ivopts - 1) = ':'; 657 *(ivopts - 1) = ':';
658 cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL); 658 cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
659 if (!cc->iv_mode) { 659 if (!cc->iv_mode) {
660 ti->error = PFX "Error kmallocing iv_mode string"; 660 ti->error = "Error kmallocing iv_mode string";
661 goto bad5; 661 goto bad5;
662 } 662 }
663 strcpy(cc->iv_mode, ivmode); 663 strcpy(cc->iv_mode, ivmode);
@@ -918,13 +918,13 @@ static int __init dm_crypt_init(void)
918 _kcryptd_workqueue = create_workqueue("kcryptd"); 918 _kcryptd_workqueue = create_workqueue("kcryptd");
919 if (!_kcryptd_workqueue) { 919 if (!_kcryptd_workqueue) {
920 r = -ENOMEM; 920 r = -ENOMEM;
921 DMERR(PFX "couldn't create kcryptd"); 921 DMERR("couldn't create kcryptd");
922 goto bad1; 922 goto bad1;
923 } 923 }
924 924
925 r = dm_register_target(&crypt_target); 925 r = dm_register_target(&crypt_target);
926 if (r < 0) { 926 if (r < 0) {
927 DMERR(PFX "register failed %d", r); 927 DMERR("register failed %d", r);
928 goto bad2; 928 goto bad2;
929 } 929 }
930 930
@@ -942,7 +942,7 @@ static void __exit dm_crypt_exit(void)
942 int r = dm_unregister_target(&crypt_target); 942 int r = dm_unregister_target(&crypt_target);
943 943
944 if (r < 0) 944 if (r < 0)
945 DMERR(PFX "unregister failed %d", r); 945 DMERR("unregister failed %d", r);
946 946
947 destroy_workqueue(_kcryptd_workqueue); 947 destroy_workqueue(_kcryptd_workqueue);
948 kmem_cache_destroy(_crypt_io_pool); 948 kmem_cache_destroy(_crypt_io_pool);
diff --git a/drivers/md/dm-emc.c b/drivers/md/dm-emc.c
index c7067674dc..2a374ccb30 100644
--- a/drivers/md/dm-emc.c
+++ b/drivers/md/dm-emc.c
@@ -12,6 +12,8 @@
12#include <scsi/scsi.h> 12#include <scsi/scsi.h>
13#include <scsi/scsi_cmnd.h> 13#include <scsi/scsi_cmnd.h>
14 14
15#define DM_MSG_PREFIX "multipath emc"
16
15struct emc_handler { 17struct emc_handler {
16 spinlock_t lock; 18 spinlock_t lock;
17 19
@@ -66,7 +68,7 @@ static struct bio *get_failover_bio(struct path *path, unsigned data_size)
66 68
67 bio = bio_alloc(GFP_ATOMIC, 1); 69 bio = bio_alloc(GFP_ATOMIC, 1);
68 if (!bio) { 70 if (!bio) {
69 DMERR("dm-emc: get_failover_bio: bio_alloc() failed."); 71 DMERR("get_failover_bio: bio_alloc() failed.");
70 return NULL; 72 return NULL;
71 } 73 }
72 74
@@ -78,13 +80,13 @@ static struct bio *get_failover_bio(struct path *path, unsigned data_size)
78 80
79 page = alloc_page(GFP_ATOMIC); 81 page = alloc_page(GFP_ATOMIC);
80 if (!page) { 82 if (!page) {
81 DMERR("dm-emc: get_failover_bio: alloc_page() failed."); 83 DMERR("get_failover_bio: alloc_page() failed.");
82 bio_put(bio); 84 bio_put(bio);
83 return NULL; 85 return NULL;
84 } 86 }
85 87
86 if (bio_add_page(bio, page, data_size, 0) != data_size) { 88 if (bio_add_page(bio, page, data_size, 0) != data_size) {
87 DMERR("dm-emc: get_failover_bio: alloc_page() failed."); 89 DMERR("get_failover_bio: alloc_page() failed.");
88 __free_page(page); 90 __free_page(page);
89 bio_put(bio); 91 bio_put(bio);
90 return NULL; 92 return NULL;
@@ -103,7 +105,7 @@ static struct request *get_failover_req(struct emc_handler *h,
103 /* FIXME: Figure out why it fails with GFP_ATOMIC. */ 105 /* FIXME: Figure out why it fails with GFP_ATOMIC. */
104 rq = blk_get_request(q, WRITE, __GFP_WAIT); 106 rq = blk_get_request(q, WRITE, __GFP_WAIT);
105 if (!rq) { 107 if (!rq) {
106 DMERR("dm-emc: get_failover_req: blk_get_request failed"); 108 DMERR("get_failover_req: blk_get_request failed");
107 return NULL; 109 return NULL;
108 } 110 }
109 111
@@ -160,7 +162,7 @@ static struct request *emc_trespass_get(struct emc_handler *h,
160 162
161 bio = get_failover_bio(path, data_size); 163 bio = get_failover_bio(path, data_size);
162 if (!bio) { 164 if (!bio) {
163 DMERR("dm-emc: emc_trespass_get: no bio"); 165 DMERR("emc_trespass_get: no bio");
164 return NULL; 166 return NULL;
165 } 167 }
166 168
@@ -173,7 +175,7 @@ static struct request *emc_trespass_get(struct emc_handler *h,
173 /* get request for block layer packet command */ 175 /* get request for block layer packet command */
174 rq = get_failover_req(h, bio, path); 176 rq = get_failover_req(h, bio, path);
175 if (!rq) { 177 if (!rq) {
176 DMERR("dm-emc: emc_trespass_get: no rq"); 178 DMERR("emc_trespass_get: no rq");
177 free_bio(bio); 179 free_bio(bio);
178 return NULL; 180 return NULL;
179 } 181 }
@@ -200,18 +202,18 @@ static void emc_pg_init(struct hw_handler *hwh, unsigned bypassed,
200 * initial state passed into us and then get an update here. 202 * initial state passed into us and then get an update here.
201 */ 203 */
202 if (!q) { 204 if (!q) {
203 DMINFO("dm-emc: emc_pg_init: no queue"); 205 DMINFO("emc_pg_init: no queue");
204 goto fail_path; 206 goto fail_path;
205 } 207 }
206 208
207 /* FIXME: The request should be pre-allocated. */ 209 /* FIXME: The request should be pre-allocated. */
208 rq = emc_trespass_get(hwh->context, path); 210 rq = emc_trespass_get(hwh->context, path);
209 if (!rq) { 211 if (!rq) {
210 DMERR("dm-emc: emc_pg_init: no rq"); 212 DMERR("emc_pg_init: no rq");
211 goto fail_path; 213 goto fail_path;
212 } 214 }
213 215
214 DMINFO("dm-emc: emc_pg_init: sending switch-over command"); 216 DMINFO("emc_pg_init: sending switch-over command");
215 elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 1); 217 elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 1);
216 return; 218 return;
217 219
@@ -241,18 +243,18 @@ static int emc_create(struct hw_handler *hwh, unsigned argc, char **argv)
241 hr = 0; 243 hr = 0;
242 short_trespass = 0; 244 short_trespass = 0;
243 } else if (argc != 2) { 245 } else if (argc != 2) {
244 DMWARN("dm-emc hwhandler: incorrect number of arguments"); 246 DMWARN("incorrect number of arguments");
245 return -EINVAL; 247 return -EINVAL;
246 } else { 248 } else {
247 if ((sscanf(argv[0], "%u", &short_trespass) != 1) 249 if ((sscanf(argv[0], "%u", &short_trespass) != 1)
248 || (short_trespass > 1)) { 250 || (short_trespass > 1)) {
249 DMWARN("dm-emc: invalid trespass mode selected"); 251 DMWARN("invalid trespass mode selected");
250 return -EINVAL; 252 return -EINVAL;
251 } 253 }
252 254
253 if ((sscanf(argv[1], "%u", &hr) != 1) 255 if ((sscanf(argv[1], "%u", &hr) != 1)
254 || (hr > 1)) { 256 || (hr > 1)) {
255 DMWARN("dm-emc: invalid honor reservation flag selected"); 257 DMWARN("invalid honor reservation flag selected");
256 return -EINVAL; 258 return -EINVAL;
257 } 259 }
258 } 260 }
@@ -264,14 +266,14 @@ static int emc_create(struct hw_handler *hwh, unsigned argc, char **argv)
264 hwh->context = h; 266 hwh->context = h;
265 267
266 if ((h->short_trespass = short_trespass)) 268 if ((h->short_trespass = short_trespass))
267 DMWARN("dm-emc: short trespass command will be send"); 269 DMWARN("short trespass command will be send");
268 else 270 else
269 DMWARN("dm-emc: long trespass command will be send"); 271 DMWARN("long trespass command will be send");
270 272
271 if ((h->hr = hr)) 273 if ((h->hr = hr))
272 DMWARN("dm-emc: honor reservation bit will be set"); 274 DMWARN("honor reservation bit will be set");
273 else 275 else
274 DMWARN("dm-emc: honor reservation bit will not be set (default)"); 276 DMWARN("honor reservation bit will not be set (default)");
275 277
276 return 0; 278 return 0;
277} 279}
@@ -336,9 +338,9 @@ static int __init dm_emc_init(void)
336 int r = dm_register_hw_handler(&emc_hwh); 338 int r = dm_register_hw_handler(&emc_hwh);
337 339
338 if (r < 0) 340 if (r < 0)
339 DMERR("emc: register failed %d", r); 341 DMERR("register failed %d", r);
340 342
341 DMINFO("dm-emc version 0.0.3 loaded"); 343 DMINFO("version 0.0.3 loaded");
342 344
343 return r; 345 return r;
344} 346}
@@ -348,7 +350,7 @@ static void __exit dm_emc_exit(void)
348 int r = dm_unregister_hw_handler(&emc_hwh); 350 int r = dm_unregister_hw_handler(&emc_hwh);
349 351
350 if (r < 0) 352 if (r < 0)
351 DMERR("emc: unregister failed %d", r); 353 DMERR("unregister failed %d", r);
352} 354}
353 355
354module_init(dm_emc_init); 356module_init(dm_emc_init);
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index cc07bbebbb..d12379b5cd 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -16,6 +16,8 @@
16#include <linux/vmalloc.h> 16#include <linux/vmalloc.h>
17#include <linux/slab.h> 17#include <linux/slab.h>
18 18
19#define DM_MSG_PREFIX "snapshots"
20
19/*----------------------------------------------------------------- 21/*-----------------------------------------------------------------
20 * Persistent snapshots, by persistent we mean that the snapshot 22 * Persistent snapshots, by persistent we mean that the snapshot
21 * will survive a reboot. 23 * will survive a reboot.
@@ -91,7 +93,6 @@ struct pstore {
91 struct dm_snapshot *snap; /* up pointer to my snapshot */ 93 struct dm_snapshot *snap; /* up pointer to my snapshot */
92 int version; 94 int version;
93 int valid; 95 int valid;
94 uint32_t chunk_size;
95 uint32_t exceptions_per_area; 96 uint32_t exceptions_per_area;
96 97
97 /* 98 /*
@@ -133,7 +134,7 @@ static int alloc_area(struct pstore *ps)
133 int r = -ENOMEM; 134 int r = -ENOMEM;
134 size_t len; 135 size_t len;
135 136
136 len = ps->chunk_size << SECTOR_SHIFT; 137 len = ps->snap->chunk_size << SECTOR_SHIFT;
137 138
138 /* 139 /*
139 * Allocate the chunk_size block of memory that will hold 140 * Allocate the chunk_size block of memory that will hold
@@ -160,8 +161,8 @@ static int chunk_io(struct pstore *ps, uint32_t chunk, int rw)
160 unsigned long bits; 161 unsigned long bits;
161 162
162 where.bdev = ps->snap->cow->bdev; 163 where.bdev = ps->snap->cow->bdev;
163 where.sector = ps->chunk_size * chunk; 164 where.sector = ps->snap->chunk_size * chunk;
164 where.count = ps->chunk_size; 165 where.count = ps->snap->chunk_size;
165 166
166 return dm_io_sync_vm(1, &where, rw, ps->area, &bits); 167 return dm_io_sync_vm(1, &where, rw, ps->area, &bits);
167} 168}
@@ -188,7 +189,7 @@ static int area_io(struct pstore *ps, uint32_t area, int rw)
188 189
189static int zero_area(struct pstore *ps, uint32_t area) 190static int zero_area(struct pstore *ps, uint32_t area)
190{ 191{
191 memset(ps->area, 0, ps->chunk_size << SECTOR_SHIFT); 192 memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);
192 return area_io(ps, area, WRITE); 193 return area_io(ps, area, WRITE);
193} 194}
194 195
@@ -196,6 +197,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
196{ 197{
197 int r; 198 int r;
198 struct disk_header *dh; 199 struct disk_header *dh;
200 chunk_t chunk_size;
199 201
200 r = chunk_io(ps, 0, READ); 202 r = chunk_io(ps, 0, READ);
201 if (r) 203 if (r)
@@ -210,8 +212,29 @@ static int read_header(struct pstore *ps, int *new_snapshot)
210 *new_snapshot = 0; 212 *new_snapshot = 0;
211 ps->valid = le32_to_cpu(dh->valid); 213 ps->valid = le32_to_cpu(dh->valid);
212 ps->version = le32_to_cpu(dh->version); 214 ps->version = le32_to_cpu(dh->version);
213 ps->chunk_size = le32_to_cpu(dh->chunk_size); 215 chunk_size = le32_to_cpu(dh->chunk_size);
214 216 if (ps->snap->chunk_size != chunk_size) {
217 DMWARN("chunk size %llu in device metadata overrides "
218 "table chunk size of %llu.",
219 (unsigned long long)chunk_size,
220 (unsigned long long)ps->snap->chunk_size);
221
222 /* We had a bogus chunk_size. Fix stuff up. */
223 dm_io_put(sectors_to_pages(ps->snap->chunk_size));
224 free_area(ps);
225
226 ps->snap->chunk_size = chunk_size;
227 ps->snap->chunk_mask = chunk_size - 1;
228 ps->snap->chunk_shift = ffs(chunk_size) - 1;
229
230 r = alloc_area(ps);
231 if (r)
232 return r;
233
234 r = dm_io_get(sectors_to_pages(chunk_size));
235 if (r)
236 return r;
237 }
215 } else { 238 } else {
216 DMWARN("Invalid/corrupt snapshot"); 239 DMWARN("Invalid/corrupt snapshot");
217 r = -ENXIO; 240 r = -ENXIO;
@@ -224,13 +247,13 @@ static int write_header(struct pstore *ps)
224{ 247{
225 struct disk_header *dh; 248 struct disk_header *dh;
226 249
227 memset(ps->area, 0, ps->chunk_size << SECTOR_SHIFT); 250 memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);
228 251
229 dh = (struct disk_header *) ps->area; 252 dh = (struct disk_header *) ps->area;
230 dh->magic = cpu_to_le32(SNAP_MAGIC); 253 dh->magic = cpu_to_le32(SNAP_MAGIC);
231 dh->valid = cpu_to_le32(ps->valid); 254 dh->valid = cpu_to_le32(ps->valid);
232 dh->version = cpu_to_le32(ps->version); 255 dh->version = cpu_to_le32(ps->version);
233 dh->chunk_size = cpu_to_le32(ps->chunk_size); 256 dh->chunk_size = cpu_to_le32(ps->snap->chunk_size);
234 257
235 return chunk_io(ps, 0, WRITE); 258 return chunk_io(ps, 0, WRITE);
236} 259}
@@ -365,7 +388,7 @@ static void persistent_destroy(struct exception_store *store)
365{ 388{
366 struct pstore *ps = get_info(store); 389 struct pstore *ps = get_info(store);
367 390
368 dm_io_put(sectors_to_pages(ps->chunk_size)); 391 dm_io_put(sectors_to_pages(ps->snap->chunk_size));
369 vfree(ps->callbacks); 392 vfree(ps->callbacks);
370 free_area(ps); 393 free_area(ps);
371 kfree(ps); 394 kfree(ps);
@@ -384,6 +407,16 @@ static int persistent_read_metadata(struct exception_store *store)
384 return r; 407 return r;
385 408
386 /* 409 /*
410 * Now we know correct chunk_size, complete the initialisation.
411 */
412 ps->exceptions_per_area = (ps->snap->chunk_size << SECTOR_SHIFT) /
413 sizeof(struct disk_exception);
414 ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
415 sizeof(*ps->callbacks));
416 if (!ps->callbacks)
417 return -ENOMEM;
418
419 /*
387 * Do we need to setup a new snapshot ? 420 * Do we need to setup a new snapshot ?
388 */ 421 */
389 if (new_snapshot) { 422 if (new_snapshot) {
@@ -533,9 +566,6 @@ int dm_create_persistent(struct exception_store *store, uint32_t chunk_size)
533 ps->snap = store->snap; 566 ps->snap = store->snap;
534 ps->valid = 1; 567 ps->valid = 1;
535 ps->version = SNAPSHOT_DISK_VERSION; 568 ps->version = SNAPSHOT_DISK_VERSION;
536 ps->chunk_size = chunk_size;
537 ps->exceptions_per_area = (chunk_size << SECTOR_SHIFT) /
538 sizeof(struct disk_exception);
539 ps->next_free = 2; /* skipping the header and first area */ 569 ps->next_free = 2; /* skipping the header and first area */
540 ps->current_committed = 0; 570 ps->current_committed = 0;
541 571
@@ -543,18 +573,9 @@ int dm_create_persistent(struct exception_store *store, uint32_t chunk_size)
543 if (r) 573 if (r)
544 goto bad; 574 goto bad;
545 575
546 /*
547 * Allocate space for all the callbacks.
548 */
549 ps->callback_count = 0; 576 ps->callback_count = 0;
550 atomic_set(&ps->pending_count, 0); 577 atomic_set(&ps->pending_count, 0);
551 ps->callbacks = dm_vcalloc(ps->exceptions_per_area, 578 ps->callbacks = NULL;
552 sizeof(*ps->callbacks));
553
554 if (!ps->callbacks) {
555 r = -ENOMEM;
556 goto bad;
557 }
558 579
559 store->destroy = persistent_destroy; 580 store->destroy = persistent_destroy;
560 store->read_metadata = persistent_read_metadata; 581 store->read_metadata = persistent_read_metadata;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 8edd643541..3edb3477f9 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited. 2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
3 * Copyright (C) 2004 - 2005 Red Hat, Inc. All rights reserved. 3 * Copyright (C) 2004 - 2006 Red Hat, Inc. All rights reserved.
4 * 4 *
5 * This file is released under the GPL. 5 * This file is released under the GPL.
6 */ 6 */
@@ -19,6 +19,7 @@
19 19
20#include <asm/uaccess.h> 20#include <asm/uaccess.h>
21 21
22#define DM_MSG_PREFIX "ioctl"
22#define DM_DRIVER_EMAIL "dm-devel@redhat.com" 23#define DM_DRIVER_EMAIL "dm-devel@redhat.com"
23 24
24/*----------------------------------------------------------------- 25/*-----------------------------------------------------------------
@@ -48,7 +49,7 @@ struct vers_iter {
48static struct list_head _name_buckets[NUM_BUCKETS]; 49static struct list_head _name_buckets[NUM_BUCKETS];
49static struct list_head _uuid_buckets[NUM_BUCKETS]; 50static struct list_head _uuid_buckets[NUM_BUCKETS];
50 51
51static void dm_hash_remove_all(void); 52static void dm_hash_remove_all(int keep_open_devices);
52 53
53/* 54/*
54 * Guards access to both hash tables. 55 * Guards access to both hash tables.
@@ -73,7 +74,7 @@ static int dm_hash_init(void)
73 74
74static void dm_hash_exit(void) 75static void dm_hash_exit(void)
75{ 76{
76 dm_hash_remove_all(); 77 dm_hash_remove_all(0);
77 devfs_remove(DM_DIR); 78 devfs_remove(DM_DIR);
78} 79}
79 80
@@ -102,8 +103,10 @@ static struct hash_cell *__get_name_cell(const char *str)
102 unsigned int h = hash_str(str); 103 unsigned int h = hash_str(str);
103 104
104 list_for_each_entry (hc, _name_buckets + h, name_list) 105 list_for_each_entry (hc, _name_buckets + h, name_list)
105 if (!strcmp(hc->name, str)) 106 if (!strcmp(hc->name, str)) {
107 dm_get(hc->md);
106 return hc; 108 return hc;
109 }
107 110
108 return NULL; 111 return NULL;
109} 112}
@@ -114,8 +117,10 @@ static struct hash_cell *__get_uuid_cell(const char *str)
114 unsigned int h = hash_str(str); 117 unsigned int h = hash_str(str);
115 118
116 list_for_each_entry (hc, _uuid_buckets + h, uuid_list) 119 list_for_each_entry (hc, _uuid_buckets + h, uuid_list)
117 if (!strcmp(hc->uuid, str)) 120 if (!strcmp(hc->uuid, str)) {
121 dm_get(hc->md);
118 return hc; 122 return hc;
123 }
119 124
120 return NULL; 125 return NULL;
121} 126}
@@ -191,7 +196,7 @@ static int unregister_with_devfs(struct hash_cell *hc)
191 */ 196 */
192static int dm_hash_insert(const char *name, const char *uuid, struct mapped_device *md) 197static int dm_hash_insert(const char *name, const char *uuid, struct mapped_device *md)
193{ 198{
194 struct hash_cell *cell; 199 struct hash_cell *cell, *hc;
195 200
196 /* 201 /*
197 * Allocate the new cells. 202 * Allocate the new cells.
@@ -204,14 +209,19 @@ static int dm_hash_insert(const char *name, const char *uuid, struct mapped_devi
204 * Insert the cell into both hash tables. 209 * Insert the cell into both hash tables.
205 */ 210 */
206 down_write(&_hash_lock); 211 down_write(&_hash_lock);
207 if (__get_name_cell(name)) 212 hc = __get_name_cell(name);
213 if (hc) {
214 dm_put(hc->md);
208 goto bad; 215 goto bad;
216 }
209 217
210 list_add(&cell->name_list, _name_buckets + hash_str(name)); 218 list_add(&cell->name_list, _name_buckets + hash_str(name));
211 219
212 if (uuid) { 220 if (uuid) {
213 if (__get_uuid_cell(uuid)) { 221 hc = __get_uuid_cell(uuid);
222 if (hc) {
214 list_del(&cell->name_list); 223 list_del(&cell->name_list);
224 dm_put(hc->md);
215 goto bad; 225 goto bad;
216 } 226 }
217 list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid)); 227 list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid));
@@ -251,19 +261,41 @@ static void __hash_remove(struct hash_cell *hc)
251 free_cell(hc); 261 free_cell(hc);
252} 262}
253 263
254static void dm_hash_remove_all(void) 264static void dm_hash_remove_all(int keep_open_devices)
255{ 265{
256 int i; 266 int i, dev_skipped, dev_removed;
257 struct hash_cell *hc; 267 struct hash_cell *hc;
258 struct list_head *tmp, *n; 268 struct list_head *tmp, *n;
259 269
260 down_write(&_hash_lock); 270 down_write(&_hash_lock);
271
272retry:
273 dev_skipped = dev_removed = 0;
261 for (i = 0; i < NUM_BUCKETS; i++) { 274 for (i = 0; i < NUM_BUCKETS; i++) {
262 list_for_each_safe (tmp, n, _name_buckets + i) { 275 list_for_each_safe (tmp, n, _name_buckets + i) {
263 hc = list_entry(tmp, struct hash_cell, name_list); 276 hc = list_entry(tmp, struct hash_cell, name_list);
277
278 if (keep_open_devices &&
279 dm_lock_for_deletion(hc->md)) {
280 dev_skipped++;
281 continue;
282 }
264 __hash_remove(hc); 283 __hash_remove(hc);
284 dev_removed = 1;
265 } 285 }
266 } 286 }
287
288 /*
289 * Some mapped devices may be using other mapped devices, so if any
290 * still exist, repeat until we make no further progress.
291 */
292 if (dev_skipped) {
293 if (dev_removed)
294 goto retry;
295
296 DMWARN("remove_all left %d open device(s)", dev_skipped);
297 }
298
267 up_write(&_hash_lock); 299 up_write(&_hash_lock);
268} 300}
269 301
@@ -289,6 +321,7 @@ static int dm_hash_rename(const char *old, const char *new)
289 if (hc) { 321 if (hc) {
290 DMWARN("asked to rename to an already existing name %s -> %s", 322 DMWARN("asked to rename to an already existing name %s -> %s",
291 old, new); 323 old, new);
324 dm_put(hc->md);
292 up_write(&_hash_lock); 325 up_write(&_hash_lock);
293 kfree(new_name); 326 kfree(new_name);
294 return -EBUSY; 327 return -EBUSY;
@@ -328,6 +361,7 @@ static int dm_hash_rename(const char *old, const char *new)
328 dm_table_put(table); 361 dm_table_put(table);
329 } 362 }
330 363
364 dm_put(hc->md);
331 up_write(&_hash_lock); 365 up_write(&_hash_lock);
332 kfree(old_name); 366 kfree(old_name);
333 return 0; 367 return 0;
@@ -344,7 +378,7 @@ typedef int (*ioctl_fn)(struct dm_ioctl *param, size_t param_size);
344 378
345static int remove_all(struct dm_ioctl *param, size_t param_size) 379static int remove_all(struct dm_ioctl *param, size_t param_size)
346{ 380{
347 dm_hash_remove_all(); 381 dm_hash_remove_all(1);
348 param->data_size = 0; 382 param->data_size = 0;
349 return 0; 383 return 0;
350} 384}
@@ -524,7 +558,6 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
524{ 558{
525 struct gendisk *disk = dm_disk(md); 559 struct gendisk *disk = dm_disk(md);
526 struct dm_table *table; 560 struct dm_table *table;
527 struct block_device *bdev;
528 561
529 param->flags &= ~(DM_SUSPEND_FLAG | DM_READONLY_FLAG | 562 param->flags &= ~(DM_SUSPEND_FLAG | DM_READONLY_FLAG |
530 DM_ACTIVE_PRESENT_FLAG); 563 DM_ACTIVE_PRESENT_FLAG);
@@ -534,20 +567,12 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
534 567
535 param->dev = huge_encode_dev(MKDEV(disk->major, disk->first_minor)); 568 param->dev = huge_encode_dev(MKDEV(disk->major, disk->first_minor));
536 569
537 if (!(param->flags & DM_SKIP_BDGET_FLAG)) { 570 /*
538 bdev = bdget_disk(disk, 0); 571 * Yes, this will be out of date by the time it gets back
539 if (!bdev) 572 * to userland, but it is still very useful for
540 return -ENXIO; 573 * debugging.
541 574 */
542 /* 575 param->open_count = dm_open_count(md);
543 * Yes, this will be out of date by the time it gets back
544 * to userland, but it is still very useful for
545 * debugging.
546 */
547 param->open_count = bdev->bd_openers;
548 bdput(bdev);
549 } else
550 param->open_count = -1;
551 576
552 if (disk->policy) 577 if (disk->policy)
553 param->flags |= DM_READONLY_FLAG; 578 param->flags |= DM_READONLY_FLAG;
@@ -567,7 +592,7 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
567 592
568static int dev_create(struct dm_ioctl *param, size_t param_size) 593static int dev_create(struct dm_ioctl *param, size_t param_size)
569{ 594{
570 int r; 595 int r, m = DM_ANY_MINOR;
571 struct mapped_device *md; 596 struct mapped_device *md;
572 597
573 r = check_name(param->name); 598 r = check_name(param->name);
@@ -575,10 +600,9 @@ static int dev_create(struct dm_ioctl *param, size_t param_size)
575 return r; 600 return r;
576 601
577 if (param->flags & DM_PERSISTENT_DEV_FLAG) 602 if (param->flags & DM_PERSISTENT_DEV_FLAG)
578 r = dm_create_with_minor(MINOR(huge_decode_dev(param->dev)), &md); 603 m = MINOR(huge_decode_dev(param->dev));
579 else
580 r = dm_create(&md);
581 604
605 r = dm_create(m, &md);
582 if (r) 606 if (r)
583 return r; 607 return r;
584 608
@@ -611,10 +635,8 @@ static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param)
611 return __get_name_cell(param->name); 635 return __get_name_cell(param->name);
612 636
613 md = dm_get_md(huge_decode_dev(param->dev)); 637 md = dm_get_md(huge_decode_dev(param->dev));
614 if (md) { 638 if (md)
615 mdptr = dm_get_mdptr(md); 639 mdptr = dm_get_mdptr(md);
616 dm_put(md);
617 }
618 640
619 return mdptr; 641 return mdptr;
620} 642}
@@ -628,7 +650,6 @@ static struct mapped_device *find_device(struct dm_ioctl *param)
628 hc = __find_device_hash_cell(param); 650 hc = __find_device_hash_cell(param);
629 if (hc) { 651 if (hc) {
630 md = hc->md; 652 md = hc->md;
631 dm_get(md);
632 653
633 /* 654 /*
634 * Sneakily write in both the name and the uuid 655 * Sneakily write in both the name and the uuid
@@ -653,6 +674,8 @@ static struct mapped_device *find_device(struct dm_ioctl *param)
653static int dev_remove(struct dm_ioctl *param, size_t param_size) 674static int dev_remove(struct dm_ioctl *param, size_t param_size)
654{ 675{
655 struct hash_cell *hc; 676 struct hash_cell *hc;
677 struct mapped_device *md;
678 int r;
656 679
657 down_write(&_hash_lock); 680 down_write(&_hash_lock);
658 hc = __find_device_hash_cell(param); 681 hc = __find_device_hash_cell(param);
@@ -663,8 +686,22 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size)
663 return -ENXIO; 686 return -ENXIO;
664 } 687 }
665 688
689 md = hc->md;
690
691 /*
692 * Ensure the device is not open and nothing further can open it.
693 */
694 r = dm_lock_for_deletion(md);
695 if (r) {
696 DMWARN("unable to remove open device %s", hc->name);
697 up_write(&_hash_lock);
698 dm_put(md);
699 return r;
700 }
701
666 __hash_remove(hc); 702 __hash_remove(hc);
667 up_write(&_hash_lock); 703 up_write(&_hash_lock);
704 dm_put(md);
668 param->data_size = 0; 705 param->data_size = 0;
669 return 0; 706 return 0;
670} 707}
@@ -790,7 +827,6 @@ static int do_resume(struct dm_ioctl *param)
790 } 827 }
791 828
792 md = hc->md; 829 md = hc->md;
793 dm_get(md);
794 830
795 new_map = hc->new_map; 831 new_map = hc->new_map;
796 hc->new_map = NULL; 832 hc->new_map = NULL;
@@ -1078,6 +1114,7 @@ static int table_clear(struct dm_ioctl *param, size_t param_size)
1078{ 1114{
1079 int r; 1115 int r;
1080 struct hash_cell *hc; 1116 struct hash_cell *hc;
1117 struct mapped_device *md;
1081 1118
1082 down_write(&_hash_lock); 1119 down_write(&_hash_lock);
1083 1120
@@ -1096,7 +1133,9 @@ static int table_clear(struct dm_ioctl *param, size_t param_size)
1096 param->flags &= ~DM_INACTIVE_PRESENT_FLAG; 1133 param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
1097 1134
1098 r = __dev_status(hc->md, param); 1135 r = __dev_status(hc->md, param);
1136 md = hc->md;
1099 up_write(&_hash_lock); 1137 up_write(&_hash_lock);
1138 dm_put(md);
1100 return r; 1139 return r;
1101} 1140}
1102 1141
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index daf586c089..47b3c62bbd 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -12,6 +12,8 @@
12#include <linux/bio.h> 12#include <linux/bio.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14 14
15#define DM_MSG_PREFIX "linear"
16
15/* 17/*
16 * Linear: maps a linear range of a device. 18 * Linear: maps a linear range of a device.
17 */ 19 */
@@ -29,7 +31,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
29 unsigned long long tmp; 31 unsigned long long tmp;
30 32
31 if (argc != 2) { 33 if (argc != 2) {
32 ti->error = "dm-linear: Invalid argument count"; 34 ti->error = "Invalid argument count";
33 return -EINVAL; 35 return -EINVAL;
34 } 36 }
35 37
@@ -111,7 +113,7 @@ int __init dm_linear_init(void)
111 int r = dm_register_target(&linear_target); 113 int r = dm_register_target(&linear_target);
112 114
113 if (r < 0) 115 if (r < 0)
114 DMERR("linear: register failed %d", r); 116 DMERR("register failed %d", r);
115 117
116 return r; 118 return r;
117} 119}
@@ -121,5 +123,5 @@ void dm_linear_exit(void)
121 int r = dm_unregister_target(&linear_target); 123 int r = dm_unregister_target(&linear_target);
122 124
123 if (r < 0) 125 if (r < 0)
124 DMERR("linear: unregister failed %d", r); 126 DMERR("unregister failed %d", r);
125} 127}
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index d73779a424..64b764bd02 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -12,6 +12,8 @@
12#include "dm-log.h" 12#include "dm-log.h"
13#include "dm-io.h" 13#include "dm-io.h"
14 14
15#define DM_MSG_PREFIX "mirror log"
16
15static LIST_HEAD(_log_types); 17static LIST_HEAD(_log_types);
16static DEFINE_SPINLOCK(_lock); 18static DEFINE_SPINLOCK(_lock);
17 19
@@ -155,8 +157,6 @@ struct log_c {
155 157
156 struct io_region header_location; 158 struct io_region header_location;
157 struct log_header *disk_header; 159 struct log_header *disk_header;
158
159 struct io_region bits_location;
160}; 160};
161 161
162/* 162/*
@@ -241,43 +241,21 @@ static inline int write_header(struct log_c *log)
241} 241}
242 242
243/*---------------------------------------------------------------- 243/*----------------------------------------------------------------
244 * Bits IO
245 *--------------------------------------------------------------*/
246static int read_bits(struct log_c *log)
247{
248 int r;
249 unsigned long ebits;
250
251 r = dm_io_sync_vm(1, &log->bits_location, READ,
252 log->clean_bits, &ebits);
253 if (r)
254 return r;
255
256 return 0;
257}
258
259static int write_bits(struct log_c *log)
260{
261 unsigned long ebits;
262 return dm_io_sync_vm(1, &log->bits_location, WRITE,
263 log->clean_bits, &ebits);
264}
265
266/*----------------------------------------------------------------
267 * core log constructor/destructor 244 * core log constructor/destructor
268 * 245 *
269 * argv contains region_size followed optionally by [no]sync 246 * argv contains region_size followed optionally by [no]sync
270 *--------------------------------------------------------------*/ 247 *--------------------------------------------------------------*/
271#define BYTE_SHIFT 3 248#define BYTE_SHIFT 3
272static int core_ctr(struct dirty_log *log, struct dm_target *ti, 249static int create_log_context(struct dirty_log *log, struct dm_target *ti,
273 unsigned int argc, char **argv) 250 unsigned int argc, char **argv,
251 struct dm_dev *dev)
274{ 252{
275 enum sync sync = DEFAULTSYNC; 253 enum sync sync = DEFAULTSYNC;
276 254
277 struct log_c *lc; 255 struct log_c *lc;
278 uint32_t region_size; 256 uint32_t region_size;
279 unsigned int region_count; 257 unsigned int region_count;
280 size_t bitset_size; 258 size_t bitset_size, buf_size;
281 259
282 if (argc < 1 || argc > 2) { 260 if (argc < 1 || argc > 2) {
283 DMWARN("wrong number of arguments to mirror log"); 261 DMWARN("wrong number of arguments to mirror log");
@@ -319,22 +297,53 @@ static int core_ctr(struct dirty_log *log, struct dm_target *ti,
319 * Work out how many "unsigned long"s we need to hold the bitset. 297 * Work out how many "unsigned long"s we need to hold the bitset.
320 */ 298 */
321 bitset_size = dm_round_up(region_count, 299 bitset_size = dm_round_up(region_count,
322 sizeof(unsigned long) << BYTE_SHIFT); 300 sizeof(*lc->clean_bits) << BYTE_SHIFT);
323 bitset_size >>= BYTE_SHIFT; 301 bitset_size >>= BYTE_SHIFT;
324 302
325 lc->bitset_uint32_count = bitset_size / 4; 303 lc->bitset_uint32_count = bitset_size / sizeof(*lc->clean_bits);
326 lc->clean_bits = vmalloc(bitset_size); 304
327 if (!lc->clean_bits) { 305 /*
328 DMWARN("couldn't allocate clean bitset"); 306 * Disk log?
329 kfree(lc); 307 */
330 return -ENOMEM; 308 if (!dev) {
309 lc->clean_bits = vmalloc(bitset_size);
310 if (!lc->clean_bits) {
311 DMWARN("couldn't allocate clean bitset");
312 kfree(lc);
313 return -ENOMEM;
314 }
315 lc->disk_header = NULL;
316 } else {
317 lc->log_dev = dev;
318 lc->header_location.bdev = lc->log_dev->bdev;
319 lc->header_location.sector = 0;
320
321 /*
322 * Buffer holds both header and bitset.
323 */
324 buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) +
325 bitset_size, ti->limits.hardsect_size);
326 lc->header_location.count = buf_size >> SECTOR_SHIFT;
327
328 lc->disk_header = vmalloc(buf_size);
329 if (!lc->disk_header) {
330 DMWARN("couldn't allocate disk log buffer");
331 kfree(lc);
332 return -ENOMEM;
333 }
334
335 lc->clean_bits = (void *)lc->disk_header +
336 (LOG_OFFSET << SECTOR_SHIFT);
331 } 337 }
338
332 memset(lc->clean_bits, -1, bitset_size); 339 memset(lc->clean_bits, -1, bitset_size);
333 340
334 lc->sync_bits = vmalloc(bitset_size); 341 lc->sync_bits = vmalloc(bitset_size);
335 if (!lc->sync_bits) { 342 if (!lc->sync_bits) {
336 DMWARN("couldn't allocate sync bitset"); 343 DMWARN("couldn't allocate sync bitset");
337 vfree(lc->clean_bits); 344 if (!dev)
345 vfree(lc->clean_bits);
346 vfree(lc->disk_header);
338 kfree(lc); 347 kfree(lc);
339 return -ENOMEM; 348 return -ENOMEM;
340 } 349 }
@@ -345,25 +354,40 @@ static int core_ctr(struct dirty_log *log, struct dm_target *ti,
345 if (!lc->recovering_bits) { 354 if (!lc->recovering_bits) {
346 DMWARN("couldn't allocate sync bitset"); 355 DMWARN("couldn't allocate sync bitset");
347 vfree(lc->sync_bits); 356 vfree(lc->sync_bits);
348 vfree(lc->clean_bits); 357 if (!dev)
358 vfree(lc->clean_bits);
359 vfree(lc->disk_header);
349 kfree(lc); 360 kfree(lc);
350 return -ENOMEM; 361 return -ENOMEM;
351 } 362 }
352 memset(lc->recovering_bits, 0, bitset_size); 363 memset(lc->recovering_bits, 0, bitset_size);
353 lc->sync_search = 0; 364 lc->sync_search = 0;
354 log->context = lc; 365 log->context = lc;
366
355 return 0; 367 return 0;
356} 368}
357 369
358static void core_dtr(struct dirty_log *log) 370static int core_ctr(struct dirty_log *log, struct dm_target *ti,
371 unsigned int argc, char **argv)
372{
373 return create_log_context(log, ti, argc, argv, NULL);
374}
375
376static void destroy_log_context(struct log_c *lc)
359{ 377{
360 struct log_c *lc = (struct log_c *) log->context;
361 vfree(lc->clean_bits);
362 vfree(lc->sync_bits); 378 vfree(lc->sync_bits);
363 vfree(lc->recovering_bits); 379 vfree(lc->recovering_bits);
364 kfree(lc); 380 kfree(lc);
365} 381}
366 382
383static void core_dtr(struct dirty_log *log)
384{
385 struct log_c *lc = (struct log_c *) log->context;
386
387 vfree(lc->clean_bits);
388 destroy_log_context(lc);
389}
390
367/*---------------------------------------------------------------- 391/*----------------------------------------------------------------
368 * disk log constructor/destructor 392 * disk log constructor/destructor
369 * 393 *
@@ -373,8 +397,6 @@ static int disk_ctr(struct dirty_log *log, struct dm_target *ti,
373 unsigned int argc, char **argv) 397 unsigned int argc, char **argv)
374{ 398{
375 int r; 399 int r;
376 size_t size;
377 struct log_c *lc;
378 struct dm_dev *dev; 400 struct dm_dev *dev;
379 401
380 if (argc < 2 || argc > 3) { 402 if (argc < 2 || argc > 3) {
@@ -387,49 +409,22 @@ static int disk_ctr(struct dirty_log *log, struct dm_target *ti,
387 if (r) 409 if (r)
388 return r; 410 return r;
389 411
390 r = core_ctr(log, ti, argc - 1, argv + 1); 412 r = create_log_context(log, ti, argc - 1, argv + 1, dev);
391 if (r) { 413 if (r) {
392 dm_put_device(ti, dev); 414 dm_put_device(ti, dev);
393 return r; 415 return r;
394 } 416 }
395 417
396 lc = (struct log_c *) log->context;
397 lc->log_dev = dev;
398
399 /* setup the disk header fields */
400 lc->header_location.bdev = lc->log_dev->bdev;
401 lc->header_location.sector = 0;
402 lc->header_location.count = 1;
403
404 /*
405 * We can't read less than this amount, even though we'll
406 * not be using most of this space.
407 */
408 lc->disk_header = vmalloc(1 << SECTOR_SHIFT);
409 if (!lc->disk_header)
410 goto bad;
411
412 /* setup the disk bitset fields */
413 lc->bits_location.bdev = lc->log_dev->bdev;
414 lc->bits_location.sector = LOG_OFFSET;
415
416 size = dm_round_up(lc->bitset_uint32_count * sizeof(uint32_t),
417 1 << SECTOR_SHIFT);
418 lc->bits_location.count = size >> SECTOR_SHIFT;
419 return 0; 418 return 0;
420
421 bad:
422 dm_put_device(ti, lc->log_dev);
423 core_dtr(log);
424 return -ENOMEM;
425} 419}
426 420
427static void disk_dtr(struct dirty_log *log) 421static void disk_dtr(struct dirty_log *log)
428{ 422{
429 struct log_c *lc = (struct log_c *) log->context; 423 struct log_c *lc = (struct log_c *) log->context;
424
430 dm_put_device(lc->ti, lc->log_dev); 425 dm_put_device(lc->ti, lc->log_dev);
431 vfree(lc->disk_header); 426 vfree(lc->disk_header);
432 core_dtr(log); 427 destroy_log_context(lc);
433} 428}
434 429
435static int count_bits32(uint32_t *addr, unsigned size) 430static int count_bits32(uint32_t *addr, unsigned size)
@@ -454,12 +449,7 @@ static int disk_resume(struct dirty_log *log)
454 if (r) 449 if (r)
455 return r; 450 return r;
456 451
457 /* read the bits */ 452 /* set or clear any new bits -- device has grown */
458 r = read_bits(lc);
459 if (r)
460 return r;
461
462 /* set or clear any new bits */
463 if (lc->sync == NOSYNC) 453 if (lc->sync == NOSYNC)
464 for (i = lc->header.nr_regions; i < lc->region_count; i++) 454 for (i = lc->header.nr_regions; i < lc->region_count; i++)
465 /* FIXME: amazingly inefficient */ 455 /* FIXME: amazingly inefficient */
@@ -469,15 +459,14 @@ static int disk_resume(struct dirty_log *log)
469 /* FIXME: amazingly inefficient */ 459 /* FIXME: amazingly inefficient */
470 log_clear_bit(lc, lc->clean_bits, i); 460 log_clear_bit(lc, lc->clean_bits, i);
471 461
462 /* clear any old bits -- device has shrunk */
463 for (i = lc->region_count; i % (sizeof(*lc->clean_bits) << BYTE_SHIFT); i++)
464 log_clear_bit(lc, lc->clean_bits, i);
465
472 /* copy clean across to sync */ 466 /* copy clean across to sync */
473 memcpy(lc->sync_bits, lc->clean_bits, size); 467 memcpy(lc->sync_bits, lc->clean_bits, size);
474 lc->sync_count = count_bits32(lc->clean_bits, lc->bitset_uint32_count); 468 lc->sync_count = count_bits32(lc->clean_bits, lc->bitset_uint32_count);
475 469
476 /* write the bits */
477 r = write_bits(lc);
478 if (r)
479 return r;
480
481 /* set the correct number of regions in the header */ 470 /* set the correct number of regions in the header */
482 lc->header.nr_regions = lc->region_count; 471 lc->header.nr_regions = lc->region_count;
483 472
@@ -518,7 +507,7 @@ static int disk_flush(struct dirty_log *log)
518 if (!lc->touched) 507 if (!lc->touched)
519 return 0; 508 return 0;
520 509
521 r = write_bits(lc); 510 r = write_header(lc);
522 if (!r) 511 if (!r)
523 lc->touched = 0; 512 lc->touched = 0;
524 513
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 1816f30678..217615b332 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -21,6 +21,7 @@
21#include <linux/workqueue.h> 21#include <linux/workqueue.h>
22#include <asm/atomic.h> 22#include <asm/atomic.h>
23 23
24#define DM_MSG_PREFIX "multipath"
24#define MESG_STR(x) x, sizeof(x) 25#define MESG_STR(x) x, sizeof(x)
25 26
26/* Path properties */ 27/* Path properties */
@@ -446,8 +447,6 @@ struct param {
446 char *error; 447 char *error;
447}; 448};
448 449
449#define ESTR(s) ("dm-multipath: " s)
450
451static int read_param(struct param *param, char *str, unsigned *v, char **error) 450static int read_param(struct param *param, char *str, unsigned *v, char **error)
452{ 451{
453 if (!str || 452 if (!str ||
@@ -495,12 +494,12 @@ static int parse_path_selector(struct arg_set *as, struct priority_group *pg,
495 unsigned ps_argc; 494 unsigned ps_argc;
496 495
497 static struct param _params[] = { 496 static struct param _params[] = {
498 {0, 1024, ESTR("invalid number of path selector args")}, 497 {0, 1024, "invalid number of path selector args"},
499 }; 498 };
500 499
501 pst = dm_get_path_selector(shift(as)); 500 pst = dm_get_path_selector(shift(as));
502 if (!pst) { 501 if (!pst) {
503 ti->error = ESTR("unknown path selector type"); 502 ti->error = "unknown path selector type";
504 return -EINVAL; 503 return -EINVAL;
505 } 504 }
506 505
@@ -511,7 +510,7 @@ static int parse_path_selector(struct arg_set *as, struct priority_group *pg,
511 r = pst->create(&pg->ps, ps_argc, as->argv); 510 r = pst->create(&pg->ps, ps_argc, as->argv);
512 if (r) { 511 if (r) {
513 dm_put_path_selector(pst); 512 dm_put_path_selector(pst);
514 ti->error = ESTR("path selector constructor failed"); 513 ti->error = "path selector constructor failed";
515 return r; 514 return r;
516 } 515 }
517 516
@@ -529,7 +528,7 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
529 528
530 /* we need at least a path arg */ 529 /* we need at least a path arg */
531 if (as->argc < 1) { 530 if (as->argc < 1) {
532 ti->error = ESTR("no device given"); 531 ti->error = "no device given";
533 return NULL; 532 return NULL;
534 } 533 }
535 534
@@ -540,7 +539,7 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
540 r = dm_get_device(ti, shift(as), ti->begin, ti->len, 539 r = dm_get_device(ti, shift(as), ti->begin, ti->len,
541 dm_table_get_mode(ti->table), &p->path.dev); 540 dm_table_get_mode(ti->table), &p->path.dev);
542 if (r) { 541 if (r) {
543 ti->error = ESTR("error getting device"); 542 ti->error = "error getting device";
544 goto bad; 543 goto bad;
545 } 544 }
546 545
@@ -562,8 +561,8 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
562 struct dm_target *ti) 561 struct dm_target *ti)
563{ 562{
564 static struct param _params[] = { 563 static struct param _params[] = {
565 {1, 1024, ESTR("invalid number of paths")}, 564 {1, 1024, "invalid number of paths"},
566 {0, 1024, ESTR("invalid number of selector args")} 565 {0, 1024, "invalid number of selector args"}
567 }; 566 };
568 567
569 int r; 568 int r;
@@ -572,13 +571,13 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
572 571
573 if (as->argc < 2) { 572 if (as->argc < 2) {
574 as->argc = 0; 573 as->argc = 0;
575 ti->error = ESTR("not enough priority group aruments"); 574 ti->error = "not enough priority group aruments";
576 return NULL; 575 return NULL;
577 } 576 }
578 577
579 pg = alloc_priority_group(); 578 pg = alloc_priority_group();
580 if (!pg) { 579 if (!pg) {
581 ti->error = ESTR("couldn't allocate priority group"); 580 ti->error = "couldn't allocate priority group";
582 return NULL; 581 return NULL;
583 } 582 }
584 pg->m = m; 583 pg->m = m;
@@ -633,7 +632,7 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m,
633 unsigned hw_argc; 632 unsigned hw_argc;
634 633
635 static struct param _params[] = { 634 static struct param _params[] = {
636 {0, 1024, ESTR("invalid number of hardware handler args")}, 635 {0, 1024, "invalid number of hardware handler args"},
637 }; 636 };
638 637
639 r = read_param(_params, shift(as), &hw_argc, &ti->error); 638 r = read_param(_params, shift(as), &hw_argc, &ti->error);
@@ -645,14 +644,14 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m,
645 644
646 hwht = dm_get_hw_handler(shift(as)); 645 hwht = dm_get_hw_handler(shift(as));
647 if (!hwht) { 646 if (!hwht) {
648 ti->error = ESTR("unknown hardware handler type"); 647 ti->error = "unknown hardware handler type";
649 return -EINVAL; 648 return -EINVAL;
650 } 649 }
651 650
652 r = hwht->create(&m->hw_handler, hw_argc - 1, as->argv); 651 r = hwht->create(&m->hw_handler, hw_argc - 1, as->argv);
653 if (r) { 652 if (r) {
654 dm_put_hw_handler(hwht); 653 dm_put_hw_handler(hwht);
655 ti->error = ESTR("hardware handler constructor failed"); 654 ti->error = "hardware handler constructor failed";
656 return r; 655 return r;
657 } 656 }
658 657
@@ -669,7 +668,7 @@ static int parse_features(struct arg_set *as, struct multipath *m,
669 unsigned argc; 668 unsigned argc;
670 669
671 static struct param _params[] = { 670 static struct param _params[] = {
672 {0, 1, ESTR("invalid number of feature args")}, 671 {0, 1, "invalid number of feature args"},
673 }; 672 };
674 673
675 r = read_param(_params, shift(as), &argc, &ti->error); 674 r = read_param(_params, shift(as), &argc, &ti->error);
@@ -692,8 +691,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
692{ 691{
693 /* target parameters */ 692 /* target parameters */
694 static struct param _params[] = { 693 static struct param _params[] = {
695 {1, 1024, ESTR("invalid number of priority groups")}, 694 {1, 1024, "invalid number of priority groups"},
696 {1, 1024, ESTR("invalid initial priority group number")}, 695 {1, 1024, "invalid initial priority group number"},
697 }; 696 };
698 697
699 int r; 698 int r;
@@ -707,7 +706,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
707 706
708 m = alloc_multipath(); 707 m = alloc_multipath();
709 if (!m) { 708 if (!m) {
710 ti->error = ESTR("can't allocate multipath"); 709 ti->error = "can't allocate multipath";
711 return -EINVAL; 710 return -EINVAL;
712 } 711 }
713 712
@@ -746,7 +745,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
746 } 745 }
747 746
748 if (pg_count != m->nr_priority_groups) { 747 if (pg_count != m->nr_priority_groups) {
749 ti->error = ESTR("priority group count mismatch"); 748 ti->error = "priority group count mismatch";
750 r = -EINVAL; 749 r = -EINVAL;
751 goto bad; 750 goto bad;
752 } 751 }
@@ -807,7 +806,7 @@ static int fail_path(struct pgpath *pgpath)
807 if (!pgpath->path.is_active) 806 if (!pgpath->path.is_active)
808 goto out; 807 goto out;
809 808
810 DMWARN("dm-multipath: Failing path %s.", pgpath->path.dev->name); 809 DMWARN("Failing path %s.", pgpath->path.dev->name);
811 810
812 pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path); 811 pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
813 pgpath->path.is_active = 0; 812 pgpath->path.is_active = 0;
@@ -1250,7 +1249,7 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
1250 r = dm_get_device(ti, argv[1], ti->begin, ti->len, 1249 r = dm_get_device(ti, argv[1], ti->begin, ti->len,
1251 dm_table_get_mode(ti->table), &dev); 1250 dm_table_get_mode(ti->table), &dev);
1252 if (r) { 1251 if (r) {
1253 DMWARN("dm-multipath message: error getting device %s", 1252 DMWARN("message: error getting device %s",
1254 argv[1]); 1253 argv[1]);
1255 return -EINVAL; 1254 return -EINVAL;
1256 } 1255 }
@@ -1309,7 +1308,7 @@ static int __init dm_multipath_init(void)
1309 return -ENOMEM; 1308 return -ENOMEM;
1310 } 1309 }
1311 1310
1312 DMINFO("dm-multipath version %u.%u.%u loaded", 1311 DMINFO("version %u.%u.%u loaded",
1313 multipath_target.version[0], multipath_target.version[1], 1312 multipath_target.version[0], multipath_target.version[1],
1314 multipath_target.version[2]); 1313 multipath_target.version[2]);
1315 1314
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index d12cf3e5e0..be48cedf98 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -20,6 +20,8 @@
20#include <linux/vmalloc.h> 20#include <linux/vmalloc.h>
21#include <linux/workqueue.h> 21#include <linux/workqueue.h>
22 22
23#define DM_MSG_PREFIX "raid1"
24
23static struct workqueue_struct *_kmirrord_wq; 25static struct workqueue_struct *_kmirrord_wq;
24static struct work_struct _kmirrord_work; 26static struct work_struct _kmirrord_work;
25 27
@@ -106,12 +108,42 @@ struct region {
106 struct bio_list delayed_bios; 108 struct bio_list delayed_bios;
107}; 109};
108 110
111
112/*-----------------------------------------------------------------
113 * Mirror set structures.
114 *---------------------------------------------------------------*/
115struct mirror {
116 atomic_t error_count;
117 struct dm_dev *dev;
118 sector_t offset;
119};
120
121struct mirror_set {
122 struct dm_target *ti;
123 struct list_head list;
124 struct region_hash rh;
125 struct kcopyd_client *kcopyd_client;
126
127 spinlock_t lock; /* protects the next two lists */
128 struct bio_list reads;
129 struct bio_list writes;
130
131 /* recovery */
132 region_t nr_regions;
133 int in_sync;
134
135 struct mirror *default_mirror; /* Default mirror */
136
137 unsigned int nr_mirrors;
138 struct mirror mirror[0];
139};
140
109/* 141/*
110 * Conversion fns 142 * Conversion fns
111 */ 143 */
112static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio) 144static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio)
113{ 145{
114 return bio->bi_sector >> rh->region_shift; 146 return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift;
115} 147}
116 148
117static inline sector_t region_to_sector(struct region_hash *rh, region_t region) 149static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
@@ -458,11 +490,9 @@ static int __rh_recovery_prepare(struct region_hash *rh)
458 /* Already quiesced ? */ 490 /* Already quiesced ? */
459 if (atomic_read(&reg->pending)) 491 if (atomic_read(&reg->pending))
460 list_del_init(&reg->list); 492 list_del_init(&reg->list);
493 else
494 list_move(&reg->list, &rh->quiesced_regions);
461 495
462 else {
463 list_del_init(&reg->list);
464 list_add(&reg->list, &rh->quiesced_regions);
465 }
466 spin_unlock_irq(&rh->region_lock); 496 spin_unlock_irq(&rh->region_lock);
467 497
468 return 1; 498 return 1;
@@ -541,35 +571,6 @@ static void rh_start_recovery(struct region_hash *rh)
541 wake(); 571 wake();
542} 572}
543 573
544/*-----------------------------------------------------------------
545 * Mirror set structures.
546 *---------------------------------------------------------------*/
547struct mirror {
548 atomic_t error_count;
549 struct dm_dev *dev;
550 sector_t offset;
551};
552
553struct mirror_set {
554 struct dm_target *ti;
555 struct list_head list;
556 struct region_hash rh;
557 struct kcopyd_client *kcopyd_client;
558
559 spinlock_t lock; /* protects the next two lists */
560 struct bio_list reads;
561 struct bio_list writes;
562
563 /* recovery */
564 region_t nr_regions;
565 int in_sync;
566
567 struct mirror *default_mirror; /* Default mirror */
568
569 unsigned int nr_mirrors;
570 struct mirror mirror[0];
571};
572
573/* 574/*
574 * Every mirror should look like this one. 575 * Every mirror should look like this one.
575 */ 576 */
@@ -603,7 +604,7 @@ static void recovery_complete(int read_err, unsigned int write_err,
603 struct region *reg = (struct region *) context; 604 struct region *reg = (struct region *) context;
604 605
605 /* FIXME: better error handling */ 606 /* FIXME: better error handling */
606 rh_recovery_end(reg, read_err || write_err); 607 rh_recovery_end(reg, !(read_err || write_err));
607} 608}
608 609
609static int recover(struct mirror_set *ms, struct region *reg) 610static int recover(struct mirror_set *ms, struct region *reg)
@@ -893,7 +894,7 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
893 894
894 ms = kmalloc(len, GFP_KERNEL); 895 ms = kmalloc(len, GFP_KERNEL);
895 if (!ms) { 896 if (!ms) {
896 ti->error = "dm-mirror: Cannot allocate mirror context"; 897 ti->error = "Cannot allocate mirror context";
897 return NULL; 898 return NULL;
898 } 899 }
899 900
@@ -907,7 +908,7 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
907 ms->default_mirror = &ms->mirror[DEFAULT_MIRROR]; 908 ms->default_mirror = &ms->mirror[DEFAULT_MIRROR];
908 909
909 if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) { 910 if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
910 ti->error = "dm-mirror: Error creating dirty region hash"; 911 ti->error = "Error creating dirty region hash";
911 kfree(ms); 912 kfree(ms);
912 return NULL; 913 return NULL;
913 } 914 }
@@ -937,14 +938,14 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
937 unsigned long long offset; 938 unsigned long long offset;
938 939
939 if (sscanf(argv[1], "%llu", &offset) != 1) { 940 if (sscanf(argv[1], "%llu", &offset) != 1) {
940 ti->error = "dm-mirror: Invalid offset"; 941 ti->error = "Invalid offset";
941 return -EINVAL; 942 return -EINVAL;
942 } 943 }
943 944
944 if (dm_get_device(ti, argv[0], offset, ti->len, 945 if (dm_get_device(ti, argv[0], offset, ti->len,
945 dm_table_get_mode(ti->table), 946 dm_table_get_mode(ti->table),
946 &ms->mirror[mirror].dev)) { 947 &ms->mirror[mirror].dev)) {
947 ti->error = "dm-mirror: Device lookup failure"; 948 ti->error = "Device lookup failure";
948 return -ENXIO; 949 return -ENXIO;
949 } 950 }
950 951
@@ -981,30 +982,30 @@ static struct dirty_log *create_dirty_log(struct dm_target *ti,
981 struct dirty_log *dl; 982 struct dirty_log *dl;
982 983
983 if (argc < 2) { 984 if (argc < 2) {
984 ti->error = "dm-mirror: Insufficient mirror log arguments"; 985 ti->error = "Insufficient mirror log arguments";
985 return NULL; 986 return NULL;
986 } 987 }
987 988
988 if (sscanf(argv[1], "%u", &param_count) != 1) { 989 if (sscanf(argv[1], "%u", &param_count) != 1) {
989 ti->error = "dm-mirror: Invalid mirror log argument count"; 990 ti->error = "Invalid mirror log argument count";
990 return NULL; 991 return NULL;
991 } 992 }
992 993
993 *args_used = 2 + param_count; 994 *args_used = 2 + param_count;
994 995
995 if (argc < *args_used) { 996 if (argc < *args_used) {
996 ti->error = "dm-mirror: Insufficient mirror log arguments"; 997 ti->error = "Insufficient mirror log arguments";
997 return NULL; 998 return NULL;
998 } 999 }
999 1000
1000 dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2); 1001 dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2);
1001 if (!dl) { 1002 if (!dl) {
1002 ti->error = "dm-mirror: Error creating mirror dirty log"; 1003 ti->error = "Error creating mirror dirty log";
1003 return NULL; 1004 return NULL;
1004 } 1005 }
1005 1006
1006 if (!_check_region_size(ti, dl->type->get_region_size(dl))) { 1007 if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
1007 ti->error = "dm-mirror: Invalid region size"; 1008 ti->error = "Invalid region size";
1008 dm_destroy_dirty_log(dl); 1009 dm_destroy_dirty_log(dl);
1009 return NULL; 1010 return NULL;
1010 } 1011 }
@@ -1038,7 +1039,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1038 1039
1039 if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 || 1040 if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
1040 nr_mirrors < 2 || nr_mirrors > KCOPYD_MAX_REGIONS + 1) { 1041 nr_mirrors < 2 || nr_mirrors > KCOPYD_MAX_REGIONS + 1) {
1041 ti->error = "dm-mirror: Invalid number of mirrors"; 1042 ti->error = "Invalid number of mirrors";
1042 dm_destroy_dirty_log(dl); 1043 dm_destroy_dirty_log(dl);
1043 return -EINVAL; 1044 return -EINVAL;
1044 } 1045 }
@@ -1046,7 +1047,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1046 argv++, argc--; 1047 argv++, argc--;
1047 1048
1048 if (argc != nr_mirrors * 2) { 1049 if (argc != nr_mirrors * 2) {
1049 ti->error = "dm-mirror: Wrong number of mirror arguments"; 1050 ti->error = "Wrong number of mirror arguments";
1050 dm_destroy_dirty_log(dl); 1051 dm_destroy_dirty_log(dl);
1051 return -EINVAL; 1052 return -EINVAL;
1052 } 1053 }
@@ -1115,7 +1116,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio,
1115 struct mirror *m; 1116 struct mirror *m;
1116 struct mirror_set *ms = ti->private; 1117 struct mirror_set *ms = ti->private;
1117 1118
1118 map_context->ll = bio->bi_sector >> ms->rh.region_shift; 1119 map_context->ll = bio_to_region(&ms->rh, bio);
1119 1120
1120 if (rw == WRITE) { 1121 if (rw == WRITE) {
1121 queue_bio(ms, bio, rw); 1122 queue_bio(ms, bio, rw);
@@ -1221,7 +1222,7 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
1221 1222
1222static struct target_type mirror_target = { 1223static struct target_type mirror_target = {
1223 .name = "mirror", 1224 .name = "mirror",
1224 .version = {1, 0, 1}, 1225 .version = {1, 0, 2},
1225 .module = THIS_MODULE, 1226 .module = THIS_MODULE,
1226 .ctr = mirror_ctr, 1227 .ctr = mirror_ctr,
1227 .dtr = mirror_dtr, 1228 .dtr = mirror_dtr,
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c
index d0024865a7..c5a16c5501 100644
--- a/drivers/md/dm-round-robin.c
+++ b/drivers/md/dm-round-robin.c
@@ -14,6 +14,8 @@
14 14
15#include <linux/slab.h> 15#include <linux/slab.h>
16 16
17#define DM_MSG_PREFIX "multipath round-robin"
18
17/*----------------------------------------------------------------- 19/*-----------------------------------------------------------------
18 * Path-handling code, paths are held in lists 20 * Path-handling code, paths are held in lists
19 *---------------------------------------------------------------*/ 21 *---------------------------------------------------------------*/
@@ -191,9 +193,9 @@ static int __init dm_rr_init(void)
191 int r = dm_register_path_selector(&rr_ps); 193 int r = dm_register_path_selector(&rr_ps);
192 194
193 if (r < 0) 195 if (r < 0)
194 DMERR("round-robin: register failed %d", r); 196 DMERR("register failed %d", r);
195 197
196 DMINFO("dm-round-robin version 1.0.0 loaded"); 198 DMINFO("version 1.0.0 loaded");
197 199
198 return r; 200 return r;
199} 201}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 08312b4646..8eea0ddbf5 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -23,6 +23,8 @@
23#include "dm-bio-list.h" 23#include "dm-bio-list.h"
24#include "kcopyd.h" 24#include "kcopyd.h"
25 25
26#define DM_MSG_PREFIX "snapshots"
27
26/* 28/*
27 * The percentage increment we will wake up users at 29 * The percentage increment we will wake up users at
28 */ 30 */
@@ -117,7 +119,7 @@ static int init_origin_hash(void)
117 _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head), 119 _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
118 GFP_KERNEL); 120 GFP_KERNEL);
119 if (!_origins) { 121 if (!_origins) {
120 DMERR("Device mapper: Snapshot: unable to allocate memory"); 122 DMERR("unable to allocate memory");
121 return -ENOMEM; 123 return -ENOMEM;
122 } 124 }
123 125
@@ -412,7 +414,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
412 int blocksize; 414 int blocksize;
413 415
414 if (argc < 4) { 416 if (argc < 4) {
415 ti->error = "dm-snapshot: requires exactly 4 arguments"; 417 ti->error = "requires exactly 4 arguments";
416 r = -EINVAL; 418 r = -EINVAL;
417 goto bad1; 419 goto bad1;
418 } 420 }
@@ -530,7 +532,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
530 } 532 }
531 533
532 ti->private = s; 534 ti->private = s;
533 ti->split_io = chunk_size; 535 ti->split_io = s->chunk_size;
534 536
535 return 0; 537 return 0;
536 538
@@ -1127,7 +1129,7 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1127 struct dm_dev *dev; 1129 struct dm_dev *dev;
1128 1130
1129 if (argc != 1) { 1131 if (argc != 1) {
1130 ti->error = "dm-origin: incorrect number of arguments"; 1132 ti->error = "origin: incorrect number of arguments";
1131 return -EINVAL; 1133 return -EINVAL;
1132 } 1134 }
1133 1135
@@ -1204,7 +1206,7 @@ static int origin_status(struct dm_target *ti, status_type_t type, char *result,
1204 1206
1205static struct target_type origin_target = { 1207static struct target_type origin_target = {
1206 .name = "snapshot-origin", 1208 .name = "snapshot-origin",
1207 .version = {1, 1, 0}, 1209 .version = {1, 4, 0},
1208 .module = THIS_MODULE, 1210 .module = THIS_MODULE,
1209 .ctr = origin_ctr, 1211 .ctr = origin_ctr,
1210 .dtr = origin_dtr, 1212 .dtr = origin_dtr,
@@ -1215,7 +1217,7 @@ static struct target_type origin_target = {
1215 1217
1216static struct target_type snapshot_target = { 1218static struct target_type snapshot_target = {
1217 .name = "snapshot", 1219 .name = "snapshot",
1218 .version = {1, 1, 0}, 1220 .version = {1, 4, 0},
1219 .module = THIS_MODULE, 1221 .module = THIS_MODULE,
1220 .ctr = snapshot_ctr, 1222 .ctr = snapshot_ctr,
1221 .dtr = snapshot_dtr, 1223 .dtr = snapshot_dtr,
@@ -1236,7 +1238,7 @@ static int __init dm_snapshot_init(void)
1236 1238
1237 r = dm_register_target(&origin_target); 1239 r = dm_register_target(&origin_target);
1238 if (r < 0) { 1240 if (r < 0) {
1239 DMERR("Device mapper: Origin: register failed %d\n", r); 1241 DMERR("Origin target register failed %d", r);
1240 goto bad1; 1242 goto bad1;
1241 } 1243 }
1242 1244
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 08328a8f5a..6c29fcecd8 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -12,6 +12,8 @@
12#include <linux/bio.h> 12#include <linux/bio.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14 14
15#define DM_MSG_PREFIX "striped"
16
15struct stripe { 17struct stripe {
16 struct dm_dev *dev; 18 struct dm_dev *dev;
17 sector_t physical_start; 19 sector_t physical_start;
@@ -78,19 +80,19 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
78 unsigned int i; 80 unsigned int i;
79 81
80 if (argc < 2) { 82 if (argc < 2) {
81 ti->error = "dm-stripe: Not enough arguments"; 83 ti->error = "Not enough arguments";
82 return -EINVAL; 84 return -EINVAL;
83 } 85 }
84 86
85 stripes = simple_strtoul(argv[0], &end, 10); 87 stripes = simple_strtoul(argv[0], &end, 10);
86 if (*end) { 88 if (*end) {
87 ti->error = "dm-stripe: Invalid stripe count"; 89 ti->error = "Invalid stripe count";
88 return -EINVAL; 90 return -EINVAL;
89 } 91 }
90 92
91 chunk_size = simple_strtoul(argv[1], &end, 10); 93 chunk_size = simple_strtoul(argv[1], &end, 10);
92 if (*end) { 94 if (*end) {
93 ti->error = "dm-stripe: Invalid chunk_size"; 95 ti->error = "Invalid chunk_size";
94 return -EINVAL; 96 return -EINVAL;
95 } 97 }
96 98
@@ -99,19 +101,19 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
99 */ 101 */
100 if (!chunk_size || (chunk_size & (chunk_size - 1)) || 102 if (!chunk_size || (chunk_size & (chunk_size - 1)) ||
101 (chunk_size < (PAGE_SIZE >> SECTOR_SHIFT))) { 103 (chunk_size < (PAGE_SIZE >> SECTOR_SHIFT))) {
102 ti->error = "dm-stripe: Invalid chunk size"; 104 ti->error = "Invalid chunk size";
103 return -EINVAL; 105 return -EINVAL;
104 } 106 }
105 107
106 if (ti->len & (chunk_size - 1)) { 108 if (ti->len & (chunk_size - 1)) {
107 ti->error = "dm-stripe: Target length not divisible by " 109 ti->error = "Target length not divisible by "
108 "chunk size"; 110 "chunk size";
109 return -EINVAL; 111 return -EINVAL;
110 } 112 }
111 113
112 width = ti->len; 114 width = ti->len;
113 if (sector_div(width, stripes)) { 115 if (sector_div(width, stripes)) {
114 ti->error = "dm-stripe: Target length not divisible by " 116 ti->error = "Target length not divisible by "
115 "number of stripes"; 117 "number of stripes";
116 return -EINVAL; 118 return -EINVAL;
117 } 119 }
@@ -120,14 +122,14 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
120 * Do we have enough arguments for that many stripes ? 122 * Do we have enough arguments for that many stripes ?
121 */ 123 */
122 if (argc != (2 + 2 * stripes)) { 124 if (argc != (2 + 2 * stripes)) {
123 ti->error = "dm-stripe: Not enough destinations " 125 ti->error = "Not enough destinations "
124 "specified"; 126 "specified";
125 return -EINVAL; 127 return -EINVAL;
126 } 128 }
127 129
128 sc = alloc_context(stripes); 130 sc = alloc_context(stripes);
129 if (!sc) { 131 if (!sc) {
130 ti->error = "dm-stripe: Memory allocation for striped context " 132 ti->error = "Memory allocation for striped context "
131 "failed"; 133 "failed";
132 return -ENOMEM; 134 return -ENOMEM;
133 } 135 }
@@ -149,8 +151,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
149 151
150 r = get_stripe(ti, sc, i, argv); 152 r = get_stripe(ti, sc, i, argv);
151 if (r < 0) { 153 if (r < 0) {
152 ti->error = "dm-stripe: Couldn't parse stripe " 154 ti->error = "Couldn't parse stripe destination";
153 "destination";
154 while (i--) 155 while (i--)
155 dm_put_device(ti, sc->stripe[i].dev); 156 dm_put_device(ti, sc->stripe[i].dev);
156 kfree(sc); 157 kfree(sc);
@@ -227,7 +228,7 @@ int __init dm_stripe_init(void)
227 228
228 r = dm_register_target(&stripe_target); 229 r = dm_register_target(&stripe_target);
229 if (r < 0) 230 if (r < 0)
230 DMWARN("striped target registration failed"); 231 DMWARN("target registration failed");
231 232
232 return r; 233 return r;
233} 234}
@@ -235,7 +236,7 @@ int __init dm_stripe_init(void)
235void dm_stripe_exit(void) 236void dm_stripe_exit(void)
236{ 237{
237 if (dm_unregister_target(&stripe_target)) 238 if (dm_unregister_target(&stripe_target))
238 DMWARN("striped target unregistration failed"); 239 DMWARN("target unregistration failed");
239 240
240 return; 241 return;
241} 242}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 8f56a54cf0..75fe9493e6 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -17,6 +17,8 @@
17#include <linux/mutex.h> 17#include <linux/mutex.h>
18#include <asm/atomic.h> 18#include <asm/atomic.h>
19 19
20#define DM_MSG_PREFIX "table"
21
20#define MAX_DEPTH 16 22#define MAX_DEPTH 16
21#define NODE_SIZE L1_CACHE_BYTES 23#define NODE_SIZE L1_CACHE_BYTES
22#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t)) 24#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
@@ -237,6 +239,44 @@ int dm_table_create(struct dm_table **result, int mode,
237 return 0; 239 return 0;
238} 240}
239 241
242int dm_create_error_table(struct dm_table **result, struct mapped_device *md)
243{
244 struct dm_table *t;
245 sector_t dev_size = 1;
246 int r;
247
248 /*
249 * Find current size of device.
250 * Default to 1 sector if inactive.
251 */
252 t = dm_get_table(md);
253 if (t) {
254 dev_size = dm_table_get_size(t);
255 dm_table_put(t);
256 }
257
258 r = dm_table_create(&t, FMODE_READ, 1, md);
259 if (r)
260 return r;
261
262 r = dm_table_add_target(t, "error", 0, dev_size, NULL);
263 if (r)
264 goto out;
265
266 r = dm_table_complete(t);
267 if (r)
268 goto out;
269
270 *result = t;
271
272out:
273 if (r)
274 dm_table_put(t);
275
276 return r;
277}
278EXPORT_SYMBOL_GPL(dm_create_error_table);
279
240static void free_devices(struct list_head *devices) 280static void free_devices(struct list_head *devices)
241{ 281{
242 struct list_head *tmp, *next; 282 struct list_head *tmp, *next;
@@ -590,6 +630,12 @@ int dm_split_args(int *argc, char ***argvp, char *input)
590 unsigned array_size = 0; 630 unsigned array_size = 0;
591 631
592 *argc = 0; 632 *argc = 0;
633
634 if (!input) {
635 *argvp = NULL;
636 return 0;
637 }
638
593 argv = realloc_argv(&array_size, argv); 639 argv = realloc_argv(&array_size, argv);
594 if (!argv) 640 if (!argv)
595 return -ENOMEM; 641 return -ENOMEM;
@@ -671,15 +717,14 @@ int dm_table_add_target(struct dm_table *t, const char *type,
671 memset(tgt, 0, sizeof(*tgt)); 717 memset(tgt, 0, sizeof(*tgt));
672 718
673 if (!len) { 719 if (!len) {
674 tgt->error = "zero-length target"; 720 DMERR("%s: zero-length target", dm_device_name(t->md));
675 DMERR("%s", tgt->error);
676 return -EINVAL; 721 return -EINVAL;
677 } 722 }
678 723
679 tgt->type = dm_get_target_type(type); 724 tgt->type = dm_get_target_type(type);
680 if (!tgt->type) { 725 if (!tgt->type) {
681 tgt->error = "unknown target type"; 726 DMERR("%s: %s: unknown target type", dm_device_name(t->md),
682 DMERR("%s", tgt->error); 727 type);
683 return -EINVAL; 728 return -EINVAL;
684 } 729 }
685 730
@@ -716,7 +761,7 @@ int dm_table_add_target(struct dm_table *t, const char *type,
716 return 0; 761 return 0;
717 762
718 bad: 763 bad:
719 DMERR("%s", tgt->error); 764 DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
720 dm_put_target_type(tgt->type); 765 dm_put_target_type(tgt->type);
721 return r; 766 return r;
722} 767}
@@ -802,7 +847,7 @@ sector_t dm_table_get_size(struct dm_table *t)
802 847
803struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index) 848struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
804{ 849{
805 if (index > t->num_targets) 850 if (index >= t->num_targets)
806 return NULL; 851 return NULL;
807 852
808 return t->targets + index; 853 return t->targets + index;
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 64fd8e79ea..477a041a41 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -12,6 +12,8 @@
12#include <linux/bio.h> 12#include <linux/bio.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14 14
15#define DM_MSG_PREFIX "target"
16
15struct tt_internal { 17struct tt_internal {
16 struct target_type tt; 18 struct target_type tt;
17 19
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index 51c0639b24..ea569f7348 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -10,13 +10,15 @@
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/bio.h> 11#include <linux/bio.h>
12 12
13#define DM_MSG_PREFIX "zero"
14
13/* 15/*
14 * Construct a dummy mapping that only returns zeros 16 * Construct a dummy mapping that only returns zeros
15 */ 17 */
16static int zero_ctr(struct dm_target *ti, unsigned int argc, char **argv) 18static int zero_ctr(struct dm_target *ti, unsigned int argc, char **argv)
17{ 19{
18 if (argc != 0) { 20 if (argc != 0) {
19 ti->error = "dm-zero: No arguments required"; 21 ti->error = "No arguments required";
20 return -EINVAL; 22 return -EINVAL;
21 } 23 }
22 24
@@ -60,7 +62,7 @@ static int __init dm_zero_init(void)
60 int r = dm_register_target(&zero_target); 62 int r = dm_register_target(&zero_target);
61 63
62 if (r < 0) 64 if (r < 0)
63 DMERR("zero: register failed %d", r); 65 DMERR("register failed %d", r);
64 66
65 return r; 67 return r;
66} 68}
@@ -70,7 +72,7 @@ static void __exit dm_zero_exit(void)
70 int r = dm_unregister_target(&zero_target); 72 int r = dm_unregister_target(&zero_target);
71 73
72 if (r < 0) 74 if (r < 0)
73 DMERR("zero: unregister failed %d", r); 75 DMERR("unregister failed %d", r);
74} 76}
75 77
76module_init(dm_zero_init) 78module_init(dm_zero_init)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4d710b7a13..3ed2e53b9e 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited. 2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
3 * Copyright (C) 2004 Red Hat, Inc. All rights reserved. 3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 * 4 *
5 * This file is released under the GPL. 5 * This file is released under the GPL.
6 */ 6 */
@@ -21,11 +21,14 @@
21#include <linux/hdreg.h> 21#include <linux/hdreg.h>
22#include <linux/blktrace_api.h> 22#include <linux/blktrace_api.h>
23 23
24#define DM_MSG_PREFIX "core"
25
24static const char *_name = DM_NAME; 26static const char *_name = DM_NAME;
25 27
26static unsigned int major = 0; 28static unsigned int major = 0;
27static unsigned int _major = 0; 29static unsigned int _major = 0;
28 30
31static DEFINE_SPINLOCK(_minor_lock);
29/* 32/*
30 * One of these is allocated per bio. 33 * One of these is allocated per bio.
31 */ 34 */
@@ -49,23 +52,28 @@ struct target_io {
49 52
50union map_info *dm_get_mapinfo(struct bio *bio) 53union map_info *dm_get_mapinfo(struct bio *bio)
51{ 54{
52 if (bio && bio->bi_private) 55 if (bio && bio->bi_private)
53 return &((struct target_io *)bio->bi_private)->info; 56 return &((struct target_io *)bio->bi_private)->info;
54 return NULL; 57 return NULL;
55} 58}
56 59
60#define MINOR_ALLOCED ((void *)-1)
61
57/* 62/*
58 * Bits for the md->flags field. 63 * Bits for the md->flags field.
59 */ 64 */
60#define DMF_BLOCK_IO 0 65#define DMF_BLOCK_IO 0
61#define DMF_SUSPENDED 1 66#define DMF_SUSPENDED 1
62#define DMF_FROZEN 2 67#define DMF_FROZEN 2
68#define DMF_FREEING 3
69#define DMF_DELETING 4
63 70
64struct mapped_device { 71struct mapped_device {
65 struct rw_semaphore io_lock; 72 struct rw_semaphore io_lock;
66 struct semaphore suspend_lock; 73 struct semaphore suspend_lock;
67 rwlock_t map_lock; 74 rwlock_t map_lock;
68 atomic_t holders; 75 atomic_t holders;
76 atomic_t open_count;
69 77
70 unsigned long flags; 78 unsigned long flags;
71 79
@@ -218,9 +226,25 @@ static int dm_blk_open(struct inode *inode, struct file *file)
218{ 226{
219 struct mapped_device *md; 227 struct mapped_device *md;
220 228
229 spin_lock(&_minor_lock);
230
221 md = inode->i_bdev->bd_disk->private_data; 231 md = inode->i_bdev->bd_disk->private_data;
232 if (!md)
233 goto out;
234
235 if (test_bit(DMF_FREEING, &md->flags) ||
236 test_bit(DMF_DELETING, &md->flags)) {
237 md = NULL;
238 goto out;
239 }
240
222 dm_get(md); 241 dm_get(md);
223 return 0; 242 atomic_inc(&md->open_count);
243
244out:
245 spin_unlock(&_minor_lock);
246
247 return md ? 0 : -ENXIO;
224} 248}
225 249
226static int dm_blk_close(struct inode *inode, struct file *file) 250static int dm_blk_close(struct inode *inode, struct file *file)
@@ -228,10 +252,35 @@ static int dm_blk_close(struct inode *inode, struct file *file)
228 struct mapped_device *md; 252 struct mapped_device *md;
229 253
230 md = inode->i_bdev->bd_disk->private_data; 254 md = inode->i_bdev->bd_disk->private_data;
255 atomic_dec(&md->open_count);
231 dm_put(md); 256 dm_put(md);
232 return 0; 257 return 0;
233} 258}
234 259
260int dm_open_count(struct mapped_device *md)
261{
262 return atomic_read(&md->open_count);
263}
264
265/*
266 * Guarantees nothing is using the device before it's deleted.
267 */
268int dm_lock_for_deletion(struct mapped_device *md)
269{
270 int r = 0;
271
272 spin_lock(&_minor_lock);
273
274 if (dm_open_count(md))
275 r = -EBUSY;
276 else
277 set_bit(DMF_DELETING, &md->flags);
278
279 spin_unlock(&_minor_lock);
280
281 return r;
282}
283
235static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) 284static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
236{ 285{
237 struct mapped_device *md = bdev->bd_disk->private_data; 286 struct mapped_device *md = bdev->bd_disk->private_data;
@@ -456,8 +505,8 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
456 if (r > 0) { 505 if (r > 0) {
457 /* the bio has been remapped so dispatch it */ 506 /* the bio has been remapped so dispatch it */
458 507
459 blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone, 508 blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
460 tio->io->bio->bi_bdev->bd_dev, sector, 509 tio->io->bio->bi_bdev->bd_dev, sector,
461 clone->bi_sector); 510 clone->bi_sector);
462 511
463 generic_make_request(clone); 512 generic_make_request(clone);
@@ -744,43 +793,39 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
744/*----------------------------------------------------------------- 793/*-----------------------------------------------------------------
745 * An IDR is used to keep track of allocated minor numbers. 794 * An IDR is used to keep track of allocated minor numbers.
746 *---------------------------------------------------------------*/ 795 *---------------------------------------------------------------*/
747static DEFINE_MUTEX(_minor_lock);
748static DEFINE_IDR(_minor_idr); 796static DEFINE_IDR(_minor_idr);
749 797
750static void free_minor(unsigned int minor) 798static void free_minor(int minor)
751{ 799{
752 mutex_lock(&_minor_lock); 800 spin_lock(&_minor_lock);
753 idr_remove(&_minor_idr, minor); 801 idr_remove(&_minor_idr, minor);
754 mutex_unlock(&_minor_lock); 802 spin_unlock(&_minor_lock);
755} 803}
756 804
757/* 805/*
758 * See if the device with a specific minor # is free. 806 * See if the device with a specific minor # is free.
759 */ 807 */
760static int specific_minor(struct mapped_device *md, unsigned int minor) 808static int specific_minor(struct mapped_device *md, int minor)
761{ 809{
762 int r, m; 810 int r, m;
763 811
764 if (minor >= (1 << MINORBITS)) 812 if (minor >= (1 << MINORBITS))
765 return -EINVAL; 813 return -EINVAL;
766 814
767 mutex_lock(&_minor_lock); 815 r = idr_pre_get(&_minor_idr, GFP_KERNEL);
816 if (!r)
817 return -ENOMEM;
818
819 spin_lock(&_minor_lock);
768 820
769 if (idr_find(&_minor_idr, minor)) { 821 if (idr_find(&_minor_idr, minor)) {
770 r = -EBUSY; 822 r = -EBUSY;
771 goto out; 823 goto out;
772 } 824 }
773 825
774 r = idr_pre_get(&_minor_idr, GFP_KERNEL); 826 r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
775 if (!r) { 827 if (r)
776 r = -ENOMEM;
777 goto out;
778 }
779
780 r = idr_get_new_above(&_minor_idr, md, minor, &m);
781 if (r) {
782 goto out; 828 goto out;
783 }
784 829
785 if (m != minor) { 830 if (m != minor) {
786 idr_remove(&_minor_idr, m); 831 idr_remove(&_minor_idr, m);
@@ -789,24 +834,21 @@ static int specific_minor(struct mapped_device *md, unsigned int minor)
789 } 834 }
790 835
791out: 836out:
792 mutex_unlock(&_minor_lock); 837 spin_unlock(&_minor_lock);
793 return r; 838 return r;
794} 839}
795 840
796static int next_free_minor(struct mapped_device *md, unsigned int *minor) 841static int next_free_minor(struct mapped_device *md, int *minor)
797{ 842{
798 int r; 843 int r, m;
799 unsigned int m;
800
801 mutex_lock(&_minor_lock);
802 844
803 r = idr_pre_get(&_minor_idr, GFP_KERNEL); 845 r = idr_pre_get(&_minor_idr, GFP_KERNEL);
804 if (!r) { 846 if (!r)
805 r = -ENOMEM; 847 return -ENOMEM;
806 goto out; 848
807 } 849 spin_lock(&_minor_lock);
808 850
809 r = idr_get_new(&_minor_idr, md, &m); 851 r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
810 if (r) { 852 if (r) {
811 goto out; 853 goto out;
812 } 854 }
@@ -820,7 +862,7 @@ static int next_free_minor(struct mapped_device *md, unsigned int *minor)
820 *minor = m; 862 *minor = m;
821 863
822out: 864out:
823 mutex_unlock(&_minor_lock); 865 spin_unlock(&_minor_lock);
824 return r; 866 return r;
825} 867}
826 868
@@ -829,18 +871,25 @@ static struct block_device_operations dm_blk_dops;
829/* 871/*
830 * Allocate and initialise a blank device with a given minor. 872 * Allocate and initialise a blank device with a given minor.
831 */ 873 */
832static struct mapped_device *alloc_dev(unsigned int minor, int persistent) 874static struct mapped_device *alloc_dev(int minor)
833{ 875{
834 int r; 876 int r;
835 struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL); 877 struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);
878 void *old_md;
836 879
837 if (!md) { 880 if (!md) {
838 DMWARN("unable to allocate device, out of memory."); 881 DMWARN("unable to allocate device, out of memory.");
839 return NULL; 882 return NULL;
840 } 883 }
841 884
885 if (!try_module_get(THIS_MODULE))
886 goto bad0;
887
842 /* get a minor number for the dev */ 888 /* get a minor number for the dev */
843 r = persistent ? specific_minor(md, minor) : next_free_minor(md, &minor); 889 if (minor == DM_ANY_MINOR)
890 r = next_free_minor(md, &minor);
891 else
892 r = specific_minor(md, minor);
844 if (r < 0) 893 if (r < 0)
845 goto bad1; 894 goto bad1;
846 895
@@ -849,6 +898,7 @@ static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
849 init_MUTEX(&md->suspend_lock); 898 init_MUTEX(&md->suspend_lock);
850 rwlock_init(&md->map_lock); 899 rwlock_init(&md->map_lock);
851 atomic_set(&md->holders, 1); 900 atomic_set(&md->holders, 1);
901 atomic_set(&md->open_count, 0);
852 atomic_set(&md->event_nr, 0); 902 atomic_set(&md->event_nr, 0);
853 903
854 md->queue = blk_alloc_queue(GFP_KERNEL); 904 md->queue = blk_alloc_queue(GFP_KERNEL);
@@ -875,6 +925,10 @@ static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
875 if (!md->disk) 925 if (!md->disk)
876 goto bad4; 926 goto bad4;
877 927
928 atomic_set(&md->pending, 0);
929 init_waitqueue_head(&md->wait);
930 init_waitqueue_head(&md->eventq);
931
878 md->disk->major = _major; 932 md->disk->major = _major;
879 md->disk->first_minor = minor; 933 md->disk->first_minor = minor;
880 md->disk->fops = &dm_blk_dops; 934 md->disk->fops = &dm_blk_dops;
@@ -884,9 +938,12 @@ static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
884 add_disk(md->disk); 938 add_disk(md->disk);
885 format_dev_t(md->name, MKDEV(_major, minor)); 939 format_dev_t(md->name, MKDEV(_major, minor));
886 940
887 atomic_set(&md->pending, 0); 941 /* Populate the mapping, nobody knows we exist yet */
888 init_waitqueue_head(&md->wait); 942 spin_lock(&_minor_lock);
889 init_waitqueue_head(&md->eventq); 943 old_md = idr_replace(&_minor_idr, md, minor);
944 spin_unlock(&_minor_lock);
945
946 BUG_ON(old_md != MINOR_ALLOCED);
890 947
891 return md; 948 return md;
892 949
@@ -898,13 +955,15 @@ static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
898 blk_cleanup_queue(md->queue); 955 blk_cleanup_queue(md->queue);
899 free_minor(minor); 956 free_minor(minor);
900 bad1: 957 bad1:
958 module_put(THIS_MODULE);
959 bad0:
901 kfree(md); 960 kfree(md);
902 return NULL; 961 return NULL;
903} 962}
904 963
905static void free_dev(struct mapped_device *md) 964static void free_dev(struct mapped_device *md)
906{ 965{
907 unsigned int minor = md->disk->first_minor; 966 int minor = md->disk->first_minor;
908 967
909 if (md->suspended_bdev) { 968 if (md->suspended_bdev) {
910 thaw_bdev(md->suspended_bdev, NULL); 969 thaw_bdev(md->suspended_bdev, NULL);
@@ -914,8 +973,14 @@ static void free_dev(struct mapped_device *md)
914 mempool_destroy(md->io_pool); 973 mempool_destroy(md->io_pool);
915 del_gendisk(md->disk); 974 del_gendisk(md->disk);
916 free_minor(minor); 975 free_minor(minor);
976
977 spin_lock(&_minor_lock);
978 md->disk->private_data = NULL;
979 spin_unlock(&_minor_lock);
980
917 put_disk(md->disk); 981 put_disk(md->disk);
918 blk_cleanup_queue(md->queue); 982 blk_cleanup_queue(md->queue);
983 module_put(THIS_MODULE);
919 kfree(md); 984 kfree(md);
920} 985}
921 986
@@ -984,12 +1049,11 @@ static void __unbind(struct mapped_device *md)
984/* 1049/*
985 * Constructor for a new device. 1050 * Constructor for a new device.
986 */ 1051 */
987static int create_aux(unsigned int minor, int persistent, 1052int dm_create(int minor, struct mapped_device **result)
988 struct mapped_device **result)
989{ 1053{
990 struct mapped_device *md; 1054 struct mapped_device *md;
991 1055
992 md = alloc_dev(minor, persistent); 1056 md = alloc_dev(minor);
993 if (!md) 1057 if (!md)
994 return -ENXIO; 1058 return -ENXIO;
995 1059
@@ -997,16 +1061,6 @@ static int create_aux(unsigned int minor, int persistent,
997 return 0; 1061 return 0;
998} 1062}
999 1063
1000int dm_create(struct mapped_device **result)
1001{
1002 return create_aux(0, 0, result);
1003}
1004
1005int dm_create_with_minor(unsigned int minor, struct mapped_device **result)
1006{
1007 return create_aux(minor, 1, result);
1008}
1009
1010static struct mapped_device *dm_find_md(dev_t dev) 1064static struct mapped_device *dm_find_md(dev_t dev)
1011{ 1065{
1012 struct mapped_device *md; 1066 struct mapped_device *md;
@@ -1015,13 +1069,18 @@ static struct mapped_device *dm_find_md(dev_t dev)
1015 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 1069 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
1016 return NULL; 1070 return NULL;
1017 1071
1018 mutex_lock(&_minor_lock); 1072 spin_lock(&_minor_lock);
1019 1073
1020 md = idr_find(&_minor_idr, minor); 1074 md = idr_find(&_minor_idr, minor);
1021 if (!md || (dm_disk(md)->first_minor != minor)) 1075 if (md && (md == MINOR_ALLOCED ||
1076 (dm_disk(md)->first_minor != minor) ||
1077 test_bit(DMF_FREEING, &md->flags))) {
1022 md = NULL; 1078 md = NULL;
1079 goto out;
1080 }
1023 1081
1024 mutex_unlock(&_minor_lock); 1082out:
1083 spin_unlock(&_minor_lock);
1025 1084
1026 return md; 1085 return md;
1027} 1086}
@@ -1051,12 +1110,23 @@ void dm_get(struct mapped_device *md)
1051 atomic_inc(&md->holders); 1110 atomic_inc(&md->holders);
1052} 1111}
1053 1112
1113const char *dm_device_name(struct mapped_device *md)
1114{
1115 return md->name;
1116}
1117EXPORT_SYMBOL_GPL(dm_device_name);
1118
1054void dm_put(struct mapped_device *md) 1119void dm_put(struct mapped_device *md)
1055{ 1120{
1056 struct dm_table *map; 1121 struct dm_table *map;
1057 1122
1058 if (atomic_dec_and_test(&md->holders)) { 1123 BUG_ON(test_bit(DMF_FREEING, &md->flags));
1124
1125 if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
1059 map = dm_get_table(md); 1126 map = dm_get_table(md);
1127 idr_replace(&_minor_idr, MINOR_ALLOCED, dm_disk(md)->first_minor);
1128 set_bit(DMF_FREEING, &md->flags);
1129 spin_unlock(&_minor_lock);
1060 if (!dm_suspended(md)) { 1130 if (!dm_suspended(md)) {
1061 dm_table_presuspend_targets(map); 1131 dm_table_presuspend_targets(map);
1062 dm_table_postsuspend_targets(map); 1132 dm_table_postsuspend_targets(map);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index fd90bc8f9e..3c03c0ecab 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -2,7 +2,7 @@
2 * Internal header file for device mapper 2 * Internal header file for device mapper
3 * 3 *
4 * Copyright (C) 2001, 2002 Sistina Software 4 * Copyright (C) 2001, 2002 Sistina Software
5 * Copyright (C) 2004 Red Hat, Inc. All rights reserved. 5 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
6 * 6 *
7 * This file is released under the LGPL. 7 * This file is released under the LGPL.
8 */ 8 */
@@ -17,9 +17,10 @@
17#include <linux/hdreg.h> 17#include <linux/hdreg.h>
18 18
19#define DM_NAME "device-mapper" 19#define DM_NAME "device-mapper"
20#define DMWARN(f, x...) printk(KERN_WARNING DM_NAME ": " f "\n" , ## x) 20
21#define DMERR(f, x...) printk(KERN_ERR DM_NAME ": " f "\n" , ## x) 21#define DMERR(f, arg...) printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
22#define DMINFO(f, x...) printk(KERN_INFO DM_NAME ": " f "\n" , ## x) 22#define DMWARN(f, arg...) printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
23#define DMINFO(f, arg...) printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
23 24
24#define DMEMIT(x...) sz += ((sz >= maxlen) ? \ 25#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
25 0 : scnprintf(result + sz, maxlen - sz, x)) 26 0 : scnprintf(result + sz, maxlen - sz, x))
@@ -39,83 +40,16 @@ struct dm_dev {
39}; 40};
40 41
41struct dm_table; 42struct dm_table;
42struct mapped_device;
43
44/*-----------------------------------------------------------------
45 * Functions for manipulating a struct mapped_device.
46 * Drop the reference with dm_put when you finish with the object.
47 *---------------------------------------------------------------*/
48int dm_create(struct mapped_device **md);
49int dm_create_with_minor(unsigned int minor, struct mapped_device **md);
50void dm_set_mdptr(struct mapped_device *md, void *ptr);
51void *dm_get_mdptr(struct mapped_device *md);
52struct mapped_device *dm_get_md(dev_t dev);
53
54/*
55 * Reference counting for md.
56 */
57void dm_get(struct mapped_device *md);
58void dm_put(struct mapped_device *md);
59
60/*
61 * A device can still be used while suspended, but I/O is deferred.
62 */
63int dm_suspend(struct mapped_device *md, int with_lockfs);
64int dm_resume(struct mapped_device *md);
65
66/*
67 * The device must be suspended before calling this method.
68 */
69int dm_swap_table(struct mapped_device *md, struct dm_table *t);
70
71/*
72 * Drop a reference on the table when you've finished with the
73 * result.
74 */
75struct dm_table *dm_get_table(struct mapped_device *md);
76
77/*
78 * Event functions.
79 */
80uint32_t dm_get_event_nr(struct mapped_device *md);
81int dm_wait_event(struct mapped_device *md, int event_nr);
82
83/*
84 * Info functions.
85 */
86struct gendisk *dm_disk(struct mapped_device *md);
87int dm_suspended(struct mapped_device *md);
88
89/*
90 * Geometry functions.
91 */
92int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
93int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
94 43
95/*----------------------------------------------------------------- 44/*-----------------------------------------------------------------
96 * Functions for manipulating a table. Tables are also reference 45 * Internal table functions.
97 * counted.
98 *---------------------------------------------------------------*/ 46 *---------------------------------------------------------------*/
99int dm_table_create(struct dm_table **result, int mode,
100 unsigned num_targets, struct mapped_device *md);
101
102void dm_table_get(struct dm_table *t);
103void dm_table_put(struct dm_table *t);
104
105int dm_table_add_target(struct dm_table *t, const char *type,
106 sector_t start, sector_t len, char *params);
107int dm_table_complete(struct dm_table *t);
108void dm_table_event_callback(struct dm_table *t, 47void dm_table_event_callback(struct dm_table *t,
109 void (*fn)(void *), void *context); 48 void (*fn)(void *), void *context);
110void dm_table_event(struct dm_table *t);
111sector_t dm_table_get_size(struct dm_table *t);
112struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index); 49struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
113struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector); 50struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
114void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q); 51void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q);
115unsigned int dm_table_get_num_targets(struct dm_table *t);
116struct list_head *dm_table_get_devices(struct dm_table *t); 52struct list_head *dm_table_get_devices(struct dm_table *t);
117int dm_table_get_mode(struct dm_table *t);
118struct mapped_device *dm_table_get_md(struct dm_table *t);
119void dm_table_presuspend_targets(struct dm_table *t); 53void dm_table_presuspend_targets(struct dm_table *t);
120void dm_table_postsuspend_targets(struct dm_table *t); 54void dm_table_postsuspend_targets(struct dm_table *t);
121void dm_table_resume_targets(struct dm_table *t); 55void dm_table_resume_targets(struct dm_table *t);
@@ -133,7 +67,6 @@ void dm_put_target_type(struct target_type *t);
133int dm_target_iterate(void (*iter_func)(struct target_type *tt, 67int dm_target_iterate(void (*iter_func)(struct target_type *tt,
134 void *param), void *param); 68 void *param), void *param);
135 69
136
137/*----------------------------------------------------------------- 70/*-----------------------------------------------------------------
138 * Useful inlines. 71 * Useful inlines.
139 *---------------------------------------------------------------*/ 72 *---------------------------------------------------------------*/
@@ -191,5 +124,7 @@ void dm_stripe_exit(void);
191 124
192void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size); 125void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
193union map_info *dm_get_mapinfo(struct bio *bio); 126union map_info *dm_get_mapinfo(struct bio *bio);
127int dm_open_count(struct mapped_device *md);
128int dm_lock_for_deletion(struct mapped_device *md);
194 129
195#endif 130#endif
diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c
index 72480a48d8..73ab875fb1 100644
--- a/drivers/md/kcopyd.c
+++ b/drivers/md/kcopyd.c
@@ -314,7 +314,7 @@ static void complete_io(unsigned long error, void *context)
314 314
315 if (error) { 315 if (error) {
316 if (job->rw == WRITE) 316 if (job->rw == WRITE)
317 job->write_err &= error; 317 job->write_err |= error;
318 else 318 else
319 job->read_err = 1; 319 job->read_err = 1;
320 320
@@ -460,7 +460,7 @@ static void segment_complete(int read_err,
460 job->read_err = 1; 460 job->read_err = 1;
461 461
462 if (write_err) 462 if (write_err)
463 job->write_err &= write_err; 463 job->write_err |= write_err;
464 464
465 /* 465 /*
466 * Only dispatch more work if there hasn't been an error. 466 * Only dispatch more work if there hasn't been an error.
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 777585458c..ff83c9b597 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -111,7 +111,7 @@ static int linear_issue_flush(request_queue_t *q, struct gendisk *disk,
111 return ret; 111 return ret;
112} 112}
113 113
114static int linear_run (mddev_t *mddev) 114static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
115{ 115{
116 linear_conf_t *conf; 116 linear_conf_t *conf;
117 dev_info_t **table; 117 dev_info_t **table;
@@ -121,20 +121,21 @@ static int linear_run (mddev_t *mddev)
121 sector_t curr_offset; 121 sector_t curr_offset;
122 struct list_head *tmp; 122 struct list_head *tmp;
123 123
124 conf = kzalloc (sizeof (*conf) + mddev->raid_disks*sizeof(dev_info_t), 124 conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(dev_info_t),
125 GFP_KERNEL); 125 GFP_KERNEL);
126 if (!conf) 126 if (!conf)
127 goto out; 127 return NULL;
128
128 mddev->private = conf; 129 mddev->private = conf;
129 130
130 cnt = 0; 131 cnt = 0;
131 mddev->array_size = 0; 132 conf->array_size = 0;
132 133
133 ITERATE_RDEV(mddev,rdev,tmp) { 134 ITERATE_RDEV(mddev,rdev,tmp) {
134 int j = rdev->raid_disk; 135 int j = rdev->raid_disk;
135 dev_info_t *disk = conf->disks + j; 136 dev_info_t *disk = conf->disks + j;
136 137
137 if (j < 0 || j > mddev->raid_disks || disk->rdev) { 138 if (j < 0 || j > raid_disks || disk->rdev) {
138 printk("linear: disk numbering problem. Aborting!\n"); 139 printk("linear: disk numbering problem. Aborting!\n");
139 goto out; 140 goto out;
140 } 141 }
@@ -152,11 +153,11 @@ static int linear_run (mddev_t *mddev)
152 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); 153 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
153 154
154 disk->size = rdev->size; 155 disk->size = rdev->size;
155 mddev->array_size += rdev->size; 156 conf->array_size += rdev->size;
156 157
157 cnt++; 158 cnt++;
158 } 159 }
159 if (cnt != mddev->raid_disks) { 160 if (cnt != raid_disks) {
160 printk("linear: not enough drives present. Aborting!\n"); 161 printk("linear: not enough drives present. Aborting!\n");
161 goto out; 162 goto out;
162 } 163 }
@@ -200,7 +201,7 @@ static int linear_run (mddev_t *mddev)
200 unsigned round; 201 unsigned round;
201 unsigned long base; 202 unsigned long base;
202 203
203 sz = mddev->array_size >> conf->preshift; 204 sz = conf->array_size >> conf->preshift;
204 sz += 1; /* force round-up */ 205 sz += 1; /* force round-up */
205 base = conf->hash_spacing >> conf->preshift; 206 base = conf->hash_spacing >> conf->preshift;
206 round = sector_div(sz, base); 207 round = sector_div(sz, base);
@@ -247,14 +248,56 @@ static int linear_run (mddev_t *mddev)
247 248
248 BUG_ON(table - conf->hash_table > nb_zone); 249 BUG_ON(table - conf->hash_table > nb_zone);
249 250
251 return conf;
252
253out:
254 kfree(conf);
255 return NULL;
256}
257
258static int linear_run (mddev_t *mddev)
259{
260 linear_conf_t *conf;
261
262 conf = linear_conf(mddev, mddev->raid_disks);
263
264 if (!conf)
265 return 1;
266 mddev->private = conf;
267 mddev->array_size = conf->array_size;
268
250 blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec); 269 blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
251 mddev->queue->unplug_fn = linear_unplug; 270 mddev->queue->unplug_fn = linear_unplug;
252 mddev->queue->issue_flush_fn = linear_issue_flush; 271 mddev->queue->issue_flush_fn = linear_issue_flush;
253 return 0; 272 return 0;
273}
254 274
255out: 275static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
256 kfree(conf); 276{
257 return 1; 277 /* Adding a drive to a linear array allows the array to grow.
278 * It is permitted if the new drive has a matching superblock
279 * already on it, with raid_disk equal to raid_disks.
280 * It is achieved by creating a new linear_private_data structure
281 * and swapping it in in-place of the current one.
282 * The current one is never freed until the array is stopped.
283 * This avoids races.
284 */
285 linear_conf_t *newconf;
286
287 if (rdev->raid_disk != mddev->raid_disks)
288 return -EINVAL;
289
290 newconf = linear_conf(mddev,mddev->raid_disks+1);
291
292 if (!newconf)
293 return -ENOMEM;
294
295 newconf->prev = mddev_to_conf(mddev);
296 mddev->private = newconf;
297 mddev->raid_disks++;
298 mddev->array_size = newconf->array_size;
299 set_capacity(mddev->gendisk, mddev->array_size << 1);
300 return 0;
258} 301}
259 302
260static int linear_stop (mddev_t *mddev) 303static int linear_stop (mddev_t *mddev)
@@ -262,8 +305,12 @@ static int linear_stop (mddev_t *mddev)
262 linear_conf_t *conf = mddev_to_conf(mddev); 305 linear_conf_t *conf = mddev_to_conf(mddev);
263 306
264 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 307 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
265 kfree(conf->hash_table); 308 do {
266 kfree(conf); 309 linear_conf_t *t = conf->prev;
310 kfree(conf->hash_table);
311 kfree(conf);
312 conf = t;
313 } while (conf);
267 314
268 return 0; 315 return 0;
269} 316}
@@ -360,6 +407,7 @@ static struct mdk_personality linear_personality =
360 .run = linear_run, 407 .run = linear_run,
361 .stop = linear_stop, 408 .stop = linear_stop,
362 .status = linear_status, 409 .status = linear_status,
410 .hot_add_disk = linear_add,
363}; 411};
364 412
365static int __init linear_init (void) 413static int __init linear_init (void)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index f19b874753..306268ec99 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -44,6 +44,7 @@
44#include <linux/suspend.h> 44#include <linux/suspend.h>
45#include <linux/poll.h> 45#include <linux/poll.h>
46#include <linux/mutex.h> 46#include <linux/mutex.h>
47#include <linux/ctype.h>
47 48
48#include <linux/init.h> 49#include <linux/init.h>
49 50
@@ -72,6 +73,10 @@ static void autostart_arrays (int part);
72static LIST_HEAD(pers_list); 73static LIST_HEAD(pers_list);
73static DEFINE_SPINLOCK(pers_lock); 74static DEFINE_SPINLOCK(pers_lock);
74 75
76static void md_print_devices(void);
77
78#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
79
75/* 80/*
76 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit' 81 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
77 * is 1000 KB/sec, so the extra system load does not show up that much. 82 * is 1000 KB/sec, so the extra system load does not show up that much.
@@ -170,7 +175,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
170/* Alternate version that can be called from interrupts 175/* Alternate version that can be called from interrupts
171 * when calling sysfs_notify isn't needed. 176 * when calling sysfs_notify isn't needed.
172 */ 177 */
173void md_new_event_inintr(mddev_t *mddev) 178static void md_new_event_inintr(mddev_t *mddev)
174{ 179{
175 atomic_inc(&md_event_count); 180 atomic_inc(&md_event_count);
176 wake_up(&md_event_waiters); 181 wake_up(&md_event_waiters);
@@ -732,6 +737,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
732{ 737{
733 mdp_disk_t *desc; 738 mdp_disk_t *desc;
734 mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page); 739 mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
740 __u64 ev1 = md_event(sb);
735 741
736 rdev->raid_disk = -1; 742 rdev->raid_disk = -1;
737 rdev->flags = 0; 743 rdev->flags = 0;
@@ -748,7 +754,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
748 mddev->layout = sb->layout; 754 mddev->layout = sb->layout;
749 mddev->raid_disks = sb->raid_disks; 755 mddev->raid_disks = sb->raid_disks;
750 mddev->size = sb->size; 756 mddev->size = sb->size;
751 mddev->events = md_event(sb); 757 mddev->events = ev1;
752 mddev->bitmap_offset = 0; 758 mddev->bitmap_offset = 0;
753 mddev->default_bitmap_offset = MD_SB_BYTES >> 9; 759 mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
754 760
@@ -797,7 +803,6 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
797 803
798 } else if (mddev->pers == NULL) { 804 } else if (mddev->pers == NULL) {
799 /* Insist on good event counter while assembling */ 805 /* Insist on good event counter while assembling */
800 __u64 ev1 = md_event(sb);
801 ++ev1; 806 ++ev1;
802 if (ev1 < mddev->events) 807 if (ev1 < mddev->events)
803 return -EINVAL; 808 return -EINVAL;
@@ -805,19 +810,21 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
805 /* if adding to array with a bitmap, then we can accept an 810 /* if adding to array with a bitmap, then we can accept an
806 * older device ... but not too old. 811 * older device ... but not too old.
807 */ 812 */
808 __u64 ev1 = md_event(sb);
809 if (ev1 < mddev->bitmap->events_cleared) 813 if (ev1 < mddev->bitmap->events_cleared)
810 return 0; 814 return 0;
811 } else /* just a hot-add of a new device, leave raid_disk at -1 */ 815 } else {
812 return 0; 816 if (ev1 < mddev->events)
817 /* just a hot-add of a new device, leave raid_disk at -1 */
818 return 0;
819 }
813 820
814 if (mddev->level != LEVEL_MULTIPATH) { 821 if (mddev->level != LEVEL_MULTIPATH) {
815 desc = sb->disks + rdev->desc_nr; 822 desc = sb->disks + rdev->desc_nr;
816 823
817 if (desc->state & (1<<MD_DISK_FAULTY)) 824 if (desc->state & (1<<MD_DISK_FAULTY))
818 set_bit(Faulty, &rdev->flags); 825 set_bit(Faulty, &rdev->flags);
819 else if (desc->state & (1<<MD_DISK_SYNC) && 826 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
820 desc->raid_disk < mddev->raid_disks) { 827 desc->raid_disk < mddev->raid_disks */) {
821 set_bit(In_sync, &rdev->flags); 828 set_bit(In_sync, &rdev->flags);
822 rdev->raid_disk = desc->raid_disk; 829 rdev->raid_disk = desc->raid_disk;
823 } 830 }
@@ -1100,6 +1107,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1100static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) 1107static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1101{ 1108{
1102 struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page); 1109 struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1110 __u64 ev1 = le64_to_cpu(sb->events);
1103 1111
1104 rdev->raid_disk = -1; 1112 rdev->raid_disk = -1;
1105 rdev->flags = 0; 1113 rdev->flags = 0;
@@ -1115,7 +1123,7 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1115 mddev->layout = le32_to_cpu(sb->layout); 1123 mddev->layout = le32_to_cpu(sb->layout);
1116 mddev->raid_disks = le32_to_cpu(sb->raid_disks); 1124 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1117 mddev->size = le64_to_cpu(sb->size)/2; 1125 mddev->size = le64_to_cpu(sb->size)/2;
1118 mddev->events = le64_to_cpu(sb->events); 1126 mddev->events = ev1;
1119 mddev->bitmap_offset = 0; 1127 mddev->bitmap_offset = 0;
1120 mddev->default_bitmap_offset = 1024 >> 9; 1128 mddev->default_bitmap_offset = 1024 >> 9;
1121 1129
@@ -1149,7 +1157,6 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1149 1157
1150 } else if (mddev->pers == NULL) { 1158 } else if (mddev->pers == NULL) {
1151 /* Insist of good event counter while assembling */ 1159 /* Insist of good event counter while assembling */
1152 __u64 ev1 = le64_to_cpu(sb->events);
1153 ++ev1; 1160 ++ev1;
1154 if (ev1 < mddev->events) 1161 if (ev1 < mddev->events)
1155 return -EINVAL; 1162 return -EINVAL;
@@ -1157,12 +1164,13 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1157 /* If adding to array with a bitmap, then we can accept an 1164 /* If adding to array with a bitmap, then we can accept an
1158 * older device, but not too old. 1165 * older device, but not too old.
1159 */ 1166 */
1160 __u64 ev1 = le64_to_cpu(sb->events);
1161 if (ev1 < mddev->bitmap->events_cleared) 1167 if (ev1 < mddev->bitmap->events_cleared)
1162 return 0; 1168 return 0;
1163 } else /* just a hot-add of a new device, leave raid_disk at -1 */ 1169 } else {
1164 return 0; 1170 if (ev1 < mddev->events)
1165 1171 /* just a hot-add of a new device, leave raid_disk at -1 */
1172 return 0;
1173 }
1166 if (mddev->level != LEVEL_MULTIPATH) { 1174 if (mddev->level != LEVEL_MULTIPATH) {
1167 int role; 1175 int role;
1168 rdev->desc_nr = le32_to_cpu(sb->dev_number); 1176 rdev->desc_nr = le32_to_cpu(sb->dev_number);
@@ -1174,7 +1182,11 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1174 set_bit(Faulty, &rdev->flags); 1182 set_bit(Faulty, &rdev->flags);
1175 break; 1183 break;
1176 default: 1184 default:
1177 set_bit(In_sync, &rdev->flags); 1185 if ((le32_to_cpu(sb->feature_map) &
1186 MD_FEATURE_RECOVERY_OFFSET))
1187 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1188 else
1189 set_bit(In_sync, &rdev->flags);
1178 rdev->raid_disk = role; 1190 rdev->raid_disk = role;
1179 break; 1191 break;
1180 } 1192 }
@@ -1198,6 +1210,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1198 1210
1199 sb->feature_map = 0; 1211 sb->feature_map = 0;
1200 sb->pad0 = 0; 1212 sb->pad0 = 0;
1213 sb->recovery_offset = cpu_to_le64(0);
1201 memset(sb->pad1, 0, sizeof(sb->pad1)); 1214 memset(sb->pad1, 0, sizeof(sb->pad1));
1202 memset(sb->pad2, 0, sizeof(sb->pad2)); 1215 memset(sb->pad2, 0, sizeof(sb->pad2));
1203 memset(sb->pad3, 0, sizeof(sb->pad3)); 1216 memset(sb->pad3, 0, sizeof(sb->pad3));
@@ -1218,6 +1231,14 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1218 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset); 1231 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
1219 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); 1232 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1220 } 1233 }
1234
1235 if (rdev->raid_disk >= 0 &&
1236 !test_bit(In_sync, &rdev->flags) &&
1237 rdev->recovery_offset > 0) {
1238 sb->feature_map |= cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
1239 sb->recovery_offset = cpu_to_le64(rdev->recovery_offset);
1240 }
1241
1221 if (mddev->reshape_position != MaxSector) { 1242 if (mddev->reshape_position != MaxSector) {
1222 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE); 1243 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
1223 sb->reshape_position = cpu_to_le64(mddev->reshape_position); 1244 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
@@ -1242,11 +1263,12 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1242 sb->dev_roles[i] = cpu_to_le16(0xfffe); 1263 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1243 else if (test_bit(In_sync, &rdev2->flags)) 1264 else if (test_bit(In_sync, &rdev2->flags))
1244 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); 1265 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1266 else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
1267 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1245 else 1268 else
1246 sb->dev_roles[i] = cpu_to_le16(0xffff); 1269 sb->dev_roles[i] = cpu_to_le16(0xffff);
1247 } 1270 }
1248 1271
1249 sb->recovery_offset = cpu_to_le64(0); /* not supported yet */
1250 sb->sb_csum = calc_sb_1_csum(sb); 1272 sb->sb_csum = calc_sb_1_csum(sb);
1251} 1273}
1252 1274
@@ -1507,7 +1529,7 @@ static void print_rdev(mdk_rdev_t *rdev)
1507 printk(KERN_INFO "md: no rdev superblock!\n"); 1529 printk(KERN_INFO "md: no rdev superblock!\n");
1508} 1530}
1509 1531
1510void md_print_devices(void) 1532static void md_print_devices(void)
1511{ 1533{
1512 struct list_head *tmp, *tmp2; 1534 struct list_head *tmp, *tmp2;
1513 mdk_rdev_t *rdev; 1535 mdk_rdev_t *rdev;
@@ -1536,15 +1558,30 @@ void md_print_devices(void)
1536} 1558}
1537 1559
1538 1560
1539static void sync_sbs(mddev_t * mddev) 1561static void sync_sbs(mddev_t * mddev, int nospares)
1540{ 1562{
1563 /* Update each superblock (in-memory image), but
1564 * if we are allowed to, skip spares which already
1565 * have the right event counter, or have one earlier
1566 * (which would mean they aren't being marked as dirty
1567 * with the rest of the array)
1568 */
1541 mdk_rdev_t *rdev; 1569 mdk_rdev_t *rdev;
1542 struct list_head *tmp; 1570 struct list_head *tmp;
1543 1571
1544 ITERATE_RDEV(mddev,rdev,tmp) { 1572 ITERATE_RDEV(mddev,rdev,tmp) {
1545 super_types[mddev->major_version]. 1573 if (rdev->sb_events == mddev->events ||
1546 sync_super(mddev, rdev); 1574 (nospares &&
1547 rdev->sb_loaded = 1; 1575 rdev->raid_disk < 0 &&
1576 (rdev->sb_events&1)==0 &&
1577 rdev->sb_events+1 == mddev->events)) {
1578 /* Don't update this superblock */
1579 rdev->sb_loaded = 2;
1580 } else {
1581 super_types[mddev->major_version].
1582 sync_super(mddev, rdev);
1583 rdev->sb_loaded = 1;
1584 }
1548 } 1585 }
1549} 1586}
1550 1587
@@ -1554,12 +1591,42 @@ void md_update_sb(mddev_t * mddev)
1554 struct list_head *tmp; 1591 struct list_head *tmp;
1555 mdk_rdev_t *rdev; 1592 mdk_rdev_t *rdev;
1556 int sync_req; 1593 int sync_req;
1594 int nospares = 0;
1557 1595
1558repeat: 1596repeat:
1559 spin_lock_irq(&mddev->write_lock); 1597 spin_lock_irq(&mddev->write_lock);
1560 sync_req = mddev->in_sync; 1598 sync_req = mddev->in_sync;
1561 mddev->utime = get_seconds(); 1599 mddev->utime = get_seconds();
1562 mddev->events ++; 1600 if (mddev->sb_dirty == 3)
1601 /* just a clean<-> dirty transition, possibly leave spares alone,
1602 * though if events isn't the right even/odd, we will have to do
1603 * spares after all
1604 */
1605 nospares = 1;
1606
1607 /* If this is just a dirty<->clean transition, and the array is clean
1608 * and 'events' is odd, we can roll back to the previous clean state */
1609 if (mddev->sb_dirty == 3
1610 && (mddev->in_sync && mddev->recovery_cp == MaxSector)
1611 && (mddev->events & 1))
1612 mddev->events--;
1613 else {
1614 /* otherwise we have to go forward and ... */
1615 mddev->events ++;
1616 if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
1617 /* .. if the array isn't clean, insist on an odd 'events' */
1618 if ((mddev->events&1)==0) {
1619 mddev->events++;
1620 nospares = 0;
1621 }
1622 } else {
1623 /* otherwise insist on an even 'events' (for clean states) */
1624 if ((mddev->events&1)) {
1625 mddev->events++;
1626 nospares = 0;
1627 }
1628 }
1629 }
1563 1630
1564 if (!mddev->events) { 1631 if (!mddev->events) {
1565 /* 1632 /*
@@ -1571,7 +1638,7 @@ repeat:
1571 mddev->events --; 1638 mddev->events --;
1572 } 1639 }
1573 mddev->sb_dirty = 2; 1640 mddev->sb_dirty = 2;
1574 sync_sbs(mddev); 1641 sync_sbs(mddev, nospares);
1575 1642
1576 /* 1643 /*
1577 * do not write anything to disk if using 1644 * do not write anything to disk if using
@@ -1593,6 +1660,8 @@ repeat:
1593 ITERATE_RDEV(mddev,rdev,tmp) { 1660 ITERATE_RDEV(mddev,rdev,tmp) {
1594 char b[BDEVNAME_SIZE]; 1661 char b[BDEVNAME_SIZE];
1595 dprintk(KERN_INFO "md: "); 1662 dprintk(KERN_INFO "md: ");
1663 if (rdev->sb_loaded != 1)
1664 continue; /* no noise on spare devices */
1596 if (test_bit(Faulty, &rdev->flags)) 1665 if (test_bit(Faulty, &rdev->flags))
1597 dprintk("(skipping faulty "); 1666 dprintk("(skipping faulty ");
1598 1667
@@ -1604,6 +1673,7 @@ repeat:
1604 dprintk(KERN_INFO "(write) %s's sb offset: %llu\n", 1673 dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
1605 bdevname(rdev->bdev,b), 1674 bdevname(rdev->bdev,b),
1606 (unsigned long long)rdev->sb_offset); 1675 (unsigned long long)rdev->sb_offset);
1676 rdev->sb_events = mddev->events;
1607 1677
1608 } else 1678 } else
1609 dprintk(")\n"); 1679 dprintk(")\n");
@@ -1667,6 +1737,10 @@ state_show(mdk_rdev_t *rdev, char *page)
1667 len += sprintf(page+len, "%sin_sync",sep); 1737 len += sprintf(page+len, "%sin_sync",sep);
1668 sep = ","; 1738 sep = ",";
1669 } 1739 }
1740 if (test_bit(WriteMostly, &rdev->flags)) {
1741 len += sprintf(page+len, "%swrite_mostly",sep);
1742 sep = ",";
1743 }
1670 if (!test_bit(Faulty, &rdev->flags) && 1744 if (!test_bit(Faulty, &rdev->flags) &&
1671 !test_bit(In_sync, &rdev->flags)) { 1745 !test_bit(In_sync, &rdev->flags)) {
1672 len += sprintf(page+len, "%sspare", sep); 1746 len += sprintf(page+len, "%sspare", sep);
@@ -1675,8 +1749,40 @@ state_show(mdk_rdev_t *rdev, char *page)
1675 return len+sprintf(page+len, "\n"); 1749 return len+sprintf(page+len, "\n");
1676} 1750}
1677 1751
1752static ssize_t
1753state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1754{
1755 /* can write
1756 * faulty - simulates and error
1757 * remove - disconnects the device
1758 * writemostly - sets write_mostly
1759 * -writemostly - clears write_mostly
1760 */
1761 int err = -EINVAL;
1762 if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
1763 md_error(rdev->mddev, rdev);
1764 err = 0;
1765 } else if (cmd_match(buf, "remove")) {
1766 if (rdev->raid_disk >= 0)
1767 err = -EBUSY;
1768 else {
1769 mddev_t *mddev = rdev->mddev;
1770 kick_rdev_from_array(rdev);
1771 md_update_sb(mddev);
1772 md_new_event(mddev);
1773 err = 0;
1774 }
1775 } else if (cmd_match(buf, "writemostly")) {
1776 set_bit(WriteMostly, &rdev->flags);
1777 err = 0;
1778 } else if (cmd_match(buf, "-writemostly")) {
1779 clear_bit(WriteMostly, &rdev->flags);
1780 err = 0;
1781 }
1782 return err ? err : len;
1783}
1678static struct rdev_sysfs_entry 1784static struct rdev_sysfs_entry
1679rdev_state = __ATTR_RO(state); 1785rdev_state = __ATTR(state, 0644, state_show, state_store);
1680 1786
1681static ssize_t 1787static ssize_t
1682super_show(mdk_rdev_t *rdev, char *page) 1788super_show(mdk_rdev_t *rdev, char *page)
@@ -1873,6 +1979,7 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
1873 rdev->desc_nr = -1; 1979 rdev->desc_nr = -1;
1874 rdev->flags = 0; 1980 rdev->flags = 0;
1875 rdev->data_offset = 0; 1981 rdev->data_offset = 0;
1982 rdev->sb_events = 0;
1876 atomic_set(&rdev->nr_pending, 0); 1983 atomic_set(&rdev->nr_pending, 0);
1877 atomic_set(&rdev->read_errors, 0); 1984 atomic_set(&rdev->read_errors, 0);
1878 atomic_set(&rdev->corrected_errors, 0); 1985 atomic_set(&rdev->corrected_errors, 0);
@@ -1978,6 +2085,54 @@ static void analyze_sbs(mddev_t * mddev)
1978} 2085}
1979 2086
1980static ssize_t 2087static ssize_t
2088safe_delay_show(mddev_t *mddev, char *page)
2089{
2090 int msec = (mddev->safemode_delay*1000)/HZ;
2091 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
2092}
2093static ssize_t
2094safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
2095{
2096 int scale=1;
2097 int dot=0;
2098 int i;
2099 unsigned long msec;
2100 char buf[30];
2101 char *e;
2102 /* remove a period, and count digits after it */
2103 if (len >= sizeof(buf))
2104 return -EINVAL;
2105 strlcpy(buf, cbuf, len);
2106 buf[len] = 0;
2107 for (i=0; i<len; i++) {
2108 if (dot) {
2109 if (isdigit(buf[i])) {
2110 buf[i-1] = buf[i];
2111 scale *= 10;
2112 }
2113 buf[i] = 0;
2114 } else if (buf[i] == '.') {
2115 dot=1;
2116 buf[i] = 0;
2117 }
2118 }
2119 msec = simple_strtoul(buf, &e, 10);
2120 if (e == buf || (*e && *e != '\n'))
2121 return -EINVAL;
2122 msec = (msec * 1000) / scale;
2123 if (msec == 0)
2124 mddev->safemode_delay = 0;
2125 else {
2126 mddev->safemode_delay = (msec*HZ)/1000;
2127 if (mddev->safemode_delay == 0)
2128 mddev->safemode_delay = 1;
2129 }
2130 return len;
2131}
2132static struct md_sysfs_entry md_safe_delay =
2133__ATTR(safe_mode_delay, 0644,safe_delay_show, safe_delay_store);
2134
2135static ssize_t
1981level_show(mddev_t *mddev, char *page) 2136level_show(mddev_t *mddev, char *page)
1982{ 2137{
1983 struct mdk_personality *p = mddev->pers; 2138 struct mdk_personality *p = mddev->pers;
@@ -2012,6 +2167,32 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
2012static struct md_sysfs_entry md_level = 2167static struct md_sysfs_entry md_level =
2013__ATTR(level, 0644, level_show, level_store); 2168__ATTR(level, 0644, level_show, level_store);
2014 2169
2170
2171static ssize_t
2172layout_show(mddev_t *mddev, char *page)
2173{
2174 /* just a number, not meaningful for all levels */
2175 return sprintf(page, "%d\n", mddev->layout);
2176}
2177
2178static ssize_t
2179layout_store(mddev_t *mddev, const char *buf, size_t len)
2180{
2181 char *e;
2182 unsigned long n = simple_strtoul(buf, &e, 10);
2183 if (mddev->pers)
2184 return -EBUSY;
2185
2186 if (!*buf || (*e && *e != '\n'))
2187 return -EINVAL;
2188
2189 mddev->layout = n;
2190 return len;
2191}
2192static struct md_sysfs_entry md_layout =
2193__ATTR(layout, 0655, layout_show, layout_store);
2194
2195
2015static ssize_t 2196static ssize_t
2016raid_disks_show(mddev_t *mddev, char *page) 2197raid_disks_show(mddev_t *mddev, char *page)
2017{ 2198{
@@ -2067,6 +2248,200 @@ static struct md_sysfs_entry md_chunk_size =
2067__ATTR(chunk_size, 0644, chunk_size_show, chunk_size_store); 2248__ATTR(chunk_size, 0644, chunk_size_show, chunk_size_store);
2068 2249
2069static ssize_t 2250static ssize_t
2251resync_start_show(mddev_t *mddev, char *page)
2252{
2253 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
2254}
2255
2256static ssize_t
2257resync_start_store(mddev_t *mddev, const char *buf, size_t len)
2258{
2259 /* can only set chunk_size if array is not yet active */
2260 char *e;
2261 unsigned long long n = simple_strtoull(buf, &e, 10);
2262
2263 if (mddev->pers)
2264 return -EBUSY;
2265 if (!*buf || (*e && *e != '\n'))
2266 return -EINVAL;
2267
2268 mddev->recovery_cp = n;
2269 return len;
2270}
2271static struct md_sysfs_entry md_resync_start =
2272__ATTR(resync_start, 0644, resync_start_show, resync_start_store);
2273
2274/*
2275 * The array state can be:
2276 *
2277 * clear
2278 * No devices, no size, no level
2279 * Equivalent to STOP_ARRAY ioctl
2280 * inactive
2281 * May have some settings, but array is not active
2282 * all IO results in error
2283 * When written, doesn't tear down array, but just stops it
2284 * suspended (not supported yet)
2285 * All IO requests will block. The array can be reconfigured.
2286 * Writing this, if accepted, will block until array is quiessent
2287 * readonly
2288 * no resync can happen. no superblocks get written.
2289 * write requests fail
2290 * read-auto
2291 * like readonly, but behaves like 'clean' on a write request.
2292 *
2293 * clean - no pending writes, but otherwise active.
2294 * When written to inactive array, starts without resync
2295 * If a write request arrives then
2296 * if metadata is known, mark 'dirty' and switch to 'active'.
2297 * if not known, block and switch to write-pending
2298 * If written to an active array that has pending writes, then fails.
2299 * active
2300 * fully active: IO and resync can be happening.
2301 * When written to inactive array, starts with resync
2302 *
2303 * write-pending
2304 * clean, but writes are blocked waiting for 'active' to be written.
2305 *
2306 * active-idle
2307 * like active, but no writes have been seen for a while (100msec).
2308 *
2309 */
2310enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
2311 write_pending, active_idle, bad_word};
2312static char *array_states[] = {
2313 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
2314 "write-pending", "active-idle", NULL };
2315
2316static int match_word(const char *word, char **list)
2317{
2318 int n;
2319 for (n=0; list[n]; n++)
2320 if (cmd_match(word, list[n]))
2321 break;
2322 return n;
2323}
2324
2325static ssize_t
2326array_state_show(mddev_t *mddev, char *page)
2327{
2328 enum array_state st = inactive;
2329
2330 if (mddev->pers)
2331 switch(mddev->ro) {
2332 case 1:
2333 st = readonly;
2334 break;
2335 case 2:
2336 st = read_auto;
2337 break;
2338 case 0:
2339 if (mddev->in_sync)
2340 st = clean;
2341 else if (mddev->safemode)
2342 st = active_idle;
2343 else
2344 st = active;
2345 }
2346 else {
2347 if (list_empty(&mddev->disks) &&
2348 mddev->raid_disks == 0 &&
2349 mddev->size == 0)
2350 st = clear;
2351 else
2352 st = inactive;
2353 }
2354 return sprintf(page, "%s\n", array_states[st]);
2355}
2356
2357static int do_md_stop(mddev_t * mddev, int ro);
2358static int do_md_run(mddev_t * mddev);
2359static int restart_array(mddev_t *mddev);
2360
2361static ssize_t
2362array_state_store(mddev_t *mddev, const char *buf, size_t len)
2363{
2364 int err = -EINVAL;
2365 enum array_state st = match_word(buf, array_states);
2366 switch(st) {
2367 case bad_word:
2368 break;
2369 case clear:
2370 /* stopping an active array */
2371 if (mddev->pers) {
2372 if (atomic_read(&mddev->active) > 1)
2373 return -EBUSY;
2374 err = do_md_stop(mddev, 0);
2375 }
2376 break;
2377 case inactive:
2378 /* stopping an active array */
2379 if (mddev->pers) {
2380 if (atomic_read(&mddev->active) > 1)
2381 return -EBUSY;
2382 err = do_md_stop(mddev, 2);
2383 }
2384 break;
2385 case suspended:
2386 break; /* not supported yet */
2387 case readonly:
2388 if (mddev->pers)
2389 err = do_md_stop(mddev, 1);
2390 else {
2391 mddev->ro = 1;
2392 err = do_md_run(mddev);
2393 }
2394 break;
2395 case read_auto:
2396 /* stopping an active array */
2397 if (mddev->pers) {
2398 err = do_md_stop(mddev, 1);
2399 if (err == 0)
2400 mddev->ro = 2; /* FIXME mark devices writable */
2401 } else {
2402 mddev->ro = 2;
2403 err = do_md_run(mddev);
2404 }
2405 break;
2406 case clean:
2407 if (mddev->pers) {
2408 restart_array(mddev);
2409 spin_lock_irq(&mddev->write_lock);
2410 if (atomic_read(&mddev->writes_pending) == 0) {
2411 mddev->in_sync = 1;
2412 mddev->sb_dirty = 1;
2413 }
2414 spin_unlock_irq(&mddev->write_lock);
2415 } else {
2416 mddev->ro = 0;
2417 mddev->recovery_cp = MaxSector;
2418 err = do_md_run(mddev);
2419 }
2420 break;
2421 case active:
2422 if (mddev->pers) {
2423 restart_array(mddev);
2424 mddev->sb_dirty = 0;
2425 wake_up(&mddev->sb_wait);
2426 err = 0;
2427 } else {
2428 mddev->ro = 0;
2429 err = do_md_run(mddev);
2430 }
2431 break;
2432 case write_pending:
2433 case active_idle:
2434 /* these cannot be set */
2435 break;
2436 }
2437 if (err)
2438 return err;
2439 else
2440 return len;
2441}
2442static struct md_sysfs_entry md_array_state = __ATTR(array_state, 0644, array_state_show, array_state_store);
2443
2444static ssize_t
2070null_show(mddev_t *mddev, char *page) 2445null_show(mddev_t *mddev, char *page)
2071{ 2446{
2072 return -EINVAL; 2447 return -EINVAL;
@@ -2428,11 +2803,15 @@ __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
2428 2803
2429static struct attribute *md_default_attrs[] = { 2804static struct attribute *md_default_attrs[] = {
2430 &md_level.attr, 2805 &md_level.attr,
2806 &md_layout.attr,
2431 &md_raid_disks.attr, 2807 &md_raid_disks.attr,
2432 &md_chunk_size.attr, 2808 &md_chunk_size.attr,
2433 &md_size.attr, 2809 &md_size.attr,
2810 &md_resync_start.attr,
2434 &md_metadata.attr, 2811 &md_metadata.attr,
2435 &md_new_device.attr, 2812 &md_new_device.attr,
2813 &md_safe_delay.attr,
2814 &md_array_state.attr,
2436 NULL, 2815 NULL,
2437}; 2816};
2438 2817
@@ -2553,8 +2932,6 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
2553 return NULL; 2932 return NULL;
2554} 2933}
2555 2934
2556void md_wakeup_thread(mdk_thread_t *thread);
2557
2558static void md_safemode_timeout(unsigned long data) 2935static void md_safemode_timeout(unsigned long data)
2559{ 2936{
2560 mddev_t *mddev = (mddev_t *) data; 2937 mddev_t *mddev = (mddev_t *) data;
@@ -2708,7 +3085,7 @@ static int do_md_run(mddev_t * mddev)
2708 mddev->safemode = 0; 3085 mddev->safemode = 0;
2709 mddev->safemode_timer.function = md_safemode_timeout; 3086 mddev->safemode_timer.function = md_safemode_timeout;
2710 mddev->safemode_timer.data = (unsigned long) mddev; 3087 mddev->safemode_timer.data = (unsigned long) mddev;
2711 mddev->safemode_delay = (20 * HZ)/1000 +1; /* 20 msec delay */ 3088 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
2712 mddev->in_sync = 1; 3089 mddev->in_sync = 1;
2713 3090
2714 ITERATE_RDEV(mddev,rdev,tmp) 3091 ITERATE_RDEV(mddev,rdev,tmp)
@@ -2736,6 +3113,36 @@ static int do_md_run(mddev_t * mddev)
2736 mddev->queue->queuedata = mddev; 3113 mddev->queue->queuedata = mddev;
2737 mddev->queue->make_request_fn = mddev->pers->make_request; 3114 mddev->queue->make_request_fn = mddev->pers->make_request;
2738 3115
3116 /* If there is a partially-recovered drive we need to
3117 * start recovery here. If we leave it to md_check_recovery,
3118 * it will remove the drives and not do the right thing
3119 */
3120 if (mddev->degraded) {
3121 struct list_head *rtmp;
3122 int spares = 0;
3123 ITERATE_RDEV(mddev,rdev,rtmp)
3124 if (rdev->raid_disk >= 0 &&
3125 !test_bit(In_sync, &rdev->flags) &&
3126 !test_bit(Faulty, &rdev->flags))
3127 /* complete an interrupted recovery */
3128 spares++;
3129 if (spares && mddev->pers->sync_request) {
3130 mddev->recovery = 0;
3131 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3132 mddev->sync_thread = md_register_thread(md_do_sync,
3133 mddev,
3134 "%s_resync");
3135 if (!mddev->sync_thread) {
3136 printk(KERN_ERR "%s: could not start resync"
3137 " thread...\n",
3138 mdname(mddev));
3139 /* leave the spares where they are, it shouldn't hurt */
3140 mddev->recovery = 0;
3141 } else
3142 md_wakeup_thread(mddev->sync_thread);
3143 }
3144 }
3145
2739 mddev->changed = 1; 3146 mddev->changed = 1;
2740 md_new_event(mddev); 3147 md_new_event(mddev);
2741 return 0; 3148 return 0;
@@ -2769,18 +3176,47 @@ static int restart_array(mddev_t *mddev)
2769 */ 3176 */
2770 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3177 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2771 md_wakeup_thread(mddev->thread); 3178 md_wakeup_thread(mddev->thread);
3179 md_wakeup_thread(mddev->sync_thread);
2772 err = 0; 3180 err = 0;
2773 } else { 3181 } else
2774 printk(KERN_ERR "md: %s has no personality assigned.\n",
2775 mdname(mddev));
2776 err = -EINVAL; 3182 err = -EINVAL;
2777 }
2778 3183
2779out: 3184out:
2780 return err; 3185 return err;
2781} 3186}
2782 3187
2783static int do_md_stop(mddev_t * mddev, int ro) 3188/* similar to deny_write_access, but accounts for our holding a reference
3189 * to the file ourselves */
3190static int deny_bitmap_write_access(struct file * file)
3191{
3192 struct inode *inode = file->f_mapping->host;
3193
3194 spin_lock(&inode->i_lock);
3195 if (atomic_read(&inode->i_writecount) > 1) {
3196 spin_unlock(&inode->i_lock);
3197 return -ETXTBSY;
3198 }
3199 atomic_set(&inode->i_writecount, -1);
3200 spin_unlock(&inode->i_lock);
3201
3202 return 0;
3203}
3204
3205static void restore_bitmap_write_access(struct file *file)
3206{
3207 struct inode *inode = file->f_mapping->host;
3208
3209 spin_lock(&inode->i_lock);
3210 atomic_set(&inode->i_writecount, 1);
3211 spin_unlock(&inode->i_lock);
3212}
3213
3214/* mode:
3215 * 0 - completely stop and dis-assemble array
3216 * 1 - switch to readonly
3217 * 2 - stop but do not disassemble array
3218 */
3219static int do_md_stop(mddev_t * mddev, int mode)
2784{ 3220{
2785 int err = 0; 3221 int err = 0;
2786 struct gendisk *disk = mddev->gendisk; 3222 struct gendisk *disk = mddev->gendisk;
@@ -2792,6 +3228,7 @@ static int do_md_stop(mddev_t * mddev, int ro)
2792 } 3228 }
2793 3229
2794 if (mddev->sync_thread) { 3230 if (mddev->sync_thread) {
3231 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
2795 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 3232 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2796 md_unregister_thread(mddev->sync_thread); 3233 md_unregister_thread(mddev->sync_thread);
2797 mddev->sync_thread = NULL; 3234 mddev->sync_thread = NULL;
@@ -2801,12 +3238,15 @@ static int do_md_stop(mddev_t * mddev, int ro)
2801 3238
2802 invalidate_partition(disk, 0); 3239 invalidate_partition(disk, 0);
2803 3240
2804 if (ro) { 3241 switch(mode) {
3242 case 1: /* readonly */
2805 err = -ENXIO; 3243 err = -ENXIO;
2806 if (mddev->ro==1) 3244 if (mddev->ro==1)
2807 goto out; 3245 goto out;
2808 mddev->ro = 1; 3246 mddev->ro = 1;
2809 } else { 3247 break;
3248 case 0: /* disassemble */
3249 case 2: /* stop */
2810 bitmap_flush(mddev); 3250 bitmap_flush(mddev);
2811 md_super_wait(mddev); 3251 md_super_wait(mddev);
2812 if (mddev->ro) 3252 if (mddev->ro)
@@ -2821,19 +3261,20 @@ static int do_md_stop(mddev_t * mddev, int ro)
2821 if (mddev->ro) 3261 if (mddev->ro)
2822 mddev->ro = 0; 3262 mddev->ro = 0;
2823 } 3263 }
2824 if (!mddev->in_sync) { 3264 if (!mddev->in_sync || mddev->sb_dirty) {
2825 /* mark array as shutdown cleanly */ 3265 /* mark array as shutdown cleanly */
2826 mddev->in_sync = 1; 3266 mddev->in_sync = 1;
2827 md_update_sb(mddev); 3267 md_update_sb(mddev);
2828 } 3268 }
2829 if (ro) 3269 if (mode == 1)
2830 set_disk_ro(disk, 1); 3270 set_disk_ro(disk, 1);
3271 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
2831 } 3272 }
2832 3273
2833 /* 3274 /*
2834 * Free resources if final stop 3275 * Free resources if final stop
2835 */ 3276 */
2836 if (!ro) { 3277 if (mode == 0) {
2837 mdk_rdev_t *rdev; 3278 mdk_rdev_t *rdev;
2838 struct list_head *tmp; 3279 struct list_head *tmp;
2839 struct gendisk *disk; 3280 struct gendisk *disk;
@@ -2841,7 +3282,7 @@ static int do_md_stop(mddev_t * mddev, int ro)
2841 3282
2842 bitmap_destroy(mddev); 3283 bitmap_destroy(mddev);
2843 if (mddev->bitmap_file) { 3284 if (mddev->bitmap_file) {
2844 atomic_set(&mddev->bitmap_file->f_dentry->d_inode->i_writecount, 1); 3285 restore_bitmap_write_access(mddev->bitmap_file);
2845 fput(mddev->bitmap_file); 3286 fput(mddev->bitmap_file);
2846 mddev->bitmap_file = NULL; 3287 mddev->bitmap_file = NULL;
2847 } 3288 }
@@ -2857,11 +3298,15 @@ static int do_md_stop(mddev_t * mddev, int ro)
2857 export_array(mddev); 3298 export_array(mddev);
2858 3299
2859 mddev->array_size = 0; 3300 mddev->array_size = 0;
3301 mddev->size = 0;
3302 mddev->raid_disks = 0;
3303 mddev->recovery_cp = 0;
3304
2860 disk = mddev->gendisk; 3305 disk = mddev->gendisk;
2861 if (disk) 3306 if (disk)
2862 set_capacity(disk, 0); 3307 set_capacity(disk, 0);
2863 mddev->changed = 1; 3308 mddev->changed = 1;
2864 } else 3309 } else if (mddev->pers)
2865 printk(KERN_INFO "md: %s switched to read-only mode.\n", 3310 printk(KERN_INFO "md: %s switched to read-only mode.\n",
2866 mdname(mddev)); 3311 mdname(mddev));
2867 err = 0; 3312 err = 0;
@@ -3264,6 +3709,17 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
3264 3709
3265 rdev->raid_disk = -1; 3710 rdev->raid_disk = -1;
3266 err = bind_rdev_to_array(rdev, mddev); 3711 err = bind_rdev_to_array(rdev, mddev);
3712 if (!err && !mddev->pers->hot_remove_disk) {
3713 /* If there is hot_add_disk but no hot_remove_disk
3714 * then added disks for geometry changes,
3715 * and should be added immediately.
3716 */
3717 super_types[mddev->major_version].
3718 validate_super(mddev, rdev);
3719 err = mddev->pers->hot_add_disk(mddev, rdev);
3720 if (err)
3721 unbind_rdev_from_array(rdev);
3722 }
3267 if (err) 3723 if (err)
3268 export_rdev(rdev); 3724 export_rdev(rdev);
3269 3725
@@ -3434,23 +3890,6 @@ abort_export:
3434 return err; 3890 return err;
3435} 3891}
3436 3892
3437/* similar to deny_write_access, but accounts for our holding a reference
3438 * to the file ourselves */
3439static int deny_bitmap_write_access(struct file * file)
3440{
3441 struct inode *inode = file->f_mapping->host;
3442
3443 spin_lock(&inode->i_lock);
3444 if (atomic_read(&inode->i_writecount) > 1) {
3445 spin_unlock(&inode->i_lock);
3446 return -ETXTBSY;
3447 }
3448 atomic_set(&inode->i_writecount, -1);
3449 spin_unlock(&inode->i_lock);
3450
3451 return 0;
3452}
3453
3454static int set_bitmap_file(mddev_t *mddev, int fd) 3893static int set_bitmap_file(mddev_t *mddev, int fd)
3455{ 3894{
3456 int err; 3895 int err;
@@ -3491,12 +3930,17 @@ static int set_bitmap_file(mddev_t *mddev, int fd)
3491 mddev->pers->quiesce(mddev, 1); 3930 mddev->pers->quiesce(mddev, 1);
3492 if (fd >= 0) 3931 if (fd >= 0)
3493 err = bitmap_create(mddev); 3932 err = bitmap_create(mddev);
3494 if (fd < 0 || err) 3933 if (fd < 0 || err) {
3495 bitmap_destroy(mddev); 3934 bitmap_destroy(mddev);
3935 fd = -1; /* make sure to put the file */
3936 }
3496 mddev->pers->quiesce(mddev, 0); 3937 mddev->pers->quiesce(mddev, 0);
3497 } else if (fd < 0) { 3938 }
3498 if (mddev->bitmap_file) 3939 if (fd < 0) {
3940 if (mddev->bitmap_file) {
3941 restore_bitmap_write_access(mddev->bitmap_file);
3499 fput(mddev->bitmap_file); 3942 fput(mddev->bitmap_file);
3943 }
3500 mddev->bitmap_file = NULL; 3944 mddev->bitmap_file = NULL;
3501 } 3945 }
3502 3946
@@ -3977,11 +4421,6 @@ static int md_ioctl(struct inode *inode, struct file *file,
3977 goto done_unlock; 4421 goto done_unlock;
3978 4422
3979 default: 4423 default:
3980 if (_IOC_TYPE(cmd) == MD_MAJOR)
3981 printk(KERN_WARNING "md: %s(pid %d) used"
3982 " obsolete MD ioctl, upgrade your"
3983 " software to use new ictls.\n",
3984 current->comm, current->pid);
3985 err = -EINVAL; 4424 err = -EINVAL;
3986 goto abort_unlock; 4425 goto abort_unlock;
3987 } 4426 }
@@ -4586,7 +5025,7 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
4586 spin_lock_irq(&mddev->write_lock); 5025 spin_lock_irq(&mddev->write_lock);
4587 if (mddev->in_sync) { 5026 if (mddev->in_sync) {
4588 mddev->in_sync = 0; 5027 mddev->in_sync = 0;
4589 mddev->sb_dirty = 1; 5028 mddev->sb_dirty = 3;
4590 md_wakeup_thread(mddev->thread); 5029 md_wakeup_thread(mddev->thread);
4591 } 5030 }
4592 spin_unlock_irq(&mddev->write_lock); 5031 spin_unlock_irq(&mddev->write_lock);
@@ -4599,7 +5038,7 @@ void md_write_end(mddev_t *mddev)
4599 if (atomic_dec_and_test(&mddev->writes_pending)) { 5038 if (atomic_dec_and_test(&mddev->writes_pending)) {
4600 if (mddev->safemode == 2) 5039 if (mddev->safemode == 2)
4601 md_wakeup_thread(mddev->thread); 5040 md_wakeup_thread(mddev->thread);
4602 else 5041 else if (mddev->safemode_delay)
4603 mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay); 5042 mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
4604 } 5043 }
4605} 5044}
@@ -4620,10 +5059,14 @@ void md_do_sync(mddev_t *mddev)
4620 struct list_head *tmp; 5059 struct list_head *tmp;
4621 sector_t last_check; 5060 sector_t last_check;
4622 int skipped = 0; 5061 int skipped = 0;
5062 struct list_head *rtmp;
5063 mdk_rdev_t *rdev;
4623 5064
4624 /* just incase thread restarts... */ 5065 /* just incase thread restarts... */
4625 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) 5066 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
4626 return; 5067 return;
5068 if (mddev->ro) /* never try to sync a read-only array */
5069 return;
4627 5070
4628 /* we overload curr_resync somewhat here. 5071 /* we overload curr_resync somewhat here.
4629 * 0 == not engaged in resync at all 5072 * 0 == not engaged in resync at all
@@ -4682,17 +5125,30 @@ void md_do_sync(mddev_t *mddev)
4682 } 5125 }
4683 } while (mddev->curr_resync < 2); 5126 } while (mddev->curr_resync < 2);
4684 5127
5128 j = 0;
4685 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 5129 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
4686 /* resync follows the size requested by the personality, 5130 /* resync follows the size requested by the personality,
4687 * which defaults to physical size, but can be virtual size 5131 * which defaults to physical size, but can be virtual size
4688 */ 5132 */
4689 max_sectors = mddev->resync_max_sectors; 5133 max_sectors = mddev->resync_max_sectors;
4690 mddev->resync_mismatches = 0; 5134 mddev->resync_mismatches = 0;
5135 /* we don't use the checkpoint if there's a bitmap */
5136 if (!mddev->bitmap &&
5137 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
5138 j = mddev->recovery_cp;
4691 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 5139 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4692 max_sectors = mddev->size << 1; 5140 max_sectors = mddev->size << 1;
4693 else 5141 else {
4694 /* recovery follows the physical size of devices */ 5142 /* recovery follows the physical size of devices */
4695 max_sectors = mddev->size << 1; 5143 max_sectors = mddev->size << 1;
5144 j = MaxSector;
5145 ITERATE_RDEV(mddev,rdev,rtmp)
5146 if (rdev->raid_disk >= 0 &&
5147 !test_bit(Faulty, &rdev->flags) &&
5148 !test_bit(In_sync, &rdev->flags) &&
5149 rdev->recovery_offset < j)
5150 j = rdev->recovery_offset;
5151 }
4696 5152
4697 printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev)); 5153 printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev));
4698 printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:" 5154 printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:"
@@ -4702,12 +5158,7 @@ void md_do_sync(mddev_t *mddev)
4702 speed_max(mddev)); 5158 speed_max(mddev));
4703 5159
4704 is_mddev_idle(mddev); /* this also initializes IO event counters */ 5160 is_mddev_idle(mddev); /* this also initializes IO event counters */
4705 /* we don't use the checkpoint if there's a bitmap */ 5161
4706 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && !mddev->bitmap
4707 && ! test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
4708 j = mddev->recovery_cp;
4709 else
4710 j = 0;
4711 io_sectors = 0; 5162 io_sectors = 0;
4712 for (m = 0; m < SYNC_MARKS; m++) { 5163 for (m = 0; m < SYNC_MARKS; m++) {
4713 mark[m] = jiffies; 5164 mark[m] = jiffies;
@@ -4828,15 +5279,28 @@ void md_do_sync(mddev_t *mddev)
4828 if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) && 5279 if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
4829 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && 5280 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
4830 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && 5281 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
4831 mddev->curr_resync > 2 && 5282 mddev->curr_resync > 2) {
4832 mddev->curr_resync >= mddev->recovery_cp) { 5283 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
4833 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 5284 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
4834 printk(KERN_INFO 5285 if (mddev->curr_resync >= mddev->recovery_cp) {
4835 "md: checkpointing recovery of %s.\n", 5286 printk(KERN_INFO
4836 mdname(mddev)); 5287 "md: checkpointing recovery of %s.\n",
4837 mddev->recovery_cp = mddev->curr_resync; 5288 mdname(mddev));
4838 } else 5289 mddev->recovery_cp = mddev->curr_resync;
4839 mddev->recovery_cp = MaxSector; 5290 }
5291 } else
5292 mddev->recovery_cp = MaxSector;
5293 } else {
5294 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
5295 mddev->curr_resync = MaxSector;
5296 ITERATE_RDEV(mddev,rdev,rtmp)
5297 if (rdev->raid_disk >= 0 &&
5298 !test_bit(Faulty, &rdev->flags) &&
5299 !test_bit(In_sync, &rdev->flags) &&
5300 rdev->recovery_offset < mddev->curr_resync)
5301 rdev->recovery_offset = mddev->curr_resync;
5302 mddev->sb_dirty = 1;
5303 }
4840 } 5304 }
4841 5305
4842 skip: 5306 skip:
@@ -4908,7 +5372,7 @@ void md_check_recovery(mddev_t *mddev)
4908 if (mddev->safemode && !atomic_read(&mddev->writes_pending) && 5372 if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
4909 !mddev->in_sync && mddev->recovery_cp == MaxSector) { 5373 !mddev->in_sync && mddev->recovery_cp == MaxSector) {
4910 mddev->in_sync = 1; 5374 mddev->in_sync = 1;
4911 mddev->sb_dirty = 1; 5375 mddev->sb_dirty = 3;
4912 } 5376 }
4913 if (mddev->safemode == 1) 5377 if (mddev->safemode == 1)
4914 mddev->safemode = 0; 5378 mddev->safemode = 0;
@@ -4957,6 +5421,8 @@ void md_check_recovery(mddev_t *mddev)
4957 clear_bit(MD_RECOVERY_INTR, &mddev->recovery); 5421 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
4958 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 5422 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
4959 5423
5424 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
5425 goto unlock;
4960 /* no recovery is running. 5426 /* no recovery is running.
4961 * remove any failed drives, then 5427 * remove any failed drives, then
4962 * add spares if possible. 5428 * add spares if possible.
@@ -4979,6 +5445,7 @@ void md_check_recovery(mddev_t *mddev)
4979 ITERATE_RDEV(mddev,rdev,rtmp) 5445 ITERATE_RDEV(mddev,rdev,rtmp)
4980 if (rdev->raid_disk < 0 5446 if (rdev->raid_disk < 0
4981 && !test_bit(Faulty, &rdev->flags)) { 5447 && !test_bit(Faulty, &rdev->flags)) {
5448 rdev->recovery_offset = 0;
4982 if (mddev->pers->hot_add_disk(mddev,rdev)) { 5449 if (mddev->pers->hot_add_disk(mddev,rdev)) {
4983 char nm[20]; 5450 char nm[20];
4984 sprintf(nm, "rd%d", rdev->raid_disk); 5451 sprintf(nm, "rd%d", rdev->raid_disk);
@@ -5216,7 +5683,6 @@ EXPORT_SYMBOL(md_write_end);
5216EXPORT_SYMBOL(md_register_thread); 5683EXPORT_SYMBOL(md_register_thread);
5217EXPORT_SYMBOL(md_unregister_thread); 5684EXPORT_SYMBOL(md_unregister_thread);
5218EXPORT_SYMBOL(md_wakeup_thread); 5685EXPORT_SYMBOL(md_wakeup_thread);
5219EXPORT_SYMBOL(md_print_devices);
5220EXPORT_SYMBOL(md_check_recovery); 5686EXPORT_SYMBOL(md_check_recovery);
5221MODULE_LICENSE("GPL"); 5687MODULE_LICENSE("GPL");
5222MODULE_ALIAS("md"); 5688MODULE_ALIAS("md");
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4070eff6f0..cead918578 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -374,26 +374,26 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
374 * already. 374 * already.
375 */ 375 */
376 if (atomic_dec_and_test(&r1_bio->remaining)) { 376 if (atomic_dec_and_test(&r1_bio->remaining)) {
377 if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) { 377 if (test_bit(R1BIO_BarrierRetry, &r1_bio->state))
378 reschedule_retry(r1_bio); 378 reschedule_retry(r1_bio);
379 goto out; 379 else {
380 } 380 /* it really is the end of this request */
381 /* it really is the end of this request */ 381 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
382 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { 382 /* free extra copy of the data pages */
383 /* free extra copy of the data pages */ 383 int i = bio->bi_vcnt;
384 int i = bio->bi_vcnt; 384 while (i--)
385 while (i--) 385 safe_put_page(bio->bi_io_vec[i].bv_page);
386 safe_put_page(bio->bi_io_vec[i].bv_page); 386 }
387 /* clear the bitmap if all writes complete successfully */
388 bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
389 r1_bio->sectors,
390 !test_bit(R1BIO_Degraded, &r1_bio->state),
391 behind);
392 md_write_end(r1_bio->mddev);
393 raid_end_bio_io(r1_bio);
387 } 394 }
388 /* clear the bitmap if all writes complete successfully */
389 bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
390 r1_bio->sectors,
391 !test_bit(R1BIO_Degraded, &r1_bio->state),
392 behind);
393 md_write_end(r1_bio->mddev);
394 raid_end_bio_io(r1_bio);
395 } 395 }
396 out: 396
397 if (to_put) 397 if (to_put)
398 bio_put(to_put); 398 bio_put(to_put);
399 399
@@ -1625,6 +1625,12 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1625 /* before building a request, check if we can skip these blocks.. 1625 /* before building a request, check if we can skip these blocks..
1626 * This call the bitmap_start_sync doesn't actually record anything 1626 * This call the bitmap_start_sync doesn't actually record anything
1627 */ 1627 */
1628 if (mddev->bitmap == NULL &&
1629 mddev->recovery_cp == MaxSector &&
1630 conf->fullsync == 0) {
1631 *skipped = 1;
1632 return max_sector - sector_nr;
1633 }
1628 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && 1634 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
1629 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 1635 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
1630 /* We can skip this block, and probably several more */ 1636 /* We can skip this block, and probably several more */
@@ -1888,7 +1894,8 @@ static int run(mddev_t *mddev)
1888 1894
1889 disk = conf->mirrors + i; 1895 disk = conf->mirrors + i;
1890 1896
1891 if (!disk->rdev) { 1897 if (!disk->rdev ||
1898 !test_bit(In_sync, &disk->rdev->flags)) {
1892 disk->head_position = 0; 1899 disk->head_position = 0;
1893 mddev->degraded++; 1900 mddev->degraded++;
1894 } 1901 }
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 1440935414..7f636283a1 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -29,6 +29,7 @@
29 * raid_disks 29 * raid_disks
30 * near_copies (stored in low byte of layout) 30 * near_copies (stored in low byte of layout)
31 * far_copies (stored in second byte of layout) 31 * far_copies (stored in second byte of layout)
32 * far_offset (stored in bit 16 of layout )
32 * 33 *
33 * The data to be stored is divided into chunks using chunksize. 34 * The data to be stored is divided into chunks using chunksize.
34 * Each device is divided into far_copies sections. 35 * Each device is divided into far_copies sections.
@@ -36,10 +37,14 @@
36 * near_copies copies of each chunk is stored (each on a different drive). 37 * near_copies copies of each chunk is stored (each on a different drive).
37 * The starting device for each section is offset near_copies from the starting 38 * The starting device for each section is offset near_copies from the starting
38 * device of the previous section. 39 * device of the previous section.
39 * Thus there are (near_copies*far_copies) of each chunk, and each is on a different 40 * Thus they are (near_copies*far_copies) of each chunk, and each is on a different
40 * drive. 41 * drive.
41 * near_copies and far_copies must be at least one, and their product is at most 42 * near_copies and far_copies must be at least one, and their product is at most
42 * raid_disks. 43 * raid_disks.
44 *
45 * If far_offset is true, then the far_copies are handled a bit differently.
46 * The copies are still in different stripes, but instead of be very far apart
47 * on disk, there are adjacent stripes.
43 */ 48 */
44 49
45/* 50/*
@@ -357,8 +362,7 @@ static int raid10_end_write_request(struct bio *bio, unsigned int bytes_done, in
357 * With this layout, and block is never stored twice on the one device. 362 * With this layout, and block is never stored twice on the one device.
358 * 363 *
359 * raid10_find_phys finds the sector offset of a given virtual sector 364 * raid10_find_phys finds the sector offset of a given virtual sector
360 * on each device that it is on. If a block isn't on a device, 365 * on each device that it is on.
361 * that entry in the array is set to MaxSector.
362 * 366 *
363 * raid10_find_virt does the reverse mapping, from a device and a 367 * raid10_find_virt does the reverse mapping, from a device and a
364 * sector offset to a virtual address 368 * sector offset to a virtual address
@@ -381,6 +385,8 @@ static void raid10_find_phys(conf_t *conf, r10bio_t *r10bio)
381 chunk *= conf->near_copies; 385 chunk *= conf->near_copies;
382 stripe = chunk; 386 stripe = chunk;
383 dev = sector_div(stripe, conf->raid_disks); 387 dev = sector_div(stripe, conf->raid_disks);
388 if (conf->far_offset)
389 stripe *= conf->far_copies;
384 390
385 sector += stripe << conf->chunk_shift; 391 sector += stripe << conf->chunk_shift;
386 392
@@ -414,16 +420,24 @@ static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
414{ 420{
415 sector_t offset, chunk, vchunk; 421 sector_t offset, chunk, vchunk;
416 422
417 while (sector > conf->stride) {
418 sector -= conf->stride;
419 if (dev < conf->near_copies)
420 dev += conf->raid_disks - conf->near_copies;
421 else
422 dev -= conf->near_copies;
423 }
424
425 offset = sector & conf->chunk_mask; 423 offset = sector & conf->chunk_mask;
426 chunk = sector >> conf->chunk_shift; 424 if (conf->far_offset) {
425 int fc;
426 chunk = sector >> conf->chunk_shift;
427 fc = sector_div(chunk, conf->far_copies);
428 dev -= fc * conf->near_copies;
429 if (dev < 0)
430 dev += conf->raid_disks;
431 } else {
432 while (sector > conf->stride) {
433 sector -= conf->stride;
434 if (dev < conf->near_copies)
435 dev += conf->raid_disks - conf->near_copies;
436 else
437 dev -= conf->near_copies;
438 }
439 chunk = sector >> conf->chunk_shift;
440 }
427 vchunk = chunk * conf->raid_disks + dev; 441 vchunk = chunk * conf->raid_disks + dev;
428 sector_div(vchunk, conf->near_copies); 442 sector_div(vchunk, conf->near_copies);
429 return (vchunk << conf->chunk_shift) + offset; 443 return (vchunk << conf->chunk_shift) + offset;
@@ -900,9 +914,12 @@ static void status(struct seq_file *seq, mddev_t *mddev)
900 seq_printf(seq, " %dK chunks", mddev->chunk_size/1024); 914 seq_printf(seq, " %dK chunks", mddev->chunk_size/1024);
901 if (conf->near_copies > 1) 915 if (conf->near_copies > 1)
902 seq_printf(seq, " %d near-copies", conf->near_copies); 916 seq_printf(seq, " %d near-copies", conf->near_copies);
903 if (conf->far_copies > 1) 917 if (conf->far_copies > 1) {
904 seq_printf(seq, " %d far-copies", conf->far_copies); 918 if (conf->far_offset)
905 919 seq_printf(seq, " %d offset-copies", conf->far_copies);
920 else
921 seq_printf(seq, " %d far-copies", conf->far_copies);
922 }
906 seq_printf(seq, " [%d/%d] [", conf->raid_disks, 923 seq_printf(seq, " [%d/%d] [", conf->raid_disks,
907 conf->working_disks); 924 conf->working_disks);
908 for (i = 0; i < conf->raid_disks; i++) 925 for (i = 0; i < conf->raid_disks; i++)
@@ -1915,7 +1932,7 @@ static int run(mddev_t *mddev)
1915 mirror_info_t *disk; 1932 mirror_info_t *disk;
1916 mdk_rdev_t *rdev; 1933 mdk_rdev_t *rdev;
1917 struct list_head *tmp; 1934 struct list_head *tmp;
1918 int nc, fc; 1935 int nc, fc, fo;
1919 sector_t stride, size; 1936 sector_t stride, size;
1920 1937
1921 if (mddev->chunk_size == 0) { 1938 if (mddev->chunk_size == 0) {
@@ -1925,8 +1942,9 @@ static int run(mddev_t *mddev)
1925 1942
1926 nc = mddev->layout & 255; 1943 nc = mddev->layout & 255;
1927 fc = (mddev->layout >> 8) & 255; 1944 fc = (mddev->layout >> 8) & 255;
1945 fo = mddev->layout & (1<<16);
1928 if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks || 1946 if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks ||
1929 (mddev->layout >> 16)) { 1947 (mddev->layout >> 17)) {
1930 printk(KERN_ERR "raid10: %s: unsupported raid10 layout: 0x%8x\n", 1948 printk(KERN_ERR "raid10: %s: unsupported raid10 layout: 0x%8x\n",
1931 mdname(mddev), mddev->layout); 1949 mdname(mddev), mddev->layout);
1932 goto out; 1950 goto out;
@@ -1958,12 +1976,16 @@ static int run(mddev_t *mddev)
1958 conf->near_copies = nc; 1976 conf->near_copies = nc;
1959 conf->far_copies = fc; 1977 conf->far_copies = fc;
1960 conf->copies = nc*fc; 1978 conf->copies = nc*fc;
1979 conf->far_offset = fo;
1961 conf->chunk_mask = (sector_t)(mddev->chunk_size>>9)-1; 1980 conf->chunk_mask = (sector_t)(mddev->chunk_size>>9)-1;
1962 conf->chunk_shift = ffz(~mddev->chunk_size) - 9; 1981 conf->chunk_shift = ffz(~mddev->chunk_size) - 9;
1963 stride = mddev->size >> (conf->chunk_shift-1); 1982 if (fo)
1964 sector_div(stride, fc); 1983 conf->stride = 1 << conf->chunk_shift;
1965 conf->stride = stride << conf->chunk_shift; 1984 else {
1966 1985 stride = mddev->size >> (conf->chunk_shift-1);
1986 sector_div(stride, fc);
1987 conf->stride = stride << conf->chunk_shift;
1988 }
1967 conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc, 1989 conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
1968 r10bio_pool_free, conf); 1990 r10bio_pool_free, conf);
1969 if (!conf->r10bio_pool) { 1991 if (!conf->r10bio_pool) {
@@ -2015,7 +2037,8 @@ static int run(mddev_t *mddev)
2015 2037
2016 disk = conf->mirrors + i; 2038 disk = conf->mirrors + i;
2017 2039
2018 if (!disk->rdev) { 2040 if (!disk->rdev ||
2041 !test_bit(In_sync, &rdev->flags)) {
2019 disk->head_position = 0; 2042 disk->head_position = 0;
2020 mddev->degraded++; 2043 mddev->degraded++;
2021 } 2044 }
@@ -2037,7 +2060,13 @@ static int run(mddev_t *mddev)
2037 /* 2060 /*
2038 * Ok, everything is just fine now 2061 * Ok, everything is just fine now
2039 */ 2062 */
2040 size = conf->stride * conf->raid_disks; 2063 if (conf->far_offset) {
2064 size = mddev->size >> (conf->chunk_shift-1);
2065 size *= conf->raid_disks;
2066 size <<= conf->chunk_shift;
2067 sector_div(size, conf->far_copies);
2068 } else
2069 size = conf->stride * conf->raid_disks;
2041 sector_div(size, conf->near_copies); 2070 sector_div(size, conf->near_copies);
2042 mddev->array_size = size/2; 2071 mddev->array_size = size/2;
2043 mddev->resync_max_sectors = size; 2072 mddev->resync_max_sectors = size;
@@ -2050,7 +2079,7 @@ static int run(mddev_t *mddev)
2050 * maybe... 2079 * maybe...
2051 */ 2080 */
2052 { 2081 {
2053 int stripe = conf->raid_disks * mddev->chunk_size / PAGE_SIZE; 2082 int stripe = conf->raid_disks * (mddev->chunk_size / PAGE_SIZE);
2054 stripe /= conf->near_copies; 2083 stripe /= conf->near_copies;
2055 if (mddev->queue->backing_dev_info.ra_pages < 2* stripe) 2084 if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
2056 mddev->queue->backing_dev_info.ra_pages = 2* stripe; 2085 mddev->queue->backing_dev_info.ra_pages = 2* stripe;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 3184360404..f920e50ea1 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2,8 +2,11 @@
2 * raid5.c : Multiple Devices driver for Linux 2 * raid5.c : Multiple Devices driver for Linux
3 * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman 3 * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
4 * Copyright (C) 1999, 2000 Ingo Molnar 4 * Copyright (C) 1999, 2000 Ingo Molnar
5 * Copyright (C) 2002, 2003 H. Peter Anvin
5 * 6 *
6 * RAID-5 management functions. 7 * RAID-4/5/6 management functions.
8 * Thanks to Penguin Computing for making the RAID-6 development possible
9 * by donating a test server!
7 * 10 *
8 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 12 * it under the terms of the GNU General Public License as published by
@@ -19,11 +22,11 @@
19#include <linux/config.h> 22#include <linux/config.h>
20#include <linux/module.h> 23#include <linux/module.h>
21#include <linux/slab.h> 24#include <linux/slab.h>
22#include <linux/raid/raid5.h>
23#include <linux/highmem.h> 25#include <linux/highmem.h>
24#include <linux/bitops.h> 26#include <linux/bitops.h>
25#include <linux/kthread.h> 27#include <linux/kthread.h>
26#include <asm/atomic.h> 28#include <asm/atomic.h>
29#include "raid6.h"
27 30
28#include <linux/raid/bitmap.h> 31#include <linux/raid/bitmap.h>
29 32
@@ -68,6 +71,16 @@
68#define __inline__ 71#define __inline__
69#endif 72#endif
70 73
74#if !RAID6_USE_EMPTY_ZERO_PAGE
75/* In .bss so it's zeroed */
76const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
77#endif
78
79static inline int raid6_next_disk(int disk, int raid_disks)
80{
81 disk++;
82 return (disk < raid_disks) ? disk : 0;
83}
71static void print_raid5_conf (raid5_conf_t *conf); 84static void print_raid5_conf (raid5_conf_t *conf);
72 85
73static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh) 86static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
@@ -104,7 +117,7 @@ static void release_stripe(struct stripe_head *sh)
104{ 117{
105 raid5_conf_t *conf = sh->raid_conf; 118 raid5_conf_t *conf = sh->raid_conf;
106 unsigned long flags; 119 unsigned long flags;
107 120
108 spin_lock_irqsave(&conf->device_lock, flags); 121 spin_lock_irqsave(&conf->device_lock, flags);
109 __release_stripe(conf, sh); 122 __release_stripe(conf, sh);
110 spin_unlock_irqrestore(&conf->device_lock, flags); 123 spin_unlock_irqrestore(&conf->device_lock, flags);
@@ -117,7 +130,7 @@ static inline void remove_hash(struct stripe_head *sh)
117 hlist_del_init(&sh->hash); 130 hlist_del_init(&sh->hash);
118} 131}
119 132
120static void insert_hash(raid5_conf_t *conf, struct stripe_head *sh) 133static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
121{ 134{
122 struct hlist_head *hp = stripe_hash(conf, sh->sector); 135 struct hlist_head *hp = stripe_hash(conf, sh->sector);
123 136
@@ -190,7 +203,7 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int
190 (unsigned long long)sh->sector); 203 (unsigned long long)sh->sector);
191 204
192 remove_hash(sh); 205 remove_hash(sh);
193 206
194 sh->sector = sector; 207 sh->sector = sector;
195 sh->pd_idx = pd_idx; 208 sh->pd_idx = pd_idx;
196 sh->state = 0; 209 sh->state = 0;
@@ -269,8 +282,9 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector
269 } else { 282 } else {
270 if (!test_bit(STRIPE_HANDLE, &sh->state)) 283 if (!test_bit(STRIPE_HANDLE, &sh->state))
271 atomic_inc(&conf->active_stripes); 284 atomic_inc(&conf->active_stripes);
272 if (!list_empty(&sh->lru)) 285 if (list_empty(&sh->lru))
273 list_del_init(&sh->lru); 286 BUG();
287 list_del_init(&sh->lru);
274 } 288 }
275 } 289 }
276 } while (sh == NULL); 290 } while (sh == NULL);
@@ -321,10 +335,9 @@ static int grow_stripes(raid5_conf_t *conf, int num)
321 return 1; 335 return 1;
322 conf->slab_cache = sc; 336 conf->slab_cache = sc;
323 conf->pool_size = devs; 337 conf->pool_size = devs;
324 while (num--) { 338 while (num--)
325 if (!grow_one_stripe(conf)) 339 if (!grow_one_stripe(conf))
326 return 1; 340 return 1;
327 }
328 return 0; 341 return 0;
329} 342}
330 343
@@ -631,8 +644,7 @@ static void raid5_build_block (struct stripe_head *sh, int i)
631 dev->req.bi_private = sh; 644 dev->req.bi_private = sh;
632 645
633 dev->flags = 0; 646 dev->flags = 0;
634 if (i != sh->pd_idx) 647 dev->sector = compute_blocknr(sh, i);
635 dev->sector = compute_blocknr(sh, i);
636} 648}
637 649
638static void error(mddev_t *mddev, mdk_rdev_t *rdev) 650static void error(mddev_t *mddev, mdk_rdev_t *rdev)
@@ -659,7 +671,7 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
659 " Operation continuing on %d devices\n", 671 " Operation continuing on %d devices\n",
660 bdevname(rdev->bdev,b), conf->working_disks); 672 bdevname(rdev->bdev,b), conf->working_disks);
661 } 673 }
662} 674}
663 675
664/* 676/*
665 * Input: a 'big' sector number, 677 * Input: a 'big' sector number,
@@ -697,9 +709,12 @@ static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
697 /* 709 /*
698 * Select the parity disk based on the user selected algorithm. 710 * Select the parity disk based on the user selected algorithm.
699 */ 711 */
700 if (conf->level == 4) 712 switch(conf->level) {
713 case 4:
701 *pd_idx = data_disks; 714 *pd_idx = data_disks;
702 else switch (conf->algorithm) { 715 break;
716 case 5:
717 switch (conf->algorithm) {
703 case ALGORITHM_LEFT_ASYMMETRIC: 718 case ALGORITHM_LEFT_ASYMMETRIC:
704 *pd_idx = data_disks - stripe % raid_disks; 719 *pd_idx = data_disks - stripe % raid_disks;
705 if (*dd_idx >= *pd_idx) 720 if (*dd_idx >= *pd_idx)
@@ -721,6 +736,39 @@ static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
721 default: 736 default:
722 printk(KERN_ERR "raid5: unsupported algorithm %d\n", 737 printk(KERN_ERR "raid5: unsupported algorithm %d\n",
723 conf->algorithm); 738 conf->algorithm);
739 }
740 break;
741 case 6:
742
743 /**** FIX THIS ****/
744 switch (conf->algorithm) {
745 case ALGORITHM_LEFT_ASYMMETRIC:
746 *pd_idx = raid_disks - 1 - (stripe % raid_disks);
747 if (*pd_idx == raid_disks-1)
748 (*dd_idx)++; /* Q D D D P */
749 else if (*dd_idx >= *pd_idx)
750 (*dd_idx) += 2; /* D D P Q D */
751 break;
752 case ALGORITHM_RIGHT_ASYMMETRIC:
753 *pd_idx = stripe % raid_disks;
754 if (*pd_idx == raid_disks-1)
755 (*dd_idx)++; /* Q D D D P */
756 else if (*dd_idx >= *pd_idx)
757 (*dd_idx) += 2; /* D D P Q D */
758 break;
759 case ALGORITHM_LEFT_SYMMETRIC:
760 *pd_idx = raid_disks - 1 - (stripe % raid_disks);
761 *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
762 break;
763 case ALGORITHM_RIGHT_SYMMETRIC:
764 *pd_idx = stripe % raid_disks;
765 *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
766 break;
767 default:
768 printk (KERN_CRIT "raid6: unsupported algorithm %d\n",
769 conf->algorithm);
770 }
771 break;
724 } 772 }
725 773
726 /* 774 /*
@@ -742,12 +790,17 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i)
742 int chunk_number, dummy1, dummy2, dd_idx = i; 790 int chunk_number, dummy1, dummy2, dd_idx = i;
743 sector_t r_sector; 791 sector_t r_sector;
744 792
793
745 chunk_offset = sector_div(new_sector, sectors_per_chunk); 794 chunk_offset = sector_div(new_sector, sectors_per_chunk);
746 stripe = new_sector; 795 stripe = new_sector;
747 BUG_ON(new_sector != stripe); 796 BUG_ON(new_sector != stripe);
748 797
749 798 if (i == sh->pd_idx)
750 switch (conf->algorithm) { 799 return 0;
800 switch(conf->level) {
801 case 4: break;
802 case 5:
803 switch (conf->algorithm) {
751 case ALGORITHM_LEFT_ASYMMETRIC: 804 case ALGORITHM_LEFT_ASYMMETRIC:
752 case ALGORITHM_RIGHT_ASYMMETRIC: 805 case ALGORITHM_RIGHT_ASYMMETRIC:
753 if (i > sh->pd_idx) 806 if (i > sh->pd_idx)
@@ -761,7 +814,37 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i)
761 break; 814 break;
762 default: 815 default:
763 printk(KERN_ERR "raid5: unsupported algorithm %d\n", 816 printk(KERN_ERR "raid5: unsupported algorithm %d\n",
817 conf->algorithm);
818 }
819 break;
820 case 6:
821 data_disks = raid_disks - 2;
822 if (i == raid6_next_disk(sh->pd_idx, raid_disks))
823 return 0; /* It is the Q disk */
824 switch (conf->algorithm) {
825 case ALGORITHM_LEFT_ASYMMETRIC:
826 case ALGORITHM_RIGHT_ASYMMETRIC:
827 if (sh->pd_idx == raid_disks-1)
828 i--; /* Q D D D P */
829 else if (i > sh->pd_idx)
830 i -= 2; /* D D P Q D */
831 break;
832 case ALGORITHM_LEFT_SYMMETRIC:
833 case ALGORITHM_RIGHT_SYMMETRIC:
834 if (sh->pd_idx == raid_disks-1)
835 i--; /* Q D D D P */
836 else {
837 /* D D P Q D */
838 if (i < sh->pd_idx)
839 i += raid_disks;
840 i -= (sh->pd_idx + 2);
841 }
842 break;
843 default:
844 printk (KERN_CRIT "raid6: unsupported algorithm %d\n",
764 conf->algorithm); 845 conf->algorithm);
846 }
847 break;
765 } 848 }
766 849
767 chunk_number = stripe * data_disks + i; 850 chunk_number = stripe * data_disks + i;
@@ -778,10 +861,11 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i)
778 861
779 862
780/* 863/*
781 * Copy data between a page in the stripe cache, and a bio. 864 * Copy data between a page in the stripe cache, and one or more bion
782 * There are no alignment or size guarantees between the page or the 865 * The page could align with the middle of the bio, or there could be
783 * bio except that there is some overlap. 866 * several bion, each with several bio_vecs, which cover part of the page
784 * All iovecs in the bio must be considered. 867 * Multiple bion are linked together on bi_next. There may be extras
868 * at the end of this list. We ignore them.
785 */ 869 */
786static void copy_data(int frombio, struct bio *bio, 870static void copy_data(int frombio, struct bio *bio,
787 struct page *page, 871 struct page *page,
@@ -810,7 +894,7 @@ static void copy_data(int frombio, struct bio *bio,
810 if (len > 0 && page_offset + len > STRIPE_SIZE) 894 if (len > 0 && page_offset + len > STRIPE_SIZE)
811 clen = STRIPE_SIZE - page_offset; 895 clen = STRIPE_SIZE - page_offset;
812 else clen = len; 896 else clen = len;
813 897
814 if (clen > 0) { 898 if (clen > 0) {
815 char *ba = __bio_kmap_atomic(bio, i, KM_USER0); 899 char *ba = __bio_kmap_atomic(bio, i, KM_USER0);
816 if (frombio) 900 if (frombio)
@@ -862,14 +946,14 @@ static void compute_block(struct stripe_head *sh, int dd_idx)
862 set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); 946 set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
863} 947}
864 948
865static void compute_parity(struct stripe_head *sh, int method) 949static void compute_parity5(struct stripe_head *sh, int method)
866{ 950{
867 raid5_conf_t *conf = sh->raid_conf; 951 raid5_conf_t *conf = sh->raid_conf;
868 int i, pd_idx = sh->pd_idx, disks = sh->disks, count; 952 int i, pd_idx = sh->pd_idx, disks = sh->disks, count;
869 void *ptr[MAX_XOR_BLOCKS]; 953 void *ptr[MAX_XOR_BLOCKS];
870 struct bio *chosen; 954 struct bio *chosen;
871 955
872 PRINTK("compute_parity, stripe %llu, method %d\n", 956 PRINTK("compute_parity5, stripe %llu, method %d\n",
873 (unsigned long long)sh->sector, method); 957 (unsigned long long)sh->sector, method);
874 958
875 count = 1; 959 count = 1;
@@ -956,9 +1040,195 @@ static void compute_parity(struct stripe_head *sh, int method)
956 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 1040 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
957} 1041}
958 1042
1043static void compute_parity6(struct stripe_head *sh, int method)
1044{
1045 raid6_conf_t *conf = sh->raid_conf;
1046 int i, pd_idx = sh->pd_idx, qd_idx, d0_idx, disks = conf->raid_disks, count;
1047 struct bio *chosen;
1048 /**** FIX THIS: This could be very bad if disks is close to 256 ****/
1049 void *ptrs[disks];
1050
1051 qd_idx = raid6_next_disk(pd_idx, disks);
1052 d0_idx = raid6_next_disk(qd_idx, disks);
1053
1054 PRINTK("compute_parity, stripe %llu, method %d\n",
1055 (unsigned long long)sh->sector, method);
1056
1057 switch(method) {
1058 case READ_MODIFY_WRITE:
1059 BUG(); /* READ_MODIFY_WRITE N/A for RAID-6 */
1060 case RECONSTRUCT_WRITE:
1061 for (i= disks; i-- ;)
1062 if ( i != pd_idx && i != qd_idx && sh->dev[i].towrite ) {
1063 chosen = sh->dev[i].towrite;
1064 sh->dev[i].towrite = NULL;
1065
1066 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
1067 wake_up(&conf->wait_for_overlap);
1068
1069 if (sh->dev[i].written) BUG();
1070 sh->dev[i].written = chosen;
1071 }
1072 break;
1073 case CHECK_PARITY:
1074 BUG(); /* Not implemented yet */
1075 }
1076
1077 for (i = disks; i--;)
1078 if (sh->dev[i].written) {
1079 sector_t sector = sh->dev[i].sector;
1080 struct bio *wbi = sh->dev[i].written;
1081 while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
1082 copy_data(1, wbi, sh->dev[i].page, sector);
1083 wbi = r5_next_bio(wbi, sector);
1084 }
1085
1086 set_bit(R5_LOCKED, &sh->dev[i].flags);
1087 set_bit(R5_UPTODATE, &sh->dev[i].flags);
1088 }
1089
1090// switch(method) {
1091// case RECONSTRUCT_WRITE:
1092// case CHECK_PARITY:
1093// case UPDATE_PARITY:
1094 /* Note that unlike RAID-5, the ordering of the disks matters greatly. */
1095 /* FIX: Is this ordering of drives even remotely optimal? */
1096 count = 0;
1097 i = d0_idx;
1098 do {
1099 ptrs[count++] = page_address(sh->dev[i].page);
1100 if (count <= disks-2 && !test_bit(R5_UPTODATE, &sh->dev[i].flags))
1101 printk("block %d/%d not uptodate on parity calc\n", i,count);
1102 i = raid6_next_disk(i, disks);
1103 } while ( i != d0_idx );
1104// break;
1105// }
1106
1107 raid6_call.gen_syndrome(disks, STRIPE_SIZE, ptrs);
1108
1109 switch(method) {
1110 case RECONSTRUCT_WRITE:
1111 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
1112 set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
1113 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
1114 set_bit(R5_LOCKED, &sh->dev[qd_idx].flags);
1115 break;
1116 case UPDATE_PARITY:
1117 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
1118 set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
1119 break;
1120 }
1121}
1122
1123
1124/* Compute one missing block */
1125static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero)
1126{
1127 raid6_conf_t *conf = sh->raid_conf;
1128 int i, count, disks = conf->raid_disks;
1129 void *ptr[MAX_XOR_BLOCKS], *p;
1130 int pd_idx = sh->pd_idx;
1131 int qd_idx = raid6_next_disk(pd_idx, disks);
1132
1133 PRINTK("compute_block_1, stripe %llu, idx %d\n",
1134 (unsigned long long)sh->sector, dd_idx);
1135
1136 if ( dd_idx == qd_idx ) {
1137 /* We're actually computing the Q drive */
1138 compute_parity6(sh, UPDATE_PARITY);
1139 } else {
1140 ptr[0] = page_address(sh->dev[dd_idx].page);
1141 if (!nozero) memset(ptr[0], 0, STRIPE_SIZE);
1142 count = 1;
1143 for (i = disks ; i--; ) {
1144 if (i == dd_idx || i == qd_idx)
1145 continue;
1146 p = page_address(sh->dev[i].page);
1147 if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
1148 ptr[count++] = p;
1149 else
1150 printk("compute_block() %d, stripe %llu, %d"
1151 " not present\n", dd_idx,
1152 (unsigned long long)sh->sector, i);
1153
1154 check_xor();
1155 }
1156 if (count != 1)
1157 xor_block(count, STRIPE_SIZE, ptr);
1158 if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
1159 else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
1160 }
1161}
1162
1163/* Compute two missing blocks */
1164static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
1165{
1166 raid6_conf_t *conf = sh->raid_conf;
1167 int i, count, disks = conf->raid_disks;
1168 int pd_idx = sh->pd_idx;
1169 int qd_idx = raid6_next_disk(pd_idx, disks);
1170 int d0_idx = raid6_next_disk(qd_idx, disks);
1171 int faila, failb;
1172
1173 /* faila and failb are disk numbers relative to d0_idx */
1174 /* pd_idx become disks-2 and qd_idx become disks-1 */
1175 faila = (dd_idx1 < d0_idx) ? dd_idx1+(disks-d0_idx) : dd_idx1-d0_idx;
1176 failb = (dd_idx2 < d0_idx) ? dd_idx2+(disks-d0_idx) : dd_idx2-d0_idx;
1177
1178 BUG_ON(faila == failb);
1179 if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; }
1180
1181 PRINTK("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n",
1182 (unsigned long long)sh->sector, dd_idx1, dd_idx2, faila, failb);
1183
1184 if ( failb == disks-1 ) {
1185 /* Q disk is one of the missing disks */
1186 if ( faila == disks-2 ) {
1187 /* Missing P+Q, just recompute */
1188 compute_parity6(sh, UPDATE_PARITY);
1189 return;
1190 } else {
1191 /* We're missing D+Q; recompute D from P */
1192 compute_block_1(sh, (dd_idx1 == qd_idx) ? dd_idx2 : dd_idx1, 0);
1193 compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? */
1194 return;
1195 }
1196 }
1197
1198 /* We're missing D+P or D+D; build pointer table */
1199 {
1200 /**** FIX THIS: This could be very bad if disks is close to 256 ****/
1201 void *ptrs[disks];
1202
1203 count = 0;
1204 i = d0_idx;
1205 do {
1206 ptrs[count++] = page_address(sh->dev[i].page);
1207 i = raid6_next_disk(i, disks);
1208 if (i != dd_idx1 && i != dd_idx2 &&
1209 !test_bit(R5_UPTODATE, &sh->dev[i].flags))
1210 printk("compute_2 with missing block %d/%d\n", count, i);
1211 } while ( i != d0_idx );
1212
1213 if ( failb == disks-2 ) {
1214 /* We're missing D+P. */
1215 raid6_datap_recov(disks, STRIPE_SIZE, faila, ptrs);
1216 } else {
1217 /* We're missing D+D. */
1218 raid6_2data_recov(disks, STRIPE_SIZE, faila, failb, ptrs);
1219 }
1220
1221 /* Both the above update both missing blocks */
1222 set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags);
1223 set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags);
1224 }
1225}
1226
1227
1228
959/* 1229/*
960 * Each stripe/dev can have one or more bion attached. 1230 * Each stripe/dev can have one or more bion attached.
961 * toread/towrite point to the first in a chain. 1231 * toread/towrite point to the first in a chain.
962 * The bi_next chain must be in order. 1232 * The bi_next chain must be in order.
963 */ 1233 */
964static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite) 1234static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
@@ -1031,6 +1301,13 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
1031 1301
1032static void end_reshape(raid5_conf_t *conf); 1302static void end_reshape(raid5_conf_t *conf);
1033 1303
1304static int page_is_zero(struct page *p)
1305{
1306 char *a = page_address(p);
1307 return ((*(u32*)a) == 0 &&
1308 memcmp(a, a+4, STRIPE_SIZE-4)==0);
1309}
1310
1034static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks) 1311static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks)
1035{ 1312{
1036 int sectors_per_chunk = conf->chunk_size >> 9; 1313 int sectors_per_chunk = conf->chunk_size >> 9;
@@ -1062,7 +1339,7 @@ static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks)
1062 * 1339 *
1063 */ 1340 */
1064 1341
1065static void handle_stripe(struct stripe_head *sh) 1342static void handle_stripe5(struct stripe_head *sh)
1066{ 1343{
1067 raid5_conf_t *conf = sh->raid_conf; 1344 raid5_conf_t *conf = sh->raid_conf;
1068 int disks = sh->disks; 1345 int disks = sh->disks;
@@ -1394,7 +1671,7 @@ static void handle_stripe(struct stripe_head *sh)
1394 if (locked == 0 && (rcw == 0 ||rmw == 0) && 1671 if (locked == 0 && (rcw == 0 ||rmw == 0) &&
1395 !test_bit(STRIPE_BIT_DELAY, &sh->state)) { 1672 !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
1396 PRINTK("Computing parity...\n"); 1673 PRINTK("Computing parity...\n");
1397 compute_parity(sh, rcw==0 ? RECONSTRUCT_WRITE : READ_MODIFY_WRITE); 1674 compute_parity5(sh, rcw==0 ? RECONSTRUCT_WRITE : READ_MODIFY_WRITE);
1398 /* now every locked buffer is ready to be written */ 1675 /* now every locked buffer is ready to be written */
1399 for (i=disks; i--;) 1676 for (i=disks; i--;)
1400 if (test_bit(R5_LOCKED, &sh->dev[i].flags)) { 1677 if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
@@ -1421,13 +1698,10 @@ static void handle_stripe(struct stripe_head *sh)
1421 !test_bit(STRIPE_INSYNC, &sh->state)) { 1698 !test_bit(STRIPE_INSYNC, &sh->state)) {
1422 set_bit(STRIPE_HANDLE, &sh->state); 1699 set_bit(STRIPE_HANDLE, &sh->state);
1423 if (failed == 0) { 1700 if (failed == 0) {
1424 char *pagea;
1425 BUG_ON(uptodate != disks); 1701 BUG_ON(uptodate != disks);
1426 compute_parity(sh, CHECK_PARITY); 1702 compute_parity5(sh, CHECK_PARITY);
1427 uptodate--; 1703 uptodate--;
1428 pagea = page_address(sh->dev[sh->pd_idx].page); 1704 if (page_is_zero(sh->dev[sh->pd_idx].page)) {
1429 if ((*(u32*)pagea) == 0 &&
1430 !memcmp(pagea, pagea+4, STRIPE_SIZE-4)) {
1431 /* parity is correct (on disc, not in buffer any more) */ 1705 /* parity is correct (on disc, not in buffer any more) */
1432 set_bit(STRIPE_INSYNC, &sh->state); 1706 set_bit(STRIPE_INSYNC, &sh->state);
1433 } else { 1707 } else {
@@ -1487,7 +1761,7 @@ static void handle_stripe(struct stripe_head *sh)
1487 /* Need to write out all blocks after computing parity */ 1761 /* Need to write out all blocks after computing parity */
1488 sh->disks = conf->raid_disks; 1762 sh->disks = conf->raid_disks;
1489 sh->pd_idx = stripe_to_pdidx(sh->sector, conf, conf->raid_disks); 1763 sh->pd_idx = stripe_to_pdidx(sh->sector, conf, conf->raid_disks);
1490 compute_parity(sh, RECONSTRUCT_WRITE); 1764 compute_parity5(sh, RECONSTRUCT_WRITE);
1491 for (i= conf->raid_disks; i--;) { 1765 for (i= conf->raid_disks; i--;) {
1492 set_bit(R5_LOCKED, &sh->dev[i].flags); 1766 set_bit(R5_LOCKED, &sh->dev[i].flags);
1493 locked++; 1767 locked++;
@@ -1615,6 +1889,569 @@ static void handle_stripe(struct stripe_head *sh)
1615 } 1889 }
1616} 1890}
1617 1891
/*
 * handle_stripe6 - examine and progress one RAID6 stripe.
 *
 * RAID6 counterpart of handle_stripe5.  Under sh->lock it surveys every
 * device in the stripe (satisfying reads from up-to-date cache pages and
 * counting locked/uptodate/failed devices), fails pending requests when
 * more than two devices are lost, returns completed writes, schedules the
 * reads or block computations needed for pending writes or resync, checks
 * and optionally repairs the P and Q parity blocks, and finally issues
 * the queued read/write bios to the member devices.
 *
 * @tmp_page is scratch space for the Q-parity check; if it is NULL the
 * check is deferred - STRIPE_HANDLE is set so we are called again later
 * with a page.  Caller must hold an active reference on the stripe.
 */
static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
{
	raid6_conf_t *conf = sh->raid_conf;
	int disks = conf->raid_disks;
	struct bio *return_bi= NULL;
	struct bio *bi;
	int i;
	int syncing;
	int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
	int non_overwrite = 0;
	int failed_num[2] = {0, 0};
	struct r5dev *dev, *pdev, *qdev;
	int pd_idx = sh->pd_idx;
	/* Q immediately follows P in the rotation */
	int qd_idx = raid6_next_disk(pd_idx, disks);
	int p_failed, q_failed;

	PRINTK("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d, qd_idx=%d\n",
	       (unsigned long long)sh->sector, sh->state, atomic_read(&sh->count),
	       pd_idx, qd_idx);

	spin_lock(&sh->lock);
	clear_bit(STRIPE_HANDLE, &sh->state);
	clear_bit(STRIPE_DELAYED, &sh->state);

	syncing = test_bit(STRIPE_SYNCING, &sh->state);
	/* Now to look around and see what can be done */

	rcu_read_lock();
	for (i=disks; i--; ) {
		mdk_rdev_t *rdev;
		dev = &sh->dev[i];
		clear_bit(R5_Insync, &dev->flags);

		PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
			i, dev->flags, dev->toread, dev->towrite, dev->written);
		/* maybe we can reply to a read */
		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
			struct bio *rbi, *rbi2;
			PRINTK("Return read for disc %d\n", i);
			spin_lock_irq(&conf->device_lock);
			rbi = dev->toread;
			dev->toread = NULL;
			if (test_and_clear_bit(R5_Overlap, &dev->flags))
				wake_up(&conf->wait_for_overlap);
			spin_unlock_irq(&conf->device_lock);
			while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
				copy_data(0, rbi, dev->page, dev->sector);
				rbi2 = r5_next_bio(rbi, dev->sector);
				spin_lock_irq(&conf->device_lock);
				if (--rbi->bi_phys_segments == 0) {
					rbi->bi_next = return_bi;
					return_bi = rbi;
				}
				spin_unlock_irq(&conf->device_lock);
				rbi = rbi2;
			}
		}

		/* now count some things */
		if (test_bit(R5_LOCKED, &dev->flags)) locked++;
		if (test_bit(R5_UPTODATE, &dev->flags)) uptodate++;


		if (dev->toread) to_read++;
		if (dev->towrite) {
			to_write++;
			if (!test_bit(R5_OVERWRITE, &dev->flags))
				non_overwrite++;
		}
		if (dev->written) written++;
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (!rdev || !test_bit(In_sync, &rdev->flags)) {
			/* The ReadError flag will just be confusing now */
			clear_bit(R5_ReadError, &dev->flags);
			clear_bit(R5_ReWrite, &dev->flags);
		}
		if (!rdev || !test_bit(In_sync, &rdev->flags)
		    || test_bit(R5_ReadError, &dev->flags)) {
			/* only the first two failures are remembered by index */
			if ( failed < 2 )
				failed_num[failed] = i;
			failed++;
		} else
			set_bit(R5_Insync, &dev->flags);
	}
	rcu_read_unlock();
	PRINTK("locked=%d uptodate=%d to_read=%d"
	       " to_write=%d failed=%d failed_num=%d,%d\n",
	       locked, uptodate, to_read, to_write, failed,
	       failed_num[0], failed_num[1]);
	/* check if the array has lost >2 devices and, if so, some requests might
	 * need to be failed
	 */
	if (failed > 2 && to_read+to_write+written) {
		for (i=disks; i--; ) {
			int bitmap_end = 0;

			if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
				mdk_rdev_t *rdev;
				rcu_read_lock();
				rdev = rcu_dereference(conf->disks[i].rdev);
				if (rdev && test_bit(In_sync, &rdev->flags))
					/* multiple read failures in one stripe */
					md_error(conf->mddev, rdev);
				rcu_read_unlock();
			}

			spin_lock_irq(&conf->device_lock);
			/* fail all writes first */
			bi = sh->dev[i].towrite;
			sh->dev[i].towrite = NULL;
			if (bi) { to_write--; bitmap_end = 1; }

			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
				wake_up(&conf->wait_for_overlap);

			while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
				struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
				clear_bit(BIO_UPTODATE, &bi->bi_flags);
				if (--bi->bi_phys_segments == 0) {
					md_write_end(conf->mddev);
					bi->bi_next = return_bi;
					return_bi = bi;
				}
				bi = nextbi;
			}
			/* and fail all 'written' */
			bi = sh->dev[i].written;
			sh->dev[i].written = NULL;
			if (bi) bitmap_end = 1;
			while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) {
				struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
				clear_bit(BIO_UPTODATE, &bi->bi_flags);
				if (--bi->bi_phys_segments == 0) {
					md_write_end(conf->mddev);
					bi->bi_next = return_bi;
					return_bi = bi;
				}
				bi = bi2;
			}

			/* fail any reads if this device is non-operational */
			if (!test_bit(R5_Insync, &sh->dev[i].flags) ||
			    test_bit(R5_ReadError, &sh->dev[i].flags)) {
				bi = sh->dev[i].toread;
				sh->dev[i].toread = NULL;
				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
					wake_up(&conf->wait_for_overlap);
				if (bi) to_read--;
				while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
					struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
					clear_bit(BIO_UPTODATE, &bi->bi_flags);
					if (--bi->bi_phys_segments == 0) {
						bi->bi_next = return_bi;
						return_bi = bi;
					}
					bi = nextbi;
				}
			}
			spin_unlock_irq(&conf->device_lock);
			if (bitmap_end)
				bitmap_endwrite(conf->mddev->bitmap, sh->sector,
						STRIPE_SECTORS, 0, 0);
		}
	}
	if (failed > 2 && syncing) {
		/* too many failures to complete a resync of this stripe */
		md_done_sync(conf->mddev, STRIPE_SECTORS,0);
		clear_bit(STRIPE_SYNCING, &sh->state);
		syncing = 0;
	}

	/*
	 * might be able to return some write requests if the parity blocks
	 * are safe, or on a failed drive
	 */
	pdev = &sh->dev[pd_idx];
	p_failed = (failed >= 1 && failed_num[0] == pd_idx)
		|| (failed >= 2 && failed_num[1] == pd_idx);
	qdev = &sh->dev[qd_idx];
	q_failed = (failed >= 1 && failed_num[0] == qd_idx)
		|| (failed >= 2 && failed_num[1] == qd_idx);

	if ( written &&
	     ( p_failed || ((test_bit(R5_Insync, &pdev->flags)
			     && !test_bit(R5_LOCKED, &pdev->flags)
			     && test_bit(R5_UPTODATE, &pdev->flags))) ) &&
	     ( q_failed || ((test_bit(R5_Insync, &qdev->flags)
			     && !test_bit(R5_LOCKED, &qdev->flags)
			     && test_bit(R5_UPTODATE, &qdev->flags))) ) ) {
		/* any written block on an uptodate or failed drive can be
		 * returned.  Note that if we 'wrote' to a failed drive,
		 * it will be UPTODATE, but never LOCKED, so we don't need
		 * to test 'failed' directly.
		 */
		for (i=disks; i--; )
			if (sh->dev[i].written) {
				dev = &sh->dev[i];
				if (!test_bit(R5_LOCKED, &dev->flags) &&
				    test_bit(R5_UPTODATE, &dev->flags) ) {
					/* We can return any write requests */
					int bitmap_end = 0;
					struct bio *wbi, *wbi2;
					PRINTK("Return write for stripe %llu disc %d\n",
					       (unsigned long long)sh->sector, i);
					spin_lock_irq(&conf->device_lock);
					wbi = dev->written;
					dev->written = NULL;
					while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) {
						wbi2 = r5_next_bio(wbi, dev->sector);
						if (--wbi->bi_phys_segments == 0) {
							md_write_end(conf->mddev);
							wbi->bi_next = return_bi;
							return_bi = wbi;
						}
						wbi = wbi2;
					}
					if (dev->towrite == NULL)
						bitmap_end = 1;
					spin_unlock_irq(&conf->device_lock);
					if (bitmap_end)
						bitmap_endwrite(conf->mddev->bitmap, sh->sector,
								STRIPE_SECTORS,
								!test_bit(STRIPE_DEGRADED, &sh->state), 0);
				}
			}
	}

	/* Now we might consider reading some blocks, either to check/generate
	 * parity, or to satisfy requests
	 * or to load a block that is being partially written.
	 */
	if (to_read || non_overwrite || (to_write && failed) || (syncing && (uptodate < disks))) {
		for (i=disks; i--;) {
			dev = &sh->dev[i];
			if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
			    (dev->toread ||
			     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
			     syncing ||
			     (failed >= 1 && (sh->dev[failed_num[0]].toread || to_write)) ||
			     (failed >= 2 && (sh->dev[failed_num[1]].toread || to_write))
				    )
				) {
				/* we would like to get this block, possibly
				 * by computing it, but we might not be able to
				 */
				if (uptodate == disks-1) {
					PRINTK("Computing stripe %llu block %d\n",
					       (unsigned long long)sh->sector, i);
					compute_block_1(sh, i, 0);
					uptodate++;
				} else if ( uptodate == disks-2 && failed >= 2 ) {
					/* Computing 2-failure is *very* expensive; only do it if failed >= 2 */
					int other;
					for (other=disks; other--;) {
						if ( other == i )
							continue;
						if ( !test_bit(R5_UPTODATE, &sh->dev[other].flags) )
							break;
					}
					BUG_ON(other < 0);
					PRINTK("Computing stripe %llu blocks %d,%d\n",
					       (unsigned long long)sh->sector, i, other);
					compute_block_2(sh, i, other);
					uptodate += 2;
				} else if (test_bit(R5_Insync, &dev->flags)) {
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
#if 0
					/* if I am just reading this block and we don't have
					   a failed drive, or any pending writes then sidestep the cache */
					if (sh->bh_read[i] && !sh->bh_read[i]->b_reqnext &&
					    ! syncing && !failed && !to_write) {
						sh->bh_cache[i]->b_page =  sh->bh_read[i]->b_page;
						sh->bh_cache[i]->b_data =  sh->bh_read[i]->b_data;
					}
#endif
					locked++;
					PRINTK("Reading block %d (sync=%d)\n",
						i, syncing);
				}
			}
		}
		set_bit(STRIPE_HANDLE, &sh->state);
	}

	/* now to consider writing and what else, if anything should be read */
	if (to_write) {
		int rcw=0, must_compute=0;
		for (i=disks ; i--;) {
			dev = &sh->dev[i];
			/* Would I have to read this buffer for reconstruct_write */
			if (!test_bit(R5_OVERWRITE, &dev->flags)
			    && i != pd_idx && i != qd_idx
			    && (!test_bit(R5_LOCKED, &dev->flags)
#if 0
				|| sh->bh_page[i] != bh->b_page
#endif
				    ) &&
			    !test_bit(R5_UPTODATE, &dev->flags)) {
				if (test_bit(R5_Insync, &dev->flags)) rcw++;
				else {
					PRINTK("raid6: must_compute: disk %d flags=%#lx\n", i, dev->flags);
					must_compute++;
				}
			}
		}
		PRINTK("for sector %llu, rcw=%d, must_compute=%d\n",
		       (unsigned long long)sh->sector, rcw, must_compute);
		set_bit(STRIPE_HANDLE, &sh->state);

		if (rcw > 0)
			/* want reconstruct write, but need to get some data */
			for (i=disks; i--;) {
				dev = &sh->dev[i];
				if (!test_bit(R5_OVERWRITE, &dev->flags)
				    && !(failed == 0 && (i == pd_idx || i == qd_idx))
				    && !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
				    test_bit(R5_Insync, &dev->flags)) {
					if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
					{
						PRINTK("Read_old stripe %llu block %d for Reconstruct\n",
						       (unsigned long long)sh->sector, i);
						set_bit(R5_LOCKED, &dev->flags);
						set_bit(R5_Wantread, &dev->flags);
						locked++;
					} else {
						PRINTK("Request delayed stripe %llu block %d for Reconstruct\n",
						       (unsigned long long)sh->sector, i);
						set_bit(STRIPE_DELAYED, &sh->state);
						set_bit(STRIPE_HANDLE, &sh->state);
					}
				}
			}
		/* now if nothing is locked, and if we have enough data, we can start a write request */
		if (locked == 0 && rcw == 0 &&
		    !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
			if ( must_compute > 0 ) {
				/* We have failed blocks and need to compute them */
				switch ( failed ) {
				case 0:	BUG();
				case 1: compute_block_1(sh, failed_num[0], 0); break;
				case 2: compute_block_2(sh, failed_num[0], failed_num[1]); break;
				default: BUG();	/* This request should have been failed? */
				}
			}

			PRINTK("Computing parity for stripe %llu\n", (unsigned long long)sh->sector);
			compute_parity6(sh, RECONSTRUCT_WRITE);
			/* now every locked buffer is ready to be written */
			for (i=disks; i--;)
				if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
					PRINTK("Writing stripe %llu block %d\n",
					       (unsigned long long)sh->sector, i);
					locked++;
					set_bit(R5_Wantwrite, &sh->dev[i].flags);
				}
			/* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */
			set_bit(STRIPE_INSYNC, &sh->state);

			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				atomic_dec(&conf->preread_active_stripes);
				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
					md_wakeup_thread(conf->mddev->thread);
			}
		}
	}

	/* maybe we need to check and possibly fix the parity for this stripe
	 * Any reads will already have been scheduled, so we just see if enough data
	 * is available
	 */
	if (syncing && locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state)) {
		int update_p = 0, update_q = 0;
		struct r5dev *dev;

		set_bit(STRIPE_HANDLE, &sh->state);

		BUG_ON(failed>2);
		BUG_ON(uptodate < disks);
		/* Want to check and possibly repair P and Q.
		 * However there could be one 'failed' device, in which
		 * case we can only check one of them, possibly using the
		 * other to generate missing data
		 */

		/* If !tmp_page, we cannot do the calculations,
		 * but as we have set STRIPE_HANDLE, we will soon be called
		 * by stripe_handle with a tmp_page - just wait until then.
		 */
		if (tmp_page) {
			if (failed == q_failed) {
				/* The only possible failed device holds 'Q', so it makes
				 * sense to check P (If anything else were failed, we would
				 * have used P to recreate it).
				 */
				compute_block_1(sh, pd_idx, 1);
				if (!page_is_zero(sh->dev[pd_idx].page)) {
					/* P mismatch: regenerate it for real */
					compute_block_1(sh,pd_idx,0);
					update_p = 1;
				}
			}
			if (!q_failed && failed < 2) {
				/* q is not failed, and we didn't use it to generate
				 * anything, so it makes sense to check it
				 */
				memcpy(page_address(tmp_page),
				       page_address(sh->dev[qd_idx].page),
				       STRIPE_SIZE);
				compute_parity6(sh, UPDATE_PARITY);
				if (memcmp(page_address(tmp_page),
					   page_address(sh->dev[qd_idx].page),
					   STRIPE_SIZE)!= 0) {
					clear_bit(STRIPE_INSYNC, &sh->state);
					update_q = 1;
				}
			}
			if (update_p || update_q) {
				conf->mddev->resync_mismatches += STRIPE_SECTORS;
				if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
					/* don't try to repair!! */
					update_p = update_q = 0;
			}

			/* now write out any block on a failed drive,
			 * or P or Q if they need it
			 */

			if (failed == 2) {
				dev = &sh->dev[failed_num[1]];
				locked++;
				set_bit(R5_LOCKED, &dev->flags);
				set_bit(R5_Wantwrite, &dev->flags);
			}
			if (failed >= 1) {
				dev = &sh->dev[failed_num[0]];
				locked++;
				set_bit(R5_LOCKED, &dev->flags);
				set_bit(R5_Wantwrite, &dev->flags);
			}

			if (update_p) {
				dev = &sh->dev[pd_idx];
				locked ++;
				set_bit(R5_LOCKED, &dev->flags);
				set_bit(R5_Wantwrite, &dev->flags);
			}
			if (update_q) {
				dev = &sh->dev[qd_idx];
				locked++;
				set_bit(R5_LOCKED, &dev->flags);
				set_bit(R5_Wantwrite, &dev->flags);
			}
			clear_bit(STRIPE_DEGRADED, &sh->state);

			set_bit(STRIPE_INSYNC, &sh->state);
		}
	}

	if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
		md_done_sync(conf->mddev, STRIPE_SECTORS,1);
		clear_bit(STRIPE_SYNCING, &sh->state);
	}

	/* If the failed drives are just a ReadError, then we might need
	 * to progress the repair/check process
	 */
	if (failed <= 2 && ! conf->mddev->ro)
		for (i=0; i<failed;i++) {
			dev = &sh->dev[failed_num[i]];
			if (test_bit(R5_ReadError, &dev->flags)
			    && !test_bit(R5_LOCKED, &dev->flags)
			    && test_bit(R5_UPTODATE, &dev->flags)
				) {
				if (!test_bit(R5_ReWrite, &dev->flags)) {
					/* rewrite the bad block in place first ... */
					set_bit(R5_Wantwrite, &dev->flags);
					set_bit(R5_ReWrite, &dev->flags);
					set_bit(R5_LOCKED, &dev->flags);
				} else {
					/* ... then let's read it back */
					set_bit(R5_Wantread, &dev->flags);
					set_bit(R5_LOCKED, &dev->flags);
				}
			}
		}
	spin_unlock(&sh->lock);

	/* complete the bios that became deliverable above */
	while ((bi=return_bi)) {
		int bytes = bi->bi_size;

		return_bi = bi->bi_next;
		bi->bi_next = NULL;
		bi->bi_size = 0;
		bi->bi_end_io(bi, bytes, 0);
	}
	/* finally, issue the reads/writes queued on each device */
	for (i=disks; i-- ;) {
		int rw;
		struct bio *bi;
		mdk_rdev_t *rdev;
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
			rw = 1;
		else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
			rw = 0;
		else
			continue;

		bi = &sh->dev[i].req;

		bi->bi_rw = rw;
		if (rw)
			bi->bi_end_io = raid5_end_write_request;
		else
			bi->bi_end_io = raid5_end_read_request;

		rcu_read_lock();
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = NULL;
		if (rdev)
			atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();

		if (rdev) {
			if (syncing)
				md_sync_acct(rdev->bdev, STRIPE_SECTORS);

			bi->bi_bdev = rdev->bdev;
			PRINTK("for %llu schedule op %ld on disc %d\n",
				(unsigned long long)sh->sector, bi->bi_rw, i);
			atomic_inc(&sh->count);
			bi->bi_sector = sh->sector + rdev->data_offset;
			bi->bi_flags = 1 << BIO_UPTODATE;
			bi->bi_vcnt = 1;
			bi->bi_max_vecs = 1;
			bi->bi_idx = 0;
			bi->bi_io_vec = &sh->dev[i].vec;
			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			bi->bi_io_vec[0].bv_offset = 0;
			bi->bi_size = STRIPE_SIZE;
			bi->bi_next = NULL;
			if (rw == WRITE &&
			    test_bit(R5_ReWrite, &sh->dev[i].flags))
				atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
			generic_make_request(bi);
		} else {
			if (rw == 1)
				set_bit(STRIPE_DEGRADED, &sh->state);
			PRINTK("skip op %ld on disc %d for sector %llu\n",
				bi->bi_rw, i, (unsigned long long)sh->sector);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
	}
}
2444
2445static void handle_stripe(struct stripe_head *sh, struct page *tmp_page)
2446{
2447 if (sh->raid_conf->level == 6)
2448 handle_stripe6(sh, tmp_page);
2449 else
2450 handle_stripe5(sh);
2451}
2452
2453
2454
1618static void raid5_activate_delayed(raid5_conf_t *conf) 2455static void raid5_activate_delayed(raid5_conf_t *conf)
1619{ 2456{
1620 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { 2457 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
@@ -1753,7 +2590,7 @@ static int make_request(request_queue_t *q, struct bio * bi)
1753 2590
1754 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { 2591 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
1755 DEFINE_WAIT(w); 2592 DEFINE_WAIT(w);
1756 int disks; 2593 int disks, data_disks;
1757 2594
1758 retry: 2595 retry:
1759 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); 2596 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
@@ -1781,7 +2618,9 @@ static int make_request(request_queue_t *q, struct bio * bi)
1781 } 2618 }
1782 spin_unlock_irq(&conf->device_lock); 2619 spin_unlock_irq(&conf->device_lock);
1783 } 2620 }
1784 new_sector = raid5_compute_sector(logical_sector, disks, disks - 1, 2621 data_disks = disks - conf->max_degraded;
2622
2623 new_sector = raid5_compute_sector(logical_sector, disks, data_disks,
1785 &dd_idx, &pd_idx, conf); 2624 &dd_idx, &pd_idx, conf);
1786 PRINTK("raid5: make_request, sector %llu logical %llu\n", 2625 PRINTK("raid5: make_request, sector %llu logical %llu\n",
1787 (unsigned long long)new_sector, 2626 (unsigned long long)new_sector,
@@ -1833,7 +2672,7 @@ static int make_request(request_queue_t *q, struct bio * bi)
1833 } 2672 }
1834 finish_wait(&conf->wait_for_overlap, &w); 2673 finish_wait(&conf->wait_for_overlap, &w);
1835 raid5_plug_device(conf); 2674 raid5_plug_device(conf);
1836 handle_stripe(sh); 2675 handle_stripe(sh, NULL);
1837 release_stripe(sh); 2676 release_stripe(sh);
1838 } else { 2677 } else {
1839 /* cannot get stripe for read-ahead, just give-up */ 2678 /* cannot get stripe for read-ahead, just give-up */
@@ -1849,7 +2688,7 @@ static int make_request(request_queue_t *q, struct bio * bi)
1849 if (remaining == 0) { 2688 if (remaining == 0) {
1850 int bytes = bi->bi_size; 2689 int bytes = bi->bi_size;
1851 2690
1852 if ( bio_data_dir(bi) == WRITE ) 2691 if ( rw == WRITE )
1853 md_write_end(mddev); 2692 md_write_end(mddev);
1854 bi->bi_size = 0; 2693 bi->bi_size = 0;
1855 bi->bi_end_io(bi, bytes, 0); 2694 bi->bi_end_io(bi, bytes, 0);
@@ -1857,17 +2696,142 @@ static int make_request(request_queue_t *q, struct bio * bi)
1857 return 0; 2696 return 0;
1858} 2697}
1859 2698
/*
 * reshape_request - progress an in-place raid5 reshape by one chunk.
 *
 * Called from sync_request when MD_RECOVERY_RESHAPE is set.  Returns the
 * number of sectors handled this call (one chunk), or, when restarting
 * mid-reshape at sector_nr == 0, the already-completed count with
 * *skipped set.
 *
 * NOTE(review): the sector_div(..., raid_disks-1) arithmetic assumes a
 * single parity disk, i.e. reshape is raid5-only here - confirm raid6
 * is excluded by the callers.
 */
static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped)
{
	/* reshaping is quite different to recovery/resync so it is
	 * handled quite separately ... here.
	 *
	 * On each call to sync_request, we gather one chunk worth of
	 * destination stripes and flag them as expanding.
	 * Then we find all the source stripes and request reads.
	 * As the reads complete, handle_stripe will copy the data
	 * into the destination stripe and release that stripe.
	 */
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	struct stripe_head *sh;
	int pd_idx;
	sector_t first_sector, last_sector;
	int raid_disks;
	int data_disks;
	int i;
	int dd_idx;
	sector_t writepos, safepos, gap;

	if (sector_nr == 0 &&
	    conf->expand_progress != 0) {
		/* restarting in the middle, skip the initial sectors */
		sector_nr = conf->expand_progress;
		sector_div(sector_nr, conf->raid_disks-1);
		*skipped = 1;
		return sector_nr;
	}

	/* we update the metadata when there is more than 3Meg
	 * in the block range (that is rather arbitrary, should
	 * probably be time based) or when the data about to be
	 * copied would over-write the source of the data at
	 * the front of the range.
	 * i.e. one new_stripe forward from expand_progress new_maps
	 * to after where expand_lo old_maps to
	 */
	writepos = conf->expand_progress +
		conf->chunk_size/512*(conf->raid_disks-1);
	sector_div(writepos, conf->raid_disks-1);
	safepos = conf->expand_lo;
	sector_div(safepos, conf->previous_raid_disks-1);
	gap = conf->expand_progress - conf->expand_lo;

	if (writepos >= safepos ||
	    gap > (conf->raid_disks-1)*3000*2 /*3Meg*/) {
		/* Cannot proceed until we've updated the superblock... */
		wait_event(conf->wait_for_overlap,
			   atomic_read(&conf->reshape_stripes)==0);
		mddev->reshape_position = conf->expand_progress;
		mddev->sb_dirty = 1;
		md_wakeup_thread(mddev->thread);
		wait_event(mddev->sb_wait, mddev->sb_dirty == 0 ||
			   kthread_should_stop());
		spin_lock_irq(&conf->device_lock);
		conf->expand_lo = mddev->reshape_position;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_for_overlap);
	}

	/* gather one chunk of destination stripes and mark them expanding */
	for (i=0; i < conf->chunk_size/512; i+= STRIPE_SECTORS) {
		int j;
		int skipped = 0;
		pd_idx = stripe_to_pdidx(sector_nr+i, conf, conf->raid_disks);
		sh = get_active_stripe(conf, sector_nr+i,
				       conf->raid_disks, pd_idx, 0);
		set_bit(STRIPE_EXPANDING, &sh->state);
		atomic_inc(&conf->reshape_stripes);
		/* If any of this stripe is beyond the end of the old
		 * array, then we need to zero those blocks
		 */
		for (j=sh->disks; j--;) {
			sector_t s;
			if (j == sh->pd_idx)
				continue;
			s = compute_blocknr(sh, j);
			if (s < (mddev->array_size<<1)) {
				skipped = 1;
				continue;
			}
			memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
			set_bit(R5_Expanded, &sh->dev[j].flags);
			set_bit(R5_UPTODATE, &sh->dev[j].flags);
		}
		if (!skipped) {
			set_bit(STRIPE_EXPAND_READY, &sh->state);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
		release_stripe(sh);
	}
	spin_lock_irq(&conf->device_lock);
	conf->expand_progress = (sector_nr + i)*(conf->raid_disks-1);
	spin_unlock_irq(&conf->device_lock);
	/* Ok, those stripes are ready. We can start scheduling
	 * reads on the source stripes.
	 * The source stripes are determined by mapping the first and last
	 * block on the destination stripes.
	 */
	raid_disks = conf->previous_raid_disks;
	data_disks = raid_disks - 1;
	first_sector =
		raid5_compute_sector(sector_nr*(conf->raid_disks-1),
				     raid_disks, data_disks,
				     &dd_idx, &pd_idx, conf);
	last_sector =
		raid5_compute_sector((sector_nr+conf->chunk_size/512)
				     *(conf->raid_disks-1) -1,
				     raid_disks, data_disks,
				     &dd_idx, &pd_idx, conf);
	if (last_sector >= (mddev->size<<1))
		last_sector = (mddev->size<<1)-1;
	while (first_sector <= last_sector) {
		pd_idx = stripe_to_pdidx(first_sector, conf, conf->previous_raid_disks);
		sh = get_active_stripe(conf, first_sector,
				       conf->previous_raid_disks, pd_idx, 0);
		set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
		set_bit(STRIPE_HANDLE, &sh->state);
		release_stripe(sh);
		first_sector += STRIPE_SECTORS;
	}
	return conf->chunk_size>>9;
}
2822
2823/* FIXME go_faster isn't used */
2824static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
2825{
2826 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
2827 struct stripe_head *sh;
2828 int pd_idx;
1867 int raid_disks = conf->raid_disks; 2829 int raid_disks = conf->raid_disks;
1868 int data_disks = raid_disks-1; 2830 int data_disks = raid_disks - conf->max_degraded;
1869 sector_t max_sector = mddev->size << 1; 2831 sector_t max_sector = mddev->size << 1;
1870 int sync_blocks; 2832 int sync_blocks;
2833 int still_degraded = 0;
2834 int i;
1871 2835
1872 if (sector_nr >= max_sector) { 2836 if (sector_nr >= max_sector) {
1873 /* just being told to finish up .. nothing much to do */ 2837 /* just being told to finish up .. nothing much to do */
@@ -1880,134 +2844,22 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1880 if (mddev->curr_resync < max_sector) /* aborted */ 2844 if (mddev->curr_resync < max_sector) /* aborted */
1881 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 2845 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
1882 &sync_blocks, 1); 2846 &sync_blocks, 1);
1883 else /* compelted sync */ 2847 else /* completed sync */
1884 conf->fullsync = 0; 2848 conf->fullsync = 0;
1885 bitmap_close_sync(mddev->bitmap); 2849 bitmap_close_sync(mddev->bitmap);
1886 2850
1887 return 0; 2851 return 0;
1888 } 2852 }
1889 2853
1890 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 2854 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
1891 /* reshaping is quite different to recovery/resync so it is 2855 return reshape_request(mddev, sector_nr, skipped);
1892 * handled quite separately ... here. 2856
1893 * 2857 /* if there is too many failed drives and we are trying
1894 * On each call to sync_request, we gather one chunk worth of
1895 * destination stripes and flag them as expanding.
1896 * Then we find all the source stripes and request reads.
1897 * As the reads complete, handle_stripe will copy the data
1898 * into the destination stripe and release that stripe.
1899 */
1900 int i;
1901 int dd_idx;
1902 sector_t writepos, safepos, gap;
1903
1904 if (sector_nr == 0 &&
1905 conf->expand_progress != 0) {
1906 /* restarting in the middle, skip the initial sectors */
1907 sector_nr = conf->expand_progress;
1908 sector_div(sector_nr, conf->raid_disks-1);
1909 *skipped = 1;
1910 return sector_nr;
1911 }
1912
1913 /* we update the metadata when there is more than 3Meg
1914 * in the block range (that is rather arbitrary, should
1915 * probably be time based) or when the data about to be
1916 * copied would over-write the source of the data at
1917 * the front of the range.
1918 * i.e. one new_stripe forward from expand_progress new_maps
1919 * to after where expand_lo old_maps to
1920 */
1921 writepos = conf->expand_progress +
1922 conf->chunk_size/512*(conf->raid_disks-1);
1923 sector_div(writepos, conf->raid_disks-1);
1924 safepos = conf->expand_lo;
1925 sector_div(safepos, conf->previous_raid_disks-1);
1926 gap = conf->expand_progress - conf->expand_lo;
1927
1928 if (writepos >= safepos ||
1929 gap > (conf->raid_disks-1)*3000*2 /*3Meg*/) {
1930 /* Cannot proceed until we've updated the superblock... */
1931 wait_event(conf->wait_for_overlap,
1932 atomic_read(&conf->reshape_stripes)==0);
1933 mddev->reshape_position = conf->expand_progress;
1934 mddev->sb_dirty = 1;
1935 md_wakeup_thread(mddev->thread);
1936 wait_event(mddev->sb_wait, mddev->sb_dirty == 0 ||
1937 kthread_should_stop());
1938 spin_lock_irq(&conf->device_lock);
1939 conf->expand_lo = mddev->reshape_position;
1940 spin_unlock_irq(&conf->device_lock);
1941 wake_up(&conf->wait_for_overlap);
1942 }
1943
1944 for (i=0; i < conf->chunk_size/512; i+= STRIPE_SECTORS) {
1945 int j;
1946 int skipped = 0;
1947 pd_idx = stripe_to_pdidx(sector_nr+i, conf, conf->raid_disks);
1948 sh = get_active_stripe(conf, sector_nr+i,
1949 conf->raid_disks, pd_idx, 0);
1950 set_bit(STRIPE_EXPANDING, &sh->state);
1951 atomic_inc(&conf->reshape_stripes);
1952 /* If any of this stripe is beyond the end of the old
1953 * array, then we need to zero those blocks
1954 */
1955 for (j=sh->disks; j--;) {
1956 sector_t s;
1957 if (j == sh->pd_idx)
1958 continue;
1959 s = compute_blocknr(sh, j);
1960 if (s < (mddev->array_size<<1)) {
1961 skipped = 1;
1962 continue;
1963 }
1964 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
1965 set_bit(R5_Expanded, &sh->dev[j].flags);
1966 set_bit(R5_UPTODATE, &sh->dev[j].flags);
1967 }
1968 if (!skipped) {
1969 set_bit(STRIPE_EXPAND_READY, &sh->state);
1970 set_bit(STRIPE_HANDLE, &sh->state);
1971 }
1972 release_stripe(sh);
1973 }
1974 spin_lock_irq(&conf->device_lock);
1975 conf->expand_progress = (sector_nr + i)*(conf->raid_disks-1);
1976 spin_unlock_irq(&conf->device_lock);
1977 /* Ok, those stripe are ready. We can start scheduling
1978 * reads on the source stripes.
1979 * The source stripes are determined by mapping the first and last
1980 * block on the destination stripes.
1981 */
1982 raid_disks = conf->previous_raid_disks;
1983 data_disks = raid_disks - 1;
1984 first_sector =
1985 raid5_compute_sector(sector_nr*(conf->raid_disks-1),
1986 raid_disks, data_disks,
1987 &dd_idx, &pd_idx, conf);
1988 last_sector =
1989 raid5_compute_sector((sector_nr+conf->chunk_size/512)
1990 *(conf->raid_disks-1) -1,
1991 raid_disks, data_disks,
1992 &dd_idx, &pd_idx, conf);
1993 if (last_sector >= (mddev->size<<1))
1994 last_sector = (mddev->size<<1)-1;
1995 while (first_sector <= last_sector) {
1996 pd_idx = stripe_to_pdidx(first_sector, conf, conf->previous_raid_disks);
1997 sh = get_active_stripe(conf, first_sector,
1998 conf->previous_raid_disks, pd_idx, 0);
1999 set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
2000 set_bit(STRIPE_HANDLE, &sh->state);
2001 release_stripe(sh);
2002 first_sector += STRIPE_SECTORS;
2003 }
2004 return conf->chunk_size>>9;
2005 }
2006 /* if there is 1 or more failed drives and we are trying
2007 * to resync, then assert that we are finished, because there is 2858 * to resync, then assert that we are finished, because there is
2008 * nothing we can do. 2859 * nothing we can do.
2009 */ 2860 */
2010 if (mddev->degraded >= 1 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 2861 if (mddev->degraded >= conf->max_degraded &&
2862 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2011 sector_t rv = (mddev->size << 1) - sector_nr; 2863 sector_t rv = (mddev->size << 1) - sector_nr;
2012 *skipped = 1; 2864 *skipped = 1;
2013 return rv; 2865 return rv;
@@ -2026,17 +2878,26 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
2026 if (sh == NULL) { 2878 if (sh == NULL) {
2027 sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 0); 2879 sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 0);
2028 /* make sure we don't swamp the stripe cache if someone else 2880 /* make sure we don't swamp the stripe cache if someone else
2029 * is trying to get access 2881 * is trying to get access
2030 */ 2882 */
2031 schedule_timeout_uninterruptible(1); 2883 schedule_timeout_uninterruptible(1);
2032 } 2884 }
2033 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 0); 2885 /* Need to check if array will still be degraded after recovery/resync
2034 spin_lock(&sh->lock); 2886 * We don't need to check the 'failed' flag as when that gets set,
2887 * recovery aborts.
2888 */
2889 for (i=0; i<mddev->raid_disks; i++)
2890 if (conf->disks[i].rdev == NULL)
2891 still_degraded = 1;
2892
2893 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
2894
2895 spin_lock(&sh->lock);
2035 set_bit(STRIPE_SYNCING, &sh->state); 2896 set_bit(STRIPE_SYNCING, &sh->state);
2036 clear_bit(STRIPE_INSYNC, &sh->state); 2897 clear_bit(STRIPE_INSYNC, &sh->state);
2037 spin_unlock(&sh->lock); 2898 spin_unlock(&sh->lock);
2038 2899
2039 handle_stripe(sh); 2900 handle_stripe(sh, NULL);
2040 release_stripe(sh); 2901 release_stripe(sh);
2041 2902
2042 return STRIPE_SECTORS; 2903 return STRIPE_SECTORS;
@@ -2091,7 +2952,7 @@ static void raid5d (mddev_t *mddev)
2091 spin_unlock_irq(&conf->device_lock); 2952 spin_unlock_irq(&conf->device_lock);
2092 2953
2093 handled++; 2954 handled++;
2094 handle_stripe(sh); 2955 handle_stripe(sh, conf->spare_page);
2095 release_stripe(sh); 2956 release_stripe(sh);
2096 2957
2097 spin_lock_irq(&conf->device_lock); 2958 spin_lock_irq(&conf->device_lock);
@@ -2181,8 +3042,8 @@ static int run(mddev_t *mddev)
2181 struct disk_info *disk; 3042 struct disk_info *disk;
2182 struct list_head *tmp; 3043 struct list_head *tmp;
2183 3044
2184 if (mddev->level != 5 && mddev->level != 4) { 3045 if (mddev->level != 5 && mddev->level != 4 && mddev->level != 6) {
2185 printk(KERN_ERR "raid5: %s: raid level not set to 4/5 (%d)\n", 3046 printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n",
2186 mdname(mddev), mddev->level); 3047 mdname(mddev), mddev->level);
2187 return -EIO; 3048 return -EIO;
2188 } 3049 }
@@ -2251,6 +3112,11 @@ static int run(mddev_t *mddev)
2251 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) 3112 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
2252 goto abort; 3113 goto abort;
2253 3114
3115 if (mddev->level == 6) {
3116 conf->spare_page = alloc_page(GFP_KERNEL);
3117 if (!conf->spare_page)
3118 goto abort;
3119 }
2254 spin_lock_init(&conf->device_lock); 3120 spin_lock_init(&conf->device_lock);
2255 init_waitqueue_head(&conf->wait_for_stripe); 3121 init_waitqueue_head(&conf->wait_for_stripe);
2256 init_waitqueue_head(&conf->wait_for_overlap); 3122 init_waitqueue_head(&conf->wait_for_overlap);
@@ -2282,12 +3148,16 @@ static int run(mddev_t *mddev)
2282 } 3148 }
2283 3149
2284 /* 3150 /*
2285 * 0 for a fully functional array, 1 for a degraded array. 3151 * 0 for a fully functional array, 1 or 2 for a degraded array.
2286 */ 3152 */
2287 mddev->degraded = conf->failed_disks = conf->raid_disks - conf->working_disks; 3153 mddev->degraded = conf->failed_disks = conf->raid_disks - conf->working_disks;
2288 conf->mddev = mddev; 3154 conf->mddev = mddev;
2289 conf->chunk_size = mddev->chunk_size; 3155 conf->chunk_size = mddev->chunk_size;
2290 conf->level = mddev->level; 3156 conf->level = mddev->level;
3157 if (conf->level == 6)
3158 conf->max_degraded = 2;
3159 else
3160 conf->max_degraded = 1;
2291 conf->algorithm = mddev->layout; 3161 conf->algorithm = mddev->layout;
2292 conf->max_nr_stripes = NR_STRIPES; 3162 conf->max_nr_stripes = NR_STRIPES;
2293 conf->expand_progress = mddev->reshape_position; 3163 conf->expand_progress = mddev->reshape_position;
@@ -2296,6 +3166,11 @@ static int run(mddev_t *mddev)
2296 mddev->size &= ~(mddev->chunk_size/1024 -1); 3166 mddev->size &= ~(mddev->chunk_size/1024 -1);
2297 mddev->resync_max_sectors = mddev->size << 1; 3167 mddev->resync_max_sectors = mddev->size << 1;
2298 3168
3169 if (conf->level == 6 && conf->raid_disks < 4) {
3170 printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n",
3171 mdname(mddev), conf->raid_disks);
3172 goto abort;
3173 }
2299 if (!conf->chunk_size || conf->chunk_size % 4) { 3174 if (!conf->chunk_size || conf->chunk_size % 4) {
2300 printk(KERN_ERR "raid5: invalid chunk size %d for %s\n", 3175 printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
2301 conf->chunk_size, mdname(mddev)); 3176 conf->chunk_size, mdname(mddev));
@@ -2307,14 +3182,14 @@ static int run(mddev_t *mddev)
2307 conf->algorithm, mdname(mddev)); 3182 conf->algorithm, mdname(mddev));
2308 goto abort; 3183 goto abort;
2309 } 3184 }
2310 if (mddev->degraded > 1) { 3185 if (mddev->degraded > conf->max_degraded) {
2311 printk(KERN_ERR "raid5: not enough operational devices for %s" 3186 printk(KERN_ERR "raid5: not enough operational devices for %s"
2312 " (%d/%d failed)\n", 3187 " (%d/%d failed)\n",
2313 mdname(mddev), conf->failed_disks, conf->raid_disks); 3188 mdname(mddev), conf->failed_disks, conf->raid_disks);
2314 goto abort; 3189 goto abort;
2315 } 3190 }
2316 3191
2317 if (mddev->degraded == 1 && 3192 if (mddev->degraded > 0 &&
2318 mddev->recovery_cp != MaxSector) { 3193 mddev->recovery_cp != MaxSector) {
2319 if (mddev->ok_start_degraded) 3194 if (mddev->ok_start_degraded)
2320 printk(KERN_WARNING 3195 printk(KERN_WARNING
@@ -2379,11 +3254,12 @@ static int run(mddev_t *mddev)
2379 } 3254 }
2380 3255
2381 /* read-ahead size must cover two whole stripes, which is 3256 /* read-ahead size must cover two whole stripes, which is
2382 * 2 * (n-1) * chunksize where 'n' is the number of raid devices 3257 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices
2383 */ 3258 */
2384 { 3259 {
2385 int stripe = (mddev->raid_disks-1) * mddev->chunk_size 3260 int data_disks = conf->previous_raid_disks - conf->max_degraded;
2386 / PAGE_SIZE; 3261 int stripe = data_disks *
3262 (mddev->chunk_size / PAGE_SIZE);
2387 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 3263 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
2388 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 3264 mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
2389 } 3265 }
@@ -2393,12 +3269,14 @@ static int run(mddev_t *mddev)
2393 3269
2394 mddev->queue->unplug_fn = raid5_unplug_device; 3270 mddev->queue->unplug_fn = raid5_unplug_device;
2395 mddev->queue->issue_flush_fn = raid5_issue_flush; 3271 mddev->queue->issue_flush_fn = raid5_issue_flush;
2396 mddev->array_size = mddev->size * (conf->previous_raid_disks - 1); 3272 mddev->array_size = mddev->size * (conf->previous_raid_disks -
3273 conf->max_degraded);
2397 3274
2398 return 0; 3275 return 0;
2399abort: 3276abort:
2400 if (conf) { 3277 if (conf) {
2401 print_raid5_conf(conf); 3278 print_raid5_conf(conf);
3279 safe_put_page(conf->spare_page);
2402 kfree(conf->disks); 3280 kfree(conf->disks);
2403 kfree(conf->stripe_hashtbl); 3281 kfree(conf->stripe_hashtbl);
2404 kfree(conf); 3282 kfree(conf);
@@ -2427,23 +3305,23 @@ static int stop(mddev_t *mddev)
2427} 3305}
2428 3306
2429#if RAID5_DEBUG 3307#if RAID5_DEBUG
2430static void print_sh (struct stripe_head *sh) 3308static void print_sh (struct seq_file *seq, struct stripe_head *sh)
2431{ 3309{
2432 int i; 3310 int i;
2433 3311
2434 printk("sh %llu, pd_idx %d, state %ld.\n", 3312 seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n",
2435 (unsigned long long)sh->sector, sh->pd_idx, sh->state); 3313 (unsigned long long)sh->sector, sh->pd_idx, sh->state);
2436 printk("sh %llu, count %d.\n", 3314 seq_printf(seq, "sh %llu, count %d.\n",
2437 (unsigned long long)sh->sector, atomic_read(&sh->count)); 3315 (unsigned long long)sh->sector, atomic_read(&sh->count));
2438 printk("sh %llu, ", (unsigned long long)sh->sector); 3316 seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector);
2439 for (i = 0; i < sh->disks; i++) { 3317 for (i = 0; i < sh->disks; i++) {
2440 printk("(cache%d: %p %ld) ", 3318 seq_printf(seq, "(cache%d: %p %ld) ",
2441 i, sh->dev[i].page, sh->dev[i].flags); 3319 i, sh->dev[i].page, sh->dev[i].flags);
2442 } 3320 }
2443 printk("\n"); 3321 seq_printf(seq, "\n");
2444} 3322}
2445 3323
2446static void printall (raid5_conf_t *conf) 3324static void printall (struct seq_file *seq, raid5_conf_t *conf)
2447{ 3325{
2448 struct stripe_head *sh; 3326 struct stripe_head *sh;
2449 struct hlist_node *hn; 3327 struct hlist_node *hn;
@@ -2454,7 +3332,7 @@ static void printall (raid5_conf_t *conf)
2454 hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) { 3332 hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
2455 if (sh->raid_conf != conf) 3333 if (sh->raid_conf != conf)
2456 continue; 3334 continue;
2457 print_sh(sh); 3335 print_sh(seq, sh);
2458 } 3336 }
2459 } 3337 }
2460 spin_unlock_irq(&conf->device_lock); 3338 spin_unlock_irq(&conf->device_lock);
@@ -2474,9 +3352,8 @@ static void status (struct seq_file *seq, mddev_t *mddev)
2474 test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_"); 3352 test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
2475 seq_printf (seq, "]"); 3353 seq_printf (seq, "]");
2476#if RAID5_DEBUG 3354#if RAID5_DEBUG
2477#define D(x) \ 3355 seq_printf (seq, "\n");
2478 seq_printf (seq, "<"#x":%d>", atomic_read(&conf->x)) 3356 printall(seq, conf);
2479 printall(conf);
2480#endif 3357#endif
2481} 3358}
2482 3359
@@ -2560,14 +3437,20 @@ static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
2560 int disk; 3437 int disk;
2561 struct disk_info *p; 3438 struct disk_info *p;
2562 3439
2563 if (mddev->degraded > 1) 3440 if (mddev->degraded > conf->max_degraded)
2564 /* no point adding a device */ 3441 /* no point adding a device */
2565 return 0; 3442 return 0;
2566 3443
2567 /* 3444 /*
2568 * find the disk ... 3445 * find the disk ... but prefer rdev->saved_raid_disk
3446 * if possible.
2569 */ 3447 */
2570 for (disk=0; disk < conf->raid_disks; disk++) 3448 if (rdev->saved_raid_disk >= 0 &&
3449 conf->disks[rdev->saved_raid_disk].rdev == NULL)
3450 disk = rdev->saved_raid_disk;
3451 else
3452 disk = 0;
3453 for ( ; disk < conf->raid_disks; disk++)
2571 if ((p=conf->disks + disk)->rdev == NULL) { 3454 if ((p=conf->disks + disk)->rdev == NULL) {
2572 clear_bit(In_sync, &rdev->flags); 3455 clear_bit(In_sync, &rdev->flags);
2573 rdev->raid_disk = disk; 3456 rdev->raid_disk = disk;
@@ -2590,8 +3473,10 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
2590 * any io in the removed space completes, but it hardly seems 3473 * any io in the removed space completes, but it hardly seems
2591 * worth it. 3474 * worth it.
2592 */ 3475 */
3476 raid5_conf_t *conf = mddev_to_conf(mddev);
3477
2593 sectors &= ~((sector_t)mddev->chunk_size/512 - 1); 3478 sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
2594 mddev->array_size = (sectors * (mddev->raid_disks-1))>>1; 3479 mddev->array_size = (sectors * (mddev->raid_disks-conf->max_degraded))>>1;
2595 set_capacity(mddev->gendisk, mddev->array_size << 1); 3480 set_capacity(mddev->gendisk, mddev->array_size << 1);
2596 mddev->changed = 1; 3481 mddev->changed = 1;
2597 if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) { 3482 if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) {
@@ -2680,6 +3565,7 @@ static int raid5_start_reshape(mddev_t *mddev)
2680 set_bit(In_sync, &rdev->flags); 3565 set_bit(In_sync, &rdev->flags);
2681 conf->working_disks++; 3566 conf->working_disks++;
2682 added_devices++; 3567 added_devices++;
3568 rdev->recovery_offset = 0;
2683 sprintf(nm, "rd%d", rdev->raid_disk); 3569 sprintf(nm, "rd%d", rdev->raid_disk);
2684 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); 3570 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
2685 } else 3571 } else
@@ -2731,6 +3617,17 @@ static void end_reshape(raid5_conf_t *conf)
2731 conf->expand_progress = MaxSector; 3617 conf->expand_progress = MaxSector;
2732 spin_unlock_irq(&conf->device_lock); 3618 spin_unlock_irq(&conf->device_lock);
2733 conf->mddev->reshape_position = MaxSector; 3619 conf->mddev->reshape_position = MaxSector;
3620
3621 /* read-ahead size must cover two whole stripes, which is
3622 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices
3623 */
3624 {
3625 int data_disks = conf->previous_raid_disks - conf->max_degraded;
3626 int stripe = data_disks *
3627 (conf->mddev->chunk_size / PAGE_SIZE);
3628 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
3629 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
3630 }
2734 } 3631 }
2735} 3632}
2736 3633
@@ -2762,6 +3659,23 @@ static void raid5_quiesce(mddev_t *mddev, int state)
2762 } 3659 }
2763} 3660}
2764 3661
/*
 * RAID6 personality: level 6 is served by the shared raid5.c code paths
 * (run() sets conf->max_degraded = 2 for level 6), so most ops reuse the
 * raid5_* handlers directly.
 */
3662static struct mdk_personality raid6_personality =
3663{
3664	.name		= "raid6",
3665	.level		= 6,
3666	.owner		= THIS_MODULE,
3667	.make_request	= make_request,
3668	.run		= run,
3669	.stop		= stop,
3670	.status		= status,
3671	.error_handler	= error,
3672	.hot_add_disk	= raid5_add_disk,
3673	.hot_remove_disk= raid5_remove_disk,
3674	.spare_active	= raid5_spare_active,
3675	.sync_request	= sync_request,
3676	.resize		= raid5_resize,
3677	.quiesce	= raid5_quiesce,
3678};
2765static struct mdk_personality raid5_personality = 3679static struct mdk_personality raid5_personality =
2766{ 3680{
2767 .name = "raid5", 3681 .name = "raid5",
@@ -2804,6 +3718,12 @@ static struct mdk_personality raid4_personality =
2804 3718
2805static int __init raid5_init(void) 3719static int __init raid5_init(void)
2806{ 3720{
3721 int e;
3722
3723 e = raid6_select_algo();
3724 if ( e )
3725 return e;
3726 register_md_personality(&raid6_personality);
2807 register_md_personality(&raid5_personality); 3727 register_md_personality(&raid5_personality);
2808 register_md_personality(&raid4_personality); 3728 register_md_personality(&raid4_personality);
2809 return 0; 3729 return 0;
@@ -2811,6 +3731,7 @@ static int __init raid5_init(void)
2811 3731
2812static void raid5_exit(void) 3732static void raid5_exit(void)
2813{ 3733{
3734 unregister_md_personality(&raid6_personality);
2814 unregister_md_personality(&raid5_personality); 3735 unregister_md_personality(&raid5_personality);
2815 unregister_md_personality(&raid4_personality); 3736 unregister_md_personality(&raid4_personality);
2816} 3737}
@@ -2823,3 +3744,10 @@ MODULE_ALIAS("md-raid5");
2823MODULE_ALIAS("md-raid4"); 3744MODULE_ALIAS("md-raid4");
2824MODULE_ALIAS("md-level-5"); 3745MODULE_ALIAS("md-level-5");
2825MODULE_ALIAS("md-level-4"); 3746MODULE_ALIAS("md-level-4");
3747MODULE_ALIAS("md-personality-8"); /* RAID6 */
3748MODULE_ALIAS("md-raid6");
3749MODULE_ALIAS("md-level-6");
3750
3751/* This used to be two separate modules, they were: */
3752MODULE_ALIAS("raid5");
3753MODULE_ALIAS("raid6");
diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c
deleted file mode 100644
index bc69355e01..0000000000
--- a/drivers/md/raid6main.c
+++ /dev/null
@@ -1,2427 +0,0 @@
1/*
2 * raid6main.c : Multiple Devices driver for Linux
3 * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
4 * Copyright (C) 1999, 2000 Ingo Molnar
5 * Copyright (C) 2002, 2003 H. Peter Anvin
6 *
7 * RAID-6 management functions. This code is derived from raid5.c.
8 * Last merge from raid5.c bkcvs version 1.79 (kernel 2.6.1).
9 *
10 * Thanks to Penguin Computing for making the RAID-6 development possible
11 * by donating a test server!
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * You should have received a copy of the GNU General Public License
19 * (for example /usr/src/linux/COPYING); if not, write to the Free
20 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23
24#include <linux/config.h>
25#include <linux/module.h>
26#include <linux/slab.h>
27#include <linux/highmem.h>
28#include <linux/bitops.h>
29#include <asm/atomic.h>
30#include "raid6.h"
31
32#include <linux/raid/bitmap.h>
33
34/*
35 * Stripe cache
36 */
37
38#define NR_STRIPES 256
39#define STRIPE_SIZE PAGE_SIZE
40#define STRIPE_SHIFT (PAGE_SHIFT - 9)
41#define STRIPE_SECTORS (STRIPE_SIZE>>9)
42#define IO_THRESHOLD 1
43#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
44#define HASH_MASK (NR_HASH - 1)
45
46#define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
47
48/* bio's attached to a stripe+device for I/O are linked together in bi_sector
49 * order without overlap. There may be several bio's per stripe+device, and
50 * a bio could span several devices.
51 * When walking this list for a particular stripe+device, we must never proceed
52 * beyond a bio that extends past this device, as the next bio might no longer
53 * be valid.
54 * This macro is used to determine the 'next' bio in the list, given the sector
55 * of the current stripe+device
56 */
57#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
58/*
59 * The following can be used to debug the driver
60 */
61#define RAID6_DEBUG 0 /* Extremely verbose printk */
62#define RAID6_PARANOIA 1 /* Check spinlocks */
63#define RAID6_DUMPSTATE 0 /* Include stripe cache state in /proc/mdstat */
64#if RAID6_PARANOIA && defined(CONFIG_SMP)
65# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
66#else
67# define CHECK_DEVLOCK()
68#endif
69
70#define PRINTK(x...) ((void)(RAID6_DEBUG && printk(KERN_DEBUG x)))
71#if RAID6_DEBUG
72#undef inline
73#undef __inline__
74#define inline
75#define __inline__
76#endif
77
78#if !RAID6_USE_EMPTY_ZERO_PAGE
79/* In .bss so it's zeroed */
80const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
81#endif
82
/* Advance to the following disk index, wrapping round-robin past the last disk. */
 83static inline int raid6_next_disk(int disk, int raid_disks)
 84{
 85	disk++;
 86	return (disk < raid_disks) ? disk : 0;
 87}
88
89static void print_raid6_conf (raid6_conf_t *conf);
90
/*
 * Drop one reference on a stripe; on the final put, requeue the stripe
 * according to its state bits.  Caller is expected to hold
 * conf->device_lock (release_stripe() below takes it around this call).
 */
 91static void __release_stripe(raid6_conf_t *conf, struct stripe_head *sh)
 92{
 93	if (atomic_dec_and_test(&sh->count)) {
 94		BUG_ON(!list_empty(&sh->lru));
 95		BUG_ON(atomic_read(&conf->active_stripes)==0);
		/* Stripe still needs service: queue it on the appropriate work list. */
 96		if (test_bit(STRIPE_HANDLE, &sh->state)) {
 97			if (test_bit(STRIPE_DELAYED, &sh->state))
 98				list_add_tail(&sh->lru, &conf->delayed_list);
 99			else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
100				conf->seq_write == sh->bm_seq)
101				list_add_tail(&sh->lru, &conf->bitmap_list);
102			else {
103				clear_bit(STRIPE_BIT_DELAY, &sh->state);
104				list_add_tail(&sh->lru, &conf->handle_list);
105			}
106			md_wakeup_thread(conf->mddev->thread);
		/* No pending work: return the stripe to the inactive pool. */
107		} else {
108			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
109				atomic_dec(&conf->preread_active_stripes);
110				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
111					md_wakeup_thread(conf->mddev->thread);
112			}
113			list_add_tail(&sh->lru, &conf->inactive_list);
114			atomic_dec(&conf->active_stripes);
			/* Wake waiters once the cache has drained below the 3/4 threshold. */
115			if (!conf->inactive_blocked ||
116				atomic_read(&conf->active_stripes) < (conf->max_nr_stripes*3/4))
117				wake_up(&conf->wait_for_stripe);
118		}
119	}
120}
/* Locked wrapper around __release_stripe(): takes conf->device_lock with IRQs saved. */
121static void release_stripe(struct stripe_head *sh)
122{
123	raid6_conf_t *conf = sh->raid_conf;
124	unsigned long flags;
125
126	spin_lock_irqsave(&conf->device_lock, flags);
127	__release_stripe(conf, sh);
128	spin_unlock_irqrestore(&conf->device_lock, flags);
129}
130
/* Unhash a stripe; hlist_del_init() leaves sh->hash safe to re-add later. */
131static inline void remove_hash(struct stripe_head *sh)
132{
133	PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector);
134
135	hlist_del_init(&sh->hash);
136}
137
/*
 * Hash a stripe into the stripe hash table, bucketed by sector.
 * conf->device_lock must be held (asserted via CHECK_DEVLOCK).
 */
138static inline void insert_hash(raid6_conf_t *conf, struct stripe_head *sh)
139{
140	struct hlist_head *hp = stripe_hash(conf, sh->sector);
141
142	PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector);
143
144	CHECK_DEVLOCK();
145	hlist_add_head(&sh->hash, hp);
146}
147
148
149/* find an idle stripe, make sure it is unhashed, and return it. */
/*
 * Pops the head of conf->inactive_list, removes it from the hash and
 * bumps active_stripes.  Returns NULL when the inactive list is empty.
 * conf->device_lock must be held (CHECK_DEVLOCK).
 */
150static struct stripe_head *get_free_stripe(raid6_conf_t *conf)
151{
152	struct stripe_head *sh = NULL;
153	struct list_head *first;
154
155	CHECK_DEVLOCK();
156	if (list_empty(&conf->inactive_list))
157		goto out;
158	first = conf->inactive_list.next;
159	sh = list_entry(first, struct stripe_head, lru);
160	list_del_init(first);
161	remove_hash(sh);
162	atomic_inc(&conf->active_stripes);
163out:
164	return sh;
165}
166
/* Release the per-device pages of a stripe (first 'num' dev slots); NULL slots are skipped. */
167static void shrink_buffers(struct stripe_head *sh, int num)
168{
169	struct page *p;
170	int i;
171
172	for (i=0; i<num ; i++) {
173		p = sh->dev[i].page;
174		if (!p)
175			continue;
176		sh->dev[i].page = NULL;
177		put_page(p);
178	}
179}
180
/*
 * Allocate one page per dev slot.  Returns 0 on success, 1 on allocation
 * failure.  Pages allocated before a failure are NOT freed here — the
 * caller follows up with shrink_buffers() (see grow_one_stripe()).
 */
181static int grow_buffers(struct stripe_head *sh, int num)
182{
183	int i;
184
185	for (i=0; i<num; i++) {
186		struct page *page;
187
188		if (!(page = alloc_page(GFP_KERNEL))) {
189			return 1;
190		}
191		sh->dev[i].page = page;
192	}
193	return 0;
194}
195
196static void raid6_build_block (struct stripe_head *sh, int i);
197
/*
 * Re-initialize a freshly obtained (refcount 0, unhandled) stripe for a
 * new sector and parity-disk index, then insert it into the hash table.
 * conf->device_lock must be held (CHECK_DEVLOCK).  BUGs if any dev slot
 * still has pending reads/writes or is locked — that would mean the
 * stripe was recycled while I/O was outstanding.
 */
198static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx)
199{
200	raid6_conf_t *conf = sh->raid_conf;
201	int disks = conf->raid_disks, i;
202
203	BUG_ON(atomic_read(&sh->count) != 0);
204	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
205
206	CHECK_DEVLOCK();
207	PRINTK("init_stripe called, stripe %llu\n",
208		(unsigned long long)sh->sector);
209
210	remove_hash(sh);
211
212	sh->sector = sector;
213	sh->pd_idx = pd_idx;
214	sh->state = 0;
215
216	for (i=disks; i--; ) {
217		struct r5dev *dev = &sh->dev[i];
218
219		if (dev->toread || dev->towrite || dev->written ||
220		    test_bit(R5_LOCKED, &dev->flags)) {
221			PRINTK("sector=%llx i=%d %p %p %p %d\n",
222				(unsigned long long)sh->sector, i, dev->toread,
223				dev->towrite, dev->written,
224				test_bit(R5_LOCKED, &dev->flags));
225			BUG();
226		}
227		dev->flags = 0;
228		raid6_build_block(sh, i);
229	}
230	insert_hash(conf, sh);
231}
232
/*
 * Hash-table lookup of a cached stripe by sector.  Returns NULL if the
 * stripe is not in the cache.  conf->device_lock must be held.
 */
233static struct stripe_head *__find_stripe(raid6_conf_t *conf, sector_t sector)
234{
235	struct stripe_head *sh;
236	struct hlist_node *hn;
237
238	CHECK_DEVLOCK();
239	PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector);
240	hlist_for_each_entry (sh, hn, stripe_hash(conf, sector), hash)
241		if (sh->sector == sector)
242			return sh;
243	PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector);
244	return NULL;
245}
246
247static void unplug_slaves(mddev_t *mddev);
248
/*
 * Find or allocate the stripe for 'sector', sleeping while the array is
 * quiesced or the stripe cache is exhausted (unless 'noblock' is set).
 * Returns the stripe with its reference count raised; returns NULL only
 * in the noblock case when no free stripe was immediately available.
 */
249static struct stripe_head *get_active_stripe(raid6_conf_t *conf, sector_t sector,
250			int pd_idx, int noblock)
251{
252	struct stripe_head *sh;
253
254	PRINTK("get_stripe, sector %llu\n", (unsigned long long)sector);
255
256	spin_lock_irq(&conf->device_lock);
257
258	do {
		/* Don't hand out stripes while the array is quiesced. */
259		wait_event_lock_irq(conf->wait_for_stripe,
260				    conf->quiesce == 0,
261				    conf->device_lock, /* nothing */);
262		sh = __find_stripe(conf, sector);
263		if (!sh) {
264			if (!conf->inactive_blocked)
265				sh = get_free_stripe(conf);
266			if (noblock && sh == NULL)
267				break;
268			if (!sh) {
				/* Cache exhausted: block until it drains below 3/4 full,
				 * unplugging the member devices while we wait. */
269				conf->inactive_blocked = 1;
270				wait_event_lock_irq(conf->wait_for_stripe,
271						    !list_empty(&conf->inactive_list) &&
272						    (atomic_read(&conf->active_stripes)
273						     < (conf->max_nr_stripes *3/4)
274						     || !conf->inactive_blocked),
275						    conf->device_lock,
276						    unplug_slaves(conf->mddev);
277					);
278				conf->inactive_blocked = 0;
279			} else
280				init_stripe(sh, sector, pd_idx);
281		} else {
			/* Found in cache: take it off whichever list it sits on. */
282			if (atomic_read(&sh->count)) {
283				BUG_ON(!list_empty(&sh->lru));
284			} else {
285				if (!test_bit(STRIPE_HANDLE, &sh->state))
286					atomic_inc(&conf->active_stripes);
287				BUG_ON(list_empty(&sh->lru));
288				list_del_init(&sh->lru);
289			}
290		}
291	} while (sh == NULL);
292
293	if (sh)
294		atomic_inc(&sh->count);
295
296	spin_unlock_irq(&conf->device_lock);
297	return sh;
298}
299
/*
 * Allocate and initialize one stripe head (plus one page per raid disk)
 * and park it on the inactive list via release_stripe().  Returns 1 on
 * success, 0 on allocation failure.
 */
300static int grow_one_stripe(raid6_conf_t *conf)
301{
302	struct stripe_head *sh;
303	sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
304	if (!sh)
305		return 0;
	/* stripe_head embeds dev[1]; zero the full raid_disks-sized object. */
306	memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
307	sh->raid_conf = conf;
308	spin_lock_init(&sh->lock);
309
310	if (grow_buffers(sh, conf->raid_disks)) {
311		shrink_buffers(sh, conf->raid_disks);
312		kmem_cache_free(conf->slab_cache, sh);
313		return 0;
314	}
315	/* we just created an active stripe so... */
316	atomic_set(&sh->count, 1);
317	atomic_inc(&conf->active_stripes);
318	INIT_LIST_HEAD(&sh->lru);
319	release_stripe(sh);
320	return 1;
321}
322
/*
 * Create the per-array "raid6/<mdname>" slab cache and populate it with
 * 'num' stripes.  Returns 0 on success, 1 on any failure (cache
 * creation or stripe allocation).
 */
323static int grow_stripes(raid6_conf_t *conf, int num)
324{
325	kmem_cache_t *sc;
326	int devs = conf->raid_disks;
327
328	sprintf(conf->cache_name[0], "raid6/%s", mdname(conf->mddev));
329
	/* Object size covers the trailing dev[] array for all raid disks. */
330	sc = kmem_cache_create(conf->cache_name[0],
331			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
332			       0, 0, NULL, NULL);
333	if (!sc)
334		return 1;
335	conf->slab_cache = sc;
336	while (num--)
337		if (!grow_one_stripe(conf))
338			return 1;
339	return 0;
340}
341
/*
 * Tear down one inactive stripe: free its pages and return it to the
 * slab cache.  Returns 1 if a stripe was freed, 0 when the inactive
 * list is empty.
 */
342static int drop_one_stripe(raid6_conf_t *conf)
343{
344	struct stripe_head *sh;
345	spin_lock_irq(&conf->device_lock);
346	sh = get_free_stripe(conf);
347	spin_unlock_irq(&conf->device_lock);
348	if (!sh)
349		return 0;
350	BUG_ON(atomic_read(&sh->count));
351	shrink_buffers(sh, conf->raid_disks);
352	kmem_cache_free(conf->slab_cache, sh);
353	atomic_dec(&conf->active_stripes);
354	return 1;
355}
356
/* Free every inactive stripe, then destroy the slab cache itself. */
357static void shrink_stripes(raid6_conf_t *conf)
358{
359	while (drop_one_stripe(conf))
360		;
361
362	if (conf->slab_cache)
363		kmem_cache_destroy(conf->slab_cache);
364	conf->slab_cache = NULL;
365}
366
367static int raid6_end_read_request(struct bio * bi, unsigned int bytes_done,
368 int error)
369{
370 struct stripe_head *sh = bi->bi_private;
371 raid6_conf_t *conf = sh->raid_conf;
372 int disks = conf->raid_disks, i;
373 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
374
375 if (bi->bi_size)
376 return 1;
377
378 for (i=0 ; i<disks; i++)
379 if (bi == &sh->dev[i].req)
380 break;
381
382 PRINTK("end_read_request %llu/%d, count: %d, uptodate %d.\n",
383 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
384 uptodate);
385 if (i == disks) {
386 BUG();
387 return 0;
388 }
389
390 if (uptodate) {
391#if 0
392 struct bio *bio;
393 unsigned long flags;
394 spin_lock_irqsave(&conf->device_lock, flags);
395 /* we can return a buffer if we bypassed the cache or
396 * if the top buffer is not in highmem. If there are
397 * multiple buffers, leave the extra work to
398 * handle_stripe
399 */
400 buffer = sh->bh_read[i];
401 if (buffer &&
402 (!PageHighMem(buffer->b_page)
403 || buffer->b_page == bh->b_page )
404 ) {
405 sh->bh_read[i] = buffer->b_reqnext;
406 buffer->b_reqnext = NULL;
407 } else
408 buffer = NULL;
409 spin_unlock_irqrestore(&conf->device_lock, flags);
410 if (sh->bh_page[i]==bh->b_page)
411 set_buffer_uptodate(bh);
412 if (buffer) {
413 if (buffer->b_page != bh->b_page)
414 memcpy(buffer->b_data, bh->b_data, bh->b_size);
415 buffer->b_end_io(buffer, 1);
416 }
417#else
418 set_bit(R5_UPTODATE, &sh->dev[i].flags);
419#endif
420 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
421 printk(KERN_INFO "raid6: read error corrected!!\n");
422 clear_bit(R5_ReadError, &sh->dev[i].flags);
423 clear_bit(R5_ReWrite, &sh->dev[i].flags);
424 }
425 if (atomic_read(&conf->disks[i].rdev->read_errors))
426 atomic_set(&conf->disks[i].rdev->read_errors, 0);
427 } else {
428 int retry = 0;
429 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
430 atomic_inc(&conf->disks[i].rdev->read_errors);
431 if (conf->mddev->degraded)
432 printk(KERN_WARNING "raid6: read error not correctable.\n");
433 else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
434 /* Oh, no!!! */
435 printk(KERN_WARNING "raid6: read error NOT corrected!!\n");
436 else if (atomic_read(&conf->disks[i].rdev->read_errors)
437 > conf->max_nr_stripes)
438 printk(KERN_WARNING
439 "raid6: Too many read errors, failing device.\n");
440 else
441 retry = 1;
442 if (retry)
443 set_bit(R5_ReadError, &sh->dev[i].flags);
444 else {
445 clear_bit(R5_ReadError, &sh->dev[i].flags);
446 clear_bit(R5_ReWrite, &sh->dev[i].flags);
447 md_error(conf->mddev, conf->disks[i].rdev);
448 }
449 }
450 rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
451#if 0
452 /* must restore b_page before unlocking buffer... */
453 if (sh->bh_page[i] != bh->b_page) {
454 bh->b_page = sh->bh_page[i];
455 bh->b_data = page_address(bh->b_page);
456 clear_buffer_uptodate(bh);
457 }
458#endif
459 clear_bit(R5_LOCKED, &sh->dev[i].flags);
460 set_bit(STRIPE_HANDLE, &sh->state);
461 release_stripe(sh);
462 return 0;
463}
464
/*
 * Completion handler for a stripe-cache write (the bio embedded in
 * sh->dev[i].req).  A failed write immediately fails the member device.
 * Returns 1 while the bio has bytes outstanding, 0 once fully processed.
 */
static int raid6_end_write_request (struct bio *bi, unsigned int bytes_done,
				    int error)
{
	struct stripe_head *sh = bi->bi_private;
	raid6_conf_t *conf = sh->raid_conf;
	int disks = conf->raid_disks, i;
	unsigned long flags;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);

	/* Partial completion: more segments still outstanding. */
	if (bi->bi_size)
		return 1;

	/* Identify which member disk's embedded request this bio is. */
	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	PRINTK("end_write_request %llu/%d, count %d, uptodate: %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return 0;
	}

	/* device_lock covers error accounting and the stripe release. */
	spin_lock_irqsave(&conf->device_lock, flags);
	if (!uptodate)
		/* No retry path for writes: fail the device. */
		md_error(conf->mddev, conf->disks[i].rdev);

	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);

	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	/* Lock is already held, so use the __ (unlocked) release variant. */
	__release_stripe(conf, sh);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	return 0;
}
501
502
503static sector_t compute_blocknr(struct stripe_head *sh, int i);
504
505static void raid6_build_block (struct stripe_head *sh, int i)
506{
507 struct r5dev *dev = &sh->dev[i];
508 int pd_idx = sh->pd_idx;
509 int qd_idx = raid6_next_disk(pd_idx, sh->raid_conf->raid_disks);
510
511 bio_init(&dev->req);
512 dev->req.bi_io_vec = &dev->vec;
513 dev->req.bi_vcnt++;
514 dev->req.bi_max_vecs++;
515 dev->vec.bv_page = dev->page;
516 dev->vec.bv_len = STRIPE_SIZE;
517 dev->vec.bv_offset = 0;
518
519 dev->req.bi_sector = sh->sector;
520 dev->req.bi_private = sh;
521
522 dev->flags = 0;
523 if (i != pd_idx && i != qd_idx)
524 dev->sector = compute_blocknr(sh, i);
525}
526
/*
 * Mark a member device as Faulty.  Updates the degraded/failed counters
 * when the device was previously in-sync, dirties the superblock, and
 * aborts any running recovery.  Idempotent: an already-Faulty device is
 * left untouched.
 */
static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	raid6_conf_t *conf = (raid6_conf_t *) mddev->private;
	PRINTK("raid6: error called\n");

	if (!test_bit(Faulty, &rdev->flags)) {
		/* Superblock must record the failure. */
		mddev->sb_dirty = 1;
		if (test_bit(In_sync, &rdev->flags)) {
			conf->working_disks--;
			mddev->degraded++;
			conf->failed_disks++;
			clear_bit(In_sync, &rdev->flags);
			/*
			 * if recovery was running, make sure it aborts.
			 */
			set_bit(MD_RECOVERY_ERR, &mddev->recovery);
		}
		set_bit(Faulty, &rdev->flags);
		printk (KERN_ALERT
			"raid6: Disk failure on %s, disabling device."
			" Operation continuing on %d devices\n",
			bdevname(rdev->bdev,b), conf->working_disks);
	}
}
552
553/*
554 * Input: a 'big' sector number,
555 * Output: index of the data and parity disk, and the sector # in them.
556 */
557static sector_t raid6_compute_sector(sector_t r_sector, unsigned int raid_disks,
558 unsigned int data_disks, unsigned int * dd_idx,
559 unsigned int * pd_idx, raid6_conf_t *conf)
560{
561 long stripe;
562 unsigned long chunk_number;
563 unsigned int chunk_offset;
564 sector_t new_sector;
565 int sectors_per_chunk = conf->chunk_size >> 9;
566
567 /* First compute the information on this sector */
568
569 /*
570 * Compute the chunk number and the sector offset inside the chunk
571 */
572 chunk_offset = sector_div(r_sector, sectors_per_chunk);
573 chunk_number = r_sector;
574 if ( r_sector != chunk_number ) {
575 printk(KERN_CRIT "raid6: ERROR: r_sector = %llu, chunk_number = %lu\n",
576 (unsigned long long)r_sector, (unsigned long)chunk_number);
577 BUG();
578 }
579
580 /*
581 * Compute the stripe number
582 */
583 stripe = chunk_number / data_disks;
584
585 /*
586 * Compute the data disk and parity disk indexes inside the stripe
587 */
588 *dd_idx = chunk_number % data_disks;
589
590 /*
591 * Select the parity disk based on the user selected algorithm.
592 */
593
594 /**** FIX THIS ****/
595 switch (conf->algorithm) {
596 case ALGORITHM_LEFT_ASYMMETRIC:
597 *pd_idx = raid_disks - 1 - (stripe % raid_disks);
598 if (*pd_idx == raid_disks-1)
599 (*dd_idx)++; /* Q D D D P */
600 else if (*dd_idx >= *pd_idx)
601 (*dd_idx) += 2; /* D D P Q D */
602 break;
603 case ALGORITHM_RIGHT_ASYMMETRIC:
604 *pd_idx = stripe % raid_disks;
605 if (*pd_idx == raid_disks-1)
606 (*dd_idx)++; /* Q D D D P */
607 else if (*dd_idx >= *pd_idx)
608 (*dd_idx) += 2; /* D D P Q D */
609 break;
610 case ALGORITHM_LEFT_SYMMETRIC:
611 *pd_idx = raid_disks - 1 - (stripe % raid_disks);
612 *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
613 break;
614 case ALGORITHM_RIGHT_SYMMETRIC:
615 *pd_idx = stripe % raid_disks;
616 *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
617 break;
618 default:
619 printk (KERN_CRIT "raid6: unsupported algorithm %d\n",
620 conf->algorithm);
621 }
622
623 PRINTK("raid6: chunk_number = %lu, pd_idx = %u, dd_idx = %u\n",
624 chunk_number, *pd_idx, *dd_idx);
625
626 /*
627 * Finally, compute the new sector number
628 */
629 new_sector = (sector_t) stripe * sectors_per_chunk + chunk_offset;
630 return new_sector;
631}
632
633
/*
 * Inverse of raid6_compute_sector(): given a stripe and a member-disk
 * index i, compute the array sector stored in that slot.  The result is
 * cross-checked by mapping it forward again; a mismatch logs an error
 * and returns 0.
 */
static sector_t compute_blocknr(struct stripe_head *sh, int i)
{
	raid6_conf_t *conf = sh->raid_conf;
	int raid_disks = conf->raid_disks, data_disks = raid_disks - 2;
	sector_t new_sector = sh->sector, check;
	int sectors_per_chunk = conf->chunk_size >> 9;
	sector_t stripe;
	int chunk_offset;
	int chunk_number, dummy1, dummy2, dd_idx = i;
	sector_t r_sector;
	int i0 = i;

	chunk_offset = sector_div(new_sector, sectors_per_chunk);
	stripe = new_sector;
	/* Catch truncation of the stripe number. */
	if ( new_sector != stripe ) {
		printk(KERN_CRIT "raid6: ERROR: new_sector = %llu, stripe = %lu\n",
		       (unsigned long long)new_sector, (unsigned long)stripe);
		BUG();
	}

	/* Undo the dd_idx shift applied by raid6_compute_sector() for the
	 * P and Q slots that precede this data slot in the rotation.
	 */
	switch (conf->algorithm) {
	case ALGORITHM_LEFT_ASYMMETRIC:
	case ALGORITHM_RIGHT_ASYMMETRIC:
		if (sh->pd_idx == raid_disks-1)
			i--; 	/* Q D D D P */
		else if (i > sh->pd_idx)
			i -= 2; /* D D P Q D */
		break;
	case ALGORITHM_LEFT_SYMMETRIC:
	case ALGORITHM_RIGHT_SYMMETRIC:
		if (sh->pd_idx == raid_disks-1)
			i--; /* Q D D D P */
		else {
			/* D D P Q D */
			if (i < sh->pd_idx)
				i += raid_disks;
			i -= (sh->pd_idx + 2);
		}
		break;
	default:
		printk (KERN_CRIT "raid6: unsupported algorithm %d\n",
			conf->algorithm);
	}

	PRINTK("raid6: compute_blocknr: pd_idx = %u, i0 = %u, i = %u\n", sh->pd_idx, i0, i);

	chunk_number = stripe * data_disks + i;
	r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;

	/* Round-trip sanity check against the forward mapping. */
	check = raid6_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf);
	if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) {
		printk(KERN_CRIT "raid6: compute_blocknr: map not correct\n");
		return 0;
	}
	return r_sector;
}
690
691
692
693/*
694 * Copy data between a page in the stripe cache, and one or more bion
695 * The page could align with the middle of the bio, or there could be
696 * several bion, each with several bio_vecs, which cover part of the page
697 * Multiple bion are linked together on bi_next. There may be extras
698 * at the end of this list. We ignore them.
699 */
700static void copy_data(int frombio, struct bio *bio,
701 struct page *page,
702 sector_t sector)
703{
704 char *pa = page_address(page);
705 struct bio_vec *bvl;
706 int i;
707 int page_offset;
708
709 if (bio->bi_sector >= sector)
710 page_offset = (signed)(bio->bi_sector - sector) * 512;
711 else
712 page_offset = (signed)(sector - bio->bi_sector) * -512;
713 bio_for_each_segment(bvl, bio, i) {
714 int len = bio_iovec_idx(bio,i)->bv_len;
715 int clen;
716 int b_offset = 0;
717
718 if (page_offset < 0) {
719 b_offset = -page_offset;
720 page_offset += b_offset;
721 len -= b_offset;
722 }
723
724 if (len > 0 && page_offset + len > STRIPE_SIZE)
725 clen = STRIPE_SIZE - page_offset;
726 else clen = len;
727
728 if (clen > 0) {
729 char *ba = __bio_kmap_atomic(bio, i, KM_USER0);
730 if (frombio)
731 memcpy(pa+page_offset, ba+b_offset, clen);
732 else
733 memcpy(ba+b_offset, pa+page_offset, clen);
734 __bio_kunmap_atomic(ba, KM_USER0);
735 }
736 if (clen < len) /* hit end of page */
737 break;
738 page_offset += len;
739 }
740}
741
/* Flush the accumulated XOR sources when the pointer table is full.
 * ptr[0] is the destination/accumulator, so count restarts at 1 and
 * later sources keep XOR-ing into the same target.
 */
#define check_xor() 	do { 						\
			   if (count == MAX_XOR_BLOCKS) {		\
				xor_block(count, STRIPE_SIZE, ptr);	\
				count = 1;				\
			   }						\
			} while(0)
748
/* Compute P and Q syndromes */
/*
 * method selects what to do with pending write data before the syndrome
 * is generated:
 *   RECONSTRUCT_WRITE - move each towrite chain to written and copy the
 *                       bio data into the cache pages, then regenerate
 *                       P and Q and lock them for writeout.
 *   UPDATE_PARITY     - regenerate P and Q in place (no writeout lock).
 *   READ_MODIFY_WRITE / CHECK_PARITY - not supported here (BUG).
 */
static void compute_parity(struct stripe_head *sh, int method)
{
	raid6_conf_t *conf = sh->raid_conf;
	int i, pd_idx = sh->pd_idx, qd_idx, d0_idx, disks = conf->raid_disks, count;
	struct bio *chosen;
	/**** FIX THIS: This could be very bad if disks is close to 256 ****/
	void *ptrs[disks];

	/* Q follows P; d0 is the first data disk after Q in rotation order. */
	qd_idx = raid6_next_disk(pd_idx, disks);
	d0_idx = raid6_next_disk(qd_idx, disks);

	PRINTK("compute_parity, stripe %llu, method %d\n",
		(unsigned long long)sh->sector, method);

	switch(method) {
	case READ_MODIFY_WRITE:
		BUG();		/* READ_MODIFY_WRITE N/A for RAID-6 */
	case RECONSTRUCT_WRITE:
		/* Claim every pending data write for this parity pass. */
		for (i= disks; i-- ;)
			if ( i != pd_idx && i != qd_idx && sh->dev[i].towrite ) {
				chosen = sh->dev[i].towrite;
				sh->dev[i].towrite = NULL;

				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
					wake_up(&conf->wait_for_overlap);

				BUG_ON(sh->dev[i].written);
				sh->dev[i].written = chosen;
			}
		break;
	case CHECK_PARITY:
		BUG();		/* Not implemented yet */
	}

	/* Copy the claimed bio data into the stripe cache pages. */
	for (i = disks; i--;)
		if (sh->dev[i].written) {
			sector_t sector = sh->dev[i].sector;
			struct bio *wbi = sh->dev[i].written;
			while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
				copy_data(1, wbi, sh->dev[i].page, sector);
				wbi = r5_next_bio(wbi, sector);
			}

			set_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(R5_UPTODATE, &sh->dev[i].flags);
		}

//	switch(method) {
//	case RECONSTRUCT_WRITE:
//	case CHECK_PARITY:
//	case UPDATE_PARITY:
		/* Note that unlike RAID-5, the ordering of the disks matters greatly. */
		/* FIX: Is this ordering of drives even remotely optimal? */
		/* Build the source table in rotation order starting at d0;
		 * gen_syndrome expects data blocks first, then P, then Q.
		 */
		count = 0;
		i = d0_idx;
		do {
			ptrs[count++] = page_address(sh->dev[i].page);
			if (count <= disks-2 && !test_bit(R5_UPTODATE, &sh->dev[i].flags))
				printk("block %d/%d not uptodate on parity calc\n", i,count);
			i = raid6_next_disk(i, disks);
		} while ( i != d0_idx );
//		break;
//	}

	raid6_call.gen_syndrome(disks, STRIPE_SIZE, ptrs);

	switch(method) {
	case RECONSTRUCT_WRITE:
		set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
		set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
		set_bit(R5_LOCKED,   &sh->dev[pd_idx].flags);
		set_bit(R5_LOCKED,   &sh->dev[qd_idx].flags);
		break;
	case UPDATE_PARITY:
		set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
		set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
		break;
	}
}
829
/* Compute one missing block */
/*
 * Recompute block dd_idx by XOR-ing all other up-to-date data blocks
 * with P.  The Q block cannot be recovered by XOR, so if dd_idx is the
 * Q slot the full syndrome is regenerated instead.
 *
 * nozero == 0: normal recovery, result marked R5_UPTODATE.
 * nozero != 0: XOR into the existing page contents without zeroing
 *              first (used for parity checking); result is NOT marked
 *              up-to-date.
 */
static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero)
{
	raid6_conf_t *conf = sh->raid_conf;
	int i, count, disks = conf->raid_disks;
	void *ptr[MAX_XOR_BLOCKS], *p;
	int pd_idx = sh->pd_idx;
	int qd_idx = raid6_next_disk(pd_idx, disks);

	PRINTK("compute_block_1, stripe %llu, idx %d\n",
		(unsigned long long)sh->sector, dd_idx);

	if ( dd_idx == qd_idx ) {
		/* We're actually computing the Q drive */
		compute_parity(sh, UPDATE_PARITY);
	} else {
		/* ptr[0] is the XOR accumulator (the target page). */
		ptr[0] = page_address(sh->dev[dd_idx].page);
		if (!nozero) memset(ptr[0], 0, STRIPE_SIZE);
		count = 1;
		for (i = disks ; i--; ) {
			if (i == dd_idx || i == qd_idx)
				continue;
			p = page_address(sh->dev[i].page);
			if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
				ptr[count++] = p;
			else
				printk("compute_block() %d, stripe %llu, %d"
				       " not present\n", dd_idx,
				       (unsigned long long)sh->sector, i);

			check_xor();
		}
		/* Flush any sources still pending in the table. */
		if (count != 1)
			xor_block(count, STRIPE_SIZE, ptr);
		if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
		else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
	}
}
868
/* Compute two missing blocks */
/*
 * Recover the two blocks dd_idx1 and dd_idx2 using the RAID-6 recovery
 * routines.  The strategy depends on which slots are missing:
 *   P+Q missing  -> just regenerate the syndrome.
 *   D+Q missing  -> recover D from P by XOR, then regenerate Q.
 *   D+P missing  -> raid6_datap_recov().
 *   D+D missing  -> raid6_2data_recov().
 */
static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
{
	raid6_conf_t *conf = sh->raid_conf;
	int i, count, disks = conf->raid_disks;
	int pd_idx = sh->pd_idx;
	int qd_idx = raid6_next_disk(pd_idx, disks);
	int d0_idx = raid6_next_disk(qd_idx, disks);
	int faila, failb;

	/* faila and failb are disk numbers relative to d0_idx */
	/* pd_idx become disks-2 and qd_idx become disks-1 */
	faila = (dd_idx1 < d0_idx) ? dd_idx1+(disks-d0_idx) : dd_idx1-d0_idx;
	failb = (dd_idx2 < d0_idx) ? dd_idx2+(disks-d0_idx) : dd_idx2-d0_idx;

	BUG_ON(faila == failb);
	/* Ensure faila < failb for the case analysis below. */
	if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; }

	PRINTK("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n",
	       (unsigned long long)sh->sector, dd_idx1, dd_idx2, faila, failb);

	if ( failb == disks-1 ) {
		/* Q disk is one of the missing disks */
		if ( faila == disks-2 ) {
			/* Missing P+Q, just recompute */
			compute_parity(sh, UPDATE_PARITY);
			return;
		} else {
			/* We're missing D+Q; recompute D from P */
			compute_block_1(sh, (dd_idx1 == qd_idx) ? dd_idx2 : dd_idx1, 0);
			compute_parity(sh, UPDATE_PARITY); /* Is this necessary? */
			return;
		}
	}

	/* We're missing D+P or D+D; build pointer table */
	{
		/**** FIX THIS: This could be very bad if disks is close to 256 ****/
		void *ptrs[disks];

		/* Table must be in rotation order starting at d0 so that
		 * faila/failb line up with the recovery routines' layout.
		 */
		count = 0;
		i = d0_idx;
		do {
			ptrs[count++] = page_address(sh->dev[i].page);
			i = raid6_next_disk(i, disks);
			if (i != dd_idx1 && i != dd_idx2 &&
			    !test_bit(R5_UPTODATE, &sh->dev[i].flags))
				printk("compute_2 with missing block %d/%d\n", count, i);
		} while ( i != d0_idx );

		if ( failb == disks-2 ) {
			/* We're missing D+P. */
			raid6_datap_recov(disks, STRIPE_SIZE, faila, ptrs);
		} else {
			/* We're missing D+D. */
			raid6_2data_recov(disks, STRIPE_SIZE, faila, failb, ptrs);
		}

		/* Both the above update both missing blocks */
		set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags);
		set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags);
	}
}
932
933
934/*
935 * Each stripe/dev can have one or more bion attached.
936 * toread/towrite point to the first in a chain.
937 * The bi_next chain must be in order.
938 */
939static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
940{
941 struct bio **bip;
942 raid6_conf_t *conf = sh->raid_conf;
943 int firstwrite=0;
944
945 PRINTK("adding bh b#%llu to stripe s#%llu\n",
946 (unsigned long long)bi->bi_sector,
947 (unsigned long long)sh->sector);
948
949
950 spin_lock(&sh->lock);
951 spin_lock_irq(&conf->device_lock);
952 if (forwrite) {
953 bip = &sh->dev[dd_idx].towrite;
954 if (*bip == NULL && sh->dev[dd_idx].written == NULL)
955 firstwrite = 1;
956 } else
957 bip = &sh->dev[dd_idx].toread;
958 while (*bip && (*bip)->bi_sector < bi->bi_sector) {
959 if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
960 goto overlap;
961 bip = &(*bip)->bi_next;
962 }
963 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
964 goto overlap;
965
966 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
967 if (*bip)
968 bi->bi_next = *bip;
969 *bip = bi;
970 bi->bi_phys_segments ++;
971 spin_unlock_irq(&conf->device_lock);
972 spin_unlock(&sh->lock);
973
974 PRINTK("added bi b#%llu to stripe s#%llu, disk %d.\n",
975 (unsigned long long)bi->bi_sector,
976 (unsigned long long)sh->sector, dd_idx);
977
978 if (conf->mddev->bitmap && firstwrite) {
979 sh->bm_seq = conf->seq_write;
980 bitmap_startwrite(conf->mddev->bitmap, sh->sector,
981 STRIPE_SECTORS, 0);
982 set_bit(STRIPE_BIT_DELAY, &sh->state);
983 }
984
985 if (forwrite) {
986 /* check if page is covered */
987 sector_t sector = sh->dev[dd_idx].sector;
988 for (bi=sh->dev[dd_idx].towrite;
989 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
990 bi && bi->bi_sector <= sector;
991 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
992 if (bi->bi_sector + (bi->bi_size>>9) >= sector)
993 sector = bi->bi_sector + (bi->bi_size>>9);
994 }
995 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
996 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
997 }
998 return 1;
999
1000 overlap:
1001 set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
1002 spin_unlock_irq(&conf->device_lock);
1003 spin_unlock(&sh->lock);
1004 return 0;
1005}
1006
1007
1008static int page_is_zero(struct page *p)
1009{
1010 char *a = page_address(p);
1011 return ((*(u32*)a) == 0 &&
1012 memcmp(a, a+4, STRIPE_SIZE-4)==0);
1013}
1014/*
1015 * handle_stripe - do things to a stripe.
1016 *
1017 * We lock the stripe and then examine the state of various bits
1018 * to see what needs to be done.
1019 * Possible results:
1020 * return some read request which now have data
1021 * return some write requests which are safely on disc
1022 * schedule a read on some buffers
1023 * schedule a write of some buffers
1024 * return confirmation of parity correctness
1025 *
1026 * Parity calculations are done inside the stripe lock
1027 * buffers are taken off read_list or write_list, and bh_cache buffers
1028 * get BH_Lock set before the stripe lock is released.
1029 *
1030 */
1031
1032static void handle_stripe(struct stripe_head *sh, struct page *tmp_page)
1033{
1034 raid6_conf_t *conf = sh->raid_conf;
1035 int disks = conf->raid_disks;
1036 struct bio *return_bi= NULL;
1037 struct bio *bi;
1038 int i;
1039 int syncing;
1040 int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
1041 int non_overwrite = 0;
1042 int failed_num[2] = {0, 0};
1043 struct r5dev *dev, *pdev, *qdev;
1044 int pd_idx = sh->pd_idx;
1045 int qd_idx = raid6_next_disk(pd_idx, disks);
1046 int p_failed, q_failed;
1047
1048 PRINTK("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d, qd_idx=%d\n",
1049 (unsigned long long)sh->sector, sh->state, atomic_read(&sh->count),
1050 pd_idx, qd_idx);
1051
1052 spin_lock(&sh->lock);
1053 clear_bit(STRIPE_HANDLE, &sh->state);
1054 clear_bit(STRIPE_DELAYED, &sh->state);
1055
1056 syncing = test_bit(STRIPE_SYNCING, &sh->state);
1057 /* Now to look around and see what can be done */
1058
1059 rcu_read_lock();
1060 for (i=disks; i--; ) {
1061 mdk_rdev_t *rdev;
1062 dev = &sh->dev[i];
1063 clear_bit(R5_Insync, &dev->flags);
1064
1065 PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
1066 i, dev->flags, dev->toread, dev->towrite, dev->written);
1067 /* maybe we can reply to a read */
1068 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
1069 struct bio *rbi, *rbi2;
1070 PRINTK("Return read for disc %d\n", i);
1071 spin_lock_irq(&conf->device_lock);
1072 rbi = dev->toread;
1073 dev->toread = NULL;
1074 if (test_and_clear_bit(R5_Overlap, &dev->flags))
1075 wake_up(&conf->wait_for_overlap);
1076 spin_unlock_irq(&conf->device_lock);
1077 while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
1078 copy_data(0, rbi, dev->page, dev->sector);
1079 rbi2 = r5_next_bio(rbi, dev->sector);
1080 spin_lock_irq(&conf->device_lock);
1081 if (--rbi->bi_phys_segments == 0) {
1082 rbi->bi_next = return_bi;
1083 return_bi = rbi;
1084 }
1085 spin_unlock_irq(&conf->device_lock);
1086 rbi = rbi2;
1087 }
1088 }
1089
1090 /* now count some things */
1091 if (test_bit(R5_LOCKED, &dev->flags)) locked++;
1092 if (test_bit(R5_UPTODATE, &dev->flags)) uptodate++;
1093
1094
1095 if (dev->toread) to_read++;
1096 if (dev->towrite) {
1097 to_write++;
1098 if (!test_bit(R5_OVERWRITE, &dev->flags))
1099 non_overwrite++;
1100 }
1101 if (dev->written) written++;
1102 rdev = rcu_dereference(conf->disks[i].rdev);
1103 if (!rdev || !test_bit(In_sync, &rdev->flags)) {
1104 /* The ReadError flag will just be confusing now */
1105 clear_bit(R5_ReadError, &dev->flags);
1106 clear_bit(R5_ReWrite, &dev->flags);
1107 }
1108 if (!rdev || !test_bit(In_sync, &rdev->flags)
1109 || test_bit(R5_ReadError, &dev->flags)) {
1110 if ( failed < 2 )
1111 failed_num[failed] = i;
1112 failed++;
1113 } else
1114 set_bit(R5_Insync, &dev->flags);
1115 }
1116 rcu_read_unlock();
1117 PRINTK("locked=%d uptodate=%d to_read=%d"
1118 " to_write=%d failed=%d failed_num=%d,%d\n",
1119 locked, uptodate, to_read, to_write, failed,
1120 failed_num[0], failed_num[1]);
1121 /* check if the array has lost >2 devices and, if so, some requests might
1122 * need to be failed
1123 */
1124 if (failed > 2 && to_read+to_write+written) {
1125 for (i=disks; i--; ) {
1126 int bitmap_end = 0;
1127
1128 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
1129 mdk_rdev_t *rdev;
1130 rcu_read_lock();
1131 rdev = rcu_dereference(conf->disks[i].rdev);
1132 if (rdev && test_bit(In_sync, &rdev->flags))
1133 /* multiple read failures in one stripe */
1134 md_error(conf->mddev, rdev);
1135 rcu_read_unlock();
1136 }
1137
1138 spin_lock_irq(&conf->device_lock);
1139 /* fail all writes first */
1140 bi = sh->dev[i].towrite;
1141 sh->dev[i].towrite = NULL;
1142 if (bi) { to_write--; bitmap_end = 1; }
1143
1144 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
1145 wake_up(&conf->wait_for_overlap);
1146
1147 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
1148 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
1149 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1150 if (--bi->bi_phys_segments == 0) {
1151 md_write_end(conf->mddev);
1152 bi->bi_next = return_bi;
1153 return_bi = bi;
1154 }
1155 bi = nextbi;
1156 }
1157 /* and fail all 'written' */
1158 bi = sh->dev[i].written;
1159 sh->dev[i].written = NULL;
1160 if (bi) bitmap_end = 1;
1161 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) {
1162 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
1163 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1164 if (--bi->bi_phys_segments == 0) {
1165 md_write_end(conf->mddev);
1166 bi->bi_next = return_bi;
1167 return_bi = bi;
1168 }
1169 bi = bi2;
1170 }
1171
1172 /* fail any reads if this device is non-operational */
1173 if (!test_bit(R5_Insync, &sh->dev[i].flags) ||
1174 test_bit(R5_ReadError, &sh->dev[i].flags)) {
1175 bi = sh->dev[i].toread;
1176 sh->dev[i].toread = NULL;
1177 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
1178 wake_up(&conf->wait_for_overlap);
1179 if (bi) to_read--;
1180 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
1181 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
1182 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1183 if (--bi->bi_phys_segments == 0) {
1184 bi->bi_next = return_bi;
1185 return_bi = bi;
1186 }
1187 bi = nextbi;
1188 }
1189 }
1190 spin_unlock_irq(&conf->device_lock);
1191 if (bitmap_end)
1192 bitmap_endwrite(conf->mddev->bitmap, sh->sector,
1193 STRIPE_SECTORS, 0, 0);
1194 }
1195 }
1196 if (failed > 2 && syncing) {
1197 md_done_sync(conf->mddev, STRIPE_SECTORS,0);
1198 clear_bit(STRIPE_SYNCING, &sh->state);
1199 syncing = 0;
1200 }
1201
1202 /*
1203 * might be able to return some write requests if the parity blocks
1204 * are safe, or on a failed drive
1205 */
1206 pdev = &sh->dev[pd_idx];
1207 p_failed = (failed >= 1 && failed_num[0] == pd_idx)
1208 || (failed >= 2 && failed_num[1] == pd_idx);
1209 qdev = &sh->dev[qd_idx];
1210 q_failed = (failed >= 1 && failed_num[0] == qd_idx)
1211 || (failed >= 2 && failed_num[1] == qd_idx);
1212
1213 if ( written &&
1214 ( p_failed || ((test_bit(R5_Insync, &pdev->flags)
1215 && !test_bit(R5_LOCKED, &pdev->flags)
1216 && test_bit(R5_UPTODATE, &pdev->flags))) ) &&
1217 ( q_failed || ((test_bit(R5_Insync, &qdev->flags)
1218 && !test_bit(R5_LOCKED, &qdev->flags)
1219 && test_bit(R5_UPTODATE, &qdev->flags))) ) ) {
1220 /* any written block on an uptodate or failed drive can be
1221 * returned. Note that if we 'wrote' to a failed drive,
1222 * it will be UPTODATE, but never LOCKED, so we don't need
1223 * to test 'failed' directly.
1224 */
1225 for (i=disks; i--; )
1226 if (sh->dev[i].written) {
1227 dev = &sh->dev[i];
1228 if (!test_bit(R5_LOCKED, &dev->flags) &&
1229 test_bit(R5_UPTODATE, &dev->flags) ) {
1230 /* We can return any write requests */
1231 int bitmap_end = 0;
1232 struct bio *wbi, *wbi2;
1233 PRINTK("Return write for stripe %llu disc %d\n",
1234 (unsigned long long)sh->sector, i);
1235 spin_lock_irq(&conf->device_lock);
1236 wbi = dev->written;
1237 dev->written = NULL;
1238 while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) {
1239 wbi2 = r5_next_bio(wbi, dev->sector);
1240 if (--wbi->bi_phys_segments == 0) {
1241 md_write_end(conf->mddev);
1242 wbi->bi_next = return_bi;
1243 return_bi = wbi;
1244 }
1245 wbi = wbi2;
1246 }
1247 if (dev->towrite == NULL)
1248 bitmap_end = 1;
1249 spin_unlock_irq(&conf->device_lock);
1250 if (bitmap_end)
1251 bitmap_endwrite(conf->mddev->bitmap, sh->sector,
1252 STRIPE_SECTORS,
1253 !test_bit(STRIPE_DEGRADED, &sh->state), 0);
1254 }
1255 }
1256 }
1257
1258 /* Now we might consider reading some blocks, either to check/generate
1259 * parity, or to satisfy requests
1260 * or to load a block that is being partially written.
1261 */
1262 if (to_read || non_overwrite || (to_write && failed) || (syncing && (uptodate < disks))) {
1263 for (i=disks; i--;) {
1264 dev = &sh->dev[i];
1265 if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
1266 (dev->toread ||
1267 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
1268 syncing ||
1269 (failed >= 1 && (sh->dev[failed_num[0]].toread || to_write)) ||
1270 (failed >= 2 && (sh->dev[failed_num[1]].toread || to_write))
1271 )
1272 ) {
1273 /* we would like to get this block, possibly
1274 * by computing it, but we might not be able to
1275 */
1276 if (uptodate == disks-1) {
1277 PRINTK("Computing stripe %llu block %d\n",
1278 (unsigned long long)sh->sector, i);
1279 compute_block_1(sh, i, 0);
1280 uptodate++;
1281 } else if ( uptodate == disks-2 && failed >= 2 ) {
1282 /* Computing 2-failure is *very* expensive; only do it if failed >= 2 */
1283 int other;
1284 for (other=disks; other--;) {
1285 if ( other == i )
1286 continue;
1287 if ( !test_bit(R5_UPTODATE, &sh->dev[other].flags) )
1288 break;
1289 }
1290 BUG_ON(other < 0);
1291 PRINTK("Computing stripe %llu blocks %d,%d\n",
1292 (unsigned long long)sh->sector, i, other);
1293 compute_block_2(sh, i, other);
1294 uptodate += 2;
1295 } else if (test_bit(R5_Insync, &dev->flags)) {
1296 set_bit(R5_LOCKED, &dev->flags);
1297 set_bit(R5_Wantread, &dev->flags);
1298#if 0
1299 /* if I am just reading this block and we don't have
1300 a failed drive, or any pending writes then sidestep the cache */
1301 if (sh->bh_read[i] && !sh->bh_read[i]->b_reqnext &&
1302 ! syncing && !failed && !to_write) {
1303 sh->bh_cache[i]->b_page = sh->bh_read[i]->b_page;
1304 sh->bh_cache[i]->b_data = sh->bh_read[i]->b_data;
1305 }
1306#endif
1307 locked++;
1308 PRINTK("Reading block %d (sync=%d)\n",
1309 i, syncing);
1310 }
1311 }
1312 }
1313 set_bit(STRIPE_HANDLE, &sh->state);
1314 }
1315
1316 /* now to consider writing and what else, if anything should be read */
1317 if (to_write) {
1318 int rcw=0, must_compute=0;
1319 for (i=disks ; i--;) {
1320 dev = &sh->dev[i];
1321 /* Would I have to read this buffer for reconstruct_write */
1322 if (!test_bit(R5_OVERWRITE, &dev->flags)
1323 && i != pd_idx && i != qd_idx
1324 && (!test_bit(R5_LOCKED, &dev->flags)
1325#if 0
1326 || sh->bh_page[i] != bh->b_page
1327#endif
1328 ) &&
1329 !test_bit(R5_UPTODATE, &dev->flags)) {
1330 if (test_bit(R5_Insync, &dev->flags)) rcw++;
1331 else {
1332 PRINTK("raid6: must_compute: disk %d flags=%#lx\n", i, dev->flags);
1333 must_compute++;
1334 }
1335 }
1336 }
1337 PRINTK("for sector %llu, rcw=%d, must_compute=%d\n",
1338 (unsigned long long)sh->sector, rcw, must_compute);
1339 set_bit(STRIPE_HANDLE, &sh->state);
1340
1341 if (rcw > 0)
1342 /* want reconstruct write, but need to get some data */
1343 for (i=disks; i--;) {
1344 dev = &sh->dev[i];
1345 if (!test_bit(R5_OVERWRITE, &dev->flags)
1346 && !(failed == 0 && (i == pd_idx || i == qd_idx))
1347 && !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
1348 test_bit(R5_Insync, &dev->flags)) {
1349 if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
1350 {
1351 PRINTK("Read_old stripe %llu block %d for Reconstruct\n",
1352 (unsigned long long)sh->sector, i);
1353 set_bit(R5_LOCKED, &dev->flags);
1354 set_bit(R5_Wantread, &dev->flags);
1355 locked++;
1356 } else {
1357 PRINTK("Request delayed stripe %llu block %d for Reconstruct\n",
1358 (unsigned long long)sh->sector, i);
1359 set_bit(STRIPE_DELAYED, &sh->state);
1360 set_bit(STRIPE_HANDLE, &sh->state);
1361 }
1362 }
1363 }
1364 /* now if nothing is locked, and if we have enough data, we can start a write request */
1365 if (locked == 0 && rcw == 0 &&
1366 !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
1367 if ( must_compute > 0 ) {
1368 /* We have failed blocks and need to compute them */
1369 switch ( failed ) {
1370 case 0: BUG();
1371 case 1: compute_block_1(sh, failed_num[0], 0); break;
1372 case 2: compute_block_2(sh, failed_num[0], failed_num[1]); break;
1373 default: BUG(); /* This request should have been failed? */
1374 }
1375 }
1376
1377 PRINTK("Computing parity for stripe %llu\n", (unsigned long long)sh->sector);
1378 compute_parity(sh, RECONSTRUCT_WRITE);
1379 /* now every locked buffer is ready to be written */
1380 for (i=disks; i--;)
1381 if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
1382 PRINTK("Writing stripe %llu block %d\n",
1383 (unsigned long long)sh->sector, i);
1384 locked++;
1385 set_bit(R5_Wantwrite, &sh->dev[i].flags);
1386 }
1387 /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */
1388 set_bit(STRIPE_INSYNC, &sh->state);
1389
1390 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
1391 atomic_dec(&conf->preread_active_stripes);
1392 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
1393 md_wakeup_thread(conf->mddev->thread);
1394 }
1395 }
1396 }
1397
1398 /* maybe we need to check and possibly fix the parity for this stripe
1399 * Any reads will already have been scheduled, so we just see if enough data
1400 * is available
1401 */
1402 if (syncing && locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state)) {
1403 int update_p = 0, update_q = 0;
1404 struct r5dev *dev;
1405
1406 set_bit(STRIPE_HANDLE, &sh->state);
1407
1408 BUG_ON(failed>2);
1409 BUG_ON(uptodate < disks);
1410 /* Want to check and possibly repair P and Q.
1411 * However there could be one 'failed' device, in which
1412 * case we can only check one of them, possibly using the
1413 * other to generate missing data
1414 */
1415
1416 /* If !tmp_page, we cannot do the calculations,
1417 * but as we have set STRIPE_HANDLE, we will soon be called
1418 * by stripe_handle with a tmp_page - just wait until then.
1419 */
1420 if (tmp_page) {
1421 if (failed == q_failed) {
1422 /* The only possible failed device holds 'Q', so it makes
1423 * sense to check P (If anything else were failed, we would
1424 * have used P to recreate it).
1425 */
1426 compute_block_1(sh, pd_idx, 1);
1427 if (!page_is_zero(sh->dev[pd_idx].page)) {
1428 compute_block_1(sh,pd_idx,0);
1429 update_p = 1;
1430 }
1431 }
1432 if (!q_failed && failed < 2) {
1433 /* q is not failed, and we didn't use it to generate
1434 * anything, so it makes sense to check it
1435 */
1436 memcpy(page_address(tmp_page),
1437 page_address(sh->dev[qd_idx].page),
1438 STRIPE_SIZE);
1439 compute_parity(sh, UPDATE_PARITY);
1440 if (memcmp(page_address(tmp_page),
1441 page_address(sh->dev[qd_idx].page),
1442 STRIPE_SIZE)!= 0) {
1443 clear_bit(STRIPE_INSYNC, &sh->state);
1444 update_q = 1;
1445 }
1446 }
1447 if (update_p || update_q) {
1448 conf->mddev->resync_mismatches += STRIPE_SECTORS;
1449 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
1450 /* don't try to repair!! */
1451 update_p = update_q = 0;
1452 }
1453
1454 /* now write out any block on a failed drive,
1455 * or P or Q if they need it
1456 */
1457
1458 if (failed == 2) {
1459 dev = &sh->dev[failed_num[1]];
1460 locked++;
1461 set_bit(R5_LOCKED, &dev->flags);
1462 set_bit(R5_Wantwrite, &dev->flags);
1463 }
1464 if (failed >= 1) {
1465 dev = &sh->dev[failed_num[0]];
1466 locked++;
1467 set_bit(R5_LOCKED, &dev->flags);
1468 set_bit(R5_Wantwrite, &dev->flags);
1469 }
1470
1471 if (update_p) {
1472 dev = &sh->dev[pd_idx];
1473 locked ++;
1474 set_bit(R5_LOCKED, &dev->flags);
1475 set_bit(R5_Wantwrite, &dev->flags);
1476 }
1477 if (update_q) {
1478 dev = &sh->dev[qd_idx];
1479 locked++;
1480 set_bit(R5_LOCKED, &dev->flags);
1481 set_bit(R5_Wantwrite, &dev->flags);
1482 }
1483 clear_bit(STRIPE_DEGRADED, &sh->state);
1484
1485 set_bit(STRIPE_INSYNC, &sh->state);
1486 }
1487 }
1488
1489 if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
1490 md_done_sync(conf->mddev, STRIPE_SECTORS,1);
1491 clear_bit(STRIPE_SYNCING, &sh->state);
1492 }
1493
1494 /* If the failed drives are just a ReadError, then we might need
1495 * to progress the repair/check process
1496 */
1497 if (failed <= 2 && ! conf->mddev->ro)
1498 for (i=0; i<failed;i++) {
1499 dev = &sh->dev[failed_num[i]];
1500 if (test_bit(R5_ReadError, &dev->flags)
1501 && !test_bit(R5_LOCKED, &dev->flags)
1502 && test_bit(R5_UPTODATE, &dev->flags)
1503 ) {
1504 if (!test_bit(R5_ReWrite, &dev->flags)) {
1505 set_bit(R5_Wantwrite, &dev->flags);
1506 set_bit(R5_ReWrite, &dev->flags);
1507 set_bit(R5_LOCKED, &dev->flags);
1508 } else {
1509 /* let's read it back */
1510 set_bit(R5_Wantread, &dev->flags);
1511 set_bit(R5_LOCKED, &dev->flags);
1512 }
1513 }
1514 }
1515 spin_unlock(&sh->lock);
1516
1517 while ((bi=return_bi)) {
1518 int bytes = bi->bi_size;
1519
1520 return_bi = bi->bi_next;
1521 bi->bi_next = NULL;
1522 bi->bi_size = 0;
1523 bi->bi_end_io(bi, bytes, 0);
1524 }
1525 for (i=disks; i-- ;) {
1526 int rw;
1527 struct bio *bi;
1528 mdk_rdev_t *rdev;
1529 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
1530 rw = 1;
1531 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
1532 rw = 0;
1533 else
1534 continue;
1535
1536 bi = &sh->dev[i].req;
1537
1538 bi->bi_rw = rw;
1539 if (rw)
1540 bi->bi_end_io = raid6_end_write_request;
1541 else
1542 bi->bi_end_io = raid6_end_read_request;
1543
1544 rcu_read_lock();
1545 rdev = rcu_dereference(conf->disks[i].rdev);
1546 if (rdev && test_bit(Faulty, &rdev->flags))
1547 rdev = NULL;
1548 if (rdev)
1549 atomic_inc(&rdev->nr_pending);
1550 rcu_read_unlock();
1551
1552 if (rdev) {
1553 if (syncing)
1554 md_sync_acct(rdev->bdev, STRIPE_SECTORS);
1555
1556 bi->bi_bdev = rdev->bdev;
1557 PRINTK("for %llu schedule op %ld on disc %d\n",
1558 (unsigned long long)sh->sector, bi->bi_rw, i);
1559 atomic_inc(&sh->count);
1560 bi->bi_sector = sh->sector + rdev->data_offset;
1561 bi->bi_flags = 1 << BIO_UPTODATE;
1562 bi->bi_vcnt = 1;
1563 bi->bi_max_vecs = 1;
1564 bi->bi_idx = 0;
1565 bi->bi_io_vec = &sh->dev[i].vec;
1566 bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
1567 bi->bi_io_vec[0].bv_offset = 0;
1568 bi->bi_size = STRIPE_SIZE;
1569 bi->bi_next = NULL;
1570 if (rw == WRITE &&
1571 test_bit(R5_ReWrite, &sh->dev[i].flags))
1572 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
1573 generic_make_request(bi);
1574 } else {
1575 if (rw == 1)
1576 set_bit(STRIPE_DEGRADED, &sh->state);
1577 PRINTK("skip op %ld on disc %d for sector %llu\n",
1578 bi->bi_rw, i, (unsigned long long)sh->sector);
1579 clear_bit(R5_LOCKED, &sh->dev[i].flags);
1580 set_bit(STRIPE_HANDLE, &sh->state);
1581 }
1582 }
1583}
1584
1585static void raid6_activate_delayed(raid6_conf_t *conf)
1586{
1587 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
1588 while (!list_empty(&conf->delayed_list)) {
1589 struct list_head *l = conf->delayed_list.next;
1590 struct stripe_head *sh;
1591 sh = list_entry(l, struct stripe_head, lru);
1592 list_del_init(l);
1593 clear_bit(STRIPE_DELAYED, &sh->state);
1594 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
1595 atomic_inc(&conf->preread_active_stripes);
1596 list_add_tail(&sh->lru, &conf->handle_list);
1597 }
1598 }
1599}
1600
/* Re-queue all stripes that were parked waiting for a bitmap flush.
 * Caller holds conf->device_lock (see comment below).
 */
static void activate_bit_delay(raid6_conf_t *conf)
{
	/* device_lock is held */
	struct list_head head;
	/* Splice bitmap_list onto a local head so entries can be detached
	 * one at a time without re-walking the shared list.
	 */
	list_add(&head, &conf->bitmap_list);
	list_del_init(&conf->bitmap_list);
	while (!list_empty(&head)) {
		struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
		list_del_init(&sh->lru);
		/* take a reference so __release_stripe's decrement
		 * re-queues the stripe for handling */
		atomic_inc(&sh->count);
		__release_stripe(conf, sh);
	}
}
1614
/* Unplug the request queue of every live member device so any queued
 * I/O gets dispatched.  An rdev is pinned via nr_pending while its
 * unplug_fn runs, because the RCU read lock must be dropped around the
 * (possibly sleeping) callback.
 */
static void unplug_slaves(mddev_t *mddev)
{
	raid6_conf_t *conf = mddev_to_conf(mddev);
	int i;

	rcu_read_lock();
	for (i=0; i<mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
		/* only bother with devices that actually have I/O pending */
		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);

			/* pin the rdev, then drop RCU before calling out */
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();

			if (r_queue->unplug_fn)
				r_queue->unplug_fn(r_queue);

			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}
1638
/* block-layer unplug callback for the md queue: release any delayed
 * stripes, wake the raid6 daemon, then unplug the member devices.
 */
static void raid6_unplug_device(request_queue_t *q)
{
	mddev_t *mddev = q->queuedata;
	raid6_conf_t *conf = mddev_to_conf(mddev);
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);

	if (blk_remove_plug(q)) {
		/* bump seq_flush so raid6d knows a bitmap unplug is due */
		conf->seq_flush++;
		raid6_activate_delayed(conf);
	}
	md_wakeup_thread(mddev->thread);

	spin_unlock_irqrestore(&conf->device_lock, flags);

	unplug_slaves(mddev);
}
1657
/* Propagate a cache-flush request to every live member device.
 * Returns 0 on success, -EOPNOTSUPP if any member queue lacks an
 * issue_flush_fn, or the first member's error.  As in unplug_slaves(),
 * each rdev is pinned via nr_pending so the RCU lock can be dropped
 * around the potentially blocking flush call.
 */
static int raid6_issue_flush(request_queue_t *q, struct gendisk *disk,
			     sector_t *error_sector)
{
	mddev_t *mddev = q->queuedata;
	raid6_conf_t *conf = mddev_to_conf(mddev);
	int i, ret = 0;

	rcu_read_lock();
	/* stop at the first failure (ret != 0) */
	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct block_device *bdev = rdev->bdev;
			request_queue_t *r_queue = bdev_get_queue(bdev);

			if (!r_queue->issue_flush_fn)
				ret = -EOPNOTSUPP;
			else {
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
							      error_sector);
				rdev_dec_pending(rdev, mddev);
				rcu_read_lock();
			}
		}
	}
	rcu_read_unlock();
	return ret;
}
1687
/* Plug the md queue; device_lock serialises against the unplug path. */
static inline void raid6_plug_device(raid6_conf_t *conf)
{
	spin_lock_irq(&conf->device_lock);
	blk_plug_device(conf->mddev->queue);
	spin_unlock_irq(&conf->device_lock);
}
1694
/* md personality entry point: split the incoming bio into STRIPE_SECTORS
 * sized pieces, attach each to its stripe_head, and handle the stripe.
 * Barrier bios are rejected with -EOPNOTSUPP.  bi_phys_segments is
 * overloaded as a reference count of stripes still using this bio; the
 * bio is completed when it drops to zero.
 */
static int make_request (request_queue_t *q, struct bio * bi)
{
	mddev_t *mddev = q->queuedata;
	raid6_conf_t *conf = mddev_to_conf(mddev);
	const unsigned int raid_disks = conf->raid_disks;
	const unsigned int data_disks = raid_disks - 2;	/* P and Q excluded */
	unsigned int dd_idx, pd_idx;
	sector_t new_sector;
	sector_t logical_sector, last_sector;
	struct stripe_head *sh;
	const int rw = bio_data_dir(bi);

	if (unlikely(bio_barrier(bi))) {
		bio_endio(bi, bi->bi_size, -EOPNOTSUPP);
		return 0;
	}

	md_write_start(mddev, bi);

	disk_stat_inc(mddev->gendisk, ios[rw]);
	disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi));

	/* round the start down to a stripe boundary */
	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
	last_sector = bi->bi_sector + (bi->bi_size>>9);

	bi->bi_next = NULL;
	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */

	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
		DEFINE_WAIT(w);

		/* map the array-logical sector to a device sector and
		 * the data/parity disk indices for this stripe */
		new_sector = raid6_compute_sector(logical_sector,
						  raid_disks, data_disks, &dd_idx, &pd_idx, conf);

		PRINTK("raid6: make_request, sector %llu logical %llu\n",
		       (unsigned long long)new_sector,
		       (unsigned long long)logical_sector);

	retry:
		prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
		sh = get_active_stripe(conf, new_sector, pd_idx, (bi->bi_rw&RWA_MASK));
		if (sh) {
			if (!add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
				/* Add failed due to overlap.  Flush everything
				 * and wait a while
				 */
				raid6_unplug_device(mddev->queue);
				release_stripe(sh);
				schedule();
				goto retry;
			}
			finish_wait(&conf->wait_for_overlap, &w);
			raid6_plug_device(conf);
			handle_stripe(sh, NULL);
			release_stripe(sh);
		} else {
			/* cannot get stripe for read-ahead, just give-up */
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			finish_wait(&conf->wait_for_overlap, &w);
			break;
		}

	}
	/* drop our reference; complete the bio if we were the last user */
	spin_lock_irq(&conf->device_lock);
	if (--bi->bi_phys_segments == 0) {
		int bytes = bi->bi_size;

		if (rw == WRITE )
			md_write_end(mddev);
		bi->bi_size = 0;
		bi->bi_end_io(bi, bytes, 0);
	}
	spin_unlock_irq(&conf->device_lock);
	return 0;
}
1770
/* FIXME go_faster isn't used */
/* Resync/recovery worker: process one stripe (STRIPE_SECTORS) starting at
 * sector_nr.  Returns the number of sectors handled, or a skip count when
 * the bitmap says the range is already in sync or the array is too
 * degraded to make progress.
 */
static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
	raid6_conf_t *conf = (raid6_conf_t *) mddev->private;
	struct stripe_head *sh;
	int sectors_per_chunk = conf->chunk_size >> 9;
	sector_t x;
	unsigned long stripe;
	int chunk_offset;
	int dd_idx, pd_idx;
	sector_t first_sector;
	int raid_disks = conf->raid_disks;
	int data_disks = raid_disks - 2;
	sector_t max_sector = mddev->size << 1;	/* device size in sectors */
	int sync_blocks;
	int still_degraded = 0;
	int i;

	if (sector_nr >= max_sector) {
		/* just being told to finish up .. nothing much to do */
		unplug_slaves(mddev);

		if (mddev->curr_resync < max_sector) /* aborted */
			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
					&sync_blocks, 1);
		else /* completed sync */
			conf->fullsync = 0;
		bitmap_close_sync(mddev->bitmap);

		return 0;
	}
	/* if there are 2 or more failed drives and we are trying
	 * to resync, then assert that we are finished, because there is
	 * nothing we can do.
	 */
	if (mddev->degraded >= 2 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		sector_t rv = (mddev->size << 1) - sector_nr;
		*skipped = 1;
		return rv;
	}
	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
		/* we can skip this block, and probably more */
		sync_blocks /= STRIPE_SECTORS;
		*skipped = 1;
		return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
	}

	/* split sector_nr into stripe number and offset within the chunk */
	x = sector_nr;
	chunk_offset = sector_div(x, sectors_per_chunk);
	stripe = x;
	BUG_ON(x != stripe);	/* catch truncation on 32-bit 'stripe' */

	first_sector = raid6_compute_sector((sector_t)stripe*data_disks*sectors_per_chunk
		+ chunk_offset, raid_disks, data_disks, &dd_idx, &pd_idx, conf);
	/* first try without blocking (arg 1 == noblock) ... */
	sh = get_active_stripe(conf, sector_nr, pd_idx, 1);
	if (sh == NULL) {
		sh = get_active_stripe(conf, sector_nr, pd_idx, 0);
		/* make sure we don't swamp the stripe cache if someone else
		 * is trying to get access
		 */
		schedule_timeout_uninterruptible(1);
	}
	/* Need to check if array will still be degraded after recovery/resync
	 * We don't need to check the 'failed' flag as when that gets set,
	 * recovery aborts.
	 */
	for (i=0; i<mddev->raid_disks; i++)
		if (conf->disks[i].rdev == NULL)
			still_degraded = 1;

	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);

	spin_lock(&sh->lock);
	set_bit(STRIPE_SYNCING, &sh->state);
	clear_bit(STRIPE_INSYNC, &sh->state);
	spin_unlock(&sh->lock);

	handle_stripe(sh, NULL);
	release_stripe(sh);

	return STRIPE_SECTORS;
}
1855
/*
 * This is our raid6 kernel thread.
 *
 * We scan the hash table for stripes which can be handled now.
 * During the scan, completed stripes are saved for us by the interrupt
 * handler, so that they will not have to wait for our next wakeup.
 */
static void raid6d (mddev_t *mddev)
{
	struct stripe_head *sh;
	raid6_conf_t *conf = mddev_to_conf(mddev);
	int handled;

	PRINTK("+++ raid6d active\n");

	md_check_recovery(mddev);

	handled = 0;
	spin_lock_irq(&conf->device_lock);
	while (1) {
		struct list_head *first;

		/* a bitmap flush was requested (by the unplug path):
		 * write it out with the lock dropped, then release any
		 * stripes that were waiting on it */
		if (conf->seq_flush - conf->seq_write > 0) {
			int seq = conf->seq_flush;
			spin_unlock_irq(&conf->device_lock);
			bitmap_unplug(mddev->bitmap);
			spin_lock_irq(&conf->device_lock);
			conf->seq_write = seq;
			activate_bit_delay(conf);
		}

		/* nothing runnable: pull in delayed stripes if the
		 * preread budget allows and the queue is unplugged */
		if (list_empty(&conf->handle_list) &&
		    atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD &&
		    !blk_queue_plugged(mddev->queue) &&
		    !list_empty(&conf->delayed_list))
			raid6_activate_delayed(conf);

		if (list_empty(&conf->handle_list))
			break;

		first = conf->handle_list.next;
		sh = list_entry(first, struct stripe_head, lru);

		list_del_init(first);
		atomic_inc(&sh->count);
		BUG_ON(atomic_read(&sh->count)!= 1);
		spin_unlock_irq(&conf->device_lock);

		/* handle outside the lock; spare_page enables P/Q checks */
		handled++;
		handle_stripe(sh, conf->spare_page);
		release_stripe(sh);

		spin_lock_irq(&conf->device_lock);
	}
	PRINTK("%d stripes handled\n", handled);

	spin_unlock_irq(&conf->device_lock);

	unplug_slaves(mddev);

	PRINTK("--- raid6d inactive\n");
}
1918
1919static ssize_t
1920raid6_show_stripe_cache_size(mddev_t *mddev, char *page)
1921{
1922 raid6_conf_t *conf = mddev_to_conf(mddev);
1923 if (conf)
1924 return sprintf(page, "%d\n", conf->max_nr_stripes);
1925 else
1926 return 0;
1927}
1928
1929static ssize_t
1930raid6_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
1931{
1932 raid6_conf_t *conf = mddev_to_conf(mddev);
1933 char *end;
1934 int new;
1935 if (len >= PAGE_SIZE)
1936 return -EINVAL;
1937 if (!conf)
1938 return -ENODEV;
1939
1940 new = simple_strtoul(page, &end, 10);
1941 if (!*page || (*end && *end != '\n') )
1942 return -EINVAL;
1943 if (new <= 16 || new > 32768)
1944 return -EINVAL;
1945 while (new < conf->max_nr_stripes) {
1946 if (drop_one_stripe(conf))
1947 conf->max_nr_stripes--;
1948 else
1949 break;
1950 }
1951 while (new > conf->max_nr_stripes) {
1952 if (grow_one_stripe(conf))
1953 conf->max_nr_stripes++;
1954 else break;
1955 }
1956 return len;
1957}
1958
/* sysfs attribute "stripe_cache_size" (RW): read/resize the stripe cache. */
static struct md_sysfs_entry
raid6_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
				raid6_show_stripe_cache_size,
				raid6_store_stripe_cache_size);
1963
1964static ssize_t
1965stripe_cache_active_show(mddev_t *mddev, char *page)
1966{
1967 raid6_conf_t *conf = mddev_to_conf(mddev);
1968 if (conf)
1969 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
1970 else
1971 return 0;
1972}
1973
/* sysfs attribute "stripe_cache_active" (RO): in-use stripe count. */
static struct md_sysfs_entry
raid6_stripecache_active = __ATTR_RO(stripe_cache_active);

/* Attribute group attached to the array kobject by run(). */
static struct attribute *raid6_attrs[] = {
	&raid6_stripecache_size.attr,
	&raid6_stripecache_active.attr,
	NULL,
};
static struct attribute_group raid6_attrs_group = {
	.name = NULL,
	.attrs = raid6_attrs,
};
1986
/* Personality start-up: allocate and initialise the raid6_conf_t, bind
 * the member rdevs, validate the configuration (>= 4 disks, sane chunk
 * size/layout, at most 2 missing), start the raid6d thread, grow the
 * stripe cache and publish queue callbacks and sysfs attributes.
 * Returns 0 on success, -EIO on any failure (all partial allocations
 * are released at the 'abort' label).
 */
static int run(mddev_t *mddev)
{
	raid6_conf_t *conf;
	int raid_disk, memory;
	mdk_rdev_t *rdev;
	struct disk_info *disk;
	struct list_head *tmp;

	if (mddev->level != 6) {
		PRINTK("raid6: %s: raid level not set to 6 (%d)\n", mdname(mddev), mddev->level);
		return -EIO;
	}

	/* allocate conf and its per-disk / hash-table / spare-page storage */
	mddev->private = kzalloc(sizeof (raid6_conf_t), GFP_KERNEL);
	if ((conf = mddev->private) == NULL)
		goto abort;
	conf->disks = kzalloc(mddev->raid_disks * sizeof(struct disk_info),
			      GFP_KERNEL);
	if (!conf->disks)
		goto abort;

	conf->mddev = mddev;

	if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
		goto abort;

	/* scratch page used by handle_stripe for P/Q verification */
	conf->spare_page = alloc_page(GFP_KERNEL);
	if (!conf->spare_page)
		goto abort;

	spin_lock_init(&conf->device_lock);
	init_waitqueue_head(&conf->wait_for_stripe);
	init_waitqueue_head(&conf->wait_for_overlap);
	INIT_LIST_HEAD(&conf->handle_list);
	INIT_LIST_HEAD(&conf->delayed_list);
	INIT_LIST_HEAD(&conf->bitmap_list);
	INIT_LIST_HEAD(&conf->inactive_list);
	atomic_set(&conf->active_stripes, 0);
	atomic_set(&conf->preread_active_stripes, 0);

	PRINTK("raid6: run(%s) called.\n", mdname(mddev));

	/* bind each member rdev into its slot and count working disks */
	ITERATE_RDEV(mddev,rdev,tmp) {
		raid_disk = rdev->raid_disk;
		if (raid_disk >= mddev->raid_disks
		    || raid_disk < 0)
			continue;
		disk = conf->disks + raid_disk;

		disk->rdev = rdev;

		if (test_bit(In_sync, &rdev->flags)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_INFO "raid6: device %s operational as raid"
			       " disk %d\n", bdevname(rdev->bdev,b),
			       raid_disk);
			conf->working_disks++;
		}
	}

	conf->raid_disks = mddev->raid_disks;

	/*
	 * 0 for a fully functional array, 1 or 2 for a degraded array.
	 */
	mddev->degraded = conf->failed_disks = conf->raid_disks - conf->working_disks;
	conf->mddev = mddev;
	conf->chunk_size = mddev->chunk_size;
	conf->level = mddev->level;
	conf->algorithm = mddev->layout;
	conf->max_nr_stripes = NR_STRIPES;

	/* device size must be a multiple of chunk size */
	mddev->size &= ~(mddev->chunk_size/1024 -1);
	mddev->resync_max_sectors = mddev->size << 1;

	/* configuration sanity checks */
	if (conf->raid_disks < 4) {
		printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n",
		       mdname(mddev), conf->raid_disks);
		goto abort;
	}
	if (!conf->chunk_size || conf->chunk_size % 4) {
		printk(KERN_ERR "raid6: invalid chunk size %d for %s\n",
		       conf->chunk_size, mdname(mddev));
		goto abort;
	}
	if (conf->algorithm > ALGORITHM_RIGHT_SYMMETRIC) {
		printk(KERN_ERR
		       "raid6: unsupported parity algorithm %d for %s\n",
		       conf->algorithm, mdname(mddev));
		goto abort;
	}
	if (mddev->degraded > 2) {
		printk(KERN_ERR "raid6: not enough operational devices for %s"
		       " (%d/%d failed)\n",
		       mdname(mddev), conf->failed_disks, conf->raid_disks);
		goto abort;
	}

	/* a dirty, degraded array may contain undetectable corruption;
	 * refuse to start unless the admin opted in */
	if (mddev->degraded > 0 &&
	    mddev->recovery_cp != MaxSector) {
		if (mddev->ok_start_degraded)
			printk(KERN_WARNING "raid6: starting dirty degraded array:%s"
			       "- data corruption possible.\n",
			       mdname(mddev));
		else {
			printk(KERN_ERR "raid6: cannot start dirty degraded array"
			       " for %s\n", mdname(mddev));
			goto abort;
		}
	}

	{
		mddev->thread = md_register_thread(raid6d, mddev, "%s_raid6");
		if (!mddev->thread) {
			printk(KERN_ERR
			       "raid6: couldn't allocate thread for %s\n",
			       mdname(mddev));
			goto abort;
		}
	}

	/* pre-allocate the stripe cache */
	memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
		 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
	if (grow_stripes(conf, conf->max_nr_stripes)) {
		printk(KERN_ERR
		       "raid6: couldn't allocate %dkB for buffers\n", memory);
		shrink_stripes(conf);
		md_unregister_thread(mddev->thread);
		goto abort;
	} else
		printk(KERN_INFO "raid6: allocated %dkB for %s\n",
		       memory, mdname(mddev));

	if (mddev->degraded == 0)
		printk(KERN_INFO "raid6: raid level %d set %s active with %d out of %d"
		       " devices, algorithm %d\n", conf->level, mdname(mddev),
		       mddev->raid_disks-mddev->degraded, mddev->raid_disks,
		       conf->algorithm);
	else
		printk(KERN_ALERT "raid6: raid level %d set %s active with %d"
		       " out of %d devices, algorithm %d\n", conf->level,
		       mdname(mddev), mddev->raid_disks - mddev->degraded,
		       mddev->raid_disks, conf->algorithm);

	print_raid6_conf(conf);

	/* read-ahead size must cover two whole stripes, which is
	 * 2 * (n-2) * chunksize where 'n' is the number of raid devices
	 */
	{
		int stripe = (mddev->raid_disks-2) * mddev->chunk_size
			/ PAGE_SIZE;
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
	}

	/* Ok, everything is just fine now */
	sysfs_create_group(&mddev->kobj, &raid6_attrs_group);

	mddev->array_size =  mddev->size * (mddev->raid_disks - 2);

	mddev->queue->unplug_fn = raid6_unplug_device;
	mddev->queue->issue_flush_fn = raid6_issue_flush;
	return 0;
abort:
	if (conf) {
		print_raid6_conf(conf);
		safe_put_page(conf->spare_page);
		kfree(conf->stripe_hashtbl);
		kfree(conf->disks);
		kfree(conf);
	}
	mddev->private = NULL;
	printk(KERN_ALERT "raid6: failed to run raid set %s\n", mdname(mddev));
	return -EIO;
}
2164
2165
2166
/* Personality shutdown: stop the daemon, free the stripe cache and conf.
 * blk_sync_queue() must run before conf is freed because the queue's
 * unplug callback dereferences it.
 */
static int stop (mddev_t *mddev)
{
	raid6_conf_t *conf = (raid6_conf_t *) mddev->private;

	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
	shrink_stripes(conf);
	kfree(conf->stripe_hashtbl);
	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
	sysfs_remove_group(&mddev->kobj, &raid6_attrs_group);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}
2181
2182#if RAID6_DUMPSTATE
2183static void print_sh (struct seq_file *seq, struct stripe_head *sh)
2184{
2185 int i;
2186
2187 seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n",
2188 (unsigned long long)sh->sector, sh->pd_idx, sh->state);
2189 seq_printf(seq, "sh %llu, count %d.\n",
2190 (unsigned long long)sh->sector, atomic_read(&sh->count));
2191 seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector);
2192 for (i = 0; i < sh->raid_conf->raid_disks; i++) {
2193 seq_printf(seq, "(cache%d: %p %ld) ",
2194 i, sh->dev[i].page, sh->dev[i].flags);
2195 }
2196 seq_printf(seq, "\n");
2197}
2198
2199static void printall (struct seq_file *seq, raid6_conf_t *conf)
2200{
2201 struct stripe_head *sh;
2202 struct hlist_node *hn;
2203 int i;
2204
2205 spin_lock_irq(&conf->device_lock);
2206 for (i = 0; i < NR_HASH; i++) {
2207 sh = conf->stripe_hashtbl[i];
2208 hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
2209 if (sh->raid_conf != conf)
2210 continue;
2211 print_sh(seq, sh);
2212 }
2213 }
2214 spin_unlock_irq(&conf->device_lock);
2215}
2216#endif
2217
2218static void status (struct seq_file *seq, mddev_t *mddev)
2219{
2220 raid6_conf_t *conf = (raid6_conf_t *) mddev->private;
2221 int i;
2222
2223 seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout);
2224 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->working_disks);
2225 for (i = 0; i < conf->raid_disks; i++)
2226 seq_printf (seq, "%s",
2227 conf->disks[i].rdev &&
2228 test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
2229 seq_printf (seq, "]");
2230#if RAID6_DUMPSTATE
2231 seq_printf (seq, "\n");
2232 printall(seq, conf);
2233#endif
2234}
2235
2236static void print_raid6_conf (raid6_conf_t *conf)
2237{
2238 int i;
2239 struct disk_info *tmp;
2240
2241 printk("RAID6 conf printout:\n");
2242 if (!conf) {
2243 printk("(conf==NULL)\n");
2244 return;
2245 }
2246 printk(" --- rd:%d wd:%d fd:%d\n", conf->raid_disks,
2247 conf->working_disks, conf->failed_disks);
2248
2249 for (i = 0; i < conf->raid_disks; i++) {
2250 char b[BDEVNAME_SIZE];
2251 tmp = conf->disks + i;
2252 if (tmp->rdev)
2253 printk(" disk %d, o:%d, dev:%s\n",
2254 i, !test_bit(Faulty, &tmp->rdev->flags),
2255 bdevname(tmp->rdev->bdev,b));
2256 }
2257}
2258
2259static int raid6_spare_active(mddev_t *mddev)
2260{
2261 int i;
2262 raid6_conf_t *conf = mddev->private;
2263 struct disk_info *tmp;
2264
2265 for (i = 0; i < conf->raid_disks; i++) {
2266 tmp = conf->disks + i;
2267 if (tmp->rdev
2268 && !test_bit(Faulty, &tmp->rdev->flags)
2269 && !test_bit(In_sync, &tmp->rdev->flags)) {
2270 mddev->degraded--;
2271 conf->failed_disks--;
2272 conf->working_disks++;
2273 set_bit(In_sync, &tmp->rdev->flags);
2274 }
2275 }
2276 print_raid6_conf(conf);
2277 return 0;
2278}
2279
/* Hot-remove member 'number'.  Refuses (-EBUSY) while the device is
 * In_sync or has I/O pending; otherwise clears the slot, waits a grace
 * period so RCU readers drop their references, and re-binds the rdev if
 * new I/O slipped in meanwhile.
 */
static int raid6_remove_disk(mddev_t *mddev, int number)
{
	raid6_conf_t *conf = mddev->private;
	int err = 0;
	mdk_rdev_t *rdev;
	struct disk_info *p = conf->disks + number;

	print_raid6_conf(conf);
	rdev = p->rdev;
	if (rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		/* wait for concurrent rcu_dereference() users to finish */
		synchronize_rcu();
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
			err = -EBUSY;
			p->rdev = rdev;
		}
	}

abort:

	print_raid6_conf(conf);
	return err;
}
2309
/* Hot-add 'rdev' as a recovery target.  Returns 1 if a slot was found
 * and bound, 0 otherwise.  Prefers the device's previous slot so a
 * bitmap-based partial resync remains possible; any other slot forces a
 * full resync via conf->fullsync.
 */
static int raid6_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
	raid6_conf_t *conf = mddev->private;
	int found = 0;
	int disk;
	struct disk_info *p;

	if (mddev->degraded > 2)
		/* no point adding a device */
		return 0;
	/*
	 * find the disk ... but prefer rdev->saved_raid_disk
	 * if possible.
	 */
	if (rdev->saved_raid_disk >= 0 &&
	    conf->disks[rdev->saved_raid_disk].rdev == NULL)
		disk = rdev->saved_raid_disk;
	else
		disk = 0;
	for ( ; disk < mddev->raid_disks; disk++)
		if ((p=conf->disks + disk)->rdev == NULL) {
			clear_bit(In_sync, &rdev->flags);
			rdev->raid_disk = disk;
			found = 1;
			if (rdev->saved_raid_disk != disk)
				conf->fullsync = 1;
			/* publish the binding for RCU readers */
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}
	print_raid6_conf(conf);
	return found;
}
2342
/* Resize the array to use 'sectors' per member device.  Recomputes the
 * exported array size (data disks = raid_disks - 2) and, when growing,
 * schedules a resync over the new space.
 */
static int raid6_resize(mddev_t *mddev, sector_t sectors)
{
	/* no resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems
	 * worth it.
	 */
	/* round down to a whole number of chunks */
	sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
	mddev->array_size = (sectors * (mddev->raid_disks-2))>>1;
	set_capacity(mddev->gendisk, mddev->array_size << 1);
	mddev->changed = 1;
	if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) {
		/* grew: resync must cover the newly exposed region */
		mddev->recovery_cp = mddev->size << 1;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->size = sectors /2;
	mddev->resync_max_sectors = sectors;
	return 0;
}
2364
/* Quiesce control: state 1 blocks new writes and waits (with device_lock
 * released while sleeping) until no stripes are active; state 0 lifts the
 * block and wakes any waiters.
 */
static void raid6_quiesce(mddev_t *mddev, int state)
{
	raid6_conf_t *conf = mddev_to_conf(mddev);

	switch(state) {
	case 1: /* stop all writes */
		spin_lock_irq(&conf->device_lock);
		conf->quiesce = 1;
		wait_event_lock_irq(conf->wait_for_stripe,
				    atomic_read(&conf->active_stripes) == 0,
				    conf->device_lock, /* nothing */);
		spin_unlock_irq(&conf->device_lock);
		break;

	case 0: /* re-enable writes */
		spin_lock_irq(&conf->device_lock);
		conf->quiesce = 0;
		wake_up(&conf->wait_for_stripe);
		spin_unlock_irq(&conf->device_lock);
		break;
	}
}
2387
/* md personality descriptor: the operations table registered with the
 * md core for level-6 arrays.
 */
static struct mdk_personality raid6_personality =
{
	.name		= "raid6",
	.level		= 6,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid6_add_disk,
	.hot_remove_disk= raid6_remove_disk,
	.spare_active	= raid6_spare_active,
	.sync_request	= sync_request,
	.resize		= raid6_resize,
	.quiesce	= raid6_quiesce,
};
2405
2406static int __init raid6_init(void)
2407{
2408 int e;
2409
2410 e = raid6_select_algo();
2411 if ( e )
2412 return e;
2413
2414 return register_md_personality(&raid6_personality);
2415}
2416
2417static void raid6_exit (void)
2418{
2419 unregister_md_personality(&raid6_personality);
2420}
2421
/* Module boilerplate.  The numeric alias keeps legacy "personality 8"
 * modprobe requests working alongside the name- and level-based aliases.
 */
module_init(raid6_init);
module_exit(raid6_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-8"); /* RAID6 */
MODULE_ALIAS("md-raid6");
MODULE_ALIAS("md-level-6");
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 583d151b74..ef52e6da01 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -82,9 +82,6 @@ config VIDEO_IR
82config VIDEO_TVEEPROM 82config VIDEO_TVEEPROM
83 tristate 83 tristate
84 84
85config VIDEO_CX2341X
86 tristate
87
88config USB_DABUSB 85config USB_DABUSB
89 tristate "DABUSB driver" 86 tristate "DABUSB driver"
90 depends on USB 87 depends on USB
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index 3152a54a25..5e8bb41a08 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -556,22 +556,23 @@ static int dvb_frontend_thread(void *data)
556 } 556 }
557 557
558 /* do an iteration of the tuning loop */ 558 /* do an iteration of the tuning loop */
559 if (fe->ops.get_frontend_algo(fe) == FE_ALGO_HW) { 559 if (fe->ops.get_frontend_algo) {
560 /* have we been asked to retune? */ 560 if (fe->ops.get_frontend_algo(fe) == FE_ALGO_HW) {
561 params = NULL; 561 /* have we been asked to retune? */
562 if (fepriv->state & FESTATE_RETUNE) { 562 params = NULL;
563 params = &fepriv->parameters; 563 if (fepriv->state & FESTATE_RETUNE) {
564 fepriv->state = FESTATE_TUNED; 564 params = &fepriv->parameters;
565 } 565 fepriv->state = FESTATE_TUNED;
566 }
566 567
567 fe->ops.tune(fe, params, fepriv->tune_mode_flags, &fepriv->delay, &s); 568 fe->ops.tune(fe, params, fepriv->tune_mode_flags, &fepriv->delay, &s);
568 if (s != fepriv->status) { 569 if (s != fepriv->status) {
569 dvb_frontend_add_event(fe, s); 570 dvb_frontend_add_event(fe, s);
570 fepriv->status = s; 571 fepriv->status = s;
572 }
571 } 573 }
572 } else { 574 } else
573 dvb_frontend_swzigzag(fe); 575 dvb_frontend_swzigzag(fe);
574 }
575 } 576 }
576 577
577 if (dvb_shutdown_timeout) { 578 if (dvb_shutdown_timeout) {
diff --git a/drivers/media/dvb/ttpci/av7110.c b/drivers/media/dvb/ttpci/av7110.c
index 8832f80c05..7a5c99c200 100644
--- a/drivers/media/dvb/ttpci/av7110.c
+++ b/drivers/media/dvb/ttpci/av7110.c
@@ -152,13 +152,9 @@ static void init_av7110_av(struct av7110 *av7110)
152 /* remaining inits according to card and frontend type */ 152 /* remaining inits according to card and frontend type */
153 av7110->analog_tuner_flags = 0; 153 av7110->analog_tuner_flags = 0;
154 av7110->current_input = 0; 154 av7110->current_input = 0;
155 if (dev->pci->subsystem_vendor == 0x13c2 && dev->pci->subsystem_device == 0x000a) { 155 if (dev->pci->subsystem_vendor == 0x13c2 && dev->pci->subsystem_device == 0x000a)
156 printk("dvb-ttpci: MSP3415 audio DAC @ card %d\n",
157 av7110->dvb_adapter.num);
158 av7110->adac_type = DVB_ADAC_MSP34x5;
159 av7110_fw_cmd(av7110, COMTYPE_AUDIODAC, ADSwitch, 1, 0); // SPDIF on 156 av7110_fw_cmd(av7110, COMTYPE_AUDIODAC, ADSwitch, 1, 0); // SPDIF on
160 } 157 if (i2c_writereg(av7110, 0x20, 0x00, 0x00) == 1) {
161 else if (i2c_writereg(av7110, 0x20, 0x00, 0x00) == 1) {
162 printk ("dvb-ttpci: Crystal audio DAC @ card %d detected\n", 158 printk ("dvb-ttpci: Crystal audio DAC @ card %d detected\n",
163 av7110->dvb_adapter.num); 159 av7110->dvb_adapter.num);
164 av7110->adac_type = DVB_ADAC_CRYSTAL; 160 av7110->adac_type = DVB_ADAC_CRYSTAL;
diff --git a/drivers/media/dvb/ttpci/av7110_av.c b/drivers/media/dvb/ttpci/av7110_av.c
index 2eff09f638..0f3a044aeb 100644
--- a/drivers/media/dvb/ttpci/av7110_av.c
+++ b/drivers/media/dvb/ttpci/av7110_av.c
@@ -318,7 +318,17 @@ int av7110_set_volume(struct av7110 *av7110, int volleft, int volright)
318 msp_writereg(av7110, MSP_WR_DSP, 0x0000, val); /* loudspeaker */ 318 msp_writereg(av7110, MSP_WR_DSP, 0x0000, val); /* loudspeaker */
319 msp_writereg(av7110, MSP_WR_DSP, 0x0006, val); /* headphonesr */ 319 msp_writereg(av7110, MSP_WR_DSP, 0x0006, val); /* headphonesr */
320 return 0; 320 return 0;
321
322 case DVB_ADAC_MSP34x5:
323 vol = (volleft > volright) ? volleft : volright;
324 val = (vol * 0x73 / 255) << 8;
325 if (vol > 0)
326 balance = ((volright - volleft) * 127) / vol;
327 msp_writereg(av7110, MSP_WR_DSP, 0x0001, balance << 8);
328 msp_writereg(av7110, MSP_WR_DSP, 0x0000, val); /* loudspeaker */
329 return 0;
321 } 330 }
331
322 return 0; 332 return 0;
323} 333}
324 334
@@ -1267,23 +1277,32 @@ static int dvb_audio_ioctl(struct inode *inode, struct file *file,
1267 switch(av7110->audiostate.channel_select) { 1277 switch(av7110->audiostate.channel_select) {
1268 case AUDIO_STEREO: 1278 case AUDIO_STEREO:
1269 ret = audcom(av7110, AUDIO_CMD_STEREO); 1279 ret = audcom(av7110, AUDIO_CMD_STEREO);
1270 if (!ret) 1280 if (!ret) {
1271 if (av7110->adac_type == DVB_ADAC_CRYSTAL) 1281 if (av7110->adac_type == DVB_ADAC_CRYSTAL)
1272 i2c_writereg(av7110, 0x20, 0x02, 0x49); 1282 i2c_writereg(av7110, 0x20, 0x02, 0x49);
1283 else if (av7110->adac_type == DVB_ADAC_MSP34x5)
1284 msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0220);
1285 }
1273 break; 1286 break;
1274 1287
1275 case AUDIO_MONO_LEFT: 1288 case AUDIO_MONO_LEFT:
1276 ret = audcom(av7110, AUDIO_CMD_MONO_L); 1289 ret = audcom(av7110, AUDIO_CMD_MONO_L);
1277 if (!ret) 1290 if (!ret) {
1278 if (av7110->adac_type == DVB_ADAC_CRYSTAL) 1291 if (av7110->adac_type == DVB_ADAC_CRYSTAL)
1279 i2c_writereg(av7110, 0x20, 0x02, 0x4a); 1292 i2c_writereg(av7110, 0x20, 0x02, 0x4a);
1293 else if (av7110->adac_type == DVB_ADAC_MSP34x5)
1294 msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0200);
1295 }
1280 break; 1296 break;
1281 1297
1282 case AUDIO_MONO_RIGHT: 1298 case AUDIO_MONO_RIGHT:
1283 ret = audcom(av7110, AUDIO_CMD_MONO_R); 1299 ret = audcom(av7110, AUDIO_CMD_MONO_R);
1284 if (!ret) 1300 if (!ret) {
1285 if (av7110->adac_type == DVB_ADAC_CRYSTAL) 1301 if (av7110->adac_type == DVB_ADAC_CRYSTAL)
1286 i2c_writereg(av7110, 0x20, 0x02, 0x45); 1302 i2c_writereg(av7110, 0x20, 0x02, 0x45);
1303 else if (av7110->adac_type == DVB_ADAC_MSP34x5)
1304 msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0210);
1305 }
1287 break; 1306 break;
1288 1307
1289 default: 1308 default:
diff --git a/drivers/media/dvb/ttpci/av7110_v4l.c b/drivers/media/dvb/ttpci/av7110_v4l.c
index 603a22e4bf..6405546155 100644
--- a/drivers/media/dvb/ttpci/av7110_v4l.c
+++ b/drivers/media/dvb/ttpci/av7110_v4l.c
@@ -42,7 +42,18 @@
42int msp_writereg(struct av7110 *av7110, u8 dev, u16 reg, u16 val) 42int msp_writereg(struct av7110 *av7110, u8 dev, u16 reg, u16 val)
43{ 43{
44 u8 msg[5] = { dev, reg >> 8, reg & 0xff, val >> 8 , val & 0xff }; 44 u8 msg[5] = { dev, reg >> 8, reg & 0xff, val >> 8 , val & 0xff };
45 struct i2c_msg msgs = { .flags = 0, .addr = 0x40, .len = 5, .buf = msg }; 45 struct i2c_msg msgs = { .flags = 0, .len = 5, .buf = msg };
46
47 switch (av7110->adac_type) {
48 case DVB_ADAC_MSP34x0:
49 msgs.addr = 0x40;
50 break;
51 case DVB_ADAC_MSP34x5:
52 msgs.addr = 0x42;
53 break;
54 default:
55 return 0;
56 }
46 57
47 if (i2c_transfer(&av7110->i2c_adap, &msgs, 1) != 1) { 58 if (i2c_transfer(&av7110->i2c_adap, &msgs, 1) != 1) {
48 dprintk(1, "dvb-ttpci: failed @ card %d, %u = %u\n", 59 dprintk(1, "dvb-ttpci: failed @ card %d, %u = %u\n",
@@ -57,10 +68,23 @@ static int msp_readreg(struct av7110 *av7110, u8 dev, u16 reg, u16 *val)
57 u8 msg1[3] = { dev, reg >> 8, reg & 0xff }; 68 u8 msg1[3] = { dev, reg >> 8, reg & 0xff };
58 u8 msg2[2]; 69 u8 msg2[2];
59 struct i2c_msg msgs[2] = { 70 struct i2c_msg msgs[2] = {
60 { .flags = 0, .addr = 0x40, .len = 3, .buf = msg1 }, 71 { .flags = 0 , .len = 3, .buf = msg1 },
61 { .flags = I2C_M_RD, .addr = 0x40, .len = 2, .buf = msg2 } 72 { .flags = I2C_M_RD, .len = 2, .buf = msg2 }
62 }; 73 };
63 74
75 switch (av7110->adac_type) {
76 case DVB_ADAC_MSP34x0:
77 msgs[0].addr = 0x40;
78 msgs[1].addr = 0x40;
79 break;
80 case DVB_ADAC_MSP34x5:
81 msgs[0].addr = 0x42;
82 msgs[1].addr = 0x42;
83 break;
84 default:
85 return 0;
86 }
87
64 if (i2c_transfer(&av7110->i2c_adap, &msgs[0], 2) != 2) { 88 if (i2c_transfer(&av7110->i2c_adap, &msgs[0], 2) != 2) {
65 dprintk(1, "dvb-ttpci: failed @ card %d, %u\n", 89 dprintk(1, "dvb-ttpci: failed @ card %d, %u\n",
66 av7110->dvb_adapter.num, reg); 90 av7110->dvb_adapter.num, reg);
@@ -678,17 +702,23 @@ int av7110_init_analog_module(struct av7110 *av7110)
678{ 702{
679 u16 version1, version2; 703 u16 version1, version2;
680 704
681 if (i2c_writereg(av7110, 0x80, 0x0, 0x80) != 1 705 if (i2c_writereg(av7110, 0x80, 0x0, 0x80) == 1 &&
682 || i2c_writereg(av7110, 0x80, 0x0, 0) != 1) 706 i2c_writereg(av7110, 0x80, 0x0, 0) == 1) {
707 printk("dvb-ttpci: DVB-C analog module @ card %d detected, initializing MSP3400\n",
708 av7110->dvb_adapter.num);
709 av7110->adac_type = DVB_ADAC_MSP34x0;
710 } else if (i2c_writereg(av7110, 0x84, 0x0, 0x80) == 1 &&
711 i2c_writereg(av7110, 0x84, 0x0, 0) == 1) {
712 printk("dvb-ttpci: DVB-C analog module @ card %d detected, initializing MSP3415\n",
713 av7110->dvb_adapter.num);
714 av7110->adac_type = DVB_ADAC_MSP34x5;
715 } else
683 return -ENODEV; 716 return -ENODEV;
684 717
685 printk("dvb-ttpci: DVB-C analog module @ card %d detected, initializing MSP3400\n",
686 av7110->dvb_adapter.num);
687 av7110->adac_type = DVB_ADAC_MSP34x0;
688 msleep(100); // the probing above resets the msp... 718 msleep(100); // the probing above resets the msp...
689 msp_readreg(av7110, MSP_RD_DSP, 0x001e, &version1); 719 msp_readreg(av7110, MSP_RD_DSP, 0x001e, &version1);
690 msp_readreg(av7110, MSP_RD_DSP, 0x001f, &version2); 720 msp_readreg(av7110, MSP_RD_DSP, 0x001f, &version2);
691 dprintk(1, "dvb-ttpci: @ card %d MSP3400 version 0x%04x 0x%04x\n", 721 dprintk(1, "dvb-ttpci: @ card %d MSP34xx version 0x%04x 0x%04x\n",
692 av7110->dvb_adapter.num, version1, version2); 722 av7110->dvb_adapter.num, version1, version2);
693 msp_writereg(av7110, MSP_WR_DSP, 0x0013, 0x0c00); 723 msp_writereg(av7110, MSP_WR_DSP, 0x0013, 0x0c00);
694 msp_writereg(av7110, MSP_WR_DSP, 0x0000, 0x7f00); // loudspeaker + headphone 724 msp_writereg(av7110, MSP_WR_DSP, 0x0000, 0x7f00); // loudspeaker + headphone
@@ -697,7 +727,7 @@ int av7110_init_analog_module(struct av7110 *av7110)
697 msp_writereg(av7110, MSP_WR_DSP, 0x0004, 0x7f00); // loudspeaker volume 727 msp_writereg(av7110, MSP_WR_DSP, 0x0004, 0x7f00); // loudspeaker volume
698 msp_writereg(av7110, MSP_WR_DSP, 0x000a, 0x0220); // SCART 1 source 728 msp_writereg(av7110, MSP_WR_DSP, 0x000a, 0x0220); // SCART 1 source
699 msp_writereg(av7110, MSP_WR_DSP, 0x0007, 0x7f00); // SCART 1 volume 729 msp_writereg(av7110, MSP_WR_DSP, 0x0007, 0x7f00); // SCART 1 volume
700 msp_writereg(av7110, MSP_WR_DSP, 0x000d, 0x4800); // prescale SCART 730 msp_writereg(av7110, MSP_WR_DSP, 0x000d, 0x1900); // prescale SCART
701 731
702 if (i2c_writereg(av7110, 0x48, 0x01, 0x00)!=1) { 732 if (i2c_writereg(av7110, 0x48, 0x01, 0x00)!=1) {
703 INFO(("saa7113 not accessible.\n")); 733 INFO(("saa7113 not accessible.\n"));
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 824a63c926..6d532f170c 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -381,6 +381,18 @@ config VIDEO_WM8739
381 To compile this driver as a module, choose M here: the 381 To compile this driver as a module, choose M here: the
382 module will be called wm8739. 382 module will be called wm8739.
383 383
384config VIDEO_CX2341X
385 tristate "Conexant CX2341x MPEG encoders"
386 depends on VIDEO_V4L2 && EXPERIMENTAL
387 ---help---
388 Support for the Conexant CX23416 MPEG encoders
389 and CX23415 MPEG encoder/decoders.
390
391 This module currently supports the encoding functions only.
392
393 To compile this driver as a module, choose M here: the
394 module will be called cx2341x.
395
384source "drivers/media/video/cx25840/Kconfig" 396source "drivers/media/video/cx25840/Kconfig"
385 397
386config VIDEO_SAA711X 398config VIDEO_SAA711X
@@ -433,6 +445,8 @@ endmenu # encoder / decoder chips
433menu "V4L USB devices" 445menu "V4L USB devices"
434 depends on USB && VIDEO_DEV 446 depends on USB && VIDEO_DEV
435 447
448source "drivers/media/video/pvrusb2/Kconfig"
449
436source "drivers/media/video/em28xx/Kconfig" 450source "drivers/media/video/em28xx/Kconfig"
437 451
438config USB_DSBR 452config USB_DSBR
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 6c401b4639..353d61cfac 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -47,6 +47,7 @@ obj-$(CONFIG_VIDEO_SAA7134) += ir-kbd-i2c.o saa7134/
47obj-$(CONFIG_VIDEO_CX88) += cx88/ 47obj-$(CONFIG_VIDEO_CX88) += cx88/
48obj-$(CONFIG_VIDEO_EM28XX) += em28xx/ 48obj-$(CONFIG_VIDEO_EM28XX) += em28xx/
49obj-$(CONFIG_VIDEO_EM28XX) += tvp5150.o 49obj-$(CONFIG_VIDEO_EM28XX) += tvp5150.o
50obj-$(CONFIG_VIDEO_PVRUSB2) += pvrusb2/
50obj-$(CONFIG_VIDEO_MSP3400) += msp3400.o 51obj-$(CONFIG_VIDEO_MSP3400) += msp3400.o
51obj-$(CONFIG_VIDEO_CS53L32A) += cs53l32a.o 52obj-$(CONFIG_VIDEO_CS53L32A) += cs53l32a.o
52obj-$(CONFIG_VIDEO_TLV320AIC23B) += tlv320aic23b.o 53obj-$(CONFIG_VIDEO_TLV320AIC23B) += tlv320aic23b.o
diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c
index 3116345c93..e68a6d2fff 100644
--- a/drivers/media/video/bt8xx/bttv-cards.c
+++ b/drivers/media/video/bt8xx/bttv-cards.c
@@ -4848,7 +4848,7 @@ static void picolo_tetra_muxsel (struct bttv* btv, unsigned int input)
4848 * 4848 *
4849 * The IVC120G security card has 4 i2c controlled TDA8540 matrix 4849 * The IVC120G security card has 4 i2c controlled TDA8540 matrix
4850 * swichers to provide 16 channels to MUX0. The TDA8540's have 4850 * swichers to provide 16 channels to MUX0. The TDA8540's have
4851 * 4 indepedant outputs and as such the IVC120G also has the 4851 * 4 independent outputs and as such the IVC120G also has the
4852 * optional "Monitor Out" bus. This allows the card to be looking 4852 * optional "Monitor Out" bus. This allows the card to be looking
4853 * at one input while the monitor is looking at another. 4853 * at one input while the monitor is looking at another.
4854 * 4854 *
diff --git a/drivers/media/video/cx2341x.c b/drivers/media/video/cx2341x.c
index 554813e6f6..65f00fc08f 100644
--- a/drivers/media/video/cx2341x.c
+++ b/drivers/media/video/cx2341x.c
@@ -43,6 +43,7 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)");
43const u32 cx2341x_mpeg_ctrls[] = { 43const u32 cx2341x_mpeg_ctrls[] = {
44 V4L2_CID_MPEG_CLASS, 44 V4L2_CID_MPEG_CLASS,
45 V4L2_CID_MPEG_STREAM_TYPE, 45 V4L2_CID_MPEG_STREAM_TYPE,
46 V4L2_CID_MPEG_STREAM_VBI_FMT,
46 V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ, 47 V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ,
47 V4L2_CID_MPEG_AUDIO_ENCODING, 48 V4L2_CID_MPEG_AUDIO_ENCODING,
48 V4L2_CID_MPEG_AUDIO_L2_BITRATE, 49 V4L2_CID_MPEG_AUDIO_L2_BITRATE,
@@ -135,6 +136,9 @@ static int cx2341x_get_ctrl(struct cx2341x_mpeg_params *params,
135 case V4L2_CID_MPEG_STREAM_TYPE: 136 case V4L2_CID_MPEG_STREAM_TYPE:
136 ctrl->value = params->stream_type; 137 ctrl->value = params->stream_type;
137 break; 138 break;
139 case V4L2_CID_MPEG_STREAM_VBI_FMT:
140 ctrl->value = params->stream_vbi_fmt;
141 break;
138 case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: 142 case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE:
139 ctrl->value = params->video_spatial_filter_mode; 143 ctrl->value = params->video_spatial_filter_mode;
140 break; 144 break;
@@ -257,6 +261,9 @@ static int cx2341x_set_ctrl(struct cx2341x_mpeg_params *params,
257 params->video_bitrate_mode = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR; 261 params->video_bitrate_mode = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR;
258 } 262 }
259 break; 263 break;
264 case V4L2_CID_MPEG_STREAM_VBI_FMT:
265 params->stream_vbi_fmt = ctrl->value;
266 break;
260 case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: 267 case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE:
261 params->video_spatial_filter_mode = ctrl->value; 268 params->video_spatial_filter_mode = ctrl->value;
262 break; 269 break;
@@ -418,6 +425,14 @@ int cx2341x_ctrl_query(struct cx2341x_mpeg_params *params, struct v4l2_queryctrl
418 qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; 425 qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
419 return err; 426 return err;
420 427
428 case V4L2_CID_MPEG_STREAM_VBI_FMT:
429 if (params->capabilities & CX2341X_CAP_HAS_SLICED_VBI)
430 return v4l2_ctrl_query_fill_std(qctrl);
431 return cx2341x_ctrl_query_fill(qctrl,
432 V4L2_MPEG_STREAM_VBI_FMT_NONE,
433 V4L2_MPEG_STREAM_VBI_FMT_NONE, 1,
434 V4L2_MPEG_STREAM_VBI_FMT_NONE);
435
421 /* CX23415/6 specific */ 436 /* CX23415/6 specific */
422 case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: 437 case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE:
423 return cx2341x_ctrl_query_fill(qctrl, 438 return cx2341x_ctrl_query_fill(qctrl,
@@ -586,7 +601,7 @@ static void cx2341x_calc_audio_properties(struct cx2341x_mpeg_params *params)
586} 601}
587 602
588int cx2341x_ext_ctrls(struct cx2341x_mpeg_params *params, 603int cx2341x_ext_ctrls(struct cx2341x_mpeg_params *params,
589 struct v4l2_ext_controls *ctrls, int cmd) 604 struct v4l2_ext_controls *ctrls, unsigned int cmd)
590{ 605{
591 int err = 0; 606 int err = 0;
592 int i; 607 int i;
@@ -639,6 +654,7 @@ void cx2341x_fill_defaults(struct cx2341x_mpeg_params *p)
639{ 654{
640 static struct cx2341x_mpeg_params default_params = { 655 static struct cx2341x_mpeg_params default_params = {
641 /* misc */ 656 /* misc */
657 .capabilities = 0,
642 .port = CX2341X_PORT_MEMORY, 658 .port = CX2341X_PORT_MEMORY,
643 .width = 720, 659 .width = 720,
644 .height = 480, 660 .height = 480,
@@ -646,6 +662,7 @@ void cx2341x_fill_defaults(struct cx2341x_mpeg_params *p)
646 662
647 /* stream */ 663 /* stream */
648 .stream_type = V4L2_MPEG_STREAM_TYPE_MPEG2_PS, 664 .stream_type = V4L2_MPEG_STREAM_TYPE_MPEG2_PS,
665 .stream_vbi_fmt = V4L2_MPEG_STREAM_VBI_FMT_NONE,
649 666
650 /* audio */ 667 /* audio */
651 .audio_sampling_freq = V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000, 668 .audio_sampling_freq = V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000,
@@ -830,22 +847,22 @@ invalid:
830 return "<invalid>"; 847 return "<invalid>";
831} 848}
832 849
833void cx2341x_log_status(struct cx2341x_mpeg_params *p, int card_id) 850void cx2341x_log_status(struct cx2341x_mpeg_params *p, const char *prefix)
834{ 851{
835 int is_mpeg1 = p->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1; 852 int is_mpeg1 = p->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1;
836 853
837 /* Stream */ 854 /* Stream */
838 printk(KERN_INFO "cx2341x-%d: Stream: %s\n", 855 printk(KERN_INFO "%s: Stream: %s\n",
839 card_id, 856 prefix,
840 cx2341x_menu_item(p, V4L2_CID_MPEG_STREAM_TYPE)); 857 cx2341x_menu_item(p, V4L2_CID_MPEG_STREAM_TYPE));
841 858
842 /* Video */ 859 /* Video */
843 printk(KERN_INFO "cx2341x-%d: Video: %dx%d, %d fps\n", 860 printk(KERN_INFO "%s: Video: %dx%d, %d fps\n",
844 card_id, 861 prefix,
845 p->width / (is_mpeg1 ? 2 : 1), p->height / (is_mpeg1 ? 2 : 1), 862 p->width / (is_mpeg1 ? 2 : 1), p->height / (is_mpeg1 ? 2 : 1),
846 p->is_50hz ? 25 : 30); 863 p->is_50hz ? 25 : 30);
847 printk(KERN_INFO "cx2341x-%d: Video: %s, %s, %s, %d", 864 printk(KERN_INFO "%s: Video: %s, %s, %s, %d",
848 card_id, 865 prefix,
849 cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_ENCODING), 866 cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_ENCODING),
850 cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_ASPECT), 867 cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_ASPECT),
851 cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_BITRATE_MODE), 868 cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_BITRATE_MODE),
@@ -854,19 +871,19 @@ void cx2341x_log_status(struct cx2341x_mpeg_params *p, int card_id)
854 printk(", Peak %d", p->video_bitrate_peak); 871 printk(", Peak %d", p->video_bitrate_peak);
855 } 872 }
856 printk("\n"); 873 printk("\n");
857 printk(KERN_INFO "cx2341x-%d: Video: GOP Size %d, %d B-Frames, %sGOP Closure, %s3:2 Pulldown\n", 874 printk(KERN_INFO "%s: Video: GOP Size %d, %d B-Frames, %sGOP Closure, %s3:2 Pulldown\n",
858 card_id, 875 prefix,
859 p->video_gop_size, p->video_b_frames, 876 p->video_gop_size, p->video_b_frames,
860 p->video_gop_closure ? "" : "No ", 877 p->video_gop_closure ? "" : "No ",
861 p->video_pulldown ? "" : "No "); 878 p->video_pulldown ? "" : "No ");
862 if (p->video_temporal_decimation) { 879 if (p->video_temporal_decimation) {
863 printk(KERN_INFO "cx2341x-%d: Video: Temporal Decimation %d\n", 880 printk(KERN_INFO "%s: Video: Temporal Decimation %d\n",
864 card_id, p->video_temporal_decimation); 881 prefix, p->video_temporal_decimation);
865 } 882 }
866 883
867 /* Audio */ 884 /* Audio */
868 printk(KERN_INFO "cx2341x-%d: Audio: %s, %s, %s, %s", 885 printk(KERN_INFO "%s: Audio: %s, %s, %s, %s",
869 card_id, 886 prefix,
870 cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ), 887 cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ),
871 cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_ENCODING), 888 cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_ENCODING),
872 cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_L2_BITRATE), 889 cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_L2_BITRATE),
@@ -880,18 +897,18 @@ void cx2341x_log_status(struct cx2341x_mpeg_params *p, int card_id)
880 cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_CRC)); 897 cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_CRC));
881 898
882 /* Encoding filters */ 899 /* Encoding filters */
883 printk(KERN_INFO "cx2341x-%d: Spatial Filter: %s, Luma %s, Chroma %s, %d\n", 900 printk(KERN_INFO "%s: Spatial Filter: %s, Luma %s, Chroma %s, %d\n",
884 card_id, 901 prefix,
885 cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE), 902 cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE),
886 cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE), 903 cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE),
887 cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE), 904 cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE),
888 p->video_spatial_filter); 905 p->video_spatial_filter);
889 printk(KERN_INFO "cx2341x-%d: Temporal Filter: %s, %d\n", 906 printk(KERN_INFO "%s: Temporal Filter: %s, %d\n",
890 card_id, 907 prefix,
891 cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE), 908 cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE),
892 p->video_temporal_filter); 909 p->video_temporal_filter);
893 printk(KERN_INFO "cx2341x-%d: Median Filter: %s, Luma [%d, %d], Chroma [%d, %d]\n", 910 printk(KERN_INFO "%s: Median Filter: %s, Luma [%d, %d], Chroma [%d, %d]\n",
894 card_id, 911 prefix,
895 cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE), 912 cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE),
896 p->video_luma_median_filter_bottom, 913 p->video_luma_median_filter_bottom,
897 p->video_luma_median_filter_top, 914 p->video_luma_median_filter_top,
diff --git a/drivers/media/video/cx88/Kconfig b/drivers/media/video/cx88/Kconfig
index 91e1c481a1..80e23ee980 100644
--- a/drivers/media/video/cx88/Kconfig
+++ b/drivers/media/video/cx88/Kconfig
@@ -11,7 +11,6 @@ config VIDEO_CX88
11 select VIDEO_BUF 11 select VIDEO_BUF
12 select VIDEO_TUNER 12 select VIDEO_TUNER
13 select VIDEO_TVEEPROM 13 select VIDEO_TVEEPROM
14 select VIDEO_CX2341X
15 select VIDEO_IR 14 select VIDEO_IR
16 ---help--- 15 ---help---
17 This is a video4linux driver for Conexant 2388x based 16 This is a video4linux driver for Conexant 2388x based
@@ -36,13 +35,25 @@ config VIDEO_CX88_ALSA
36 To compile this driver as a module, choose M here: the 35 To compile this driver as a module, choose M here: the
37 module will be called cx88-alsa. 36 module will be called cx88-alsa.
38 37
38config VIDEO_CX88_BLACKBIRD
39 tristate "Blackbird MPEG encoder support (cx2388x + cx23416)"
40 depends on VIDEO_CX88
41 select VIDEO_CX2341X
42 ---help---
43 This adds support for MPEG encoder cards based on the
44 Blackbird reference design, using the Conexant 2388x
45 and 23416 chips.
46
47 To compile this driver as a module, choose M here: the
48 module will be called cx88-blackbird.
49
39config VIDEO_CX88_DVB 50config VIDEO_CX88_DVB
40 tristate "DVB/ATSC Support for cx2388x based TV cards" 51 tristate "DVB/ATSC Support for cx2388x based TV cards"
41 depends on VIDEO_CX88 && DVB_CORE 52 depends on VIDEO_CX88 && DVB_CORE
42 select VIDEO_BUF_DVB 53 select VIDEO_BUF_DVB
43 ---help--- 54 ---help---
44 This adds support for DVB/ATSC cards based on the 55 This adds support for DVB/ATSC cards based on the
45 Connexant 2388x chip. 56 Conexant 2388x chip.
46 57
47 To compile this driver as a module, choose M here: the 58 To compile this driver as a module, choose M here: the
48 module will be called cx88-dvb. 59 module will be called cx88-dvb.
diff --git a/drivers/media/video/cx88/Makefile b/drivers/media/video/cx88/Makefile
index 0dcd09b9b7..352b919f30 100644
--- a/drivers/media/video/cx88/Makefile
+++ b/drivers/media/video/cx88/Makefile
@@ -3,9 +3,10 @@ cx88xx-objs := cx88-cards.o cx88-core.o cx88-i2c.o cx88-tvaudio.o \
3cx8800-objs := cx88-video.o cx88-vbi.o 3cx8800-objs := cx88-video.o cx88-vbi.o
4cx8802-objs := cx88-mpeg.o 4cx8802-objs := cx88-mpeg.o
5 5
6obj-$(CONFIG_VIDEO_CX88) += cx88xx.o cx8800.o cx8802.o cx88-blackbird.o 6obj-$(CONFIG_VIDEO_CX88) += cx88xx.o cx8800.o cx8802.o
7obj-$(CONFIG_VIDEO_CX88_DVB) += cx88-dvb.o
8obj-$(CONFIG_VIDEO_CX88_ALSA) += cx88-alsa.o 7obj-$(CONFIG_VIDEO_CX88_ALSA) += cx88-alsa.o
8obj-$(CONFIG_VIDEO_CX88_BLACKBIRD) += cx88-blackbird.o
9obj-$(CONFIG_VIDEO_CX88_DVB) += cx88-dvb.o
9obj-$(CONFIG_VIDEO_CX88_VP3054) += cx88-vp3054-i2c.o 10obj-$(CONFIG_VIDEO_CX88_VP3054) += cx88-vp3054-i2c.o
10 11
11EXTRA_CFLAGS += -Idrivers/media/video 12EXTRA_CFLAGS += -Idrivers/media/video
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index 67fd3302e8..4ff81582ec 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -846,24 +846,33 @@ static int mpeg_do_ioctl(struct inode *inode, struct file *file,
846 BLACKBIRD_MPEG_CAPTURE, 846 BLACKBIRD_MPEG_CAPTURE,
847 BLACKBIRD_RAW_BITS_NONE); 847 BLACKBIRD_RAW_BITS_NONE);
848 848
849 cx88_do_ioctl( inode, file, 0, dev->core, cmd, arg, cx88_ioctl_hook ); 849 cx88_do_ioctl(inode, file, 0, dev->core, cmd, arg, mpeg_do_ioctl);
850 850
851 blackbird_initialize_codec(dev); 851 blackbird_initialize_codec(dev);
852 cx88_set_scale(dev->core, dev->width, dev->height, 852 cx88_set_scale(dev->core, dev->width, dev->height,
853 fh->mpegq.field); 853 fh->mpegq.field);
854 return 0; 854 return 0;
855 } 855 }
856 case VIDIOC_LOG_STATUS:
857 {
858 char name[32 + 2];
859
860 snprintf(name, sizeof(name), "%s/2", core->name);
861 printk("%s/2: ============ START LOG STATUS ============\n",
862 core->name);
863 cx88_call_i2c_clients(core, VIDIOC_LOG_STATUS, 0);
864 cx2341x_log_status(&dev->params, name);
865 printk("%s/2: ============= END LOG STATUS =============\n",
866 core->name);
867 return 0;
868 }
856 869
857 default: 870 default:
858 return cx88_do_ioctl( inode, file, 0, dev->core, cmd, arg, cx88_ioctl_hook ); 871 return cx88_do_ioctl(inode, file, 0, dev->core, cmd, arg, mpeg_do_ioctl);
859 } 872 }
860 return 0; 873 return 0;
861} 874}
862 875
863int (*cx88_ioctl_hook)(struct inode *inode, struct file *file,
864 unsigned int cmd, void *arg);
865unsigned int (*cx88_ioctl_translator)(unsigned int cmd);
866
867static unsigned int mpeg_translate_ioctl(unsigned int cmd) 876static unsigned int mpeg_translate_ioctl(unsigned int cmd)
868{ 877{
869 return cmd; 878 return cmd;
@@ -872,8 +881,8 @@ static unsigned int mpeg_translate_ioctl(unsigned int cmd)
872static int mpeg_ioctl(struct inode *inode, struct file *file, 881static int mpeg_ioctl(struct inode *inode, struct file *file,
873 unsigned int cmd, unsigned long arg) 882 unsigned int cmd, unsigned long arg)
874{ 883{
875 cmd = cx88_ioctl_translator( cmd ); 884 cmd = mpeg_translate_ioctl( cmd );
876 return video_usercopy(inode, file, cmd, arg, cx88_ioctl_hook); 885 return video_usercopy(inode, file, cmd, arg, mpeg_do_ioctl);
877} 886}
878 887
879static int mpeg_open(struct inode *inode, struct file *file) 888static int mpeg_open(struct inode *inode, struct file *file)
@@ -1119,8 +1128,6 @@ static int blackbird_init(void)
1119 printk(KERN_INFO "cx2388x: snapshot date %04d-%02d-%02d\n", 1128 printk(KERN_INFO "cx2388x: snapshot date %04d-%02d-%02d\n",
1120 SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100); 1129 SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
1121#endif 1130#endif
1122 cx88_ioctl_hook = mpeg_do_ioctl;
1123 cx88_ioctl_translator = mpeg_translate_ioctl;
1124 return pci_register_driver(&blackbird_pci_driver); 1131 return pci_register_driver(&blackbird_pci_driver);
1125} 1132}
1126 1133
@@ -1132,9 +1139,6 @@ static void blackbird_fini(void)
1132module_init(blackbird_init); 1139module_init(blackbird_init);
1133module_exit(blackbird_fini); 1140module_exit(blackbird_fini);
1134 1141
1135EXPORT_SYMBOL(cx88_ioctl_hook);
1136EXPORT_SYMBOL(cx88_ioctl_translator);
1137
1138/* ----------------------------------------------------------- */ 1142/* ----------------------------------------------------------- */
1139/* 1143/*
1140 * Local variables: 1144 * Local variables:
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index 67cdd82708..f9d68f20dc 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -1700,11 +1700,6 @@ void cx88_card_setup(struct cx88_core *core)
1700/* ------------------------------------------------------------------ */ 1700/* ------------------------------------------------------------------ */
1701 1701
1702EXPORT_SYMBOL(cx88_boards); 1702EXPORT_SYMBOL(cx88_boards);
1703EXPORT_SYMBOL(cx88_bcount);
1704EXPORT_SYMBOL(cx88_subids);
1705EXPORT_SYMBOL(cx88_idcount);
1706EXPORT_SYMBOL(cx88_card_list);
1707EXPORT_SYMBOL(cx88_card_setup);
1708 1703
1709/* 1704/*
1710 * Local variables: 1705 * Local variables:
diff --git a/drivers/media/video/cx88/cx88-core.c b/drivers/media/video/cx88/cx88-core.c
index c56292d8d9..26f4c0fb8c 100644
--- a/drivers/media/video/cx88/cx88-core.c
+++ b/drivers/media/video/cx88/cx88-core.c
@@ -1181,8 +1181,6 @@ EXPORT_SYMBOL(cx88_set_scale);
1181EXPORT_SYMBOL(cx88_vdev_init); 1181EXPORT_SYMBOL(cx88_vdev_init);
1182EXPORT_SYMBOL(cx88_core_get); 1182EXPORT_SYMBOL(cx88_core_get);
1183EXPORT_SYMBOL(cx88_core_put); 1183EXPORT_SYMBOL(cx88_core_put);
1184EXPORT_SYMBOL(cx88_start_audio_dma);
1185EXPORT_SYMBOL(cx88_stop_audio_dma);
1186 1184
1187/* 1185/*
1188 * Local variables: 1186 * Local variables:
diff --git a/drivers/media/video/cx88/cx88-i2c.c b/drivers/media/video/cx88/cx88-i2c.c
index 7efa6def0b..70663805cc 100644
--- a/drivers/media/video/cx88/cx88-i2c.c
+++ b/drivers/media/video/cx88/cx88-i2c.c
@@ -234,7 +234,6 @@ int cx88_i2c_init(struct cx88_core *core, struct pci_dev *pci)
234/* ----------------------------------------------------------------------- */ 234/* ----------------------------------------------------------------------- */
235 235
236EXPORT_SYMBOL(cx88_call_i2c_clients); 236EXPORT_SYMBOL(cx88_call_i2c_clients);
237EXPORT_SYMBOL(cx88_i2c_init);
238 237
239/* 238/*
240 * Local variables: 239 * Local variables:
diff --git a/drivers/media/video/cx88/cx88-tvaudio.c b/drivers/media/video/cx88/cx88-tvaudio.c
index 1e4278b588..5785c34815 100644
--- a/drivers/media/video/cx88/cx88-tvaudio.c
+++ b/drivers/media/video/cx88/cx88-tvaudio.c
@@ -726,7 +726,7 @@ static void set_audio_standard_FM(struct cx88_core *core,
726 726
727/* ----------------------------------------------------------- */ 727/* ----------------------------------------------------------- */
728 728
729int cx88_detect_nicam(struct cx88_core *core) 729static int cx88_detect_nicam(struct cx88_core *core)
730{ 730{
731 int i, j = 0; 731 int i, j = 0;
732 732
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index 694d1d80ff..dcda5291b9 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -494,8 +494,7 @@ static int restart_video_queue(struct cx8800_dev *dev,
494 return 0; 494 return 0;
495 buf = list_entry(q->queued.next, struct cx88_buffer, vb.queue); 495 buf = list_entry(q->queued.next, struct cx88_buffer, vb.queue);
496 if (NULL == prev) { 496 if (NULL == prev) {
497 list_del(&buf->vb.queue); 497 list_move_tail(&buf->vb.queue, &q->active);
498 list_add_tail(&buf->vb.queue,&q->active);
499 start_video_dma(dev, q, buf); 498 start_video_dma(dev, q, buf);
500 buf->vb.state = STATE_ACTIVE; 499 buf->vb.state = STATE_ACTIVE;
501 buf->count = q->count++; 500 buf->count = q->count++;
@@ -506,8 +505,7 @@ static int restart_video_queue(struct cx8800_dev *dev,
506 } else if (prev->vb.width == buf->vb.width && 505 } else if (prev->vb.width == buf->vb.width &&
507 prev->vb.height == buf->vb.height && 506 prev->vb.height == buf->vb.height &&
508 prev->fmt == buf->fmt) { 507 prev->fmt == buf->fmt) {
509 list_del(&buf->vb.queue); 508 list_move_tail(&buf->vb.queue, &q->active);
510 list_add_tail(&buf->vb.queue,&q->active);
511 buf->vb.state = STATE_ACTIVE; 509 buf->vb.state = STATE_ACTIVE;
512 buf->count = q->count++; 510 buf->count = q->count++;
513 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma); 511 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
index dc7bc35f18..9a9a0fc7a4 100644
--- a/drivers/media/video/cx88/cx88.h
+++ b/drivers/media/video/cx88/cx88.h
@@ -563,7 +563,6 @@ void cx88_newstation(struct cx88_core *core);
563void cx88_get_stereo(struct cx88_core *core, struct v4l2_tuner *t); 563void cx88_get_stereo(struct cx88_core *core, struct v4l2_tuner *t);
564void cx88_set_stereo(struct cx88_core *core, u32 mode, int manual); 564void cx88_set_stereo(struct cx88_core *core, u32 mode, int manual);
565int cx88_audio_thread(void *data); 565int cx88_audio_thread(void *data);
566int cx88_detect_nicam(struct cx88_core *core);
567 566
568/* ----------------------------------------------------------- */ 567/* ----------------------------------------------------------- */
569/* cx88-input.c */ 568/* cx88-input.c */
@@ -592,12 +591,6 @@ extern int cx88_do_ioctl(struct inode *inode, struct file *file, int radio,
592 struct cx88_core *core, unsigned int cmd, 591 struct cx88_core *core, unsigned int cmd,
593 void *arg, v4l2_kioctl driver_ioctl); 592 void *arg, v4l2_kioctl driver_ioctl);
594 593
595/* ----------------------------------------------------------- */
596/* cx88-blackbird.c */
597extern int (*cx88_ioctl_hook)(struct inode *inode, struct file *file,
598 unsigned int cmd, void *arg);
599extern unsigned int (*cx88_ioctl_translator)(unsigned int cmd);
600
601/* 594/*
602 * Local variables: 595 * Local variables:
603 * c-basic-offset: 8 596 * c-basic-offset: 8
diff --git a/drivers/media/video/pvrusb2/Kconfig b/drivers/media/video/pvrusb2/Kconfig
new file mode 100644
index 0000000000..7e727fe14b
--- /dev/null
+++ b/drivers/media/video/pvrusb2/Kconfig
@@ -0,0 +1,62 @@
1config VIDEO_PVRUSB2
2 tristate "Hauppauge WinTV-PVR USB2 support"
3 depends on VIDEO_V4L2 && USB && I2C && EXPERIMENTAL
4 select FW_LOADER
5 select VIDEO_TUNER
6 select VIDEO_TVEEPROM
7 select VIDEO_CX2341X
8 select VIDEO_SAA711X
9 select VIDEO_MSP3400
10 ---help---
11 This is a video4linux driver for Conexant 23416 based
12 usb2 personal video recorder devices.
13
14 To compile this driver as a module, choose M here: the
15 module will be called pvrusb2
16
17config VIDEO_PVRUSB2_24XXX
18 bool "Hauppauge WinTV-PVR USB2 support for 24xxx model series"
19 depends on VIDEO_PVRUSB2 && EXPERIMENTAL
20 select VIDEO_CX25840
21 select VIDEO_WM8775
22 ---help---
23 This option enables inclusion of additional logic to operate
24 newer WinTV-PVR USB2 devices whose model number is of the
25 form "24xxx" (leading prefix of "24" followed by 3 digits).
26 To see if you may need this option, examine the white
27 sticker on the underside of your device. Enabling this
28 option will not harm support for older devices, however it
29 is a separate option because of the experimental nature of
30 this new feature.
31
32 If you are in doubt, say N.
33
34 Note: This feature is _very_ experimental. You have been
35 warned.
36
37config VIDEO_PVRUSB2_SYSFS
38 bool "pvrusb2 sysfs support (EXPERIMENTAL)"
39 default y
40 depends on VIDEO_PVRUSB2 && SYSFS && EXPERIMENTAL
41 ---help---
42 This option enables the operation of a sysfs based
43 interface for query and control of the pvrusb2 driver.
44
45 This is not generally needed for v4l applications,
46 although certain applications are optimized to take
47 advantage of this feature.
48
49 If you are in doubt, say Y.
50
51 Note: This feature is experimental and subject to change.
52
53config VIDEO_PVRUSB2_DEBUGIFC
54 bool "pvrusb2 debug interface"
55 depends on VIDEO_PVRUSB2_SYSFS
56 ---help---
57 This option enables the inclusion of a debug interface
58 in the pvrusb2 driver, hosted through sysfs.
59
60 You do not need to select this option unless you plan
61 on debugging the driver or performing a manual firmware
62 extraction.
diff --git a/drivers/media/video/pvrusb2/Makefile b/drivers/media/video/pvrusb2/Makefile
new file mode 100644
index 0000000000..fed603ad0a
--- /dev/null
+++ b/drivers/media/video/pvrusb2/Makefile
@@ -0,0 +1,18 @@
1obj-pvrusb2-sysfs-$(CONFIG_VIDEO_PVRUSB2_SYSFS) := pvrusb2-sysfs.o
2obj-pvrusb2-debugifc-$(CONFIG_VIDEO_PVRUSB2_DEBUGIFC) := pvrusb2-debugifc.o
3
4obj-pvrusb2-24xxx-$(CONFIG_VIDEO_PVRUSB2_24XXX) := \
5 pvrusb2-cx2584x-v4l.o \
6 pvrusb2-wm8775.o
7
8pvrusb2-objs := pvrusb2-i2c-core.o pvrusb2-i2c-cmd-v4l2.o \
9 pvrusb2-audio.o pvrusb2-i2c-chips-v4l2.o \
10 pvrusb2-encoder.o pvrusb2-video-v4l.o \
11 pvrusb2-eeprom.o pvrusb2-tuner.o pvrusb2-demod.o \
12 pvrusb2-main.o pvrusb2-hdw.o pvrusb2-v4l2.o \
13 pvrusb2-ctrl.o pvrusb2-std.o \
14 pvrusb2-context.o pvrusb2-io.o pvrusb2-ioread.o \
15 $(obj-pvrusb2-24xxx-y) \
16 $(obj-pvrusb2-sysfs-y) $(obj-pvrusb2-debugifc-y)
17
18obj-$(CONFIG_VIDEO_PVRUSB2) += pvrusb2.o
diff --git a/drivers/media/video/pvrusb2/pvrusb2-audio.c b/drivers/media/video/pvrusb2/pvrusb2-audio.c
new file mode 100644
index 0000000000..313d2dcf9e
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-audio.c
@@ -0,0 +1,204 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include "pvrusb2-audio.h"
24#include "pvrusb2-hdw-internal.h"
25#include "pvrusb2-debug.h"
26#include <linux/videodev2.h>
27#include <media/msp3400.h>
28#include <media/v4l2-common.h>
29
30struct pvr2_msp3400_handler {
31 struct pvr2_hdw *hdw;
32 struct pvr2_i2c_client *client;
33 struct pvr2_i2c_handler i2c_handler;
34 struct pvr2_audio_stat astat;
35 unsigned long stale_mask;
36};
37
38
39/* This function selects the correct audio input source */
40static void set_stereo(struct pvr2_msp3400_handler *ctxt)
41{
42 struct pvr2_hdw *hdw = ctxt->hdw;
43 struct v4l2_routing route;
44
45 pvr2_trace(PVR2_TRACE_CHIPS,"i2c msp3400 v4l2 set_stereo");
46
47 if (hdw->input_val == PVR2_CVAL_INPUT_TV) {
48 struct v4l2_tuner vt;
49 memset(&vt,0,sizeof(vt));
50 vt.audmode = hdw->audiomode_val;
51 pvr2_i2c_client_cmd(ctxt->client,VIDIOC_S_TUNER,&vt);
52 }
53
54 route.input = MSP_INPUT_DEFAULT;
55 route.output = MSP_OUTPUT(MSP_SC_IN_DSP_SCART1);
56 switch (hdw->input_val) {
57 case PVR2_CVAL_INPUT_TV:
58 break;
59 case PVR2_CVAL_INPUT_RADIO:
60 /* Assume that msp34xx also handle FM decoding, in which case
61 we're still using the tuner. */
62 /* HV: actually it is more likely to be the SCART2 input if
63 the ivtv experience is any indication. */
64 route.input = MSP_INPUT(MSP_IN_SCART2, MSP_IN_TUNER1,
65 MSP_DSP_IN_SCART, MSP_DSP_IN_SCART);
66 break;
67 case PVR2_CVAL_INPUT_SVIDEO:
68 case PVR2_CVAL_INPUT_COMPOSITE:
69 /* SCART 1 input */
70 route.input = MSP_INPUT(MSP_IN_SCART1, MSP_IN_TUNER1,
71 MSP_DSP_IN_SCART, MSP_DSP_IN_SCART);
72 break;
73 }
74 pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_S_AUDIO_ROUTING,&route);
75}
76
77
78static int check_stereo(struct pvr2_msp3400_handler *ctxt)
79{
80 struct pvr2_hdw *hdw = ctxt->hdw;
81 return (hdw->input_dirty ||
82 hdw->audiomode_dirty);
83}
84
85
86struct pvr2_msp3400_ops {
87 void (*update)(struct pvr2_msp3400_handler *);
88 int (*check)(struct pvr2_msp3400_handler *);
89};
90
91
92static const struct pvr2_msp3400_ops msp3400_ops[] = {
93 { .update = set_stereo, .check = check_stereo},
94};
95
96
97static int msp3400_check(struct pvr2_msp3400_handler *ctxt)
98{
99 unsigned long msk;
100 unsigned int idx;
101
102 for (idx = 0; idx < sizeof(msp3400_ops)/sizeof(msp3400_ops[0]);
103 idx++) {
104 msk = 1 << idx;
105 if (ctxt->stale_mask & msk) continue;
106 if (msp3400_ops[idx].check(ctxt)) {
107 ctxt->stale_mask |= msk;
108 }
109 }
110 return ctxt->stale_mask != 0;
111}
112
113
114static void msp3400_update(struct pvr2_msp3400_handler *ctxt)
115{
116 unsigned long msk;
117 unsigned int idx;
118
119 for (idx = 0; idx < sizeof(msp3400_ops)/sizeof(msp3400_ops[0]);
120 idx++) {
121 msk = 1 << idx;
122 if (!(ctxt->stale_mask & msk)) continue;
123 ctxt->stale_mask &= ~msk;
124 msp3400_ops[idx].update(ctxt);
125 }
126}
127
128
129/* This reads back the current signal type */
130static int get_audio_status(struct pvr2_msp3400_handler *ctxt)
131{
132 struct v4l2_tuner vt;
133 int stat;
134
135 memset(&vt,0,sizeof(vt));
136 stat = pvr2_i2c_client_cmd(ctxt->client,VIDIOC_G_TUNER,&vt);
137 if (stat < 0) return stat;
138
139 ctxt->hdw->flag_stereo = (vt.audmode & V4L2_TUNER_MODE_STEREO) != 0;
140 ctxt->hdw->flag_bilingual =
141 (vt.audmode & V4L2_TUNER_MODE_LANG2) != 0;
142 return 0;
143}
144
145
146static void pvr2_msp3400_detach(struct pvr2_msp3400_handler *ctxt)
147{
148 ctxt->client->handler = 0;
149 ctxt->hdw->audio_stat = 0;
150 kfree(ctxt);
151}
152
153
154static unsigned int pvr2_msp3400_describe(struct pvr2_msp3400_handler *ctxt,
155 char *buf,unsigned int cnt)
156{
157 return scnprintf(buf,cnt,"handler: pvrusb2-audio v4l2");
158}
159
160
161const static struct pvr2_i2c_handler_functions msp3400_funcs = {
162 .detach = (void (*)(void *))pvr2_msp3400_detach,
163 .check = (int (*)(void *))msp3400_check,
164 .update = (void (*)(void *))msp3400_update,
165 .describe = (unsigned int (*)(void *,char *,unsigned int))pvr2_msp3400_describe,
166};
167
168
169int pvr2_i2c_msp3400_setup(struct pvr2_hdw *hdw,struct pvr2_i2c_client *cp)
170{
171 struct pvr2_msp3400_handler *ctxt;
172 if (hdw->audio_stat) return 0;
173 if (cp->handler) return 0;
174
175 ctxt = kmalloc(sizeof(*ctxt),GFP_KERNEL);
176 if (!ctxt) return 0;
177 memset(ctxt,0,sizeof(*ctxt));
178
179 ctxt->i2c_handler.func_data = ctxt;
180 ctxt->i2c_handler.func_table = &msp3400_funcs;
181 ctxt->client = cp;
182 ctxt->hdw = hdw;
183 ctxt->astat.ctxt = ctxt;
184 ctxt->astat.status = (int (*)(void *))get_audio_status;
185 ctxt->astat.detach = (void (*)(void *))pvr2_msp3400_detach;
186 ctxt->stale_mask = (1 << (sizeof(msp3400_ops)/
187 sizeof(msp3400_ops[0]))) - 1;
188 cp->handler = &ctxt->i2c_handler;
189 hdw->audio_stat = &ctxt->astat;
190 pvr2_trace(PVR2_TRACE_CHIPS,"i2c 0x%x msp3400 V4L2 handler set up",
191 cp->client->addr);
192 return !0;
193}
194
195
196/*
197 Stuff for Emacs to see, in order to encourage consistent editing style:
198 *** Local Variables: ***
199 *** mode: c ***
200 *** fill-column: 70 ***
201 *** tab-width: 8 ***
202 *** c-basic-offset: 8 ***
203 *** End: ***
204 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-audio.h b/drivers/media/video/pvrusb2/pvrusb2-audio.h
new file mode 100644
index 0000000000..536339b688
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-audio.h
@@ -0,0 +1,40 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#ifndef __PVRUSB2_AUDIO_H
24#define __PVRUSB2_AUDIO_H
25
26#include "pvrusb2-i2c-core.h"
27
28int pvr2_i2c_msp3400_setup(struct pvr2_hdw *,struct pvr2_i2c_client *);
29
30#endif /* __PVRUSB2_AUDIO_H */
31
32/*
33 Stuff for Emacs to see, in order to encourage consistent editing style:
34 *** Local Variables: ***
35 *** mode: c ***
36 *** fill-column: 70 ***
37 *** tab-width: 8 ***
38 *** c-basic-offset: 8 ***
39 *** End: ***
40 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-context.c b/drivers/media/video/pvrusb2/pvrusb2-context.c
new file mode 100644
index 0000000000..40dc59871a
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-context.c
@@ -0,0 +1,230 @@
1/*
2 * $Id$
3 *
4 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 *
19 */
20
21#include "pvrusb2-context.h"
22#include "pvrusb2-io.h"
23#include "pvrusb2-ioread.h"
24#include "pvrusb2-hdw.h"
25#include "pvrusb2-debug.h"
26#include <linux/errno.h>
27#include <linux/string.h>
28#include <linux/slab.h>
29#include <asm/semaphore.h>
30
31
32static void pvr2_context_destroy(struct pvr2_context *mp)
33{
34 if (mp->hdw) pvr2_hdw_destroy(mp->hdw);
35 pvr2_trace(PVR2_TRACE_STRUCT,"Destroying pvr_main id=%p",mp);
36 flush_workqueue(mp->workqueue);
37 destroy_workqueue(mp->workqueue);
38 kfree(mp);
39}
40
41
42static void pvr2_context_trigger_poll(struct pvr2_context *mp)
43{
44 queue_work(mp->workqueue,&mp->workpoll);
45}
46
47
48static void pvr2_context_poll(struct pvr2_context *mp)
49{
50 pvr2_context_enter(mp); do {
51 pvr2_hdw_poll(mp->hdw);
52 } while (0); pvr2_context_exit(mp);
53}
54
55
56static void pvr2_context_setup(struct pvr2_context *mp)
57{
58 pvr2_context_enter(mp); do {
59 if (!pvr2_hdw_dev_ok(mp->hdw)) break;
60 pvr2_hdw_setup(mp->hdw);
61 pvr2_hdw_setup_poll_trigger(
62 mp->hdw,
63 (void (*)(void *))pvr2_context_trigger_poll,
64 mp);
65 if (!pvr2_hdw_dev_ok(mp->hdw)) break;
66 if (!pvr2_hdw_init_ok(mp->hdw)) break;
67 mp->video_stream.stream = pvr2_hdw_get_video_stream(mp->hdw);
68 if (mp->setup_func) {
69 mp->setup_func(mp);
70 }
71 } while (0); pvr2_context_exit(mp);
72}
73
74
75struct pvr2_context *pvr2_context_create(
76 struct usb_interface *intf,
77 const struct usb_device_id *devid,
78 void (*setup_func)(struct pvr2_context *))
79{
80 struct pvr2_context *mp = 0;
81 mp = kmalloc(sizeof(*mp),GFP_KERNEL);
82 if (!mp) goto done;
83 memset(mp,0,sizeof(*mp));
84 pvr2_trace(PVR2_TRACE_STRUCT,"Creating pvr_main id=%p",mp);
85 mp->setup_func = setup_func;
86 mutex_init(&mp->mutex);
87 mp->hdw = pvr2_hdw_create(intf,devid);
88 if (!mp->hdw) {
89 pvr2_context_destroy(mp);
90 mp = 0;
91 goto done;
92 }
93
94 mp->workqueue = create_singlethread_workqueue("pvrusb2");
95 INIT_WORK(&mp->workinit,(void (*)(void*))pvr2_context_setup,mp);
96 INIT_WORK(&mp->workpoll,(void (*)(void*))pvr2_context_poll,mp);
97 queue_work(mp->workqueue,&mp->workinit);
98 done:
99 return mp;
100}
101
102
103void pvr2_context_enter(struct pvr2_context *mp)
104{
105 mutex_lock(&mp->mutex);
106 pvr2_trace(PVR2_TRACE_CREG,"pvr2_context_enter(id=%p)",mp);
107}
108
109
110void pvr2_context_exit(struct pvr2_context *mp)
111{
112 int destroy_flag = 0;
113 if (!(mp->mc_first || !mp->disconnect_flag)) {
114 destroy_flag = !0;
115 }
116 pvr2_trace(PVR2_TRACE_CREG,"pvr2_context_exit(id=%p) outside",mp);
117 mutex_unlock(&mp->mutex);
118 if (destroy_flag) pvr2_context_destroy(mp);
119}
120
121
122static void pvr2_context_run_checks(struct pvr2_context *mp)
123{
124 struct pvr2_channel *ch1,*ch2;
125 for (ch1 = mp->mc_first; ch1; ch1 = ch2) {
126 ch2 = ch1->mc_next;
127 if (ch1->check_func) {
128 ch1->check_func(ch1);
129 }
130 }
131}
132
133
134void pvr2_context_disconnect(struct pvr2_context *mp)
135{
136 pvr2_context_enter(mp); do {
137 pvr2_hdw_disconnect(mp->hdw);
138 mp->disconnect_flag = !0;
139 pvr2_context_run_checks(mp);
140 } while (0); pvr2_context_exit(mp);
141}
142
143
144void pvr2_channel_init(struct pvr2_channel *cp,struct pvr2_context *mp)
145{
146 cp->hdw = mp->hdw;
147 cp->mc_head = mp;
148 cp->mc_next = 0;
149 cp->mc_prev = mp->mc_last;
150 if (mp->mc_last) {
151 mp->mc_last->mc_next = cp;
152 } else {
153 mp->mc_first = cp;
154 }
155 mp->mc_last = cp;
156}
157
158
159static void pvr2_channel_disclaim_stream(struct pvr2_channel *cp)
160{
161 if (!cp->stream) return;
162 pvr2_stream_kill(cp->stream->stream);
163 cp->stream->user = 0;
164 cp->stream = 0;
165}
166
167
168void pvr2_channel_done(struct pvr2_channel *cp)
169{
170 struct pvr2_context *mp = cp->mc_head;
171 pvr2_channel_disclaim_stream(cp);
172 if (cp->mc_next) {
173 cp->mc_next->mc_prev = cp->mc_prev;
174 } else {
175 mp->mc_last = cp->mc_prev;
176 }
177 if (cp->mc_prev) {
178 cp->mc_prev->mc_next = cp->mc_next;
179 } else {
180 mp->mc_first = cp->mc_next;
181 }
182 cp->hdw = 0;
183}
184
185
186int pvr2_channel_claim_stream(struct pvr2_channel *cp,
187 struct pvr2_context_stream *sp)
188{
189 int code = 0;
190 pvr2_context_enter(cp->mc_head); do {
191 if (sp == cp->stream) break;
192 if (sp->user) {
193 code = -EBUSY;
194 break;
195 }
196 pvr2_channel_disclaim_stream(cp);
197 if (!sp) break;
198 sp->user = cp;
199 cp->stream = sp;
200 } while (0); pvr2_context_exit(cp->mc_head);
201 return code;
202}
203
204
205// This is the marker for the real beginning of a legitimate mpeg2 stream.
206static char stream_sync_key[] = {
207 0x00, 0x00, 0x01, 0xba,
208};
209
210struct pvr2_ioread *pvr2_channel_create_mpeg_stream(
211 struct pvr2_context_stream *sp)
212{
213 struct pvr2_ioread *cp;
214 cp = pvr2_ioread_create();
215 if (!cp) return 0;
216 pvr2_ioread_setup(cp,sp->stream);
217 pvr2_ioread_set_sync_key(cp,stream_sync_key,sizeof(stream_sync_key));
218 return cp;
219}
220
221
222/*
223 Stuff for Emacs to see, in order to encourage consistent editing style:
224 *** Local Variables: ***
225 *** mode: c ***
226 *** fill-column: 75 ***
227 *** tab-width: 8 ***
228 *** c-basic-offset: 8 ***
229 *** End: ***
230 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-context.h b/drivers/media/video/pvrusb2/pvrusb2-context.h
new file mode 100644
index 0000000000..6327fa1f7e
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-context.h
@@ -0,0 +1,92 @@
1/*
2 * $Id$
3 *
4 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 *
19 */
20#ifndef __PVRUSB2_BASE_H
21#define __PVRUSB2_BASE_H
22
23#include <linux/mutex.h>
24#include <linux/usb.h>
25#include <linux/workqueue.h>
26
27struct pvr2_hdw; /* hardware interface - defined elsewhere */
28struct pvr2_stream; /* stream interface - defined elsewhere */
29
30struct pvr2_context; /* All central state */
31struct pvr2_channel; /* One I/O pathway to a user */
32struct pvr2_context_stream; /* Wrapper for a stream */
33struct pvr2_crit_reg; /* Critical region pointer */
34struct pvr2_ioread; /* Low level stream structure */
35
36struct pvr2_context_stream {
37 struct pvr2_channel *user;
38 struct pvr2_stream *stream;
39};
40
41struct pvr2_context {
42 struct pvr2_channel *mc_first;
43 struct pvr2_channel *mc_last;
44 struct pvr2_hdw *hdw;
45 struct pvr2_context_stream video_stream;
46 struct mutex mutex;
47 int disconnect_flag;
48
49 /* Called after pvr2_context initialization is complete */
50 void (*setup_func)(struct pvr2_context *);
51
52 /* Work queue overhead for out-of-line processing */
53 struct workqueue_struct *workqueue;
54 struct work_struct workinit;
55 struct work_struct workpoll;
56};
57
58struct pvr2_channel {
59 struct pvr2_context *mc_head;
60 struct pvr2_channel *mc_next;
61 struct pvr2_channel *mc_prev;
62 struct pvr2_context_stream *stream;
63 struct pvr2_hdw *hdw;
64 void (*check_func)(struct pvr2_channel *);
65};
66
67void pvr2_context_enter(struct pvr2_context *);
68void pvr2_context_exit(struct pvr2_context *);
69
70struct pvr2_context *pvr2_context_create(struct usb_interface *intf,
71 const struct usb_device_id *devid,
72 void (*setup_func)(struct pvr2_context *));
73void pvr2_context_disconnect(struct pvr2_context *);
74
75void pvr2_channel_init(struct pvr2_channel *,struct pvr2_context *);
76void pvr2_channel_done(struct pvr2_channel *);
77int pvr2_channel_claim_stream(struct pvr2_channel *,
78 struct pvr2_context_stream *);
79struct pvr2_ioread *pvr2_channel_create_mpeg_stream(
80 struct pvr2_context_stream *);
81
82
83#endif /* __PVRUSB2_CONTEXT_H */
84/*
85 Stuff for Emacs to see, in order to encourage consistent editing style:
86 *** Local Variables: ***
87 *** mode: c ***
88 *** fill-column: 75 ***
89 *** tab-width: 8 ***
90 *** c-basic-offset: 8 ***
91 *** End: ***
92 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-ctrl.c b/drivers/media/video/pvrusb2/pvrusb2-ctrl.c
new file mode 100644
index 0000000000..d5df9fbeba
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-ctrl.c
@@ -0,0 +1,593 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21
22#include "pvrusb2-ctrl.h"
23#include "pvrusb2-hdw-internal.h"
24#include <linux/errno.h>
25#include <linux/string.h>
26#include <linux/mutex.h>
27
28
29/* Set the given control. */
30int pvr2_ctrl_set_value(struct pvr2_ctrl *cptr,int val)
31{
32 return pvr2_ctrl_set_mask_value(cptr,~0,val);
33}
34
35
36/* Set/clear specific bits of the given control. */
37int pvr2_ctrl_set_mask_value(struct pvr2_ctrl *cptr,int mask,int val)
38{
39 int ret = 0;
40 if (!cptr) return -EINVAL;
41 LOCK_TAKE(cptr->hdw->big_lock); do {
42 if (cptr->info->set_value != 0) {
43 if (cptr->info->type == pvr2_ctl_bitmask) {
44 mask &= cptr->info->def.type_bitmask.valid_bits;
45 } else if (cptr->info->type == pvr2_ctl_int) {
46 if (val < cptr->info->def.type_int.min_value) {
47 break;
48 }
49 if (val > cptr->info->def.type_int.max_value) {
50 break;
51 }
52 } else if (cptr->info->type == pvr2_ctl_enum) {
53 if (val >= cptr->info->def.type_enum.count) {
54 break;
55 }
56 } else if (cptr->info->type != pvr2_ctl_bool) {
57 break;
58 }
59 ret = cptr->info->set_value(cptr,mask,val);
60 } else {
61 ret = -EPERM;
62 }
63 } while(0); LOCK_GIVE(cptr->hdw->big_lock);
64 return ret;
65}
66
67
68/* Get the current value of the given control. */
69int pvr2_ctrl_get_value(struct pvr2_ctrl *cptr,int *valptr)
70{
71 int ret = 0;
72 if (!cptr) return -EINVAL;
73 LOCK_TAKE(cptr->hdw->big_lock); do {
74 ret = cptr->info->get_value(cptr,valptr);
75 } while(0); LOCK_GIVE(cptr->hdw->big_lock);
76 return ret;
77}
78
79
80/* Retrieve control's type */
81enum pvr2_ctl_type pvr2_ctrl_get_type(struct pvr2_ctrl *cptr)
82{
83 if (!cptr) return pvr2_ctl_int;
84 return cptr->info->type;
85}
86
87
88/* Retrieve control's maximum value (int type) */
89int pvr2_ctrl_get_max(struct pvr2_ctrl *cptr)
90{
91 int ret = 0;
92 if (!cptr) return 0;
93 LOCK_TAKE(cptr->hdw->big_lock); do {
94 if (cptr->info->type == pvr2_ctl_int) {
95 ret = cptr->info->def.type_int.max_value;
96 }
97 } while(0); LOCK_GIVE(cptr->hdw->big_lock);
98 return ret;
99}
100
101
102/* Retrieve control's minimum value (int type) */
103int pvr2_ctrl_get_min(struct pvr2_ctrl *cptr)
104{
105 int ret = 0;
106 if (!cptr) return 0;
107 LOCK_TAKE(cptr->hdw->big_lock); do {
108 if (cptr->info->type == pvr2_ctl_int) {
109 ret = cptr->info->def.type_int.min_value;
110 }
111 } while(0); LOCK_GIVE(cptr->hdw->big_lock);
112 return ret;
113}
114
115
116/* Retrieve control's default value (any type) */
117int pvr2_ctrl_get_def(struct pvr2_ctrl *cptr)
118{
119 int ret = 0;
120 if (!cptr) return 0;
121 LOCK_TAKE(cptr->hdw->big_lock); do {
122 if (cptr->info->type == pvr2_ctl_int) {
123 ret = cptr->info->default_value;
124 }
125 } while(0); LOCK_GIVE(cptr->hdw->big_lock);
126 return ret;
127}
128
129
130/* Retrieve control's enumeration count (enum only) */
131int pvr2_ctrl_get_cnt(struct pvr2_ctrl *cptr)
132{
133 int ret = 0;
134 if (!cptr) return 0;
135 LOCK_TAKE(cptr->hdw->big_lock); do {
136 if (cptr->info->type == pvr2_ctl_enum) {
137 ret = cptr->info->def.type_enum.count;
138 }
139 } while(0); LOCK_GIVE(cptr->hdw->big_lock);
140 return ret;
141}
142
143
144/* Retrieve control's valid mask bits (bit mask only) */
145int pvr2_ctrl_get_mask(struct pvr2_ctrl *cptr)
146{
147 int ret = 0;
148 if (!cptr) return 0;
149 LOCK_TAKE(cptr->hdw->big_lock); do {
150 if (cptr->info->type == pvr2_ctl_bitmask) {
151 ret = cptr->info->def.type_bitmask.valid_bits;
152 }
153 } while(0); LOCK_GIVE(cptr->hdw->big_lock);
154 return ret;
155}
156
157
158/* Retrieve the control's name */
159const char *pvr2_ctrl_get_name(struct pvr2_ctrl *cptr)
160{
161 if (!cptr) return 0;
162 return cptr->info->name;
163}
164
165
166/* Retrieve the control's desc */
167const char *pvr2_ctrl_get_desc(struct pvr2_ctrl *cptr)
168{
169 if (!cptr) return 0;
170 return cptr->info->desc;
171}
172
173
174/* Retrieve a control enumeration or bit mask value */
175int pvr2_ctrl_get_valname(struct pvr2_ctrl *cptr,int val,
176 char *bptr,unsigned int bmax,
177 unsigned int *blen)
178{
179 int ret = -EINVAL;
180 if (!cptr) return 0;
181 *blen = 0;
182 LOCK_TAKE(cptr->hdw->big_lock); do {
183 if (cptr->info->type == pvr2_ctl_enum) {
184 const char **names;
185 names = cptr->info->def.type_enum.value_names;
186 if ((val >= 0) &&
187 (val < cptr->info->def.type_enum.count)) {
188 if (names[val]) {
189 *blen = scnprintf(
190 bptr,bmax,"%s",
191 names[val]);
192 } else {
193 *blen = 0;
194 }
195 ret = 0;
196 }
197 } else if (cptr->info->type == pvr2_ctl_bitmask) {
198 const char **names;
199 unsigned int idx;
200 int msk;
201 names = cptr->info->def.type_bitmask.bit_names;
202 val &= cptr->info->def.type_bitmask.valid_bits;
203 for (idx = 0, msk = 1; val; idx++, msk <<= 1) {
204 if (val & msk) {
205 *blen = scnprintf(bptr,bmax,"%s",
206 names[idx]);
207 ret = 0;
208 break;
209 }
210 }
211 }
212 } while(0); LOCK_GIVE(cptr->hdw->big_lock);
213 return ret;
214}
215
216
217/* Return V4L ID for this control or zero if none */
218int pvr2_ctrl_get_v4lid(struct pvr2_ctrl *cptr)
219{
220 if (!cptr) return 0;
221 return cptr->info->v4l_id;
222}
223
224
225unsigned int pvr2_ctrl_get_v4lflags(struct pvr2_ctrl *cptr)
226{
227 unsigned int flags = 0;
228
229 if (cptr->info->get_v4lflags) {
230 flags = cptr->info->get_v4lflags(cptr);
231 }
232
233 if (cptr->info->set_value) {
234 flags &= ~V4L2_CTRL_FLAG_READ_ONLY;
235 } else {
236 flags |= V4L2_CTRL_FLAG_READ_ONLY;
237 }
238
239 return flags;
240}
241
242
243/* Return true if control is writable */
244int pvr2_ctrl_is_writable(struct pvr2_ctrl *cptr)
245{
246 if (!cptr) return 0;
247 return cptr->info->set_value != 0;
248}
249
250
251/* Return true if control has custom symbolic representation */
252int pvr2_ctrl_has_custom_symbols(struct pvr2_ctrl *cptr)
253{
254 if (!cptr) return 0;
255 if (!cptr->info->val_to_sym) return 0;
256 if (!cptr->info->sym_to_val) return 0;
257 return !0;
258}
259
260
261/* Convert a given mask/val to a custom symbolic value */
262int pvr2_ctrl_custom_value_to_sym(struct pvr2_ctrl *cptr,
263 int mask,int val,
264 char *buf,unsigned int maxlen,
265 unsigned int *len)
266{
267 if (!cptr) return -EINVAL;
268 if (!cptr->info->val_to_sym) return -EINVAL;
269 return cptr->info->val_to_sym(cptr,mask,val,buf,maxlen,len);
270}
271
272
273/* Convert a symbolic value to a mask/value pair */
274int pvr2_ctrl_custom_sym_to_value(struct pvr2_ctrl *cptr,
275 const char *buf,unsigned int len,
276 int *maskptr,int *valptr)
277{
278 if (!cptr) return -EINVAL;
279 if (!cptr->info->sym_to_val) return -EINVAL;
280 return cptr->info->sym_to_val(cptr,buf,len,maskptr,valptr);
281}
282
283
284static unsigned int gen_bitmask_string(int msk,int val,int msk_only,
285 const char **names,
286 char *ptr,unsigned int len)
287{
288 unsigned int idx;
289 long sm,um;
290 int spcFl;
291 unsigned int uc,cnt;
292 const char *idStr;
293
294 spcFl = 0;
295 uc = 0;
296 um = 0;
297 for (idx = 0, sm = 1; msk; idx++, sm <<= 1) {
298 if (sm & msk) {
299 msk &= ~sm;
300 idStr = names[idx];
301 if (idStr) {
302 cnt = scnprintf(ptr,len,"%s%s%s",
303 (spcFl ? " " : ""),
304 (msk_only ? "" :
305 ((val & sm) ? "+" : "-")),
306 idStr);
307 ptr += cnt; len -= cnt; uc += cnt;
308 spcFl = !0;
309 } else {
310 um |= sm;
311 }
312 }
313 }
314 if (um) {
315 if (msk_only) {
316 cnt = scnprintf(ptr,len,"%s0x%lx",
317 (spcFl ? " " : ""),
318 um);
319 ptr += cnt; len -= cnt; uc += cnt;
320 spcFl = !0;
321 } else if (um & val) {
322 cnt = scnprintf(ptr,len,"%s+0x%lx",
323 (spcFl ? " " : ""),
324 um & val);
325 ptr += cnt; len -= cnt; uc += cnt;
326 spcFl = !0;
327 } else if (um & ~val) {
328 cnt = scnprintf(ptr,len,"%s+0x%lx",
329 (spcFl ? " " : ""),
330 um & ~val);
331 ptr += cnt; len -= cnt; uc += cnt;
332 spcFl = !0;
333 }
334 }
335 return uc;
336}
337
338
339static const char *boolNames[] = {
340 "false",
341 "true",
342 "no",
343 "yes",
344};
345
346
347static int parse_token(const char *ptr,unsigned int len,
348 int *valptr,
349 const char **names,unsigned int namecnt)
350{
351 char buf[33];
352 unsigned int slen;
353 unsigned int idx;
354 int negfl;
355 char *p2;
356 *valptr = 0;
357 if (!names) namecnt = 0;
358 for (idx = 0; idx < namecnt; idx++) {
359 if (!names[idx]) continue;
360 slen = strlen(names[idx]);
361 if (slen != len) continue;
362 if (memcmp(names[idx],ptr,slen)) continue;
363 *valptr = idx;
364 return 0;
365 }
366 negfl = 0;
367 if ((*ptr == '-') || (*ptr == '+')) {
368 negfl = (*ptr == '-');
369 ptr++; len--;
370 }
371 if (len >= sizeof(buf)) return -EINVAL;
372 memcpy(buf,ptr,len);
373 buf[len] = 0;
374 *valptr = simple_strtol(buf,&p2,0);
375 if (negfl) *valptr = -(*valptr);
376 if (*p2) return -EINVAL;
377 return 1;
378}
379
380
381static int parse_mtoken(const char *ptr,unsigned int len,
382 int *valptr,
383 const char **names,int valid_bits)
384{
385 char buf[33];
386 unsigned int slen;
387 unsigned int idx;
388 char *p2;
389 int msk;
390 *valptr = 0;
391 for (idx = 0, msk = 1; valid_bits; idx++, msk <<= 1) {
392 if (!msk & valid_bits) continue;
393 valid_bits &= ~msk;
394 if (!names[idx]) continue;
395 slen = strlen(names[idx]);
396 if (slen != len) continue;
397 if (memcmp(names[idx],ptr,slen)) continue;
398 *valptr = msk;
399 return 0;
400 }
401 if (len >= sizeof(buf)) return -EINVAL;
402 memcpy(buf,ptr,len);
403 buf[len] = 0;
404 *valptr = simple_strtol(buf,&p2,0);
405 if (*p2) return -EINVAL;
406 return 0;
407}
408
409
static int parse_tlist(const char *ptr,unsigned int len,
		       int *maskptr,int *valptr,
		       const char **names,int valid_bits)
{
	/* Parse a separator-delimited list of bit tokens.  A token may be
	   prefixed with '+' (set those bits) or '-' (clear those bits); an
	   unprefixed token selects an absolute value over all valid bits.
	   The aggregate result is stored in *maskptr / *valptr.  Returns 0
	   on success, -EINVAL if any token fails to parse. */
	unsigned int cnt;
	int mask,val,kv,mode,ret;
	mask = 0;
	val = 0;
	ret = 0;
	while (len) {
		/* Skip separator characters (unprintable / whitespace) */
		cnt = 0;
		while ((cnt < len) &&
		       ((ptr[cnt] <= 32) ||
			(ptr[cnt] >= 127))) cnt++;
		ptr += cnt;
		len -= cnt;
		/* Fix: if only separators remained, stop here rather than
		   dereference *ptr one byte past the parsed region. */
		if (!len) break;
		mode = 0;
		if ((*ptr == '-') || (*ptr == '+')) {
			mode = (*ptr == '-') ? -1 : 1;
			ptr++;
			len--;
		}
		/* Measure the token itself (printable run) */
		cnt = 0;
		while (cnt < len) {
			if (ptr[cnt] <= 32) break;
			if (ptr[cnt] >= 127) break;
			cnt++;
		}
		if (!cnt) break;
		if (parse_mtoken(ptr,cnt,&kv,names,valid_bits)) {
			ret = -EINVAL;
			break;
		}
		ptr += cnt;
		len -= cnt;
		switch (mode) {
		case 0:
			/* Absolute value: affect every valid bit */
			mask = valid_bits;
			val |= kv;
			break;
		case -1:
			/* '-' prefix: clear these bits */
			mask |= kv;
			val &= ~kv;
			break;
		case 1:
			/* '+' prefix: set these bits */
			mask |= kv;
			val |= kv;
			break;
		default:
			break;
		}
	}
	*maskptr = mask;
	*valptr = val;
	return ret;
}
466
467
468/* Convert a symbolic value to a mask/value pair */
469int pvr2_ctrl_sym_to_value(struct pvr2_ctrl *cptr,
470 const char *ptr,unsigned int len,
471 int *maskptr,int *valptr)
472{
473 int ret = -EINVAL;
474 unsigned int cnt;
475
476 *maskptr = 0;
477 *valptr = 0;
478
479 cnt = 0;
480 while ((cnt < len) && ((ptr[cnt] <= 32) || (ptr[cnt] >= 127))) cnt++;
481 len -= cnt; ptr += cnt;
482 cnt = 0;
483 while ((cnt < len) && ((ptr[len-(cnt+1)] <= 32) ||
484 (ptr[len-(cnt+1)] >= 127))) cnt++;
485 len -= cnt;
486
487 if (!len) return -EINVAL;
488
489 LOCK_TAKE(cptr->hdw->big_lock); do {
490 if (cptr->info->type == pvr2_ctl_int) {
491 ret = parse_token(ptr,len,valptr,0,0);
492 if ((ret >= 0) &&
493 ((*valptr < cptr->info->def.type_int.min_value) ||
494 (*valptr > cptr->info->def.type_int.max_value))) {
495 ret = -ERANGE;
496 }
497 if (maskptr) *maskptr = ~0;
498 } else if (cptr->info->type == pvr2_ctl_bool) {
499 ret = parse_token(
500 ptr,len,valptr,boolNames,
501 sizeof(boolNames)/sizeof(boolNames[0]));
502 if (ret == 1) {
503 *valptr = *valptr ? !0 : 0;
504 } else if (ret == 0) {
505 *valptr = (*valptr & 1) ? !0 : 0;
506 }
507 if (maskptr) *maskptr = 1;
508 } else if (cptr->info->type == pvr2_ctl_enum) {
509 ret = parse_token(
510 ptr,len,valptr,
511 cptr->info->def.type_enum.value_names,
512 cptr->info->def.type_enum.count);
513 if ((ret >= 0) &&
514 ((*valptr < 0) ||
515 (*valptr >= cptr->info->def.type_enum.count))) {
516 ret = -ERANGE;
517 }
518 if (maskptr) *maskptr = ~0;
519 } else if (cptr->info->type == pvr2_ctl_bitmask) {
520 ret = parse_tlist(
521 ptr,len,maskptr,valptr,
522 cptr->info->def.type_bitmask.bit_names,
523 cptr->info->def.type_bitmask.valid_bits);
524 }
525 } while(0); LOCK_GIVE(cptr->hdw->big_lock);
526 return ret;
527}
528
529
530/* Convert a given mask/val to a symbolic value */
531int pvr2_ctrl_value_to_sym_internal(struct pvr2_ctrl *cptr,
532 int mask,int val,
533 char *buf,unsigned int maxlen,
534 unsigned int *len)
535{
536 int ret = -EINVAL;
537
538 *len = 0;
539 if (cptr->info->type == pvr2_ctl_int) {
540 *len = scnprintf(buf,maxlen,"%d",val);
541 ret = 0;
542 } else if (cptr->info->type == pvr2_ctl_bool) {
543 *len = scnprintf(buf,maxlen,"%s",val ? "true" : "false");
544 ret = 0;
545 } else if (cptr->info->type == pvr2_ctl_enum) {
546 const char **names;
547 names = cptr->info->def.type_enum.value_names;
548 if ((val >= 0) &&
549 (val < cptr->info->def.type_enum.count)) {
550 if (names[val]) {
551 *len = scnprintf(
552 buf,maxlen,"%s",
553 names[val]);
554 } else {
555 *len = 0;
556 }
557 ret = 0;
558 }
559 } else if (cptr->info->type == pvr2_ctl_bitmask) {
560 *len = gen_bitmask_string(
561 val & mask & cptr->info->def.type_bitmask.valid_bits,
562 ~0,!0,
563 cptr->info->def.type_bitmask.bit_names,
564 buf,maxlen);
565 }
566 return ret;
567}
568
569
570/* Convert a given mask/val to a symbolic value */
571int pvr2_ctrl_value_to_sym(struct pvr2_ctrl *cptr,
572 int mask,int val,
573 char *buf,unsigned int maxlen,
574 unsigned int *len)
575{
576 int ret;
577 LOCK_TAKE(cptr->hdw->big_lock); do {
578 ret = pvr2_ctrl_value_to_sym_internal(cptr,mask,val,
579 buf,maxlen,len);
580 } while(0); LOCK_GIVE(cptr->hdw->big_lock);
581 return ret;
582}
583
584
585/*
586 Stuff for Emacs to see, in order to encourage consistent editing style:
587 *** Local Variables: ***
588 *** mode: c ***
589 *** fill-column: 75 ***
590 *** tab-width: 8 ***
591 *** c-basic-offset: 8 ***
592 *** End: ***
593 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-ctrl.h b/drivers/media/video/pvrusb2/pvrusb2-ctrl.h
new file mode 100644
index 0000000000..c1680053cd
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-ctrl.h
@@ -0,0 +1,123 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21#ifndef __PVRUSB2_CTRL_H
22#define __PVRUSB2_CTRL_H
23
24struct pvr2_ctrl;
25
/* Kind of value a pvr2_ctrl carries; selects which member of the control's
   def union is meaningful and how symbolic conversion behaves. */
enum pvr2_ctl_type {
	pvr2_ctl_int = 0,	/* integer with a min/max range */
	pvr2_ctl_enum = 1,	/* index into a table of value names */
	pvr2_ctl_bitmask = 2,	/* set of named bits within valid_bits */
	pvr2_ctl_bool = 3,	/* boolean; mask is always 1 */
};
32
33
34/* Set the given control. */
35int pvr2_ctrl_set_value(struct pvr2_ctrl *,int val);
36
37/* Set/clear specific bits of the given control. */
38int pvr2_ctrl_set_mask_value(struct pvr2_ctrl *,int mask,int val);
39
40/* Get the current value of the given control. */
41int pvr2_ctrl_get_value(struct pvr2_ctrl *,int *valptr);
42
43/* Retrieve control's type */
44enum pvr2_ctl_type pvr2_ctrl_get_type(struct pvr2_ctrl *);
45
46/* Retrieve control's maximum value (int type) */
47int pvr2_ctrl_get_max(struct pvr2_ctrl *);
48
49/* Retrieve control's minimum value (int type) */
50int pvr2_ctrl_get_min(struct pvr2_ctrl *);
51
52/* Retrieve control's default value (any type) */
53int pvr2_ctrl_get_def(struct pvr2_ctrl *);
54
55/* Retrieve control's enumeration count (enum only) */
56int pvr2_ctrl_get_cnt(struct pvr2_ctrl *);
57
58/* Retrieve control's valid mask bits (bit mask only) */
59int pvr2_ctrl_get_mask(struct pvr2_ctrl *);
60
61/* Retrieve the control's name */
62const char *pvr2_ctrl_get_name(struct pvr2_ctrl *);
63
64/* Retrieve the control's desc */
65const char *pvr2_ctrl_get_desc(struct pvr2_ctrl *);
66
67/* Retrieve a control enumeration or bit mask value */
68int pvr2_ctrl_get_valname(struct pvr2_ctrl *,int,char *,unsigned int,
69 unsigned int *);
70
71/* Return true if control is writable */
72int pvr2_ctrl_is_writable(struct pvr2_ctrl *);
73
74/* Return V4L flags value for control (or zero if there is no v4l control
75 actually under this control) */
76unsigned int pvr2_ctrl_get_v4lflags(struct pvr2_ctrl *);
77
78/* Return V4L ID for this control or zero if none */
79int pvr2_ctrl_get_v4lid(struct pvr2_ctrl *);
80
81/* Return true if control has custom symbolic representation */
82int pvr2_ctrl_has_custom_symbols(struct pvr2_ctrl *);
83
84/* Convert a given mask/val to a custom symbolic value */
85int pvr2_ctrl_custom_value_to_sym(struct pvr2_ctrl *,
86 int mask,int val,
87 char *buf,unsigned int maxlen,
88 unsigned int *len);
89
90/* Convert a symbolic value to a mask/value pair */
91int pvr2_ctrl_custom_sym_to_value(struct pvr2_ctrl *,
92 const char *buf,unsigned int len,
93 int *maskptr,int *valptr);
94
95/* Convert a given mask/val to a symbolic value */
96int pvr2_ctrl_value_to_sym(struct pvr2_ctrl *,
97 int mask,int val,
98 char *buf,unsigned int maxlen,
99 unsigned int *len);
100
101/* Convert a symbolic value to a mask/value pair */
102int pvr2_ctrl_sym_to_value(struct pvr2_ctrl *,
103 const char *buf,unsigned int len,
104 int *maskptr,int *valptr);
105
106/* Convert a given mask/val to a symbolic value - must already be
107 inside of critical region. */
108int pvr2_ctrl_value_to_sym_internal(struct pvr2_ctrl *,
109 int mask,int val,
110 char *buf,unsigned int maxlen,
111 unsigned int *len);
112
113#endif /* __PVRUSB2_CTRL_H */
114
115/*
116 Stuff for Emacs to see, in order to encourage consistent editing style:
117 *** Local Variables: ***
118 *** mode: c ***
119 *** fill-column: 75 ***
120 *** tab-width: 8 ***
121 *** c-basic-offset: 8 ***
122 *** End: ***
123 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c
new file mode 100644
index 0000000000..27eadaff75
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c
@@ -0,0 +1,279 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23/*
24
25 This source file is specifically designed to interface with the
26 cx2584x, in kernels 2.6.16 or newer.
27
28*/
29
30#include "pvrusb2-cx2584x-v4l.h"
31#include "pvrusb2-video-v4l.h"
32#include "pvrusb2-i2c-cmd-v4l2.h"
33
34
35#include "pvrusb2-hdw-internal.h"
36#include "pvrusb2-debug.h"
37#include <media/cx25840.h>
38#include <linux/videodev2.h>
39#include <media/v4l2-common.h>
40#include <linux/errno.h>
41#include <linux/slab.h>
42
/* Per-attachment state tying the pvrusb2 driver core to a cx2584x
   combined audio/video decoder reached over I2C. */
struct pvr2_v4l_cx2584x {
	struct pvr2_i2c_handler handler;	/* registered on the i2c client */
	struct pvr2_decoder_ctrl ctrl;		/* decoder control vtable given to hdw */
	struct pvr2_i2c_client *client;		/* the chip's i2c client */
	struct pvr2_hdw *hdw;			/* owning hardware context */
	unsigned long stale_mask;		/* bit N set => decoder_ops[N] needs update */
};
50
51
52static void set_input(struct pvr2_v4l_cx2584x *ctxt)
53{
54 struct pvr2_hdw *hdw = ctxt->hdw;
55 struct v4l2_routing route;
56 enum cx25840_video_input vid_input;
57 enum cx25840_audio_input aud_input;
58
59 memset(&route,0,sizeof(route));
60
61 switch(hdw->input_val) {
62 case PVR2_CVAL_INPUT_TV:
63 vid_input = CX25840_COMPOSITE7;
64 aud_input = CX25840_AUDIO8;
65 break;
66 case PVR2_CVAL_INPUT_COMPOSITE:
67 vid_input = CX25840_COMPOSITE3;
68 aud_input = CX25840_AUDIO_SERIAL;
69 break;
70 case PVR2_CVAL_INPUT_SVIDEO:
71 vid_input = CX25840_SVIDEO1;
72 aud_input = CX25840_AUDIO_SERIAL;
73 break;
74 case PVR2_CVAL_INPUT_RADIO:
75 default:
76 // Just set it to be composite input for now...
77 vid_input = CX25840_COMPOSITE3;
78 aud_input = CX25840_AUDIO_SERIAL;
79 break;
80 }
81
82 pvr2_trace(PVR2_TRACE_CHIPS,"i2c cx2584x set_input vid=0x%x aud=0x%x",
83 vid_input,aud_input);
84 route.input = (u32)vid_input;
85 pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_S_VIDEO_ROUTING,&route);
86 route.input = (u32)aud_input;
87 pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_S_AUDIO_ROUTING,&route);
88}
89
90
91static int check_input(struct pvr2_v4l_cx2584x *ctxt)
92{
93 struct pvr2_hdw *hdw = ctxt->hdw;
94 return hdw->input_dirty != 0;
95}
96
97
98static void set_audio(struct pvr2_v4l_cx2584x *ctxt)
99{
100 u32 val;
101 struct pvr2_hdw *hdw = ctxt->hdw;
102
103 pvr2_trace(PVR2_TRACE_CHIPS,"i2c cx2584x set_audio %d",
104 hdw->srate_val);
105 switch (hdw->srate_val) {
106 default:
107 case V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000:
108 val = 48000;
109 break;
110 case V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100:
111 val = 44100;
112 break;
113 case V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000:
114 val = 32000;
115 break;
116 }
117 pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_AUDIO_CLOCK_FREQ,&val);
118}
119
120
121static int check_audio(struct pvr2_v4l_cx2584x *ctxt)
122{
123 struct pvr2_hdw *hdw = ctxt->hdw;
124 return hdw->srate_dirty != 0;
125}
126
127
/* One entry per decoder attribute: check() reports whether the attribute
   is stale; update() pushes it to the chip. */
struct pvr2_v4l_cx2584x_ops {
	void (*update)(struct pvr2_v4l_cx2584x *);
	int (*check)(struct pvr2_v4l_cx2584x *);
};
132
133
/* Attribute table; index position defines the bit used in stale_mask. */
static const struct pvr2_v4l_cx2584x_ops decoder_ops[] = {
	{ .update = set_input, .check = check_input},
	{ .update = set_audio, .check = check_audio},
};
138
139
140static void decoder_detach(struct pvr2_v4l_cx2584x *ctxt)
141{
142 ctxt->client->handler = 0;
143 ctxt->hdw->decoder_ctrl = 0;
144 kfree(ctxt);
145}
146
147
148static int decoder_check(struct pvr2_v4l_cx2584x *ctxt)
149{
150 unsigned long msk;
151 unsigned int idx;
152
153 for (idx = 0; idx < sizeof(decoder_ops)/sizeof(decoder_ops[0]);
154 idx++) {
155 msk = 1 << idx;
156 if (ctxt->stale_mask & msk) continue;
157 if (decoder_ops[idx].check(ctxt)) {
158 ctxt->stale_mask |= msk;
159 }
160 }
161 return ctxt->stale_mask != 0;
162}
163
164
165static void decoder_update(struct pvr2_v4l_cx2584x *ctxt)
166{
167 unsigned long msk;
168 unsigned int idx;
169
170 for (idx = 0; idx < sizeof(decoder_ops)/sizeof(decoder_ops[0]);
171 idx++) {
172 msk = 1 << idx;
173 if (!(ctxt->stale_mask & msk)) continue;
174 ctxt->stale_mask &= ~msk;
175 decoder_ops[idx].update(ctxt);
176 }
177}
178
179
180static void decoder_enable(struct pvr2_v4l_cx2584x *ctxt,int fl)
181{
182 pvr2_trace(PVR2_TRACE_CHIPS,"i2c cx25840 decoder_enable(%d)",fl);
183 pvr2_v4l2_cmd_stream(ctxt->client,fl);
184}
185
186
187static int decoder_detect(struct pvr2_i2c_client *cp)
188{
189 int ret;
190 /* Attempt to query the decoder - let's see if it will answer */
191 struct v4l2_queryctrl qc;
192
193 memset(&qc,0,sizeof(qc));
194
195 qc.id = V4L2_CID_BRIGHTNESS;
196
197 ret = pvr2_i2c_client_cmd(cp,VIDIOC_QUERYCTRL,&qc);
198 return ret == 0; /* Return true if it answered */
199}
200
201
202static int decoder_is_tuned(struct pvr2_v4l_cx2584x *ctxt)
203{
204 struct v4l2_tuner vt;
205 int ret;
206
207 memset(&vt,0,sizeof(vt));
208 ret = pvr2_i2c_client_cmd(ctxt->client,VIDIOC_G_TUNER,&vt);
209 if (ret < 0) return -EINVAL;
210 return vt.signal ? 1 : 0;
211}
212
213
/* Identify this handler for diagnostic listings; returns bytes written. */
static unsigned int decoder_describe(struct pvr2_v4l_cx2584x *ctxt,
				     char *buf,unsigned int cnt)
{
	unsigned int wrote;
	wrote = scnprintf(buf,cnt,"handler: pvrusb2-cx2584x-v4l");
	return wrote;
}
219
220
221static void decoder_reset(struct pvr2_v4l_cx2584x *ctxt)
222{
223 int ret;
224 ret = pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_RESET,0);
225 pvr2_trace(PVR2_TRACE_CHIPS,"i2c cx25840 decoder_reset (ret=%d)",ret);
226}
227
228
229const static struct pvr2_i2c_handler_functions hfuncs = {
230 .detach = (void (*)(void *))decoder_detach,
231 .check = (int (*)(void *))decoder_check,
232 .update = (void (*)(void *))decoder_update,
233 .describe = (unsigned int (*)(void *,char *,unsigned int))decoder_describe,
234};
235
236
237int pvr2_i2c_cx2584x_v4l_setup(struct pvr2_hdw *hdw,
238 struct pvr2_i2c_client *cp)
239{
240 struct pvr2_v4l_cx2584x *ctxt;
241
242 if (hdw->decoder_ctrl) return 0;
243 if (cp->handler) return 0;
244 if (!decoder_detect(cp)) return 0;
245
246 ctxt = kmalloc(sizeof(*ctxt),GFP_KERNEL);
247 if (!ctxt) return 0;
248 memset(ctxt,0,sizeof(*ctxt));
249
250 ctxt->handler.func_data = ctxt;
251 ctxt->handler.func_table = &hfuncs;
252 ctxt->ctrl.ctxt = ctxt;
253 ctxt->ctrl.detach = (void (*)(void *))decoder_detach;
254 ctxt->ctrl.enable = (void (*)(void *,int))decoder_enable;
255 ctxt->ctrl.tuned = (int (*)(void *))decoder_is_tuned;
256 ctxt->ctrl.force_reset = (void (*)(void*))decoder_reset;
257 ctxt->client = cp;
258 ctxt->hdw = hdw;
259 ctxt->stale_mask = (1 << (sizeof(decoder_ops)/
260 sizeof(decoder_ops[0]))) - 1;
261 hdw->decoder_ctrl = &ctxt->ctrl;
262 cp->handler = &ctxt->handler;
263 pvr2_trace(PVR2_TRACE_CHIPS,"i2c 0x%x cx2584x V4L2 handler set up",
264 cp->client->addr);
265 return !0;
266}
267
268
269
270
271/*
272 Stuff for Emacs to see, in order to encourage consistent editing style:
273 *** Local Variables: ***
274 *** mode: c ***
275 *** fill-column: 70 ***
276 *** tab-width: 8 ***
277 *** c-basic-offset: 8 ***
278 *** End: ***
279 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.h b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.h
new file mode 100644
index 0000000000..54b2844e7a
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.h
@@ -0,0 +1,53 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#ifndef __PVRUSB2_CX2584X_V4L_H
24#define __PVRUSB2_CX2584X_V4L_H
25
26/*
27
28 This module connects the pvrusb2 driver to the I2C chip level
29 driver which handles combined device audio & video processing.
30 This interface is used internally by the driver; higher level code
31 should only interact through the interface provided by
32 pvrusb2-hdw.h.
33
34*/
35
36
37
38#include "pvrusb2-i2c-core.h"
39
40int pvr2_i2c_cx2584x_v4l_setup(struct pvr2_hdw *,struct pvr2_i2c_client *);
41
42
43#endif /* __PVRUSB2_CX2584X_V4L_H */
44
45/*
46 Stuff for Emacs to see, in order to encourage consistent editing style:
47 *** Local Variables: ***
48 *** mode: c ***
49 *** fill-column: 70 ***
50 *** tab-width: 8 ***
51 *** c-basic-offset: 8 ***
52 *** End: ***
53 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-debug.h b/drivers/media/video/pvrusb2/pvrusb2-debug.h
new file mode 100644
index 0000000000..d95a8588e4
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-debug.h
@@ -0,0 +1,67 @@
1/*
2 * $Id$
3 *
4 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 *
19 */
20#ifndef __PVRUSB2_DEBUG_H
21#define __PVRUSB2_DEBUG_H
22
23extern int pvrusb2_debug;
24
25#define pvr2_trace(msk, fmt, arg...) do {if(msk & pvrusb2_debug) printk(KERN_INFO "pvrusb2: " fmt "\n", ##arg); } while (0)
26
27/* These are listed in *rough* order of decreasing usefulness and
28 increasing noise level. */
29#define PVR2_TRACE_INFO (1 << 0) // Normal messages
30#define PVR2_TRACE_ERROR_LEGS (1 << 1) // error messages
31#define PVR2_TRACE_TOLERANCE (1 << 2) // track tolerance-affected errors
32#define PVR2_TRACE_TRAP (1 << 3) // Trap & report misbehavior from app
33#define PVR2_TRACE_INIT (1 << 4) // misc initialization steps
34#define PVR2_TRACE_START_STOP (1 << 5) // Streaming start / stop
35#define PVR2_TRACE_CTL (1 << 6) // commit of control changes
36#define PVR2_TRACE_DEBUG (1 << 7) // Temporary debug code
37#define PVR2_TRACE_EEPROM (1 << 8) // eeprom parsing / report
38#define PVR2_TRACE_STRUCT (1 << 9) // internal struct creation
39#define PVR2_TRACE_OPEN_CLOSE (1 << 10) // application open / close
40#define PVR2_TRACE_CREG (1 << 11) // Main critical region entry / exit
41#define PVR2_TRACE_SYSFS (1 << 12) // Sysfs driven I/O
42#define PVR2_TRACE_FIRMWARE (1 << 13) // firmware upload actions
43#define PVR2_TRACE_CHIPS (1 << 14) // chip broadcast operation
44#define PVR2_TRACE_I2C (1 << 15) // I2C related stuff
45#define PVR2_TRACE_I2C_CMD (1 << 16) // Software commands to I2C modules
46#define PVR2_TRACE_I2C_CORE (1 << 17) // I2C core debugging
47#define PVR2_TRACE_I2C_TRAF (1 << 18) // I2C traffic through the adapter
48#define PVR2_TRACE_V4LIOCTL (1 << 19) // v4l ioctl details
49#define PVR2_TRACE_ENCODER (1 << 20) // mpeg2 encoder operation
50#define PVR2_TRACE_BUF_POOL (1 << 21) // Track buffer pool management
51#define PVR2_TRACE_BUF_FLOW (1 << 22) // Track buffer flow in system
52#define PVR2_TRACE_DATA_FLOW (1 << 23) // Track data flow
53#define PVR2_TRACE_DEBUGIFC (1 << 24) // Debug interface actions
54#define PVR2_TRACE_GPIO (1 << 25) // GPIO state bit changes
55
56
57#endif /* __PVRUSB2_HDW_INTERNAL_H */
58
59/*
60 Stuff for Emacs to see, in order to encourage consistent editing style:
61 *** Local Variables: ***
62 *** mode: c ***
63 *** fill-column: 75 ***
64 *** tab-width: 8 ***
65 *** c-basic-offset: 8 ***
66 *** End: ***
67 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-debugifc.c b/drivers/media/video/pvrusb2/pvrusb2-debugifc.c
new file mode 100644
index 0000000000..586900e365
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-debugifc.c
@@ -0,0 +1,478 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21
22#include <linux/string.h>
23#include <linux/slab.h>
24#include "pvrusb2-debugifc.h"
25#include "pvrusb2-hdw.h"
26#include "pvrusb2-debug.h"
27#include "pvrusb2-i2c-core.h"
28
/* Maps a debug-interface keyword to the subsystem bit it controls. */
struct debugifc_mask_item {
	const char *name;	/* keyword accepted from the user */
	unsigned long msk;	/* corresponding subsystem bit */
};
33
/* Keyword table consulted by debugifc_find_mask(); one entry per
   controllable subsystem bit. */
static struct debugifc_mask_item mask_items[] = {
	{"ENC_FIRMWARE",(1<<PVR2_SUBSYS_B_ENC_FIRMWARE)},
	{"ENC_CFG",(1<<PVR2_SUBSYS_B_ENC_CFG)},
	{"DIG_RUN",(1<<PVR2_SUBSYS_B_DIGITIZER_RUN)},
	{"USB_RUN",(1<<PVR2_SUBSYS_B_USBSTREAM_RUN)},
	{"ENC_RUN",(1<<PVR2_SUBSYS_B_ENC_RUN)},
};
41
42
/* Return the number of leading whitespace characters (space, tab,
   newline) in buf, scanning at most count bytes. */
static unsigned int debugifc_count_whitespace(const char *buf,
					      unsigned int count)
{
	unsigned int pos = 0;

	while (pos < count) {
		char c = buf[pos];
		if ((c != ' ') && (c != '\t') && (c != '\n')) break;
		pos++;
	}
	return pos;
}
58
59
/* Return the length of the leading run of non-whitespace characters in
   buf, scanning at most count bytes. */
static unsigned int debugifc_count_nonwhitespace(const char *buf,
						 unsigned int count)
{
	unsigned int pos;

	for (pos = 0; pos < count; pos++) {
		switch (buf[pos]) {
		case ' ':
		case '\t':
		case '\n':
			return pos;
		default:
			break;
		}
	}
	return pos;
}
74
75
/* Locate the first whitespace-delimited word in buf[0..count).  On return
   *wstrPtr points at the word (NULL if none was found) and *wlenPtr holds
   its length.  The return value is the number of bytes consumed (leading
   whitespace plus the word itself). */
static unsigned int debugifc_isolate_word(const char *buf,unsigned int count,
					  const char **wstrPtr,
					  unsigned int *wlenPtr)
{
	const char *wptr;
	unsigned int consume_cnt = 0;
	unsigned int wlen;
	unsigned int scnt;

	wptr = NULL;	/* was 0; use NULL for pointers per kernel style */
	wlen = 0;
	scnt = debugifc_count_whitespace(buf,count);
	consume_cnt += scnt; count -= scnt; buf += scnt;
	if (!count) goto done;

	scnt = debugifc_count_nonwhitespace(buf,count);
	if (!scnt) goto done;
	wptr = buf;
	wlen = scnt;
	/* Original also adjusted count/buf here, but those values are
	   never read again; the dead stores have been dropped. */
	consume_cnt += scnt;

 done:
	*wstrPtr = wptr;
	*wlenPtr = wlen;
	return consume_cnt;
}
102
103
104static int debugifc_parse_unsigned_number(const char *buf,unsigned int count,
105 u32 *num_ptr)
106{
107 u32 result = 0;
108 u32 val;
109 int ch;
110 int radix = 10;
111 if ((count >= 2) && (buf[0] == '0') &&
112 ((buf[1] == 'x') || (buf[1] == 'X'))) {
113 radix = 16;
114 count -= 2;
115 buf += 2;
116 } else if ((count >= 1) && (buf[0] == '0')) {
117 radix = 8;
118 }
119
120 while (count--) {
121 ch = *buf++;
122 if ((ch >= '0') && (ch <= '9')) {
123 val = ch - '0';
124 } else if ((ch >= 'a') && (ch <= 'f')) {
125 val = ch - 'a' + 10;
126 } else if ((ch >= 'A') && (ch <= 'F')) {
127 val = ch - 'A' + 10;
128 } else {
129 return -EINVAL;
130 }
131 if (val >= radix) return -EINVAL;
132 result *= radix;
133 result += val;
134 }
135 *num_ptr = result;
136 return 0;
137}
138
139
/* Nonzero when buf[0..count) is exactly the given keyword; a NULL
   keyword never matches. */
static int debugifc_match_keyword(const char *buf,unsigned int count,
				  const char *keyword)
{
	unsigned int klen;

	if (!keyword) return 0;
	klen = strlen(keyword);
	if (klen != count) return 0;
	return memcmp(buf,keyword,klen) == 0;
}
149
150
151static unsigned long debugifc_find_mask(const char *buf,unsigned int count)
152{
153 struct debugifc_mask_item *mip;
154 unsigned int idx;
155 for (idx = 0; idx < sizeof(mask_items)/sizeof(mask_items[0]); idx++) {
156 mip = mask_items + idx;
157 if (debugifc_match_keyword(buf,count,mip->name)) {
158 return mip->msk;
159 }
160 }
161 return 0;
162}
163
164
165static int debugifc_print_mask(char *buf,unsigned int sz,
166 unsigned long msk,unsigned long val)
167{
168 struct debugifc_mask_item *mip;
169 unsigned int idx;
170 int bcnt = 0;
171 int ccnt;
172 for (idx = 0; idx < sizeof(mask_items)/sizeof(mask_items[0]); idx++) {
173 mip = mask_items + idx;
174 if (!(mip->msk & msk)) continue;
175 ccnt = scnprintf(buf,sz,"%s%c%s",
176 (bcnt ? " " : ""),
177 ((mip->msk & val) ? '+' : '-'),
178 mip->name);
179 sz -= ccnt;
180 buf += ccnt;
181 bcnt += ccnt;
182 }
183 return bcnt;
184}
185
/* Parse a list of subsystem keywords, each optionally prefixed with '+'
   (enable) or '-' (disable), accumulating a mask of mentioned bits in
   *mskPtr and their desired states in *valPtr.  Parsing stops at the
   first unrecognized word.  Returns the number of input bytes consumed
   (callers compare this against count to detect parse errors). */
static unsigned int debugifc_parse_subsys_mask(const char *buf,
					       unsigned int count,
					       unsigned long *mskPtr,
					       unsigned long *valPtr)
{
	const char *wptr;
	unsigned int consume_cnt = 0;
	unsigned int scnt;
	unsigned int wlen;
	int mode;	/* 0 = enable ('+' or bare), 1 = disable ('-') */
	unsigned long m1,msk,val;

	msk = 0;
	val = 0;

	while (count) {
		scnt = debugifc_isolate_word(buf,count,&wptr,&wlen);
		if (!scnt) break;
		consume_cnt += scnt; count -= scnt; buf += scnt;
		if (!wptr) break;	/* only whitespace remained */

		mode = 0;
		/* Strip an optional leading sign from the word */
		if (wlen) switch (wptr[0]) {
		case '+':
			wptr++;
			wlen--;
			break;
		case '-':
			mode = 1;
			wptr++;
			wlen--;
			break;
		}
		if (!wlen) continue;	/* a bare sign; skip it */
		m1 = debugifc_find_mask(wptr,wlen);
		if (!m1) break;		/* unknown keyword ends parsing */
		msk |= m1;
		if (!mode) val |= m1;
	}
	*mskPtr = msk;
	*valPtr = val;
	return consume_cnt;
}
229
230
/* Render a non-intrusive snapshot of driver state into buf (at most acnt
   bytes): lock states, command-engine details if the ctl lock is held,
   driver flags, subsystem configuration, and attached i2c modules.
   Returns the number of bytes written.  Each scnprintf/helper call below
   advances buf and shrinks acnt, so statement order is significant. */
int pvr2_debugifc_print_info(struct pvr2_hdw *hdw,char *buf,unsigned int acnt)
{
	int bcnt = 0;
	int ccnt;
	struct pvr2_hdw_debug_info dbg;

	pvr2_hdw_get_debug_info(hdw,&dbg);

	ccnt = scnprintf(buf,acnt,"big lock %s; ctl lock %s",
			 (dbg.big_lock_held ? "held" : "free"),
			 (dbg.ctl_lock_held ? "held" : "free"));
	bcnt += ccnt; acnt -= ccnt; buf += ccnt;
	if (dbg.ctl_lock_held) {
		/* Command engine internals are only meaningful while the
		   ctl lock is held by someone. */
		ccnt = scnprintf(buf,acnt,"; cmd_state=%d cmd_code=%d"
				 " cmd_wlen=%d cmd_rlen=%d"
				 " wpend=%d rpend=%d tmout=%d rstatus=%d"
				 " wstatus=%d",
				 dbg.cmd_debug_state,dbg.cmd_code,
				 dbg.cmd_debug_write_len,
				 dbg.cmd_debug_read_len,
				 dbg.cmd_debug_write_pend,
				 dbg.cmd_debug_read_pend,
				 dbg.cmd_debug_timeout,
				 dbg.cmd_debug_rstatus,
				 dbg.cmd_debug_wstatus);
		bcnt += ccnt; acnt -= ccnt; buf += ccnt;
	}
	ccnt = scnprintf(buf,acnt,"\n");
	bcnt += ccnt; acnt -= ccnt; buf += ccnt;
	ccnt = scnprintf(
		buf,acnt,"driver flags: %s %s %s\n",
		(dbg.flag_init_ok ? "initialized" : "uninitialized"),
		(dbg.flag_ok ? "ok" : "fail"),
		(dbg.flag_disconnected ? "disconnected" : "connected"));
	bcnt += ccnt; acnt -= ccnt; buf += ccnt;
	ccnt = scnprintf(buf,acnt,"Subsystems enabled / configured: ");
	bcnt += ccnt; acnt -= ccnt; buf += ccnt;
	ccnt = debugifc_print_mask(buf,acnt,dbg.subsys_flags,dbg.subsys_flags);
	bcnt += ccnt; acnt -= ccnt; buf += ccnt;
	ccnt = scnprintf(buf,acnt,"\n");
	bcnt += ccnt; acnt -= ccnt; buf += ccnt;
	ccnt = scnprintf(buf,acnt,"Subsystems disabled / unconfigured: ");
	bcnt += ccnt; acnt -= ccnt; buf += ccnt;
	/* Inverted mask selects the bits that are currently clear */
	ccnt = debugifc_print_mask(buf,acnt,~dbg.subsys_flags,dbg.subsys_flags);
	bcnt += ccnt; acnt -= ccnt; buf += ccnt;
	ccnt = scnprintf(buf,acnt,"\n");
	bcnt += ccnt; acnt -= ccnt; buf += ccnt;

	ccnt = scnprintf(buf,acnt,"Attached I2C modules:\n");
	bcnt += ccnt; acnt -= ccnt; buf += ccnt;
	ccnt = pvr2_i2c_report(hdw,buf,acnt);
	bcnt += ccnt; acnt -= ccnt; buf += ccnt;

	return bcnt;
}
286
287
/* Render an actively-probed status report (USB speed, GPIO state,
   streaming state, subsystem masks) into buf.  Unlike print_info() this
   queries the hardware, so it may block.  Returns bytes written; the
   buf/acnt accounting after each call is order-sensitive. */
int pvr2_debugifc_print_status(struct pvr2_hdw *hdw,
			       char *buf,unsigned int acnt)
{
	int bcnt = 0;
	int ccnt;
	unsigned long msk;
	int ret;
	u32 gpio_dir,gpio_in,gpio_out;

	/* negative = probe failed; nonzero = high speed; zero = full */
	ret = pvr2_hdw_is_hsm(hdw);
	ccnt = scnprintf(buf,acnt,"USB link speed: %s\n",
			 (ret < 0 ? "FAIL" : (ret ? "high" : "full")));
	bcnt += ccnt; acnt -= ccnt; buf += ccnt;

	gpio_dir = 0; gpio_in = 0; gpio_out = 0;
	pvr2_hdw_gpio_get_dir(hdw,&gpio_dir);
	pvr2_hdw_gpio_get_out(hdw,&gpio_out);
	pvr2_hdw_gpio_get_in(hdw,&gpio_in);
	ccnt = scnprintf(buf,acnt,"GPIO state: dir=0x%x in=0x%x out=0x%x\n",
			 gpio_dir,gpio_in,gpio_out);
	bcnt += ccnt; acnt -= ccnt; buf += ccnt;

	ccnt = scnprintf(buf,acnt,"Streaming is %s\n",
			 pvr2_hdw_get_streaming(hdw) ? "on" : "off");
	bcnt += ccnt; acnt -= ccnt; buf += ccnt;

	msk = pvr2_hdw_subsys_get(hdw);
	ccnt = scnprintf(buf,acnt,"Subsystems enabled / configured: ");
	bcnt += ccnt; acnt -= ccnt; buf += ccnt;
	ccnt = debugifc_print_mask(buf,acnt,msk,msk);
	bcnt += ccnt; acnt -= ccnt; buf += ccnt;
	ccnt = scnprintf(buf,acnt,"\n");
	bcnt += ccnt; acnt -= ccnt; buf += ccnt;
	ccnt = scnprintf(buf,acnt,"Subsystems disabled / unconfigured: ");
	bcnt += ccnt; acnt -= ccnt; buf += ccnt;
	ccnt = debugifc_print_mask(buf,acnt,~msk,msk);
	bcnt += ccnt; acnt -= ccnt; buf += ccnt;
	ccnt = scnprintf(buf,acnt,"\n");
	bcnt += ccnt; acnt -= ccnt; buf += ccnt;

	msk = pvr2_hdw_subsys_stream_get(hdw);
	ccnt = scnprintf(buf,acnt,"Subsystems stopped on stream shutdown: ");
	bcnt += ccnt; acnt -= ccnt; buf += ccnt;
	ccnt = debugifc_print_mask(buf,acnt,msk,msk);
	bcnt += ccnt; acnt -= ccnt; buf += ccnt;
	ccnt = scnprintf(buf,acnt,"\n");
	bcnt += ccnt; acnt -= ccnt; buf += ccnt;

	return bcnt;
}
338
339
/* Execute one debug-interface command line.  The first word selects the
   command ("reset", "subsys_flags", "stream_flags", "cpufw", "gpio");
   remaining words are arguments.  Returns 0 on success (or an empty
   line), a negative errno on failure. */
int pvr2_debugifc_do1cmd(struct pvr2_hdw *hdw,const char *buf,
			 unsigned int count)
{
	const char *wptr;
	unsigned int wlen;
	unsigned int scnt;

	scnt = debugifc_isolate_word(buf,count,&wptr,&wlen);
	if (!scnt) return 0;
	count -= scnt; buf += scnt;
	if (!wptr) return 0;	/* blank line is a no-op */

	pvr2_trace(PVR2_TRACE_DEBUGIFC,"debugifc cmd: \"%.*s\"",wlen,wptr);
	if (debugifc_match_keyword(wptr,wlen,"reset")) {
		/* Second word selects what to reset */
		scnt = debugifc_isolate_word(buf,count,&wptr,&wlen);
		if (!scnt) return -EINVAL;
		count -= scnt; buf += scnt;
		if (!wptr) return -EINVAL;
		if (debugifc_match_keyword(wptr,wlen,"cpu")) {
			/* Pulse the cpu reset line */
			pvr2_hdw_cpureset_assert(hdw,!0);
			pvr2_hdw_cpureset_assert(hdw,0);
			return 0;
		} else if (debugifc_match_keyword(wptr,wlen,"bus")) {
			/* NOTE(review): this branch falls through to the
			   "return -EINVAL" below even though the reset was
			   issued — looks unintentional; confirm whether it
			   should return 0 like its siblings. */
			pvr2_hdw_device_reset(hdw);
		} else if (debugifc_match_keyword(wptr,wlen,"soft")) {
			return pvr2_hdw_cmd_powerup(hdw);
		} else if (debugifc_match_keyword(wptr,wlen,"deep")) {
			return pvr2_hdw_cmd_deep_reset(hdw);
		} else if (debugifc_match_keyword(wptr,wlen,"firmware")) {
			return pvr2_upload_firmware2(hdw);
		} else if (debugifc_match_keyword(wptr,wlen,"decoder")) {
			return pvr2_hdw_cmd_decoder_reset(hdw);
		}
		return -EINVAL;
	} else if (debugifc_match_keyword(wptr,wlen,"subsys_flags")) {
		unsigned long msk = 0;
		unsigned long val = 0;
		/* The whole remainder must parse; partial consumption
		   indicates a bad keyword. */
		if (debugifc_parse_subsys_mask(buf,count,&msk,&val) != count) {
			pvr2_trace(PVR2_TRACE_DEBUGIFC,
				   "debugifc parse error on subsys mask");
			return -EINVAL;
		}
		pvr2_hdw_subsys_bit_chg(hdw,msk,val);
		return 0;
	} else if (debugifc_match_keyword(wptr,wlen,"stream_flags")) {
		unsigned long msk = 0;
		unsigned long val = 0;
		if (debugifc_parse_subsys_mask(buf,count,&msk,&val) != count) {
			pvr2_trace(PVR2_TRACE_DEBUGIFC,
				   "debugifc parse error on stream mask");
			return -EINVAL;
		}
		pvr2_hdw_subsys_stream_bit_chg(hdw,msk,val);
		return 0;
	} else if (debugifc_match_keyword(wptr,wlen,"cpufw")) {
		/* Enable / disable firmware capture mode */
		scnt = debugifc_isolate_word(buf,count,&wptr,&wlen);
		if (!scnt) return -EINVAL;
		count -= scnt; buf += scnt;
		if (!wptr) return -EINVAL;
		if (debugifc_match_keyword(wptr,wlen,"fetch")) {
			pvr2_hdw_cpufw_set_enabled(hdw,!0);
			return 0;
		} else if (debugifc_match_keyword(wptr,wlen,"done")) {
			pvr2_hdw_cpufw_set_enabled(hdw,0);
			return 0;
		} else {
			return -EINVAL;
		}
	} else if (debugifc_match_keyword(wptr,wlen,"gpio")) {
		int dir_fl = 0;
		int ret;
		u32 msk,val;
		/* Syntax: gpio {dir|out} <mask> [<value>] */
		scnt = debugifc_isolate_word(buf,count,&wptr,&wlen);
		if (!scnt) return -EINVAL;
		count -= scnt; buf += scnt;
		if (!wptr) return -EINVAL;
		if (debugifc_match_keyword(wptr,wlen,"dir")) {
			dir_fl = !0;
		} else if (!debugifc_match_keyword(wptr,wlen,"out")) {
			return -EINVAL;
		}
		scnt = debugifc_isolate_word(buf,count,&wptr,&wlen);
		if (!scnt) return -EINVAL;
		count -= scnt; buf += scnt;
		if (!wptr) return -EINVAL;
		ret = debugifc_parse_unsigned_number(wptr,wlen,&msk);
		if (ret) return ret;
		scnt = debugifc_isolate_word(buf,count,&wptr,&wlen);
		if (wptr) {
			ret = debugifc_parse_unsigned_number(wptr,wlen,&val);
			if (ret) return ret;
		} else {
			/* Single number: treat it as the value, affecting
			   all bits */
			val = msk;
			msk = 0xffffffff;
		}
		if (dir_fl) {
			ret = pvr2_hdw_gpio_chg_dir(hdw,msk,val);
		} else {
			ret = pvr2_hdw_gpio_chg_out(hdw,msk,val);
		}
		return ret;
	}
	pvr2_trace(PVR2_TRACE_DEBUGIFC,
		   "debugifc failed to recognize cmd: \"%.*s\"",wlen,wptr);
	return -EINVAL;
}
446
447
/* Split the incoming buffer on newlines and hand each line to
   pvr2_debugifc_do1cmd(); stop and propagate the first failure. */
int pvr2_debugifc_docmd(struct pvr2_hdw *hdw,const char *buf,
			unsigned int count)
{
	int ret;

	while (count) {
		unsigned int llen = 0;
		while ((llen < count) && (buf[llen] != '\n')) llen++;

		ret = pvr2_debugifc_do1cmd(hdw,buf,llen);
		if (ret < 0) return ret;
		/* Also consume the newline itself, if one terminated the
		   line. */
		if (llen < count) llen++;
		buf += llen;
		count -= llen;
	}

	return 0;
}
468
469
470/*
471 Stuff for Emacs to see, in order to encourage consistent editing style:
472 *** Local Variables: ***
473 *** mode: c ***
474 *** fill-column: 75 ***
475 *** tab-width: 8 ***
476 *** c-basic-offset: 8 ***
477 *** End: ***
478 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-debugifc.h b/drivers/media/video/pvrusb2/pvrusb2-debugifc.h
new file mode 100644
index 0000000000..990b02d35d
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-debugifc.h
@@ -0,0 +1,53 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21#ifndef __PVRUSB2_DEBUGIFC_H
22#define __PVRUSB2_DEBUGIFC_H
23
24struct pvr2_hdw;
25
26/* Non-intrusively print some useful debugging info from inside the
27 driver. This should work even if the driver appears to be
28 wedged. */
29int pvr2_debugifc_print_info(struct pvr2_hdw *,
30 char *buf_ptr,unsigned int buf_size);
31
32/* Print general status of driver. This will also trigger a probe of
33 the USB link. Unlike print_info(), this one synchronizes with the
34 driver so the information should be self-consistent (but it will
35 hang if the driver is wedged). */
36int pvr2_debugifc_print_status(struct pvr2_hdw *,
37 char *buf_ptr,unsigned int buf_size);
38
39/* Parse a string command into a driver action. */
40int pvr2_debugifc_docmd(struct pvr2_hdw *,
41 const char *buf_ptr,unsigned int buf_size);
42
43#endif /* __PVRUSB2_DEBUGIFC_H */
44
45/*
46 Stuff for Emacs to see, in order to encourage consistent editing style:
47 *** Local Variables: ***
48 *** mode: c ***
49 *** fill-column: 75 ***
50 *** tab-width: 8 ***
51 *** c-basic-offset: 8 ***
52 *** End: ***
53 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-demod.c b/drivers/media/video/pvrusb2/pvrusb2-demod.c
new file mode 100644
index 0000000000..9686569a11
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-demod.c
@@ -0,0 +1,126 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include "pvrusb2.h"
24#include "pvrusb2-util.h"
25#include "pvrusb2-demod.h"
26#include "pvrusb2-hdw-internal.h"
27#include "pvrusb2-debug.h"
28#include <linux/videodev2.h>
29#include <media/tuner.h>
30#include <media/v4l2-common.h>
31
32
33struct pvr2_demod_handler {
34 struct pvr2_hdw *hdw;
35 struct pvr2_i2c_client *client;
36 struct pvr2_i2c_handler i2c_handler;
37 int type_update_fl;
38};
39
40
/* Push demod (tda9887) configuration appropriate for the currently
   selected tuner type out to the chip, then clear the pending-update
   flag.  Called from demod_update() when a change is pending. */
static void set_config(struct pvr2_demod_handler *ctxt)
{
	struct pvr2_hdw *hdw = ctxt->hdw;
	int cfg = 0;

	/* Only these two Philips tuners need the tda9887 ports driven;
	   every other tuner type gets a zero (default) config. */
	switch (hdw->tuner_type) {
	case TUNER_PHILIPS_FM1216ME_MK3:
	case TUNER_PHILIPS_FM1236_MK3:
		cfg = TDA9887_PORT1_ACTIVE|TDA9887_PORT2_ACTIVE;
		break;
	default:
		break;
	}
	pvr2_trace(PVR2_TRACE_CHIPS,"i2c demod set_config(0x%x)",cfg);
	pvr2_i2c_client_cmd(ctxt->client,TDA9887_SET_CONFIG,&cfg);
	/* Configuration is now in sync with the hardware state. */
	ctxt->type_update_fl = 0;
}
58
59
60static int demod_check(struct pvr2_demod_handler *ctxt)
61{
62 struct pvr2_hdw *hdw = ctxt->hdw;
63 if (hdw->tuner_updated) ctxt->type_update_fl = !0;
64 return ctxt->type_update_fl != 0;
65}
66
67
68static void demod_update(struct pvr2_demod_handler *ctxt)
69{
70 if (ctxt->type_update_fl) set_config(ctxt);
71}
72
73
74static void demod_detach(struct pvr2_demod_handler *ctxt)
75{
76 ctxt->client->handler = 0;
77 kfree(ctxt);
78}
79
80
/* Write a short human-readable identification of this handler into
   buf (at most cnt bytes); returns the number of characters emitted,
   per scnprintf() semantics. */
static unsigned int demod_describe(struct pvr2_demod_handler *ctxt,char *buf,unsigned int cnt)
{
	return scnprintf(buf,cnt,"handler: pvrusb2-demod");
}
85
86
87const static struct pvr2_i2c_handler_functions tuner_funcs = {
88 .detach = (void (*)(void *))demod_detach,
89 .check = (int (*)(void *))demod_check,
90 .update = (void (*)(void *))demod_update,
91 .describe = (unsigned int (*)(void *,char *,unsigned int))demod_describe,
92};
93
94
95int pvr2_i2c_demod_setup(struct pvr2_hdw *hdw,struct pvr2_i2c_client *cp)
96{
97 struct pvr2_demod_handler *ctxt;
98 if (cp->handler) return 0;
99
100 ctxt = kmalloc(sizeof(*ctxt),GFP_KERNEL);
101 if (!ctxt) return 0;
102 memset(ctxt,0,sizeof(*ctxt));
103
104 ctxt->i2c_handler.func_data = ctxt;
105 ctxt->i2c_handler.func_table = &tuner_funcs;
106 ctxt->type_update_fl = !0;
107 ctxt->client = cp;
108 ctxt->hdw = hdw;
109 cp->handler = &ctxt->i2c_handler;
110 pvr2_trace(PVR2_TRACE_CHIPS,"i2c 0x%x tda9887 V4L2 handler set up",
111 cp->client->addr);
112 return !0;
113}
114
115
116
117
118/*
119 Stuff for Emacs to see, in order to encourage consistent editing style:
120 *** Local Variables: ***
121 *** mode: c ***
122 *** fill-column: 70 ***
123 *** tab-width: 8 ***
124 *** c-basic-offset: 8 ***
125 *** End: ***
126 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-demod.h b/drivers/media/video/pvrusb2/pvrusb2-demod.h
new file mode 100644
index 0000000000..4c4e40ffbf
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-demod.h
@@ -0,0 +1,38 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21#ifndef __PVRUSB2_DEMOD_H
22#define __PVRUSB2_DEMOD_H
23
24#include "pvrusb2-i2c-core.h"
25
26int pvr2_i2c_demod_setup(struct pvr2_hdw *,struct pvr2_i2c_client *);
27
28#endif /* __PVRUSB2_DEMOD_H */
29
30/*
31 Stuff for Emacs to see, in order to encourage consistent editing style:
32 *** Local Variables: ***
33 *** mode: c ***
34 *** fill-column: 70 ***
35 *** tab-width: 8 ***
36 *** c-basic-offset: 8 ***
37 *** End: ***
38 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
new file mode 100644
index 0000000000..94d383ff98
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
@@ -0,0 +1,164 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include "pvrusb2-eeprom.h"
24#include "pvrusb2-hdw-internal.h"
25#include "pvrusb2-debug.h"
26
27#define trace_eeprom(...) pvr2_trace(PVR2_TRACE_EEPROM,__VA_ARGS__)
28
29
30
31/*
32
33 Read and analyze data in the eeprom. Use tveeprom to figure out
34 the packet structure, since this is another Hauppauge device and
35 internally it has a family resemblance to ivtv-type devices
36
37*/
38
39#include <media/tveeprom.h>
40
41/* We seem to only be interested in the last 128 bytes of the EEPROM */
42#define EEPROM_SIZE 128
43
/* Grab EEPROM contents, needed for direct method. */
/* Reads the last EEPROM_SIZE (128) bytes of the device eeprom over
   I2C in 16-byte chunks, returning a kmalloc'd buffer the caller must
   kfree, or 0 (NULL) on allocation or transfer failure. */
static u8 *pvr2_eeprom_fetch(struct pvr2_hdw *hdw)
{
	struct i2c_msg msg[2];		/* [0] = address write, [1] = data read */
	u8 *eeprom;
	u8 iadd[2];			/* eeprom-internal offset, 1 or 2 bytes */
	u8 addr;
	u16 eepromSize;
	unsigned int offs;
	int ret;
	int mode16 = 0;			/* non-zero => 16 bit internal addressing */
	unsigned pcnt,tcnt;
	eeprom = kmalloc(EEPROM_SIZE,GFP_KERNEL);
	if (!eeprom) {
		pvr2_trace(PVR2_TRACE_ERROR_LEGS,
			   "Failed to allocate memory"
			   " required to read eeprom");
		return 0;
	}

	trace_eeprom("Value for eeprom addr from controller was 0x%x",
		     hdw->eeprom_addr);
	addr = hdw->eeprom_addr;
	/* Seems that if the high bit is set, then the *real* eeprom
	   address is shifted right one bit position (noticed this in
	   newer PVR USB2 hardware) */
	if (addr & 0x80) addr >>= 1;

	/* FX2 documentation states that a 16bit-addressed eeprom is
	   expected if the I2C address is an odd number (yeah, this is
	   strange but it's what they do) */
	mode16 = (addr & 1);
	eepromSize = (mode16 ? 4096 : 256);
	trace_eeprom("Examining %d byte eeprom at location 0x%x"
		     " using %d bit addressing",eepromSize,addr,
		     mode16 ? 16 : 8);

	msg[0].addr = addr;
	msg[0].flags = 0;
	msg[0].len = mode16 ? 2 : 1;
	msg[0].buf = iadd;
	msg[1].addr = addr;
	msg[1].flags = I2C_M_RD;

	/* We have to do the actual eeprom data fetch ourselves, because
	   (1) we're only fetching part of the eeprom, and (2) if we were
	   getting the whole thing our I2C driver can't grab it in one
	   pass - which is what tveeprom is otherwise going to attempt */
	memset(eeprom,0,EEPROM_SIZE);
	for (tcnt = 0; tcnt < EEPROM_SIZE; tcnt += pcnt) {
		/* Read at most 16 bytes per transfer; clamp the final
		   chunk to the remaining byte count. */
		pcnt = 16;
		if (pcnt + tcnt > EEPROM_SIZE) pcnt = EEPROM_SIZE-tcnt;
		/* Offset within the eeprom: we want only the LAST
		   EEPROM_SIZE bytes of the part. */
		offs = tcnt + (eepromSize - EEPROM_SIZE);
		if (mode16) {
			iadd[0] = offs >> 8;	/* big-endian internal address */
			iadd[1] = offs;
		} else {
			iadd[0] = offs;
		}
		msg[1].len = pcnt;
		msg[1].buf = eeprom+tcnt;
		if ((ret = i2c_transfer(
			     &hdw->i2c_adap,
			     msg,sizeof(msg)/sizeof(msg[0]))) != 2) {
			/* i2c_transfer returns the number of messages
			   completed; anything other than 2 is a failure. */
			pvr2_trace(PVR2_TRACE_ERROR_LEGS,
				   "eeprom fetch set offs err=%d",ret);
			kfree(eeprom);
			return 0;
		}
	}
	return eeprom;
}
116
117
118/* Directly call eeprom analysis function within tveeprom. */
119int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
120{
121 u8 *eeprom;
122 struct tveeprom tvdata;
123
124 memset(&tvdata,0,sizeof(tvdata));
125
126 eeprom = pvr2_eeprom_fetch(hdw);
127 if (!eeprom) return -EINVAL;
128
129 {
130 struct i2c_client fake_client;
131 /* Newer version expects a useless client interface */
132 fake_client.addr = hdw->eeprom_addr;
133 fake_client.adapter = &hdw->i2c_adap;
134 tveeprom_hauppauge_analog(&fake_client,&tvdata,eeprom);
135 }
136
137 trace_eeprom("eeprom assumed v4l tveeprom module");
138 trace_eeprom("eeprom direct call results:");
139 trace_eeprom("has_radio=%d",tvdata.has_radio);
140 trace_eeprom("tuner_type=%d",tvdata.tuner_type);
141 trace_eeprom("tuner_formats=0x%x",tvdata.tuner_formats);
142 trace_eeprom("audio_processor=%d",tvdata.audio_processor);
143 trace_eeprom("model=%d",tvdata.model);
144 trace_eeprom("revision=%d",tvdata.revision);
145 trace_eeprom("serial_number=%d",tvdata.serial_number);
146 trace_eeprom("rev_str=%s",tvdata.rev_str);
147 hdw->tuner_type = tvdata.tuner_type;
148 hdw->serial_number = tvdata.serial_number;
149 hdw->std_mask_eeprom = tvdata.tuner_formats;
150
151 kfree(eeprom);
152
153 return 0;
154}
155
156/*
157 Stuff for Emacs to see, in order to encourage consistent editing style:
158 *** Local Variables: ***
159 *** mode: c ***
160 *** fill-column: 70 ***
161 *** tab-width: 8 ***
162 *** c-basic-offset: 8 ***
163 *** End: ***
164 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.h b/drivers/media/video/pvrusb2/pvrusb2-eeprom.h
new file mode 100644
index 0000000000..84242975de
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.h
@@ -0,0 +1,40 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#ifndef __PVRUSB2_EEPROM_H
24#define __PVRUSB2_EEPROM_H
25
26struct pvr2_hdw;
27
28int pvr2_eeprom_analyze(struct pvr2_hdw *);
29
30#endif /* __PVRUSB2_EEPROM_H */
31
32/*
33 Stuff for Emacs to see, in order to encourage consistent editing style:
34 *** Local Variables: ***
35 *** mode: c ***
36 *** fill-column: 70 ***
37 *** tab-width: 8 ***
38 *** c-basic-offset: 8 ***
39 *** End: ***
40 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-encoder.c b/drivers/media/video/pvrusb2/pvrusb2-encoder.c
new file mode 100644
index 0000000000..2cc31695b4
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-encoder.c
@@ -0,0 +1,418 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/device.h> // for linux/firmware.h
24#include <linux/firmware.h>
25#include "pvrusb2-util.h"
26#include "pvrusb2-encoder.h"
27#include "pvrusb2-hdw-internal.h"
28#include "pvrusb2-debug.h"
29
30
31
32/* Firmware mailbox flags - definitions found from ivtv */
33#define IVTV_MBOX_FIRMWARE_DONE 0x00000004
34#define IVTV_MBOX_DRIVER_DONE 0x00000002
35#define IVTV_MBOX_DRIVER_BUSY 0x00000001
36
37
/* Write dlen 32-bit words to the encoder's mailbox region, starting at
   word offset zero, using the FX2 firmware's 7-bytes-per-word request
   format.  Returns 0 on success or the pvr2_send_request() error. */
static int pvr2_encoder_write_words(struct pvr2_hdw *hdw,
				    const u32 *data, unsigned int dlen)
{
	unsigned int idx;
	int ret;
	unsigned int offs = 0;	/* running word offset across requests */
	unsigned int chunkCnt;

	/*

	   Format: First byte must be 0x01.  Each remaining 32 bit word
	   occupies a 7 byte slot: the word's 4 bytes little-endian
	   ordered, followed by 2 blank (zero) bytes and a single byte
	   that is 0x44 plus the offset of the word.  Repeat the
	   request for additional words, with offset adjusted
	   accordingly.  (NOTE(review): slot layout description
	   reconstructed from the code below - confirm against FX2
	   firmware docs.)

	*/
	while (dlen) {
		/* At most 8 words per USB request. */
		chunkCnt = 8;
		if (chunkCnt > dlen) chunkCnt = dlen;
		memset(hdw->cmd_buffer,0,sizeof(hdw->cmd_buffer));
		hdw->cmd_buffer[0] = 0x01;
		for (idx = 0; idx < chunkCnt; idx++) {
			/* Tag byte encodes the absolute word offset. */
			hdw->cmd_buffer[1+(idx*7)+6] = 0x44 + idx + offs;
			PVR2_DECOMPOSE_LE(hdw->cmd_buffer, 1+(idx*7),
					  data[idx]);
		}
		ret = pvr2_send_request(hdw,
					hdw->cmd_buffer,1+(chunkCnt*7),
					0,0);
		if (ret) return ret;
		data += chunkCnt;
		dlen -= chunkCnt;
		offs += chunkCnt;
	}

	return 0;
}
77
78
/* Read dlen 32-bit words back from the encoder mailbox.  If statusFl
   is non-zero a status-check request (0x02) is issued instead of a
   block read (0x28).  Returns 0 on success or the pvr2_send_request()
   error. */
static int pvr2_encoder_read_words(struct pvr2_hdw *hdw,int statusFl,
				   u32 *data, unsigned int dlen)
{
	unsigned int idx;
	int ret;
	unsigned int offs = 0;	/* running word offset across requests */
	unsigned int chunkCnt;

	/*

	   Format: First byte must be 0x02 (status check) or 0x28 (read
	   back block of 32 bit words).  Next 6 bytes must be zero,
	   followed by a single byte of 0x44+offset for portion to be
	   read.  Returned data is packed set of 32 bits words that were
	   read.

	*/

	while (dlen) {
		/* At most 16 words (64 bytes) per USB request. */
		chunkCnt = 16;
		if (chunkCnt > dlen) chunkCnt = dlen;
		memset(hdw->cmd_buffer,0,sizeof(hdw->cmd_buffer));
		hdw->cmd_buffer[0] = statusFl ? 0x02 : 0x28;
		hdw->cmd_buffer[7] = 0x44 + offs;
		ret = pvr2_send_request(hdw,
					hdw->cmd_buffer,8,
					hdw->cmd_buffer,chunkCnt * 4);
		if (ret) return ret;

		/* Unpack the little-endian reply words. */
		for (idx = 0; idx < chunkCnt; idx++) {
			data[idx] = PVR2_COMPOSE_LE(hdw->cmd_buffer,idx*4);
		}
		data += chunkCnt;
		dlen -= chunkCnt;
		offs += chunkCnt;
	}

	return 0;
}
118
119
/* This prototype is set up to be compatible with the
   cx2341x_mbox_func prototype in cx2341x.h, which should be in
   kernels 2.6.18 or later.  We do this so that we can enable
   cx2341x.ko to write to our encoder (by handing it a pointer to this
   function). For earlier kernels this doesn't really matter. */
static int pvr2_encoder_cmd(void *ctxt,
			    int cmd,
			    int arg_cnt_send,
			    int arg_cnt_recv,
			    u32 *argp)
{
	unsigned int poll_count;
	int ret = 0;
	unsigned int idx;
	/* These sizes look to be limited by the FX2 firmware implementation */
	u32 wrData[16];
	u32 rdData[16];
	struct pvr2_hdw *hdw = (struct pvr2_hdw *)ctxt;


	/*

	   The encoder seems to speak entirely using blocks 32 bit words.
	   In ivtv driver terms, this is a mailbox which we populate with
	   data and watch what the hardware does with it.  The first word
	   is a set of flags used to control the transaction, the second
	   word is the command to execute, the third byte is zero (ivtv
	   driver suggests that this is some kind of return value), and
	   the fourth byte is a specified timeout (windows driver always
	   uses 0x00060000 except for one case when it is zero).  All
	   successive words are the argument words for the command.

	   First, write out the entire set of words, with the first word
	   being zero.

	   Next, write out just the first word again, but set it to
	   IVTV_MBOX_DRIVER_DONE | IVTV_DRIVER_BUSY this time (which
	   probably means "go").

	   Next, read back 16 words as status.  Check the first word,
	   which should have IVTV_MBOX_FIRMWARE_DONE set.  If however
	   that bit is not set, then the command isn't done so repeat the
	   read.

	   Next, read back 32 words and compare with the original
	   arguments.  Hopefully they will match.

	   Finally, write out just the first word again, but set it to
	   0x0 this time (which probably means "idle").

	*/

	/* Reject argument counts that would overflow the 16-word
	   mailbox (4 words are reserved for the header). */
	if (arg_cnt_send > (sizeof(wrData)/sizeof(wrData[0]))-4) {
		pvr2_trace(
			PVR2_TRACE_ERROR_LEGS,
			"Failed to write cx23416 command"
			" - too many input arguments"
			" (was given %u limit %u)",
			arg_cnt_send,
			(unsigned int)(sizeof(wrData)/sizeof(wrData[0])) - 4);
		return -EINVAL;
	}

	if (arg_cnt_recv > (sizeof(rdData)/sizeof(rdData[0]))-4) {
		pvr2_trace(
			PVR2_TRACE_ERROR_LEGS,
			"Failed to write cx23416 command"
			" - too many return arguments"
			" (was given %u limit %u)",
			arg_cnt_recv,
			(unsigned int)(sizeof(rdData)/sizeof(rdData[0])) - 4);
		return -EINVAL;
	}


	/* Whole mailbox transaction runs under the control lock. */
	LOCK_TAKE(hdw->ctl_lock); do {

		/* Build the mailbox: flags, command, return slot,
		   timeout, then the arguments, zero-padded. */
		wrData[0] = 0;
		wrData[1] = cmd;
		wrData[2] = 0;
		wrData[3] = 0x00060000;
		for (idx = 0; idx < arg_cnt_send; idx++) {
			wrData[idx+4] = argp[idx];
		}
		for (; idx < (sizeof(wrData)/sizeof(wrData[0]))-4; idx++) {
			wrData[idx+4] = 0;
		}

		ret = pvr2_encoder_write_words(hdw,wrData,idx);
		if (ret) break;
		/* Flip the "go" flags in word 0 only. */
		wrData[0] = IVTV_MBOX_DRIVER_DONE|IVTV_MBOX_DRIVER_BUSY;
		ret = pvr2_encoder_write_words(hdw,wrData,1);
		if (ret) break;
		/* Poll for completion; warn once at 100 iterations but
		   keep polling (the counter saturates at 10000000 so it
		   never wraps and re-triggers the warning). */
		poll_count = 0;
		while (1) {
			if (poll_count < 10000000) poll_count++;
			ret = pvr2_encoder_read_words(hdw,!0,rdData,1);
			if (ret) break;
			if (rdData[0] & IVTV_MBOX_FIRMWARE_DONE) {
				break;
			}
			if (poll_count == 100) {
				pvr2_trace(
					PVR2_TRACE_ERROR_LEGS,
					"***WARNING*** device's encoder"
					" appears to be stuck"
					" (status=0%08x)",rdData[0]);
				pvr2_trace(
					PVR2_TRACE_ERROR_LEGS,
					"Encoder command: 0x%02x",cmd);
				/* NOTE(review): this loop starts at
				   idx=4 but bounds on arg_cnt_send, so
				   it prints nothing when
				   arg_cnt_send <= 4 and never prints
				   the last 4 args otherwise; likely
				   intended bound is 4+arg_cnt_send. */
				for (idx = 4; idx < arg_cnt_send; idx++) {
					pvr2_trace(
						PVR2_TRACE_ERROR_LEGS,
						"Encoder arg%d: 0x%08x",
						idx-3,wrData[idx]);
				}
				pvr2_trace(
					PVR2_TRACE_ERROR_LEGS,
					"Giving up waiting."
					" It is likely that"
					" this is a bad idea...");
				ret = -EBUSY;
				break;
			}
		}
		if (ret) break;
		/* NOTE(review): this 0x7 value is never transmitted -
		   read_words doesn't use wrData and word 0 is
		   overwritten with 0x0 below; apparently a dead store. */
		wrData[0] = 0x7;
		ret = pvr2_encoder_read_words(
			hdw,0,rdData,
			sizeof(rdData)/sizeof(rdData[0]));
		if (ret) break;
		/* Copy out the command's return arguments. */
		for (idx = 0; idx < arg_cnt_recv; idx++) {
			argp[idx] = rdData[idx+4];
		}

		/* Return the mailbox to the "idle" state. */
		wrData[0] = 0x0;
		ret = pvr2_encoder_write_words(hdw,wrData,1);
		if (ret) break;

	} while(0); LOCK_GIVE(hdw->ctl_lock);

	return ret;
}
263
264
265static int pvr2_encoder_vcmd(struct pvr2_hdw *hdw, int cmd,
266 int args, ...)
267{
268 va_list vl;
269 unsigned int idx;
270 u32 data[12];
271
272 if (args > sizeof(data)/sizeof(data[0])) {
273 pvr2_trace(
274 PVR2_TRACE_ERROR_LEGS,
275 "Failed to write cx23416 command"
276 " - too many arguments"
277 " (was given %u limit %u)",
278 args,(unsigned int)(sizeof(data)/sizeof(data[0])));
279 return -EINVAL;
280 }
281
282 va_start(vl, args);
283 for (idx = 0; idx < args; idx++) {
284 data[idx] = va_arg(vl, u32);
285 }
286 va_end(vl);
287
288 return pvr2_encoder_cmd(hdw,cmd,args,0,data);
289}
290
291int pvr2_encoder_configure(struct pvr2_hdw *hdw)
292{
293 int ret;
294 pvr2_trace(PVR2_TRACE_ENCODER,"pvr2_encoder_configure"
295 " (cx2341x module)");
296 hdw->enc_ctl_state.port = CX2341X_PORT_STREAMING;
297 hdw->enc_ctl_state.width = hdw->res_hor_val;
298 hdw->enc_ctl_state.height = hdw->res_ver_val;
299 hdw->enc_ctl_state.is_50hz = ((hdw->std_mask_cur &
300 (V4L2_STD_NTSC|V4L2_STD_PAL_M)) ?
301 0 : 1);
302
303 ret = 0;
304
305 if (!ret) ret = pvr2_encoder_vcmd(
306 hdw,CX2341X_ENC_SET_NUM_VSYNC_LINES, 2,
307 0xf0, 0xf0);
308
309 /* setup firmware to notify us about some events (don't know why...) */
310 if (!ret) ret = pvr2_encoder_vcmd(
311 hdw,CX2341X_ENC_SET_EVENT_NOTIFICATION, 4,
312 0, 0, 0x10000000, 0xffffffff);
313
314 if (!ret) ret = pvr2_encoder_vcmd(
315 hdw,CX2341X_ENC_SET_VBI_LINE, 5,
316 0xffffffff,0,0,0,0);
317
318 if (ret) {
319 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
320 "Failed to configure cx32416");
321 return ret;
322 }
323
324 ret = cx2341x_update(hdw,pvr2_encoder_cmd,
325 (hdw->enc_cur_valid ? &hdw->enc_cur_state : 0),
326 &hdw->enc_ctl_state);
327 if (ret) {
328 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
329 "Error from cx2341x module code=%d",ret);
330 return ret;
331 }
332
333 ret = 0;
334
335 if (!ret) ret = pvr2_encoder_vcmd(
336 hdw, CX2341X_ENC_INITIALIZE_INPUT, 0);
337
338 if (ret) {
339 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
340 "Failed to initialize cx32416 video input");
341 return ret;
342 }
343
344 hdw->subsys_enabled_mask |= (1<<PVR2_SUBSYS_B_ENC_CFG);
345 memcpy(&hdw->enc_cur_state,&hdw->enc_ctl_state,
346 sizeof(struct cx2341x_mpeg_params));
347 hdw->enc_cur_valid = !0;
348 return 0;
349}
350
351
352int pvr2_encoder_start(struct pvr2_hdw *hdw)
353{
354 int status;
355
356 /* unmask some interrupts */
357 pvr2_write_register(hdw, 0x0048, 0xbfffffff);
358
359 /* change some GPIO data */
360 pvr2_hdw_gpio_chg_dir(hdw,0xffffffff,0x00000481);
361 pvr2_hdw_gpio_chg_out(hdw,0xffffffff,0x00000000);
362
363 if (hdw->config == pvr2_config_vbi) {
364 status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_START_CAPTURE,2,
365 0x01,0x14);
366 } else if (hdw->config == pvr2_config_mpeg) {
367 status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_START_CAPTURE,2,
368 0,0x13);
369 } else {
370 status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_START_CAPTURE,2,
371 0,0x13);
372 }
373 if (!status) {
374 hdw->subsys_enabled_mask |= (1<<PVR2_SUBSYS_B_ENC_RUN);
375 }
376 return status;
377}
378
379int pvr2_encoder_stop(struct pvr2_hdw *hdw)
380{
381 int status;
382
383 /* mask all interrupts */
384 pvr2_write_register(hdw, 0x0048, 0xffffffff);
385
386 if (hdw->config == pvr2_config_vbi) {
387 status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_STOP_CAPTURE,3,
388 0x01,0x01,0x14);
389 } else if (hdw->config == pvr2_config_mpeg) {
390 status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_STOP_CAPTURE,3,
391 0x01,0,0x13);
392 } else {
393 status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_STOP_CAPTURE,3,
394 0x01,0,0x13);
395 }
396
397 /* change some GPIO data */
398 /* Note: Bit d7 of dir appears to control the LED. So we shut it
399 off here. */
400 pvr2_hdw_gpio_chg_dir(hdw,0xffffffff,0x00000401);
401 pvr2_hdw_gpio_chg_out(hdw,0xffffffff,0x00000000);
402
403 if (!status) {
404 hdw->subsys_enabled_mask &= ~(1<<PVR2_SUBSYS_B_ENC_RUN);
405 }
406 return status;
407}
408
409
410/*
411 Stuff for Emacs to see, in order to encourage consistent editing style:
412 *** Local Variables: ***
413 *** mode: c ***
414 *** fill-column: 70 ***
415 *** tab-width: 8 ***
416 *** c-basic-offset: 8 ***
417 *** End: ***
418 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-encoder.h b/drivers/media/video/pvrusb2/pvrusb2-encoder.h
new file mode 100644
index 0000000000..01b5a0b89c
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-encoder.h
@@ -0,0 +1,42 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#ifndef __PVRUSB2_ENCODER_H
24#define __PVRUSB2_ENCODER_H
25
26struct pvr2_hdw;
27
28int pvr2_encoder_configure(struct pvr2_hdw *);
29int pvr2_encoder_start(struct pvr2_hdw *);
30int pvr2_encoder_stop(struct pvr2_hdw *);
31
32#endif /* __PVRUSB2_ENCODER_H */
33
34/*
35 Stuff for Emacs to see, in order to encourage consistent editing style:
36 *** Local Variables: ***
37 *** mode: c ***
38 *** fill-column: 70 ***
39 *** tab-width: 8 ***
40 *** c-basic-offset: 8 ***
41 *** End: ***
42 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
new file mode 100644
index 0000000000..ba2afbfe32
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
@@ -0,0 +1,384 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21#ifndef __PVRUSB2_HDW_INTERNAL_H
22#define __PVRUSB2_HDW_INTERNAL_H
23
24/*
25
26 This header sets up all the internal structures and definitions needed to
27 track and coordinate the driver's interaction with the hardware. ONLY
28 source files which actually implement part of that whole circus should be
29 including this header. Higher levels, like the external layers to the
30 various public APIs (V4L, sysfs, etc) should NOT ever include this
31 private, internal header. This means that pvrusb2-hdw, pvrusb2-encoder,
32 etc will include this, but pvrusb2-v4l should not.
33
34*/
35
36#include <linux/config.h>
37#include <linux/videodev2.h>
38#include <linux/i2c.h>
39#include <linux/mutex.h>
40#include "pvrusb2-hdw.h"
41#include "pvrusb2-io.h"
42#include <media/cx2341x.h>
43
/* Legal values for the SRATE state variable */
#define PVR2_CVAL_SRATE_48 0
#define PVR2_CVAL_SRATE_44_1 1

/* Legal values for the AUDIOBITRATE state variable */
#define PVR2_CVAL_AUDIOBITRATE_384 0
#define PVR2_CVAL_AUDIOBITRATE_320 1
#define PVR2_CVAL_AUDIOBITRATE_256 2
#define PVR2_CVAL_AUDIOBITRATE_224 3
#define PVR2_CVAL_AUDIOBITRATE_192 4
#define PVR2_CVAL_AUDIOBITRATE_160 5
#define PVR2_CVAL_AUDIOBITRATE_128 6
#define PVR2_CVAL_AUDIOBITRATE_112 7
#define PVR2_CVAL_AUDIOBITRATE_96 8
#define PVR2_CVAL_AUDIOBITRATE_80 9
#define PVR2_CVAL_AUDIOBITRATE_64 10
#define PVR2_CVAL_AUDIOBITRATE_56 11
#define PVR2_CVAL_AUDIOBITRATE_48 12
#define PVR2_CVAL_AUDIOBITRATE_32 13
#define PVR2_CVAL_AUDIOBITRATE_VBR 14

/* Legal values for the AUDIOEMPHASIS state variable */
#define PVR2_CVAL_AUDIOEMPHASIS_NONE 0
#define PVR2_CVAL_AUDIOEMPHASIS_50_15 1
#define PVR2_CVAL_AUDIOEMPHASIS_CCITT 2

/* Legal values for PVR2_CID_HSM (USB speed: fail / full / high) */
#define PVR2_CVAL_HSM_FAIL 0
#define PVR2_CVAL_HSM_FULL 1
#define PVR2_CVAL_HSM_HIGH 2

/* Device endpoint addresses -- presumably USB bulk-in endpoints; confirm
   against the stream setup code. */
#define PVR2_VID_ENDPOINT 0x84
#define PVR2_UNK_ENDPOINT 0x86 /* maybe raw yuv ? */
#define PVR2_VBI_ENDPOINT 0x88

/* Size of the low-level command buffer (cmd_buffer in struct pvr2_hdw) */
#define PVR2_CTL_BUFFSIZE 64

/* Number of slots in the programmable channel frequency table */
#define FREQTABLE_SIZE 500

/* Acquire/release the mutex named x_mutex while maintaining the parallel
   x_held flag, which exists purely for debugging (see big_lock_held and
   ctl_lock_held in struct pvr2_hdw). */
#define LOCK_TAKE(x) do { mutex_lock(&x##_mutex); x##_held = !0; } while (0)
#define LOCK_GIVE(x) do { x##_held = 0; mutex_unlock(&x##_mutex); } while (0)
85
struct pvr2_decoder;

/* Callback signatures implementing a control's behavior; instances are
   stored in struct pvr2_ctl_info below.  The (msk,val) pairs follow the
   convention used by the setters in pvrusb2-hdw.c: only the bits selected
   by msk are taken from val. */
typedef int (*pvr2_ctlf_is_dirty)(struct pvr2_ctrl *);
typedef void (*pvr2_ctlf_clear_dirty)(struct pvr2_ctrl *);
typedef int (*pvr2_ctlf_get_value)(struct pvr2_ctrl *,int *);
typedef int (*pvr2_ctlf_set_value)(struct pvr2_ctrl *,int msk,int val);
typedef int (*pvr2_ctlf_val_to_sym)(struct pvr2_ctrl *,int msk,int val,
				    char *,unsigned int,unsigned int *);
typedef int (*pvr2_ctlf_sym_to_val)(struct pvr2_ctrl *,
				    const char *,unsigned int,
				    int *mskp,int *valp);
typedef unsigned int (*pvr2_ctlf_get_v4lflags)(struct pvr2_ctrl *);
98
/* This structure describes a specific control.  A table of these is set up
   in pvrusb2-hdw.c. */
struct pvr2_ctl_info {
	/* Control's name suitable for use as an identifier */
	const char *name;

	/* Short description of control */
	const char *desc;

	/* Control's implementation (any handler may be left NULL; e.g. a
	   read-only control has no set_value) */
	pvr2_ctlf_get_value get_value;      /* Get its value */
	pvr2_ctlf_set_value set_value;      /* Set its value */
	pvr2_ctlf_val_to_sym val_to_sym;    /* Custom convert value->symbol */
	pvr2_ctlf_sym_to_val sym_to_val;    /* Custom convert symbol->value */
	pvr2_ctlf_is_dirty is_dirty;        /* Return true if dirty */
	pvr2_ctlf_clear_dirty clear_dirty;  /* Clear dirty state */
	pvr2_ctlf_get_v4lflags get_v4lflags;/* Retrieve v4l flags */

	/* Control's type (int, enum, bitmask) */
	enum pvr2_ctl_type type;

	/* Associated V4L control ID, if any */
	int v4l_id;

	/* Associated driver internal ID, if any */
	int internal_id;

	/* Don't implicitly initialize this control's value */
	int skip_init;

	/* Starting value for this control */
	int default_value;

	/* Type-specific control information; which member of the union is
	   valid is selected by 'type' above */
	union {
		struct { /* Integer control */
			long min_value; /* lower limit */
			long max_value; /* upper limit */
		} type_int;
		struct { /* enumerated control */
			unsigned int count;       /* enum value count */
			const char **value_names; /* symbol names */
		} type_enum;
		struct { /* bitmask control */
			unsigned int valid_bits; /* bits in use */
			const char **bit_names;  /* symbol name/bit */
		} type_bitmask;
	} def;
};
148
149
/* Same as pvr2_ctl_info, but includes storage for the control description;
   used for the dynamically generated cx2341x MPEG controls whose desc
   strings are built at runtime. */
#define PVR2_CTLD_INFO_DESC_SIZE 32
struct pvr2_ctld_info {
	struct pvr2_ctl_info info;
	char desc[PVR2_CTLD_INFO_DESC_SIZE];
};

/* Instance of a control: static description plus the owning device */
struct pvr2_ctrl {
	const struct pvr2_ctl_info *info;
	struct pvr2_hdw *hdw;
};
161
162
/* Hooks into an audio status provider; ctxt is the opaque context passed
   back to each callback. */
struct pvr2_audio_stat {
	void *ctxt;
	void (*detach)(void *);
	int (*status)(void *);
};

/* Hooks into the attached video decoder; ctxt is the opaque context passed
   back to each callback. */
struct pvr2_decoder_ctrl {
	void *ctxt;
	void (*detach)(void *);
	void (*enable)(void *,int);
	int (*tuned)(void *);
	void (*force_reset)(void *);
};
176
/* Bits for i2c_pend_types in struct pvr2_hdw */
#define PVR2_I2C_PEND_DETECT  0x01  /* Need to detect a client type */
#define PVR2_I2C_PEND_CLIENT  0x02  /* Client needs a specific update */
#define PVR2_I2C_PEND_REFRESH 0x04  /* Client has specific pending bits */
#define PVR2_I2C_PEND_STALE   0x08  /* Broadcast pending bits */

#define PVR2_I2C_PEND_ALL (PVR2_I2C_PEND_DETECT |\
			   PVR2_I2C_PEND_CLIENT |\
			   PVR2_I2C_PEND_REFRESH |\
			   PVR2_I2C_PEND_STALE)

/* Disposition of firmware1 loading situation (fw1_state below) */
#define FW1_STATE_UNKNOWN 0
#define FW1_STATE_MISSING 1
#define FW1_STATE_FAILED 2
#define FW1_STATE_RELOAD 3
#define FW1_STATE_OK 4

/* Known major hardware variants, keyed from device ID; these double as
   array indices into pvr2_device_table / pvr2_device_names in
   pvrusb2-hdw.c, so keep them dense starting at 0. */
#define PVR2_HDW_TYPE_29XXX 0
#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
#define PVR2_HDW_TYPE_24XXX 1
#endif

/* Per-address I2C transfer hook: (hdw, addr, wdata, wlen, rdata, rlen) */
typedef int (*pvr2_i2c_func)(struct pvr2_hdw *,u8,u8 *,u16,u8 *, u16);
#define PVR2_I2C_FUNC_CNT 128
202
/* This structure contains all state data directly needed to
   manipulate the hardware (as opposed to complying with a kernel
   interface) */
struct pvr2_hdw {
	/* Underlying USB device handle */
	struct usb_device *usb_dev;
	struct usb_interface *usb_intf;

	/* Device type, one of PVR2_HDW_TYPE_xxxxx */
	unsigned int hdw_type;

	/* Video spigot */
	struct pvr2_stream *vid_stream;

	/* Mutex for all hardware state control */
	struct mutex big_lock_mutex;
	int big_lock_held;  /* For debugging */

	/* Callback + opaque context -- presumably used to prod polling
	   logic; confirm against where poll_trigger_func is installed. */
	void (*poll_trigger_func)(void *);
	void *poll_trigger_data;

	/* Printable instance name */
	char name[32];

	/* I2C stuff */
	struct i2c_adapter i2c_adap;
	struct i2c_algorithm i2c_algo;
	pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT]; /* indexed by I2C address */
	int i2c_cx25840_hack_state;
	int i2c_linked;
	unsigned int i2c_pend_types;    /* Which types of update are needed */
	unsigned long i2c_pend_mask;    /* Change bits we need to scan */
	unsigned long i2c_stale_mask;   /* Pending broadcast change bits */
	unsigned long i2c_active_mask;  /* All change bits currently in use */
	struct list_head i2c_clients;
	struct mutex i2c_list_lock;

	/* Frequency table; slots are addressed 1..FREQTABLE_SIZE by the
	   channel controls, with 0 meaning "no slot selected" */
	unsigned int freqTable[FREQTABLE_SIZE];
	unsigned int freqProgSlot;
	unsigned int freqSlot;

	/* Stuff for handling low level control interaction with device */
	struct mutex ctl_lock_mutex;
	int ctl_lock_held;  /* For debugging */
	struct urb *ctl_write_urb;
	struct urb *ctl_read_urb;
	unsigned char *ctl_write_buffer;
	unsigned char *ctl_read_buffer;
	volatile int ctl_write_pend_flag;
	volatile int ctl_read_pend_flag;
	volatile int ctl_timeout_flag;
	struct completion ctl_done;
	unsigned char cmd_buffer[PVR2_CTL_BUFFSIZE];
	int cmd_debug_state;               // Low level command debugging info
	unsigned char cmd_debug_code;      //
	unsigned int cmd_debug_write_len;  //
	unsigned int cmd_debug_read_len;   //

	int flag_ok;                 // device in known good state
	int flag_disconnected;       // flag_ok == 0 due to disconnect
	int flag_init_ok;            // true if structure is fully initialized
	int flag_streaming_enabled;  // true if streaming should be on
	int fw1_state;               // current situation with fw1 (FW1_STATE_xxx)

	int flag_decoder_is_tuned;

	struct pvr2_decoder_ctrl *decoder_ctrl;

	// CPU firmware info (used to help find / save firmware data)
	char *fw_buffer;
	unsigned int fw_size;

	// Which subsystem pieces have been enabled / configured
	unsigned long subsys_enabled_mask;

	// Which subsystems are manipulated to enable streaming
	unsigned long subsys_stream_mask;

	// True if there is a request to trigger logging of state in each
	// module.
	int log_requested;

	/* Tuner / frequency control stuff */
	unsigned int tuner_type;
	int tuner_updated;
	unsigned int freqVal;
	int freqDirty;

	/* Video standard handling */
	v4l2_std_id std_mask_eeprom; // Hardware supported selections
	v4l2_std_id std_mask_avail;  // Which standards we may select from
	v4l2_std_id std_mask_cur;    // Currently selected standard(s)
	unsigned int std_enum_cnt;   // # of enumerated standards
	int std_enum_cur;            // selected standard enumeration value
	int std_dirty;               // True if std_mask_cur has changed
	struct pvr2_ctl_info std_info_enum;
	struct pvr2_ctl_info std_info_avail;
	struct pvr2_ctl_info std_info_cur;
	struct v4l2_standard *std_defs;
	const char **std_enum_names;

	// Generated string names, one per actual V4L2 standard
	const char *std_mask_ptrs[32];
	char std_mask_names[32][10];

	int unit_number;             /* ID for driver instance */
	unsigned long serial_number; /* ID for hardware itself */

	/* Minor number used by v4l logic (yes, this is a hack, as there should
	   be no v4l junk here).  Probably a better way to do this. */
	int v4l_minor_number;

	/* Location of eeprom or a negative number if none */
	int eeprom_addr;

	enum pvr2_config config;

	/* Information about what audio signal we're hearing */
	int flag_stereo;
	int flag_bilingual;
	struct pvr2_audio_stat *audio_stat;

	/* Control state needed for cx2341x module */
	struct cx2341x_mpeg_params enc_cur_state;
	struct cx2341x_mpeg_params enc_ctl_state;
	/* True if an encoder attribute has changed */
	int enc_stale;
	/* True if enc_cur_state is valid */
	int enc_cur_valid;

	/* Control state; each VCREATE_DATA(x) expands to a value plus a
	   dirty-flag pair (x_val / x_dirty) for control x */
#define VCREATE_DATA(lab) int lab##_val; int lab##_dirty
	VCREATE_DATA(brightness);
	VCREATE_DATA(contrast);
	VCREATE_DATA(saturation);
	VCREATE_DATA(hue);
	VCREATE_DATA(volume);
	VCREATE_DATA(balance);
	VCREATE_DATA(bass);
	VCREATE_DATA(treble);
	VCREATE_DATA(mute);
	VCREATE_DATA(input);
	VCREATE_DATA(audiomode);
	VCREATE_DATA(res_hor);
	VCREATE_DATA(res_ver);
	VCREATE_DATA(srate);
#undef VCREATE_DATA

	/* Storage for dynamically built cx2341x MPEG controls */
	struct pvr2_ctld_info *mpeg_ctrl_info;

	/* Full control table (static + dynamic controls) */
	struct pvr2_ctrl *controls;
	unsigned int control_cnt;
};
356
/* Internal entry points shared among the driver-core sources.  The
   _no_lock / _internal variants presumably assume the caller already holds
   the relevant mutex (implied by naming -- confirm at call sites). */
int pvr2_hdw_commit_ctl_internal(struct pvr2_hdw *hdw);

unsigned int pvr2_hdw_get_signal_status_internal(struct pvr2_hdw *);

/* Change subsystem enable bits: only bits selected by msk are taken
   from val. */
void pvr2_hdw_subsys_bit_chg_no_lock(struct pvr2_hdw *hdw,
				     unsigned long msk,unsigned long val);
void pvr2_hdw_subsys_stream_bit_chg_no_lock(struct pvr2_hdw *hdw,
					    unsigned long msk,
					    unsigned long val);

void pvr2_hdw_internal_find_stdenum(struct pvr2_hdw *hdw);
void pvr2_hdw_internal_set_std_avail(struct pvr2_hdw *hdw);

/* Raw I2C write-then-read against the given address */
int pvr2_i2c_basic_op(struct pvr2_hdw *,u8 i2c_addr,
		      u8 *wdata,u16 wlen,
		      u8 *rdata,u16 rlen);
375
376/*
377 Stuff for Emacs to see, in order to encourage consistent editing style:
378 *** Local Variables: ***
379 *** mode: c ***
380 *** fill-column: 75 ***
381 *** tab-width: 8 ***
382 *** c-basic-offset: 8 ***
383 *** End: ***
384 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
new file mode 100644
index 0000000000..643c471375
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -0,0 +1,3120 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21
22#include <linux/errno.h>
23#include <linux/string.h>
24#include <linux/slab.h>
25#include <linux/firmware.h>
26#include <linux/videodev2.h>
27#include <asm/semaphore.h>
28#include "pvrusb2.h"
29#include "pvrusb2-std.h"
30#include "pvrusb2-util.h"
31#include "pvrusb2-hdw.h"
32#include "pvrusb2-i2c-core.h"
33#include "pvrusb2-tuner.h"
34#include "pvrusb2-eeprom.h"
35#include "pvrusb2-hdw-internal.h"
36#include "pvrusb2-encoder.h"
37#include "pvrusb2-debug.h"
38
/* USB IDs we bind to; indexed by PVR2_HDW_TYPE_xxx so the probe code can
   recover the hardware type from the matched table entry.  Keep in step
   with pvr2_device_names below. */
struct usb_device_id pvr2_device_table[] = {
	[PVR2_HDW_TYPE_29XXX] = { USB_DEVICE(0x2040, 0x2900) },
#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
	[PVR2_HDW_TYPE_24XXX] = { USB_DEVICE(0x2040, 0x2400) },
#endif
	{ }
};

MODULE_DEVICE_TABLE(usb, pvr2_device_table);

/* Human-readable name per hardware type; indexed by PVR2_HDW_TYPE_xxx */
static const char *pvr2_device_names[] = {
	[PVR2_HDW_TYPE_29XXX] = "WinTV PVR USB2 Model Category 29xxxx",
#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
	[PVR2_HDW_TYPE_24XXX] = "WinTV PVR USB2 Model Category 24xxxx",
#endif
};

/* Simple (pointer, count) pair for a list of strings */
struct pvr2_string_table {
	const char **lst;
	unsigned int cnt;
};
60
#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
// Names of other client modules to request for 24xxx model hardware
static const char *pvr2_client_24xxx[] = {
	"cx25840",
	"tuner",
	"tda9887",
	"wm8775",
};
#endif

// Names of other client modules to request for 29xxx model hardware
static const char *pvr2_client_29xxx[] = {
	"msp3400",
	"saa7115",
	"tuner",
	"tda9887",
};

/* Per-hardware-type client module list; indexed by PVR2_HDW_TYPE_xxx */
static struct pvr2_string_table pvr2_client_lists[] = {
	[PVR2_HDW_TYPE_29XXX] = {
		pvr2_client_29xxx,
		sizeof(pvr2_client_29xxx)/sizeof(pvr2_client_29xxx[0]),
	},
#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
	[PVR2_HDW_TYPE_24XXX] = {
		pvr2_client_24xxx,
		sizeof(pvr2_client_24xxx)/sizeof(pvr2_client_24xxx[0]),
	},
#endif
};
91
/* One slot per driver instance, keyed by unit number.
   NOTE(review): the designated initializer uses 0 for pointer elements;
   NULL would be the clearer spelling. */
static struct pvr2_hdw *unit_pointers[PVR_NUM] = {[ 0 ... PVR_NUM-1 ] = 0};
/* Serializes access to unit_pointers (old-style semaphore API) */
DECLARE_MUTEX(pvr2_unit_sem);

/* Module parameters; the array-valued ones are per-unit, indexed the same
   way as unit_pointers. */
static int ctlchg = 0;
static int initusbreset = 1;
static int procreload = 0;
static int tuner[PVR_NUM] = { [0 ... PVR_NUM-1] = -1 };
static int tolerance[PVR_NUM] = { [0 ... PVR_NUM-1] = 0 };
static int video_std[PVR_NUM] = { [0 ... PVR_NUM-1] = 0 };
static int init_pause_msec = 0;

module_param(ctlchg, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ctlchg, "0=optimize ctl change 1=always accept new ctl value");
module_param(init_pause_msec, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(init_pause_msec, "hardware initialization settling delay");
module_param(initusbreset, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(initusbreset, "Do USB reset device on probe");
module_param(procreload, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(procreload,
		 "Attempt init failure recovery with firmware reload");
module_param_array(tuner, int, NULL, 0444);
MODULE_PARM_DESC(tuner,"specify installed tuner type");
module_param_array(video_std, int, NULL, 0444);
MODULE_PARM_DESC(video_std,"specify initial video standard");
module_param_array(tolerance, int, NULL, 0444);
MODULE_PARM_DESC(tolerance,"specify stream error tolerance");
118
/* Control pipe endpoints for low-level command traffic */
#define PVR2_CTL_WRITE_ENDPOINT  0x01
#define PVR2_CTL_READ_ENDPOINT   0x81

/* Device register addresses for GPIO access */
#define PVR2_GPIO_IN 0x9008
#define PVR2_GPIO_OUT 0x900c
#define PVR2_GPIO_DIR 0x9020

#define trace_firmware(...) pvr2_trace(PVR2_TRACE_FIRMWARE,__VA_ARGS__)

/* Endpoint used to download firmware to the device */
#define PVR2_FIRMWARE_ENDPOINT   0x02

/* size of a firmware chunk */
#define FIRMWARE_CHUNK_SIZE 0x2000

/* Define the list of additional controls we'll dynamically construct based
   on query of the cx2341x module. */
struct pvr2_mpeg_ids {
	const char *strid;	/* driver-visible control name */
	int id;			/* matching V4L2_CID_MPEG_* id */
};
/* Name <-> V4L2 CID map for the controls we mirror from the cx2341x
   module; MPEGDEF_COUNT below gives the element count. */
static const struct pvr2_mpeg_ids mpeg_ids[] = {
	{
		.strid = "audio_layer",
		.id = V4L2_CID_MPEG_AUDIO_ENCODING,
	},{
		.strid = "audio_bitrate",
		.id = V4L2_CID_MPEG_AUDIO_L2_BITRATE,
	},{
		/* Already using audio_mode elsewhere :-( */
		.strid = "mpeg_audio_mode",
		.id = V4L2_CID_MPEG_AUDIO_MODE,
	},{
		.strid = "mpeg_audio_mode_extension",
		.id = V4L2_CID_MPEG_AUDIO_MODE_EXTENSION,
	},{
		.strid = "audio_emphasis",
		.id = V4L2_CID_MPEG_AUDIO_EMPHASIS,
	},{
		.strid = "audio_crc",
		.id = V4L2_CID_MPEG_AUDIO_CRC,
	},{
		.strid = "video_aspect",
		.id = V4L2_CID_MPEG_VIDEO_ASPECT,
	},{
		.strid = "video_b_frames",
		.id = V4L2_CID_MPEG_VIDEO_B_FRAMES,
	},{
		.strid = "video_gop_size",
		.id = V4L2_CID_MPEG_VIDEO_GOP_SIZE,
	},{
		.strid = "video_gop_closure",
		.id = V4L2_CID_MPEG_VIDEO_GOP_CLOSURE,
	},{
		.strid = "video_pulldown",
		.id = V4L2_CID_MPEG_VIDEO_PULLDOWN,
	},{
		.strid = "video_bitrate_mode",
		.id = V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
	},{
		.strid = "video_bitrate",
		.id = V4L2_CID_MPEG_VIDEO_BITRATE,
	},{
		.strid = "video_bitrate_peak",
		.id = V4L2_CID_MPEG_VIDEO_BITRATE_PEAK,
	},{
		.strid = "video_temporal_decimation",
		.id = V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION,
	},{
		.strid = "stream_type",
		.id = V4L2_CID_MPEG_STREAM_TYPE,
	},{
		.strid = "video_spatial_filter_mode",
		.id = V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE,
	},{
		.strid = "video_spatial_filter",
		.id = V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER,
	},{
		.strid = "video_luma_spatial_filter_type",
		.id = V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE,
	},{
		.strid = "video_chroma_spatial_filter_type",
		.id = V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE,
	},{
		.strid = "video_temporal_filter_mode",
		.id = V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE,
	},{
		.strid = "video_temporal_filter",
		.id = V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER,
	},{
		.strid = "video_median_filter_type",
		.id = V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE,
	},{
		.strid = "video_luma_median_filter_top",
		.id = V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP,
	},{
		.strid = "video_luma_median_filter_bottom",
		.id = V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM,
	},{
		.strid = "video_chroma_median_filter_top",
		.id = V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP,
	},{
		.strid = "video_chroma_median_filter_bottom",
		.id = V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM,
	}
};
#define MPEGDEF_COUNT (sizeof(mpeg_ids)/sizeof(mpeg_ids[0]))
225
/* Symbol-name tables for the enumerated / bitmask controls; each is
   indexed by the corresponding PVR2_CVAL_* / V4L2 / PVR2_SUBSYS_B value. */
static const char *control_values_srate[] = {
	[PVR2_CVAL_SRATE_48]   = "48KHz",
	[PVR2_CVAL_SRATE_44_1] = "44.1KHz",
};




static const char *control_values_input[] = {
	[PVR2_CVAL_INPUT_TV]        = "television",  /*xawtv needs this name*/
	[PVR2_CVAL_INPUT_RADIO]     = "radio",
	[PVR2_CVAL_INPUT_SVIDEO]    = "s-video",
	[PVR2_CVAL_INPUT_COMPOSITE] = "composite",
};


static const char *control_values_audiomode[] = {
	[V4L2_TUNER_MODE_MONO]   = "Mono",
	[V4L2_TUNER_MODE_STEREO] = "Stereo",
	[V4L2_TUNER_MODE_LANG1]  = "Lang1",
	[V4L2_TUNER_MODE_LANG2]  = "Lang2",
	[V4L2_TUNER_MODE_LANG1_LANG2] = "Lang1+Lang2",
};


static const char *control_values_hsm[] = {
	[PVR2_CVAL_HSM_FAIL] = "Fail",
	[PVR2_CVAL_HSM_HIGH] = "High",
	[PVR2_CVAL_HSM_FULL] = "Full",
};


static const char *control_values_subsystem[] = {
	[PVR2_SUBSYS_B_ENC_FIRMWARE]  = "enc_firmware",
	[PVR2_SUBSYS_B_ENC_CFG] = "enc_config",
	[PVR2_SUBSYS_B_DIGITIZER_RUN] = "digitizer_run",
	[PVR2_SUBSYS_B_USBSTREAM_RUN] = "usbstream_run",
	[PVR2_SUBSYS_B_ENC_RUN] = "enc_run",
};
265
266
267static int ctrl_channelfreq_get(struct pvr2_ctrl *cptr,int *vp)
268{
269 struct pvr2_hdw *hdw = cptr->hdw;
270 if ((hdw->freqProgSlot > 0) && (hdw->freqProgSlot <= FREQTABLE_SIZE)) {
271 *vp = hdw->freqTable[hdw->freqProgSlot-1];
272 } else {
273 *vp = 0;
274 }
275 return 0;
276}
277
278static int ctrl_channelfreq_set(struct pvr2_ctrl *cptr,int m,int v)
279{
280 struct pvr2_hdw *hdw = cptr->hdw;
281 if ((hdw->freqProgSlot > 0) && (hdw->freqProgSlot <= FREQTABLE_SIZE)) {
282 hdw->freqTable[hdw->freqProgSlot-1] = v;
283 }
284 return 0;
285}
286
287static int ctrl_channelprog_get(struct pvr2_ctrl *cptr,int *vp)
288{
289 *vp = cptr->hdw->freqProgSlot;
290 return 0;
291}
292
293static int ctrl_channelprog_set(struct pvr2_ctrl *cptr,int m,int v)
294{
295 struct pvr2_hdw *hdw = cptr->hdw;
296 if ((v >= 0) && (v <= FREQTABLE_SIZE)) {
297 hdw->freqProgSlot = v;
298 }
299 return 0;
300}
301
302static int ctrl_channel_get(struct pvr2_ctrl *cptr,int *vp)
303{
304 *vp = cptr->hdw->freqSlot;
305 return 0;
306}
307
308static int ctrl_channel_set(struct pvr2_ctrl *cptr,int m,int v)
309{
310 unsigned freq = 0;
311 struct pvr2_hdw *hdw = cptr->hdw;
312 hdw->freqSlot = v;
313 if ((hdw->freqSlot > 0) && (hdw->freqSlot <= FREQTABLE_SIZE)) {
314 freq = hdw->freqTable[hdw->freqSlot-1];
315 }
316 if (freq && (freq != hdw->freqVal)) {
317 hdw->freqVal = freq;
318 hdw->freqDirty = !0;
319 }
320 return 0;
321}
322
323static int ctrl_freq_get(struct pvr2_ctrl *cptr,int *vp)
324{
325 *vp = cptr->hdw->freqVal;
326 return 0;
327}
328
329static int ctrl_freq_is_dirty(struct pvr2_ctrl *cptr)
330{
331 return cptr->hdw->freqDirty != 0;
332}
333
334static void ctrl_freq_clear_dirty(struct pvr2_ctrl *cptr)
335{
336 cptr->hdw->freqDirty = 0;
337}
338
339static int ctrl_freq_set(struct pvr2_ctrl *cptr,int m,int v)
340{
341 struct pvr2_hdw *hdw = cptr->hdw;
342 hdw->freqVal = v;
343 hdw->freqDirty = !0;
344 hdw->freqSlot = 0;
345 return 0;
346}
347
348static int ctrl_cx2341x_is_dirty(struct pvr2_ctrl *cptr)
349{
350 return cptr->hdw->enc_stale != 0;
351}
352
353static void ctrl_cx2341x_clear_dirty(struct pvr2_ctrl *cptr)
354{
355 cptr->hdw->enc_stale = 0;
356}
357
358static int ctrl_cx2341x_get(struct pvr2_ctrl *cptr,int *vp)
359{
360 int ret;
361 struct v4l2_ext_controls cs;
362 struct v4l2_ext_control c1;
363 memset(&cs,0,sizeof(cs));
364 memset(&c1,0,sizeof(c1));
365 cs.controls = &c1;
366 cs.count = 1;
367 c1.id = cptr->info->v4l_id;
368 ret = cx2341x_ext_ctrls(&cptr->hdw->enc_ctl_state,&cs,
369 VIDIOC_G_EXT_CTRLS);
370 if (ret) return ret;
371 *vp = c1.value;
372 return 0;
373}
374
375static int ctrl_cx2341x_set(struct pvr2_ctrl *cptr,int m,int v)
376{
377 int ret;
378 struct v4l2_ext_controls cs;
379 struct v4l2_ext_control c1;
380 memset(&cs,0,sizeof(cs));
381 memset(&c1,0,sizeof(c1));
382 cs.controls = &c1;
383 cs.count = 1;
384 c1.id = cptr->info->v4l_id;
385 c1.value = v;
386 ret = cx2341x_ext_ctrls(&cptr->hdw->enc_ctl_state,&cs,
387 VIDIOC_S_EXT_CTRLS);
388 if (ret) return ret;
389 cptr->hdw->enc_stale = !0;
390 return 0;
391}
392
393static unsigned int ctrl_cx2341x_getv4lflags(struct pvr2_ctrl *cptr)
394{
395 struct v4l2_queryctrl qctrl;
396 struct pvr2_ctl_info *info;
397 qctrl.id = cptr->info->v4l_id;
398 cx2341x_ctrl_query(&cptr->hdw->enc_ctl_state,&qctrl);
399 /* Strip out the const so we can adjust a function pointer. It's
400 OK to do this here because we know this is a dynamically created
401 control, so the underlying storage for the info pointer is (a)
402 private to us, and (b) not in read-only storage. Either we do
403 this or we significantly complicate the underlying control
404 implementation. */
405 info = (struct pvr2_ctl_info *)(cptr->info);
406 if (qctrl.flags & V4L2_CTRL_FLAG_READ_ONLY) {
407 if (info->set_value) {
408 info->set_value = 0;
409 }
410 } else {
411 if (!(info->set_value)) {
412 info->set_value = ctrl_cx2341x_set;
413 }
414 }
415 return qctrl.flags;
416}
417
418static int ctrl_streamingenabled_get(struct pvr2_ctrl *cptr,int *vp)
419{
420 *vp = cptr->hdw->flag_streaming_enabled;
421 return 0;
422}
423
424static int ctrl_hsm_get(struct pvr2_ctrl *cptr,int *vp)
425{
426 int result = pvr2_hdw_is_hsm(cptr->hdw);
427 *vp = PVR2_CVAL_HSM_FULL;
428 if (result < 0) *vp = PVR2_CVAL_HSM_FAIL;
429 if (result) *vp = PVR2_CVAL_HSM_HIGH;
430 return 0;
431}
432
433static int ctrl_stdavail_get(struct pvr2_ctrl *cptr,int *vp)
434{
435 *vp = cptr->hdw->std_mask_avail;
436 return 0;
437}
438
439static int ctrl_stdavail_set(struct pvr2_ctrl *cptr,int m,int v)
440{
441 struct pvr2_hdw *hdw = cptr->hdw;
442 v4l2_std_id ns;
443 ns = hdw->std_mask_avail;
444 ns = (ns & ~m) | (v & m);
445 if (ns == hdw->std_mask_avail) return 0;
446 hdw->std_mask_avail = ns;
447 pvr2_hdw_internal_set_std_avail(hdw);
448 pvr2_hdw_internal_find_stdenum(hdw);
449 return 0;
450}
451
/* Render the masked standard bits as a symbolic string into the
   caller's buffer; the produced length is stored through len. */
static int ctrl_std_val_to_sym(struct pvr2_ctrl *cptr,int msk,int val,
			       char *bufPtr,unsigned int bufSize,
			       unsigned int *len)
{
	int bits = msk & val;

	*len = pvr2_std_id_to_str(bufPtr, bufSize, bits);
	return 0;
}
459
460static int ctrl_std_sym_to_val(struct pvr2_ctrl *cptr,
461 const char *bufPtr,unsigned int bufSize,
462 int *mskp,int *valp)
463{
464 int ret;
465 v4l2_std_id id;
466 ret = pvr2_std_str_to_id(&id,bufPtr,bufSize);
467 if (ret < 0) return ret;
468 if (mskp) *mskp = id;
469 if (valp) *valp = id;
470 return 0;
471}
472
473static int ctrl_stdcur_get(struct pvr2_ctrl *cptr,int *vp)
474{
475 *vp = cptr->hdw->std_mask_cur;
476 return 0;
477}
478
479static int ctrl_stdcur_set(struct pvr2_ctrl *cptr,int m,int v)
480{
481 struct pvr2_hdw *hdw = cptr->hdw;
482 v4l2_std_id ns;
483 ns = hdw->std_mask_cur;
484 ns = (ns & ~m) | (v & m);
485 if (ns == hdw->std_mask_cur) return 0;
486 hdw->std_mask_cur = ns;
487 hdw->std_dirty = !0;
488 pvr2_hdw_internal_find_stdenum(hdw);
489 return 0;
490}
491
492static int ctrl_stdcur_is_dirty(struct pvr2_ctrl *cptr)
493{
494 return cptr->hdw->std_dirty != 0;
495}
496
497static void ctrl_stdcur_clear_dirty(struct pvr2_ctrl *cptr)
498{
499 cptr->hdw->std_dirty = 0;
500}
501
502static int ctrl_signal_get(struct pvr2_ctrl *cptr,int *vp)
503{
504 *vp = ((pvr2_hdw_get_signal_status_internal(cptr->hdw) &
505 PVR2_SIGNAL_OK) ? 1 : 0);
506 return 0;
507}
508
509static int ctrl_subsys_get(struct pvr2_ctrl *cptr,int *vp)
510{
511 *vp = cptr->hdw->subsys_enabled_mask;
512 return 0;
513}
514
515static int ctrl_subsys_set(struct pvr2_ctrl *cptr,int m,int v)
516{
517 pvr2_hdw_subsys_bit_chg_no_lock(cptr->hdw,m,v);
518 return 0;
519}
520
521static int ctrl_subsys_stream_get(struct pvr2_ctrl *cptr,int *vp)
522{
523 *vp = cptr->hdw->subsys_stream_mask;
524 return 0;
525}
526
527static int ctrl_subsys_stream_set(struct pvr2_ctrl *cptr,int m,int v)
528{
529 pvr2_hdw_subsys_stream_bit_chg_no_lock(cptr->hdw,m,v);
530 return 0;
531}
532
533static int ctrl_stdenumcur_set(struct pvr2_ctrl *cptr,int m,int v)
534{
535 struct pvr2_hdw *hdw = cptr->hdw;
536 if (v < 0) return -EINVAL;
537 if (v > hdw->std_enum_cnt) return -EINVAL;
538 hdw->std_enum_cur = v;
539 if (!v) return 0;
540 v--;
541 if (hdw->std_mask_cur == hdw->std_defs[v].id) return 0;
542 hdw->std_mask_cur = hdw->std_defs[v].id;
543 hdw->std_dirty = !0;
544 return 0;
545}
546
547
548static int ctrl_stdenumcur_get(struct pvr2_ctrl *cptr,int *vp)
549{
550 *vp = cptr->hdw->std_enum_cur;
551 return 0;
552}
553
554
555static int ctrl_stdenumcur_is_dirty(struct pvr2_ctrl *cptr)
556{
557 return cptr->hdw->std_dirty != 0;
558}
559
560
561static void ctrl_stdenumcur_clear_dirty(struct pvr2_ctrl *cptr)
562{
563 cptr->hdw->std_dirty = 0;
564}
565
566
/* Initializer fragments for entries of the control table below: each
   DEF* macro fills in the type and matching def.* union members of a
   struct pvr2_ctl_info, and DEFREF wires in the four per-control
   accessors generated by VCREATE_FUNCS. */
#define DEFINT(vmin,vmax) \
	.type = pvr2_ctl_int, \
	.def.type_int.min_value = vmin, \
	.def.type_int.max_value = vmax

#define DEFENUM(tab) \
	.type = pvr2_ctl_enum, \
	.def.type_enum.count = (sizeof(tab)/sizeof((tab)[0])), \
	.def.type_enum.value_names = tab

#define DEFBOOL \
	.type = pvr2_ctl_bool

#define DEFMASK(msk,tab) \
	.type = pvr2_ctl_bitmask, \
	.def.type_bitmask.valid_bits = msk, \
	.def.type_bitmask.bit_names = tab

#define DEFREF(vname) \
	.set_value = ctrl_set_##vname, \
	.get_value = ctrl_get_##vname, \
	.is_dirty = ctrl_isdirty_##vname, \
	.clear_dirty = ctrl_cleardirty_##vname


/* Generate the get/set/isdirty/cleardirty accessor quartet for a simple
   value control; each operates on the vname_val / vname_dirty pair
   declared via VCREATE_DATA in struct pvr2_hdw. */
#define VCREATE_FUNCS(vname) \
static int ctrl_get_##vname(struct pvr2_ctrl *cptr,int *vp) \
{*vp = cptr->hdw->vname##_val; return 0;} \
static int ctrl_set_##vname(struct pvr2_ctrl *cptr,int m,int v) \
{cptr->hdw->vname##_val = v; cptr->hdw->vname##_dirty = !0; return 0;} \
static int ctrl_isdirty_##vname(struct pvr2_ctrl *cptr) \
{return cptr->hdw->vname##_dirty != 0;} \
static void ctrl_cleardirty_##vname(struct pvr2_ctrl *cptr) \
{cptr->hdw->vname##_dirty = 0;}

/* Keep this list in step with the VCREATE_DATA list in
   pvrusb2-hdw-internal.h */
VCREATE_FUNCS(brightness)
VCREATE_FUNCS(contrast)
VCREATE_FUNCS(saturation)
VCREATE_FUNCS(hue)
VCREATE_FUNCS(volume)
VCREATE_FUNCS(balance)
VCREATE_FUNCS(bass)
VCREATE_FUNCS(treble)
VCREATE_FUNCS(mute)
VCREATE_FUNCS(input)
VCREATE_FUNCS(audiomode)
VCREATE_FUNCS(res_hor)
VCREATE_FUNCS(res_ver)
VCREATE_FUNCS(srate)

/* Tuner frequency limits (Hz) for the frequency controls */
#define MIN_FREQ 55250000L
#define MAX_FREQ 850000000L
619
/* Table definition of all controls which can be manipulated */
static const struct pvr2_ctl_info control_defs[] = {
	{
		/* Picture controls: standard V4L2 IDs, stored in pvr2_hdw
		   fields through the DEFREF()-generated handlers. */
		.v4l_id = V4L2_CID_BRIGHTNESS,
		.desc = "Brightness",
		.name = "brightness",
		.default_value = 128,
		DEFREF(brightness),
		DEFINT(0,255),
	},{
		.v4l_id = V4L2_CID_CONTRAST,
		.desc = "Contrast",
		.name = "contrast",
		.default_value = 68,
		DEFREF(contrast),
		DEFINT(0,127),
	},{
		.v4l_id = V4L2_CID_SATURATION,
		.desc = "Saturation",
		.name = "saturation",
		.default_value = 64,
		DEFREF(saturation),
		DEFINT(0,127),
	},{
		.v4l_id = V4L2_CID_HUE,
		.desc = "Hue",
		.name = "hue",
		.default_value = 0,
		DEFREF(hue),
		DEFINT(-128,127),
	},{
		/* Audio controls. */
		.v4l_id = V4L2_CID_AUDIO_VOLUME,
		.desc = "Volume",
		.name = "volume",
		.default_value = 65535,
		DEFREF(volume),
		DEFINT(0,65535),
	},{
		.v4l_id = V4L2_CID_AUDIO_BALANCE,
		.desc = "Balance",
		.name = "balance",
		.default_value = 0,
		DEFREF(balance),
		DEFINT(-32768,32767),
	},{
		.v4l_id = V4L2_CID_AUDIO_BASS,
		.desc = "Bass",
		.name = "bass",
		.default_value = 0,
		DEFREF(bass),
		DEFINT(-32768,32767),
	},{
		.v4l_id = V4L2_CID_AUDIO_TREBLE,
		.desc = "Treble",
		.name = "treble",
		.default_value = 0,
		DEFREF(treble),
		DEFINT(-32768,32767),
	},{
		.v4l_id = V4L2_CID_AUDIO_MUTE,
		.desc = "Mute",
		.name = "mute",
		.default_value = 0,
		DEFREF(mute),
		DEFBOOL,
	},{
		/* Driver-private controls: identified by internal_id
		   instead of a V4L2 control ID. */
		.desc = "Video Source",
		.name = "input",
		.internal_id = PVR2_CID_INPUT,
		.default_value = PVR2_CVAL_INPUT_TV,
		DEFREF(input),
		DEFENUM(control_values_input),
	},{
		.desc = "Audio Mode",
		.name = "audio_mode",
		.internal_id = PVR2_CID_AUDIOMODE,
		.default_value = V4L2_TUNER_MODE_STEREO,
		DEFREF(audiomode),
		DEFENUM(control_values_audiomode),
	},{
		.desc = "Horizontal capture resolution",
		.name = "resolution_hor",
		.internal_id = PVR2_CID_HRES,
		.default_value = 720,
		DEFREF(res_hor),
		DEFINT(320,720),
	},{
		.desc = "Vertical capture resolution",
		.name = "resolution_ver",
		.internal_id = PVR2_CID_VRES,
		.default_value = 480,
		DEFREF(res_ver),
		DEFINT(200,625),
	},{
		.v4l_id = V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ,
		.desc = "Sample rate",
		.name = "srate",
		.default_value = PVR2_CVAL_SRATE_48,
		DEFREF(srate),
		DEFENUM(control_values_srate),
	},{
		/* Tuner / channel controls use hand-written handler
		   functions rather than the DEFREF()-generated ones. */
		.desc = "Tuner Frequency (Hz)",
		.name = "frequency",
		.internal_id = PVR2_CID_FREQUENCY,
		.default_value = 175250000L,
		.set_value = ctrl_freq_set,
		.get_value = ctrl_freq_get,
		.is_dirty = ctrl_freq_is_dirty,
		.clear_dirty = ctrl_freq_clear_dirty,
		DEFINT(MIN_FREQ,MAX_FREQ),
	},{
		.desc = "Channel",
		.name = "channel",
		.set_value = ctrl_channel_set,
		.get_value = ctrl_channel_get,
		DEFINT(0,FREQTABLE_SIZE),
	},{
		.desc = "Channel Program Frequency",
		.name = "freq_table_value",
		.set_value = ctrl_channelfreq_set,
		.get_value = ctrl_channelfreq_get,
		DEFINT(MIN_FREQ,MAX_FREQ),
	},{
		.desc = "Channel Program ID",
		.name = "freq_table_channel",
		.set_value = ctrl_channelprog_set,
		.get_value = ctrl_channelprog_get,
		DEFINT(0,FREQTABLE_SIZE),
	},{
		/* Read-only status controls (no set_value handler). */
		.desc = "Streaming Enabled",
		.name = "streaming_enabled",
		.get_value = ctrl_streamingenabled_get,
		DEFBOOL,
	},{
		.desc = "USB Speed",
		.name = "usb_speed",
		.get_value = ctrl_hsm_get,
		DEFENUM(control_values_hsm),
	},{
		.desc = "Signal Present",
		.name = "signal_present",
		.get_value = ctrl_signal_get,
		DEFBOOL,
	},{
		/* Video standard bit masks.  skip_init prevents the setup
		   code from clobbering these with default_value (see the
		   default-value loop in pvr2_hdw_setup_low()). */
		.desc = "Video Standards Available Mask",
		.name = "video_standard_mask_available",
		.internal_id = PVR2_CID_STDAVAIL,
		.skip_init = !0,
		.get_value = ctrl_stdavail_get,
		.set_value = ctrl_stdavail_set,
		.val_to_sym = ctrl_std_val_to_sym,
		.sym_to_val = ctrl_std_sym_to_val,
		.type = pvr2_ctl_bitmask,
	},{
		.desc = "Video Standards In Use Mask",
		.name = "video_standard_mask_active",
		.internal_id = PVR2_CID_STDCUR,
		.skip_init = !0,
		.get_value = ctrl_stdcur_get,
		.set_value = ctrl_stdcur_set,
		.is_dirty = ctrl_stdcur_is_dirty,
		.clear_dirty = ctrl_stdcur_clear_dirty,
		.val_to_sym = ctrl_std_val_to_sym,
		.sym_to_val = ctrl_std_sym_to_val,
		.type = pvr2_ctl_bitmask,
	},{
		/* Debug access to the driver's subsystem masks. */
		.desc = "Subsystem enabled mask",
		.name = "debug_subsys_mask",
		.skip_init = !0,
		.get_value = ctrl_subsys_get,
		.set_value = ctrl_subsys_set,
		DEFMASK(PVR2_SUBSYS_ALL,control_values_subsystem),
	},{
		.desc = "Subsystem stream mask",
		.name = "debug_subsys_stream_mask",
		.skip_init = !0,
		.get_value = ctrl_subsys_stream_get,
		.set_value = ctrl_subsys_stream_set,
		DEFMASK(PVR2_SUBSYS_ALL,control_values_subsystem),
	},{
		.desc = "Video Standard Name",
		.name = "video_standard",
		.internal_id = PVR2_CID_STDENUM,
		.skip_init = !0,
		.get_value = ctrl_stdenumcur_get,
		.set_value = ctrl_stdenumcur_set,
		.is_dirty = ctrl_stdenumcur_is_dirty,
		.clear_dirty = ctrl_stdenumcur_clear_dirty,
		.type = pvr2_ctl_enum,
	}
};
811
/* Number of entries in control_defs[] (i.e. ARRAY_SIZE of the table). */
#define CTRLDEF_COUNT (sizeof(control_defs)/sizeof(control_defs[0]))
813
814
815const char *pvr2_config_get_name(enum pvr2_config cfg)
816{
817 switch (cfg) {
818 case pvr2_config_empty: return "empty";
819 case pvr2_config_mpeg: return "mpeg";
820 case pvr2_config_vbi: return "vbi";
821 case pvr2_config_radio: return "radio";
822 }
823 return "<unknown>";
824}
825
826
/* Return the USB device associated with this hardware unit. */
struct usb_device *pvr2_hdw_get_dev(struct pvr2_hdw *hdw)
{
	return hdw->usb_dev;
}
831
832
/* Return the device serial number (cached in hdw->serial_number). */
unsigned long pvr2_hdw_get_sn(struct pvr2_hdw *hdw)
{
	return hdw->serial_number;
}
837
838
839struct pvr2_hdw *pvr2_hdw_find(int unit_number)
840{
841 if (unit_number < 0) return 0;
842 if (unit_number >= PVR_NUM) return 0;
843 return unit_pointers[unit_number];
844}
845
846
/* Return the driver unit number assigned to this hardware instance. */
int pvr2_hdw_get_unit_number(struct pvr2_hdw *hdw)
{
	return hdw->unit_number;
}
851
852
853/* Attempt to locate one of the given set of files. Messages are logged
854 appropriate to what has been found. The return value will be 0 or
855 greater on success (it will be the index of the file name found) and
856 fw_entry will be filled in. Otherwise a negative error is returned on
857 failure. If the return value is -ENOENT then no viable firmware file
858 could be located. */
859static int pvr2_locate_firmware(struct pvr2_hdw *hdw,
860 const struct firmware **fw_entry,
861 const char *fwtypename,
862 unsigned int fwcount,
863 const char *fwnames[])
864{
865 unsigned int idx;
866 int ret = -EINVAL;
867 for (idx = 0; idx < fwcount; idx++) {
868 ret = request_firmware(fw_entry,
869 fwnames[idx],
870 &hdw->usb_dev->dev);
871 if (!ret) {
872 trace_firmware("Located %s firmware: %s;"
873 " uploading...",
874 fwtypename,
875 fwnames[idx]);
876 return idx;
877 }
878 if (ret == -ENOENT) continue;
879 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
880 "request_firmware fatal error with code=%d",ret);
881 return ret;
882 }
883 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
884 "***WARNING***"
885 " Device %s firmware"
886 " seems to be missing.",
887 fwtypename);
888 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
889 "Did you install the pvrusb2 firmware files"
890 " in their proper location?");
891 if (fwcount == 1) {
892 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
893 "request_firmware unable to locate %s file %s",
894 fwtypename,fwnames[0]);
895 } else {
896 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
897 "request_firmware unable to locate"
898 " one of the following %s files:",
899 fwtypename);
900 for (idx = 0; idx < fwcount; idx++) {
901 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
902 "request_firmware: Failed to find %s",
903 fwnames[idx]);
904 }
905 }
906 return ret;
907}
908
909
910/*
911 * pvr2_upload_firmware1().
912 *
913 * Send the 8051 firmware to the device. After the upload, arrange for
914 * device to re-enumerate.
915 *
916 * NOTE : the pointer to the firmware data given by request_firmware()
917 * is not suitable for an usb transaction.
918 *
919 */
920int pvr2_upload_firmware1(struct pvr2_hdw *hdw)
921{
922 const struct firmware *fw_entry = 0;
923 void *fw_ptr;
924 unsigned int pipe;
925 int ret;
926 u16 address;
927 static const char *fw_files_29xxx[] = {
928 "v4l-pvrusb2-29xxx-01.fw",
929 };
930#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
931 static const char *fw_files_24xxx[] = {
932 "v4l-pvrusb2-24xxx-01.fw",
933 };
934#endif
935 static const struct pvr2_string_table fw_file_defs[] = {
936 [PVR2_HDW_TYPE_29XXX] = {
937 fw_files_29xxx,
938 sizeof(fw_files_29xxx)/sizeof(fw_files_29xxx[0]),
939 },
940#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
941 [PVR2_HDW_TYPE_24XXX] = {
942 fw_files_24xxx,
943 sizeof(fw_files_24xxx)/sizeof(fw_files_24xxx[0]),
944 },
945#endif
946 };
947 hdw->fw1_state = FW1_STATE_FAILED; // default result
948
949 trace_firmware("pvr2_upload_firmware1");
950
951 ret = pvr2_locate_firmware(hdw,&fw_entry,"fx2 controller",
952 fw_file_defs[hdw->hdw_type].cnt,
953 fw_file_defs[hdw->hdw_type].lst);
954 if (ret < 0) {
955 if (ret == -ENOENT) hdw->fw1_state = FW1_STATE_MISSING;
956 return ret;
957 }
958
959 usb_settoggle(hdw->usb_dev, 0 & 0xf, !(0 & USB_DIR_IN), 0);
960 usb_clear_halt(hdw->usb_dev, usb_sndbulkpipe(hdw->usb_dev, 0 & 0x7f));
961
962 pipe = usb_sndctrlpipe(hdw->usb_dev, 0);
963
964 if (fw_entry->size != 0x2000){
965 pvr2_trace(PVR2_TRACE_ERROR_LEGS,"wrong fx2 firmware size");
966 release_firmware(fw_entry);
967 return -ENOMEM;
968 }
969
970 fw_ptr = kmalloc(0x800, GFP_KERNEL);
971 if (fw_ptr == NULL){
972 release_firmware(fw_entry);
973 return -ENOMEM;
974 }
975
976 /* We have to hold the CPU during firmware upload. */
977 pvr2_hdw_cpureset_assert(hdw,1);
978
979 /* upload the firmware to address 0000-1fff in 2048 (=0x800) bytes
980 chunk. */
981
982 ret = 0;
983 for(address = 0; address < fw_entry->size; address += 0x800) {
984 memcpy(fw_ptr, fw_entry->data + address, 0x800);
985 ret += usb_control_msg(hdw->usb_dev, pipe, 0xa0, 0x40, address,
986 0, fw_ptr, 0x800, HZ);
987 }
988
989 trace_firmware("Upload done, releasing device's CPU");
990
991 /* Now release the CPU. It will disconnect and reconnect later. */
992 pvr2_hdw_cpureset_assert(hdw,0);
993
994 kfree(fw_ptr);
995 release_firmware(fw_entry);
996
997 trace_firmware("Upload done (%d bytes sent)",ret);
998
999 /* We should have written 8192 bytes */
1000 if (ret == 8192) {
1001 hdw->fw1_state = FW1_STATE_RELOAD;
1002 return 0;
1003 }
1004
1005 return -EIO;
1006}
1007
1008
1009/*
1010 * pvr2_upload_firmware2()
1011 *
1012 * This uploads encoder firmware on endpoint 2.
1013 *
1014 */
1015
1016int pvr2_upload_firmware2(struct pvr2_hdw *hdw)
1017{
1018 const struct firmware *fw_entry = 0;
1019 void *fw_ptr;
1020 unsigned int pipe, fw_len, fw_done;
1021 int actual_length;
1022 int ret = 0;
1023 int fwidx;
1024 static const char *fw_files[] = {
1025 CX2341X_FIRM_ENC_FILENAME,
1026 };
1027
1028 trace_firmware("pvr2_upload_firmware2");
1029
1030 ret = pvr2_locate_firmware(hdw,&fw_entry,"encoder",
1031 sizeof(fw_files)/sizeof(fw_files[0]),
1032 fw_files);
1033 if (ret < 0) return ret;
1034 fwidx = ret;
1035 ret = 0;
1036 /* Since we're about to completely reinitialize the encoder,
1037 invalidate our cached copy of its configuration state. Next
1038 time we configure the encoder, then we'll fully configure it. */
1039 hdw->enc_cur_valid = 0;
1040
1041 /* First prepare firmware loading */
1042 ret |= pvr2_write_register(hdw, 0x0048, 0xffffffff); /*interrupt mask*/
1043 ret |= pvr2_hdw_gpio_chg_dir(hdw,0xffffffff,0x00000088); /*gpio dir*/
1044 ret |= pvr2_hdw_gpio_chg_out(hdw,0xffffffff,0x00000008); /*gpio output state*/
1045 ret |= pvr2_hdw_cmd_deep_reset(hdw);
1046 ret |= pvr2_write_register(hdw, 0xa064, 0x00000000); /*APU command*/
1047 ret |= pvr2_hdw_gpio_chg_dir(hdw,0xffffffff,0x00000408); /*gpio dir*/
1048 ret |= pvr2_hdw_gpio_chg_out(hdw,0xffffffff,0x00000008); /*gpio output state*/
1049 ret |= pvr2_write_register(hdw, 0x9058, 0xffffffed); /*VPU ctrl*/
1050 ret |= pvr2_write_register(hdw, 0x9054, 0xfffffffd); /*reset hw blocks*/
1051 ret |= pvr2_write_register(hdw, 0x07f8, 0x80000800); /*encoder SDRAM refresh*/
1052 ret |= pvr2_write_register(hdw, 0x07fc, 0x0000001a); /*encoder SDRAM pre-charge*/
1053 ret |= pvr2_write_register(hdw, 0x0700, 0x00000000); /*I2C clock*/
1054 ret |= pvr2_write_register(hdw, 0xaa00, 0x00000000); /*unknown*/
1055 ret |= pvr2_write_register(hdw, 0xaa04, 0x00057810); /*unknown*/
1056 ret |= pvr2_write_register(hdw, 0xaa10, 0x00148500); /*unknown*/
1057 ret |= pvr2_write_register(hdw, 0xaa18, 0x00840000); /*unknown*/
1058 ret |= pvr2_write_u8(hdw, 0x52, 0);
1059 ret |= pvr2_write_u16(hdw, 0x0600, 0);
1060
1061 if (ret) {
1062 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
1063 "firmware2 upload prep failed, ret=%d",ret);
1064 release_firmware(fw_entry);
1065 return ret;
1066 }
1067
1068 /* Now send firmware */
1069
1070 fw_len = fw_entry->size;
1071
1072 if (fw_len % FIRMWARE_CHUNK_SIZE) {
1073 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
1074 "size of %s firmware"
1075 " must be a multiple of 8192B",
1076 fw_files[fwidx]);
1077 release_firmware(fw_entry);
1078 return -1;
1079 }
1080
1081 fw_ptr = kmalloc(FIRMWARE_CHUNK_SIZE, GFP_KERNEL);
1082 if (fw_ptr == NULL){
1083 release_firmware(fw_entry);
1084 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
1085 "failed to allocate memory for firmware2 upload");
1086 return -ENOMEM;
1087 }
1088
1089 pipe = usb_sndbulkpipe(hdw->usb_dev, PVR2_FIRMWARE_ENDPOINT);
1090
1091 for (fw_done = 0 ; (fw_done < fw_len) && !ret ;
1092 fw_done += FIRMWARE_CHUNK_SIZE ) {
1093 int i;
1094 memcpy(fw_ptr, fw_entry->data + fw_done, FIRMWARE_CHUNK_SIZE);
1095 /* Usbsnoop log shows that we must swap bytes... */
1096 for (i = 0; i < FIRMWARE_CHUNK_SIZE/4 ; i++)
1097 ((u32 *)fw_ptr)[i] = ___swab32(((u32 *)fw_ptr)[i]);
1098
1099 ret |= usb_bulk_msg(hdw->usb_dev, pipe, fw_ptr,
1100 FIRMWARE_CHUNK_SIZE,
1101 &actual_length, HZ);
1102 ret |= (actual_length != FIRMWARE_CHUNK_SIZE);
1103 }
1104
1105 trace_firmware("upload of %s : %i / %i ",
1106 fw_files[fwidx],fw_done,fw_len);
1107
1108 kfree(fw_ptr);
1109 release_firmware(fw_entry);
1110
1111 if (ret) {
1112 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
1113 "firmware2 upload transfer failure");
1114 return ret;
1115 }
1116
1117 /* Finish upload */
1118
1119 ret |= pvr2_write_register(hdw, 0x9054, 0xffffffff); /*reset hw blocks*/
1120 ret |= pvr2_write_register(hdw, 0x9058, 0xffffffe8); /*VPU ctrl*/
1121 ret |= pvr2_write_u16(hdw, 0x0600, 0);
1122
1123 if (ret) {
1124 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
1125 "firmware2 upload post-proc failure");
1126 } else {
1127 hdw->subsys_enabled_mask |= (1<<PVR2_SUBSYS_B_ENC_FIRMWARE);
1128 }
1129 return ret;
1130}
1131
1132
/* Subsystem bits cleared when encoder error recovery is initiated in
   pvr2_hdw_subsys_bit_chg_no_lock(), forcing those subsystems to be
   reloaded / reconfigured / restarted on the next pass. */
#define FIRMWARE_RECOVERY_BITS \
	((1<<PVR2_SUBSYS_B_ENC_CFG) | \
	 (1<<PVR2_SUBSYS_B_ENC_RUN) | \
	 (1<<PVR2_SUBSYS_B_ENC_FIRMWARE) | \
	 (1<<PVR2_SUBSYS_B_USBSTREAM_RUN))
1138
1139/*
1140
1141 This single function is key to pretty much everything. The pvrusb2
1142 device can logically be viewed as a series of subsystems which can be
1143 stopped / started or unconfigured / configured. To get things streaming,
1144 one must configure everything and start everything, but there may be
1145 various reasons over time to deconfigure something or stop something.
1146 This function handles all of this activity. Everything EVERYWHERE that
1147 must affect a subsystem eventually comes here to do the work.
1148
1149 The current state of all subsystems is represented by a single bit mask,
1150 known as subsys_enabled_mask. The bit positions are defined by the
1151 PVR2_SUBSYS_xxxx macros, with one subsystem per bit position. At any
1152 time the set of configured or active subsystems can be queried just by
1153 looking at that mask. To change bits in that mask, this function here
1154 must be called. The "msk" argument indicates which bit positions to
1155 change, and the "val" argument defines the new values for the positions
1156 defined by "msk".
1157
1158 There is a priority ordering of starting / stopping things, and for
1159 multiple requested changes, this function implements that ordering.
1160 (Thus we will act on a request to load encoder firmware before we
1161 configure the encoder.) In addition to priority ordering, there is a
1162 recovery strategy implemented here. If a particular step fails and we
1163 detect that failure, this function will clear the affected subsystem bits
1164 and restart. Thus we have a means for recovering from a dead encoder:
1165 Clear all bits that correspond to subsystems that we need to restart /
1166 reconfigure and start over.
1167
1168*/
void pvr2_hdw_subsys_bit_chg_no_lock(struct pvr2_hdw *hdw,
				     unsigned long msk,unsigned long val)
{
	unsigned long nmsk;
	unsigned long vmsk;
	int ret;
	unsigned int tryCount = 0;

	if (!hdw->flag_ok) return;

	msk &= PVR2_SUBSYS_ALL;
	/* nmsk is the target state: the current mask with the bits
	   selected by msk replaced by the corresponding bits of val. */
	nmsk = (hdw->subsys_enabled_mask & ~msk) | (val & msk);
	nmsk &= PVR2_SUBSYS_ALL;

	for (;;) {
		tryCount++;
		/* Done once the current state matches the target. */
		if (!((nmsk ^ hdw->subsys_enabled_mask) &
		      PVR2_SUBSYS_ALL)) break;
		if (tryCount > 4) {
			pvr2_trace(PVR2_TRACE_ERROR_LEGS,
				   "Too many retries when configuring device;"
				   " giving up");
			pvr2_hdw_render_useless(hdw);
			break;
		}
		if (tryCount > 1) {
			pvr2_trace(PVR2_TRACE_ERROR_LEGS,
				   "Retrying device reconfiguration");
		}
		pvr2_trace(PVR2_TRACE_INIT,
			   "subsys mask changing 0x%lx:0x%lx"
			   " from 0x%lx to 0x%lx",
			   msk,val,hdw->subsys_enabled_mask,nmsk);

		/* Shutdown pass: vmsk = bits currently enabled that must
		   be turned off, handled in stop-priority order (stop the
		   encoder before stopping the stream, etc.). */
		vmsk = (nmsk ^ hdw->subsys_enabled_mask) &
			hdw->subsys_enabled_mask;
		if (vmsk) {
			if (vmsk & (1<<PVR2_SUBSYS_B_ENC_RUN)) {
				pvr2_trace(PVR2_TRACE_CTL,
					   "/*---TRACE_CTL----*/"
					   " pvr2_encoder_stop");
				ret = pvr2_encoder_stop(hdw);
				if (ret) {
					/* Encoder looks dead; clear the
					   firmware-related bits so the
					   next pass reloads and
					   reconfigures it. */
					pvr2_trace(PVR2_TRACE_ERROR_LEGS,
						   "Error recovery initiated");
					hdw->subsys_enabled_mask &=
						~FIRMWARE_RECOVERY_BITS;
					continue;
				}
			}
			if (vmsk & (1<<PVR2_SUBSYS_B_USBSTREAM_RUN)) {
				pvr2_trace(PVR2_TRACE_CTL,
					   "/*---TRACE_CTL----*/"
					   " pvr2_hdw_cmd_usbstream(0)");
				pvr2_hdw_cmd_usbstream(hdw,0);
			}
			if (vmsk & (1<<PVR2_SUBSYS_B_DIGITIZER_RUN)) {
				pvr2_trace(PVR2_TRACE_CTL,
					   "/*---TRACE_CTL----*/"
					   " decoder disable");
				if (hdw->decoder_ctrl) {
					hdw->decoder_ctrl->enable(
						hdw->decoder_ctrl->ctxt,0);
				} else {
					pvr2_trace(PVR2_TRACE_ERROR_LEGS,
						   "WARNING:"
						   " No decoder present");
				}
				hdw->subsys_enabled_mask &=
					~(1<<PVR2_SUBSYS_B_DIGITIZER_RUN);
			}
			if (vmsk & PVR2_SUBSYS_CFG_ALL) {
				/* Configuration bits have no explicit
				   teardown action; just clear them. */
				hdw->subsys_enabled_mask &=
					~(vmsk & PVR2_SUBSYS_CFG_ALL);
			}
		}
		/* Startup pass: vmsk = bits that must be turned on,
		   handled in start-priority order (load firmware before
		   configuring, configure before starting, etc.). */
		vmsk = (nmsk ^ hdw->subsys_enabled_mask) & nmsk;
		if (vmsk) {
			if (vmsk & (1<<PVR2_SUBSYS_B_ENC_FIRMWARE)) {
				pvr2_trace(PVR2_TRACE_CTL,
					   "/*---TRACE_CTL----*/"
					   " pvr2_upload_firmware2");
				ret = pvr2_upload_firmware2(hdw);
				if (ret) {
					pvr2_trace(PVR2_TRACE_ERROR_LEGS,
						   "Failure uploading encoder"
						   " firmware");
					pvr2_hdw_render_useless(hdw);
					break;
				}
			}
			if (vmsk & (1<<PVR2_SUBSYS_B_ENC_CFG)) {
				pvr2_trace(PVR2_TRACE_CTL,
					   "/*---TRACE_CTL----*/"
					   " pvr2_encoder_configure");
				ret = pvr2_encoder_configure(hdw);
				if (ret) {
					pvr2_trace(PVR2_TRACE_ERROR_LEGS,
						   "Error recovery initiated");
					hdw->subsys_enabled_mask &=
						~FIRMWARE_RECOVERY_BITS;
					continue;
				}
			}
			if (vmsk & (1<<PVR2_SUBSYS_B_DIGITIZER_RUN)) {
				pvr2_trace(PVR2_TRACE_CTL,
					   "/*---TRACE_CTL----*/"
					   " decoder enable");
				if (hdw->decoder_ctrl) {
					hdw->decoder_ctrl->enable(
						hdw->decoder_ctrl->ctxt,!0);
				} else {
					pvr2_trace(PVR2_TRACE_ERROR_LEGS,
						   "WARNING:"
						   " No decoder present");
				}
				hdw->subsys_enabled_mask |=
					(1<<PVR2_SUBSYS_B_DIGITIZER_RUN);
			}
			if (vmsk & (1<<PVR2_SUBSYS_B_USBSTREAM_RUN)) {
				pvr2_trace(PVR2_TRACE_CTL,
					   "/*---TRACE_CTL----*/"
					   " pvr2_hdw_cmd_usbstream(1)");
				pvr2_hdw_cmd_usbstream(hdw,!0);
			}
			if (vmsk & (1<<PVR2_SUBSYS_B_ENC_RUN)) {
				pvr2_trace(PVR2_TRACE_CTL,
					   "/*---TRACE_CTL----*/"
					   " pvr2_encoder_start");
				ret = pvr2_encoder_start(hdw);
				if (ret) {
					pvr2_trace(PVR2_TRACE_ERROR_LEGS,
						   "Error recovery initiated");
					hdw->subsys_enabled_mask &=
						~FIRMWARE_RECOVERY_BITS;
					continue;
				}
			}
		}
	}
}
1310
1311
/* Locked wrapper around pvr2_hdw_subsys_bit_chg_no_lock(); takes
   big_lock for the duration of the change. */
void pvr2_hdw_subsys_bit_chg(struct pvr2_hdw *hdw,
			     unsigned long msk,unsigned long val)
{
	LOCK_TAKE(hdw->big_lock); do {
		pvr2_hdw_subsys_bit_chg_no_lock(hdw,msk,val);
	} while (0); LOCK_GIVE(hdw->big_lock);
}
1319
1320
/* Enable (configure/start) the subsystems selected by msk. */
void pvr2_hdw_subsys_bit_set(struct pvr2_hdw *hdw,unsigned long msk)
{
	pvr2_hdw_subsys_bit_chg(hdw,msk,msk);
}
1325
1326
/* Disable (stop/unconfigure) the subsystems selected by msk. */
void pvr2_hdw_subsys_bit_clr(struct pvr2_hdw *hdw,unsigned long msk)
{
	pvr2_hdw_subsys_bit_chg(hdw,msk,0);
}
1331
1332
/* Return the current subsystem enabled/configured bit mask. */
unsigned long pvr2_hdw_subsys_get(struct pvr2_hdw *hdw)
{
	return hdw->subsys_enabled_mask;
}
1337
1338
/* Return the mask of subsystems involved in streaming start/stop. */
unsigned long pvr2_hdw_subsys_stream_get(struct pvr2_hdw *hdw)
{
	return hdw->subsys_stream_mask;
}
1343
1344
1345void pvr2_hdw_subsys_stream_bit_chg_no_lock(struct pvr2_hdw *hdw,
1346 unsigned long msk,
1347 unsigned long val)
1348{
1349 unsigned long val2;
1350 msk &= PVR2_SUBSYS_ALL;
1351 val2 = ((hdw->subsys_stream_mask & ~msk) | (val & msk));
1352 pvr2_trace(PVR2_TRACE_INIT,
1353 "stream mask changing 0x%lx:0x%lx from 0x%lx to 0x%lx",
1354 msk,val,hdw->subsys_stream_mask,val2);
1355 hdw->subsys_stream_mask = val2;
1356}
1357
1358
/* Locked wrapper around pvr2_hdw_subsys_stream_bit_chg_no_lock(). */
void pvr2_hdw_subsys_stream_bit_chg(struct pvr2_hdw *hdw,
				    unsigned long msk,
				    unsigned long val)
{
	LOCK_TAKE(hdw->big_lock); do {
		pvr2_hdw_subsys_stream_bit_chg_no_lock(hdw,msk,val);
	} while (0); LOCK_GIVE(hdw->big_lock);
}
1367
1368
/* Start (enableFl nonzero) or stop streaming.  No-op if already in the
   requested state.  Enabling turns on all subsystem bits; disabling
   clears only the bits in subsys_stream_mask.  Returns 0 on success or
   -EIO if the device was rendered unusable along the way.  Caller is
   expected to hold big_lock (see pvr2_hdw_set_streaming()). */
int pvr2_hdw_set_streaming_no_lock(struct pvr2_hdw *hdw,int enableFl)
{
	/* Both sides are normalized to 0/1 by '!', so this reads
	   "requested state equals current state". */
	if ((!enableFl) == !(hdw->flag_streaming_enabled)) return 0;
	if (enableFl) {
		pvr2_trace(PVR2_TRACE_START_STOP,
			   "/*--TRACE_STREAM--*/ enable");
		pvr2_hdw_subsys_bit_chg_no_lock(hdw,~0,~0);
	} else {
		pvr2_trace(PVR2_TRACE_START_STOP,
			   "/*--TRACE_STREAM--*/ disable");
		pvr2_hdw_subsys_bit_chg_no_lock(hdw,hdw->subsys_stream_mask,0);
	}
	if (!hdw->flag_ok) return -EIO;
	hdw->flag_streaming_enabled = enableFl != 0;
	return 0;
}
1385
1386
/* Return nonzero if streaming is currently enabled. */
int pvr2_hdw_get_streaming(struct pvr2_hdw *hdw)
{
	return hdw->flag_streaming_enabled != 0;
}
1391
1392
/* Locked wrapper around pvr2_hdw_set_streaming_no_lock(); returns its
   result (0 or -EIO). */
int pvr2_hdw_set_streaming(struct pvr2_hdw *hdw,int enable_flag)
{
	int ret;
	LOCK_TAKE(hdw->big_lock); do {
		ret = pvr2_hdw_set_streaming_no_lock(hdw,enable_flag);
	} while (0); LOCK_GIVE(hdw->big_lock);
	return ret;
}
1401
1402
/* Switch the stream configuration: shut down the streaming-related
   subsystems, record the new config, then restore the previously
   enabled subsystem mask (restarting what was running).  Returns 0, or
   -EIO if the device is unusable.  Caller holds big_lock. */
int pvr2_hdw_set_stream_type_no_lock(struct pvr2_hdw *hdw,
				     enum pvr2_config config)
{
	unsigned long sm = hdw->subsys_enabled_mask;
	if (!hdw->flag_ok) return -EIO;
	pvr2_hdw_subsys_bit_chg_no_lock(hdw,hdw->subsys_stream_mask,0);
	hdw->config = config;
	pvr2_hdw_subsys_bit_chg_no_lock(hdw,~0,sm);
	return 0;
}
1413
1414
/* Locked wrapper around pvr2_hdw_set_stream_type_no_lock(); also bails
   out early with -EIO if the device is already known unusable. */
int pvr2_hdw_set_stream_type(struct pvr2_hdw *hdw,enum pvr2_config config)
{
	int ret;
	if (!hdw->flag_ok) return -EIO;
	LOCK_TAKE(hdw->big_lock);
	ret = pvr2_hdw_set_stream_type_no_lock(hdw,config);
	LOCK_GIVE(hdw->big_lock);
	return ret;
}
1424
1425
1426static int get_default_tuner_type(struct pvr2_hdw *hdw)
1427{
1428 int unit_number = hdw->unit_number;
1429 int tp = -1;
1430 if ((unit_number >= 0) && (unit_number < PVR_NUM)) {
1431 tp = tuner[unit_number];
1432 }
1433 if (tp < 0) return -EINVAL;
1434 hdw->tuner_type = tp;
1435 return 0;
1436}
1437
1438
1439static v4l2_std_id get_default_standard(struct pvr2_hdw *hdw)
1440{
1441 int unit_number = hdw->unit_number;
1442 int tp = 0;
1443 if ((unit_number >= 0) && (unit_number < PVR_NUM)) {
1444 tp = video_std[unit_number];
1445 }
1446 return tp;
1447}
1448
1449
1450static unsigned int get_default_error_tolerance(struct pvr2_hdw *hdw)
1451{
1452 int unit_number = hdw->unit_number;
1453 int tp = 0;
1454 if ((unit_number >= 0) && (unit_number < PVR_NUM)) {
1455 tp = tolerance[unit_number];
1456 }
1457 return tp;
1458}
1459
1460
/* Probe whether the full FX2 firmware is running; returns nonzero
   (true) if the probe succeeded. */
static int pvr2_hdw_check_firmware(struct pvr2_hdw *hdw)
{
	/* Try a harmless request to fetch the eeprom's address over
	   endpoint 1. See what happens. Only the full FX2 image can
	   respond to this. If this probe fails then likely the FX2
	   firmware needs be loaded. */
	int result;
	LOCK_TAKE(hdw->ctl_lock); do {
		/* 0xeb appears to be the "get eeprom address" device
		   command — TODO confirm against the FX2 command set. */
		hdw->cmd_buffer[0] = 0xeb;
		result = pvr2_send_request_ex(hdw,HZ*1,!0,
					   hdw->cmd_buffer,1,
					   hdw->cmd_buffer,1);
		if (result < 0) break;
	} while(0); LOCK_GIVE(hdw->ctl_lock);
	if (result) {
		pvr2_trace(PVR2_TRACE_INIT,
			   "Probe of device endpoint 1 result status %d",
			   result);
	} else {
		pvr2_trace(PVR2_TRACE_INIT,
			   "Probe of device endpoint 1 succeeded");
	}
	return result == 0;
}
1485
/* Choose the initial video standard: start from any per-unit default
   (video_std[] table, via get_default_standard()), widen the available
   mask beyond what the eeprom reported if that default requires it, and
   then either force the default standard or auto-select the first
   enumerated one. */
static void pvr2_hdw_setup_std(struct pvr2_hdw *hdw)
{
	char buf[40];
	unsigned int bcnt;
	v4l2_std_id std1,std2;

	std1 = get_default_standard(hdw);

	bcnt = pvr2_std_id_to_str(buf,sizeof(buf),hdw->std_mask_eeprom);
	pvr2_trace(PVR2_TRACE_INIT,
		   "Supported video standard(s) reported by eeprom: %.*s",
		   bcnt,buf);

	hdw->std_mask_avail = hdw->std_mask_eeprom;

	/* std2 = bits the user-requested default needs but the eeprom
	   did not advertise; grow the available mask to include them. */
	std2 = std1 & ~hdw->std_mask_avail;
	if (std2) {
		bcnt = pvr2_std_id_to_str(buf,sizeof(buf),std2);
		pvr2_trace(PVR2_TRACE_INIT,
			   "Expanding supported video standards"
			   " to include: %.*s",
			   bcnt,buf);
		hdw->std_mask_avail |= std2;
	}

	pvr2_hdw_internal_set_std_avail(hdw);

	if (std1) {
		bcnt = pvr2_std_id_to_str(buf,sizeof(buf),std1);
		pvr2_trace(PVR2_TRACE_INIT,
			   "Initial video standard forced to %.*s",
			   bcnt,buf);
		hdw->std_mask_cur = std1;
		hdw->std_dirty = !0;
		pvr2_hdw_internal_find_stdenum(hdw);
		return;
	}

	if (hdw->std_enum_cnt > 1) {
		// Autoselect the first listed standard
		hdw->std_enum_cur = 1;
		hdw->std_mask_cur = hdw->std_defs[hdw->std_enum_cur-1].id;
		hdw->std_dirty = !0;
		pvr2_trace(PVR2_TRACE_INIT,
			   "Initial video standard auto-selected to %s",
			   hdw->std_defs[hdw->std_enum_cur-1].name);
		return;
	}

	pvr2_trace(PVR2_TRACE_ERROR_LEGS,
		   "Unable to select a viable initial video standard");
}
1538
1539
/* Core one-time device bring-up; called with big_lock held (see
   pvr2_hdw_setup()).  Uploads FX2 firmware if needed (then returns,
   expecting the device to disconnect and re-enumerate), powers up the
   hardware, loads encoder firmware, brings up the i2c layer, applies
   control defaults, reads the eeprom, selects a video standard and
   creates the video stream.  Full success is recorded by setting
   hdw->flag_init_ok; any failure simply returns early. */
static void pvr2_hdw_setup_low(struct pvr2_hdw *hdw)
{
	int ret;
	unsigned int idx;
	struct pvr2_ctrl *cptr;
	int reloadFl = 0;
	/* Firmware heuristic #1: a device whose current altsetting shows
	   no endpoints has not yet loaded its FX2 firmware. */
	if (!reloadFl) {
		reloadFl = (hdw->usb_intf->cur_altsetting->desc.bNumEndpoints
			    == 0);
		if (reloadFl) {
			pvr2_trace(PVR2_TRACE_INIT,
				   "USB endpoint config looks strange"
				   "; possibly firmware needs to be loaded");
		}
	}
	/* Firmware heuristic #2: actively probe endpoint 1. */
	if (!reloadFl) {
		reloadFl = !pvr2_hdw_check_firmware(hdw);
		if (reloadFl) {
			pvr2_trace(PVR2_TRACE_INIT,
				   "Check for FX2 firmware failed"
				   "; possibly firmware needs to be loaded");
		}
	}
	if (reloadFl) {
		/* Upload and bail out; the device should reconnect with
		   the new firmware and setup will run again. */
		if (pvr2_upload_firmware1(hdw) != 0) {
			pvr2_trace(PVR2_TRACE_ERROR_LEGS,
				   "Failure uploading firmware1");
		}
		return;
	}
	hdw->fw1_state = FW1_STATE_OK;

	if (initusbreset) {
		pvr2_hdw_device_reset(hdw);
	}
	if (!pvr2_hdw_dev_ok(hdw)) return;

	/* Pull in the client modules this device type depends on. */
	for (idx = 0; idx < pvr2_client_lists[hdw->hdw_type].cnt; idx++) {
		request_module(pvr2_client_lists[hdw->hdw_type].lst[idx]);
	}

	pvr2_hdw_cmd_powerup(hdw);
	if (!pvr2_hdw_dev_ok(hdw)) return;

	if (pvr2_upload_firmware2(hdw)){
		pvr2_trace(PVR2_TRACE_ERROR_LEGS,"device unstable!!");
		pvr2_hdw_render_useless(hdw);
		return;
	}

	// This step MUST happen after the earlier powerup step.
	pvr2_i2c_core_init(hdw);
	if (!pvr2_hdw_dev_ok(hdw)) return;

	/* Push default values into every control that accepts them
	   (controls flagged skip_init are left alone). */
	for (idx = 0; idx < CTRLDEF_COUNT; idx++) {
		cptr = hdw->controls + idx;
		if (cptr->info->skip_init) continue;
		if (!cptr->info->set_value) continue;
		cptr->info->set_value(cptr,~0,cptr->info->default_value);
	}

	// Do not use pvr2_reset_ctl_endpoints() here. It is not
	// thread-safe against the normal pvr2_send_request() mechanism.
	// (We should make it thread safe).

	ret = pvr2_hdw_get_eeprom_addr(hdw);
	if (!pvr2_hdw_dev_ok(hdw)) return;
	if (ret < 0) {
		pvr2_trace(PVR2_TRACE_ERROR_LEGS,
			   "Unable to determine location of eeprom, skipping");
	} else {
		hdw->eeprom_addr = ret;
		pvr2_eeprom_analyze(hdw);
		if (!pvr2_hdw_dev_ok(hdw)) return;
	}

	pvr2_hdw_setup_std(hdw);

	if (!get_default_tuner_type(hdw)) {
		pvr2_trace(PVR2_TRACE_INIT,
			   "pvr2_hdw_setup: Tuner type overridden to %d",
			   hdw->tuner_type);
	}

	/* Force the i2c layer to notice the (possibly overridden)
	   tuner type. */
	hdw->tuner_updated = !0;
	pvr2_i2c_core_check_stale(hdw);
	hdw->tuner_updated = 0;

	if (!pvr2_hdw_dev_ok(hdw)) return;

	pvr2_hdw_commit_ctl_internal(hdw);
	if (!pvr2_hdw_dev_ok(hdw)) return;

	hdw->vid_stream = pvr2_stream_create();
	if (!pvr2_hdw_dev_ok(hdw)) return;
	pvr2_trace(PVR2_TRACE_INIT,
		   "pvr2_hdw_setup: video stream is %p",hdw->vid_stream);
	if (hdw->vid_stream) {
		idx = get_default_error_tolerance(hdw);
		if (idx) {
			pvr2_trace(PVR2_TRACE_INIT,
				   "pvr2_hdw_setup: video stream %p"
				   " setting tolerance %u",
				   hdw->vid_stream,idx);
		}
		pvr2_stream_setup(hdw->vid_stream,hdw->usb_dev,
				  PVR2_VID_ENDPOINT,idx);
	}

	if (!pvr2_hdw_dev_ok(hdw)) return;

	/* Make sure everything is up to date */
	pvr2_i2c_core_sync(hdw);

	if (!pvr2_hdw_dev_ok(hdw)) return;

	hdw->flag_init_ok = !0;
}
1658
1659
/* Top-level device setup: run pvr2_hdw_setup_low() under big_lock and
   log the outcome.  If initialization failed and the procreload option
   is set, attempt recovery by re-uploading the primary (FX2) firmware.
   Returns nonzero (hdw->flag_init_ok) when the device came up fully
   operational. */
int pvr2_hdw_setup(struct pvr2_hdw *hdw)
{
	pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_setup(hdw=%p) begin",hdw);
	LOCK_TAKE(hdw->big_lock); do {
		pvr2_hdw_setup_low(hdw);
		pvr2_trace(PVR2_TRACE_INIT,
			   "pvr2_hdw_setup(hdw=%p) done, ok=%d init_ok=%d",
			   hdw,hdw->flag_ok,hdw->flag_init_ok);
		if (pvr2_hdw_dev_ok(hdw)) {
			if (pvr2_hdw_init_ok(hdw)) {
				pvr2_trace(
					PVR2_TRACE_INFO,
					"Device initialization"
					" completed successfully.");
				break;
			}
			if (hdw->fw1_state == FW1_STATE_RELOAD) {
				/* Not an error: firmware was just sent and
				   the device will re-enumerate. */
				pvr2_trace(
					PVR2_TRACE_INFO,
					"Device microcontroller firmware"
					" (re)loaded; it should now reset"
					" and reconnect.");
				break;
			}
			pvr2_trace(
				PVR2_TRACE_ERROR_LEGS,
				"Device initialization was not successful.");
			if (hdw->fw1_state == FW1_STATE_MISSING) {
				pvr2_trace(
					PVR2_TRACE_ERROR_LEGS,
					"Giving up since device"
					" microcontroller firmware"
					" appears to be missing.");
				break;
			}
		}
		if (procreload) {
			pvr2_trace(
				PVR2_TRACE_ERROR_LEGS,
				"Attempting pvrusb2 recovery by reloading"
				" primary firmware.");
			pvr2_trace(
				PVR2_TRACE_ERROR_LEGS,
				"If this works, device should disconnect"
				" and reconnect in a sane state.");
			hdw->fw1_state = FW1_STATE_UNKNOWN;
			pvr2_upload_firmware1(hdw);
		} else {
			pvr2_trace(
				PVR2_TRACE_ERROR_LEGS,
				"***WARNING*** pvrusb2 device hardware"
				" appears to be jammed"
				" and I can't clear it.");
			pvr2_trace(
				PVR2_TRACE_ERROR_LEGS,
				"You might need to power cycle"
				" the pvrusb2 device"
				" in order to recover.");
		}
	} while (0); LOCK_GIVE(hdw->big_lock);
	pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_setup(hdw=%p) end",hdw);
	return hdw->flag_init_ok;
}
1723
1724
1725/* Create and return a structure for interacting with the underlying
1726 hardware */
1727struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
1728 const struct usb_device_id *devid)
1729{
1730 unsigned int idx,cnt1,cnt2;
1731 struct pvr2_hdw *hdw;
1732 unsigned int hdw_type;
1733 int valid_std_mask;
1734 struct pvr2_ctrl *cptr;
1735 __u8 ifnum;
1736 struct v4l2_queryctrl qctrl;
1737 struct pvr2_ctl_info *ciptr;
1738
1739 hdw_type = devid - pvr2_device_table;
1740 if (hdw_type >=
1741 sizeof(pvr2_device_names)/sizeof(pvr2_device_names[0])) {
1742 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
1743 "Bogus device type of %u reported",hdw_type);
1744 return 0;
1745 }
1746
1747 hdw = kmalloc(sizeof(*hdw),GFP_KERNEL);
1748 pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_create: hdw=%p, type \"%s\"",
1749 hdw,pvr2_device_names[hdw_type]);
1750 if (!hdw) goto fail;
1751 memset(hdw,0,sizeof(*hdw));
1752 cx2341x_fill_defaults(&hdw->enc_ctl_state);
1753
1754 hdw->control_cnt = CTRLDEF_COUNT;
1755 hdw->control_cnt += MPEGDEF_COUNT;
1756 hdw->controls = kmalloc(sizeof(struct pvr2_ctrl) * hdw->control_cnt,
1757 GFP_KERNEL);
1758 if (!hdw->controls) goto fail;
1759 memset(hdw->controls,0,sizeof(struct pvr2_ctrl) * hdw->control_cnt);
1760 hdw->hdw_type = hdw_type;
1761 for (idx = 0; idx < hdw->control_cnt; idx++) {
1762 cptr = hdw->controls + idx;
1763 cptr->hdw = hdw;
1764 }
1765 for (idx = 0; idx < 32; idx++) {
1766 hdw->std_mask_ptrs[idx] = hdw->std_mask_names[idx];
1767 }
1768 for (idx = 0; idx < CTRLDEF_COUNT; idx++) {
1769 cptr = hdw->controls + idx;
1770 cptr->info = control_defs+idx;
1771 }
1772 /* Define and configure additional controls from cx2341x module. */
1773 hdw->mpeg_ctrl_info = kmalloc(
1774 sizeof(*(hdw->mpeg_ctrl_info)) * MPEGDEF_COUNT, GFP_KERNEL);
1775 if (!hdw->mpeg_ctrl_info) goto fail;
1776 memset(hdw->mpeg_ctrl_info,0,
1777 sizeof(*(hdw->mpeg_ctrl_info)) * MPEGDEF_COUNT);
1778 for (idx = 0; idx < MPEGDEF_COUNT; idx++) {
1779 cptr = hdw->controls + idx + CTRLDEF_COUNT;
1780 ciptr = &(hdw->mpeg_ctrl_info[idx].info);
1781 ciptr->desc = hdw->mpeg_ctrl_info[idx].desc;
1782 ciptr->name = mpeg_ids[idx].strid;
1783 ciptr->v4l_id = mpeg_ids[idx].id;
1784 ciptr->skip_init = !0;
1785 ciptr->get_value = ctrl_cx2341x_get;
1786 ciptr->get_v4lflags = ctrl_cx2341x_getv4lflags;
1787 ciptr->is_dirty = ctrl_cx2341x_is_dirty;
1788 if (!idx) ciptr->clear_dirty = ctrl_cx2341x_clear_dirty;
1789 qctrl.id = ciptr->v4l_id;
1790 cx2341x_ctrl_query(&hdw->enc_ctl_state,&qctrl);
1791 if (!(qctrl.flags & V4L2_CTRL_FLAG_READ_ONLY)) {
1792 ciptr->set_value = ctrl_cx2341x_set;
1793 }
1794 strncpy(hdw->mpeg_ctrl_info[idx].desc,qctrl.name,
1795 PVR2_CTLD_INFO_DESC_SIZE);
1796 hdw->mpeg_ctrl_info[idx].desc[PVR2_CTLD_INFO_DESC_SIZE-1] = 0;
1797 ciptr->default_value = qctrl.default_value;
1798 switch (qctrl.type) {
1799 default:
1800 case V4L2_CTRL_TYPE_INTEGER:
1801 ciptr->type = pvr2_ctl_int;
1802 ciptr->def.type_int.min_value = qctrl.minimum;
1803 ciptr->def.type_int.max_value = qctrl.maximum;
1804 break;
1805 case V4L2_CTRL_TYPE_BOOLEAN:
1806 ciptr->type = pvr2_ctl_bool;
1807 break;
1808 case V4L2_CTRL_TYPE_MENU:
1809 ciptr->type = pvr2_ctl_enum;
1810 ciptr->def.type_enum.value_names =
1811 cx2341x_ctrl_get_menu(ciptr->v4l_id);
1812 for (cnt1 = 0;
1813 ciptr->def.type_enum.value_names[cnt1] != NULL;
1814 cnt1++) { }
1815 ciptr->def.type_enum.count = cnt1;
1816 break;
1817 }
1818 cptr->info = ciptr;
1819 }
1820
1821 // Initialize video standard enum dynamic control
1822 cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDENUM);
1823 if (cptr) {
1824 memcpy(&hdw->std_info_enum,cptr->info,
1825 sizeof(hdw->std_info_enum));
1826 cptr->info = &hdw->std_info_enum;
1827
1828 }
1829 // Initialize control data regarding video standard masks
1830 valid_std_mask = pvr2_std_get_usable();
1831 for (idx = 0; idx < 32; idx++) {
1832 if (!(valid_std_mask & (1 << idx))) continue;
1833 cnt1 = pvr2_std_id_to_str(
1834 hdw->std_mask_names[idx],
1835 sizeof(hdw->std_mask_names[idx])-1,
1836 1 << idx);
1837 hdw->std_mask_names[idx][cnt1] = 0;
1838 }
1839 cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDAVAIL);
1840 if (cptr) {
1841 memcpy(&hdw->std_info_avail,cptr->info,
1842 sizeof(hdw->std_info_avail));
1843 cptr->info = &hdw->std_info_avail;
1844 hdw->std_info_avail.def.type_bitmask.bit_names =
1845 hdw->std_mask_ptrs;
1846 hdw->std_info_avail.def.type_bitmask.valid_bits =
1847 valid_std_mask;
1848 }
1849 cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDCUR);
1850 if (cptr) {
1851 memcpy(&hdw->std_info_cur,cptr->info,
1852 sizeof(hdw->std_info_cur));
1853 cptr->info = &hdw->std_info_cur;
1854 hdw->std_info_cur.def.type_bitmask.bit_names =
1855 hdw->std_mask_ptrs;
1856 hdw->std_info_avail.def.type_bitmask.valid_bits =
1857 valid_std_mask;
1858 }
1859
1860 hdw->eeprom_addr = -1;
1861 hdw->unit_number = -1;
1862 hdw->v4l_minor_number = -1;
1863 hdw->ctl_write_buffer = kmalloc(PVR2_CTL_BUFFSIZE,GFP_KERNEL);
1864 if (!hdw->ctl_write_buffer) goto fail;
1865 hdw->ctl_read_buffer = kmalloc(PVR2_CTL_BUFFSIZE,GFP_KERNEL);
1866 if (!hdw->ctl_read_buffer) goto fail;
1867 hdw->ctl_write_urb = usb_alloc_urb(0,GFP_KERNEL);
1868 if (!hdw->ctl_write_urb) goto fail;
1869 hdw->ctl_read_urb = usb_alloc_urb(0,GFP_KERNEL);
1870 if (!hdw->ctl_read_urb) goto fail;
1871
1872 down(&pvr2_unit_sem); do {
1873 for (idx = 0; idx < PVR_NUM; idx++) {
1874 if (unit_pointers[idx]) continue;
1875 hdw->unit_number = idx;
1876 unit_pointers[idx] = hdw;
1877 break;
1878 }
1879 } while (0); up(&pvr2_unit_sem);
1880
1881 cnt1 = 0;
1882 cnt2 = scnprintf(hdw->name+cnt1,sizeof(hdw->name)-cnt1,"pvrusb2");
1883 cnt1 += cnt2;
1884 if (hdw->unit_number >= 0) {
1885 cnt2 = scnprintf(hdw->name+cnt1,sizeof(hdw->name)-cnt1,"_%c",
1886 ('a' + hdw->unit_number));
1887 cnt1 += cnt2;
1888 }
1889 if (cnt1 >= sizeof(hdw->name)) cnt1 = sizeof(hdw->name)-1;
1890 hdw->name[cnt1] = 0;
1891
1892 pvr2_trace(PVR2_TRACE_INIT,"Driver unit number is %d, name is %s",
1893 hdw->unit_number,hdw->name);
1894
1895 hdw->tuner_type = -1;
1896 hdw->flag_ok = !0;
1897 /* Initialize the mask of subsystems that we will shut down when we
1898 stop streaming. */
1899 hdw->subsys_stream_mask = PVR2_SUBSYS_RUN_ALL;
1900 hdw->subsys_stream_mask |= (1<<PVR2_SUBSYS_B_ENC_CFG);
1901
1902 pvr2_trace(PVR2_TRACE_INIT,"subsys_stream_mask: 0x%lx",
1903 hdw->subsys_stream_mask);
1904
1905 hdw->usb_intf = intf;
1906 hdw->usb_dev = interface_to_usbdev(intf);
1907
1908 ifnum = hdw->usb_intf->cur_altsetting->desc.bInterfaceNumber;
1909 usb_set_interface(hdw->usb_dev,ifnum,0);
1910
1911 mutex_init(&hdw->ctl_lock_mutex);
1912 mutex_init(&hdw->big_lock_mutex);
1913
1914 return hdw;
1915 fail:
1916 if (hdw) {
1917 if (hdw->ctl_read_urb) usb_free_urb(hdw->ctl_read_urb);
1918 if (hdw->ctl_write_urb) usb_free_urb(hdw->ctl_write_urb);
1919 if (hdw->ctl_read_buffer) kfree(hdw->ctl_read_buffer);
1920 if (hdw->ctl_write_buffer) kfree(hdw->ctl_write_buffer);
1921 if (hdw->controls) kfree(hdw->controls);
1922 if (hdw->mpeg_ctrl_info) kfree(hdw->mpeg_ctrl_info);
1923 kfree(hdw);
1924 }
1925 return 0;
1926}
1927
1928
/* Remove _all_ associations between this driver and the underlying USB
   layer.  Idempotent: flag_disconnected short-circuits any second call.
   Pending control URBs are killed before their buffers are freed so no
   completion can touch freed memory. */
void pvr2_hdw_remove_usb_stuff(struct pvr2_hdw *hdw)
{
	if (hdw->flag_disconnected) return;
	pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_remove_usb_stuff: hdw=%p",hdw);
	if (hdw->ctl_read_urb) {
		usb_kill_urb(hdw->ctl_read_urb);
		usb_free_urb(hdw->ctl_read_urb);
		hdw->ctl_read_urb = 0;
	}
	if (hdw->ctl_write_urb) {
		usb_kill_urb(hdw->ctl_write_urb);
		usb_free_urb(hdw->ctl_write_urb);
		hdw->ctl_write_urb = 0;
	}
	if (hdw->ctl_read_buffer) {
		kfree(hdw->ctl_read_buffer);
		hdw->ctl_read_buffer = 0;
	}
	if (hdw->ctl_write_buffer) {
		kfree(hdw->ctl_write_buffer);
		hdw->ctl_write_buffer = 0;
	}
	/* Mark the device unusable and drop our USB references. */
	pvr2_hdw_render_useless_unlocked(hdw);
	hdw->flag_disconnected = !0;
	hdw->usb_dev = 0;
	hdw->usb_intf = 0;
}
1958
1959
/* Destroy hardware interaction structure.  Detaches the attached
   sub-drivers (audio status, decoder), shuts down the i2c core and all
   USB state, releases this unit's slot in unit_pointers[], then frees
   every remaining allocation made by pvr2_hdw_create(). */
void pvr2_hdw_destroy(struct pvr2_hdw *hdw)
{
	pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_destroy: hdw=%p",hdw);
	if (hdw->fw_buffer) {
		kfree(hdw->fw_buffer);
		hdw->fw_buffer = 0;
	}
	if (hdw->vid_stream) {
		pvr2_stream_destroy(hdw->vid_stream);
		hdw->vid_stream = 0;
	}
	if (hdw->audio_stat) {
		hdw->audio_stat->detach(hdw->audio_stat->ctxt);
	}
	if (hdw->decoder_ctrl) {
		hdw->decoder_ctrl->detach(hdw->decoder_ctrl->ctxt);
	}
	pvr2_i2c_core_done(hdw);
	pvr2_hdw_remove_usb_stuff(hdw);
	/* Release our unit number so a later device can reuse it; only
	   clear the slot if it still points at us. */
	down(&pvr2_unit_sem); do {
		if ((hdw->unit_number >= 0) &&
		    (hdw->unit_number < PVR_NUM) &&
		    (unit_pointers[hdw->unit_number] == hdw)) {
			unit_pointers[hdw->unit_number] = 0;
		}
	} while (0); up(&pvr2_unit_sem);
	if (hdw->controls) kfree(hdw->controls);
	if (hdw->mpeg_ctrl_info) kfree(hdw->mpeg_ctrl_info);
	if (hdw->std_defs) kfree(hdw->std_defs);
	if (hdw->std_enum_names) kfree(hdw->std_enum_names);
	kfree(hdw);
}
1993
1994
1995int pvr2_hdw_init_ok(struct pvr2_hdw *hdw)
1996{
1997 return hdw->flag_init_ok;
1998}
1999
2000
2001int pvr2_hdw_dev_ok(struct pvr2_hdw *hdw)
2002{
2003 return (hdw && hdw->flag_ok);
2004}
2005
2006
/* Called when hardware has been unplugged.  Takes big_lock then ctl_lock
   (that nesting order is used driver-wide) and severs all USB state. */
void pvr2_hdw_disconnect(struct pvr2_hdw *hdw)
{
	pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_disconnect(hdw=%p)",hdw);
	LOCK_TAKE(hdw->big_lock);
	LOCK_TAKE(hdw->ctl_lock);
	pvr2_hdw_remove_usb_stuff(hdw);
	LOCK_GIVE(hdw->ctl_lock);
	LOCK_GIVE(hdw->big_lock);
}
2017
2018
2019// Attempt to autoselect an appropriate value for std_enum_cur given
2020// whatever is currently in std_mask_cur
2021void pvr2_hdw_internal_find_stdenum(struct pvr2_hdw *hdw)
2022{
2023 unsigned int idx;
2024 for (idx = 1; idx < hdw->std_enum_cnt; idx++) {
2025 if (hdw->std_defs[idx-1].id == hdw->std_mask_cur) {
2026 hdw->std_enum_cur = idx;
2027 return;
2028 }
2029 }
2030 hdw->std_enum_cur = 0;
2031}
2032
2033
// Calculate correct set of enumerated standards based on currently known
// set of available standards bits.  Rebuilds std_defs / std_enum_names
// (slot 0 is always "none") and refreshes the dynamic enum control's
// name table.  Resets std_enum_cur to 0.
void pvr2_hdw_internal_set_std_avail(struct pvr2_hdw *hdw)
{
	struct v4l2_standard *newstd;
	unsigned int std_cnt;
	unsigned int idx;

	newstd = pvr2_std_create_enum(&std_cnt,hdw->std_mask_avail);

	// Throw away any previous enumeration before rebuilding.
	if (hdw->std_defs) {
		kfree(hdw->std_defs);
		hdw->std_defs = 0;
	}
	hdw->std_enum_cnt = 0;
	if (hdw->std_enum_names) {
		kfree(hdw->std_enum_names);
		hdw->std_enum_names = 0;
	}

	if (!std_cnt) {
		pvr2_trace(
			PVR2_TRACE_ERROR_LEGS,
			"WARNING: Failed to identify any viable standards");
	}
	// NOTE(review): kmalloc result is dereferenced without a NULL
	// check on the next line - would oops on allocation failure;
	// a failure policy for this path needs to be decided.
	hdw->std_enum_names = kmalloc(sizeof(char *)*(std_cnt+1),GFP_KERNEL);
	hdw->std_enum_names[0] = "none";
	for (idx = 0; idx < std_cnt; idx++) {
		hdw->std_enum_names[idx+1] =
			newstd[idx].name;
	}
	// Set up the dynamic control for this standard
	hdw->std_info_enum.def.type_enum.value_names = hdw->std_enum_names;
	hdw->std_info_enum.def.type_enum.count = std_cnt+1;
	hdw->std_defs = newstd;
	hdw->std_enum_cnt = std_cnt+1;
	hdw->std_enum_cur = 0;
	hdw->std_info_cur.def.type_bitmask.valid_bits = hdw->std_mask_avail;
}
2073
2074
2075int pvr2_hdw_get_stdenum_value(struct pvr2_hdw *hdw,
2076 struct v4l2_standard *std,
2077 unsigned int idx)
2078{
2079 int ret = -EINVAL;
2080 if (!idx) return ret;
2081 LOCK_TAKE(hdw->big_lock); do {
2082 if (idx >= hdw->std_enum_cnt) break;
2083 idx--;
2084 memcpy(std,hdw->std_defs+idx,sizeof(*std));
2085 ret = 0;
2086 } while (0); LOCK_GIVE(hdw->big_lock);
2087 return ret;
2088}
2089
2090
2091/* Get the number of defined controls */
2092unsigned int pvr2_hdw_get_ctrl_count(struct pvr2_hdw *hdw)
2093{
2094 return hdw->control_cnt;
2095}
2096
2097
2098/* Retrieve a control handle given its index (0..count-1) */
2099struct pvr2_ctrl *pvr2_hdw_get_ctrl_by_index(struct pvr2_hdw *hdw,
2100 unsigned int idx)
2101{
2102 if (idx >= hdw->control_cnt) return 0;
2103 return hdw->controls + idx;
2104}
2105
2106
2107/* Retrieve a control handle given its index (0..count-1) */
2108struct pvr2_ctrl *pvr2_hdw_get_ctrl_by_id(struct pvr2_hdw *hdw,
2109 unsigned int ctl_id)
2110{
2111 struct pvr2_ctrl *cptr;
2112 unsigned int idx;
2113 int i;
2114
2115 /* This could be made a lot more efficient, but for now... */
2116 for (idx = 0; idx < hdw->control_cnt; idx++) {
2117 cptr = hdw->controls + idx;
2118 i = cptr->info->internal_id;
2119 if (i && (i == ctl_id)) return cptr;
2120 }
2121 return 0;
2122}
2123
2124
2125/* Given a V4L ID, retrieve the control structure associated with it. */
2126struct pvr2_ctrl *pvr2_hdw_get_ctrl_v4l(struct pvr2_hdw *hdw,unsigned int ctl_id)
2127{
2128 struct pvr2_ctrl *cptr;
2129 unsigned int idx;
2130 int i;
2131
2132 /* This could be made a lot more efficient, but for now... */
2133 for (idx = 0; idx < hdw->control_cnt; idx++) {
2134 cptr = hdw->controls + idx;
2135 i = cptr->info->v4l_id;
2136 if (i && (i == ctl_id)) return cptr;
2137 }
2138 return 0;
2139}
2140
2141
2142/* Given a V4L ID for its immediate predecessor, retrieve the control
2143 structure associated with it. */
2144struct pvr2_ctrl *pvr2_hdw_get_ctrl_nextv4l(struct pvr2_hdw *hdw,
2145 unsigned int ctl_id)
2146{
2147 struct pvr2_ctrl *cptr,*cp2;
2148 unsigned int idx;
2149 int i;
2150
2151 /* This could be made a lot more efficient, but for now... */
2152 cp2 = 0;
2153 for (idx = 0; idx < hdw->control_cnt; idx++) {
2154 cptr = hdw->controls + idx;
2155 i = cptr->info->v4l_id;
2156 if (!i) continue;
2157 if (i <= ctl_id) continue;
2158 if (cp2 && (cp2->info->v4l_id < i)) continue;
2159 cp2 = cptr;
2160 }
2161 return cp2;
2162 return 0;
2163}
2164
2165
2166static const char *get_ctrl_typename(enum pvr2_ctl_type tp)
2167{
2168 switch (tp) {
2169 case pvr2_ctl_int: return "integer";
2170 case pvr2_ctl_enum: return "enum";
2171 case pvr2_ctl_bool: return "boolean";
2172 case pvr2_ctl_bitmask: return "bitmask";
2173 }
2174 return "";
2175}
2176
2177
/* Commit all control changes made up to this point.  Subsystems can be
   indirectly affected by these changes.  For a given set of things being
   committed, we'll clear the affected subsystem bits and then once we're
   done committing everything we'll make a request to restore the subsystem
   state(s) back to their previous value before this function was called.
   Thus we can automatically reconfigure affected pieces of the driver as
   controls are changed.  Caller is expected to hold big_lock (see
   pvr2_hdw_commit_ctl).  Always returns 0. */
int pvr2_hdw_commit_ctl_internal(struct pvr2_hdw *hdw)
{
	unsigned long saved_subsys_mask = hdw->subsys_enabled_mask;
	unsigned long stale_subsys_mask = 0;
	unsigned int idx;
	struct pvr2_ctrl *cptr;
	int value;
	int commit_flag = 0;
	char buf[100];
	unsigned int bcnt,ccnt;

	/* Pass 1: detect dirty controls and emit one trace line per
	   changed control ("name" <-- value <type>). */
	for (idx = 0; idx < hdw->control_cnt; idx++) {
		cptr = hdw->controls + idx;
		if (cptr->info->is_dirty == 0) continue;
		if (!cptr->info->is_dirty(cptr)) continue;
		if (!commit_flag) {
			commit_flag = !0;
		}

		bcnt = scnprintf(buf,sizeof(buf),"\"%s\" <-- ",
				 cptr->info->name);
		value = 0;
		cptr->info->get_value(cptr,&value);
		pvr2_ctrl_value_to_sym_internal(cptr,~0,value,
						buf+bcnt,
						sizeof(buf)-bcnt,&ccnt);
		bcnt += ccnt;
		bcnt += scnprintf(buf+bcnt,sizeof(buf)-bcnt," <%s>",
				  get_ctrl_typename(cptr->info->type));
		pvr2_trace(PVR2_TRACE_CTL,
			   "/*--TRACE_COMMIT--*/ %.*s",
			   bcnt,buf);
	}

	if (!commit_flag) {
		/* Nothing has changed */
		return 0;
	}

	/* When video standard changes, reset the hres and vres values -
	   but if the user has pending changes there, then let the changes
	   take priority. */
	if (hdw->std_dirty) {
		/* Rewrite the vertical resolution to be appropriate to the
		   video standard that has been selected. */
		int nvres;
		if (hdw->std_mask_cur & V4L2_STD_525_60) {
			nvres = 480;
		} else {
			nvres = 576;
		}
		if (nvres != hdw->res_ver_val) {
			hdw->res_ver_val = nvres;
			hdw->res_ver_dirty = !0;
		}
	}

	if (hdw->std_dirty ||
	    0) {
		/* If any of this changes, then the encoder needs to be
		   reconfigured, and we need to reset the stream. */
		stale_subsys_mask |= (1<<PVR2_SUBSYS_B_ENC_CFG);
		stale_subsys_mask |= hdw->subsys_stream_mask;
	}

	if (hdw->srate_dirty) {
		/* Write new sample rate into control structure since
		 * the master copy is stale.  We must track srate
		 * separate from the mpeg control structure because
		 * other logic also uses this value. */
		struct v4l2_ext_controls cs;
		struct v4l2_ext_control c1;
		memset(&cs,0,sizeof(cs));
		memset(&c1,0,sizeof(c1));
		cs.controls = &c1;
		cs.count = 1;
		c1.id = V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ;
		c1.value = hdw->srate_val;
		cx2341x_ext_ctrls(&hdw->enc_ctl_state,&cs,VIDIOC_S_EXT_CTRLS);
	}

	/* Scan i2c core at this point - before we clear all the dirty
	   bits.  Various parts of the i2c core will notice dirty bits as
	   appropriate and arrange to broadcast or directly send updates to
	   the client drivers in order to keep everything in sync */
	pvr2_i2c_core_check_stale(hdw);

	/* Pass 2: clear every dirty bit now that the changes have been
	   noticed by the i2c core. */
	for (idx = 0; idx < hdw->control_cnt; idx++) {
		cptr = hdw->controls + idx;
		if (!cptr->info->clear_dirty) continue;
		cptr->info->clear_dirty(cptr);
	}

	/* Now execute i2c core update */
	pvr2_i2c_core_sync(hdw);

	/* Drop the subsystems we made stale, then restore everything that
	   was enabled when we started - this is what forces the affected
	   subsystems to reconfigure. */
	pvr2_hdw_subsys_bit_chg_no_lock(hdw,stale_subsys_mask,0);
	pvr2_hdw_subsys_bit_chg_no_lock(hdw,~0,saved_subsys_mask);

	return 0;
}
2286
2287
/* Public entry point for committing control changes: wraps the internal
   commit in big_lock.  Always returns 0. */
int pvr2_hdw_commit_ctl(struct pvr2_hdw *hdw)
{
	LOCK_TAKE(hdw->big_lock); do {
		pvr2_hdw_commit_ctl_internal(hdw);
	} while (0); LOCK_GIVE(hdw->big_lock);
	return 0;
}
2295
2296
/* Periodic poll: run an i2c core sync under big_lock. */
void pvr2_hdw_poll(struct pvr2_hdw *hdw)
{
	LOCK_TAKE(hdw->big_lock); do {
		pvr2_i2c_core_sync(hdw);
	} while (0); LOCK_GIVE(hdw->big_lock);
}
2303
2304
/* Register a callback (and its context pointer) to be invoked whenever
   this driver instance wants to be polled; see
   pvr2_hdw_poll_trigger_unlocked(). */
void pvr2_hdw_setup_poll_trigger(struct pvr2_hdw *hdw,
				 void (*func)(void *),
				 void *data)
{
	LOCK_TAKE(hdw->big_lock); do {
		hdw->poll_trigger_func = func;
		hdw->poll_trigger_data = data;
	} while (0); LOCK_GIVE(hdw->big_lock);
}
2314
2315
2316void pvr2_hdw_poll_trigger_unlocked(struct pvr2_hdw *hdw)
2317{
2318 if (hdw->poll_trigger_func) {
2319 hdw->poll_trigger_func(hdw->poll_trigger_data);
2320 }
2321}
2322
2323
/* Locked wrapper around pvr2_hdw_poll_trigger_unlocked(). */
void pvr2_hdw_poll_trigger(struct pvr2_hdw *hdw)
{
	LOCK_TAKE(hdw->big_lock); do {
		pvr2_hdw_poll_trigger_unlocked(hdw);
	} while (0); LOCK_GIVE(hdw->big_lock);
}
2330
2331
2332/* Return name for this driver instance */
2333const char *pvr2_hdw_get_driver_name(struct pvr2_hdw *hdw)
2334{
2335 return hdw->name;
2336}
2337
2338
/* Return bit mask indicating signal status (PVR2_SIGNAL_*).  For tuner
   and radio inputs the decoder and audio-status sub-drivers are queried;
   every other input is unconditionally reported as OK + stereo.  Caller
   should hold big_lock (see pvr2_hdw_get_signal_status). */
unsigned int pvr2_hdw_get_signal_status_internal(struct pvr2_hdw *hdw)
{
	unsigned int msk = 0;
	switch (hdw->input_val) {
	case PVR2_CVAL_INPUT_TV:
	case PVR2_CVAL_INPUT_RADIO:
		/* Only ask about audio once the decoder reports lock. */
		if (hdw->decoder_ctrl &&
		    hdw->decoder_ctrl->tuned(hdw->decoder_ctrl->ctxt)) {
			msk |= PVR2_SIGNAL_OK;
			if (hdw->audio_stat &&
			    hdw->audio_stat->status(hdw->audio_stat->ctxt)) {
				if (hdw->flag_stereo) {
					msk |= PVR2_SIGNAL_STEREO;
				}
				if (hdw->flag_bilingual) {
					msk |= PVR2_SIGNAL_SAP;
				}
			}
		}
		break;
	default:
		msk |= PVR2_SIGNAL_OK | PVR2_SIGNAL_STEREO;
	}
	return msk;
}
2365
2366
/* Query whether the device is operating in high speed mode.  Sends
   device command 0x0b and interprets a nonzero response byte as "high
   speed" (name suggests USB HSM; exact firmware semantics not visible
   here - confirm against firmware docs).  Returns 1/0, or a negative
   error code if the transfer failed. */
int pvr2_hdw_is_hsm(struct pvr2_hdw *hdw)
{
	int result;
	LOCK_TAKE(hdw->ctl_lock); do {
		hdw->cmd_buffer[0] = 0x0b;
		result = pvr2_send_request(hdw,
					   hdw->cmd_buffer,1,
					   hdw->cmd_buffer,1);
		if (result < 0) break;
		result = (hdw->cmd_buffer[0] != 0);
	} while(0); LOCK_GIVE(hdw->ctl_lock);
	return result;
}
2380
2381
/* Return bit mask indicating signal status; locked wrapper around
   pvr2_hdw_get_signal_status_internal(). */
unsigned int pvr2_hdw_get_signal_status(struct pvr2_hdw *hdw)
{
	unsigned int msk = 0;
	LOCK_TAKE(hdw->big_lock); do {
		msk = pvr2_hdw_get_signal_status_internal(hdw);
	} while (0); LOCK_GIVE(hdw->big_lock);
	return msk;
}
2391
2392
2393/* Get handle to video output stream */
2394struct pvr2_stream *pvr2_hdw_get_video_stream(struct pvr2_hdw *hp)
2395{
2396 return hp->vid_stream;
2397}
2398
2399
/* Ask every attached module to dump its status to the kernel log,
   bracketed by START/END banner lines.  Setting log_requested around the
   i2c stale-check is what makes the client modules emit their state. */
void pvr2_hdw_trigger_module_log(struct pvr2_hdw *hdw)
{
	int nr = pvr2_hdw_get_unit_number(hdw);
	LOCK_TAKE(hdw->big_lock); do {
		hdw->log_requested = !0;
		printk(KERN_INFO "pvrusb2: ================= START STATUS CARD #%d =================\n", nr);
		pvr2_i2c_core_check_stale(hdw);
		hdw->log_requested = 0;
		pvr2_i2c_core_sync(hdw);
		pvr2_trace(PVR2_TRACE_INFO,"cx2341x config:");
		cx2341x_log_status(&hdw->enc_ctl_state, "pvrusb2");
		printk(KERN_INFO "pvrusb2: ================== END STATUS CARD #%d ==================\n", nr);
	} while (0); LOCK_GIVE(hdw->big_lock);
}
2414
/* Enable or disable "CPU firmware fetch" mode.  Enabling holds the
   on-board 8051 in reset and snapshots its 8KB firmware image into
   fw_buffer (readable via pvr2_hdw_cpufw_get); disabling frees the
   buffer and releases the CPU from reset. */
void pvr2_hdw_cpufw_set_enabled(struct pvr2_hdw *hdw, int enable_flag)
{
	int ret;
	u16 address;
	unsigned int pipe;
	LOCK_TAKE(hdw->big_lock); do {
		/* Already in the requested state; nothing to do. */
		if ((hdw->fw_buffer == 0) == !enable_flag) break;

		if (!enable_flag) {
			pvr2_trace(PVR2_TRACE_FIRMWARE,
				   "Cleaning up after CPU firmware fetch");
			kfree(hdw->fw_buffer);
			hdw->fw_buffer = 0;
			hdw->fw_size = 0;
			/* Now release the CPU.  It will disconnect and
			   reconnect later. */
			pvr2_hdw_cpureset_assert(hdw,0);
			break;
		}

		pvr2_trace(PVR2_TRACE_FIRMWARE,
			   "Preparing to suck out CPU firmware");
		hdw->fw_size = 0x2000;
		hdw->fw_buffer = kmalloc(hdw->fw_size,GFP_KERNEL);
		if (!hdw->fw_buffer) {
			hdw->fw_size = 0;
			break;
		}

		memset(hdw->fw_buffer,0,hdw->fw_size);

		/* We have to hold the CPU during firmware upload. */
		pvr2_hdw_cpureset_assert(hdw,1);

		/* download the firmware from address 0000-1fff in 2048
		   (=0x800) bytes chunk. */

		pvr2_trace(PVR2_TRACE_FIRMWARE,"Grabbing CPU firmware");
		pipe = usb_rcvctrlpipe(hdw->usb_dev, 0);
		for(address = 0; address < hdw->fw_size; address += 0x800) {
			ret = usb_control_msg(hdw->usb_dev,pipe,0xa0,0xc0,
					      address,0,
					      hdw->fw_buffer+address,0x800,HZ);
			if (ret < 0) break;
		}

		pvr2_trace(PVR2_TRACE_FIRMWARE,"Done grabbing CPU firmware");

	} while (0); LOCK_GIVE(hdw->big_lock);
}
2465
2466
2467/* Return true if we're in a mode for retrieval CPU firmware */
2468int pvr2_hdw_cpufw_get_enabled(struct pvr2_hdw *hdw)
2469{
2470 return hdw->fw_buffer != 0;
2471}
2472
2473
/* Copy up to cnt bytes of the captured CPU firmware image, starting at
   offset offs, into buf.  Returns the number of bytes copied, 0 at end
   of image, -EIO if fetch mode is not enabled, or -EINVAL for bad
   arguments. */
int pvr2_hdw_cpufw_get(struct pvr2_hdw *hdw,unsigned int offs,
		       char *buf,unsigned int cnt)
{
	int ret = -EINVAL;
	LOCK_TAKE(hdw->big_lock); do {
		if (!buf) break;
		if (!cnt) break;

		/* Must be in firmware-fetch mode; see cpufw_set_enabled. */
		if (!hdw->fw_buffer) {
			ret = -EIO;
			break;
		}

		if (offs >= hdw->fw_size) {
			pvr2_trace(PVR2_TRACE_FIRMWARE,
				   "Read firmware data offs=%d EOF",
				   offs);
			ret = 0;
			break;
		}

		/* Clamp the copy to the end of the image. */
		if (offs + cnt > hdw->fw_size) cnt = hdw->fw_size - offs;

		memcpy(buf,hdw->fw_buffer+offs,cnt);

		pvr2_trace(PVR2_TRACE_FIRMWARE,
			   "Read firmware data offs=%d cnt=%d",
			   offs,cnt);
		ret = cnt;
	} while (0); LOCK_GIVE(hdw->big_lock);

	return ret;
}
2507
2508
2509int pvr2_hdw_v4l_get_minor_number(struct pvr2_hdw *hdw)
2510{
2511 return hdw->v4l_minor_number;
2512}
2513
2514
2515/* Store the v4l minor device number */
2516void pvr2_hdw_v4l_store_minor_number(struct pvr2_hdw *hdw,int v)
2517{
2518 hdw->v4l_minor_number = v;
2519}
2520
2521
/* Reset both bulk control endpoints: clear the host-side data toggles
   and then clear any halt condition on the device side.  No-op if the
   USB device is already gone. */
void pvr2_reset_ctl_endpoints(struct pvr2_hdw *hdw)
{
	if (!hdw->usb_dev) return;
	usb_settoggle(hdw->usb_dev, PVR2_CTL_WRITE_ENDPOINT & 0xf,
		      !(PVR2_CTL_WRITE_ENDPOINT & USB_DIR_IN), 0);
	usb_settoggle(hdw->usb_dev, PVR2_CTL_READ_ENDPOINT & 0xf,
		      !(PVR2_CTL_READ_ENDPOINT & USB_DIR_IN), 0);
	usb_clear_halt(hdw->usb_dev,
		       usb_rcvbulkpipe(hdw->usb_dev,
				       PVR2_CTL_READ_ENDPOINT & 0x7f));
	usb_clear_halt(hdw->usb_dev,
		       usb_sndbulkpipe(hdw->usb_dev,
				       PVR2_CTL_WRITE_ENDPOINT & 0x7f));
}
2536
2537
2538static void pvr2_ctl_write_complete(struct urb *urb, struct pt_regs *regs)
2539{
2540 struct pvr2_hdw *hdw = urb->context;
2541 hdw->ctl_write_pend_flag = 0;
2542 if (hdw->ctl_read_pend_flag) return;
2543 complete(&hdw->ctl_done);
2544}
2545
2546
2547static void pvr2_ctl_read_complete(struct urb *urb, struct pt_regs *regs)
2548{
2549 struct pvr2_hdw *hdw = urb->context;
2550 hdw->ctl_read_pend_flag = 0;
2551 if (hdw->ctl_write_pend_flag) return;
2552 complete(&hdw->ctl_done);
2553}
2554
2555
2556static void pvr2_ctl_timeout(unsigned long data)
2557{
2558 struct pvr2_hdw *hdw = (struct pvr2_hdw *)data;
2559 if (hdw->ctl_write_pend_flag || hdw->ctl_read_pend_flag) {
2560 hdw->ctl_timeout_flag = !0;
2561 if (hdw->ctl_write_pend_flag && hdw->ctl_write_urb) {
2562 usb_unlink_urb(hdw->ctl_write_urb);
2563 }
2564 if (hdw->ctl_read_pend_flag && hdw->ctl_read_urb) {
2565 usb_unlink_urb(hdw->ctl_read_urb);
2566 }
2567 }
2568}
2569
2570
2571int pvr2_send_request_ex(struct pvr2_hdw *hdw,
2572 unsigned int timeout,int probe_fl,
2573 void *write_data,unsigned int write_len,
2574 void *read_data,unsigned int read_len)
2575{
2576 unsigned int idx;
2577 int status = 0;
2578 struct timer_list timer;
2579 if (!hdw->ctl_lock_held) {
2580 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
2581 "Attempted to execute control transfer"
2582 " without lock!!");
2583 return -EDEADLK;
2584 }
2585 if ((!hdw->flag_ok) && !probe_fl) {
2586 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
2587 "Attempted to execute control transfer"
2588 " when device not ok");
2589 return -EIO;
2590 }
2591 if (!(hdw->ctl_read_urb && hdw->ctl_write_urb)) {
2592 if (!probe_fl) {
2593 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
2594 "Attempted to execute control transfer"
2595 " when USB is disconnected");
2596 }
2597 return -ENOTTY;
2598 }
2599
2600 /* Ensure that we have sane parameters */
2601 if (!write_data) write_len = 0;
2602 if (!read_data) read_len = 0;
2603 if (write_len > PVR2_CTL_BUFFSIZE) {
2604 pvr2_trace(
2605 PVR2_TRACE_ERROR_LEGS,
2606 "Attempted to execute %d byte"
2607 " control-write transfer (limit=%d)",
2608 write_len,PVR2_CTL_BUFFSIZE);
2609 return -EINVAL;
2610 }
2611 if (read_len > PVR2_CTL_BUFFSIZE) {
2612 pvr2_trace(
2613 PVR2_TRACE_ERROR_LEGS,
2614 "Attempted to execute %d byte"
2615 " control-read transfer (limit=%d)",
2616 write_len,PVR2_CTL_BUFFSIZE);
2617 return -EINVAL;
2618 }
2619 if ((!write_len) && (!read_len)) {
2620 pvr2_trace(
2621 PVR2_TRACE_ERROR_LEGS,
2622 "Attempted to execute null control transfer?");
2623 return -EINVAL;
2624 }
2625
2626
2627 hdw->cmd_debug_state = 1;
2628 if (write_len) {
2629 hdw->cmd_debug_code = ((unsigned char *)write_data)[0];
2630 } else {
2631 hdw->cmd_debug_code = 0;
2632 }
2633 hdw->cmd_debug_write_len = write_len;
2634 hdw->cmd_debug_read_len = read_len;
2635
2636 /* Initialize common stuff */
2637 init_completion(&hdw->ctl_done);
2638 hdw->ctl_timeout_flag = 0;
2639 hdw->ctl_write_pend_flag = 0;
2640 hdw->ctl_read_pend_flag = 0;
2641 init_timer(&timer);
2642 timer.expires = jiffies + timeout;
2643 timer.data = (unsigned long)hdw;
2644 timer.function = pvr2_ctl_timeout;
2645
2646 if (write_len) {
2647 hdw->cmd_debug_state = 2;
2648 /* Transfer write data to internal buffer */
2649 for (idx = 0; idx < write_len; idx++) {
2650 hdw->ctl_write_buffer[idx] =
2651 ((unsigned char *)write_data)[idx];
2652 }
2653 /* Initiate a write request */
2654 usb_fill_bulk_urb(hdw->ctl_write_urb,
2655 hdw->usb_dev,
2656 usb_sndbulkpipe(hdw->usb_dev,
2657 PVR2_CTL_WRITE_ENDPOINT),
2658 hdw->ctl_write_buffer,
2659 write_len,
2660 pvr2_ctl_write_complete,
2661 hdw);
2662 hdw->ctl_write_urb->actual_length = 0;
2663 hdw->ctl_write_pend_flag = !0;
2664 status = usb_submit_urb(hdw->ctl_write_urb,GFP_KERNEL);
2665 if (status < 0) {
2666 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
2667 "Failed to submit write-control"
2668 " URB status=%d",status);
2669 hdw->ctl_write_pend_flag = 0;
2670 goto done;
2671 }
2672 }
2673
2674 if (read_len) {
2675 hdw->cmd_debug_state = 3;
2676 memset(hdw->ctl_read_buffer,0x43,read_len);
2677 /* Initiate a read request */
2678 usb_fill_bulk_urb(hdw->ctl_read_urb,
2679 hdw->usb_dev,
2680 usb_rcvbulkpipe(hdw->usb_dev,
2681 PVR2_CTL_READ_ENDPOINT),
2682 hdw->ctl_read_buffer,
2683 read_len,
2684 pvr2_ctl_read_complete,
2685 hdw);
2686 hdw->ctl_read_urb->actual_length = 0;
2687 hdw->ctl_read_pend_flag = !0;
2688 status = usb_submit_urb(hdw->ctl_read_urb,GFP_KERNEL);
2689 if (status < 0) {
2690 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
2691 "Failed to submit read-control"
2692 " URB status=%d",status);
2693 hdw->ctl_read_pend_flag = 0;
2694 goto done;
2695 }
2696 }
2697
2698 /* Start timer */
2699 add_timer(&timer);
2700
2701 /* Now wait for all I/O to complete */
2702 hdw->cmd_debug_state = 4;
2703 while (hdw->ctl_write_pend_flag || hdw->ctl_read_pend_flag) {
2704 wait_for_completion(&hdw->ctl_done);
2705 }
2706 hdw->cmd_debug_state = 5;
2707
2708 /* Stop timer */
2709 del_timer_sync(&timer);
2710
2711 hdw->cmd_debug_state = 6;
2712 status = 0;
2713
2714 if (hdw->ctl_timeout_flag) {
2715 status = -ETIMEDOUT;
2716 if (!probe_fl) {
2717 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
2718 "Timed out control-write");
2719 }
2720 goto done;
2721 }
2722
2723 if (write_len) {
2724 /* Validate results of write request */
2725 if ((hdw->ctl_write_urb->status != 0) &&
2726 (hdw->ctl_write_urb->status != -ENOENT) &&
2727 (hdw->ctl_write_urb->status != -ESHUTDOWN) &&
2728 (hdw->ctl_write_urb->status != -ECONNRESET)) {
2729 /* USB subsystem is reporting some kind of failure
2730 on the write */
2731 status = hdw->ctl_write_urb->status;
2732 if (!probe_fl) {
2733 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
2734 "control-write URB failure,"
2735 " status=%d",
2736 status);
2737 }
2738 goto done;
2739 }
2740 if (hdw->ctl_write_urb->actual_length < write_len) {
2741 /* Failed to write enough data */
2742 status = -EIO;
2743 if (!probe_fl) {
2744 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
2745 "control-write URB short,"
2746 " expected=%d got=%d",
2747 write_len,
2748 hdw->ctl_write_urb->actual_length);
2749 }
2750 goto done;
2751 }
2752 }
2753 if (read_len) {
2754 /* Validate results of read request */
2755 if ((hdw->ctl_read_urb->status != 0) &&
2756 (hdw->ctl_read_urb->status != -ENOENT) &&
2757 (hdw->ctl_read_urb->status != -ESHUTDOWN) &&
2758 (hdw->ctl_read_urb->status != -ECONNRESET)) {
2759 /* USB subsystem is reporting some kind of failure
2760 on the read */
2761 status = hdw->ctl_read_urb->status;
2762 if (!probe_fl) {
2763 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
2764 "control-read URB failure,"
2765 " status=%d",
2766 status);
2767 }
2768 goto done;
2769 }
2770 if (hdw->ctl_read_urb->actual_length < read_len) {
2771 /* Failed to read enough data */
2772 status = -EIO;
2773 if (!probe_fl) {
2774 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
2775 "control-read URB short,"
2776 " expected=%d got=%d",
2777 read_len,
2778 hdw->ctl_read_urb->actual_length);
2779 }
2780 goto done;
2781 }
2782 /* Transfer retrieved data out from internal buffer */
2783 for (idx = 0; idx < read_len; idx++) {
2784 ((unsigned char *)read_data)[idx] =
2785 hdw->ctl_read_buffer[idx];
2786 }
2787 }
2788
2789 done:
2790
2791 hdw->cmd_debug_state = 0;
2792 if ((status < 0) && (!probe_fl)) {
2793 pvr2_hdw_render_useless_unlocked(hdw);
2794 }
2795 return status;
2796}
2797
2798
2799int pvr2_send_request(struct pvr2_hdw *hdw,
2800 void *write_data,unsigned int write_len,
2801 void *read_data,unsigned int read_len)
2802{
2803 return pvr2_send_request_ex(hdw,HZ*4,0,
2804 write_data,write_len,
2805 read_data,read_len);
2806}
2807
/* Write a 32 bit value to a device register.  Builds an 8 byte command:
   prefix 0x04, 4 data bytes little-endian, a zero byte, then the
   register address big-endian.  Returns pvr2_send_request()'s status. */
int pvr2_write_register(struct pvr2_hdw *hdw, u16 reg, u32 data)
{
	int ret;

	LOCK_TAKE(hdw->ctl_lock);

	hdw->cmd_buffer[0] = 0x04; /* write register prefix */
	PVR2_DECOMPOSE_LE(hdw->cmd_buffer,1,data);
	hdw->cmd_buffer[5] = 0;
	hdw->cmd_buffer[6] = (reg >> 8) & 0xff;
	hdw->cmd_buffer[7] = reg & 0xff;


	ret = pvr2_send_request(hdw, hdw->cmd_buffer, 8, hdw->cmd_buffer, 0);

	LOCK_GIVE(hdw->ctl_lock);

	return ret;
}
2827
2828
/* Read a 32 bit value from a device register.  Command layout mirrors
   pvr2_write_register (prefix 0x05, zero data bytes, register address
   big-endian); the 4 byte little-endian response is stored in *data.
   Returns pvr2_send_request()'s status. */
int pvr2_read_register(struct pvr2_hdw *hdw, u16 reg, u32 *data)
{
	int ret = 0;

	LOCK_TAKE(hdw->ctl_lock);

	hdw->cmd_buffer[0] = 0x05; /* read register prefix */
	hdw->cmd_buffer[1] = 0;
	hdw->cmd_buffer[2] = 0;
	hdw->cmd_buffer[3] = 0;
	hdw->cmd_buffer[4] = 0;
	hdw->cmd_buffer[5] = 0;
	hdw->cmd_buffer[6] = (reg >> 8) & 0xff;
	hdw->cmd_buffer[7] = reg & 0xff;

	/* ret starts at 0, so the |= is equivalent to plain assignment;
	   *data is filled in even when the request failed. */
	ret |= pvr2_send_request(hdw, hdw->cmd_buffer, 8, hdw->cmd_buffer, 4);
	*data = PVR2_COMPOSE_LE(hdw->cmd_buffer,0);

	LOCK_GIVE(hdw->ctl_lock);

	return ret;
}
2851
2852
/* Send a raw 16 bit command word (big-endian) to the device, reading
   back res response bytes into cmd_buffer.  Returns the transfer
   status. */
int pvr2_write_u16(struct pvr2_hdw *hdw, u16 data, int res)
{
	int ret;

	LOCK_TAKE(hdw->ctl_lock);

	hdw->cmd_buffer[0] = (data >> 8) & 0xff;
	hdw->cmd_buffer[1] = data & 0xff;

	ret = pvr2_send_request(hdw, hdw->cmd_buffer, 2, hdw->cmd_buffer, res);

	LOCK_GIVE(hdw->ctl_lock);

	return ret;
}
2868
2869
/* Send a single raw command byte to the device, reading back res
   response bytes into cmd_buffer.  Returns the transfer status. */
int pvr2_write_u8(struct pvr2_hdw *hdw, u8 data, int res)
{
	int ret;

	LOCK_TAKE(hdw->ctl_lock);

	hdw->cmd_buffer[0] = data;

	ret = pvr2_send_request(hdw, hdw->cmd_buffer, 1, hdw->cmd_buffer, res);

	LOCK_GIVE(hdw->ctl_lock);

	return ret;
}
2884
2885
/* Mark the device as unusable: clear flag_ok, tear down the video
   stream (if any) and forget all enabled subsystems.  Idempotent - a
   second call is a no-op.  NOTE(review): the _unlocked suffix and the
   locked wrapper below suggest the caller is expected to hold
   ctl_lock - confirm at call sites. */
void pvr2_hdw_render_useless_unlocked(struct pvr2_hdw *hdw)
{
	if (!hdw->flag_ok) return;
	pvr2_trace(PVR2_TRACE_INIT,"render_useless");
	hdw->flag_ok = 0;
	if (hdw->vid_stream) {
		/* Zero out the stream configuration to halt transfers */
		pvr2_stream_setup(hdw->vid_stream,0,0,0);
	}
	hdw->flag_streaming_enabled = 0;
	hdw->subsys_enabled_mask = 0;
}
2897
2898
/* Locked wrapper around pvr2_hdw_render_useless_unlocked(); call this
   when ctl_lock is not already held. */
void pvr2_hdw_render_useless(struct pvr2_hdw *hdw)
{
	LOCK_TAKE(hdw->ctl_lock);
	pvr2_hdw_render_useless_unlocked(hdw);
	LOCK_GIVE(hdw->ctl_lock);
}
2905
2906
/* Force a USB-level reset of the device, then optionally pause to let
   the hardware settle.  NOTE(review): only a return of exactly 1 from
   usb_lock_device_for_reset() is treated as "locked, proceed"; other
   non-negative codes fall into the failure path - confirm against
   this kernel version's USB core API. */
void pvr2_hdw_device_reset(struct pvr2_hdw *hdw)
{
	int ret;
	pvr2_trace(PVR2_TRACE_INIT,"Performing a device reset...");
	ret = usb_lock_device_for_reset(hdw->usb_dev,0);
	if (ret == 1) {
		ret = usb_reset_device(hdw->usb_dev);
		usb_unlock_device(hdw->usb_dev);
	} else {
		pvr2_trace(PVR2_TRACE_ERROR_LEGS,
			   "Failed to lock USB device ret=%d",ret);
	}
	/* init_pause_msec is presumably a module parameter (defined
	   outside this view); zero disables the settle delay. */
	if (init_pause_msec) {
		pvr2_trace(PVR2_TRACE_INFO,
			   "Waiting %u msec for hardware to settle",
			   init_pause_msec);
		msleep(init_pause_msec);
	}

}
2927
2928
/* Assert (val != 0) or clear (val == 0) the reset line of the
   device's 8051 microcontroller by writing its CPUCS register with a
   vendor control message.  On failure the device is marked unusable
   via pvr2_hdw_render_useless(). */
void pvr2_hdw_cpureset_assert(struct pvr2_hdw *hdw,int val)
{
	char da[1];
	unsigned int pipe;
	int ret;

	if (!hdw->usb_dev) return;

	pvr2_trace(PVR2_TRACE_INIT,"cpureset_assert(%d)",val);

	da[0] = val ? 0x01 : 0x00;

	/* Write the CPUCS register on the 8051. The lsb of the register
	   is the reset bit; a 1 asserts reset while a 0 clears it. */
	pipe = usb_sndctrlpipe(hdw->usb_dev, 0);
	/* request 0xa0 / type 0x40 (vendor, host-to-device), address
	   0xe600, 1 data byte, 1 second timeout */
	ret = usb_control_msg(hdw->usb_dev,pipe,0xa0,0x40,0xe600,0,da,1,HZ);
	if (ret < 0) {
		pvr2_trace(PVR2_TRACE_ERROR_LEGS,
			   "cpureset_assert(%d) error=%d",val,ret);
		pvr2_hdw_render_useless(hdw);
	}
}
2951
2952
2953int pvr2_hdw_cmd_deep_reset(struct pvr2_hdw *hdw)
2954{
2955 int status;
2956 LOCK_TAKE(hdw->ctl_lock); do {
2957 pvr2_trace(PVR2_TRACE_INIT,"Requesting uproc hard reset");
2958 hdw->flag_ok = !0;
2959 hdw->cmd_buffer[0] = 0xdd;
2960 status = pvr2_send_request(hdw,hdw->cmd_buffer,1,0,0);
2961 } while (0); LOCK_GIVE(hdw->ctl_lock);
2962 return status;
2963}
2964
2965
2966int pvr2_hdw_cmd_powerup(struct pvr2_hdw *hdw)
2967{
2968 int status;
2969 LOCK_TAKE(hdw->ctl_lock); do {
2970 pvr2_trace(PVR2_TRACE_INIT,"Requesting powerup");
2971 hdw->cmd_buffer[0] = 0xde;
2972 status = pvr2_send_request(hdw,hdw->cmd_buffer,1,0,0);
2973 } while (0); LOCK_GIVE(hdw->ctl_lock);
2974 return status;
2975}
2976
2977
2978int pvr2_hdw_cmd_decoder_reset(struct pvr2_hdw *hdw)
2979{
2980 if (!hdw->decoder_ctrl) {
2981 pvr2_trace(PVR2_TRACE_INIT,
2982 "Unable to reset decoder: nothing attached");
2983 return -ENOTTY;
2984 }
2985
2986 if (!hdw->decoder_ctrl->force_reset) {
2987 pvr2_trace(PVR2_TRACE_INIT,
2988 "Unable to reset decoder: not implemented");
2989 return -ENOTTY;
2990 }
2991
2992 pvr2_trace(PVR2_TRACE_INIT,
2993 "Requesting decoder reset");
2994 hdw->decoder_ctrl->force_reset(hdw->decoder_ctrl->ctxt);
2995 return 0;
2996}
2997
2998
2999int pvr2_hdw_cmd_usbstream(struct pvr2_hdw *hdw,int runFl)
3000{
3001 int status;
3002 LOCK_TAKE(hdw->ctl_lock); do {
3003 hdw->cmd_buffer[0] = (runFl ? 0x36 : 0x37);
3004 status = pvr2_send_request(hdw,hdw->cmd_buffer,1,0,0);
3005 } while (0); LOCK_GIVE(hdw->ctl_lock);
3006 if (!status) {
3007 hdw->subsys_enabled_mask =
3008 ((hdw->subsys_enabled_mask &
3009 ~(1<<PVR2_SUBSYS_B_USBSTREAM_RUN)) |
3010 (runFl ? (1<<PVR2_SUBSYS_B_USBSTREAM_RUN) : 0));
3011 }
3012 return status;
3013}
3014
3015
3016void pvr2_hdw_get_debug_info(const struct pvr2_hdw *hdw,
3017 struct pvr2_hdw_debug_info *ptr)
3018{
3019 ptr->big_lock_held = hdw->big_lock_held;
3020 ptr->ctl_lock_held = hdw->ctl_lock_held;
3021 ptr->flag_ok = hdw->flag_ok;
3022 ptr->flag_disconnected = hdw->flag_disconnected;
3023 ptr->flag_init_ok = hdw->flag_init_ok;
3024 ptr->flag_streaming_enabled = hdw->flag_streaming_enabled;
3025 ptr->subsys_flags = hdw->subsys_enabled_mask;
3026 ptr->cmd_debug_state = hdw->cmd_debug_state;
3027 ptr->cmd_code = hdw->cmd_debug_code;
3028 ptr->cmd_debug_write_len = hdw->cmd_debug_write_len;
3029 ptr->cmd_debug_read_len = hdw->cmd_debug_read_len;
3030 ptr->cmd_debug_timeout = hdw->ctl_timeout_flag;
3031 ptr->cmd_debug_write_pend = hdw->ctl_write_pend_flag;
3032 ptr->cmd_debug_read_pend = hdw->ctl_read_pend_flag;
3033 ptr->cmd_debug_rstatus = hdw->ctl_read_urb->status;
3034 ptr->cmd_debug_wstatus = hdw->ctl_read_urb->status;
3035}
3036
3037
/* Read the GPIO direction register into *dp. */
int pvr2_hdw_gpio_get_dir(struct pvr2_hdw *hdw,u32 *dp)
{
	return pvr2_read_register(hdw,PVR2_GPIO_DIR,dp);
}


/* Read the GPIO output register into *dp. */
int pvr2_hdw_gpio_get_out(struct pvr2_hdw *hdw,u32 *dp)
{
	return pvr2_read_register(hdw,PVR2_GPIO_OUT,dp);
}


/* Read the GPIO input register into *dp. */
int pvr2_hdw_gpio_get_in(struct pvr2_hdw *hdw,u32 *dp)
{
	return pvr2_read_register(hdw,PVR2_GPIO_IN,dp);
}
3054
3055
3056int pvr2_hdw_gpio_chg_dir(struct pvr2_hdw *hdw,u32 msk,u32 val)
3057{
3058 u32 cval,nval;
3059 int ret;
3060 if (~msk) {
3061 ret = pvr2_read_register(hdw,PVR2_GPIO_DIR,&cval);
3062 if (ret) return ret;
3063 nval = (cval & ~msk) | (val & msk);
3064 pvr2_trace(PVR2_TRACE_GPIO,
3065 "GPIO direction changing 0x%x:0x%x"
3066 " from 0x%x to 0x%x",
3067 msk,val,cval,nval);
3068 } else {
3069 nval = val;
3070 pvr2_trace(PVR2_TRACE_GPIO,
3071 "GPIO direction changing to 0x%x",nval);
3072 }
3073 return pvr2_write_register(hdw,PVR2_GPIO_DIR,nval);
3074}
3075
3076
3077int pvr2_hdw_gpio_chg_out(struct pvr2_hdw *hdw,u32 msk,u32 val)
3078{
3079 u32 cval,nval;
3080 int ret;
3081 if (~msk) {
3082 ret = pvr2_read_register(hdw,PVR2_GPIO_OUT,&cval);
3083 if (ret) return ret;
3084 nval = (cval & ~msk) | (val & msk);
3085 pvr2_trace(PVR2_TRACE_GPIO,
3086 "GPIO output changing 0x%x:0x%x from 0x%x to 0x%x",
3087 msk,val,cval,nval);
3088 } else {
3089 nval = val;
3090 pvr2_trace(PVR2_TRACE_GPIO,
3091 "GPIO output changing to 0x%x",nval);
3092 }
3093 return pvr2_write_register(hdw,PVR2_GPIO_OUT,nval);
3094}
3095
3096
3097int pvr2_hdw_get_eeprom_addr(struct pvr2_hdw *hdw)
3098{
3099 int result;
3100 LOCK_TAKE(hdw->ctl_lock); do {
3101 hdw->cmd_buffer[0] = 0xeb;
3102 result = pvr2_send_request(hdw,
3103 hdw->cmd_buffer,1,
3104 hdw->cmd_buffer,1);
3105 if (result < 0) break;
3106 result = hdw->cmd_buffer[0];
3107 } while(0); LOCK_GIVE(hdw->ctl_lock);
3108 return result;
3109}
3110
3111
3112/*
3113 Stuff for Emacs to see, in order to encourage consistent editing style:
3114 *** Local Variables: ***
3115 *** mode: c ***
3116 *** fill-column: 75 ***
3117 *** tab-width: 8 ***
3118 *** c-basic-offset: 8 ***
3119 *** End: ***
3120 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.h b/drivers/media/video/pvrusb2/pvrusb2-hdw.h
new file mode 100644
index 0000000000..63f5291544
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.h
@@ -0,0 +1,335 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21#ifndef __PVRUSB2_HDW_H
22#define __PVRUSB2_HDW_H
23
24#include <linux/usb.h>
25#include <linux/videodev2.h>
26#include "pvrusb2-io.h"
27#include "pvrusb2-ctrl.h"
28
29
30/* Private internal control ids, look these up with
31 pvr2_hdw_get_ctrl_by_id() - these are NOT visible in V4L */
32#define PVR2_CID_STDENUM 1
33#define PVR2_CID_STDCUR 2
34#define PVR2_CID_STDAVAIL 3
35#define PVR2_CID_INPUT 4
36#define PVR2_CID_AUDIOMODE 5
37#define PVR2_CID_FREQUENCY 6
38#define PVR2_CID_HRES 7
39#define PVR2_CID_VRES 8
40
41/* Legal values for the INPUT state variable */
42#define PVR2_CVAL_INPUT_TV 0
43#define PVR2_CVAL_INPUT_SVIDEO 1
44#define PVR2_CVAL_INPUT_COMPOSITE 2
45#define PVR2_CVAL_INPUT_RADIO 3
46
47/* Values that pvr2_hdw_get_signal_status() returns */
48#define PVR2_SIGNAL_OK 0x0001
49#define PVR2_SIGNAL_STEREO 0x0002
50#define PVR2_SIGNAL_SAP 0x0004
51
52
53/* Subsystem definitions - these are various pieces that can be
54 independently stopped / started. Usually you don't want to mess with
55 this directly (let the driver handle things itself), but it is useful
56 for debugging. */
57#define PVR2_SUBSYS_B_ENC_FIRMWARE 0
58#define PVR2_SUBSYS_B_ENC_CFG 1
59#define PVR2_SUBSYS_B_DIGITIZER_RUN 2
60#define PVR2_SUBSYS_B_USBSTREAM_RUN 3
61#define PVR2_SUBSYS_B_ENC_RUN 4
62
63#define PVR2_SUBSYS_CFG_ALL ( \
64 (1 << PVR2_SUBSYS_B_ENC_FIRMWARE) | \
65 (1 << PVR2_SUBSYS_B_ENC_CFG) )
66#define PVR2_SUBSYS_RUN_ALL ( \
67 (1 << PVR2_SUBSYS_B_DIGITIZER_RUN) | \
68 (1 << PVR2_SUBSYS_B_USBSTREAM_RUN) | \
69 (1 << PVR2_SUBSYS_B_ENC_RUN) )
70#define PVR2_SUBSYS_ALL ( \
71 PVR2_SUBSYS_CFG_ALL | \
72 PVR2_SUBSYS_RUN_ALL )
73
74enum pvr2_config {
75 pvr2_config_empty,
76 pvr2_config_mpeg,
77 pvr2_config_vbi,
78 pvr2_config_radio,
79};
80
81const char *pvr2_config_get_name(enum pvr2_config);
82
83struct pvr2_hdw;
84
85/* Create and return a structure for interacting with the underlying
86 hardware */
87struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
88 const struct usb_device_id *devid);
89
90/* Poll for background activity (if any) */
91void pvr2_hdw_poll(struct pvr2_hdw *);
92
93/* Trigger a poll to take place later at a convenient time */
94void pvr2_hdw_poll_trigger(struct pvr2_hdw *);
95void pvr2_hdw_poll_trigger_unlocked(struct pvr2_hdw *);
96
97/* Register a callback used to trigger a future poll */
98void pvr2_hdw_setup_poll_trigger(struct pvr2_hdw *,
99 void (*func)(void *),
100 void *data);
101
102/* Get pointer to structure given unit number */
103struct pvr2_hdw *pvr2_hdw_find(int unit_number);
104
105/* Destroy hardware interaction structure */
106void pvr2_hdw_destroy(struct pvr2_hdw *);
107
108/* Set up the structure and attempt to put the device into a usable state.
109 This can be a time-consuming operation, which is why it is not done
110 internally as part of the create() step. Return value is exactly the
111 same as pvr2_hdw_init_ok(). */
112int pvr2_hdw_setup(struct pvr2_hdw *);
113
114/* Initialization succeeded */
115int pvr2_hdw_init_ok(struct pvr2_hdw *);
116
117/* Return true if in the ready (normal) state */
118int pvr2_hdw_dev_ok(struct pvr2_hdw *);
119
120/* Return small integer number [1..N] for logical instance number of this
121 device. This is useful for indexing array-valued module parameters. */
122int pvr2_hdw_get_unit_number(struct pvr2_hdw *);
123
124/* Get pointer to underlying USB device */
125struct usb_device *pvr2_hdw_get_dev(struct pvr2_hdw *);
126
127/* Retrieve serial number of device */
128unsigned long pvr2_hdw_get_sn(struct pvr2_hdw *);
129
130/* Called when hardware has been unplugged */
131void pvr2_hdw_disconnect(struct pvr2_hdw *);
132
133/* Get the number of defined controls */
134unsigned int pvr2_hdw_get_ctrl_count(struct pvr2_hdw *);
135
136/* Retrieve a control handle given its index (0..count-1) */
137struct pvr2_ctrl *pvr2_hdw_get_ctrl_by_index(struct pvr2_hdw *,unsigned int);
138
139/* Retrieve a control handle given its internal ID (if any) */
140struct pvr2_ctrl *pvr2_hdw_get_ctrl_by_id(struct pvr2_hdw *,unsigned int);
141
142/* Retrieve a control handle given its V4L ID (if any) */
143struct pvr2_ctrl *pvr2_hdw_get_ctrl_v4l(struct pvr2_hdw *,unsigned int ctl_id);
144
145/* Retrieve a control handle given its immediate predecessor V4L ID (if any) */
146struct pvr2_ctrl *pvr2_hdw_get_ctrl_nextv4l(struct pvr2_hdw *,
147 unsigned int ctl_id);
148
149/* Commit all control changes made up to this point */
150int pvr2_hdw_commit_ctl(struct pvr2_hdw *);
151
152/* Return name for this driver instance */
153const char *pvr2_hdw_get_driver_name(struct pvr2_hdw *);
154
155/* Return PVR2_SIGNAL_XXXX bit mask indicating signal status */
156unsigned int pvr2_hdw_get_signal_status(struct pvr2_hdw *);
157
158/* Query device and see if it thinks it is on a high-speed USB link */
159int pvr2_hdw_is_hsm(struct pvr2_hdw *);
160
161/* Turn streaming on/off */
162int pvr2_hdw_set_streaming(struct pvr2_hdw *,int);
163
164/* Find out if streaming is on */
165int pvr2_hdw_get_streaming(struct pvr2_hdw *);
166
167/* Configure the type of stream to generate */
168int pvr2_hdw_set_stream_type(struct pvr2_hdw *, enum pvr2_config);
169
170/* Get handle to video output stream */
171struct pvr2_stream *pvr2_hdw_get_video_stream(struct pvr2_hdw *);
172
173/* Emit a video standard struct */
174int pvr2_hdw_get_stdenum_value(struct pvr2_hdw *hdw,struct v4l2_standard *std,
175 unsigned int idx);
176
177/* Enable / disable various pieces of hardware. Items to change are
178 identified by bit positions within msk, and new state for each item is
179 identified by corresponding bit positions within val. */
180void pvr2_hdw_subsys_bit_chg(struct pvr2_hdw *hdw,
181 unsigned long msk,unsigned long val);
182
183/* Shortcut for pvr2_hdw_subsys_bit_chg(hdw,msk,msk) */
184void pvr2_hdw_subsys_bit_set(struct pvr2_hdw *hdw,unsigned long msk);
185
186/* Shortcut for pvr2_hdw_subsys_bit_chg(hdw,msk,0) */
187void pvr2_hdw_subsys_bit_clr(struct pvr2_hdw *hdw,unsigned long msk);
188
189/* Retrieve mask indicating which pieces of hardware are currently enabled
190 / configured. */
191unsigned long pvr2_hdw_subsys_get(struct pvr2_hdw *);
192
193/* Adjust mask of what get shut down when streaming is stopped. This is a
194 debugging aid. */
195void pvr2_hdw_subsys_stream_bit_chg(struct pvr2_hdw *hdw,
196 unsigned long msk,unsigned long val);
197
198/* Retrieve mask indicating which pieces of hardware are disabled when
199 streaming is turned off. */
200unsigned long pvr2_hdw_subsys_stream_get(struct pvr2_hdw *);
201
202
203/* Enable / disable retrieval of CPU firmware. This must be enabled before
204 pvr2_hdw_cpufw_get() will function. Note that doing this may prevent
205 the device from running (and leaving this mode may imply a device
206 reset). */
207void pvr2_hdw_cpufw_set_enabled(struct pvr2_hdw *, int enable_flag);
208
/* Return true if we're in a mode for retrieval of CPU firmware */
210int pvr2_hdw_cpufw_get_enabled(struct pvr2_hdw *);
211
212/* Retrieve a piece of the CPU's firmware at the given offset. Return
213 value is the number of bytes retrieved or zero if we're past the end or
214 an error otherwise (e.g. if firmware retrieval is not enabled). */
215int pvr2_hdw_cpufw_get(struct pvr2_hdw *,unsigned int offs,
216 char *buf,unsigned int cnt);
217
218/* Retrieve previously stored v4l minor device number */
219int pvr2_hdw_v4l_get_minor_number(struct pvr2_hdw *);
220
221/* Store the v4l minor device number */
222void pvr2_hdw_v4l_store_minor_number(struct pvr2_hdw *,int);
223
224
225/* The following entry points are all lower level things you normally don't
226 want to worry about. */
227
228/* Attempt to recover from a USB foul-up (in practice I find that if you
229 have to do this, then it's already too late). */
230void pvr2_reset_ctl_endpoints(struct pvr2_hdw *hdw);
231
232/* Issue a command and get a response from the device. LOTS of higher
233 level stuff is built on this. */
234int pvr2_send_request(struct pvr2_hdw *,
235 void *write_ptr,unsigned int write_len,
236 void *read_ptr,unsigned int read_len);
237
238/* Issue a command and get a response from the device. This extended
239 version includes a probe flag (which if set means that device errors
240 should not be logged or treated as fatal) and a timeout in jiffies.
241 This can be used to non-lethally probe the health of endpoint 1. */
242int pvr2_send_request_ex(struct pvr2_hdw *,unsigned int timeout,int probe_fl,
243 void *write_ptr,unsigned int write_len,
244 void *read_ptr,unsigned int read_len);
245
246/* Slightly higher level device communication functions. */
247int pvr2_write_register(struct pvr2_hdw *, u16, u32);
248int pvr2_read_register(struct pvr2_hdw *, u16, u32 *);
249int pvr2_write_u16(struct pvr2_hdw *, u16, int);
250int pvr2_write_u8(struct pvr2_hdw *, u8, int);
251
252/* Call if for any reason we can't talk to the hardware anymore - this will
253 cause the driver to stop flailing on the device. */
254void pvr2_hdw_render_useless(struct pvr2_hdw *);
255void pvr2_hdw_render_useless_unlocked(struct pvr2_hdw *);
256
257/* Set / clear 8051's reset bit */
258void pvr2_hdw_cpureset_assert(struct pvr2_hdw *,int);
259
260/* Execute a USB-commanded device reset */
261void pvr2_hdw_device_reset(struct pvr2_hdw *);
262
263/* Execute hard reset command (after this point it's likely that the
264 encoder will have to be reconfigured). This also clears the "useless"
265 state. */
266int pvr2_hdw_cmd_deep_reset(struct pvr2_hdw *);
267
268/* Execute simple reset command */
269int pvr2_hdw_cmd_powerup(struct pvr2_hdw *);
270
271/* Order decoder to reset */
272int pvr2_hdw_cmd_decoder_reset(struct pvr2_hdw *);
273
274/* Stop / start video stream transport */
275int pvr2_hdw_cmd_usbstream(struct pvr2_hdw *hdw,int runFl);
276
277/* Find I2C address of eeprom */
278int pvr2_hdw_get_eeprom_addr(struct pvr2_hdw *);
279
280/* Direct manipulation of GPIO bits */
281int pvr2_hdw_gpio_get_dir(struct pvr2_hdw *hdw,u32 *);
282int pvr2_hdw_gpio_get_out(struct pvr2_hdw *hdw,u32 *);
283int pvr2_hdw_gpio_get_in(struct pvr2_hdw *hdw,u32 *);
284int pvr2_hdw_gpio_chg_dir(struct pvr2_hdw *hdw,u32 msk,u32 val);
285int pvr2_hdw_gpio_chg_out(struct pvr2_hdw *hdw,u32 msk,u32 val);
286
287/* This data structure is specifically for the next function... */
struct pvr2_hdw_debug_info {
	int big_lock_held;           /* big_lock currently held? */
	int ctl_lock_held;           /* ctl_lock currently held? */
	int flag_ok;                 /* device believed usable */
	int flag_disconnected;       /* USB device has gone away */
	int flag_init_ok;            /* initialization succeeded */
	int flag_streaming_enabled;  /* streaming switched on */
	unsigned long subsys_flags;  /* enabled-subsystem bit mask */
	int cmd_debug_state;         /* progress state of last command */
	int cmd_debug_write_len;     /* write length of last command */
	int cmd_debug_read_len;      /* read length of last command */
	int cmd_debug_write_pend;    /* control write still pending? */
	int cmd_debug_read_pend;     /* control read still pending? */
	int cmd_debug_timeout;       /* last command timed out? */
	int cmd_debug_rstatus;       /* status of control read URB */
	int cmd_debug_wstatus;       /* status of control write URB */
	unsigned char cmd_code;      /* opcode of last command */
};
306
307/* Non-intrusively retrieve internal state info - this is useful for
308 diagnosing lockups. Note that this operation is completed without any
309 kind of locking and so it is not atomic and may yield inconsistent
310 results. This is *purely* a debugging aid. */
311void pvr2_hdw_get_debug_info(const struct pvr2_hdw *hdw,
312 struct pvr2_hdw_debug_info *);
313
314/* Cause modules to log their state once */
315void pvr2_hdw_trigger_module_log(struct pvr2_hdw *hdw);
316
317/* Cause encoder firmware to be uploaded into the device. This is normally
318 done autonomously, but the interface is exported here because it is also
319 a debugging aid. */
320int pvr2_upload_firmware2(struct pvr2_hdw *hdw);
321
322/* List of device types that we can match */
323extern struct usb_device_id pvr2_device_table[];
324
325#endif /* __PVRUSB2_HDW_H */
326
327/*
328 Stuff for Emacs to see, in order to encourage consistent editing style:
329 *** Local Variables: ***
330 *** mode: c ***
331 *** fill-column: 75 ***
332 *** tab-width: 8 ***
333 *** c-basic-offset: 8 ***
334 *** End: ***
335 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-chips-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-i2c-chips-v4l2.c
new file mode 100644
index 0000000000..1dd4f6249b
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-chips-v4l2.c
@@ -0,0 +1,115 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21
22#include "pvrusb2-i2c-core.h"
23#include "pvrusb2-hdw-internal.h"
24#include "pvrusb2-debug.h"
25#include "pvrusb2-i2c-cmd-v4l2.h"
26#include "pvrusb2-audio.h"
27#include "pvrusb2-tuner.h"
28#include "pvrusb2-demod.h"
29#include "pvrusb2-video-v4l.h"
30#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
31#include "pvrusb2-cx2584x-v4l.h"
32#include "pvrusb2-wm8775.h"
33#endif
34
35#define trace_i2c(...) pvr2_trace(PVR2_TRACE_I2C,__VA_ARGS__)
36
37#define OP_STANDARD 0
38#define OP_BCSH 1
39#define OP_VOLUME 2
40#define OP_FREQ 3
41#define OP_AUDIORATE 4
42#define OP_SIZE 5
43#define OP_LOG 6
44
/* Table mapping OP_XXX slot numbers to their i2c update operations.
   OP_AUDIORATE has no initializer here, so that slot reads back as
   NULL (designated initializers zero-fill unmentioned entries). */
static const struct pvr2_i2c_op * const ops[] = {
	[OP_STANDARD] = &pvr2_i2c_op_v4l2_standard,
	[OP_BCSH] = &pvr2_i2c_op_v4l2_bcsh,
	[OP_VOLUME] = &pvr2_i2c_op_v4l2_volume,
	[OP_FREQ] = &pvr2_i2c_op_v4l2_frequency,
	[OP_SIZE] = &pvr2_i2c_op_v4l2_size,
	[OP_LOG] = &pvr2_i2c_op_v4l2_log,
};
53
/* Examine a newly attached i2c client and run any chip-specific setup
   matched by its V4L driver id.  A default ctl_mask enabling all of
   the common operations is installed first; each setup helper returns
   non-zero if it claimed the client, which stops further probing.
   NOTE(review): helpers may further adjust ctl_mask - their bodies
   are not visible here. */
void pvr2_i2c_probe(struct pvr2_hdw *hdw,struct pvr2_i2c_client *cp)
{
	int id;
	id = cp->client->driver->id;
	/* Default: client participates in all standard operations */
	cp->ctl_mask = ((1 << OP_STANDARD) |
			(1 << OP_BCSH) |
			(1 << OP_VOLUME) |
			(1 << OP_FREQ) |
			(1 << OP_SIZE) |
			(1 << OP_LOG));

	if (id == I2C_DRIVERID_MSP3400) {
		if (pvr2_i2c_msp3400_setup(hdw,cp)) {
			return;
		}
	}
	if (id == I2C_DRIVERID_TUNER) {
		if (pvr2_i2c_tuner_setup(hdw,cp)) {
			return;
		}
	}
#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
	/* These chips only exist on 24xxx-class hardware */
	if (id == I2C_DRIVERID_CX25840) {
		if (pvr2_i2c_cx2584x_v4l_setup(hdw,cp)) {
			return;
		}
	}
	if (id == I2C_DRIVERID_WM8775) {
		if (pvr2_i2c_wm8775_setup(hdw,cp)) {
			return;
		}
	}
#endif
	if (id == I2C_DRIVERID_SAA711X) {
		if (pvr2_i2c_decoder_v4l_setup(hdw,cp)) {
			return;
		}
	}
	if (id == I2C_DRIVERID_TDA9887) {
		if (pvr2_i2c_demod_setup(hdw,cp)) {
			return;
		}
	}
}
98
99
100const struct pvr2_i2c_op *pvr2_i2c_get_op(unsigned int idx)
101{
102 if (idx >= sizeof(ops)/sizeof(ops[0])) return 0;
103 return ops[idx];
104}
105
106
107/*
108 Stuff for Emacs to see, in order to encourage consistent editing style:
109 *** Local Variables: ***
110 *** mode: c ***
111 *** fill-column: 75 ***
112 *** tab-width: 8 ***
113 *** c-basic-offset: 8 ***
114 *** End: ***
115 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-cmd-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-i2c-cmd-v4l2.c
new file mode 100644
index 0000000000..9f81aff2b3
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-cmd-v4l2.c
@@ -0,0 +1,232 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include "pvrusb2-i2c-cmd-v4l2.h"
24#include "pvrusb2-hdw-internal.h"
25#include "pvrusb2-debug.h"
26#include <linux/videodev2.h>
27
28
/* Push the currently selected video standard mask to the attached
   i2c chips via VIDIOC_S_STD. */
static void set_standard(struct pvr2_hdw *hdw)
{
	v4l2_std_id vs;
	vs = hdw->std_mask_cur;
	pvr2_trace(PVR2_TRACE_CHIPS,
		   "i2c v4l2 set_standard(0x%llx)",(__u64)vs);

	pvr2_i2c_core_cmd(hdw,VIDIOC_S_STD,&vs);
}


/* Return non-zero when the standard selection has pending changes. */
static int check_standard(struct pvr2_hdw *hdw)
{
	return hdw->std_dirty != 0;
}


/* Operation descriptor binding the check/update pair for the video
   standard. */
const struct pvr2_i2c_op pvr2_i2c_op_v4l2_standard = {
	.check = check_standard,
	.update = set_standard,
	.name = "v4l2_standard",
};
51
52
53static void set_bcsh(struct pvr2_hdw *hdw)
54{
55 struct v4l2_control ctrl;
56 pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 set_bcsh"
57 " b=%d c=%d s=%d h=%d",
58 hdw->brightness_val,hdw->contrast_val,
59 hdw->saturation_val,hdw->hue_val);
60 memset(&ctrl,0,sizeof(ctrl));
61 ctrl.id = V4L2_CID_BRIGHTNESS;
62 ctrl.value = hdw->brightness_val;
63 pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl);
64 ctrl.id = V4L2_CID_CONTRAST;
65 ctrl.value = hdw->contrast_val;
66 pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl);
67 ctrl.id = V4L2_CID_SATURATION;
68 ctrl.value = hdw->saturation_val;
69 pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl);
70 ctrl.id = V4L2_CID_HUE;
71 ctrl.value = hdw->hue_val;
72 pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl);
73}
74
75
76static int check_bcsh(struct pvr2_hdw *hdw)
77{
78 return (hdw->brightness_dirty ||
79 hdw->contrast_dirty ||
80 hdw->saturation_dirty ||
81 hdw->hue_dirty);
82}
83
84
85const struct pvr2_i2c_op pvr2_i2c_op_v4l2_bcsh = {
86 .check = check_bcsh,
87 .update = set_bcsh,
88 .name = "v4l2_bcsh",
89};
90
91
92static void set_volume(struct pvr2_hdw *hdw)
93{
94 struct v4l2_control ctrl;
95 pvr2_trace(PVR2_TRACE_CHIPS,
96 "i2c v4l2 set_volume"
97 "(vol=%d bal=%d bas=%d treb=%d mute=%d)",
98 hdw->volume_val,
99 hdw->balance_val,
100 hdw->bass_val,
101 hdw->treble_val,
102 hdw->mute_val);
103 memset(&ctrl,0,sizeof(ctrl));
104 ctrl.id = V4L2_CID_AUDIO_MUTE;
105 ctrl.value = hdw->mute_val ? 1 : 0;
106 pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl);
107 ctrl.id = V4L2_CID_AUDIO_VOLUME;
108 ctrl.value = hdw->volume_val;
109 pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl);
110 ctrl.id = V4L2_CID_AUDIO_BALANCE;
111 ctrl.value = hdw->balance_val;
112 pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl);
113 ctrl.id = V4L2_CID_AUDIO_BASS;
114 ctrl.value = hdw->bass_val;
115 pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl);
116 ctrl.id = V4L2_CID_AUDIO_TREBLE;
117 ctrl.value = hdw->treble_val;
118 pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl);
119}
120
121
122static int check_volume(struct pvr2_hdw *hdw)
123{
124 return (hdw->volume_dirty ||
125 hdw->balance_dirty ||
126 hdw->bass_dirty ||
127 hdw->treble_dirty ||
128 hdw->mute_dirty);
129}
130
131
132const struct pvr2_i2c_op pvr2_i2c_op_v4l2_volume = {
133 .check = check_volume,
134 .update = set_volume,
135 .name = "v4l2_volume",
136};
137
138
139static void set_frequency(struct pvr2_hdw *hdw)
140{
141 unsigned long fv;
142 struct v4l2_frequency freq;
143 fv = hdw->freqVal;
144 pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 set_freq(%lu)",fv);
145 memset(&freq,0,sizeof(freq));
146 freq.frequency = fv / 62500;
147 freq.tuner = 0;
148 freq.type = V4L2_TUNER_ANALOG_TV;
149 pvr2_i2c_core_cmd(hdw,VIDIOC_S_FREQUENCY,&freq);
150}
151
152
153static int check_frequency(struct pvr2_hdw *hdw)
154{
155 return hdw->freqDirty != 0;
156}
157
158
159const struct pvr2_i2c_op pvr2_i2c_op_v4l2_frequency = {
160 .check = check_frequency,
161 .update = set_frequency,
162 .name = "v4l2_freq",
163};
164
165
166static void set_size(struct pvr2_hdw *hdw)
167{
168 struct v4l2_format fmt;
169
170 memset(&fmt,0,sizeof(fmt));
171
172 fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
173 fmt.fmt.pix.width = hdw->res_hor_val;
174 fmt.fmt.pix.height = hdw->res_ver_val;
175
176 pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 set_size(%dx%d)",
177 fmt.fmt.pix.width,fmt.fmt.pix.height);
178
179 pvr2_i2c_core_cmd(hdw,VIDIOC_S_FMT,&fmt);
180}
181
182
183static int check_size(struct pvr2_hdw *hdw)
184{
185 return (hdw->res_hor_dirty || hdw->res_ver_dirty);
186}
187
188
189const struct pvr2_i2c_op pvr2_i2c_op_v4l2_size = {
190 .check = check_size,
191 .update = set_size,
192 .name = "v4l2_size",
193};
194
195
/* Ask every attached i2c chip to log its internal state (debug aid). */
static void do_log(struct pvr2_hdw *hdw)
{
	pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 do_log()");
	pvr2_i2c_core_cmd(hdw,VIDIOC_LOG_STATUS,0);

}


/* Return non-zero when a state log has been requested. */
static int check_log(struct pvr2_hdw *hdw)
{
	return hdw->log_requested != 0;
}


/* Operation descriptor for the one-shot state log. */
const struct pvr2_i2c_op pvr2_i2c_op_v4l2_log = {
	.check = check_log,
	.update = do_log,
	.name = "v4l2_log",
};
215
216
217void pvr2_v4l2_cmd_stream(struct pvr2_i2c_client *cp,int fl)
218{
219 pvr2_i2c_client_cmd(cp,
220 (fl ? VIDIOC_STREAMON : VIDIOC_STREAMOFF),0);
221}
222
223
224/*
225 Stuff for Emacs to see, in order to encourage consistent editing style:
226 *** Local Variables: ***
227 *** mode: c ***
228 *** fill-column: 70 ***
229 *** tab-width: 8 ***
230 *** c-basic-offset: 8 ***
231 *** End: ***
232 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-cmd-v4l2.h b/drivers/media/video/pvrusb2/pvrusb2-i2c-cmd-v4l2.h
new file mode 100644
index 0000000000..ecabddba1e
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-cmd-v4l2.h
@@ -0,0 +1,47 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#ifndef __PVRUSB2_CMD_V4L2_H
24#define __PVRUSB2_CMD_V4L2_H
25
26#include "pvrusb2-i2c-core.h"
27
28extern const struct pvr2_i2c_op pvr2_i2c_op_v4l2_standard;
29extern const struct pvr2_i2c_op pvr2_i2c_op_v4l2_bcsh;
30extern const struct pvr2_i2c_op pvr2_i2c_op_v4l2_volume;
31extern const struct pvr2_i2c_op pvr2_i2c_op_v4l2_frequency;
32extern const struct pvr2_i2c_op pvr2_i2c_op_v4l2_size;
33extern const struct pvr2_i2c_op pvr2_i2c_op_v4l2_log;
34
35void pvr2_v4l2_cmd_stream(struct pvr2_i2c_client *,int);
36
37#endif /* __PVRUSB2_CMD_V4L2_H */
38
39/*
40 Stuff for Emacs to see, in order to encourage consistent editing style:
41 *** Local Variables: ***
42 *** mode: c ***
43 *** fill-column: 70 ***
44 *** tab-width: 8 ***
45 *** c-basic-offset: 8 ***
46 *** End: ***
47 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
new file mode 100644
index 0000000000..c8d0bdee3f
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
@@ -0,0 +1,937 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21
22#include "pvrusb2-i2c-core.h"
23#include "pvrusb2-hdw-internal.h"
24#include "pvrusb2-debug.h"
25
26#define trace_i2c(...) pvr2_trace(PVR2_TRACE_I2C,__VA_ARGS__)
27
28/*
29
30 This module attempts to implement a compliant I2C adapter for the pvrusb2
31 device. By doing this we can then make use of existing functionality in
32 V4L (e.g. tuner.c) rather than rolling our own.
33
34*/
35
36static unsigned int i2c_scan = 0;
37module_param(i2c_scan, int, S_IRUGO|S_IWUSR);
38MODULE_PARM_DESC(i2c_scan,"scan i2c bus at insmod time");
39
40static int pvr2_i2c_write(struct pvr2_hdw *hdw, /* Context */
41 u8 i2c_addr, /* I2C address we're talking to */
42 u8 *data, /* Data to write */
43 u16 length) /* Size of data to write */
44{
45 /* Return value - default 0 means success */
46 int ret;
47
48
49 if (!data) length = 0;
50 if (length > (sizeof(hdw->cmd_buffer) - 3)) {
51 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
52 "Killing an I2C write to %u that is too large"
53 " (desired=%u limit=%u)",
54 i2c_addr,
55 length,(unsigned int)(sizeof(hdw->cmd_buffer) - 3));
56 return -ENOTSUPP;
57 }
58
59 LOCK_TAKE(hdw->ctl_lock);
60
61 /* Clear the command buffer (likely to be paranoia) */
62 memset(hdw->cmd_buffer, 0, sizeof(hdw->cmd_buffer));
63
64 /* Set up command buffer for an I2C write */
65 hdw->cmd_buffer[0] = 0x08; /* write prefix */
66 hdw->cmd_buffer[1] = i2c_addr; /* i2c addr of chip */
67 hdw->cmd_buffer[2] = length; /* length of what follows */
68 if (length) memcpy(hdw->cmd_buffer + 3, data, length);
69
70 /* Do the operation */
71 ret = pvr2_send_request(hdw,
72 hdw->cmd_buffer,
73 length + 3,
74 hdw->cmd_buffer,
75 1);
76 if (!ret) {
77 if (hdw->cmd_buffer[0] != 8) {
78 ret = -EIO;
79 if (hdw->cmd_buffer[0] != 7) {
80 trace_i2c("unexpected status"
81 " from i2_write[%d]: %d",
82 i2c_addr,hdw->cmd_buffer[0]);
83 }
84 }
85 }
86
87 LOCK_GIVE(hdw->ctl_lock);
88
89 return ret;
90}
91
92static int pvr2_i2c_read(struct pvr2_hdw *hdw, /* Context */
93 u8 i2c_addr, /* I2C address we're talking to */
94 u8 *data, /* Data to write */
95 u16 dlen, /* Size of data to write */
96 u8 *res, /* Where to put data we read */
97 u16 rlen) /* Amount of data to read */
98{
99 /* Return value - default 0 means success */
100 int ret;
101
102
103 if (!data) dlen = 0;
104 if (dlen > (sizeof(hdw->cmd_buffer) - 4)) {
105 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
106 "Killing an I2C read to %u that has wlen too large"
107 " (desired=%u limit=%u)",
108 i2c_addr,
109 dlen,(unsigned int)(sizeof(hdw->cmd_buffer) - 4));
110 return -ENOTSUPP;
111 }
112 if (res && (rlen > (sizeof(hdw->cmd_buffer) - 1))) {
113 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
114 "Killing an I2C read to %u that has rlen too large"
115 " (desired=%u limit=%u)",
116 i2c_addr,
117 rlen,(unsigned int)(sizeof(hdw->cmd_buffer) - 1));
118 return -ENOTSUPP;
119 }
120
121 LOCK_TAKE(hdw->ctl_lock);
122
123 /* Clear the command buffer (likely to be paranoia) */
124 memset(hdw->cmd_buffer, 0, sizeof(hdw->cmd_buffer));
125
126 /* Set up command buffer for an I2C write followed by a read */
127 hdw->cmd_buffer[0] = 0x09; /* read prefix */
128 hdw->cmd_buffer[1] = dlen; /* arg length */
129 hdw->cmd_buffer[2] = rlen; /* answer length. Device will send one
130 more byte (status). */
131 hdw->cmd_buffer[3] = i2c_addr; /* i2c addr of chip */
132 if (dlen) memcpy(hdw->cmd_buffer + 4, data, dlen);
133
134 /* Do the operation */
135 ret = pvr2_send_request(hdw,
136 hdw->cmd_buffer,
137 4 + dlen,
138 hdw->cmd_buffer,
139 rlen + 1);
140 if (!ret) {
141 if (hdw->cmd_buffer[0] != 8) {
142 ret = -EIO;
143 if (hdw->cmd_buffer[0] != 7) {
144 trace_i2c("unexpected status"
145 " from i2_read[%d]: %d",
146 i2c_addr,hdw->cmd_buffer[0]);
147 }
148 }
149 }
150
151 /* Copy back the result */
152 if (res && rlen) {
153 if (ret) {
154 /* Error, just blank out the return buffer */
155 memset(res, 0, rlen);
156 } else {
157 memcpy(res, hdw->cmd_buffer + 1, rlen);
158 }
159 }
160
161 LOCK_GIVE(hdw->ctl_lock);
162
163 return ret;
164}
165
166/* This is the common low level entry point for doing I2C operations to the
167 hardware. */
168int pvr2_i2c_basic_op(struct pvr2_hdw *hdw,
169 u8 i2c_addr,
170 u8 *wdata,
171 u16 wlen,
172 u8 *rdata,
173 u16 rlen)
174{
175 if (!rdata) rlen = 0;
176 if (!wdata) wlen = 0;
177 if (rlen || !wlen) {
178 return pvr2_i2c_read(hdw,i2c_addr,wdata,wlen,rdata,rlen);
179 } else {
180 return pvr2_i2c_write(hdw,i2c_addr,wdata,wlen);
181 }
182}
183
184#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
185
186/* This is a special entry point that is entered if an I2C operation is
187 attempted to a wm8775 chip on model 24xxx hardware. Autodetect of this
188 part doesn't work, but we know it is really there. So let's look for
189 the autodetect attempt and just return success if we see that. */
190static int i2c_hack_wm8775(struct pvr2_hdw *hdw,
191 u8 i2c_addr,u8 *wdata,u16 wlen,u8 *rdata,u16 rlen)
192{
193 if (!(rlen || wlen)) {
194 // This is a probe attempt. Just let it succeed.
195 return 0;
196 }
197 return pvr2_i2c_basic_op(hdw,i2c_addr,wdata,wlen,rdata,rlen);
198}
199
200/* This is a special entry point that is entered if an I2C operation is
201 attempted to a cx25840 chip on model 24xxx hardware. This chip can
202 sometimes wedge itself. Worse still, when this happens msp3400 can
203 falsely detect this part and then the system gets hosed up after msp3400
204 gets confused and dies. What we want to do here is try to keep msp3400
205 away and also try to notice if the chip is wedged and send a warning to
206 the system log. */
207static int i2c_hack_cx25840(struct pvr2_hdw *hdw,
208 u8 i2c_addr,u8 *wdata,u16 wlen,u8 *rdata,u16 rlen)
209{
210 int ret;
211 unsigned int subaddr;
212 u8 wbuf[2];
213 int state = hdw->i2c_cx25840_hack_state;
214
215 if (!(rlen || wlen)) {
216 // Probe attempt - always just succeed and don't bother the
217 // hardware (this helps to make the state machine further
218 // down somewhat easier).
219 return 0;
220 }
221
222 if (state == 3) {
223 return pvr2_i2c_basic_op(hdw,i2c_addr,wdata,wlen,rdata,rlen);
224 }
225
226 /* We're looking for the exact pattern where the revision register
227 is being read. The cx25840 module will always look at the
228 revision register first. Any other pattern of access therefore
229 has to be a probe attempt from somebody else so we'll reject it.
230 Normally we could just let each client just probe the part
231 anyway, but when the cx25840 is wedged, msp3400 will get a false
232 positive and that just screws things up... */
233
234 if (wlen == 0) {
235 switch (state) {
236 case 1: subaddr = 0x0100; break;
237 case 2: subaddr = 0x0101; break;
238 default: goto fail;
239 }
240 } else if (wlen == 2) {
241 subaddr = (wdata[0] << 8) | wdata[1];
242 switch (subaddr) {
243 case 0x0100: state = 1; break;
244 case 0x0101: state = 2; break;
245 default: goto fail;
246 }
247 } else {
248 goto fail;
249 }
250 if (!rlen) goto success;
251 state = 0;
252 if (rlen != 1) goto fail;
253
254 /* If we get to here then we have a legitimate read for one of the
255 two revision bytes, so pass it through. */
256 wbuf[0] = subaddr >> 8;
257 wbuf[1] = subaddr;
258 ret = pvr2_i2c_basic_op(hdw,i2c_addr,wbuf,2,rdata,rlen);
259
260 if ((ret != 0) || (*rdata == 0x04) || (*rdata == 0x0a)) {
261 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
262 "WARNING: Detected a wedged cx25840 chip;"
263 " the device will not work.");
264 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
265 "WARNING: Try power cycling the pvrusb2 device.");
266 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
267 "WARNING: Disabling further access to the device"
268 " to prevent other foul-ups.");
269 // This blocks all further communication with the part.
270 hdw->i2c_func[0x44] = 0;
271 pvr2_hdw_render_useless(hdw);
272 goto fail;
273 }
274
275 /* Success! */
276 pvr2_trace(PVR2_TRACE_CHIPS,"cx25840 appears to be OK.");
277 state = 3;
278
279 success:
280 hdw->i2c_cx25840_hack_state = state;
281 return 0;
282
283 fail:
284 hdw->i2c_cx25840_hack_state = state;
285 return -EIO;
286}
287
288#endif /* CONFIG_VIDEO_PVRUSB2_24XXX */
289
290/* This is a very, very limited I2C adapter implementation. We can only
291 support what we actually know will work on the device... */
292static int pvr2_i2c_xfer(struct i2c_adapter *i2c_adap,
293 struct i2c_msg msgs[],
294 int num)
295{
296 int ret = -ENOTSUPP;
297 pvr2_i2c_func funcp = 0;
298 struct pvr2_hdw *hdw = (struct pvr2_hdw *)(i2c_adap->algo_data);
299
300 if (!num) {
301 ret = -EINVAL;
302 goto done;
303 }
304 if ((msgs[0].flags & I2C_M_NOSTART)) {
305 trace_i2c("i2c refusing I2C_M_NOSTART");
306 goto done;
307 }
308 if (msgs[0].addr < PVR2_I2C_FUNC_CNT) {
309 funcp = hdw->i2c_func[msgs[0].addr];
310 }
311 if (!funcp) {
312 ret = -EIO;
313 goto done;
314 }
315
316 if (num == 1) {
317 if (msgs[0].flags & I2C_M_RD) {
318 /* Simple read */
319 u16 tcnt,bcnt,offs;
320 if (!msgs[0].len) {
321 /* Length == 0 read. This is a probe. */
322 if (funcp(hdw,msgs[0].addr,0,0,0,0)) {
323 ret = -EIO;
324 goto done;
325 }
326 ret = 1;
327 goto done;
328 }
329 /* If the read is short enough we'll do the whole
330 thing atomically. Otherwise we have no choice
331 but to break apart the reads. */
332 tcnt = msgs[0].len;
333 offs = 0;
334 while (tcnt) {
335 bcnt = tcnt;
336 if (bcnt > sizeof(hdw->cmd_buffer)-1) {
337 bcnt = sizeof(hdw->cmd_buffer)-1;
338 }
339 if (funcp(hdw,msgs[0].addr,0,0,
340 msgs[0].buf+offs,bcnt)) {
341 ret = -EIO;
342 goto done;
343 }
344 offs += bcnt;
345 tcnt -= bcnt;
346 }
347 ret = 1;
348 goto done;
349 } else {
350 /* Simple write */
351 ret = 1;
352 if (funcp(hdw,msgs[0].addr,
353 msgs[0].buf,msgs[0].len,0,0)) {
354 ret = -EIO;
355 }
356 goto done;
357 }
358 } else if (num == 2) {
359 if (msgs[0].addr != msgs[1].addr) {
360 trace_i2c("i2c refusing 2 phase transfer with"
361 " conflicting target addresses");
362 ret = -ENOTSUPP;
363 goto done;
364 }
365 if ((!((msgs[0].flags & I2C_M_RD))) &&
366 (msgs[1].flags & I2C_M_RD)) {
367 u16 tcnt,bcnt,wcnt,offs;
368 /* Write followed by atomic read. If the read
369 portion is short enough we'll do the whole thing
370 atomically. Otherwise we have no choice but to
371 break apart the reads. */
372 tcnt = msgs[1].len;
373 wcnt = msgs[0].len;
374 offs = 0;
375 while (tcnt || wcnt) {
376 bcnt = tcnt;
377 if (bcnt > sizeof(hdw->cmd_buffer)-1) {
378 bcnt = sizeof(hdw->cmd_buffer)-1;
379 }
380 if (funcp(hdw,msgs[0].addr,
381 msgs[0].buf,wcnt,
382 msgs[1].buf+offs,bcnt)) {
383 ret = -EIO;
384 goto done;
385 }
386 offs += bcnt;
387 tcnt -= bcnt;
388 wcnt = 0;
389 }
390 ret = 2;
391 goto done;
392 } else {
393 trace_i2c("i2c refusing complex transfer"
394 " read0=%d read1=%d",
395 (msgs[0].flags & I2C_M_RD),
396 (msgs[1].flags & I2C_M_RD));
397 }
398 } else {
399 trace_i2c("i2c refusing %d phase transfer",num);
400 }
401
402 done:
403 if (pvrusb2_debug & PVR2_TRACE_I2C_TRAF) {
404 unsigned int idx,offs,cnt;
405 for (idx = 0; idx < num; idx++) {
406 cnt = msgs[idx].len;
407 printk(KERN_INFO
408 "pvrusb2 i2c xfer %u/%u:"
409 " addr=0x%x len=%d %s%s",
410 idx+1,num,
411 msgs[idx].addr,
412 cnt,
413 (msgs[idx].flags & I2C_M_RD ?
414 "read" : "write"),
415 (msgs[idx].flags & I2C_M_NOSTART ?
416 " nostart" : ""));
417 if ((ret > 0) || !(msgs[idx].flags & I2C_M_RD)) {
418 if (cnt > 8) cnt = 8;
419 printk(" [");
420 for (offs = 0; offs < (cnt>8?8:cnt); offs++) {
421 if (offs) printk(" ");
422 printk("%02x",msgs[idx].buf[offs]);
423 }
424 if (offs < cnt) printk(" ...");
425 printk("]");
426 }
427 if (idx+1 == num) {
428 printk(" result=%d",ret);
429 }
430 printk("\n");
431 }
432 if (!num) {
433 printk(KERN_INFO
434 "pvrusb2 i2c xfer null transfer result=%d\n",
435 ret);
436 }
437 }
438 return ret;
439}
440
441static int pvr2_i2c_control(struct i2c_adapter *adapter,
442 unsigned int cmd, unsigned long arg)
443{
444 return 0;
445}
446
447static u32 pvr2_i2c_functionality(struct i2c_adapter *adap)
448{
449 return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C | I2C_FUNC_SMBUS_BYTE_DATA;
450}
451
452static int pvr2_i2c_core_singleton(struct i2c_client *cp,
453 unsigned int cmd,void *arg)
454{
455 int stat;
456 if (!cp) return -EINVAL;
457 if (!(cp->driver)) return -EINVAL;
458 if (!(cp->driver->command)) return -EINVAL;
459 if (!try_module_get(cp->driver->driver.owner)) return -EAGAIN;
460 stat = cp->driver->command(cp,cmd,arg);
461 module_put(cp->driver->driver.owner);
462 return stat;
463}
464
465int pvr2_i2c_client_cmd(struct pvr2_i2c_client *cp,unsigned int cmd,void *arg)
466{
467 int stat;
468 if (pvrusb2_debug & PVR2_TRACE_I2C_CMD) {
469 char buf[100];
470 unsigned int cnt;
471 cnt = pvr2_i2c_client_describe(cp,PVR2_I2C_DETAIL_DEBUG,
472 buf,sizeof(buf));
473 pvr2_trace(PVR2_TRACE_I2C_CMD,
474 "i2c COMMAND (code=%u 0x%x) to %.*s",
475 cmd,cmd,cnt,buf);
476 }
477 stat = pvr2_i2c_core_singleton(cp->client,cmd,arg);
478 if (pvrusb2_debug & PVR2_TRACE_I2C_CMD) {
479 char buf[100];
480 unsigned int cnt;
481 cnt = pvr2_i2c_client_describe(cp,PVR2_I2C_DETAIL_DEBUG,
482 buf,sizeof(buf));
483 pvr2_trace(PVR2_TRACE_I2C_CMD,
484 "i2c COMMAND to %.*s (ret=%d)",cnt,buf,stat);
485 }
486 return stat;
487}
488
489int pvr2_i2c_core_cmd(struct pvr2_hdw *hdw,unsigned int cmd,void *arg)
490{
491 struct list_head *item,*nc;
492 struct pvr2_i2c_client *cp;
493 int stat = -EINVAL;
494
495 if (!hdw) return stat;
496
497 mutex_lock(&hdw->i2c_list_lock);
498 list_for_each_safe(item,nc,&hdw->i2c_clients) {
499 cp = list_entry(item,struct pvr2_i2c_client,list);
500 if (!cp->recv_enable) continue;
501 mutex_unlock(&hdw->i2c_list_lock);
502 stat = pvr2_i2c_client_cmd(cp,cmd,arg);
503 mutex_lock(&hdw->i2c_list_lock);
504 }
505 mutex_unlock(&hdw->i2c_list_lock);
506 return stat;
507}
508
509
510static int handler_check(struct pvr2_i2c_client *cp)
511{
512 struct pvr2_i2c_handler *hp = cp->handler;
513 if (!hp) return 0;
514 if (!hp->func_table->check) return 0;
515 return hp->func_table->check(hp->func_data) != 0;
516}
517
518#define BUFSIZE 500
519
520void pvr2_i2c_core_sync(struct pvr2_hdw *hdw)
521{
522 unsigned long msk;
523 unsigned int idx;
524 struct list_head *item,*nc;
525 struct pvr2_i2c_client *cp;
526
527 if (!hdw->i2c_linked) return;
528 if (!(hdw->i2c_pend_types & PVR2_I2C_PEND_ALL)) {
529 return;
530 }
531 mutex_lock(&hdw->i2c_list_lock); do {
532 pvr2_trace(PVR2_TRACE_I2C_CORE,"i2c: core_sync BEGIN");
533 if (hdw->i2c_pend_types & PVR2_I2C_PEND_DETECT) {
534 /* One or more I2C clients have attached since we
535 last synced. So scan the list and identify the
536 new clients. */
537 char *buf;
538 unsigned int cnt;
539 unsigned long amask = 0;
540 buf = kmalloc(BUFSIZE,GFP_KERNEL);
541 pvr2_trace(PVR2_TRACE_I2C_CORE,"i2c: PEND_DETECT");
542 hdw->i2c_pend_types &= ~PVR2_I2C_PEND_DETECT;
543 list_for_each(item,&hdw->i2c_clients) {
544 cp = list_entry(item,struct pvr2_i2c_client,
545 list);
546 if (!cp->detected_flag) {
547 cp->ctl_mask = 0;
548 pvr2_i2c_probe(hdw,cp);
549 cp->detected_flag = !0;
550 msk = cp->ctl_mask;
551 cnt = 0;
552 if (buf) {
553 cnt = pvr2_i2c_client_describe(
554 cp,
555 PVR2_I2C_DETAIL_ALL,
556 buf,BUFSIZE);
557 }
558 trace_i2c("Probed: %.*s",cnt,buf);
559 if (handler_check(cp)) {
560 hdw->i2c_pend_types |=
561 PVR2_I2C_PEND_CLIENT;
562 }
563 cp->pend_mask = msk;
564 hdw->i2c_pend_mask |= msk;
565 hdw->i2c_pend_types |=
566 PVR2_I2C_PEND_REFRESH;
567 }
568 amask |= cp->ctl_mask;
569 }
570 hdw->i2c_active_mask = amask;
571 if (buf) kfree(buf);
572 }
573 if (hdw->i2c_pend_types & PVR2_I2C_PEND_STALE) {
574 /* Need to do one or more global updates. Arrange
575 for this to happen. */
576 unsigned long m2;
577 pvr2_trace(PVR2_TRACE_I2C_CORE,
578 "i2c: PEND_STALE (0x%lx)",
579 hdw->i2c_stale_mask);
580 hdw->i2c_pend_types &= ~PVR2_I2C_PEND_STALE;
581 list_for_each(item,&hdw->i2c_clients) {
582 cp = list_entry(item,struct pvr2_i2c_client,
583 list);
584 m2 = hdw->i2c_stale_mask;
585 m2 &= cp->ctl_mask;
586 m2 &= ~cp->pend_mask;
587 if (m2) {
588 pvr2_trace(PVR2_TRACE_I2C_CORE,
589 "i2c: cp=%p setting 0x%lx",
590 cp,m2);
591 cp->pend_mask |= m2;
592 }
593 }
594 hdw->i2c_pend_mask |= hdw->i2c_stale_mask;
595 hdw->i2c_stale_mask = 0;
596 hdw->i2c_pend_types |= PVR2_I2C_PEND_REFRESH;
597 }
598 if (hdw->i2c_pend_types & PVR2_I2C_PEND_CLIENT) {
599 /* One or more client handlers are asking for an
600 update. Run through the list of known clients
601 and update each one. */
602 pvr2_trace(PVR2_TRACE_I2C_CORE,"i2c: PEND_CLIENT");
603 hdw->i2c_pend_types &= ~PVR2_I2C_PEND_CLIENT;
604 list_for_each_safe(item,nc,&hdw->i2c_clients) {
605 cp = list_entry(item,struct pvr2_i2c_client,
606 list);
607 if (!cp->handler) continue;
608 if (!cp->handler->func_table->update) continue;
609 pvr2_trace(PVR2_TRACE_I2C_CORE,
610 "i2c: cp=%p update",cp);
611 mutex_unlock(&hdw->i2c_list_lock);
612 cp->handler->func_table->update(
613 cp->handler->func_data);
614 mutex_lock(&hdw->i2c_list_lock);
615 /* If client's update function set some
616 additional pending bits, account for that
617 here. */
618 if (cp->pend_mask & ~hdw->i2c_pend_mask) {
619 hdw->i2c_pend_mask |= cp->pend_mask;
620 hdw->i2c_pend_types |=
621 PVR2_I2C_PEND_REFRESH;
622 }
623 }
624 }
625 if (hdw->i2c_pend_types & PVR2_I2C_PEND_REFRESH) {
626 const struct pvr2_i2c_op *opf;
627 unsigned long pm;
628 /* Some actual updates are pending. Walk through
629 each update type and perform it. */
630 pvr2_trace(PVR2_TRACE_I2C_CORE,"i2c: PEND_REFRESH"
631 " (0x%lx)",hdw->i2c_pend_mask);
632 hdw->i2c_pend_types &= ~PVR2_I2C_PEND_REFRESH;
633 pm = hdw->i2c_pend_mask;
634 hdw->i2c_pend_mask = 0;
635 for (idx = 0, msk = 1; pm; idx++, msk <<= 1) {
636 if (!(pm & msk)) continue;
637 pm &= ~msk;
638 list_for_each(item,&hdw->i2c_clients) {
639 cp = list_entry(item,
640 struct pvr2_i2c_client,
641 list);
642 if (cp->pend_mask & msk) {
643 cp->pend_mask &= ~msk;
644 cp->recv_enable = !0;
645 } else {
646 cp->recv_enable = 0;
647 }
648 }
649 opf = pvr2_i2c_get_op(idx);
650 if (!opf) continue;
651 mutex_unlock(&hdw->i2c_list_lock);
652 opf->update(hdw);
653 mutex_lock(&hdw->i2c_list_lock);
654 }
655 }
656 pvr2_trace(PVR2_TRACE_I2C_CORE,"i2c: core_sync END");
657 } while (0); mutex_unlock(&hdw->i2c_list_lock);
658}
659
660int pvr2_i2c_core_check_stale(struct pvr2_hdw *hdw)
661{
662 unsigned long msk,sm,pm;
663 unsigned int idx;
664 const struct pvr2_i2c_op *opf;
665 struct list_head *item;
666 struct pvr2_i2c_client *cp;
667 unsigned int pt = 0;
668
669 pvr2_trace(PVR2_TRACE_I2C_CORE,"pvr2_i2c_core_check_stale BEGIN");
670
671 pm = hdw->i2c_active_mask;
672 sm = 0;
673 for (idx = 0, msk = 1; pm; idx++, msk <<= 1) {
674 if (!(msk & pm)) continue;
675 pm &= ~msk;
676 opf = pvr2_i2c_get_op(idx);
677 if (!opf) continue;
678 if (opf->check(hdw)) {
679 sm |= msk;
680 }
681 }
682 if (sm) pt |= PVR2_I2C_PEND_STALE;
683
684 list_for_each(item,&hdw->i2c_clients) {
685 cp = list_entry(item,struct pvr2_i2c_client,list);
686 if (!handler_check(cp)) continue;
687 pt |= PVR2_I2C_PEND_CLIENT;
688 }
689
690 if (pt) {
691 mutex_lock(&hdw->i2c_list_lock); do {
692 hdw->i2c_pend_types |= pt;
693 hdw->i2c_stale_mask |= sm;
694 hdw->i2c_pend_mask |= hdw->i2c_stale_mask;
695 } while (0); mutex_unlock(&hdw->i2c_list_lock);
696 }
697
698 pvr2_trace(PVR2_TRACE_I2C_CORE,
699 "i2c: types=0x%x stale=0x%lx pend=0x%lx",
700 hdw->i2c_pend_types,
701 hdw->i2c_stale_mask,
702 hdw->i2c_pend_mask);
703 pvr2_trace(PVR2_TRACE_I2C_CORE,"pvr2_i2c_core_check_stale END");
704
705 return (hdw->i2c_pend_types & PVR2_I2C_PEND_ALL) != 0;
706}
707
708unsigned int pvr2_i2c_client_describe(struct pvr2_i2c_client *cp,
709 unsigned int detail,
710 char *buf,unsigned int maxlen)
711{
712 unsigned int ccnt,bcnt;
713 int spcfl = 0;
714 const struct pvr2_i2c_op *opf;
715
716 ccnt = 0;
717 if (detail & PVR2_I2C_DETAIL_DEBUG) {
718 bcnt = scnprintf(buf,maxlen,
719 "ctxt=%p ctl_mask=0x%lx",
720 cp,cp->ctl_mask);
721 ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
722 spcfl = !0;
723 }
724 bcnt = scnprintf(buf,maxlen,
725 "%s%s @ 0x%x",
726 (spcfl ? " " : ""),
727 cp->client->name,
728 cp->client->addr);
729 ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
730 if ((detail & PVR2_I2C_DETAIL_HANDLER) &&
731 cp->handler && cp->handler->func_table->describe) {
732 bcnt = scnprintf(buf,maxlen," (");
733 ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
734 bcnt = cp->handler->func_table->describe(
735 cp->handler->func_data,buf,maxlen);
736 ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
737 bcnt = scnprintf(buf,maxlen,")");
738 ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
739 }
740 if ((detail & PVR2_I2C_DETAIL_CTLMASK) && cp->ctl_mask) {
741 unsigned int idx;
742 unsigned long msk,sm;
743 int spcfl;
744 bcnt = scnprintf(buf,maxlen," [");
745 ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
746 sm = 0;
747 spcfl = 0;
748 for (idx = 0, msk = 1; msk; idx++, msk <<= 1) {
749 if (!(cp->ctl_mask & msk)) continue;
750 opf = pvr2_i2c_get_op(idx);
751 if (opf) {
752 bcnt = scnprintf(buf,maxlen,"%s%s",
753 spcfl ? " " : "",
754 opf->name);
755 ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
756 spcfl = !0;
757 } else {
758 sm |= msk;
759 }
760 }
761 if (sm) {
762 bcnt = scnprintf(buf,maxlen,"%s%lx",
763 idx != 0 ? " " : "",sm);
764 ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
765 }
766 bcnt = scnprintf(buf,maxlen,"]");
767 ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
768 }
769 return ccnt;
770}
771
772unsigned int pvr2_i2c_report(struct pvr2_hdw *hdw,
773 char *buf,unsigned int maxlen)
774{
775 unsigned int ccnt,bcnt;
776 struct list_head *item;
777 struct pvr2_i2c_client *cp;
778 ccnt = 0;
779 mutex_lock(&hdw->i2c_list_lock); do {
780 list_for_each(item,&hdw->i2c_clients) {
781 cp = list_entry(item,struct pvr2_i2c_client,list);
782 bcnt = pvr2_i2c_client_describe(
783 cp,
784 (PVR2_I2C_DETAIL_HANDLER|
785 PVR2_I2C_DETAIL_CTLMASK),
786 buf,maxlen);
787 ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
788 bcnt = scnprintf(buf,maxlen,"\n");
789 ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
790 }
791 } while (0); mutex_unlock(&hdw->i2c_list_lock);
792 return ccnt;
793}
794
795static int pvr2_i2c_attach_inform(struct i2c_client *client)
796{
797 struct pvr2_hdw *hdw = (struct pvr2_hdw *)(client->adapter->algo_data);
798 struct pvr2_i2c_client *cp;
799 int fl = !(hdw->i2c_pend_types & PVR2_I2C_PEND_ALL);
800 cp = kmalloc(sizeof(*cp),GFP_KERNEL);
801 trace_i2c("i2c_attach [client=%s @ 0x%x ctxt=%p]",
802 client->name,
803 client->addr,cp);
804 if (!cp) return -ENOMEM;
805 memset(cp,0,sizeof(*cp));
806 INIT_LIST_HEAD(&cp->list);
807 cp->client = client;
808 mutex_lock(&hdw->i2c_list_lock); do {
809 list_add_tail(&cp->list,&hdw->i2c_clients);
810 hdw->i2c_pend_types |= PVR2_I2C_PEND_DETECT;
811 } while (0); mutex_unlock(&hdw->i2c_list_lock);
812 if (fl) pvr2_hdw_poll_trigger_unlocked(hdw);
813 return 0;
814}
815
816static int pvr2_i2c_detach_inform(struct i2c_client *client)
817{
818 struct pvr2_hdw *hdw = (struct pvr2_hdw *)(client->adapter->algo_data);
819 struct pvr2_i2c_client *cp;
820 struct list_head *item,*nc;
821 unsigned long amask = 0;
822 int foundfl = 0;
823 mutex_lock(&hdw->i2c_list_lock); do {
824 list_for_each_safe(item,nc,&hdw->i2c_clients) {
825 cp = list_entry(item,struct pvr2_i2c_client,list);
826 if (cp->client == client) {
827 trace_i2c("pvr2_i2c_detach"
828 " [client=%s @ 0x%x ctxt=%p]",
829 client->name,
830 client->addr,cp);
831 if (cp->handler &&
832 cp->handler->func_table->detach) {
833 cp->handler->func_table->detach(
834 cp->handler->func_data);
835 }
836 list_del(&cp->list);
837 kfree(cp);
838 foundfl = !0;
839 continue;
840 }
841 amask |= cp->ctl_mask;
842 }
843 hdw->i2c_active_mask = amask;
844 } while (0); mutex_unlock(&hdw->i2c_list_lock);
845 if (!foundfl) {
846 trace_i2c("pvr2_i2c_detach [client=%s @ 0x%x ctxt=<unknown>]",
847 client->name,
848 client->addr);
849 }
850 return 0;
851}
852
853static struct i2c_algorithm pvr2_i2c_algo_template = {
854 .master_xfer = pvr2_i2c_xfer,
855 .algo_control = pvr2_i2c_control,
856 .functionality = pvr2_i2c_functionality,
857};
858
859static struct i2c_adapter pvr2_i2c_adap_template = {
860 .owner = THIS_MODULE,
861 .class = I2C_CLASS_TV_ANALOG,
862 .id = I2C_HW_B_BT848,
863 .client_register = pvr2_i2c_attach_inform,
864 .client_unregister = pvr2_i2c_detach_inform,
865};
866
867static void do_i2c_scan(struct pvr2_hdw *hdw)
868{
869 struct i2c_msg msg[1];
870 int i,rc;
871 msg[0].addr = 0;
872 msg[0].flags = I2C_M_RD;
873 msg[0].len = 0;
874 msg[0].buf = 0;
875 printk("%s: i2c scan beginning\n",hdw->name);
876 for (i = 0; i < 128; i++) {
877 msg[0].addr = i;
878 rc = i2c_transfer(&hdw->i2c_adap,msg,
879 sizeof(msg)/sizeof(msg[0]));
880 if (rc != 1) continue;
881 printk("%s: i2c scan: found device @ 0x%x\n",hdw->name,i);
882 }
883 printk("%s: i2c scan done.\n",hdw->name);
884}
885
886void pvr2_i2c_core_init(struct pvr2_hdw *hdw)
887{
888 unsigned int idx;
889
890 // The default action for all possible I2C addresses is just to do
891 // the transfer normally.
892 for (idx = 0; idx < PVR2_I2C_FUNC_CNT; idx++) {
893 hdw->i2c_func[idx] = pvr2_i2c_basic_op;
894 }
895
896#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
897 // If however we're dealing with new hardware, insert some hacks in
898 // the I2C transfer stack to let things work better.
899 if (hdw->hdw_type == PVR2_HDW_TYPE_24XXX) {
900 hdw->i2c_func[0x1b] = i2c_hack_wm8775;
901 hdw->i2c_func[0x44] = i2c_hack_cx25840;
902 }
903#endif
904
905 // Configure the adapter and set up everything else related to it.
906 memcpy(&hdw->i2c_adap,&pvr2_i2c_adap_template,sizeof(hdw->i2c_adap));
907 memcpy(&hdw->i2c_algo,&pvr2_i2c_algo_template,sizeof(hdw->i2c_algo));
908 strlcpy(hdw->i2c_adap.name,hdw->name,sizeof(hdw->i2c_adap.name));
909 hdw->i2c_adap.algo = &hdw->i2c_algo;
910 hdw->i2c_adap.algo_data = hdw;
911 hdw->i2c_pend_mask = 0;
912 hdw->i2c_stale_mask = 0;
913 hdw->i2c_active_mask = 0;
914 INIT_LIST_HEAD(&hdw->i2c_clients);
915 mutex_init(&hdw->i2c_list_lock);
916 hdw->i2c_linked = !0;
917 i2c_add_adapter(&hdw->i2c_adap);
918 if (i2c_scan) do_i2c_scan(hdw);
919}
920
921void pvr2_i2c_core_done(struct pvr2_hdw *hdw)
922{
923 if (hdw->i2c_linked) {
924 i2c_del_adapter(&hdw->i2c_adap);
925 hdw->i2c_linked = 0;
926 }
927}
928
929/*
930 Stuff for Emacs to see, in order to encourage consistent editing style:
931 *** Local Variables: ***
932 *** mode: c ***
933 *** fill-column: 75 ***
934 *** tab-width: 8 ***
935 *** c-basic-offset: 8 ***
936 *** End: ***
937 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.h b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.h
new file mode 100644
index 0000000000..e8af5b0ed3
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.h
@@ -0,0 +1,96 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21#ifndef __PVRUSB2_I2C_CORE_H
22#define __PVRUSB2_I2C_CORE_H
23
24#include <linux/list.h>
25#include <linux/i2c.h>
26
27struct pvr2_hdw;
28struct pvr2_i2c_client;
29struct pvr2_i2c_handler;
30struct pvr2_i2c_handler_functions;
31struct pvr2_i2c_op;
32struct pvr2_i2c_op_functions;
33
34struct pvr2_i2c_client {
35 struct i2c_client *client;
36 struct pvr2_i2c_handler *handler;
37 struct list_head list;
38 int detected_flag;
39 int recv_enable;
40 unsigned long pend_mask;
41 unsigned long ctl_mask;
42};
43
44struct pvr2_i2c_handler {
45 void *func_data;
46 const struct pvr2_i2c_handler_functions *func_table;
47};
48
49struct pvr2_i2c_handler_functions {
50 void (*detach)(void *);
51 int (*check)(void *);
52 void (*update)(void *);
53 unsigned int (*describe)(void *,char *,unsigned int);
54};
55
56struct pvr2_i2c_op {
57 int (*check)(struct pvr2_hdw *);
58 void (*update)(struct pvr2_hdw *);
59 const char *name;
60};
61
62void pvr2_i2c_core_init(struct pvr2_hdw *);
63void pvr2_i2c_core_done(struct pvr2_hdw *);
64
65int pvr2_i2c_client_cmd(struct pvr2_i2c_client *,unsigned int cmd,void *arg);
66int pvr2_i2c_core_cmd(struct pvr2_hdw *,unsigned int cmd,void *arg);
67
68int pvr2_i2c_core_check_stale(struct pvr2_hdw *);
69void pvr2_i2c_core_sync(struct pvr2_hdw *);
70unsigned int pvr2_i2c_report(struct pvr2_hdw *,char *buf,unsigned int maxlen);
71#define PVR2_I2C_DETAIL_DEBUG 0x0001
72#define PVR2_I2C_DETAIL_HANDLER 0x0002
73#define PVR2_I2C_DETAIL_CTLMASK 0x0004
74#define PVR2_I2C_DETAIL_ALL (\
75 PVR2_I2C_DETAIL_DEBUG |\
76 PVR2_I2C_DETAIL_HANDLER |\
77 PVR2_I2C_DETAIL_CTLMASK)
78unsigned int pvr2_i2c_client_describe(struct pvr2_i2c_client *,
79 unsigned int detail_mask,
80 char *buf,unsigned int maxlen);
81
82void pvr2_i2c_probe(struct pvr2_hdw *,struct pvr2_i2c_client *);
83const struct pvr2_i2c_op *pvr2_i2c_get_op(unsigned int idx);
84
85#endif /* __PVRUSB2_I2C_CORE_H */
86
87
88/*
89 Stuff for Emacs to see, in order to encourage consistent editing style:
90 *** Local Variables: ***
91 *** mode: c ***
92 *** fill-column: 75 ***
93 *** tab-width: 8 ***
94 *** c-basic-offset: 8 ***
95 *** End: ***
96 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-io.c b/drivers/media/video/pvrusb2/pvrusb2-io.c
new file mode 100644
index 0000000000..a984c91f57
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-io.c
@@ -0,0 +1,695 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21
22#include "pvrusb2-io.h"
23#include "pvrusb2-debug.h"
24#include <linux/errno.h>
25#include <linux/string.h>
26#include <linux/slab.h>
27#include <linux/mutex.h>
28
29#define BUFFER_SIG 0x47653271
30
31// #define SANITY_CHECK_BUFFERS
32
33
34#ifdef SANITY_CHECK_BUFFERS
35#define BUFFER_CHECK(bp) do { \
36 if ((bp)->signature != BUFFER_SIG) { \
37 pvr2_trace(PVR2_TRACE_ERROR_LEGS, \
38 "Buffer %p is bad at %s:%d", \
39 (bp),__FILE__,__LINE__); \
40 pvr2_buffer_describe(bp,"BadSig"); \
41 BUG(); \
42 } \
43} while (0)
44#else
45#define BUFFER_CHECK(bp) do {} while(0)
46#endif
47
/* A USB bulk transfer stream.  Buffers cycle among three pools:
   idle (empty, available) -> queued (URB submitted, awaiting data)
   -> ready (data received, awaiting consumer) -> idle again.
   For each pool, *_count is the number of buffers in the pool and
   *_bcount is the byte total (capacity for idle/queued, actual
   payload for ready).  list_lock guards the pool lists and counters
   (taken from URB completion context); mutex serializes the slower
   administrative operations. */
struct pvr2_stream {
	/* Buffers queued for reading */
	struct list_head queued_list;
	unsigned int q_count;
	unsigned int q_bcount;
	/* Buffers with retrieved data */
	struct list_head ready_list;
	unsigned int r_count;
	unsigned int r_bcount;
	/* Buffers available for use */
	struct list_head idle_list;
	unsigned int i_count;
	unsigned int i_bcount;
	/* Pointers to all buffers */
	struct pvr2_buffer **buffers;
	/* Array size of buffers */
	unsigned int buffer_slot_count;
	/* Total buffers actually in circulation */
	unsigned int buffer_total_count;
	/* Designed number of buffers to be in circulation */
	unsigned int buffer_target_count;
	/* Executed when ready list become non-empty */
	pvr2_stream_callback callback_func;
	void *callback_data;
	/* Context for transfer endpoint */
	struct usb_device *dev;
	int endpoint;
	/* Overhead for mutex enforcement */
	spinlock_t list_lock;
	struct mutex mutex;
	/* Tracking state for tolerating errors */
	unsigned int fail_count;
	unsigned int fail_tolerance;
};
82
/* One transfer buffer.  The storage itself (ptr) is owned by the
   client and attached via pvr2_buffer_set_buffer(); this struct only
   tracks it.  signature is checked against BUFFER_SIG when
   SANITY_CHECK_BUFFERS is enabled. */
struct pvr2_buffer {
	int id;                    /* index within stream's buffers[] array */
	int signature;             /* BUFFER_SIG while alive, 0 after done */
	enum pvr2_buffer_state state;
	void *ptr;                 /* Pointer to storage area */
	unsigned int max_count;    /* Size of storage area */
	unsigned int used_count;   /* Amount of valid data in storage area */
	int status;                /* Transfer result status */
	struct pvr2_stream *stream;
	struct list_head list_overhead; /* links buffer into one pool list */
	struct urb *purb;          /* URB used for the bulk transfer */
};
95
96const char *pvr2_buffer_state_decode(enum pvr2_buffer_state st)
97{
98 switch (st) {
99 case pvr2_buffer_state_none: return "none";
100 case pvr2_buffer_state_idle: return "idle";
101 case pvr2_buffer_state_queued: return "queued";
102 case pvr2_buffer_state_ready: return "ready";
103 }
104 return "unknown";
105}
106
107void pvr2_buffer_describe(struct pvr2_buffer *bp,const char *msg)
108{
109 pvr2_trace(PVR2_TRACE_INFO,
110 "buffer%s%s %p state=%s id=%d status=%d"
111 " stream=%p purb=%p sig=0x%x",
112 (msg ? " " : ""),
113 (msg ? msg : ""),
114 bp,
115 (bp ? pvr2_buffer_state_decode(bp->state) : "(invalid)"),
116 (bp ? bp->id : 0),
117 (bp ? bp->status : 0),
118 (bp ? bp->stream : 0),
119 (bp ? bp->purb : 0),
120 (bp ? bp->signature : 0));
121}
122
/* Unlink a buffer from whichever pool list currently holds it and
   decrement that pool's buffer and byte counters, leaving the buffer
   in state "none".  A buffer already in state "none" is untouched.
   All visible callers invoke this while holding sp->list_lock. */
static void pvr2_buffer_remove(struct pvr2_buffer *bp)
{
	unsigned int *cnt;
	unsigned int *bcnt;
	unsigned int ccnt;
	struct pvr2_stream *sp = bp->stream;
	switch (bp->state) {
	case pvr2_buffer_state_idle:
		cnt = &sp->i_count;
		bcnt = &sp->i_bcount;
		ccnt = bp->max_count;
		break;
	case pvr2_buffer_state_queued:
		cnt = &sp->q_count;
		bcnt = &sp->q_bcount;
		ccnt = bp->max_count;
		break;
	case pvr2_buffer_state_ready:
		cnt = &sp->r_count;
		bcnt = &sp->r_bcount;
		/* ready pool accounts actual payload, not capacity */
		ccnt = bp->used_count;
		break;
	default:
		return;
	}
	list_del_init(&bp->list_overhead);
	(*cnt)--;
	(*bcnt) -= ccnt;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/"
		   " bufferPool %8s dec cap=%07d cnt=%02d",
		   pvr2_buffer_state_decode(bp->state),*bcnt,*cnt);
	bp->state = pvr2_buffer_state_none;
}
157
/* Move a buffer to state "none" (on no pool list), taking the
   stream's list lock around the transition. */
static void pvr2_buffer_set_none(struct pvr2_buffer *bp)
{
	unsigned long irq_flags;
	struct pvr2_stream *sp;
	BUFFER_CHECK(bp);
	sp = bp->stream;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s",
		   bp,
		   pvr2_buffer_state_decode(bp->state),
		   pvr2_buffer_state_decode(pvr2_buffer_state_none));
	spin_lock_irqsave(&sp->list_lock,irq_flags);
	pvr2_buffer_remove(bp);
	spin_unlock_irqrestore(&sp->list_lock,irq_flags);
}
173
/* Move a buffer to the ready pool (data available for consumption).
   Returns non-zero if the ready list was empty beforehand, i.e. this
   buffer is the one that made data newly available - the caller uses
   that to decide whether to fire the stream callback. */
static int pvr2_buffer_set_ready(struct pvr2_buffer *bp)
{
	int fl;
	unsigned long irq_flags;
	struct pvr2_stream *sp;
	BUFFER_CHECK(bp);
	sp = bp->stream;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s",
		   bp,
		   pvr2_buffer_state_decode(bp->state),
		   pvr2_buffer_state_decode(pvr2_buffer_state_ready));
	spin_lock_irqsave(&sp->list_lock,irq_flags);
	fl = (sp->r_count == 0);
	pvr2_buffer_remove(bp);
	list_add_tail(&bp->list_overhead,&sp->ready_list);
	bp->state = pvr2_buffer_state_ready;
	(sp->r_count)++;
	/* ready pool tracks actual received bytes */
	sp->r_bcount += bp->used_count;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/"
		   " bufferPool %8s inc cap=%07d cnt=%02d",
		   pvr2_buffer_state_decode(bp->state),
		   sp->r_bcount,sp->r_count);
	spin_unlock_irqrestore(&sp->list_lock,irq_flags);
	return fl;
}
201
/* Move a buffer to the idle pool (empty, available for queueing),
   updating the idle counters under the stream's list lock. */
static void pvr2_buffer_set_idle(struct pvr2_buffer *bp)
{
	unsigned long irq_flags;
	struct pvr2_stream *sp;
	BUFFER_CHECK(bp);
	sp = bp->stream;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s",
		   bp,
		   pvr2_buffer_state_decode(bp->state),
		   pvr2_buffer_state_decode(pvr2_buffer_state_idle));
	spin_lock_irqsave(&sp->list_lock,irq_flags);
	pvr2_buffer_remove(bp);
	list_add_tail(&bp->list_overhead,&sp->idle_list);
	bp->state = pvr2_buffer_state_idle;
	(sp->i_count)++;
	/* idle pool tracks buffer capacity */
	sp->i_bcount += bp->max_count;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/"
		   " bufferPool %8s inc cap=%07d cnt=%02d",
		   pvr2_buffer_state_decode(bp->state),
		   sp->i_bcount,sp->i_count);
	spin_unlock_irqrestore(&sp->list_lock,irq_flags);
}
226
/* Move a buffer to the queued pool (URB about to be / in flight),
   updating the queued counters under the stream's list lock. */
static void pvr2_buffer_set_queued(struct pvr2_buffer *bp)
{
	unsigned long irq_flags;
	struct pvr2_stream *sp;
	BUFFER_CHECK(bp);
	sp = bp->stream;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s",
		   bp,
		   pvr2_buffer_state_decode(bp->state),
		   pvr2_buffer_state_decode(pvr2_buffer_state_queued));
	spin_lock_irqsave(&sp->list_lock,irq_flags);
	pvr2_buffer_remove(bp);
	list_add_tail(&bp->list_overhead,&sp->queued_list);
	bp->state = pvr2_buffer_state_queued;
	(sp->q_count)++;
	/* queued pool tracks buffer capacity */
	sp->q_bcount += bp->max_count;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/"
		   " bufferPool %8s inc cap=%07d cnt=%02d",
		   pvr2_buffer_state_decode(bp->state),
		   sp->q_bcount,sp->q_count);
	spin_unlock_irqrestore(&sp->list_lock,irq_flags);
}
251
252static void pvr2_buffer_wipe(struct pvr2_buffer *bp)
253{
254 if (bp->state == pvr2_buffer_state_queued) {
255 usb_kill_urb(bp->purb);
256 }
257}
258
259static int pvr2_buffer_init(struct pvr2_buffer *bp,
260 struct pvr2_stream *sp,
261 unsigned int id)
262{
263 memset(bp,0,sizeof(*bp));
264 bp->signature = BUFFER_SIG;
265 bp->id = id;
266 pvr2_trace(PVR2_TRACE_BUF_POOL,
267 "/*---TRACE_FLOW---*/ bufferInit %p stream=%p",bp,sp);
268 bp->stream = sp;
269 bp->state = pvr2_buffer_state_none;
270 INIT_LIST_HEAD(&bp->list_overhead);
271 bp->purb = usb_alloc_urb(0,GFP_KERNEL);
272 if (! bp->purb) return -ENOMEM;
273#ifdef SANITY_CHECK_BUFFERS
274 pvr2_buffer_describe(bp,"create");
275#endif
276 return 0;
277}
278
279static void pvr2_buffer_done(struct pvr2_buffer *bp)
280{
281#ifdef SANITY_CHECK_BUFFERS
282 pvr2_buffer_describe(bp,"delete");
283#endif
284 pvr2_buffer_wipe(bp);
285 pvr2_buffer_set_none(bp);
286 bp->signature = 0;
287 bp->stream = 0;
288 if (bp->purb) usb_free_urb(bp->purb);
289 pvr2_trace(PVR2_TRACE_BUF_POOL,"/*---TRACE_FLOW---*/"
290 " bufferDone %p",bp);
291}
292
/* Grow or shrink the stream's buffer population to exactly cnt
   buffers.  The pointer array is sized in multiples of 32 slots and
   only reallocated when the rounded size actually changes.  New
   buffers are placed on the idle list.  Shrinking destroys buffers
   from the top of the array down; note that pvr2_buffer_done() does
   not require the buffer to be idle first.  Returns 0 on success or
   -ENOMEM (a partial grow is left in place on failure).  Caller is
   expected to hold sp->mutex. */
static int pvr2_stream_buffer_count(struct pvr2_stream *sp,unsigned int cnt)
{
	int ret;
	unsigned int scnt;

	/* Allocate buffers pointer array in multiples of 32 entries */
	if (cnt == sp->buffer_total_count) return 0;

	pvr2_trace(PVR2_TRACE_BUF_POOL,
		   "/*---TRACE_FLOW---*/ poolResize "
		   " stream=%p cur=%d adj=%+d",
		   sp,
		   sp->buffer_total_count,
		   cnt-sp->buffer_total_count);

	/* Round requested count up to the next multiple of 32 to get
	   the slot-array size */
	scnt = cnt & ~0x1f;
	if (cnt > scnt) scnt += 0x20;

	if (cnt > sp->buffer_total_count) {
		/* Growing: enlarge the pointer array first if needed */
		if (scnt > sp->buffer_slot_count) {
			struct pvr2_buffer **nb;
			nb = kmalloc(scnt * sizeof(*nb),GFP_KERNEL);
			if (!nb) return -ENOMEM;
			if (sp->buffer_slot_count) {
				memcpy(nb,sp->buffers,
				       sp->buffer_slot_count * sizeof(*nb));
				kfree(sp->buffers);
			}
			sp->buffers = nb;
			sp->buffer_slot_count = scnt;
		}
		/* ...then create buffers until the target is reached */
		while (sp->buffer_total_count < cnt) {
			struct pvr2_buffer *bp;
			bp = kmalloc(sizeof(*bp),GFP_KERNEL);
			if (!bp) return -ENOMEM;
			ret = pvr2_buffer_init(bp,sp,sp->buffer_total_count);
			if (ret) {
				kfree(bp);
				return -ENOMEM;
			}
			sp->buffers[sp->buffer_total_count] = bp;
			(sp->buffer_total_count)++;
			pvr2_buffer_set_idle(bp);
		}
	} else {
		/* Shrinking: destroy buffers from the top down... */
		while (sp->buffer_total_count > cnt) {
			struct pvr2_buffer *bp;
			bp = sp->buffers[sp->buffer_total_count - 1];
			/* Paranoia */
			sp->buffers[sp->buffer_total_count - 1] = 0;
			(sp->buffer_total_count)--;
			pvr2_buffer_done(bp);
			kfree(bp);
		}
		/* ...and trim the pointer array if the rounded size
		   dropped (scnt == 0 frees the array entirely) */
		if (scnt < sp->buffer_slot_count) {
			struct pvr2_buffer **nb = 0;
			if (scnt) {
				nb = kmalloc(scnt * sizeof(*nb),GFP_KERNEL);
				if (!nb) return -ENOMEM;
				memcpy(nb,sp->buffers,scnt * sizeof(*nb));
			}
			kfree(sp->buffers);
			sp->buffers = nb;
			sp->buffer_slot_count = scnt;
		}
	}
	return 0;
}
361
/* Try to bring the actual buffer population in line with
   buffer_target_count.  Growing is done unconditionally; shrinking
   only reclaims trailing buffers that are currently idle (buffers
   still queued or ready are left alone - a later call can finish the
   job).  Returns 0, or -ENOMEM from the grow path.  Caller is
   expected to hold sp->mutex. */
static int pvr2_stream_achieve_buffer_count(struct pvr2_stream *sp)
{
	struct pvr2_buffer *bp;
	unsigned int cnt;

	if (sp->buffer_total_count == sp->buffer_target_count) return 0;

	pvr2_trace(PVR2_TRACE_BUF_POOL,
		   "/*---TRACE_FLOW---*/"
		   " poolCheck stream=%p cur=%d tgt=%d",
		   sp,sp->buffer_total_count,sp->buffer_target_count);

	if (sp->buffer_total_count < sp->buffer_target_count) {
		return pvr2_stream_buffer_count(sp,sp->buffer_target_count);
	}

	/* Count how many buffers at the top of the array are idle and
	   therefore safe to release right now */
	cnt = 0;
	while ((sp->buffer_total_count - cnt) > sp->buffer_target_count) {
		bp = sp->buffers[sp->buffer_total_count - (cnt + 1)];
		if (bp->state != pvr2_buffer_state_idle) break;
		cnt++;
	}
	if (cnt) {
		pvr2_stream_buffer_count(sp,sp->buffer_total_count - cnt);
	}

	return 0;
}
390
/* Cancel every queued transfer and return those buffers to the idle
   pool, then retry any deferred pool shrink.  Caller is expected to
   hold sp->mutex. */
static void pvr2_stream_internal_flush(struct pvr2_stream *sp)
{
	struct list_head *lp;
	struct pvr2_buffer *bp1;
	while ((lp = sp->queued_list.next) != &sp->queued_list) {
		bp1 = list_entry(lp,struct pvr2_buffer,list_overhead);
		pvr2_buffer_wipe(bp1);
		/* At this point, we should be guaranteed that no
		   completion callback may happen on this buffer. But it's
		   possible that it might have completed after we noticed
		   it but before we wiped it. So double check its status
		   here first. */
		if (bp1->state != pvr2_buffer_state_queued) continue;
		pvr2_buffer_set_idle(bp1);
	}
	/* A previous shrink attempt may have been blocked by busy
	   buffers; now that everything is idle, try again */
	if (sp->buffer_total_count != sp->buffer_target_count) {
		pvr2_stream_achieve_buffer_count(sp);
	}
}
410
411static void pvr2_stream_init(struct pvr2_stream *sp)
412{
413 spin_lock_init(&sp->list_lock);
414 mutex_init(&sp->mutex);
415 INIT_LIST_HEAD(&sp->queued_list);
416 INIT_LIST_HEAD(&sp->ready_list);
417 INIT_LIST_HEAD(&sp->idle_list);
418}
419
420static void pvr2_stream_done(struct pvr2_stream *sp)
421{
422 mutex_lock(&sp->mutex); do {
423 pvr2_stream_internal_flush(sp);
424 pvr2_stream_buffer_count(sp,0);
425 } while (0); mutex_unlock(&sp->mutex);
426}
427
428static void buffer_complete(struct urb *urb, struct pt_regs *regs)
429{
430 struct pvr2_buffer *bp = urb->context;
431 struct pvr2_stream *sp;
432 unsigned long irq_flags;
433 BUFFER_CHECK(bp);
434 sp = bp->stream;
435 bp->used_count = 0;
436 bp->status = 0;
437 pvr2_trace(PVR2_TRACE_BUF_FLOW,
438 "/*---TRACE_FLOW---*/ bufferComplete %p stat=%d cnt=%d",
439 bp,urb->status,urb->actual_length);
440 spin_lock_irqsave(&sp->list_lock,irq_flags);
441 if ((!(urb->status)) ||
442 (urb->status == -ENOENT) ||
443 (urb->status == -ECONNRESET) ||
444 (urb->status == -ESHUTDOWN)) {
445 bp->used_count = urb->actual_length;
446 if (sp->fail_count) {
447 pvr2_trace(PVR2_TRACE_TOLERANCE,
448 "stream %p transfer ok"
449 " - fail count reset",sp);
450 sp->fail_count = 0;
451 }
452 } else if (sp->fail_count < sp->fail_tolerance) {
453 // We can tolerate this error, because we're below the
454 // threshold...
455 (sp->fail_count)++;
456 pvr2_trace(PVR2_TRACE_TOLERANCE,
457 "stream %p ignoring error %d"
458 " - fail count increased to %u",
459 sp,urb->status,sp->fail_count);
460 } else {
461 bp->status = urb->status;
462 }
463 spin_unlock_irqrestore(&sp->list_lock,irq_flags);
464 pvr2_buffer_set_ready(bp);
465 if (sp && sp->callback_func) {
466 sp->callback_func(sp->callback_data);
467 }
468}
469
470struct pvr2_stream *pvr2_stream_create(void)
471{
472 struct pvr2_stream *sp;
473 sp = kmalloc(sizeof(*sp),GFP_KERNEL);
474 if (!sp) return sp;
475 memset(sp,0,sizeof(*sp));
476 pvr2_trace(PVR2_TRACE_INIT,"pvr2_stream_create: sp=%p",sp);
477 pvr2_stream_init(sp);
478 return sp;
479}
480
481void pvr2_stream_destroy(struct pvr2_stream *sp)
482{
483 if (!sp) return;
484 pvr2_trace(PVR2_TRACE_INIT,"pvr2_stream_destroy: sp=%p",sp);
485 pvr2_stream_done(sp);
486 kfree(sp);
487}
488
489void pvr2_stream_setup(struct pvr2_stream *sp,
490 struct usb_device *dev,
491 int endpoint,
492 unsigned int tolerance)
493{
494 mutex_lock(&sp->mutex); do {
495 pvr2_stream_internal_flush(sp);
496 sp->dev = dev;
497 sp->endpoint = endpoint;
498 sp->fail_tolerance = tolerance;
499 } while(0); mutex_unlock(&sp->mutex);
500}
501
502void pvr2_stream_set_callback(struct pvr2_stream *sp,
503 pvr2_stream_callback func,
504 void *data)
505{
506 unsigned long irq_flags;
507 mutex_lock(&sp->mutex); do {
508 spin_lock_irqsave(&sp->list_lock,irq_flags);
509 sp->callback_data = data;
510 sp->callback_func = func;
511 spin_unlock_irqrestore(&sp->list_lock,irq_flags);
512 } while(0); mutex_unlock(&sp->mutex);
513}
514
515/* Query / set the nominal buffer count */
516int pvr2_stream_get_buffer_count(struct pvr2_stream *sp)
517{
518 return sp->buffer_target_count;
519}
520
521int pvr2_stream_set_buffer_count(struct pvr2_stream *sp,unsigned int cnt)
522{
523 int ret;
524 if (sp->buffer_target_count == cnt) return 0;
525 mutex_lock(&sp->mutex); do {
526 sp->buffer_target_count = cnt;
527 ret = pvr2_stream_achieve_buffer_count(sp);
528 } while(0); mutex_unlock(&sp->mutex);
529 return ret;
530}
531
532struct pvr2_buffer *pvr2_stream_get_idle_buffer(struct pvr2_stream *sp)
533{
534 struct list_head *lp = sp->idle_list.next;
535 if (lp == &sp->idle_list) return 0;
536 return list_entry(lp,struct pvr2_buffer,list_overhead);
537}
538
539struct pvr2_buffer *pvr2_stream_get_ready_buffer(struct pvr2_stream *sp)
540{
541 struct list_head *lp = sp->ready_list.next;
542 if (lp == &sp->ready_list) return 0;
543 return list_entry(lp,struct pvr2_buffer,list_overhead);
544}
545
546struct pvr2_buffer *pvr2_stream_get_buffer(struct pvr2_stream *sp,int id)
547{
548 if (id < 0) return 0;
549 if (id >= sp->buffer_total_count) return 0;
550 return sp->buffers[id];
551}
552
553int pvr2_stream_get_ready_count(struct pvr2_stream *sp)
554{
555 return sp->r_count;
556}
557
558int pvr2_stream_get_idle_count(struct pvr2_stream *sp)
559{
560 return sp->i_count;
561}
562
563void pvr2_stream_flush(struct pvr2_stream *sp)
564{
565 mutex_lock(&sp->mutex); do {
566 pvr2_stream_internal_flush(sp);
567 } while(0); mutex_unlock(&sp->mutex);
568}
569
570void pvr2_stream_kill(struct pvr2_stream *sp)
571{
572 struct pvr2_buffer *bp;
573 mutex_lock(&sp->mutex); do {
574 pvr2_stream_internal_flush(sp);
575 while ((bp = pvr2_stream_get_ready_buffer(sp)) != 0) {
576 pvr2_buffer_set_idle(bp);
577 }
578 if (sp->buffer_total_count != sp->buffer_target_count) {
579 pvr2_stream_achieve_buffer_count(sp);
580 }
581 } while(0); mutex_unlock(&sp->mutex);
582}
583
584int pvr2_buffer_queue(struct pvr2_buffer *bp)
585{
586#undef SEED_BUFFER
587#ifdef SEED_BUFFER
588 unsigned int idx;
589 unsigned int val;
590#endif
591 int ret = 0;
592 struct pvr2_stream *sp;
593 if (!bp) return -EINVAL;
594 sp = bp->stream;
595 mutex_lock(&sp->mutex); do {
596 pvr2_buffer_wipe(bp);
597 if (!sp->dev) {
598 ret = -EIO;
599 break;
600 }
601 pvr2_buffer_set_queued(bp);
602#ifdef SEED_BUFFER
603 for (idx = 0; idx < (bp->max_count) / 4; idx++) {
604 val = bp->id << 24;
605 val |= idx;
606 ((unsigned int *)(bp->ptr))[idx] = val;
607 }
608#endif
609 bp->status = -EINPROGRESS;
610 usb_fill_bulk_urb(bp->purb, // struct urb *urb
611 sp->dev, // struct usb_device *dev
612 // endpoint (below)
613 usb_rcvbulkpipe(sp->dev,sp->endpoint),
614 bp->ptr, // void *transfer_buffer
615 bp->max_count, // int buffer_length
616 buffer_complete,
617 bp);
618 usb_submit_urb(bp->purb,GFP_KERNEL);
619 } while(0); mutex_unlock(&sp->mutex);
620 return ret;
621}
622
623int pvr2_buffer_idle(struct pvr2_buffer *bp)
624{
625 struct pvr2_stream *sp;
626 if (!bp) return -EINVAL;
627 sp = bp->stream;
628 mutex_lock(&sp->mutex); do {
629 pvr2_buffer_wipe(bp);
630 pvr2_buffer_set_idle(bp);
631 if (sp->buffer_total_count != sp->buffer_target_count) {
632 pvr2_stream_achieve_buffer_count(sp);
633 }
634 } while(0); mutex_unlock(&sp->mutex);
635 return 0;
636}
637
/* Attach client-owned storage (ptr, cnt bytes) to a buffer.  Only
   permitted while the buffer is idle (returns -EPERM otherwise),
   because the idle pool's byte accounting must be adjusted in step
   with max_count.  Returns 0 on success, -EINVAL for NULL bp. */
int pvr2_buffer_set_buffer(struct pvr2_buffer *bp,void *ptr,unsigned int cnt)
{
	int ret = 0;
	unsigned long irq_flags;
	struct pvr2_stream *sp;
	if (!bp) return -EINVAL;
	sp = bp->stream;
	mutex_lock(&sp->mutex); do {
		spin_lock_irqsave(&sp->list_lock,irq_flags);
		if (bp->state != pvr2_buffer_state_idle) {
			ret = -EPERM;
		} else {
			bp->ptr = ptr;
			/* Keep the idle pool's capacity total consistent
			   with the buffer's new size */
			bp->stream->i_bcount -= bp->max_count;
			bp->max_count = cnt;
			bp->stream->i_bcount += bp->max_count;
			pvr2_trace(PVR2_TRACE_BUF_FLOW,
				   "/*---TRACE_FLOW---*/ bufferPool "
				   " %8s cap cap=%07d cnt=%02d",
				   pvr2_buffer_state_decode(
					   pvr2_buffer_state_idle),
				   bp->stream->i_bcount,bp->stream->i_count);
		}
		spin_unlock_irqrestore(&sp->list_lock,irq_flags);
	} while(0); mutex_unlock(&sp->mutex);
	return ret;
}
665
666unsigned int pvr2_buffer_get_count(struct pvr2_buffer *bp)
667{
668 return bp->used_count;
669}
670
671int pvr2_buffer_get_status(struct pvr2_buffer *bp)
672{
673 return bp->status;
674}
675
676enum pvr2_buffer_state pvr2_buffer_get_state(struct pvr2_buffer *bp)
677{
678 return bp->state;
679}
680
681int pvr2_buffer_get_id(struct pvr2_buffer *bp)
682{
683 return bp->id;
684}
685
686
687/*
688 Stuff for Emacs to see, in order to encourage consistent editing style:
689 *** Local Variables: ***
690 *** mode: c ***
691 *** fill-column: 75 ***
692 *** tab-width: 8 ***
693 *** c-basic-offset: 8 ***
694 *** End: ***
695 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-io.h b/drivers/media/video/pvrusb2/pvrusb2-io.h
new file mode 100644
index 0000000000..65e11385b2
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-io.h
@@ -0,0 +1,102 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21#ifndef __PVRUSB2_IO_H
22#define __PVRUSB2_IO_H
23
24#include <linux/usb.h>
25#include <linux/list.h>
26
27typedef void (*pvr2_stream_callback)(void *);
28
/* Lifecycle of a stream buffer; decoded for traces by
   pvr2_buffer_state_decode(). */
enum pvr2_buffer_state {
	pvr2_buffer_state_none = 0,    // Not on any list
	pvr2_buffer_state_idle = 1,    // Buffer is ready to be used again
	pvr2_buffer_state_queued = 2,  // Buffer has been queued for filling
	pvr2_buffer_state_ready = 3,   // Buffer has data available
};
35
36struct pvr2_stream;
37struct pvr2_buffer;
38
39const char *pvr2_buffer_state_decode(enum pvr2_buffer_state);
40
41/* Initialize / tear down stream structure */
42struct pvr2_stream *pvr2_stream_create(void);
43void pvr2_stream_destroy(struct pvr2_stream *);
44void pvr2_stream_setup(struct pvr2_stream *,
45 struct usb_device *dev,int endpoint,
46 unsigned int tolerance);
47void pvr2_stream_set_callback(struct pvr2_stream *,
48 pvr2_stream_callback func,
49 void *data);
50
51/* Query / set the nominal buffer count */
52int pvr2_stream_get_buffer_count(struct pvr2_stream *);
53int pvr2_stream_set_buffer_count(struct pvr2_stream *,unsigned int);
54
55/* Get a pointer to a buffer that is either idle, ready, or is specified
56 named. */
57struct pvr2_buffer *pvr2_stream_get_idle_buffer(struct pvr2_stream *);
58struct pvr2_buffer *pvr2_stream_get_ready_buffer(struct pvr2_stream *);
59struct pvr2_buffer *pvr2_stream_get_buffer(struct pvr2_stream *sp,int id);
60
61/* Find out how many buffers are idle or ready */
62int pvr2_stream_get_idle_count(struct pvr2_stream *);
63int pvr2_stream_get_ready_count(struct pvr2_stream *);
64
65/* Kill all pending operations */
66void pvr2_stream_flush(struct pvr2_stream *);
67
68/* Kill all pending buffers and throw away any ready buffers as well */
69void pvr2_stream_kill(struct pvr2_stream *);
70
71/* Set up the actual storage for a buffer */
72int pvr2_buffer_set_buffer(struct pvr2_buffer *,void *ptr,unsigned int cnt);
73
74/* Find out size of data in the given ready buffer */
75unsigned int pvr2_buffer_get_count(struct pvr2_buffer *);
76
77/* Retrieve completion code for given ready buffer */
78int pvr2_buffer_get_status(struct pvr2_buffer *);
79
80/* Retrieve state of given buffer */
81enum pvr2_buffer_state pvr2_buffer_get_state(struct pvr2_buffer *);
82
83/* Retrieve ID of given buffer */
84int pvr2_buffer_get_id(struct pvr2_buffer *);
85
86/* Start reading into given buffer (kill it if needed) */
87int pvr2_buffer_queue(struct pvr2_buffer *);
88
89/* Move buffer back to idle pool (kill it if needed) */
90int pvr2_buffer_idle(struct pvr2_buffer *);
91
92#endif /* __PVRUSB2_IO_H */
93
94/*
95 Stuff for Emacs to see, in order to encourage consistent editing style:
96 *** Local Variables: ***
97 *** mode: c ***
98 *** fill-column: 75 ***
99 *** tab-width: 8 ***
100 *** c-basic-offset: 8 ***
101 *** End: ***
102 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-ioread.c b/drivers/media/video/pvrusb2/pvrusb2-ioread.c
new file mode 100644
index 0000000000..49da062e32
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-ioread.c
@@ -0,0 +1,513 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21
22#include "pvrusb2-ioread.h"
23#include "pvrusb2-debug.h"
24#include <linux/errno.h>
25#include <linux/string.h>
26#include <linux/slab.h>
27#include <linux/mutex.h>
28#include <asm/uaccess.h>
29
30#define BUFFER_COUNT 32
31#define BUFFER_SIZE PAGE_ALIGN(0x4000)
32
/* read()-style adapter over a pvr2_stream.  Owns BUFFER_COUNT
   storage areas that are attached to the stream's buffers, and
   optionally scans the incoming byte stream for a sync key before
   passing data through (sync_state: 1 = searching, 2 = found;
   see pvr2_ioread_filter()). */
struct pvr2_ioread {
	struct pvr2_stream *stream;
	char *buffer_storage[BUFFER_COUNT];
	char *sync_key_ptr;              /* key bytes to search for (owned) */
	unsigned int sync_key_len;       /* 0 disables sync filtering */
	unsigned int sync_buf_offs;      /* how much of the key matched so far */
	unsigned int sync_state;
	unsigned int sync_trashed_count; /* bytes skipped before the key */
	int enabled;         // Streaming is on
	int spigot_open;     // OK to pass data to client
	int stream_running;  // Passing data to client now

	/* State relevant to current buffer being read */
	struct pvr2_buffer *c_buf;
	char *c_data_ptr;
	unsigned int c_data_len;
	unsigned int c_data_offs;
	struct mutex mutex;
};
52
53static int pvr2_ioread_init(struct pvr2_ioread *cp)
54{
55 unsigned int idx;
56
57 cp->stream = 0;
58 mutex_init(&cp->mutex);
59
60 for (idx = 0; idx < BUFFER_COUNT; idx++) {
61 cp->buffer_storage[idx] = kmalloc(BUFFER_SIZE,GFP_KERNEL);
62 if (!(cp->buffer_storage[idx])) break;
63 }
64
65 if (idx < BUFFER_COUNT) {
66 // An allocation appears to have failed
67 for (idx = 0; idx < BUFFER_COUNT; idx++) {
68 if (!(cp->buffer_storage[idx])) continue;
69 kfree(cp->buffer_storage[idx]);
70 }
71 return -ENOMEM;
72 }
73 return 0;
74}
75
76static void pvr2_ioread_done(struct pvr2_ioread *cp)
77{
78 unsigned int idx;
79
80 pvr2_ioread_setup(cp,0);
81 for (idx = 0; idx < BUFFER_COUNT; idx++) {
82 if (!(cp->buffer_storage[idx])) continue;
83 kfree(cp->buffer_storage[idx]);
84 }
85}
86
87struct pvr2_ioread *pvr2_ioread_create(void)
88{
89 struct pvr2_ioread *cp;
90 cp = kmalloc(sizeof(*cp),GFP_KERNEL);
91 if (!cp) return 0;
92 pvr2_trace(PVR2_TRACE_STRUCT,"pvr2_ioread_create id=%p",cp);
93 memset(cp,0,sizeof(*cp));
94 if (pvr2_ioread_init(cp) < 0) {
95 kfree(cp);
96 return 0;
97 }
98 return cp;
99}
100
101void pvr2_ioread_destroy(struct pvr2_ioread *cp)
102{
103 if (!cp) return;
104 pvr2_ioread_done(cp);
105 pvr2_trace(PVR2_TRACE_STRUCT,"pvr2_ioread_destroy id=%p",cp);
106 if (cp->sync_key_ptr) {
107 kfree(cp->sync_key_ptr);
108 cp->sync_key_ptr = 0;
109 }
110 kfree(cp);
111}
112
/* Install (or clear, with sync_key_ptr == NULL or sync_key_len == 0)
   the byte pattern that pvr2_ioread_filter() searches for.  The key
   bytes are copied; the caller keeps ownership of its buffer.  If
   the length is unchanged and the bytes already match, this is a
   no-op.  If an allocation for a new key fails, the key is silently
   left cleared. */
void pvr2_ioread_set_sync_key(struct pvr2_ioread *cp,
			      const char *sync_key_ptr,
			      unsigned int sync_key_len)
{
	if (!cp) return;

	if (!sync_key_ptr) sync_key_len = 0;
	/* Same key as before?  Then nothing to do. */
	if ((sync_key_len == cp->sync_key_len) &&
	    ((!sync_key_len) ||
	     (!memcmp(sync_key_ptr,cp->sync_key_ptr,sync_key_len)))) return;

	if (sync_key_len != cp->sync_key_len) {
		/* Length changed: reallocate the key storage */
		if (cp->sync_key_ptr) {
			kfree(cp->sync_key_ptr);
			cp->sync_key_ptr = 0;
		}
		cp->sync_key_len = 0;
		if (sync_key_len) {
			cp->sync_key_ptr = kmalloc(sync_key_len,GFP_KERNEL);
			if (cp->sync_key_ptr) {
				cp->sync_key_len = sync_key_len;
			}
		}
	}
	if (!cp->sync_key_len) return;
	memcpy(cp->sync_key_ptr,sync_key_ptr,cp->sync_key_len);
}
140
141static void pvr2_ioread_stop(struct pvr2_ioread *cp)
142{
143 if (!(cp->enabled)) return;
144 pvr2_trace(PVR2_TRACE_START_STOP,
145 "/*---TRACE_READ---*/ pvr2_ioread_stop id=%p",cp);
146 pvr2_stream_kill(cp->stream);
147 cp->c_buf = 0;
148 cp->c_data_ptr = 0;
149 cp->c_data_len = 0;
150 cp->c_data_offs = 0;
151 cp->enabled = 0;
152 cp->stream_running = 0;
153 cp->spigot_open = 0;
154 if (cp->sync_state) {
155 pvr2_trace(PVR2_TRACE_DATA_FLOW,
156 "/*---TRACE_READ---*/ sync_state <== 0");
157 cp->sync_state = 0;
158 }
159}
160
/* Start streaming: queue every idle buffer for filling and reset the
   per-read state.  If a sync key is installed, arm the sync search
   (sync_state = 1).  Returns 0 on success (also when already enabled
   or no stream is attached); on a queue failure everything is torn
   back down via pvr2_ioread_stop() and the error is returned.
   Callers hold cp->mutex. */
static int pvr2_ioread_start(struct pvr2_ioread *cp)
{
	int stat;
	struct pvr2_buffer *bp;
	if (cp->enabled) return 0;
	if (!(cp->stream)) return 0;
	pvr2_trace(PVR2_TRACE_START_STOP,
		   "/*---TRACE_READ---*/ pvr2_ioread_start id=%p",cp);
	while ((bp = pvr2_stream_get_idle_buffer(cp->stream)) != 0) {
		stat = pvr2_buffer_queue(bp);
		if (stat < 0) {
			pvr2_trace(PVR2_TRACE_DATA_FLOW,
				   "/*---TRACE_READ---*/"
				   " pvr2_ioread_start id=%p"
				   " error=%d",
				   cp,stat);
			pvr2_ioread_stop(cp);
			return stat;
		}
	}
	cp->enabled = !0;
	cp->c_buf = 0;
	cp->c_data_ptr = 0;
	cp->c_data_len = 0;
	cp->c_data_offs = 0;
	cp->stream_running = 0;
	if (cp->sync_key_len) {
		pvr2_trace(PVR2_TRACE_DATA_FLOW,
			   "/*---TRACE_READ---*/ sync_state <== 1");
		cp->sync_state = 1;
		cp->sync_trashed_count = 0;
		cp->sync_buf_offs = 0;
	}
	cp->spigot_open = 0;
	return 0;
}
197
198struct pvr2_stream *pvr2_ioread_get_stream(struct pvr2_ioread *cp)
199{
200 return cp->stream;
201}
202
203int pvr2_ioread_setup(struct pvr2_ioread *cp,struct pvr2_stream *sp)
204{
205 int ret;
206 unsigned int idx;
207 struct pvr2_buffer *bp;
208
209 mutex_lock(&cp->mutex); do {
210 if (cp->stream) {
211 pvr2_trace(PVR2_TRACE_START_STOP,
212 "/*---TRACE_READ---*/"
213 " pvr2_ioread_setup (tear-down) id=%p",cp);
214 pvr2_ioread_stop(cp);
215 pvr2_stream_kill(cp->stream);
216 pvr2_stream_set_buffer_count(cp->stream,0);
217 cp->stream = 0;
218 }
219 if (sp) {
220 pvr2_trace(PVR2_TRACE_START_STOP,
221 "/*---TRACE_READ---*/"
222 " pvr2_ioread_setup (setup) id=%p",cp);
223 pvr2_stream_kill(sp);
224 ret = pvr2_stream_set_buffer_count(sp,BUFFER_COUNT);
225 if (ret < 0) return ret;
226 for (idx = 0; idx < BUFFER_COUNT; idx++) {
227 bp = pvr2_stream_get_buffer(sp,idx);
228 pvr2_buffer_set_buffer(bp,
229 cp->buffer_storage[idx],
230 BUFFER_SIZE);
231 }
232 cp->stream = sp;
233 }
234 } while (0); mutex_unlock(&cp->mutex);
235
236 return 0;
237}
238
239int pvr2_ioread_set_enabled(struct pvr2_ioread *cp,int fl)
240{
241 int ret = 0;
242 if ((!fl) == (!(cp->enabled))) return ret;
243
244 mutex_lock(&cp->mutex); do {
245 if (fl) {
246 ret = pvr2_ioread_start(cp);
247 } else {
248 pvr2_ioread_stop(cp);
249 }
250 } while (0); mutex_unlock(&cp->mutex);
251 return ret;
252}
253
254int pvr2_ioread_get_enabled(struct pvr2_ioread *cp)
255{
256 return cp->enabled != 0;
257}
258
/* Ensure cp->c_buf points at a ready buffer with unconsumed data,
   recycling (re-queueing) the current buffer once it is exhausted.
   Returns non-zero when data is available, zero when nothing is
   ready yet or a streaming error forced a stop.  Callers hold
   cp->mutex. */
int pvr2_ioread_get_buffer(struct pvr2_ioread *cp)
{
	int stat;

	while (cp->c_data_len <= cp->c_data_offs) {
		if (cp->c_buf) {
			// Flush out current buffer first.
			stat = pvr2_buffer_queue(cp->c_buf);
			if (stat < 0) {
				// Streaming error...
				pvr2_trace(PVR2_TRACE_DATA_FLOW,
					   "/*---TRACE_READ---*/"
					   " pvr2_ioread_read id=%p"
					   " queue_error=%d",
					   cp,stat);
				pvr2_ioread_stop(cp);
				return 0;
			}
			cp->c_buf = 0;
			cp->c_data_ptr = 0;
			cp->c_data_len = 0;
			cp->c_data_offs = 0;
		}
		// Now get a freshly filled buffer.
		cp->c_buf = pvr2_stream_get_ready_buffer(cp->stream);
		if (!cp->c_buf) break; // Nothing ready; done.
		cp->c_data_len = pvr2_buffer_get_count(cp->c_buf);
		if (!cp->c_data_len) {
			// Nothing transferred. Was there an error?
			stat = pvr2_buffer_get_status(cp->c_buf);
			if (stat < 0) {
				// Streaming error...
				pvr2_trace(PVR2_TRACE_DATA_FLOW,
					   "/*---TRACE_READ---*/"
					   " pvr2_ioread_read id=%p"
					   " buffer_error=%d",
					   cp,stat);
				pvr2_ioread_stop(cp);
				// Give up.
				return 0;
			}
			// Start over...
			continue;
		}
		cp->c_data_offs = 0;
		/* Buffer id indexes our storage array, since setup
		   bound buffer_storage[idx] to buffer idx */
		cp->c_data_ptr = cp->buffer_storage[
			pvr2_buffer_get_id(cp->c_buf)];
	}
	return !0;
}
309
/* While in sync-search mode (sync_state == 1), scan the incoming
   byte stream - across buffer boundaries - for the installed sync
   key.  Bytes before the key are counted in sync_trashed_count and
   discarded.  When the full key has been matched, switch to
   sync_state 2 so subsequent reads pass data through. */
void pvr2_ioread_filter(struct pvr2_ioread *cp)
{
	unsigned int idx;
	if (!cp->enabled) return;
	if (cp->sync_state != 1) return;

	// Search the stream for our synchronization key.  This is made
	// complicated by the fact that in order to be honest with
	// ourselves here we must search across buffer boundaries...
	mutex_lock(&cp->mutex); while (1) {
		// Ensure we have a buffer
		if (!pvr2_ioread_get_buffer(cp)) break;
		if (!cp->c_data_len) break;

		// Now walk the buffer contents until we match the key or
		// run out of buffer data.
		for (idx = cp->c_data_offs; idx < cp->c_data_len; idx++) {
			if (cp->sync_buf_offs >= cp->sync_key_len) break;
			if (cp->c_data_ptr[idx] ==
			    cp->sync_key_ptr[cp->sync_buf_offs]) {
				// Found the next key byte
				(cp->sync_buf_offs)++;
			} else {
				// Whoops, mismatched. Start key over...
				cp->sync_buf_offs = 0;
			}
		}

		// Consume what we've walked through
		// NOTE(review): idx is an absolute index into the
		// buffer, so "+= idx" is only an exact byte count when
		// c_data_offs was 0 at loop entry (true for freshly
		// fetched buffers) - confirm for the first iteration.
		cp->c_data_offs += idx;
		cp->sync_trashed_count += idx;

		// If we've found the key, then update state and get out.
		if (cp->sync_buf_offs >= cp->sync_key_len) {
			cp->sync_trashed_count -= cp->sync_key_len;
			pvr2_trace(PVR2_TRACE_DATA_FLOW,
				   "/*---TRACE_READ---*/"
				   " sync_state <== 2 (skipped %u bytes)",
				   cp->sync_trashed_count);
			cp->sync_state = 2;
			cp->sync_buf_offs = 0;
			break;
		}

		if (cp->c_data_offs < cp->c_data_len) {
			// Sanity check - should NEVER get here
			pvr2_trace(PVR2_TRACE_ERROR_LEGS,
				   "ERROR: pvr2_ioread filter sync problem"
				   " len=%u offs=%u",
				   cp->c_data_len,cp->c_data_offs);
			// Get out so we don't get stuck in an infinite
			// loop.
			break;
		}

		continue; // (for clarity)
	} mutex_unlock(&cp->mutex);
}
368
// Poll-style availability check.  Returns 0 when a read would make
// progress, -EAGAIN when the caller should retry later, or -EIO when
// the stream is not enabled at all.
int pvr2_ioread_avail(struct pvr2_ioread *cp)
{
	int ret;
	if (!(cp->enabled)) {
		// Stream is not enabled; so this is an I/O error
		return -EIO;
	}

	if (cp->sync_state == 1) {
		// Still hunting for the sync key; try to make progress.
		pvr2_ioread_filter(cp);
		if (cp->sync_state == 1) return -EAGAIN;
	}

	ret = 0;
	if (cp->stream_running) {
		if (!pvr2_stream_get_ready_count(cp->stream)) {
			// No data available at all right now.
			ret = -EAGAIN;
		}
	} else {
		// Not running yet: require some pre-buffering before we
		// declare data available.
		if (pvr2_stream_get_ready_count(cp->stream) < BUFFER_COUNT/2) {
			// Haven't buffered up enough yet; try again later
			ret = -EAGAIN;
		}
	}

	// Trace only when the availability state actually flips.
	if ((!(cp->spigot_open)) != (!(ret == 0))) {
		cp->spigot_open = (ret == 0);
		pvr2_trace(PVR2_TRACE_DATA_FLOW,
			   "/*---TRACE_READ---*/ data is %s",
			   cp->spigot_open ? "available" : "pending");
	}

	return ret;
}
404
// Copy up to cnt bytes of stream data into the user buffer.
// Returns the number of bytes copied, or a negative errno:
//   -EIO    streaming stopped / no buffer could be obtained,
//   -EFAULT copy_to_user() failed,
//   -EAGAIN nothing available right now (also from pvr2_ioread_avail).
// While sync_state == 2 the previously matched sync key is replayed
// into the output before normal buffer data resumes.  A NULL buf is
// treated as a zero-length request (buffer maintenance only).
int pvr2_ioread_read(struct pvr2_ioread *cp,void __user *buf,unsigned int cnt)
{
	unsigned int copied_cnt;
	unsigned int bcnt;
	const char *src;
	int stat;
	int ret = 0;
	unsigned int req_cnt = cnt;

	if (!cnt) {
		pvr2_trace(PVR2_TRACE_TRAP,
			   "/*---TRACE_READ---*/ pvr2_ioread_read id=%p"
			   " ZERO Request? Returning zero.",cp);
		return 0;
	}

	stat = pvr2_ioread_avail(cp);
	if (stat < 0) return stat;

	// First successful read transitions us to "running" mode.
	cp->stream_running = !0;

	mutex_lock(&cp->mutex); do {

		// Suck data out of the buffers and copy to the user
		copied_cnt = 0;
		if (!buf) cnt = 0;
		while (1) {
			if (!pvr2_ioread_get_buffer(cp)) {
				ret = -EIO;
				break;
			}

			if (!cnt) break;

			if (cp->sync_state == 2) {
				// We're repeating the sync key data into
				// the stream.
				src = cp->sync_key_ptr + cp->sync_buf_offs;
				bcnt = cp->sync_key_len - cp->sync_buf_offs;
			} else {
				// Normal buffer copy
				src = cp->c_data_ptr + cp->c_data_offs;
				bcnt = cp->c_data_len - cp->c_data_offs;
			}

			if (!bcnt) break;

			// Don't run past user's buffer
			if (bcnt > cnt) bcnt = cnt;

			if (copy_to_user(buf,src,bcnt)) {
				// User supplied a bad pointer?
				// Give up - this *will* cause data
				// to be lost.
				ret = -EFAULT;
				break;
			}
			cnt -= bcnt;
			buf += bcnt;
			copied_cnt += bcnt;

			if (cp->sync_state == 2) {
				// Update offset inside sync key that we're
				// repeating back out.
				cp->sync_buf_offs += bcnt;
				if (cp->sync_buf_offs >= cp->sync_key_len) {
					// Consumed entire key; switch mode
					// to normal.
					pvr2_trace(PVR2_TRACE_DATA_FLOW,
						   "/*---TRACE_READ---*/"
						   " sync_state <== 0");
					cp->sync_state = 0;
				}
			} else {
				// Update buffer offset.
				cp->c_data_offs += bcnt;
			}
		}

	} while (0); mutex_unlock(&cp->mutex);

	if (!ret) {
		if (copied_cnt) {
			// If anything was copied, return that count
			ret = copied_cnt;
		} else {
			// Nothing copied; suggest to caller that another
			// attempt should be tried again later
			ret = -EAGAIN;
		}
	}

	pvr2_trace(PVR2_TRACE_DATA_FLOW,
		   "/*---TRACE_READ---*/ pvr2_ioread_read"
		   " id=%p request=%d result=%d",
		   cp,req_cnt,ret);
	return ret;
}
503
504
505/*
506 Stuff for Emacs to see, in order to encourage consistent editing style:
507 *** Local Variables: ***
508 *** mode: c ***
509 *** fill-column: 75 ***
510 *** tab-width: 8 ***
511 *** c-basic-offset: 8 ***
512 *** End: ***
513 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-ioread.h b/drivers/media/video/pvrusb2/pvrusb2-ioread.h
new file mode 100644
index 0000000000..6b002597f5
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-ioread.h
@@ -0,0 +1,50 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21#ifndef __PVRUSB2_IOREAD_H
22#define __PVRUSB2_IOREAD_H
23
24#include "pvrusb2-io.h"
25
26struct pvr2_ioread;
27
28struct pvr2_ioread *pvr2_ioread_create(void);
29void pvr2_ioread_destroy(struct pvr2_ioread *);
30int pvr2_ioread_setup(struct pvr2_ioread *,struct pvr2_stream *);
31struct pvr2_stream *pvr2_ioread_get_stream(struct pvr2_ioread *);
32void pvr2_ioread_set_sync_key(struct pvr2_ioread *,
33 const char *sync_key_ptr,
34 unsigned int sync_key_len);
35int pvr2_ioread_set_enabled(struct pvr2_ioread *,int fl);
36int pvr2_ioread_get_enabled(struct pvr2_ioread *);
37int pvr2_ioread_read(struct pvr2_ioread *,void __user *buf,unsigned int cnt);
38int pvr2_ioread_avail(struct pvr2_ioread *);
39
40#endif /* __PVRUSB2_IOREAD_H */
41
42/*
43 Stuff for Emacs to see, in order to encourage consistent editing style:
44 *** Local Variables: ***
45 *** mode: c ***
46 *** fill-column: 75 ***
47 *** tab-width: 8 ***
48 *** c-basic-offset: 8 ***
49 *** End: ***
50 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-main.c b/drivers/media/video/pvrusb2/pvrusb2-main.c
new file mode 100644
index 0000000000..b95248274e
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-main.c
@@ -0,0 +1,172 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/config.h>
24#include <linux/kernel.h>
25#include <linux/errno.h>
26#include <linux/slab.h>
27#include <linux/module.h>
28#include <linux/moduleparam.h>
29#include <linux/smp_lock.h>
30#include <linux/usb.h>
31#include <linux/videodev2.h>
32
33#include "pvrusb2-hdw.h"
34#include "pvrusb2-context.h"
35#include "pvrusb2-debug.h"
36#include "pvrusb2-v4l2.h"
37#ifdef CONFIG_VIDEO_PVRUSB2_SYSFS
38#include "pvrusb2-sysfs.h"
39#endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */
40
41#define DRIVER_AUTHOR "Mike Isely <isely@pobox.com>"
42#define DRIVER_DESC "Hauppauge WinTV-PVR-USB2 MPEG2 Encoder/Tuner"
43#define DRIVER_VERSION "V4L in-tree version"
44
45#define DEFAULT_DEBUG_MASK (PVR2_TRACE_ERROR_LEGS| \
46 PVR2_TRACE_INFO| \
47 PVR2_TRACE_TOLERANCE| \
48 PVR2_TRACE_TRAP| \
49 0)
50
51int pvrusb2_debug = DEFAULT_DEBUG_MASK;
52
53module_param_named(debug,pvrusb2_debug,int,S_IRUGO|S_IWUSR);
54MODULE_PARM_DESC(debug, "Debug trace mask");
55
56#ifdef CONFIG_VIDEO_PVRUSB2_SYSFS
57static struct pvr2_sysfs_class *class_ptr = 0;
58#endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */
59
// Callback invoked by the context layer once the hardware is ready;
// hooks the device up to the v4l2 layer (and sysfs when configured).
static void pvr_setup_attach(struct pvr2_context *pvr)
{
	/* Create association with v4l layer */
	pvr2_v4l2_create(pvr);
#ifdef CONFIG_VIDEO_PVRUSB2_SYSFS
	pvr2_sysfs_create(pvr,class_ptr);
#endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */
}
68
69static int pvr_probe(struct usb_interface *intf,
70 const struct usb_device_id *devid)
71{
72 struct pvr2_context *pvr;
73
74 /* Create underlying hardware interface */
75 pvr = pvr2_context_create(intf,devid,pvr_setup_attach);
76 if (!pvr) {
77 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
78 "Failed to create hdw handler");
79 return -ENOMEM;
80 }
81
82 pvr2_trace(PVR2_TRACE_INIT,"pvr_probe(pvr=%p)",pvr);
83
84 usb_set_intfdata(intf, pvr);
85
86 return 0;
87}
88
89/*
90 * pvr_disconnect()
91 *
92 */
// USB disconnect callback: detach our context from the interface.
static void pvr_disconnect(struct usb_interface *intf)
{
	struct pvr2_context *pvr = usb_get_intfdata(intf);

	pvr2_trace(PVR2_TRACE_INIT,"pvr_disconnect(pvr=%p) BEGIN",pvr);

	// Clear the interface's driver data *before* tearing down the
	// context so nothing can look it up mid-teardown.
	usb_set_intfdata (intf, NULL);
	pvr2_context_disconnect(pvr);

	pvr2_trace(PVR2_TRACE_INIT,"pvr_disconnect(pvr=%p) DONE",pvr);

}
105
106static struct usb_driver pvr_driver = {
107 name: "pvrusb2",
108 id_table: pvr2_device_table,
109 probe: pvr_probe,
110 disconnect: pvr_disconnect
111};
112
113/*
114 * pvr_init() / pvr_exit()
115 *
116 * This code is run to initialize/exit the driver.
117 *
118 */
119static int __init pvr_init(void)
120{
121 int ret;
122
123 pvr2_trace(PVR2_TRACE_INIT,"pvr_init");
124
125#ifdef CONFIG_VIDEO_PVRUSB2_SYSFS
126 class_ptr = pvr2_sysfs_class_create();
127#endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */
128
129 ret = usb_register(&pvr_driver);
130
131 if (ret == 0)
132 info(DRIVER_DESC " : " DRIVER_VERSION);
133 if (pvrusb2_debug) info("Debug mask is %d (0x%x)",
134 pvrusb2_debug,pvrusb2_debug);
135
136 return ret;
137}
138
// Module exit: unwind what pvr_init() set up.
static void __exit pvr_exit(void)
{

	pvr2_trace(PVR2_TRACE_INIT,"pvr_exit");

#ifdef CONFIG_VIDEO_PVRUSB2_SYSFS
	pvr2_sysfs_class_destroy(class_ptr);
#endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */

	usb_deregister(&pvr_driver);
}
150
151module_init(pvr_init);
152module_exit(pvr_exit);
153
154/* Mike Isely <mcisely@pobox.com> 11-Mar-2006: See pvrusb2-hdw.c for
155 MODULE_DEVICE_TABLE(). We have to declare that attribute there
156 because that's where the device table actually is now and it seems
157 that certain gcc configurations get angry if MODULE_DEVICE_TABLE()
158 is used on what ends up being an external symbol. */
159MODULE_AUTHOR(DRIVER_AUTHOR);
160MODULE_DESCRIPTION(DRIVER_DESC);
161MODULE_LICENSE("GPL");
162
163
164/*
165 Stuff for Emacs to see, in order to encourage consistent editing style:
166 *** Local Variables: ***
167 *** mode: c ***
168 *** fill-column: 70 ***
169 *** tab-width: 8 ***
170 *** c-basic-offset: 8 ***
171 *** End: ***
172 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-std.c b/drivers/media/video/pvrusb2/pvrusb2-std.c
new file mode 100644
index 0000000000..1340636936
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-std.c
@@ -0,0 +1,408 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21
22#include "pvrusb2-std.h"
23#include "pvrusb2-debug.h"
24#include <asm/string.h>
25#include <linux/slab.h>
26
27struct std_name {
28 const char *name;
29 v4l2_std_id id;
30};
31
32
33#define CSTD_PAL \
34 (V4L2_STD_PAL_B| \
35 V4L2_STD_PAL_B1| \
36 V4L2_STD_PAL_G| \
37 V4L2_STD_PAL_H| \
38 V4L2_STD_PAL_I| \
39 V4L2_STD_PAL_D| \
40 V4L2_STD_PAL_D1| \
41 V4L2_STD_PAL_K| \
42 V4L2_STD_PAL_M| \
43 V4L2_STD_PAL_N| \
44 V4L2_STD_PAL_Nc| \
45 V4L2_STD_PAL_60)
46
47#define CSTD_NTSC \
48 (V4L2_STD_NTSC_M| \
49 V4L2_STD_NTSC_M_JP| \
50 V4L2_STD_NTSC_M_KR| \
51 V4L2_STD_NTSC_443)
52
53#define CSTD_SECAM \
54 (V4L2_STD_SECAM_B| \
55 V4L2_STD_SECAM_D| \
56 V4L2_STD_SECAM_G| \
57 V4L2_STD_SECAM_H| \
58 V4L2_STD_SECAM_K| \
59 V4L2_STD_SECAM_K1| \
60 V4L2_STD_SECAM_L| \
61 V4L2_STD_SECAM_LC)
62
63#define TSTD_B (V4L2_STD_PAL_B|V4L2_STD_SECAM_B)
64#define TSTD_B1 (V4L2_STD_PAL_B1)
65#define TSTD_D (V4L2_STD_PAL_D|V4L2_STD_SECAM_D)
66#define TSTD_D1 (V4L2_STD_PAL_D1)
67#define TSTD_G (V4L2_STD_PAL_G|V4L2_STD_SECAM_G)
68#define TSTD_H (V4L2_STD_PAL_H|V4L2_STD_SECAM_H)
69#define TSTD_I (V4L2_STD_PAL_I)
70#define TSTD_K (V4L2_STD_PAL_K|V4L2_STD_SECAM_K)
71#define TSTD_K1 (V4L2_STD_SECAM_K1)
72#define TSTD_L (V4L2_STD_SECAM_L)
73#define TSTD_M (V4L2_STD_PAL_M|V4L2_STD_NTSC_M)
74#define TSTD_N (V4L2_STD_PAL_N)
75#define TSTD_Nc (V4L2_STD_PAL_Nc)
76#define TSTD_60 (V4L2_STD_PAL_60)
77
78#define CSTD_ALL (CSTD_PAL|CSTD_NTSC|CSTD_SECAM)
79
80/* Mapping of standard bits to color system */
81const static struct std_name std_groups[] = {
82 {"PAL",CSTD_PAL},
83 {"NTSC",CSTD_NTSC},
84 {"SECAM",CSTD_SECAM},
85};
86
87/* Mapping of standard bits to modulation system */
88const static struct std_name std_items[] = {
89 {"B",TSTD_B},
90 {"B1",TSTD_B1},
91 {"D",TSTD_D},
92 {"D1",TSTD_D1},
93 {"G",TSTD_G},
94 {"H",TSTD_H},
95 {"I",TSTD_I},
96 {"K",TSTD_K},
97 {"K1",TSTD_K1},
98 {"L",TSTD_L},
99 {"LC",V4L2_STD_SECAM_LC},
100 {"M",TSTD_M},
101 {"Mj",V4L2_STD_NTSC_M_JP},
102 {"443",V4L2_STD_NTSC_443},
103 {"Mk",V4L2_STD_NTSC_M_KR},
104 {"N",TSTD_N},
105 {"Nc",TSTD_Nc},
106 {"60",TSTD_60},
107};
108
109
110// Search an array of std_name structures and return a pointer to the
111// element with the matching name.
112static const struct std_name *find_std_name(const struct std_name *arrPtr,
113 unsigned int arrSize,
114 const char *bufPtr,
115 unsigned int bufSize)
116{
117 unsigned int idx;
118 const struct std_name *p;
119 for (idx = 0; idx < arrSize; idx++) {
120 p = arrPtr + idx;
121 if (strlen(p->name) != bufSize) continue;
122 if (!memcmp(bufPtr,p->name,bufSize)) return p;
123 }
124 return 0;
125}
126
127
// Parse a standards description string of the form
// "COLOR-mod/mod;COLOR-mod/..." (e.g. "PAL-B/G;NTSC-M") into a
// v4l2_std_id bit mask.  Returns nonzero on success (mask stored via
// idPtr when non-NULL), 0 on any parse error.
int pvr2_std_str_to_id(v4l2_std_id *idPtr,const char *bufPtr,
		       unsigned int bufSize)
{
	v4l2_std_id id = 0;
	v4l2_std_id cmsk = 0;    // bits allowed by current color system
	v4l2_std_id t;
	int mMode = 0;           // nonzero while inside a "mod/mod" list
	unsigned int cnt;
	char ch;
	const struct std_name *sp;

	while (bufSize) {
		if (!mMode) {
			// Expect a color system name terminated by '-'.
			cnt = 0;
			while ((cnt < bufSize) && (bufPtr[cnt] != '-')) cnt++;
			if (cnt >= bufSize) return 0; // No more characters
			sp = find_std_name(
				std_groups,
				sizeof(std_groups)/sizeof(std_groups[0]),
				bufPtr,cnt);
			if (!sp) return 0; // Illegal color system name
			cnt++;
			bufPtr += cnt;
			bufSize -= cnt;
			mMode = !0;
			cmsk = sp->id;
			continue;
		}
		// Gather one modulation token, ended by '/' (stay in
		// this color system) or ';' (start a new one).
		cnt = 0;
		while (cnt < bufSize) {
			ch = bufPtr[cnt];
			if (ch == ';') {
				mMode = 0;
				break;
			}
			if (ch == '/') break;
			cnt++;
		}
		sp = find_std_name(std_items,
				   sizeof(std_items)/sizeof(std_items[0]),
				   bufPtr,cnt);
		if (!sp) return 0; // Illegal modulation system ID
		t = sp->id & cmsk;
		if (!t) return 0; // Specific color + modulation system illegal
		id |= t;
		if (cnt < bufSize) cnt++;  // also skip the separator
		bufPtr += cnt;
		bufSize -= cnt;
	}

	if (idPtr) *idPtr = id;
	return !0;
}
181
182
183unsigned int pvr2_std_id_to_str(char *bufPtr, unsigned int bufSize,
184 v4l2_std_id id)
185{
186 unsigned int idx1,idx2;
187 const struct std_name *ip,*gp;
188 int gfl,cfl;
189 unsigned int c1,c2;
190 cfl = 0;
191 c1 = 0;
192 for (idx1 = 0;
193 idx1 < sizeof(std_groups)/sizeof(std_groups[0]);
194 idx1++) {
195 gp = std_groups + idx1;
196 gfl = 0;
197 for (idx2 = 0;
198 idx2 < sizeof(std_items)/sizeof(std_items[0]);
199 idx2++) {
200 ip = std_items + idx2;
201 if (!(gp->id & ip->id & id)) continue;
202 if (!gfl) {
203 if (cfl) {
204 c2 = scnprintf(bufPtr,bufSize,";");
205 c1 += c2;
206 bufSize -= c2;
207 bufPtr += c2;
208 }
209 cfl = !0;
210 c2 = scnprintf(bufPtr,bufSize,
211 "%s-",gp->name);
212 gfl = !0;
213 } else {
214 c2 = scnprintf(bufPtr,bufSize,"/");
215 }
216 c1 += c2;
217 bufSize -= c2;
218 bufPtr += c2;
219 c2 = scnprintf(bufPtr,bufSize,
220 ip->name);
221 c1 += c2;
222 bufSize -= c2;
223 bufPtr += c2;
224 }
225 }
226 return c1;
227}
228
229
230// Template data for possible enumerated video standards. Here we group
231// standards which share common frame rates and resolution.
232static struct v4l2_standard generic_standards[] = {
233 {
234 .id = (TSTD_B|TSTD_B1|
235 TSTD_D|TSTD_D1|
236 TSTD_G|
237 TSTD_H|
238 TSTD_I|
239 TSTD_K|TSTD_K1|
240 TSTD_L|
241 V4L2_STD_SECAM_LC |
242 TSTD_N|TSTD_Nc),
243 .frameperiod =
244 {
245 .numerator = 1,
246 .denominator= 25
247 },
248 .framelines = 625,
249 .reserved = {0,0,0,0}
250 }, {
251 .id = (TSTD_M|
252 V4L2_STD_NTSC_M_JP|
253 V4L2_STD_NTSC_M_KR),
254 .frameperiod =
255 {
256 .numerator = 1001,
257 .denominator= 30000
258 },
259 .framelines = 525,
260 .reserved = {0,0,0,0}
261 }, { // This is a total wild guess
262 .id = (TSTD_60),
263 .frameperiod =
264 {
265 .numerator = 1001,
266 .denominator= 30000
267 },
268 .framelines = 525,
269 .reserved = {0,0,0,0}
270 }, { // This is total wild guess
271 .id = V4L2_STD_NTSC_443,
272 .frameperiod =
273 {
274 .numerator = 1001,
275 .denominator= 30000
276 },
277 .framelines = 525,
278 .reserved = {0,0,0,0}
279 }
280};
281
282#define generic_standards_cnt (sizeof(generic_standards)/sizeof(generic_standards[0]))
283
284static struct v4l2_standard *match_std(v4l2_std_id id)
285{
286 unsigned int idx;
287 for (idx = 0; idx < generic_standards_cnt; idx++) {
288 if (generic_standards[idx].id & id) {
289 return generic_standards + idx;
290 }
291 }
292 return 0;
293}
294
295static int pvr2_std_fill(struct v4l2_standard *std,v4l2_std_id id)
296{
297 struct v4l2_standard *template;
298 int idx;
299 unsigned int bcnt;
300 template = match_std(id);
301 if (!template) return 0;
302 idx = std->index;
303 memcpy(std,template,sizeof(*template));
304 std->index = idx;
305 std->id = id;
306 bcnt = pvr2_std_id_to_str(std->name,sizeof(std->name)-1,id);
307 std->name[bcnt] = 0;
308 pvr2_trace(PVR2_TRACE_INIT,"Set up standard idx=%u name=%s",
309 std->index,std->name);
310 return !0;
311}
312
313/* These are special cases of combined standards that we should enumerate
314 separately if the component pieces are present. */
315static v4l2_std_id std_mixes[] = {
316 V4L2_STD_PAL_B | V4L2_STD_PAL_G,
317 V4L2_STD_PAL_D | V4L2_STD_PAL_K,
318 V4L2_STD_SECAM_B | V4L2_STD_SECAM_G,
319 V4L2_STD_SECAM_D | V4L2_STD_SECAM_K,
320};
321
322struct v4l2_standard *pvr2_std_create_enum(unsigned int *countptr,
323 v4l2_std_id id)
324{
325 unsigned int std_cnt = 0;
326 unsigned int idx,bcnt,idx2;
327 v4l2_std_id idmsk,cmsk,fmsk;
328 struct v4l2_standard *stddefs;
329
330 if (pvrusb2_debug & PVR2_TRACE_INIT) {
331 char buf[50];
332 bcnt = pvr2_std_id_to_str(buf,sizeof(buf),id);
333 pvr2_trace(
334 PVR2_TRACE_INIT,"Mapping standards mask=0x%x (%.*s)",
335 (int)id,bcnt,buf);
336 }
337
338 *countptr = 0;
339 std_cnt = 0;
340 fmsk = 0;
341 for (idmsk = 1, cmsk = id; cmsk; idmsk <<= 1) {
342 if (!(idmsk & cmsk)) continue;
343 cmsk &= ~idmsk;
344 if (match_std(idmsk)) {
345 std_cnt++;
346 continue;
347 }
348 fmsk |= idmsk;
349 }
350
351 for (idx2 = 0; idx2 < sizeof(std_mixes)/sizeof(std_mixes[0]); idx2++) {
352 if ((id & std_mixes[idx2]) == std_mixes[idx2]) std_cnt++;
353 }
354
355 if (fmsk) {
356 char buf[50];
357 bcnt = pvr2_std_id_to_str(buf,sizeof(buf),fmsk);
358 pvr2_trace(
359 PVR2_TRACE_ERROR_LEGS,
360 "WARNING:"
361 " Failed to classify the following standard(s): %.*s",
362 bcnt,buf);
363 }
364
365 pvr2_trace(PVR2_TRACE_INIT,"Setting up %u unique standard(s)",
366 std_cnt);
367 if (!std_cnt) return 0; // paranoia
368
369 stddefs = kmalloc(sizeof(struct v4l2_standard) * std_cnt,
370 GFP_KERNEL);
371 memset(stddefs,0,sizeof(struct v4l2_standard) * std_cnt);
372 for (idx = 0; idx < std_cnt; idx++) stddefs[idx].index = idx;
373
374 idx = 0;
375
376 /* Enumerate potential special cases */
377 for (idx2 = 0; ((idx2 < sizeof(std_mixes)/sizeof(std_mixes[0])) &&
378 (idx < std_cnt)); idx2++) {
379 if (!(id & std_mixes[idx2])) continue;
380 if (pvr2_std_fill(stddefs+idx,std_mixes[idx2])) idx++;
381 }
382 /* Now enumerate individual pieces */
383 for (idmsk = 1, cmsk = id; cmsk && (idx < std_cnt); idmsk <<= 1) {
384 if (!(idmsk & cmsk)) continue;
385 cmsk &= ~idmsk;
386 if (!pvr2_std_fill(stddefs+idx,idmsk)) continue;
387 idx++;
388 }
389
390 *countptr = std_cnt;
391 return stddefs;
392}
393
// Return the mask of all video standard bits this driver understands.
v4l2_std_id pvr2_std_get_usable(void)
{
	return CSTD_ALL;
}
398
399
400/*
401 Stuff for Emacs to see, in order to encourage consistent editing style:
402 *** Local Variables: ***
403 *** mode: c ***
404 *** fill-column: 75 ***
405 *** tab-width: 8 ***
406 *** c-basic-offset: 8 ***
407 *** End: ***
408 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-std.h b/drivers/media/video/pvrusb2/pvrusb2-std.h
new file mode 100644
index 0000000000..07c3993753
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-std.h
@@ -0,0 +1,60 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21#ifndef __PVRUSB2_STD_H
22#define __PVRUSB2_STD_H
23
24#include <linux/videodev2.h>
25
26// Convert string describing one or more video standards into a mask of V4L
27// standard bits. Return true if conversion succeeds otherwise return
28// false. String is expected to be of the form: C1-x/y;C2-a/b where C1 and
29// C2 are color system names (e.g. "PAL", "NTSC") and x, y, a, and b are
30// modulation schemes (e.g. "M", "B", "G", etc).
31int pvr2_std_str_to_id(v4l2_std_id *idPtr,const char *bufPtr,
32 unsigned int bufSize);
33
34// Convert any arbitrary set of video standard bits into an unambiguous
35// readable string. Return value is the number of bytes consumed in the
36// buffer. The formatted string is of a form that can be parsed by our
37// sibling std_std_to_id() function.
38unsigned int pvr2_std_id_to_str(char *bufPtr, unsigned int bufSize,
39 v4l2_std_id id);
40
41// Create an array of suitable v4l2_standard structures given a bit mask of
42// video standards to support. The array is allocated from the heap, and
43// the number of elements is returned in the first argument.
44struct v4l2_standard *pvr2_std_create_enum(unsigned int *countptr,
45 v4l2_std_id id);
46
47// Return mask of which video standard bits are valid
48v4l2_std_id pvr2_std_get_usable(void);
49
50#endif /* __PVRUSB2_STD_H */
51
52/*
53 Stuff for Emacs to see, in order to encourage consistent editing style:
54 *** Local Variables: ***
55 *** mode: c ***
56 *** fill-column: 75 ***
57 *** tab-width: 8 ***
58 *** c-basic-offset: 8 ***
59 *** End: ***
60 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-sysfs.c b/drivers/media/video/pvrusb2/pvrusb2-sysfs.c
new file mode 100644
index 0000000000..c6e6523d74
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-sysfs.c
@@ -0,0 +1,865 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21
22#include <linux/config.h>
23#include <linux/string.h>
24#include <linux/slab.h>
25#include <asm/semaphore.h>
26#include "pvrusb2-sysfs.h"
27#include "pvrusb2-hdw.h"
28#include "pvrusb2-debug.h"
29#ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC
30#include "pvrusb2-debugifc.h"
31#endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */
32
33#define pvr2_sysfs_trace(...) pvr2_trace(PVR2_TRACE_SYSFS,__VA_ARGS__)
34
35struct pvr2_sysfs {
36 struct pvr2_channel channel;
37 struct class_device *class_dev;
38#ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC
39 struct pvr2_sysfs_debugifc *debugifc;
40#endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */
41 struct pvr2_sysfs_ctl_item *item_first;
42 struct pvr2_sysfs_ctl_item *item_last;
43 struct sysfs_ops kops;
44 struct kobj_type ktype;
45 struct class_device_attribute attr_v4l_minor_number;
46 struct class_device_attribute attr_unit_number;
47};
48
49#ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC
50struct pvr2_sysfs_debugifc {
51 struct class_device_attribute attr_debugcmd;
52 struct class_device_attribute attr_debuginfo;
53};
54#endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */
55
56struct pvr2_sysfs_ctl_item {
57 struct class_device_attribute attr_name;
58 struct class_device_attribute attr_type;
59 struct class_device_attribute attr_min;
60 struct class_device_attribute attr_max;
61 struct class_device_attribute attr_enum;
62 struct class_device_attribute attr_bits;
63 struct class_device_attribute attr_val;
64 struct class_device_attribute attr_custom;
65 struct pvr2_ctrl *cptr;
66 struct pvr2_sysfs *chptr;
67 struct pvr2_sysfs_ctl_item *item_next;
68 struct attribute *attr_gen[7];
69 struct attribute_group grp;
70 char name[80];
71};
72
73struct pvr2_sysfs_class {
74 struct class class;
75};
76
77static ssize_t show_name(int id,struct class_device *class_dev,char *buf)
78{
79 struct pvr2_ctrl *cptr;
80 struct pvr2_sysfs *sfp;
81 const char *name;
82
83 sfp = (struct pvr2_sysfs *)class_dev->class_data;
84 if (!sfp) return -EINVAL;
85 cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id);
86 if (!cptr) return -EINVAL;
87
88 name = pvr2_ctrl_get_desc(cptr);
89 pvr2_sysfs_trace("pvr2_sysfs(%p) show_name(cid=%d) is %s",sfp,id,name);
90
91 if (!name) return -EINVAL;
92
93 return scnprintf(buf,PAGE_SIZE,"%s\n",name);
94}
95
// sysfs read handler: emit the type of control `id` as a word
// ("integer", "enum", "bitmask", "boolean", or "?").
static ssize_t show_type(int id,struct class_device *class_dev,char *buf)
{
	struct pvr2_ctrl *cptr;
	struct pvr2_sysfs *sfp;
	const char *name;
	enum pvr2_ctl_type tp;

	sfp = (struct pvr2_sysfs *)class_dev->class_data;
	if (!sfp) return -EINVAL;
	cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id);
	if (!cptr) return -EINVAL;

	tp = pvr2_ctrl_get_type(cptr);
	switch (tp) {
	case pvr2_ctl_int: name = "integer"; break;
	case pvr2_ctl_enum: name = "enum"; break;
	case pvr2_ctl_bitmask: name = "bitmask"; break;
	case pvr2_ctl_bool: name = "boolean"; break;
	default: name = "?"; break;
	}
	pvr2_sysfs_trace("pvr2_sysfs(%p) show_type(cid=%d) is %s",sfp,id,name);

	// Unreachable in practice: the default case above guarantees
	// name is set.  Kept for symmetry with the other show handlers.
	if (!name) return -EINVAL;

	return scnprintf(buf,PAGE_SIZE,"%s\n",name);
}
122
123static ssize_t show_min(int id,struct class_device *class_dev,char *buf)
124{
125 struct pvr2_ctrl *cptr;
126 struct pvr2_sysfs *sfp;
127 long val;
128
129 sfp = (struct pvr2_sysfs *)class_dev->class_data;
130 if (!sfp) return -EINVAL;
131 cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id);
132 if (!cptr) return -EINVAL;
133 val = pvr2_ctrl_get_min(cptr);
134
135 pvr2_sysfs_trace("pvr2_sysfs(%p) show_min(cid=%d) is %ld",sfp,id,val);
136
137 return scnprintf(buf,PAGE_SIZE,"%ld\n",val);
138}
139
140static ssize_t show_max(int id,struct class_device *class_dev,char *buf)
141{
142 struct pvr2_ctrl *cptr;
143 struct pvr2_sysfs *sfp;
144 long val;
145
146 sfp = (struct pvr2_sysfs *)class_dev->class_data;
147 if (!sfp) return -EINVAL;
148 cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id);
149 if (!cptr) return -EINVAL;
150 val = pvr2_ctrl_get_max(cptr);
151
152 pvr2_sysfs_trace("pvr2_sysfs(%p) show_max(cid=%d) is %ld",sfp,id,val);
153
154 return scnprintf(buf,PAGE_SIZE,"%ld\n",val);
155}
156
// sysfs read handler: emit the control's current value in its normal
// symbolic form, newline-terminated.  Returns byte count or negative
// error from the control layer.
static ssize_t show_val_norm(int id,struct class_device *class_dev,char *buf)
{
	struct pvr2_ctrl *cptr;
	struct pvr2_sysfs *sfp;
	int val,ret;
	unsigned int cnt = 0;

	sfp = (struct pvr2_sysfs *)class_dev->class_data;
	if (!sfp) return -EINVAL;
	cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id);
	if (!cptr) return -EINVAL;

	ret = pvr2_ctrl_get_value(cptr,&val);
	if (ret < 0) return ret;

	// PAGE_SIZE-1 leaves room for the newline appended below.
	ret = pvr2_ctrl_value_to_sym(cptr,~0,val,
				     buf,PAGE_SIZE-1,&cnt);

	pvr2_sysfs_trace("pvr2_sysfs(%p) show_val_norm(cid=%d) is %.*s (%d)",
			 sfp,id,cnt,buf,val);
	buf[cnt] = '\n';
	return cnt+1;
}
180
// sysfs read handler: like show_val_norm() but renders the value via
// the control's "custom" symbolic mapping.
static ssize_t show_val_custom(int id,struct class_device *class_dev,char *buf)
{
	struct pvr2_ctrl *cptr;
	struct pvr2_sysfs *sfp;
	int val,ret;
	unsigned int cnt = 0;

	sfp = (struct pvr2_sysfs *)class_dev->class_data;
	if (!sfp) return -EINVAL;
	cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id);
	if (!cptr) return -EINVAL;

	ret = pvr2_ctrl_get_value(cptr,&val);
	if (ret < 0) return ret;

	// PAGE_SIZE-1 leaves room for the newline appended below.
	ret = pvr2_ctrl_custom_value_to_sym(cptr,~0,val,
					    buf,PAGE_SIZE-1,&cnt);

	pvr2_sysfs_trace("pvr2_sysfs(%p) show_val_custom(cid=%d) is %.*s (%d)",
			 sfp,id,cnt,buf,val);
	buf[cnt] = '\n';
	return cnt+1;
}
204
// sysfs read handler: list every legal enum value of control `id`,
// one per line.  Returns total byte count written.
static ssize_t show_enum(int id,struct class_device *class_dev,char *buf)
{
	struct pvr2_ctrl *cptr;
	struct pvr2_sysfs *sfp;
	long val;
	unsigned int bcnt,ccnt,ecnt;

	sfp = (struct pvr2_sysfs *)class_dev->class_data;
	if (!sfp) return -EINVAL;
	cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id);
	if (!cptr) return -EINVAL;
	ecnt = pvr2_ctrl_get_cnt(cptr);
	bcnt = 0;
	for (val = 0; val < ecnt; val++) {
		// NOTE(review): assumes pvr2_ctrl_get_valname() writes at
		// most PAGE_SIZE-bcnt bytes and reports the count via
		// ccnt -- confirm against its definition.
		pvr2_ctrl_get_valname(cptr,val,buf+bcnt,PAGE_SIZE-bcnt,&ccnt);
		if (!ccnt) continue;
		bcnt += ccnt;
		// Stop before appending a newline past the page.
		if (bcnt >= PAGE_SIZE) break;
		buf[bcnt] = '\n';
		bcnt++;
	}
	pvr2_sysfs_trace("pvr2_sysfs(%p) show_enum(cid=%d)",sfp,id);
	return bcnt;
}
229
230static ssize_t show_bits(int id,struct class_device *class_dev,char *buf)
231{
232 struct pvr2_ctrl *cptr;
233 struct pvr2_sysfs *sfp;
234 int valid_bits,msk;
235 unsigned int bcnt,ccnt;
236
237 sfp = (struct pvr2_sysfs *)class_dev->class_data;
238 if (!sfp) return -EINVAL;
239 cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id);
240 if (!cptr) return -EINVAL;
241 valid_bits = pvr2_ctrl_get_mask(cptr);
242 bcnt = 0;
243 for (msk = 1; valid_bits; msk <<= 1) {
244 if (!(msk & valid_bits)) continue;
245 valid_bits &= ~msk;
246 pvr2_ctrl_get_valname(cptr,msk,buf+bcnt,PAGE_SIZE-bcnt,&ccnt);
247 bcnt += ccnt;
248 if (bcnt >= PAGE_SIZE) break;
249 buf[bcnt] = '\n';
250 bcnt++;
251 }
252 pvr2_sysfs_trace("pvr2_sysfs(%p) show_bits(cid=%d)",sfp,id);
253 return bcnt;
254}
255
256static int store_val_any(int id,int customfl,struct pvr2_sysfs *sfp,
257 const char *buf,unsigned int count)
258{
259 struct pvr2_ctrl *cptr;
260 int ret;
261 int mask,val;
262
263 cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id);
264 if (customfl) {
265 ret = pvr2_ctrl_custom_sym_to_value(cptr,buf,count,&mask,&val);
266 } else {
267 ret = pvr2_ctrl_sym_to_value(cptr,buf,count,&mask,&val);
268 }
269 if (ret < 0) return ret;
270 ret = pvr2_ctrl_set_mask_value(cptr,mask,val);
271 pvr2_hdw_commit_ctl(sfp->channel.hdw);
272 return ret;
273}
274
275static ssize_t store_val_norm(int id,struct class_device *class_dev,
276 const char *buf,size_t count)
277{
278 struct pvr2_sysfs *sfp;
279 int ret;
280 sfp = (struct pvr2_sysfs *)class_dev->class_data;
281 ret = store_val_any(id,0,sfp,buf,count);
282 if (!ret) ret = count;
283 return ret;
284}
285
286static ssize_t store_val_custom(int id,struct class_device *class_dev,
287 const char *buf,size_t count)
288{
289 struct pvr2_sysfs *sfp;
290 int ret;
291 sfp = (struct pvr2_sysfs *)class_dev->class_data;
292 ret = store_val_any(id,1,sfp,buf,count);
293 if (!ret) ret = count;
294 return ret;
295}
296
297/*
298 Mike Isely <isely@pobox.com> 30-April-2005
299
300 This next batch of horrible preprocessor hackery is needed because the
301 kernel's class_device_attribute mechanism fails to pass the actual
302 attribute through to the show / store functions, which means we have no
303 way to package up any attribute-specific parameters, like for example the
304 control id. So we work around this brain-damage by encoding the control
305 id into the show / store functions themselves and pick the function based
306 on the control id we're setting up. These macros try to ease the pain.
307 Yuck.
308*/
309
/* Generate one show wrapper bound to a specific control id: the wrapper
   name is <sf_name>_<ctl_id> and it forwards to sf_name(ctl_id,...). */
#define CREATE_SHOW_INSTANCE(sf_name,ctl_id) \
static ssize_t sf_name##_##ctl_id(struct class_device *class_dev,char *buf) \
{ return sf_name(ctl_id,class_dev,buf); }

/* Same idea for store wrappers (extra buf/count arguments). */
#define CREATE_STORE_INSTANCE(sf_name,ctl_id) \
static ssize_t sf_name##_##ctl_id(struct class_device *class_dev,const char *buf,size_t count) \
{ return sf_name(ctl_id,class_dev,buf,count); }

/* Generate the full set of show/store wrappers for one control id. */
#define CREATE_BATCH(ctl_id) \
CREATE_SHOW_INSTANCE(show_name,ctl_id) \
CREATE_SHOW_INSTANCE(show_type,ctl_id) \
CREATE_SHOW_INSTANCE(show_min,ctl_id) \
CREATE_SHOW_INSTANCE(show_max,ctl_id) \
CREATE_SHOW_INSTANCE(show_val_norm,ctl_id) \
CREATE_SHOW_INSTANCE(show_val_custom,ctl_id) \
CREATE_SHOW_INSTANCE(show_enum,ctl_id) \
CREATE_SHOW_INSTANCE(show_bits,ctl_id) \
CREATE_STORE_INSTANCE(store_val_norm,ctl_id) \
CREATE_STORE_INSTANCE(store_val_custom,ctl_id) \

/* Instantiate wrappers for control ids 0..59; pvr2_sysfs_add_control()
   silently ignores any control index beyond this table. */
CREATE_BATCH(0)
CREATE_BATCH(1)
CREATE_BATCH(2)
CREATE_BATCH(3)
CREATE_BATCH(4)
CREATE_BATCH(5)
CREATE_BATCH(6)
CREATE_BATCH(7)
CREATE_BATCH(8)
CREATE_BATCH(9)
CREATE_BATCH(10)
CREATE_BATCH(11)
CREATE_BATCH(12)
CREATE_BATCH(13)
CREATE_BATCH(14)
CREATE_BATCH(15)
CREATE_BATCH(16)
CREATE_BATCH(17)
CREATE_BATCH(18)
CREATE_BATCH(19)
CREATE_BATCH(20)
CREATE_BATCH(21)
CREATE_BATCH(22)
CREATE_BATCH(23)
CREATE_BATCH(24)
CREATE_BATCH(25)
CREATE_BATCH(26)
CREATE_BATCH(27)
CREATE_BATCH(28)
CREATE_BATCH(29)
CREATE_BATCH(30)
CREATE_BATCH(31)
CREATE_BATCH(32)
CREATE_BATCH(33)
CREATE_BATCH(34)
CREATE_BATCH(35)
CREATE_BATCH(36)
CREATE_BATCH(37)
CREATE_BATCH(38)
CREATE_BATCH(39)
CREATE_BATCH(40)
CREATE_BATCH(41)
CREATE_BATCH(42)
CREATE_BATCH(43)
CREATE_BATCH(44)
CREATE_BATCH(45)
CREATE_BATCH(46)
CREATE_BATCH(47)
CREATE_BATCH(48)
CREATE_BATCH(49)
CREATE_BATCH(50)
CREATE_BATCH(51)
CREATE_BATCH(52)
CREATE_BATCH(53)
CREATE_BATCH(54)
CREATE_BATCH(55)
CREATE_BATCH(56)
CREATE_BATCH(57)
CREATE_BATCH(58)
CREATE_BATCH(59)
390
/* One entry per control id: the complete set of generated show/store
   wrappers for that id, used to populate class_device attributes. */
struct pvr2_sysfs_func_set {
	ssize_t (*show_name)(struct class_device *,char *);
	ssize_t (*show_type)(struct class_device *,char *);
	ssize_t (*show_min)(struct class_device *,char *);
	ssize_t (*show_max)(struct class_device *,char *);
	ssize_t (*show_enum)(struct class_device *,char *);
	ssize_t (*show_bits)(struct class_device *,char *);
	ssize_t (*show_val_norm)(struct class_device *,char *);
	ssize_t (*store_val_norm)(struct class_device *,
				  const char *,size_t);
	ssize_t (*show_val_custom)(struct class_device *,char *);
	ssize_t (*store_val_custom)(struct class_device *,
				    const char *,size_t);
};
405
/* Designated initializer for funcs[ctl_id], wiring in the wrappers that
   CREATE_BATCH(ctl_id) generated above. */
#define INIT_BATCH(ctl_id) \
[ctl_id] = { \
	.show_name = show_name_##ctl_id, \
	.show_type = show_type_##ctl_id, \
	.show_min = show_min_##ctl_id, \
	.show_max = show_max_##ctl_id, \
	.show_enum = show_enum_##ctl_id, \
	.show_bits = show_bits_##ctl_id, \
	.show_val_norm = show_val_norm_##ctl_id, \
	.store_val_norm = store_val_norm_##ctl_id, \
	.show_val_custom = show_val_custom_##ctl_id, \
	.store_val_custom = store_val_custom_##ctl_id, \
} \

/* Lookup table indexed by control id; must cover the same range as the
   CREATE_BATCH instantiations (0..59). */
static struct pvr2_sysfs_func_set funcs[] = {
	INIT_BATCH(0),
	INIT_BATCH(1),
	INIT_BATCH(2),
	INIT_BATCH(3),
	INIT_BATCH(4),
	INIT_BATCH(5),
	INIT_BATCH(6),
	INIT_BATCH(7),
	INIT_BATCH(8),
	INIT_BATCH(9),
	INIT_BATCH(10),
	INIT_BATCH(11),
	INIT_BATCH(12),
	INIT_BATCH(13),
	INIT_BATCH(14),
	INIT_BATCH(15),
	INIT_BATCH(16),
	INIT_BATCH(17),
	INIT_BATCH(18),
	INIT_BATCH(19),
	INIT_BATCH(20),
	INIT_BATCH(21),
	INIT_BATCH(22),
	INIT_BATCH(23),
	INIT_BATCH(24),
	INIT_BATCH(25),
	INIT_BATCH(26),
	INIT_BATCH(27),
	INIT_BATCH(28),
	INIT_BATCH(29),
	INIT_BATCH(30),
	INIT_BATCH(31),
	INIT_BATCH(32),
	INIT_BATCH(33),
	INIT_BATCH(34),
	INIT_BATCH(35),
	INIT_BATCH(36),
	INIT_BATCH(37),
	INIT_BATCH(38),
	INIT_BATCH(39),
	INIT_BATCH(40),
	INIT_BATCH(41),
	INIT_BATCH(42),
	INIT_BATCH(43),
	INIT_BATCH(44),
	INIT_BATCH(45),
	INIT_BATCH(46),
	INIT_BATCH(47),
	INIT_BATCH(48),
	INIT_BATCH(49),
	INIT_BATCH(50),
	INIT_BATCH(51),
	INIT_BATCH(52),
	INIT_BATCH(53),
	INIT_BATCH(54),
	INIT_BATCH(55),
	INIT_BATCH(56),
	INIT_BATCH(57),
	INIT_BATCH(58),
	INIT_BATCH(59),
};
482
483
484static void pvr2_sysfs_add_control(struct pvr2_sysfs *sfp,int ctl_id)
485{
486 struct pvr2_sysfs_ctl_item *cip;
487 struct pvr2_sysfs_func_set *fp;
488 struct pvr2_ctrl *cptr;
489 unsigned int cnt,acnt;
490
491 if ((ctl_id < 0) || (ctl_id >= (sizeof(funcs)/sizeof(funcs[0])))) {
492 return;
493 }
494
495 fp = funcs + ctl_id;
496 cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,ctl_id);
497 if (!cptr) return;
498
499 cip = kmalloc(sizeof(*cip),GFP_KERNEL);
500 if (!cip) return;
501 memset(cip,0,sizeof(*cip));
502 pvr2_sysfs_trace("Creating pvr2_sysfs_ctl_item id=%p",cip);
503
504 cip->cptr = cptr;
505
506 cip->chptr = sfp;
507 cip->item_next = 0;
508 if (sfp->item_last) {
509 sfp->item_last->item_next = cip;
510 } else {
511 sfp->item_first = cip;
512 }
513 sfp->item_last = cip;
514
515 cip->attr_name.attr.owner = THIS_MODULE;
516 cip->attr_name.attr.name = "name";
517 cip->attr_name.attr.mode = S_IRUGO;
518 cip->attr_name.show = fp->show_name;
519
520 cip->attr_type.attr.owner = THIS_MODULE;
521 cip->attr_type.attr.name = "type";
522 cip->attr_type.attr.mode = S_IRUGO;
523 cip->attr_type.show = fp->show_type;
524
525 cip->attr_min.attr.owner = THIS_MODULE;
526 cip->attr_min.attr.name = "min_val";
527 cip->attr_min.attr.mode = S_IRUGO;
528 cip->attr_min.show = fp->show_min;
529
530 cip->attr_max.attr.owner = THIS_MODULE;
531 cip->attr_max.attr.name = "max_val";
532 cip->attr_max.attr.mode = S_IRUGO;
533 cip->attr_max.show = fp->show_max;
534
535 cip->attr_val.attr.owner = THIS_MODULE;
536 cip->attr_val.attr.name = "cur_val";
537 cip->attr_val.attr.mode = S_IRUGO;
538
539 cip->attr_custom.attr.owner = THIS_MODULE;
540 cip->attr_custom.attr.name = "custom_val";
541 cip->attr_custom.attr.mode = S_IRUGO;
542
543 cip->attr_enum.attr.owner = THIS_MODULE;
544 cip->attr_enum.attr.name = "enum_val";
545 cip->attr_enum.attr.mode = S_IRUGO;
546 cip->attr_enum.show = fp->show_enum;
547
548 cip->attr_bits.attr.owner = THIS_MODULE;
549 cip->attr_bits.attr.name = "bit_val";
550 cip->attr_bits.attr.mode = S_IRUGO;
551 cip->attr_bits.show = fp->show_bits;
552
553 if (pvr2_ctrl_is_writable(cptr)) {
554 cip->attr_val.attr.mode |= S_IWUSR|S_IWGRP;
555 cip->attr_custom.attr.mode |= S_IWUSR|S_IWGRP;
556 }
557
558 acnt = 0;
559 cip->attr_gen[acnt++] = &cip->attr_name.attr;
560 cip->attr_gen[acnt++] = &cip->attr_type.attr;
561 cip->attr_gen[acnt++] = &cip->attr_val.attr;
562 cip->attr_val.show = fp->show_val_norm;
563 cip->attr_val.store = fp->store_val_norm;
564 if (pvr2_ctrl_has_custom_symbols(cptr)) {
565 cip->attr_gen[acnt++] = &cip->attr_custom.attr;
566 cip->attr_custom.show = fp->show_val_custom;
567 cip->attr_custom.store = fp->store_val_custom;
568 }
569 switch (pvr2_ctrl_get_type(cptr)) {
570 case pvr2_ctl_enum:
571 // Control is an enumeration
572 cip->attr_gen[acnt++] = &cip->attr_enum.attr;
573 break;
574 case pvr2_ctl_int:
575 // Control is an integer
576 cip->attr_gen[acnt++] = &cip->attr_min.attr;
577 cip->attr_gen[acnt++] = &cip->attr_max.attr;
578 break;
579 case pvr2_ctl_bitmask:
580 // Control is an bitmask
581 cip->attr_gen[acnt++] = &cip->attr_bits.attr;
582 break;
583 default: break;
584 }
585
586 cnt = scnprintf(cip->name,sizeof(cip->name)-1,"ctl_%s",
587 pvr2_ctrl_get_name(cptr));
588 cip->name[cnt] = 0;
589 cip->grp.name = cip->name;
590 cip->grp.attrs = cip->attr_gen;
591
592 sysfs_create_group(&sfp->class_dev->kobj,&cip->grp);
593}
594
595#ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC
596static ssize_t debuginfo_show(struct class_device *,char *);
597static ssize_t debugcmd_show(struct class_device *,char *);
598static ssize_t debugcmd_store(struct class_device *,const char *,size_t count);
599
600static void pvr2_sysfs_add_debugifc(struct pvr2_sysfs *sfp)
601{
602 struct pvr2_sysfs_debugifc *dip;
603 dip = kmalloc(sizeof(*dip),GFP_KERNEL);
604 if (!dip) return;
605 memset(dip,0,sizeof(*dip));
606 dip->attr_debugcmd.attr.owner = THIS_MODULE;
607 dip->attr_debugcmd.attr.name = "debugcmd";
608 dip->attr_debugcmd.attr.mode = S_IRUGO|S_IWUSR|S_IWGRP;
609 dip->attr_debugcmd.show = debugcmd_show;
610 dip->attr_debugcmd.store = debugcmd_store;
611 dip->attr_debuginfo.attr.owner = THIS_MODULE;
612 dip->attr_debuginfo.attr.name = "debuginfo";
613 dip->attr_debuginfo.attr.mode = S_IRUGO;
614 dip->attr_debuginfo.show = debuginfo_show;
615 sfp->debugifc = dip;
616 class_device_create_file(sfp->class_dev,&dip->attr_debugcmd);
617 class_device_create_file(sfp->class_dev,&dip->attr_debuginfo);
618}
619
620
621static void pvr2_sysfs_tear_down_debugifc(struct pvr2_sysfs *sfp)
622{
623 if (!sfp->debugifc) return;
624 class_device_remove_file(sfp->class_dev,
625 &sfp->debugifc->attr_debuginfo);
626 class_device_remove_file(sfp->class_dev,&sfp->debugifc->attr_debugcmd);
627 kfree(sfp->debugifc);
628 sfp->debugifc = 0;
629}
630#endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */
631
632
633static void pvr2_sysfs_add_controls(struct pvr2_sysfs *sfp)
634{
635 unsigned int idx,cnt;
636 cnt = pvr2_hdw_get_ctrl_count(sfp->channel.hdw);
637 for (idx = 0; idx < cnt; idx++) {
638 pvr2_sysfs_add_control(sfp,idx);
639 }
640}
641
642
643static void pvr2_sysfs_tear_down_controls(struct pvr2_sysfs *sfp)
644{
645 struct pvr2_sysfs_ctl_item *cip1,*cip2;
646 for (cip1 = sfp->item_first; cip1; cip1 = cip2) {
647 cip2 = cip1->item_next;
648 sysfs_remove_group(&sfp->class_dev->kobj,&cip1->grp);
649 pvr2_sysfs_trace("Destroying pvr2_sysfs_ctl_item id=%p",cip1);
650 kfree(cip1);
651 }
652}
653
654
655static void pvr2_sysfs_class_release(struct class *class)
656{
657 struct pvr2_sysfs_class *clp;
658 clp = container_of(class,struct pvr2_sysfs_class,class);
659 pvr2_sysfs_trace("Destroying pvr2_sysfs_class id=%p",clp);
660 kfree(clp);
661}
662
663
/* class_device release callback: frees the class_device that
   class_dev_create() allocated, once its refcount drops to zero. */
static void pvr2_sysfs_release(struct class_device *class_dev)
{
	pvr2_sysfs_trace("Releasing class_dev id=%p",class_dev);
	kfree(class_dev);
}
669
670
671static void class_dev_destroy(struct pvr2_sysfs *sfp)
672{
673 if (!sfp->class_dev) return;
674#ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC
675 pvr2_sysfs_tear_down_debugifc(sfp);
676#endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */
677 pvr2_sysfs_tear_down_controls(sfp);
678 class_device_remove_file(sfp->class_dev,&sfp->attr_v4l_minor_number);
679 class_device_remove_file(sfp->class_dev,&sfp->attr_unit_number);
680 pvr2_sysfs_trace("Destroying class_dev id=%p",sfp->class_dev);
681 sfp->class_dev->class_data = 0;
682 class_device_unregister(sfp->class_dev);
683 sfp->class_dev = 0;
684}
685
686
687static ssize_t v4l_minor_number_show(struct class_device *class_dev,char *buf)
688{
689 struct pvr2_sysfs *sfp;
690 sfp = (struct pvr2_sysfs *)class_dev->class_data;
691 if (!sfp) return -EINVAL;
692 return scnprintf(buf,PAGE_SIZE,"%d\n",
693 pvr2_hdw_v4l_get_minor_number(sfp->channel.hdw));
694}
695
696
697static ssize_t unit_number_show(struct class_device *class_dev,char *buf)
698{
699 struct pvr2_sysfs *sfp;
700 sfp = (struct pvr2_sysfs *)class_dev->class_data;
701 if (!sfp) return -EINVAL;
702 return scnprintf(buf,PAGE_SIZE,"%d\n",
703 pvr2_hdw_get_unit_number(sfp->channel.hdw));
704}
705
706
707static void class_dev_create(struct pvr2_sysfs *sfp,
708 struct pvr2_sysfs_class *class_ptr)
709{
710 struct usb_device *usb_dev;
711 struct class_device *class_dev;
712 usb_dev = pvr2_hdw_get_dev(sfp->channel.hdw);
713 if (!usb_dev) return;
714 class_dev = kmalloc(sizeof(*class_dev),GFP_KERNEL);
715 if (!class_dev) return;
716 memset(class_dev,0,sizeof(*class_dev));
717
718 pvr2_sysfs_trace("Creating class_dev id=%p",class_dev);
719
720 class_dev->class = &class_ptr->class;
721 if (pvr2_hdw_get_sn(sfp->channel.hdw)) {
722 snprintf(class_dev->class_id,BUS_ID_SIZE,"sn-%lu",
723 pvr2_hdw_get_sn(sfp->channel.hdw));
724 } else if (pvr2_hdw_get_unit_number(sfp->channel.hdw) >= 0) {
725 snprintf(class_dev->class_id,BUS_ID_SIZE,"unit-%c",
726 pvr2_hdw_get_unit_number(sfp->channel.hdw) + 'a');
727 } else {
728 kfree(class_dev);
729 return;
730 }
731
732 class_dev->dev = &usb_dev->dev;
733
734 sfp->class_dev = class_dev;
735 class_dev->class_data = sfp;
736 class_device_register(class_dev);
737
738 sfp->attr_v4l_minor_number.attr.owner = THIS_MODULE;
739 sfp->attr_v4l_minor_number.attr.name = "v4l_minor_number";
740 sfp->attr_v4l_minor_number.attr.mode = S_IRUGO;
741 sfp->attr_v4l_minor_number.show = v4l_minor_number_show;
742 sfp->attr_v4l_minor_number.store = 0;
743 class_device_create_file(sfp->class_dev,&sfp->attr_v4l_minor_number);
744 sfp->attr_unit_number.attr.owner = THIS_MODULE;
745 sfp->attr_unit_number.attr.name = "unit_number";
746 sfp->attr_unit_number.attr.mode = S_IRUGO;
747 sfp->attr_unit_number.show = unit_number_show;
748 sfp->attr_unit_number.store = 0;
749 class_device_create_file(sfp->class_dev,&sfp->attr_unit_number);
750
751 pvr2_sysfs_add_controls(sfp);
752#ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC
753 pvr2_sysfs_add_debugifc(sfp);
754#endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */
755}
756
757
758static void pvr2_sysfs_internal_check(struct pvr2_channel *chp)
759{
760 struct pvr2_sysfs *sfp;
761 sfp = container_of(chp,struct pvr2_sysfs,channel);
762 if (!sfp->channel.mc_head->disconnect_flag) return;
763 pvr2_trace(PVR2_TRACE_STRUCT,"Destroying pvr2_sysfs id=%p",sfp);
764 class_dev_destroy(sfp);
765 pvr2_channel_done(&sfp->channel);
766 kfree(sfp);
767}
768
769
770struct pvr2_sysfs *pvr2_sysfs_create(struct pvr2_context *mp,
771 struct pvr2_sysfs_class *class_ptr)
772{
773 struct pvr2_sysfs *sfp;
774 sfp = kmalloc(sizeof(*sfp),GFP_KERNEL);
775 if (!sfp) return sfp;
776 memset(sfp,0,sizeof(*sfp));
777 pvr2_trace(PVR2_TRACE_STRUCT,"Creating pvr2_sysfs id=%p",sfp);
778 pvr2_channel_init(&sfp->channel,mp);
779 sfp->channel.check_func = pvr2_sysfs_internal_check;
780
781 class_dev_create(sfp,class_ptr);
782 return sfp;
783}
784
785
/* Class uevent (hotplug) callback.  Intentionally adds no environment
   variables; it exists only because the class core will invoke it. */
static int pvr2_sysfs_hotplug(struct class_device *cd,char **envp,
			      int numenvp,char *buf,int size)
{
	/* Even though we don't do anything here, we still need this function
	   because sysfs will still try to call it. */
	return 0;
}
793
794struct pvr2_sysfs_class *pvr2_sysfs_class_create(void)
795{
796 struct pvr2_sysfs_class *clp;
797 clp = kmalloc(sizeof(*clp),GFP_KERNEL);
798 if (!clp) return clp;
799 memset(clp,0,sizeof(*clp));
800 pvr2_sysfs_trace("Creating pvr2_sysfs_class id=%p",clp);
801 clp->class.name = "pvrusb2";
802 clp->class.class_release = pvr2_sysfs_class_release;
803 clp->class.release = pvr2_sysfs_release;
804 clp->class.uevent = pvr2_sysfs_hotplug;
805 if (class_register(&clp->class)) {
806 pvr2_sysfs_trace(
807 "Registration failed for pvr2_sysfs_class id=%p",clp);
808 kfree(clp);
809 clp = 0;
810 }
811 return clp;
812}
813
814
/* Unregister the pvrusb2 class; the wrapper itself is freed from the
   class_release callback (pvr2_sysfs_class_release). */
void pvr2_sysfs_class_destroy(struct pvr2_sysfs_class *clp)
{
	class_unregister(&clp->class);
}
819
820
821#ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC
822static ssize_t debuginfo_show(struct class_device *class_dev,char *buf)
823{
824 struct pvr2_sysfs *sfp;
825 sfp = (struct pvr2_sysfs *)class_dev->class_data;
826 if (!sfp) return -EINVAL;
827 pvr2_hdw_trigger_module_log(sfp->channel.hdw);
828 return pvr2_debugifc_print_info(sfp->channel.hdw,buf,PAGE_SIZE);
829}
830
831
832static ssize_t debugcmd_show(struct class_device *class_dev,char *buf)
833{
834 struct pvr2_sysfs *sfp;
835 sfp = (struct pvr2_sysfs *)class_dev->class_data;
836 if (!sfp) return -EINVAL;
837 return pvr2_debugifc_print_status(sfp->channel.hdw,buf,PAGE_SIZE);
838}
839
840
841static ssize_t debugcmd_store(struct class_device *class_dev,
842 const char *buf,size_t count)
843{
844 struct pvr2_sysfs *sfp;
845 int ret;
846
847 sfp = (struct pvr2_sysfs *)class_dev->class_data;
848 if (!sfp) return -EINVAL;
849
850 ret = pvr2_debugifc_docmd(sfp->channel.hdw,buf,count);
851 if (ret < 0) return ret;
852 return count;
853}
854#endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */
855
856
857/*
858 Stuff for Emacs to see, in order to encourage consistent editing style:
859 *** Local Variables: ***
860 *** mode: c ***
861 *** fill-column: 75 ***
862 *** tab-width: 8 ***
863 *** c-basic-offset: 8 ***
864 *** End: ***
865 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-sysfs.h b/drivers/media/video/pvrusb2/pvrusb2-sysfs.h
new file mode 100644
index 0000000000..ff9373b47f
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-sysfs.h
@@ -0,0 +1,47 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21#ifndef __PVRUSB2_SYSFS_H
22#define __PVRUSB2_SYSFS_H
23
24#include <linux/list.h>
25#include <linux/sysfs.h>
26#include "pvrusb2-context.h"
27
28struct pvr2_sysfs;
29struct pvr2_sysfs_class;
30
31struct pvr2_sysfs_class *pvr2_sysfs_class_create(void);
32void pvr2_sysfs_class_destroy(struct pvr2_sysfs_class *);
33
34struct pvr2_sysfs *pvr2_sysfs_create(struct pvr2_context *,
35 struct pvr2_sysfs_class *);
36
37#endif /* __PVRUSB2_SYSFS_H */
38
39/*
40 Stuff for Emacs to see, in order to encourage consistent editing style:
41 *** Local Variables: ***
42 *** mode: c ***
43 *** fill-column: 75 ***
44 *** tab-width: 8 ***
45 *** c-basic-offset: 8 ***
46 *** End: ***
47 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-tuner.c b/drivers/media/video/pvrusb2/pvrusb2-tuner.c
new file mode 100644
index 0000000000..f4aba8144c
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-tuner.c
@@ -0,0 +1,122 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include "pvrusb2.h"
24#include "pvrusb2-util.h"
25#include "pvrusb2-tuner.h"
26#include "pvrusb2-hdw-internal.h"
27#include "pvrusb2-debug.h"
28#include <linux/videodev2.h>
29#include <media/tuner.h>
30#include <media/v4l2-common.h>
31
/* Per-tuner i2c handler context, hung off the i2c client. */
struct pvr2_tuner_handler {
	struct pvr2_hdw *hdw;              /* owning device */
	struct pvr2_i2c_client *client;    /* the tuner's i2c client */
	struct pvr2_i2c_handler i2c_handler; /* registered handler vtable */
	int type_update_fl;  /* nonzero => tuner type needs to be (re)sent */
};
38
39
/* Push the currently configured tuner type down to the tuner chip via
   the TUNER_SET_TYPE_ADDR i2c command, then clear the pending flag.
   Does nothing while the tuner type is still unknown (negative). */
static void set_type(struct pvr2_tuner_handler *ctxt)
{
	struct pvr2_hdw *hdw = ctxt->hdw;
	struct tuner_setup setup;
	pvr2_trace(PVR2_TRACE_CHIPS,"i2c tuner set_type(%d)",hdw->tuner_type);
	if (((int)(hdw->tuner_type)) < 0) return;

	/* NOTE(review): only addr/type/mode_mask are assigned; if struct
	   tuner_setup grows further members they stay uninitialized here
	   -- confirm against the tuner_setup definition. */
	setup.addr = ADDR_UNSET;
	setup.type = hdw->tuner_type;
	setup.mode_mask = T_RADIO | T_ANALOG_TV;
	/* We may really want mode_mask to be T_ANALOG_TV for now */
	pvr2_i2c_client_cmd(ctxt->client,TUNER_SET_TYPE_ADDR,&setup);
	ctxt->type_update_fl = 0;
}
54
55
56static int tuner_check(struct pvr2_tuner_handler *ctxt)
57{
58 struct pvr2_hdw *hdw = ctxt->hdw;
59 if (hdw->tuner_updated) ctxt->type_update_fl = !0;
60 return ctxt->type_update_fl != 0;
61}
62
63
/* Handler "update" hook: apply a pending tuner-type change, if any. */
static void tuner_update(struct pvr2_tuner_handler *ctxt)
{
	if (ctxt->type_update_fl) set_type(ctxt);
}
68
69
/* Handler "detach" hook: unhook from the i2c client and free context. */
static void pvr2_tuner_detach(struct pvr2_tuner_handler *ctxt)
{
	ctxt->client->handler = 0;
	kfree(ctxt);
}
75
76
/* Handler "describe" hook: identify this handler for diagnostics. */
static unsigned int pvr2_tuner_describe(struct pvr2_tuner_handler *ctxt,char *buf,unsigned int cnt)
{
	return scnprintf(buf,cnt,"handler: pvrusb2-tuner");
}
81
82
83const static struct pvr2_i2c_handler_functions tuner_funcs = {
84 .detach = (void (*)(void *))pvr2_tuner_detach,
85 .check = (int (*)(void *))tuner_check,
86 .update = (void (*)(void *))tuner_update,
87 .describe = (unsigned int (*)(void *,char *,unsigned int))pvr2_tuner_describe,
88};
89
90
91int pvr2_i2c_tuner_setup(struct pvr2_hdw *hdw,struct pvr2_i2c_client *cp)
92{
93 struct pvr2_tuner_handler *ctxt;
94 if (cp->handler) return 0;
95
96 ctxt = kmalloc(sizeof(*ctxt),GFP_KERNEL);
97 if (!ctxt) return 0;
98 memset(ctxt,0,sizeof(*ctxt));
99
100 ctxt->i2c_handler.func_data = ctxt;
101 ctxt->i2c_handler.func_table = &tuner_funcs;
102 ctxt->type_update_fl = !0;
103 ctxt->client = cp;
104 ctxt->hdw = hdw;
105 cp->handler = &ctxt->i2c_handler;
106 pvr2_trace(PVR2_TRACE_CHIPS,"i2c 0x%x tuner handler set up",
107 cp->client->addr);
108 return !0;
109}
110
111
112
113
114/*
115 Stuff for Emacs to see, in order to encourage consistent editing style:
116 *** Local Variables: ***
117 *** mode: c ***
118 *** fill-column: 70 ***
119 *** tab-width: 8 ***
120 *** c-basic-offset: 8 ***
121 *** End: ***
122 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-tuner.h b/drivers/media/video/pvrusb2/pvrusb2-tuner.h
new file mode 100644
index 0000000000..556f12aa91
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-tuner.h
@@ -0,0 +1,38 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21#ifndef __PVRUSB2_TUNER_H
22#define __PVRUSB2_TUNER_H
23
24#include "pvrusb2-i2c-core.h"
25
26int pvr2_i2c_tuner_setup(struct pvr2_hdw *,struct pvr2_i2c_client *);
27
28#endif /* __PVRUSB2_TUNER_H */
29
30/*
31 Stuff for Emacs to see, in order to encourage consistent editing style:
32 *** Local Variables: ***
33 *** mode: c ***
34 *** fill-column: 70 ***
35 *** tab-width: 8 ***
36 *** c-basic-offset: 8 ***
37 *** End: ***
38 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-util.h b/drivers/media/video/pvrusb2/pvrusb2-util.h
new file mode 100644
index 0000000000..e53aee416f
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-util.h
@@ -0,0 +1,63 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21#ifndef __PVRUSB2_UTIL_H
22#define __PVRUSB2_UTIL_H
23
/* Store 32-bit value d into byte array t at offset i, least-significant
   byte first (little-endian wire order). */
#define PVR2_DECOMPOSE_LE(t,i,d) \
	do { \
		(t)[i] = (d) & 0xff;\
		(t)[i+1] = ((d) >> 8) & 0xff;\
		(t)[i+2] = ((d) >> 16) & 0xff;\
		(t)[i+3] = ((d) >> 24) & 0xff;\
	} while(0)

/* Store 32-bit value d into byte array t at offset i, most-significant
   byte first (big-endian wire order). */
#define PVR2_DECOMPOSE_BE(t,i,d) \
	do { \
		(t)[i+3] = (d) & 0xff;\
		(t)[i+2] = ((d) >> 8) & 0xff;\
		(t)[i+1] = ((d) >> 16) & 0xff;\
		(t)[i] = ((d) >> 24) & 0xff;\
	} while(0)

/* Reassemble a u32 from 4 little-endian bytes of t starting at i. */
#define PVR2_COMPOSE_LE(t,i) \
	((((u32)((t)[i+3])) << 24) | \
	 (((u32)((t)[i+2])) << 16) | \
	 (((u32)((t)[i+1])) << 8) | \
	 ((u32)((t)[i])))

/* Reassemble a u32 from 4 big-endian bytes of t starting at i. */
#define PVR2_COMPOSE_BE(t,i) \
	((((u32)((t)[i])) << 24) | \
	 (((u32)((t)[i+1])) << 16) | \
	 (((u32)((t)[i+2])) << 8) | \
	 ((u32)((t)[i+3])))
51
52
53#endif /* __PVRUSB2_UTIL_H */
54
55/*
56 Stuff for Emacs to see, in order to encourage consistent editing style:
57 *** Local Variables: ***
58 *** mode: c ***
59 *** fill-column: 75 ***
60 *** tab-width: 8 ***
61 *** c-basic-offset: 8 ***
62 *** End: ***
63 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
new file mode 100644
index 0000000000..961951010c
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
@@ -0,0 +1,1126 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/kernel.h>
24#include <linux/version.h>
25#include "pvrusb2-context.h"
26#include "pvrusb2-hdw.h"
27#include "pvrusb2.h"
28#include "pvrusb2-debug.h"
29#include "pvrusb2-v4l2.h"
30#include "pvrusb2-ioread.h"
31#include <linux/videodev2.h>
32#include <media/v4l2-common.h>
33
struct pvr2_v4l2_dev;
struct pvr2_v4l2_fh;
struct pvr2_v4l2;

/* V4L no longer provide the ability to set / get a private context pointer
   (i.e. video_get_drvdata / video_set_drvdata), which means we have to
   concoct our own context locating mechanism. Supposedly this is intended
   to simplify driver implementation. It's not clear to me how that can
   possibly be true. Our solution here is to maintain a lookup table of
   our context instances, indexed by the minor device number of the V4L
   device. See pvr2_v4l2_open() for some implications of this approach. */
/* Map: V4L minor number -> driver context.  Entries are installed in
   pvr2_v4l2_dev_init() and cleared in pvr2_v4l2_dev_destroy(); all
   access is serialized by device_lock (see race note in open()). */
static struct pvr2_v4l2_dev *devices[256];
static DEFINE_MUTEX(device_lock);
47
/* Per-registered-device state: ties one V4L video_device node to the
   pvr2 context stream it exposes. */
struct pvr2_v4l2_dev {
	struct pvr2_v4l2 *v4lp;              /* owning interface instance */
	struct video_device *vdev;           /* registered V4L device node */
	struct pvr2_context_stream *stream;  /* stream served by this node */
	int ctxt_idx;                        /* slot in devices[], or -1 */
	enum pvr2_config config;             /* stream config (e.g. mpeg) */
};
55
/* Per-open-file state.  Instances are kept on a doubly linked list
   (vprev/vnext) headed in the owning pvr2_v4l2 (vhead). */
struct pvr2_v4l2_fh {
	struct pvr2_channel channel;      /* our claim on the driver context */
	struct pvr2_v4l2_dev *dev_info;   /* device node this fh was opened on */
	enum v4l2_priority prio;          /* V4L2 access priority of this fh */
	struct pvr2_ioread *rhp;          /* stream reader; NULL until first read */
	struct file *file;                /* back pointer to the VFS file */
	struct pvr2_v4l2 *vhead;          /* owning interface instance */
	struct pvr2_v4l2_fh *vnext;       /* next open fh, or NULL */
	struct pvr2_v4l2_fh *vprev;       /* previous open fh, or NULL */
	wait_queue_head_t wait_data;      /* readers block here for data */
	int fw_mode_flag;                 /* nonzero: read() returns CPU firmware */
};
68
/* Top-level V4L2 interface state for one pvrusb2 device. */
struct pvr2_v4l2 {
	struct pvr2_channel channel;   /* our claim on the driver context */
	struct pvr2_v4l2_fh *vfirst;   /* head of list of open file handles */
	struct pvr2_v4l2_fh *vlast;    /* tail of that list */

	struct v4l2_prio_state prio;   /* V4L2 priority arbitration state */

	/* streams */
	struct pvr2_v4l2_dev video_dev;
};
79
/* Requested minor number per unit (-1 = let V4L choose); settable at
   module load time via the video_nr parameter. */
static int video_nr[PVR_NUM] = {[0 ... PVR_NUM-1] = -1};
module_param_array(video_nr, int, NULL, 0444);
MODULE_PARM_DESC(video_nr, "Offset for device's minor");
83
84struct v4l2_capability pvr_capability ={
85 .driver = "pvrusb2",
86 .card = "Hauppauge WinTV pvr-usb2",
87 .bus_info = "usb",
88 .version = KERNEL_VERSION(0,8,0),
89 .capabilities = (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VBI_CAPTURE |
90 V4L2_CAP_TUNER | V4L2_CAP_AUDIO |
91 V4L2_CAP_READWRITE),
92 .reserved = {0,0,0,0}
93};
94
/* Template tuner description returned for VIDIOC_G_TUNER; the dynamic
   fields (signal, rxsubchans, audmode) are overwritten per-call. */
static struct v4l2_tuner pvr_v4l2_tuners[]= {
	{
		.index      = 0,
		.name       = "TV Tuner",
		.type       = V4L2_TUNER_ANALOG_TV,
		.capability = (V4L2_TUNER_CAP_NORM |
			       V4L2_TUNER_CAP_STEREO |
			       V4L2_TUNER_CAP_LANG1 |
			       V4L2_TUNER_CAP_LANG2),
		.rangelow   = 0,
		.rangehigh  = 0,
		.rxsubchans = V4L2_TUNER_SUB_STEREO,
		.audmode    = V4L2_TUNER_MODE_STEREO,
		.signal     = 0,
		.afc        = 0,
		.reserved   = {0,0,0,0}
	}
};
113
114struct v4l2_fmtdesc pvr_fmtdesc [] = {
115 {
116 .index = 0,
117 .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
118 .flags = V4L2_FMT_FLAG_COMPRESSED,
119 .description = "MPEG1/2",
120 // This should really be V4L2_PIX_FMT_MPEG, but xawtv
121 // breaks when I do that.
122 .pixelformat = 0, // V4L2_PIX_FMT_MPEG,
123 .reserved = { 0, 0, 0, 0 }
124 }
125};
126
127#define PVR_FORMAT_PIX 0
128#define PVR_FORMAT_VBI 1
129
130struct v4l2_format pvr_format [] = {
131 [PVR_FORMAT_PIX] = {
132 .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
133 .fmt = {
134 .pix = {
135 .width = 720,
136 .height = 576,
137 // This should really be V4L2_PIX_FMT_MPEG,
138 // but xawtv breaks when I do that.
139 .pixelformat = 0, // V4L2_PIX_FMT_MPEG,
140 .field = V4L2_FIELD_INTERLACED,
141 .bytesperline = 0, // doesn't make sense
142 // here
143 //FIXME : Don't know what to put here...
144 .sizeimage = (32*1024),
145 .colorspace = 0, // doesn't make sense here
146 .priv = 0
147 }
148 }
149 },
150 [PVR_FORMAT_VBI] = {
151 .type = V4L2_BUF_TYPE_VBI_CAPTURE,
152 .fmt = {
153 .vbi = {
154 .sampling_rate = 27000000,
155 .offset = 248,
156 .samples_per_line = 1443,
157 .sample_format = V4L2_PIX_FMT_GREY,
158 .start = { 0, 0 },
159 .count = { 0, 0 },
160 .flags = 0,
161 .reserved = { 0, 0 }
162 }
163 }
164 }
165};
166
167/*
168 * pvr_ioctl()
169 *
170 * This is part of Video 4 Linux API. The procedure handles ioctl() calls.
171 *
172 */
173static int pvr2_v4l2_do_ioctl(struct inode *inode, struct file *file,
174 unsigned int cmd, void *arg)
175{
176 struct pvr2_v4l2_fh *fh = file->private_data;
177 struct pvr2_v4l2 *vp = fh->vhead;
178 struct pvr2_v4l2_dev *dev_info = fh->dev_info;
179 struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
180 int ret = -EINVAL;
181
182 if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL) {
183 v4l_print_ioctl(pvr2_hdw_get_driver_name(hdw),cmd);
184 }
185
186 if (!pvr2_hdw_dev_ok(hdw)) {
187 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
188 "ioctl failed - bad or no context");
189 return -EFAULT;
190 }
191
192 /* check priority */
193 switch (cmd) {
194 case VIDIOC_S_CTRL:
195 case VIDIOC_S_STD:
196 case VIDIOC_S_INPUT:
197 case VIDIOC_S_TUNER:
198 case VIDIOC_S_FREQUENCY:
199 ret = v4l2_prio_check(&vp->prio, &fh->prio);
200 if (ret)
201 return ret;
202 }
203
204 switch (cmd) {
205 case VIDIOC_QUERYCAP:
206 {
207 struct v4l2_capability *cap = arg;
208
209 memcpy(cap, &pvr_capability, sizeof(struct v4l2_capability));
210
211 ret = 0;
212 break;
213 }
214
215 case VIDIOC_G_PRIORITY:
216 {
217 enum v4l2_priority *p = arg;
218
219 *p = v4l2_prio_max(&vp->prio);
220 ret = 0;
221 break;
222 }
223
224 case VIDIOC_S_PRIORITY:
225 {
226 enum v4l2_priority *prio = arg;
227
228 ret = v4l2_prio_change(&vp->prio, &fh->prio, *prio);
229 break;
230 }
231
232 case VIDIOC_ENUMSTD:
233 {
234 struct v4l2_standard *vs = (struct v4l2_standard *)arg;
235 int idx = vs->index;
236 ret = pvr2_hdw_get_stdenum_value(hdw,vs,idx+1);
237 break;
238 }
239
240 case VIDIOC_G_STD:
241 {
242 int val = 0;
243 ret = pvr2_ctrl_get_value(
244 pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDCUR),&val);
245 *(v4l2_std_id *)arg = val;
246 break;
247 }
248
249 case VIDIOC_S_STD:
250 {
251 ret = pvr2_ctrl_set_value(
252 pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDCUR),
253 *(v4l2_std_id *)arg);
254 break;
255 }
256
257 case VIDIOC_ENUMINPUT:
258 {
259 struct pvr2_ctrl *cptr;
260 struct v4l2_input *vi = (struct v4l2_input *)arg;
261 struct v4l2_input tmp;
262 unsigned int cnt;
263
264 cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT);
265
266 memset(&tmp,0,sizeof(tmp));
267 tmp.index = vi->index;
268 ret = 0;
269 switch (vi->index) {
270 case PVR2_CVAL_INPUT_TV:
271 case PVR2_CVAL_INPUT_RADIO:
272 tmp.type = V4L2_INPUT_TYPE_TUNER;
273 break;
274 case PVR2_CVAL_INPUT_SVIDEO:
275 case PVR2_CVAL_INPUT_COMPOSITE:
276 tmp.type = V4L2_INPUT_TYPE_CAMERA;
277 break;
278 default:
279 ret = -EINVAL;
280 break;
281 }
282 if (ret < 0) break;
283
284 cnt = 0;
285 pvr2_ctrl_get_valname(cptr,vi->index,
286 tmp.name,sizeof(tmp.name)-1,&cnt);
287 tmp.name[cnt] = 0;
288
289 /* Don't bother with audioset, since this driver currently
290 always switches the audio whenever the video is
291 switched. */
292
293 /* Handling std is a tougher problem. It doesn't make
294 sense in cases where a device might be multi-standard.
295 We could just copy out the current value for the
296 standard, but it can change over time. For now just
297 leave it zero. */
298
299 memcpy(vi, &tmp, sizeof(tmp));
300
301 ret = 0;
302 break;
303 }
304
305 case VIDIOC_G_INPUT:
306 {
307 struct pvr2_ctrl *cptr;
308 struct v4l2_input *vi = (struct v4l2_input *)arg;
309 int val;
310 cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT);
311 val = 0;
312 ret = pvr2_ctrl_get_value(cptr,&val);
313 vi->index = val;
314 break;
315 }
316
317 case VIDIOC_S_INPUT:
318 {
319 struct v4l2_input *vi = (struct v4l2_input *)arg;
320 ret = pvr2_ctrl_set_value(
321 pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT),
322 vi->index);
323 break;
324 }
325
326 case VIDIOC_ENUMAUDIO:
327 {
328 ret = -EINVAL;
329 break;
330 }
331
332 case VIDIOC_G_AUDIO:
333 {
334 ret = -EINVAL;
335 break;
336 }
337
338 case VIDIOC_S_AUDIO:
339 {
340 ret = -EINVAL;
341 break;
342 }
343 case VIDIOC_G_TUNER:
344 {
345 struct v4l2_tuner *vt = (struct v4l2_tuner *)arg;
346 unsigned int status_mask;
347 int val;
348 if (vt->index !=0) break;
349
350 status_mask = pvr2_hdw_get_signal_status(hdw);
351
352 memcpy(vt, &pvr_v4l2_tuners[vt->index],
353 sizeof(struct v4l2_tuner));
354
355 vt->signal = 0;
356 if (status_mask & PVR2_SIGNAL_OK) {
357 if (status_mask & PVR2_SIGNAL_STEREO) {
358 vt->rxsubchans = V4L2_TUNER_SUB_STEREO;
359 } else {
360 vt->rxsubchans = V4L2_TUNER_SUB_MONO;
361 }
362 if (status_mask & PVR2_SIGNAL_SAP) {
363 vt->rxsubchans |= (V4L2_TUNER_SUB_LANG1 |
364 V4L2_TUNER_SUB_LANG2);
365 }
366 vt->signal = 65535;
367 }
368
369 val = 0;
370 ret = pvr2_ctrl_get_value(
371 pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_AUDIOMODE),
372 &val);
373 vt->audmode = val;
374 break;
375 }
376
377 case VIDIOC_S_TUNER:
378 {
379 struct v4l2_tuner *vt=(struct v4l2_tuner *)arg;
380
381 if (vt->index != 0)
382 break;
383
384 ret = pvr2_ctrl_set_value(
385 pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_AUDIOMODE),
386 vt->audmode);
387 }
388
389 case VIDIOC_S_FREQUENCY:
390 {
391 const struct v4l2_frequency *vf = (struct v4l2_frequency *)arg;
392 ret = pvr2_ctrl_set_value(
393 pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_FREQUENCY),
394 vf->frequency * 62500);
395 break;
396 }
397
398 case VIDIOC_G_FREQUENCY:
399 {
400 struct v4l2_frequency *vf = (struct v4l2_frequency *)arg;
401 int val = 0;
402 ret = pvr2_ctrl_get_value(
403 pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_FREQUENCY),
404 &val);
405 val /= 62500;
406 vf->frequency = val;
407 break;
408 }
409
410 case VIDIOC_ENUM_FMT:
411 {
412 struct v4l2_fmtdesc *fd = (struct v4l2_fmtdesc *)arg;
413
414 /* Only one format is supported : mpeg.*/
415 if (fd->index != 0)
416 break;
417
418 memcpy(fd, pvr_fmtdesc, sizeof(struct v4l2_fmtdesc));
419 ret = 0;
420 break;
421 }
422
423 case VIDIOC_G_FMT:
424 {
425 struct v4l2_format *vf = (struct v4l2_format *)arg;
426 int val;
427 switch(vf->type) {
428 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
429 memcpy(vf, &pvr_format[PVR_FORMAT_PIX],
430 sizeof(struct v4l2_format));
431 val = 0;
432 pvr2_ctrl_get_value(
433 pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_HRES),
434 &val);
435 vf->fmt.pix.width = val;
436 val = 0;
437 pvr2_ctrl_get_value(
438 pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_VRES),
439 &val);
440 vf->fmt.pix.height = val;
441 ret = 0;
442 break;
443 case V4L2_BUF_TYPE_VBI_CAPTURE:
444 // ????? Still need to figure out to do VBI correctly
445 ret = -EINVAL;
446 break;
447 default:
448 ret = -EINVAL;
449 break;
450 }
451 break;
452 }
453
454 case VIDIOC_TRY_FMT:
455 case VIDIOC_S_FMT:
456 {
457 struct v4l2_format *vf = (struct v4l2_format *)arg;
458
459 ret = 0;
460 switch(vf->type) {
461 case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
462 int h = vf->fmt.pix.height;
463 int w = vf->fmt.pix.width;
464
465 if (h < 200) {
466 h = 200;
467 } else if (h > 625) {
468 h = 625;
469 }
470 if (w < 320) {
471 w = 320;
472 } else if (w > 720) {
473 w = 720;
474 }
475
476 memcpy(vf, &pvr_format[PVR_FORMAT_PIX],
477 sizeof(struct v4l2_format));
478 vf->fmt.pix.width = w;
479 vf->fmt.pix.height = h;
480
481 if (cmd == VIDIOC_S_FMT) {
482 pvr2_ctrl_set_value(
483 pvr2_hdw_get_ctrl_by_id(hdw,
484 PVR2_CID_HRES),
485 vf->fmt.pix.width);
486 pvr2_ctrl_set_value(
487 pvr2_hdw_get_ctrl_by_id(hdw,
488 PVR2_CID_VRES),
489 vf->fmt.pix.height);
490 }
491 } break;
492 case V4L2_BUF_TYPE_VBI_CAPTURE:
493 // ????? Still need to figure out to do VBI correctly
494 ret = -EINVAL;
495 break;
496 default:
497 ret = -EINVAL;
498 break;
499 }
500 break;
501 }
502
503 case VIDIOC_STREAMON:
504 {
505 ret = pvr2_hdw_set_stream_type(hdw,dev_info->config);
506 if (ret < 0) return ret;
507 ret = pvr2_hdw_set_streaming(hdw,!0);
508 break;
509 }
510
511 case VIDIOC_STREAMOFF:
512 {
513 ret = pvr2_hdw_set_streaming(hdw,0);
514 break;
515 }
516
517 case VIDIOC_QUERYCTRL:
518 {
519 struct pvr2_ctrl *cptr;
520 struct v4l2_queryctrl *vc = (struct v4l2_queryctrl *)arg;
521 ret = 0;
522 if (vc->id & V4L2_CTRL_FLAG_NEXT_CTRL) {
523 cptr = pvr2_hdw_get_ctrl_nextv4l(
524 hdw,(vc->id & ~V4L2_CTRL_FLAG_NEXT_CTRL));
525 if (cptr) vc->id = pvr2_ctrl_get_v4lid(cptr);
526 } else {
527 cptr = pvr2_hdw_get_ctrl_v4l(hdw,vc->id);
528 }
529 if (!cptr) {
530 pvr2_trace(PVR2_TRACE_V4LIOCTL,
531 "QUERYCTRL id=0x%x not implemented here",
532 vc->id);
533 ret = -EINVAL;
534 break;
535 }
536
537 pvr2_trace(PVR2_TRACE_V4LIOCTL,
538 "QUERYCTRL id=0x%x mapping name=%s (%s)",
539 vc->id,pvr2_ctrl_get_name(cptr),
540 pvr2_ctrl_get_desc(cptr));
541 strlcpy(vc->name,pvr2_ctrl_get_desc(cptr),sizeof(vc->name));
542 vc->flags = pvr2_ctrl_get_v4lflags(cptr);
543 vc->default_value = pvr2_ctrl_get_def(cptr);
544 switch (pvr2_ctrl_get_type(cptr)) {
545 case pvr2_ctl_enum:
546 vc->type = V4L2_CTRL_TYPE_MENU;
547 vc->minimum = 0;
548 vc->maximum = pvr2_ctrl_get_cnt(cptr) - 1;
549 vc->step = 1;
550 break;
551 case pvr2_ctl_bool:
552 vc->type = V4L2_CTRL_TYPE_BOOLEAN;
553 vc->minimum = 0;
554 vc->maximum = 1;
555 vc->step = 1;
556 break;
557 case pvr2_ctl_int:
558 vc->type = V4L2_CTRL_TYPE_INTEGER;
559 vc->minimum = pvr2_ctrl_get_min(cptr);
560 vc->maximum = pvr2_ctrl_get_max(cptr);
561 vc->step = 1;
562 break;
563 default:
564 pvr2_trace(PVR2_TRACE_V4LIOCTL,
565 "QUERYCTRL id=0x%x name=%s not mappable",
566 vc->id,pvr2_ctrl_get_name(cptr));
567 ret = -EINVAL;
568 break;
569 }
570 break;
571 }
572
573 case VIDIOC_QUERYMENU:
574 {
575 struct v4l2_querymenu *vm = (struct v4l2_querymenu *)arg;
576 unsigned int cnt = 0;
577 ret = pvr2_ctrl_get_valname(pvr2_hdw_get_ctrl_v4l(hdw,vm->id),
578 vm->index,
579 vm->name,sizeof(vm->name)-1,
580 &cnt);
581 vm->name[cnt] = 0;
582 break;
583 }
584
585 case VIDIOC_G_CTRL:
586 {
587 struct v4l2_control *vc = (struct v4l2_control *)arg;
588 int val = 0;
589 ret = pvr2_ctrl_get_value(pvr2_hdw_get_ctrl_v4l(hdw,vc->id),
590 &val);
591 vc->value = val;
592 break;
593 }
594
595 case VIDIOC_S_CTRL:
596 {
597 struct v4l2_control *vc = (struct v4l2_control *)arg;
598 ret = pvr2_ctrl_set_value(pvr2_hdw_get_ctrl_v4l(hdw,vc->id),
599 vc->value);
600 break;
601 }
602
603 case VIDIOC_G_EXT_CTRLS:
604 {
605 struct v4l2_ext_controls *ctls =
606 (struct v4l2_ext_controls *)arg;
607 struct v4l2_ext_control *ctrl;
608 unsigned int idx;
609 int val;
610 for (idx = 0; idx < ctls->count; idx++) {
611 ctrl = ctls->controls + idx;
612 ret = pvr2_ctrl_get_value(
613 pvr2_hdw_get_ctrl_v4l(hdw,ctrl->id),&val);
614 if (ret) {
615 ctls->error_idx = idx;
616 break;
617 }
618 /* Ensure that if read as a 64 bit value, the user
619 will still get a hopefully sane value */
620 ctrl->value64 = 0;
621 ctrl->value = val;
622 }
623 break;
624 }
625
626 case VIDIOC_S_EXT_CTRLS:
627 {
628 struct v4l2_ext_controls *ctls =
629 (struct v4l2_ext_controls *)arg;
630 struct v4l2_ext_control *ctrl;
631 unsigned int idx;
632 for (idx = 0; idx < ctls->count; idx++) {
633 ctrl = ctls->controls + idx;
634 ret = pvr2_ctrl_set_value(
635 pvr2_hdw_get_ctrl_v4l(hdw,ctrl->id),
636 ctrl->value);
637 if (ret) {
638 ctls->error_idx = idx;
639 break;
640 }
641 }
642 break;
643 }
644
645 case VIDIOC_TRY_EXT_CTRLS:
646 {
647 struct v4l2_ext_controls *ctls =
648 (struct v4l2_ext_controls *)arg;
649 struct v4l2_ext_control *ctrl;
650 struct pvr2_ctrl *pctl;
651 unsigned int idx;
652 /* For the moment just validate that the requested control
653 actually exists. */
654 for (idx = 0; idx < ctls->count; idx++) {
655 ctrl = ctls->controls + idx;
656 pctl = pvr2_hdw_get_ctrl_v4l(hdw,ctrl->id);
657 if (!pctl) {
658 ret = -EINVAL;
659 ctls->error_idx = idx;
660 break;
661 }
662 }
663 break;
664 }
665
666 case VIDIOC_LOG_STATUS:
667 {
668 pvr2_hdw_trigger_module_log(hdw);
669 ret = 0;
670 break;
671 }
672
673 default :
674 ret = v4l_compat_translate_ioctl(inode,file,cmd,
675 arg,pvr2_v4l2_do_ioctl);
676 }
677
678 pvr2_hdw_commit_ctl(hdw);
679
680 if (ret < 0) {
681 if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL) {
682 pvr2_trace(PVR2_TRACE_V4LIOCTL,
683 "pvr2_v4l2_do_ioctl failure, ret=%d",ret);
684 } else {
685 if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL) {
686 pvr2_trace(PVR2_TRACE_V4LIOCTL,
687 "pvr2_v4l2_do_ioctl failure, ret=%d"
688 " command was:",ret);
689 v4l_print_ioctl(pvr2_hdw_get_driver_name(hdw),
690 cmd);
691 }
692 }
693 } else {
694 pvr2_trace(PVR2_TRACE_V4LIOCTL,
695 "pvr2_v4l2_do_ioctl complete, ret=%d (0x%x)",
696 ret,ret);
697 }
698 return ret;
699}
700
701
/* Unregister one V4L device node and remove its entry from the minor
   number lookup table.  Called during overall interface teardown. */
static void pvr2_v4l2_dev_destroy(struct pvr2_v4l2_dev *dip)
{
	pvr2_trace(PVR2_TRACE_INIT,
		   "unregistering device video%d [%s]",
		   dip->vdev->minor,pvr2_config_get_name(dip->config));
	/* Clear the devices[] slot under the lock first so a racing
	   open() can no longer find this context. */
	if (dip->ctxt_idx >= 0) {
		mutex_lock(&device_lock);
		devices[dip->ctxt_idx] = NULL;
		dip->ctxt_idx = -1;
		mutex_unlock(&device_lock);
	}
	video_unregister_device(dip->vdev);
}
715
716
/* Final teardown of the interface instance: withdraw our minor number,
   destroy the device node, release our channel claim and free vp.
   Caller must already hold the context lock (hence "no_lock"). */
static void pvr2_v4l2_destroy_no_lock(struct pvr2_v4l2 *vp)
{
	pvr2_hdw_v4l_store_minor_number(vp->channel.mc_head->hdw,-1);
	pvr2_v4l2_dev_destroy(&vp->video_dev);

	pvr2_trace(PVR2_TRACE_STRUCT,"Destroying pvr2_v4l2 id=%p",vp);
	pvr2_channel_done(&vp->channel);
	kfree(vp);
}
726
727
728void pvr2_v4l2_internal_check(struct pvr2_channel *chp)
729{
730 struct pvr2_v4l2 *vp;
731 vp = container_of(chp,struct pvr2_v4l2,channel);
732 if (!vp->channel.mc_head->disconnect_flag) return;
733 if (vp->vfirst) return;
734 pvr2_v4l2_destroy_no_lock(vp);
735}
736
737
738int pvr2_v4l2_ioctl(struct inode *inode, struct file *file,
739 unsigned int cmd, unsigned long arg)
740{
741
742/* Temporary hack : use ivtv api until a v4l2 one is available. */
743#define IVTV_IOC_G_CODEC 0xFFEE7703
744#define IVTV_IOC_S_CODEC 0xFFEE7704
745 if (cmd == IVTV_IOC_G_CODEC || cmd == IVTV_IOC_S_CODEC) return 0;
746 return video_usercopy(inode, file, cmd, arg, pvr2_v4l2_do_ioctl);
747}
748
749
/* File release handler: stop any active stream owned by this handle,
   unlink the handle from the open-handles list (under the context
   lock), and tear down the whole interface if the hardware is gone and
   this was the last open handle. */
int pvr2_v4l2_release(struct inode *inode, struct file *file)
{
	struct pvr2_v4l2_fh *fhp = file->private_data;
	struct pvr2_v4l2 *vp = fhp->vhead;
	struct pvr2_context *mp = fhp->vhead->channel.mc_head;

	pvr2_trace(PVR2_TRACE_OPEN_CLOSE,"pvr2_v4l2_release");

	/* If this handle had claimed the stream, stop it and destroy the
	   reader before letting go. */
	if (fhp->rhp) {
		struct pvr2_stream *sp;
		struct pvr2_hdw *hdw;
		hdw = fhp->channel.mc_head->hdw;
		pvr2_hdw_set_streaming(hdw,0);
		sp = pvr2_ioread_get_stream(fhp->rhp);
		if (sp) pvr2_stream_set_callback(sp,0,0);
		pvr2_ioread_destroy(fhp->rhp);
		fhp->rhp = 0;
	}
	v4l2_prio_close(&vp->prio, &fhp->prio);
	file->private_data = NULL;

	pvr2_context_enter(mp); do {
		/* Unlink fhp from the doubly linked handle list */
		if (fhp->vnext) {
			fhp->vnext->vprev = fhp->vprev;
		} else {
			vp->vlast = fhp->vprev;
		}
		if (fhp->vprev) {
			fhp->vprev->vnext = fhp->vnext;
		} else {
			vp->vfirst = fhp->vnext;
		}
		fhp->vnext = 0;
		fhp->vprev = 0;
		fhp->vhead = 0;
		pvr2_channel_done(&fhp->channel);
		pvr2_trace(PVR2_TRACE_STRUCT,
			   "Destroying pvr_v4l2_fh id=%p",fhp);
		kfree(fhp);
		/* Last handle gone after a disconnect: finish teardown */
		if (vp->channel.mc_head->disconnect_flag && !vp->vfirst) {
			pvr2_v4l2_destroy_no_lock(vp);
		}
	} while (0); pvr2_context_exit(mp);
	return 0;
}
795
796
/* File open handler: locate our context via the minor-number lookup
   table, allocate a file-handle structure and link it into the
   open-handles list.  Returns 0 or a negative errno. */
int pvr2_v4l2_open(struct inode *inode, struct file *file)
{
	struct pvr2_v4l2_dev *dip = 0; /* Our own context pointer */
	struct pvr2_v4l2_fh *fhp;
	struct pvr2_v4l2 *vp;
	struct pvr2_hdw *hdw;

	mutex_lock(&device_lock);
	/* MCI 7-Jun-2006 Even though we're just doing what amounts to an
	   atomic read of the device mapping array here, we still need the
	   mutex.  The problem is that there is a tiny race possible when
	   we register the device.  We can't update the device mapping
	   array until after the device has been registered, owing to the
	   fact that we can't know the minor device number until after the
	   registration succeeds.  And if another thread tries to open the
	   device in the window of time after registration but before the
	   map is updated, then it will get back an erroneous null pointer
	   and the open will result in a spurious failure.  The only way to
	   prevent that is to (a) be inside the mutex here before we access
	   the array, and (b) cover the entire registration process later
	   on with this same mutex.  Thus if we get inside the mutex here,
	   then we can be assured that the registration process actually
	   completed correctly.  This is an unhappy complication from the
	   use of global data in a driver that lives in a preemptible
	   environment.  It sure would be nice if the video device itself
	   had a means for storing and retrieving a local context pointer.
	   Oh wait.  It did.  But now it's gone.  Silly me. */
	{
		unsigned int midx = iminor(file->f_dentry->d_inode);
		if (midx < sizeof(devices)/sizeof(devices[0])) {
			dip = devices[midx];
		}
	}
	mutex_unlock(&device_lock);

	if (!dip) return -ENODEV; /* Should be impossible but I'm paranoid */

	vp = dip->v4lp;
	hdw = vp->channel.hdw;

	pvr2_trace(PVR2_TRACE_OPEN_CLOSE,"pvr2_v4l2_open");

	if (!pvr2_hdw_dev_ok(hdw)) {
		pvr2_trace(PVR2_TRACE_OPEN_CLOSE,
			   "pvr2_v4l2_open: hardware not ready");
		return -EIO;
	}

	/* NOTE(review): kmalloc+memset could be kzalloc() here */
	fhp = kmalloc(sizeof(*fhp),GFP_KERNEL);
	if (!fhp) {
		return -ENOMEM;
	}
	memset(fhp,0,sizeof(*fhp));

	init_waitqueue_head(&fhp->wait_data);
	fhp->dev_info = dip;

	/* Link the new handle onto the tail of the open-handles list,
	   inside the context lock. */
	pvr2_context_enter(vp->channel.mc_head); do {
		pvr2_trace(PVR2_TRACE_STRUCT,"Creating pvr_v4l2_fh id=%p",fhp);
		pvr2_channel_init(&fhp->channel,vp->channel.mc_head);
		fhp->vnext = 0;
		fhp->vprev = vp->vlast;
		if (vp->vlast) {
			vp->vlast->vnext = fhp;
		} else {
			vp->vfirst = fhp;
		}
		vp->vlast = fhp;
		fhp->vhead = vp;
	} while (0); pvr2_context_exit(vp->channel.mc_head);

	fhp->file = file;
	file->private_data = fhp;
	v4l2_prio_open(&vp->prio,&fhp->prio);

	/* If firmware extraction mode is on, read() serves firmware
	   instead of stream data for this handle. */
	fhp->fw_mode_flag = pvr2_hdw_cpufw_get_enabled(hdw);

	return 0;
}
876
877
878static void pvr2_v4l2_notify(struct pvr2_v4l2_fh *fhp)
879{
880 wake_up(&fhp->wait_data);
881}
882
/* Lazily set up streaming for a file handle on its first read()/poll():
   claim the stream, create the reader, hook up the wakeup callback and
   start the hardware.  Returns 0 on success or a negative errno. */
static int pvr2_v4l2_iosetup(struct pvr2_v4l2_fh *fh)
{
	int ret;
	struct pvr2_stream *sp;
	struct pvr2_hdw *hdw;
	if (fh->rhp) return 0;	/* already set up */

	/* First read() attempt.  Try to claim the stream and start
	   it... */
	if ((ret = pvr2_channel_claim_stream(&fh->channel,
					     fh->dev_info->stream)) != 0) {
		/* Someone else must already have it */
		return ret;
	}

	fh->rhp = pvr2_channel_create_mpeg_stream(fh->dev_info->stream);
	if (!fh->rhp) {
		/* Give the stream claim back on failure */
		pvr2_channel_claim_stream(&fh->channel,0);
		return -ENOMEM;
	}

	hdw = fh->channel.mc_head->hdw;
	sp = fh->dev_info->stream->stream;
	pvr2_stream_set_callback(sp,(pvr2_stream_callback)pvr2_v4l2_notify,fh);
	pvr2_hdw_set_stream_type(hdw,fh->dev_info->config);
	pvr2_hdw_set_streaming(hdw,!0);
	ret = pvr2_ioread_set_enabled(fh->rhp,!0);

	return ret;
}
913
914
/* read() handler.  In firmware extraction mode, copies out device CPU
   firmware via a bounce buffer; otherwise streams MPEG data through the
   ioread layer, blocking (unless O_NONBLOCK) until data is available. */
static ssize_t pvr2_v4l2_read(struct file *file,
			      char __user *buff, size_t count, loff_t *ppos)
{
	struct pvr2_v4l2_fh *fh = file->private_data;
	int ret;

	if (fh->fw_mode_flag) {
		struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
		char *tbuf;
		int c1,c2;
		int tcnt = 0;
		/* NOTE(review): loff_t narrowed to unsigned int here —
		   assumes firmware offsets fit in 32 bits; confirm. */
		unsigned int offs = *ppos;

		/* Bounce buffer: firmware is fetched in <=PAGE_SIZE
		   chunks and copied to userspace */
		tbuf = kmalloc(PAGE_SIZE,GFP_KERNEL);
		if (!tbuf) return -ENOMEM;

		while (count) {
			c1 = count;
			if (c1 > PAGE_SIZE) c1 = PAGE_SIZE;
			c2 = pvr2_hdw_cpufw_get(hdw,offs,tbuf,c1);
			if (c2 < 0) {
				tcnt = c2;
				break;
			}
			if (!c2) break;	/* end of firmware image */
			if (copy_to_user(buff,tbuf,c2)) {
				tcnt = -EFAULT;
				break;
			}
			offs += c2;
			tcnt += c2;
			buff += c2;
			count -= c2;
			*ppos += c2;
		}
		kfree(tbuf);
		return tcnt;
	}

	/* Normal streaming path: lazily set up the reader */
	if (!fh->rhp) {
		ret = pvr2_v4l2_iosetup(fh);
		if (ret) {
			return ret;
		}
	}

	for (;;) {
		ret = pvr2_ioread_read(fh->rhp,buff,count);
		if (ret >= 0) break;
		if (ret != -EAGAIN) break;	/* real error */
		if (file->f_flags & O_NONBLOCK) break;
		/* Doing blocking I/O.  Wait here. */
		ret = wait_event_interruptible(
			fh->wait_data,
			pvr2_ioread_avail(fh->rhp) >= 0);
		if (ret < 0) break;	/* interrupted by a signal */
	}

	return ret;
}
975
976
/* poll() handler: firmware-mode handles are always readable; streaming
   handles are readable once the ioread layer reports data available. */
static unsigned int pvr2_v4l2_poll(struct file *file, poll_table *wait)
{
	unsigned int mask = 0;
	struct pvr2_v4l2_fh *fh = file->private_data;
	int ret;

	if (fh->fw_mode_flag) {
		mask |= POLLIN | POLLRDNORM;
		return mask;
	}

	/* Same lazy stream setup as read() */
	if (!fh->rhp) {
		ret = pvr2_v4l2_iosetup(fh);
		if (ret) return POLLERR;
	}

	poll_wait(file,&fh->wait_data,wait);

	if (pvr2_ioread_avail(fh->rhp) >= 0) {
		mask |= POLLIN | POLLRDNORM;
	}

	return mask;
}
1001
1002
/* File operations installed on every V4L device node we register. */
static struct file_operations vdev_fops = {
	.owner      = THIS_MODULE,
	.open       = pvr2_v4l2_open,
	.release    = pvr2_v4l2_release,
	.read       = pvr2_v4l2_read,
	.ioctl      = pvr2_v4l2_ioctl,
	.llseek     = no_llseek,
	.poll       = pvr2_v4l2_poll,
};
1012
1013
#define VID_HARDWARE_PVRUSB2	38  /* FIXME : need a good value */

/* Template copied into each freshly allocated video_device before
   registration (see pvr2_v4l2_dev_init()). */
static struct video_device vdev_template = {
	.owner      = THIS_MODULE,
	.type       = VID_TYPE_CAPTURE | VID_TYPE_TUNER,
	.type2      = (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VBI_CAPTURE
		       | V4L2_CAP_TUNER | V4L2_CAP_AUDIO
		       | V4L2_CAP_READWRITE),
	.hardware   = VID_HARDWARE_PVRUSB2,
	.fops       = &vdev_fops,
};
1025
1026
/* Set up and register one V4L device node for the given configuration.
   On any failure the function logs and returns without registering; the
   whole registration and devices[] update is covered by device_lock to
   close the race described in pvr2_v4l2_open(). */
static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
			       struct pvr2_v4l2 *vp,
			       enum pvr2_config cfg)
{
	int mindevnum;
	int unit_number;
	int v4l_type;
	dip->v4lp = vp;
	dip->config = cfg;


	switch (cfg) {
	case pvr2_config_mpeg:
		v4l_type = VFL_TYPE_GRABBER;
		dip->stream = &vp->channel.mc_head->video_stream;
		break;
	case pvr2_config_vbi:
		v4l_type = VFL_TYPE_VBI;
		break;
	case pvr2_config_radio:
		v4l_type = VFL_TYPE_RADIO;
		break;
	default:
		/* Bail out (this should be impossible) */
		err("Failed to set up pvrusb2 v4l dev"
		    " due to unrecognized config");
		return;
	}

	/* Only the mpeg case above sets a stream; vbi/radio fall out
	   here until they are implemented. */
	if (!dip->stream) {
		err("Failed to set up pvrusb2 v4l dev"
		    " due to missing stream instance");
		return;
	}

	dip->vdev = video_device_alloc();
	if (!dip->vdev) {
		err("Alloc of pvrusb2 v4l video device failed");
		return;
	}

	memcpy(dip->vdev,&vdev_template,sizeof(vdev_template));
	dip->vdev->release = video_device_release;
	mutex_lock(&device_lock);

	/* Honor a module-parameter minor request for this unit, falling
	   back to any free minor if that one is taken. */
	mindevnum = -1;
	unit_number = pvr2_hdw_get_unit_number(vp->channel.mc_head->hdw);
	if ((unit_number >= 0) && (unit_number < PVR_NUM)) {
		mindevnum = video_nr[unit_number];
	}
	/* NOTE(review): if both registration attempts fail we log but
	   still fall through and publish dip->vdev->minor below —
	   confirm that is intended. */
	if ((video_register_device(dip->vdev, v4l_type, mindevnum) < 0) &&
	    (video_register_device(dip->vdev, v4l_type, -1) < 0)) {
		err("Failed to register pvrusb2 v4l video device");
	} else {
		pvr2_trace(PVR2_TRACE_INIT,
			   "registered device video%d [%s]",
			   dip->vdev->minor,pvr2_config_get_name(dip->config));
	}

	/* Publish the minor -> context mapping so open() can find us */
	if ((dip->vdev->minor < sizeof(devices)/sizeof(devices[0])) &&
	    (devices[dip->vdev->minor] == NULL)) {
		dip->ctxt_idx = dip->vdev->minor;
		devices[dip->ctxt_idx] = dip;
	}
	mutex_unlock(&device_lock);

	pvr2_hdw_v4l_store_minor_number(vp->channel.mc_head->hdw,
					dip->vdev->minor);
}
1096
1097
1098struct pvr2_v4l2 *pvr2_v4l2_create(struct pvr2_context *mnp)
1099{
1100 struct pvr2_v4l2 *vp;
1101
1102 vp = kmalloc(sizeof(*vp),GFP_KERNEL);
1103 if (!vp) return vp;
1104 memset(vp,0,sizeof(*vp));
1105 vp->video_dev.ctxt_idx = -1;
1106 pvr2_channel_init(&vp->channel,mnp);
1107 pvr2_trace(PVR2_TRACE_STRUCT,"Creating pvr2_v4l2 id=%p",vp);
1108
1109 vp->channel.check_func = pvr2_v4l2_internal_check;
1110
1111 /* register streams */
1112 pvr2_v4l2_dev_init(&vp->video_dev,vp,pvr2_config_mpeg);
1113
1114
1115 return vp;
1116}
1117
1118/*
1119 Stuff for Emacs to see, in order to encourage consistent editing style:
1120 *** Local Variables: ***
1121 *** mode: c ***
1122 *** fill-column: 75 ***
1123 *** tab-width: 8 ***
1124 *** c-basic-offset: 8 ***
1125 *** End: ***
1126 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.h b/drivers/media/video/pvrusb2/pvrusb2-v4l2.h
new file mode 100644
index 0000000000..9a995e2d22
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.h
@@ -0,0 +1,40 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
#ifndef __PVRUSB2_V4L2_H
#define __PVRUSB2_V4L2_H

#include "pvrusb2-context.h"

struct pvr2_v4l2;

/* Create the V4L2 interface layer for the given driver context and
   register its device node(s); returns NULL on allocation failure. */
struct pvr2_v4l2 *pvr2_v4l2_create(struct pvr2_context *);

#endif /* __PVRUSB2_V4L2_H */
31
32/*
33 Stuff for Emacs to see, in order to encourage consistent editing style:
34 *** Local Variables: ***
35 *** mode: c ***
36 *** fill-column: 75 ***
37 *** tab-width: 8 ***
38 *** c-basic-offset: 8 ***
39 *** End: ***
40 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c b/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c
new file mode 100644
index 0000000000..e4ec7f2519
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c
@@ -0,0 +1,253 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23/*
24
25 This source file is specifically designed to interface with the
26 saa711x support that is available in the v4l available starting
27 with linux 2.6.15.
28
29*/
30
31#include "pvrusb2-video-v4l.h"
32#include "pvrusb2-i2c-cmd-v4l2.h"
33
34
35#include "pvrusb2-hdw-internal.h"
36#include "pvrusb2-debug.h"
37#include <linux/videodev2.h>
38#include <media/v4l2-common.h>
39#include <media/saa7115.h>
40#include <linux/errno.h>
41#include <linux/slab.h>
42
43struct pvr2_v4l_decoder {
44 struct pvr2_i2c_handler handler;
45 struct pvr2_decoder_ctrl ctrl;
46 struct pvr2_i2c_client *client;
47 struct pvr2_hdw *hdw;
48 unsigned long stale_mask;
49};
50
51
52static void set_input(struct pvr2_v4l_decoder *ctxt)
53{
54 struct pvr2_hdw *hdw = ctxt->hdw;
55 struct v4l2_routing route;
56
57 pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 set_input(%d)",hdw->input_val);
58 switch(hdw->input_val) {
59 case PVR2_CVAL_INPUT_TV:
60 route.input = SAA7115_COMPOSITE4;
61 break;
62 case PVR2_CVAL_INPUT_COMPOSITE:
63 route.input = SAA7115_COMPOSITE5;
64 break;
65 case PVR2_CVAL_INPUT_SVIDEO:
66 route.input = SAA7115_SVIDEO2;
67 break;
68 case PVR2_CVAL_INPUT_RADIO:
69 // ????? No idea yet what to do here
70 default:
71 return;
72 }
73 route.output = 0;
74 pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_S_VIDEO_ROUTING,&route);
75}
76
77
78static int check_input(struct pvr2_v4l_decoder *ctxt)
79{
80 struct pvr2_hdw *hdw = ctxt->hdw;
81 return hdw->input_dirty != 0;
82}
83
84
85static void set_audio(struct pvr2_v4l_decoder *ctxt)
86{
87 u32 val;
88 struct pvr2_hdw *hdw = ctxt->hdw;
89
90 pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 set_audio %d",
91 hdw->srate_val);
92 switch (hdw->srate_val) {
93 default:
94 case V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000:
95 val = 48000;
96 break;
97 case V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100:
98 val = 44100;
99 break;
100 case V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000:
101 val = 32000;
102 break;
103 }
104 pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_AUDIO_CLOCK_FREQ,&val);
105}
106
107
108static int check_audio(struct pvr2_v4l_decoder *ctxt)
109{
110 struct pvr2_hdw *hdw = ctxt->hdw;
111 return hdw->srate_dirty != 0;
112}
113
114
115struct pvr2_v4l_decoder_ops {
116 void (*update)(struct pvr2_v4l_decoder *);
117 int (*check)(struct pvr2_v4l_decoder *);
118};
119
120
121static const struct pvr2_v4l_decoder_ops decoder_ops[] = {
122 { .update = set_input, .check = check_input},
123 { .update = set_audio, .check = check_audio},
124};
125
126
127static void decoder_detach(struct pvr2_v4l_decoder *ctxt)
128{
129 ctxt->client->handler = 0;
130 ctxt->hdw->decoder_ctrl = 0;
131 kfree(ctxt);
132}
133
134
135static int decoder_check(struct pvr2_v4l_decoder *ctxt)
136{
137 unsigned long msk;
138 unsigned int idx;
139
140 for (idx = 0; idx < sizeof(decoder_ops)/sizeof(decoder_ops[0]);
141 idx++) {
142 msk = 1 << idx;
143 if (ctxt->stale_mask & msk) continue;
144 if (decoder_ops[idx].check(ctxt)) {
145 ctxt->stale_mask |= msk;
146 }
147 }
148 return ctxt->stale_mask != 0;
149}
150
151
152static void decoder_update(struct pvr2_v4l_decoder *ctxt)
153{
154 unsigned long msk;
155 unsigned int idx;
156
157 for (idx = 0; idx < sizeof(decoder_ops)/sizeof(decoder_ops[0]);
158 idx++) {
159 msk = 1 << idx;
160 if (!(ctxt->stale_mask & msk)) continue;
161 ctxt->stale_mask &= ~msk;
162 decoder_ops[idx].update(ctxt);
163 }
164}
165
166
167static int decoder_detect(struct pvr2_i2c_client *cp)
168{
169 /* Attempt to query the decoder - let's see if it will answer */
170 struct v4l2_tuner vt;
171 int ret;
172
173 memset(&vt,0,sizeof(vt));
174 ret = pvr2_i2c_client_cmd(cp,VIDIOC_G_TUNER,&vt);
175 return ret == 0; /* Return true if it answered */
176}
177
178
179static void decoder_enable(struct pvr2_v4l_decoder *ctxt,int fl)
180{
181 pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 decoder_enable(%d)",fl);
182 pvr2_v4l2_cmd_stream(ctxt->client,fl);
183}
184
185
186static int decoder_is_tuned(struct pvr2_v4l_decoder *ctxt)
187{
188 struct v4l2_tuner vt;
189 int ret;
190
191 memset(&vt,0,sizeof(vt));
192 ret = pvr2_i2c_client_cmd(ctxt->client,VIDIOC_G_TUNER,&vt);
193 if (ret < 0) return -EINVAL;
194 return vt.signal ? 1 : 0;
195}
196
197
198static unsigned int decoder_describe(struct pvr2_v4l_decoder *ctxt,char *buf,unsigned int cnt)
199{
200 return scnprintf(buf,cnt,"handler: pvrusb2-video-v4l");
201}
202
203
204const static struct pvr2_i2c_handler_functions hfuncs = {
205 .detach = (void (*)(void *))decoder_detach,
206 .check = (int (*)(void *))decoder_check,
207 .update = (void (*)(void *))decoder_update,
208 .describe = (unsigned int (*)(void *,char *,unsigned int))decoder_describe,
209};
210
211
212int pvr2_i2c_decoder_v4l_setup(struct pvr2_hdw *hdw,
213 struct pvr2_i2c_client *cp)
214{
215 struct pvr2_v4l_decoder *ctxt;
216
217 if (hdw->decoder_ctrl) return 0;
218 if (cp->handler) return 0;
219 if (!decoder_detect(cp)) return 0;
220
221 ctxt = kmalloc(sizeof(*ctxt),GFP_KERNEL);
222 if (!ctxt) return 0;
223 memset(ctxt,0,sizeof(*ctxt));
224
225 ctxt->handler.func_data = ctxt;
226 ctxt->handler.func_table = &hfuncs;
227 ctxt->ctrl.ctxt = ctxt;
228 ctxt->ctrl.detach = (void (*)(void *))decoder_detach;
229 ctxt->ctrl.enable = (void (*)(void *,int))decoder_enable;
230 ctxt->ctrl.tuned = (int (*)(void *))decoder_is_tuned;
231 ctxt->client = cp;
232 ctxt->hdw = hdw;
233 ctxt->stale_mask = (1 << (sizeof(decoder_ops)/
234 sizeof(decoder_ops[0]))) - 1;
235 hdw->decoder_ctrl = &ctxt->ctrl;
236 cp->handler = &ctxt->handler;
237 pvr2_trace(PVR2_TRACE_CHIPS,"i2c 0x%x saa711x V4L2 handler set up",
238 cp->client->addr);
239 return !0;
240}
241
242
243
244
245/*
246 Stuff for Emacs to see, in order to encourage consistent editing style:
247 *** Local Variables: ***
248 *** mode: c ***
249 *** fill-column: 70 ***
250 *** tab-width: 8 ***
251 *** c-basic-offset: 8 ***
252 *** End: ***
253 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-video-v4l.h b/drivers/media/video/pvrusb2/pvrusb2-video-v4l.h
new file mode 100644
index 0000000000..2b917fda02
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-video-v4l.h
@@ -0,0 +1,52 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#ifndef __PVRUSB2_VIDEO_V4L_H
24#define __PVRUSB2_VIDEO_V4L_H
25
26/*
27
28 This module connects the pvrusb2 driver to the I2C chip level
29 driver which handles device video processing. This interface is
30 used internally by the driver; higher level code should only
31 interact through the interface provided by pvrusb2-hdw.h.
32
33*/
34
35
36
37#include "pvrusb2-i2c-core.h"
38
39int pvr2_i2c_decoder_v4l_setup(struct pvr2_hdw *,struct pvr2_i2c_client *);
40
41
42#endif /* __PVRUSB2_VIDEO_V4L_H */
43
44/*
45 Stuff for Emacs to see, in order to encourage consistent editing style:
46 *** Local Variables: ***
47 *** mode: c ***
48 *** fill-column: 70 ***
49 *** tab-width: 8 ***
50 *** c-basic-offset: 8 ***
51 *** End: ***
52 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-wm8775.c b/drivers/media/video/pvrusb2/pvrusb2-wm8775.c
new file mode 100644
index 0000000000..fcad346e39
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-wm8775.c
@@ -0,0 +1,170 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23/*
24
25 This source file is specifically designed to interface with the
26 wm8775.
27
28*/
29
30#include "pvrusb2-wm8775.h"
31#include "pvrusb2-i2c-cmd-v4l2.h"
32
33
34#include "pvrusb2-hdw-internal.h"
35#include "pvrusb2-debug.h"
36#include <linux/videodev2.h>
37#include <media/v4l2-common.h>
38#include <linux/errno.h>
39#include <linux/slab.h>
40
41struct pvr2_v4l_wm8775 {
42 struct pvr2_i2c_handler handler;
43 struct pvr2_i2c_client *client;
44 struct pvr2_hdw *hdw;
45 unsigned long stale_mask;
46};
47
48
49static void set_input(struct pvr2_v4l_wm8775 *ctxt)
50{
51 struct v4l2_routing route;
52 struct pvr2_hdw *hdw = ctxt->hdw;
53 int msk = 0;
54
55 memset(&route,0,sizeof(route));
56
57 pvr2_trace(PVR2_TRACE_CHIPS,"i2c wm8775 set_input(val=%d msk=0x%x)",
58 hdw->input_val,msk);
59
60 // Always point to input #1 no matter what
61 route.input = 2;
62 pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_S_AUDIO_ROUTING,&route);
63}
64
65static int check_input(struct pvr2_v4l_wm8775 *ctxt)
66{
67 struct pvr2_hdw *hdw = ctxt->hdw;
68 return hdw->input_dirty != 0;
69}
70
71
72struct pvr2_v4l_wm8775_ops {
73 void (*update)(struct pvr2_v4l_wm8775 *);
74 int (*check)(struct pvr2_v4l_wm8775 *);
75};
76
77
78static const struct pvr2_v4l_wm8775_ops wm8775_ops[] = {
79 { .update = set_input, .check = check_input},
80};
81
82
83static unsigned int wm8775_describe(struct pvr2_v4l_wm8775 *ctxt,
84 char *buf,unsigned int cnt)
85{
86 return scnprintf(buf,cnt,"handler: pvrusb2-wm8775");
87}
88
89
90static void wm8775_detach(struct pvr2_v4l_wm8775 *ctxt)
91{
92 ctxt->client->handler = 0;
93 kfree(ctxt);
94}
95
96
97static int wm8775_check(struct pvr2_v4l_wm8775 *ctxt)
98{
99 unsigned long msk;
100 unsigned int idx;
101
102 for (idx = 0; idx < sizeof(wm8775_ops)/sizeof(wm8775_ops[0]);
103 idx++) {
104 msk = 1 << idx;
105 if (ctxt->stale_mask & msk) continue;
106 if (wm8775_ops[idx].check(ctxt)) {
107 ctxt->stale_mask |= msk;
108 }
109 }
110 return ctxt->stale_mask != 0;
111}
112
113
114static void wm8775_update(struct pvr2_v4l_wm8775 *ctxt)
115{
116 unsigned long msk;
117 unsigned int idx;
118
119 for (idx = 0; idx < sizeof(wm8775_ops)/sizeof(wm8775_ops[0]);
120 idx++) {
121 msk = 1 << idx;
122 if (!(ctxt->stale_mask & msk)) continue;
123 ctxt->stale_mask &= ~msk;
124 wm8775_ops[idx].update(ctxt);
125 }
126}
127
128
129const static struct pvr2_i2c_handler_functions hfuncs = {
130 .detach = (void (*)(void *))wm8775_detach,
131 .check = (int (*)(void *))wm8775_check,
132 .update = (void (*)(void *))wm8775_update,
133 .describe = (unsigned int (*)(void *,char *,unsigned int))wm8775_describe,
134};
135
136
137int pvr2_i2c_wm8775_setup(struct pvr2_hdw *hdw,struct pvr2_i2c_client *cp)
138{
139 struct pvr2_v4l_wm8775 *ctxt;
140
141 if (cp->handler) return 0;
142
143 ctxt = kmalloc(sizeof(*ctxt),GFP_KERNEL);
144 if (!ctxt) return 0;
145 memset(ctxt,0,sizeof(*ctxt));
146
147 ctxt->handler.func_data = ctxt;
148 ctxt->handler.func_table = &hfuncs;
149 ctxt->client = cp;
150 ctxt->hdw = hdw;
151 ctxt->stale_mask = (1 << (sizeof(wm8775_ops)/
152 sizeof(wm8775_ops[0]))) - 1;
153 cp->handler = &ctxt->handler;
154 pvr2_trace(PVR2_TRACE_CHIPS,"i2c 0x%x wm8775 V4L2 handler set up",
155 cp->client->addr);
156 return !0;
157}
158
159
160
161
162/*
163 Stuff for Emacs to see, in order to encourage consistent editing style:
164 *** Local Variables: ***
165 *** mode: c ***
166 *** fill-column: 70 ***
167 *** tab-width: 8 ***
168 *** c-basic-offset: 8 ***
169 *** End: ***
170 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-wm8775.h b/drivers/media/video/pvrusb2/pvrusb2-wm8775.h
new file mode 100644
index 0000000000..8aaeff4e1e
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-wm8775.h
@@ -0,0 +1,53 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#ifndef __PVRUSB2_WM8775_H
24#define __PVRUSB2_WM8775_H
25
26/*
27
28 This module connects the pvrusb2 driver to the I2C chip level
29 driver which performs analog -> digital audio conversion for
30 external audio inputs. This interface is used internally by the
31 driver; higher level code should only interact through the
32 interface provided by pvrusb2-hdw.h.
33
34*/
35
36
37
38#include "pvrusb2-i2c-core.h"
39
40int pvr2_i2c_wm8775_setup(struct pvr2_hdw *,struct pvr2_i2c_client *);
41
42
43#endif /* __PVRUSB2_WM8775_H */
44
45/*
46 Stuff for Emacs to see, in order to encourage consistent editing style:
47 *** Local Variables: ***
48 *** mode: c ***
49 *** fill-column: 70 ***
50 *** tab-width: 8 ***
51 *** c-basic-offset: 8 ***
52 *** End: ***
53 */
diff --git a/drivers/media/video/pvrusb2/pvrusb2.h b/drivers/media/video/pvrusb2/pvrusb2.h
new file mode 100644
index 0000000000..074533e9c2
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2.h
@@ -0,0 +1,43 @@
1/*
2 *
3 * $Id$
4 *
5 * Copyright (C) 2005 Mike Isely <isely@pobox.com>
6 * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#ifndef __PVRUSB2_H
24#define __PVRUSB2_H
25
26/* Maximum number of pvrusb2 instances we can track at once. You
27 might want to increase this - however the driver operation will not
28 be impaired if it is too small. Instead additional units just
29 won't have an ID assigned and it might not be possible to specify
30 module paramters for those extra units. */
31#define PVR_NUM 20
32
33#endif /* __PVRUSB2_H */
34
35/*
36 Stuff for Emacs to see, in order to encourage consistent editing style:
37 *** Local Variables: ***
38 *** mode: c ***
39 *** fill-column: 70 ***
40 *** tab-width: 8 ***
41 *** c-basic-offset: 8 ***
42 *** End: ***
43 */
diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
index de7b9e6e93..afc8f352b8 100644
--- a/drivers/media/video/saa7134/saa6752hs.c
+++ b/drivers/media/video/saa7134/saa6752hs.c
@@ -432,10 +432,10 @@ static void saa6752hs_old_set_params(struct i2c_client* client,
432} 432}
433 433
434static int handle_ctrl(struct saa6752hs_mpeg_params *params, 434static int handle_ctrl(struct saa6752hs_mpeg_params *params,
435 struct v4l2_ext_control *ctrl, int cmd) 435 struct v4l2_ext_control *ctrl, unsigned int cmd)
436{ 436{
437 int old = 0, new; 437 int old = 0, new;
438 int set = cmd == VIDIOC_S_EXT_CTRLS; 438 int set = (cmd == VIDIOC_S_EXT_CTRLS);
439 439
440 new = ctrl->value; 440 new = ctrl->value;
441 switch (ctrl->id) { 441 switch (ctrl->id) {
diff --git a/drivers/media/video/stradis.c b/drivers/media/video/stradis.c
index 6be9c1131e..c18b31d992 100644
--- a/drivers/media/video/stradis.c
+++ b/drivers/media/video/stradis.c
@@ -2190,7 +2190,7 @@ static struct pci_driver stradis_driver = {
2190 .remove = __devexit_p(stradis_remove) 2190 .remove = __devexit_p(stradis_remove)
2191}; 2191};
2192 2192
2193int __init stradis_init(void) 2193static int __init stradis_init(void)
2194{ 2194{
2195 int retval; 2195 int retval;
2196 2196
@@ -2203,7 +2203,7 @@ int __init stradis_init(void)
2203 return retval; 2203 return retval;
2204} 2204}
2205 2205
2206void __exit stradis_exit(void) 2206static void __exit stradis_exit(void)
2207{ 2207{
2208 pci_unregister_driver(&stradis_driver); 2208 pci_unregister_driver(&stradis_driver);
2209 printk(KERN_INFO "stradis: module cleanup complete\n"); 2209 printk(KERN_INFO "stradis: module cleanup complete\n");
diff --git a/drivers/media/video/tda9887.c b/drivers/media/video/tda9887.c
index b6ae969563..2fadabf996 100644
--- a/drivers/media/video/tda9887.c
+++ b/drivers/media/video/tda9887.c
@@ -22,11 +22,11 @@
22*/ 22*/
23 23
24#define tda9887_info(fmt, arg...) do {\ 24#define tda9887_info(fmt, arg...) do {\
25 printk(KERN_INFO "%s %d-%04x (tda9887): " fmt, t->i2c.name, \ 25 printk(KERN_INFO "%s %d-%04x: " fmt, t->i2c.name, \
26 i2c_adapter_id(t->i2c.adapter), t->i2c.addr , ##arg); } while (0) 26 i2c_adapter_id(t->i2c.adapter), t->i2c.addr , ##arg); } while (0)
27#define tda9887_dbg(fmt, arg...) do {\ 27#define tda9887_dbg(fmt, arg...) do {\
28 if (tuner_debug) \ 28 if (tuner_debug) \
29 printk(KERN_INFO "%s %d-%04x (tda9887): " fmt, t->i2c.name, \ 29 printk(KERN_INFO "%s %d-%04x: " fmt, t->i2c.name, \
30 i2c_adapter_id(t->i2c.adapter), t->i2c.addr , ##arg); } while (0) 30 i2c_adapter_id(t->i2c.adapter), t->i2c.addr , ##arg); } while (0)
31 31
32 32
@@ -84,8 +84,7 @@ struct tvnorm {
84#define cAudioGain6 0x80 // bit c7 84#define cAudioGain6 0x80 // bit c7
85 85
86#define cTopMask 0x1f // bit c0:4 86#define cTopMask 0x1f // bit c0:4
87#define cTopPalSecamDefault 0x14 // bit c0:4 87#define cTopDefault 0x10 // bit c0:4
88#define cTopNtscRadioDefault 0x10 // bit c0:4
89 88
90//// third reg (e) 89//// third reg (e)
91#define cAudioIF_4_5 0x00 // bit e0:1 90#define cAudioIF_4_5 0x00 // bit e0:1
@@ -123,7 +122,7 @@ static struct tvnorm tvnorms[] = {
123 cQSS ), 122 cQSS ),
124 .c = ( cDeemphasisON | 123 .c = ( cDeemphasisON |
125 cDeemphasis50 | 124 cDeemphasis50 |
126 cTopPalSecamDefault), 125 cTopDefault),
127 .e = ( cGating_36 | 126 .e = ( cGating_36 |
128 cAudioIF_5_5 | 127 cAudioIF_5_5 |
129 cVideoIF_38_90 ), 128 cVideoIF_38_90 ),
@@ -134,7 +133,7 @@ static struct tvnorm tvnorms[] = {
134 cQSS ), 133 cQSS ),
135 .c = ( cDeemphasisON | 134 .c = ( cDeemphasisON |
136 cDeemphasis50 | 135 cDeemphasis50 |
137 cTopPalSecamDefault), 136 cTopDefault),
138 .e = ( cGating_36 | 137 .e = ( cGating_36 |
139 cAudioIF_6_0 | 138 cAudioIF_6_0 |
140 cVideoIF_38_90 ), 139 cVideoIF_38_90 ),
@@ -145,7 +144,7 @@ static struct tvnorm tvnorms[] = {
145 cQSS ), 144 cQSS ),
146 .c = ( cDeemphasisON | 145 .c = ( cDeemphasisON |
147 cDeemphasis50 | 146 cDeemphasis50 |
148 cTopPalSecamDefault), 147 cTopDefault),
149 .e = ( cGating_36 | 148 .e = ( cGating_36 |
150 cAudioIF_6_5 | 149 cAudioIF_6_5 |
151 cVideoIF_38_90 ), 150 cVideoIF_38_90 ),
@@ -156,7 +155,7 @@ static struct tvnorm tvnorms[] = {
156 cQSS ), 155 cQSS ),
157 .c = ( cDeemphasisON | 156 .c = ( cDeemphasisON |
158 cDeemphasis75 | 157 cDeemphasis75 |
159 cTopNtscRadioDefault), 158 cTopDefault),
160 .e = ( cGating_36 | 159 .e = ( cGating_36 |
161 cAudioIF_4_5 | 160 cAudioIF_4_5 |
162 cVideoIF_45_75 ), 161 cVideoIF_45_75 ),
@@ -165,7 +164,7 @@ static struct tvnorm tvnorms[] = {
165 .name = "SECAM-BGH", 164 .name = "SECAM-BGH",
166 .b = ( cPositiveAmTV | 165 .b = ( cPositiveAmTV |
167 cQSS ), 166 cQSS ),
168 .c = ( cTopPalSecamDefault), 167 .c = ( cTopDefault),
169 .e = ( cGating_36 | 168 .e = ( cGating_36 |
170 cAudioIF_5_5 | 169 cAudioIF_5_5 |
171 cVideoIF_38_90 ), 170 cVideoIF_38_90 ),
@@ -174,7 +173,7 @@ static struct tvnorm tvnorms[] = {
174 .name = "SECAM-L", 173 .name = "SECAM-L",
175 .b = ( cPositiveAmTV | 174 .b = ( cPositiveAmTV |
176 cQSS ), 175 cQSS ),
177 .c = ( cTopPalSecamDefault), 176 .c = ( cTopDefault),
178 .e = ( cGating_36 | 177 .e = ( cGating_36 |
179 cAudioIF_6_5 | 178 cAudioIF_6_5 |
180 cVideoIF_38_90 ), 179 cVideoIF_38_90 ),
@@ -184,7 +183,7 @@ static struct tvnorm tvnorms[] = {
184 .b = ( cOutputPort2Inactive | 183 .b = ( cOutputPort2Inactive |
185 cPositiveAmTV | 184 cPositiveAmTV |
186 cQSS ), 185 cQSS ),
187 .c = ( cTopPalSecamDefault), 186 .c = ( cTopDefault),
188 .e = ( cGating_36 | 187 .e = ( cGating_36 |
189 cAudioIF_6_5 | 188 cAudioIF_6_5 |
190 cVideoIF_33_90 ), 189 cVideoIF_33_90 ),
@@ -195,7 +194,7 @@ static struct tvnorm tvnorms[] = {
195 cQSS ), 194 cQSS ),
196 .c = ( cDeemphasisON | 195 .c = ( cDeemphasisON |
197 cDeemphasis50 | 196 cDeemphasis50 |
198 cTopPalSecamDefault), 197 cTopDefault),
199 .e = ( cGating_36 | 198 .e = ( cGating_36 |
200 cAudioIF_6_5 | 199 cAudioIF_6_5 |
201 cVideoIF_38_90 ), 200 cVideoIF_38_90 ),
@@ -206,7 +205,7 @@ static struct tvnorm tvnorms[] = {
206 cQSS ), 205 cQSS ),
207 .c = ( cDeemphasisON | 206 .c = ( cDeemphasisON |
208 cDeemphasis75 | 207 cDeemphasis75 |
209 cTopNtscRadioDefault), 208 cTopDefault),
210 .e = ( cGating_36 | 209 .e = ( cGating_36 |
211 cAudioIF_4_5 | 210 cAudioIF_4_5 |
212 cVideoIF_45_75 ), 211 cVideoIF_45_75 ),
@@ -217,7 +216,7 @@ static struct tvnorm tvnorms[] = {
217 cQSS ), 216 cQSS ),
218 .c = ( cDeemphasisON | 217 .c = ( cDeemphasisON |
219 cDeemphasis50 | 218 cDeemphasis50 |
220 cTopNtscRadioDefault), 219 cTopDefault),
221 .e = ( cGating_36 | 220 .e = ( cGating_36 |
222 cAudioIF_4_5 | 221 cAudioIF_4_5 |
223 cVideoIF_58_75 ), 222 cVideoIF_58_75 ),
@@ -230,7 +229,7 @@ static struct tvnorm radio_stereo = {
230 cQSS ), 229 cQSS ),
231 .c = ( cDeemphasisOFF | 230 .c = ( cDeemphasisOFF |
232 cAudioGain6 | 231 cAudioGain6 |
233 cTopNtscRadioDefault), 232 cTopDefault),
234 .e = ( cTunerGainLow | 233 .e = ( cTunerGainLow |
235 cAudioIF_5_5 | 234 cAudioIF_5_5 |
236 cRadioIF_38_90 ), 235 cRadioIF_38_90 ),
@@ -242,7 +241,7 @@ static struct tvnorm radio_mono = {
242 cQSS ), 241 cQSS ),
243 .c = ( cDeemphasisON | 242 .c = ( cDeemphasisON |
244 cDeemphasis75 | 243 cDeemphasis75 |
245 cTopNtscRadioDefault), 244 cTopDefault),
246 .e = ( cTunerGainLow | 245 .e = ( cTunerGainLow |
247 cAudioIF_5_5 | 246 cAudioIF_5_5 |
248 cRadioIF_38_90 ), 247 cRadioIF_38_90 ),
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index e95792fd70..011413cf34 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -40,7 +40,6 @@ static unsigned int no_autodetect = 0;
40static unsigned int show_i2c = 0; 40static unsigned int show_i2c = 0;
41 41
42/* insmod options used at runtime => read/write */ 42/* insmod options used at runtime => read/write */
43static unsigned int tuner_debug_old = 0;
44int tuner_debug = 0; 43int tuner_debug = 0;
45 44
46static unsigned int tv_range[2] = { 44, 958 }; 45static unsigned int tv_range[2] = { 44, 958 };
@@ -54,8 +53,6 @@ static char ntsc[] = "-";
54module_param(addr, int, 0444); 53module_param(addr, int, 0444);
55module_param(no_autodetect, int, 0444); 54module_param(no_autodetect, int, 0444);
56module_param(show_i2c, int, 0444); 55module_param(show_i2c, int, 0444);
57/* Note: tuner_debug is deprecated and will be removed in 2.6.17 */
58module_param_named(tuner_debug,tuner_debug_old, int, 0444);
59module_param_named(debug,tuner_debug, int, 0644); 56module_param_named(debug,tuner_debug, int, 0644);
60module_param_string(pal, pal, sizeof(pal), 0644); 57module_param_string(pal, pal, sizeof(pal), 0644);
61module_param_string(secam, secam, sizeof(secam), 0644); 58module_param_string(secam, secam, sizeof(secam), 0644);
@@ -442,11 +439,6 @@ static int tuner_attach(struct i2c_adapter *adap, int addr, int kind)
442 t->audmode = V4L2_TUNER_MODE_STEREO; 439 t->audmode = V4L2_TUNER_MODE_STEREO;
443 t->mode_mask = T_UNINITIALIZED; 440 t->mode_mask = T_UNINITIALIZED;
444 t->tuner_status = tuner_status; 441 t->tuner_status = tuner_status;
445 if (tuner_debug_old) {
446 tuner_debug = tuner_debug_old;
447 printk(KERN_ERR "tuner: tuner_debug is deprecated and will be removed in 2.6.17.\n");
448 printk(KERN_ERR "tuner: use the debug option instead.\n");
449 }
450 442
451 if (show_i2c) { 443 if (show_i2c) {
452 unsigned char buffer[16]; 444 unsigned char buffer[16];
@@ -730,14 +722,10 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
730 { 722 {
731 struct v4l2_frequency *f = arg; 723 struct v4l2_frequency *f = arg;
732 724
725 if (set_mode (client, t, f->type, "VIDIOC_S_FREQUENCY")
726 == EINVAL)
727 return 0;
733 switch_v4l2(); 728 switch_v4l2();
734 if ((V4L2_TUNER_RADIO == f->type && V4L2_TUNER_RADIO != t->mode)
735 || (V4L2_TUNER_DIGITAL_TV == f->type
736 && V4L2_TUNER_DIGITAL_TV != t->mode)) {
737 if (set_mode (client, t, f->type, "VIDIOC_S_FREQUENCY")
738 == EINVAL)
739 return 0;
740 }
741 set_freq(client,f->frequency); 729 set_freq(client,f->frequency);
742 730
743 break; 731 break;
diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
index 3f3182a24d..56e01b6224 100644
--- a/drivers/media/video/usbvideo/quickcam_messenger.c
+++ b/drivers/media/video/usbvideo/quickcam_messenger.c
@@ -33,7 +33,7 @@
33#include <linux/module.h> 33#include <linux/module.h>
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/input.h> 35#include <linux/input.h>
36#include <linux/usb_input.h> 36#include <linux/usb/input.h>
37 37
38#include "usbvideo.h" 38#include "usbvideo.h"
39#include "quickcam_messenger.h" 39#include "quickcam_messenger.h"
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index 14e5234713..97f946db85 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -1101,6 +1101,11 @@ const char **v4l2_ctrl_get_menu(u32 id)
1101 "MPEG-2 SVCD-compatible Stream", 1101 "MPEG-2 SVCD-compatible Stream",
1102 NULL 1102 NULL
1103 }; 1103 };
1104 static const char *mpeg_stream_vbi_fmt[] = {
1105 "No VBI",
1106 "Private packet, IVTV format",
1107 NULL
1108 };
1104 1109
1105 switch (id) { 1110 switch (id) {
1106 case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: 1111 case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
@@ -1129,6 +1134,8 @@ const char **v4l2_ctrl_get_menu(u32 id)
1129 return mpeg_video_bitrate_mode; 1134 return mpeg_video_bitrate_mode;
1130 case V4L2_CID_MPEG_STREAM_TYPE: 1135 case V4L2_CID_MPEG_STREAM_TYPE:
1131 return mpeg_stream_type; 1136 return mpeg_stream_type;
1137 case V4L2_CID_MPEG_STREAM_VBI_FMT:
1138 return mpeg_stream_vbi_fmt;
1132 default: 1139 default:
1133 return NULL; 1140 return NULL;
1134 } 1141 }
@@ -1182,6 +1189,7 @@ int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 ste
1182 case V4L2_CID_MPEG_STREAM_PID_PCR: name = "Stream PCR Program ID"; break; 1189 case V4L2_CID_MPEG_STREAM_PID_PCR: name = "Stream PCR Program ID"; break;
1183 case V4L2_CID_MPEG_STREAM_PES_ID_AUDIO: name = "Stream PES Audio ID"; break; 1190 case V4L2_CID_MPEG_STREAM_PES_ID_AUDIO: name = "Stream PES Audio ID"; break;
1184 case V4L2_CID_MPEG_STREAM_PES_ID_VIDEO: name = "Stream PES Video ID"; break; 1191 case V4L2_CID_MPEG_STREAM_PES_ID_VIDEO: name = "Stream PES Video ID"; break;
1192 case V4L2_CID_MPEG_STREAM_VBI_FMT: name = "Stream VBI Format"; break;
1185 1193
1186 default: 1194 default:
1187 return -EINVAL; 1195 return -EINVAL;
@@ -1208,6 +1216,7 @@ int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 ste
1208 case V4L2_CID_MPEG_VIDEO_ASPECT: 1216 case V4L2_CID_MPEG_VIDEO_ASPECT:
1209 case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: 1217 case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
1210 case V4L2_CID_MPEG_STREAM_TYPE: 1218 case V4L2_CID_MPEG_STREAM_TYPE:
1219 case V4L2_CID_MPEG_STREAM_VBI_FMT:
1211 qctrl->type = V4L2_CTRL_TYPE_MENU; 1220 qctrl->type = V4L2_CTRL_TYPE_MENU;
1212 step = 1; 1221 step = 1;
1213 break; 1222 break;
@@ -1367,6 +1376,11 @@ int v4l2_ctrl_query_fill_std(struct v4l2_queryctrl *qctrl)
1367 return v4l2_ctrl_query_fill(qctrl, 0, 255, 1, 0); 1376 return v4l2_ctrl_query_fill(qctrl, 0, 255, 1, 0);
1368 case V4L2_CID_MPEG_STREAM_PES_ID_VIDEO: 1377 case V4L2_CID_MPEG_STREAM_PES_ID_VIDEO:
1369 return v4l2_ctrl_query_fill(qctrl, 0, 255, 1, 0); 1378 return v4l2_ctrl_query_fill(qctrl, 0, 255, 1, 0);
1379 case V4L2_CID_MPEG_STREAM_VBI_FMT:
1380 return v4l2_ctrl_query_fill(qctrl,
1381 V4L2_MPEG_STREAM_VBI_FMT_NONE,
1382 V4L2_MPEG_STREAM_VBI_FMT_IVTV, 1,
1383 V4L2_MPEG_STREAM_VBI_FMT_NONE);
1370 default: 1384 default:
1371 return -EINVAL; 1385 return -EINVAL;
1372 } 1386 }
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index d0141ccbb7..a8f2fa9854 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -305,10 +305,8 @@ mptfc_GetFcDevPage0(MPT_ADAPTER *ioc, int ioc_port,
305 } 305 }
306 306
307 out: 307 out:
308 if (pp0_array) 308 kfree(pp0_array);
309 kfree(pp0_array); 309 kfree(p0_array);
310 if (p0_array)
311 kfree(p0_array);
312 return rc; 310 return rc;
313} 311}
314 312
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
index febbdd4e06..c74e5460f8 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/message/i2o/iop.c
@@ -1239,7 +1239,6 @@ EXPORT_SYMBOL(i2o_cntxt_list_remove);
1239EXPORT_SYMBOL(i2o_cntxt_list_get_ptr); 1239EXPORT_SYMBOL(i2o_cntxt_list_get_ptr);
1240#endif 1240#endif
1241EXPORT_SYMBOL(i2o_msg_get_wait); 1241EXPORT_SYMBOL(i2o_msg_get_wait);
1242EXPORT_SYMBOL(i2o_msg_nop);
1243EXPORT_SYMBOL(i2o_find_iop); 1242EXPORT_SYMBOL(i2o_find_iop);
1244EXPORT_SYMBOL(i2o_iop_find_device); 1243EXPORT_SYMBOL(i2o_iop_find_device);
1245EXPORT_SYMBOL(i2o_event_register); 1244EXPORT_SYMBOL(i2o_event_register);
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index aff83f9668..c8426a9bf2 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -420,8 +420,10 @@ static int ucb1x00_detect_irq(struct ucb1x00 *ucb)
420 unsigned long mask; 420 unsigned long mask;
421 421
422 mask = probe_irq_on(); 422 mask = probe_irq_on();
423 if (!mask) 423 if (!mask) {
424 probe_irq_off(mask);
424 return NO_IRQ; 425 return NO_IRQ;
426 }
425 427
426 /* 428 /*
427 * Enable the ADC interrupt. 429 * Enable the ADC interrupt.
diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c
index 1fdf03fd2d..9706cc1913 100644
--- a/drivers/misc/ibmasm/module.c
+++ b/drivers/misc/ibmasm/module.c
@@ -85,7 +85,7 @@ static int __devinit ibmasm_init_one(struct pci_dev *pdev, const struct pci_devi
85 } 85 }
86 memset(sp, 0, sizeof(struct service_processor)); 86 memset(sp, 0, sizeof(struct service_processor));
87 87
88 sp->lock = SPIN_LOCK_UNLOCKED; 88 spin_lock_init(&sp->lock);
89 INIT_LIST_HEAD(&sp->command_queue); 89 INIT_LIST_HEAD(&sp->command_queue);
90 90
91 pci_set_drvdata(pdev, (void *)sp); 91 pci_set_drvdata(pdev, (void *)sp);
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 5ac265dde4..1344ad7a4b 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -86,14 +86,14 @@ config MTD_REDBOOT_DIRECTORY_BLOCK
86 block and "-2" means the penultimate block. 86 block and "-2" means the penultimate block.
87 87
88config MTD_REDBOOT_PARTS_UNALLOCATED 88config MTD_REDBOOT_PARTS_UNALLOCATED
89 bool " Include unallocated flash regions" 89 bool "Include unallocated flash regions"
90 depends on MTD_REDBOOT_PARTS 90 depends on MTD_REDBOOT_PARTS
91 help 91 help
92 If you need to register each unallocated flash region as a MTD 92 If you need to register each unallocated flash region as a MTD
93 'partition', enable this option. 93 'partition', enable this option.
94 94
95config MTD_REDBOOT_PARTS_READONLY 95config MTD_REDBOOT_PARTS_READONLY
96 bool " Force read-only for RedBoot system images" 96 bool "Force read-only for RedBoot system images"
97 depends on MTD_REDBOOT_PARTS 97 depends on MTD_REDBOOT_PARTS
98 help 98 help
99 If you need to force read-only for 'RedBoot', 'RedBoot Config' and 99 If you need to force read-only for 'RedBoot', 'RedBoot Config' and
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 0d435814aa..39edb8250f 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -357,6 +357,7 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
357 mtd->resume = cfi_intelext_resume; 357 mtd->resume = cfi_intelext_resume;
358 mtd->flags = MTD_CAP_NORFLASH; 358 mtd->flags = MTD_CAP_NORFLASH;
359 mtd->name = map->name; 359 mtd->name = map->name;
360 mtd->writesize = 1;
360 361
361 mtd->reboot_notifier.notifier_call = cfi_intelext_reboot; 362 mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
362 363
diff --git a/drivers/mtd/chips/jedec.c b/drivers/mtd/chips/jedec.c
index c40b48dabe..2c3f019197 100644
--- a/drivers/mtd/chips/jedec.c
+++ b/drivers/mtd/chips/jedec.c
@@ -256,6 +256,7 @@ static struct mtd_info *jedec_probe(struct map_info *map)
256 MTD->name = map->name; 256 MTD->name = map->name;
257 MTD->type = MTD_NORFLASH; 257 MTD->type = MTD_NORFLASH;
258 MTD->flags = MTD_CAP_NORFLASH; 258 MTD->flags = MTD_CAP_NORFLASH;
259 MTD->writesize = 1;
259 MTD->erasesize = SectorSize*(map->buswidth); 260 MTD->erasesize = SectorSize*(map->buswidth);
260 // printk("MTD->erasesize is %x\n",(unsigned int)MTD->erasesize); 261 // printk("MTD->erasesize is %x\n",(unsigned int)MTD->erasesize);
261 MTD->size = priv->size; 262 MTD->size = priv->size;
diff --git a/drivers/mtd/chips/map_absent.c b/drivers/mtd/chips/map_absent.c
index a611de9b15..ac01a949b6 100644
--- a/drivers/mtd/chips/map_absent.c
+++ b/drivers/mtd/chips/map_absent.c
@@ -64,7 +64,8 @@ static struct mtd_info *map_absent_probe(struct map_info *map)
64 mtd->write = map_absent_write; 64 mtd->write = map_absent_write;
65 mtd->sync = map_absent_sync; 65 mtd->sync = map_absent_sync;
66 mtd->flags = 0; 66 mtd->flags = 0;
67 mtd->erasesize = PAGE_SIZE; 67 mtd->erasesize = PAGE_SIZE;
68 mtd->writesize = 1;
68 69
69 __module_get(THIS_MODULE); 70 __module_get(THIS_MODULE);
70 return mtd; 71 return mtd;
diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c
index 763925747d..3a66680abf 100644
--- a/drivers/mtd/chips/map_ram.c
+++ b/drivers/mtd/chips/map_ram.c
@@ -71,6 +71,7 @@ static struct mtd_info *map_ram_probe(struct map_info *map)
71 mtd->write = mapram_write; 71 mtd->write = mapram_write;
72 mtd->sync = mapram_nop; 72 mtd->sync = mapram_nop;
73 mtd->flags = MTD_CAP_RAM; 73 mtd->flags = MTD_CAP_RAM;
74 mtd->writesize = 1;
74 75
75 mtd->erasesize = PAGE_SIZE; 76 mtd->erasesize = PAGE_SIZE;
76 while(mtd->size & (mtd->erasesize - 1)) 77 while(mtd->size & (mtd->erasesize - 1))
diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c
index bc6ee9ef8a..1b328b1378 100644
--- a/drivers/mtd/chips/map_rom.c
+++ b/drivers/mtd/chips/map_rom.c
@@ -47,6 +47,7 @@ static struct mtd_info *map_rom_probe(struct map_info *map)
47 mtd->sync = maprom_nop; 47 mtd->sync = maprom_nop;
48 mtd->flags = MTD_CAP_ROM; 48 mtd->flags = MTD_CAP_ROM;
49 mtd->erasesize = map->size; 49 mtd->erasesize = map->size;
50 mtd->writesize = 1;
50 51
51 __module_get(THIS_MODULE); 52 __module_get(THIS_MODULE);
52 return mtd; 53 return mtd;
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 0d98c223c5..be3f1c136d 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -324,6 +324,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
324 324
325 dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK; 325 dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
326 dev->mtd.erasesize = erase_size; 326 dev->mtd.erasesize = erase_size;
327 dev->mtd.writesize = 1;
327 dev->mtd.type = MTD_RAM; 328 dev->mtd.type = MTD_RAM;
328 dev->mtd.flags = MTD_CAP_RAM; 329 dev->mtd.flags = MTD_CAP_RAM;
329 dev->mtd.erase = block2mtd_erase; 330 dev->mtd.erase = block2mtd_erase;
diff --git a/drivers/mtd/devices/ms02-nv.c b/drivers/mtd/devices/ms02-nv.c
index 4ab7670770..08dfb899b2 100644
--- a/drivers/mtd/devices/ms02-nv.c
+++ b/drivers/mtd/devices/ms02-nv.c
@@ -225,6 +225,7 @@ static int __init ms02nv_init_one(ulong addr)
225 mtd->owner = THIS_MODULE; 225 mtd->owner = THIS_MODULE;
226 mtd->read = ms02nv_read; 226 mtd->read = ms02nv_read;
227 mtd->write = ms02nv_write; 227 mtd->write = ms02nv_write;
228 mtd->writesize = 1;
228 229
229 ret = -EIO; 230 ret = -EIO;
230 if (add_mtd_device(mtd)) { 231 if (add_mtd_device(mtd)) {
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index a19480d078..04271d02b6 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -478,6 +478,7 @@ add_dataflash(struct spi_device *spi, char *name,
478 device->name = (pdata && pdata->name) ? pdata->name : priv->name; 478 device->name = (pdata && pdata->name) ? pdata->name : priv->name;
479 device->size = nr_pages * pagesize; 479 device->size = nr_pages * pagesize;
480 device->erasesize = pagesize; 480 device->erasesize = pagesize;
481 device->writesize = pagesize;
481 device->owner = THIS_MODULE; 482 device->owner = THIS_MODULE;
482 device->type = MTD_DATAFLASH; 483 device->type = MTD_DATAFLASH;
483 device->flags = MTD_CAP_NORFLASH; 484 device->flags = MTD_CAP_NORFLASH;
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index e09e416667..6c7337f9eb 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -151,6 +151,7 @@ static int register_device(char *name, unsigned long start, unsigned long len)
151 new->mtd.owner = THIS_MODULE; 151 new->mtd.owner = THIS_MODULE;
152 new->mtd.type = MTD_RAM; 152 new->mtd.type = MTD_RAM;
153 new->mtd.erasesize = PAGE_SIZE; 153 new->mtd.erasesize = PAGE_SIZE;
154 new->mtd.writesize = 1;
154 155
155 ret = -EAGAIN; 156 ret = -EAGAIN;
156 if (add_mtd_device(&new->mtd)) { 157 if (add_mtd_device(&new->mtd)) {
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index 666cce1bf6..f620d74f10 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -778,7 +778,8 @@ static int __init init_pmc551(void)
778 mtd->type = MTD_RAM; 778 mtd->type = MTD_RAM;
779 mtd->name = "PMC551 RAM board"; 779 mtd->name = "PMC551 RAM board";
780 mtd->erasesize = 0x10000; 780 mtd->erasesize = 0x10000;
781 mtd->owner = THIS_MODULE; 781 mtd->writesize = 1;
782 mtd->owner = THIS_MODULE;
782 783
783 if (add_mtd_device(mtd)) { 784 if (add_mtd_device(mtd)) {
784 printk(KERN_NOTICE "pmc551: Failed to register new device\n"); 785 printk(KERN_NOTICE "pmc551: Failed to register new device\n");
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index b3f665e3c3..542a0c0090 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -209,6 +209,7 @@ static int register_device(char *name, unsigned long start, unsigned long length
209 (*curmtd)->mtdinfo->owner = THIS_MODULE; 209 (*curmtd)->mtdinfo->owner = THIS_MODULE;
210 (*curmtd)->mtdinfo->type = MTD_RAM; 210 (*curmtd)->mtdinfo->type = MTD_RAM;
211 (*curmtd)->mtdinfo->erasesize = SLRAM_BLK_SZ; 211 (*curmtd)->mtdinfo->erasesize = SLRAM_BLK_SZ;
212 (*curmtd)->mtdinfo->writesize = 1;
212 213
213 if (add_mtd_device((*curmtd)->mtdinfo)) { 214 if (add_mtd_device((*curmtd)->mtdinfo)) {
214 E("slram: Failed to register new device\n"); 215 E("slram: Failed to register new device\n");
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 6bdaacc6d6..83d0b2a525 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -212,7 +212,7 @@ config MTD_NETtel
212 Support for flash chips on NETtel/SecureEdge/SnapGear boards. 212 Support for flash chips on NETtel/SecureEdge/SnapGear boards.
213 213
214config MTD_ALCHEMY 214config MTD_ALCHEMY
215 tristate ' AMD Alchemy Pb1xxx/Db1xxx/RDK MTD support' 215 tristate "AMD Alchemy Pb1xxx/Db1xxx/RDK MTD support"
216 depends on SOC_AU1X00 216 depends on SOC_AU1X00
217 help 217 help
218 Flash memory access on AMD Alchemy Pb/Db/RDK Reference Boards 218 Flash memory access on AMD Alchemy Pb/Db/RDK Reference Boards
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index 2c9cc7f37e..c26488a179 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -42,7 +42,6 @@ struct ixp2000_flash_info {
42 struct map_info map; 42 struct map_info map;
43 struct mtd_partition *partitions; 43 struct mtd_partition *partitions;
44 struct resource *res; 44 struct resource *res;
45 int nr_banks;
46}; 45};
47 46
48static inline unsigned long flash_bank_setup(struct map_info *map, unsigned long ofs) 47static inline unsigned long flash_bank_setup(struct map_info *map, unsigned long ofs)
@@ -183,7 +182,6 @@ static int ixp2000_flash_probe(struct platform_device *dev)
183 */ 182 */
184 info->map.phys = NO_XIP; 183 info->map.phys = NO_XIP;
185 184
186 info->nr_banks = ixp_data->nr_banks;
187 info->map.size = ixp_data->nr_banks * window_size; 185 info->map.size = ixp_data->nr_banks * window_size;
188 info->map.bankwidth = 1; 186 info->map.bankwidth = 1;
189 187
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 433c3cac3c..d6301f0890 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -182,7 +182,7 @@ static struct physmap_flash_data physmap_flash_data = {
182 182
183static struct resource physmap_flash_resource = { 183static struct resource physmap_flash_resource = {
184 .start = CONFIG_MTD_PHYSMAP_START, 184 .start = CONFIG_MTD_PHYSMAP_START,
185 .end = CONFIG_MTD_PHYSMAP_START + CONFIG_MTD_PHYSMAP_LEN, 185 .end = CONFIG_MTD_PHYSMAP_START + CONFIG_MTD_PHYSMAP_LEN - 1,
186 .flags = IORESOURCE_MEM, 186 .flags = IORESOURCE_MEM,
187}; 187};
188 188
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index 0758cb1d01..24a03152d1 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -18,6 +18,7 @@
18#include <linux/ioport.h> 18#include <linux/ioport.h>
19#include <asm/ebus.h> 19#include <asm/ebus.h>
20#include <asm/oplib.h> 20#include <asm/oplib.h>
21#include <asm/prom.h>
21#include <asm/uaccess.h> 22#include <asm/uaccess.h>
22#include <asm/io.h> 23#include <asm/io.h>
23 24
@@ -30,146 +31,140 @@
30#define UFLASH_WINDOW_SIZE 0x200000 31#define UFLASH_WINDOW_SIZE 0x200000
31#define UFLASH_BUSWIDTH 1 /* EBus is 8-bit */ 32#define UFLASH_BUSWIDTH 1 /* EBus is 8-bit */
32 33
33MODULE_AUTHOR 34MODULE_AUTHOR("Eric Brower <ebrower@usa.net>");
34 ("Eric Brower <ebrower@usa.net>"); 35MODULE_DESCRIPTION("User-programmable flash device on Sun Microsystems boardsets");
35MODULE_DESCRIPTION 36MODULE_SUPPORTED_DEVICE("userflash");
36 ("User-programmable flash device on Sun Microsystems boardsets"); 37MODULE_LICENSE("GPL");
37MODULE_SUPPORTED_DEVICE 38MODULE_VERSION("2.0");
38 ("userflash");
39MODULE_LICENSE
40 ("GPL");
41 39
42static LIST_HEAD(device_list); 40static LIST_HEAD(device_list);
43struct uflash_dev { 41struct uflash_dev {
44 char * name; /* device name */ 42 char *name; /* device name */
45 struct map_info map; /* mtd map info */ 43 struct map_info map; /* mtd map info */
46 struct mtd_info * mtd; /* mtd info */ 44 struct mtd_info *mtd; /* mtd info */
47 struct list_head list;
48}; 45};
49 46
50 47
51struct map_info uflash_map_templ = { 48struct map_info uflash_map_templ = {
52 .name = "SUNW,???-????", 49 .name = "SUNW,???-????",
53 .size = UFLASH_WINDOW_SIZE, 50 .size = UFLASH_WINDOW_SIZE,
54 .bankwidth = UFLASH_BUSWIDTH, 51 .bankwidth = UFLASH_BUSWIDTH,
55}; 52};
56 53
57int uflash_devinit(struct linux_ebus_device* edev) 54int uflash_devinit(struct linux_ebus_device *edev, struct device_node *dp)
58{ 55{
59 int iTmp, nregs; 56 struct uflash_dev *up;
60 struct linux_prom_registers regs[2]; 57 struct resource *res;
61 struct uflash_dev *pdev;
62
63 iTmp = prom_getproperty(
64 edev->prom_node, "reg", (void *)regs, sizeof(regs));
65 if ((iTmp % sizeof(regs[0])) != 0) {
66 printk("%s: Strange reg property size %d\n",
67 UFLASH_DEVNAME, iTmp);
68 return -ENODEV;
69 }
70 58
71 nregs = iTmp / sizeof(regs[0]); 59 res = &edev->resource[0];
72 60
73 if (nregs != 1) { 61 if (edev->num_addrs != 1) {
74 /* Non-CFI userflash device-- once I find one we 62 /* Non-CFI userflash device-- once I find one we
75 * can work on supporting it. 63 * can work on supporting it.
76 */ 64 */
77 printk("%s: unsupported device at 0x%lx (%d regs): " \ 65 printk("%s: unsupported device at 0x%lx (%d regs): " \
78 "email ebrower@usa.net\n", 66 "email ebrower@usa.net\n",
79 UFLASH_DEVNAME, edev->resource[0].start, nregs); 67 dp->full_name, res->start, edev->num_addrs);
68
80 return -ENODEV; 69 return -ENODEV;
81 } 70 }
82 71
83 if(0 == (pdev = kmalloc(sizeof(struct uflash_dev), GFP_KERNEL))) { 72 up = kzalloc(sizeof(struct uflash_dev), GFP_KERNEL);
84 printk("%s: unable to kmalloc new device\n", UFLASH_DEVNAME); 73 if (!up)
85 return(-ENOMEM); 74 return -ENOMEM;
86 }
87 75
88 /* copy defaults and tweak parameters */ 76 /* copy defaults and tweak parameters */
89 memcpy(&pdev->map, &uflash_map_templ, sizeof(uflash_map_templ)); 77 memcpy(&up->map, &uflash_map_templ, sizeof(uflash_map_templ));
90 pdev->map.size = regs[0].reg_size; 78 up->map.size = (res->end - res->start) + 1UL;
91 79
92 iTmp = prom_getproplen(edev->prom_node, "model"); 80 up->name = of_get_property(dp, "model", NULL);
93 pdev->name = kmalloc(iTmp, GFP_KERNEL); 81 if (up->name && 0 < strlen(up->name))
94 prom_getstring(edev->prom_node, "model", pdev->name, iTmp); 82 up->map.name = up->name;
95 if(0 != pdev->name && 0 < strlen(pdev->name)) { 83
96 pdev->map.name = pdev->name; 84 up->map.phys = res->start;
97 } 85
98 pdev->map.phys = edev->resource[0].start; 86 up->map.virt = ioremap_nocache(res->start, up->map.size);
99 pdev->map.virt = ioremap_nocache(edev->resource[0].start, pdev->map.size); 87 if (!up->map.virt) {
100 if(0 == pdev->map.virt) { 88 printk("%s: Failed to map device.\n", dp->full_name);
101 printk("%s: failed to map device\n", __FUNCTION__); 89 kfree(up);
102 kfree(pdev->name); 90
103 kfree(pdev); 91 return -EINVAL;
104 return(-1);
105 } 92 }
106 93
107 simple_map_init(&pdev->map); 94 simple_map_init(&up->map);
108 95
109 /* MTD registration */ 96 /* MTD registration */
110 pdev->mtd = do_map_probe("cfi_probe", &pdev->map); 97 up->mtd = do_map_probe("cfi_probe", &up->map);
111 if(0 == pdev->mtd) { 98 if (!up->mtd) {
112 iounmap(pdev->map.virt); 99 iounmap(up->map.virt);
113 kfree(pdev->name); 100 kfree(up);
114 kfree(pdev); 101
115 return(-ENXIO); 102 return -ENXIO;
116 } 103 }
117 104
118 list_add(&pdev->list, &device_list); 105 up->mtd->owner = THIS_MODULE;
119 106
120 pdev->mtd->owner = THIS_MODULE; 107 add_mtd_device(up->mtd);
121 108
122 add_mtd_device(pdev->mtd); 109 dev_set_drvdata(&edev->ofdev.dev, up);
123 return(0); 110
111 return 0;
124} 112}
125 113
126static int __init uflash_init(void) 114static int __devinit uflash_probe(struct of_device *dev, const struct of_device_id *match)
127{ 115{
128 struct linux_ebus *ebus = NULL; 116 struct linux_ebus_device *edev = to_ebus_device(&dev->dev);
129 struct linux_ebus_device *edev = NULL; 117 struct device_node *dp = dev->node;
130
131 for_each_ebus(ebus) {
132 for_each_ebusdev(edev, ebus) {
133 if (!strcmp(edev->prom_name, UFLASH_OBPNAME)) {
134 if(0 > prom_getproplen(edev->prom_node, "user")) {
135 DEBUG(2, "%s: ignoring device at 0x%lx\n",
136 UFLASH_DEVNAME, edev->resource[0].start);
137 } else {
138 uflash_devinit(edev);
139 }
140 }
141 }
142 }
143 118
144 if(list_empty(&device_list)) { 119 if (of_find_property(dp, "user", NULL))
145 printk("%s: unable to locate device\n", UFLASH_DEVNAME);
146 return -ENODEV; 120 return -ENODEV;
147 } 121
148 return(0); 122 return uflash_devinit(edev, dp);
149} 123}
150 124
151static void __exit uflash_cleanup(void) 125static int __devexit uflash_remove(struct of_device *dev)
152{ 126{
153 struct list_head *udevlist; 127 struct uflash_dev *up = dev_get_drvdata(&dev->dev);
154 struct uflash_dev *udev; 128
155 129 if (up->mtd) {
156 list_for_each(udevlist, &device_list) { 130 del_mtd_device(up->mtd);
157 udev = list_entry(udevlist, struct uflash_dev, list); 131 map_destroy(up->mtd);
158 DEBUG(2, "%s: removing device %s\n",
159 UFLASH_DEVNAME, udev->name);
160
161 if(0 != udev->mtd) {
162 del_mtd_device(udev->mtd);
163 map_destroy(udev->mtd);
164 }
165 if(0 != udev->map.virt) {
166 iounmap(udev->map.virt);
167 udev->map.virt = NULL;
168 }
169 kfree(udev->name);
170 kfree(udev);
171 } 132 }
133 if (up->map.virt) {
134 iounmap(up->map.virt);
135 up->map.virt = NULL;
136 }
137
138 kfree(up);
139
140 return 0;
141}
142
143static struct of_device_id uflash_match[] = {
144 {
145 .name = UFLASH_OBPNAME,
146 },
147 {},
148};
149
150MODULE_DEVICE_TABLE(of, uflash_match);
151
152static struct of_platform_driver uflash_driver = {
153 .name = UFLASH_DEVNAME,
154 .match_table = uflash_match,
155 .probe = uflash_probe,
156 .remove = __devexit_p(uflash_remove),
157};
158
159static int __init uflash_init(void)
160{
161 return of_register_driver(&uflash_driver, &ebus_bus_type);
162}
163
164static void __exit uflash_exit(void)
165{
166 of_unregister_driver(&uflash_driver);
172} 167}
173 168
174module_init(uflash_init); 169module_init(uflash_init);
175module_exit(uflash_cleanup); 170module_exit(uflash_exit);
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index aa18d45b26..9a4b59d925 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -78,7 +78,7 @@ static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
78 return -EINVAL; 78 return -EINVAL;
79 } 79 }
80 80
81 if (offset >= 0 && offset < mtd->size) 81 if (offset >= 0 && offset <= mtd->size)
82 return file->f_pos = offset; 82 return file->f_pos = offset;
83 83
84 return -EINVAL; 84 return -EINVAL;
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 27083ed0a0..80a76654d9 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -1176,7 +1176,7 @@ static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
1176 1176
1177 status = chip->waitfunc(mtd, chip); 1177 status = chip->waitfunc(mtd, chip);
1178 1178
1179 return status; 1179 return status & NAND_STATUS_FAIL ? -EIO : 0;
1180} 1180}
1181 1181
1182/** 1182/**
@@ -1271,10 +1271,6 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
1271 sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd); 1271 sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd);
1272 buf = nand_transfer_oob(chip, buf, ops); 1272 buf = nand_transfer_oob(chip, buf, ops);
1273 1273
1274 readlen -= ops->ooblen;
1275 if (!readlen)
1276 break;
1277
1278 if (!(chip->options & NAND_NO_READRDY)) { 1274 if (!(chip->options & NAND_NO_READRDY)) {
1279 /* 1275 /*
1280 * Apply delay or wait for ready/busy pin. Do this 1276 * Apply delay or wait for ready/busy pin. Do this
@@ -1288,6 +1284,10 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
1288 nand_wait_ready(mtd); 1284 nand_wait_ready(mtd);
1289 } 1285 }
1290 1286
1287 readlen -= ops->ooblen;
1288 if (!readlen)
1289 break;
1290
1291 /* Increment page address */ 1291 /* Increment page address */
1292 realpage++; 1292 realpage++;
1293 1293
@@ -1610,13 +1610,13 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
1610 if (!writelen) 1610 if (!writelen)
1611 return 0; 1611 return 0;
1612 1612
1613 chipnr = (int)(to >> chip->chip_shift);
1614 chip->select_chip(mtd, chipnr);
1615
1613 /* Check, if it is write protected */ 1616 /* Check, if it is write protected */
1614 if (nand_check_wp(mtd)) 1617 if (nand_check_wp(mtd))
1615 return -EIO; 1618 return -EIO;
1616 1619
1617 chipnr = (int)(to >> chip->chip_shift);
1618 chip->select_chip(mtd, chipnr);
1619
1620 realpage = (int)(to >> chip->page_shift); 1620 realpage = (int)(to >> chip->page_shift);
1621 page = realpage & chip->pagemask; 1621 page = realpage & chip->pagemask;
1622 blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1; 1622 blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index fe8d38514b..e5bd88f2d5 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -61,15 +61,15 @@ static void ndfc_select_chip(struct mtd_info *mtd, int chip)
61 61
62static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) 62static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
63{ 63{
64 struct nand_chip *chip = mtd->priv; 64 struct ndfc_controller *ndfc = &ndfc_ctrl;
65 65
66 if (cmd == NAND_CMD_NONE) 66 if (cmd == NAND_CMD_NONE)
67 return; 67 return;
68 68
69 if (ctrl & NAND_CLE) 69 if (ctrl & NAND_CLE)
70 writel(cmd & 0xFF, chip->IO_ADDR_W + NDFC_CMD); 70 writel(cmd & 0xFF, ndfc->ndfcbase + NDFC_CMD);
71 else 71 else
72 writel(cmd & 0xFF, chip->IO_ADDR_W + NDFC_ALE); 72 writel(cmd & 0xFF, ndfc->ndfcbase + NDFC_ALE);
73} 73}
74 74
75static int ndfc_ready(struct mtd_info *mtd) 75static int ndfc_ready(struct mtd_info *mtd)
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 2c262fe03d..ff5cef24d5 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -63,8 +63,6 @@
63#include <asm/arch/regs-nand.h> 63#include <asm/arch/regs-nand.h>
64#include <asm/arch/nand.h> 64#include <asm/arch/nand.h>
65 65
66#define PFX "s3c2410-nand: "
67
68#ifdef CONFIG_MTD_NAND_S3C2410_HWECC 66#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
69static int hardware_ecc = 1; 67static int hardware_ecc = 1;
70#else 68#else
@@ -99,6 +97,12 @@ struct s3c2410_nand_mtd {
99 int scan_res; 97 int scan_res;
100}; 98};
101 99
100enum s3c_cpu_type {
101 TYPE_S3C2410,
102 TYPE_S3C2412,
103 TYPE_S3C2440,
104};
105
102/* overview of the s3c2410 nand state */ 106/* overview of the s3c2410 nand state */
103 107
104struct s3c2410_nand_info { 108struct s3c2410_nand_info {
@@ -112,9 +116,11 @@ struct s3c2410_nand_info {
112 struct resource *area; 116 struct resource *area;
113 struct clk *clk; 117 struct clk *clk;
114 void __iomem *regs; 118 void __iomem *regs;
119 void __iomem *sel_reg;
120 int sel_bit;
115 int mtd_count; 121 int mtd_count;
116 122
117 unsigned char is_s3c2440; 123 enum s3c_cpu_type cpu_type;
118}; 124};
119 125
120/* conversion functions */ 126/* conversion functions */
@@ -148,7 +154,7 @@ static inline int allow_clk_stop(struct s3c2410_nand_info *info)
148 154
149#define NS_IN_KHZ 1000000 155#define NS_IN_KHZ 1000000
150 156
151static int s3c2410_nand_calc_rate(int wanted, unsigned long clk, int max) 157static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
152{ 158{
153 int result; 159 int result;
154 160
@@ -172,53 +178,58 @@ static int s3c2410_nand_calc_rate(int wanted, unsigned long clk, int max)
172 178
173/* controller setup */ 179/* controller setup */
174 180
175static int s3c2410_nand_inithw(struct s3c2410_nand_info *info, struct platform_device *pdev) 181static int s3c2410_nand_inithw(struct s3c2410_nand_info *info,
182 struct platform_device *pdev)
176{ 183{
177 struct s3c2410_platform_nand *plat = to_nand_plat(pdev); 184 struct s3c2410_platform_nand *plat = to_nand_plat(pdev);
178 unsigned long clkrate = clk_get_rate(info->clk); 185 unsigned long clkrate = clk_get_rate(info->clk);
186 int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 8 : 4;
179 int tacls, twrph0, twrph1; 187 int tacls, twrph0, twrph1;
180 unsigned long cfg; 188 unsigned long cfg = 0;
181 189
182 /* calculate the timing information for the controller */ 190 /* calculate the timing information for the controller */
183 191
184 clkrate /= 1000; /* turn clock into kHz for ease of use */ 192 clkrate /= 1000; /* turn clock into kHz for ease of use */
185 193
186 if (plat != NULL) { 194 if (plat != NULL) {
187 tacls = s3c2410_nand_calc_rate(plat->tacls, clkrate, 4); 195 tacls = s3c_nand_calc_rate(plat->tacls, clkrate, tacls_max);
188 twrph0 = s3c2410_nand_calc_rate(plat->twrph0, clkrate, 8); 196 twrph0 = s3c_nand_calc_rate(plat->twrph0, clkrate, 8);
189 twrph1 = s3c2410_nand_calc_rate(plat->twrph1, clkrate, 8); 197 twrph1 = s3c_nand_calc_rate(plat->twrph1, clkrate, 8);
190 } else { 198 } else {
191 /* default timings */ 199 /* default timings */
192 tacls = 4; 200 tacls = tacls_max;
193 twrph0 = 8; 201 twrph0 = 8;
194 twrph1 = 8; 202 twrph1 = 8;
195 } 203 }
196 204
197 if (tacls < 0 || twrph0 < 0 || twrph1 < 0) { 205 if (tacls < 0 || twrph0 < 0 || twrph1 < 0) {
198 printk(KERN_ERR PFX "cannot get timings suitable for board\n"); 206 dev_err(info->device, "cannot get suitable timings\n");
199 return -EINVAL; 207 return -EINVAL;
200 } 208 }
201 209
202 printk(KERN_INFO PFX "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n", 210 dev_info(info->device, "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n",
203 tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate), twrph1, to_ns(twrph1, clkrate)); 211 tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate), twrph1, to_ns(twrph1, clkrate));
204 212
205 if (!info->is_s3c2440) { 213 switch (info->cpu_type) {
214 case TYPE_S3C2410:
206 cfg = S3C2410_NFCONF_EN; 215 cfg = S3C2410_NFCONF_EN;
207 cfg |= S3C2410_NFCONF_TACLS(tacls - 1); 216 cfg |= S3C2410_NFCONF_TACLS(tacls - 1);
208 cfg |= S3C2410_NFCONF_TWRPH0(twrph0 - 1); 217 cfg |= S3C2410_NFCONF_TWRPH0(twrph0 - 1);
209 cfg |= S3C2410_NFCONF_TWRPH1(twrph1 - 1); 218 cfg |= S3C2410_NFCONF_TWRPH1(twrph1 - 1);
210 } else { 219 break;
220
221 case TYPE_S3C2440:
222 case TYPE_S3C2412:
211 cfg = S3C2440_NFCONF_TACLS(tacls - 1); 223 cfg = S3C2440_NFCONF_TACLS(tacls - 1);
212 cfg |= S3C2440_NFCONF_TWRPH0(twrph0 - 1); 224 cfg |= S3C2440_NFCONF_TWRPH0(twrph0 - 1);
213 cfg |= S3C2440_NFCONF_TWRPH1(twrph1 - 1); 225 cfg |= S3C2440_NFCONF_TWRPH1(twrph1 - 1);
214 226
215 /* enable the controller and de-assert nFCE */ 227 /* enable the controller and de-assert nFCE */
216 228
217 writel(S3C2440_NFCONT_ENABLE | S3C2440_NFCONT_ENABLE, 229 writel(S3C2440_NFCONT_ENABLE, info->regs + S3C2440_NFCONT);
218 info->regs + S3C2440_NFCONT);
219 } 230 }
220 231
221 pr_debug(PFX "NF_CONF is 0x%lx\n", cfg); 232 dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg);
222 233
223 writel(cfg, info->regs + S3C2410_NFCONF); 234 writel(cfg, info->regs + S3C2410_NFCONF);
224 return 0; 235 return 0;
@@ -231,26 +242,21 @@ static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
231 struct s3c2410_nand_info *info; 242 struct s3c2410_nand_info *info;
232 struct s3c2410_nand_mtd *nmtd; 243 struct s3c2410_nand_mtd *nmtd;
233 struct nand_chip *this = mtd->priv; 244 struct nand_chip *this = mtd->priv;
234 void __iomem *reg;
235 unsigned long cur; 245 unsigned long cur;
236 unsigned long bit;
237 246
238 nmtd = this->priv; 247 nmtd = this->priv;
239 info = nmtd->info; 248 info = nmtd->info;
240 249
241 bit = (info->is_s3c2440) ? S3C2440_NFCONT_nFCE : S3C2410_NFCONF_nFCE;
242 reg = info->regs + ((info->is_s3c2440) ? S3C2440_NFCONT : S3C2410_NFCONF);
243
244 if (chip != -1 && allow_clk_stop(info)) 250 if (chip != -1 && allow_clk_stop(info))
245 clk_enable(info->clk); 251 clk_enable(info->clk);
246 252
247 cur = readl(reg); 253 cur = readl(info->sel_reg);
248 254
249 if (chip == -1) { 255 if (chip == -1) {
250 cur |= bit; 256 cur |= info->sel_bit;
251 } else { 257 } else {
252 if (nmtd->set != NULL && chip > nmtd->set->nr_chips) { 258 if (nmtd->set != NULL && chip > nmtd->set->nr_chips) {
253 printk(KERN_ERR PFX "chip %d out of range\n", chip); 259 dev_err(info->device, "invalid chip %d\n", chip);
254 return; 260 return;
255 } 261 }
256 262
@@ -259,10 +265,10 @@ static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
259 (info->platform->select_chip) (nmtd->set, chip); 265 (info->platform->select_chip) (nmtd->set, chip);
260 } 266 }
261 267
262 cur &= ~bit; 268 cur &= ~info->sel_bit;
263 } 269 }
264 270
265 writel(cur, reg); 271 writel(cur, info->sel_reg);
266 272
267 if (chip == -1 && allow_clk_stop(info)) 273 if (chip == -1 && allow_clk_stop(info))
268 clk_disable(info->clk); 274 clk_disable(info->clk);
@@ -311,15 +317,25 @@ static void s3c2440_nand_hwcontrol(struct mtd_info *mtd, int cmd,
311static int s3c2410_nand_devready(struct mtd_info *mtd) 317static int s3c2410_nand_devready(struct mtd_info *mtd)
312{ 318{
313 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); 319 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
314
315 if (info->is_s3c2440)
316 return readb(info->regs + S3C2440_NFSTAT) & S3C2440_NFSTAT_READY;
317 return readb(info->regs + S3C2410_NFSTAT) & S3C2410_NFSTAT_BUSY; 320 return readb(info->regs + S3C2410_NFSTAT) & S3C2410_NFSTAT_BUSY;
318} 321}
319 322
323static int s3c2440_nand_devready(struct mtd_info *mtd)
324{
325 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
326 return readb(info->regs + S3C2440_NFSTAT) & S3C2440_NFSTAT_READY;
327}
328
329static int s3c2412_nand_devready(struct mtd_info *mtd)
330{
331 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
332 return readb(info->regs + S3C2412_NFSTAT) & S3C2412_NFSTAT_READY;
333}
334
320/* ECC handling functions */ 335/* ECC handling functions */
321 336
322static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc) 337static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
338 u_char *read_ecc, u_char *calc_ecc)
323{ 339{
324 pr_debug("s3c2410_nand_correct_data(%p,%p,%p,%p)\n", mtd, dat, read_ecc, calc_ecc); 340 pr_debug("s3c2410_nand_correct_data(%p,%p,%p,%p)\n", mtd, dat, read_ecc, calc_ecc);
325 341
@@ -487,11 +503,8 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
487 struct s3c2410_nand_set *set) 503 struct s3c2410_nand_set *set)
488{ 504{
489 struct nand_chip *chip = &nmtd->chip; 505 struct nand_chip *chip = &nmtd->chip;
506 void __iomem *regs = info->regs;
490 507
491 chip->IO_ADDR_R = info->regs + S3C2410_NFDATA;
492 chip->IO_ADDR_W = info->regs + S3C2410_NFDATA;
493 chip->cmd_ctrl = s3c2410_nand_hwcontrol;
494 chip->dev_ready = s3c2410_nand_devready;
495 chip->write_buf = s3c2410_nand_write_buf; 508 chip->write_buf = s3c2410_nand_write_buf;
496 chip->read_buf = s3c2410_nand_read_buf; 509 chip->read_buf = s3c2410_nand_read_buf;
497 chip->select_chip = s3c2410_nand_select_chip; 510 chip->select_chip = s3c2410_nand_select_chip;
@@ -500,11 +513,37 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
500 chip->options = 0; 513 chip->options = 0;
501 chip->controller = &info->controller; 514 chip->controller = &info->controller;
502 515
503 if (info->is_s3c2440) { 516 switch (info->cpu_type) {
504 chip->IO_ADDR_R = info->regs + S3C2440_NFDATA; 517 case TYPE_S3C2410:
505 chip->IO_ADDR_W = info->regs + S3C2440_NFDATA; 518 chip->IO_ADDR_W = regs + S3C2410_NFDATA;
506 chip->cmd_ctrl = s3c2440_nand_hwcontrol; 519 info->sel_reg = regs + S3C2410_NFCONF;
507 } 520 info->sel_bit = S3C2410_NFCONF_nFCE;
521 chip->cmd_ctrl = s3c2410_nand_hwcontrol;
522 chip->dev_ready = s3c2410_nand_devready;
523 break;
524
525 case TYPE_S3C2440:
526 chip->IO_ADDR_W = regs + S3C2440_NFDATA;
527 info->sel_reg = regs + S3C2440_NFCONT;
528 info->sel_bit = S3C2440_NFCONT_nFCE;
529 chip->cmd_ctrl = s3c2440_nand_hwcontrol;
530 chip->dev_ready = s3c2440_nand_devready;
531 break;
532
533 case TYPE_S3C2412:
534 chip->IO_ADDR_W = regs + S3C2440_NFDATA;
535 info->sel_reg = regs + S3C2440_NFCONT;
536 info->sel_bit = S3C2412_NFCONT_nFCE0;
537 chip->cmd_ctrl = s3c2440_nand_hwcontrol;
538 chip->dev_ready = s3c2412_nand_devready;
539
540 if (readl(regs + S3C2410_NFCONF) & S3C2412_NFCONF_NANDBOOT)
541 dev_info(info->device, "System booted from NAND\n");
542
543 break;
544 }
545
546 chip->IO_ADDR_R = chip->IO_ADDR_W;
508 547
509 nmtd->info = info; 548 nmtd->info = info;
510 nmtd->mtd.priv = chip; 549 nmtd->mtd.priv = chip;
@@ -512,17 +551,25 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
512 nmtd->set = set; 551 nmtd->set = set;
513 552
514 if (hardware_ecc) { 553 if (hardware_ecc) {
515 chip->ecc.correct = s3c2410_nand_correct_data;
516 chip->ecc.hwctl = s3c2410_nand_enable_hwecc;
517 chip->ecc.calculate = s3c2410_nand_calculate_ecc; 554 chip->ecc.calculate = s3c2410_nand_calculate_ecc;
555 chip->ecc.correct = s3c2410_nand_correct_data;
518 chip->ecc.mode = NAND_ECC_HW; 556 chip->ecc.mode = NAND_ECC_HW;
519 chip->ecc.size = 512; 557 chip->ecc.size = 512;
520 chip->ecc.bytes = 3; 558 chip->ecc.bytes = 3;
521 chip->ecc.layout = &nand_hw_eccoob; 559 chip->ecc.layout = &nand_hw_eccoob;
522 560
523 if (info->is_s3c2440) { 561 switch (info->cpu_type) {
524 chip->ecc.hwctl = s3c2440_nand_enable_hwecc; 562 case TYPE_S3C2410:
525 chip->ecc.calculate = s3c2440_nand_calculate_ecc; 563 chip->ecc.hwctl = s3c2410_nand_enable_hwecc;
564 chip->ecc.calculate = s3c2410_nand_calculate_ecc;
565 break;
566
567 case TYPE_S3C2412:
568 case TYPE_S3C2440:
569 chip->ecc.hwctl = s3c2440_nand_enable_hwecc;
570 chip->ecc.calculate = s3c2440_nand_calculate_ecc;
571 break;
572
526 } 573 }
527 } else { 574 } else {
528 chip->ecc.mode = NAND_ECC_SOFT; 575 chip->ecc.mode = NAND_ECC_SOFT;
@@ -537,7 +584,8 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
537 * nand layer to look for devices 584 * nand layer to look for devices
538*/ 585*/
539 586
540static int s3c24xx_nand_probe(struct platform_device *pdev, int is_s3c2440) 587static int s3c24xx_nand_probe(struct platform_device *pdev,
588 enum s3c_cpu_type cpu_type)
541{ 589{
542 struct s3c2410_platform_nand *plat = to_nand_plat(pdev); 590 struct s3c2410_platform_nand *plat = to_nand_plat(pdev);
543 struct s3c2410_nand_info *info; 591 struct s3c2410_nand_info *info;
@@ -592,7 +640,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev, int is_s3c2440)
592 info->device = &pdev->dev; 640 info->device = &pdev->dev;
593 info->platform = plat; 641 info->platform = plat;
594 info->regs = ioremap(res->start, size); 642 info->regs = ioremap(res->start, size);
595 info->is_s3c2440 = is_s3c2440; 643 info->cpu_type = cpu_type;
596 644
597 if (info->regs == NULL) { 645 if (info->regs == NULL) {
598 dev_err(&pdev->dev, "cannot reserve register region\n"); 646 dev_err(&pdev->dev, "cannot reserve register region\n");
@@ -699,12 +747,17 @@ static int s3c24xx_nand_resume(struct platform_device *dev)
699 747
700static int s3c2410_nand_probe(struct platform_device *dev) 748static int s3c2410_nand_probe(struct platform_device *dev)
701{ 749{
702 return s3c24xx_nand_probe(dev, 0); 750 return s3c24xx_nand_probe(dev, TYPE_S3C2410);
703} 751}
704 752
705static int s3c2440_nand_probe(struct platform_device *dev) 753static int s3c2440_nand_probe(struct platform_device *dev)
706{ 754{
707 return s3c24xx_nand_probe(dev, 1); 755 return s3c24xx_nand_probe(dev, TYPE_S3C2440);
756}
757
758static int s3c2412_nand_probe(struct platform_device *dev)
759{
760 return s3c24xx_nand_probe(dev, TYPE_S3C2412);
708} 761}
709 762
710static struct platform_driver s3c2410_nand_driver = { 763static struct platform_driver s3c2410_nand_driver = {
@@ -729,16 +782,29 @@ static struct platform_driver s3c2440_nand_driver = {
729 }, 782 },
730}; 783};
731 784
785static struct platform_driver s3c2412_nand_driver = {
786 .probe = s3c2412_nand_probe,
787 .remove = s3c2410_nand_remove,
788 .suspend = s3c24xx_nand_suspend,
789 .resume = s3c24xx_nand_resume,
790 .driver = {
791 .name = "s3c2412-nand",
792 .owner = THIS_MODULE,
793 },
794};
795
732static int __init s3c2410_nand_init(void) 796static int __init s3c2410_nand_init(void)
733{ 797{
734 printk("S3C24XX NAND Driver, (c) 2004 Simtec Electronics\n"); 798 printk("S3C24XX NAND Driver, (c) 2004 Simtec Electronics\n");
735 799
800 platform_driver_register(&s3c2412_nand_driver);
736 platform_driver_register(&s3c2440_nand_driver); 801 platform_driver_register(&s3c2440_nand_driver);
737 return platform_driver_register(&s3c2410_nand_driver); 802 return platform_driver_register(&s3c2410_nand_driver);
738} 803}
739 804
740static void __exit s3c2410_nand_exit(void) 805static void __exit s3c2410_nand_exit(void)
741{ 806{
807 platform_driver_unregister(&s3c2412_nand_driver);
742 platform_driver_unregister(&s3c2440_nand_driver); 808 platform_driver_unregister(&s3c2440_nand_driver);
743 platform_driver_unregister(&s3c2410_nand_driver); 809 platform_driver_unregister(&s3c2410_nand_driver);
744} 810}
diff --git a/drivers/mtd/nand/ts7250.c b/drivers/mtd/nand/ts7250.c
index a0b4b1edcb..f40081069a 100644
--- a/drivers/mtd/nand/ts7250.c
+++ b/drivers/mtd/nand/ts7250.c
@@ -97,7 +97,7 @@ static void ts7250_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
97 unsigned long addr = TS72XX_NAND_CONTROL_VIRT_BASE; 97 unsigned long addr = TS72XX_NAND_CONTROL_VIRT_BASE;
98 unsigned char bits; 98 unsigned char bits;
99 99
100 bits = (ctrl & NAND_CNE) << 2; 100 bits = (ctrl & NAND_NCE) << 2;
101 bits |= ctrl & NAND_CLE; 101 bits |= ctrl & NAND_CLE;
102 bits |= (ctrl & NAND_ALE) >> 2; 102 bits |= (ctrl & NAND_ALE) >> 2;
103 103
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c
index bb44509fd4..07136ec423 100644
--- a/drivers/net/3c501.c
+++ b/drivers/net/3c501.c
@@ -508,11 +508,11 @@ static int el_start_xmit(struct sk_buff *skb, struct net_device *dev)
508 * speak of. We simply pull the packet out of its PIO buffer (which is slow) 508 * speak of. We simply pull the packet out of its PIO buffer (which is slow)
509 * and queue it for the kernel. Then we reset the card for the next packet. 509 * and queue it for the kernel. Then we reset the card for the next packet.
510 * 510 *
511 * We sometimes get suprise interrupts late both because the SMP IRQ delivery 511 * We sometimes get surprise interrupts late both because the SMP IRQ delivery
512 * is message passing and because the card sometimes seems to deliver late. I 512 * is message passing and because the card sometimes seems to deliver late. I
513 * think if it is part way through a receive and the mode is changed it carries 513 * think if it is part way through a receive and the mode is changed it carries
514 * on receiving and sends us an interrupt. We have to band aid all these cases 514 * on receiving and sends us an interrupt. We have to band aid all these cases
515 * to get a sensible 150kbytes/second performance. Even then you want a small 515 * to get a sensible 150kBytes/second performance. Even then you want a small
516 * TCP window. 516 * TCP window.
517 */ 517 */
518 518
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index e27778926e..b467c383ae 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -375,8 +375,7 @@ limit of 4K.
375 of the drivers, and will likely be provided by some future kernel. 375 of the drivers, and will likely be provided by some future kernel.
376*/ 376*/
377enum pci_flags_bit { 377enum pci_flags_bit {
378 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4, 378 PCI_USES_MASTER=4,
379 PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
380}; 379};
381 380
382enum { IS_VORTEX=1, IS_BOOMERANG=2, IS_CYCLONE=4, IS_TORNADO=8, 381enum { IS_VORTEX=1, IS_BOOMERANG=2, IS_CYCLONE=4, IS_TORNADO=8,
@@ -446,95 +445,95 @@ static struct vortex_chip_info {
446 int io_size; 445 int io_size;
447} vortex_info_tbl[] __devinitdata = { 446} vortex_info_tbl[] __devinitdata = {
448 {"3c590 Vortex 10Mbps", 447 {"3c590 Vortex 10Mbps",
449 PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, }, 448 PCI_USES_MASTER, IS_VORTEX, 32, },
450 {"3c592 EISA 10Mbps Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */ 449 {"3c592 EISA 10Mbps Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */
451 PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, }, 450 PCI_USES_MASTER, IS_VORTEX, 32, },
452 {"3c597 EISA Fast Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */ 451 {"3c597 EISA Fast Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */
453 PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, }, 452 PCI_USES_MASTER, IS_VORTEX, 32, },
454 {"3c595 Vortex 100baseTx", 453 {"3c595 Vortex 100baseTx",
455 PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, }, 454 PCI_USES_MASTER, IS_VORTEX, 32, },
456 {"3c595 Vortex 100baseT4", 455 {"3c595 Vortex 100baseT4",
457 PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, }, 456 PCI_USES_MASTER, IS_VORTEX, 32, },
458 457
459 {"3c595 Vortex 100base-MII", 458 {"3c595 Vortex 100base-MII",
460 PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, }, 459 PCI_USES_MASTER, IS_VORTEX, 32, },
461 {"3c900 Boomerang 10baseT", 460 {"3c900 Boomerang 10baseT",
462 PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, }, 461 PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, },
463 {"3c900 Boomerang 10Mbps Combo", 462 {"3c900 Boomerang 10Mbps Combo",
464 PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, }, 463 PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, },
465 {"3c900 Cyclone 10Mbps TPO", /* AKPM: from Don's 0.99M */ 464 {"3c900 Cyclone 10Mbps TPO", /* AKPM: from Don's 0.99M */
466 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, 465 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
467 {"3c900 Cyclone 10Mbps Combo", 466 {"3c900 Cyclone 10Mbps Combo",
468 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, 467 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
469 468
470 {"3c900 Cyclone 10Mbps TPC", /* AKPM: from Don's 0.99M */ 469 {"3c900 Cyclone 10Mbps TPC", /* AKPM: from Don's 0.99M */
471 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, 470 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
472 {"3c900B-FL Cyclone 10base-FL", 471 {"3c900B-FL Cyclone 10base-FL",
473 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, 472 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
474 {"3c905 Boomerang 100baseTx", 473 {"3c905 Boomerang 100baseTx",
475 PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, }, 474 PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, },
476 {"3c905 Boomerang 100baseT4", 475 {"3c905 Boomerang 100baseT4",
477 PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, }, 476 PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, },
478 {"3c905B Cyclone 100baseTx", 477 {"3c905B Cyclone 100baseTx",
479 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, 478 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
480 479
481 {"3c905B Cyclone 10/100/BNC", 480 {"3c905B Cyclone 10/100/BNC",
482 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, }, 481 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
483 {"3c905B-FX Cyclone 100baseFx", 482 {"3c905B-FX Cyclone 100baseFx",
484 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, 483 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
485 {"3c905C Tornado", 484 {"3c905C Tornado",
486 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, 485 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
487 {"3c920B-EMB-WNM (ATI Radeon 9100 IGP)", 486 {"3c920B-EMB-WNM (ATI Radeon 9100 IGP)",
488 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_MII|HAS_HWCKSM, 128, }, 487 PCI_USES_MASTER, IS_TORNADO|HAS_MII|HAS_HWCKSM, 128, },
489 {"3c980 Cyclone", 488 {"3c980 Cyclone",
490 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, 489 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
491 490
492 {"3c980C Python-T", 491 {"3c980C Python-T",
493 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, }, 492 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
494 {"3cSOHO100-TX Hurricane", 493 {"3cSOHO100-TX Hurricane",
495 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, }, 494 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
496 {"3c555 Laptop Hurricane", 495 {"3c555 Laptop Hurricane",
497 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|EEPROM_8BIT|HAS_HWCKSM, 128, }, 496 PCI_USES_MASTER, IS_CYCLONE|EEPROM_8BIT|HAS_HWCKSM, 128, },
498 {"3c556 Laptop Tornado", 497 {"3c556 Laptop Tornado",
499 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_8BIT|HAS_CB_FNS|INVERT_MII_PWR| 498 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_8BIT|HAS_CB_FNS|INVERT_MII_PWR|
500 HAS_HWCKSM, 128, }, 499 HAS_HWCKSM, 128, },
501 {"3c556B Laptop Hurricane", 500 {"3c556B Laptop Hurricane",
502 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_OFFSET|HAS_CB_FNS|INVERT_MII_PWR| 501 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_OFFSET|HAS_CB_FNS|INVERT_MII_PWR|
503 WNO_XCVR_PWR|HAS_HWCKSM, 128, }, 502 WNO_XCVR_PWR|HAS_HWCKSM, 128, },
504 503
505 {"3c575 [Megahertz] 10/100 LAN CardBus", 504 {"3c575 [Megahertz] 10/100 LAN CardBus",
506 PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, }, 505 PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
507 {"3c575 Boomerang CardBus", 506 {"3c575 Boomerang CardBus",
508 PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, }, 507 PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
509 {"3CCFE575BT Cyclone CardBus", 508 {"3CCFE575BT Cyclone CardBus",
510 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT| 509 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|
511 INVERT_LED_PWR|HAS_HWCKSM, 128, }, 510 INVERT_LED_PWR|HAS_HWCKSM, 128, },
512 {"3CCFE575CT Tornado CardBus", 511 {"3CCFE575CT Tornado CardBus",
513 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| 512 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
514 MAX_COLLISION_RESET|HAS_HWCKSM, 128, }, 513 MAX_COLLISION_RESET|HAS_HWCKSM, 128, },
515 {"3CCFE656 Cyclone CardBus", 514 {"3CCFE656 Cyclone CardBus",
516 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| 515 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
517 INVERT_LED_PWR|HAS_HWCKSM, 128, }, 516 INVERT_LED_PWR|HAS_HWCKSM, 128, },
518 517
519 {"3CCFEM656B Cyclone+Winmodem CardBus", 518 {"3CCFEM656B Cyclone+Winmodem CardBus",
520 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| 519 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
521 INVERT_LED_PWR|HAS_HWCKSM, 128, }, 520 INVERT_LED_PWR|HAS_HWCKSM, 128, },
522 {"3CXFEM656C Tornado+Winmodem CardBus", /* From pcmcia-cs-3.1.5 */ 521 {"3CXFEM656C Tornado+Winmodem CardBus", /* From pcmcia-cs-3.1.5 */
523 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| 522 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
524 MAX_COLLISION_RESET|HAS_HWCKSM, 128, }, 523 MAX_COLLISION_RESET|HAS_HWCKSM, 128, },
525 {"3c450 HomePNA Tornado", /* AKPM: from Don's 0.99Q */ 524 {"3c450 HomePNA Tornado", /* AKPM: from Don's 0.99Q */
526 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, }, 525 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
527 {"3c920 Tornado", 526 {"3c920 Tornado",
528 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, }, 527 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
529 {"3c982 Hydra Dual Port A", 528 {"3c982 Hydra Dual Port A",
530 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, }, 529 PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, },
531 530
532 {"3c982 Hydra Dual Port B", 531 {"3c982 Hydra Dual Port B",
533 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, }, 532 PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, },
534 {"3c905B-T4", 533 {"3c905B-T4",
535 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, 534 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
536 {"3c920B-EMB-WNM Tornado", 535 {"3c920B-EMB-WNM Tornado",
537 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, }, 536 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
538 537
539 {NULL,}, /* NULL terminated list. */ 538 {NULL,}, /* NULL terminated list. */
540}; 539};
diff --git a/drivers/net/dl2k.h b/drivers/net/dl2k.h
index 6e75482d75..53449207e5 100644
--- a/drivers/net/dl2k.h
+++ b/drivers/net/dl2k.h
@@ -683,11 +683,6 @@ struct netdev_private {
683}; 683};
684 684
685/* The station address location in the EEPROM. */ 685/* The station address location in the EEPROM. */
686#ifdef MEM_MAPPING
687#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
688#else
689#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0)
690#endif
691/* The struct pci_device_id consist of: 686/* The struct pci_device_id consist of:
692 vendor, device Vendor and device ID to match (or PCI_ANY_ID) 687 vendor, device Vendor and device ID to match (or PCI_ANY_ID)
693 subvendor, subdevice Subsystem vendor and device ID to match (or PCI_ANY_ID) 688 subvendor, subdevice Subsystem vendor and device ID to match (or PCI_ANY_ID)
@@ -695,9 +690,10 @@ struct netdev_private {
695 class_mask of the class are honored during the comparison. 690 class_mask of the class are honored during the comparison.
696 driver_data Data private to the driver. 691 driver_data Data private to the driver.
697*/ 692*/
698static struct pci_device_id rio_pci_tbl[] = { 693
699 {0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 694static const struct pci_device_id rio_pci_tbl[] = {
700 {0,} 695 {0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, },
696 { }
701}; 697};
702MODULE_DEVICE_TABLE (pci, rio_pci_tbl); 698MODULE_DEVICE_TABLE (pci, rio_pci_tbl);
703#define TX_TIMEOUT (4*HZ) 699#define TX_TIMEOUT (4*HZ)
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 24996da4c1..7965a9b08e 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -410,10 +410,7 @@ dm9000_probe(struct platform_device *pdev)
410 if (pdev->num_resources < 2) { 410 if (pdev->num_resources < 2) {
411 ret = -ENODEV; 411 ret = -ENODEV;
412 goto out; 412 goto out;
413 } 413 } else if (pdev->num_resources == 2) {
414
415 switch (pdev->num_resources) {
416 case 2:
417 base = pdev->resource[0].start; 414 base = pdev->resource[0].start;
418 415
419 if (!request_mem_region(base, 4, ndev->name)) { 416 if (!request_mem_region(base, 4, ndev->name)) {
@@ -423,17 +420,16 @@ dm9000_probe(struct platform_device *pdev)
423 420
424 ndev->base_addr = base; 421 ndev->base_addr = base;
425 ndev->irq = pdev->resource[1].start; 422 ndev->irq = pdev->resource[1].start;
426 db->io_addr = (void *)base; 423 db->io_addr = (void __iomem *)base;
427 db->io_data = (void *)(base + 4); 424 db->io_data = (void __iomem *)(base + 4);
428
429 break;
430 425
431 case 3: 426 } else {
432 db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 427 db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
433 db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 428 db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
434 db->irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 429 db->irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
435 430
436 if (db->addr_res == NULL || db->data_res == NULL) { 431 if (db->addr_res == NULL || db->data_res == NULL ||
432 db->irq_res == NULL) {
437 printk(KERN_ERR PFX "insufficient resources\n"); 433 printk(KERN_ERR PFX "insufficient resources\n");
438 ret = -ENOENT; 434 ret = -ENOENT;
439 goto out; 435 goto out;
@@ -482,7 +478,6 @@ dm9000_probe(struct platform_device *pdev)
482 478
483 /* ensure at least we have a default set of IO routines */ 479 /* ensure at least we have a default set of IO routines */
484 dm9000_set_io(db, iosize); 480 dm9000_set_io(db, iosize);
485
486 } 481 }
487 482
488 /* check to see if anything is being over-ridden */ 483 /* check to see if anything is being over-ridden */
@@ -564,6 +559,13 @@ dm9000_probe(struct platform_device *pdev)
564 for (i = 0; i < 6; i++) 559 for (i = 0; i < 6; i++)
565 ndev->dev_addr[i] = db->srom[i]; 560 ndev->dev_addr[i] = db->srom[i];
566 561
562 if (!is_valid_ether_addr(ndev->dev_addr)) {
563 /* try reading from mac */
564
565 for (i = 0; i < 6; i++)
566 ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
567 }
568
567 if (!is_valid_ether_addr(ndev->dev_addr)) 569 if (!is_valid_ether_addr(ndev->dev_addr))
568 printk("%s: Invalid ethernet MAC address. Please " 570 printk("%s: Invalid ethernet MAC address. Please "
569 "set using ifconfig\n", ndev->name); 571 "set using ifconfig\n", ndev->name);
@@ -663,7 +665,6 @@ dm9000_init_dm9000(struct net_device *dev)
663 db->tx_pkt_cnt = 0; 665 db->tx_pkt_cnt = 0;
664 db->queue_pkt_len = 0; 666 db->queue_pkt_len = 0;
665 dev->trans_start = 0; 667 dev->trans_start = 0;
666 spin_lock_init(&db->lock);
667} 668}
668 669
669/* 670/*
@@ -767,7 +768,7 @@ dm9000_stop(struct net_device *ndev)
767 * receive the packet to upper layer, free the transmitted packet 768 * receive the packet to upper layer, free the transmitted packet
768 */ 769 */
769 770
770void 771static void
771dm9000_tx_done(struct net_device *dev, board_info_t * db) 772dm9000_tx_done(struct net_device *dev, board_info_t * db)
772{ 773{
773 int tx_status = ior(db, DM9000_NSR); /* Got TX status */ 774 int tx_status = ior(db, DM9000_NSR); /* Got TX status */
@@ -1187,13 +1188,14 @@ dm9000_drv_remove(struct platform_device *pdev)
1187} 1188}
1188 1189
1189static struct platform_driver dm9000_driver = { 1190static struct platform_driver dm9000_driver = {
1191 .driver = {
1192 .name = "dm9000",
1193 .owner = THIS_MODULE,
1194 },
1190 .probe = dm9000_probe, 1195 .probe = dm9000_probe,
1191 .remove = dm9000_drv_remove, 1196 .remove = dm9000_drv_remove,
1192 .suspend = dm9000_drv_suspend, 1197 .suspend = dm9000_drv_suspend,
1193 .resume = dm9000_drv_resume, 1198 .resume = dm9000_drv_resume,
1194 .driver = {
1195 .name = "dm9000",
1196 },
1197}; 1199};
1198 1200
1199static int __init 1201static int __init
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index 467fc86136..ecf5ad85a6 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -278,11 +278,6 @@ having to sign an Intel NDA when I'm helping Intel sell their own product!
278 278
279static int speedo_found1(struct pci_dev *pdev, void __iomem *ioaddr, int fnd_cnt, int acpi_idle_state); 279static int speedo_found1(struct pci_dev *pdev, void __iomem *ioaddr, int fnd_cnt, int acpi_idle_state);
280 280
281enum pci_flags_bit {
282 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
283 PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
284};
285
286/* Offsets to the various registers. 281/* Offsets to the various registers.
287 All accesses need not be longword aligned. */ 282 All accesses need not be longword aligned. */
288enum speedo_offsets { 283enum speedo_offsets {
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 724d7dc35f..ee34a16eb4 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -191,23 +191,10 @@ IVc. Errata
191*/ 191*/
192 192
193 193
194enum pci_id_flags_bits {
195 /* Set PCI command register bits before calling probe1(). */
196 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
197 /* Read and map the single following PCI BAR. */
198 PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
199 PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
200};
201
202enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 }; 194enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };
203 195
204#define EPIC_TOTAL_SIZE 0x100 196#define EPIC_TOTAL_SIZE 0x100
205#define USE_IO_OPS 1 197#define USE_IO_OPS 1
206#ifdef USE_IO_OPS
207#define EPIC_IOTYPE PCI_USES_MASTER|PCI_USES_IO|PCI_ADDR0
208#else
209#define EPIC_IOTYPE PCI_USES_MASTER|PCI_USES_MEM|PCI_ADDR1
210#endif
211 198
212typedef enum { 199typedef enum {
213 SMSC_83C170_0, 200 SMSC_83C170_0,
@@ -218,7 +205,6 @@ typedef enum {
218 205
219struct epic_chip_info { 206struct epic_chip_info {
220 const char *name; 207 const char *name;
221 enum pci_id_flags_bits pci_flags;
222 int io_size; /* Needed for I/O region check or ioremap(). */ 208 int io_size; /* Needed for I/O region check or ioremap(). */
223 int drv_flags; /* Driver use, intended as capability flags. */ 209 int drv_flags; /* Driver use, intended as capability flags. */
224}; 210};
@@ -227,11 +213,11 @@ struct epic_chip_info {
227/* indexed by chip_t */ 213/* indexed by chip_t */
228static const struct epic_chip_info pci_id_tbl[] = { 214static const struct epic_chip_info pci_id_tbl[] = {
229 { "SMSC EPIC/100 83c170", 215 { "SMSC EPIC/100 83c170",
230 EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | NO_MII | MII_PWRDWN }, 216 EPIC_TOTAL_SIZE, TYPE2_INTR | NO_MII | MII_PWRDWN },
231 { "SMSC EPIC/100 83c170", 217 { "SMSC EPIC/100 83c170",
232 EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR }, 218 EPIC_TOTAL_SIZE, TYPE2_INTR },
233 { "SMSC EPIC/C 83c175", 219 { "SMSC EPIC/C 83c175",
234 EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | MII_PWRDWN }, 220 EPIC_TOTAL_SIZE, TYPE2_INTR | MII_PWRDWN },
235}; 221};
236 222
237 223
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index a8449265e5..13eca7ede2 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -126,16 +126,6 @@ MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)");
126 126
127#define MIN_REGION_SIZE 136 127#define MIN_REGION_SIZE 136
128 128
129enum pci_flags_bit {
130 PCI_USES_IO = 1,
131 PCI_USES_MEM = 2,
132 PCI_USES_MASTER = 4,
133 PCI_ADDR0 = 0x10 << 0,
134 PCI_ADDR1 = 0x10 << 1,
135 PCI_ADDR2 = 0x10 << 2,
136 PCI_ADDR3 = 0x10 << 3,
137};
138
139/* A chip capabilities table, matching the entries in pci_tbl[] above. */ 129/* A chip capabilities table, matching the entries in pci_tbl[] above. */
140enum chip_capability_flags { 130enum chip_capability_flags {
141 HAS_MII_XCVR, 131 HAS_MII_XCVR,
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index bd6983d1af..db694c8329 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -22,7 +22,7 @@
22 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com) 22 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
23 * 23 *
24 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be) 24 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
25 * Copyright (c) 2004-2005 Macq Electronique SA. 25 * Copyright (c) 2004-2006 Macq Electronique SA.
26 */ 26 */
27 27
28#include <linux/config.h> 28#include <linux/config.h>
@@ -51,7 +51,7 @@
51 51
52#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || \ 52#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || \
53 defined(CONFIG_M5272) || defined(CONFIG_M528x) || \ 53 defined(CONFIG_M5272) || defined(CONFIG_M528x) || \
54 defined(CONFIG_M520x) 54 defined(CONFIG_M520x) || defined(CONFIG_M532x)
55#include <asm/coldfire.h> 55#include <asm/coldfire.h>
56#include <asm/mcfsim.h> 56#include <asm/mcfsim.h>
57#include "fec.h" 57#include "fec.h"
@@ -80,6 +80,8 @@ static unsigned int fec_hw[] = {
80 (MCF_MBAR + 0x1000), 80 (MCF_MBAR + 0x1000),
81#elif defined(CONFIG_M520x) 81#elif defined(CONFIG_M520x)
82 (MCF_MBAR+0x30000), 82 (MCF_MBAR+0x30000),
83#elif defined(CONFIG_M532x)
84 (MCF_MBAR+0xfc030000),
83#else 85#else
84 &(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec), 86 &(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec),
85#endif 87#endif
@@ -143,7 +145,7 @@ typedef struct {
143#define TX_RING_MOD_MASK 15 /* for this to work */ 145#define TX_RING_MOD_MASK 15 /* for this to work */
144 146
145#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE) 147#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
146#error "FEC: descriptor ring size contants too large" 148#error "FEC: descriptor ring size constants too large"
147#endif 149#endif
148 150
149/* Interrupt events/masks. 151/* Interrupt events/masks.
@@ -167,12 +169,12 @@ typedef struct {
167 169
168 170
169/* 171/*
170 * The 5270/5271/5280/5282 RX control register also contains maximum frame 172 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
171 * size bits. Other FEC hardware does not, so we need to take that into 173 * size bits. Other FEC hardware does not, so we need to take that into
172 * account when setting it. 174 * account when setting it.
173 */ 175 */
174#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 176#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
175 defined(CONFIG_M520x) 177 defined(CONFIG_M520x) || defined(CONFIG_M532x)
176#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) 178#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
177#else 179#else
178#define OPT_FRAME_SIZE 0 180#define OPT_FRAME_SIZE 0
@@ -308,6 +310,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
308 struct fec_enet_private *fep; 310 struct fec_enet_private *fep;
309 volatile fec_t *fecp; 311 volatile fec_t *fecp;
310 volatile cbd_t *bdp; 312 volatile cbd_t *bdp;
313 unsigned short status;
311 314
312 fep = netdev_priv(dev); 315 fep = netdev_priv(dev);
313 fecp = (volatile fec_t*)dev->base_addr; 316 fecp = (volatile fec_t*)dev->base_addr;
@@ -320,8 +323,9 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
320 /* Fill in a Tx ring entry */ 323 /* Fill in a Tx ring entry */
321 bdp = fep->cur_tx; 324 bdp = fep->cur_tx;
322 325
326 status = bdp->cbd_sc;
323#ifndef final_version 327#ifndef final_version
324 if (bdp->cbd_sc & BD_ENET_TX_READY) { 328 if (status & BD_ENET_TX_READY) {
325 /* Ooops. All transmit buffers are full. Bail out. 329 /* Ooops. All transmit buffers are full. Bail out.
326 * This should not happen, since dev->tbusy should be set. 330 * This should not happen, since dev->tbusy should be set.
327 */ 331 */
@@ -332,7 +336,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
332 336
333 /* Clear all of the status flags. 337 /* Clear all of the status flags.
334 */ 338 */
335 bdp->cbd_sc &= ~BD_ENET_TX_STATS; 339 status &= ~BD_ENET_TX_STATS;
336 340
337 /* Set buffer length and buffer pointer. 341 /* Set buffer length and buffer pointer.
338 */ 342 */
@@ -366,21 +370,22 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
366 370
367 spin_lock_irq(&fep->lock); 371 spin_lock_irq(&fep->lock);
368 372
369 /* Send it on its way. Tell FEC its ready, interrupt when done, 373 /* Send it on its way. Tell FEC it's ready, interrupt when done,
370 * its the last BD of the frame, and to put the CRC on the end. 374 * it's the last BD of the frame, and to put the CRC on the end.
371 */ 375 */
372 376
373 bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR 377 status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
374 | BD_ENET_TX_LAST | BD_ENET_TX_TC); 378 | BD_ENET_TX_LAST | BD_ENET_TX_TC);
379 bdp->cbd_sc = status;
375 380
376 dev->trans_start = jiffies; 381 dev->trans_start = jiffies;
377 382
378 /* Trigger transmission start */ 383 /* Trigger transmission start */
379 fecp->fec_x_des_active = 0x01000000; 384 fecp->fec_x_des_active = 0;
380 385
381 /* If this was the last BD in the ring, start at the beginning again. 386 /* If this was the last BD in the ring, start at the beginning again.
382 */ 387 */
383 if (bdp->cbd_sc & BD_ENET_TX_WRAP) { 388 if (status & BD_ENET_TX_WRAP) {
384 bdp = fep->tx_bd_base; 389 bdp = fep->tx_bd_base;
385 } else { 390 } else {
386 bdp++; 391 bdp++;
@@ -491,43 +496,44 @@ fec_enet_tx(struct net_device *dev)
491{ 496{
492 struct fec_enet_private *fep; 497 struct fec_enet_private *fep;
493 volatile cbd_t *bdp; 498 volatile cbd_t *bdp;
499 unsigned short status;
494 struct sk_buff *skb; 500 struct sk_buff *skb;
495 501
496 fep = netdev_priv(dev); 502 fep = netdev_priv(dev);
497 spin_lock(&fep->lock); 503 spin_lock(&fep->lock);
498 bdp = fep->dirty_tx; 504 bdp = fep->dirty_tx;
499 505
500 while ((bdp->cbd_sc&BD_ENET_TX_READY) == 0) { 506 while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
501 if (bdp == fep->cur_tx && fep->tx_full == 0) break; 507 if (bdp == fep->cur_tx && fep->tx_full == 0) break;
502 508
503 skb = fep->tx_skbuff[fep->skb_dirty]; 509 skb = fep->tx_skbuff[fep->skb_dirty];
504 /* Check for errors. */ 510 /* Check for errors. */
505 if (bdp->cbd_sc & (BD_ENET_TX_HB | BD_ENET_TX_LC | 511 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
506 BD_ENET_TX_RL | BD_ENET_TX_UN | 512 BD_ENET_TX_RL | BD_ENET_TX_UN |
507 BD_ENET_TX_CSL)) { 513 BD_ENET_TX_CSL)) {
508 fep->stats.tx_errors++; 514 fep->stats.tx_errors++;
509 if (bdp->cbd_sc & BD_ENET_TX_HB) /* No heartbeat */ 515 if (status & BD_ENET_TX_HB) /* No heartbeat */
510 fep->stats.tx_heartbeat_errors++; 516 fep->stats.tx_heartbeat_errors++;
511 if (bdp->cbd_sc & BD_ENET_TX_LC) /* Late collision */ 517 if (status & BD_ENET_TX_LC) /* Late collision */
512 fep->stats.tx_window_errors++; 518 fep->stats.tx_window_errors++;
513 if (bdp->cbd_sc & BD_ENET_TX_RL) /* Retrans limit */ 519 if (status & BD_ENET_TX_RL) /* Retrans limit */
514 fep->stats.tx_aborted_errors++; 520 fep->stats.tx_aborted_errors++;
515 if (bdp->cbd_sc & BD_ENET_TX_UN) /* Underrun */ 521 if (status & BD_ENET_TX_UN) /* Underrun */
516 fep->stats.tx_fifo_errors++; 522 fep->stats.tx_fifo_errors++;
517 if (bdp->cbd_sc & BD_ENET_TX_CSL) /* Carrier lost */ 523 if (status & BD_ENET_TX_CSL) /* Carrier lost */
518 fep->stats.tx_carrier_errors++; 524 fep->stats.tx_carrier_errors++;
519 } else { 525 } else {
520 fep->stats.tx_packets++; 526 fep->stats.tx_packets++;
521 } 527 }
522 528
523#ifndef final_version 529#ifndef final_version
524 if (bdp->cbd_sc & BD_ENET_TX_READY) 530 if (status & BD_ENET_TX_READY)
525 printk("HEY! Enet xmit interrupt and TX_READY.\n"); 531 printk("HEY! Enet xmit interrupt and TX_READY.\n");
526#endif 532#endif
527 /* Deferred means some collisions occurred during transmit, 533 /* Deferred means some collisions occurred during transmit,
528 * but we eventually sent the packet OK. 534 * but we eventually sent the packet OK.
529 */ 535 */
530 if (bdp->cbd_sc & BD_ENET_TX_DEF) 536 if (status & BD_ENET_TX_DEF)
531 fep->stats.collisions++; 537 fep->stats.collisions++;
532 538
533 /* Free the sk buffer associated with this last transmit. 539 /* Free the sk buffer associated with this last transmit.
@@ -538,7 +544,7 @@ fec_enet_tx(struct net_device *dev)
538 544
539 /* Update pointer to next buffer descriptor to be transmitted. 545 /* Update pointer to next buffer descriptor to be transmitted.
540 */ 546 */
541 if (bdp->cbd_sc & BD_ENET_TX_WRAP) 547 if (status & BD_ENET_TX_WRAP)
542 bdp = fep->tx_bd_base; 548 bdp = fep->tx_bd_base;
543 else 549 else
544 bdp++; 550 bdp++;
@@ -568,9 +574,14 @@ fec_enet_rx(struct net_device *dev)
568 struct fec_enet_private *fep; 574 struct fec_enet_private *fep;
569 volatile fec_t *fecp; 575 volatile fec_t *fecp;
570 volatile cbd_t *bdp; 576 volatile cbd_t *bdp;
577 unsigned short status;
571 struct sk_buff *skb; 578 struct sk_buff *skb;
572 ushort pkt_len; 579 ushort pkt_len;
573 __u8 *data; 580 __u8 *data;
581
582#ifdef CONFIG_M532x
583 flush_cache_all();
584#endif
574 585
575 fep = netdev_priv(dev); 586 fep = netdev_priv(dev);
576 fecp = (volatile fec_t*)dev->base_addr; 587 fecp = (volatile fec_t*)dev->base_addr;
@@ -580,13 +591,13 @@ fec_enet_rx(struct net_device *dev)
580 */ 591 */
581 bdp = fep->cur_rx; 592 bdp = fep->cur_rx;
582 593
583while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) { 594while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
584 595
585#ifndef final_version 596#ifndef final_version
586 /* Since we have allocated space to hold a complete frame, 597 /* Since we have allocated space to hold a complete frame,
587 * the last indicator should be set. 598 * the last indicator should be set.
588 */ 599 */
589 if ((bdp->cbd_sc & BD_ENET_RX_LAST) == 0) 600 if ((status & BD_ENET_RX_LAST) == 0)
590 printk("FEC ENET: rcv is not +last\n"); 601 printk("FEC ENET: rcv is not +last\n");
591#endif 602#endif
592 603
@@ -594,26 +605,26 @@ while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
594 goto rx_processing_done; 605 goto rx_processing_done;
595 606
596 /* Check for errors. */ 607 /* Check for errors. */
597 if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | 608 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
598 BD_ENET_RX_CR | BD_ENET_RX_OV)) { 609 BD_ENET_RX_CR | BD_ENET_RX_OV)) {
599 fep->stats.rx_errors++; 610 fep->stats.rx_errors++;
600 if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) { 611 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
601 /* Frame too long or too short. */ 612 /* Frame too long or too short. */
602 fep->stats.rx_length_errors++; 613 fep->stats.rx_length_errors++;
603 } 614 }
604 if (bdp->cbd_sc & BD_ENET_RX_NO) /* Frame alignment */ 615 if (status & BD_ENET_RX_NO) /* Frame alignment */
605 fep->stats.rx_frame_errors++; 616 fep->stats.rx_frame_errors++;
606 if (bdp->cbd_sc & BD_ENET_RX_CR) /* CRC Error */ 617 if (status & BD_ENET_RX_CR) /* CRC Error */
607 fep->stats.rx_crc_errors++;
608 if (bdp->cbd_sc & BD_ENET_RX_OV) /* FIFO overrun */
609 fep->stats.rx_crc_errors++; 618 fep->stats.rx_crc_errors++;
619 if (status & BD_ENET_RX_OV) /* FIFO overrun */
620 fep->stats.rx_fifo_errors++;
610 } 621 }
611 622
612 /* Report late collisions as a frame error. 623 /* Report late collisions as a frame error.
613 * On this error, the BD is closed, but we don't know what we 624 * On this error, the BD is closed, but we don't know what we
614 * have in the buffer. So, just drop this frame on the floor. 625 * have in the buffer. So, just drop this frame on the floor.
615 */ 626 */
616 if (bdp->cbd_sc & BD_ENET_RX_CL) { 627 if (status & BD_ENET_RX_CL) {
617 fep->stats.rx_errors++; 628 fep->stats.rx_errors++;
618 fep->stats.rx_frame_errors++; 629 fep->stats.rx_frame_errors++;
619 goto rx_processing_done; 630 goto rx_processing_done;
@@ -639,9 +650,7 @@ while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
639 } else { 650 } else {
640 skb->dev = dev; 651 skb->dev = dev;
641 skb_put(skb,pkt_len-4); /* Make room */ 652 skb_put(skb,pkt_len-4); /* Make room */
642 eth_copy_and_sum(skb, 653 eth_copy_and_sum(skb, data, pkt_len-4, 0);
643 (unsigned char *)__va(bdp->cbd_bufaddr),
644 pkt_len-4, 0);
645 skb->protocol=eth_type_trans(skb,dev); 654 skb->protocol=eth_type_trans(skb,dev);
646 netif_rx(skb); 655 netif_rx(skb);
647 } 656 }
@@ -649,15 +658,16 @@ while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
649 658
650 /* Clear the status flags for this buffer. 659 /* Clear the status flags for this buffer.
651 */ 660 */
652 bdp->cbd_sc &= ~BD_ENET_RX_STATS; 661 status &= ~BD_ENET_RX_STATS;
653 662
654 /* Mark the buffer empty. 663 /* Mark the buffer empty.
655 */ 664 */
656 bdp->cbd_sc |= BD_ENET_RX_EMPTY; 665 status |= BD_ENET_RX_EMPTY;
666 bdp->cbd_sc = status;
657 667
658 /* Update BD pointer to next entry. 668 /* Update BD pointer to next entry.
659 */ 669 */
660 if (bdp->cbd_sc & BD_ENET_RX_WRAP) 670 if (status & BD_ENET_RX_WRAP)
661 bdp = fep->rx_bd_base; 671 bdp = fep->rx_bd_base;
662 else 672 else
663 bdp++; 673 bdp++;
@@ -667,9 +677,9 @@ while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
667 * incoming frames. On a heavily loaded network, we should be 677 * incoming frames. On a heavily loaded network, we should be
668 * able to keep up at the expense of system resources. 678 * able to keep up at the expense of system resources.
669 */ 679 */
670 fecp->fec_r_des_active = 0x01000000; 680 fecp->fec_r_des_active = 0;
671#endif 681#endif
672 } /* while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) */ 682 } /* while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) */
673 fep->cur_rx = (cbd_t *)bdp; 683 fep->cur_rx = (cbd_t *)bdp;
674 684
675#if 0 685#if 0
@@ -680,11 +690,12 @@ while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
680 * our way back to the interrupt return only to come right back 690 * our way back to the interrupt return only to come right back
681 * here. 691 * here.
682 */ 692 */
683 fecp->fec_r_des_active = 0x01000000; 693 fecp->fec_r_des_active = 0;
684#endif 694#endif
685} 695}
686 696
687 697
698/* called from interrupt context */
688static void 699static void
689fec_enet_mii(struct net_device *dev) 700fec_enet_mii(struct net_device *dev)
690{ 701{
@@ -696,10 +707,12 @@ fec_enet_mii(struct net_device *dev)
696 fep = netdev_priv(dev); 707 fep = netdev_priv(dev);
697 ep = fep->hwp; 708 ep = fep->hwp;
698 mii_reg = ep->fec_mii_data; 709 mii_reg = ep->fec_mii_data;
710
711 spin_lock(&fep->lock);
699 712
700 if ((mip = mii_head) == NULL) { 713 if ((mip = mii_head) == NULL) {
701 printk("MII and no head!\n"); 714 printk("MII and no head!\n");
702 return; 715 goto unlock;
703 } 716 }
704 717
705 if (mip->mii_func != NULL) 718 if (mip->mii_func != NULL)
@@ -711,6 +724,9 @@ fec_enet_mii(struct net_device *dev)
711 724
712 if ((mip = mii_head) != NULL) 725 if ((mip = mii_head) != NULL)
713 ep->fec_mii_data = mip->mii_regval; 726 ep->fec_mii_data = mip->mii_regval;
727
728unlock:
729 spin_unlock(&fep->lock);
714} 730}
715 731
716static int 732static int
@@ -728,8 +744,7 @@ mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_devi
728 744
729 retval = 0; 745 retval = 0;
730 746
731 save_flags(flags); 747 spin_lock_irqsave(&fep->lock,flags);
732 cli();
733 748
734 if ((mip = mii_free) != NULL) { 749 if ((mip = mii_free) != NULL) {
735 mii_free = mip->mii_next; 750 mii_free = mip->mii_next;
@@ -749,7 +764,7 @@ mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_devi
749 retval = 1; 764 retval = 1;
750 } 765 }
751 766
752 restore_flags(flags); 767 spin_unlock_irqrestore(&fep->lock,flags);
753 768
754 return(retval); 769 return(retval);
755} 770}
@@ -1216,7 +1231,7 @@ static phy_info_t const * const phy_info[] = {
1216}; 1231};
1217 1232
1218/* ------------------------------------------------------------------------- */ 1233/* ------------------------------------------------------------------------- */
1219 1234#if !defined(CONFIG_M532x)
1220#ifdef CONFIG_RPXCLASSIC 1235#ifdef CONFIG_RPXCLASSIC
1221static void 1236static void
1222mii_link_interrupt(void *dev_id); 1237mii_link_interrupt(void *dev_id);
@@ -1224,6 +1239,7 @@ mii_link_interrupt(void *dev_id);
1224static irqreturn_t 1239static irqreturn_t
1225mii_link_interrupt(int irq, void * dev_id, struct pt_regs * regs); 1240mii_link_interrupt(int irq, void * dev_id, struct pt_regs * regs);
1226#endif 1241#endif
1242#endif
1227 1243
1228#if defined(CONFIG_M5272) 1244#if defined(CONFIG_M5272)
1229 1245
@@ -1384,13 +1400,13 @@ static void __inline__ fec_request_intrs(struct net_device *dev)
1384 { 1400 {
1385 volatile unsigned char *icrp; 1401 volatile unsigned char *icrp;
1386 volatile unsigned long *imrp; 1402 volatile unsigned long *imrp;
1387 int i; 1403 int i, ilip;
1388 1404
1389 b = (fep->index) ? MCFICM_INTC1 : MCFICM_INTC0; 1405 b = (fep->index) ? MCFICM_INTC1 : MCFICM_INTC0;
1390 icrp = (volatile unsigned char *) (MCF_IPSBAR + b + 1406 icrp = (volatile unsigned char *) (MCF_IPSBAR + b +
1391 MCFINTC_ICR0); 1407 MCFINTC_ICR0);
1392 for (i = 23; (i < 36); i++) 1408 for (i = 23, ilip = 0x28; (i < 36); i++)
1393 icrp[i] = 0x23; 1409 icrp[i] = ilip--;
1394 1410
1395 imrp = (volatile unsigned long *) (MCF_IPSBAR + b + 1411 imrp = (volatile unsigned long *) (MCF_IPSBAR + b +
1396 MCFINTC_IMRH); 1412 MCFINTC_IMRH);
@@ -1618,6 +1634,159 @@ static void __inline__ fec_uncache(unsigned long addr)
1618 1634
1619/* ------------------------------------------------------------------------- */ 1635/* ------------------------------------------------------------------------- */
1620 1636
1637#elif defined(CONFIG_M532x)
1638/*
1639 * Code specific for M532x
1640 */
1641static void __inline__ fec_request_intrs(struct net_device *dev)
1642{
1643 struct fec_enet_private *fep;
1644 int b;
1645 static const struct idesc {
1646 char *name;
1647 unsigned short irq;
1648 } *idp, id[] = {
1649 { "fec(TXF)", 36 },
1650 { "fec(TXB)", 37 },
1651 { "fec(TXFIFO)", 38 },
1652 { "fec(TXCR)", 39 },
1653 { "fec(RXF)", 40 },
1654 { "fec(RXB)", 41 },
1655 { "fec(MII)", 42 },
1656 { "fec(LC)", 43 },
1657 { "fec(HBERR)", 44 },
1658 { "fec(GRA)", 45 },
1659 { "fec(EBERR)", 46 },
1660 { "fec(BABT)", 47 },
1661 { "fec(BABR)", 48 },
1662 { NULL },
1663 };
1664
1665 fep = netdev_priv(dev);
1666 b = (fep->index) ? 128 : 64;
1667
1668 /* Setup interrupt handlers. */
1669 for (idp = id; idp->name; idp++) {
1670 if (request_irq(b+idp->irq,fec_enet_interrupt,0,idp->name,dev)!=0)
1671 printk("FEC: Could not allocate %s IRQ(%d)!\n",
1672 idp->name, b+idp->irq);
1673 }
1674
1675 /* Unmask interrupts */
1676 MCF_INTC0_ICR36 = 0x2;
1677 MCF_INTC0_ICR37 = 0x2;
1678 MCF_INTC0_ICR38 = 0x2;
1679 MCF_INTC0_ICR39 = 0x2;
1680 MCF_INTC0_ICR40 = 0x2;
1681 MCF_INTC0_ICR41 = 0x2;
1682 MCF_INTC0_ICR42 = 0x2;
1683 MCF_INTC0_ICR43 = 0x2;
1684 MCF_INTC0_ICR44 = 0x2;
1685 MCF_INTC0_ICR45 = 0x2;
1686 MCF_INTC0_ICR46 = 0x2;
1687 MCF_INTC0_ICR47 = 0x2;
1688 MCF_INTC0_ICR48 = 0x2;
1689
1690 MCF_INTC0_IMRH &= ~(
1691 MCF_INTC_IMRH_INT_MASK36 |
1692 MCF_INTC_IMRH_INT_MASK37 |
1693 MCF_INTC_IMRH_INT_MASK38 |
1694 MCF_INTC_IMRH_INT_MASK39 |
1695 MCF_INTC_IMRH_INT_MASK40 |
1696 MCF_INTC_IMRH_INT_MASK41 |
1697 MCF_INTC_IMRH_INT_MASK42 |
1698 MCF_INTC_IMRH_INT_MASK43 |
1699 MCF_INTC_IMRH_INT_MASK44 |
1700 MCF_INTC_IMRH_INT_MASK45 |
1701 MCF_INTC_IMRH_INT_MASK46 |
1702 MCF_INTC_IMRH_INT_MASK47 |
1703 MCF_INTC_IMRH_INT_MASK48 );
1704
1705 /* Set up gpio outputs for MII lines */
1706 MCF_GPIO_PAR_FECI2C |= (0 |
1707 MCF_GPIO_PAR_FECI2C_PAR_MDC_EMDC |
1708 MCF_GPIO_PAR_FECI2C_PAR_MDIO_EMDIO);
1709 MCF_GPIO_PAR_FEC = (0 |
1710 MCF_GPIO_PAR_FEC_PAR_FEC_7W_FEC |
1711 MCF_GPIO_PAR_FEC_PAR_FEC_MII_FEC);
1712}
1713
1714static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep)
1715{
1716 volatile fec_t *fecp;
1717
1718 fecp = fep->hwp;
1719 fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;
1720 fecp->fec_x_cntrl = 0x00;
1721
1722 /*
1723 * Set MII speed to 2.5 MHz
1724 */
1725 fep->phy_speed = ((((MCF_CLK / 2) / (2500000 / 10)) + 5) / 10) * 2;
1726 fecp->fec_mii_speed = fep->phy_speed;
1727
1728 fec_restart(dev, 0);
1729}
1730
1731static void __inline__ fec_get_mac(struct net_device *dev)
1732{
1733 struct fec_enet_private *fep = netdev_priv(dev);
1734 volatile fec_t *fecp;
1735 unsigned char *iap, tmpaddr[ETH_ALEN];
1736
1737 fecp = fep->hwp;
1738
1739 if (FEC_FLASHMAC) {
1740 /*
1741 * Get MAC address from FLASH.
1742 * If it is all 1's or 0's, use the default.
1743 */
1744 iap = FEC_FLASHMAC;
1745 if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
1746 (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
1747 iap = fec_mac_default;
1748 if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
1749 (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
1750 iap = fec_mac_default;
1751 } else {
1752 *((unsigned long *) &tmpaddr[0]) = fecp->fec_addr_low;
1753 *((unsigned short *) &tmpaddr[4]) = (fecp->fec_addr_high >> 16);
1754 iap = &tmpaddr[0];
1755 }
1756
1757 memcpy(dev->dev_addr, iap, ETH_ALEN);
1758
1759 /* Adjust MAC if using default MAC address */
1760 if (iap == fec_mac_default)
1761 dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
1762}
1763
1764static void __inline__ fec_enable_phy_intr(void)
1765{
1766}
1767
1768static void __inline__ fec_disable_phy_intr(void)
1769{
1770}
1771
1772static void __inline__ fec_phy_ack_intr(void)
1773{
1774}
1775
1776static void __inline__ fec_localhw_setup(void)
1777{
1778}
1779
1780/*
1781 * Do not need to make region uncached on 532x.
1782 */
1783static void __inline__ fec_uncache(unsigned long addr)
1784{
1785}
1786
1787/* ------------------------------------------------------------------------- */
1788
1789
1621#else 1790#else
1622 1791
1623/* 1792/*
@@ -1985,9 +2154,12 @@ fec_enet_open(struct net_device *dev)
1985 mii_do_cmd(dev, fep->phy->config); 2154 mii_do_cmd(dev, fep->phy->config);
1986 mii_do_cmd(dev, phy_cmd_config); /* display configuration */ 2155 mii_do_cmd(dev, phy_cmd_config); /* display configuration */
1987 2156
1988 /* FIXME: use netif_carrier_{on,off} ; this polls 2157 /* Poll until the PHY tells us its configuration
1989 * until link is up which is wrong... could be 2158 * (not link state).
1990 * 30 seconds or more we are trapped in here. -jgarzik 2159 * Request is initiated by mii_do_cmd above, but answer
2160 * comes by interrupt.
2161 * This should take about 25 usec per register at 2.5 MHz,
2162 * and we read approximately 5 registers.
1991 */ 2163 */
1992 while(!fep->sequence_done) 2164 while(!fep->sequence_done)
1993 schedule(); 2165 schedule();
@@ -2253,15 +2425,11 @@ int __init fec_enet_init(struct net_device *dev)
2253 */ 2425 */
2254 fec_request_intrs(dev); 2426 fec_request_intrs(dev);
2255 2427
2256 /* Clear and enable interrupts */
2257 fecp->fec_ievent = 0xffc00000;
2258 fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_TXB |
2259 FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII);
2260 fecp->fec_hash_table_high = 0; 2428 fecp->fec_hash_table_high = 0;
2261 fecp->fec_hash_table_low = 0; 2429 fecp->fec_hash_table_low = 0;
2262 fecp->fec_r_buff_size = PKT_MAXBLR_SIZE; 2430 fecp->fec_r_buff_size = PKT_MAXBLR_SIZE;
2263 fecp->fec_ecntrl = 2; 2431 fecp->fec_ecntrl = 2;
2264 fecp->fec_r_des_active = 0x01000000; 2432 fecp->fec_r_des_active = 0;
2265 2433
2266 dev->base_addr = (unsigned long)fecp; 2434 dev->base_addr = (unsigned long)fecp;
2267 2435
@@ -2281,6 +2449,11 @@ int __init fec_enet_init(struct net_device *dev)
2281 /* setup MII interface */ 2449 /* setup MII interface */
2282 fec_set_mii(dev, fep); 2450 fec_set_mii(dev, fep);
2283 2451
2452 /* Clear and enable interrupts */
2453 fecp->fec_ievent = 0xffc00000;
2454 fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_TXB |
2455 FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII);
2456
2284 /* Queue up command to detect the PHY and initialize the 2457 /* Queue up command to detect the PHY and initialize the
2285 * remainder of the interface. 2458 * remainder of the interface.
2286 */ 2459 */
@@ -2312,11 +2485,6 @@ fec_restart(struct net_device *dev, int duplex)
2312 fecp->fec_ecntrl = 1; 2485 fecp->fec_ecntrl = 1;
2313 udelay(10); 2486 udelay(10);
2314 2487
2315 /* Enable interrupts we wish to service.
2316 */
2317 fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_TXB |
2318 FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII);
2319
2320 /* Clear any outstanding interrupt. 2488 /* Clear any outstanding interrupt.
2321 */ 2489 */
2322 fecp->fec_ievent = 0xffc00000; 2490 fecp->fec_ievent = 0xffc00000;
@@ -2408,7 +2576,12 @@ fec_restart(struct net_device *dev, int duplex)
2408 /* And last, enable the transmit and receive processing. 2576 /* And last, enable the transmit and receive processing.
2409 */ 2577 */
2410 fecp->fec_ecntrl = 2; 2578 fecp->fec_ecntrl = 2;
2411 fecp->fec_r_des_active = 0x01000000; 2579 fecp->fec_r_des_active = 0;
2580
2581 /* Enable interrupts we wish to service.
2582 */
2583 fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_TXB |
2584 FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII);
2412} 2585}
2413 2586
2414static void 2587static void
@@ -2420,9 +2593,16 @@ fec_stop(struct net_device *dev)
2420 fep = netdev_priv(dev); 2593 fep = netdev_priv(dev);
2421 fecp = fep->hwp; 2594 fecp = fep->hwp;
2422 2595
2423 fecp->fec_x_cntrl = 0x01; /* Graceful transmit stop */ 2596 /*
2424 2597 ** We cannot expect a graceful transmit stop without link !!!
2425 while(!(fecp->fec_ievent & FEC_ENET_GRA)); 2598 */
2599 if (fep->link)
2600 {
2601 fecp->fec_x_cntrl = 0x01; /* Graceful transmit stop */
2602 udelay(10);
2603 if (!(fecp->fec_ievent & FEC_ENET_GRA))
2604 printk("fec_stop : Graceful transmit stop did not complete !\n");
2605 }
2426 2606
2427 /* Whack a reset. We should wait for this. 2607 /* Whack a reset. We should wait for this.
2428 */ 2608 */
diff --git a/drivers/net/fs_enet/fs_enet-mii.c b/drivers/net/fs_enet/fs_enet-mii.c
index c6770377ef..0cd07150bf 100644
--- a/drivers/net/fs_enet/fs_enet-mii.c
+++ b/drivers/net/fs_enet/fs_enet-mii.c
@@ -431,8 +431,7 @@ static struct fs_enet_mii_bus *create_bus(const struct fs_mii_bus_info *bi)
431 return bus; 431 return bus;
432 432
433err: 433err:
434 if (bus) 434 kfree(bus);
435 kfree(bus);
436 return ERR_PTR(ret); 435 return ERR_PTR(ret);
437} 436}
438 437
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index 0d5fccc984..c9a46b8994 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -436,7 +436,7 @@ static int __init dmascc_init(void)
436module_init(dmascc_init); 436module_init(dmascc_init);
437module_exit(dmascc_exit); 437module_exit(dmascc_exit);
438 438
439static void dev_setup(struct net_device *dev) 439static void __init dev_setup(struct net_device *dev)
440{ 440{
441 dev->type = ARPHRD_AX25; 441 dev->type = ARPHRD_AX25;
442 dev->hard_header_len = AX25_MAX_HEADER_LEN; 442 dev->hard_header_len = AX25_MAX_HEADER_LEN;
diff --git a/drivers/net/irda/irport.c b/drivers/net/irda/irport.c
index 98fa5319e5..44efd49bf4 100644
--- a/drivers/net/irda/irport.c
+++ b/drivers/net/irda/irport.c
@@ -386,7 +386,7 @@ static int __irport_change_speed(struct irda_task *task)
386 /* Locking notes : this function may be called from irq context with 386 /* Locking notes : this function may be called from irq context with
387 * spinlock, via irport_write_wakeup(), or from non-interrupt without 387 * spinlock, via irport_write_wakeup(), or from non-interrupt without
388 * spinlock (from the task timer). Yuck ! 388 * spinlock (from the task timer). Yuck !
389 * This is ugly, and unsafe is the spinlock is not already aquired. 389 * This is ugly, and unsafe is the spinlock is not already acquired.
390 * This will be fixed when irda-task get rewritten. 390 * This will be fixed when irda-task get rewritten.
391 * Jean II */ 391 * Jean II */
392 if (!spin_is_locked(&self->lock)) { 392 if (!spin_is_locked(&self->lock)) {
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index cc7ff8f00e..cb62f2a967 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -115,8 +115,12 @@ static nsc_chip_t chips[] = {
115 /* Contributed by Jan Frey - IBM A30/A31 */ 115 /* Contributed by Jan Frey - IBM A30/A31 */
116 { "PC8739x", { 0x2e, 0x4e, 0x0 }, 0x20, 0xea, 0xff, 116 { "PC8739x", { 0x2e, 0x4e, 0x0 }, 0x20, 0xea, 0xff,
117 nsc_ircc_probe_39x, nsc_ircc_init_39x }, 117 nsc_ircc_probe_39x, nsc_ircc_init_39x },
118 { "IBM", { 0x2e, 0x4e, 0x0 }, 0x20, 0xf4, 0xff, 118 /* IBM ThinkPads using PC8738x (T60/X60/Z60) */
119 nsc_ircc_probe_39x, nsc_ircc_init_39x }, 119 { "IBM-PC8738x", { 0x2e, 0x4e, 0x0 }, 0x20, 0xf4, 0xff,
120 nsc_ircc_probe_39x, nsc_ircc_init_39x },
121 /* IBM ThinkPads using PC8394T (T43/R52/?) */
122 { "IBM-PC8394T", { 0x2e, 0x4e, 0x0 }, 0x20, 0xf9, 0xff,
123 nsc_ircc_probe_39x, nsc_ircc_init_39x },
120 { NULL } 124 { NULL }
121}; 125};
122 126
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 2e4ecedba0..5657049c21 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -226,7 +226,6 @@ static int full_duplex[MAX_UNITS];
226 NATSEMI_PG1_NREGS) 226 NATSEMI_PG1_NREGS)
227#define NATSEMI_REGS_VER 1 /* v1 added RFDR registers */ 227#define NATSEMI_REGS_VER 1 /* v1 added RFDR registers */
228#define NATSEMI_REGS_SIZE (NATSEMI_NREGS * sizeof(u32)) 228#define NATSEMI_REGS_SIZE (NATSEMI_NREGS * sizeof(u32))
229#define NATSEMI_DEF_EEPROM_SIZE 24 /* 12 16-bit values */
230 229
231/* Buffer sizes: 230/* Buffer sizes:
232 * The nic writes 32-bit values, even if the upper bytes of 231 * The nic writes 32-bit values, even if the upper bytes of
@@ -344,18 +343,6 @@ None characterised.
344 343
345 344
346 345
347enum pcistuff {
348 PCI_USES_IO = 0x01,
349 PCI_USES_MEM = 0x02,
350 PCI_USES_MASTER = 0x04,
351 PCI_ADDR0 = 0x08,
352 PCI_ADDR1 = 0x10,
353};
354
355/* MMIO operations required */
356#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
357
358
359/* 346/*
360 * Support for fibre connections on Am79C874: 347 * Support for fibre connections on Am79C874:
361 * This phy needs a special setup when connected to a fibre cable. 348 * This phy needs a special setup when connected to a fibre cable.
@@ -363,22 +350,25 @@ enum pcistuff {
363 */ 350 */
364#define PHYID_AM79C874 0x0022561b 351#define PHYID_AM79C874 0x0022561b
365 352
366#define MII_MCTRL 0x15 /* mode control register */ 353enum {
367#define MII_FX_SEL 0x0001 /* 100BASE-FX (fiber) */ 354 MII_MCTRL = 0x15, /* mode control register */
368#define MII_EN_SCRM 0x0004 /* enable scrambler (tp) */ 355 MII_FX_SEL = 0x0001, /* 100BASE-FX (fiber) */
356 MII_EN_SCRM = 0x0004, /* enable scrambler (tp) */
357};
369 358
370 359
371/* array of board data directly indexed by pci_tbl[x].driver_data */ 360/* array of board data directly indexed by pci_tbl[x].driver_data */
372static const struct { 361static const struct {
373 const char *name; 362 const char *name;
374 unsigned long flags; 363 unsigned long flags;
364 unsigned int eeprom_size;
375} natsemi_pci_info[] __devinitdata = { 365} natsemi_pci_info[] __devinitdata = {
376 { "NatSemi DP8381[56]", PCI_IOTYPE }, 366 { "NatSemi DP8381[56]", 0, 24 },
377}; 367};
378 368
379static struct pci_device_id natsemi_pci_tbl[] = { 369static const struct pci_device_id natsemi_pci_tbl[] __devinitdata = {
380 { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_83815, PCI_ANY_ID, PCI_ANY_ID, }, 370 { PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
381 { 0, }, 371 { } /* terminate list */
382}; 372};
383MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl); 373MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl);
384 374
@@ -813,6 +803,42 @@ static void move_int_phy(struct net_device *dev, int addr)
813 udelay(1); 803 udelay(1);
814} 804}
815 805
806static void __devinit natsemi_init_media (struct net_device *dev)
807{
808 struct netdev_private *np = netdev_priv(dev);
809 u32 tmp;
810
811 netif_carrier_off(dev);
812
813 /* get the initial settings from hardware */
814 tmp = mdio_read(dev, MII_BMCR);
815 np->speed = (tmp & BMCR_SPEED100)? SPEED_100 : SPEED_10;
816 np->duplex = (tmp & BMCR_FULLDPLX)? DUPLEX_FULL : DUPLEX_HALF;
817 np->autoneg = (tmp & BMCR_ANENABLE)? AUTONEG_ENABLE: AUTONEG_DISABLE;
818 np->advertising= mdio_read(dev, MII_ADVERTISE);
819
820 if ((np->advertising & ADVERTISE_ALL) != ADVERTISE_ALL
821 && netif_msg_probe(np)) {
822 printk(KERN_INFO "natsemi %s: Transceiver default autonegotiation %s "
823 "10%s %s duplex.\n",
824 pci_name(np->pci_dev),
825 (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE)?
826 "enabled, advertise" : "disabled, force",
827 (np->advertising &
828 (ADVERTISE_100FULL|ADVERTISE_100HALF))?
829 "0" : "",
830 (np->advertising &
831 (ADVERTISE_100FULL|ADVERTISE_10FULL))?
832 "full" : "half");
833 }
834 if (netif_msg_probe(np))
835 printk(KERN_INFO
836 "natsemi %s: Transceiver status %#04x advertising %#04x.\n",
837 pci_name(np->pci_dev), mdio_read(dev, MII_BMSR),
838 np->advertising);
839
840}
841
816static int __devinit natsemi_probe1 (struct pci_dev *pdev, 842static int __devinit natsemi_probe1 (struct pci_dev *pdev,
817 const struct pci_device_id *ent) 843 const struct pci_device_id *ent)
818{ 844{
@@ -852,8 +878,7 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
852 iosize = pci_resource_len(pdev, pcibar); 878 iosize = pci_resource_len(pdev, pcibar);
853 irq = pdev->irq; 879 irq = pdev->irq;
854 880
855 if (natsemi_pci_info[chip_idx].flags & PCI_USES_MASTER) 881 pci_set_master(pdev);
856 pci_set_master(pdev);
857 882
858 dev = alloc_etherdev(sizeof (struct netdev_private)); 883 dev = alloc_etherdev(sizeof (struct netdev_private));
859 if (!dev) 884 if (!dev)
@@ -892,7 +917,7 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
892 np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG; 917 np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG;
893 np->hands_off = 0; 918 np->hands_off = 0;
894 np->intr_status = 0; 919 np->intr_status = 0;
895 np->eeprom_size = NATSEMI_DEF_EEPROM_SIZE; 920 np->eeprom_size = natsemi_pci_info[chip_idx].eeprom_size;
896 921
897 /* Initial port: 922 /* Initial port:
898 * - If the nic was configured to use an external phy and if find_mii 923 * - If the nic was configured to use an external phy and if find_mii
@@ -957,34 +982,7 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
957 if (mtu) 982 if (mtu)
958 dev->mtu = mtu; 983 dev->mtu = mtu;
959 984
960 netif_carrier_off(dev); 985 natsemi_init_media(dev);
961
962 /* get the initial settings from hardware */
963 tmp = mdio_read(dev, MII_BMCR);
964 np->speed = (tmp & BMCR_SPEED100)? SPEED_100 : SPEED_10;
965 np->duplex = (tmp & BMCR_FULLDPLX)? DUPLEX_FULL : DUPLEX_HALF;
966 np->autoneg = (tmp & BMCR_ANENABLE)? AUTONEG_ENABLE: AUTONEG_DISABLE;
967 np->advertising= mdio_read(dev, MII_ADVERTISE);
968
969 if ((np->advertising & ADVERTISE_ALL) != ADVERTISE_ALL
970 && netif_msg_probe(np)) {
971 printk(KERN_INFO "natsemi %s: Transceiver default autonegotiation %s "
972 "10%s %s duplex.\n",
973 pci_name(np->pci_dev),
974 (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE)?
975 "enabled, advertise" : "disabled, force",
976 (np->advertising &
977 (ADVERTISE_100FULL|ADVERTISE_100HALF))?
978 "0" : "",
979 (np->advertising &
980 (ADVERTISE_100FULL|ADVERTISE_10FULL))?
981 "full" : "half");
982 }
983 if (netif_msg_probe(np))
984 printk(KERN_INFO
985 "natsemi %s: Transceiver status %#04x advertising %#04x.\n",
986 pci_name(np->pci_dev), mdio_read(dev, MII_BMSR),
987 np->advertising);
988 986
989 /* save the silicon revision for later querying */ 987 /* save the silicon revision for later querying */
990 np->srr = readl(ioaddr + SiliconRev); 988 np->srr = readl(ioaddr + SiliconRev);
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index e74bf5014e..a73d545530 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -1883,7 +1883,7 @@ static void smc_reset(struct net_device *dev)
1883 /* Set the Window 1 control, configuration and station addr registers. 1883 /* Set the Window 1 control, configuration and station addr registers.
1884 No point in writing the I/O base register ;-> */ 1884 No point in writing the I/O base register ;-> */
1885 SMC_SELECT_BANK(1); 1885 SMC_SELECT_BANK(1);
1886 /* Automatically release succesfully transmitted packets, 1886 /* Automatically release successfully transmitted packets,
1887 Accept link errors, counter and Tx error interrupts. */ 1887 Accept link errors, counter and Tx error interrupts. */
1888 outw(CTL_AUTO_RELEASE | CTL_TE_ENABLE | CTL_CR_ENABLE, 1888 outw(CTL_AUTO_RELEASE | CTL_TE_ENABLE | CTL_CR_ENABLE,
1889 ioaddr + CONTROL); 1889 ioaddr + CONTROL);
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index fc08c4af50..0e01c75da4 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -309,12 +309,6 @@ static int pcnet32_alloc_ring(struct net_device *dev, char *name);
309static void pcnet32_free_ring(struct net_device *dev); 309static void pcnet32_free_ring(struct net_device *dev);
310static void pcnet32_check_media(struct net_device *dev, int verbose); 310static void pcnet32_check_media(struct net_device *dev, int verbose);
311 311
312enum pci_flags_bit {
313 PCI_USES_IO = 1, PCI_USES_MEM = 2, PCI_USES_MASTER = 4,
314 PCI_ADDR0 = 0x10 << 0, PCI_ADDR1 = 0x10 << 1, PCI_ADDR2 =
315 0x10 << 2, PCI_ADDR3 = 0x10 << 3,
316};
317
318static u16 pcnet32_wio_read_csr(unsigned long addr, int index) 312static u16 pcnet32_wio_read_csr(unsigned long addr, int index)
319{ 313{
320 outw(index, addr + PCNET32_WIO_RAP); 314 outw(index, addr + PCNET32_WIO_RAP);
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index bef79e454c..3f702c503a 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -123,9 +123,9 @@ static int lxt971_config_intr(struct phy_device *phydev)
123} 123}
124 124
125static struct phy_driver lxt970_driver = { 125static struct phy_driver lxt970_driver = {
126 .phy_id = 0x07810000, 126 .phy_id = 0x78100000,
127 .name = "LXT970", 127 .name = "LXT970",
128 .phy_id_mask = 0x0fffffff, 128 .phy_id_mask = 0xfffffff0,
129 .features = PHY_BASIC_FEATURES, 129 .features = PHY_BASIC_FEATURES,
130 .flags = PHY_HAS_INTERRUPT, 130 .flags = PHY_HAS_INTERRUPT,
131 .config_init = lxt970_config_init, 131 .config_init = lxt970_config_init,
@@ -137,9 +137,9 @@ static struct phy_driver lxt970_driver = {
137}; 137};
138 138
139static struct phy_driver lxt971_driver = { 139static struct phy_driver lxt971_driver = {
140 .phy_id = 0x0001378e, 140 .phy_id = 0x001378e0,
141 .name = "LXT971", 141 .name = "LXT971",
142 .phy_id_mask = 0x0fffffff, 142 .phy_id_mask = 0xfffffff0,
143 .features = PHY_BASIC_FEATURES, 143 .features = PHY_BASIC_FEATURES,
144 .flags = PHY_HAS_INTERRUPT, 144 .flags = PHY_HAS_INTERRUPT,
145 .config_aneg = genphy_config_aneg, 145 .config_aneg = genphy_config_aneg,
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 01cd8ec751..d643a097fa 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -2578,8 +2578,7 @@ ppp_find_channel(int unit)
2578 2578
2579 list_for_each_entry(pch, &new_channels, list) { 2579 list_for_each_entry(pch, &new_channels, list) {
2580 if (pch->file.index == unit) { 2580 if (pch->file.index == unit) {
2581 list_del(&pch->list); 2581 list_move(&pch->list, &all_channels);
2582 list_add(&pch->list, &all_channels);
2583 return pch; 2582 return pch;
2584 } 2583 }
2585 } 2584 }
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 8fea2aa455..602a6e5002 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -212,26 +212,15 @@ Test with 'ping -s 10000' on a fast computer.
212/* 212/*
213 PCI probe table. 213 PCI probe table.
214*/ 214*/
215enum pci_id_flags_bits {
216 /* Set PCI command register bits before calling probe1(). */
217 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
218 /* Read and map the single following PCI BAR. */
219 PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
220 PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
221};
222enum chip_capability_flags { 215enum chip_capability_flags {
223 CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,}; 216 CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
224#ifdef USE_IO_OPS 217};
225#define W840_FLAGS (PCI_USES_IO | PCI_ADDR0 | PCI_USES_MASTER)
226#else
227#define W840_FLAGS (PCI_USES_MEM | PCI_ADDR1 | PCI_USES_MASTER)
228#endif
229 218
230static struct pci_device_id w840_pci_tbl[] = { 219static const struct pci_device_id w840_pci_tbl[] = {
231 { 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 }, 220 { 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 },
232 { 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, 221 { 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
233 { 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 }, 222 { 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
234 { 0, } 223 { }
235}; 224};
236MODULE_DEVICE_TABLE(pci, w840_pci_tbl); 225MODULE_DEVICE_TABLE(pci, w840_pci_tbl);
237 226
@@ -241,18 +230,17 @@ struct pci_id_info {
241 int pci, pci_mask, subsystem, subsystem_mask; 230 int pci, pci_mask, subsystem, subsystem_mask;
242 int revision, revision_mask; /* Only 8 bits. */ 231 int revision, revision_mask; /* Only 8 bits. */
243 } id; 232 } id;
244 enum pci_id_flags_bits pci_flags;
245 int io_size; /* Needed for I/O region check or ioremap(). */ 233 int io_size; /* Needed for I/O region check or ioremap(). */
246 int drv_flags; /* Driver use, intended as capability flags. */ 234 int drv_flags; /* Driver use, intended as capability flags. */
247}; 235};
248static struct pci_id_info pci_id_tbl[] = { 236static struct pci_id_info pci_id_tbl[] = {
249 {"Winbond W89c840", /* Sometime a Level-One switch card. */ 237 {"Winbond W89c840", /* Sometime a Level-One switch card. */
250 { 0x08401050, 0xffffffff, 0x81530000, 0xffff0000 }, 238 { 0x08401050, 0xffffffff, 0x81530000, 0xffff0000 },
251 W840_FLAGS, 128, CanHaveMII | HasBrokenTx | FDXOnNoMII}, 239 128, CanHaveMII | HasBrokenTx | FDXOnNoMII},
252 {"Winbond W89c840", { 0x08401050, 0xffffffff, }, 240 {"Winbond W89c840", { 0x08401050, 0xffffffff, },
253 W840_FLAGS, 128, CanHaveMII | HasBrokenTx}, 241 128, CanHaveMII | HasBrokenTx},
254 {"Compex RL100-ATX", { 0x201111F6, 0xffffffff,}, 242 {"Compex RL100-ATX", { 0x201111F6, 0xffffffff,},
255 W840_FLAGS, 128, CanHaveMII | HasBrokenTx}, 243 128, CanHaveMII | HasBrokenTx},
256 {NULL,}, /* 0 terminated list. */ 244 {NULL,}, /* 0 terminated list. */
257}; 245};
258 246
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index b60ef02db7..c92ac9fde0 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -7,7 +7,7 @@
7 * under the terms of version 2 of the GNU General Public License 7 * under the terms of version 2 of the GNU General Public License
8 * as published by the Free Software Foundation. 8 * as published by the Free Software Foundation.
9 * 9 *
10 * For information see http://hq.pm.waw.pl/hdlc/ 10 * For information see <http://www.kernel.org/pub/linux/utils/net/hdlc/>
11 * 11 *
12 * Sources of information: 12 * Sources of information:
13 * Hitachi HD64570 SCA User's Manual 13 * Hitachi HD64570 SCA User's Manual
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index b7d88db89a..e013b817ca 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -7,7 +7,7 @@
7 * under the terms of version 2 of the GNU General Public License 7 * under the terms of version 2 of the GNU General Public License
8 * as published by the Free Software Foundation. 8 * as published by the Free Software Foundation.
9 * 9 *
10 * For information see http://hq.pm.waw.pl/hdlc/ 10 * For information see <http://www.kernel.org/pub/linux/utils/net/hdlc/>
11 * 11 *
12 * Note: integrated CSU/DSU/DDS are not supported by this driver 12 * Note: integrated CSU/DSU/DDS are not supported by this driver
13 * 13 *
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index 670e8bd224..24c3c57c13 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -7,7 +7,7 @@
7 * under the terms of version 2 of the GNU General Public License 7 * under the terms of version 2 of the GNU General Public License
8 * as published by the Free Software Foundation. 8 * as published by the Free Software Foundation.
9 * 9 *
10 * For information see http://hq.pm.waw.pl/hdlc/ 10 * For information see <http://www.kernel.org/pub/linux/utils/net/hdlc/>
11 * 11 *
12 * Sources of information: 12 * Sources of information:
13 * Hitachi HD64572 SCA-II User's Manual 13 * Hitachi HD64572 SCA-II User's Manual
diff --git a/drivers/net/wireless/bcm43xx/Kconfig b/drivers/net/wireless/bcm43xx/Kconfig
index 25ea4748f0..533993f538 100644
--- a/drivers/net/wireless/bcm43xx/Kconfig
+++ b/drivers/net/wireless/bcm43xx/Kconfig
@@ -2,6 +2,7 @@ config BCM43XX
2 tristate "Broadcom BCM43xx wireless support" 2 tristate "Broadcom BCM43xx wireless support"
3 depends on PCI && IEEE80211 && IEEE80211_SOFTMAC && NET_RADIO && EXPERIMENTAL 3 depends on PCI && IEEE80211 && IEEE80211_SOFTMAC && NET_RADIO && EXPERIMENTAL
4 select FW_LOADER 4 select FW_LOADER
5 select HW_RANDOM
5 ---help--- 6 ---help---
6 This is an experimental driver for the Broadcom 43xx wireless chip, 7 This is an experimental driver for the Broadcom 43xx wireless chip,
7 found in the Apple Airport Extreme and various other devices. 8 found in the Apple Airport Extreme and various other devices.
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx.h b/drivers/net/wireless/bcm43xx/bcm43xx.h
index d8f917c21e..17a56828e2 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx.h
@@ -1,6 +1,7 @@
1#ifndef BCM43xx_H_ 1#ifndef BCM43xx_H_
2#define BCM43xx_H_ 2#define BCM43xx_H_
3 3
4#include <linux/hw_random.h>
4#include <linux/version.h> 5#include <linux/version.h>
5#include <linux/kernel.h> 6#include <linux/kernel.h>
6#include <linux/spinlock.h> 7#include <linux/spinlock.h>
@@ -82,6 +83,7 @@
82#define BCM43xx_MMIO_TSF_1 0x634 /* core rev < 3 only */ 83#define BCM43xx_MMIO_TSF_1 0x634 /* core rev < 3 only */
83#define BCM43xx_MMIO_TSF_2 0x636 /* core rev < 3 only */ 84#define BCM43xx_MMIO_TSF_2 0x636 /* core rev < 3 only */
84#define BCM43xx_MMIO_TSF_3 0x638 /* core rev < 3 only */ 85#define BCM43xx_MMIO_TSF_3 0x638 /* core rev < 3 only */
86#define BCM43xx_MMIO_RNG 0x65A
85#define BCM43xx_MMIO_POWERUP_DELAY 0x6A8 87#define BCM43xx_MMIO_POWERUP_DELAY 0x6A8
86 88
87/* SPROM offsets. */ 89/* SPROM offsets. */
@@ -750,6 +752,10 @@ struct bcm43xx_private {
750 const struct firmware *initvals0; 752 const struct firmware *initvals0;
751 const struct firmware *initvals1; 753 const struct firmware *initvals1;
752 754
755 /* Random Number Generator. */
756 struct hwrng rng;
757 char rng_name[20 + 1];
758
753 /* Debugging stuff follows. */ 759 /* Debugging stuff follows. */
754#ifdef CONFIG_BCM43XX_DEBUG 760#ifdef CONFIG_BCM43XX_DEBUG
755 struct bcm43xx_dfsentry *dfsentry; 761 struct bcm43xx_dfsentry *dfsentry;
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index 085d7857fe..27bcf47228 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -3237,6 +3237,39 @@ static void bcm43xx_security_init(struct bcm43xx_private *bcm)
3237 bcm43xx_clear_keys(bcm); 3237 bcm43xx_clear_keys(bcm);
3238} 3238}
3239 3239
3240static int bcm43xx_rng_read(struct hwrng *rng, u32 *data)
3241{
3242 struct bcm43xx_private *bcm = (struct bcm43xx_private *)rng->priv;
3243 unsigned long flags;
3244
3245 bcm43xx_lock_irqonly(bcm, flags);
3246 *data = bcm43xx_read16(bcm, BCM43xx_MMIO_RNG);
3247 bcm43xx_unlock_irqonly(bcm, flags);
3248
3249 return (sizeof(u16));
3250}
3251
3252static void bcm43xx_rng_exit(struct bcm43xx_private *bcm)
3253{
3254 hwrng_unregister(&bcm->rng);
3255}
3256
3257static int bcm43xx_rng_init(struct bcm43xx_private *bcm)
3258{
3259 int err;
3260
3261 snprintf(bcm->rng_name, ARRAY_SIZE(bcm->rng_name),
3262 "%s_%s", KBUILD_MODNAME, bcm->net_dev->name);
3263 bcm->rng.name = bcm->rng_name;
3264 bcm->rng.data_read = bcm43xx_rng_read;
3265 bcm->rng.priv = (unsigned long)bcm;
3266 err = hwrng_register(&bcm->rng);
3267 if (err)
3268 printk(KERN_ERR PFX "RNG init failed (%d)\n", err);
3269
3270 return err;
3271}
3272
3240/* This is the opposite of bcm43xx_init_board() */ 3273/* This is the opposite of bcm43xx_init_board() */
3241static void bcm43xx_free_board(struct bcm43xx_private *bcm) 3274static void bcm43xx_free_board(struct bcm43xx_private *bcm)
3242{ 3275{
@@ -3248,6 +3281,7 @@ static void bcm43xx_free_board(struct bcm43xx_private *bcm)
3248 3281
3249 bcm43xx_set_status(bcm, BCM43xx_STAT_SHUTTINGDOWN); 3282 bcm43xx_set_status(bcm, BCM43xx_STAT_SHUTTINGDOWN);
3250 3283
3284 bcm43xx_rng_exit(bcm);
3251 for (i = 0; i < BCM43xx_MAX_80211_CORES; i++) { 3285 for (i = 0; i < BCM43xx_MAX_80211_CORES; i++) {
3252 if (!bcm->core_80211[i].available) 3286 if (!bcm->core_80211[i].available)
3253 continue; 3287 continue;
@@ -3325,6 +3359,9 @@ static int bcm43xx_init_board(struct bcm43xx_private *bcm)
3325 bcm43xx_switch_core(bcm, &bcm->core_80211[0]); 3359 bcm43xx_switch_core(bcm, &bcm->core_80211[0]);
3326 bcm43xx_mac_enable(bcm); 3360 bcm43xx_mac_enable(bcm);
3327 } 3361 }
3362 err = bcm43xx_rng_init(bcm);
3363 if (err)
3364 goto err_80211_unwind;
3328 bcm43xx_macfilter_clear(bcm, BCM43xx_MACFILTER_ASSOC); 3365 bcm43xx_macfilter_clear(bcm, BCM43xx_MACFILTER_ASSOC);
3329 bcm43xx_macfilter_set(bcm, BCM43xx_MACFILTER_SELF, (u8 *)(bcm->net_dev->dev_addr)); 3366 bcm43xx_macfilter_set(bcm, BCM43xx_MACFILTER_SELF, (u8 *)(bcm->net_dev->dev_addr));
3330 dprintk(KERN_INFO PFX "80211 cores initialized\n"); 3367 dprintk(KERN_INFO PFX "80211 cores initialized\n");
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index 72335c8eb9..94aeb23a77 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -1485,7 +1485,7 @@ static int ipw2100_hw_stop_adapter(struct ipw2100_priv *priv)
1485 * 1485 *
1486 * Sending the PREPARE_FOR_POWER_DOWN will restrict the 1486 * Sending the PREPARE_FOR_POWER_DOWN will restrict the
1487 * hardware from going into standby mode and will transition 1487 * hardware from going into standby mode and will transition
1488 * out of D0-standy if it is already in that state. 1488 * out of D0-standby if it is already in that state.
1489 * 1489 *
1490 * STATUS_PREPARE_POWER_DOWN_COMPLETE will be sent by the 1490 * STATUS_PREPARE_POWER_DOWN_COMPLETE will be sent by the
1491 * driver upon completion. Once received, the driver can 1491 * driver upon completion. Once received, the driver can
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 081a899966..a8a8f97543 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -1229,12 +1229,6 @@ static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1229 return error; 1229 return error;
1230} 1230}
1231 1231
1232static void ipw_free_error_log(struct ipw_fw_error *error)
1233{
1234 if (error)
1235 kfree(error);
1236}
1237
1238static ssize_t show_event_log(struct device *d, 1232static ssize_t show_event_log(struct device *d,
1239 struct device_attribute *attr, char *buf) 1233 struct device_attribute *attr, char *buf)
1240{ 1234{
@@ -1296,10 +1290,9 @@ static ssize_t clear_error(struct device *d,
1296 const char *buf, size_t count) 1290 const char *buf, size_t count)
1297{ 1291{
1298 struct ipw_priv *priv = dev_get_drvdata(d); 1292 struct ipw_priv *priv = dev_get_drvdata(d);
1299 if (priv->error) { 1293
1300 ipw_free_error_log(priv->error); 1294 kfree(priv->error);
1301 priv->error = NULL; 1295 priv->error = NULL;
1302 }
1303 return count; 1296 return count;
1304} 1297}
1305 1298
@@ -1970,8 +1963,7 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
1970 struct ipw_fw_error *error = 1963 struct ipw_fw_error *error =
1971 ipw_alloc_error_log(priv); 1964 ipw_alloc_error_log(priv);
1972 ipw_dump_error_log(priv, error); 1965 ipw_dump_error_log(priv, error);
1973 if (error) 1966 kfree(error);
1974 ipw_free_error_log(error);
1975 } 1967 }
1976#endif 1968#endif
1977 } else { 1969 } else {
@@ -11693,10 +11685,8 @@ static void ipw_pci_remove(struct pci_dev *pdev)
11693 } 11685 }
11694 } 11686 }
11695 11687
11696 if (priv->error) { 11688 kfree(priv->error);
11697 ipw_free_error_log(priv->error); 11689 priv->error = NULL;
11698 priv->error = NULL;
11699 }
11700 11690
11701#ifdef CONFIG_IPW2200_PROMISCUOUS 11691#ifdef CONFIG_IPW2200_PROMISCUOUS
11702 ipw_prom_free(priv); 11692 ipw_prom_free(priv);
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index ecec8e5db7..569305f575 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -234,14 +234,6 @@ See Packet Engines confidential appendix (prototype chips only).
234 234
235 235
236 236
237enum pci_id_flags_bits {
238 /* Set PCI command register bits before calling probe1(). */
239 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
240 /* Read and map the single following PCI BAR. */
241 PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
242 PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
243 PCI_UNUSED_IRQ=0x800,
244};
245enum capability_flags { 237enum capability_flags {
246 HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16, 238 HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
247 HasMACAddrBug=32, /* Only on early revs. */ 239 HasMACAddrBug=32, /* Only on early revs. */
@@ -249,11 +241,6 @@ enum capability_flags {
249}; 241};
250/* The PCI I/O space extent. */ 242/* The PCI I/O space extent. */
251#define YELLOWFIN_SIZE 0x100 243#define YELLOWFIN_SIZE 0x100
252#ifdef USE_IO_OPS
253#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0)
254#else
255#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
256#endif
257 244
258struct pci_id_info { 245struct pci_id_info {
259 const char *name; 246 const char *name;
@@ -261,24 +248,23 @@ struct pci_id_info {
261 int pci, pci_mask, subsystem, subsystem_mask; 248 int pci, pci_mask, subsystem, subsystem_mask;
262 int revision, revision_mask; /* Only 8 bits. */ 249 int revision, revision_mask; /* Only 8 bits. */
263 } id; 250 } id;
264 enum pci_id_flags_bits pci_flags;
265 int io_size; /* Needed for I/O region check or ioremap(). */ 251 int io_size; /* Needed for I/O region check or ioremap(). */
266 int drv_flags; /* Driver use, intended as capability flags. */ 252 int drv_flags; /* Driver use, intended as capability flags. */
267}; 253};
268 254
269static const struct pci_id_info pci_id_tbl[] = { 255static const struct pci_id_info pci_id_tbl[] = {
270 {"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff}, 256 {"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
271 PCI_IOTYPE, YELLOWFIN_SIZE, 257 YELLOWFIN_SIZE,
272 FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom}, 258 FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
273 {"Symbios SYM83C885", { 0x07011000, 0xffffffff}, 259 {"Symbios SYM83C885", { 0x07011000, 0xffffffff},
274 PCI_IOTYPE, YELLOWFIN_SIZE, HasMII | DontUseEeprom }, 260 YELLOWFIN_SIZE, HasMII | DontUseEeprom },
275 {NULL,}, 261 { }
276}; 262};
277 263
278static struct pci_device_id yellowfin_pci_tbl[] = { 264static const struct pci_device_id yellowfin_pci_tbl[] = {
279 { 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 265 { 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
280 { 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, 266 { 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
281 { 0, } 267 { }
282}; 268};
283MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl); 269MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl);
284 270
diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c
index 1de52d9feb..7352104f7b 100644
--- a/drivers/parport/parport_gsc.c
+++ b/drivers/parport/parport_gsc.c
@@ -15,7 +15,7 @@
15 * Phil Blundell <philb@gnu.org> 15 * Phil Blundell <philb@gnu.org>
16 * Tim Waugh <tim@cyberelk.demon.co.uk> 16 * Tim Waugh <tim@cyberelk.demon.co.uk>
17 * Jose Renau <renau@acm.org> 17 * Jose Renau <renau@acm.org>
18 * David Campbell <campbell@torque.net> 18 * David Campbell
19 * Andrea Arcangeli 19 * Andrea Arcangeli
20 */ 20 */
21 21
diff --git a/drivers/parport/parport_gsc.h b/drivers/parport/parport_gsc.h
index 662f6c1fee..fc9c37c540 100644
--- a/drivers/parport/parport_gsc.h
+++ b/drivers/parport/parport_gsc.h
@@ -24,7 +24,7 @@
24 * Phil Blundell <Philip.Blundell@pobox.com> 24 * Phil Blundell <Philip.Blundell@pobox.com>
25 * Tim Waugh <tim@cyberelk.demon.co.uk> 25 * Tim Waugh <tim@cyberelk.demon.co.uk>
26 * Jose Renau <renau@acm.org> 26 * Jose Renau <renau@acm.org>
27 * David Campbell <campbell@torque.net> 27 * David Campbell
28 * Andrea Arcangeli 28 * Andrea Arcangeli
29 */ 29 */
30 30
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 48bbf32fd9..7318e4a9e4 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -3,7 +3,7 @@
3 * Authors: Phil Blundell <philb@gnu.org> 3 * Authors: Phil Blundell <philb@gnu.org>
4 * Tim Waugh <tim@cyberelk.demon.co.uk> 4 * Tim Waugh <tim@cyberelk.demon.co.uk>
5 * Jose Renau <renau@acm.org> 5 * Jose Renau <renau@acm.org>
6 * David Campbell <campbell@torque.net> 6 * David Campbell
7 * Andrea Arcangeli 7 * Andrea Arcangeli
8 * 8 *
9 * based on work by Grant Guenther <grant@torque.net> and Phil Blundell. 9 * based on work by Grant Guenther <grant@torque.net> and Phil Blundell.
diff --git a/drivers/parport/parport_sunbpp.c b/drivers/parport/parport_sunbpp.c
index 69a4bbd4cb..7c43c5392b 100644
--- a/drivers/parport/parport_sunbpp.c
+++ b/drivers/parport/parport_sunbpp.c
@@ -389,7 +389,7 @@ static struct of_device_id bpp_match[] = {
389 {}, 389 {},
390}; 390};
391 391
392MODULE_DEVICE_TABLE(of, qec_sbus_match); 392MODULE_DEVICE_TABLE(of, bpp_match);
393 393
394static struct of_platform_driver bpp_sbus_driver = { 394static struct of_platform_driver bpp_sbus_driver = {
395 .name = "bpp", 395 .name = "bpp",
diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
index cbe17184b9..8610ae88b9 100644
--- a/drivers/parport/procfs.c
+++ b/drivers/parport/procfs.c
@@ -1,6 +1,6 @@
1/* Sysctl interface for parport devices. 1/* Sysctl interface for parport devices.
2 * 2 *
3 * Authors: David Campbell <campbell@torque.net> 3 * Authors: David Campbell
4 * Tim Waugh <tim@cyberelk.demon.co.uk> 4 * Tim Waugh <tim@cyberelk.demon.co.uk>
5 * Philip Blundell <philb@gnu.org> 5 * Philip Blundell <philb@gnu.org>
6 * Andrea Arcangeli 6 * Andrea Arcangeli
diff --git a/drivers/pci/msi-apic.c b/drivers/pci/msi-apic.c
index 0eb5fe9003..5ed798b319 100644
--- a/drivers/pci/msi-apic.c
+++ b/drivers/pci/msi-apic.c
@@ -4,6 +4,7 @@
4 4
5#include <linux/pci.h> 5#include <linux/pci.h>
6#include <linux/irq.h> 6#include <linux/irq.h>
7#include <asm/smp.h>
7 8
8#include "msi.h" 9#include "msi.h"
9 10
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index d408a3c304..23d3b17c8c 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -369,7 +369,7 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
369 369
370 /* 370 /*
371 * Give firmware a chance to be called, such as ACPI _PRx, _PSx 371 * Give firmware a chance to be called, such as ACPI _PRx, _PSx
372 * Firmware method after natice method ? 372 * Firmware method after native method ?
373 */ 373 */
374 if (platform_pci_set_power_state) 374 if (platform_pci_set_power_state)
375 platform_pci_set_power_state(dev, state); 375 platform_pci_set_power_state(dev, state);
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index 0e07d95351..d0f68ab8f0 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -157,7 +157,7 @@ MODULE_LICENSE("Dual MPL/GPL");
157 157
158static int pcmcia_schlvl = PCMCIA_SCHLVL; 158static int pcmcia_schlvl = PCMCIA_SCHLVL;
159 159
160static spinlock_t events_lock = SPIN_LOCK_UNLOCKED; 160static DEFINE_SPINLOCK(events_lock);
161 161
162 162
163#define PCMCIA_SOCKET_KEY_5V 1 163#define PCMCIA_SOCKET_KEY_5V 1
@@ -644,7 +644,7 @@ static struct platform_device m8xx_device = {
644}; 644};
645 645
646static u32 pending_events[PCMCIA_SOCKETS_NO]; 646static u32 pending_events[PCMCIA_SOCKETS_NO];
647static spinlock_t pending_event_lock = SPIN_LOCK_UNLOCKED; 647static DEFINE_SPINLOCK(pending_event_lock);
648 648
649static irqreturn_t m8xx_interrupt(int irq, void *dev, struct pt_regs *regs) 649static irqreturn_t m8xx_interrupt(int irq, void *dev, struct pt_regs *regs)
650{ 650{
diff --git a/drivers/rapidio/rio-access.c b/drivers/rapidio/rio-access.c
index b9fab2ae3a..8b56bbdd01 100644
--- a/drivers/rapidio/rio-access.c
+++ b/drivers/rapidio/rio-access.c
@@ -17,8 +17,8 @@
17 * These interrupt-safe spinlocks protect all accesses to RIO 17 * These interrupt-safe spinlocks protect all accesses to RIO
18 * configuration space and doorbell access. 18 * configuration space and doorbell access.
19 */ 19 */
20static spinlock_t rio_config_lock = SPIN_LOCK_UNLOCKED; 20static DEFINE_SPINLOCK(rio_config_lock);
21static spinlock_t rio_doorbell_lock = SPIN_LOCK_UNLOCKED; 21static DEFINE_SPINLOCK(rio_doorbell_lock);
22 22
23/* 23/*
24 * Wrappers for all RIO configuration access functions. They just check 24 * Wrappers for all RIO configuration access functions. They just check
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 5396beec30..1cb61a761c 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -94,7 +94,9 @@ exit_kfree:
94 kfree(rtc); 94 kfree(rtc);
95 95
96exit_idr: 96exit_idr:
97 mutex_lock(&idr_lock);
97 idr_remove(&rtc_idr, id); 98 idr_remove(&rtc_idr, id);
99 mutex_unlock(&idr_lock);
98 100
99exit: 101exit:
100 dev_err(dev, "rtc core: unable to register %s, err = %d\n", 102 dev_err(dev, "rtc core: unable to register %s, err = %d\n",
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index ecafbad41a..762521a141 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -226,7 +226,7 @@ static int ds1553_rtc_ioctl(struct device *dev, unsigned int cmd,
226 struct rtc_plat_data *pdata = platform_get_drvdata(pdev); 226 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
227 227
228 if (pdata->irq < 0) 228 if (pdata->irq < 0)
229 return -ENOIOCTLCMD; 229 return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */
230 switch (cmd) { 230 switch (cmd) {
231 case RTC_AIE_OFF: 231 case RTC_AIE_OFF:
232 pdata->irqen &= ~RTC_AF; 232 pdata->irqen &= ~RTC_AF;
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index ab486fbc82..9cd1cb304b 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -45,7 +45,7 @@
45 45
46static unsigned long rtc_freq = 1024; 46static unsigned long rtc_freq = 1024;
47static struct rtc_time rtc_alarm; 47static struct rtc_time rtc_alarm;
48static spinlock_t sa1100_rtc_lock = SPIN_LOCK_UNLOCKED; 48static DEFINE_SPINLOCK(sa1100_rtc_lock);
49 49
50static int rtc_update_alarm(struct rtc_time *alrm) 50static int rtc_update_alarm(struct rtc_time *alrm)
51{ 51{
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index 33e029207e..4b9291dd44 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -93,7 +93,7 @@ static void __iomem *rtc2_base;
93 93
94static unsigned long epoch = 1970; /* Jan 1 1970 00:00:00 */ 94static unsigned long epoch = 1970; /* Jan 1 1970 00:00:00 */
95 95
96static spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED; 96static DEFINE_SPINLOCK(rtc_lock);
97static char rtc_name[] = "RTC"; 97static char rtc_name[] = "RTC";
98static unsigned long periodic_frequency; 98static unsigned long periodic_frequency;
99static unsigned long periodic_count; 99static unsigned long periodic_count;
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 2d946b6ca0..2d8af70994 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -89,7 +89,7 @@ struct eerbuffer {
89}; 89};
90 90
91static LIST_HEAD(bufferlist); 91static LIST_HEAD(bufferlist);
92static spinlock_t bufferlock = SPIN_LOCK_UNLOCKED; 92static DEFINE_SPINLOCK(bufferlock);
93static DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue); 93static DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue);
94 94
95/* 95/*
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index f94419b334..2eded55ae8 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1140,10 +1140,9 @@ list_modified:
1140 } 1140 }
1141 } 1141 }
1142 /* re-insert all entries from the failed_list into ipm_list */ 1142 /* re-insert all entries from the failed_list into ipm_list */
1143 list_for_each_entry_safe(ipm, tmp, &failed_list, list) { 1143 list_for_each_entry_safe(ipm, tmp, &failed_list, list)
1144 list_del_init(&ipm->list); 1144 list_move_tail(&ipm->list, &card->ipm_list);
1145 list_add_tail(&ipm->list, &card->ipm_list); 1145
1146 }
1147 spin_unlock_irqrestore(&card->ipm_lock, flags); 1146 spin_unlock_irqrestore(&card->ipm_lock, flags);
1148} 1147}
1149 1148
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 4682c8b8bd..909731b99d 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -167,7 +167,7 @@ zfcp_fsf_scsi_er_timeout_handler(unsigned long data)
167 * initiates adapter recovery which is done 167 * initiates adapter recovery which is done
168 * asynchronously 168 * asynchronously
169 * 169 *
170 * returns: 0 - initiated action succesfully 170 * returns: 0 - initiated action successfully
171 * <0 - failed to initiate action 171 * <0 - failed to initiate action
172 */ 172 */
173int 173int
@@ -203,7 +203,7 @@ zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter, int clear_mask)
203 * purpose: Wrappper for zfcp_erp_adapter_reopen_internal 203 * purpose: Wrappper for zfcp_erp_adapter_reopen_internal
204 * used to ensure the correct locking 204 * used to ensure the correct locking
205 * 205 *
206 * returns: 0 - initiated action succesfully 206 * returns: 0 - initiated action successfully
207 * <0 - failed to initiate action 207 * <0 - failed to initiate action
208 */ 208 */
209int 209int
@@ -469,7 +469,7 @@ zfcp_test_link(struct zfcp_port *port)
469 * initiates Forced Reopen recovery which is done 469 * initiates Forced Reopen recovery which is done
470 * asynchronously 470 * asynchronously
471 * 471 *
472 * returns: 0 - initiated action succesfully 472 * returns: 0 - initiated action successfully
473 * <0 - failed to initiate action 473 * <0 - failed to initiate action
474 */ 474 */
475static int 475static int
@@ -509,7 +509,7 @@ zfcp_erp_port_forced_reopen_internal(struct zfcp_port *port, int clear_mask)
509 * purpose: Wrappper for zfcp_erp_port_forced_reopen_internal 509 * purpose: Wrappper for zfcp_erp_port_forced_reopen_internal
510 * used to ensure the correct locking 510 * used to ensure the correct locking
511 * 511 *
512 * returns: 0 - initiated action succesfully 512 * returns: 0 - initiated action successfully
513 * <0 - failed to initiate action 513 * <0 - failed to initiate action
514 */ 514 */
515int 515int
@@ -536,7 +536,7 @@ zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear_mask)
536 * initiates Reopen recovery which is done 536 * initiates Reopen recovery which is done
537 * asynchronously 537 * asynchronously
538 * 538 *
539 * returns: 0 - initiated action succesfully 539 * returns: 0 - initiated action successfully
540 * <0 - failed to initiate action 540 * <0 - failed to initiate action
541 */ 541 */
542static int 542static int
@@ -605,7 +605,7 @@ zfcp_erp_port_reopen(struct zfcp_port *port, int clear_mask)
605 * initiates Reopen recovery which is done 605 * initiates Reopen recovery which is done
606 * asynchronously 606 * asynchronously
607 * 607 *
608 * returns: 0 - initiated action succesfully 608 * returns: 0 - initiated action successfully
609 * <0 - failed to initiate action 609 * <0 - failed to initiate action
610 */ 610 */
611static int 611static int
@@ -1805,7 +1805,7 @@ zfcp_erp_modify_unit_status(struct zfcp_unit *unit, u32 mask, int set_or_clear)
1805 * purpose: Wrappper for zfcp_erp_port_reopen_all_internal 1805 * purpose: Wrappper for zfcp_erp_port_reopen_all_internal
1806 * used to ensure the correct locking 1806 * used to ensure the correct locking
1807 * 1807 *
1808 * returns: 0 - initiated action succesfully 1808 * returns: 0 - initiated action successfully
1809 * <0 - failed to initiate action 1809 * <0 - failed to initiate action
1810 */ 1810 */
1811int 1811int
diff --git a/drivers/sbus/char/cpwatchdog.c b/drivers/sbus/char/cpwatchdog.c
index 5bf3dd901b..21737b7e86 100644
--- a/drivers/sbus/char/cpwatchdog.c
+++ b/drivers/sbus/char/cpwatchdog.c
@@ -755,7 +755,7 @@ static int __init wd_init(void)
755 755
756 for_each_ebus(ebus) { 756 for_each_ebus(ebus) {
757 for_each_ebusdev(edev, ebus) { 757 for_each_ebusdev(edev, ebus) {
758 if (!strcmp(edev->prom_name, WD_OBPNAME)) 758 if (!strcmp(edev->ofdev.node->name, WD_OBPNAME))
759 goto ebus_done; 759 goto ebus_done;
760 } 760 }
761 } 761 }
diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c
index cf5b476b54..d7e4bb41bd 100644
--- a/drivers/sbus/char/openprom.c
+++ b/drivers/sbus/char/openprom.c
@@ -29,8 +29,6 @@
29 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 29 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
30 */ 30 */
31 31
32#define PROMLIB_INTERNAL
33
34#include <linux/config.h> 32#include <linux/config.h>
35#include <linux/module.h> 33#include <linux/module.h>
36#include <linux/kernel.h> 34#include <linux/kernel.h>
@@ -39,10 +37,10 @@
39#include <linux/slab.h> 37#include <linux/slab.h>
40#include <linux/string.h> 38#include <linux/string.h>
41#include <linux/miscdevice.h> 39#include <linux/miscdevice.h>
42#include <linux/smp_lock.h>
43#include <linux/init.h> 40#include <linux/init.h>
44#include <linux/fs.h> 41#include <linux/fs.h>
45#include <asm/oplib.h> 42#include <asm/oplib.h>
43#include <asm/prom.h>
46#include <asm/system.h> 44#include <asm/system.h>
47#include <asm/uaccess.h> 45#include <asm/uaccess.h>
48#include <asm/openpromio.h> 46#include <asm/openpromio.h>
@@ -51,15 +49,20 @@
51#include <asm/pbm.h> 49#include <asm/pbm.h>
52#endif 50#endif
53 51
52MODULE_AUTHOR("Thomas K. Dyas (tdyas@noc.rutgers.edu) and Eddie C. Dost (ecd@skynet.be)");
53MODULE_DESCRIPTION("OPENPROM Configuration Driver");
54MODULE_LICENSE("GPL");
55MODULE_VERSION("1.0");
56
54/* Private data kept by the driver for each descriptor. */ 57/* Private data kept by the driver for each descriptor. */
55typedef struct openprom_private_data 58typedef struct openprom_private_data
56{ 59{
57 int current_node; /* Current node for SunOS ioctls. */ 60 struct device_node *current_node; /* Current node for SunOS ioctls. */
58 int lastnode; /* Last valid node used by BSD ioctls. */ 61 struct device_node *lastnode; /* Last valid node used by BSD ioctls. */
59} DATA; 62} DATA;
60 63
61/* ID of the PROM node containing all of the EEPROM options. */ 64/* ID of the PROM node containing all of the EEPROM options. */
62static int options_node = 0; 65static struct device_node *options_node;
63 66
64/* 67/*
65 * Copy an openpromio structure into kernel space from user space. 68 * Copy an openpromio structure into kernel space from user space.
@@ -87,9 +90,8 @@ static int copyin(struct openpromio __user *info, struct openpromio **opp_p)
87 if (bufsize > OPROMMAXPARAM) 90 if (bufsize > OPROMMAXPARAM)
88 bufsize = OPROMMAXPARAM; 91 bufsize = OPROMMAXPARAM;
89 92
90 if (!(*opp_p = kmalloc(sizeof(int) + bufsize + 1, GFP_KERNEL))) 93 if (!(*opp_p = kzalloc(sizeof(int) + bufsize + 1, GFP_KERNEL)))
91 return -ENOMEM; 94 return -ENOMEM;
92 memset(*opp_p, 0, sizeof(int) + bufsize + 1);
93 95
94 if (copy_from_user(&(*opp_p)->oprom_array, 96 if (copy_from_user(&(*opp_p)->oprom_array,
95 &info->oprom_array, bufsize)) { 97 &info->oprom_array, bufsize)) {
@@ -107,10 +109,9 @@ static int getstrings(struct openpromio __user *info, struct openpromio **opp_p)
107 if (!info || !opp_p) 109 if (!info || !opp_p)
108 return -EFAULT; 110 return -EFAULT;
109 111
110 if (!(*opp_p = kmalloc(sizeof(int) + OPROMMAXPARAM + 1, GFP_KERNEL))) 112 if (!(*opp_p = kzalloc(sizeof(int) + OPROMMAXPARAM + 1, GFP_KERNEL)))
111 return -ENOMEM; 113 return -ENOMEM;
112 114
113 memset(*opp_p, 0, sizeof(int) + OPROMMAXPARAM + 1);
114 (*opp_p)->oprom_size = 0; 115 (*opp_p)->oprom_size = 0;
115 116
116 n = bufsize = 0; 117 n = bufsize = 0;
@@ -140,16 +141,164 @@ static int copyout(void __user *info, struct openpromio *opp, int len)
140 return 0; 141 return 0;
141} 142}
142 143
144static int opromgetprop(void __user *argp, struct device_node *dp, struct openpromio *op, int bufsize)
145{
146 void *pval;
147 int len;
148
149 pval = of_get_property(dp, op->oprom_array, &len);
150 if (!pval || len <= 0 || len > bufsize)
151 return copyout(argp, op, sizeof(int));
152
153 memcpy(op->oprom_array, pval, len);
154 op->oprom_array[len] = '\0';
155 op->oprom_size = len;
156
157 return copyout(argp, op, sizeof(int) + bufsize);
158}
159
160static int opromnxtprop(void __user *argp, struct device_node *dp, struct openpromio *op, int bufsize)
161{
162 struct property *prop;
163 int len;
164
165 if (op->oprom_array[0] == '\0') {
166 prop = dp->properties;
167 if (!prop)
168 return copyout(argp, op, sizeof(int));
169 len = strlen(prop->name);
170 } else {
171 prop = of_find_property(dp, op->oprom_array, NULL);
172
173 if (!prop ||
174 !prop->next ||
175 (len = strlen(prop->next->name)) + 1 > bufsize)
176 return copyout(argp, op, sizeof(int));
177
178 prop = prop->next;
179 }
180
181 memcpy(op->oprom_array, prop->name, len);
182 op->oprom_array[len] = '\0';
183 op->oprom_size = ++len;
184
185 return copyout(argp, op, sizeof(int) + bufsize);
186}
187
188static int opromsetopt(struct device_node *dp, struct openpromio *op, int bufsize)
189{
190 char *buf = op->oprom_array + strlen(op->oprom_array) + 1;
191 int len = op->oprom_array + bufsize - buf;
192
193 return of_set_property(options_node, op->oprom_array, buf, len);
194}
195
196static int opromnext(void __user *argp, unsigned int cmd, struct device_node *dp, struct openpromio *op, int bufsize, DATA *data)
197{
198 phandle ph;
199
200 BUILD_BUG_ON(sizeof(phandle) != sizeof(int));
201
202 if (bufsize < sizeof(phandle))
203 return -EINVAL;
204
205 ph = *((int *) op->oprom_array);
206 if (ph) {
207 dp = of_find_node_by_phandle(ph);
208 if (!dp)
209 return -EINVAL;
210
211 switch (cmd) {
212 case OPROMNEXT:
213 dp = dp->sibling;
214 break;
215
216 case OPROMCHILD:
217 dp = dp->child;
218 break;
219
220 case OPROMSETCUR:
221 default:
222 break;
223 };
224 } else {
225 /* Sibling of node zero is the root node. */
226 if (cmd != OPROMNEXT)
227 return -EINVAL;
228
229 dp = of_find_node_by_path("/");
230 }
231
232 ph = 0;
233 if (dp)
234 ph = dp->node;
235
236 data->current_node = dp;
237 *((int *) op->oprom_array) = ph;
238 op->oprom_size = sizeof(phandle);
239
240 return copyout(argp, op, bufsize + sizeof(int));
241}
242
243static int oprompci2node(void __user *argp, struct device_node *dp, struct openpromio *op, int bufsize, DATA *data)
244{
245 int err = -EINVAL;
246
247 if (bufsize >= 2*sizeof(int)) {
248#ifdef CONFIG_PCI
249 struct pci_dev *pdev;
250 struct pcidev_cookie *pcp;
251 pdev = pci_find_slot (((int *) op->oprom_array)[0],
252 ((int *) op->oprom_array)[1]);
253
254 pcp = pdev->sysdata;
255 if (pcp != NULL) {
256 dp = pcp->prom_node;
257 data->current_node = dp;
258 *((int *)op->oprom_array) = dp->node;
259 op->oprom_size = sizeof(int);
260 err = copyout(argp, op, bufsize + sizeof(int));
261 }
262#endif
263 }
264
265 return err;
266}
267
268static int oprompath2node(void __user *argp, struct device_node *dp, struct openpromio *op, int bufsize, DATA *data)
269{
270 dp = of_find_node_by_path(op->oprom_array);
271 data->current_node = dp;
272 *((int *)op->oprom_array) = dp->node;
273 op->oprom_size = sizeof(int);
274
275 return copyout(argp, op, bufsize + sizeof(int));
276}
277
278static int opromgetbootargs(void __user *argp, struct openpromio *op, int bufsize)
279{
280 char *buf = saved_command_line;
281 int len = strlen(buf);
282
283 if (len > bufsize)
284 return -EINVAL;
285
286 strcpy(op->oprom_array, buf);
287 op->oprom_size = len;
288
289 return copyout(argp, op, bufsize + sizeof(int));
290}
291
143/* 292/*
144 * SunOS and Solaris /dev/openprom ioctl calls. 293 * SunOS and Solaris /dev/openprom ioctl calls.
145 */ 294 */
146static int openprom_sunos_ioctl(struct inode * inode, struct file * file, 295static int openprom_sunos_ioctl(struct inode * inode, struct file * file,
147 unsigned int cmd, unsigned long arg, int node) 296 unsigned int cmd, unsigned long arg,
297 struct device_node *dp)
148{ 298{
149 DATA *data = (DATA *) file->private_data; 299 DATA *data = file->private_data;
150 char buffer[OPROMMAXPARAM+1], *buf;
151 struct openpromio *opp; 300 struct openpromio *opp;
152 int bufsize, len, error = 0; 301 int bufsize, error = 0;
153 static int cnt; 302 static int cnt;
154 void __user *argp = (void __user *)arg; 303 void __user *argp = (void __user *)arg;
155 304
@@ -164,119 +313,35 @@ static int openprom_sunos_ioctl(struct inode * inode, struct file * file,
164 switch (cmd) { 313 switch (cmd) {
165 case OPROMGETOPT: 314 case OPROMGETOPT:
166 case OPROMGETPROP: 315 case OPROMGETPROP:
167 len = prom_getproplen(node, opp->oprom_array); 316 error = opromgetprop(argp, dp, opp, bufsize);
168
169 if (len <= 0 || len > bufsize) {
170 error = copyout(argp, opp, sizeof(int));
171 break;
172 }
173
174 len = prom_getproperty(node, opp->oprom_array, buffer, bufsize);
175
176 memcpy(opp->oprom_array, buffer, len);
177 opp->oprom_array[len] = '\0';
178 opp->oprom_size = len;
179
180 error = copyout(argp, opp, sizeof(int) + bufsize);
181 break; 317 break;
182 318
183 case OPROMNXTOPT: 319 case OPROMNXTOPT:
184 case OPROMNXTPROP: 320 case OPROMNXTPROP:
185 buf = prom_nextprop(node, opp->oprom_array, buffer); 321 error = opromnxtprop(argp, dp, opp, bufsize);
186
187 len = strlen(buf);
188 if (len == 0 || len + 1 > bufsize) {
189 error = copyout(argp, opp, sizeof(int));
190 break;
191 }
192
193 memcpy(opp->oprom_array, buf, len);
194 opp->oprom_array[len] = '\0';
195 opp->oprom_size = ++len;
196
197 error = copyout(argp, opp, sizeof(int) + bufsize);
198 break; 322 break;
199 323
200 case OPROMSETOPT: 324 case OPROMSETOPT:
201 case OPROMSETOPT2: 325 case OPROMSETOPT2:
202 buf = opp->oprom_array + strlen(opp->oprom_array) + 1; 326 error = opromsetopt(dp, opp, bufsize);
203 len = opp->oprom_array + bufsize - buf;
204
205 error = prom_setprop(options_node, opp->oprom_array,
206 buf, len);
207
208 if (error < 0)
209 error = -EINVAL;
210 break; 327 break;
211 328
212 case OPROMNEXT: 329 case OPROMNEXT:
213 case OPROMCHILD: 330 case OPROMCHILD:
214 case OPROMSETCUR: 331 case OPROMSETCUR:
215 if (bufsize < sizeof(int)) { 332 error = opromnext(argp, cmd, dp, opp, bufsize, data);
216 error = -EINVAL;
217 break;
218 }
219
220 node = *((int *) opp->oprom_array);
221
222 switch (cmd) {
223 case OPROMNEXT: node = __prom_getsibling(node); break;
224 case OPROMCHILD: node = __prom_getchild(node); break;
225 case OPROMSETCUR: break;
226 }
227
228 data->current_node = node;
229 *((int *)opp->oprom_array) = node;
230 opp->oprom_size = sizeof(int);
231
232 error = copyout(argp, opp, bufsize + sizeof(int));
233 break; 333 break;
234 334
235 case OPROMPCI2NODE: 335 case OPROMPCI2NODE:
236 error = -EINVAL; 336 error = oprompci2node(argp, dp, opp, bufsize, data);
237
238 if (bufsize >= 2*sizeof(int)) {
239#ifdef CONFIG_PCI
240 struct pci_dev *pdev;
241 struct pcidev_cookie *pcp;
242 pdev = pci_find_slot (((int *) opp->oprom_array)[0],
243 ((int *) opp->oprom_array)[1]);
244
245 pcp = pdev->sysdata;
246 if (pcp != NULL) {
247 node = pcp->prom_node->node;
248 data->current_node = node;
249 *((int *)opp->oprom_array) = node;
250 opp->oprom_size = sizeof(int);
251 error = copyout(argp, opp, bufsize + sizeof(int));
252 }
253#endif
254 }
255 break; 337 break;
256 338
257 case OPROMPATH2NODE: 339 case OPROMPATH2NODE:
258 node = prom_finddevice(opp->oprom_array); 340 error = oprompath2node(argp, dp, opp, bufsize, data);
259 data->current_node = node;
260 *((int *)opp->oprom_array) = node;
261 opp->oprom_size = sizeof(int);
262
263 error = copyout(argp, opp, bufsize + sizeof(int));
264 break; 341 break;
265 342
266 case OPROMGETBOOTARGS: 343 case OPROMGETBOOTARGS:
267 buf = saved_command_line; 344 error = opromgetbootargs(argp, opp, bufsize);
268
269 len = strlen(buf);
270
271 if (len > bufsize) {
272 error = -EINVAL;
273 break;
274 }
275
276 strcpy(opp->oprom_array, buf);
277 opp->oprom_size = len;
278
279 error = copyout(argp, opp, bufsize + sizeof(int));
280 break; 345 break;
281 346
282 case OPROMU2P: 347 case OPROMU2P:
@@ -297,25 +362,14 @@ static int openprom_sunos_ioctl(struct inode * inode, struct file * file,
297 return error; 362 return error;
298} 363}
299 364
300 365static struct device_node *get_node(phandle n, DATA *data)
301/* Return nonzero if a specific node is in the PROM device tree. */
302static int intree(int root, int node)
303{ 366{
304 for (; root != 0; root = prom_getsibling(root)) 367 struct device_node *dp = of_find_node_by_phandle(n);
305 if (root == node || intree(prom_getchild(root),node))
306 return 1;
307 return 0;
308}
309 368
310/* Return nonzero if a specific node is "valid". */ 369 if (dp)
311static int goodnode(int n, DATA *data) 370 data->lastnode = dp;
312{ 371
313 if (n == data->lastnode || n == prom_root_node || n == options_node) 372 return dp;
314 return 1;
315 if (n == 0 || n == -1 || !intree(prom_root_node,n))
316 return 0;
317 data->lastnode = n;
318 return 1;
319} 373}
320 374
321/* Copy in a whole string from userspace into kernelspace. */ 375/* Copy in a whole string from userspace into kernelspace. */
@@ -330,7 +384,7 @@ static int copyin_string(char __user *user, size_t len, char **ptr)
330 if (!tmp) 384 if (!tmp)
331 return -ENOMEM; 385 return -ENOMEM;
332 386
333 if(copy_from_user(tmp, user, len)) { 387 if (copy_from_user(tmp, user, len)) {
334 kfree(tmp); 388 kfree(tmp);
335 return -EFAULT; 389 return -EFAULT;
336 } 390 }
@@ -345,162 +399,187 @@ static int copyin_string(char __user *user, size_t len, char **ptr)
345/* 399/*
346 * NetBSD /dev/openprom ioctl calls. 400 * NetBSD /dev/openprom ioctl calls.
347 */ 401 */
348static int openprom_bsd_ioctl(struct inode * inode, struct file * file, 402static int opiocget(void __user *argp, DATA *data)
349 unsigned int cmd, unsigned long arg)
350{ 403{
351 DATA *data = (DATA *) file->private_data;
352 void __user *argp = (void __user *)arg;
353 struct opiocdesc op; 404 struct opiocdesc op;
354 int error, node, len; 405 struct device_node *dp;
355 char *str, *tmp; 406 char *str;
356 char buffer[64]; 407 void *pval;
357 static int cnt; 408 int err, len;
358
359 switch (cmd) {
360 case OPIOCGET:
361 if (copy_from_user(&op, argp, sizeof(op)))
362 return -EFAULT;
363
364 if (!goodnode(op.op_nodeid,data))
365 return -EINVAL;
366 409
367 error = copyin_string(op.op_name, op.op_namelen, &str); 410 if (copy_from_user(&op, argp, sizeof(op)))
368 if (error) 411 return -EFAULT;
369 return error;
370 412
371 len = prom_getproplen(op.op_nodeid,str); 413 dp = get_node(op.op_nodeid, data);
372 414
373 if (len > op.op_buflen) { 415 err = copyin_string(op.op_name, op.op_namelen, &str);
374 kfree(str); 416 if (err)
375 return -ENOMEM; 417 return err;
376 }
377 418
419 pval = of_get_property(dp, str, &len);
420 err = 0;
421 if (!pval || len > op.op_buflen) {
422 err = -EINVAL;
423 } else {
378 op.op_buflen = len; 424 op.op_buflen = len;
425 if (copy_to_user(argp, &op, sizeof(op)) ||
426 copy_to_user(op.op_buf, pval, len))
427 err = -EFAULT;
428 }
429 kfree(str);
379 430
380 if (len <= 0) { 431 return err;
381 kfree(str); 432}
382 /* Verified by the above copy_from_user */
383 if (__copy_to_user(argp, &op,
384 sizeof(op)))
385 return -EFAULT;
386 return 0;
387 }
388 433
389 tmp = kmalloc(len + 1, GFP_KERNEL); 434static int opiocnextprop(void __user *argp, DATA *data)
390 if (!tmp) { 435{
391 kfree(str); 436 struct opiocdesc op;
392 return -ENOMEM; 437 struct device_node *dp;
393 } 438 struct property *prop;
439 char *str;
440 int err, len;
394 441
395 cnt = prom_getproperty(op.op_nodeid, str, tmp, len); 442 if (copy_from_user(&op, argp, sizeof(op)))
396 if (cnt <= 0) { 443 return -EFAULT;
397 error = -EINVAL;
398 } else {
399 tmp[len] = '\0';
400 444
401 if (__copy_to_user(argp, &op, sizeof(op)) != 0 || 445 dp = get_node(op.op_nodeid, data);
402 copy_to_user(op.op_buf, tmp, len) != 0) 446 if (!dp)
403 error = -EFAULT; 447 return -EINVAL;
404 }
405 448
406 kfree(tmp); 449 err = copyin_string(op.op_name, op.op_namelen, &str);
407 kfree(str); 450 if (err)
451 return err;
408 452
409 return error; 453 if (str[0] == '\0') {
454 prop = dp->properties;
455 } else {
456 prop = of_find_property(dp, str, NULL);
457 if (prop)
458 prop = prop->next;
459 }
460 kfree(str);
410 461
411 case OPIOCNEXTPROP: 462 if (!prop)
412 if (copy_from_user(&op, argp, sizeof(op))) 463 len = 0;
413 return -EFAULT; 464 else
465 len = prop->length;
414 466
415 if (!goodnode(op.op_nodeid,data)) 467 if (len > op.op_buflen)
416 return -EINVAL; 468 len = op.op_buflen;
417 469
418 error = copyin_string(op.op_name, op.op_namelen, &str); 470 if (copy_to_user(argp, &op, sizeof(op)))
419 if (error) 471 return -EFAULT;
420 return error;
421 472
422 tmp = prom_nextprop(op.op_nodeid,str,buffer); 473 if (len &&
474 copy_to_user(op.op_buf, prop->value, len))
475 return -EFAULT;
423 476
424 if (tmp) { 477 return 0;
425 len = strlen(tmp); 478}
426 if (len > op.op_buflen)
427 len = op.op_buflen;
428 else
429 op.op_buflen = len;
430 } else {
431 len = op.op_buflen = 0;
432 }
433 479
434 if (!access_ok(VERIFY_WRITE, argp, sizeof(op))) { 480static int opiocset(void __user *argp, DATA *data)
435 kfree(str); 481{
436 return -EFAULT; 482 struct opiocdesc op;
437 } 483 struct device_node *dp;
484 char *str, *tmp;
485 int err;
438 486
439 if (!access_ok(VERIFY_WRITE, op.op_buf, len)) { 487 if (copy_from_user(&op, argp, sizeof(op)))
440 kfree(str); 488 return -EFAULT;
441 return -EFAULT; 489
442 } 490 dp = get_node(op.op_nodeid, data);
491 if (!dp)
492 return -EINVAL;
443 493
444 error = __copy_to_user(argp, &op, sizeof(op)); 494 err = copyin_string(op.op_name, op.op_namelen, &str);
445 if (!error) error = __copy_to_user(op.op_buf, tmp, len); 495 if (err)
496 return err;
446 497
498 err = copyin_string(op.op_buf, op.op_buflen, &tmp);
499 if (err) {
447 kfree(str); 500 kfree(str);
501 return err;
502 }
448 503
449 return error; 504 err = of_set_property(dp, str, tmp, op.op_buflen);
450 505
451 case OPIOCSET: 506 kfree(str);
452 if (copy_from_user(&op, argp, sizeof(op))) 507 kfree(tmp);
453 return -EFAULT;
454 508
455 if (!goodnode(op.op_nodeid,data)) 509 return err;
456 return -EINVAL; 510}
457 511
458 error = copyin_string(op.op_name, op.op_namelen, &str); 512static int opiocgetnext(unsigned int cmd, void __user *argp)
459 if (error) 513{
460 return error; 514 struct device_node *dp;
515 phandle nd;
461 516
462 error = copyin_string(op.op_buf, op.op_buflen, &tmp); 517 BUILD_BUG_ON(sizeof(phandle) != sizeof(int));
463 if (error) {
464 kfree(str);
465 return error;
466 }
467 518
468 len = prom_setprop(op.op_nodeid,str,tmp,op.op_buflen+1); 519 if (copy_from_user(&nd, argp, sizeof(phandle)))
520 return -EFAULT;
469 521
470 if (len != op.op_buflen) 522 if (nd == 0) {
523 if (cmd != OPIOCGETNEXT)
471 return -EINVAL; 524 return -EINVAL;
525 dp = of_find_node_by_path("/");
526 } else {
527 dp = of_find_node_by_phandle(nd);
528 nd = 0;
529 if (dp) {
530 if (cmd == OPIOCGETNEXT)
531 dp = dp->sibling;
532 else
533 dp = dp->child;
534 }
535 }
536 if (dp)
537 nd = dp->node;
538 if (copy_to_user(argp, &nd, sizeof(phandle)))
539 return -EFAULT;
472 540
473 kfree(str); 541 return 0;
474 kfree(tmp); 542}
475 543
476 return 0; 544static int openprom_bsd_ioctl(struct inode * inode, struct file * file,
545 unsigned int cmd, unsigned long arg)
546{
547 DATA *data = (DATA *) file->private_data;
548 void __user *argp = (void __user *)arg;
549 int err;
477 550
478 case OPIOCGETOPTNODE: 551 switch (cmd) {
479 if (copy_to_user(argp, &options_node, sizeof(int))) 552 case OPIOCGET:
480 return -EFAULT; 553 err = opiocget(argp, data);
481 return 0; 554 break;
482 555
483 case OPIOCGETNEXT: 556 case OPIOCNEXTPROP:
484 case OPIOCGETCHILD: 557 err = opiocnextprop(argp, data);
485 if (copy_from_user(&node, argp, sizeof(int))) 558 break;
486 return -EFAULT;
487 559
488 if (cmd == OPIOCGETNEXT) 560 case OPIOCSET:
489 node = __prom_getsibling(node); 561 err = opiocset(argp, data);
490 else 562 break;
491 node = __prom_getchild(node); 563
564 case OPIOCGETOPTNODE:
565 BUILD_BUG_ON(sizeof(phandle) != sizeof(int));
492 566
493 if (__copy_to_user(argp, &node, sizeof(int))) 567 if (copy_to_user(argp, &options_node->node, sizeof(phandle)))
494 return -EFAULT; 568 return -EFAULT;
495 569
496 return 0; 570 return 0;
497 571
572 case OPIOCGETNEXT:
573 case OPIOCGETCHILD:
574 err = opiocgetnext(cmd, argp);
575 break;
576
498 default: 577 default:
499 if (cnt++ < 10)
500 printk(KERN_INFO "openprom_bsd_ioctl: cmd 0x%X\n", cmd);
501 return -EINVAL; 578 return -EINVAL;
502 579
503 } 580 };
581
582 return err;
504} 583}
505 584
506 585
@@ -511,7 +590,6 @@ static int openprom_ioctl(struct inode * inode, struct file * file,
511 unsigned int cmd, unsigned long arg) 590 unsigned int cmd, unsigned long arg)
512{ 591{
513 DATA *data = (DATA *) file->private_data; 592 DATA *data = (DATA *) file->private_data;
514 static int cnt;
515 593
516 switch (cmd) { 594 switch (cmd) {
517 case OPROMGETOPT: 595 case OPROMGETOPT:
@@ -563,10 +641,8 @@ static int openprom_ioctl(struct inode * inode, struct file * file,
563 return openprom_bsd_ioctl(inode,file,cmd,arg); 641 return openprom_bsd_ioctl(inode,file,cmd,arg);
564 642
565 default: 643 default:
566 if (cnt++ < 10)
567 printk("openprom_ioctl: cmd 0x%X, arg 0x%lX\n", cmd, arg);
568 return -EINVAL; 644 return -EINVAL;
569 } 645 };
570} 646}
571 647
572static long openprom_compat_ioctl(struct file *file, unsigned int cmd, 648static long openprom_compat_ioctl(struct file *file, unsigned int cmd,
@@ -594,9 +670,7 @@ static long openprom_compat_ioctl(struct file *file, unsigned int cmd,
594 case OPROMSETCUR: 670 case OPROMSETCUR:
595 case OPROMPCI2NODE: 671 case OPROMPCI2NODE:
596 case OPROMPATH2NODE: 672 case OPROMPATH2NODE:
597 lock_kernel();
598 rval = openprom_ioctl(file->f_dentry->d_inode, file, cmd, arg); 673 rval = openprom_ioctl(file->f_dentry->d_inode, file, cmd, arg);
599 lock_kernel();
600 break; 674 break;
601 } 675 }
602 676
@@ -607,13 +681,13 @@ static int openprom_open(struct inode * inode, struct file * file)
607{ 681{
608 DATA *data; 682 DATA *data;
609 683
610 data = (DATA *) kmalloc(sizeof(DATA), GFP_KERNEL); 684 data = kmalloc(sizeof(DATA), GFP_KERNEL);
611 if (!data) 685 if (!data)
612 return -ENOMEM; 686 return -ENOMEM;
613 687
614 data->current_node = prom_root_node; 688 data->current_node = of_find_node_by_path("/");
615 data->lastnode = prom_root_node; 689 data->lastnode = data->current_node;
616 file->private_data = (void *)data; 690 file->private_data = (void *) data;
617 691
618 return 0; 692 return 0;
619} 693}
@@ -634,24 +708,30 @@ static struct file_operations openprom_fops = {
634}; 708};
635 709
636static struct miscdevice openprom_dev = { 710static struct miscdevice openprom_dev = {
637 SUN_OPENPROM_MINOR, "openprom", &openprom_fops 711 .minor = SUN_OPENPROM_MINOR,
712 .name = "openprom",
713 .fops = &openprom_fops,
638}; 714};
639 715
640static int __init openprom_init(void) 716static int __init openprom_init(void)
641{ 717{
642 int error; 718 struct device_node *dp;
719 int err;
643 720
644 error = misc_register(&openprom_dev); 721 err = misc_register(&openprom_dev);
645 if (error) { 722 if (err)
646 printk(KERN_ERR "openprom: unable to get misc minor\n"); 723 return err;
647 return error;
648 }
649 724
650 options_node = prom_getchild(prom_root_node); 725 dp = of_find_node_by_path("/");
651 options_node = prom_searchsiblings(options_node,"options"); 726 dp = dp->child;
727 while (dp) {
728 if (!strcmp(dp->name, "options"))
729 break;
730 dp = dp->sibling;
731 }
732 options_node = dp;
652 733
653 if (options_node == 0 || options_node == -1) { 734 if (!options_node) {
654 printk(KERN_ERR "openprom: unable to find options node\n");
655 misc_deregister(&openprom_dev); 735 misc_deregister(&openprom_dev);
656 return -EIO; 736 return -EIO;
657 } 737 }
@@ -666,4 +746,3 @@ static void __exit openprom_cleanup(void)
666 746
667module_init(openprom_init); 747module_init(openprom_init);
668module_exit(openprom_cleanup); 748module_exit(openprom_cleanup);
669MODULE_LICENSE("GPL");
diff --git a/drivers/sbus/char/riowatchdog.c b/drivers/sbus/char/riowatchdog.c
index d1babff6a5..2a9cc82044 100644
--- a/drivers/sbus/char/riowatchdog.c
+++ b/drivers/sbus/char/riowatchdog.c
@@ -211,7 +211,7 @@ static int __init riowd_bbc_init(void)
211 211
212 for_each_ebus(ebus) { 212 for_each_ebus(ebus) {
213 for_each_ebusdev(edev, ebus) { 213 for_each_ebusdev(edev, ebus) {
214 if (!strcmp(edev->prom_name, "bbc")) 214 if (!strcmp(edev->ofdev.node->name, "bbc"))
215 goto found_bbc; 215 goto found_bbc;
216 } 216 }
217 } 217 }
@@ -238,7 +238,7 @@ static int __init riowd_init(void)
238 238
239 for_each_ebus(ebus) { 239 for_each_ebus(ebus) {
240 for_each_ebusdev(edev, ebus) { 240 for_each_ebusdev(edev, ebus) {
241 if (!strcmp(edev->prom_name, RIOWD_NAME)) 241 if (!strcmp(edev->ofdev.node->name, RIOWD_NAME))
242 goto ebus_done; 242 goto ebus_done;
243 } 243 }
244 } 244 }
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 44728ae3fe..96a81cd176 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -501,7 +501,7 @@ config SCSI_ATA_PIIX
501 tristate "Intel PIIX/ICH SATA support" 501 tristate "Intel PIIX/ICH SATA support"
502 depends on SCSI_SATA && PCI 502 depends on SCSI_SATA && PCI
503 help 503 help
504 This option enables support for ICH5 Serial ATA. 504 This option enables support for ICH5/6/7/8 Serial ATA.
505 If PATA support was enabled previously, this enables 505 If PATA support was enabled previously, this enables
506 support for select Intel PIIX/ICH PATA host controllers. 506 support for select Intel PIIX/ICH PATA host controllers.
507 507
@@ -1169,7 +1169,7 @@ config SCSI_NCR_Q720
1169 you do not have this SCSI card, so say N. 1169 you do not have this SCSI card, so say N.
1170 1170
1171config SCSI_NCR53C8XX_DEFAULT_TAGS 1171config SCSI_NCR53C8XX_DEFAULT_TAGS
1172 int " default tagged command queue depth" 1172 int "default tagged command queue depth"
1173 depends on SCSI_ZALON || SCSI_NCR_Q720 1173 depends on SCSI_ZALON || SCSI_NCR_Q720
1174 default "8" 1174 default "8"
1175 ---help--- 1175 ---help---
@@ -1195,7 +1195,7 @@ config SCSI_NCR53C8XX_DEFAULT_TAGS
1195 There is no safe option other than using good SCSI devices. 1195 There is no safe option other than using good SCSI devices.
1196 1196
1197config SCSI_NCR53C8XX_MAX_TAGS 1197config SCSI_NCR53C8XX_MAX_TAGS
1198 int " maximum number of queued commands" 1198 int "maximum number of queued commands"
1199 depends on SCSI_ZALON || SCSI_NCR_Q720 1199 depends on SCSI_ZALON || SCSI_NCR_Q720
1200 default "32" 1200 default "32"
1201 ---help--- 1201 ---help---
@@ -1212,7 +1212,7 @@ config SCSI_NCR53C8XX_MAX_TAGS
1212 There is no safe option and the default answer is recommended. 1212 There is no safe option and the default answer is recommended.
1213 1213
1214config SCSI_NCR53C8XX_SYNC 1214config SCSI_NCR53C8XX_SYNC
1215 int " synchronous transfers frequency in MHz" 1215 int "synchronous transfers frequency in MHz"
1216 depends on SCSI_ZALON || SCSI_NCR_Q720 1216 depends on SCSI_ZALON || SCSI_NCR_Q720
1217 default "20" 1217 default "20"
1218 ---help--- 1218 ---help---
@@ -1246,7 +1246,7 @@ config SCSI_NCR53C8XX_SYNC
1246 terminations and SCSI conformant devices. 1246 terminations and SCSI conformant devices.
1247 1247
1248config SCSI_NCR53C8XX_PROFILE 1248config SCSI_NCR53C8XX_PROFILE
1249 bool " enable profiling" 1249 bool "enable profiling"
1250 depends on SCSI_ZALON || SCSI_NCR_Q720 1250 depends on SCSI_ZALON || SCSI_NCR_Q720
1251 help 1251 help
1252 This option allows you to enable profiling information gathering. 1252 This option allows you to enable profiling information gathering.
@@ -1257,7 +1257,7 @@ config SCSI_NCR53C8XX_PROFILE
1257 The normal answer therefore is N. 1257 The normal answer therefore is N.
1258 1258
1259config SCSI_NCR53C8XX_NO_DISCONNECT 1259config SCSI_NCR53C8XX_NO_DISCONNECT
1260 bool " not allow targets to disconnect" 1260 bool "not allow targets to disconnect"
1261 depends on (SCSI_ZALON || SCSI_NCR_Q720) && SCSI_NCR53C8XX_DEFAULT_TAGS=0 1261 depends on (SCSI_ZALON || SCSI_NCR_Q720) && SCSI_NCR53C8XX_DEFAULT_TAGS=0
1262 help 1262 help
1263 This option is only provided for safety if you suspect some SCSI 1263 This option is only provided for safety if you suspect some SCSI
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index fa57e0b4a5..75f2f7ae2a 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -500,7 +500,7 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
500/* 500/*
501 * Function : int should_disconnect (unsigned char cmd) 501 * Function : int should_disconnect (unsigned char cmd)
502 * 502 *
503 * Purpose : decide weather a command would normally disconnect or 503 * Purpose : decide whether a command would normally disconnect or
504 * not, since if it won't disconnect we should go to sleep. 504 * not, since if it won't disconnect we should go to sleep.
505 * 505 *
506 * Input : cmd - opcode of SCSI command 506 * Input : cmd - opcode of SCSI command
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 5ee47555a8..dd9fb3d910 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -12374,7 +12374,7 @@ AscInitFromEEP(ASC_DVC_VAR *asc_dvc)
12374 ASC_PRINT1( 12374 ASC_PRINT1(
12375"AscInitFromEEP: Failed to re-write EEPROM with %d errors.\n", i); 12375"AscInitFromEEP: Failed to re-write EEPROM with %d errors.\n", i);
12376 } else { 12376 } else {
12377 ASC_PRINT("AscInitFromEEP: Succesfully re-wrote EEPROM."); 12377 ASC_PRINT("AscInitFromEEP: Successfully re-wrote EEPROM.\n");
12378 } 12378 }
12379 } 12379 }
12380 return (warn_code); 12380 return (warn_code);
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index 4bb77f62b3..f059467777 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -48,7 +48,7 @@
48#include <asm/io.h> 48#include <asm/io.h>
49 49
50#define DRV_NAME "ahci" 50#define DRV_NAME "ahci"
51#define DRV_VERSION "1.3" 51#define DRV_VERSION "2.0"
52 52
53 53
54enum { 54enum {
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 521b718763..94b1261a25 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -93,7 +93,7 @@
93#include <linux/libata.h> 93#include <linux/libata.h>
94 94
95#define DRV_NAME "ata_piix" 95#define DRV_NAME "ata_piix"
96#define DRV_VERSION "1.10" 96#define DRV_VERSION "2.00"
97 97
98enum { 98enum {
99 PIIX_IOCFG = 0x54, /* IDE I/O configuration register */ 99 PIIX_IOCFG = 0x54, /* IDE I/O configuration register */
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 1832452549..58b0748045 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -3771,7 +3771,7 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3771 * @target: The target for the new device. 3771 * @target: The target for the new device.
3772 * @lun: The lun for the new device. 3772 * @lun: The lun for the new device.
3773 * 3773 *
3774 * Return the new device if succesfull or NULL on failure. 3774 * Return the new device if successful or NULL on failure.
3775 **/ 3775 **/
3776static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb, 3776static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
3777 u8 target, u8 lun) 3777 u8 target, u8 lun)
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index 115f55471e..497f6642b2 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -760,7 +760,7 @@ static int device_inquiry(int host_index, int ldn)
760 while (!got_interrupt(host_index)) 760 while (!got_interrupt(host_index))
761 barrier(); 761 barrier();
762 762
763 /*if command succesful, break */ 763 /*if command successful, break */
764 if ((stat_result(host_index) == IM_SCB_CMD_COMPLETED) || (stat_result(host_index) == IM_SCB_CMD_COMPLETED_WITH_RETRIES)) 764 if ((stat_result(host_index) == IM_SCB_CMD_COMPLETED) || (stat_result(host_index) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
765 return 1; 765 return 1;
766 } 766 }
@@ -885,7 +885,7 @@ static int immediate_assign(int host_index, unsigned int pun, unsigned int lun,
885 while (!got_interrupt(host_index)) 885 while (!got_interrupt(host_index))
886 barrier(); 886 barrier();
887 887
888 /*if command succesful, break */ 888 /*if command successful, break */
889 if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED) 889 if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED)
890 return 1; 890 return 1;
891 } 891 }
@@ -921,7 +921,7 @@ static int immediate_feature(int host_index, unsigned int speed, unsigned int ti
921 return 2; 921 return 2;
922 } else 922 } else
923 global_command_error_excuse = 0; 923 global_command_error_excuse = 0;
924 /*if command succesful, break */ 924 /*if command successful, break */
925 if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED) 925 if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED)
926 return 1; 926 return 1;
927 } 927 }
@@ -959,7 +959,7 @@ static int immediate_reset(int host_index, unsigned int ldn)
959 /* did not work, finish */ 959 /* did not work, finish */
960 return 1; 960 return 1;
961 } 961 }
962 /*if command succesful, break */ 962 /*if command successful, break */
963 if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED) 963 if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED)
964 return 1; 964 return 1;
965 } 965 }
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index cd2dffdab7..681bd18493 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -3,9 +3,6 @@
3 * 3 *
4 * (The IMM is the embedded controller in the ZIP Plus drive.) 4 * (The IMM is the embedded controller in the ZIP Plus drive.)
5 * 5 *
6 * Current Maintainer: David Campbell (Perth, Western Australia)
7 * campbell@torque.net
8 *
9 * My unoffical company acronym list is 21 pages long: 6 * My unoffical company acronym list is 21 pages long:
10 * FLA: Four letter acronym with built in facility for 7 * FLA: Four letter acronym with built in facility for
11 * future expansion to five letters. 8 * future expansion to five letters.
diff --git a/drivers/scsi/imm.h b/drivers/scsi/imm.h
index dc3aebf0e3..ece936ac29 100644
--- a/drivers/scsi/imm.h
+++ b/drivers/scsi/imm.h
@@ -2,7 +2,7 @@
2/* Driver for the Iomega MatchMaker parallel port SCSI HBA embedded in 2/* Driver for the Iomega MatchMaker parallel port SCSI HBA embedded in
3 * the Iomega ZIP Plus drive 3 * the Iomega ZIP Plus drive
4 * 4 *
5 * (c) 1998 David Campbell campbell@torque.net 5 * (c) 1998 David Campbell
6 * 6 *
7 * Please note that I live in Perth, Western Australia. GMT+0800 7 * Please note that I live in Perth, Western Australia. GMT+0800
8 */ 8 */
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 5353b28b29..78f2ff736c 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -6438,7 +6438,7 @@ ips_erase_bios(ips_ha_t * ha)
6438 /* VPP failure */ 6438 /* VPP failure */
6439 return (1); 6439 return (1);
6440 6440
6441 /* check for succesful flash */ 6441 /* check for successful flash */
6442 if (status & 0x30) 6442 if (status & 0x30)
6443 /* sequence error */ 6443 /* sequence error */
6444 return (1); 6444 return (1);
@@ -6550,7 +6550,7 @@ ips_erase_bios_memio(ips_ha_t * ha)
6550 /* VPP failure */ 6550 /* VPP failure */
6551 return (1); 6551 return (1);
6552 6552
6553 /* check for succesful flash */ 6553 /* check for successful flash */
6554 if (status & 0x30) 6554 if (status & 0x30)
6555 /* sequence error */ 6555 /* sequence error */
6556 return (1); 6556 return (1);
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 6c66877be2..d1c1c30d12 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -88,6 +88,10 @@ int libata_fua = 0;
88module_param_named(fua, libata_fua, int, 0444); 88module_param_named(fua, libata_fua, int, 0444);
89MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)"); 89MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
90 90
91static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
92module_param(ata_probe_timeout, int, 0444);
93MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
94
91MODULE_AUTHOR("Jeff Garzik"); 95MODULE_AUTHOR("Jeff Garzik");
92MODULE_DESCRIPTION("Library module for ATA devices"); 96MODULE_DESCRIPTION("Library module for ATA devices");
93MODULE_LICENSE("GPL"); 97MODULE_LICENSE("GPL");
@@ -777,11 +781,9 @@ void ata_std_dev_select (struct ata_port *ap, unsigned int device)
777void ata_dev_select(struct ata_port *ap, unsigned int device, 781void ata_dev_select(struct ata_port *ap, unsigned int device,
778 unsigned int wait, unsigned int can_sleep) 782 unsigned int wait, unsigned int can_sleep)
779{ 783{
780 if (ata_msg_probe(ap)) { 784 if (ata_msg_probe(ap))
781 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: " 785 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
782 "device %u, wait %u\n", 786 "device %u, wait %u\n", ap->id, device, wait);
783 ap->id, device, wait);
784 }
785 787
786 if (wait) 788 if (wait)
787 ata_wait_idle(ap); 789 ata_wait_idle(ap);
@@ -950,7 +952,8 @@ void ata_port_flush_task(struct ata_port *ap)
950 */ 952 */
951 if (!cancel_delayed_work(&ap->port_task)) { 953 if (!cancel_delayed_work(&ap->port_task)) {
952 if (ata_msg_ctl(ap)) 954 if (ata_msg_ctl(ap))
953 ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n", __FUNCTION__); 955 ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
956 __FUNCTION__);
954 flush_workqueue(ata_wq); 957 flush_workqueue(ata_wq);
955 } 958 }
956 959
@@ -1059,7 +1062,7 @@ unsigned ata_exec_internal(struct ata_device *dev,
1059 1062
1060 spin_unlock_irqrestore(ap->lock, flags); 1063 spin_unlock_irqrestore(ap->lock, flags);
1061 1064
1062 rc = wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL); 1065 rc = wait_for_completion_timeout(&wait, ata_probe_timeout);
1063 1066
1064 ata_port_flush_task(ap); 1067 ata_port_flush_task(ap);
1065 1068
@@ -1081,7 +1084,7 @@ unsigned ata_exec_internal(struct ata_device *dev,
1081 1084
1082 if (ata_msg_warn(ap)) 1085 if (ata_msg_warn(ap))
1083 ata_dev_printk(dev, KERN_WARNING, 1086 ata_dev_printk(dev, KERN_WARNING,
1084 "qc timeout (cmd 0x%x)\n", command); 1087 "qc timeout (cmd 0x%x)\n", command);
1085 } 1088 }
1086 1089
1087 spin_unlock_irqrestore(ap->lock, flags); 1090 spin_unlock_irqrestore(ap->lock, flags);
@@ -1093,9 +1096,9 @@ unsigned ata_exec_internal(struct ata_device *dev,
1093 1096
1094 if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) { 1097 if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
1095 if (ata_msg_warn(ap)) 1098 if (ata_msg_warn(ap))
1096 ata_dev_printk(dev, KERN_WARNING, 1099 ata_dev_printk(dev, KERN_WARNING,
1097 "zero err_mask for failed " 1100 "zero err_mask for failed "
1098 "internal command, assuming AC_ERR_OTHER\n"); 1101 "internal command, assuming AC_ERR_OTHER\n");
1099 qc->err_mask |= AC_ERR_OTHER; 1102 qc->err_mask |= AC_ERR_OTHER;
1100 } 1103 }
1101 1104
@@ -1132,6 +1135,33 @@ unsigned ata_exec_internal(struct ata_device *dev,
1132} 1135}
1133 1136
1134/** 1137/**
1138 * ata_do_simple_cmd - execute simple internal command
1139 * @dev: Device to which the command is sent
1140 * @cmd: Opcode to execute
1141 *
1142 * Execute a 'simple' command, that only consists of the opcode
1143 * 'cmd' itself, without filling any other registers
1144 *
1145 * LOCKING:
1146 * Kernel thread context (may sleep).
1147 *
1148 * RETURNS:
1149 * Zero on success, AC_ERR_* mask on failure
1150 */
1151unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1152{
1153 struct ata_taskfile tf;
1154
1155 ata_tf_init(dev, &tf);
1156
1157 tf.command = cmd;
1158 tf.flags |= ATA_TFLAG_DEVICE;
1159 tf.protocol = ATA_PROT_NODATA;
1160
1161 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1162}
1163
1164/**
1135 * ata_pio_need_iordy - check if iordy needed 1165 * ata_pio_need_iordy - check if iordy needed
1136 * @adev: ATA device 1166 * @adev: ATA device
1137 * 1167 *
@@ -1193,8 +1223,8 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1193 int rc; 1223 int rc;
1194 1224
1195 if (ata_msg_ctl(ap)) 1225 if (ata_msg_ctl(ap))
1196 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n", 1226 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1197 __FUNCTION__, ap->id, dev->devno); 1227 __FUNCTION__, ap->id, dev->devno);
1198 1228
1199 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */ 1229 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1200 1230
@@ -1263,9 +1293,9 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1263 return 0; 1293 return 0;
1264 1294
1265 err_out: 1295 err_out:
1266 if (ata_msg_warn(ap)) 1296 if (ata_msg_warn(ap))
1267 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY " 1297 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1268 "(%s, err_mask=0x%x)\n", reason, err_mask); 1298 "(%s, err_mask=0x%x)\n", reason, err_mask);
1269 return rc; 1299 return rc;
1270} 1300}
1271 1301
@@ -1318,19 +1348,21 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
1318 int i, rc; 1348 int i, rc;
1319 1349
1320 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { 1350 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
1321 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n", 1351 ata_dev_printk(dev, KERN_INFO,
1322 __FUNCTION__, ap->id, dev->devno); 1352 "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
1353 __FUNCTION__, ap->id, dev->devno);
1323 return 0; 1354 return 0;
1324 } 1355 }
1325 1356
1326 if (ata_msg_probe(ap)) 1357 if (ata_msg_probe(ap))
1327 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n", 1358 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1328 __FUNCTION__, ap->id, dev->devno); 1359 __FUNCTION__, ap->id, dev->devno);
1329 1360
1330 /* print device capabilities */ 1361 /* print device capabilities */
1331 if (ata_msg_probe(ap)) 1362 if (ata_msg_probe(ap))
1332 ata_dev_printk(dev, KERN_DEBUG, "%s: cfg 49:%04x 82:%04x 83:%04x " 1363 ata_dev_printk(dev, KERN_DEBUG,
1333 "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n", 1364 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1365 "85:%04x 86:%04x 87:%04x 88:%04x\n",
1334 __FUNCTION__, 1366 __FUNCTION__,
1335 id[49], id[82], id[83], id[84], 1367 id[49], id[82], id[83], id[84],
1336 id[85], id[86], id[87], id[88]); 1368 id[85], id[86], id[87], id[88]);
@@ -1402,14 +1434,16 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
1402 ata_id_major_version(id), 1434 ata_id_major_version(id),
1403 ata_mode_string(xfer_mask), 1435 ata_mode_string(xfer_mask),
1404 (unsigned long long)dev->n_sectors, 1436 (unsigned long long)dev->n_sectors,
1405 dev->cylinders, dev->heads, dev->sectors); 1437 dev->cylinders, dev->heads,
1438 dev->sectors);
1406 } 1439 }
1407 1440
1408 if (dev->id[59] & 0x100) { 1441 if (dev->id[59] & 0x100) {
1409 dev->multi_count = dev->id[59] & 0xff; 1442 dev->multi_count = dev->id[59] & 0xff;
1410 if (ata_msg_info(ap)) 1443 if (ata_msg_info(ap))
1411 ata_dev_printk(dev, KERN_INFO, "ata%u: dev %u multi count %u\n", 1444 ata_dev_printk(dev, KERN_INFO,
1412 ap->id, dev->devno, dev->multi_count); 1445 "ata%u: dev %u multi count %u\n",
1446 ap->id, dev->devno, dev->multi_count);
1413 } 1447 }
1414 1448
1415 dev->cdb_len = 16; 1449 dev->cdb_len = 16;
@@ -1422,8 +1456,8 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
1422 rc = atapi_cdb_len(id); 1456 rc = atapi_cdb_len(id);
1423 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { 1457 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1424 if (ata_msg_warn(ap)) 1458 if (ata_msg_warn(ap))
1425 ata_dev_printk(dev, KERN_WARNING, 1459 ata_dev_printk(dev, KERN_WARNING,
1426 "unsupported CDB len\n"); 1460 "unsupported CDB len\n");
1427 rc = -EINVAL; 1461 rc = -EINVAL;
1428 goto err_out_nosup; 1462 goto err_out_nosup;
1429 } 1463 }
@@ -1466,8 +1500,8 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
1466 1500
1467err_out_nosup: 1501err_out_nosup:
1468 if (ata_msg_probe(ap)) 1502 if (ata_msg_probe(ap))
1469 ata_dev_printk(dev, KERN_DEBUG, 1503 ata_dev_printk(dev, KERN_DEBUG,
1470 "%s: EXIT, err\n", __FUNCTION__); 1504 "%s: EXIT, err\n", __FUNCTION__);
1471 return rc; 1505 return rc;
1472} 1506}
1473 1507
@@ -3527,7 +3561,7 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
3527 * Inherited from caller. 3561 * Inherited from caller.
3528 */ 3562 */
3529 3563
3530void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf, 3564void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
3531 unsigned int buflen, int write_data) 3565 unsigned int buflen, int write_data)
3532{ 3566{
3533 struct ata_port *ap = adev->ap; 3567 struct ata_port *ap = adev->ap;
@@ -3573,7 +3607,7 @@ void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
3573 * Inherited from caller. 3607 * Inherited from caller.
3574 */ 3608 */
3575 3609
3576void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf, 3610void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
3577 unsigned int buflen, int write_data) 3611 unsigned int buflen, int write_data)
3578{ 3612{
3579 struct ata_port *ap = adev->ap; 3613 struct ata_port *ap = adev->ap;
@@ -3607,7 +3641,7 @@ void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
3607 * @buflen: buffer length 3641 * @buflen: buffer length
3608 * @write_data: read/write 3642 * @write_data: read/write
3609 * 3643 *
3610 * Transfer data from/to the device data register by PIO. Do the 3644 * Transfer data from/to the device data register by PIO. Do the
3611 * transfer with interrupts disabled. 3645 * transfer with interrupts disabled.
3612 * 3646 *
3613 * LOCKING: 3647 * LOCKING:
@@ -4946,31 +4980,9 @@ int ata_port_offline(struct ata_port *ap)
4946 return 0; 4980 return 0;
4947} 4981}
4948 4982
4949/* 4983int ata_flush_cache(struct ata_device *dev)
4950 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
4951 * without filling any other registers
4952 */
4953static int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
4954{
4955 struct ata_taskfile tf;
4956 int err;
4957
4958 ata_tf_init(dev, &tf);
4959
4960 tf.command = cmd;
4961 tf.flags |= ATA_TFLAG_DEVICE;
4962 tf.protocol = ATA_PROT_NODATA;
4963
4964 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
4965 if (err)
4966 ata_dev_printk(dev, KERN_ERR, "%s: ata command failed: %d\n",
4967 __FUNCTION__, err);
4968
4969 return err;
4970}
4971
4972static int ata_flush_cache(struct ata_device *dev)
4973{ 4984{
4985 unsigned int err_mask;
4974 u8 cmd; 4986 u8 cmd;
4975 4987
4976 if (!ata_try_flush_cache(dev)) 4988 if (!ata_try_flush_cache(dev))
@@ -4981,17 +4993,41 @@ static int ata_flush_cache(struct ata_device *dev)
4981 else 4993 else
4982 cmd = ATA_CMD_FLUSH; 4994 cmd = ATA_CMD_FLUSH;
4983 4995
4984 return ata_do_simple_cmd(dev, cmd); 4996 err_mask = ata_do_simple_cmd(dev, cmd);
4997 if (err_mask) {
4998 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
4999 return -EIO;
5000 }
5001
5002 return 0;
4985} 5003}
4986 5004
4987static int ata_standby_drive(struct ata_device *dev) 5005static int ata_standby_drive(struct ata_device *dev)
4988{ 5006{
4989 return ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1); 5007 unsigned int err_mask;
5008
5009 err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
5010 if (err_mask) {
5011 ata_dev_printk(dev, KERN_ERR, "failed to standby drive "
5012 "(err_mask=0x%x)\n", err_mask);
5013 return -EIO;
5014 }
5015
5016 return 0;
4990} 5017}
4991 5018
4992static int ata_start_drive(struct ata_device *dev) 5019static int ata_start_drive(struct ata_device *dev)
4993{ 5020{
4994 return ata_do_simple_cmd(dev, ATA_CMD_IDLEIMMEDIATE); 5021 unsigned int err_mask;
5022
5023 err_mask = ata_do_simple_cmd(dev, ATA_CMD_IDLEIMMEDIATE);
5024 if (err_mask) {
5025 ata_dev_printk(dev, KERN_ERR, "failed to start drive "
5026 "(err_mask=0x%x)\n", err_mask);
5027 return -EIO;
5028 }
5029
5030 return 0;
4995} 5031}
4996 5032
4997/** 5033/**
@@ -5212,7 +5248,7 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
5212 ap->msg_enable = 0x00FF; 5248 ap->msg_enable = 0x00FF;
5213#elif defined(ATA_DEBUG) 5249#elif defined(ATA_DEBUG)
5214 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR; 5250 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5215#else 5251#else
5216 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; 5252 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5217#endif 5253#endif
5218 5254
@@ -5709,6 +5745,7 @@ int ata_pci_device_resume(struct pci_dev *pdev)
5709 5745
5710static int __init ata_init(void) 5746static int __init ata_init(void)
5711{ 5747{
5748 ata_probe_timeout *= HZ;
5712 ata_wq = create_workqueue("ata"); 5749 ata_wq = create_workqueue("ata");
5713 if (!ata_wq) 5750 if (!ata_wq)
5714 return -ENOMEM; 5751 return -ENOMEM;
@@ -5733,7 +5770,7 @@ module_init(ata_init);
5733module_exit(ata_exit); 5770module_exit(ata_exit);
5734 5771
5735static unsigned long ratelimit_time; 5772static unsigned long ratelimit_time;
5736static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED; 5773static DEFINE_SPINLOCK(ata_ratelimit_lock);
5737 5774
5738int ata_ratelimit(void) 5775int ata_ratelimit(void)
5739{ 5776{
diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c
index 823385981a..bf5a72aca8 100644
--- a/drivers/scsi/libata-eh.c
+++ b/drivers/scsi/libata-eh.c
@@ -93,6 +93,38 @@ static int ata_ering_map(struct ata_ering *ering,
93 return rc; 93 return rc;
94} 94}
95 95
96static unsigned int ata_eh_dev_action(struct ata_device *dev)
97{
98 struct ata_eh_context *ehc = &dev->ap->eh_context;
99
100 return ehc->i.action | ehc->i.dev_action[dev->devno];
101}
102
103static void ata_eh_clear_action(struct ata_device *dev,
104 struct ata_eh_info *ehi, unsigned int action)
105{
106 int i;
107
108 if (!dev) {
109 ehi->action &= ~action;
110 for (i = 0; i < ATA_MAX_DEVICES; i++)
111 ehi->dev_action[i] &= ~action;
112 } else {
113 /* doesn't make sense for port-wide EH actions */
114 WARN_ON(!(action & ATA_EH_PERDEV_MASK));
115
116 /* break ehi->action into ehi->dev_action */
117 if (ehi->action & action) {
118 for (i = 0; i < ATA_MAX_DEVICES; i++)
119 ehi->dev_action[i] |= ehi->action & action;
120 ehi->action &= ~action;
121 }
122
123 /* turn off the specified per-dev action */
124 ehi->dev_action[dev->devno] &= ~action;
125 }
126}
127
96/** 128/**
97 * ata_scsi_timed_out - SCSI layer time out callback 129 * ata_scsi_timed_out - SCSI layer time out callback
98 * @cmd: timed out SCSI command 130 * @cmd: timed out SCSI command
@@ -702,32 +734,11 @@ static void ata_eh_detach_dev(struct ata_device *dev)
702 ap->flags |= ATA_FLAG_SCSI_HOTPLUG; 734 ap->flags |= ATA_FLAG_SCSI_HOTPLUG;
703 } 735 }
704 736
705 spin_unlock_irqrestore(ap->lock, flags); 737 /* clear per-dev EH actions */
706} 738 ata_eh_clear_action(dev, &ap->eh_info, ATA_EH_PERDEV_MASK);
707 739 ata_eh_clear_action(dev, &ap->eh_context.i, ATA_EH_PERDEV_MASK);
708static void ata_eh_clear_action(struct ata_device *dev,
709 struct ata_eh_info *ehi, unsigned int action)
710{
711 int i;
712 740
713 if (!dev) { 741 spin_unlock_irqrestore(ap->lock, flags);
714 ehi->action &= ~action;
715 for (i = 0; i < ATA_MAX_DEVICES; i++)
716 ehi->dev_action[i] &= ~action;
717 } else {
718 /* doesn't make sense for port-wide EH actions */
719 WARN_ON(!(action & ATA_EH_PERDEV_MASK));
720
721 /* break ehi->action into ehi->dev_action */
722 if (ehi->action & action) {
723 for (i = 0; i < ATA_MAX_DEVICES; i++)
724 ehi->dev_action[i] |= ehi->action & action;
725 ehi->action &= ~action;
726 }
727
728 /* turn off the specified per-dev action */
729 ehi->dev_action[dev->devno] &= ~action;
730 }
731} 742}
732 743
733/** 744/**
@@ -1592,7 +1603,7 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap,
1592 unsigned int action; 1603 unsigned int action;
1593 1604
1594 dev = &ap->device[i]; 1605 dev = &ap->device[i];
1595 action = ehc->i.action | ehc->i.dev_action[dev->devno]; 1606 action = ata_eh_dev_action(dev);
1596 1607
1597 if (action & ATA_EH_REVALIDATE && ata_dev_enabled(dev)) { 1608 if (action & ATA_EH_REVALIDATE && ata_dev_enabled(dev)) {
1598 if (ata_port_offline(ap)) { 1609 if (ata_port_offline(ap)) {
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 93d18a74c4..2915bca691 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -222,9 +222,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
222 && copy_to_user(arg + sizeof(args), argbuf, argsize)) 222 && copy_to_user(arg + sizeof(args), argbuf, argsize))
223 rc = -EFAULT; 223 rc = -EFAULT;
224error: 224error:
225 if (argbuf) 225 kfree(argbuf);
226 kfree(argbuf);
227
228 return rc; 226 return rc;
229} 227}
230 228
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index bdd4888970..c325679d9b 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -29,7 +29,7 @@
29#define __LIBATA_H__ 29#define __LIBATA_H__
30 30
31#define DRV_NAME "libata" 31#define DRV_NAME "libata"
32#define DRV_VERSION "1.30" /* must be exactly four chars */ 32#define DRV_VERSION "2.00" /* must be exactly four chars */
33 33
34struct ata_scsi_args { 34struct ata_scsi_args {
35 struct ata_device *dev; 35 struct ata_device *dev;
@@ -50,6 +50,7 @@ extern void ata_port_flush_task(struct ata_port *ap);
50extern unsigned ata_exec_internal(struct ata_device *dev, 50extern unsigned ata_exec_internal(struct ata_device *dev,
51 struct ata_taskfile *tf, const u8 *cdb, 51 struct ata_taskfile *tf, const u8 *cdb,
52 int dma_dir, void *buf, unsigned int buflen); 52 int dma_dir, void *buf, unsigned int buflen);
53extern unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd);
53extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, 54extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
54 int post_reset, u16 *id); 55 int post_reset, u16 *id);
55extern int ata_dev_configure(struct ata_device *dev, int print_info); 56extern int ata_dev_configure(struct ata_device *dev, int print_info);
@@ -64,6 +65,7 @@ extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
64extern void ata_dev_select(struct ata_port *ap, unsigned int device, 65extern void ata_dev_select(struct ata_port *ap, unsigned int device,
65 unsigned int wait, unsigned int can_sleep); 66 unsigned int wait, unsigned int can_sleep);
66extern void swap_buf_le16(u16 *buf, unsigned int buf_words); 67extern void swap_buf_le16(u16 *buf, unsigned int buf_words);
68extern int ata_flush_cache(struct ata_device *dev);
67extern void ata_dev_init(struct ata_device *dev); 69extern void ata_dev_init(struct ata_device *dev);
68extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg); 70extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg);
69extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg); 71extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 6ab035590e..b28712df0b 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -5118,8 +5118,7 @@ static void ncr_ccb_skipped(struct ncb *np, struct ccb *cp)
5118 cp->host_status &= ~HS_SKIPMASK; 5118 cp->host_status &= ~HS_SKIPMASK;
5119 cp->start.schedule.l_paddr = 5119 cp->start.schedule.l_paddr =
5120 cpu_to_scr(NCB_SCRIPT_PHYS (np, select)); 5120 cpu_to_scr(NCB_SCRIPT_PHYS (np, select));
5121 list_del(&cp->link_ccbq); 5121 list_move_tail(&cp->link_ccbq, &lp->skip_ccbq);
5122 list_add_tail(&cp->link_ccbq, &lp->skip_ccbq);
5123 if (cp->queued) { 5122 if (cp->queued) {
5124 --lp->queuedccbs; 5123 --lp->queuedccbs;
5125 } 5124 }
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 108910f512..d58ac5ad50 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -6,8 +6,6 @@
6 * (c) 1995,1996 Grant R. Guenther, grant@torque.net, 6 * (c) 1995,1996 Grant R. Guenther, grant@torque.net,
7 * under the terms of the GNU General Public License. 7 * under the terms of the GNU General Public License.
8 * 8 *
9 * Current Maintainer: David Campbell (Perth, Western Australia, GMT+0800)
10 * campbell@torque.net
11 */ 9 */
12 10
13#include <linux/config.h> 11#include <linux/config.h>
diff --git a/drivers/scsi/ppa.h b/drivers/scsi/ppa.h
index f6e1a1574b..7511df3588 100644
--- a/drivers/scsi/ppa.h
+++ b/drivers/scsi/ppa.h
@@ -2,7 +2,7 @@
2 * the Iomega ZIP drive 2 * the Iomega ZIP drive
3 * 3 *
4 * (c) 1996 Grant R. Guenther grant@torque.net 4 * (c) 1996 Grant R. Guenther grant@torque.net
5 * David Campbell campbell@torque.net 5 * David Campbell
6 * 6 *
7 * All comments to David. 7 * All comments to David.
8 */ 8 */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index ce32322f1e..ce74a6025a 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2347,8 +2347,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2347 } 2347 }
2348 2348
2349 /* Remove device from the new list and add it to DB */ 2349 /* Remove device from the new list and add it to DB */
2350 list_del(&fcport->list); 2350 list_move_tail(&fcport->list, &ha->fcports);
2351 list_add_tail(&fcport->list, &ha->fcports);
2352 2351
2353 /* Login and update database */ 2352 /* Login and update database */
2354 qla2x00_fabric_dev_login(ha, fcport, &next_loopid); 2353 qla2x00_fabric_dev_login(ha, fcport, &next_loopid);
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index d18e7e0932..5cc42c6054 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -44,7 +44,7 @@
44#include <linux/libata.h> 44#include <linux/libata.h>
45 45
46#define DRV_NAME "sata_nv" 46#define DRV_NAME "sata_nv"
47#define DRV_VERSION "0.9" 47#define DRV_VERSION "2.0"
48 48
49enum { 49enum {
50 NV_PORTS = 2, 50 NV_PORTS = 2,
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index bc9f918a7f..51d86d750e 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -46,12 +46,13 @@
46#include <linux/libata.h> 46#include <linux/libata.h>
47 47
48#define DRV_NAME "sata_sil" 48#define DRV_NAME "sata_sil"
49#define DRV_VERSION "1.0" 49#define DRV_VERSION "2.0"
50 50
51enum { 51enum {
52 /* 52 /*
53 * host flags 53 * host flags
54 */ 54 */
55 SIL_FLAG_NO_SATA_IRQ = (1 << 28),
55 SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29), 56 SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
56 SIL_FLAG_MOD15WRITE = (1 << 30), 57 SIL_FLAG_MOD15WRITE = (1 << 30),
57 58
@@ -62,8 +63,9 @@ enum {
62 * Controller IDs 63 * Controller IDs
63 */ 64 */
64 sil_3112 = 0, 65 sil_3112 = 0,
65 sil_3512 = 1, 66 sil_3112_no_sata_irq = 1,
66 sil_3114 = 2, 67 sil_3512 = 2,
68 sil_3114 = 3,
67 69
68 /* 70 /*
69 * Register offsets 71 * Register offsets
@@ -123,8 +125,8 @@ static const struct pci_device_id sil_pci_tbl[] = {
123 { 0x1095, 0x3512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3512 }, 125 { 0x1095, 0x3512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3512 },
124 { 0x1095, 0x3114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3114 }, 126 { 0x1095, 0x3114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3114 },
125 { 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 }, 127 { 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
126 { 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 }, 128 { 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_no_sata_irq },
127 { 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 }, 129 { 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_no_sata_irq },
128 { } /* terminate list */ 130 { } /* terminate list */
129}; 131};
130 132
@@ -217,6 +219,16 @@ static const struct ata_port_info sil_port_info[] = {
217 .udma_mask = 0x3f, /* udma0-5 */ 219 .udma_mask = 0x3f, /* udma0-5 */
218 .port_ops = &sil_ops, 220 .port_ops = &sil_ops,
219 }, 221 },
222 /* sil_3112_no_sata_irq */
223 {
224 .sht = &sil_sht,
225 .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_MOD15WRITE |
226 SIL_FLAG_NO_SATA_IRQ,
227 .pio_mask = 0x1f, /* pio0-4 */
228 .mwdma_mask = 0x07, /* mwdma0-2 */
229 .udma_mask = 0x3f, /* udma0-5 */
230 .port_ops = &sil_ops,
231 },
220 /* sil_3512 */ 232 /* sil_3512 */
221 { 233 {
222 .sht = &sil_sht, 234 .sht = &sil_sht,
@@ -437,6 +449,10 @@ static irqreturn_t sil_interrupt(int irq, void *dev_instance,
437 if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED)) 449 if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED))
438 continue; 450 continue;
439 451
452 /* turn off SATA_IRQ if not supported */
453 if (ap->flags & SIL_FLAG_NO_SATA_IRQ)
454 bmdma2 &= ~SIL_DMA_SATA_IRQ;
455
440 if (bmdma2 == 0xffffffff || 456 if (bmdma2 == 0xffffffff ||
441 !(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ))) 457 !(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ)))
442 continue; 458 continue;
@@ -474,8 +490,9 @@ static void sil_thaw(struct ata_port *ap)
474 ata_chk_status(ap); 490 ata_chk_status(ap);
475 ata_bmdma_irq_clear(ap); 491 ata_bmdma_irq_clear(ap);
476 492
477 /* turn on SATA IRQ */ 493 /* turn on SATA IRQ if supported */
478 writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien); 494 if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ))
495 writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien);
479 496
480 /* turn on IRQ */ 497 /* turn on IRQ */
481 tmp = readl(mmio_base + SIL_SYSCFG); 498 tmp = readl(mmio_base + SIL_SYSCFG);
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index c8b477c672..b5f8fa9556 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -31,7 +31,7 @@
31#include <asm/io.h> 31#include <asm/io.h>
32 32
33#define DRV_NAME "sata_sil24" 33#define DRV_NAME "sata_sil24"
34#define DRV_VERSION "0.24" 34#define DRV_VERSION "0.3"
35 35
36/* 36/*
37 * Port request block (PRB) 32 bytes 37 * Port request block (PRB) 32 bytes
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index c94b870cf3..7566c2caba 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -54,7 +54,7 @@
54#endif /* CONFIG_PPC_OF */ 54#endif /* CONFIG_PPC_OF */
55 55
56#define DRV_NAME "sata_svw" 56#define DRV_NAME "sata_svw"
57#define DRV_VERSION "1.8" 57#define DRV_VERSION "2.0"
58 58
59enum { 59enum {
60 /* Taskfile registers offsets */ 60 /* Taskfile registers offsets */
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c
index f668c997e9..64f3c1aeed 100644
--- a/drivers/scsi/sata_uli.c
+++ b/drivers/scsi/sata_uli.c
@@ -37,7 +37,7 @@
37#include <linux/libata.h> 37#include <linux/libata.h>
38 38
39#define DRV_NAME "sata_uli" 39#define DRV_NAME "sata_uli"
40#define DRV_VERSION "0.6" 40#define DRV_VERSION "1.0"
41 41
42enum { 42enum {
43 uli_5289 = 0, 43 uli_5289 = 0,
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index 322890b400..67c3d29997 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -47,7 +47,7 @@
47#include <asm/io.h> 47#include <asm/io.h>
48 48
49#define DRV_NAME "sata_via" 49#define DRV_NAME "sata_via"
50#define DRV_VERSION "1.2" 50#define DRV_VERSION "2.0"
51 51
52enum board_ids_enum { 52enum board_ids_enum {
53 vt6420, 53 vt6420,
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index 6d0c4f18e6..616fd9634b 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -47,7 +47,7 @@
47#include <linux/libata.h> 47#include <linux/libata.h>
48 48
49#define DRV_NAME "sata_vsc" 49#define DRV_NAME "sata_vsc"
50#define DRV_VERSION "1.2" 50#define DRV_VERSION "2.0"
51 51
52enum { 52enum {
53 /* Interrupt register offsets (from chip base address) */ 53 /* Interrupt register offsets (from chip base address) */
@@ -443,16 +443,12 @@ err_out:
443} 443}
444 444
445 445
446/*
447 * Intel 31244 is supposed to be identical.
448 * Compatibility is untested as of yet.
449 */
450static const struct pci_device_id vsc_sata_pci_tbl[] = { 446static const struct pci_device_id vsc_sata_pci_tbl[] = {
451 { PCI_VENDOR_ID_VITESSE, PCI_DEVICE_ID_VITESSE_VSC7174, 447 { PCI_VENDOR_ID_VITESSE, 0x7174,
452 PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 }, 448 PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
453 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GD31244, 449 { PCI_VENDOR_ID_INTEL, 0x3200,
454 PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 }, 450 PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
455 { } 451 { } /* terminate list */
456}; 452};
457 453
458 454
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 1272dd249a..b5218fc0ac 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -2818,7 +2818,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
2818 (cmdstatp->sense_hdr.sense_key == NO_SENSE || 2818 (cmdstatp->sense_hdr.sense_key == NO_SENSE ||
2819 cmdstatp->sense_hdr.sense_key == RECOVERED_ERROR) && 2819 cmdstatp->sense_hdr.sense_key == RECOVERED_ERROR) &&
2820 undone == 0) { 2820 undone == 0) {
2821 ioctl_result = 0; /* EOF written succesfully at EOM */ 2821 ioctl_result = 0; /* EOF written successfully at EOM */
2822 if (fileno >= 0) 2822 if (fileno >= 0)
2823 fileno++; 2823 fileno++;
2824 STps->drv_file = fileno; 2824 STps->drv_file = fileno;
diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c
index b79ed0665d..739bc84f91 100644
--- a/drivers/serial/8250_pnp.c
+++ b/drivers/serial/8250_pnp.c
@@ -323,8 +323,10 @@ static const struct pnp_device_id pnp_dev_table[] = {
323 { "USR9180", 0 }, 323 { "USR9180", 0 },
324 /* U.S. Robotics 56K Voice INT PnP*/ 324 /* U.S. Robotics 56K Voice INT PnP*/
325 { "USR9190", 0 }, 325 { "USR9190", 0 },
326 /* HP Compaq Tablet PC tc1100 Wacom tablet */ 326 /* Wacom tablets */
327 { "WACF004", 0 },
327 { "WACF005", 0 }, 328 { "WACF005", 0 },
329 { "WACF006", 0 },
328 /* Rockwell's (PORALiNK) 33600 INT PNP */ 330 /* Rockwell's (PORALiNK) 33600 INT PNP */
329 { "WCI0003", 0 }, 331 { "WCI0003", 0 },
330 /* Unkown PnP modems */ 332 /* Unkown PnP modems */
diff --git a/drivers/sn/ioc3.c b/drivers/sn/ioc3.c
index 501316b198..ed946311d3 100644
--- a/drivers/sn/ioc3.c
+++ b/drivers/sn/ioc3.c
@@ -26,7 +26,7 @@ static DECLARE_RWSEM(ioc3_devices_rwsem);
26 26
27static struct ioc3_submodule *ioc3_submodules[IOC3_MAX_SUBMODULES]; 27static struct ioc3_submodule *ioc3_submodules[IOC3_MAX_SUBMODULES];
28static struct ioc3_submodule *ioc3_ethernet; 28static struct ioc3_submodule *ioc3_ethernet;
29static rwlock_t ioc3_submodules_lock = RW_LOCK_UNLOCKED; 29static DEFINE_RWLOCK(ioc3_submodules_lock);
30 30
31/* NIC probing code */ 31/* NIC probing code */
32 32
diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
index 5578a9dd04..f6b2948ab2 100644
--- a/drivers/telephony/ixj.c
+++ b/drivers/telephony/ixj.c
@@ -5712,7 +5712,7 @@ static int ixj_daa_write(IXJ *j)
5712 return 1; 5712 return 1;
5713} 5713}
5714 5714
5715int ixj_set_tone_off(unsigned short arg, IXJ *j) 5715static int ixj_set_tone_off(unsigned short arg, IXJ *j)
5716{ 5716{
5717 j->tone_off_time = arg; 5717 j->tone_off_time = arg;
5718 if (ixj_WriteDSPCommand(0x6E05, j)) /* Set Tone Off Period */ 5718 if (ixj_WriteDSPCommand(0x6E05, j)) /* Set Tone Off Period */
diff --git a/drivers/usb/host/hc_crisv10.c b/drivers/usb/host/hc_crisv10.c
index 2fe7fd1943..4a22909518 100644
--- a/drivers/usb/host/hc_crisv10.c
+++ b/drivers/usb/host/hc_crisv10.c
@@ -411,8 +411,7 @@ static inline void urb_list_move_last(struct urb *urb, int epid)
411 urb_entry_t *urb_entry = __urb_list_entry(urb, epid); 411 urb_entry_t *urb_entry = __urb_list_entry(urb, epid);
412 assert(urb_entry); 412 assert(urb_entry);
413 413
414 list_del(&urb_entry->list); 414 list_move_tail(&urb_entry->list, &urb_list[epid]);
415 list_add_tail(&urb_entry->list, &urb_list[epid]);
416} 415}
417 416
418/* Get the next urb in the list. */ 417/* Get the next urb in the list. */
diff --git a/drivers/usb/input/fixp-arith.h b/drivers/usb/input/fixp-arith.h
index b44d398de0..ed3d2da0c4 100644
--- a/drivers/usb/input/fixp-arith.h
+++ b/drivers/usb/input/fixp-arith.h
@@ -2,8 +2,6 @@
2#define _FIXP_ARITH_H 2#define _FIXP_ARITH_H
3 3
4/* 4/*
5 * $$
6 *
7 * Simplistic fixed-point arithmetics. 5 * Simplistic fixed-point arithmetics.
8 * Hmm, I'm probably duplicating some code :( 6 * Hmm, I'm probably duplicating some code :(
9 * 7 *
@@ -31,20 +29,20 @@
31 29
32#include <linux/types.h> 30#include <linux/types.h>
33 31
34// The type representing fixed-point values 32/* The type representing fixed-point values */
35typedef s16 fixp_t; 33typedef s16 fixp_t;
36 34
37#define FRAC_N 8 35#define FRAC_N 8
38#define FRAC_MASK ((1<<FRAC_N)-1) 36#define FRAC_MASK ((1<<FRAC_N)-1)
39 37
40// Not to be used directly. Use fixp_{cos,sin} 38/* Not to be used directly. Use fixp_{cos,sin} */
41static const fixp_t cos_table[45] = { 39static const fixp_t cos_table[46] = {
42 0x0100, 0x00FF, 0x00FF, 0x00FE, 0x00FD, 0x00FC, 0x00FA, 0x00F8, 40 0x0100, 0x00FF, 0x00FF, 0x00FE, 0x00FD, 0x00FC, 0x00FA, 0x00F8,
43 0x00F6, 0x00F3, 0x00F0, 0x00ED, 0x00E9, 0x00E6, 0x00E2, 0x00DD, 41 0x00F6, 0x00F3, 0x00F0, 0x00ED, 0x00E9, 0x00E6, 0x00E2, 0x00DD,
44 0x00D9, 0x00D4, 0x00CF, 0x00C9, 0x00C4, 0x00BE, 0x00B8, 0x00B1, 42 0x00D9, 0x00D4, 0x00CF, 0x00C9, 0x00C4, 0x00BE, 0x00B8, 0x00B1,
45 0x00AB, 0x00A4, 0x009D, 0x0096, 0x008F, 0x0087, 0x0080, 0x0078, 43 0x00AB, 0x00A4, 0x009D, 0x0096, 0x008F, 0x0087, 0x0080, 0x0078,
46 0x0070, 0x0068, 0x005F, 0x0057, 0x004F, 0x0046, 0x003D, 0x0035, 44 0x0070, 0x0068, 0x005F, 0x0057, 0x004F, 0x0046, 0x003D, 0x0035,
47 0x002C, 0x0023, 0x001A, 0x0011, 0x0008 45 0x002C, 0x0023, 0x001A, 0x0011, 0x0008, 0x0000
48}; 46};
49 47
50 48
@@ -68,9 +66,8 @@ static inline fixp_t fixp_cos(unsigned int degrees)
68 int quadrant = (degrees / 90) & 3; 66 int quadrant = (degrees / 90) & 3;
69 unsigned int i = degrees % 90; 67 unsigned int i = degrees % 90;
70 68
71 if (quadrant == 1 || quadrant == 3) { 69 if (quadrant == 1 || quadrant == 3)
72 i = 89 - i; 70 i = 90 - i;
73 }
74 71
75 i >>= 1; 72 i >>= 1;
76 73
diff --git a/drivers/usb/input/hid-debug.h b/drivers/usb/input/hid-debug.h
index 702c48c2f8..f04d6d75c0 100644
--- a/drivers/usb/input/hid-debug.h
+++ b/drivers/usb/input/hid-debug.h
@@ -563,7 +563,7 @@ static char *keys[KEY_MAX + 1] = {
563 [KEY_VOLUMEUP] = "VolumeUp", [KEY_POWER] = "Power", 563 [KEY_VOLUMEUP] = "VolumeUp", [KEY_POWER] = "Power",
564 [KEY_KPEQUAL] = "KPEqual", [KEY_KPPLUSMINUS] = "KPPlusMinus", 564 [KEY_KPEQUAL] = "KPEqual", [KEY_KPPLUSMINUS] = "KPPlusMinus",
565 [KEY_PAUSE] = "Pause", [KEY_KPCOMMA] = "KPComma", 565 [KEY_PAUSE] = "Pause", [KEY_KPCOMMA] = "KPComma",
566 [KEY_HANGUEL] = "Hanguel", [KEY_HANJA] = "Hanja", 566 [KEY_HANGUEL] = "Hangeul", [KEY_HANJA] = "Hanja",
567 [KEY_YEN] = "Yen", [KEY_LEFTMETA] = "LeftMeta", 567 [KEY_YEN] = "Yen", [KEY_LEFTMETA] = "LeftMeta",
568 [KEY_RIGHTMETA] = "RightMeta", [KEY_COMPOSE] = "Compose", 568 [KEY_RIGHTMETA] = "RightMeta", [KEY_COMPOSE] = "Compose",
569 [KEY_STOP] = "Stop", [KEY_AGAIN] = "Again", 569 [KEY_STOP] = "Stop", [KEY_AGAIN] = "Again",
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 5b06fa3660..56ffc81302 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -686,19 +686,16 @@ static void whiteheat_close(struct usb_serial_port *port, struct file * filp)
686 wrap = list_entry(tmp, struct whiteheat_urb_wrap, list); 686 wrap = list_entry(tmp, struct whiteheat_urb_wrap, list);
687 urb = wrap->urb; 687 urb = wrap->urb;
688 usb_kill_urb(urb); 688 usb_kill_urb(urb);
689 list_del(tmp); 689 list_move(tmp, &info->rx_urbs_free);
690 list_add(tmp, &info->rx_urbs_free);
691 }
692 list_for_each_safe(tmp, tmp2, &info->rx_urb_q) {
693 list_del(tmp);
694 list_add(tmp, &info->rx_urbs_free);
695 } 690 }
691 list_for_each_safe(tmp, tmp2, &info->rx_urb_q)
692 list_move(tmp, &info->rx_urbs_free);
693
696 list_for_each_safe(tmp, tmp2, &info->tx_urbs_submitted) { 694 list_for_each_safe(tmp, tmp2, &info->tx_urbs_submitted) {
697 wrap = list_entry(tmp, struct whiteheat_urb_wrap, list); 695 wrap = list_entry(tmp, struct whiteheat_urb_wrap, list);
698 urb = wrap->urb; 696 urb = wrap->urb;
699 usb_kill_urb(urb); 697 usb_kill_urb(urb);
700 list_del(tmp); 698 list_move(tmp, &info->tx_urbs_free);
701 list_add(tmp, &info->tx_urbs_free);
702 } 699 }
703 spin_unlock_irqrestore(&info->lock, flags); 700 spin_unlock_irqrestore(&info->lock, flags);
704 701
@@ -1080,8 +1077,7 @@ static void whiteheat_write_callback(struct urb *urb, struct pt_regs *regs)
1080 err("%s - Not my urb!", __FUNCTION__); 1077 err("%s - Not my urb!", __FUNCTION__);
1081 return; 1078 return;
1082 } 1079 }
1083 list_del(&wrap->list); 1080 list_move(&wrap->list, &info->tx_urbs_free);
1084 list_add(&wrap->list, &info->tx_urbs_free);
1085 spin_unlock(&info->lock); 1081 spin_unlock(&info->lock);
1086 1082
1087 if (urb->status) { 1083 if (urb->status) {
@@ -1371,8 +1367,7 @@ static int start_port_read(struct usb_serial_port *port)
1371 wrap = list_entry(tmp, struct whiteheat_urb_wrap, list); 1367 wrap = list_entry(tmp, struct whiteheat_urb_wrap, list);
1372 urb = wrap->urb; 1368 urb = wrap->urb;
1373 usb_kill_urb(urb); 1369 usb_kill_urb(urb);
1374 list_del(tmp); 1370 list_move(tmp, &info->rx_urbs_free);
1375 list_add(tmp, &info->rx_urbs_free);
1376 } 1371 }
1377 break; 1372 break;
1378 } 1373 }
diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
index 009fb0953a..5284abe1b5 100644
--- a/drivers/usb/storage/usb.h
+++ b/drivers/usb/storage/usb.h
@@ -160,10 +160,10 @@ struct us_data {
160}; 160};
161 161
162/* Convert between us_data and the corresponding Scsi_Host */ 162/* Convert between us_data and the corresponding Scsi_Host */
163static struct Scsi_Host inline *us_to_host(struct us_data *us) { 163static inline struct Scsi_Host *us_to_host(struct us_data *us) {
164 return container_of((void *) us, struct Scsi_Host, hostdata); 164 return container_of((void *) us, struct Scsi_Host, hostdata);
165} 165}
166static struct us_data inline *host_to_us(struct Scsi_Host *host) { 166static inline struct us_data *host_to_us(struct Scsi_Host *host) {
167 return (struct us_data *) host->hostdata; 167 return (struct us_data *) host->hostdata;
168} 168}
169 169
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 168ede7902..17de4c84db 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -4,6 +4,21 @@
4 4
5menu "Graphics support" 5menu "Graphics support"
6 6
7config FIRMWARE_EDID
8 bool "Enable firmware EDID"
9 default y
10 ---help---
11 This enables access to the EDID transferred from the firmware.
12 On the i386, this is from the Video BIOS. Enable this if DDC/I2C
13 transfers do not work for your driver and if you are using
14 nvidiafb, i810fb or savagefb.
15
16 In general, choosing Y for this option is safe. If you
17 experience extremely long delays while booting before you get
18 something on your display, try setting this to N. Matrox cards in
19 combination with certain motherboards and monitors are known to
20 suffer from this problem.
21
7config FB 22config FB
8 tristate "Support for frame buffer devices" 23 tristate "Support for frame buffer devices"
9 ---help--- 24 ---help---
@@ -70,22 +85,6 @@ config FB_MACMODES
70 depends on FB 85 depends on FB
71 default n 86 default n
72 87
73config FB_FIRMWARE_EDID
74 bool "Enable firmware EDID"
75 depends on FB
76 default y
77 ---help---
78 This enables access to the EDID transferred from the firmware.
79 On the i386, this is from the Video BIOS. Enable this if DDC/I2C
80 transfers do not work for your driver and if you are using
81 nvidiafb, i810fb or savagefb.
82
83 In general, choosing Y for this option is safe. If you
84 experience extremely long delays while booting before you get
85 something on your display, try setting this to N. Matrox cards in
86 combination with certain motherboards and monitors are known to
87 suffer from this problem.
88
89config FB_BACKLIGHT 88config FB_BACKLIGHT
90 bool 89 bool
91 depends on FB 90 depends on FB
@@ -551,10 +550,14 @@ config FB_VESA
551 You will get a boot time penguin logo at no additional cost. Please 550 You will get a boot time penguin logo at no additional cost. Please
552 read <file:Documentation/fb/vesafb.txt>. If unsure, say Y. 551 read <file:Documentation/fb/vesafb.txt>. If unsure, say Y.
553 552
554config VIDEO_SELECT 553config FB_IMAC
555 bool 554 bool "Intel-based Macintosh Framebuffer Support"
556 depends on FB_VESA 555 depends on (FB = y) && X86
557 default y 556 select FB_CFB_FILLRECT
557 select FB_CFB_COPYAREA
558 select FB_CFB_IMAGEBLIT
559 help
560 This is the frame buffer device driver for the Intel-based Macintosh
558 561
559config FB_HGA 562config FB_HGA
560 tristate "Hercules mono graphics support" 563 tristate "Hercules mono graphics support"
@@ -578,12 +581,6 @@ config FB_HGA_ACCEL
578 This will compile the Hercules mono graphics with 581 This will compile the Hercules mono graphics with
579 acceleration functions. 582 acceleration functions.
580 583
581
582config VIDEO_SELECT
583 bool
584 depends on (FB = y) && X86
585 default y
586
587config FB_SGIVW 584config FB_SGIVW
588 tristate "SGI Visual Workstation framebuffer support" 585 tristate "SGI Visual Workstation framebuffer support"
589 depends on FB && X86_VISWS 586 depends on FB && X86_VISWS
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 23de3b2c78..c335e9bc3b 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -4,15 +4,15 @@
4 4
5# Each configuration option enables a list of files. 5# Each configuration option enables a list of files.
6 6
7obj-$(CONFIG_VT) += console/
8obj-$(CONFIG_LOGO) += logo/
9obj-$(CONFIG_SYSFS) += backlight/
10
11obj-$(CONFIG_FB) += fb.o 7obj-$(CONFIG_FB) += fb.o
12fb-y := fbmem.o fbmon.o fbcmap.o fbsysfs.o \ 8fb-y := fbmem.o fbmon.o fbcmap.o fbsysfs.o \
13 modedb.o fbcvt.o 9 modedb.o fbcvt.o
14fb-objs := $(fb-y) 10fb-objs := $(fb-y)
15 11
12obj-$(CONFIG_VT) += console/
13obj-$(CONFIG_LOGO) += logo/
14obj-$(CONFIG_SYSFS) += backlight/
15
16obj-$(CONFIG_FB_CFB_FILLRECT) += cfbfillrect.o 16obj-$(CONFIG_FB_CFB_FILLRECT) += cfbfillrect.o
17obj-$(CONFIG_FB_CFB_COPYAREA) += cfbcopyarea.o 17obj-$(CONFIG_FB_CFB_COPYAREA) += cfbcopyarea.o
18obj-$(CONFIG_FB_CFB_IMAGEBLIT) += cfbimgblt.o 18obj-$(CONFIG_FB_CFB_IMAGEBLIT) += cfbimgblt.o
@@ -97,6 +97,7 @@ obj-$(CONFIG_FB_S3C2410) += s3c2410fb.o
97 97
98# Platform or fallback drivers go here 98# Platform or fallback drivers go here
99obj-$(CONFIG_FB_VESA) += vesafb.o 99obj-$(CONFIG_FB_VESA) += vesafb.o
100obj-$(CONFIG_FB_IMAC) += imacfb.o
100obj-$(CONFIG_FB_VGA16) += vga16fb.o vgastate.o 101obj-$(CONFIG_FB_VGA16) += vga16fb.o vgastate.o
101obj-$(CONFIG_FB_OF) += offb.o 102obj-$(CONFIG_FB_OF) += offb.o
102 103
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index db878fd55f..11cf7fcb1d 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -100,7 +100,7 @@
100 100
101#ifndef CONFIG_PPC_PMAC 101#ifndef CONFIG_PPC_PMAC
102/* default mode */ 102/* default mode */
103static struct fb_var_screeninfo default_var __initdata = { 103static struct fb_var_screeninfo default_var __devinitdata = {
104 /* 640x480, 60 Hz, Non-Interlaced (25.175 MHz dotclock) */ 104 /* 640x480, 60 Hz, Non-Interlaced (25.175 MHz dotclock) */
105 640, 480, 640, 480, 0, 0, 8, 0, 105 640, 480, 640, 480, 0, 0, 8, 0,
106 {0, 8, 0}, {0, 8, 0}, {0, 8, 0}, {0, 0, 0}, 106 {0, 8, 0}, {0, 8, 0}, {0, 8, 0}, {0, 0, 0},
@@ -123,7 +123,7 @@ static struct fb_var_screeninfo default_var = {
123 123
124/* default modedb mode */ 124/* default modedb mode */
125/* 640x480, 60 Hz, Non-Interlaced (25.172 MHz dotclock) */ 125/* 640x480, 60 Hz, Non-Interlaced (25.172 MHz dotclock) */
126static struct fb_videomode defaultmode __initdata = { 126static struct fb_videomode defaultmode __devinitdata = {
127 .refresh = 60, 127 .refresh = 60,
128 .xres = 640, 128 .xres = 640,
129 .yres = 480, 129 .yres = 480,
@@ -335,7 +335,7 @@ static const struct aty128_meminfo sdr_sgram =
335static const struct aty128_meminfo ddr_sgram = 335static const struct aty128_meminfo ddr_sgram =
336 { 4, 4, 3, 3, 2, 3, 1, 16, 31, 16, "64-bit DDR SGRAM" }; 336 { 4, 4, 3, 3, 2, 3, 1, 16, 31, 16, "64-bit DDR SGRAM" };
337 337
338static struct fb_fix_screeninfo aty128fb_fix __initdata = { 338static struct fb_fix_screeninfo aty128fb_fix __devinitdata = {
339 .id = "ATY Rage128", 339 .id = "ATY Rage128",
340 .type = FB_TYPE_PACKED_PIXELS, 340 .type = FB_TYPE_PACKED_PIXELS,
341 .visual = FB_VISUAL_PSEUDOCOLOR, 341 .visual = FB_VISUAL_PSEUDOCOLOR,
@@ -345,15 +345,15 @@ static struct fb_fix_screeninfo aty128fb_fix __initdata = {
345 .accel = FB_ACCEL_ATI_RAGE128, 345 .accel = FB_ACCEL_ATI_RAGE128,
346}; 346};
347 347
348static char *mode_option __initdata = NULL; 348static char *mode_option __devinitdata = NULL;
349 349
350#ifdef CONFIG_PPC_PMAC 350#ifdef CONFIG_PPC_PMAC
351static int default_vmode __initdata = VMODE_1024_768_60; 351static int default_vmode __devinitdata = VMODE_1024_768_60;
352static int default_cmode __initdata = CMODE_8; 352static int default_cmode __devinitdata = CMODE_8;
353#endif 353#endif
354 354
355static int default_crt_on __initdata = 0; 355static int default_crt_on __devinitdata = 0;
356static int default_lcd_on __initdata = 1; 356static int default_lcd_on __devinitdata = 1;
357 357
358#ifdef CONFIG_MTRR 358#ifdef CONFIG_MTRR
359static int mtrr = 1; 359static int mtrr = 1;
@@ -445,9 +445,9 @@ static int aty128_encode_var(struct fb_var_screeninfo *var,
445static int aty128_decode_var(struct fb_var_screeninfo *var, 445static int aty128_decode_var(struct fb_var_screeninfo *var,
446 struct aty128fb_par *par); 446 struct aty128fb_par *par);
447#if 0 447#if 0
448static void __init aty128_get_pllinfo(struct aty128fb_par *par, 448static void __devinit aty128_get_pllinfo(struct aty128fb_par *par,
449 void __iomem *bios); 449 void __iomem *bios);
450static void __init __iomem *aty128_map_ROM(struct pci_dev *pdev, const struct aty128fb_par *par); 450static void __devinit __iomem *aty128_map_ROM(struct pci_dev *pdev, const struct aty128fb_par *par);
451#endif 451#endif
452static void aty128_timings(struct aty128fb_par *par); 452static void aty128_timings(struct aty128fb_par *par);
453static void aty128_init_engine(struct aty128fb_par *par); 453static void aty128_init_engine(struct aty128fb_par *par);
@@ -573,7 +573,7 @@ static void aty_pll_writeupdate(const struct aty128fb_par *par)
573 573
574 574
575/* write to the scratch register to test r/w functionality */ 575/* write to the scratch register to test r/w functionality */
576static int __init register_test(const struct aty128fb_par *par) 576static int __devinit register_test(const struct aty128fb_par *par)
577{ 577{
578 u32 val; 578 u32 val;
579 int flag = 0; 579 int flag = 0;
@@ -772,7 +772,7 @@ static u32 depth_to_dst(u32 depth)
772 772
773 773
774#ifndef __sparc__ 774#ifndef __sparc__
775static void __iomem * __init aty128_map_ROM(const struct aty128fb_par *par, struct pci_dev *dev) 775static void __iomem * __devinit aty128_map_ROM(const struct aty128fb_par *par, struct pci_dev *dev)
776{ 776{
777 u16 dptr; 777 u16 dptr;
778 u8 rom_type; 778 u8 rom_type;
@@ -856,7 +856,7 @@ static void __iomem * __init aty128_map_ROM(const struct aty128fb_par *par, stru
856 return NULL; 856 return NULL;
857} 857}
858 858
859static void __init aty128_get_pllinfo(struct aty128fb_par *par, unsigned char __iomem *bios) 859static void __devinit aty128_get_pllinfo(struct aty128fb_par *par, unsigned char __iomem *bios)
860{ 860{
861 unsigned int bios_hdr; 861 unsigned int bios_hdr;
862 unsigned int bios_pll; 862 unsigned int bios_pll;
@@ -903,7 +903,7 @@ static void __iomem * __devinit aty128_find_mem_vbios(struct aty128fb_par *par)
903#endif /* ndef(__sparc__) */ 903#endif /* ndef(__sparc__) */
904 904
905/* fill in known card constants if pll_block is not available */ 905/* fill in known card constants if pll_block is not available */
906static void __init aty128_timings(struct aty128fb_par *par) 906static void __devinit aty128_timings(struct aty128fb_par *par)
907{ 907{
908#ifdef CONFIG_PPC_OF 908#ifdef CONFIG_PPC_OF
909 /* instead of a table lookup, assume OF has properly 909 /* instead of a table lookup, assume OF has properly
@@ -1645,7 +1645,7 @@ static int aty128fb_sync(struct fb_info *info)
1645} 1645}
1646 1646
1647#ifndef MODULE 1647#ifndef MODULE
1648static int __init aty128fb_setup(char *options) 1648static int __devinit aty128fb_setup(char *options)
1649{ 1649{
1650 char *this_opt; 1650 char *this_opt;
1651 1651
@@ -1893,7 +1893,7 @@ static void aty128_early_resume(void *data)
1893} 1893}
1894#endif /* CONFIG_PPC_PMAC */ 1894#endif /* CONFIG_PPC_PMAC */
1895 1895
1896static int __init aty128_init(struct pci_dev *pdev, const struct pci_device_id *ent) 1896static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_id *ent)
1897{ 1897{
1898 struct fb_info *info = pci_get_drvdata(pdev); 1898 struct fb_info *info = pci_get_drvdata(pdev);
1899 struct aty128fb_par *par = info->par; 1899 struct aty128fb_par *par = info->par;
@@ -2037,7 +2037,7 @@ static int __init aty128_init(struct pci_dev *pdev, const struct pci_device_id *
2037 2037
2038#ifdef CONFIG_PCI 2038#ifdef CONFIG_PCI
2039/* register a card ++ajoshi */ 2039/* register a card ++ajoshi */
2040static int __init aty128_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 2040static int __devinit aty128_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2041{ 2041{
2042 unsigned long fb_addr, reg_addr; 2042 unsigned long fb_addr, reg_addr;
2043 struct aty128fb_par *par; 2043 struct aty128fb_par *par;
@@ -2556,7 +2556,7 @@ static int aty128_pci_resume(struct pci_dev *pdev)
2556} 2556}
2557 2557
2558 2558
2559static int __init aty128fb_init(void) 2559static int __devinit aty128fb_init(void)
2560{ 2560{
2561#ifndef MODULE 2561#ifndef MODULE
2562 char *option = NULL; 2562 char *option = NULL;
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index c5185f7cf4..22e720611b 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -316,12 +316,12 @@ static int vram;
316static int pll; 316static int pll;
317static int mclk; 317static int mclk;
318static int xclk; 318static int xclk;
319static int comp_sync __initdata = -1; 319static int comp_sync __devinitdata = -1;
320static char *mode; 320static char *mode;
321 321
322#ifdef CONFIG_PPC 322#ifdef CONFIG_PPC
323static int default_vmode __initdata = VMODE_CHOOSE; 323static int default_vmode __devinitdata = VMODE_CHOOSE;
324static int default_cmode __initdata = CMODE_CHOOSE; 324static int default_cmode __devinitdata = CMODE_CHOOSE;
325 325
326module_param_named(vmode, default_vmode, int, 0); 326module_param_named(vmode, default_vmode, int, 0);
327MODULE_PARM_DESC(vmode, "int: video mode for mac"); 327MODULE_PARM_DESC(vmode, "int: video mode for mac");
@@ -330,10 +330,10 @@ MODULE_PARM_DESC(cmode, "int: color mode for mac");
330#endif 330#endif
331 331
332#ifdef CONFIG_ATARI 332#ifdef CONFIG_ATARI
333static unsigned int mach64_count __initdata = 0; 333static unsigned int mach64_count __devinitdata = 0;
334static unsigned long phys_vmembase[FB_MAX] __initdata = { 0, }; 334static unsigned long phys_vmembase[FB_MAX] __devinitdata = { 0, };
335static unsigned long phys_size[FB_MAX] __initdata = { 0, }; 335static unsigned long phys_size[FB_MAX] __devinitdata = { 0, };
336static unsigned long phys_guiregbase[FB_MAX] __initdata = { 0, }; 336static unsigned long phys_guiregbase[FB_MAX] __devinitdata = { 0, };
337#endif 337#endif
338 338
339/* top -> down is an evolution of mach64 chipset, any corrections? */ 339/* top -> down is an evolution of mach64 chipset, any corrections? */
@@ -583,7 +583,7 @@ static u32 atyfb_get_pixclock(struct fb_var_screeninfo *var, struct atyfb_par *p
583 * Apple monitor sense 583 * Apple monitor sense
584 */ 584 */
585 585
586static int __init read_aty_sense(const struct atyfb_par *par) 586static int __devinit read_aty_sense(const struct atyfb_par *par)
587{ 587{
588 int sense, i; 588 int sense, i;
589 589
@@ -1281,6 +1281,14 @@ static int atyfb_set_par(struct fb_info *info)
1281 1281
1282 par->accel_flags = var->accel_flags; /* hack */ 1282 par->accel_flags = var->accel_flags; /* hack */
1283 1283
1284 if (var->accel_flags) {
1285 info->fbops->fb_sync = atyfb_sync;
1286 info->flags &= ~FBINFO_HWACCEL_DISABLED;
1287 } else {
1288 info->fbops->fb_sync = NULL;
1289 info->flags |= FBINFO_HWACCEL_DISABLED;
1290 }
1291
1284 if (par->blitter_may_be_busy) 1292 if (par->blitter_may_be_busy)
1285 wait_for_idle(par); 1293 wait_for_idle(par);
1286 1294
@@ -2253,7 +2261,7 @@ static void aty_bl_exit(struct atyfb_par *par)
2253 2261
2254#endif /* CONFIG_FB_ATY_BACKLIGHT */ 2262#endif /* CONFIG_FB_ATY_BACKLIGHT */
2255 2263
2256static void __init aty_calc_mem_refresh(struct atyfb_par *par, int xclk) 2264static void __devinit aty_calc_mem_refresh(struct atyfb_par *par, int xclk)
2257{ 2265{
2258 const int ragepro_tbl[] = { 2266 const int ragepro_tbl[] = {
2259 44, 50, 55, 66, 75, 80, 100 2267 44, 50, 55, 66, 75, 80, 100
@@ -2313,7 +2321,7 @@ static int __devinit atyfb_get_timings_from_lcd(struct atyfb_par *par,
2313} 2321}
2314#endif /* defined(__i386__) && defined(CONFIG_FB_ATY_GENERIC_LCD) */ 2322#endif /* defined(__i386__) && defined(CONFIG_FB_ATY_GENERIC_LCD) */
2315 2323
2316static int __init aty_init(struct fb_info *info, const char *name) 2324static int __devinit aty_init(struct fb_info *info, const char *name)
2317{ 2325{
2318 struct atyfb_par *par = (struct atyfb_par *) info->par; 2326 struct atyfb_par *par = (struct atyfb_par *) info->par;
2319 const char *ramname = NULL, *xtal; 2327 const char *ramname = NULL, *xtal;
@@ -2394,12 +2402,15 @@ static int __init aty_init(struct fb_info *info, const char *name)
2394 break; 2402 break;
2395 } 2403 }
2396 switch (clk_type) { 2404 switch (clk_type) {
2405#ifdef CONFIG_ATARI
2397 case CLK_ATI18818_1: 2406 case CLK_ATI18818_1:
2398 par->pll_ops = &aty_pll_ati18818_1; 2407 par->pll_ops = &aty_pll_ati18818_1;
2399 break; 2408 break;
2409#else
2400 case CLK_IBMRGB514: 2410 case CLK_IBMRGB514:
2401 par->pll_ops = &aty_pll_ibm514; 2411 par->pll_ops = &aty_pll_ibm514;
2402 break; 2412 break;
2413#endif
2403#if 0 /* dead code */ 2414#if 0 /* dead code */
2404 case CLK_STG1703: 2415 case CLK_STG1703:
2405 par->pll_ops = &aty_pll_stg1703; 2416 par->pll_ops = &aty_pll_stg1703;
@@ -2604,7 +2615,11 @@ static int __init aty_init(struct fb_info *info, const char *name)
2604 2615
2605 info->fbops = &atyfb_ops; 2616 info->fbops = &atyfb_ops;
2606 info->pseudo_palette = pseudo_palette; 2617 info->pseudo_palette = pseudo_palette;
2607 info->flags = FBINFO_FLAG_DEFAULT; 2618 info->flags = FBINFO_DEFAULT |
2619 FBINFO_HWACCEL_IMAGEBLIT |
2620 FBINFO_HWACCEL_FILLRECT |
2621 FBINFO_HWACCEL_COPYAREA |
2622 FBINFO_HWACCEL_YPAN;
2608 2623
2609#ifdef CONFIG_PMAC_BACKLIGHT 2624#ifdef CONFIG_PMAC_BACKLIGHT
2610 if (M64_HAS(G3_PB_1_1) && machine_is_compatible("PowerBook1,1")) { 2625 if (M64_HAS(G3_PB_1_1) && machine_is_compatible("PowerBook1,1")) {
@@ -2733,7 +2748,7 @@ aty_init_exit:
2733} 2748}
2734 2749
2735#ifdef CONFIG_ATARI 2750#ifdef CONFIG_ATARI
2736static int __init store_video_par(char *video_str, unsigned char m64_num) 2751static int __devinit store_video_par(char *video_str, unsigned char m64_num)
2737{ 2752{
2738 char *p; 2753 char *p;
2739 unsigned long vmembase, size, guiregbase; 2754 unsigned long vmembase, size, guiregbase;
@@ -3764,7 +3779,7 @@ static struct pci_driver atyfb_driver = {
3764#endif /* CONFIG_PCI */ 3779#endif /* CONFIG_PCI */
3765 3780
3766#ifndef MODULE 3781#ifndef MODULE
3767static int __init atyfb_setup(char *options) 3782static int __devinit atyfb_setup(char *options)
3768{ 3783{
3769 char *this_opt; 3784 char *this_opt;
3770 3785
@@ -3836,7 +3851,7 @@ static int __init atyfb_setup(char *options)
3836} 3851}
3837#endif /* MODULE */ 3852#endif /* MODULE */
3838 3853
3839static int __init atyfb_init(void) 3854static int __devinit atyfb_init(void)
3840{ 3855{
3841#ifndef MODULE 3856#ifndef MODULE
3842 char *option = NULL; 3857 char *option = NULL;
diff --git a/drivers/video/aty/mach64_accel.c b/drivers/video/aty/mach64_accel.c
index c98f4a4421..1490e5e1c2 100644
--- a/drivers/video/aty/mach64_accel.c
+++ b/drivers/video/aty/mach64_accel.c
@@ -200,8 +200,6 @@ void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
200 if (!area->width || !area->height) 200 if (!area->width || !area->height)
201 return; 201 return;
202 if (!par->accel_flags) { 202 if (!par->accel_flags) {
203 if (par->blitter_may_be_busy)
204 wait_for_idle(par);
205 cfb_copyarea(info, area); 203 cfb_copyarea(info, area);
206 return; 204 return;
207 } 205 }
@@ -248,8 +246,6 @@ void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
248 if (!rect->width || !rect->height) 246 if (!rect->width || !rect->height)
249 return; 247 return;
250 if (!par->accel_flags) { 248 if (!par->accel_flags) {
251 if (par->blitter_may_be_busy)
252 wait_for_idle(par);
253 cfb_fillrect(info, rect); 249 cfb_fillrect(info, rect);
254 return; 250 return;
255 } 251 }
@@ -288,14 +284,10 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
288 return; 284 return;
289 if (!par->accel_flags || 285 if (!par->accel_flags ||
290 (image->depth != 1 && info->var.bits_per_pixel != image->depth)) { 286 (image->depth != 1 && info->var.bits_per_pixel != image->depth)) {
291 if (par->blitter_may_be_busy)
292 wait_for_idle(par);
293
294 cfb_imageblit(info, image); 287 cfb_imageblit(info, image);
295 return; 288 return;
296 } 289 }
297 290
298 wait_for_idle(par);
299 pix_width = pix_width_save = aty_ld_le32(DP_PIX_WIDTH, par); 291 pix_width = pix_width_save = aty_ld_le32(DP_PIX_WIDTH, par);
300 host_cntl = aty_ld_le32(HOST_CNTL, par) | HOST_BYTE_ALIGN; 292 host_cntl = aty_ld_le32(HOST_CNTL, par) | HOST_BYTE_ALIGN;
301 293
@@ -425,8 +417,6 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
425 } 417 }
426 } 418 }
427 419
428 wait_for_idle(par);
429
430 /* restore pix_width */ 420 /* restore pix_width */
431 wait_for_fifo(1, par); 421 wait_for_fifo(1, par);
432 aty_st_le32(DP_PIX_WIDTH, pix_width_save, par); 422 aty_st_le32(DP_PIX_WIDTH, pix_width_save, par);
diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
index ad8b7496f8..2a7f381c33 100644
--- a/drivers/video/aty/mach64_cursor.c
+++ b/drivers/video/aty/mach64_cursor.c
@@ -66,11 +66,6 @@ static const u8 cursor_bits_lookup[16] = {
66 0x01, 0x41, 0x11, 0x51, 0x05, 0x45, 0x15, 0x55 66 0x01, 0x41, 0x11, 0x51, 0x05, 0x45, 0x15, 0x55
67}; 67};
68 68
69static const u8 cursor_mask_lookup[16] = {
70 0xaa, 0x2a, 0x8a, 0x0a, 0xa2, 0x22, 0x82, 0x02,
71 0xa8, 0x28, 0x88, 0x08, 0xa0, 0x20, 0x80, 0x00
72};
73
74static int atyfb_cursor(struct fb_info *info, struct fb_cursor *cursor) 69static int atyfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
75{ 70{
76 struct atyfb_par *par = (struct atyfb_par *) info->par; 71 struct atyfb_par *par = (struct atyfb_par *) info->par;
@@ -130,13 +125,13 @@ static int atyfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
130 fg_idx = cursor->image.fg_color; 125 fg_idx = cursor->image.fg_color;
131 bg_idx = cursor->image.bg_color; 126 bg_idx = cursor->image.bg_color;
132 127
133 fg = (info->cmap.red[fg_idx] << 24) | 128 fg = ((info->cmap.red[fg_idx] & 0xff) << 24) |
134 (info->cmap.green[fg_idx] << 16) | 129 ((info->cmap.green[fg_idx] & 0xff) << 16) |
135 (info->cmap.blue[fg_idx] << 8) | 15; 130 ((info->cmap.blue[fg_idx] & 0xff) << 8) | 0xff;
136 131
137 bg = (info->cmap.red[bg_idx] << 24) | 132 bg = ((info->cmap.red[bg_idx] & 0xff) << 24) |
138 (info->cmap.green[bg_idx] << 16) | 133 ((info->cmap.green[bg_idx] & 0xff) << 16) |
139 (info->cmap.blue[bg_idx] << 8); 134 ((info->cmap.blue[bg_idx] & 0xff) << 8);
140 135
141 wait_for_fifo(2, par); 136 wait_for_fifo(2, par);
142 aty_st_le32(CUR_CLR0, bg, par); 137 aty_st_le32(CUR_CLR0, bg, par);
@@ -166,19 +161,17 @@ static int atyfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
166 switch (cursor->rop) { 161 switch (cursor->rop) {
167 case ROP_XOR: 162 case ROP_XOR:
168 // Upper 4 bits of mask data 163 // Upper 4 bits of mask data
169 fb_writeb(cursor_mask_lookup[m >> 4 ] | 164 fb_writeb(cursor_bits_lookup[(b ^ m) >> 4], dst++);
170 cursor_bits_lookup[(b ^ m) >> 4], dst++);
171 // Lower 4 bits of mask 165 // Lower 4 bits of mask
172 fb_writeb(cursor_mask_lookup[m & 0x0f ] | 166 fb_writeb(cursor_bits_lookup[(b ^ m) & 0x0f],
173 cursor_bits_lookup[(b ^ m) & 0x0f], dst++); 167 dst++);
174 break; 168 break;
175 case ROP_COPY: 169 case ROP_COPY:
176 // Upper 4 bits of mask data 170 // Upper 4 bits of mask data
177 fb_writeb(cursor_mask_lookup[m >> 4 ] | 171 fb_writeb(cursor_bits_lookup[(b & m) >> 4], dst++);
178 cursor_bits_lookup[(b & m) >> 4], dst++);
179 // Lower 4 bits of mask 172 // Lower 4 bits of mask
180 fb_writeb(cursor_mask_lookup[m & 0x0f ] | 173 fb_writeb(cursor_bits_lookup[(b & m) & 0x0f],
181 cursor_bits_lookup[(b & m) & 0x0f], dst++); 174 dst++);
182 break; 175 break;
183 } 176 }
184 } 177 }
@@ -194,7 +187,7 @@ static int atyfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
194 return 0; 187 return 0;
195} 188}
196 189
197int __init aty_init_cursor(struct fb_info *info) 190int __devinit aty_init_cursor(struct fb_info *info)
198{ 191{
199 unsigned long addr; 192 unsigned long addr;
200 193
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index c5ecbb02e0..68b15645b8 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -2379,7 +2379,6 @@ err_release_pci0:
2379err_release_fb: 2379err_release_fb:
2380 framebuffer_release(info); 2380 framebuffer_release(info);
2381err_disable: 2381err_disable:
2382 pci_disable_device(pdev);
2383err_out: 2382err_out:
2384 return ret; 2383 return ret;
2385} 2384}
@@ -2436,7 +2435,6 @@ static void __devexit radeonfb_pci_unregister (struct pci_dev *pdev)
2436#endif 2435#endif
2437 fb_dealloc_cmap(&info->cmap); 2436 fb_dealloc_cmap(&info->cmap);
2438 framebuffer_release(info); 2437 framebuffer_release(info);
2439 pci_disable_device(pdev);
2440} 2438}
2441 2439
2442 2440
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
index 789450bb0b..9ef68cd83b 100644
--- a/drivers/video/au1100fb.c
+++ b/drivers/video/au1100fb.c
@@ -7,6 +7,8 @@
7 * Karl Lessard <klessard@sunrisetelecom.com> 7 * Karl Lessard <klessard@sunrisetelecom.com>
8 * <c.pellegrin@exadron.com> 8 * <c.pellegrin@exadron.com>
9 * 9 *
10 * PM support added by Rodolfo Giometti <giometti@linux.it>
11 *
10 * Copyright 2002 MontaVista Software 12 * Copyright 2002 MontaVista Software
11 * Author: MontaVista Software, Inc. 13 * Author: MontaVista Software, Inc.
12 * ppopov@mvista.com or source@mvista.com 14 * ppopov@mvista.com or source@mvista.com
@@ -602,17 +604,52 @@ int au1100fb_drv_remove(struct device *dev)
602 return 0; 604 return 0;
603} 605}
604 606
607#ifdef CONFIG_PM
608static u32 sys_clksrc;
609static struct au1100fb_regs fbregs;
610
605int au1100fb_drv_suspend(struct device *dev, pm_message_t state) 611int au1100fb_drv_suspend(struct device *dev, pm_message_t state)
606{ 612{
607 /* TODO */ 613 struct au1100fb_device *fbdev = dev_get_drvdata(dev);
614
615 if (!fbdev)
616 return 0;
617
618 /* Save the clock source state */
619 sys_clksrc = au_readl(SYS_CLKSRC);
620
621 /* Blank the LCD */
622 au1100fb_fb_blank(VESA_POWERDOWN, &fbdev->info);
623
624 /* Stop LCD clocking */
625 au_writel(sys_clksrc & ~SYS_CS_ML_MASK, SYS_CLKSRC);
626
627 memcpy(&fbregs, fbdev->regs, sizeof(struct au1100fb_regs));
628
608 return 0; 629 return 0;
609} 630}
610 631
611int au1100fb_drv_resume(struct device *dev) 632int au1100fb_drv_resume(struct device *dev)
612{ 633{
613 /* TODO */ 634 struct au1100fb_device *fbdev = dev_get_drvdata(dev);
635
636 if (!fbdev)
637 return 0;
638
639 memcpy(fbdev->regs, &fbregs, sizeof(struct au1100fb_regs));
640
641 /* Restart LCD clocking */
642 au_writel(sys_clksrc, SYS_CLKSRC);
643
644 /* Unblank the LCD */
645 au1100fb_fb_blank(VESA_NO_BLANKING, &fbdev->info);
646
614 return 0; 647 return 0;
615} 648}
649#else
650#define au1100fb_drv_suspend NULL
651#define au1100fb_drv_resume NULL
652#endif
616 653
617static struct device_driver au1100fb_driver = { 654static struct device_driver au1100fb_driver = {
618 .name = "au1100-lcd", 655 .name = "au1100-lcd",
@@ -706,8 +743,7 @@ void __exit au1100fb_cleanup(void)
706{ 743{
707 driver_unregister(&au1100fb_driver); 744 driver_unregister(&au1100fb_driver);
708 745
709 if (drv_info.opt_mode) 746 kfree(drv_info.opt_mode);
710 kfree(drv_info.opt_mode);
711} 747}
712 748
713module_init(au1100fb_init); 749module_init(au1100fb_init);
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index b895eaaa73..022f9d3473 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -10,7 +10,7 @@ menuconfig BACKLIGHT_LCD_SUPPORT
10 10
11config BACKLIGHT_CLASS_DEVICE 11config BACKLIGHT_CLASS_DEVICE
12 tristate "Lowlevel Backlight controls" 12 tristate "Lowlevel Backlight controls"
13 depends on BACKLIGHT_LCD_SUPPORT 13 depends on BACKLIGHT_LCD_SUPPORT && FB
14 default m 14 default m
15 help 15 help
16 This framework adds support for low-level control of the LCD 16 This framework adds support for low-level control of the LCD
@@ -26,7 +26,7 @@ config BACKLIGHT_DEVICE
26 26
27config LCD_CLASS_DEVICE 27config LCD_CLASS_DEVICE
28 tristate "Lowlevel LCD controls" 28 tristate "Lowlevel LCD controls"
29 depends on BACKLIGHT_LCD_SUPPORT 29 depends on BACKLIGHT_LCD_SUPPORT && FB
30 default m 30 default m
31 help 31 help
32 This framework adds support for low-level control of LCD. 32 This framework adds support for low-level control of LCD.
@@ -50,6 +50,14 @@ config BACKLIGHT_CORGI
50 If you have a Sharp Zaurus SL-C7xx, SL-Cxx00 or SL-6000x say y to enable the 50 If you have a Sharp Zaurus SL-C7xx, SL-Cxx00 or SL-6000x say y to enable the
51 backlight driver. 51 backlight driver.
52 52
53config BACKLIGHT_LOCOMO
54 tristate "Sharp LOCOMO LCD/Backlight Driver"
55 depends on BACKLIGHT_DEVICE && SHARP_LOCOMO
56 default y
57 help
58 If you have a Sharp Zaurus SL-5500 (Collie) or SL-5600 (Poodle) say y to
59 enable the LCD/backlight driver.
60
53config BACKLIGHT_HP680 61config BACKLIGHT_HP680
54 tristate "HP Jornada 680 Backlight Driver" 62 tristate "HP Jornada 680 Backlight Driver"
55 depends on BACKLIGHT_DEVICE && SH_HP6XX 63 depends on BACKLIGHT_DEVICE && SH_HP6XX
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 744210c38e..65e5553fc8 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -4,4 +4,4 @@ obj-$(CONFIG_LCD_CLASS_DEVICE) += lcd.o
4obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o 4obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o
5obj-$(CONFIG_BACKLIGHT_CORGI) += corgi_bl.o 5obj-$(CONFIG_BACKLIGHT_CORGI) += corgi_bl.o
6obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o 6obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o
7obj-$(CONFIG_SHARP_LOCOMO) += locomolcd.o 7obj-$(CONFIG_BACKLIGHT_LOCOMO) += locomolcd.o
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
index a71e984c93..ffc72ae3ad 100644
--- a/drivers/video/backlight/hp680_bl.c
+++ b/drivers/video/backlight/hp680_bl.c
@@ -27,7 +27,7 @@
27 27
28static int hp680bl_suspended; 28static int hp680bl_suspended;
29static int current_intensity = 0; 29static int current_intensity = 0;
30static spinlock_t bl_lock = SPIN_LOCK_UNLOCKED; 30static DEFINE_SPINLOCK(bl_lock);
31static struct backlight_device *hp680_backlight_device; 31static struct backlight_device *hp680_backlight_device;
32 32
33static void hp680bl_send_intensity(struct backlight_device *bd) 33static void hp680bl_send_intensity(struct backlight_device *bd)
diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
index 60831bb236..bd879b7ec1 100644
--- a/drivers/video/backlight/locomolcd.c
+++ b/drivers/video/backlight/locomolcd.c
@@ -17,6 +17,8 @@
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <linux/device.h> 18#include <linux/device.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/fb.h>
21#include <linux/backlight.h>
20 22
21#include <asm/hardware/locomo.h> 23#include <asm/hardware/locomo.h>
22#include <asm/irq.h> 24#include <asm/irq.h>
@@ -25,7 +27,10 @@
25 27
26#include "../../../arch/arm/mach-sa1100/generic.h" 28#include "../../../arch/arm/mach-sa1100/generic.h"
27 29
30static struct backlight_device *locomolcd_bl_device;
28static struct locomo_dev *locomolcd_dev; 31static struct locomo_dev *locomolcd_dev;
32static unsigned long locomolcd_flags;
33#define LOCOMOLCD_SUSPENDED 0x01
29 34
30static void locomolcd_on(int comadj) 35static void locomolcd_on(int comadj)
31{ 36{
@@ -89,12 +94,10 @@ void locomolcd_power(int on)
89 } 94 }
90 95
91 /* read comadj */ 96 /* read comadj */
92 if (comadj == -1) { 97 if (comadj == -1 && machine_is_collie())
93 if (machine_is_poodle()) 98 comadj = 128;
94 comadj = 118; 99 if (comadj == -1 && machine_is_poodle())
95 if (machine_is_collie()) 100 comadj = 118;
96 comadj = 128;
97 }
98 101
99 if (on) 102 if (on)
100 locomolcd_on(comadj); 103 locomolcd_on(comadj);
@@ -105,26 +108,100 @@ void locomolcd_power(int on)
105} 108}
106EXPORT_SYMBOL(locomolcd_power); 109EXPORT_SYMBOL(locomolcd_power);
107 110
108static int poodle_lcd_probe(struct locomo_dev *dev) 111
112static int current_intensity;
113
114static int locomolcd_set_intensity(struct backlight_device *bd)
115{
116 int intensity = bd->props->brightness;
117
118 if (bd->props->power != FB_BLANK_UNBLANK)
119 intensity = 0;
120 if (bd->props->fb_blank != FB_BLANK_UNBLANK)
121 intensity = 0;
122 if (locomolcd_flags & LOCOMOLCD_SUSPENDED)
123 intensity = 0;
124
125 switch (intensity) {
126 /* AC and non-AC are handled differently, but produce same results in sharp code? */
127 case 0: locomo_frontlight_set(locomolcd_dev, 0, 0, 161); break;
128 case 1: locomo_frontlight_set(locomolcd_dev, 117, 0, 161); break;
129 case 2: locomo_frontlight_set(locomolcd_dev, 163, 0, 148); break;
130 case 3: locomo_frontlight_set(locomolcd_dev, 194, 0, 161); break;
131 case 4: locomo_frontlight_set(locomolcd_dev, 194, 1, 161); break;
132
133 default:
134 return -ENODEV;
135 }
136 current_intensity = intensity;
137 return 0;
138}
139
140static int locomolcd_get_intensity(struct backlight_device *bd)
141{
142 return current_intensity;
143}
144
145static struct backlight_properties locomobl_data = {
146 .owner = THIS_MODULE,
147 .get_brightness = locomolcd_get_intensity,
148 .update_status = locomolcd_set_intensity,
149 .max_brightness = 4,
150};
151
152#ifdef CONFIG_PM
153static int locomolcd_suspend(struct locomo_dev *dev, pm_message_t state)
154{
155 locomolcd_flags |= LOCOMOLCD_SUSPENDED;
156 locomolcd_set_intensity(locomolcd_bl_device);
157 return 0;
158}
159
160static int locomolcd_resume(struct locomo_dev *dev)
161{
162 locomolcd_flags &= ~LOCOMOLCD_SUSPENDED;
163 locomolcd_set_intensity(locomolcd_bl_device);
164 return 0;
165}
166#else
167#define locomolcd_suspend NULL
168#define locomolcd_resume NULL
169#endif
170
171static int locomolcd_probe(struct locomo_dev *dev)
109{ 172{
110 unsigned long flags; 173 unsigned long flags;
111 174
112 local_irq_save(flags); 175 local_irq_save(flags);
113 locomolcd_dev = dev; 176 locomolcd_dev = dev;
114 177
178 locomo_gpio_set_dir(dev, LOCOMO_GPIO_FL_VR, 0);
179
115 /* the poodle_lcd_power function is called for the first time 180 /* the poodle_lcd_power function is called for the first time
116 * from fs_initcall, which is before locomo is activated. 181 * from fs_initcall, which is before locomo is activated.
117 * We need to recall poodle_lcd_power here*/ 182 * We need to recall poodle_lcd_power here*/
118#ifdef CONFIG_MACH_POODLE 183 if (machine_is_poodle())
119 locomolcd_power(1); 184 locomolcd_power(1);
120#endif 185
121 local_irq_restore(flags); 186 local_irq_restore(flags);
187
188 locomolcd_bl_device = backlight_device_register("locomo-bl", NULL, &locomobl_data);
189
190 if (IS_ERR (locomolcd_bl_device))
191 return PTR_ERR (locomolcd_bl_device);
192
193 /* Set up frontlight so that screen is readable */
194 locomobl_data.brightness = 2;
195 locomolcd_set_intensity(locomolcd_bl_device);
196
122 return 0; 197 return 0;
123} 198}
124 199
125static int poodle_lcd_remove(struct locomo_dev *dev) 200static int locomolcd_remove(struct locomo_dev *dev)
126{ 201{
127 unsigned long flags; 202 unsigned long flags;
203
204 backlight_device_unregister(locomolcd_bl_device);
128 local_irq_save(flags); 205 local_irq_save(flags);
129 locomolcd_dev = NULL; 206 locomolcd_dev = NULL;
130 local_irq_restore(flags); 207 local_irq_restore(flags);
@@ -136,19 +213,33 @@ static struct locomo_driver poodle_lcd_driver = {
136 .name = "locomo-backlight", 213 .name = "locomo-backlight",
137 }, 214 },
138 .devid = LOCOMO_DEVID_BACKLIGHT, 215 .devid = LOCOMO_DEVID_BACKLIGHT,
139 .probe = poodle_lcd_probe, 216 .probe = locomolcd_probe,
140 .remove = poodle_lcd_remove, 217 .remove = locomolcd_remove,
218 .suspend = locomolcd_suspend,
219 .resume = locomolcd_resume,
141}; 220};
142 221
143static int __init poodle_lcd_init(void) 222
223static int __init locomolcd_init(void)
144{ 224{
145 int ret = locomo_driver_register(&poodle_lcd_driver); 225 int ret = locomo_driver_register(&poodle_lcd_driver);
146 if (ret) return ret; 226 if (ret)
227 return ret;
147 228
148#ifdef CONFIG_SA1100_COLLIE 229#ifdef CONFIG_SA1100_COLLIE
149 sa1100fb_lcd_power = locomolcd_power; 230 sa1100fb_lcd_power = locomolcd_power;
150#endif 231#endif
151 return 0; 232 return 0;
152} 233}
153device_initcall(poodle_lcd_init);
154 234
235static void __exit locomolcd_exit(void)
236{
237 locomo_driver_unregister(&poodle_lcd_driver);
238}
239
240module_init(locomolcd_init);
241module_exit(locomolcd_exit);
242
243MODULE_AUTHOR("John Lenz <lenz@cs.wisc.edu>, Pavel Machek <pavel@suse.cz>");
244MODULE_DESCRIPTION("Collie LCD driver");
245MODULE_LICENSE("GPL");
diff --git a/drivers/video/cfbimgblt.c b/drivers/video/cfbimgblt.c
index 8ba6152db2..ad8a89bf8e 100644
--- a/drivers/video/cfbimgblt.c
+++ b/drivers/video/cfbimgblt.c
@@ -230,6 +230,7 @@ static inline void fast_imageblit(const struct fb_image *image, struct fb_info *
230 tab = cfb_tab16; 230 tab = cfb_tab16;
231 break; 231 break;
232 case 32: 232 case 32:
233 default:
233 tab = cfb_tab32; 234 tab = cfb_tab32;
234 break; 235 break;
235 } 236 }
diff --git a/drivers/video/cirrusfb.c b/drivers/video/cirrusfb.c
index 1103010af5..dda240eb73 100644
--- a/drivers/video/cirrusfb.c
+++ b/drivers/video/cirrusfb.c
@@ -2227,7 +2227,6 @@ static void cirrusfb_pci_unmap (struct cirrusfb_info *cinfo)
2227 release_region(0x3C0, 32); 2227 release_region(0x3C0, 32);
2228 pci_release_regions(pdev); 2228 pci_release_regions(pdev);
2229 framebuffer_release(cinfo->info); 2229 framebuffer_release(cinfo->info);
2230 pci_disable_device(pdev);
2231} 2230}
2232#endif /* CONFIG_PCI */ 2231#endif /* CONFIG_PCI */
2233 2232
@@ -2458,7 +2457,6 @@ err_release_regions:
2458err_release_fb: 2457err_release_fb:
2459 framebuffer_release(info); 2458 framebuffer_release(info);
2460err_disable: 2459err_disable:
2461 pci_disable_device(pdev);
2462err_out: 2460err_out:
2463 return ret; 2461 return ret;
2464} 2462}
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 47ba1a79ad..5dc4083552 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -125,6 +125,8 @@ static int softback_lines;
125static int first_fb_vc; 125static int first_fb_vc;
126static int last_fb_vc = MAX_NR_CONSOLES - 1; 126static int last_fb_vc = MAX_NR_CONSOLES - 1;
127static int fbcon_is_default = 1; 127static int fbcon_is_default = 1;
128static int fbcon_has_exited;
129
128/* font data */ 130/* font data */
129static char fontname[40]; 131static char fontname[40];
130 132
@@ -140,7 +142,6 @@ static const struct consw fb_con;
140 142
141#define advance_row(p, delta) (unsigned short *)((unsigned long)(p) + (delta) * vc->vc_size_row) 143#define advance_row(p, delta) (unsigned short *)((unsigned long)(p) + (delta) * vc->vc_size_row)
142 144
143static void fbcon_free_font(struct display *);
144static int fbcon_set_origin(struct vc_data *); 145static int fbcon_set_origin(struct vc_data *);
145 146
146#define CURSOR_DRAW_DELAY (1) 147#define CURSOR_DRAW_DELAY (1)
@@ -194,6 +195,9 @@ static void fbcon_redraw_move(struct vc_data *vc, struct display *p,
194 int line, int count, int dy); 195 int line, int count, int dy);
195static void fbcon_modechanged(struct fb_info *info); 196static void fbcon_modechanged(struct fb_info *info);
196static void fbcon_set_all_vcs(struct fb_info *info); 197static void fbcon_set_all_vcs(struct fb_info *info);
198static void fbcon_start(void);
199static void fbcon_exit(void);
200static struct class_device *fbcon_class_device;
197 201
198#ifdef CONFIG_MAC 202#ifdef CONFIG_MAC
199/* 203/*
@@ -252,7 +256,7 @@ static void fbcon_rotate_all(struct fb_info *info, u32 rotate)
252 if (!ops || ops->currcon < 0 || rotate > 3) 256 if (!ops || ops->currcon < 0 || rotate > 3)
253 return; 257 return;
254 258
255 for (i = 0; i < MAX_NR_CONSOLES; i++) { 259 for (i = first_fb_vc; i <= last_fb_vc; i++) {
256 vc = vc_cons[i].d; 260 vc = vc_cons[i].d;
257 if (!vc || vc->vc_mode != KD_TEXT || 261 if (!vc || vc->vc_mode != KD_TEXT ||
258 registered_fb[con2fb_map[i]] != info) 262 registered_fb[con2fb_map[i]] != info)
@@ -389,15 +393,18 @@ static void fb_flashcursor(void *private)
389 int c; 393 int c;
390 int mode; 394 int mode;
391 395
392 if (ops->currcon != -1) 396 acquire_console_sem();
397 if (ops && ops->currcon != -1)
393 vc = vc_cons[ops->currcon].d; 398 vc = vc_cons[ops->currcon].d;
394 399
395 if (!vc || !CON_IS_VISIBLE(vc) || 400 if (!vc || !CON_IS_VISIBLE(vc) ||
396 fbcon_is_inactive(vc, info) || 401 fbcon_is_inactive(vc, info) ||
397 registered_fb[con2fb_map[vc->vc_num]] != info || 402 registered_fb[con2fb_map[vc->vc_num]] != info ||
398 vc_cons[ops->currcon].d->vc_deccm != 1) 403 vc_cons[ops->currcon].d->vc_deccm != 1) {
404 release_console_sem();
399 return; 405 return;
400 acquire_console_sem(); 406 }
407
401 p = &fb_display[vc->vc_num]; 408 p = &fb_display[vc->vc_num];
402 c = scr_readw((u16 *) vc->vc_pos); 409 c = scr_readw((u16 *) vc->vc_pos);
403 mode = (!ops->cursor_flash || ops->cursor_state.enable) ? 410 mode = (!ops->cursor_flash || ops->cursor_state.enable) ?
@@ -528,7 +535,7 @@ static int search_fb_in_map(int idx)
528{ 535{
529 int i, retval = 0; 536 int i, retval = 0;
530 537
531 for (i = 0; i < MAX_NR_CONSOLES; i++) { 538 for (i = first_fb_vc; i <= last_fb_vc; i++) {
532 if (con2fb_map[i] == idx) 539 if (con2fb_map[i] == idx)
533 retval = 1; 540 retval = 1;
534 } 541 }
@@ -539,7 +546,7 @@ static int search_for_mapped_con(void)
539{ 546{
540 int i, retval = 0; 547 int i, retval = 0;
541 548
542 for (i = 0; i < MAX_NR_CONSOLES; i++) { 549 for (i = first_fb_vc; i <= last_fb_vc; i++) {
543 if (con2fb_map[i] != -1) 550 if (con2fb_map[i] != -1)
544 retval = 1; 551 retval = 1;
545 } 552 }
@@ -561,6 +568,7 @@ static int fbcon_takeover(int show_logo)
561 568
562 err = take_over_console(&fb_con, first_fb_vc, last_fb_vc, 569 err = take_over_console(&fb_con, first_fb_vc, last_fb_vc,
563 fbcon_is_default); 570 fbcon_is_default);
571
564 if (err) { 572 if (err) {
565 for (i = first_fb_vc; i <= last_fb_vc; i++) { 573 for (i = first_fb_vc; i <= last_fb_vc; i++) {
566 con2fb_map[i] = -1; 574 con2fb_map[i] = -1;
@@ -795,8 +803,8 @@ static int set_con2fb_map(int unit, int newidx, int user)
795 if (oldidx == newidx) 803 if (oldidx == newidx)
796 return 0; 804 return 0;
797 805
798 if (!info) 806 if (!info || fbcon_has_exited)
799 err = -EINVAL; 807 return -EINVAL;
800 808
801 if (!err && !search_for_mapped_con()) { 809 if (!err && !search_for_mapped_con()) {
802 info_idx = newidx; 810 info_idx = newidx;
@@ -832,6 +840,9 @@ static int set_con2fb_map(int unit, int newidx, int user)
832 con2fb_init_display(vc, info, unit, show_logo); 840 con2fb_init_display(vc, info, unit, show_logo);
833 } 841 }
834 842
843 if (!search_fb_in_map(info_idx))
844 info_idx = newidx;
845
835 release_console_sem(); 846 release_console_sem();
836 return err; 847 return err;
837} 848}
@@ -1034,6 +1045,7 @@ static const char *fbcon_startup(void)
1034#endif /* CONFIG_MAC */ 1045#endif /* CONFIG_MAC */
1035 1046
1036 fbcon_add_cursor_timer(info); 1047 fbcon_add_cursor_timer(info);
1048 fbcon_has_exited = 0;
1037 return display_desc; 1049 return display_desc;
1038} 1050}
1039 1051
@@ -1061,17 +1073,36 @@ static void fbcon_init(struct vc_data *vc, int init)
1061 1073
1062 /* If we are not the first console on this 1074 /* If we are not the first console on this
1063 fb, copy the font from that console */ 1075 fb, copy the font from that console */
1064 t = &fb_display[svc->vc_num]; 1076 t = &fb_display[fg_console];
1065 if (!vc->vc_font.data) { 1077 if (!p->fontdata) {
1066 vc->vc_font.data = (void *)(p->fontdata = t->fontdata); 1078 if (t->fontdata) {
1067 vc->vc_font.width = (*default_mode)->vc_font.width; 1079 struct vc_data *fvc = vc_cons[fg_console].d;
1068 vc->vc_font.height = (*default_mode)->vc_font.height; 1080
1069 p->userfont = t->userfont; 1081 vc->vc_font.data = (void *)(p->fontdata =
1070 if (p->userfont) 1082 fvc->vc_font.data);
1071 REFCOUNT(p->fontdata)++; 1083 vc->vc_font.width = fvc->vc_font.width;
1084 vc->vc_font.height = fvc->vc_font.height;
1085 p->userfont = t->userfont;
1086
1087 if (p->userfont)
1088 REFCOUNT(p->fontdata)++;
1089 } else {
1090 const struct font_desc *font = NULL;
1091
1092 if (!fontname[0] || !(font = find_font(fontname)))
1093 font = get_default_font(info->var.xres,
1094 info->var.yres);
1095 vc->vc_font.width = font->width;
1096 vc->vc_font.height = font->height;
1097 vc->vc_font.data = (void *)(p->fontdata = font->data);
1098 vc->vc_font.charcount = 256; /* FIXME Need to
1099 support more fonts */
1100 }
1072 } 1101 }
1102
1073 if (p->userfont) 1103 if (p->userfont)
1074 charcnt = FNTCHARCNT(p->fontdata); 1104 charcnt = FNTCHARCNT(p->fontdata);
1105
1075 vc->vc_can_do_color = (fb_get_color_depth(&info->var, &info->fix)!=1); 1106 vc->vc_can_do_color = (fb_get_color_depth(&info->var, &info->fix)!=1);
1076 vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800; 1107 vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800;
1077 if (charcnt == 256) { 1108 if (charcnt == 256) {
@@ -1145,13 +1176,47 @@ static void fbcon_init(struct vc_data *vc, int init)
1145 ops->p = &fb_display[fg_console]; 1176 ops->p = &fb_display[fg_console];
1146} 1177}
1147 1178
1179static void fbcon_free_font(struct display *p)
1180{
1181 if (p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0))
1182 kfree(p->fontdata - FONT_EXTRA_WORDS * sizeof(int));
1183 p->fontdata = NULL;
1184 p->userfont = 0;
1185}
1186
1148static void fbcon_deinit(struct vc_data *vc) 1187static void fbcon_deinit(struct vc_data *vc)
1149{ 1188{
1150 struct display *p = &fb_display[vc->vc_num]; 1189 struct display *p = &fb_display[vc->vc_num];
1190 struct fb_info *info;
1191 struct fbcon_ops *ops;
1192 int idx;
1151 1193
1152 if (info_idx != -1)
1153 return;
1154 fbcon_free_font(p); 1194 fbcon_free_font(p);
1195 idx = con2fb_map[vc->vc_num];
1196
1197 if (idx == -1)
1198 goto finished;
1199
1200 info = registered_fb[idx];
1201
1202 if (!info)
1203 goto finished;
1204
1205 ops = info->fbcon_par;
1206
1207 if (!ops)
1208 goto finished;
1209
1210 if (CON_IS_VISIBLE(vc))
1211 fbcon_del_cursor_timer(info);
1212
1213 ops->flags &= ~FBCON_FLAGS_INIT;
1214finished:
1215
1216 if (!con_is_bound(&fb_con))
1217 fbcon_exit();
1218
1219 return;
1155} 1220}
1156 1221
1157/* ====================================================================== */ 1222/* ====================================================================== */
@@ -2099,12 +2164,11 @@ static int fbcon_switch(struct vc_data *vc)
2099 if (info->fbops->fb_set_par) 2164 if (info->fbops->fb_set_par)
2100 info->fbops->fb_set_par(info); 2165 info->fbops->fb_set_par(info);
2101 2166
2102 if (old_info != info) { 2167 if (old_info != info)
2103 fbcon_del_cursor_timer(old_info); 2168 fbcon_del_cursor_timer(old_info);
2104 fbcon_add_cursor_timer(info);
2105 }
2106 } 2169 }
2107 2170
2171 fbcon_add_cursor_timer(info);
2108 set_blitting_type(vc, info); 2172 set_blitting_type(vc, info);
2109 ops->cursor_reset = 1; 2173 ops->cursor_reset = 1;
2110 2174
@@ -2222,14 +2286,6 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
2222 return 0; 2286 return 0;
2223} 2287}
2224 2288
2225static void fbcon_free_font(struct display *p)
2226{
2227 if (p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0))
2228 kfree(p->fontdata - FONT_EXTRA_WORDS * sizeof(int));
2229 p->fontdata = NULL;
2230 p->userfont = 0;
2231}
2232
2233static int fbcon_get_font(struct vc_data *vc, struct console_font *font) 2289static int fbcon_get_font(struct vc_data *vc, struct console_font *font)
2234{ 2290{
2235 u8 *fontdata = vc->vc_font.data; 2291 u8 *fontdata = vc->vc_font.data;
@@ -2443,7 +2499,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font, unsigne
2443 2499
2444 FNTSUM(new_data) = csum; 2500 FNTSUM(new_data) = csum;
2445 /* Check if the same font is on some other console already */ 2501 /* Check if the same font is on some other console already */
2446 for (i = 0; i < MAX_NR_CONSOLES; i++) { 2502 for (i = first_fb_vc; i <= last_fb_vc; i++) {
2447 struct vc_data *tmp = vc_cons[i].d; 2503 struct vc_data *tmp = vc_cons[i].d;
2448 2504
2449 if (fb_display[i].userfont && 2505 if (fb_display[i].userfont &&
@@ -2768,7 +2824,7 @@ static void fbcon_set_all_vcs(struct fb_info *info)
2768 if (!ops || ops->currcon < 0) 2824 if (!ops || ops->currcon < 0)
2769 return; 2825 return;
2770 2826
2771 for (i = 0; i < MAX_NR_CONSOLES; i++) { 2827 for (i = first_fb_vc; i <= last_fb_vc; i++) {
2772 vc = vc_cons[i].d; 2828 vc = vc_cons[i].d;
2773 if (!vc || vc->vc_mode != KD_TEXT || 2829 if (!vc || vc->vc_mode != KD_TEXT ||
2774 registered_fb[con2fb_map[i]] != info) 2830 registered_fb[con2fb_map[i]] != info)
@@ -2830,22 +2886,57 @@ static int fbcon_mode_deleted(struct fb_info *info,
2830 return found; 2886 return found;
2831} 2887}
2832 2888
2889static int fbcon_fb_unregistered(int idx)
2890{
2891 int i;
2892
2893 for (i = first_fb_vc; i <= last_fb_vc; i++) {
2894 if (con2fb_map[i] == idx)
2895 con2fb_map[i] = -1;
2896 }
2897
2898 if (idx == info_idx) {
2899 info_idx = -1;
2900
2901 for (i = 0; i < FB_MAX; i++) {
2902 if (registered_fb[i] != NULL) {
2903 info_idx = i;
2904 break;
2905 }
2906 }
2907 }
2908
2909 if (info_idx != -1) {
2910 for (i = first_fb_vc; i <= last_fb_vc; i++) {
2911 if (con2fb_map[i] == -1)
2912 con2fb_map[i] = info_idx;
2913 }
2914 }
2915
2916 if (!num_registered_fb)
2917 unregister_con_driver(&fb_con);
2918
2919 return 0;
2920}
2921
2833static int fbcon_fb_registered(int idx) 2922static int fbcon_fb_registered(int idx)
2834{ 2923{
2835 int ret = 0, i; 2924 int ret = 0, i;
2836 2925
2837 if (info_idx == -1) { 2926 if (info_idx == -1) {
2838 for (i = 0; i < MAX_NR_CONSOLES; i++) { 2927 for (i = first_fb_vc; i <= last_fb_vc; i++) {
2839 if (con2fb_map_boot[i] == idx) { 2928 if (con2fb_map_boot[i] == idx) {
2840 info_idx = idx; 2929 info_idx = idx;
2841 break; 2930 break;
2842 } 2931 }
2843 } 2932 }
2933
2844 if (info_idx != -1) 2934 if (info_idx != -1)
2845 ret = fbcon_takeover(1); 2935 ret = fbcon_takeover(1);
2846 } else { 2936 } else {
2847 for (i = 0; i < MAX_NR_CONSOLES; i++) { 2937 for (i = first_fb_vc; i <= last_fb_vc; i++) {
2848 if (con2fb_map_boot[i] == idx) 2938 if (con2fb_map_boot[i] == idx &&
2939 con2fb_map[i] == -1)
2849 set_con2fb_map(i, idx, 0); 2940 set_con2fb_map(i, idx, 0);
2850 } 2941 }
2851 } 2942 }
@@ -2882,7 +2973,7 @@ static void fbcon_new_modelist(struct fb_info *info)
2882 struct fb_var_screeninfo var; 2973 struct fb_var_screeninfo var;
2883 struct fb_videomode *mode; 2974 struct fb_videomode *mode;
2884 2975
2885 for (i = 0; i < MAX_NR_CONSOLES; i++) { 2976 for (i = first_fb_vc; i <= last_fb_vc; i++) {
2886 if (registered_fb[con2fb_map[i]] != info) 2977 if (registered_fb[con2fb_map[i]] != info)
2887 continue; 2978 continue;
2888 if (!fb_display[i].mode) 2979 if (!fb_display[i].mode)
@@ -2910,6 +3001,14 @@ static int fbcon_event_notify(struct notifier_block *self,
2910 struct fb_con2fbmap *con2fb; 3001 struct fb_con2fbmap *con2fb;
2911 int ret = 0; 3002 int ret = 0;
2912 3003
3004 /*
3005 * ignore all events except driver registration and deregistration
3006 * if fbcon is not active
3007 */
3008 if (fbcon_has_exited && !(action == FB_EVENT_FB_REGISTERED ||
3009 action == FB_EVENT_FB_UNREGISTERED))
3010 goto done;
3011
2913 switch(action) { 3012 switch(action) {
2914 case FB_EVENT_SUSPEND: 3013 case FB_EVENT_SUSPEND:
2915 fbcon_suspended(info); 3014 fbcon_suspended(info);
@@ -2930,6 +3029,9 @@ static int fbcon_event_notify(struct notifier_block *self,
2930 case FB_EVENT_FB_REGISTERED: 3029 case FB_EVENT_FB_REGISTERED:
2931 ret = fbcon_fb_registered(info->node); 3030 ret = fbcon_fb_registered(info->node);
2932 break; 3031 break;
3032 case FB_EVENT_FB_UNREGISTERED:
3033 ret = fbcon_fb_unregistered(info->node);
3034 break;
2933 case FB_EVENT_SET_CONSOLE_MAP: 3035 case FB_EVENT_SET_CONSOLE_MAP:
2934 con2fb = event->data; 3036 con2fb = event->data;
2935 ret = set_con2fb_map(con2fb->console - 1, 3037 ret = set_con2fb_map(con2fb->console - 1,
@@ -2945,16 +3047,9 @@ static int fbcon_event_notify(struct notifier_block *self,
2945 case FB_EVENT_NEW_MODELIST: 3047 case FB_EVENT_NEW_MODELIST:
2946 fbcon_new_modelist(info); 3048 fbcon_new_modelist(info);
2947 break; 3049 break;
2948 case FB_EVENT_SET_CON_ROTATE:
2949 fbcon_rotate(info, *(int *)event->data);
2950 break;
2951 case FB_EVENT_GET_CON_ROTATE:
2952 ret = fbcon_get_rotate(info);
2953 break;
2954 case FB_EVENT_SET_CON_ROTATE_ALL:
2955 fbcon_rotate_all(info, *(int *)event->data);
2956 } 3050 }
2957 3051
3052done:
2958 return ret; 3053 return ret;
2959} 3054}
2960 3055
@@ -2992,27 +3087,181 @@ static struct notifier_block fbcon_event_notifier = {
2992 .notifier_call = fbcon_event_notify, 3087 .notifier_call = fbcon_event_notify,
2993}; 3088};
2994 3089
2995static int __init fb_console_init(void) 3090static ssize_t store_rotate(struct class_device *class_device,
3091 const char *buf, size_t count)
2996{ 3092{
2997 int i; 3093 struct fb_info *info;
3094 int rotate, idx;
3095 char **last = NULL;
3096
3097 if (fbcon_has_exited)
3098 return count;
2998 3099
2999 acquire_console_sem(); 3100 acquire_console_sem();
3000 fb_register_client(&fbcon_event_notifier); 3101 idx = con2fb_map[fg_console];
3102
3103 if (idx == -1 || registered_fb[idx] == NULL)
3104 goto err;
3105
3106 info = registered_fb[idx];
3107 rotate = simple_strtoul(buf, last, 0);
3108 fbcon_rotate(info, rotate);
3109err:
3001 release_console_sem(); 3110 release_console_sem();
3111 return count;
3112}
3002 3113
3003 for (i = 0; i < MAX_NR_CONSOLES; i++) 3114static ssize_t store_rotate_all(struct class_device *class_device,
3004 con2fb_map[i] = -1; 3115 const char *buf, size_t count)
3116{
3117 struct fb_info *info;
3118 int rotate, idx;
3119 char **last = NULL;
3120
3121 if (fbcon_has_exited)
3122 return count;
3123
3124 acquire_console_sem();
3125 idx = con2fb_map[fg_console];
3126
3127 if (idx == -1 || registered_fb[idx] == NULL)
3128 goto err;
3005 3129
3130 info = registered_fb[idx];
3131 rotate = simple_strtoul(buf, last, 0);
3132 fbcon_rotate_all(info, rotate);
3133err:
3134 release_console_sem();
3135 return count;
3136}
3137
3138static ssize_t show_rotate(struct class_device *class_device, char *buf)
3139{
3140 struct fb_info *info;
3141 int rotate = 0, idx;
3142
3143 if (fbcon_has_exited)
3144 return 0;
3145
3146 acquire_console_sem();
3147 idx = con2fb_map[fg_console];
3148
3149 if (idx == -1 || registered_fb[idx] == NULL)
3150 goto err;
3151
3152 info = registered_fb[idx];
3153 rotate = fbcon_get_rotate(info);
3154err:
3155 release_console_sem();
3156 return snprintf(buf, PAGE_SIZE, "%d\n", rotate);
3157}
3158
3159static struct class_device_attribute class_device_attrs[] = {
3160 __ATTR(rotate, S_IRUGO|S_IWUSR, show_rotate, store_rotate),
3161 __ATTR(rotate_all, S_IWUSR, NULL, store_rotate_all),
3162};
3163
3164static int fbcon_init_class_device(void)
3165{
3166 int i;
3167
3168 for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++)
3169 class_device_create_file(fbcon_class_device,
3170 &class_device_attrs[i]);
3171 return 0;
3172}
3173
3174static void fbcon_start(void)
3175{
3006 if (num_registered_fb) { 3176 if (num_registered_fb) {
3177 int i;
3178
3179 acquire_console_sem();
3180
3007 for (i = 0; i < FB_MAX; i++) { 3181 for (i = 0; i < FB_MAX; i++) {
3008 if (registered_fb[i] != NULL) { 3182 if (registered_fb[i] != NULL) {
3009 info_idx = i; 3183 info_idx = i;
3010 break; 3184 break;
3011 } 3185 }
3012 } 3186 }
3187
3188 release_console_sem();
3013 fbcon_takeover(0); 3189 fbcon_takeover(0);
3014 } 3190 }
3191}
3192
3193static void fbcon_exit(void)
3194{
3195 struct fb_info *info;
3196 int i, j, mapped;
3197
3198 if (fbcon_has_exited)
3199 return;
3200
3201#ifdef CONFIG_ATARI
3202 free_irq(IRQ_AUTO_4, fbcon_vbl_handler);
3203#endif
3204#ifdef CONFIG_MAC
3205 if (MACH_IS_MAC && vbl_detected)
3206 free_irq(IRQ_MAC_VBL, fbcon_vbl_handler);
3207#endif
3208
3209 kfree((void *)softback_buf);
3210 softback_buf = 0UL;
3211
3212 for (i = 0; i < FB_MAX; i++) {
3213 mapped = 0;
3214 info = registered_fb[i];
3215
3216 if (info == NULL)
3217 continue;
3218
3219 for (j = first_fb_vc; j <= last_fb_vc; j++) {
3220 if (con2fb_map[j] == i)
3221 mapped = 1;
3222 }
3223
3224 if (mapped) {
3225 if (info->fbops->fb_release)
3226 info->fbops->fb_release(info, 0);
3227 module_put(info->fbops->owner);
3228
3229 if (info->fbcon_par) {
3230 fbcon_del_cursor_timer(info);
3231 kfree(info->fbcon_par);
3232 info->fbcon_par = NULL;
3233 }
3015 3234
3235 if (info->queue.func == fb_flashcursor)
3236 info->queue.func = NULL;
3237 }
3238 }
3239
3240 fbcon_has_exited = 1;
3241}
3242
3243static int __init fb_console_init(void)
3244{
3245 int i;
3246
3247 acquire_console_sem();
3248 fb_register_client(&fbcon_event_notifier);
3249 fbcon_class_device =
3250 class_device_create(fb_class, NULL, MKDEV(0, 0), NULL, "fbcon");
3251
3252 if (IS_ERR(fbcon_class_device)) {
3253 printk(KERN_WARNING "Unable to create class_device "
3254 "for fbcon; errno = %ld\n",
3255 PTR_ERR(fbcon_class_device));
3256 fbcon_class_device = NULL;
3257 } else
3258 fbcon_init_class_device();
3259
3260 for (i = 0; i < MAX_NR_CONSOLES; i++)
3261 con2fb_map[i] = -1;
3262
3263 release_console_sem();
3264 fbcon_start();
3016 return 0; 3265 return 0;
3017} 3266}
3018 3267
@@ -3020,12 +3269,24 @@ module_init(fb_console_init);
3020 3269
3021#ifdef MODULE 3270#ifdef MODULE
3022 3271
3272static void __exit fbcon_deinit_class_device(void)
3273{
3274 int i;
3275
3276 for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++)
3277 class_device_remove_file(fbcon_class_device,
3278 &class_device_attrs[i]);
3279}
3280
3023static void __exit fb_console_exit(void) 3281static void __exit fb_console_exit(void)
3024{ 3282{
3025 acquire_console_sem(); 3283 acquire_console_sem();
3026 fb_unregister_client(&fbcon_event_notifier); 3284 fb_unregister_client(&fbcon_event_notifier);
3285 fbcon_deinit_class_device();
3286 class_device_destroy(fb_class, MKDEV(0, 0));
3287 fbcon_exit();
3027 release_console_sem(); 3288 release_console_sem();
3028 give_up_console(&fb_con); 3289 unregister_con_driver(&fb_con);
3029} 3290}
3030 3291
3031module_exit(fb_console_exit); 3292module_exit(fb_console_exit);
diff --git a/drivers/video/console/fbcon.h b/drivers/video/console/fbcon.h
index c38c3d8e7a..3487a63637 100644
--- a/drivers/video/console/fbcon.h
+++ b/drivers/video/console/fbcon.h
@@ -175,6 +175,7 @@ extern void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info);
175#endif 175#endif
176extern void fbcon_set_bitops(struct fbcon_ops *ops); 176extern void fbcon_set_bitops(struct fbcon_ops *ops);
177extern int soft_cursor(struct fb_info *info, struct fb_cursor *cursor); 177extern int soft_cursor(struct fb_info *info, struct fb_cursor *cursor);
178extern struct class *fb_class;
178 179
179#define FBCON_ATTRIBUTE_UNDERLINE 1 180#define FBCON_ATTRIBUTE_UNDERLINE 1
180#define FBCON_ATTRIBUTE_REVERSE 2 181#define FBCON_ATTRIBUTE_REVERSE 2
diff --git a/drivers/video/console/mdacon.c b/drivers/video/console/mdacon.c
index 7f939d066a..c89f90edf8 100644
--- a/drivers/video/console/mdacon.c
+++ b/drivers/video/console/mdacon.c
@@ -308,7 +308,7 @@ static void __init mda_initialize(void)
308 outb_p(0x00, mda_gfx_port); 308 outb_p(0x00, mda_gfx_port);
309} 309}
310 310
311static const char __init *mdacon_startup(void) 311static const char *mdacon_startup(void)
312{ 312{
313 mda_num_columns = 80; 313 mda_num_columns = 80;
314 mda_num_lines = 25; 314 mda_num_lines = 25;
diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c
index e99fe30e56..0304131171 100644
--- a/drivers/video/console/newport_con.c
+++ b/drivers/video/console/newport_con.c
@@ -51,6 +51,7 @@ static int topscan;
51static int xcurs_correction = 29; 51static int xcurs_correction = 29;
52static int newport_xsize; 52static int newport_xsize;
53static int newport_ysize; 53static int newport_ysize;
54static int newport_has_init;
54 55
55static int newport_set_def_font(int unit, struct console_font *op); 56static int newport_set_def_font(int unit, struct console_font *op);
56 57
@@ -283,6 +284,15 @@ static void newport_get_revisions(void)
283 xcurs_correction = 21; 284 xcurs_correction = 21;
284} 285}
285 286
287static void newport_exit(void)
288{
289 int i;
290
291 /* free memory used by user font */
292 for (i = 0; i < MAX_NR_CONSOLES; i++)
293 newport_set_def_font(i, NULL);
294}
295
286/* Can't be __init, take_over_console may call it later */ 296/* Can't be __init, take_over_console may call it later */
287static const char *newport_startup(void) 297static const char *newport_startup(void)
288{ 298{
@@ -290,8 +300,10 @@ static const char *newport_startup(void)
290 300
291 if (!sgi_gfxaddr) 301 if (!sgi_gfxaddr)
292 return NULL; 302 return NULL;
293 npregs = (struct newport_regs *) /* ioremap cannot fail */ 303
294 ioremap(sgi_gfxaddr, sizeof(struct newport_regs)); 304 if (!npregs)
305 npregs = (struct newport_regs *)/* ioremap cannot fail */
306 ioremap(sgi_gfxaddr, sizeof(struct newport_regs));
295 npregs->cset.config = NPORT_CFG_GD0; 307 npregs->cset.config = NPORT_CFG_GD0;
296 308
297 if (newport_wait(npregs)) 309 if (newport_wait(npregs))
@@ -307,11 +319,11 @@ static const char *newport_startup(void)
307 newport_reset(); 319 newport_reset();
308 newport_get_revisions(); 320 newport_get_revisions();
309 newport_get_screensize(); 321 newport_get_screensize();
322 newport_has_init = 1;
310 323
311 return "SGI Newport"; 324 return "SGI Newport";
312 325
313out_unmap: 326out_unmap:
314 iounmap((void *)npregs);
315 return NULL; 327 return NULL;
316} 328}
317 329
@@ -324,11 +336,10 @@ static void newport_init(struct vc_data *vc, int init)
324 336
325static void newport_deinit(struct vc_data *c) 337static void newport_deinit(struct vc_data *c)
326{ 338{
327 int i; 339 if (!con_is_bound(&newport_con) && newport_has_init) {
328 340 newport_exit();
329 /* free memory used by user font */ 341 newport_has_init = 0;
330 for (i = 0; i < MAX_NR_CONSOLES; i++) 342 }
331 newport_set_def_font(i, NULL);
332} 343}
333 344
334static void newport_clear(struct vc_data *vc, int sy, int sx, int height, 345static void newport_clear(struct vc_data *vc, int sy, int sx, int height,
@@ -728,16 +739,23 @@ const struct consw newport_con = {
728#ifdef MODULE 739#ifdef MODULE
729static int __init newport_console_init(void) 740static int __init newport_console_init(void)
730{ 741{
742
743 if (!sgi_gfxaddr)
744 return NULL;
745
746 if (!npregs)
747 npregs = (struct newport_regs *)/* ioremap cannot fail */
748 ioremap(sgi_gfxaddr, sizeof(struct newport_regs));
749
731 return take_over_console(&newport_con, 0, MAX_NR_CONSOLES - 1, 1); 750 return take_over_console(&newport_con, 0, MAX_NR_CONSOLES - 1, 1);
732} 751}
752module_init(newport_console_init);
733 753
734static void __exit newport_console_exit(void) 754static void __exit newport_console_exit(void)
735{ 755{
736 give_up_console(&newport_con); 756 give_up_console(&newport_con);
737 iounmap((void *)npregs); 757 iounmap((void *)npregs);
738} 758}
739
740module_init(newport_console_init);
741module_exit(newport_console_exit); 759module_exit(newport_console_exit);
742#endif 760#endif
743 761
diff --git a/drivers/video/console/promcon.c b/drivers/video/console/promcon.c
index 04f42fcaac..d6e6ad537f 100644
--- a/drivers/video/console/promcon.c
+++ b/drivers/video/console/promcon.c
@@ -109,7 +109,7 @@ promcon_end(struct vc_data *conp, char *b)
109 return b - p; 109 return b - p;
110} 110}
111 111
112const char __init *promcon_startup(void) 112const char *promcon_startup(void)
113{ 113{
114 const char *display_desc = "PROM"; 114 const char *display_desc = "PROM";
115 int node; 115 int node;
@@ -133,7 +133,7 @@ const char __init *promcon_startup(void)
133 return display_desc; 133 return display_desc;
134} 134}
135 135
136static void __init 136static void
137promcon_init_unimap(struct vc_data *conp) 137promcon_init_unimap(struct vc_data *conp)
138{ 138{
139 mm_segment_t old_fs = get_fs(); 139 mm_segment_t old_fs = get_fs();
diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c
index fd5940f412..45c4f227e5 100644
--- a/drivers/video/console/sticon.c
+++ b/drivers/video/console/sticon.c
@@ -75,7 +75,7 @@ static inline void cursor_undrawn(void)
75 cursor_drawn = 0; 75 cursor_drawn = 0;
76} 76}
77 77
78static const char *__init sticon_startup(void) 78static const char *sticon_startup(void)
79{ 79{
80 return "STI console"; 80 return "STI console";
81} 81}
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index e64d42e244..f32b590730 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -114,6 +114,7 @@ static int vga_512_chars;
114static int vga_video_font_height; 114static int vga_video_font_height;
115static int vga_scan_lines; 115static int vga_scan_lines;
116static unsigned int vga_rolled_over = 0; 116static unsigned int vga_rolled_over = 0;
117static int vga_init_done;
117 118
118static int __init no_scroll(char *str) 119static int __init no_scroll(char *str)
119{ 120{
@@ -190,7 +191,7 @@ static void vgacon_scrollback_init(int pitch)
190 } 191 }
191} 192}
192 193
193static void __init vgacon_scrollback_startup(void) 194static void vgacon_scrollback_startup(void)
194{ 195{
195 vgacon_scrollback = alloc_bootmem(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE 196 vgacon_scrollback = alloc_bootmem(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE
196 * 1024); 197 * 1024);
@@ -355,7 +356,7 @@ static int vgacon_scrolldelta(struct vc_data *c, int lines)
355} 356}
356#endif /* CONFIG_VGACON_SOFT_SCROLLBACK */ 357#endif /* CONFIG_VGACON_SOFT_SCROLLBACK */
357 358
358static const char __init *vgacon_startup(void) 359static const char *vgacon_startup(void)
359{ 360{
360 const char *display_desc = NULL; 361 const char *display_desc = NULL;
361 u16 saved1, saved2; 362 u16 saved1, saved2;
@@ -523,7 +524,12 @@ static const char __init *vgacon_startup(void)
523 524
524 vgacon_xres = ORIG_VIDEO_COLS * VGA_FONTWIDTH; 525 vgacon_xres = ORIG_VIDEO_COLS * VGA_FONTWIDTH;
525 vgacon_yres = vga_scan_lines; 526 vgacon_yres = vga_scan_lines;
526 vgacon_scrollback_startup(); 527
528 if (!vga_init_done) {
529 vgacon_scrollback_startup();
530 vga_init_done = 1;
531 }
532
527 return display_desc; 533 return display_desc;
528} 534}
529 535
@@ -531,10 +537,20 @@ static void vgacon_init(struct vc_data *c, int init)
531{ 537{
532 unsigned long p; 538 unsigned long p;
533 539
534 /* We cannot be loaded as a module, therefore init is always 1 */ 540 /*
541 * We cannot be loaded as a module, therefore init is always 1,
542 * but vgacon_init can be called more than once, and init will
543 * not be 1.
544 */
535 c->vc_can_do_color = vga_can_do_color; 545 c->vc_can_do_color = vga_can_do_color;
536 c->vc_cols = vga_video_num_columns; 546
537 c->vc_rows = vga_video_num_lines; 547 /* set dimensions manually if init != 0 since vc_resize() will fail */
548 if (init) {
549 c->vc_cols = vga_video_num_columns;
550 c->vc_rows = vga_video_num_lines;
551 } else
552 vc_resize(c, vga_video_num_columns, vga_video_num_lines);
553
538 c->vc_scan_lines = vga_scan_lines; 554 c->vc_scan_lines = vga_scan_lines;
539 c->vc_font.height = vga_video_font_height; 555 c->vc_font.height = vga_video_font_height;
540 c->vc_complement_mask = 0x7700; 556 c->vc_complement_mask = 0x7700;
diff --git a/drivers/video/epson1355fb.c b/drivers/video/epson1355fb.c
index 082759447b..f0a621ecc2 100644
--- a/drivers/video/epson1355fb.c
+++ b/drivers/video/epson1355fb.c
@@ -605,11 +605,6 @@ static void clearfb16(struct fb_info *info)
605 fb_writeb(0, dst); 605 fb_writeb(0, dst);
606} 606}
607 607
608static void epson1355fb_platform_release(struct device *device)
609{
610 dev_err(device, "This driver is broken, please bug the authors so they will fix it.\n");
611}
612
613static int epson1355fb_remove(struct platform_device *dev) 608static int epson1355fb_remove(struct platform_device *dev)
614{ 609{
615 struct fb_info *info = platform_get_drvdata(dev); 610 struct fb_info *info = platform_get_drvdata(dev);
@@ -733,13 +728,7 @@ static struct platform_driver epson1355fb_driver = {
733 }, 728 },
734}; 729};
735 730
736static struct platform_device epson1355fb_device = { 731static struct platform_device *epson1355fb_device;
737 .name = "epson1355fb",
738 .id = 0,
739 .dev = {
740 .release = epson1355fb_platform_release,
741 }
742};
743 732
744int __init epson1355fb_init(void) 733int __init epson1355fb_init(void)
745{ 734{
@@ -749,11 +738,21 @@ int __init epson1355fb_init(void)
749 return -ENODEV; 738 return -ENODEV;
750 739
751 ret = platform_driver_register(&epson1355fb_driver); 740 ret = platform_driver_register(&epson1355fb_driver);
741
752 if (!ret) { 742 if (!ret) {
753 ret = platform_device_register(&epson1355fb_device); 743 epson1355fb_device = platform_device_alloc("epson1355fb", 0);
754 if (ret) 744
745 if (epson1355fb_device)
746 ret = platform_device_add(epson1355fb_device);
747 else
748 ret = -ENOMEM;
749
750 if (ret) {
751 platform_device_put(epson1355fb_device);
755 platform_driver_unregister(&epson1355fb_driver); 752 platform_driver_unregister(&epson1355fb_driver);
753 }
756 } 754 }
755
757 return ret; 756 return ret;
758} 757}
759 758
@@ -762,7 +761,7 @@ module_init(epson1355fb_init);
762#ifdef MODULE 761#ifdef MODULE
763static void __exit epson1355fb_exit(void) 762static void __exit epson1355fb_exit(void)
764{ 763{
765 platform_device_unregister(&epson1355fb_device); 764 platform_device_unregister(epson1355fb_device);
766 platform_driver_unregister(&epson1355fb_driver); 765 platform_driver_unregister(&epson1355fb_driver);
767} 766}
768 767
diff --git a/drivers/video/fbcvt.c b/drivers/video/fbcvt.c
index ac90883dc3..b5498999c4 100644
--- a/drivers/video/fbcvt.c
+++ b/drivers/video/fbcvt.c
@@ -376,4 +376,3 @@ int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb)
376 376
377 return 0; 377 return 0;
378} 378}
379EXPORT_SYMBOL(fb_find_mode_cvt);
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 372aa17768..31143afe7c 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -34,7 +34,6 @@
34#endif 34#endif
35#include <linux/devfs_fs_kernel.h> 35#include <linux/devfs_fs_kernel.h>
36#include <linux/err.h> 36#include <linux/err.h>
37#include <linux/kernel.h>
38#include <linux/device.h> 37#include <linux/device.h>
39#include <linux/efi.h> 38#include <linux/efi.h>
40 39
@@ -162,7 +161,6 @@ char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size
162} 161}
163 162
164#ifdef CONFIG_LOGO 163#ifdef CONFIG_LOGO
165#include <linux/linux_logo.h>
166 164
167static inline unsigned safe_shift(unsigned d, int n) 165static inline unsigned safe_shift(unsigned d, int n)
168{ 166{
@@ -336,11 +334,11 @@ static void fb_rotate_logo_ud(const u8 *in, u8 *out, u32 width, u32 height)
336 334
337static void fb_rotate_logo_cw(const u8 *in, u8 *out, u32 width, u32 height) 335static void fb_rotate_logo_cw(const u8 *in, u8 *out, u32 width, u32 height)
338{ 336{
339 int i, j, w = width - 1; 337 int i, j, h = height - 1;
340 338
341 for (i = 0; i < height; i++) 339 for (i = 0; i < height; i++)
342 for (j = 0; j < width; j++) 340 for (j = 0; j < width; j++)
343 out[height * j + w - i] = *in++; 341 out[height * j + h - i] = *in++;
344} 342}
345 343
346static void fb_rotate_logo_ccw(const u8 *in, u8 *out, u32 width, u32 height) 344static void fb_rotate_logo_ccw(const u8 *in, u8 *out, u32 width, u32 height)
@@ -358,24 +356,24 @@ static void fb_rotate_logo(struct fb_info *info, u8 *dst,
358 u32 tmp; 356 u32 tmp;
359 357
360 if (rotate == FB_ROTATE_UD) { 358 if (rotate == FB_ROTATE_UD) {
361 image->dx = info->var.xres - image->width;
362 image->dy = info->var.yres - image->height;
363 fb_rotate_logo_ud(image->data, dst, image->width, 359 fb_rotate_logo_ud(image->data, dst, image->width,
364 image->height); 360 image->height);
361 image->dx = info->var.xres - image->width;
362 image->dy = info->var.yres - image->height;
365 } else if (rotate == FB_ROTATE_CW) { 363 } else if (rotate == FB_ROTATE_CW) {
366 tmp = image->width;
367 image->width = image->height;
368 image->height = tmp;
369 image->dx = info->var.xres - image->height;
370 fb_rotate_logo_cw(image->data, dst, image->width, 364 fb_rotate_logo_cw(image->data, dst, image->width,
371 image->height); 365 image->height);
372 } else if (rotate == FB_ROTATE_CCW) {
373 tmp = image->width; 366 tmp = image->width;
374 image->width = image->height; 367 image->width = image->height;
375 image->height = tmp; 368 image->height = tmp;
376 image->dy = info->var.yres - image->width; 369 image->dx = info->var.xres - image->width;
370 } else if (rotate == FB_ROTATE_CCW) {
377 fb_rotate_logo_ccw(image->data, dst, image->width, 371 fb_rotate_logo_ccw(image->data, dst, image->width,
378 image->height); 372 image->height);
373 tmp = image->width;
374 image->width = image->height;
375 image->height = tmp;
376 image->dy = info->var.yres - image->height;
379 } 377 }
380 378
381 image->data = dst; 379 image->data = dst;
@@ -435,7 +433,7 @@ int fb_prepare_logo(struct fb_info *info, int rotate)
435 depth = info->var.green.length; 433 depth = info->var.green.length;
436 } 434 }
437 435
438 if (info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR) { 436 if (info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR && depth > 4) {
439 /* assume console colormap */ 437 /* assume console colormap */
440 depth = 4; 438 depth = 4;
441 } 439 }
@@ -1278,8 +1276,8 @@ static struct file_operations fb_fops = {
1278#endif 1276#endif
1279}; 1277};
1280 1278
1281static struct class *fb_class; 1279struct class *fb_class;
1282 1280EXPORT_SYMBOL(fb_class);
1283/** 1281/**
1284 * register_framebuffer - registers a frame buffer device 1282 * register_framebuffer - registers a frame buffer device
1285 * @fb_info: frame buffer info structure 1283 * @fb_info: frame buffer info structure
@@ -1355,6 +1353,7 @@ register_framebuffer(struct fb_info *fb_info)
1355int 1353int
1356unregister_framebuffer(struct fb_info *fb_info) 1354unregister_framebuffer(struct fb_info *fb_info)
1357{ 1355{
1356 struct fb_event event;
1358 int i; 1357 int i;
1359 1358
1360 i = fb_info->node; 1359 i = fb_info->node;
@@ -1362,13 +1361,17 @@ unregister_framebuffer(struct fb_info *fb_info)
1362 return -EINVAL; 1361 return -EINVAL;
1363 devfs_remove("fb/%d", i); 1362 devfs_remove("fb/%d", i);
1364 1363
1365 if (fb_info->pixmap.addr && (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT)) 1364 if (fb_info->pixmap.addr &&
1365 (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
1366 kfree(fb_info->pixmap.addr); 1366 kfree(fb_info->pixmap.addr);
1367 fb_destroy_modelist(&fb_info->modelist); 1367 fb_destroy_modelist(&fb_info->modelist);
1368 registered_fb[i]=NULL; 1368 registered_fb[i]=NULL;
1369 num_registered_fb--; 1369 num_registered_fb--;
1370 fb_cleanup_class_device(fb_info); 1370 fb_cleanup_class_device(fb_info);
1371 class_device_destroy(fb_class, MKDEV(FB_MAJOR, i)); 1371 class_device_destroy(fb_class, MKDEV(FB_MAJOR, i));
1372 event.info = fb_info;
1373 blocking_notifier_call_chain(&fb_notifier_list,
1374 FB_EVENT_FB_UNREGISTERED, &event);
1372 return 0; 1375 return 0;
1373} 1376}
1374 1377
@@ -1491,28 +1494,6 @@ int fb_new_modelist(struct fb_info *info)
1491 return err; 1494 return err;
1492} 1495}
1493 1496
1494/**
1495 * fb_con_duit - user<->fbcon passthrough
1496 * @info: struct fb_info
1497 * @event: notification event to be passed to fbcon
1498 * @data: private data
1499 *
1500 * DESCRIPTION
1501 * This function is an fbcon-user event passing channel
1502 * which bypasses fbdev. This is hopefully temporary
1503 * until a user interface for fbcon is created
1504 */
1505int fb_con_duit(struct fb_info *info, int event, void *data)
1506{
1507 struct fb_event evnt;
1508
1509 evnt.info = info;
1510 evnt.data = data;
1511
1512 return blocking_notifier_call_chain(&fb_notifier_list, event, &evnt);
1513}
1514EXPORT_SYMBOL(fb_con_duit);
1515
1516static char *video_options[FB_MAX]; 1497static char *video_options[FB_MAX];
1517static int ofonly; 1498static int ofonly;
1518 1499
@@ -1622,6 +1603,5 @@ EXPORT_SYMBOL(fb_set_suspend);
1622EXPORT_SYMBOL(fb_register_client); 1603EXPORT_SYMBOL(fb_register_client);
1623EXPORT_SYMBOL(fb_unregister_client); 1604EXPORT_SYMBOL(fb_unregister_client);
1624EXPORT_SYMBOL(fb_get_options); 1605EXPORT_SYMBOL(fb_get_options);
1625EXPORT_SYMBOL(fb_new_modelist);
1626 1606
1627MODULE_LICENSE("GPL"); 1607MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbmon.c b/drivers/video/fbmon.c
index 53beeb4a99..3ccfff715a 100644
--- a/drivers/video/fbmon.c
+++ b/drivers/video/fbmon.c
@@ -29,9 +29,9 @@
29#include <linux/tty.h> 29#include <linux/tty.h>
30#include <linux/fb.h> 30#include <linux/fb.h>
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/pci.h>
32#include <video/edid.h> 33#include <video/edid.h>
33#ifdef CONFIG_PPC_OF 34#ifdef CONFIG_PPC_OF
34#include <linux/pci.h>
35#include <asm/prom.h> 35#include <asm/prom.h>
36#include <asm/pci-bridge.h> 36#include <asm/pci-bridge.h>
37#endif 37#endif
@@ -605,6 +605,7 @@ static int fb_get_monitor_limits(unsigned char *edid, struct fb_monspecs *specs)
605 block = edid + DETAILED_TIMING_DESCRIPTIONS_START; 605 block = edid + DETAILED_TIMING_DESCRIPTIONS_START;
606 606
607 DPRINTK(" Monitor Operating Limits: "); 607 DPRINTK(" Monitor Operating Limits: ");
608
608 for (i = 0; i < 4; i++, block += DETAILED_TIMING_DESCRIPTION_SIZE) { 609 for (i = 0; i < 4; i++, block += DETAILED_TIMING_DESCRIPTION_SIZE) {
609 if (edid_is_limits_block(block)) { 610 if (edid_is_limits_block(block)) {
610 specs->hfmin = H_MIN_RATE * 1000; 611 specs->hfmin = H_MIN_RATE * 1000;
@@ -618,11 +619,12 @@ static int fb_get_monitor_limits(unsigned char *edid, struct fb_monspecs *specs)
618 break; 619 break;
619 } 620 }
620 } 621 }
621 622
622 /* estimate monitor limits based on modes supported */ 623 /* estimate monitor limits based on modes supported */
623 if (retval) { 624 if (retval) {
624 struct fb_videomode *modes; 625 struct fb_videomode *modes, *mode;
625 int num_modes, i, hz, hscan, pixclock; 626 int num_modes, i, hz, hscan, pixclock;
627 int vtotal, htotal;
626 628
627 modes = fb_create_modedb(edid, &num_modes); 629 modes = fb_create_modedb(edid, &num_modes);
628 if (!modes) { 630 if (!modes) {
@@ -632,20 +634,38 @@ static int fb_get_monitor_limits(unsigned char *edid, struct fb_monspecs *specs)
632 634
633 retval = 0; 635 retval = 0;
634 for (i = 0; i < num_modes; i++) { 636 for (i = 0; i < num_modes; i++) {
635 hz = modes[i].refresh; 637 mode = &modes[i];
636 pixclock = PICOS2KHZ(modes[i].pixclock) * 1000; 638 pixclock = PICOS2KHZ(modes[i].pixclock) * 1000;
637 hscan = (modes[i].yres * 105 * hz + 5000)/100; 639 htotal = mode->xres + mode->right_margin + mode->hsync_len
640 + mode->left_margin;
641 vtotal = mode->yres + mode->lower_margin + mode->vsync_len
642 + mode->upper_margin;
643
644 if (mode->vmode & FB_VMODE_INTERLACED)
645 vtotal /= 2;
646
647 if (mode->vmode & FB_VMODE_DOUBLE)
648 vtotal *= 2;
649
650 hscan = (pixclock + htotal / 2) / htotal;
651 hscan = (hscan + 500) / 1000 * 1000;
652 hz = (hscan + vtotal / 2) / vtotal;
638 653
639 if (specs->dclkmax == 0 || specs->dclkmax < pixclock) 654 if (specs->dclkmax == 0 || specs->dclkmax < pixclock)
640 specs->dclkmax = pixclock; 655 specs->dclkmax = pixclock;
656
641 if (specs->dclkmin == 0 || specs->dclkmin > pixclock) 657 if (specs->dclkmin == 0 || specs->dclkmin > pixclock)
642 specs->dclkmin = pixclock; 658 specs->dclkmin = pixclock;
659
643 if (specs->hfmax == 0 || specs->hfmax < hscan) 660 if (specs->hfmax == 0 || specs->hfmax < hscan)
644 specs->hfmax = hscan; 661 specs->hfmax = hscan;
662
645 if (specs->hfmin == 0 || specs->hfmin > hscan) 663 if (specs->hfmin == 0 || specs->hfmin > hscan)
646 specs->hfmin = hscan; 664 specs->hfmin = hscan;
665
647 if (specs->vfmax == 0 || specs->vfmax < hz) 666 if (specs->vfmax == 0 || specs->vfmax < hz)
648 specs->vfmax = hz; 667 specs->vfmax = hz;
668
649 if (specs->vfmin == 0 || specs->vfmin > hz) 669 if (specs->vfmin == 0 || specs->vfmin > hz)
650 specs->vfmin = hz; 670 specs->vfmin = hz;
651 } 671 }
@@ -1281,8 +1301,7 @@ int fb_validate_mode(const struct fb_var_screeninfo *var, struct fb_info *info)
1281 -EINVAL : 0; 1301 -EINVAL : 0;
1282} 1302}
1283 1303
1284#if defined(CONFIG_FB_FIRMWARE_EDID) && defined(__i386__) 1304#if defined(CONFIG_FIRMWARE_EDID) && defined(CONFIG_X86)
1285#include <linux/pci.h>
1286 1305
1287/* 1306/*
1288 * We need to ensure that the EDID block is only returned for 1307 * We need to ensure that the EDID block is only returned for
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
index 3ceb8c1b39..4f78f23447 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbsysfs.c
@@ -100,13 +100,22 @@ static int mode_string(char *buf, unsigned int offset,
100 const struct fb_videomode *mode) 100 const struct fb_videomode *mode)
101{ 101{
102 char m = 'U'; 102 char m = 'U';
103 char v = 'p';
104
103 if (mode->flag & FB_MODE_IS_DETAILED) 105 if (mode->flag & FB_MODE_IS_DETAILED)
104 m = 'D'; 106 m = 'D';
105 if (mode->flag & FB_MODE_IS_VESA) 107 if (mode->flag & FB_MODE_IS_VESA)
106 m = 'V'; 108 m = 'V';
107 if (mode->flag & FB_MODE_IS_STANDARD) 109 if (mode->flag & FB_MODE_IS_STANDARD)
108 m = 'S'; 110 m = 'S';
109 return snprintf(&buf[offset], PAGE_SIZE - offset, "%c:%dx%d-%d\n", m, mode->xres, mode->yres, mode->refresh); 111
112 if (mode->vmode & FB_VMODE_INTERLACED)
113 v = 'i';
114 if (mode->vmode & FB_VMODE_DOUBLE)
115 v = 'd';
116
117 return snprintf(&buf[offset], PAGE_SIZE - offset, "%c:%dx%d%c-%d\n",
118 m, mode->xres, mode->yres, v, mode->refresh);
110} 119}
111 120
112static ssize_t store_mode(struct class_device *class_device, const char * buf, 121static ssize_t store_mode(struct class_device *class_device, const char * buf,
@@ -238,45 +247,6 @@ static ssize_t show_rotate(struct class_device *class_device, char *buf)
238 return snprintf(buf, PAGE_SIZE, "%d\n", fb_info->var.rotate); 247 return snprintf(buf, PAGE_SIZE, "%d\n", fb_info->var.rotate);
239} 248}
240 249
241static ssize_t store_con_rotate(struct class_device *class_device,
242 const char *buf, size_t count)
243{
244 struct fb_info *fb_info = class_get_devdata(class_device);
245 int rotate;
246 char **last = NULL;
247
248 acquire_console_sem();
249 rotate = simple_strtoul(buf, last, 0);
250 fb_con_duit(fb_info, FB_EVENT_SET_CON_ROTATE, &rotate);
251 release_console_sem();
252 return count;
253}
254
255static ssize_t store_con_rotate_all(struct class_device *class_device,
256 const char *buf, size_t count)
257{
258 struct fb_info *fb_info = class_get_devdata(class_device);
259 int rotate;
260 char **last = NULL;
261
262 acquire_console_sem();
263 rotate = simple_strtoul(buf, last, 0);
264 fb_con_duit(fb_info, FB_EVENT_SET_CON_ROTATE_ALL, &rotate);
265 release_console_sem();
266 return count;
267}
268
269static ssize_t show_con_rotate(struct class_device *class_device, char *buf)
270{
271 struct fb_info *fb_info = class_get_devdata(class_device);
272 int rotate;
273
274 acquire_console_sem();
275 rotate = fb_con_duit(fb_info, FB_EVENT_GET_CON_ROTATE, NULL);
276 release_console_sem();
277 return snprintf(buf, PAGE_SIZE, "%d\n", rotate);
278}
279
280static ssize_t store_virtual(struct class_device *class_device, 250static ssize_t store_virtual(struct class_device *class_device,
281 const char * buf, size_t count) 251 const char * buf, size_t count)
282{ 252{
@@ -493,8 +463,6 @@ static struct class_device_attribute class_device_attrs[] = {
493 __ATTR(name, S_IRUGO, show_name, NULL), 463 __ATTR(name, S_IRUGO, show_name, NULL),
494 __ATTR(stride, S_IRUGO, show_stride, NULL), 464 __ATTR(stride, S_IRUGO, show_stride, NULL),
495 __ATTR(rotate, S_IRUGO|S_IWUSR, show_rotate, store_rotate), 465 __ATTR(rotate, S_IRUGO|S_IWUSR, show_rotate, store_rotate),
496 __ATTR(con_rotate, S_IRUGO|S_IWUSR, show_con_rotate, store_con_rotate),
497 __ATTR(con_rotate_all, S_IWUSR, NULL, store_con_rotate_all),
498 __ATTR(state, S_IRUGO|S_IWUSR, show_fbstate, store_fbstate), 466 __ATTR(state, S_IRUGO|S_IWUSR, show_fbstate, store_fbstate),
499#ifdef CONFIG_FB_BACKLIGHT 467#ifdef CONFIG_FB_BACKLIGHT
500 __ATTR(bl_curve, S_IRUGO|S_IWUSR, show_bl_curve, store_bl_curve), 468 __ATTR(bl_curve, S_IRUGO|S_IWUSR, show_bl_curve, store_bl_curve),
diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
index 20e69156d7..4d3a8871d3 100644
--- a/drivers/video/geode/gx1fb_core.c
+++ b/drivers/video/geode/gx1fb_core.c
@@ -376,8 +376,6 @@ static int __init gx1fb_probe(struct pci_dev *pdev, const struct pci_device_id *
376 release_mem_region(gx1_gx_base() + 0x8300, 0x100); 376 release_mem_region(gx1_gx_base() + 0x8300, 0x100);
377 } 377 }
378 378
379 pci_disable_device(pdev);
380
381 if (info) 379 if (info)
382 framebuffer_release(info); 380 framebuffer_release(info);
383 return ret; 381 return ret;
@@ -399,7 +397,6 @@ static void gx1fb_remove(struct pci_dev *pdev)
399 iounmap(par->dc_regs); 397 iounmap(par->dc_regs);
400 release_mem_region(gx1_gx_base() + 0x8300, 0x100); 398 release_mem_region(gx1_gx_base() + 0x8300, 0x100);
401 399
402 pci_disable_device(pdev);
403 pci_set_drvdata(pdev, NULL); 400 pci_set_drvdata(pdev, NULL);
404 401
405 framebuffer_release(info); 402 framebuffer_release(info);
diff --git a/drivers/video/geode/gxfb_core.c b/drivers/video/geode/gxfb_core.c
index 89c34b15f5..5ef12a3dfa 100644
--- a/drivers/video/geode/gxfb_core.c
+++ b/drivers/video/geode/gxfb_core.c
@@ -354,8 +354,6 @@ static int __init gxfb_probe(struct pci_dev *pdev, const struct pci_device_id *i
354 pci_release_region(pdev, 2); 354 pci_release_region(pdev, 2);
355 } 355 }
356 356
357 pci_disable_device(pdev);
358
359 if (info) 357 if (info)
360 framebuffer_release(info); 358 framebuffer_release(info);
361 return ret; 359 return ret;
@@ -377,7 +375,6 @@ static void gxfb_remove(struct pci_dev *pdev)
377 iounmap(par->dc_regs); 375 iounmap(par->dc_regs);
378 pci_release_region(pdev, 2); 376 pci_release_region(pdev, 2);
379 377
380 pci_disable_device(pdev);
381 pci_set_drvdata(pdev, NULL); 378 pci_set_drvdata(pdev, NULL);
382 379
383 framebuffer_release(info); 380 framebuffer_release(info);
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 44aa2ffff9..a1f7d80f0a 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -2110,9 +2110,6 @@ static void i810fb_release_resource(struct fb_info *info,
2110 if (par->res_flags & MMIO_REQ) 2110 if (par->res_flags & MMIO_REQ)
2111 release_mem_region(par->mmio_start_phys, MMIO_SIZE); 2111 release_mem_region(par->mmio_start_phys, MMIO_SIZE);
2112 2112
2113 if (par->res_flags & PCI_DEVICE_ENABLED)
2114 pci_disable_device(par->dev);
2115
2116 framebuffer_release(info); 2113 framebuffer_release(info);
2117 2114
2118} 2115}
diff --git a/drivers/video/imacfb.c b/drivers/video/imacfb.c
new file mode 100644
index 0000000000..7b1c168c83
--- /dev/null
+++ b/drivers/video/imacfb.c
@@ -0,0 +1,345 @@
1/*
2 * framebuffer driver for Intel Based Mac's
3 *
4 * (c) 2006 Edgar Hucek <gimli@dark-green.com>
5 * Original imac driver written by Gerd Knorr <kraxel@goldbach.in-berlin.de>
6 *
7 */
8
9#include <linux/delay.h>
10#include <linux/errno.h>
11#include <linux/fb.h>
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/ioport.h>
15#include <linux/mm.h>
16#include <linux/module.h>
17#include <linux/platform_device.h>
18#include <linux/slab.h>
19#include <linux/string.h>
20#include <linux/tty.h>
21
22#include <asm/io.h>
23
24#include <video/vga.h>
25
26typedef enum _MAC_TYPE {
27 M_I17,
28 M_I20,
29 M_MINI,
30 M_MACBOOK,
31 M_NEW
32} MAC_TYPE;
33
34/* --------------------------------------------------------------------- */
35
36static struct fb_var_screeninfo imacfb_defined __initdata = {
37 .activate = FB_ACTIVATE_NOW,
38 .height = -1,
39 .width = -1,
40 .right_margin = 32,
41 .upper_margin = 16,
42 .lower_margin = 4,
43 .vsync_len = 4,
44 .vmode = FB_VMODE_NONINTERLACED,
45};
46
47static struct fb_fix_screeninfo imacfb_fix __initdata = {
48 .id = "IMAC VGA",
49 .type = FB_TYPE_PACKED_PIXELS,
50 .accel = FB_ACCEL_NONE,
51 .visual = FB_VISUAL_TRUECOLOR,
52};
53
54static int inverse;
55static int model = M_NEW;
56static int manual_height;
57static int manual_width;
58
59#define DEFAULT_FB_MEM 1024*1024*16
60
61/* --------------------------------------------------------------------- */
62
63static int imacfb_setcolreg(unsigned regno, unsigned red, unsigned green,
64 unsigned blue, unsigned transp,
65 struct fb_info *info)
66{
67 /*
68 * Set a single color register. The values supplied are
69 * already rounded down to the hardware's capabilities
70 * (according to the entries in the `var' structure). Return
71 * != 0 for invalid regno.
72 */
73
74 if (regno >= info->cmap.len)
75 return 1;
76
77 if (regno < 16) {
78 red >>= 8;
79 green >>= 8;
80 blue >>= 8;
81 ((u32 *)(info->pseudo_palette))[regno] =
82 (red << info->var.red.offset) |
83 (green << info->var.green.offset) |
84 (blue << info->var.blue.offset);
85 }
86 return 0;
87}
88
89static struct fb_ops imacfb_ops = {
90 .owner = THIS_MODULE,
91 .fb_setcolreg = imacfb_setcolreg,
92 .fb_fillrect = cfb_fillrect,
93 .fb_copyarea = cfb_copyarea,
94 .fb_imageblit = cfb_imageblit,
95};
96
97static int __init imacfb_setup(char *options)
98{
99 char *this_opt;
100
101 if (!options || !*options)
102 return 0;
103
104 while ((this_opt = strsep(&options, ",")) != NULL) {
105 if (!*this_opt) continue;
106
107 if (!strcmp(this_opt, "inverse"))
108 inverse = 1;
109 else if (!strcmp(this_opt, "i17"))
110 model = M_I17;
111 else if (!strcmp(this_opt, "i20"))
112 model = M_I20;
113 else if (!strcmp(this_opt, "mini"))
114 model = M_MINI;
115 else if (!strcmp(this_opt, "macbook"))
116 model = M_MACBOOK;
117 else if (!strncmp(this_opt, "height:", 7))
118 manual_height = simple_strtoul(this_opt+7, NULL, 0);
119 else if (!strncmp(this_opt, "width:", 6))
120 manual_width = simple_strtoul(this_opt+6, NULL, 0);
121 }
122 return 0;
123}
124
125static int __init imacfb_probe(struct platform_device *dev)
126{
127 struct fb_info *info;
128 int err;
129 unsigned int size_vmode;
130 unsigned int size_remap;
131 unsigned int size_total;
132
133 screen_info.lfb_depth = 32;
134 screen_info.lfb_size = DEFAULT_FB_MEM / 0x10000;
135 screen_info.pages=1;
136 screen_info.blue_size = 8;
137 screen_info.blue_pos = 0;
138 screen_info.green_size = 8;
139 screen_info.green_pos = 8;
140 screen_info.red_size = 8;
141 screen_info.red_pos = 16;
142 screen_info.rsvd_size = 8;
143 screen_info.rsvd_pos = 24;
144
145 switch (model) {
146 case M_I17:
147 screen_info.lfb_width = 1440;
148 screen_info.lfb_height = 900;
149 screen_info.lfb_linelength = 1472 * 4;
150 screen_info.lfb_base = 0x80010000;
151 break;
152 case M_NEW:
153 case M_I20:
154 screen_info.lfb_width = 1680;
155 screen_info.lfb_height = 1050;
156 screen_info.lfb_linelength = 1728 * 4;
157 screen_info.lfb_base = 0x80010000;
158 break;
159 case M_MINI:
160 screen_info.lfb_width = 1024;
161 screen_info.lfb_height = 768;
162 screen_info.lfb_linelength = 2048 * 4;
163 screen_info.lfb_base = 0x80000000;
164 break;
165 case M_MACBOOK:
166 screen_info.lfb_width = 1280;
167 screen_info.lfb_height = 800;
168 screen_info.lfb_linelength = 2048 * 4;
169 screen_info.lfb_base = 0x80000000;
170 break;
171 }
172
173 /* if the user wants to manually specify height/width,
174 we will override the defaults */
175 /* TODO: eventually get auto-detection working */
176 if (manual_height > 0)
177 screen_info.lfb_height = manual_height;
178 if (manual_width > 0)
179 screen_info.lfb_width = manual_width;
180
181 imacfb_fix.smem_start = screen_info.lfb_base;
182 imacfb_defined.bits_per_pixel = screen_info.lfb_depth;
183 imacfb_defined.xres = screen_info.lfb_width;
184 imacfb_defined.yres = screen_info.lfb_height;
185 imacfb_fix.line_length = screen_info.lfb_linelength;
186
187 /* size_vmode -- that is the amount of memory needed for the
188 * used video mode, i.e. the minimum amount of
189 * memory we need. */
190 size_vmode = imacfb_defined.yres * imacfb_fix.line_length;
191
192 /* size_total -- all video memory we have. Used for
193 * entries, ressource allocation and bounds
194 * checking. */
195 size_total = screen_info.lfb_size * 65536;
196 if (size_total < size_vmode)
197 size_total = size_vmode;
198
199 /* size_remap -- the amount of video memory we are going to
200 * use for imacfb. With modern cards it is no
201 * option to simply use size_total as that
202 * wastes plenty of kernel address space. */
203 size_remap = size_vmode * 2;
204 if (size_remap < size_vmode)
205 size_remap = size_vmode;
206 if (size_remap > size_total)
207 size_remap = size_total;
208 imacfb_fix.smem_len = size_remap;
209
210#ifndef __i386__
211 screen_info.imacpm_seg = 0;
212#endif
213
214 if (!request_mem_region(imacfb_fix.smem_start, size_total, "imacfb")) {
215 printk(KERN_WARNING
216 "imacfb: cannot reserve video memory at 0x%lx\n",
217 imacfb_fix.smem_start);
218 /* We cannot make this fatal. Sometimes this comes from magic
219 spaces our resource handlers simply don't know about */
220 }
221
222 info = framebuffer_alloc(sizeof(u32) * 16, &dev->dev);
223 if (!info) {
224 err = -ENOMEM;
225 goto err_release_mem;
226 }
227 info->pseudo_palette = info->par;
228 info->par = NULL;
229
230 info->screen_base = ioremap(imacfb_fix.smem_start, imacfb_fix.smem_len);
231 if (!info->screen_base) {
232 printk(KERN_ERR "imacfb: abort, cannot ioremap video memory "
233 "0x%x @ 0x%lx\n",
234 imacfb_fix.smem_len, imacfb_fix.smem_start);
235 err = -EIO;
236 goto err_unmap;
237 }
238
239 printk(KERN_INFO "imacfb: framebuffer at 0x%lx, mapped to 0x%p, "
240 "using %dk, total %dk\n",
241 imacfb_fix.smem_start, info->screen_base,
242 size_remap/1024, size_total/1024);
243 printk(KERN_INFO "imacfb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
244 imacfb_defined.xres, imacfb_defined.yres,
245 imacfb_defined.bits_per_pixel, imacfb_fix.line_length,
246 screen_info.pages);
247
248 imacfb_defined.xres_virtual = imacfb_defined.xres;
249 imacfb_defined.yres_virtual = imacfb_fix.smem_len /
250 imacfb_fix.line_length;
251 printk(KERN_INFO "imacfb: scrolling: redraw\n");
252 imacfb_defined.yres_virtual = imacfb_defined.yres;
253
254 /* some dummy values for timing to make fbset happy */
255 imacfb_defined.pixclock = 10000000 / imacfb_defined.xres *
256 1000 / imacfb_defined.yres;
257 imacfb_defined.left_margin = (imacfb_defined.xres / 8) & 0xf8;
258 imacfb_defined.hsync_len = (imacfb_defined.xres / 8) & 0xf8;
259
260 imacfb_defined.red.offset = screen_info.red_pos;
261 imacfb_defined.red.length = screen_info.red_size;
262 imacfb_defined.green.offset = screen_info.green_pos;
263 imacfb_defined.green.length = screen_info.green_size;
264 imacfb_defined.blue.offset = screen_info.blue_pos;
265 imacfb_defined.blue.length = screen_info.blue_size;
266 imacfb_defined.transp.offset = screen_info.rsvd_pos;
267 imacfb_defined.transp.length = screen_info.rsvd_size;
268
269 printk(KERN_INFO "imacfb: %s: "
270 "size=%d:%d:%d:%d, shift=%d:%d:%d:%d\n",
271 "Truecolor",
272 screen_info.rsvd_size,
273 screen_info.red_size,
274 screen_info.green_size,
275 screen_info.blue_size,
276 screen_info.rsvd_pos,
277 screen_info.red_pos,
278 screen_info.green_pos,
279 screen_info.blue_pos);
280
281 imacfb_fix.ypanstep = 0;
282 imacfb_fix.ywrapstep = 0;
283
284 /* request failure does not faze us, as vgacon probably has this
285 * region already (FIXME) */
286 request_region(0x3c0, 32, "imacfb");
287
288 info->fbops = &imacfb_ops;
289 info->var = imacfb_defined;
290 info->fix = imacfb_fix;
291 info->flags = FBINFO_FLAG_DEFAULT;
292
293 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
294 err = -ENOMEM;
295 goto err_unmap;
296 }
297 if (register_framebuffer(info)<0) {
298 err = -EINVAL;
299 goto err_fb_dealoc;
300 }
301 printk(KERN_INFO "fb%d: %s frame buffer device\n",
302 info->node, info->fix.id);
303 return 0;
304
305err_fb_dealoc:
306 fb_dealloc_cmap(&info->cmap);
307err_unmap:
308 iounmap(info->screen_base);
309 framebuffer_release(info);
310err_release_mem:
311 release_mem_region(imacfb_fix.smem_start, size_total);
312 return err;
313}
314
315static struct platform_driver imacfb_driver = {
316 .probe = imacfb_probe,
317 .driver = {
318 .name = "imacfb",
319 },
320};
321
322static struct platform_device imacfb_device = {
323 .name = "imacfb",
324};
325
326static int __init imacfb_init(void)
327{
328 int ret;
329 char *option = NULL;
330
331 /* ignore error return of fb_get_options */
332 fb_get_options("imacfb", &option);
333 imacfb_setup(option);
334 ret = platform_driver_register(&imacfb_driver);
335
336 if (!ret) {
337 ret = platform_device_register(&imacfb_device);
338 if (ret)
339 platform_driver_unregister(&imacfb_driver);
340 }
341 return ret;
342}
343module_init(imacfb_init);
344
345MODULE_LICENSE("GPL");
diff --git a/drivers/video/macmodes.c b/drivers/video/macmodes.c
index c0385c6f7d..d21321ca7c 100644
--- a/drivers/video/macmodes.c
+++ b/drivers/video/macmodes.c
@@ -327,7 +327,6 @@ int mac_var_to_vmode(const struct fb_var_screeninfo *var, int *vmode,
327 } 327 }
328 return -EINVAL; 328 return -EINVAL;
329} 329}
330EXPORT_SYMBOL(mac_var_to_vmode);
331 330
332/** 331/**
333 * mac_map_monitor_sense - Convert monitor sense to vmode 332 * mac_map_monitor_sense - Convert monitor sense to vmode
@@ -371,8 +370,9 @@ EXPORT_SYMBOL(mac_map_monitor_sense);
371 * 370 *
372 */ 371 */
373 372
374int __init mac_find_mode(struct fb_var_screeninfo *var, struct fb_info *info, 373int __devinit mac_find_mode(struct fb_var_screeninfo *var,
375 const char *mode_option, unsigned int default_bpp) 374 struct fb_info *info, const char *mode_option,
375 unsigned int default_bpp)
376{ 376{
377 const struct fb_videomode *db = NULL; 377 const struct fb_videomode *db = NULL;
378 unsigned int dbsize = 0; 378 unsigned int dbsize = 0;
diff --git a/drivers/video/macmodes.h b/drivers/video/macmodes.h
index 232f5a09a4..babeb81f46 100644
--- a/drivers/video/macmodes.h
+++ b/drivers/video/macmodes.h
@@ -55,9 +55,10 @@ extern int mac_vmode_to_var(int vmode, int cmode,
55extern int mac_var_to_vmode(const struct fb_var_screeninfo *var, int *vmode, 55extern int mac_var_to_vmode(const struct fb_var_screeninfo *var, int *vmode,
56 int *cmode); 56 int *cmode);
57extern int mac_map_monitor_sense(int sense); 57extern int mac_map_monitor_sense(int sense);
58extern int __init mac_find_mode(struct fb_var_screeninfo *var, 58extern int __devinit mac_find_mode(struct fb_var_screeninfo *var,
59 struct fb_info *info, const char *mode_option, 59 struct fb_info *info,
60 unsigned int default_bpp); 60 const char *mode_option,
61 unsigned int default_bpp);
61 62
62 63
63 /* 64 /*
diff --git a/drivers/video/modedb.c b/drivers/video/modedb.c
index 26a1c618a2..ff5454601e 100644
--- a/drivers/video/modedb.c
+++ b/drivers/video/modedb.c
@@ -259,6 +259,10 @@ static const struct fb_videomode modedb[] = {
259 /* 1152x768, 60 Hz, PowerBook G4 Titanium I and II */ 259 /* 1152x768, 60 Hz, PowerBook G4 Titanium I and II */
260 NULL, 60, 1152, 768, 15386, 158, 26, 29, 3, 136, 6, 260 NULL, 60, 1152, 768, 15386, 158, 26, 29, 3, 136, 6,
261 FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED 261 FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
262 }, {
263 /* 1366x768, 60 Hz, 47.403 kHz hsync, WXGA 16:9 aspect ratio */
264 NULL, 60, 1366, 768, 13806, 120, 10, 14, 3, 32, 5,
265 0, FB_VMODE_NONINTERLACED
262 }, 266 },
263}; 267};
264 268
@@ -787,8 +791,9 @@ struct fb_videomode *fb_find_best_mode(struct fb_var_screeninfo *var,
787 if (diff > d) { 791 if (diff > d) {
788 diff = d; 792 diff = d;
789 best = mode; 793 best = mode;
790 } else if (diff == d && mode->refresh > best->refresh) 794 } else if (diff == d && best &&
791 best = mode; 795 mode->refresh > best->refresh)
796 best = mode;
792 } 797 }
793 } 798 }
794 return best; 799 return best;
@@ -1016,8 +1021,6 @@ EXPORT_SYMBOL(fb_videomode_to_var);
1016EXPORT_SYMBOL(fb_var_to_videomode); 1021EXPORT_SYMBOL(fb_var_to_videomode);
1017EXPORT_SYMBOL(fb_mode_is_equal); 1022EXPORT_SYMBOL(fb_mode_is_equal);
1018EXPORT_SYMBOL(fb_add_videomode); 1023EXPORT_SYMBOL(fb_add_videomode);
1019EXPORT_SYMBOL(fb_delete_videomode);
1020EXPORT_SYMBOL(fb_destroy_modelist);
1021EXPORT_SYMBOL(fb_match_mode); 1024EXPORT_SYMBOL(fb_match_mode);
1022EXPORT_SYMBOL(fb_find_best_mode); 1025EXPORT_SYMBOL(fb_find_best_mode);
1023EXPORT_SYMBOL(fb_find_nearest_mode); 1026EXPORT_SYMBOL(fb_find_nearest_mode);
diff --git a/drivers/video/neofb.c b/drivers/video/neofb.c
index 24b12f71d5..2f156b724d 100644
--- a/drivers/video/neofb.c
+++ b/drivers/video/neofb.c
@@ -1333,17 +1333,22 @@ static int neofb_blank(int blank_mode, struct fb_info *info)
1333 * run "setterm -powersave powerdown" to take advantage 1333 * run "setterm -powersave powerdown" to take advantage
1334 */ 1334 */
1335 struct neofb_par *par = info->par; 1335 struct neofb_par *par = info->par;
1336 int seqflags, lcdflags, dpmsflags, reg; 1336 int seqflags, lcdflags, dpmsflags, reg, tmpdisp;
1337
1338 1337
1339 /* 1338 /*
1340 * Reload the value stored in the register, if sensible. It might have 1339 * Read back the register bits related to display configuration. They might
1341 * been changed via FN keystroke. 1340 * have been changed underneath the driver via Fn key stroke.
1341 */
1342 neoUnlock();
1343 tmpdisp = vga_rgfx(NULL, 0x20) & 0x03;
1344 neoLock(&par->state);
1345
1346 /* In case we blank the screen, we want to store the possibly new
1347 * configuration in the driver. During un-blank, we re-apply this setting,
1348 * since the LCD bit will be cleared in order to switch off the backlight.
1342 */ 1349 */
1343 if (par->PanelDispCntlRegRead) { 1350 if (par->PanelDispCntlRegRead) {
1344 neoUnlock(); 1351 par->PanelDispCntlReg1 = tmpdisp;
1345 par->PanelDispCntlReg1 = vga_rgfx(NULL, 0x20) & 0x03;
1346 neoLock(&par->state);
1347 } 1352 }
1348 par->PanelDispCntlRegRead = !blank_mode; 1353 par->PanelDispCntlRegRead = !blank_mode;
1349 1354
@@ -1378,12 +1383,21 @@ static int neofb_blank(int blank_mode, struct fb_info *info)
1378 break; 1383 break;
1379 case FB_BLANK_NORMAL: /* just blank screen (backlight stays on) */ 1384 case FB_BLANK_NORMAL: /* just blank screen (backlight stays on) */
1380 seqflags = VGA_SR01_SCREEN_OFF; /* Disable sequencer */ 1385 seqflags = VGA_SR01_SCREEN_OFF; /* Disable sequencer */
1381 lcdflags = par->PanelDispCntlReg1 & 0x02; /* LCD normal */ 1386 /*
1387 * During a blank operation with the LID shut, we might store "LCD off"
1388 * by mistake. Due to timing issues, the BIOS may switch the lights
1389 * back on, and we turn it back off once we "unblank".
1390 *
1391 * So here is an attempt to implement ">=" - if we are in the process
1392 * of unblanking, and the LCD bit is unset in the driver but set in the
1393 * register, we must keep it.
1394 */
1395 lcdflags = ((par->PanelDispCntlReg1 | tmpdisp) & 0x02); /* LCD normal */
1382 dpmsflags = 0x00; /* no hsync/vsync suppression */ 1396 dpmsflags = 0x00; /* no hsync/vsync suppression */
1383 break; 1397 break;
1384 case FB_BLANK_UNBLANK: /* unblank */ 1398 case FB_BLANK_UNBLANK: /* unblank */
1385 seqflags = 0; /* Enable sequencer */ 1399 seqflags = 0; /* Enable sequencer */
1386 lcdflags = par->PanelDispCntlReg1 & 0x02; /* LCD normal */ 1400 lcdflags = ((par->PanelDispCntlReg1 | tmpdisp) & 0x02); /* LCD normal */
1387 dpmsflags = 0x00; /* no hsync/vsync suppression */ 1401 dpmsflags = 0x00; /* no hsync/vsync suppression */
1388#ifdef CONFIG_TOSHIBA 1402#ifdef CONFIG_TOSHIBA
1389 /* Do we still need this ? */ 1403 /* Do we still need this ? */
diff --git a/drivers/video/nvidia/nv_hw.c b/drivers/video/nvidia/nv_hw.c
index 99c3a8e6a2..9ed640d357 100644
--- a/drivers/video/nvidia/nv_hw.c
+++ b/drivers/video/nvidia/nv_hw.c
@@ -886,7 +886,10 @@ void NVCalcStateExt(struct nvidia_par *par,
886 case NV_ARCH_20: 886 case NV_ARCH_20:
887 case NV_ARCH_30: 887 case NV_ARCH_30:
888 default: 888 default:
889 if (((par->Chipset & 0xffff) == 0x01A0) || 889 if ((par->Chipset & 0xfff0) == 0x0240) {
890 state->arbitration0 = 256;
891 state->arbitration1 = 0x0480;
892 } else if (((par->Chipset & 0xffff) == 0x01A0) ||
890 ((par->Chipset & 0xffff) == 0x01f0)) { 893 ((par->Chipset & 0xffff) == 0x01f0)) {
891 nForceUpdateArbitrationSettings(VClk, 894 nForceUpdateArbitrationSettings(VClk,
892 pixelDepth * 8, 895 pixelDepth * 8,
@@ -1235,6 +1238,7 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
1235 break; 1238 break;
1236 case 0x0160: 1239 case 0x0160:
1237 case 0x01D0: 1240 case 0x01D0:
1241 case 0x0240:
1238 NV_WR32(par->PMC, 0x1700, 1242 NV_WR32(par->PMC, 0x1700,
1239 NV_RD32(par->PFB, 0x020C)); 1243 NV_RD32(par->PFB, 0x020C));
1240 NV_WR32(par->PMC, 0x1704, 0); 1244 NV_WR32(par->PMC, 0x1704, 0);
@@ -1359,7 +1363,9 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
1359 if(((par->Chipset & 0xfff0) 1363 if(((par->Chipset & 0xfff0)
1360 != 0x0160) && 1364 != 0x0160) &&
1361 ((par->Chipset & 0xfff0) 1365 ((par->Chipset & 0xfff0)
1362 != 0x0220)) 1366 != 0x0220) &&
1367 ((par->Chipset & 0xfff0)
1368 != 0x240))
1363 NV_WR32(par->PGRAPH, 1369 NV_WR32(par->PGRAPH,
1364 0x6900 + i*4, 1370 0x6900 + i*4,
1365 NV_RD32(par->PFB, 1371 NV_RD32(par->PFB,
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index 03a7c1e9ce..7b5cffb278 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -67,359 +67,10 @@
67#define MAX_CURS 32 67#define MAX_CURS 32
68 68
69static struct pci_device_id nvidiafb_pci_tbl[] = { 69static struct pci_device_id nvidiafb_pci_tbl[] = {
70 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_TNT, 70 {PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
71 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 71 PCI_BASE_CLASS_DISPLAY << 16, 0xff0000, 0},
72 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_TNT2, 72 { 0, }
73 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
74 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_UTNT2,
75 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
76 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_TNT_UNKNOWN,
77 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
78 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_VTNT2,
79 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
80 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_UVTNT2,
81 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
82 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_ITNT2,
83 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
84 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_SDR,
85 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
86 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_DDR,
87 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
88 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO,
89 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
90 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE2_MX,
91 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
92 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE2_MX2,
93 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
94 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE2_GO,
95 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
96 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO2_MXR,
97 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
98 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE2_GTS,
99 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
100 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE2_GTS2,
101 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
102 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE2_ULTRA,
103 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
104 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO2_PRO,
105 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
106 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_460,
107 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
108 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440,
109 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
110 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_420,
111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
112 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440_SE,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
114 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_440_GO,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
116 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_420_GO,
117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
118 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_460_GO,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
120 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_420_GO_M32,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
122 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_500XGL,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
124 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_440_GO_M64,
125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
126 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_200,
127 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
128 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_550XGL,
129 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
130 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_500_GOGL,
131 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
132 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_410_GO_M16,
133 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
134 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440_8X,
135 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
136 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440SE_8X,
137 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
138 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_420_8X,
139 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
140 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_4000,
141 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
142 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_448_GO,
143 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
144 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_488_GO,
145 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
146 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_580_XGL,
147 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
148 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_MAC,
149 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
150 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_280_NVS,
151 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
152 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_380_XGL,
153 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
154 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_IGEFORCE2,
155 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
156 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE3,
157 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
158 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE3_1,
159 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
160 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE3_2,
161 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
162 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO_DDC,
163 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
164 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4600,
165 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
166 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4400,
167 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
168 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4200,
169 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
170 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_900XGL,
171 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
172 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_750XGL,
173 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
174 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_700XGL,
175 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
176 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800,
177 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
178 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800_8X,
179 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
180 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800SE,
181 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
182 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE4_4200_GO,
183 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
184 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_980_XGL,
185 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
186 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_780_XGL,
187 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
188 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO4_700_GOGL,
189 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
190 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5800_ULTRA,
191 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
192 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5800,
193 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
194 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO_FX_2000,
195 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
196 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1000,
197 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
198 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600_ULTRA,
199 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
200 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600,
201 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
202 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600SE,
203 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
204 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5600,
205 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
206 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5650,
207 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
208 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO700,
209 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
210 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200,
211 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
212 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200_ULTRA,
213 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
214 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200_1,
215 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
216 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200SE,
217 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
218 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5200,
219 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
220 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5250,
221 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
222 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5250_32,
223 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
224 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO_5200,
225 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
226 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO_NVS_280_PCI,
227 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
228 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO_FX_500,
229 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
230 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5300,
231 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
232 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5100,
233 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
234 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900_ULTRA,
235 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
236 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900,
237 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
238 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900XT,
239 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
240 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5950_ULTRA,
241 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
242 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO_FX_3000,
243 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
244 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700_ULTRA,
245 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
246 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700,
247 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
248 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700LE,
249 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
250 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700VE,
251 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
252 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_1,
253 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
254 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_2,
255 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
256 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO1000,
257 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
258 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1100,
259 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
260 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5500,
261 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
262 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5100,
263 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
264 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO_FX_700,
265 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
266 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900ZT,
267 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
268 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_ULTRA,
269 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
270 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_6800,
271 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
272 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_LE,
273 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
274 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_GT,
275 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
276 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO_FX_4000,
277 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
278 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_6600_GT,
279 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
280 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_6600,
281 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
282 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_6610_XL,
283 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
284 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_QUADRO_FX_540,
285 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
286 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_6200,
287 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
288 {PCI_VENDOR_ID_NVIDIA, PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_ALT1,
289 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
290 {PCI_VENDOR_ID_NVIDIA, PCIE_DEVICE_ID_NVIDIA_GEFORCE_6600_ALT1,
291 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
292 {PCI_VENDOR_ID_NVIDIA, PCIE_DEVICE_ID_NVIDIA_GEFORCE_6600_ALT2,
293 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
294 {PCI_VENDOR_ID_NVIDIA, PCIE_DEVICE_ID_NVIDIA_GEFORCE_6200_ALT1,
295 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
296 {PCI_VENDOR_ID_NVIDIA, PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_GT,
297 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
298 {PCI_VENDOR_ID_NVIDIA, PCIE_DEVICE_ID_NVIDIA_QUADRO_NVS280,
299 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
300 {PCI_VENDOR_ID_NVIDIA, 0x0252,
301 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
302 {PCI_VENDOR_ID_NVIDIA, 0x0313,
303 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
304 {PCI_VENDOR_ID_NVIDIA, 0x0316,
305 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
306 {PCI_VENDOR_ID_NVIDIA, 0x0317,
307 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
308 {PCI_VENDOR_ID_NVIDIA, 0x031D,
309 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
310 {PCI_VENDOR_ID_NVIDIA, 0x031E,
311 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
312 {PCI_VENDOR_ID_NVIDIA, 0x031F,
313 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
314 {PCI_VENDOR_ID_NVIDIA, 0x0329,
315 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
316 {PCI_VENDOR_ID_NVIDIA, 0x032F,
317 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
318 {PCI_VENDOR_ID_NVIDIA, 0x0345,
319 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
320 {PCI_VENDOR_ID_NVIDIA, 0x0349,
321 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
322 {PCI_VENDOR_ID_NVIDIA, 0x034B,
323 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
324 {PCI_VENDOR_ID_NVIDIA, 0x034F,
325 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
326 {PCI_VENDOR_ID_NVIDIA, 0x00c0,
327 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
328 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_GEFORCE_6800A,
329 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
330 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_GEFORCE_6800A_LE,
331 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
332 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_GEFORCE_GO_6800,
333 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
334 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_GEFORCE_GO_6800_ULTRA,
335 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
336 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_QUADRO_FX_GO1400,
337 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
338 {PCI_VENDOR_ID_NVIDIA, 0x00cd,
339 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
340 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_QUADRO_FX_1400,
341 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
342 {PCI_VENDOR_ID_NVIDIA, 0x0142,
343 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
344 {PCI_VENDOR_ID_NVIDIA, 0x0143,
345 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
346 {PCI_VENDOR_ID_NVIDIA, 0x0144,
347 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
348 {PCI_VENDOR_ID_NVIDIA, 0x0145,
349 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
350 {PCI_VENDOR_ID_NVIDIA, 0x0146,
351 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
352 {PCI_VENDOR_ID_NVIDIA, 0x0147,
353 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
354 {PCI_VENDOR_ID_NVIDIA, 0x0148,
355 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
356 {PCI_VENDOR_ID_NVIDIA, 0x0149,
357 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
358 {PCI_VENDOR_ID_NVIDIA, 0x014b,
359 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
360 {PCI_VENDOR_ID_NVIDIA, 0x14c,
361 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
362 {PCI_VENDOR_ID_NVIDIA, 0x014d,
363 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
364 {PCI_VENDOR_ID_NVIDIA, 0x0160,
365 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
366 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_6200_TURBOCACHE,
367 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
368 {PCI_VENDOR_ID_NVIDIA, 0x0162,
369 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
370 {PCI_VENDOR_ID_NVIDIA, 0x0163,
371 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
372 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6200,
373 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
374 {PCI_VENDOR_ID_NVIDIA, 0x0165,
375 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
376 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6250,
377 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
378 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6200_1,
379 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
380 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6250_1,
381 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
382 {PCI_VENDOR_ID_NVIDIA, 0x0169,
383 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
384 {PCI_VENDOR_ID_NVIDIA, 0x016b,
385 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
386 {PCI_VENDOR_ID_NVIDIA, 0x016c,
387 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
388 {PCI_VENDOR_ID_NVIDIA, 0x016d,
389 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
390 {PCI_VENDOR_ID_NVIDIA, 0x016e,
391 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
392 {PCI_VENDOR_ID_NVIDIA, 0x0210,
393 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
394 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B,
395 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
396 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B_LE,
397 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
398 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B_GT,
399 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
400 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GT,
401 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
402 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GTX,
403 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
404 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_7800,
405 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
406 {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_7800_GTX,
407 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
408 {PCI_VENDOR_ID_NVIDIA, 0x021d,
409 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
410 {PCI_VENDOR_ID_NVIDIA, 0x021e,
411 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
412 {PCI_VENDOR_ID_NVIDIA, 0x0220,
413 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
414 {PCI_VENDOR_ID_NVIDIA, 0x0221,
415 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
416 {PCI_VENDOR_ID_NVIDIA, 0x0222,
417 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
418 {PCI_VENDOR_ID_NVIDIA, 0x0228,
419 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
420 {0,} /* terminate list */
421}; 73};
422
423MODULE_DEVICE_TABLE(pci, nvidiafb_pci_tbl); 74MODULE_DEVICE_TABLE(pci, nvidiafb_pci_tbl);
424 75
425/* command line data, set in nvidiafb_setup() */ 76/* command line data, set in nvidiafb_setup() */
@@ -1465,10 +1116,10 @@ static u32 __devinit nvidia_get_chipset(struct fb_info *info)
1465 struct nvidia_par *par = info->par; 1116 struct nvidia_par *par = info->par;
1466 u32 id = (par->pci_dev->vendor << 16) | par->pci_dev->device; 1117 u32 id = (par->pci_dev->vendor << 16) | par->pci_dev->device;
1467 1118
1468 printk("nvidiafb: PCI id - %x\n", id); 1119 printk(KERN_INFO PFX "Device ID: %x \n", id);
1120
1469 if ((id & 0xfff0) == 0x00f0) { 1121 if ((id & 0xfff0) == 0x00f0) {
1470 /* pci-e */ 1122 /* pci-e */
1471 printk("nvidiafb: PCI-E card\n");
1472 id = NV_RD32(par->REGS, 0x1800); 1123 id = NV_RD32(par->REGS, 0x1800);
1473 1124
1474 if ((id & 0x0000ffff) == 0x000010DE) 1125 if ((id & 0x0000ffff) == 0x000010DE)
@@ -1476,9 +1127,9 @@ static u32 __devinit nvidia_get_chipset(struct fb_info *info)
1476 else if ((id & 0xffff0000) == 0xDE100000) /* wrong endian */ 1127 else if ((id & 0xffff0000) == 0xDE100000) /* wrong endian */
1477 id = 0x10DE0000 | ((id << 8) & 0x0000ff00) | 1128 id = 0x10DE0000 | ((id << 8) & 0x0000ff00) |
1478 ((id >> 8) & 0x000000ff); 1129 ((id >> 8) & 0x000000ff);
1130 printk(KERN_INFO PFX "Subsystem ID: %x \n", id);
1479 } 1131 }
1480 1132
1481 printk("nvidiafb: Actual id - %x\n", id);
1482 return id; 1133 return id;
1483} 1134}
1484 1135
@@ -1520,6 +1171,7 @@ static u32 __devinit nvidia_get_arch(struct fb_info *info)
1520 case 0x0210: 1171 case 0x0210:
1521 case 0x0220: 1172 case 0x0220:
1522 case 0x0230: 1173 case 0x0230:
1174 case 0x0240:
1523 case 0x0290: 1175 case 0x0290:
1524 case 0x0390: 1176 case 0x0390:
1525 arch = NV_ARCH_40; 1177 arch = NV_ARCH_40;
@@ -1567,7 +1219,7 @@ static int __devinit nvidiafb_probe(struct pci_dev *pd,
1567 1219
1568 if (pci_request_regions(pd, "nvidiafb")) { 1220 if (pci_request_regions(pd, "nvidiafb")) {
1569 printk(KERN_ERR PFX "cannot request PCI regions\n"); 1221 printk(KERN_ERR PFX "cannot request PCI regions\n");
1570 goto err_out_request; 1222 goto err_out_enable;
1571 } 1223 }
1572 1224
1573 par->FlatPanel = flatpanel; 1225 par->FlatPanel = flatpanel;
@@ -1596,7 +1248,6 @@ static int __devinit nvidiafb_probe(struct pci_dev *pd,
1596 } 1248 }
1597 1249
1598 par->Chipset = nvidia_get_chipset(info); 1250 par->Chipset = nvidia_get_chipset(info);
1599 printk(KERN_INFO PFX "nVidia device/chipset %X\n", par->Chipset);
1600 par->Architecture = nvidia_get_arch(info); 1251 par->Architecture = nvidia_get_arch(info);
1601 1252
1602 if (par->Architecture == 0) { 1253 if (par->Architecture == 0) {
@@ -1687,10 +1338,8 @@ err_out_free_base1:
1687 nvidia_delete_i2c_busses(par); 1338 nvidia_delete_i2c_busses(par);
1688err_out_arch: 1339err_out_arch:
1689 iounmap(par->REGS); 1340 iounmap(par->REGS);
1690err_out_free_base0: 1341 err_out_free_base0:
1691 pci_release_regions(pd); 1342 pci_release_regions(pd);
1692err_out_request:
1693 pci_disable_device(pd);
1694err_out_enable: 1343err_out_enable:
1695 kfree(info->pixmap.addr); 1344 kfree(info->pixmap.addr);
1696err_out_kfree: 1345err_out_kfree:
@@ -1720,7 +1369,6 @@ static void __exit nvidiafb_remove(struct pci_dev *pd)
1720 nvidia_delete_i2c_busses(par); 1369 nvidia_delete_i2c_busses(par);
1721 iounmap(par->REGS); 1370 iounmap(par->REGS);
1722 pci_release_regions(pd); 1371 pci_release_regions(pd);
1723 pci_disable_device(pd);
1724 kfree(info->pixmap.addr); 1372 kfree(info->pixmap.addr);
1725 framebuffer_release(info); 1373 framebuffer_release(info);
1726 pci_set_drvdata(pd, NULL); 1374 pci_set_drvdata(pd, NULL);
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index d4384ab1df..12af58c5cf 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -2152,7 +2152,6 @@ err_iounmap_ctrl_base:
2152err_release_region: 2152err_release_region:
2153 pci_release_regions(pd); 2153 pci_release_regions(pd);
2154err_disable_device: 2154err_disable_device:
2155 pci_disable_device(pd);
2156err_free_pixmap: 2155err_free_pixmap:
2157 kfree(info->pixmap.addr); 2156 kfree(info->pixmap.addr);
2158err_framebuffer_release: 2157err_framebuffer_release:
@@ -2187,7 +2186,6 @@ static void __exit rivafb_remove(struct pci_dev *pd)
2187 if (par->riva.Architecture == NV_ARCH_03) 2186 if (par->riva.Architecture == NV_ARCH_03)
2188 iounmap(par->riva.PRAMIN); 2187 iounmap(par->riva.PRAMIN);
2189 pci_release_regions(pd); 2188 pci_release_regions(pd);
2190 pci_disable_device(pd);
2191 kfree(info->pixmap.addr); 2189 kfree(info->pixmap.addr);
2192 framebuffer_release(info); 2190 framebuffer_release(info);
2193 pci_set_drvdata(pd, NULL); 2191 pci_set_drvdata(pd, NULL);
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index 9451932fba..fbc4118506 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -641,6 +641,7 @@ static int __init s3c2410fb_probe(struct platform_device *pdev)
641 int ret; 641 int ret;
642 int irq; 642 int irq;
643 int i; 643 int i;
644 u32 lcdcon1;
644 645
645 mach_info = pdev->dev.platform_data; 646 mach_info = pdev->dev.platform_data;
646 if (mach_info == NULL) { 647 if (mach_info == NULL) {
@@ -672,6 +673,11 @@ static int __init s3c2410fb_probe(struct platform_device *pdev)
672 673
673 memcpy(&info->regs, &mach_info->regs, sizeof(info->regs)); 674 memcpy(&info->regs, &mach_info->regs, sizeof(info->regs));
674 675
676 /* Stop the video and unset ENVID if set */
677 info->regs.lcdcon1 &= ~S3C2410_LCDCON1_ENVID;
678 lcdcon1 = readl(S3C2410_LCDCON1);
679 writel(lcdcon1 & ~S3C2410_LCDCON1_ENVID, S3C2410_LCDCON1);
680
675 info->mach_info = pdev->dev.platform_data; 681 info->mach_info = pdev->dev.platform_data;
676 682
677 fbinfo->fix.type = FB_TYPE_PACKED_PIXELS; 683 fbinfo->fix.type = FB_TYPE_PACKED_PIXELS;
@@ -794,15 +800,14 @@ dealloc_fb:
794 * shutdown the lcd controller 800 * shutdown the lcd controller
795*/ 801*/
796 802
797static void s3c2410fb_stop_lcd(void) 803static void s3c2410fb_stop_lcd(struct s3c2410fb_info *fbi)
798{ 804{
799 unsigned long flags; 805 unsigned long flags;
800 unsigned long tmp;
801 806
802 local_irq_save(flags); 807 local_irq_save(flags);
803 808
804 tmp = readl(S3C2410_LCDCON1); 809 fbi->regs.lcdcon1 &= ~S3C2410_LCDCON1_ENVID;
805 writel(tmp & ~S3C2410_LCDCON1_ENVID, S3C2410_LCDCON1); 810 writel(fbi->regs.lcdcon1, S3C2410_LCDCON1);
806 811
807 local_irq_restore(flags); 812 local_irq_restore(flags);
808} 813}
@@ -816,7 +821,7 @@ static int s3c2410fb_remove(struct platform_device *pdev)
816 struct s3c2410fb_info *info = fbinfo->par; 821 struct s3c2410fb_info *info = fbinfo->par;
817 int irq; 822 int irq;
818 823
819 s3c2410fb_stop_lcd(); 824 s3c2410fb_stop_lcd(info);
820 msleep(1); 825 msleep(1);
821 826
822 s3c2410fb_unmap_video_memory(info); 827 s3c2410fb_unmap_video_memory(info);
@@ -844,7 +849,7 @@ static int s3c2410fb_suspend(struct platform_device *dev, pm_message_t state)
844 struct fb_info *fbinfo = platform_get_drvdata(dev); 849 struct fb_info *fbinfo = platform_get_drvdata(dev);
845 struct s3c2410fb_info *info = fbinfo->par; 850 struct s3c2410fb_info *info = fbinfo->par;
846 851
847 s3c2410fb_stop_lcd(); 852 s3c2410fb_stop_lcd(info);
848 853
849 /* sleep before disabling the clock, we need to ensure 854 /* sleep before disabling the clock, we need to ensure
850 * the LCD DMA engine is not going to get back on the bus 855 * the LCD DMA engine is not going to get back on the bus
diff --git a/drivers/video/savage/savagefb.h b/drivers/video/savage/savagefb.h
index 58cfdfb418..e648a6c0f6 100644
--- a/drivers/video/savage/savagefb.h
+++ b/drivers/video/savage/savagefb.h
@@ -147,7 +147,27 @@ struct xtimings {
147 int interlaced; 147 int interlaced;
148}; 148};
149 149
150struct savage_reg {
151 unsigned char MiscOutReg; /* Misc */
152 unsigned char CRTC[25]; /* Crtc Controller */
153 unsigned char Sequencer[5]; /* Video Sequencer */
154 unsigned char Graphics[9]; /* Video Graphics */
155 unsigned char Attribute[21]; /* Video Atribute */
150 156
157 unsigned int mode, refresh;
158 unsigned char SR08, SR0E, SR0F;
159 unsigned char SR10, SR11, SR12, SR13, SR15, SR18, SR29, SR30;
160 unsigned char SR54[8];
161 unsigned char Clock;
162 unsigned char CR31, CR32, CR33, CR34, CR36, CR3A, CR3B, CR3C;
163 unsigned char CR40, CR41, CR42, CR43, CR45;
164 unsigned char CR50, CR51, CR53, CR55, CR58, CR5B, CR5D, CR5E;
165 unsigned char CR60, CR63, CR65, CR66, CR67, CR68, CR69, CR6D, CR6F;
166 unsigned char CR86, CR88;
167 unsigned char CR90, CR91, CRB0;
168 unsigned int STREAMS[22]; /* yuck, streams regs */
169 unsigned int MMPR0, MMPR1, MMPR2, MMPR3;
170};
151/* --------------------------------------------------------------------- */ 171/* --------------------------------------------------------------------- */
152 172
153#define NR_PALETTE 256 173#define NR_PALETTE 256
@@ -167,6 +187,8 @@ struct savagefb_par {
167 struct pci_dev *pcidev; 187 struct pci_dev *pcidev;
168 savage_chipset chip; 188 savage_chipset chip;
169 struct savagefb_i2c_chan chan; 189 struct savagefb_i2c_chan chan;
190 struct savage_reg state;
191 struct savage_reg save;
170 unsigned char *edid; 192 unsigned char *edid;
171 u32 pseudo_palette[16]; 193 u32 pseudo_palette[16];
172 int paletteEnabled; 194 int paletteEnabled;
@@ -179,6 +201,7 @@ struct savagefb_par {
179 int minClock; 201 int minClock;
180 int numClocks; 202 int numClocks;
181 int clock[4]; 203 int clock[4];
204 int MCLK, REFCLK, LCDclk;
182 struct { 205 struct {
183 u8 __iomem *vbase; 206 u8 __iomem *vbase;
184 u32 pbase; 207 u32 pbase;
@@ -196,7 +219,6 @@ struct savagefb_par {
196 219
197 volatile u32 __iomem *bci_base; 220 volatile u32 __iomem *bci_base;
198 unsigned int bci_ptr; 221 unsigned int bci_ptr;
199
200 u32 cob_offset; 222 u32 cob_offset;
201 u32 cob_size; 223 u32 cob_size;
202 int cob_index; 224 int cob_index;
@@ -204,7 +226,6 @@ struct savagefb_par {
204 void (*SavageWaitIdle) (struct savagefb_par *par); 226 void (*SavageWaitIdle) (struct savagefb_par *par);
205 void (*SavageWaitFifo) (struct savagefb_par *par, int space); 227 void (*SavageWaitFifo) (struct savagefb_par *par, int space);
206 228
207 int MCLK, REFCLK, LCDclk;
208 int HorizScaleFactor; 229 int HorizScaleFactor;
209 230
210 /* Panels size */ 231 /* Panels size */
@@ -217,26 +238,6 @@ struct savagefb_par {
217 238
218 int depth; 239 int depth;
219 int vwidth; 240 int vwidth;
220
221 unsigned char MiscOutReg; /* Misc */
222 unsigned char CRTC[25]; /* Crtc Controller */
223 unsigned char Sequencer[5]; /* Video Sequencer */
224 unsigned char Graphics[9]; /* Video Graphics */
225 unsigned char Attribute[21]; /* Video Atribute */
226
227 unsigned int mode, refresh;
228 unsigned char SR08, SR0E, SR0F;
229 unsigned char SR10, SR11, SR12, SR13, SR15, SR18, SR29, SR30;
230 unsigned char SR54[8];
231 unsigned char Clock;
232 unsigned char CR31, CR32, CR33, CR34, CR36, CR3A, CR3B, CR3C;
233 unsigned char CR40, CR41, CR42, CR43, CR45;
234 unsigned char CR50, CR51, CR53, CR55, CR58, CR5B, CR5D, CR5E;
235 unsigned char CR60, CR63, CR65, CR66, CR67, CR68, CR69, CR6D, CR6F;
236 unsigned char CR86, CR88;
237 unsigned char CR90, CR91, CRB0;
238 unsigned int STREAMS[22]; /* yuck, streams regs */
239 unsigned int MMPR0, MMPR1, MMPR2, MMPR3;
240}; 241};
241 242
242#define BCI_BD_BW_DISABLE 0x10000000 243#define BCI_BD_BW_DISABLE 0x10000000
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index 0da624e652..78883cf66a 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -86,15 +86,15 @@ MODULE_DESCRIPTION("FBDev driver for S3 Savage PCI/AGP Chips");
86 86
87/* --------------------------------------------------------------------- */ 87/* --------------------------------------------------------------------- */
88 88
89static void vgaHWSeqReset (struct savagefb_par *par, int start) 89static void vgaHWSeqReset(struct savagefb_par *par, int start)
90{ 90{
91 if (start) 91 if (start)
92 VGAwSEQ (0x00, 0x01, par); /* Synchronous Reset */ 92 VGAwSEQ(0x00, 0x01, par); /* Synchronous Reset */
93 else 93 else
94 VGAwSEQ (0x00, 0x03, par); /* End Reset */ 94 VGAwSEQ(0x00, 0x03, par); /* End Reset */
95} 95}
96 96
97static void vgaHWProtect (struct savagefb_par *par, int on) 97static void vgaHWProtect(struct savagefb_par *par, int on)
98{ 98{
99 unsigned char tmp; 99 unsigned char tmp;
100 100
@@ -102,10 +102,10 @@ static void vgaHWProtect (struct savagefb_par *par, int on)
102 /* 102 /*
103 * Turn off screen and disable sequencer. 103 * Turn off screen and disable sequencer.
104 */ 104 */
105 tmp = VGArSEQ (0x01, par); 105 tmp = VGArSEQ(0x01, par);
106 106
107 vgaHWSeqReset (par, 1); /* start synchronous reset */ 107 vgaHWSeqReset(par, 1); /* start synchronous reset */
108 VGAwSEQ (0x01, tmp | 0x20, par);/* disable the display */ 108 VGAwSEQ(0x01, tmp | 0x20, par);/* disable the display */
109 109
110 VGAenablePalette(par); 110 VGAenablePalette(par);
111 } else { 111 } else {
@@ -113,75 +113,76 @@ static void vgaHWProtect (struct savagefb_par *par, int on)
113 * Reenable sequencer, then turn on screen. 113 * Reenable sequencer, then turn on screen.
114 */ 114 */
115 115
116 tmp = VGArSEQ (0x01, par); 116 tmp = VGArSEQ(0x01, par);
117 117
118 VGAwSEQ (0x01, tmp & ~0x20, par);/* reenable display */ 118 VGAwSEQ(0x01, tmp & ~0x20, par);/* reenable display */
119 vgaHWSeqReset (par, 0); /* clear synchronous reset */ 119 vgaHWSeqReset(par, 0); /* clear synchronous reset */
120 120
121 VGAdisablePalette(par); 121 VGAdisablePalette(par);
122 } 122 }
123} 123}
124 124
125static void vgaHWRestore (struct savagefb_par *par) 125static void vgaHWRestore(struct savagefb_par *par, struct savage_reg *reg)
126{ 126{
127 int i; 127 int i;
128 128
129 VGAwMISC (par->MiscOutReg, par); 129 VGAwMISC(reg->MiscOutReg, par);
130 130
131 for (i = 1; i < 5; i++) 131 for (i = 1; i < 5; i++)
132 VGAwSEQ (i, par->Sequencer[i], par); 132 VGAwSEQ(i, reg->Sequencer[i], par);
133 133
134 /* Ensure CRTC registers 0-7 are unlocked by clearing bit 7 or 134 /* Ensure CRTC registers 0-7 are unlocked by clearing bit 7 or
135 CRTC[17] */ 135 CRTC[17] */
136 VGAwCR (17, par->CRTC[17] & ~0x80, par); 136 VGAwCR(17, reg->CRTC[17] & ~0x80, par);
137 137
138 for (i = 0; i < 25; i++) 138 for (i = 0; i < 25; i++)
139 VGAwCR (i, par->CRTC[i], par); 139 VGAwCR(i, reg->CRTC[i], par);
140 140
141 for (i = 0; i < 9; i++) 141 for (i = 0; i < 9; i++)
142 VGAwGR (i, par->Graphics[i], par); 142 VGAwGR(i, reg->Graphics[i], par);
143 143
144 VGAenablePalette(par); 144 VGAenablePalette(par);
145 145
146 for (i = 0; i < 21; i++) 146 for (i = 0; i < 21; i++)
147 VGAwATTR (i, par->Attribute[i], par); 147 VGAwATTR(i, reg->Attribute[i], par);
148 148
149 VGAdisablePalette(par); 149 VGAdisablePalette(par);
150} 150}
151 151
152static void vgaHWInit (struct fb_var_screeninfo *var, 152static void vgaHWInit(struct fb_var_screeninfo *var,
153 struct savagefb_par *par, 153 struct savagefb_par *par,
154 struct xtimings *timings) 154 struct xtimings *timings,
155 struct savage_reg *reg)
155{ 156{
156 par->MiscOutReg = 0x23; 157 reg->MiscOutReg = 0x23;
157 158
158 if (!(timings->sync & FB_SYNC_HOR_HIGH_ACT)) 159 if (!(timings->sync & FB_SYNC_HOR_HIGH_ACT))
159 par->MiscOutReg |= 0x40; 160 reg->MiscOutReg |= 0x40;
160 161
161 if (!(timings->sync & FB_SYNC_VERT_HIGH_ACT)) 162 if (!(timings->sync & FB_SYNC_VERT_HIGH_ACT))
162 par->MiscOutReg |= 0x80; 163 reg->MiscOutReg |= 0x80;
163 164
164 /* 165 /*
165 * Time Sequencer 166 * Time Sequencer
166 */ 167 */
167 par->Sequencer[0x00] = 0x00; 168 reg->Sequencer[0x00] = 0x00;
168 par->Sequencer[0x01] = 0x01; 169 reg->Sequencer[0x01] = 0x01;
169 par->Sequencer[0x02] = 0x0F; 170 reg->Sequencer[0x02] = 0x0F;
170 par->Sequencer[0x03] = 0x00; /* Font select */ 171 reg->Sequencer[0x03] = 0x00; /* Font select */
171 par->Sequencer[0x04] = 0x0E; /* Misc */ 172 reg->Sequencer[0x04] = 0x0E; /* Misc */
172 173
173 /* 174 /*
174 * CRTC Controller 175 * CRTC Controller
175 */ 176 */
176 par->CRTC[0x00] = (timings->HTotal >> 3) - 5; 177 reg->CRTC[0x00] = (timings->HTotal >> 3) - 5;
177 par->CRTC[0x01] = (timings->HDisplay >> 3) - 1; 178 reg->CRTC[0x01] = (timings->HDisplay >> 3) - 1;
178 par->CRTC[0x02] = (timings->HSyncStart >> 3) - 1; 179 reg->CRTC[0x02] = (timings->HSyncStart >> 3) - 1;
179 par->CRTC[0x03] = (((timings->HSyncEnd >> 3) - 1) & 0x1f) | 0x80; 180 reg->CRTC[0x03] = (((timings->HSyncEnd >> 3) - 1) & 0x1f) | 0x80;
180 par->CRTC[0x04] = (timings->HSyncStart >> 3); 181 reg->CRTC[0x04] = (timings->HSyncStart >> 3);
181 par->CRTC[0x05] = ((((timings->HSyncEnd >> 3) - 1) & 0x20) << 2) | 182 reg->CRTC[0x05] = ((((timings->HSyncEnd >> 3) - 1) & 0x20) << 2) |
182 (((timings->HSyncEnd >> 3)) & 0x1f); 183 (((timings->HSyncEnd >> 3)) & 0x1f);
183 par->CRTC[0x06] = (timings->VTotal - 2) & 0xFF; 184 reg->CRTC[0x06] = (timings->VTotal - 2) & 0xFF;
184 par->CRTC[0x07] = (((timings->VTotal - 2) & 0x100) >> 8) | 185 reg->CRTC[0x07] = (((timings->VTotal - 2) & 0x100) >> 8) |
185 (((timings->VDisplay - 1) & 0x100) >> 7) | 186 (((timings->VDisplay - 1) & 0x100) >> 7) |
186 ((timings->VSyncStart & 0x100) >> 6) | 187 ((timings->VSyncStart & 0x100) >> 6) |
187 (((timings->VSyncStart - 1) & 0x100) >> 5) | 188 (((timings->VSyncStart - 1) & 0x100) >> 5) |
@@ -189,27 +190,27 @@ static void vgaHWInit (struct fb_var_screeninfo *var,
189 (((timings->VTotal - 2) & 0x200) >> 4) | 190 (((timings->VTotal - 2) & 0x200) >> 4) |
190 (((timings->VDisplay - 1) & 0x200) >> 3) | 191 (((timings->VDisplay - 1) & 0x200) >> 3) |
191 ((timings->VSyncStart & 0x200) >> 2); 192 ((timings->VSyncStart & 0x200) >> 2);
192 par->CRTC[0x08] = 0x00; 193 reg->CRTC[0x08] = 0x00;
193 par->CRTC[0x09] = (((timings->VSyncStart - 1) & 0x200) >> 4) | 0x40; 194 reg->CRTC[0x09] = (((timings->VSyncStart - 1) & 0x200) >> 4) | 0x40;
194 195
195 if (timings->dblscan) 196 if (timings->dblscan)
196 par->CRTC[0x09] |= 0x80; 197 reg->CRTC[0x09] |= 0x80;
197 198
198 par->CRTC[0x0a] = 0x00; 199 reg->CRTC[0x0a] = 0x00;
199 par->CRTC[0x0b] = 0x00; 200 reg->CRTC[0x0b] = 0x00;
200 par->CRTC[0x0c] = 0x00; 201 reg->CRTC[0x0c] = 0x00;
201 par->CRTC[0x0d] = 0x00; 202 reg->CRTC[0x0d] = 0x00;
202 par->CRTC[0x0e] = 0x00; 203 reg->CRTC[0x0e] = 0x00;
203 par->CRTC[0x0f] = 0x00; 204 reg->CRTC[0x0f] = 0x00;
204 par->CRTC[0x10] = timings->VSyncStart & 0xff; 205 reg->CRTC[0x10] = timings->VSyncStart & 0xff;
205 par->CRTC[0x11] = (timings->VSyncEnd & 0x0f) | 0x20; 206 reg->CRTC[0x11] = (timings->VSyncEnd & 0x0f) | 0x20;
206 par->CRTC[0x12] = (timings->VDisplay - 1) & 0xff; 207 reg->CRTC[0x12] = (timings->VDisplay - 1) & 0xff;
207 par->CRTC[0x13] = var->xres_virtual >> 4; 208 reg->CRTC[0x13] = var->xres_virtual >> 4;
208 par->CRTC[0x14] = 0x00; 209 reg->CRTC[0x14] = 0x00;
209 par->CRTC[0x15] = (timings->VSyncStart - 1) & 0xff; 210 reg->CRTC[0x15] = (timings->VSyncStart - 1) & 0xff;
210 par->CRTC[0x16] = (timings->VSyncEnd - 1) & 0xff; 211 reg->CRTC[0x16] = (timings->VSyncEnd - 1) & 0xff;
211 par->CRTC[0x17] = 0xc3; 212 reg->CRTC[0x17] = 0xc3;
212 par->CRTC[0x18] = 0xff; 213 reg->CRTC[0x18] = 0xff;
213 214
214 /* 215 /*
215 * are these unnecessary? 216 * are these unnecessary?
@@ -220,38 +221,38 @@ static void vgaHWInit (struct fb_var_screeninfo *var,
220 /* 221 /*
221 * Graphics Display Controller 222 * Graphics Display Controller
222 */ 223 */
223 par->Graphics[0x00] = 0x00; 224 reg->Graphics[0x00] = 0x00;
224 par->Graphics[0x01] = 0x00; 225 reg->Graphics[0x01] = 0x00;
225 par->Graphics[0x02] = 0x00; 226 reg->Graphics[0x02] = 0x00;
226 par->Graphics[0x03] = 0x00; 227 reg->Graphics[0x03] = 0x00;
227 par->Graphics[0x04] = 0x00; 228 reg->Graphics[0x04] = 0x00;
228 par->Graphics[0x05] = 0x40; 229 reg->Graphics[0x05] = 0x40;
229 par->Graphics[0x06] = 0x05; /* only map 64k VGA memory !!!! */ 230 reg->Graphics[0x06] = 0x05; /* only map 64k VGA memory !!!! */
230 par->Graphics[0x07] = 0x0F; 231 reg->Graphics[0x07] = 0x0F;
231 par->Graphics[0x08] = 0xFF; 232 reg->Graphics[0x08] = 0xFF;
232 233
233 234
234 par->Attribute[0x00] = 0x00; /* standard colormap translation */ 235 reg->Attribute[0x00] = 0x00; /* standard colormap translation */
235 par->Attribute[0x01] = 0x01; 236 reg->Attribute[0x01] = 0x01;
236 par->Attribute[0x02] = 0x02; 237 reg->Attribute[0x02] = 0x02;
237 par->Attribute[0x03] = 0x03; 238 reg->Attribute[0x03] = 0x03;
238 par->Attribute[0x04] = 0x04; 239 reg->Attribute[0x04] = 0x04;
239 par->Attribute[0x05] = 0x05; 240 reg->Attribute[0x05] = 0x05;
240 par->Attribute[0x06] = 0x06; 241 reg->Attribute[0x06] = 0x06;
241 par->Attribute[0x07] = 0x07; 242 reg->Attribute[0x07] = 0x07;
242 par->Attribute[0x08] = 0x08; 243 reg->Attribute[0x08] = 0x08;
243 par->Attribute[0x09] = 0x09; 244 reg->Attribute[0x09] = 0x09;
244 par->Attribute[0x0a] = 0x0A; 245 reg->Attribute[0x0a] = 0x0A;
245 par->Attribute[0x0b] = 0x0B; 246 reg->Attribute[0x0b] = 0x0B;
246 par->Attribute[0x0c] = 0x0C; 247 reg->Attribute[0x0c] = 0x0C;
247 par->Attribute[0x0d] = 0x0D; 248 reg->Attribute[0x0d] = 0x0D;
248 par->Attribute[0x0e] = 0x0E; 249 reg->Attribute[0x0e] = 0x0E;
249 par->Attribute[0x0f] = 0x0F; 250 reg->Attribute[0x0f] = 0x0F;
250 par->Attribute[0x10] = 0x41; 251 reg->Attribute[0x10] = 0x41;
251 par->Attribute[0x11] = 0xFF; 252 reg->Attribute[0x11] = 0xFF;
252 par->Attribute[0x12] = 0x0F; 253 reg->Attribute[0x12] = 0x0F;
253 par->Attribute[0x13] = 0x00; 254 reg->Attribute[0x13] = 0x00;
254 par->Attribute[0x14] = 0x00; 255 reg->Attribute[0x14] = 0x00;
255} 256}
256 257
257/* -------------------- Hardware specific routines ------------------------- */ 258/* -------------------- Hardware specific routines ------------------------- */
@@ -304,15 +305,15 @@ savage2000_waitidle(struct savagefb_par *par)
304 while ((savage_in32(0x48C60, par) & 0x009fffff)); 305 while ((savage_in32(0x48C60, par) & 0x009fffff));
305} 306}
306 307
307 308#ifdef CONFIG_FB_SAVAGE_ACCEL
308static void 309static void
309SavageSetup2DEngine (struct savagefb_par *par) 310SavageSetup2DEngine(struct savagefb_par *par)
310{ 311{
311 unsigned long GlobalBitmapDescriptor; 312 unsigned long GlobalBitmapDescriptor;
312 313
313 GlobalBitmapDescriptor = 1 | 8 | BCI_BD_BW_DISABLE; 314 GlobalBitmapDescriptor = 1 | 8 | BCI_BD_BW_DISABLE;
314 BCI_BD_SET_BPP (GlobalBitmapDescriptor, par->depth); 315 BCI_BD_SET_BPP(GlobalBitmapDescriptor, par->depth);
315 BCI_BD_SET_STRIDE (GlobalBitmapDescriptor, par->vwidth); 316 BCI_BD_SET_STRIDE(GlobalBitmapDescriptor, par->vwidth);
316 317
317 switch(par->chip) { 318 switch(par->chip) {
318 case S3_SAVAGE3D: 319 case S3_SAVAGE3D:
@@ -361,32 +362,48 @@ SavageSetup2DEngine (struct savagefb_par *par)
361 vga_out8(0x3d5, 0x0c, par); 362 vga_out8(0x3d5, 0x0c, par);
362 363
363 /* Set stride to use GBD. */ 364 /* Set stride to use GBD. */
364 vga_out8 (0x3d4, 0x50, par); 365 vga_out8(0x3d4, 0x50, par);
365 vga_out8 (0x3d5, vga_in8(0x3d5, par) | 0xC1, par); 366 vga_out8(0x3d5, vga_in8(0x3d5, par) | 0xC1, par);
366 367
367 /* Enable 2D engine. */ 368 /* Enable 2D engine. */
368 vga_out8 (0x3d4, 0x40, par); 369 vga_out8(0x3d4, 0x40, par);
369 vga_out8 (0x3d5, 0x01, par); 370 vga_out8(0x3d5, 0x01, par);
370 371
371 savage_out32 (MONO_PAT_0, ~0, par); 372 savage_out32(MONO_PAT_0, ~0, par);
372 savage_out32 (MONO_PAT_1, ~0, par); 373 savage_out32(MONO_PAT_1, ~0, par);
373 374
374 /* Setup plane masks */ 375 /* Setup plane masks */
375 savage_out32 (0x8128, ~0, par); /* enable all write planes */ 376 savage_out32(0x8128, ~0, par); /* enable all write planes */
376 savage_out32 (0x812C, ~0, par); /* enable all read planes */ 377 savage_out32(0x812C, ~0, par); /* enable all read planes */
377 savage_out16 (0x8134, 0x27, par); 378 savage_out16(0x8134, 0x27, par);
378 savage_out16 (0x8136, 0x07, par); 379 savage_out16(0x8136, 0x07, par);
379 380
380 /* Now set the GBD */ 381 /* Now set the GBD */
381 par->bci_ptr = 0; 382 par->bci_ptr = 0;
382 par->SavageWaitFifo (par, 4); 383 par->SavageWaitFifo(par, 4);
383 384
384 BCI_SEND( BCI_CMD_SETREG | (1 << 16) | BCI_GBD1 ); 385 BCI_SEND(BCI_CMD_SETREG | (1 << 16) | BCI_GBD1);
385 BCI_SEND( 0 ); 386 BCI_SEND(0);
386 BCI_SEND( BCI_CMD_SETREG | (1 << 16) | BCI_GBD2 ); 387 BCI_SEND(BCI_CMD_SETREG | (1 << 16) | BCI_GBD2);
387 BCI_SEND( GlobalBitmapDescriptor ); 388 BCI_SEND(GlobalBitmapDescriptor);
388} 389}
389 390
391static void savagefb_set_clip(struct fb_info *info)
392{
393 struct savagefb_par *par = info->par;
394 int cmd;
395
396 cmd = BCI_CMD_NOP | BCI_CMD_CLIP_NEW;
397 par->bci_ptr = 0;
398 par->SavageWaitFifo(par,3);
399 BCI_SEND(cmd);
400 BCI_SEND(BCI_CLIP_TL(0, 0));
401 BCI_SEND(BCI_CLIP_BR(0xfff, 0xfff));
402}
403#else
404static void SavageSetup2DEngine(struct savagefb_par *par) {}
405
406#endif
390 407
391static void SavageCalcClock(long freq, int min_m, int min_n1, int max_n1, 408static void SavageCalcClock(long freq, int min_m, int min_n1, int max_n1,
392 int min_n2, int max_n2, long freq_min, 409 int min_n2, int max_n2, long freq_min,
@@ -398,11 +415,11 @@ static void SavageCalcClock(long freq, int min_m, int min_n1, int max_n1,
398 unsigned char n1, n2, best_n1=16+2, best_n2=2, best_m=125+2; 415 unsigned char n1, n2, best_n1=16+2, best_n2=2, best_m=125+2;
399 416
400 if (freq < freq_min / (1 << max_n2)) { 417 if (freq < freq_min / (1 << max_n2)) {
401 printk (KERN_ERR "invalid frequency %ld Khz\n", freq); 418 printk(KERN_ERR "invalid frequency %ld Khz\n", freq);
402 freq = freq_min / (1 << max_n2); 419 freq = freq_min / (1 << max_n2);
403 } 420 }
404 if (freq > freq_max / (1 << min_n2)) { 421 if (freq > freq_max / (1 << min_n2)) {
405 printk (KERN_ERR "invalid frequency %ld Khz\n", freq); 422 printk(KERN_ERR "invalid frequency %ld Khz\n", freq);
406 freq = freq_max / (1 << min_n2); 423 freq = freq_max / (1 << min_n2);
407 } 424 }
408 425
@@ -453,12 +470,12 @@ static int common_calc_clock(long freq, int min_m, int min_n1, int max_n1,
453 BASE_FREQ; 470 BASE_FREQ;
454 if (m < min_m + 2 || m > 127+2) 471 if (m < min_m + 2 || m > 127+2)
455 continue; 472 continue;
456 if((m * BASE_FREQ >= freq_min * n1) && 473 if ((m * BASE_FREQ >= freq_min * n1) &&
457 (m * BASE_FREQ <= freq_max * n1)) { 474 (m * BASE_FREQ <= freq_max * n1)) {
458 diff = freq * (1 << n2) * n1 - BASE_FREQ * m; 475 diff = freq * (1 << n2) * n1 - BASE_FREQ * m;
459 if(diff < 0) 476 if (diff < 0)
460 diff = -diff; 477 diff = -diff;
461 if(diff < best_diff) { 478 if (diff < best_diff) {
462 best_diff = diff; 479 best_diff = diff;
463 best_m = m; 480 best_m = m;
464 best_n1 = n1; 481 best_n1 = n1;
@@ -468,7 +485,7 @@ static int common_calc_clock(long freq, int min_m, int min_n1, int max_n1,
468 } 485 }
469 } 486 }
470 487
471 if(max_n1 == 63) 488 if (max_n1 == 63)
472 *ndiv = (best_n1 - 2) | (best_n2 << 6); 489 *ndiv = (best_n1 - 2) | (best_n2 << 6);
473 else 490 else
474 *ndiv = (best_n1 - 2) | (best_n2 << 5); 491 *ndiv = (best_n1 - 2) | (best_n2 << 5);
@@ -488,23 +505,23 @@ static void SavagePrintRegs(void)
488 int vgaCRReg = 0x3d5; 505 int vgaCRReg = 0x3d5;
489 506
490 printk(KERN_DEBUG "SR x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE " 507 printk(KERN_DEBUG "SR x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE "
491 "xF" ); 508 "xF");
492 509
493 for( i = 0; i < 0x70; i++ ) { 510 for (i = 0; i < 0x70; i++) {
494 if( !(i % 16) ) 511 if (!(i % 16))
495 printk(KERN_DEBUG "\nSR%xx ", i >> 4 ); 512 printk(KERN_DEBUG "\nSR%xx ", i >> 4);
496 vga_out8( 0x3c4, i, par); 513 vga_out8(0x3c4, i, par);
497 printk(KERN_DEBUG " %02x", vga_in8(0x3c5, par) ); 514 printk(KERN_DEBUG " %02x", vga_in8(0x3c5, par));
498 } 515 }
499 516
500 printk(KERN_DEBUG "\n\nCR x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC " 517 printk(KERN_DEBUG "\n\nCR x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC "
501 "xD xE xF" ); 518 "xD xE xF");
502 519
503 for( i = 0; i < 0xB7; i++ ) { 520 for (i = 0; i < 0xB7; i++) {
504 if( !(i % 16) ) 521 if (!(i % 16))
505 printk(KERN_DEBUG "\nCR%xx ", i >> 4 ); 522 printk(KERN_DEBUG "\nCR%xx ", i >> 4);
506 vga_out8( vgaCRIndex, i, par); 523 vga_out8(vgaCRIndex, i, par);
507 printk(KERN_DEBUG " %02x", vga_in8(vgaCRReg, par) ); 524 printk(KERN_DEBUG " %02x", vga_in8(vgaCRReg, par));
508 } 525 }
509 526
510 printk(KERN_DEBUG "\n\n"); 527 printk(KERN_DEBUG "\n\n");
@@ -513,156 +530,309 @@ static void SavagePrintRegs(void)
513 530
514/* --------------------------------------------------------------------- */ 531/* --------------------------------------------------------------------- */
515 532
516static void savage_get_default_par(struct savagefb_par *par) 533static void savage_get_default_par(struct savagefb_par *par, struct savage_reg *reg)
517{ 534{
518 unsigned char cr3a, cr53, cr66; 535 unsigned char cr3a, cr53, cr66;
519 536
520 vga_out16 (0x3d4, 0x4838, par); 537 vga_out16(0x3d4, 0x4838, par);
521 vga_out16 (0x3d4, 0xa039, par); 538 vga_out16(0x3d4, 0xa039, par);
522 vga_out16 (0x3c4, 0x0608, par); 539 vga_out16(0x3c4, 0x0608, par);
523 540
524 vga_out8 (0x3d4, 0x66, par); 541 vga_out8(0x3d4, 0x66, par);
525 cr66 = vga_in8 (0x3d5, par); 542 cr66 = vga_in8(0x3d5, par);
526 vga_out8 (0x3d5, cr66 | 0x80, par); 543 vga_out8(0x3d5, cr66 | 0x80, par);
527 vga_out8 (0x3d4, 0x3a, par); 544 vga_out8(0x3d4, 0x3a, par);
528 cr3a = vga_in8 (0x3d5, par); 545 cr3a = vga_in8(0x3d5, par);
529 vga_out8 (0x3d5, cr3a | 0x80, par); 546 vga_out8(0x3d5, cr3a | 0x80, par);
530 vga_out8 (0x3d4, 0x53, par); 547 vga_out8(0x3d4, 0x53, par);
531 cr53 = vga_in8 (0x3d5, par); 548 cr53 = vga_in8(0x3d5, par);
532 vga_out8 (0x3d5, cr53 & 0x7f, par); 549 vga_out8(0x3d5, cr53 & 0x7f, par);
533 550
534 vga_out8 (0x3d4, 0x66, par); 551 vga_out8(0x3d4, 0x66, par);
535 vga_out8 (0x3d5, cr66, par); 552 vga_out8(0x3d5, cr66, par);
536 vga_out8 (0x3d4, 0x3a, par); 553 vga_out8(0x3d4, 0x3a, par);
537 vga_out8 (0x3d5, cr3a, par); 554 vga_out8(0x3d5, cr3a, par);
538 555
539 vga_out8 (0x3d4, 0x66, par); 556 vga_out8(0x3d4, 0x66, par);
540 vga_out8 (0x3d5, cr66, par); 557 vga_out8(0x3d5, cr66, par);
541 vga_out8 (0x3d4, 0x3a, par); 558 vga_out8(0x3d4, 0x3a, par);
542 vga_out8 (0x3d5, cr3a, par); 559 vga_out8(0x3d5, cr3a, par);
543 560
544 /* unlock extended seq regs */ 561 /* unlock extended seq regs */
545 vga_out8 (0x3c4, 0x08, par); 562 vga_out8(0x3c4, 0x08, par);
546 par->SR08 = vga_in8 (0x3c5, par); 563 reg->SR08 = vga_in8(0x3c5, par);
547 vga_out8 (0x3c5, 0x06, par); 564 vga_out8(0x3c5, 0x06, par);
548 565
549 /* now save all the extended regs we need */ 566 /* now save all the extended regs we need */
550 vga_out8 (0x3d4, 0x31, par); 567 vga_out8(0x3d4, 0x31, par);
551 par->CR31 = vga_in8 (0x3d5, par); 568 reg->CR31 = vga_in8(0x3d5, par);
552 vga_out8 (0x3d4, 0x32, par); 569 vga_out8(0x3d4, 0x32, par);
553 par->CR32 = vga_in8 (0x3d5, par); 570 reg->CR32 = vga_in8(0x3d5, par);
554 vga_out8 (0x3d4, 0x34, par); 571 vga_out8(0x3d4, 0x34, par);
555 par->CR34 = vga_in8 (0x3d5, par); 572 reg->CR34 = vga_in8(0x3d5, par);
556 vga_out8 (0x3d4, 0x36, par); 573 vga_out8(0x3d4, 0x36, par);
557 par->CR36 = vga_in8 (0x3d5, par); 574 reg->CR36 = vga_in8(0x3d5, par);
558 vga_out8 (0x3d4, 0x3a, par); 575 vga_out8(0x3d4, 0x3a, par);
559 par->CR3A = vga_in8 (0x3d5, par); 576 reg->CR3A = vga_in8(0x3d5, par);
560 vga_out8 (0x3d4, 0x40, par); 577 vga_out8(0x3d4, 0x40, par);
561 par->CR40 = vga_in8 (0x3d5, par); 578 reg->CR40 = vga_in8(0x3d5, par);
562 vga_out8 (0x3d4, 0x42, par); 579 vga_out8(0x3d4, 0x42, par);
563 par->CR42 = vga_in8 (0x3d5, par); 580 reg->CR42 = vga_in8(0x3d5, par);
564 vga_out8 (0x3d4, 0x45, par); 581 vga_out8(0x3d4, 0x45, par);
565 par->CR45 = vga_in8 (0x3d5, par); 582 reg->CR45 = vga_in8(0x3d5, par);
566 vga_out8 (0x3d4, 0x50, par); 583 vga_out8(0x3d4, 0x50, par);
567 par->CR50 = vga_in8 (0x3d5, par); 584 reg->CR50 = vga_in8(0x3d5, par);
568 vga_out8 (0x3d4, 0x51, par); 585 vga_out8(0x3d4, 0x51, par);
569 par->CR51 = vga_in8 (0x3d5, par); 586 reg->CR51 = vga_in8(0x3d5, par);
570 vga_out8 (0x3d4, 0x53, par); 587 vga_out8(0x3d4, 0x53, par);
571 par->CR53 = vga_in8 (0x3d5, par); 588 reg->CR53 = vga_in8(0x3d5, par);
572 vga_out8 (0x3d4, 0x58, par); 589 vga_out8(0x3d4, 0x58, par);
573 par->CR58 = vga_in8 (0x3d5, par); 590 reg->CR58 = vga_in8(0x3d5, par);
574 vga_out8 (0x3d4, 0x60, par); 591 vga_out8(0x3d4, 0x60, par);
575 par->CR60 = vga_in8 (0x3d5, par); 592 reg->CR60 = vga_in8(0x3d5, par);
576 vga_out8 (0x3d4, 0x66, par); 593 vga_out8(0x3d4, 0x66, par);
577 par->CR66 = vga_in8 (0x3d5, par); 594 reg->CR66 = vga_in8(0x3d5, par);
578 vga_out8 (0x3d4, 0x67, par); 595 vga_out8(0x3d4, 0x67, par);
579 par->CR67 = vga_in8 (0x3d5, par); 596 reg->CR67 = vga_in8(0x3d5, par);
580 vga_out8 (0x3d4, 0x68, par); 597 vga_out8(0x3d4, 0x68, par);
581 par->CR68 = vga_in8 (0x3d5, par); 598 reg->CR68 = vga_in8(0x3d5, par);
582 vga_out8 (0x3d4, 0x69, par); 599 vga_out8(0x3d4, 0x69, par);
583 par->CR69 = vga_in8 (0x3d5, par); 600 reg->CR69 = vga_in8(0x3d5, par);
584 vga_out8 (0x3d4, 0x6f, par); 601 vga_out8(0x3d4, 0x6f, par);
585 par->CR6F = vga_in8 (0x3d5, par); 602 reg->CR6F = vga_in8(0x3d5, par);
586 603
587 vga_out8 (0x3d4, 0x33, par); 604 vga_out8(0x3d4, 0x33, par);
588 par->CR33 = vga_in8 (0x3d5, par); 605 reg->CR33 = vga_in8(0x3d5, par);
589 vga_out8 (0x3d4, 0x86, par); 606 vga_out8(0x3d4, 0x86, par);
590 par->CR86 = vga_in8 (0x3d5, par); 607 reg->CR86 = vga_in8(0x3d5, par);
591 vga_out8 (0x3d4, 0x88, par); 608 vga_out8(0x3d4, 0x88, par);
592 par->CR88 = vga_in8 (0x3d5, par); 609 reg->CR88 = vga_in8(0x3d5, par);
593 vga_out8 (0x3d4, 0x90, par); 610 vga_out8(0x3d4, 0x90, par);
594 par->CR90 = vga_in8 (0x3d5, par); 611 reg->CR90 = vga_in8(0x3d5, par);
595 vga_out8 (0x3d4, 0x91, par); 612 vga_out8(0x3d4, 0x91, par);
596 par->CR91 = vga_in8 (0x3d5, par); 613 reg->CR91 = vga_in8(0x3d5, par);
597 vga_out8 (0x3d4, 0xb0, par); 614 vga_out8(0x3d4, 0xb0, par);
598 par->CRB0 = vga_in8 (0x3d5, par) | 0x80; 615 reg->CRB0 = vga_in8(0x3d5, par) | 0x80;
616
617 /* extended mode timing regs */
618 vga_out8(0x3d4, 0x3b, par);
619 reg->CR3B = vga_in8(0x3d5, par);
620 vga_out8(0x3d4, 0x3c, par);
621 reg->CR3C = vga_in8(0x3d5, par);
622 vga_out8(0x3d4, 0x43, par);
623 reg->CR43 = vga_in8(0x3d5, par);
624 vga_out8(0x3d4, 0x5d, par);
625 reg->CR5D = vga_in8(0x3d5, par);
626 vga_out8(0x3d4, 0x5e, par);
627 reg->CR5E = vga_in8(0x3d5, par);
628 vga_out8(0x3d4, 0x65, par);
629 reg->CR65 = vga_in8(0x3d5, par);
630
631 /* save seq extended regs for DCLK PLL programming */
632 vga_out8(0x3c4, 0x0e, par);
633 reg->SR0E = vga_in8(0x3c5, par);
634 vga_out8(0x3c4, 0x0f, par);
635 reg->SR0F = vga_in8(0x3c5, par);
636 vga_out8(0x3c4, 0x10, par);
637 reg->SR10 = vga_in8(0x3c5, par);
638 vga_out8(0x3c4, 0x11, par);
639 reg->SR11 = vga_in8(0x3c5, par);
640 vga_out8(0x3c4, 0x12, par);
641 reg->SR12 = vga_in8(0x3c5, par);
642 vga_out8(0x3c4, 0x13, par);
643 reg->SR13 = vga_in8(0x3c5, par);
644 vga_out8(0x3c4, 0x29, par);
645 reg->SR29 = vga_in8(0x3c5, par);
646
647 vga_out8(0x3c4, 0x15, par);
648 reg->SR15 = vga_in8(0x3c5, par);
649 vga_out8(0x3c4, 0x30, par);
650 reg->SR30 = vga_in8(0x3c5, par);
651 vga_out8(0x3c4, 0x18, par);
652 reg->SR18 = vga_in8(0x3c5, par);
653
654 /* Save flat panel expansion regsters. */
655 if (par->chip == S3_SAVAGE_MX) {
656 int i;
657
658 for (i = 0; i < 8; i++) {
659 vga_out8(0x3c4, 0x54+i, par);
660 reg->SR54[i] = vga_in8(0x3c5, par);
661 }
662 }
663
664 vga_out8(0x3d4, 0x66, par);
665 cr66 = vga_in8(0x3d5, par);
666 vga_out8(0x3d5, cr66 | 0x80, par);
667 vga_out8(0x3d4, 0x3a, par);
668 cr3a = vga_in8(0x3d5, par);
669 vga_out8(0x3d5, cr3a | 0x80, par);
670
671 /* now save MIU regs */
672 if (par->chip != S3_SAVAGE_MX) {
673 reg->MMPR0 = savage_in32(FIFO_CONTROL_REG, par);
674 reg->MMPR1 = savage_in32(MIU_CONTROL_REG, par);
675 reg->MMPR2 = savage_in32(STREAMS_TIMEOUT_REG, par);
676 reg->MMPR3 = savage_in32(MISC_TIMEOUT_REG, par);
677 }
678
679 vga_out8(0x3d4, 0x3a, par);
680 vga_out8(0x3d5, cr3a, par);
681 vga_out8(0x3d4, 0x66, par);
682 vga_out8(0x3d5, cr66, par);
683}
684
685static void savage_set_default_par(struct savagefb_par *par,
686 struct savage_reg *reg)
687{
688 unsigned char cr3a, cr53, cr66;
689
690 vga_out16(0x3d4, 0x4838, par);
691 vga_out16(0x3d4, 0xa039, par);
692 vga_out16(0x3c4, 0x0608, par);
693
694 vga_out8(0x3d4, 0x66, par);
695 cr66 = vga_in8(0x3d5, par);
696 vga_out8(0x3d5, cr66 | 0x80, par);
697 vga_out8(0x3d4, 0x3a, par);
698 cr3a = vga_in8(0x3d5, par);
699 vga_out8(0x3d5, cr3a | 0x80, par);
700 vga_out8(0x3d4, 0x53, par);
701 cr53 = vga_in8(0x3d5, par);
702 vga_out8(0x3d5, cr53 & 0x7f, par);
703
704 vga_out8(0x3d4, 0x66, par);
705 vga_out8(0x3d5, cr66, par);
706 vga_out8(0x3d4, 0x3a, par);
707 vga_out8(0x3d5, cr3a, par);
708
709 vga_out8(0x3d4, 0x66, par);
710 vga_out8(0x3d5, cr66, par);
711 vga_out8(0x3d4, 0x3a, par);
712 vga_out8(0x3d5, cr3a, par);
713
714 /* unlock extended seq regs */
715 vga_out8(0x3c4, 0x08, par);
716 vga_out8(0x3c5, reg->SR08, par);
717 vga_out8(0x3c5, 0x06, par);
718
719 /* now restore all the extended regs we need */
720 vga_out8(0x3d4, 0x31, par);
721 vga_out8(0x3d5, reg->CR31, par);
722 vga_out8(0x3d4, 0x32, par);
723 vga_out8(0x3d5, reg->CR32, par);
724 vga_out8(0x3d4, 0x34, par);
725 vga_out8(0x3d5, reg->CR34, par);
726 vga_out8(0x3d4, 0x36, par);
727 vga_out8(0x3d5,reg->CR36, par);
728 vga_out8(0x3d4, 0x3a, par);
729 vga_out8(0x3d5, reg->CR3A, par);
730 vga_out8(0x3d4, 0x40, par);
731 vga_out8(0x3d5, reg->CR40, par);
732 vga_out8(0x3d4, 0x42, par);
733 vga_out8(0x3d5, reg->CR42, par);
734 vga_out8(0x3d4, 0x45, par);
735 vga_out8(0x3d5, reg->CR45, par);
736 vga_out8(0x3d4, 0x50, par);
737 vga_out8(0x3d5, reg->CR50, par);
738 vga_out8(0x3d4, 0x51, par);
739 vga_out8(0x3d5, reg->CR51, par);
740 vga_out8(0x3d4, 0x53, par);
741 vga_out8(0x3d5, reg->CR53, par);
742 vga_out8(0x3d4, 0x58, par);
743 vga_out8(0x3d5, reg->CR58, par);
744 vga_out8(0x3d4, 0x60, par);
745 vga_out8(0x3d5, reg->CR60, par);
746 vga_out8(0x3d4, 0x66, par);
747 vga_out8(0x3d5, reg->CR66, par);
748 vga_out8(0x3d4, 0x67, par);
749 vga_out8(0x3d5, reg->CR67, par);
750 vga_out8(0x3d4, 0x68, par);
751 vga_out8(0x3d5, reg->CR68, par);
752 vga_out8(0x3d4, 0x69, par);
753 vga_out8(0x3d5, reg->CR69, par);
754 vga_out8(0x3d4, 0x6f, par);
755 vga_out8(0x3d5, reg->CR6F, par);
756
757 vga_out8(0x3d4, 0x33, par);
758 vga_out8(0x3d5, reg->CR33, par);
759 vga_out8(0x3d4, 0x86, par);
760 vga_out8(0x3d5, reg->CR86, par);
761 vga_out8(0x3d4, 0x88, par);
762 vga_out8(0x3d5, reg->CR88, par);
763 vga_out8(0x3d4, 0x90, par);
764 vga_out8(0x3d5, reg->CR90, par);
765 vga_out8(0x3d4, 0x91, par);
766 vga_out8(0x3d5, reg->CR91, par);
767 vga_out8(0x3d4, 0xb0, par);
768 vga_out8(0x3d5, reg->CRB0, par);
599 769
600 /* extended mode timing regs */ 770 /* extended mode timing regs */
601 vga_out8 (0x3d4, 0x3b, par); 771 vga_out8(0x3d4, 0x3b, par);
602 par->CR3B = vga_in8 (0x3d5, par); 772 vga_out8(0x3d5, reg->CR3B, par);
603 vga_out8 (0x3d4, 0x3c, par); 773 vga_out8(0x3d4, 0x3c, par);
604 par->CR3C = vga_in8 (0x3d5, par); 774 vga_out8(0x3d5, reg->CR3C, par);
605 vga_out8 (0x3d4, 0x43, par); 775 vga_out8(0x3d4, 0x43, par);
606 par->CR43 = vga_in8 (0x3d5, par); 776 vga_out8(0x3d5, reg->CR43, par);
607 vga_out8 (0x3d4, 0x5d, par); 777 vga_out8(0x3d4, 0x5d, par);
608 par->CR5D = vga_in8 (0x3d5, par); 778 vga_out8(0x3d5, reg->CR5D, par);
609 vga_out8 (0x3d4, 0x5e, par); 779 vga_out8(0x3d4, 0x5e, par);
610 par->CR5E = vga_in8 (0x3d5, par); 780 vga_out8(0x3d5, reg->CR5E, par);
611 vga_out8 (0x3d4, 0x65, par); 781 vga_out8(0x3d4, 0x65, par);
612 par->CR65 = vga_in8 (0x3d5, par); 782 vga_out8(0x3d5, reg->CR65, par);
613 783
614 /* save seq extended regs for DCLK PLL programming */ 784 /* save seq extended regs for DCLK PLL programming */
615 vga_out8 (0x3c4, 0x0e, par); 785 vga_out8(0x3c4, 0x0e, par);
616 par->SR0E = vga_in8 (0x3c5, par); 786 vga_out8(0x3c5, reg->SR0E, par);
617 vga_out8 (0x3c4, 0x0f, par); 787 vga_out8(0x3c4, 0x0f, par);
618 par->SR0F = vga_in8 (0x3c5, par); 788 vga_out8(0x3c5, reg->SR0F, par);
619 vga_out8 (0x3c4, 0x10, par); 789 vga_out8(0x3c4, 0x10, par);
620 par->SR10 = vga_in8 (0x3c5, par); 790 vga_out8(0x3c5, reg->SR10, par);
621 vga_out8 (0x3c4, 0x11, par); 791 vga_out8(0x3c4, 0x11, par);
622 par->SR11 = vga_in8 (0x3c5, par); 792 vga_out8(0x3c5, reg->SR11, par);
623 vga_out8 (0x3c4, 0x12, par); 793 vga_out8(0x3c4, 0x12, par);
624 par->SR12 = vga_in8 (0x3c5, par); 794 vga_out8(0x3c5, reg->SR12, par);
625 vga_out8 (0x3c4, 0x13, par); 795 vga_out8(0x3c4, 0x13, par);
626 par->SR13 = vga_in8 (0x3c5, par); 796 vga_out8(0x3c5, reg->SR13, par);
627 vga_out8 (0x3c4, 0x29, par); 797 vga_out8(0x3c4, 0x29, par);
628 par->SR29 = vga_in8 (0x3c5, par); 798 vga_out8(0x3c5, reg->SR29, par);
629 799
630 vga_out8 (0x3c4, 0x15, par); 800 vga_out8(0x3c4, 0x15, par);
631 par->SR15 = vga_in8 (0x3c5, par); 801 vga_out8(0x3c5, reg->SR15, par);
632 vga_out8 (0x3c4, 0x30, par); 802 vga_out8(0x3c4, 0x30, par);
633 par->SR30 = vga_in8 (0x3c5, par); 803 vga_out8(0x3c5, reg->SR30, par);
634 vga_out8 (0x3c4, 0x18, par); 804 vga_out8(0x3c4, 0x18, par);
635 par->SR18 = vga_in8 (0x3c5, par); 805 vga_out8(0x3c5, reg->SR18, par);
636 806
637 /* Save flat panel expansion regsters. */ 807 /* Save flat panel expansion regsters. */
638 if (par->chip == S3_SAVAGE_MX) { 808 if (par->chip == S3_SAVAGE_MX) {
639 int i; 809 int i;
640 810
641 for (i = 0; i < 8; i++) { 811 for (i = 0; i < 8; i++) {
642 vga_out8 (0x3c4, 0x54+i, par); 812 vga_out8(0x3c4, 0x54+i, par);
643 par->SR54[i] = vga_in8 (0x3c5, par); 813 vga_out8(0x3c5, reg->SR54[i], par);
644 } 814 }
645 } 815 }
646 816
647 vga_out8 (0x3d4, 0x66, par); 817 vga_out8(0x3d4, 0x66, par);
648 cr66 = vga_in8 (0x3d5, par); 818 cr66 = vga_in8(0x3d5, par);
649 vga_out8 (0x3d5, cr66 | 0x80, par); 819 vga_out8(0x3d5, cr66 | 0x80, par);
650 vga_out8 (0x3d4, 0x3a, par); 820 vga_out8(0x3d4, 0x3a, par);
651 cr3a = vga_in8 (0x3d5, par); 821 cr3a = vga_in8(0x3d5, par);
652 vga_out8 (0x3d5, cr3a | 0x80, par); 822 vga_out8(0x3d5, cr3a | 0x80, par);
653 823
654 /* now save MIU regs */ 824 /* now save MIU regs */
655 if (par->chip != S3_SAVAGE_MX) { 825 if (par->chip != S3_SAVAGE_MX) {
656 par->MMPR0 = savage_in32(FIFO_CONTROL_REG, par); 826 savage_out32(FIFO_CONTROL_REG, reg->MMPR0, par);
657 par->MMPR1 = savage_in32(MIU_CONTROL_REG, par); 827 savage_out32(MIU_CONTROL_REG, reg->MMPR1, par);
658 par->MMPR2 = savage_in32(STREAMS_TIMEOUT_REG, par); 828 savage_out32(STREAMS_TIMEOUT_REG, reg->MMPR2, par);
659 par->MMPR3 = savage_in32(MISC_TIMEOUT_REG, par); 829 savage_out32(MISC_TIMEOUT_REG, reg->MMPR3, par);
660 } 830 }
661 831
662 vga_out8 (0x3d4, 0x3a, par); 832 vga_out8(0x3d4, 0x3a, par);
663 vga_out8 (0x3d5, cr3a, par); 833 vga_out8(0x3d5, cr3a, par);
664 vga_out8 (0x3d4, 0x66, par); 834 vga_out8(0x3d4, 0x66, par);
665 vga_out8 (0x3d5, cr66, par); 835 vga_out8(0x3d5, cr66, par);
666} 836}
667 837
668static void savage_update_var(struct fb_var_screeninfo *var, struct fb_videomode *modedb) 838static void savage_update_var(struct fb_var_screeninfo *var, struct fb_videomode *modedb)
@@ -683,8 +853,8 @@ static void savage_update_var(struct fb_var_screeninfo *var, struct fb_videomode
683 var->vmode = modedb->vmode; 853 var->vmode = modedb->vmode;
684} 854}
685 855
686static int savagefb_check_var (struct fb_var_screeninfo *var, 856static int savagefb_check_var(struct fb_var_screeninfo *var,
687 struct fb_info *info) 857 struct fb_info *info)
688{ 858{
689 struct savagefb_par *par = info->par; 859 struct savagefb_par *par = info->par;
690 int memlen, vramlen, mode_valid = 0; 860 int memlen, vramlen, mode_valid = 0;
@@ -750,10 +920,10 @@ static int savagefb_check_var (struct fb_var_screeninfo *var,
750 if (par->SavagePanelWidth && 920 if (par->SavagePanelWidth &&
751 (var->xres > par->SavagePanelWidth || 921 (var->xres > par->SavagePanelWidth ||
752 var->yres > par->SavagePanelHeight)) { 922 var->yres > par->SavagePanelHeight)) {
753 printk (KERN_INFO "Mode (%dx%d) larger than the LCD panel " 923 printk(KERN_INFO "Mode (%dx%d) larger than the LCD panel "
754 "(%dx%d)\n", var->xres, var->yres, 924 "(%dx%d)\n", var->xres, var->yres,
755 par->SavagePanelWidth, 925 par->SavagePanelWidth,
756 par->SavagePanelHeight); 926 par->SavagePanelHeight);
757 return -1; 927 return -1;
758 } 928 }
759 929
@@ -788,8 +958,9 @@ static int savagefb_check_var (struct fb_var_screeninfo *var,
788} 958}
789 959
790 960
791static int savagefb_decode_var (struct fb_var_screeninfo *var, 961static int savagefb_decode_var(struct fb_var_screeninfo *var,
792 struct savagefb_par *par) 962 struct savagefb_par *par,
963 struct savage_reg *reg)
793{ 964{
794 struct xtimings timings; 965 struct xtimings timings;
795 int width, dclk, i, j; /*, refresh; */ 966 int width, dclk, i, j; /*, refresh; */
@@ -799,7 +970,7 @@ static int savagefb_decode_var (struct fb_var_screeninfo *var,
799 970
800 DBG("savagefb_decode_var"); 971 DBG("savagefb_decode_var");
801 972
802 memset (&timings, 0, sizeof(timings)); 973 memset(&timings, 0, sizeof(timings));
803 974
804 if (!pixclock) pixclock = 10000; /* 10ns = 100MHz */ 975 if (!pixclock) pixclock = 10000; /* 10ns = 100MHz */
805 timings.Clock = 1000000000 / pixclock; 976 timings.Clock = 1000000000 / pixclock;
@@ -831,39 +1002,39 @@ static int savagefb_decode_var (struct fb_var_screeninfo *var,
831 * This will allocate the datastructure and initialize all of the 1002 * This will allocate the datastructure and initialize all of the
832 * generic VGA registers. 1003 * generic VGA registers.
833 */ 1004 */
834 vgaHWInit (var, par, &timings); 1005 vgaHWInit(var, par, &timings, reg);
835 1006
836 /* We need to set CR67 whether or not we use the BIOS. */ 1007 /* We need to set CR67 whether or not we use the BIOS. */
837 1008
838 dclk = timings.Clock; 1009 dclk = timings.Clock;
839 par->CR67 = 0x00; 1010 reg->CR67 = 0x00;
840 1011
841 switch( var->bits_per_pixel ) { 1012 switch(var->bits_per_pixel) {
842 case 8: 1013 case 8:
843 if( (par->chip == S3_SAVAGE2000) && (dclk >= 230000) ) 1014 if ((par->chip == S3_SAVAGE2000) && (dclk >= 230000))
844 par->CR67 = 0x10; /* 8bpp, 2 pixels/clock */ 1015 reg->CR67 = 0x10; /* 8bpp, 2 pixels/clock */
845 else 1016 else
846 par->CR67 = 0x00; /* 8bpp, 1 pixel/clock */ 1017 reg->CR67 = 0x00; /* 8bpp, 1 pixel/clock */
847 break; 1018 break;
848 case 15: 1019 case 15:
849 if ( S3_SAVAGE_MOBILE_SERIES(par->chip) || 1020 if (S3_SAVAGE_MOBILE_SERIES(par->chip) ||
850 ((par->chip == S3_SAVAGE2000) && (dclk >= 230000)) ) 1021 ((par->chip == S3_SAVAGE2000) && (dclk >= 230000)))
851 par->CR67 = 0x30; /* 15bpp, 2 pixel/clock */ 1022 reg->CR67 = 0x30; /* 15bpp, 2 pixel/clock */
852 else 1023 else
853 par->CR67 = 0x20; /* 15bpp, 1 pixels/clock */ 1024 reg->CR67 = 0x20; /* 15bpp, 1 pixels/clock */
854 break; 1025 break;
855 case 16: 1026 case 16:
856 if( S3_SAVAGE_MOBILE_SERIES(par->chip) || 1027 if (S3_SAVAGE_MOBILE_SERIES(par->chip) ||
857 ((par->chip == S3_SAVAGE2000) && (dclk >= 230000)) ) 1028 ((par->chip == S3_SAVAGE2000) && (dclk >= 230000)))
858 par->CR67 = 0x50; /* 16bpp, 2 pixel/clock */ 1029 reg->CR67 = 0x50; /* 16bpp, 2 pixel/clock */
859 else 1030 else
860 par->CR67 = 0x40; /* 16bpp, 1 pixels/clock */ 1031 reg->CR67 = 0x40; /* 16bpp, 1 pixels/clock */
861 break; 1032 break;
862 case 24: 1033 case 24:
863 par->CR67 = 0x70; 1034 reg->CR67 = 0x70;
864 break; 1035 break;
865 case 32: 1036 case 32:
866 par->CR67 = 0xd0; 1037 reg->CR67 = 0xd0;
867 break; 1038 break;
868 } 1039 }
869 1040
@@ -872,61 +1043,61 @@ static int savagefb_decode_var (struct fb_var_screeninfo *var,
872 * match. Fall back to traditional register-crunching. 1043 * match. Fall back to traditional register-crunching.
873 */ 1044 */
874 1045
875 vga_out8 (0x3d4, 0x3a, par); 1046 vga_out8(0x3d4, 0x3a, par);
876 tmp = vga_in8 (0x3d5, par); 1047 tmp = vga_in8(0x3d5, par);
877 if (1 /*FIXME:psav->pci_burst*/) 1048 if (1 /*FIXME:psav->pci_burst*/)
878 par->CR3A = (tmp & 0x7f) | 0x15; 1049 reg->CR3A = (tmp & 0x7f) | 0x15;
879 else 1050 else
880 par->CR3A = tmp | 0x95; 1051 reg->CR3A = tmp | 0x95;
881 1052
882 par->CR53 = 0x00; 1053 reg->CR53 = 0x00;
883 par->CR31 = 0x8c; 1054 reg->CR31 = 0x8c;
884 par->CR66 = 0x89; 1055 reg->CR66 = 0x89;
885 1056
886 vga_out8 (0x3d4, 0x58, par); 1057 vga_out8(0x3d4, 0x58, par);
887 par->CR58 = vga_in8 (0x3d5, par) & 0x80; 1058 reg->CR58 = vga_in8(0x3d5, par) & 0x80;
888 par->CR58 |= 0x13; 1059 reg->CR58 |= 0x13;
889 1060
890 par->SR15 = 0x03 | 0x80; 1061 reg->SR15 = 0x03 | 0x80;
891 par->SR18 = 0x00; 1062 reg->SR18 = 0x00;
892 par->CR43 = par->CR45 = par->CR65 = 0x00; 1063 reg->CR43 = reg->CR45 = reg->CR65 = 0x00;
893 1064
894 vga_out8 (0x3d4, 0x40, par); 1065 vga_out8(0x3d4, 0x40, par);
895 par->CR40 = vga_in8 (0x3d5, par) & ~0x01; 1066 reg->CR40 = vga_in8(0x3d5, par) & ~0x01;
896 1067
897 par->MMPR0 = 0x010400; 1068 reg->MMPR0 = 0x010400;
898 par->MMPR1 = 0x00; 1069 reg->MMPR1 = 0x00;
899 par->MMPR2 = 0x0808; 1070 reg->MMPR2 = 0x0808;
900 par->MMPR3 = 0x08080810; 1071 reg->MMPR3 = 0x08080810;
901 1072
902 SavageCalcClock (dclk, 1, 1, 127, 0, 4, 180000, 360000, &m, &n, &r); 1073 SavageCalcClock(dclk, 1, 1, 127, 0, 4, 180000, 360000, &m, &n, &r);
903 /* m = 107; n = 4; r = 2; */ 1074 /* m = 107; n = 4; r = 2; */
904 1075
905 if (par->MCLK <= 0) { 1076 if (par->MCLK <= 0) {
906 par->SR10 = 255; 1077 reg->SR10 = 255;
907 par->SR11 = 255; 1078 reg->SR11 = 255;
908 } else { 1079 } else {
909 common_calc_clock (par->MCLK, 1, 1, 31, 0, 3, 135000, 270000, 1080 common_calc_clock(par->MCLK, 1, 1, 31, 0, 3, 135000, 270000,
910 &par->SR11, &par->SR10); 1081 &reg->SR11, &reg->SR10);
911 /* par->SR10 = 80; // MCLK == 286000 */ 1082 /* reg->SR10 = 80; // MCLK == 286000 */
912 /* par->SR11 = 125; */ 1083 /* reg->SR11 = 125; */
913 } 1084 }
914 1085
915 par->SR12 = (r << 6) | (n & 0x3f); 1086 reg->SR12 = (r << 6) | (n & 0x3f);
916 par->SR13 = m & 0xff; 1087 reg->SR13 = m & 0xff;
917 par->SR29 = (r & 4) | (m & 0x100) >> 5 | (n & 0x40) >> 2; 1088 reg->SR29 = (r & 4) | (m & 0x100) >> 5 | (n & 0x40) >> 2;
918 1089
919 if (var->bits_per_pixel < 24) 1090 if (var->bits_per_pixel < 24)
920 par->MMPR0 -= 0x8000; 1091 reg->MMPR0 -= 0x8000;
921 else 1092 else
922 par->MMPR0 -= 0x4000; 1093 reg->MMPR0 -= 0x4000;
923 1094
924 if (timings.interlaced) 1095 if (timings.interlaced)
925 par->CR42 = 0x20; 1096 reg->CR42 = 0x20;
926 else 1097 else
927 par->CR42 = 0x00; 1098 reg->CR42 = 0x00;
928 1099
929 par->CR34 = 0x10; /* display fifo */ 1100 reg->CR34 = 0x10; /* display fifo */
930 1101
931 i = ((((timings.HTotal >> 3) - 5) & 0x100) >> 8) | 1102 i = ((((timings.HTotal >> 3) - 5) & 0x100) >> 8) |
932 ((((timings.HDisplay >> 3) - 1) & 0x100) >> 7) | 1103 ((((timings.HDisplay >> 3) - 1) & 0x100) >> 7) |
@@ -938,77 +1109,77 @@ static int savagefb_decode_var (struct fb_var_screeninfo *var,
938 if ((timings.HSyncEnd >> 3) - (timings.HSyncStart >> 3) > 32) 1109 if ((timings.HSyncEnd >> 3) - (timings.HSyncStart >> 3) > 32)
939 i |= 0x20; 1110 i |= 0x20;
940 1111
941 j = (par->CRTC[0] + ((i & 0x01) << 8) + 1112 j = (reg->CRTC[0] + ((i & 0x01) << 8) +
942 par->CRTC[4] + ((i & 0x10) << 4) + 1) / 2; 1113 reg->CRTC[4] + ((i & 0x10) << 4) + 1) / 2;
943 1114
944 if (j - (par->CRTC[4] + ((i & 0x10) << 4)) < 4) { 1115 if (j - (reg->CRTC[4] + ((i & 0x10) << 4)) < 4) {
945 if (par->CRTC[4] + ((i & 0x10) << 4) + 4 <= 1116 if (reg->CRTC[4] + ((i & 0x10) << 4) + 4 <=
946 par->CRTC[0] + ((i & 0x01) << 8)) 1117 reg->CRTC[0] + ((i & 0x01) << 8))
947 j = par->CRTC[4] + ((i & 0x10) << 4) + 4; 1118 j = reg->CRTC[4] + ((i & 0x10) << 4) + 4;
948 else 1119 else
949 j = par->CRTC[0] + ((i & 0x01) << 8) + 1; 1120 j = reg->CRTC[0] + ((i & 0x01) << 8) + 1;
950 } 1121 }
951 1122
952 par->CR3B = j & 0xff; 1123 reg->CR3B = j & 0xff;
953 i |= (j & 0x100) >> 2; 1124 i |= (j & 0x100) >> 2;
954 par->CR3C = (par->CRTC[0] + ((i & 0x01) << 8)) / 2; 1125 reg->CR3C = (reg->CRTC[0] + ((i & 0x01) << 8)) / 2;
955 par->CR5D = i; 1126 reg->CR5D = i;
956 par->CR5E = (((timings.VTotal - 2) & 0x400) >> 10) | 1127 reg->CR5E = (((timings.VTotal - 2) & 0x400) >> 10) |
957 (((timings.VDisplay - 1) & 0x400) >> 9) | 1128 (((timings.VDisplay - 1) & 0x400) >> 9) |
958 (((timings.VSyncStart) & 0x400) >> 8) | 1129 (((timings.VSyncStart) & 0x400) >> 8) |
959 (((timings.VSyncStart) & 0x400) >> 6) | 0x40; 1130 (((timings.VSyncStart) & 0x400) >> 6) | 0x40;
960 width = (var->xres_virtual * ((var->bits_per_pixel+7) / 8)) >> 3; 1131 width = (var->xres_virtual * ((var->bits_per_pixel+7) / 8)) >> 3;
961 par->CR91 = par->CRTC[19] = 0xff & width; 1132 reg->CR91 = reg->CRTC[19] = 0xff & width;
962 par->CR51 = (0x300 & width) >> 4; 1133 reg->CR51 = (0x300 & width) >> 4;
963 par->CR90 = 0x80 | (width >> 8); 1134 reg->CR90 = 0x80 | (width >> 8);
964 par->MiscOutReg |= 0x0c; 1135 reg->MiscOutReg |= 0x0c;
965 1136
966 /* Set frame buffer description. */ 1137 /* Set frame buffer description. */
967 1138
968 if (var->bits_per_pixel <= 8) 1139 if (var->bits_per_pixel <= 8)
969 par->CR50 = 0; 1140 reg->CR50 = 0;
970 else if (var->bits_per_pixel <= 16) 1141 else if (var->bits_per_pixel <= 16)
971 par->CR50 = 0x10; 1142 reg->CR50 = 0x10;
972 else 1143 else
973 par->CR50 = 0x30; 1144 reg->CR50 = 0x30;
974 1145
975 if (var->xres_virtual <= 640) 1146 if (var->xres_virtual <= 640)
976 par->CR50 |= 0x40; 1147 reg->CR50 |= 0x40;
977 else if (var->xres_virtual == 800) 1148 else if (var->xres_virtual == 800)
978 par->CR50 |= 0x80; 1149 reg->CR50 |= 0x80;
979 else if (var->xres_virtual == 1024) 1150 else if (var->xres_virtual == 1024)
980 par->CR50 |= 0x00; 1151 reg->CR50 |= 0x00;
981 else if (var->xres_virtual == 1152) 1152 else if (var->xres_virtual == 1152)
982 par->CR50 |= 0x01; 1153 reg->CR50 |= 0x01;
983 else if (var->xres_virtual == 1280) 1154 else if (var->xres_virtual == 1280)
984 par->CR50 |= 0xc0; 1155 reg->CR50 |= 0xc0;
985 else if (var->xres_virtual == 1600) 1156 else if (var->xres_virtual == 1600)
986 par->CR50 |= 0x81; 1157 reg->CR50 |= 0x81;
987 else 1158 else
988 par->CR50 |= 0xc1; /* Use GBD */ 1159 reg->CR50 |= 0xc1; /* Use GBD */
989 1160
990 if( par->chip == S3_SAVAGE2000 ) 1161 if (par->chip == S3_SAVAGE2000)
991 par->CR33 = 0x08; 1162 reg->CR33 = 0x08;
992 else 1163 else
993 par->CR33 = 0x20; 1164 reg->CR33 = 0x20;
994 1165
995 par->CRTC[0x17] = 0xeb; 1166 reg->CRTC[0x17] = 0xeb;
996 1167
997 par->CR67 |= 1; 1168 reg->CR67 |= 1;
998 1169
999 vga_out8(0x3d4, 0x36, par); 1170 vga_out8(0x3d4, 0x36, par);
1000 par->CR36 = vga_in8 (0x3d5, par); 1171 reg->CR36 = vga_in8(0x3d5, par);
1001 vga_out8 (0x3d4, 0x68, par); 1172 vga_out8(0x3d4, 0x68, par);
1002 par->CR68 = vga_in8 (0x3d5, par); 1173 reg->CR68 = vga_in8(0x3d5, par);
1003 par->CR69 = 0; 1174 reg->CR69 = 0;
1004 vga_out8 (0x3d4, 0x6f, par); 1175 vga_out8(0x3d4, 0x6f, par);
1005 par->CR6F = vga_in8 (0x3d5, par); 1176 reg->CR6F = vga_in8(0x3d5, par);
1006 vga_out8 (0x3d4, 0x86, par); 1177 vga_out8(0x3d4, 0x86, par);
1007 par->CR86 = vga_in8 (0x3d5, par); 1178 reg->CR86 = vga_in8(0x3d5, par);
1008 vga_out8 (0x3d4, 0x88, par); 1179 vga_out8(0x3d4, 0x88, par);
1009 par->CR88 = vga_in8 (0x3d5, par) | 0x08; 1180 reg->CR88 = vga_in8(0x3d5, par) | 0x08;
1010 vga_out8 (0x3d4, 0xb0, par); 1181 vga_out8(0x3d4, 0xb0, par);
1011 par->CRB0 = vga_in8 (0x3d5, par) | 0x80; 1182 reg->CRB0 = vga_in8(0x3d5, par) | 0x80;
1012 1183
1013 return 0; 1184 return 0;
1014} 1185}
@@ -1037,11 +1208,11 @@ static int savagefb_setcolreg(unsigned regno,
1037 1208
1038 switch (info->var.bits_per_pixel) { 1209 switch (info->var.bits_per_pixel) {
1039 case 8: 1210 case 8:
1040 vga_out8 (0x3c8, regno, par); 1211 vga_out8(0x3c8, regno, par);
1041 1212
1042 vga_out8 (0x3c9, red >> 10, par); 1213 vga_out8(0x3c9, red >> 10, par);
1043 vga_out8 (0x3c9, green >> 10, par); 1214 vga_out8(0x3c9, green >> 10, par);
1044 vga_out8 (0x3c9, blue >> 10, par); 1215 vga_out8(0x3c9, blue >> 10, par);
1045 break; 1216 break;
1046 1217
1047 case 16: 1218 case 16:
@@ -1075,21 +1246,21 @@ static int savagefb_setcolreg(unsigned regno,
1075 return 0; 1246 return 0;
1076} 1247}
1077 1248
1078static void savagefb_set_par_int (struct savagefb_par *par) 1249static void savagefb_set_par_int(struct savagefb_par *par, struct savage_reg *reg)
1079{ 1250{
1080 unsigned char tmp, cr3a, cr66, cr67; 1251 unsigned char tmp, cr3a, cr66, cr67;
1081 1252
1082 DBG ("savagefb_set_par_int"); 1253 DBG("savagefb_set_par_int");
1083 1254
1084 par->SavageWaitIdle (par); 1255 par->SavageWaitIdle(par);
1085 1256
1086 vga_out8 (0x3c2, 0x23, par); 1257 vga_out8(0x3c2, 0x23, par);
1087 1258
1088 vga_out16 (0x3d4, 0x4838, par); 1259 vga_out16(0x3d4, 0x4838, par);
1089 vga_out16 (0x3d4, 0xa539, par); 1260 vga_out16(0x3d4, 0xa539, par);
1090 vga_out16 (0x3c4, 0x0608, par); 1261 vga_out16(0x3c4, 0x0608, par);
1091 1262
1092 vgaHWProtect (par, 1); 1263 vgaHWProtect(par, 1);
1093 1264
1094 /* 1265 /*
1095 * Some Savage/MX and /IX systems go nuts when trying to exit the 1266 * Some Savage/MX and /IX systems go nuts when trying to exit the
@@ -1099,203 +1270,202 @@ static void savagefb_set_par_int (struct savagefb_par *par)
1099 */ 1270 */
1100 1271
1101 VerticalRetraceWait(par); 1272 VerticalRetraceWait(par);
1102 vga_out8 (0x3d4, 0x67, par); 1273 vga_out8(0x3d4, 0x67, par);
1103 cr67 = vga_in8 (0x3d5, par); 1274 cr67 = vga_in8(0x3d5, par);
1104 vga_out8 (0x3d5, cr67/*par->CR67*/ & ~0x0c, par); /* no STREAMS yet */ 1275 vga_out8(0x3d5, cr67/*par->CR67*/ & ~0x0c, par); /* no STREAMS yet */
1105 1276
1106 vga_out8 (0x3d4, 0x23, par); 1277 vga_out8(0x3d4, 0x23, par);
1107 vga_out8 (0x3d5, 0x00, par); 1278 vga_out8(0x3d5, 0x00, par);
1108 vga_out8 (0x3d4, 0x26, par); 1279 vga_out8(0x3d4, 0x26, par);
1109 vga_out8 (0x3d5, 0x00, par); 1280 vga_out8(0x3d5, 0x00, par);
1110 1281
1111 /* restore extended regs */ 1282 /* restore extended regs */
1112 vga_out8 (0x3d4, 0x66, par); 1283 vga_out8(0x3d4, 0x66, par);
1113 vga_out8 (0x3d5, par->CR66, par); 1284 vga_out8(0x3d5, reg->CR66, par);
1114 vga_out8 (0x3d4, 0x3a, par); 1285 vga_out8(0x3d4, 0x3a, par);
1115 vga_out8 (0x3d5, par->CR3A, par); 1286 vga_out8(0x3d5, reg->CR3A, par);
1116 vga_out8 (0x3d4, 0x31, par); 1287 vga_out8(0x3d4, 0x31, par);
1117 vga_out8 (0x3d5, par->CR31, par); 1288 vga_out8(0x3d5, reg->CR31, par);
1118 vga_out8 (0x3d4, 0x32, par); 1289 vga_out8(0x3d4, 0x32, par);
1119 vga_out8 (0x3d5, par->CR32, par); 1290 vga_out8(0x3d5, reg->CR32, par);
1120 vga_out8 (0x3d4, 0x58, par); 1291 vga_out8(0x3d4, 0x58, par);
1121 vga_out8 (0x3d5, par->CR58, par); 1292 vga_out8(0x3d5, reg->CR58, par);
1122 vga_out8 (0x3d4, 0x53, par); 1293 vga_out8(0x3d4, 0x53, par);
1123 vga_out8 (0x3d5, par->CR53 & 0x7f, par); 1294 vga_out8(0x3d5, reg->CR53 & 0x7f, par);
1124 1295
1125 vga_out16 (0x3c4, 0x0608, par); 1296 vga_out16(0x3c4, 0x0608, par);
1126 1297
1127 /* Restore DCLK registers. */ 1298 /* Restore DCLK registers. */
1128 1299
1129 vga_out8 (0x3c4, 0x0e, par); 1300 vga_out8(0x3c4, 0x0e, par);
1130 vga_out8 (0x3c5, par->SR0E, par); 1301 vga_out8(0x3c5, reg->SR0E, par);
1131 vga_out8 (0x3c4, 0x0f, par); 1302 vga_out8(0x3c4, 0x0f, par);
1132 vga_out8 (0x3c5, par->SR0F, par); 1303 vga_out8(0x3c5, reg->SR0F, par);
1133 vga_out8 (0x3c4, 0x29, par); 1304 vga_out8(0x3c4, 0x29, par);
1134 vga_out8 (0x3c5, par->SR29, par); 1305 vga_out8(0x3c5, reg->SR29, par);
1135 vga_out8 (0x3c4, 0x15, par); 1306 vga_out8(0x3c4, 0x15, par);
1136 vga_out8 (0x3c5, par->SR15, par); 1307 vga_out8(0x3c5, reg->SR15, par);
1137 1308
1138 /* Restore flat panel expansion regsters. */ 1309 /* Restore flat panel expansion regsters. */
1139 if( par->chip == S3_SAVAGE_MX ) { 1310 if (par->chip == S3_SAVAGE_MX) {
1140 int i; 1311 int i;
1141 1312
1142 for( i = 0; i < 8; i++ ) { 1313 for (i = 0; i < 8; i++) {
1143 vga_out8 (0x3c4, 0x54+i, par); 1314 vga_out8(0x3c4, 0x54+i, par);
1144 vga_out8 (0x3c5, par->SR54[i], par); 1315 vga_out8(0x3c5, reg->SR54[i], par);
1145 } 1316 }
1146 } 1317 }
1147 1318
1148 vgaHWRestore (par); 1319 vgaHWRestore (par, reg);
1149 1320
1150 /* extended mode timing registers */ 1321 /* extended mode timing registers */
1151 vga_out8 (0x3d4, 0x53, par); 1322 vga_out8(0x3d4, 0x53, par);
1152 vga_out8 (0x3d5, par->CR53, par); 1323 vga_out8(0x3d5, reg->CR53, par);
1153 vga_out8 (0x3d4, 0x5d, par); 1324 vga_out8(0x3d4, 0x5d, par);
1154 vga_out8 (0x3d5, par->CR5D, par); 1325 vga_out8(0x3d5, reg->CR5D, par);
1155 vga_out8 (0x3d4, 0x5e, par); 1326 vga_out8(0x3d4, 0x5e, par);
1156 vga_out8 (0x3d5, par->CR5E, par); 1327 vga_out8(0x3d5, reg->CR5E, par);
1157 vga_out8 (0x3d4, 0x3b, par); 1328 vga_out8(0x3d4, 0x3b, par);
1158 vga_out8 (0x3d5, par->CR3B, par); 1329 vga_out8(0x3d5, reg->CR3B, par);
1159 vga_out8 (0x3d4, 0x3c, par); 1330 vga_out8(0x3d4, 0x3c, par);
1160 vga_out8 (0x3d5, par->CR3C, par); 1331 vga_out8(0x3d5, reg->CR3C, par);
1161 vga_out8 (0x3d4, 0x43, par); 1332 vga_out8(0x3d4, 0x43, par);
1162 vga_out8 (0x3d5, par->CR43, par); 1333 vga_out8(0x3d5, reg->CR43, par);
1163 vga_out8 (0x3d4, 0x65, par); 1334 vga_out8(0x3d4, 0x65, par);
1164 vga_out8 (0x3d5, par->CR65, par); 1335 vga_out8(0x3d5, reg->CR65, par);
1165 1336
1166 /* restore the desired video mode with cr67 */ 1337 /* restore the desired video mode with cr67 */
1167 vga_out8 (0x3d4, 0x67, par); 1338 vga_out8(0x3d4, 0x67, par);
1168 /* following part not present in X11 driver */ 1339 /* following part not present in X11 driver */
1169 cr67 = vga_in8 (0x3d5, par) & 0xf; 1340 cr67 = vga_in8(0x3d5, par) & 0xf;
1170 vga_out8 (0x3d5, 0x50 | cr67, par); 1341 vga_out8(0x3d5, 0x50 | cr67, par);
1171 udelay (10000); 1342 udelay(10000);
1172 vga_out8 (0x3d4, 0x67, par); 1343 vga_out8(0x3d4, 0x67, par);
1173 /* end of part */ 1344 /* end of part */
1174 vga_out8 (0x3d5, par->CR67 & ~0x0c, par); 1345 vga_out8(0x3d5, reg->CR67 & ~0x0c, par);
1175 1346
1176 /* other mode timing and extended regs */ 1347 /* other mode timing and extended regs */
1177 vga_out8 (0x3d4, 0x34, par); 1348 vga_out8(0x3d4, 0x34, par);
1178 vga_out8 (0x3d5, par->CR34, par); 1349 vga_out8(0x3d5, reg->CR34, par);
1179 vga_out8 (0x3d4, 0x40, par); 1350 vga_out8(0x3d4, 0x40, par);
1180 vga_out8 (0x3d5, par->CR40, par); 1351 vga_out8(0x3d5, reg->CR40, par);
1181 vga_out8 (0x3d4, 0x42, par); 1352 vga_out8(0x3d4, 0x42, par);
1182 vga_out8 (0x3d5, par->CR42, par); 1353 vga_out8(0x3d5, reg->CR42, par);
1183 vga_out8 (0x3d4, 0x45, par); 1354 vga_out8(0x3d4, 0x45, par);
1184 vga_out8 (0x3d5, par->CR45, par); 1355 vga_out8(0x3d5, reg->CR45, par);
1185 vga_out8 (0x3d4, 0x50, par); 1356 vga_out8(0x3d4, 0x50, par);
1186 vga_out8 (0x3d5, par->CR50, par); 1357 vga_out8(0x3d5, reg->CR50, par);
1187 vga_out8 (0x3d4, 0x51, par); 1358 vga_out8(0x3d4, 0x51, par);
1188 vga_out8 (0x3d5, par->CR51, par); 1359 vga_out8(0x3d5, reg->CR51, par);
1189 1360
1190 /* memory timings */ 1361 /* memory timings */
1191 vga_out8 (0x3d4, 0x36, par); 1362 vga_out8(0x3d4, 0x36, par);
1192 vga_out8 (0x3d5, par->CR36, par); 1363 vga_out8(0x3d5, reg->CR36, par);
1193 vga_out8 (0x3d4, 0x60, par); 1364 vga_out8(0x3d4, 0x60, par);
1194 vga_out8 (0x3d5, par->CR60, par); 1365 vga_out8(0x3d5, reg->CR60, par);
1195 vga_out8 (0x3d4, 0x68, par); 1366 vga_out8(0x3d4, 0x68, par);
1196 vga_out8 (0x3d5, par->CR68, par); 1367 vga_out8(0x3d5, reg->CR68, par);
1197 vga_out8 (0x3d4, 0x69, par); 1368 vga_out8(0x3d4, 0x69, par);
1198 vga_out8 (0x3d5, par->CR69, par); 1369 vga_out8(0x3d5, reg->CR69, par);
1199 vga_out8 (0x3d4, 0x6f, par); 1370 vga_out8(0x3d4, 0x6f, par);
1200 vga_out8 (0x3d5, par->CR6F, par); 1371 vga_out8(0x3d5, reg->CR6F, par);
1201 1372
1202 vga_out8 (0x3d4, 0x33, par); 1373 vga_out8(0x3d4, 0x33, par);
1203 vga_out8 (0x3d5, par->CR33, par); 1374 vga_out8(0x3d5, reg->CR33, par);
1204 vga_out8 (0x3d4, 0x86, par); 1375 vga_out8(0x3d4, 0x86, par);
1205 vga_out8 (0x3d5, par->CR86, par); 1376 vga_out8(0x3d5, reg->CR86, par);
1206 vga_out8 (0x3d4, 0x88, par); 1377 vga_out8(0x3d4, 0x88, par);
1207 vga_out8 (0x3d5, par->CR88, par); 1378 vga_out8(0x3d5, reg->CR88, par);
1208 vga_out8 (0x3d4, 0x90, par); 1379 vga_out8(0x3d4, 0x90, par);
1209 vga_out8 (0x3d5, par->CR90, par); 1380 vga_out8(0x3d5, reg->CR90, par);
1210 vga_out8 (0x3d4, 0x91, par); 1381 vga_out8(0x3d4, 0x91, par);
1211 vga_out8 (0x3d5, par->CR91, par); 1382 vga_out8(0x3d5, reg->CR91, par);
1212 1383
1213 if (par->chip == S3_SAVAGE4) { 1384 if (par->chip == S3_SAVAGE4) {
1214 vga_out8 (0x3d4, 0xb0, par); 1385 vga_out8(0x3d4, 0xb0, par);
1215 vga_out8 (0x3d5, par->CRB0, par); 1386 vga_out8(0x3d5, reg->CRB0, par);
1216 } 1387 }
1217 1388
1218 vga_out8 (0x3d4, 0x32, par); 1389 vga_out8(0x3d4, 0x32, par);
1219 vga_out8 (0x3d5, par->CR32, par); 1390 vga_out8(0x3d5, reg->CR32, par);
1220 1391
1221 /* unlock extended seq regs */ 1392 /* unlock extended seq regs */
1222 vga_out8 (0x3c4, 0x08, par); 1393 vga_out8(0x3c4, 0x08, par);
1223 vga_out8 (0x3c5, 0x06, par); 1394 vga_out8(0x3c5, 0x06, par);
1224 1395
1225 /* Restore extended sequencer regs for MCLK. SR10 == 255 indicates 1396 /* Restore extended sequencer regs for MCLK. SR10 == 255 indicates
1226 * that we should leave the default SR10 and SR11 values there. 1397 * that we should leave the default SR10 and SR11 values there.
1227 */ 1398 */
1228 if (par->SR10 != 255) { 1399 if (reg->SR10 != 255) {
1229 vga_out8 (0x3c4, 0x10, par); 1400 vga_out8(0x3c4, 0x10, par);
1230 vga_out8 (0x3c5, par->SR10, par); 1401 vga_out8(0x3c5, reg->SR10, par);
1231 vga_out8 (0x3c4, 0x11, par); 1402 vga_out8(0x3c4, 0x11, par);
1232 vga_out8 (0x3c5, par->SR11, par); 1403 vga_out8(0x3c5, reg->SR11, par);
1233 } 1404 }
1234 1405
1235 /* restore extended seq regs for dclk */ 1406 /* restore extended seq regs for dclk */
1236 vga_out8 (0x3c4, 0x0e, par); 1407 vga_out8(0x3c4, 0x0e, par);
1237 vga_out8 (0x3c5, par->SR0E, par); 1408 vga_out8(0x3c5, reg->SR0E, par);
1238 vga_out8 (0x3c4, 0x0f, par); 1409 vga_out8(0x3c4, 0x0f, par);
1239 vga_out8 (0x3c5, par->SR0F, par); 1410 vga_out8(0x3c5, reg->SR0F, par);
1240 vga_out8 (0x3c4, 0x12, par); 1411 vga_out8(0x3c4, 0x12, par);
1241 vga_out8 (0x3c5, par->SR12, par); 1412 vga_out8(0x3c5, reg->SR12, par);
1242 vga_out8 (0x3c4, 0x13, par); 1413 vga_out8(0x3c4, 0x13, par);
1243 vga_out8 (0x3c5, par->SR13, par); 1414 vga_out8(0x3c5, reg->SR13, par);
1244 vga_out8 (0x3c4, 0x29, par); 1415 vga_out8(0x3c4, 0x29, par);
1245 vga_out8 (0x3c5, par->SR29, par); 1416 vga_out8(0x3c5, reg->SR29, par);
1246 1417 vga_out8(0x3c4, 0x18, par);
1247 vga_out8 (0x3c4, 0x18, par); 1418 vga_out8(0x3c5, reg->SR18, par);
1248 vga_out8 (0x3c5, par->SR18, par);
1249 1419
1250 /* load new m, n pll values for dclk & mclk */ 1420 /* load new m, n pll values for dclk & mclk */
1251 vga_out8 (0x3c4, 0x15, par); 1421 vga_out8(0x3c4, 0x15, par);
1252 tmp = vga_in8 (0x3c5, par) & ~0x21; 1422 tmp = vga_in8(0x3c5, par) & ~0x21;
1253 1423
1254 vga_out8 (0x3c5, tmp | 0x03, par); 1424 vga_out8(0x3c5, tmp | 0x03, par);
1255 vga_out8 (0x3c5, tmp | 0x23, par); 1425 vga_out8(0x3c5, tmp | 0x23, par);
1256 vga_out8 (0x3c5, tmp | 0x03, par); 1426 vga_out8(0x3c5, tmp | 0x03, par);
1257 vga_out8 (0x3c5, par->SR15, par); 1427 vga_out8(0x3c5, reg->SR15, par);
1258 udelay (100); 1428 udelay(100);
1259 1429
1260 vga_out8 (0x3c4, 0x30, par); 1430 vga_out8(0x3c4, 0x30, par);
1261 vga_out8 (0x3c5, par->SR30, par); 1431 vga_out8(0x3c5, reg->SR30, par);
1262 vga_out8 (0x3c4, 0x08, par); 1432 vga_out8(0x3c4, 0x08, par);
1263 vga_out8 (0x3c5, par->SR08, par); 1433 vga_out8(0x3c5, reg->SR08, par);
1264 1434
1265 /* now write out cr67 in full, possibly starting STREAMS */ 1435 /* now write out cr67 in full, possibly starting STREAMS */
1266 VerticalRetraceWait(par); 1436 VerticalRetraceWait(par);
1267 vga_out8 (0x3d4, 0x67, par); 1437 vga_out8(0x3d4, 0x67, par);
1268 vga_out8 (0x3d5, par->CR67, par); 1438 vga_out8(0x3d5, reg->CR67, par);
1269 1439
1270 vga_out8 (0x3d4, 0x66, par); 1440 vga_out8(0x3d4, 0x66, par);
1271 cr66 = vga_in8 (0x3d5, par); 1441 cr66 = vga_in8(0x3d5, par);
1272 vga_out8 (0x3d5, cr66 | 0x80, par); 1442 vga_out8(0x3d5, cr66 | 0x80, par);
1273 vga_out8 (0x3d4, 0x3a, par); 1443 vga_out8(0x3d4, 0x3a, par);
1274 cr3a = vga_in8 (0x3d5, par); 1444 cr3a = vga_in8(0x3d5, par);
1275 vga_out8 (0x3d5, cr3a | 0x80, par); 1445 vga_out8(0x3d5, cr3a | 0x80, par);
1276 1446
1277 if (par->chip != S3_SAVAGE_MX) { 1447 if (par->chip != S3_SAVAGE_MX) {
1278 VerticalRetraceWait(par); 1448 VerticalRetraceWait(par);
1279 savage_out32 (FIFO_CONTROL_REG, par->MMPR0, par); 1449 savage_out32(FIFO_CONTROL_REG, reg->MMPR0, par);
1280 par->SavageWaitIdle (par); 1450 par->SavageWaitIdle(par);
1281 savage_out32 (MIU_CONTROL_REG, par->MMPR1, par); 1451 savage_out32(MIU_CONTROL_REG, reg->MMPR1, par);
1282 par->SavageWaitIdle (par); 1452 par->SavageWaitIdle(par);
1283 savage_out32 (STREAMS_TIMEOUT_REG, par->MMPR2, par); 1453 savage_out32(STREAMS_TIMEOUT_REG, reg->MMPR2, par);
1284 par->SavageWaitIdle (par); 1454 par->SavageWaitIdle(par);
1285 savage_out32 (MISC_TIMEOUT_REG, par->MMPR3, par); 1455 savage_out32(MISC_TIMEOUT_REG, reg->MMPR3, par);
1286 } 1456 }
1287 1457
1288 vga_out8 (0x3d4, 0x66, par); 1458 vga_out8(0x3d4, 0x66, par);
1289 vga_out8 (0x3d5, cr66, par); 1459 vga_out8(0x3d5, cr66, par);
1290 vga_out8 (0x3d4, 0x3a, par); 1460 vga_out8(0x3d4, 0x3a, par);
1291 vga_out8 (0x3d5, cr3a, par); 1461 vga_out8(0x3d5, cr3a, par);
1292 1462
1293 SavageSetup2DEngine (par); 1463 SavageSetup2DEngine(par);
1294 vgaHWProtect (par, 0); 1464 vgaHWProtect(par, 0);
1295} 1465}
1296 1466
1297static void savagefb_update_start (struct savagefb_par *par, 1467static void savagefb_update_start(struct savagefb_par *par,
1298 struct fb_var_screeninfo *var) 1468 struct fb_var_screeninfo *var)
1299{ 1469{
1300 int base; 1470 int base;
1301 1471
@@ -1305,8 +1475,8 @@ static void savagefb_update_start (struct savagefb_par *par,
1305 /* now program the start address registers */ 1475 /* now program the start address registers */
1306 vga_out16(0x3d4, (base & 0x00ff00) | 0x0c, par); 1476 vga_out16(0x3d4, (base & 0x00ff00) | 0x0c, par);
1307 vga_out16(0x3d4, ((base & 0x00ff) << 8) | 0x0d, par); 1477 vga_out16(0x3d4, ((base & 0x00ff) << 8) | 0x0d, par);
1308 vga_out8 (0x3d4, 0x69, par); 1478 vga_out8(0x3d4, 0x69, par);
1309 vga_out8 (0x3d5, (base & 0x7f0000) >> 16, par); 1479 vga_out8(0x3d5, (base & 0x7f0000) >> 16, par);
1310} 1480}
1311 1481
1312 1482
@@ -1325,29 +1495,14 @@ static void savagefb_set_fix(struct fb_info *info)
1325 1495
1326} 1496}
1327 1497
1328#if defined(CONFIG_FB_SAVAGE_ACCEL) 1498static int savagefb_set_par(struct fb_info *info)
1329static void savagefb_set_clip(struct fb_info *info)
1330{
1331 struct savagefb_par *par = info->par;
1332 int cmd;
1333
1334 cmd = BCI_CMD_NOP | BCI_CMD_CLIP_NEW;
1335 par->bci_ptr = 0;
1336 par->SavageWaitFifo(par,3);
1337 BCI_SEND(cmd);
1338 BCI_SEND(BCI_CLIP_TL(0, 0));
1339 BCI_SEND(BCI_CLIP_BR(0xfff, 0xfff));
1340}
1341#endif
1342
1343static int savagefb_set_par (struct fb_info *info)
1344{ 1499{
1345 struct savagefb_par *par = info->par; 1500 struct savagefb_par *par = info->par;
1346 struct fb_var_screeninfo *var = &info->var; 1501 struct fb_var_screeninfo *var = &info->var;
1347 int err; 1502 int err;
1348 1503
1349 DBG("savagefb_set_par"); 1504 DBG("savagefb_set_par");
1350 err = savagefb_decode_var (var, par); 1505 err = savagefb_decode_var(var, par, &par->state);
1351 if (err) 1506 if (err)
1352 return err; 1507 return err;
1353 1508
@@ -1366,8 +1521,8 @@ static int savagefb_set_par (struct fb_info *info)
1366 par->maxClock = par->dacSpeedBpp; 1521 par->maxClock = par->dacSpeedBpp;
1367 par->minClock = 10000; 1522 par->minClock = 10000;
1368 1523
1369 savagefb_set_par_int (par); 1524 savagefb_set_par_int(par, &par->state);
1370 fb_set_cmap (&info->cmap, info); 1525 fb_set_cmap(&info->cmap, info);
1371 savagefb_set_fix(info); 1526 savagefb_set_fix(info);
1372 savagefb_set_clip(info); 1527 savagefb_set_clip(info);
1373 1528
@@ -1378,12 +1533,12 @@ static int savagefb_set_par (struct fb_info *info)
1378/* 1533/*
1379 * Pan or Wrap the Display 1534 * Pan or Wrap the Display
1380 */ 1535 */
1381static int savagefb_pan_display (struct fb_var_screeninfo *var, 1536static int savagefb_pan_display(struct fb_var_screeninfo *var,
1382 struct fb_info *info) 1537 struct fb_info *info)
1383{ 1538{
1384 struct savagefb_par *par = info->par; 1539 struct savagefb_par *par = info->par;
1385 1540
1386 savagefb_update_start (par, var); 1541 savagefb_update_start(par, var);
1387 return 0; 1542 return 0;
1388} 1543}
1389 1544
@@ -1440,6 +1595,22 @@ static int savagefb_blank(int blank, struct fb_info *info)
1440 return (blank == FB_BLANK_NORMAL) ? 1 : 0; 1595 return (blank == FB_BLANK_NORMAL) ? 1 : 0;
1441} 1596}
1442 1597
1598static void savagefb_save_state(struct fb_info *info)
1599{
1600 struct savagefb_par *par = info->par;
1601
1602 savage_get_default_par(par, &par->save);
1603}
1604
1605static void savagefb_restore_state(struct fb_info *info)
1606{
1607 struct savagefb_par *par = info->par;
1608
1609 savagefb_blank(FB_BLANK_POWERDOWN, info);
1610 savage_set_default_par(par, &par->save);
1611 savagefb_blank(FB_BLANK_UNBLANK, info);
1612}
1613
1443static struct fb_ops savagefb_ops = { 1614static struct fb_ops savagefb_ops = {
1444 .owner = THIS_MODULE, 1615 .owner = THIS_MODULE,
1445 .fb_check_var = savagefb_check_var, 1616 .fb_check_var = savagefb_check_var,
@@ -1447,6 +1618,8 @@ static struct fb_ops savagefb_ops = {
1447 .fb_setcolreg = savagefb_setcolreg, 1618 .fb_setcolreg = savagefb_setcolreg,
1448 .fb_pan_display = savagefb_pan_display, 1619 .fb_pan_display = savagefb_pan_display,
1449 .fb_blank = savagefb_blank, 1620 .fb_blank = savagefb_blank,
1621 .fb_save_state = savagefb_save_state,
1622 .fb_restore_state = savagefb_restore_state,
1450#if defined(CONFIG_FB_SAVAGE_ACCEL) 1623#if defined(CONFIG_FB_SAVAGE_ACCEL)
1451 .fb_fillrect = savagefb_fillrect, 1624 .fb_fillrect = savagefb_fillrect,
1452 .fb_copyarea = savagefb_copyarea, 1625 .fb_copyarea = savagefb_copyarea,
@@ -1479,59 +1652,59 @@ static struct fb_var_screeninfo __devinitdata savagefb_var800x600x8 = {
1479 .vmode = FB_VMODE_NONINTERLACED 1652 .vmode = FB_VMODE_NONINTERLACED
1480}; 1653};
1481 1654
1482static void savage_enable_mmio (struct savagefb_par *par) 1655static void savage_enable_mmio(struct savagefb_par *par)
1483{ 1656{
1484 unsigned char val; 1657 unsigned char val;
1485 1658
1486 DBG ("savage_enable_mmio\n"); 1659 DBG("savage_enable_mmio\n");
1487 1660
1488 val = vga_in8 (0x3c3, par); 1661 val = vga_in8(0x3c3, par);
1489 vga_out8 (0x3c3, val | 0x01, par); 1662 vga_out8(0x3c3, val | 0x01, par);
1490 val = vga_in8 (0x3cc, par); 1663 val = vga_in8(0x3cc, par);
1491 vga_out8 (0x3c2, val | 0x01, par); 1664 vga_out8(0x3c2, val | 0x01, par);
1492 1665
1493 if (par->chip >= S3_SAVAGE4) { 1666 if (par->chip >= S3_SAVAGE4) {
1494 vga_out8 (0x3d4, 0x40, par); 1667 vga_out8(0x3d4, 0x40, par);
1495 val = vga_in8 (0x3d5, par); 1668 val = vga_in8(0x3d5, par);
1496 vga_out8 (0x3d5, val | 1, par); 1669 vga_out8(0x3d5, val | 1, par);
1497 } 1670 }
1498} 1671}
1499 1672
1500 1673
1501static void savage_disable_mmio (struct savagefb_par *par) 1674static void savage_disable_mmio(struct savagefb_par *par)
1502{ 1675{
1503 unsigned char val; 1676 unsigned char val;
1504 1677
1505 DBG ("savage_disable_mmio\n"); 1678 DBG("savage_disable_mmio\n");
1506 1679
1507 if(par->chip >= S3_SAVAGE4 ) { 1680 if (par->chip >= S3_SAVAGE4) {
1508 vga_out8 (0x3d4, 0x40, par); 1681 vga_out8(0x3d4, 0x40, par);
1509 val = vga_in8 (0x3d5, par); 1682 val = vga_in8(0x3d5, par);
1510 vga_out8 (0x3d5, val | 1, par); 1683 vga_out8(0x3d5, val | 1, par);
1511 } 1684 }
1512} 1685}
1513 1686
1514 1687
1515static int __devinit savage_map_mmio (struct fb_info *info) 1688static int __devinit savage_map_mmio(struct fb_info *info)
1516{ 1689{
1517 struct savagefb_par *par = info->par; 1690 struct savagefb_par *par = info->par;
1518 DBG ("savage_map_mmio"); 1691 DBG("savage_map_mmio");
1519 1692
1520 if (S3_SAVAGE3D_SERIES (par->chip)) 1693 if (S3_SAVAGE3D_SERIES(par->chip))
1521 par->mmio.pbase = pci_resource_start (par->pcidev, 0) + 1694 par->mmio.pbase = pci_resource_start(par->pcidev, 0) +
1522 SAVAGE_NEWMMIO_REGBASE_S3; 1695 SAVAGE_NEWMMIO_REGBASE_S3;
1523 else 1696 else
1524 par->mmio.pbase = pci_resource_start (par->pcidev, 0) + 1697 par->mmio.pbase = pci_resource_start(par->pcidev, 0) +
1525 SAVAGE_NEWMMIO_REGBASE_S4; 1698 SAVAGE_NEWMMIO_REGBASE_S4;
1526 1699
1527 par->mmio.len = SAVAGE_NEWMMIO_REGSIZE; 1700 par->mmio.len = SAVAGE_NEWMMIO_REGSIZE;
1528 1701
1529 par->mmio.vbase = ioremap (par->mmio.pbase, par->mmio.len); 1702 par->mmio.vbase = ioremap(par->mmio.pbase, par->mmio.len);
1530 if (!par->mmio.vbase) { 1703 if (!par->mmio.vbase) {
1531 printk ("savagefb: unable to map memory mapped IO\n"); 1704 printk("savagefb: unable to map memory mapped IO\n");
1532 return -ENOMEM; 1705 return -ENOMEM;
1533 } else 1706 } else
1534 printk (KERN_INFO "savagefb: mapped io at %p\n", 1707 printk(KERN_INFO "savagefb: mapped io at %p\n",
1535 par->mmio.vbase); 1708 par->mmio.vbase);
1536 1709
1537 info->fix.mmio_start = par->mmio.pbase; 1710 info->fix.mmio_start = par->mmio.pbase;
@@ -1540,15 +1713,15 @@ static int __devinit savage_map_mmio (struct fb_info *info)
1540 par->bci_base = (u32 __iomem *)(par->mmio.vbase + BCI_BUFFER_OFFSET); 1713 par->bci_base = (u32 __iomem *)(par->mmio.vbase + BCI_BUFFER_OFFSET);
1541 par->bci_ptr = 0; 1714 par->bci_ptr = 0;
1542 1715
1543 savage_enable_mmio (par); 1716 savage_enable_mmio(par);
1544 1717
1545 return 0; 1718 return 0;
1546} 1719}
1547 1720
1548static void savage_unmap_mmio (struct fb_info *info) 1721static void savage_unmap_mmio(struct fb_info *info)
1549{ 1722{
1550 struct savagefb_par *par = info->par; 1723 struct savagefb_par *par = info->par;
1551 DBG ("savage_unmap_mmio"); 1724 DBG("savage_unmap_mmio");
1552 1725
1553 savage_disable_mmio(par); 1726 savage_disable_mmio(par);
1554 1727
@@ -1558,46 +1731,46 @@ static void savage_unmap_mmio (struct fb_info *info)
1558 } 1731 }
1559} 1732}
1560 1733
1561static int __devinit savage_map_video (struct fb_info *info, 1734static int __devinit savage_map_video(struct fb_info *info,
1562 int video_len) 1735 int video_len)
1563{ 1736{
1564 struct savagefb_par *par = info->par; 1737 struct savagefb_par *par = info->par;
1565 int resource; 1738 int resource;
1566 1739
1567 DBG("savage_map_video"); 1740 DBG("savage_map_video");
1568 1741
1569 if (S3_SAVAGE3D_SERIES (par->chip)) 1742 if (S3_SAVAGE3D_SERIES(par->chip))
1570 resource = 0; 1743 resource = 0;
1571 else 1744 else
1572 resource = 1; 1745 resource = 1;
1573 1746
1574 par->video.pbase = pci_resource_start (par->pcidev, resource); 1747 par->video.pbase = pci_resource_start(par->pcidev, resource);
1575 par->video.len = video_len; 1748 par->video.len = video_len;
1576 par->video.vbase = ioremap (par->video.pbase, par->video.len); 1749 par->video.vbase = ioremap(par->video.pbase, par->video.len);
1577 1750
1578 if (!par->video.vbase) { 1751 if (!par->video.vbase) {
1579 printk ("savagefb: unable to map screen memory\n"); 1752 printk("savagefb: unable to map screen memory\n");
1580 return -ENOMEM; 1753 return -ENOMEM;
1581 } else 1754 } else
1582 printk (KERN_INFO "savagefb: mapped framebuffer at %p, " 1755 printk(KERN_INFO "savagefb: mapped framebuffer at %p, "
1583 "pbase == %x\n", par->video.vbase, par->video.pbase); 1756 "pbase == %x\n", par->video.vbase, par->video.pbase);
1584 1757
1585 info->fix.smem_start = par->video.pbase; 1758 info->fix.smem_start = par->video.pbase;
1586 info->fix.smem_len = par->video.len - par->cob_size; 1759 info->fix.smem_len = par->video.len - par->cob_size;
1587 info->screen_base = par->video.vbase; 1760 info->screen_base = par->video.vbase;
1588 1761
1589#ifdef CONFIG_MTRR 1762#ifdef CONFIG_MTRR
1590 par->video.mtrr = mtrr_add (par->video.pbase, video_len, 1763 par->video.mtrr = mtrr_add(par->video.pbase, video_len,
1591 MTRR_TYPE_WRCOMB, 1); 1764 MTRR_TYPE_WRCOMB, 1);
1592#endif 1765#endif
1593 1766
1594 /* Clear framebuffer, it's all white in memory after boot */ 1767 /* Clear framebuffer, it's all white in memory after boot */
1595 memset_io (par->video.vbase, 0, par->video.len); 1768 memset_io(par->video.vbase, 0, par->video.len);
1596 1769
1597 return 0; 1770 return 0;
1598} 1771}
1599 1772
1600static void savage_unmap_video (struct fb_info *info) 1773static void savage_unmap_video(struct fb_info *info)
1601{ 1774{
1602 struct savagefb_par *par = info->par; 1775 struct savagefb_par *par = info->par;
1603 1776
@@ -1605,16 +1778,16 @@ static void savage_unmap_video (struct fb_info *info)
1605 1778
1606 if (par->video.vbase) { 1779 if (par->video.vbase) {
1607#ifdef CONFIG_MTRR 1780#ifdef CONFIG_MTRR
1608 mtrr_del (par->video.mtrr, par->video.pbase, par->video.len); 1781 mtrr_del(par->video.mtrr, par->video.pbase, par->video.len);
1609#endif 1782#endif
1610 1783
1611 iounmap (par->video.vbase); 1784 iounmap(par->video.vbase);
1612 par->video.vbase = NULL; 1785 par->video.vbase = NULL;
1613 info->screen_base = NULL; 1786 info->screen_base = NULL;
1614 } 1787 }
1615} 1788}
1616 1789
1617static int savage_init_hw (struct savagefb_par *par) 1790static int savage_init_hw(struct savagefb_par *par)
1618{ 1791{
1619 unsigned char config1, m, n, n1, n2, sr8, cr3f, cr66 = 0, tmp; 1792 unsigned char config1, m, n, n1, n2, sr8, cr3f, cr66 = 0, tmp;
1620 1793
@@ -1656,7 +1829,7 @@ static int savage_init_hw (struct savagefb_par *par)
1656 1829
1657 switch (par->chip) { 1830 switch (par->chip) {
1658 case S3_SAVAGE3D: 1831 case S3_SAVAGE3D:
1659 videoRam = RamSavage3D[ (config1 & 0xC0) >> 6 ] * 1024; 1832 videoRam = RamSavage3D[(config1 & 0xC0) >> 6 ] * 1024;
1660 break; 1833 break;
1661 1834
1662 case S3_SAVAGE4: 1835 case S3_SAVAGE4:
@@ -1667,22 +1840,22 @@ static int savage_init_hw (struct savagefb_par *par)
1667 * can do it different... 1840 * can do it different...
1668 */ 1841 */
1669 vga_out8(0x3d4, 0x68, par); /* memory control 1 */ 1842 vga_out8(0x3d4, 0x68, par); /* memory control 1 */
1670 if( (vga_in8(0x3d5, par) & 0xC0) == (0x01 << 6) ) 1843 if ((vga_in8(0x3d5, par) & 0xC0) == (0x01 << 6))
1671 RamSavage4[1] = 8; 1844 RamSavage4[1] = 8;
1672 1845
1673 /*FALLTHROUGH*/ 1846 /*FALLTHROUGH*/
1674 1847
1675 case S3_SAVAGE2000: 1848 case S3_SAVAGE2000:
1676 videoRam = RamSavage4[ (config1 & 0xE0) >> 5 ] * 1024; 1849 videoRam = RamSavage4[(config1 & 0xE0) >> 5] * 1024;
1677 break; 1850 break;
1678 1851
1679 case S3_SAVAGE_MX: 1852 case S3_SAVAGE_MX:
1680 case S3_SUPERSAVAGE: 1853 case S3_SUPERSAVAGE:
1681 videoRam = RamSavageMX[ (config1 & 0x0E) >> 1 ] * 1024; 1854 videoRam = RamSavageMX[(config1 & 0x0E) >> 1] * 1024;
1682 break; 1855 break;
1683 1856
1684 case S3_PROSAVAGE: 1857 case S3_PROSAVAGE:
1685 videoRam = RamSavageNB[ (config1 & 0xE0) >> 5 ] * 1024; 1858 videoRam = RamSavageNB[(config1 & 0xE0) >> 5] * 1024;
1686 break; 1859 break;
1687 1860
1688 default: 1861 default:
@@ -1693,31 +1866,31 @@ static int savage_init_hw (struct savagefb_par *par)
1693 1866
1694 videoRambytes = videoRam * 1024; 1867 videoRambytes = videoRam * 1024;
1695 1868
1696 printk (KERN_INFO "savagefb: probed videoram: %dk\n", videoRam); 1869 printk(KERN_INFO "savagefb: probed videoram: %dk\n", videoRam);
1697 1870
1698 /* reset graphics engine to avoid memory corruption */ 1871 /* reset graphics engine to avoid memory corruption */
1699 vga_out8 (0x3d4, 0x66, par); 1872 vga_out8(0x3d4, 0x66, par);
1700 cr66 = vga_in8 (0x3d5, par); 1873 cr66 = vga_in8(0x3d5, par);
1701 vga_out8 (0x3d5, cr66 | 0x02, par); 1874 vga_out8(0x3d5, cr66 | 0x02, par);
1702 udelay (10000); 1875 udelay(10000);
1703 1876
1704 vga_out8 (0x3d4, 0x66, par); 1877 vga_out8(0x3d4, 0x66, par);
1705 vga_out8 (0x3d5, cr66 & ~0x02, par); /* clear reset flag */ 1878 vga_out8(0x3d5, cr66 & ~0x02, par); /* clear reset flag */
1706 udelay (10000); 1879 udelay(10000);
1707 1880
1708 1881
1709 /* 1882 /*
1710 * reset memory interface, 3D engine, AGP master, PCI master, 1883 * reset memory interface, 3D engine, AGP master, PCI master,
1711 * master engine unit, motion compensation/LPB 1884 * master engine unit, motion compensation/LPB
1712 */ 1885 */
1713 vga_out8 (0x3d4, 0x3f, par); 1886 vga_out8(0x3d4, 0x3f, par);
1714 cr3f = vga_in8 (0x3d5, par); 1887 cr3f = vga_in8(0x3d5, par);
1715 vga_out8 (0x3d5, cr3f | 0x08, par); 1888 vga_out8(0x3d5, cr3f | 0x08, par);
1716 udelay (10000); 1889 udelay(10000);
1717 1890
1718 vga_out8 (0x3d4, 0x3f, par); 1891 vga_out8(0x3d4, 0x3f, par);
1719 vga_out8 (0x3d5, cr3f & ~0x08, par); /* clear reset flags */ 1892 vga_out8(0x3d5, cr3f & ~0x08, par); /* clear reset flags */
1720 udelay (10000); 1893 udelay(10000);
1721 1894
1722 /* Savage ramdac speeds */ 1895 /* Savage ramdac speeds */
1723 par->numClocks = 4; 1896 par->numClocks = 4;
@@ -1740,7 +1913,7 @@ static int savage_init_hw (struct savagefb_par *par)
1740 n1 = n & 0x1f; 1913 n1 = n & 0x1f;
1741 n2 = (n >> 5) & 0x03; 1914 n2 = (n >> 5) & 0x03;
1742 par->MCLK = ((1431818 * (m+2)) / (n1+2) / (1 << n2) + 50) / 100; 1915 par->MCLK = ((1431818 * (m+2)) / (n1+2) / (1 << n2) + 50) / 100;
1743 printk (KERN_INFO "savagefb: Detected current MCLK value of %d kHz\n", 1916 printk(KERN_INFO "savagefb: Detected current MCLK value of %d kHz\n",
1744 par->MCLK); 1917 par->MCLK);
1745 1918
1746 /* check for DVI/flat panel */ 1919 /* check for DVI/flat panel */
@@ -1769,12 +1942,12 @@ static int savage_init_hw (struct savagefb_par *par)
1769 /* Check LCD panel parrmation */ 1942 /* Check LCD panel parrmation */
1770 1943
1771 if (par->display_type == DISP_LCD) { 1944 if (par->display_type == DISP_LCD) {
1772 unsigned char cr6b = VGArCR( 0x6b, par); 1945 unsigned char cr6b = VGArCR(0x6b, par);
1773 1946
1774 int panelX = (VGArSEQ (0x61, par) + 1947 int panelX = (VGArSEQ(0x61, par) +
1775 ((VGArSEQ (0x66, par) & 0x02) << 7) + 1) * 8; 1948 ((VGArSEQ(0x66, par) & 0x02) << 7) + 1) * 8;
1776 int panelY = (VGArSEQ (0x69, par) + 1949 int panelY = (VGArSEQ(0x69, par) +
1777 ((VGArSEQ (0x6e, par) & 0x70) << 4) + 1); 1950 ((VGArSEQ(0x6e, par) & 0x70) << 4) + 1);
1778 1951
1779 char * sTechnology = "Unknown"; 1952 char * sTechnology = "Unknown";
1780 1953
@@ -1796,26 +1969,26 @@ static int savage_init_hw (struct savagefb_par *par)
1796 ActiveDUO = 0x80 1969 ActiveDUO = 0x80
1797 }; 1970 };
1798 1971
1799 if ((VGArSEQ (0x39, par) & 0x03) == 0) { 1972 if ((VGArSEQ(0x39, par) & 0x03) == 0) {
1800 sTechnology = "TFT"; 1973 sTechnology = "TFT";
1801 } else if ((VGArSEQ (0x30, par) & 0x01) == 0) { 1974 } else if ((VGArSEQ(0x30, par) & 0x01) == 0) {
1802 sTechnology = "DSTN"; 1975 sTechnology = "DSTN";
1803 } else { 1976 } else {
1804 sTechnology = "STN"; 1977 sTechnology = "STN";
1805 } 1978 }
1806 1979
1807 printk (KERN_INFO "savagefb: %dx%d %s LCD panel detected %s\n", 1980 printk(KERN_INFO "savagefb: %dx%d %s LCD panel detected %s\n",
1808 panelX, panelY, sTechnology, 1981 panelX, panelY, sTechnology,
1809 cr6b & ActiveLCD ? "and active" : "but not active"); 1982 cr6b & ActiveLCD ? "and active" : "but not active");
1810 1983
1811 if( cr6b & ActiveLCD ) { 1984 if (cr6b & ActiveLCD) {
1812 /* 1985 /*
1813 * If the LCD is active and panel expansion is enabled, 1986 * If the LCD is active and panel expansion is enabled,
1814 * we probably want to kill the HW cursor. 1987 * we probably want to kill the HW cursor.
1815 */ 1988 */
1816 1989
1817 printk (KERN_INFO "savagefb: Limiting video mode to " 1990 printk(KERN_INFO "savagefb: Limiting video mode to "
1818 "%dx%d\n", panelX, panelY ); 1991 "%dx%d\n", panelX, panelY);
1819 1992
1820 par->SavagePanelWidth = panelX; 1993 par->SavagePanelWidth = panelX;
1821 par->SavagePanelHeight = panelY; 1994 par->SavagePanelHeight = panelY;
@@ -1824,9 +1997,10 @@ static int savage_init_hw (struct savagefb_par *par)
1824 par->display_type = DISP_CRT; 1997 par->display_type = DISP_CRT;
1825 } 1998 }
1826 1999
1827 savage_get_default_par (par); 2000 savage_get_default_par(par, &par->state);
2001 par->save = par->state;
1828 2002
1829 if( S3_SAVAGE4_SERIES(par->chip) ) { 2003 if (S3_SAVAGE4_SERIES(par->chip)) {
1830 /* 2004 /*
1831 * The Savage4 and ProSavage have COB coherency bugs which 2005 * The Savage4 and ProSavage have COB coherency bugs which
1832 * render the buffer useless. We disable it. 2006 * render the buffer useless. We disable it.
@@ -1845,9 +2019,9 @@ static int savage_init_hw (struct savagefb_par *par)
1845 return videoRambytes; 2019 return videoRambytes;
1846} 2020}
1847 2021
1848static int __devinit savage_init_fb_info (struct fb_info *info, 2022static int __devinit savage_init_fb_info(struct fb_info *info,
1849 struct pci_dev *dev, 2023 struct pci_dev *dev,
1850 const struct pci_device_id *id) 2024 const struct pci_device_id *id)
1851{ 2025{
1852 struct savagefb_par *par = info->par; 2026 struct savagefb_par *par = info->par;
1853 int err = 0; 2027 int err = 0;
@@ -1863,63 +2037,63 @@ static int __devinit savage_init_fb_info (struct fb_info *info,
1863 switch (info->fix.accel) { 2037 switch (info->fix.accel) {
1864 case FB_ACCEL_SUPERSAVAGE: 2038 case FB_ACCEL_SUPERSAVAGE:
1865 par->chip = S3_SUPERSAVAGE; 2039 par->chip = S3_SUPERSAVAGE;
1866 snprintf (info->fix.id, 16, "SuperSavage"); 2040 snprintf(info->fix.id, 16, "SuperSavage");
1867 break; 2041 break;
1868 case FB_ACCEL_SAVAGE4: 2042 case FB_ACCEL_SAVAGE4:
1869 par->chip = S3_SAVAGE4; 2043 par->chip = S3_SAVAGE4;
1870 snprintf (info->fix.id, 16, "Savage4"); 2044 snprintf(info->fix.id, 16, "Savage4");
1871 break; 2045 break;
1872 case FB_ACCEL_SAVAGE3D: 2046 case FB_ACCEL_SAVAGE3D:
1873 par->chip = S3_SAVAGE3D; 2047 par->chip = S3_SAVAGE3D;
1874 snprintf (info->fix.id, 16, "Savage3D"); 2048 snprintf(info->fix.id, 16, "Savage3D");
1875 break; 2049 break;
1876 case FB_ACCEL_SAVAGE3D_MV: 2050 case FB_ACCEL_SAVAGE3D_MV:
1877 par->chip = S3_SAVAGE3D; 2051 par->chip = S3_SAVAGE3D;
1878 snprintf (info->fix.id, 16, "Savage3D-MV"); 2052 snprintf(info->fix.id, 16, "Savage3D-MV");
1879 break; 2053 break;
1880 case FB_ACCEL_SAVAGE2000: 2054 case FB_ACCEL_SAVAGE2000:
1881 par->chip = S3_SAVAGE2000; 2055 par->chip = S3_SAVAGE2000;
1882 snprintf (info->fix.id, 16, "Savage2000"); 2056 snprintf(info->fix.id, 16, "Savage2000");
1883 break; 2057 break;
1884 case FB_ACCEL_SAVAGE_MX_MV: 2058 case FB_ACCEL_SAVAGE_MX_MV:
1885 par->chip = S3_SAVAGE_MX; 2059 par->chip = S3_SAVAGE_MX;
1886 snprintf (info->fix.id, 16, "Savage/MX-MV"); 2060 snprintf(info->fix.id, 16, "Savage/MX-MV");
1887 break; 2061 break;
1888 case FB_ACCEL_SAVAGE_MX: 2062 case FB_ACCEL_SAVAGE_MX:
1889 par->chip = S3_SAVAGE_MX; 2063 par->chip = S3_SAVAGE_MX;
1890 snprintf (info->fix.id, 16, "Savage/MX"); 2064 snprintf(info->fix.id, 16, "Savage/MX");
1891 break; 2065 break;
1892 case FB_ACCEL_SAVAGE_IX_MV: 2066 case FB_ACCEL_SAVAGE_IX_MV:
1893 par->chip = S3_SAVAGE_MX; 2067 par->chip = S3_SAVAGE_MX;
1894 snprintf (info->fix.id, 16, "Savage/IX-MV"); 2068 snprintf(info->fix.id, 16, "Savage/IX-MV");
1895 break; 2069 break;
1896 case FB_ACCEL_SAVAGE_IX: 2070 case FB_ACCEL_SAVAGE_IX:
1897 par->chip = S3_SAVAGE_MX; 2071 par->chip = S3_SAVAGE_MX;
1898 snprintf (info->fix.id, 16, "Savage/IX"); 2072 snprintf(info->fix.id, 16, "Savage/IX");
1899 break; 2073 break;
1900 case FB_ACCEL_PROSAVAGE_PM: 2074 case FB_ACCEL_PROSAVAGE_PM:
1901 par->chip = S3_PROSAVAGE; 2075 par->chip = S3_PROSAVAGE;
1902 snprintf (info->fix.id, 16, "ProSavagePM"); 2076 snprintf(info->fix.id, 16, "ProSavagePM");
1903 break; 2077 break;
1904 case FB_ACCEL_PROSAVAGE_KM: 2078 case FB_ACCEL_PROSAVAGE_KM:
1905 par->chip = S3_PROSAVAGE; 2079 par->chip = S3_PROSAVAGE;
1906 snprintf (info->fix.id, 16, "ProSavageKM"); 2080 snprintf(info->fix.id, 16, "ProSavageKM");
1907 break; 2081 break;
1908 case FB_ACCEL_S3TWISTER_P: 2082 case FB_ACCEL_S3TWISTER_P:
1909 par->chip = S3_PROSAVAGE; 2083 par->chip = S3_PROSAVAGE;
1910 snprintf (info->fix.id, 16, "TwisterP"); 2084 snprintf(info->fix.id, 16, "TwisterP");
1911 break; 2085 break;
1912 case FB_ACCEL_S3TWISTER_K: 2086 case FB_ACCEL_S3TWISTER_K:
1913 par->chip = S3_PROSAVAGE; 2087 par->chip = S3_PROSAVAGE;
1914 snprintf (info->fix.id, 16, "TwisterK"); 2088 snprintf(info->fix.id, 16, "TwisterK");
1915 break; 2089 break;
1916 case FB_ACCEL_PROSAVAGE_DDR: 2090 case FB_ACCEL_PROSAVAGE_DDR:
1917 par->chip = S3_PROSAVAGE; 2091 par->chip = S3_PROSAVAGE;
1918 snprintf (info->fix.id, 16, "ProSavageDDR"); 2092 snprintf(info->fix.id, 16, "ProSavageDDR");
1919 break; 2093 break;
1920 case FB_ACCEL_PROSAVAGE_DDRK: 2094 case FB_ACCEL_PROSAVAGE_DDRK:
1921 par->chip = S3_PROSAVAGE; 2095 par->chip = S3_PROSAVAGE;
1922 snprintf (info->fix.id, 16, "ProSavage8"); 2096 snprintf(info->fix.id, 16, "ProSavage8");
1923 break; 2097 break;
1924 } 2098 }
1925 2099
@@ -1960,7 +2134,7 @@ static int __devinit savage_init_fb_info (struct fb_info *info,
1960 info->pixmap.buf_align = 4; 2134 info->pixmap.buf_align = 4;
1961 info->pixmap.access_align = 32; 2135 info->pixmap.access_align = 32;
1962 2136
1963 err = fb_alloc_cmap (&info->cmap, NR_PALETTE, 0); 2137 err = fb_alloc_cmap(&info->cmap, NR_PALETTE, 0);
1964 if (!err) 2138 if (!err)
1965 info->flags |= FBINFO_HWACCEL_COPYAREA | 2139 info->flags |= FBINFO_HWACCEL_COPYAREA |
1966 FBINFO_HWACCEL_FILLRECT | 2140 FBINFO_HWACCEL_FILLRECT |
@@ -1972,8 +2146,8 @@ static int __devinit savage_init_fb_info (struct fb_info *info,
1972 2146
1973/* --------------------------------------------------------------------- */ 2147/* --------------------------------------------------------------------- */
1974 2148
1975static int __devinit savagefb_probe (struct pci_dev* dev, 2149static int __devinit savagefb_probe(struct pci_dev* dev,
1976 const struct pci_device_id* id) 2150 const struct pci_device_id* id)
1977{ 2151{
1978 struct fb_info *info; 2152 struct fb_info *info;
1979 struct savagefb_par *par; 2153 struct savagefb_par *par;
@@ -2085,12 +2259,12 @@ static int __devinit savagefb_probe (struct pci_dev* dev,
2085 fb_destroy_modedb(info->monspecs.modedb); 2259 fb_destroy_modedb(info->monspecs.modedb);
2086 info->monspecs.modedb = NULL; 2260 info->monspecs.modedb = NULL;
2087 2261
2088 err = register_framebuffer (info); 2262 err = register_framebuffer(info);
2089 if (err < 0) 2263 if (err < 0)
2090 goto failed; 2264 goto failed;
2091 2265
2092 printk (KERN_INFO "fb: S3 %s frame buffer device\n", 2266 printk(KERN_INFO "fb: S3 %s frame buffer device\n",
2093 info->fix.id); 2267 info->fix.id);
2094 2268
2095 /* 2269 /*
2096 * Our driver data 2270 * Our driver data
@@ -2103,10 +2277,10 @@ static int __devinit savagefb_probe (struct pci_dev* dev,
2103#ifdef CONFIG_FB_SAVAGE_I2C 2277#ifdef CONFIG_FB_SAVAGE_I2C
2104 savagefb_delete_i2c_busses(info); 2278 savagefb_delete_i2c_busses(info);
2105#endif 2279#endif
2106 fb_alloc_cmap (&info->cmap, 0, 0); 2280 fb_alloc_cmap(&info->cmap, 0, 0);
2107 savage_unmap_video(info); 2281 savage_unmap_video(info);
2108 failed_video: 2282 failed_video:
2109 savage_unmap_mmio (info); 2283 savage_unmap_mmio(info);
2110 failed_mmio: 2284 failed_mmio:
2111 kfree(info->pixmap.addr); 2285 kfree(info->pixmap.addr);
2112 failed_init: 2286 failed_init:
@@ -2117,7 +2291,7 @@ static int __devinit savagefb_probe (struct pci_dev* dev,
2117 return err; 2291 return err;
2118} 2292}
2119 2293
2120static void __devexit savagefb_remove (struct pci_dev *dev) 2294static void __devexit savagefb_remove(struct pci_dev *dev)
2121{ 2295{
2122 struct fb_info *info = pci_get_drvdata(dev); 2296 struct fb_info *info = pci_get_drvdata(dev);
2123 2297
@@ -2129,16 +2303,16 @@ static void __devexit savagefb_remove (struct pci_dev *dev)
2129 * we will be leaving hooks that could cause 2303 * we will be leaving hooks that could cause
2130 * oopsen laying around. 2304 * oopsen laying around.
2131 */ 2305 */
2132 if (unregister_framebuffer (info)) 2306 if (unregister_framebuffer(info))
2133 printk (KERN_WARNING "savagefb: danger danger! " 2307 printk(KERN_WARNING "savagefb: danger danger! "
2134 "Oopsen imminent!\n"); 2308 "Oopsen imminent!\n");
2135 2309
2136#ifdef CONFIG_FB_SAVAGE_I2C 2310#ifdef CONFIG_FB_SAVAGE_I2C
2137 savagefb_delete_i2c_busses(info); 2311 savagefb_delete_i2c_busses(info);
2138#endif 2312#endif
2139 fb_alloc_cmap (&info->cmap, 0, 0); 2313 fb_alloc_cmap(&info->cmap, 0, 0);
2140 savage_unmap_video (info); 2314 savage_unmap_video(info);
2141 savage_unmap_mmio (info); 2315 savage_unmap_mmio(info);
2142 kfree(info->pixmap.addr); 2316 kfree(info->pixmap.addr);
2143 pci_release_regions(dev); 2317 pci_release_regions(dev);
2144 framebuffer_release(info); 2318 framebuffer_release(info);
@@ -2151,7 +2325,7 @@ static void __devexit savagefb_remove (struct pci_dev *dev)
2151 } 2325 }
2152} 2326}
2153 2327
2154static int savagefb_suspend (struct pci_dev* dev, pm_message_t state) 2328static int savagefb_suspend(struct pci_dev* dev, pm_message_t state)
2155{ 2329{
2156 struct fb_info *info = pci_get_drvdata(dev); 2330 struct fb_info *info = pci_get_drvdata(dev);
2157 struct savagefb_par *par = info->par; 2331 struct savagefb_par *par = info->par;
@@ -2177,6 +2351,7 @@ static int savagefb_suspend (struct pci_dev* dev, pm_message_t state)
2177 info->fbops->fb_sync(info); 2351 info->fbops->fb_sync(info);
2178 2352
2179 savagefb_blank(FB_BLANK_POWERDOWN, info); 2353 savagefb_blank(FB_BLANK_POWERDOWN, info);
2354 savage_set_default_par(par, &par->save);
2180 savage_disable_mmio(par); 2355 savage_disable_mmio(par);
2181 pci_save_state(dev); 2356 pci_save_state(dev);
2182 pci_disable_device(dev); 2357 pci_disable_device(dev);
@@ -2186,7 +2361,7 @@ static int savagefb_suspend (struct pci_dev* dev, pm_message_t state)
2186 return 0; 2361 return 0;
2187} 2362}
2188 2363
2189static int savagefb_resume (struct pci_dev* dev) 2364static int savagefb_resume(struct pci_dev* dev)
2190{ 2365{
2191 struct fb_info *info = pci_get_drvdata(dev); 2366 struct fb_info *info = pci_get_drvdata(dev);
2192 struct savagefb_par *par = info->par; 2367 struct savagefb_par *par = info->par;
@@ -2210,15 +2385,15 @@ static int savagefb_resume (struct pci_dev* dev)
2210 pci_set_power_state(dev, PCI_D0); 2385 pci_set_power_state(dev, PCI_D0);
2211 pci_restore_state(dev); 2386 pci_restore_state(dev);
2212 2387
2213 if(pci_enable_device(dev)) 2388 if (pci_enable_device(dev))
2214 DBG("err"); 2389 DBG("err");
2215 2390
2216 pci_set_master(dev); 2391 pci_set_master(dev);
2217 savage_enable_mmio(par); 2392 savage_enable_mmio(par);
2218 savage_init_hw(par); 2393 savage_init_hw(par);
2219 savagefb_set_par (info); 2394 savagefb_set_par(info);
2395 fb_set_suspend(info, 0);
2220 savagefb_blank(FB_BLANK_UNBLANK, info); 2396 savagefb_blank(FB_BLANK_UNBLANK, info);
2221 fb_set_suspend (info, 0);
2222 release_console_sem(); 2397 release_console_sem();
2223 2398
2224 return 0; 2399 return 0;
@@ -2311,10 +2486,10 @@ static struct pci_driver savagefb_driver = {
2311 2486
2312/* **************************** exit-time only **************************** */ 2487/* **************************** exit-time only **************************** */
2313 2488
2314static void __exit savage_done (void) 2489static void __exit savage_done(void)
2315{ 2490{
2316 DBG("savage_done"); 2491 DBG("savage_done");
2317 pci_unregister_driver (&savagefb_driver); 2492 pci_unregister_driver(&savagefb_driver);
2318} 2493}
2319 2494
2320 2495
@@ -2345,7 +2520,7 @@ static int __init savagefb_init(void)
2345 return -ENODEV; 2520 return -ENODEV;
2346 2521
2347 savagefb_setup(option); 2522 savagefb_setup(option);
2348 return pci_register_driver (&savagefb_driver); 2523 return pci_register_driver(&savagefb_driver);
2349 2524
2350} 2525}
2351 2526
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 8adf5bf91e..c63c0e721b 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -275,7 +275,7 @@ sisfb_search_mode(char *name, BOOLEAN quiet)
275static void __devinit 275static void __devinit
276sisfb_get_vga_mode_from_kernel(void) 276sisfb_get_vga_mode_from_kernel(void)
277{ 277{
278#if (defined(__i386__) || defined(__x86_64__)) && defined(CONFIG_VIDEO_SELECT) 278#ifdef CONFIG_X86
279 char mymode[32]; 279 char mymode[32];
280 int mydepth = screen_info.lfb_depth; 280 int mydepth = screen_info.lfb_depth;
281 281
diff --git a/drivers/video/skeletonfb.c b/drivers/video/skeletonfb.c
index 9b707771d7..67f429e931 100644
--- a/drivers/video/skeletonfb.c
+++ b/drivers/video/skeletonfb.c
@@ -906,11 +906,6 @@ static void __exit xxxfb_exit(void)
906} 906}
907#endif 907#endif
908 908
909MODULE_LICENSE("GPL");
910module_init(xxxfb_init);
911module_exit(xxxfb_exit);
912
913
914 /* 909 /*
915 * Setup 910 * Setup
916 */ 911 */
diff --git a/drivers/video/tgafb.c b/drivers/video/tgafb.c
index 7398bd48ba..6c2c78ab98 100644
--- a/drivers/video/tgafb.c
+++ b/drivers/video/tgafb.c
@@ -26,7 +26,6 @@
26#include <linux/selection.h> 26#include <linux/selection.h>
27#include <asm/io.h> 27#include <asm/io.h>
28#include <video/tgafb.h> 28#include <video/tgafb.h>
29#include <linux/selection.h>
30 29
31/* 30/*
32 * Local functions. 31 * Local functions.
diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
index b0b9acfdd4..5718924b67 100644
--- a/drivers/video/vesafb.c
+++ b/drivers/video/vesafb.c
@@ -51,7 +51,7 @@ static int inverse = 0;
51static int mtrr = 0; /* disable mtrr */ 51static int mtrr = 0; /* disable mtrr */
52static int vram_remap __initdata = 0; /* Set amount of memory to be used */ 52static int vram_remap __initdata = 0; /* Set amount of memory to be used */
53static int vram_total __initdata = 0; /* Set total amount of memory */ 53static int vram_total __initdata = 0; /* Set total amount of memory */
54static int pmi_setpal = 0; /* pmi for palette changes ??? */ 54static int pmi_setpal = 1; /* pmi for palette changes ??? */
55static int ypan = 0; /* 0..nothing, 1..ypan, 2..ywrap */ 55static int ypan = 0; /* 0..nothing, 1..ypan, 2..ywrap */
56static unsigned short *pmi_base = NULL; 56static unsigned short *pmi_base = NULL;
57static void (*pmi_start)(void); 57static void (*pmi_start)(void);
@@ -80,15 +80,30 @@ static int vesafb_pan_display(struct fb_var_screeninfo *var,
80 return 0; 80 return 0;
81} 81}
82 82
83static void vesa_setpalette(int regno, unsigned red, unsigned green, 83static int vesa_setpalette(int regno, unsigned red, unsigned green,
84 unsigned blue) 84 unsigned blue)
85{ 85{
86 int shift = 16 - depth; 86 int shift = 16 - depth;
87 int err = -EINVAL;
88
89/*
90 * Try VGA registers first...
91 */
92 if (vga_compat) {
93 outb_p(regno, dac_reg);
94 outb_p(red >> shift, dac_val);
95 outb_p(green >> shift, dac_val);
96 outb_p(blue >> shift, dac_val);
97 err = 0;
98 }
87 99
88#ifdef __i386__ 100#ifdef __i386__
89 struct { u_char blue, green, red, pad; } entry; 101/*
102 * Fallback to the PMI....
103 */
104 if (err && pmi_setpal) {
105 struct { u_char blue, green, red, pad; } entry;
90 106
91 if (pmi_setpal) {
92 entry.red = red >> shift; 107 entry.red = red >> shift;
93 entry.green = green >> shift; 108 entry.green = green >> shift;
94 entry.blue = blue >> shift; 109 entry.blue = blue >> shift;
@@ -102,26 +117,19 @@ static void vesa_setpalette(int regno, unsigned red, unsigned green,
102 "d" (regno), /* EDX */ 117 "d" (regno), /* EDX */
103 "D" (&entry), /* EDI */ 118 "D" (&entry), /* EDI */
104 "S" (&pmi_pal)); /* ESI */ 119 "S" (&pmi_pal)); /* ESI */
105 return; 120 err = 0;
106 } 121 }
107#endif 122#endif
108 123
109/* 124 return err;
110 * without protected mode interface and if VGA compatible,
111 * try VGA registers...
112 */
113 if (vga_compat) {
114 outb_p(regno, dac_reg);
115 outb_p(red >> shift, dac_val);
116 outb_p(green >> shift, dac_val);
117 outb_p(blue >> shift, dac_val);
118 }
119} 125}
120 126
121static int vesafb_setcolreg(unsigned regno, unsigned red, unsigned green, 127static int vesafb_setcolreg(unsigned regno, unsigned red, unsigned green,
122 unsigned blue, unsigned transp, 128 unsigned blue, unsigned transp,
123 struct fb_info *info) 129 struct fb_info *info)
124{ 130{
131 int err = 0;
132
125 /* 133 /*
126 * Set a single color register. The values supplied are 134 * Set a single color register. The values supplied are
127 * already rounded down to the hardware's capabilities 135 * already rounded down to the hardware's capabilities
@@ -133,7 +141,7 @@ static int vesafb_setcolreg(unsigned regno, unsigned red, unsigned green,
133 return 1; 141 return 1;
134 142
135 if (info->var.bits_per_pixel == 8) 143 if (info->var.bits_per_pixel == 8)
136 vesa_setpalette(regno,red,green,blue); 144 err = vesa_setpalette(regno,red,green,blue);
137 else if (regno < 16) { 145 else if (regno < 16) {
138 switch (info->var.bits_per_pixel) { 146 switch (info->var.bits_per_pixel) {
139 case 16: 147 case 16:
@@ -164,7 +172,7 @@ static int vesafb_setcolreg(unsigned regno, unsigned red, unsigned green,
164 } 172 }
165 } 173 }
166 174
167 return 0; 175 return err;
168} 176}
169 177
170static struct fb_ops vesafb_ops = { 178static struct fb_ops vesafb_ops = {
@@ -460,9 +468,7 @@ static struct platform_driver vesafb_driver = {
460 }, 468 },
461}; 469};
462 470
463static struct platform_device vesafb_device = { 471static struct platform_device *vesafb_device;
464 .name = "vesafb",
465};
466 472
467static int __init vesafb_init(void) 473static int __init vesafb_init(void)
468{ 474{
@@ -475,10 +481,19 @@ static int __init vesafb_init(void)
475 ret = platform_driver_register(&vesafb_driver); 481 ret = platform_driver_register(&vesafb_driver);
476 482
477 if (!ret) { 483 if (!ret) {
478 ret = platform_device_register(&vesafb_device); 484 vesafb_device = platform_device_alloc("vesafb", 0);
479 if (ret) 485
486 if (vesafb_device)
487 ret = platform_device_add(vesafb_device);
488 else
489 ret = -ENOMEM;
490
491 if (ret) {
492 platform_device_put(vesafb_device);
480 platform_driver_unregister(&vesafb_driver); 493 platform_driver_unregister(&vesafb_driver);
494 }
481 } 495 }
496
482 return ret; 497 return ret;
483} 498}
484module_init(vesafb_init); 499module_init(vesafb_init);
diff --git a/drivers/video/vfb.c b/drivers/video/vfb.c
index 77eed1fd99..d073ffb6e1 100644
--- a/drivers/video/vfb.c
+++ b/drivers/video/vfb.c
@@ -398,12 +398,6 @@ static int __init vfb_setup(char *options)
398 * Initialisation 398 * Initialisation
399 */ 399 */
400 400
401static void vfb_platform_release(struct device *device)
402{
403 // This is called when the reference count goes to zero.
404 dev_err(device, "This driver is broken, please bug the authors so they will fix it.\n");
405}
406
407static int __init vfb_probe(struct platform_device *dev) 401static int __init vfb_probe(struct platform_device *dev)
408{ 402{
409 struct fb_info *info; 403 struct fb_info *info;
@@ -482,13 +476,7 @@ static struct platform_driver vfb_driver = {
482 }, 476 },
483}; 477};
484 478
485static struct platform_device vfb_device = { 479static struct platform_device *vfb_device;
486 .name = "vfb",
487 .id = 0,
488 .dev = {
489 .release = vfb_platform_release,
490 }
491};
492 480
493static int __init vfb_init(void) 481static int __init vfb_init(void)
494{ 482{
@@ -508,10 +496,19 @@ static int __init vfb_init(void)
508 ret = platform_driver_register(&vfb_driver); 496 ret = platform_driver_register(&vfb_driver);
509 497
510 if (!ret) { 498 if (!ret) {
511 ret = platform_device_register(&vfb_device); 499 vfb_device = platform_device_alloc("vfb", 0);
512 if (ret) 500
501 if (vfb_device)
502 ret = platform_device_add(vfb_device);
503 else
504 ret = -ENOMEM;
505
506 if (ret) {
507 platform_device_put(vfb_device);
513 platform_driver_unregister(&vfb_driver); 508 platform_driver_unregister(&vfb_driver);
509 }
514 } 510 }
511
515 return ret; 512 return ret;
516} 513}
517 514
@@ -520,7 +517,7 @@ module_init(vfb_init);
520#ifdef MODULE 517#ifdef MODULE
521static void __exit vfb_exit(void) 518static void __exit vfb_exit(void)
522{ 519{
523 platform_device_unregister(&vfb_device); 520 platform_device_unregister(vfb_device);
524 platform_driver_unregister(&vfb_driver); 521 platform_driver_unregister(&vfb_driver);
525} 522}
526 523
diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c
index 4fd2a272e0..3c404c9bd3 100644
--- a/drivers/video/vga16fb.c
+++ b/drivers/video/vga16fb.c
@@ -1334,9 +1334,8 @@ static int vga16fb_setup(char *options)
1334} 1334}
1335#endif 1335#endif
1336 1336
1337static int __init vga16fb_probe(struct device *device) 1337static int __init vga16fb_probe(struct platform_device *dev)
1338{ 1338{
1339 struct platform_device *dev = to_platform_device(device);
1340 struct fb_info *info; 1339 struct fb_info *info;
1341 struct vga16fb_par *par; 1340 struct vga16fb_par *par;
1342 int i; 1341 int i;
@@ -1403,7 +1402,7 @@ static int __init vga16fb_probe(struct device *device)
1403 1402
1404 printk(KERN_INFO "fb%d: %s frame buffer device\n", 1403 printk(KERN_INFO "fb%d: %s frame buffer device\n",
1405 info->node, info->fix.id); 1404 info->node, info->fix.id);
1406 dev_set_drvdata(device, info); 1405 platform_set_drvdata(dev, info);
1407 1406
1408 return 0; 1407 return 0;
1409 1408
@@ -1417,9 +1416,9 @@ static int __init vga16fb_probe(struct device *device)
1417 return ret; 1416 return ret;
1418} 1417}
1419 1418
1420static int vga16fb_remove(struct device *device) 1419static int vga16fb_remove(struct platform_device *dev)
1421{ 1420{
1422 struct fb_info *info = dev_get_drvdata(device); 1421 struct fb_info *info = platform_get_drvdata(dev);
1423 1422
1424 if (info) { 1423 if (info) {
1425 unregister_framebuffer(info); 1424 unregister_framebuffer(info);
@@ -1432,16 +1431,15 @@ static int vga16fb_remove(struct device *device)
1432 return 0; 1431 return 0;
1433} 1432}
1434 1433
1435static struct device_driver vga16fb_driver = { 1434static struct platform_driver vga16fb_driver = {
1436 .name = "vga16fb",
1437 .bus = &platform_bus_type,
1438 .probe = vga16fb_probe, 1435 .probe = vga16fb_probe,
1439 .remove = vga16fb_remove, 1436 .remove = vga16fb_remove,
1437 .driver = {
1438 .name = "vga16fb",
1439 },
1440}; 1440};
1441 1441
1442static struct platform_device vga16fb_device = { 1442static struct platform_device *vga16fb_device;
1443 .name = "vga16fb",
1444};
1445 1443
1446static int __init vga16fb_init(void) 1444static int __init vga16fb_init(void)
1447{ 1445{
@@ -1454,12 +1452,20 @@ static int __init vga16fb_init(void)
1454 1452
1455 vga16fb_setup(option); 1453 vga16fb_setup(option);
1456#endif 1454#endif
1457 ret = driver_register(&vga16fb_driver); 1455 ret = platform_driver_register(&vga16fb_driver);
1458 1456
1459 if (!ret) { 1457 if (!ret) {
1460 ret = platform_device_register(&vga16fb_device); 1458 vga16fb_device = platform_device_alloc("vga16fb", 0);
1461 if (ret) 1459
1462 driver_unregister(&vga16fb_driver); 1460 if (vga16fb_device)
1461 ret = platform_device_add(vga16fb_device);
1462 else
1463 ret = -ENOMEM;
1464
1465 if (ret) {
1466 platform_device_put(vga16fb_device);
1467 platform_driver_unregister(&vga16fb_driver);
1468 }
1463 } 1469 }
1464 1470
1465 return ret; 1471 return ret;
@@ -1467,8 +1473,8 @@ static int __init vga16fb_init(void)
1467 1473
1468static void __exit vga16fb_exit(void) 1474static void __exit vga16fb_exit(void)
1469{ 1475{
1470 platform_device_unregister(&vga16fb_device); 1476 platform_device_unregister(vga16fb_device);
1471 driver_unregister(&vga16fb_driver); 1477 platform_driver_unregister(&vga16fb_driver);
1472} 1478}
1473 1479
1474MODULE_LICENSE("GPL"); 1480MODULE_LICENSE("GPL");
diff --git a/fs/9p/mux.c b/fs/9p/mux.c
index f4407eb276..12e1baa450 100644
--- a/fs/9p/mux.c
+++ b/fs/9p/mux.c
@@ -712,7 +712,7 @@ static void v9fs_read_work(void *a)
712 * v9fs_send_request - send 9P request 712 * v9fs_send_request - send 9P request
713 * The function can sleep until the request is scheduled for sending. 713 * The function can sleep until the request is scheduled for sending.
714 * The function can be interrupted. Return from the function is not 714 * The function can be interrupted. Return from the function is not
715 * a guarantee that the request is sent succesfully. Can return errors 715 * a guarantee that the request is sent successfully. Can return errors
716 * that can be retrieved by PTR_ERR macros. 716 * that can be retrieved by PTR_ERR macros.
717 * 717 *
718 * @m: mux data 718 * @m: mux data
diff --git a/fs/Kconfig b/fs/Kconfig
index 1cdc043922..6dc8cfd6d8 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -1116,7 +1116,7 @@ config JFFS2_SUMMARY
1116 1116
1117config JFFS2_FS_XATTR 1117config JFFS2_FS_XATTR
1118 bool "JFFS2 XATTR support (EXPERIMENTAL)" 1118 bool "JFFS2 XATTR support (EXPERIMENTAL)"
1119 depends on JFFS2_FS && EXPERIMENTAL && !JFFS2_FS_WRITEBUFFER 1119 depends on JFFS2_FS && EXPERIMENTAL
1120 default n 1120 default n
1121 help 1121 help
1122 Extended attributes are name:value pairs associated with inodes by 1122 Extended attributes are name:value pairs associated with inodes by
@@ -1490,7 +1490,12 @@ config NFSD
1490 select LOCKD 1490 select LOCKD
1491 select SUNRPC 1491 select SUNRPC
1492 select EXPORTFS 1492 select EXPORTFS
1493 select NFS_ACL_SUPPORT if NFSD_V3_ACL || NFSD_V2_ACL 1493 select NFSD_V2_ACL if NFSD_V3_ACL
1494 select NFS_ACL_SUPPORT if NFSD_V2_ACL
1495 select NFSD_TCP if NFSD_V4
1496 select CRYPTO_MD5 if NFSD_V4
1497 select CRYPTO if NFSD_V4
1498 select FS_POSIX_ACL if NFSD_V4
1494 help 1499 help
1495 If you want your Linux box to act as an NFS *server*, so that other 1500 If you want your Linux box to act as an NFS *server*, so that other
1496 computers on your local network which support NFS can access certain 1501 computers on your local network which support NFS can access certain
@@ -1528,7 +1533,6 @@ config NFSD_V3
1528config NFSD_V3_ACL 1533config NFSD_V3_ACL
1529 bool "Provide server support for the NFSv3 ACL protocol extension" 1534 bool "Provide server support for the NFSv3 ACL protocol extension"
1530 depends on NFSD_V3 1535 depends on NFSD_V3
1531 select NFSD_V2_ACL
1532 help 1536 help
1533 Implement the NFSv3 ACL protocol extension for manipulating POSIX 1537 Implement the NFSv3 ACL protocol extension for manipulating POSIX
1534 Access Control Lists on exported file systems. NFS clients should 1538 Access Control Lists on exported file systems. NFS clients should
@@ -1538,10 +1542,6 @@ config NFSD_V3_ACL
1538config NFSD_V4 1542config NFSD_V4
1539 bool "Provide NFSv4 server support (EXPERIMENTAL)" 1543 bool "Provide NFSv4 server support (EXPERIMENTAL)"
1540 depends on NFSD_V3 && EXPERIMENTAL 1544 depends on NFSD_V3 && EXPERIMENTAL
1541 select NFSD_TCP
1542 select CRYPTO_MD5
1543 select CRYPTO
1544 select FS_POSIX_ACL
1545 help 1545 help
1546 If you would like to include the NFSv4 server as well as the NFSv2 1546 If you would like to include the NFSv4 server as well as the NFSv2
1547 and NFSv3 servers, say Y here. This feature is experimental, and 1547 and NFSv3 servers, say Y here. This feature is experimental, and
@@ -1722,7 +1722,7 @@ config CIFS_STATS
1722 mounted by the cifs client to be displayed in /proc/fs/cifs/Stats 1722 mounted by the cifs client to be displayed in /proc/fs/cifs/Stats
1723 1723
1724config CIFS_STATS2 1724config CIFS_STATS2
1725 bool "CIFS extended statistics" 1725 bool "Extended statistics"
1726 depends on CIFS_STATS 1726 depends on CIFS_STATS
1727 help 1727 help
1728 Enabling this option will allow more detailed statistics on SMB 1728 Enabling this option will allow more detailed statistics on SMB
@@ -1735,6 +1735,32 @@ config CIFS_STATS2
1735 Unless you are a developer or are doing network performance analysis 1735 Unless you are a developer or are doing network performance analysis
1736 or tuning, say N. 1736 or tuning, say N.
1737 1737
1738config CIFS_WEAK_PW_HASH
1739 bool "Support legacy servers which use weaker LANMAN security"
1740 depends on CIFS
1741 help
1742 Modern CIFS servers including Samba and most Windows versions
1743 (since 1997) support stronger NTLM (and even NTLMv2 and Kerberos)
1744 security mechanisms. These hash the password more securely
1745 than the mechanisms used in the older LANMAN version of the
1746 SMB protocol needed to establish sessions with old SMB servers.
1747
1748 Enabling this option allows the cifs module to mount to older
1749 LANMAN based servers such as OS/2 and Windows 95, but such
1750 mounts may be less secure than mounts using NTLM or more recent
1751 security mechanisms if you are on a public network. Unless you
1752 have a need to access old SMB servers (and are on a private
1753 network) you probably want to say N. Even if this support
1754 is enabled in the kernel build, they will not be used
1755 automatically. At runtime LANMAN mounts are disabled but
1756 can be set to required (or optional) either in
1757 /proc/fs/cifs (see fs/cifs/README for more detail) or via an
1758 option on the mount command. This support is disabled by
1759 default in order to reduce the possibility of a downgrade
1760 attack.
1761
1762 If unsure, say N.
1763
1738config CIFS_XATTR 1764config CIFS_XATTR
1739 bool "CIFS extended attributes" 1765 bool "CIFS extended attributes"
1740 depends on CIFS 1766 depends on CIFS
@@ -1763,6 +1789,16 @@ config CIFS_POSIX
1763 (such as Samba 3.10 and later) which can negotiate 1789 (such as Samba 3.10 and later) which can negotiate
1764 CIFS POSIX ACL support. If unsure, say N. 1790 CIFS POSIX ACL support. If unsure, say N.
1765 1791
1792config CIFS_DEBUG2
1793 bool "Enable additional CIFS debugging routines"
1794 help
1795 Enabling this option adds a few more debugging routines
1796 to the cifs code which slightly increases the size of
1797 the cifs module and can cause additional logging of debug
1798 messages in some error paths, slowing performance. This
1799 option can be turned off unless you are debugging
1800 cifs problems. If unsure, say N.
1801
1766config CIFS_EXPERIMENTAL 1802config CIFS_EXPERIMENTAL
1767 bool "CIFS Experimental Features (EXPERIMENTAL)" 1803 bool "CIFS Experimental Features (EXPERIMENTAL)"
1768 depends on CIFS && EXPERIMENTAL 1804 depends on CIFS && EXPERIMENTAL
@@ -1778,7 +1814,7 @@ config CIFS_EXPERIMENTAL
1778 If unsure, say N. 1814 If unsure, say N.
1779 1815
1780config CIFS_UPCALL 1816config CIFS_UPCALL
1781 bool "CIFS Kerberos/SPNEGO advanced session setup (EXPERIMENTAL)" 1817 bool "Kerberos/SPNEGO advanced session setup (EXPERIMENTAL)"
1782 depends on CIFS_EXPERIMENTAL 1818 depends on CIFS_EXPERIMENTAL
1783 select CONNECTOR 1819 select CONNECTOR
1784 help 1820 help
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index 009a9ae88d..bfc1fd22d5 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -413,8 +413,7 @@ int afs_server_find_by_peer(const struct rxrpc_peer *peer,
413 413
414 /* we found it in the graveyard - resurrect it */ 414 /* we found it in the graveyard - resurrect it */
415 found_dead_server: 415 found_dead_server:
416 list_del(&server->link); 416 list_move_tail(&server->link, &cell->sv_list);
417 list_add_tail(&server->link, &cell->sv_list);
418 afs_get_server(server); 417 afs_get_server(server);
419 afs_kafstimod_del_timer(&server->timeout); 418 afs_kafstimod_del_timer(&server->timeout);
420 spin_unlock(&cell->sv_gylock); 419 spin_unlock(&cell->sv_gylock);
diff --git a/fs/afs/kafsasyncd.c b/fs/afs/kafsasyncd.c
index 7ac07d0d47..f09a794f24 100644
--- a/fs/afs/kafsasyncd.c
+++ b/fs/afs/kafsasyncd.c
@@ -136,8 +136,7 @@ static int kafsasyncd(void *arg)
136 if (!list_empty(&kafsasyncd_async_attnq)) { 136 if (!list_empty(&kafsasyncd_async_attnq)) {
137 op = list_entry(kafsasyncd_async_attnq.next, 137 op = list_entry(kafsasyncd_async_attnq.next,
138 struct afs_async_op, link); 138 struct afs_async_op, link);
139 list_del(&op->link); 139 list_move_tail(&op->link,
140 list_add_tail(&op->link,
141 &kafsasyncd_async_busyq); 140 &kafsasyncd_async_busyq);
142 } 141 }
143 142
@@ -204,8 +203,7 @@ void afs_kafsasyncd_begin_op(struct afs_async_op *op)
204 init_waitqueue_entry(&op->waiter, kafsasyncd_task); 203 init_waitqueue_entry(&op->waiter, kafsasyncd_task);
205 add_wait_queue(&op->call->waitq, &op->waiter); 204 add_wait_queue(&op->call->waitq, &op->waiter);
206 205
207 list_del(&op->link); 206 list_move_tail(&op->link, &kafsasyncd_async_busyq);
208 list_add_tail(&op->link, &kafsasyncd_async_busyq);
209 207
210 spin_unlock(&kafsasyncd_async_lock); 208 spin_unlock(&kafsasyncd_async_lock);
211 209
@@ -223,8 +221,7 @@ void afs_kafsasyncd_attend_op(struct afs_async_op *op)
223 221
224 spin_lock(&kafsasyncd_async_lock); 222 spin_lock(&kafsasyncd_async_lock);
225 223
226 list_del(&op->link); 224 list_move_tail(&op->link, &kafsasyncd_async_attnq);
227 list_add_tail(&op->link, &kafsasyncd_async_attnq);
228 225
229 spin_unlock(&kafsasyncd_async_lock); 226 spin_unlock(&kafsasyncd_async_lock);
230 227
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 62b093aa41..22afaae1a4 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -123,8 +123,7 @@ int afs_server_lookup(struct afs_cell *cell, const struct in_addr *addr,
123 resurrect_server: 123 resurrect_server:
124 _debug("resurrecting server"); 124 _debug("resurrecting server");
125 125
126 list_del(&zombie->link); 126 list_move_tail(&zombie->link, &cell->sv_list);
127 list_add_tail(&zombie->link, &cell->sv_list);
128 afs_get_server(zombie); 127 afs_get_server(zombie);
129 afs_kafstimod_del_timer(&zombie->timeout); 128 afs_kafstimod_del_timer(&zombie->timeout);
130 spin_unlock(&cell->sv_gylock); 129 spin_unlock(&cell->sv_gylock);
@@ -168,8 +167,7 @@ void afs_put_server(struct afs_server *server)
168 } 167 }
169 168
170 spin_lock(&cell->sv_gylock); 169 spin_lock(&cell->sv_gylock);
171 list_del(&server->link); 170 list_move_tail(&server->link, &cell->sv_graveyard);
172 list_add_tail(&server->link, &cell->sv_graveyard);
173 171
174 /* time out in 10 secs */ 172 /* time out in 10 secs */
175 afs_kafstimod_add_timer(&server->timeout, 10 * HZ); 173 afs_kafstimod_add_timer(&server->timeout, 10 * HZ);
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
index eced20618e..331f730a1f 100644
--- a/fs/afs/vlocation.c
+++ b/fs/afs/vlocation.c
@@ -326,8 +326,7 @@ int afs_vlocation_lookup(struct afs_cell *cell,
326 /* found in the graveyard - resurrect */ 326 /* found in the graveyard - resurrect */
327 _debug("found in graveyard"); 327 _debug("found in graveyard");
328 atomic_inc(&vlocation->usage); 328 atomic_inc(&vlocation->usage);
329 list_del(&vlocation->link); 329 list_move_tail(&vlocation->link, &cell->vl_list);
330 list_add_tail(&vlocation->link, &cell->vl_list);
331 spin_unlock(&cell->vl_gylock); 330 spin_unlock(&cell->vl_gylock);
332 331
333 afs_kafstimod_del_timer(&vlocation->timeout); 332 afs_kafstimod_del_timer(&vlocation->timeout);
@@ -478,8 +477,7 @@ static void __afs_put_vlocation(struct afs_vlocation *vlocation)
478 } 477 }
479 478
480 /* move to graveyard queue */ 479 /* move to graveyard queue */
481 list_del(&vlocation->link); 480 list_move_tail(&vlocation->link,&cell->vl_graveyard);
482 list_add_tail(&vlocation->link,&cell->vl_graveyard);
483 481
484 /* remove from pending timeout queue (refcounted if actually being 482 /* remove from pending timeout queue (refcounted if actually being
485 * updated) */ 483 * updated) */
diff --git a/fs/afs/vnode.c b/fs/afs/vnode.c
index 9867fef326..cf62da5d78 100644
--- a/fs/afs/vnode.c
+++ b/fs/afs/vnode.c
@@ -104,8 +104,7 @@ static void afs_vnode_finalise_status_update(struct afs_vnode *vnode,
104 vnode->cb_expiry * HZ); 104 vnode->cb_expiry * HZ);
105 105
106 spin_lock(&afs_cb_hash_lock); 106 spin_lock(&afs_cb_hash_lock);
107 list_del(&vnode->cb_hash_link); 107 list_move_tail(&vnode->cb_hash_link,
108 list_add_tail(&vnode->cb_hash_link,
109 &afs_cb_hash(server, &vnode->fid)); 108 &afs_cb_hash(server, &vnode->fid));
110 spin_unlock(&afs_cb_hash_lock); 109 spin_unlock(&afs_cb_hash_lock);
111 110
diff --git a/fs/aio.c b/fs/aio.c
index 8c34a62df7..950630187a 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -641,7 +641,7 @@ static inline int __queue_kicked_iocb(struct kiocb *iocb)
641 * invoked both for initial i/o submission and 641 * invoked both for initial i/o submission and
642 * subsequent retries via the aio_kick_handler. 642 * subsequent retries via the aio_kick_handler.
643 * Expects to be invoked with iocb->ki_ctx->lock 643 * Expects to be invoked with iocb->ki_ctx->lock
644 * already held. The lock is released and reaquired 644 * already held. The lock is released and reacquired
645 * as needed during processing. 645 * as needed during processing.
646 * 646 *
647 * Calls the iocb retry method (already setup for the 647 * Calls the iocb retry method (already setup for the
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 4456d1daa4..8dbd44f10e 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -376,8 +376,7 @@ next:
376 DPRINTK("returning %p %.*s", 376 DPRINTK("returning %p %.*s",
377 expired, (int)expired->d_name.len, expired->d_name.name); 377 expired, (int)expired->d_name.len, expired->d_name.name);
378 spin_lock(&dcache_lock); 378 spin_lock(&dcache_lock);
379 list_del(&expired->d_parent->d_subdirs); 379 list_move(&expired->d_parent->d_subdirs, &expired->d_u.d_child);
380 list_add(&expired->d_parent->d_subdirs, &expired->d_u.d_child);
381 spin_unlock(&dcache_lock); 380 spin_unlock(&dcache_lock);
382 return expired; 381 return expired;
383 } 382 }
diff --git a/fs/buffer.c b/fs/buffer.c
index 373bb6292b..f23bb647db 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -564,7 +564,7 @@ still_busy:
564 * Completion handler for block_write_full_page() - pages which are unlocked 564 * Completion handler for block_write_full_page() - pages which are unlocked
565 * during I/O, and which have PageWriteback cleared upon I/O completion. 565 * during I/O, and which have PageWriteback cleared upon I/O completion.
566 */ 566 */
567void end_buffer_async_write(struct buffer_head *bh, int uptodate) 567static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
568{ 568{
569 char b[BDEVNAME_SIZE]; 569 char b[BDEVNAME_SIZE];
570 unsigned long flags; 570 unsigned long flags;
@@ -3166,7 +3166,6 @@ EXPORT_SYMBOL(block_sync_page);
3166EXPORT_SYMBOL(block_truncate_page); 3166EXPORT_SYMBOL(block_truncate_page);
3167EXPORT_SYMBOL(block_write_full_page); 3167EXPORT_SYMBOL(block_write_full_page);
3168EXPORT_SYMBOL(cont_prepare_write); 3168EXPORT_SYMBOL(cont_prepare_write);
3169EXPORT_SYMBOL(end_buffer_async_write);
3170EXPORT_SYMBOL(end_buffer_read_sync); 3169EXPORT_SYMBOL(end_buffer_read_sync);
3171EXPORT_SYMBOL(end_buffer_write_sync); 3170EXPORT_SYMBOL(end_buffer_write_sync);
3172EXPORT_SYMBOL(file_fsync); 3171EXPORT_SYMBOL(file_fsync);
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 7271bb0257..a61d17ed18 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,9 +1,24 @@
1Version 1.44
2------------
3Rewritten sessionsetup support, including support for legacy SMB
4session setup needed for OS/2 and older servers such as Windows 95 and 98.
5Fix oops on ls to OS/2 servers. Add support for level 1 FindFirst
6so we can do search (ls etc.) to OS/2. Do not send NTCreateX
7or recent levels of FindFirst unless server says it supports NT SMBs
8(instead use legacy equivalents from LANMAN dialect). Fix to allow
9NTLMv2 authentication support (now can use stronger password hashing
10on mount if corresponding /proc/fs/cifs/SecurityFlags is set (0x4004).
11Allow override of global cifs security flags on mount via "sec=" option(s).
12
1Version 1.43 13Version 1.43
2------------ 14------------
3POSIX locking to servers which support CIFS POSIX Extensions 15POSIX locking to servers which support CIFS POSIX Extensions
4(disabled by default controlled by proc/fs/cifs/Experimental). 16(disabled by default controlled by proc/fs/cifs/Experimental).
5Handle conversion of long share names (especially Asian languages) 17Handle conversion of long share names (especially Asian languages)
6to Unicode during mount. 18to Unicode during mount. Fix memory leak in sess struct on reconnect.
19Fix rare oops after acpi suspend. Fix O_TRUNC opens to overwrite on
20cifs open which helps rare case when setpathinfo fails or server does
21not support it.
7 22
8Version 1.42 23Version 1.42
9------------ 24------------
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile
index 58c77254a2..a26f26ed5a 100644
--- a/fs/cifs/Makefile
+++ b/fs/cifs/Makefile
@@ -3,4 +3,4 @@
3# 3#
4obj-$(CONFIG_CIFS) += cifs.o 4obj-$(CONFIG_CIFS) += cifs.o
5 5
6cifs-objs := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o fcntl.o readdir.o ioctl.o ntlmssp.o 6cifs-objs := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o fcntl.o readdir.o ioctl.o sess.o
diff --git a/fs/cifs/README b/fs/cifs/README
index 0355003f4f..7986d0d97a 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -443,7 +443,10 @@ A partial list of the supported mount options follows:
443 SFU does). In the future the bottom 9 bits of the mode 443 SFU does). In the future the bottom 9 bits of the mode
444 mode also will be emulated using queries of the security 444 mode also will be emulated using queries of the security
445 descriptor (ACL). 445 descriptor (ACL).
446sec Security mode. Allowed values are: 446 sign Must use packet signing (helps avoid unwanted data modification
447 by intermediate systems in the route). Note that signing
448 does not work with lanman or plaintext authentication.
449 sec Security mode. Allowed values are:
447 none attempt to connection as a null user (no name) 450 none attempt to connection as a null user (no name)
448 krb5 Use Kerberos version 5 authentication 451 krb5 Use Kerberos version 5 authentication
449 krb5i Use Kerberos authentication and packet signing 452 krb5i Use Kerberos authentication and packet signing
@@ -453,6 +456,8 @@ sec Security mode. Allowed values are:
453 server requires signing also can be the default) 456 server requires signing also can be the default)
454 ntlmv2 Use NTLMv2 password hashing 457 ntlmv2 Use NTLMv2 password hashing
455 ntlmv2i Use NTLMv2 password hashing with packet signing 458 ntlmv2i Use NTLMv2 password hashing with packet signing
459 lanman (if configured in kernel config) use older
460 lanman hash
456 461
457The mount.cifs mount helper also accepts a few mount options before -o 462The mount.cifs mount helper also accepts a few mount options before -o
458including: 463including:
@@ -485,14 +490,34 @@ PacketSigningEnabled If set to one, cifs packet signing is enabled
485 it. If set to two, cifs packet signing is 490 it. If set to two, cifs packet signing is
486 required even if the server considers packet 491 required even if the server considers packet
487 signing optional. (default 1) 492 signing optional. (default 1)
493SecurityFlags Flags which control security negotiation and
494 also packet signing. Authentication (may/must)
495 flags (e.g. for NTLM and/or NTLMv2) may be combined with
496 the signing flags. Specifying two different password
497 hashing mechanisms (as "must use") on the other hand
498 does not make much sense. Default flags are
499 0x07007
500 (NTLM, NTLMv2 and packet signing allowed). Maximum
501 allowable flags if you want to allow mounts to servers
502 using weaker password hashes is 0x37037 (lanman,
503 plaintext, ntlm, ntlmv2, signing allowed):
504
505 may use packet signing 0x00001
506 must use packet signing 0x01001
507 may use NTLM (most common password hash) 0x00002
508 must use NTLM 0x02002
509 may use NTLMv2 0x00004
510 must use NTLMv2 0x04004
511 may use Kerberos security (not implemented yet) 0x00008
512 must use Kerberos (not implemented yet) 0x08008
513 may use lanman (weak) password hash 0x00010
514 must use lanman password hash 0x10010
515 may use plaintext passwords 0x00020
516 must use plaintext passwords 0x20020
517 (reserved for future packet encryption) 0x00040
518
488cifsFYI If set to one, additional debug information is 519cifsFYI If set to one, additional debug information is
489 logged to the system error log. (default 0) 520 logged to the system error log. (default 0)
490ExtendedSecurity If set to one, SPNEGO session establishment
491 is allowed which enables more advanced
492 secure CIFS session establishment (default 0)
493NTLMV2Enabled If set to one, more secure password hashes
494 are used when the server supports them and
495 when kerberos is not negotiated (default 0)
496traceSMB If set to one, debug information is logged to the 521traceSMB If set to one, debug information is logged to the
497 system error log with the start of smb requests 522 system error log with the start of smb requests
498 and responses (default 0) 523 and responses (default 0)
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
index 086ae8f4a2..031cdf2932 100644
--- a/fs/cifs/asn1.c
+++ b/fs/cifs/asn1.c
@@ -467,7 +467,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
467 asn1_open(&ctx, security_blob, length); 467 asn1_open(&ctx, security_blob, length);
468 468
469 if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { 469 if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
470 cFYI(1, ("Error decoding negTokenInit header ")); 470 cFYI(1, ("Error decoding negTokenInit header"));
471 return 0; 471 return 0;
472 } else if ((cls != ASN1_APL) || (con != ASN1_CON) 472 } else if ((cls != ASN1_APL) || (con != ASN1_CON)
473 || (tag != ASN1_EOC)) { 473 || (tag != ASN1_EOC)) {
@@ -495,7 +495,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
495 } 495 }
496 496
497 if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { 497 if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
498 cFYI(1, ("Error decoding negTokenInit ")); 498 cFYI(1, ("Error decoding negTokenInit"));
499 return 0; 499 return 0;
500 } else if ((cls != ASN1_CTX) || (con != ASN1_CON) 500 } else if ((cls != ASN1_CTX) || (con != ASN1_CON)
501 || (tag != ASN1_EOC)) { 501 || (tag != ASN1_EOC)) {
@@ -505,7 +505,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
505 } 505 }
506 506
507 if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { 507 if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
508 cFYI(1, ("Error decoding negTokenInit ")); 508 cFYI(1, ("Error decoding negTokenInit"));
509 return 0; 509 return 0;
510 } else if ((cls != ASN1_UNI) || (con != ASN1_CON) 510 } else if ((cls != ASN1_UNI) || (con != ASN1_CON)
511 || (tag != ASN1_SEQ)) { 511 || (tag != ASN1_SEQ)) {
@@ -515,7 +515,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
515 } 515 }
516 516
517 if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { 517 if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
518 cFYI(1, ("Error decoding 2nd part of negTokenInit ")); 518 cFYI(1, ("Error decoding 2nd part of negTokenInit"));
519 return 0; 519 return 0;
520 } else if ((cls != ASN1_CTX) || (con != ASN1_CON) 520 } else if ((cls != ASN1_CTX) || (con != ASN1_CON)
521 || (tag != ASN1_EOC)) { 521 || (tag != ASN1_EOC)) {
@@ -527,7 +527,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
527 527
528 if (asn1_header_decode 528 if (asn1_header_decode
529 (&ctx, &sequence_end, &cls, &con, &tag) == 0) { 529 (&ctx, &sequence_end, &cls, &con, &tag) == 0) {
530 cFYI(1, ("Error decoding 2nd part of negTokenInit ")); 530 cFYI(1, ("Error decoding 2nd part of negTokenInit"));
531 return 0; 531 return 0;
532 } else if ((cls != ASN1_UNI) || (con != ASN1_CON) 532 } else if ((cls != ASN1_UNI) || (con != ASN1_CON)
533 || (tag != ASN1_SEQ)) { 533 || (tag != ASN1_SEQ)) {
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index f4124a32be..96abeb7389 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -39,7 +39,7 @@ cifs_dump_mem(char *label, void *data, int length)
39 char *charptr = data; 39 char *charptr = data;
40 char buf[10], line[80]; 40 char buf[10], line[80];
41 41
42 printk(KERN_DEBUG "%s: dump of %d bytes of data at 0x%p\n\n", 42 printk(KERN_DEBUG "%s: dump of %d bytes of data at 0x%p\n",
43 label, length, data); 43 label, length, data);
44 for (i = 0; i < length; i += 16) { 44 for (i = 0; i < length; i += 16) {
45 line[0] = 0; 45 line[0] = 0;
@@ -57,6 +57,57 @@ cifs_dump_mem(char *label, void *data, int length)
57 } 57 }
58} 58}
59 59
60#ifdef CONFIG_CIFS_DEBUG2
61void cifs_dump_detail(struct smb_hdr * smb)
62{
63 cERROR(1,("Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d",
64 smb->Command, smb->Status.CifsError,
65 smb->Flags, smb->Flags2, smb->Mid, smb->Pid));
66 cERROR(1,("smb buf %p len %d", smb, smbCalcSize_LE(smb)));
67}
68
69
70void cifs_dump_mids(struct TCP_Server_Info * server)
71{
72 struct list_head *tmp;
73 struct mid_q_entry * mid_entry;
74
75 if(server == NULL)
76 return;
77
78 cERROR(1,("Dump pending requests:"));
79 spin_lock(&GlobalMid_Lock);
80 list_for_each(tmp, &server->pending_mid_q) {
81 mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
82 if(mid_entry) {
83 cERROR(1,("State: %d Cmd: %d Pid: %d Tsk: %p Mid %d",
84 mid_entry->midState,
85 (int)mid_entry->command,
86 mid_entry->pid,
87 mid_entry->tsk,
88 mid_entry->mid));
89#ifdef CONFIG_CIFS_STATS2
90 cERROR(1,("IsLarge: %d buf: %p time rcv: %ld now: %ld",
91 mid_entry->largeBuf,
92 mid_entry->resp_buf,
93 mid_entry->when_received,
94 jiffies));
95#endif /* STATS2 */
96 cERROR(1,("IsMult: %d IsEnd: %d", mid_entry->multiRsp,
97 mid_entry->multiEnd));
98 if(mid_entry->resp_buf) {
99 cifs_dump_detail(mid_entry->resp_buf);
100 cifs_dump_mem("existing buf: ",
101 mid_entry->resp_buf,
102 62 /* fixme */);
103 }
104
105 }
106 }
107 spin_unlock(&GlobalMid_Lock);
108}
109#endif /* CONFIG_CIFS_DEBUG2 */
110
60#ifdef CONFIG_PROC_FS 111#ifdef CONFIG_PROC_FS
61static int 112static int
62cifs_debug_data_read(char *buf, char **beginBuffer, off_t offset, 113cifs_debug_data_read(char *buf, char **beginBuffer, off_t offset,
@@ -73,7 +124,6 @@ cifs_debug_data_read(char *buf, char **beginBuffer, off_t offset,
73 124
74 *beginBuffer = buf + offset; 125 *beginBuffer = buf + offset;
75 126
76
77 length = 127 length =
78 sprintf(buf, 128 sprintf(buf,
79 "Display Internal CIFS Data Structures for Debugging\n" 129 "Display Internal CIFS Data Structures for Debugging\n"
@@ -395,12 +445,12 @@ static read_proc_t traceSMB_read;
395static write_proc_t traceSMB_write; 445static write_proc_t traceSMB_write;
396static read_proc_t multiuser_mount_read; 446static read_proc_t multiuser_mount_read;
397static write_proc_t multiuser_mount_write; 447static write_proc_t multiuser_mount_write;
398static read_proc_t extended_security_read; 448static read_proc_t security_flags_read;
399static write_proc_t extended_security_write; 449static write_proc_t security_flags_write;
400static read_proc_t ntlmv2_enabled_read; 450/* static read_proc_t ntlmv2_enabled_read;
401static write_proc_t ntlmv2_enabled_write; 451static write_proc_t ntlmv2_enabled_write;
402static read_proc_t packet_signing_enabled_read; 452static read_proc_t packet_signing_enabled_read;
403static write_proc_t packet_signing_enabled_write; 453static write_proc_t packet_signing_enabled_write;*/
404static read_proc_t experimEnabled_read; 454static read_proc_t experimEnabled_read;
405static write_proc_t experimEnabled_write; 455static write_proc_t experimEnabled_write;
406static read_proc_t linuxExtensionsEnabled_read; 456static read_proc_t linuxExtensionsEnabled_read;
@@ -458,10 +508,10 @@ cifs_proc_init(void)
458 pde->write_proc = multiuser_mount_write; 508 pde->write_proc = multiuser_mount_write;
459 509
460 pde = 510 pde =
461 create_proc_read_entry("ExtendedSecurity", 0, proc_fs_cifs, 511 create_proc_read_entry("SecurityFlags", 0, proc_fs_cifs,
462 extended_security_read, NULL); 512 security_flags_read, NULL);
463 if (pde) 513 if (pde)
464 pde->write_proc = extended_security_write; 514 pde->write_proc = security_flags_write;
465 515
466 pde = 516 pde =
467 create_proc_read_entry("LookupCacheEnabled", 0, proc_fs_cifs, 517 create_proc_read_entry("LookupCacheEnabled", 0, proc_fs_cifs,
@@ -469,7 +519,7 @@ cifs_proc_init(void)
469 if (pde) 519 if (pde)
470 pde->write_proc = lookupFlag_write; 520 pde->write_proc = lookupFlag_write;
471 521
472 pde = 522/* pde =
473 create_proc_read_entry("NTLMV2Enabled", 0, proc_fs_cifs, 523 create_proc_read_entry("NTLMV2Enabled", 0, proc_fs_cifs,
474 ntlmv2_enabled_read, NULL); 524 ntlmv2_enabled_read, NULL);
475 if (pde) 525 if (pde)
@@ -479,7 +529,7 @@ cifs_proc_init(void)
479 create_proc_read_entry("PacketSigningEnabled", 0, proc_fs_cifs, 529 create_proc_read_entry("PacketSigningEnabled", 0, proc_fs_cifs,
480 packet_signing_enabled_read, NULL); 530 packet_signing_enabled_read, NULL);
481 if (pde) 531 if (pde)
482 pde->write_proc = packet_signing_enabled_write; 532 pde->write_proc = packet_signing_enabled_write;*/
483} 533}
484 534
485void 535void
@@ -496,9 +546,9 @@ cifs_proc_clean(void)
496#endif 546#endif
497 remove_proc_entry("MultiuserMount", proc_fs_cifs); 547 remove_proc_entry("MultiuserMount", proc_fs_cifs);
498 remove_proc_entry("OplockEnabled", proc_fs_cifs); 548 remove_proc_entry("OplockEnabled", proc_fs_cifs);
499 remove_proc_entry("NTLMV2Enabled",proc_fs_cifs); 549/* remove_proc_entry("NTLMV2Enabled",proc_fs_cifs); */
500 remove_proc_entry("ExtendedSecurity",proc_fs_cifs); 550 remove_proc_entry("SecurityFlags",proc_fs_cifs);
501 remove_proc_entry("PacketSigningEnabled",proc_fs_cifs); 551/* remove_proc_entry("PacketSigningEnabled",proc_fs_cifs); */
502 remove_proc_entry("LinuxExtensionsEnabled",proc_fs_cifs); 552 remove_proc_entry("LinuxExtensionsEnabled",proc_fs_cifs);
503 remove_proc_entry("Experimental",proc_fs_cifs); 553 remove_proc_entry("Experimental",proc_fs_cifs);
504 remove_proc_entry("LookupCacheEnabled",proc_fs_cifs); 554 remove_proc_entry("LookupCacheEnabled",proc_fs_cifs);
@@ -782,12 +832,12 @@ multiuser_mount_write(struct file *file, const char __user *buffer,
782} 832}
783 833
784static int 834static int
785extended_security_read(char *page, char **start, off_t off, 835security_flags_read(char *page, char **start, off_t off,
786 int count, int *eof, void *data) 836 int count, int *eof, void *data)
787{ 837{
788 int len; 838 int len;
789 839
790 len = sprintf(page, "%d\n", extended_security); 840 len = sprintf(page, "0x%x\n", extended_security);
791 841
792 len -= off; 842 len -= off;
793 *start = page + off; 843 *start = page + off;
@@ -803,24 +853,52 @@ extended_security_read(char *page, char **start, off_t off,
803 return len; 853 return len;
804} 854}
805static int 855static int
806extended_security_write(struct file *file, const char __user *buffer, 856security_flags_write(struct file *file, const char __user *buffer,
807 unsigned long count, void *data) 857 unsigned long count, void *data)
808{ 858{
859 unsigned int flags;
860 char flags_string[12];
809 char c; 861 char c;
810 int rc;
811 862
812 rc = get_user(c, buffer); 863 if((count < 1) || (count > 11))
813 if (rc) 864 return -EINVAL;
814 return rc; 865
815 if (c == '0' || c == 'n' || c == 'N') 866 memset(flags_string, 0, 12);
816 extended_security = 0; 867
817 else if (c == '1' || c == 'y' || c == 'Y') 868 if(copy_from_user(flags_string, buffer, count))
818 extended_security = 1; 869 return -EFAULT;
870
871 if(count < 3) {
872 /* single char or single char followed by null */
873 c = flags_string[0];
874 if (c == '0' || c == 'n' || c == 'N')
875 extended_security = CIFSSEC_DEF; /* default */
876 else if (c == '1' || c == 'y' || c == 'Y')
877 extended_security = CIFSSEC_MAX;
878 return count;
879 }
880 /* else we have a number */
881
882 flags = simple_strtoul(flags_string, NULL, 0);
883
884 cFYI(1,("sec flags 0x%x", flags));
885
886 if(flags <= 0) {
887 cERROR(1,("invalid security flags %s",flags_string));
888 return -EINVAL;
889 }
819 890
891 if(flags & ~CIFSSEC_MASK) {
892 cERROR(1,("attempt to set unsupported security flags 0x%x",
893 flags & ~CIFSSEC_MASK));
894 return -EINVAL;
895 }
896 /* flags look ok - update the global security flags for cifs module */
897 extended_security = flags;
820 return count; 898 return count;
821} 899}
822 900
823static int 901/* static int
824ntlmv2_enabled_read(char *page, char **start, off_t off, 902ntlmv2_enabled_read(char *page, char **start, off_t off,
825 int count, int *eof, void *data) 903 int count, int *eof, void *data)
826{ 904{
@@ -855,6 +933,8 @@ ntlmv2_enabled_write(struct file *file, const char __user *buffer,
855 ntlmv2_support = 0; 933 ntlmv2_support = 0;
856 else if (c == '1' || c == 'y' || c == 'Y') 934 else if (c == '1' || c == 'y' || c == 'Y')
857 ntlmv2_support = 1; 935 ntlmv2_support = 1;
936 else if (c == '2')
937 ntlmv2_support = 2;
858 938
859 return count; 939 return count;
860} 940}
@@ -898,7 +978,7 @@ packet_signing_enabled_write(struct file *file, const char __user *buffer,
898 sign_CIFS_PDUs = 2; 978 sign_CIFS_PDUs = 2;
899 979
900 return count; 980 return count;
901} 981} */
902 982
903 983
904#endif 984#endif
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
index 4304d9dcfb..c26cd0d2c6 100644
--- a/fs/cifs/cifs_debug.h
+++ b/fs/cifs/cifs_debug.h
@@ -24,6 +24,10 @@
24#define _H_CIFS_DEBUG 24#define _H_CIFS_DEBUG
25 25
26void cifs_dump_mem(char *label, void *data, int length); 26void cifs_dump_mem(char *label, void *data, int length);
27#ifdef CONFIG_CIFS_DEBUG2
28void cifs_dump_detail(struct smb_hdr *);
29void cifs_dump_mids(struct TCP_Server_Info *);
30#endif
27extern int traceSMB; /* flag which enables the function below */ 31extern int traceSMB; /* flag which enables the function below */
28void dump_smb(struct smb_hdr *, int); 32void dump_smb(struct smb_hdr *, int);
29#define CIFS_INFO 0x01 33#define CIFS_INFO 0x01
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index d2b1282559..d2a8b2941f 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -22,6 +22,7 @@
22#include "cifs_unicode.h" 22#include "cifs_unicode.h"
23#include "cifs_uniupr.h" 23#include "cifs_uniupr.h"
24#include "cifspdu.h" 24#include "cifspdu.h"
25#include "cifsglob.h"
25#include "cifs_debug.h" 26#include "cifs_debug.h"
26 27
27/* 28/*
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index e7d63737e6..a89efaf78a 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -26,6 +26,8 @@
26#include "md5.h" 26#include "md5.h"
27#include "cifs_unicode.h" 27#include "cifs_unicode.h"
28#include "cifsproto.h" 28#include "cifsproto.h"
29#include <linux/ctype.h>
30#include <linux/random.h>
29 31
30/* Calculate and return the CIFS signature based on the mac key and the smb pdu */ 32/* Calculate and return the CIFS signature based on the mac key and the smb pdu */
31/* the 16 byte signature must be allocated by the caller */ 33/* the 16 byte signature must be allocated by the caller */
@@ -35,6 +37,8 @@
35 37
36extern void mdfour(unsigned char *out, unsigned char *in, int n); 38extern void mdfour(unsigned char *out, unsigned char *in, int n);
37extern void E_md4hash(const unsigned char *passwd, unsigned char *p16); 39extern void E_md4hash(const unsigned char *passwd, unsigned char *p16);
40extern void SMBencrypt(unsigned char *passwd, unsigned char *c8,
41 unsigned char *p24);
38 42
39static int cifs_calculate_signature(const struct smb_hdr * cifs_pdu, 43static int cifs_calculate_signature(const struct smb_hdr * cifs_pdu,
40 const char * key, char * signature) 44 const char * key, char * signature)
@@ -45,7 +49,7 @@ static int cifs_calculate_signature(const struct smb_hdr * cifs_pdu,
45 return -EINVAL; 49 return -EINVAL;
46 50
47 MD5Init(&context); 51 MD5Init(&context);
48 MD5Update(&context,key,CIFS_SESSION_KEY_SIZE+16); 52 MD5Update(&context,key,CIFS_SESS_KEY_SIZE+16);
49 MD5Update(&context,cifs_pdu->Protocol,cifs_pdu->smb_buf_length); 53 MD5Update(&context,cifs_pdu->Protocol,cifs_pdu->smb_buf_length);
50 MD5Final(signature,&context); 54 MD5Final(signature,&context);
51 return 0; 55 return 0;
@@ -90,7 +94,7 @@ static int cifs_calc_signature2(const struct kvec * iov, int n_vec,
90 return -EINVAL; 94 return -EINVAL;
91 95
92 MD5Init(&context); 96 MD5Init(&context);
93 MD5Update(&context,key,CIFS_SESSION_KEY_SIZE+16); 97 MD5Update(&context,key,CIFS_SESS_KEY_SIZE+16);
94 for(i=0;i<n_vec;i++) { 98 for(i=0;i<n_vec;i++) {
95 if(iov[i].iov_base == NULL) { 99 if(iov[i].iov_base == NULL) {
96 cERROR(1,("null iovec entry")); 100 cERROR(1,("null iovec entry"));
@@ -204,11 +208,12 @@ int cifs_calculate_mac_key(char * key, const char * rn, const char * password)
204 208
205 E_md4hash(password, temp_key); 209 E_md4hash(password, temp_key);
206 mdfour(key,temp_key,16); 210 mdfour(key,temp_key,16);
207 memcpy(key+16,rn, CIFS_SESSION_KEY_SIZE); 211 memcpy(key+16,rn, CIFS_SESS_KEY_SIZE);
208 return 0; 212 return 0;
209} 213}
210 214
211int CalcNTLMv2_partial_mac_key(struct cifsSesInfo * ses, struct nls_table * nls_info) 215int CalcNTLMv2_partial_mac_key(struct cifsSesInfo * ses,
216 const struct nls_table * nls_info)
212{ 217{
213 char temp_hash[16]; 218 char temp_hash[16];
214 struct HMACMD5Context ctx; 219 struct HMACMD5Context ctx;
@@ -225,6 +230,8 @@ int CalcNTLMv2_partial_mac_key(struct cifsSesInfo * ses, struct nls_table * nls_
225 user_name_len = strlen(ses->userName); 230 user_name_len = strlen(ses->userName);
226 if(user_name_len > MAX_USERNAME_SIZE) 231 if(user_name_len > MAX_USERNAME_SIZE)
227 return -EINVAL; 232 return -EINVAL;
233 if(ses->domainName == NULL)
234 return -EINVAL; /* BB should we use CIFS_LINUX_DOM */
228 dom_name_len = strlen(ses->domainName); 235 dom_name_len = strlen(ses->domainName);
229 if(dom_name_len > MAX_USERNAME_SIZE) 236 if(dom_name_len > MAX_USERNAME_SIZE)
230 return -EINVAL; 237 return -EINVAL;
@@ -259,16 +266,131 @@ int CalcNTLMv2_partial_mac_key(struct cifsSesInfo * ses, struct nls_table * nls_
259 kfree(unicode_buf); 266 kfree(unicode_buf);
260 return 0; 267 return 0;
261} 268}
262void CalcNTLMv2_response(const struct cifsSesInfo * ses,char * v2_session_response) 269
270#ifdef CONFIG_CIFS_WEAK_PW_HASH
271void calc_lanman_hash(struct cifsSesInfo * ses, char * lnm_session_key)
272{
273 int i;
274 char password_with_pad[CIFS_ENCPWD_SIZE];
275
276 if(ses->server == NULL)
277 return;
278
279 memset(password_with_pad, 0, CIFS_ENCPWD_SIZE);
280 strncpy(password_with_pad, ses->password, CIFS_ENCPWD_SIZE);
281
282 if((ses->server->secMode & SECMODE_PW_ENCRYPT) == 0)
283 if(extended_security & CIFSSEC_MAY_PLNTXT) {
284 memcpy(lnm_session_key, password_with_pad, CIFS_ENCPWD_SIZE);
285 return;
286 }
287
288 /* calculate old style session key */
289 /* calling toupper is less broken than repeatedly
290 calling nls_toupper would be since that will never
291 work for UTF8, but neither handles multibyte code pages
292 but the only alternative would be converting to UCS-16 (Unicode)
293 (using a routine something like UniStrupr) then
294 uppercasing and then converting back from Unicode - which
295 would only worth doing it if we knew it were utf8. Basically
296 utf8 and other multibyte codepages each need their own strupper
297 function since a byte at a time will ont work. */
298
299 for(i = 0; i < CIFS_ENCPWD_SIZE; i++) {
300 password_with_pad[i] = toupper(password_with_pad[i]);
301 }
302
303 SMBencrypt(password_with_pad, ses->server->cryptKey, lnm_session_key);
304 /* clear password before we return/free memory */
305 memset(password_with_pad, 0, CIFS_ENCPWD_SIZE);
306}
307#endif /* CIFS_WEAK_PW_HASH */
308
309static int calc_ntlmv2_hash(struct cifsSesInfo *ses,
310 const struct nls_table * nls_cp)
311{
312 int rc = 0;
313 int len;
314 char nt_hash[16];
315 struct HMACMD5Context * pctxt;
316 wchar_t * user;
317 wchar_t * domain;
318
319 pctxt = kmalloc(sizeof(struct HMACMD5Context), GFP_KERNEL);
320
321 if(pctxt == NULL)
322 return -ENOMEM;
323
324 /* calculate md4 hash of password */
325 E_md4hash(ses->password, nt_hash);
326
327 /* convert Domainname to unicode and uppercase */
328 hmac_md5_init_limK_to_64(nt_hash, 16, pctxt);
329
330 /* convert ses->userName to unicode and uppercase */
331 len = strlen(ses->userName);
332 user = kmalloc(2 + (len * 2), GFP_KERNEL);
333 if(user == NULL)
334 goto calc_exit_2;
335 len = cifs_strtoUCS(user, ses->userName, len, nls_cp);
336 UniStrupr(user);
337 hmac_md5_update((char *)user, 2*len, pctxt);
338
339 /* convert ses->domainName to unicode and uppercase */
340 if(ses->domainName) {
341 len = strlen(ses->domainName);
342
343 domain = kmalloc(2 + (len * 2), GFP_KERNEL);
344 if(domain == NULL)
345 goto calc_exit_1;
346 len = cifs_strtoUCS(domain, ses->domainName, len, nls_cp);
347 UniStrupr(domain);
348
349 hmac_md5_update((char *)domain, 2*len, pctxt);
350
351 kfree(domain);
352 }
353calc_exit_1:
354 kfree(user);
355calc_exit_2:
356 /* BB FIXME what about bytes 24 through 40 of the signing key?
357 compare with the NTLM example */
358 hmac_md5_final(ses->server->mac_signing_key, pctxt);
359
360 return rc;
361}
362
363void setup_ntlmv2_rsp(struct cifsSesInfo * ses, char * resp_buf,
364 const struct nls_table * nls_cp)
365{
366 int rc;
367 struct ntlmv2_resp * buf = (struct ntlmv2_resp *)resp_buf;
368
369 buf->blob_signature = cpu_to_le32(0x00000101);
370 buf->reserved = 0;
371 buf->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
372 get_random_bytes(&buf->client_chal, sizeof(buf->client_chal));
373 buf->reserved2 = 0;
374 buf->names[0].type = 0;
375 buf->names[0].length = 0;
376
377 /* calculate buf->ntlmv2_hash */
378 rc = calc_ntlmv2_hash(ses, nls_cp);
379 if(rc)
380 cERROR(1,("could not get v2 hash rc %d",rc));
381 CalcNTLMv2_response(ses, resp_buf);
382}
383
384void CalcNTLMv2_response(const struct cifsSesInfo * ses, char * v2_session_response)
263{ 385{
264 struct HMACMD5Context context; 386 struct HMACMD5Context context;
387 /* rest of v2 struct already generated */
265 memcpy(v2_session_response + 8, ses->server->cryptKey,8); 388 memcpy(v2_session_response + 8, ses->server->cryptKey,8);
266 /* gen_blob(v2_session_response + 16); */
267 hmac_md5_init_limK_to_64(ses->server->mac_signing_key, 16, &context); 389 hmac_md5_init_limK_to_64(ses->server->mac_signing_key, 16, &context);
268 390
269 hmac_md5_update(ses->server->cryptKey,8,&context); 391 hmac_md5_update(v2_session_response+8,
270/* hmac_md5_update(v2_session_response+16)client thing,8,&context); */ /* BB fix */ 392 sizeof(struct ntlmv2_resp) - 8, &context);
271 393
272 hmac_md5_final(v2_session_response,&context); 394 hmac_md5_final(v2_session_response,&context);
273 cifs_dump_mem("v2_sess_rsp: ", v2_session_response, 32); /* BB removeme BB */ 395/* cifs_dump_mem("v2_sess_rsp: ", v2_session_response, 32); */
274} 396}
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 8b4de6eaab..c28ede5999 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -56,8 +56,8 @@ unsigned int experimEnabled = 0;
56unsigned int linuxExtEnabled = 1; 56unsigned int linuxExtEnabled = 1;
57unsigned int lookupCacheEnabled = 1; 57unsigned int lookupCacheEnabled = 1;
58unsigned int multiuser_mount = 0; 58unsigned int multiuser_mount = 0;
59unsigned int extended_security = 0; 59unsigned int extended_security = CIFSSEC_DEF;
60unsigned int ntlmv2_support = 0; 60/* unsigned int ntlmv2_support = 0; */
61unsigned int sign_CIFS_PDUs = 1; 61unsigned int sign_CIFS_PDUs = 1;
62extern struct task_struct * oplockThread; /* remove sparse warning */ 62extern struct task_struct * oplockThread; /* remove sparse warning */
63struct task_struct * oplockThread = NULL; 63struct task_struct * oplockThread = NULL;
@@ -908,7 +908,7 @@ static int cifs_dnotify_thread(void * dummyarg)
908 struct cifsSesInfo *ses; 908 struct cifsSesInfo *ses;
909 909
910 do { 910 do {
911 if(try_to_freeze()) 911 if (try_to_freeze())
912 continue; 912 continue;
913 set_current_state(TASK_INTERRUPTIBLE); 913 set_current_state(TASK_INTERRUPTIBLE);
914 schedule_timeout(15*HZ); 914 schedule_timeout(15*HZ);
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index d56c0577c7..a6384d83fd 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -33,6 +33,7 @@
33#endif 33#endif
34 34
35extern struct address_space_operations cifs_addr_ops; 35extern struct address_space_operations cifs_addr_ops;
36extern struct address_space_operations cifs_addr_ops_smallbuf;
36 37
37/* Functions related to super block operations */ 38/* Functions related to super block operations */
38extern struct super_operations cifs_super_ops; 39extern struct super_operations cifs_super_ops;
@@ -99,5 +100,5 @@ extern ssize_t cifs_getxattr(struct dentry *, const char *, void *, size_t);
99extern ssize_t cifs_listxattr(struct dentry *, char *, size_t); 100extern ssize_t cifs_listxattr(struct dentry *, char *, size_t);
100extern int cifs_ioctl (struct inode * inode, struct file * filep, 101extern int cifs_ioctl (struct inode * inode, struct file * filep,
101 unsigned int command, unsigned long arg); 102 unsigned int command, unsigned long arg);
102#define CIFS_VERSION "1.43" 103#define CIFS_VERSION "1.44"
103#endif /* _CIFSFS_H */ 104#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 006eb33bff..6d7cf5f3bc 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -88,7 +88,8 @@ enum statusEnum {
88}; 88};
89 89
90enum securityEnum { 90enum securityEnum {
91 NTLM = 0, /* Legacy NTLM012 auth with NTLM hash */ 91 LANMAN = 0, /* Legacy LANMAN auth */
92 NTLM, /* Legacy NTLM012 auth with NTLM hash */
92 NTLMv2, /* Legacy NTLM auth with NTLMv2 hash */ 93 NTLMv2, /* Legacy NTLM auth with NTLMv2 hash */
93 RawNTLMSSP, /* NTLMSSP without SPNEGO */ 94 RawNTLMSSP, /* NTLMSSP without SPNEGO */
94 NTLMSSP, /* NTLMSSP via SPNEGO */ 95 NTLMSSP, /* NTLMSSP via SPNEGO */
@@ -157,7 +158,7 @@ struct TCP_Server_Info {
157 /* 16th byte of RFC1001 workstation name is always null */ 158 /* 16th byte of RFC1001 workstation name is always null */
158 char workstation_RFC1001_name[SERVER_NAME_LEN_WITH_NULL]; 159 char workstation_RFC1001_name[SERVER_NAME_LEN_WITH_NULL];
159 __u32 sequence_number; /* needed for CIFS PDU signature */ 160 __u32 sequence_number; /* needed for CIFS PDU signature */
160 char mac_signing_key[CIFS_SESSION_KEY_SIZE + 16]; 161 char mac_signing_key[CIFS_SESS_KEY_SIZE + 16];
161}; 162};
162 163
163/* 164/*
@@ -179,10 +180,13 @@ struct cifsUidInfo {
179struct cifsSesInfo { 180struct cifsSesInfo {
180 struct list_head cifsSessionList; 181 struct list_head cifsSessionList;
181 struct semaphore sesSem; 182 struct semaphore sesSem;
183#if 0
182 struct cifsUidInfo *uidInfo; /* pointer to user info */ 184 struct cifsUidInfo *uidInfo; /* pointer to user info */
185#endif
183 struct TCP_Server_Info *server; /* pointer to server info */ 186 struct TCP_Server_Info *server; /* pointer to server info */
184 atomic_t inUse; /* # of mounts (tree connections) on this ses */ 187 atomic_t inUse; /* # of mounts (tree connections) on this ses */
185 enum statusEnum status; 188 enum statusEnum status;
189 unsigned overrideSecFlg; /* if non-zero override global sec flags */
186 __u16 ipc_tid; /* special tid for connection to IPC share */ 190 __u16 ipc_tid; /* special tid for connection to IPC share */
187 __u16 flags; 191 __u16 flags;
188 char *serverOS; /* name of operating system underlying server */ 192 char *serverOS; /* name of operating system underlying server */
@@ -194,7 +198,7 @@ struct cifsSesInfo {
194 char serverName[SERVER_NAME_LEN_WITH_NULL * 2]; /* BB make bigger for 198 char serverName[SERVER_NAME_LEN_WITH_NULL * 2]; /* BB make bigger for
195 TCP names - will ipv6 and sctp addresses fit? */ 199 TCP names - will ipv6 and sctp addresses fit? */
196 char userName[MAX_USERNAME_SIZE + 1]; 200 char userName[MAX_USERNAME_SIZE + 1];
197 char domainName[MAX_USERNAME_SIZE + 1]; 201 char * domainName;
198 char * password; 202 char * password;
199}; 203};
200/* session flags */ 204/* session flags */
@@ -209,12 +213,12 @@ struct cifsTconInfo {
209 struct list_head openFileList; 213 struct list_head openFileList;
210 struct semaphore tconSem; 214 struct semaphore tconSem;
211 struct cifsSesInfo *ses; /* pointer to session associated with */ 215 struct cifsSesInfo *ses; /* pointer to session associated with */
212 char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource (in ASCII not UTF) */ 216 char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */
213 char *nativeFileSystem; 217 char *nativeFileSystem;
214 __u16 tid; /* The 2 byte tree id */ 218 __u16 tid; /* The 2 byte tree id */
215 __u16 Flags; /* optional support bits */ 219 __u16 Flags; /* optional support bits */
216 enum statusEnum tidStatus; 220 enum statusEnum tidStatus;
217 atomic_t useCount; /* how many mounts (explicit or implicit) to this share */ 221 atomic_t useCount; /* how many explicit/implicit mounts to share */
218#ifdef CONFIG_CIFS_STATS 222#ifdef CONFIG_CIFS_STATS
219 atomic_t num_smbs_sent; 223 atomic_t num_smbs_sent;
220 atomic_t num_writes; 224 atomic_t num_writes;
@@ -254,7 +258,7 @@ struct cifsTconInfo {
254 spinlock_t stat_lock; 258 spinlock_t stat_lock;
255#endif /* CONFIG_CIFS_STATS */ 259#endif /* CONFIG_CIFS_STATS */
256 FILE_SYSTEM_DEVICE_INFO fsDevInfo; 260 FILE_SYSTEM_DEVICE_INFO fsDevInfo;
257 FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if file system name truncated */ 261 FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if fs name truncated */
258 FILE_SYSTEM_UNIX_INFO fsUnixInfo; 262 FILE_SYSTEM_UNIX_INFO fsUnixInfo;
259 unsigned retry:1; 263 unsigned retry:1;
260 unsigned nocase:1; 264 unsigned nocase:1;
@@ -305,7 +309,6 @@ struct cifsFileInfo {
305 atomic_t wrtPending; /* handle in use - defer close */ 309 atomic_t wrtPending; /* handle in use - defer close */
306 struct semaphore fh_sem; /* prevents reopen race after dead ses*/ 310 struct semaphore fh_sem; /* prevents reopen race after dead ses*/
307 char * search_resume_name; /* BB removeme BB */ 311 char * search_resume_name; /* BB removeme BB */
308 unsigned int resume_name_length; /* BB removeme - field renamed and moved BB */
309 struct cifs_search_info srch_inf; 312 struct cifs_search_info srch_inf;
310}; 313};
311 314
@@ -391,9 +394,9 @@ struct mid_q_entry {
391 struct smb_hdr *resp_buf; /* response buffer */ 394 struct smb_hdr *resp_buf; /* response buffer */
392 int midState; /* wish this were enum but can not pass to wait_event */ 395 int midState; /* wish this were enum but can not pass to wait_event */
393 __u8 command; /* smb command code */ 396 __u8 command; /* smb command code */
394 unsigned multiPart:1; /* multiple responses to one SMB request */
395 unsigned largeBuf:1; /* if valid response, is pointer to large buf */ 397 unsigned largeBuf:1; /* if valid response, is pointer to large buf */
396 unsigned multiResp:1; /* multiple trans2 responses for one request */ 398 unsigned multiRsp:1; /* multiple trans2 responses for one request */
399 unsigned multiEnd:1; /* both received */
397}; 400};
398 401
399struct oplock_q_entry { 402struct oplock_q_entry {
@@ -430,15 +433,35 @@ struct dir_notify_req {
430#define CIFS_LARGE_BUFFER 2 433#define CIFS_LARGE_BUFFER 2
431#define CIFS_IOVEC 4 /* array of response buffers */ 434#define CIFS_IOVEC 4 /* array of response buffers */
432 435
433/* Type of session setup needed */ 436/* Security Flags: indicate type of session setup needed */
434#define CIFS_PLAINTEXT 0 437#define CIFSSEC_MAY_SIGN 0x00001
435#define CIFS_LANMAN 1 438#define CIFSSEC_MAY_NTLM 0x00002
436#define CIFS_NTLM 2 439#define CIFSSEC_MAY_NTLMV2 0x00004
437#define CIFS_NTLMSSP_NEG 3 440#define CIFSSEC_MAY_KRB5 0x00008
438#define CIFS_NTLMSSP_AUTH 4 441#ifdef CONFIG_CIFS_WEAK_PW_HASH
439#define CIFS_SPNEGO_INIT 5 442#define CIFSSEC_MAY_LANMAN 0x00010
440#define CIFS_SPNEGO_TARG 6 443#define CIFSSEC_MAY_PLNTXT 0x00020
441 444#endif /* weak passwords */
445#define CIFSSEC_MAY_SEAL 0x00040 /* not supported yet */
446
447#define CIFSSEC_MUST_SIGN 0x01001
448/* note that only one of the following can be set so the
449result of setting MUST flags more than once will be to
450require use of the stronger protocol */
451#define CIFSSEC_MUST_NTLM 0x02002
452#define CIFSSEC_MUST_NTLMV2 0x04004
453#define CIFSSEC_MUST_KRB5 0x08008
454#ifdef CONFIG_CIFS_WEAK_PW_HASH
455#define CIFSSEC_MUST_LANMAN 0x10010
456#define CIFSSEC_MUST_PLNTXT 0x20020
457#define CIFSSEC_MASK 0x37037 /* current flags supported if weak */
458#else
459#define CIFSSEC_MASK 0x07007 /* flags supported if no weak config */
460#endif /* WEAK_PW_HASH */
461#define CIFSSEC_MUST_SEAL 0x40040 /* not supported yet */
462
463#define CIFSSEC_DEF CIFSSEC_MAY_SIGN | CIFSSEC_MAY_NTLM | CIFSSEC_MAY_NTLMV2
464#define CIFSSEC_MAX CIFSSEC_MUST_SIGN | CIFSSEC_MUST_NTLMV2
442/* 465/*
443 ***************************************************************** 466 *****************************************************************
444 * All constants go here 467 * All constants go here
@@ -500,16 +523,16 @@ GLOBAL_EXTERN rwlock_t GlobalSMBSeslock; /* protects list inserts on 3 above */
500GLOBAL_EXTERN struct list_head GlobalOplock_Q; 523GLOBAL_EXTERN struct list_head GlobalOplock_Q;
501 524
502GLOBAL_EXTERN struct list_head GlobalDnotifyReqList; /* Outstanding dir notify requests */ 525GLOBAL_EXTERN struct list_head GlobalDnotifyReqList; /* Outstanding dir notify requests */
503GLOBAL_EXTERN struct list_head GlobalDnotifyRsp_Q; /* Dir notify response queue */ 526GLOBAL_EXTERN struct list_head GlobalDnotifyRsp_Q;/* DirNotify response queue */
504 527
505/* 528/*
506 * Global transaction id (XID) information 529 * Global transaction id (XID) information
507 */ 530 */
508GLOBAL_EXTERN unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */ 531GLOBAL_EXTERN unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */
509GLOBAL_EXTERN unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */ 532GLOBAL_EXTERN unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
510GLOBAL_EXTERN unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */ 533GLOBAL_EXTERN unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */
511GLOBAL_EXTERN spinlock_t GlobalMid_Lock; /* protects above and list operations */ 534GLOBAL_EXTERN spinlock_t GlobalMid_Lock; /* protects above & list operations */
512 /* on midQ entries */ 535 /* on midQ entries */
513GLOBAL_EXTERN char Local_System_Name[15]; 536GLOBAL_EXTERN char Local_System_Name[15];
514 537
515/* 538/*
@@ -531,7 +554,7 @@ GLOBAL_EXTERN atomic_t smBufAllocCount;
531GLOBAL_EXTERN atomic_t midCount; 554GLOBAL_EXTERN atomic_t midCount;
532 555
533/* Misc globals */ 556/* Misc globals */
534GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions 557GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions
535 to be established on existing mount if we 558 to be established on existing mount if we
536 have the uid/password or Kerberos credential 559 have the uid/password or Kerberos credential
537 or equivalent for current user */ 560 or equivalent for current user */
@@ -540,8 +563,8 @@ GLOBAL_EXTERN unsigned int experimEnabled;
540GLOBAL_EXTERN unsigned int lookupCacheEnabled; 563GLOBAL_EXTERN unsigned int lookupCacheEnabled;
541GLOBAL_EXTERN unsigned int extended_security; /* if on, session setup sent 564GLOBAL_EXTERN unsigned int extended_security; /* if on, session setup sent
542 with more secure ntlmssp2 challenge/resp */ 565 with more secure ntlmssp2 challenge/resp */
543GLOBAL_EXTERN unsigned int ntlmv2_support; /* better optional password hash */
544GLOBAL_EXTERN unsigned int sign_CIFS_PDUs; /* enable smb packet signing */ 566GLOBAL_EXTERN unsigned int sign_CIFS_PDUs; /* enable smb packet signing */
567GLOBAL_EXTERN unsigned int secFlags;
545GLOBAL_EXTERN unsigned int linuxExtEnabled;/*enable Linux/Unix CIFS extensions*/ 568GLOBAL_EXTERN unsigned int linuxExtEnabled;/*enable Linux/Unix CIFS extensions*/
546GLOBAL_EXTERN unsigned int CIFSMaxBufSize; /* max size not including hdr */ 569GLOBAL_EXTERN unsigned int CIFSMaxBufSize; /* max size not including hdr */
547GLOBAL_EXTERN unsigned int cifs_min_rcv; /* min size of big ntwrk buf pool */ 570GLOBAL_EXTERN unsigned int cifs_min_rcv; /* min size of big ntwrk buf pool */
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index b2233ac05b..8623902354 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -16,7 +16,7 @@
16 * 16 *
17 * You should have received a copy of the GNU Lesser General Public License 17 * You should have received a copy of the GNU Lesser General Public License
18 * along with this library; if not, write to the Free Software 18 * along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 20 */
21 21
22#ifndef _CIFSPDU_H 22#ifndef _CIFSPDU_H
@@ -24,8 +24,14 @@
24 24
25#include <net/sock.h> 25#include <net/sock.h>
26 26
27#ifdef CONFIG_CIFS_WEAK_PW_HASH
28#define LANMAN_PROT 0
29#define CIFS_PROT 1
30#else
27#define CIFS_PROT 0 31#define CIFS_PROT 0
28#define BAD_PROT CIFS_PROT+1 32#endif
33#define POSIX_PROT CIFS_PROT+1
34#define BAD_PROT 0xFFFF
29 35
30/* SMB command codes */ 36/* SMB command codes */
31/* Some commands have minimal (wct=0,bcc=0), or uninteresting, responses 37/* Some commands have minimal (wct=0,bcc=0), or uninteresting, responses
@@ -110,7 +116,7 @@
110/* 116/*
111 * Size of the session key (crypto key encrypted with the password 117 * Size of the session key (crypto key encrypted with the password
112 */ 118 */
113#define CIFS_SESSION_KEY_SIZE (24) 119#define CIFS_SESS_KEY_SIZE (24)
114 120
115/* 121/*
116 * Maximum user name length 122 * Maximum user name length
@@ -400,6 +406,29 @@ typedef struct negotiate_req {
400 unsigned char DialectsArray[1]; 406 unsigned char DialectsArray[1];
401} __attribute__((packed)) NEGOTIATE_REQ; 407} __attribute__((packed)) NEGOTIATE_REQ;
402 408
409/* Dialect index is 13 for LANMAN */
410
411typedef struct lanman_neg_rsp {
412 struct smb_hdr hdr; /* wct = 13 */
413 __le16 DialectIndex;
414 __le16 SecurityMode;
415 __le16 MaxBufSize;
416 __le16 MaxMpxCount;
417 __le16 MaxNumberVcs;
418 __le16 RawMode;
419 __le32 SessionKey;
420 __le32 ServerTime;
421 __le16 ServerTimeZone;
422 __le16 EncryptionKeyLength;
423 __le16 Reserved;
424 __u16 ByteCount;
425 unsigned char EncryptionKey[1];
426} __attribute__((packed)) LANMAN_NEG_RSP;
427
428#define READ_RAW_ENABLE 1
429#define WRITE_RAW_ENABLE 2
430#define RAW_ENABLE (READ_RAW_ENABLE | WRITE_RAW_ENABLE)
431
403typedef struct negotiate_rsp { 432typedef struct negotiate_rsp {
404 struct smb_hdr hdr; /* wct = 17 */ 433 struct smb_hdr hdr; /* wct = 17 */
405 __le16 DialectIndex; 434 __le16 DialectIndex;
@@ -509,7 +538,7 @@ typedef union smb_com_session_setup_andx {
509/* unsigned char * NativeOS; */ 538/* unsigned char * NativeOS; */
510/* unsigned char * NativeLanMan; */ 539/* unsigned char * NativeLanMan; */
511/* unsigned char * PrimaryDomain; */ 540/* unsigned char * PrimaryDomain; */
512 } __attribute__((packed)) resp; /* NTLM response format (with or without extended security */ 541 } __attribute__((packed)) resp; /* NTLM response with or without extended sec*/
513 542
514 struct { /* request format */ 543 struct { /* request format */
515 struct smb_hdr hdr; /* wct = 10 */ 544 struct smb_hdr hdr; /* wct = 10 */
@@ -520,8 +549,8 @@ typedef union smb_com_session_setup_andx {
520 __le16 MaxMpxCount; 549 __le16 MaxMpxCount;
521 __le16 VcNumber; 550 __le16 VcNumber;
522 __u32 SessionKey; 551 __u32 SessionKey;
523 __le16 PassswordLength; 552 __le16 PasswordLength;
524 __u32 Reserved; 553 __u32 Reserved; /* encrypt key len and offset */
525 __le16 ByteCount; 554 __le16 ByteCount;
526 unsigned char AccountPassword[1]; /* followed by */ 555 unsigned char AccountPassword[1]; /* followed by */
527 /* STRING AccountName */ 556 /* STRING AccountName */
@@ -543,6 +572,26 @@ typedef union smb_com_session_setup_andx {
543 } __attribute__((packed)) old_resp; /* pre-NTLM (LANMAN2.1) response */ 572 } __attribute__((packed)) old_resp; /* pre-NTLM (LANMAN2.1) response */
544} __attribute__((packed)) SESSION_SETUP_ANDX; 573} __attribute__((packed)) SESSION_SETUP_ANDX;
545 574
575/* format of NLTMv2 Response ie "case sensitive password" hash when NTLMv2 */
576
577struct ntlmssp2_name {
578 __le16 type;
579 __le16 length;
580/* char name[length]; */
581} __attribute__((packed));
582
583struct ntlmv2_resp {
584 char ntlmv2_hash[CIFS_ENCPWD_SIZE];
585 __le32 blob_signature;
586 __u32 reserved;
587 __le64 time;
588 __u64 client_chal; /* random */
589 __u32 reserved2;
590 struct ntlmssp2_name names[1];
591 /* array of name entries could follow ending in minimum 4 byte struct */
592} __attribute__((packed));
593
594
546#define CIFS_NETWORK_OPSYS "CIFS VFS Client for Linux" 595#define CIFS_NETWORK_OPSYS "CIFS VFS Client for Linux"
547 596
548/* Capabilities bits (for NTLM SessSetup request) */ 597/* Capabilities bits (for NTLM SessSetup request) */
@@ -573,7 +622,9 @@ typedef struct smb_com_tconx_req {
573} __attribute__((packed)) TCONX_REQ; 622} __attribute__((packed)) TCONX_REQ;
574 623
575typedef struct smb_com_tconx_rsp { 624typedef struct smb_com_tconx_rsp {
576 struct smb_hdr hdr; /* wct = 3 *//* note that Win2000 has sent wct=7 in some cases on responses. Four unspecified words followed OptionalSupport */ 625 struct smb_hdr hdr; /* wct = 3 note that Win2000 has sent wct = 7
626 in some cases on responses. Four unspecified
627 words followed OptionalSupport */
577 __u8 AndXCommand; 628 __u8 AndXCommand;
578 __u8 AndXReserved; 629 __u8 AndXReserved;
579 __le16 AndXOffset; 630 __le16 AndXOffset;
@@ -1323,6 +1374,9 @@ struct smb_t2_rsp {
1323#define SMB_FILE_MAXIMUM_INFO 0x40d 1374#define SMB_FILE_MAXIMUM_INFO 0x40d
1324 1375
1325/* Find File infolevels */ 1376/* Find File infolevels */
1377#define SMB_FIND_FILE_INFO_STANDARD 0x001
1378#define SMB_FIND_FILE_QUERY_EA_SIZE 0x002
1379#define SMB_FIND_FILE_QUERY_EAS_FROM_LIST 0x003
1326#define SMB_FIND_FILE_DIRECTORY_INFO 0x101 1380#define SMB_FIND_FILE_DIRECTORY_INFO 0x101
1327#define SMB_FIND_FILE_FULL_DIRECTORY_INFO 0x102 1381#define SMB_FIND_FILE_FULL_DIRECTORY_INFO 0x102
1328#define SMB_FIND_FILE_NAMES_INFO 0x103 1382#define SMB_FIND_FILE_NAMES_INFO 0x103
@@ -1844,13 +1898,13 @@ typedef struct {
1844typedef struct { 1898typedef struct {
1845 __le32 DeviceType; 1899 __le32 DeviceType;
1846 __le32 DeviceCharacteristics; 1900 __le32 DeviceCharacteristics;
1847} __attribute__((packed)) FILE_SYSTEM_DEVICE_INFO; /* device info, level 0x104 */ 1901} __attribute__((packed)) FILE_SYSTEM_DEVICE_INFO; /* device info level 0x104 */
1848 1902
1849typedef struct { 1903typedef struct {
1850 __le32 Attributes; 1904 __le32 Attributes;
1851 __le32 MaxPathNameComponentLength; 1905 __le32 MaxPathNameComponentLength;
1852 __le32 FileSystemNameLen; 1906 __le32 FileSystemNameLen;
1853 char FileSystemName[52]; /* do not really need to save this - so potentially get only subset of name */ 1907 char FileSystemName[52]; /* do not have to save this - get subset? */
1854} __attribute__((packed)) FILE_SYSTEM_ATTRIBUTE_INFO; 1908} __attribute__((packed)) FILE_SYSTEM_ATTRIBUTE_INFO;
1855 1909
1856/******************************************************************************/ 1910/******************************************************************************/
@@ -1947,7 +2001,8 @@ typedef struct {
1947 2001
1948struct file_allocation_info { 2002struct file_allocation_info {
1949 __le64 AllocationSize; /* Note old Samba srvr rounds this up too much */ 2003 __le64 AllocationSize; /* Note old Samba srvr rounds this up too much */
1950} __attribute__((packed)); /* size used on disk, level 0x103 for set, 0x105 for query */ 2004} __attribute__((packed)); /* size used on disk, for level 0x103 for set,
2005 0x105 for query */
1951 2006
1952struct file_end_of_file_info { 2007struct file_end_of_file_info {
1953 __le64 FileSize; /* offset to end of file */ 2008 __le64 FileSize; /* offset to end of file */
@@ -2054,7 +2109,7 @@ typedef struct {
2054 __le32 ExtFileAttributes; 2109 __le32 ExtFileAttributes;
2055 __le32 FileNameLength; 2110 __le32 FileNameLength;
2056 char FileName[1]; 2111 char FileName[1];
2057} __attribute__((packed)) FILE_DIRECTORY_INFO; /* level 0x101 FF response data area */ 2112} __attribute__((packed)) FILE_DIRECTORY_INFO; /* level 0x101 FF resp data */
2058 2113
2059typedef struct { 2114typedef struct {
2060 __le32 NextEntryOffset; 2115 __le32 NextEntryOffset;
@@ -2069,7 +2124,7 @@ typedef struct {
2069 __le32 FileNameLength; 2124 __le32 FileNameLength;
2070 __le32 EaSize; /* length of the xattrs */ 2125 __le32 EaSize; /* length of the xattrs */
2071 char FileName[1]; 2126 char FileName[1];
2072} __attribute__((packed)) FILE_FULL_DIRECTORY_INFO; /* level 0x102 FF response data area */ 2127} __attribute__((packed)) FILE_FULL_DIRECTORY_INFO; /* level 0x102 rsp data */
2073 2128
2074typedef struct { 2129typedef struct {
2075 __le32 NextEntryOffset; 2130 __le32 NextEntryOffset;
@@ -2086,7 +2141,7 @@ typedef struct {
2086 __le32 Reserved; 2141 __le32 Reserved;
2087 __u64 UniqueId; /* inode num - le since Samba puts ino in low 32 bit*/ 2142 __u64 UniqueId; /* inode num - le since Samba puts ino in low 32 bit*/
2088 char FileName[1]; 2143 char FileName[1];
2089} __attribute__((packed)) SEARCH_ID_FULL_DIR_INFO; /* level 0x105 FF response data area */ 2144} __attribute__((packed)) SEARCH_ID_FULL_DIR_INFO; /* level 0x105 FF rsp data */
2090 2145
2091typedef struct { 2146typedef struct {
2092 __le32 NextEntryOffset; 2147 __le32 NextEntryOffset;
@@ -2104,7 +2159,22 @@ typedef struct {
2104 __u8 Reserved; 2159 __u8 Reserved;
2105 __u8 ShortName[12]; 2160 __u8 ShortName[12];
2106 char FileName[1]; 2161 char FileName[1];
2107} __attribute__((packed)) FILE_BOTH_DIRECTORY_INFO; /* level 0x104 FF response data area */ 2162} __attribute__((packed)) FILE_BOTH_DIRECTORY_INFO; /* level 0x104 FFrsp data */
2163
2164typedef struct {
2165 __u32 ResumeKey;
2166 __le16 CreationDate; /* SMB Date */
2167 __le16 CreationTime; /* SMB Time */
2168 __le16 LastAccessDate;
2169 __le16 LastAccessTime;
2170 __le16 LastWriteDate;
2171 __le16 LastWriteTime;
2172 __le32 DataSize; /* File Size (EOF) */
2173 __le32 AllocationSize;
2174 __le16 Attributes; /* verify not u32 */
2175 __u8 FileNameLength;
2176 char FileName[1];
2177} __attribute__((packed)) FIND_FILE_STANDARD_INFO; /* level 0x1 FF resp data */
2108 2178
2109 2179
2110struct win_dev { 2180struct win_dev {
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 310ea2f0e0..a5ddc62d6f 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -64,14 +64,12 @@ extern int map_smb_to_linux_error(struct smb_hdr *smb);
64extern void header_assemble(struct smb_hdr *, char /* command */ , 64extern void header_assemble(struct smb_hdr *, char /* command */ ,
65 const struct cifsTconInfo *, int /* length of 65 const struct cifsTconInfo *, int /* length of
66 fixed section (word count) in two byte units */); 66 fixed section (word count) in two byte units */);
67#ifdef CONFIG_CIFS_EXPERIMENTAL
68extern int small_smb_init_no_tc(const int smb_cmd, const int wct, 67extern int small_smb_init_no_tc(const int smb_cmd, const int wct,
69 struct cifsSesInfo *ses, 68 struct cifsSesInfo *ses,
70 void ** request_buf); 69 void ** request_buf);
71extern int CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, 70extern int CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses,
72 const int stage, int * pNTLMv2_flg, 71 const int stage,
73 const struct nls_table *nls_cp); 72 const struct nls_table *nls_cp);
74#endif
75extern __u16 GetNextMid(struct TCP_Server_Info *server); 73extern __u16 GetNextMid(struct TCP_Server_Info *server);
76extern struct oplock_q_entry * AllocOplockQEntry(struct inode *, u16, 74extern struct oplock_q_entry * AllocOplockQEntry(struct inode *, u16,
77 struct cifsTconInfo *); 75 struct cifsTconInfo *);
@@ -285,8 +283,14 @@ extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *,
285extern int cifs_verify_signature(struct smb_hdr *, const char * mac_key, 283extern int cifs_verify_signature(struct smb_hdr *, const char * mac_key,
286 __u32 expected_sequence_number); 284 __u32 expected_sequence_number);
287extern int cifs_calculate_mac_key(char * key,const char * rn,const char * pass); 285extern int cifs_calculate_mac_key(char * key,const char * rn,const char * pass);
288extern int CalcNTLMv2_partial_mac_key(struct cifsSesInfo *, struct nls_table *); 286extern int CalcNTLMv2_partial_mac_key(struct cifsSesInfo *,
289extern void CalcNTLMv2_response(const struct cifsSesInfo *,char * ); 287 const struct nls_table *);
288extern void CalcNTLMv2_response(const struct cifsSesInfo *, char * );
289extern void setup_ntlmv2_rsp(struct cifsSesInfo *, char *,
290 const struct nls_table *);
291#ifdef CONFIG_CIFS_WEAK_PW_HASH
292extern void calc_lanman_hash(struct cifsSesInfo * ses, char * lnm_session_key);
293#endif /* CIFS_WEAK_PW_HASH */
290extern int CIFSSMBCopy(int xid, 294extern int CIFSSMBCopy(int xid,
291 struct cifsTconInfo *source_tcon, 295 struct cifsTconInfo *source_tcon,
292 const char *fromName, 296 const char *fromName,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 925881e00f..19678c575d 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -44,8 +44,11 @@ static struct {
44 int index; 44 int index;
45 char *name; 45 char *name;
46} protocols[] = { 46} protocols[] = {
47#ifdef CONFIG_CIFS_WEAK_PW_HASH
48 {LANMAN_PROT, "\2LM1.2X002"},
49#endif /* weak password hashing for legacy clients */
47 {CIFS_PROT, "\2NT LM 0.12"}, 50 {CIFS_PROT, "\2NT LM 0.12"},
48 {CIFS_PROT, "\2POSIX 2"}, 51 {POSIX_PROT, "\2POSIX 2"},
49 {BAD_PROT, "\2"} 52 {BAD_PROT, "\2"}
50}; 53};
51#else 54#else
@@ -53,11 +56,29 @@ static struct {
53 int index; 56 int index;
54 char *name; 57 char *name;
55} protocols[] = { 58} protocols[] = {
59#ifdef CONFIG_CIFS_WEAK_PW_HASH
60 {LANMAN_PROT, "\2LM1.2X002"},
61#endif /* weak password hashing for legacy clients */
56 {CIFS_PROT, "\2NT LM 0.12"}, 62 {CIFS_PROT, "\2NT LM 0.12"},
57 {BAD_PROT, "\2"} 63 {BAD_PROT, "\2"}
58}; 64};
59#endif 65#endif
60 66
67/* define the number of elements in the cifs dialect array */
68#ifdef CONFIG_CIFS_POSIX
69#ifdef CONFIG_CIFS_WEAK_PW_HASH
70#define CIFS_NUM_PROT 3
71#else
72#define CIFS_NUM_PROT 2
73#endif /* CIFS_WEAK_PW_HASH */
74#else /* not posix */
75#ifdef CONFIG_CIFS_WEAK_PW_HASH
76#define CIFS_NUM_PROT 2
77#else
78#define CIFS_NUM_PROT 1
79#endif /* CONFIG_CIFS_WEAK_PW_HASH */
80#endif /* CIFS_POSIX */
81
61 82
62/* Mark as invalid, all open files on tree connections since they 83/* Mark as invalid, all open files on tree connections since they
63 were closed when session to server was lost */ 84 were closed when session to server was lost */
@@ -188,7 +209,6 @@ small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
188 return rc; 209 return rc;
189} 210}
190 211
191#ifdef CONFIG_CIFS_EXPERIMENTAL
192int 212int
193small_smb_init_no_tc(const int smb_command, const int wct, 213small_smb_init_no_tc(const int smb_command, const int wct,
194 struct cifsSesInfo *ses, void **request_buf) 214 struct cifsSesInfo *ses, void **request_buf)
@@ -214,7 +234,6 @@ small_smb_init_no_tc(const int smb_command, const int wct,
214 234
215 return rc; 235 return rc;
216} 236}
217#endif /* CONFIG_CIFS_EXPERIMENTAL */
218 237
219/* If the return code is zero, this function must fill in request_buf pointer */ 238/* If the return code is zero, this function must fill in request_buf pointer */
220static int 239static int
@@ -322,7 +341,8 @@ smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
322 /* potential retries of smb operations it turns out we can determine */ 341 /* potential retries of smb operations it turns out we can determine */
323 /* from the mid flags when the request buffer can be resent without */ 342 /* from the mid flags when the request buffer can be resent without */
324 /* having to use a second distinct buffer for the response */ 343 /* having to use a second distinct buffer for the response */
325 *response_buf = *request_buf; 344 if(response_buf)
345 *response_buf = *request_buf;
326 346
327 header_assemble((struct smb_hdr *) *request_buf, smb_command, tcon, 347 header_assemble((struct smb_hdr *) *request_buf, smb_command, tcon,
328 wct /*wct */ ); 348 wct /*wct */ );
@@ -373,8 +393,10 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
373 NEGOTIATE_RSP *pSMBr; 393 NEGOTIATE_RSP *pSMBr;
374 int rc = 0; 394 int rc = 0;
375 int bytes_returned; 395 int bytes_returned;
396 int i;
376 struct TCP_Server_Info * server; 397 struct TCP_Server_Info * server;
377 u16 count; 398 u16 count;
399 unsigned int secFlags;
378 400
379 if(ses->server) 401 if(ses->server)
380 server = ses->server; 402 server = ses->server;
@@ -386,101 +408,200 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
386 (void **) &pSMB, (void **) &pSMBr); 408 (void **) &pSMB, (void **) &pSMBr);
387 if (rc) 409 if (rc)
388 return rc; 410 return rc;
411
412 /* if any of auth flags (ie not sign or seal) are overriden use them */
413 if(ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL)))
414 secFlags = ses->overrideSecFlg;
415 else /* if override flags set only sign/seal OR them with global auth */
416 secFlags = extended_security | ses->overrideSecFlg;
417
418 cFYI(1,("secFlags 0x%x",secFlags));
419
389 pSMB->hdr.Mid = GetNextMid(server); 420 pSMB->hdr.Mid = GetNextMid(server);
390 pSMB->hdr.Flags2 |= SMBFLG2_UNICODE; 421 pSMB->hdr.Flags2 |= SMBFLG2_UNICODE;
391 if (extended_security) 422 if((secFlags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5)
392 pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC; 423 pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC;
393 424
394 count = strlen(protocols[0].name) + 1; 425 count = 0;
395 strncpy(pSMB->DialectsArray, protocols[0].name, 30); 426 for(i=0;i<CIFS_NUM_PROT;i++) {
396 /* null guaranteed to be at end of source and target buffers anyway */ 427 strncpy(pSMB->DialectsArray+count, protocols[i].name, 16);
397 428 count += strlen(protocols[i].name) + 1;
429 /* null at end of source and target buffers anyway */
430 }
398 pSMB->hdr.smb_buf_length += count; 431 pSMB->hdr.smb_buf_length += count;
399 pSMB->ByteCount = cpu_to_le16(count); 432 pSMB->ByteCount = cpu_to_le16(count);
400 433
401 rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB, 434 rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB,
402 (struct smb_hdr *) pSMBr, &bytes_returned, 0); 435 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
403 if (rc == 0) { 436 if (rc != 0)
404 server->secMode = pSMBr->SecurityMode; 437 goto neg_err_exit;
405 if((server->secMode & SECMODE_USER) == 0) 438
406 cFYI(1,("share mode security")); 439 cFYI(1,("Dialect: %d", pSMBr->DialectIndex));
407 server->secType = NTLM; /* BB override default for 440 /* Check wct = 1 error case */
408 NTLMv2 or kerberos v5 */ 441 if((pSMBr->hdr.WordCount < 13) || (pSMBr->DialectIndex == BAD_PROT)) {
409 /* one byte - no need to convert this or EncryptionKeyLen 442 /* core returns wct = 1, but we do not ask for core - otherwise
410 from little endian */ 443 small wct just comes when dialect index is -1 indicating we
411 server->maxReq = le16_to_cpu(pSMBr->MaxMpxCount); 444 could not negotiate a common dialect */
412 /* probably no need to store and check maxvcs */ 445 rc = -EOPNOTSUPP;
413 server->maxBuf = 446 goto neg_err_exit;
414 min(le32_to_cpu(pSMBr->MaxBufferSize), 447#ifdef CONFIG_CIFS_WEAK_PW_HASH
448 } else if((pSMBr->hdr.WordCount == 13)
449 && (pSMBr->DialectIndex == LANMAN_PROT)) {
450 struct lanman_neg_rsp * rsp = (struct lanman_neg_rsp *)pSMBr;
451
452 if((secFlags & CIFSSEC_MAY_LANMAN) ||
453 (secFlags & CIFSSEC_MAY_PLNTXT))
454 server->secType = LANMAN;
455 else {
456 cERROR(1, ("mount failed weak security disabled"
457 " in /proc/fs/cifs/SecurityFlags"));
458 rc = -EOPNOTSUPP;
459 goto neg_err_exit;
460 }
461 server->secMode = (__u8)le16_to_cpu(rsp->SecurityMode);
462 server->maxReq = le16_to_cpu(rsp->MaxMpxCount);
463 server->maxBuf = min((__u32)le16_to_cpu(rsp->MaxBufSize),
464 (__u32)CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
465 GETU32(server->sessid) = le32_to_cpu(rsp->SessionKey);
466 /* even though we do not use raw we might as well set this
467 accurately, in case we ever find a need for it */
468 if((le16_to_cpu(rsp->RawMode) & RAW_ENABLE) == RAW_ENABLE) {
469 server->maxRw = 0xFF00;
470 server->capabilities = CAP_MPX_MODE | CAP_RAW_MODE;
471 } else {
472 server->maxRw = 0;/* we do not need to use raw anyway */
473 server->capabilities = CAP_MPX_MODE;
474 }
475 server->timeZone = le16_to_cpu(rsp->ServerTimeZone);
476
477 /* BB get server time for time conversions and add
478 code to use it and timezone since this is not UTC */
479
480 if (rsp->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) {
481 memcpy(server->cryptKey, rsp->EncryptionKey,
482 CIFS_CRYPTO_KEY_SIZE);
483 } else if (server->secMode & SECMODE_PW_ENCRYPT) {
484 rc = -EIO; /* need cryptkey unless plain text */
485 goto neg_err_exit;
486 }
487
488 cFYI(1,("LANMAN negotiated"));
489 /* we will not end up setting signing flags - as no signing
490 was in LANMAN and server did not return the flags on */
491 goto signing_check;
492#else /* weak security disabled */
493 } else if(pSMBr->hdr.WordCount == 13) {
494 cERROR(1,("mount failed, cifs module not built "
495 "with CIFS_WEAK_PW_HASH support"));
496 rc = -EOPNOTSUPP;
497#endif /* WEAK_PW_HASH */
498 goto neg_err_exit;
499 } else if(pSMBr->hdr.WordCount != 17) {
500 /* unknown wct */
501 rc = -EOPNOTSUPP;
502 goto neg_err_exit;
503 }
504 /* else wct == 17 NTLM */
505 server->secMode = pSMBr->SecurityMode;
506 if((server->secMode & SECMODE_USER) == 0)
507 cFYI(1,("share mode security"));
508
509 if((server->secMode & SECMODE_PW_ENCRYPT) == 0)
510#ifdef CONFIG_CIFS_WEAK_PW_HASH
511 if ((secFlags & CIFSSEC_MAY_PLNTXT) == 0)
512#endif /* CIFS_WEAK_PW_HASH */
513 cERROR(1,("Server requests plain text password"
514 " but client support disabled"));
515
516 if((secFlags & CIFSSEC_MUST_NTLMV2) == CIFSSEC_MUST_NTLMV2)
517 server->secType = NTLMv2;
518 else if(secFlags & CIFSSEC_MAY_NTLM)
519 server->secType = NTLM;
520 else if(secFlags & CIFSSEC_MAY_NTLMV2)
521 server->secType = NTLMv2;
522 /* else krb5 ... any others ... */
523
524 /* one byte, so no need to convert this or EncryptionKeyLen from
525 little endian */
526 server->maxReq = le16_to_cpu(pSMBr->MaxMpxCount);
527 /* probably no need to store and check maxvcs */
528 server->maxBuf = min(le32_to_cpu(pSMBr->MaxBufferSize),
415 (__u32) CIFSMaxBufSize + MAX_CIFS_HDR_SIZE); 529 (__u32) CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
416 server->maxRw = le32_to_cpu(pSMBr->MaxRawSize); 530 server->maxRw = le32_to_cpu(pSMBr->MaxRawSize);
417 cFYI(0, ("Max buf = %d", ses->server->maxBuf)); 531 cFYI(0, ("Max buf = %d", ses->server->maxBuf));
418 GETU32(ses->server->sessid) = le32_to_cpu(pSMBr->SessionKey); 532 GETU32(ses->server->sessid) = le32_to_cpu(pSMBr->SessionKey);
419 server->capabilities = le32_to_cpu(pSMBr->Capabilities); 533 server->capabilities = le32_to_cpu(pSMBr->Capabilities);
420 server->timeZone = le16_to_cpu(pSMBr->ServerTimeZone); 534 server->timeZone = le16_to_cpu(pSMBr->ServerTimeZone);
421 /* BB with UTC do we ever need to be using srvr timezone? */ 535 if (pSMBr->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) {
422 if (pSMBr->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) { 536 memcpy(server->cryptKey, pSMBr->u.EncryptionKey,
423 memcpy(server->cryptKey, pSMBr->u.EncryptionKey, 537 CIFS_CRYPTO_KEY_SIZE);
424 CIFS_CRYPTO_KEY_SIZE); 538 } else if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC)
425 } else if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC) 539 && (pSMBr->EncryptionKeyLength == 0)) {
426 && (pSMBr->EncryptionKeyLength == 0)) { 540 /* decode security blob */
427 /* decode security blob */ 541 } else if (server->secMode & SECMODE_PW_ENCRYPT) {
428 } else 542 rc = -EIO; /* no crypt key only if plain text pwd */
429 rc = -EIO; 543 goto neg_err_exit;
544 }
430 545
431 /* BB might be helpful to save off the domain of server here */ 546 /* BB might be helpful to save off the domain of server here */
432 547
433 if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC) && 548 if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC) &&
434 (server->capabilities & CAP_EXTENDED_SECURITY)) { 549 (server->capabilities & CAP_EXTENDED_SECURITY)) {
435 count = pSMBr->ByteCount; 550 count = pSMBr->ByteCount;
436 if (count < 16) 551 if (count < 16)
437 rc = -EIO; 552 rc = -EIO;
438 else if (count == 16) { 553 else if (count == 16) {
439 server->secType = RawNTLMSSP; 554 server->secType = RawNTLMSSP;
440 if (server->socketUseCount.counter > 1) { 555 if (server->socketUseCount.counter > 1) {
441 if (memcmp 556 if (memcmp(server->server_GUID,
442 (server->server_GUID, 557 pSMBr->u.extended_response.
443 pSMBr->u.extended_response. 558 GUID, 16) != 0) {
444 GUID, 16) != 0) { 559 cFYI(1, ("server UID changed"));
445 cFYI(1, ("server UID changed"));
446 memcpy(server->
447 server_GUID,
448 pSMBr->u.
449 extended_response.
450 GUID, 16);
451 }
452 } else
453 memcpy(server->server_GUID, 560 memcpy(server->server_GUID,
454 pSMBr->u.extended_response. 561 pSMBr->u.extended_response.GUID,
455 GUID, 16); 562 16);
456 } else {
457 rc = decode_negTokenInit(pSMBr->u.
458 extended_response.
459 SecurityBlob,
460 count - 16,
461 &server->secType);
462 if(rc == 1) {
463 /* BB Need to fill struct for sessetup here */
464 rc = -EOPNOTSUPP;
465 } else {
466 rc = -EINVAL;
467 } 563 }
564 } else
565 memcpy(server->server_GUID,
566 pSMBr->u.extended_response.GUID, 16);
567 } else {
568 rc = decode_negTokenInit(pSMBr->u.extended_response.
569 SecurityBlob,
570 count - 16,
571 &server->secType);
572 if(rc == 1) {
573 /* BB Need to fill struct for sessetup here */
574 rc = -EOPNOTSUPP;
575 } else {
576 rc = -EINVAL;
468 } 577 }
469 } else
470 server->capabilities &= ~CAP_EXTENDED_SECURITY;
471 if(sign_CIFS_PDUs == FALSE) {
472 if(server->secMode & SECMODE_SIGN_REQUIRED)
473 cERROR(1,
474 ("Server requires /proc/fs/cifs/PacketSigningEnabled"));
475 server->secMode &= ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
476 } else if(sign_CIFS_PDUs == 1) {
477 if((server->secMode & SECMODE_SIGN_REQUIRED) == 0)
478 server->secMode &= ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
479 } 578 }
480 579 } else
580 server->capabilities &= ~CAP_EXTENDED_SECURITY;
581
582#ifdef CONFIG_CIFS_WEAK_PW_HASH
583signing_check:
584#endif
585 if(sign_CIFS_PDUs == FALSE) {
586 if(server->secMode & SECMODE_SIGN_REQUIRED)
587 cERROR(1,("Server requires "
588 "/proc/fs/cifs/PacketSigningEnabled to be on"));
589 server->secMode &=
590 ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
591 } else if(sign_CIFS_PDUs == 1) {
592 if((server->secMode & SECMODE_SIGN_REQUIRED) == 0)
593 server->secMode &=
594 ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
595 } else if(sign_CIFS_PDUs == 2) {
596 if((server->secMode &
597 (SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED)) == 0) {
598 cERROR(1,("signing required but server lacks support"));
599 }
481 } 600 }
482 601neg_err_exit:
483 cifs_buf_release(pSMB); 602 cifs_buf_release(pSMB);
603
604 cFYI(1,("negprot rc %d",rc));
484 return rc; 605 return rc;
485} 606}
486 607
@@ -2239,7 +2360,7 @@ CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon,
2239 } 2360 }
2240 symlinkinfo[buflen] = 0; /* just in case so the caller 2361 symlinkinfo[buflen] = 0; /* just in case so the caller
2241 does not go off the end of the buffer */ 2362 does not go off the end of the buffer */
2242 cFYI(1,("readlink result - %s ",symlinkinfo)); 2363 cFYI(1,("readlink result - %s",symlinkinfo));
2243 } 2364 }
2244 } 2365 }
2245qreparse_out: 2366qreparse_out:
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index bae1479318..876eb9ef85 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -49,8 +49,6 @@
49 49
50static DECLARE_COMPLETION(cifsd_complete); 50static DECLARE_COMPLETION(cifsd_complete);
51 51
52extern void SMBencrypt(unsigned char *passwd, unsigned char *c8,
53 unsigned char *p24);
54extern void SMBNTencrypt(unsigned char *passwd, unsigned char *c8, 52extern void SMBNTencrypt(unsigned char *passwd, unsigned char *c8,
55 unsigned char *p24); 53 unsigned char *p24);
56 54
@@ -70,6 +68,7 @@ struct smb_vol {
70 gid_t linux_gid; 68 gid_t linux_gid;
71 mode_t file_mode; 69 mode_t file_mode;
72 mode_t dir_mode; 70 mode_t dir_mode;
71 unsigned secFlg;
73 unsigned rw:1; 72 unsigned rw:1;
74 unsigned retry:1; 73 unsigned retry:1;
75 unsigned intr:1; 74 unsigned intr:1;
@@ -83,12 +82,7 @@ struct smb_vol {
83 unsigned remap:1; /* set to remap seven reserved chars in filenames */ 82 unsigned remap:1; /* set to remap seven reserved chars in filenames */
84 unsigned posix_paths:1; /* unset to not ask for posix pathnames. */ 83 unsigned posix_paths:1; /* unset to not ask for posix pathnames. */
85 unsigned sfu_emul:1; 84 unsigned sfu_emul:1;
86 unsigned krb5:1;
87 unsigned ntlm:1;
88 unsigned ntlmv2:1;
89 unsigned nullauth:1; /* attempt to authenticate with null user */ 85 unsigned nullauth:1; /* attempt to authenticate with null user */
90 unsigned sign:1;
91 unsigned seal:1; /* encrypt */
92 unsigned nocase; /* request case insensitive filenames */ 86 unsigned nocase; /* request case insensitive filenames */
93 unsigned nobrl; /* disable sending byte range locks to srv */ 87 unsigned nobrl; /* disable sending byte range locks to srv */
94 unsigned int rsize; 88 unsigned int rsize;
@@ -369,21 +363,21 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
369 continue; 363 continue;
370 if (bigbuf == NULL) { 364 if (bigbuf == NULL) {
371 bigbuf = cifs_buf_get(); 365 bigbuf = cifs_buf_get();
372 if(bigbuf == NULL) { 366 if (!bigbuf) {
373 cERROR(1,("No memory for large SMB response")); 367 cERROR(1, ("No memory for large SMB response"));
374 msleep(3000); 368 msleep(3000);
375 /* retry will check if exiting */ 369 /* retry will check if exiting */
376 continue; 370 continue;
377 } 371 }
378 } else if(isLargeBuf) { 372 } else if (isLargeBuf) {
379 /* we are reusing a dirtry large buf, clear its start */ 373 /* we are reusing a dirty large buf, clear its start */
380 memset(bigbuf, 0, sizeof (struct smb_hdr)); 374 memset(bigbuf, 0, sizeof (struct smb_hdr));
381 } 375 }
382 376
383 if (smallbuf == NULL) { 377 if (smallbuf == NULL) {
384 smallbuf = cifs_small_buf_get(); 378 smallbuf = cifs_small_buf_get();
385 if(smallbuf == NULL) { 379 if (!smallbuf) {
386 cERROR(1,("No memory for SMB response")); 380 cERROR(1, ("No memory for SMB response"));
387 msleep(1000); 381 msleep(1000);
388 /* retry will check if exiting */ 382 /* retry will check if exiting */
389 continue; 383 continue;
@@ -403,12 +397,12 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
403 kernel_recvmsg(csocket, &smb_msg, 397 kernel_recvmsg(csocket, &smb_msg,
404 &iov, 1, 4, 0 /* BB see socket.h flags */); 398 &iov, 1, 4, 0 /* BB see socket.h flags */);
405 399
406 if(server->tcpStatus == CifsExiting) { 400 if (server->tcpStatus == CifsExiting) {
407 break; 401 break;
408 } else if (server->tcpStatus == CifsNeedReconnect) { 402 } else if (server->tcpStatus == CifsNeedReconnect) {
409 cFYI(1,("Reconnect after server stopped responding")); 403 cFYI(1, ("Reconnect after server stopped responding"));
410 cifs_reconnect(server); 404 cifs_reconnect(server);
411 cFYI(1,("call to reconnect done")); 405 cFYI(1, ("call to reconnect done"));
412 csocket = server->ssocket; 406 csocket = server->ssocket;
413 continue; 407 continue;
414 } else if ((length == -ERESTARTSYS) || (length == -EAGAIN)) { 408 } else if ((length == -ERESTARTSYS) || (length == -EAGAIN)) {
@@ -417,15 +411,15 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
417 tcpStatus CifsNeedReconnect if server hung */ 411 tcpStatus CifsNeedReconnect if server hung */
418 continue; 412 continue;
419 } else if (length <= 0) { 413 } else if (length <= 0) {
420 if(server->tcpStatus == CifsNew) { 414 if (server->tcpStatus == CifsNew) {
421 cFYI(1,("tcp session abend after SMBnegprot")); 415 cFYI(1, ("tcp session abend after SMBnegprot"));
422 /* some servers kill the TCP session rather than 416 /* some servers kill the TCP session rather than
423 returning an SMB negprot error, in which 417 returning an SMB negprot error, in which
424 case reconnecting here is not going to help, 418 case reconnecting here is not going to help,
425 and so simply return error to mount */ 419 and so simply return error to mount */
426 break; 420 break;
427 } 421 }
428 if(length == -EINTR) { 422 if (!try_to_freeze() && (length == -EINTR)) {
429 cFYI(1,("cifsd thread killed")); 423 cFYI(1,("cifsd thread killed"));
430 break; 424 break;
431 } 425 }
@@ -585,9 +579,11 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
585 /* merge response - fix up 1st*/ 579 /* merge response - fix up 1st*/
586 if(coalesce_t2(smb_buffer, 580 if(coalesce_t2(smb_buffer,
587 mid_entry->resp_buf)) { 581 mid_entry->resp_buf)) {
582 mid_entry->multiRsp = 1;
588 break; 583 break;
589 } else { 584 } else {
590 /* all parts received */ 585 /* all parts received */
586 mid_entry->multiEnd = 1;
591 goto multi_t2_fnd; 587 goto multi_t2_fnd;
592 } 588 }
593 } else { 589 } else {
@@ -632,9 +628,14 @@ multi_t2_fnd:
632 wake_up_process(task_to_wake); 628 wake_up_process(task_to_wake);
633 } else if ((is_valid_oplock_break(smb_buffer, server) == FALSE) 629 } else if ((is_valid_oplock_break(smb_buffer, server) == FALSE)
634 && (isMultiRsp == FALSE)) { 630 && (isMultiRsp == FALSE)) {
635 cERROR(1, ("No task to wake, unknown frame rcvd!")); 631 cERROR(1, ("No task to wake, unknown frame rcvd! NumMids %d", midCount.counter));
636 cifs_dump_mem("Received Data is: ",(char *)smb_buffer, 632 cifs_dump_mem("Received Data is: ",(char *)smb_buffer,
637 sizeof(struct smb_hdr)); 633 sizeof(struct smb_hdr));
634#ifdef CONFIG_CIFS_DEBUG2
635 cifs_dump_detail(smb_buffer);
636 cifs_dump_mids(server);
637#endif /* CIFS_DEBUG2 */
638
638 } 639 }
639 } /* end while !EXITING */ 640 } /* end while !EXITING */
640 641
@@ -784,7 +785,6 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
784 785
785 /* vol->retry default is 0 (i.e. "soft" limited retry not hard retry) */ 786 /* vol->retry default is 0 (i.e. "soft" limited retry not hard retry) */
786 vol->rw = TRUE; 787 vol->rw = TRUE;
787 vol->ntlm = TRUE;
788 /* default is always to request posix paths. */ 788 /* default is always to request posix paths. */
789 vol->posix_paths = 1; 789 vol->posix_paths = 1;
790 790
@@ -915,30 +915,35 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
915 cERROR(1,("no security value specified")); 915 cERROR(1,("no security value specified"));
916 continue; 916 continue;
917 } else if (strnicmp(value, "krb5i", 5) == 0) { 917 } else if (strnicmp(value, "krb5i", 5) == 0) {
918 vol->sign = 1; 918 vol->secFlg |= CIFSSEC_MAY_KRB5 |
919 vol->krb5 = 1; 919 CIFSSEC_MUST_SIGN;
920 } else if (strnicmp(value, "krb5p", 5) == 0) { 920 } else if (strnicmp(value, "krb5p", 5) == 0) {
921 /* vol->seal = 1; 921 /* vol->secFlg |= CIFSSEC_MUST_SEAL |
922 vol->krb5 = 1; */ 922 CIFSSEC_MAY_KRB5; */
923 cERROR(1,("Krb5 cifs privacy not supported")); 923 cERROR(1,("Krb5 cifs privacy not supported"));
924 return 1; 924 return 1;
925 } else if (strnicmp(value, "krb5", 4) == 0) { 925 } else if (strnicmp(value, "krb5", 4) == 0) {
926 vol->krb5 = 1; 926 vol->secFlg |= CIFSSEC_MAY_KRB5;
927 } else if (strnicmp(value, "ntlmv2i", 7) == 0) { 927 } else if (strnicmp(value, "ntlmv2i", 7) == 0) {
928 vol->ntlmv2 = 1; 928 vol->secFlg |= CIFSSEC_MAY_NTLMV2 |
929 vol->sign = 1; 929 CIFSSEC_MUST_SIGN;
930 } else if (strnicmp(value, "ntlmv2", 6) == 0) { 930 } else if (strnicmp(value, "ntlmv2", 6) == 0) {
931 vol->ntlmv2 = 1; 931 vol->secFlg |= CIFSSEC_MAY_NTLMV2;
932 } else if (strnicmp(value, "ntlmi", 5) == 0) { 932 } else if (strnicmp(value, "ntlmi", 5) == 0) {
933 vol->ntlm = 1; 933 vol->secFlg |= CIFSSEC_MAY_NTLM |
934 vol->sign = 1; 934 CIFSSEC_MUST_SIGN;
935 } else if (strnicmp(value, "ntlm", 4) == 0) { 935 } else if (strnicmp(value, "ntlm", 4) == 0) {
936 /* ntlm is default so can be turned off too */ 936 /* ntlm is default so can be turned off too */
937 vol->ntlm = 1; 937 vol->secFlg |= CIFSSEC_MAY_NTLM;
938 } else if (strnicmp(value, "nontlm", 6) == 0) { 938 } else if (strnicmp(value, "nontlm", 6) == 0) {
939 vol->ntlm = 0; 939 /* BB is there a better way to do this? */
940 vol->secFlg |= CIFSSEC_MAY_NTLMV2;
941#ifdef CONFIG_CIFS_WEAK_PW_HASH
942 } else if (strnicmp(value, "lanman", 6) == 0) {
943 vol->secFlg |= CIFSSEC_MAY_LANMAN;
944#endif
940 } else if (strnicmp(value, "none", 4) == 0) { 945 } else if (strnicmp(value, "none", 4) == 0) {
941 vol->nullauth = 1; 946 vol->nullauth = 1;
942 } else { 947 } else {
943 cERROR(1,("bad security option: %s", value)); 948 cERROR(1,("bad security option: %s", value));
944 return 1; 949 return 1;
@@ -976,7 +981,7 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
976 } 981 }
977 /* BB are there cases in which a comma can be valid in 982 /* BB are there cases in which a comma can be valid in
978 a domain name and need special handling? */ 983 a domain name and need special handling? */
979 if (strnlen(value, 65) < 65) { 984 if (strnlen(value, 256) < 256) {
980 vol->domainname = value; 985 vol->domainname = value;
981 cFYI(1, ("Domain name set")); 986 cFYI(1, ("Domain name set"));
982 } else { 987 } else {
@@ -1168,6 +1173,10 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
1168 vol->no_psx_acl = 0; 1173 vol->no_psx_acl = 0;
1169 } else if (strnicmp(data, "noacl",5) == 0) { 1174 } else if (strnicmp(data, "noacl",5) == 0) {
1170 vol->no_psx_acl = 1; 1175 vol->no_psx_acl = 1;
1176 } else if (strnicmp(data, "sign",4) == 0) {
1177 vol->secFlg |= CIFSSEC_MUST_SIGN;
1178/* } else if (strnicmp(data, "seal",4) == 0) {
1179 vol->secFlg |= CIFSSEC_MUST_SEAL; */
1171 } else if (strnicmp(data, "direct",6) == 0) { 1180 } else if (strnicmp(data, "direct",6) == 0) {
1172 vol->direct_io = 1; 1181 vol->direct_io = 1;
1173 } else if (strnicmp(data, "forcedirectio",13) == 0) { 1182 } else if (strnicmp(data, "forcedirectio",13) == 0) {
@@ -1762,11 +1771,18 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
1762 if (volume_info.username) 1771 if (volume_info.username)
1763 strncpy(pSesInfo->userName, 1772 strncpy(pSesInfo->userName,
1764 volume_info.username,MAX_USERNAME_SIZE); 1773 volume_info.username,MAX_USERNAME_SIZE);
1765 if (volume_info.domainname) 1774 if (volume_info.domainname) {
1766 strncpy(pSesInfo->domainName, 1775 int len = strlen(volume_info.domainname);
1767 volume_info.domainname,MAX_USERNAME_SIZE); 1776 pSesInfo->domainName =
1777 kmalloc(len + 1, GFP_KERNEL);
1778 if(pSesInfo->domainName)
1779 strcpy(pSesInfo->domainName,
1780 volume_info.domainname);
1781 }
1768 pSesInfo->linux_uid = volume_info.linux_uid; 1782 pSesInfo->linux_uid = volume_info.linux_uid;
1783 pSesInfo->overrideSecFlg = volume_info.secFlg;
1769 down(&pSesInfo->sesSem); 1784 down(&pSesInfo->sesSem);
1785 /* BB FIXME need to pass vol->secFlgs BB */
1770 rc = cifs_setup_session(xid,pSesInfo, cifs_sb->local_nls); 1786 rc = cifs_setup_session(xid,pSesInfo, cifs_sb->local_nls);
1771 up(&pSesInfo->sesSem); 1787 up(&pSesInfo->sesSem);
1772 if(!rc) 1788 if(!rc)
@@ -1980,7 +1996,7 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
1980 1996
1981static int 1997static int
1982CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses, 1998CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
1983 char session_key[CIFS_SESSION_KEY_SIZE], 1999 char session_key[CIFS_SESS_KEY_SIZE],
1984 const struct nls_table *nls_codepage) 2000 const struct nls_table *nls_codepage)
1985{ 2001{
1986 struct smb_hdr *smb_buffer; 2002 struct smb_hdr *smb_buffer;
@@ -2038,15 +2054,15 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
2038 pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities); 2054 pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
2039 2055
2040 pSMB->req_no_secext.CaseInsensitivePasswordLength = 2056 pSMB->req_no_secext.CaseInsensitivePasswordLength =
2041 cpu_to_le16(CIFS_SESSION_KEY_SIZE); 2057 cpu_to_le16(CIFS_SESS_KEY_SIZE);
2042 2058
2043 pSMB->req_no_secext.CaseSensitivePasswordLength = 2059 pSMB->req_no_secext.CaseSensitivePasswordLength =
2044 cpu_to_le16(CIFS_SESSION_KEY_SIZE); 2060 cpu_to_le16(CIFS_SESS_KEY_SIZE);
2045 bcc_ptr = pByteArea(smb_buffer); 2061 bcc_ptr = pByteArea(smb_buffer);
2046 memcpy(bcc_ptr, (char *) session_key, CIFS_SESSION_KEY_SIZE); 2062 memcpy(bcc_ptr, (char *) session_key, CIFS_SESS_KEY_SIZE);
2047 bcc_ptr += CIFS_SESSION_KEY_SIZE; 2063 bcc_ptr += CIFS_SESS_KEY_SIZE;
2048 memcpy(bcc_ptr, (char *) session_key, CIFS_SESSION_KEY_SIZE); 2064 memcpy(bcc_ptr, (char *) session_key, CIFS_SESS_KEY_SIZE);
2049 bcc_ptr += CIFS_SESSION_KEY_SIZE; 2065 bcc_ptr += CIFS_SESS_KEY_SIZE;
2050 2066
2051 if (ses->capabilities & CAP_UNICODE) { 2067 if (ses->capabilities & CAP_UNICODE) {
2052 if ((long) bcc_ptr % 2) { /* must be word aligned for Unicode */ 2068 if ((long) bcc_ptr % 2) { /* must be word aligned for Unicode */
@@ -2054,7 +2070,7 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
2054 bcc_ptr++; 2070 bcc_ptr++;
2055 } 2071 }
2056 if(user == NULL) 2072 if(user == NULL)
2057 bytes_returned = 0; /* skill null user */ 2073 bytes_returned = 0; /* skip null user */
2058 else 2074 else
2059 bytes_returned = 2075 bytes_returned =
2060 cifs_strtoUCS((__le16 *) bcc_ptr, user, 100, 2076 cifs_strtoUCS((__le16 *) bcc_ptr, user, 100,
@@ -2162,8 +2178,7 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
2162 if (remaining_words > 0) { 2178 if (remaining_words > 0) {
2163 len = UniStrnlen((wchar_t *)bcc_ptr, 2179 len = UniStrnlen((wchar_t *)bcc_ptr,
2164 remaining_words-1); 2180 remaining_words-1);
2165 if(ses->serverNOS) 2181 kfree(ses->serverNOS);
2166 kfree(ses->serverNOS);
2167 ses->serverNOS = kzalloc(2 * (len + 1),GFP_KERNEL); 2182 ses->serverNOS = kzalloc(2 * (len + 1),GFP_KERNEL);
2168 if(ses->serverNOS == NULL) 2183 if(ses->serverNOS == NULL)
2169 goto sesssetup_nomem; 2184 goto sesssetup_nomem;
@@ -2203,12 +2218,10 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
2203 /* if these kcallocs fail not much we 2218 /* if these kcallocs fail not much we
2204 can do, but better to not fail the 2219 can do, but better to not fail the
2205 sesssetup itself */ 2220 sesssetup itself */
2206 if(ses->serverDomain) 2221 kfree(ses->serverDomain);
2207 kfree(ses->serverDomain);
2208 ses->serverDomain = 2222 ses->serverDomain =
2209 kzalloc(2, GFP_KERNEL); 2223 kzalloc(2, GFP_KERNEL);
2210 if(ses->serverNOS) 2224 kfree(ses->serverNOS);
2211 kfree(ses->serverNOS);
2212 ses->serverNOS = 2225 ses->serverNOS =
2213 kzalloc(2, GFP_KERNEL); 2226 kzalloc(2, GFP_KERNEL);
2214 } 2227 }
@@ -2217,8 +2230,7 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
2217 if (((long) bcc_ptr + len) - (long) 2230 if (((long) bcc_ptr + len) - (long)
2218 pByteArea(smb_buffer_response) 2231 pByteArea(smb_buffer_response)
2219 <= BCC(smb_buffer_response)) { 2232 <= BCC(smb_buffer_response)) {
2220 if(ses->serverOS) 2233 kfree(ses->serverOS);
2221 kfree(ses->serverOS);
2222 ses->serverOS = kzalloc(len + 1,GFP_KERNEL); 2234 ses->serverOS = kzalloc(len + 1,GFP_KERNEL);
2223 if(ses->serverOS == NULL) 2235 if(ses->serverOS == NULL)
2224 goto sesssetup_nomem; 2236 goto sesssetup_nomem;
@@ -2229,8 +2241,7 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
2229 bcc_ptr++; 2241 bcc_ptr++;
2230 2242
2231 len = strnlen(bcc_ptr, 1024); 2243 len = strnlen(bcc_ptr, 1024);
2232 if(ses->serverNOS) 2244 kfree(ses->serverNOS);
2233 kfree(ses->serverNOS);
2234 ses->serverNOS = kzalloc(len + 1,GFP_KERNEL); 2245 ses->serverNOS = kzalloc(len + 1,GFP_KERNEL);
2235 if(ses->serverNOS == NULL) 2246 if(ses->serverNOS == NULL)
2236 goto sesssetup_nomem; 2247 goto sesssetup_nomem;
@@ -2274,292 +2285,6 @@ sesssetup_nomem: /* do not return an error on nomem for the info strings,
2274} 2285}
2275 2286
2276static int 2287static int
2277CIFSSpnegoSessSetup(unsigned int xid, struct cifsSesInfo *ses,
2278 char *SecurityBlob,int SecurityBlobLength,
2279 const struct nls_table *nls_codepage)
2280{
2281 struct smb_hdr *smb_buffer;
2282 struct smb_hdr *smb_buffer_response;
2283 SESSION_SETUP_ANDX *pSMB;
2284 SESSION_SETUP_ANDX *pSMBr;
2285 char *bcc_ptr;
2286 char *user;
2287 char *domain;
2288 int rc = 0;
2289 int remaining_words = 0;
2290 int bytes_returned = 0;
2291 int len;
2292 __u32 capabilities;
2293 __u16 count;
2294
2295 cFYI(1, ("In spnego sesssetup "));
2296 if(ses == NULL)
2297 return -EINVAL;
2298 user = ses->userName;
2299 domain = ses->domainName;
2300
2301 smb_buffer = cifs_buf_get();
2302 if (smb_buffer == NULL) {
2303 return -ENOMEM;
2304 }
2305 smb_buffer_response = smb_buffer;
2306 pSMBr = pSMB = (SESSION_SETUP_ANDX *) smb_buffer;
2307
2308 /* send SMBsessionSetup here */
2309 header_assemble(smb_buffer, SMB_COM_SESSION_SETUP_ANDX,
2310 NULL /* no tCon exists yet */ , 12 /* wct */ );
2311
2312 smb_buffer->Mid = GetNextMid(ses->server);
2313 pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
2314 pSMB->req.AndXCommand = 0xFF;
2315 if(ses->server->maxBuf > 64*1024)
2316 ses->server->maxBuf = (64*1023);
2317 pSMB->req.MaxBufferSize = cpu_to_le16(ses->server->maxBuf);
2318 pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
2319
2320 if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
2321 smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
2322
2323 capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
2324 CAP_EXTENDED_SECURITY;
2325 if (ses->capabilities & CAP_UNICODE) {
2326 smb_buffer->Flags2 |= SMBFLG2_UNICODE;
2327 capabilities |= CAP_UNICODE;
2328 }
2329 if (ses->capabilities & CAP_STATUS32) {
2330 smb_buffer->Flags2 |= SMBFLG2_ERR_STATUS;
2331 capabilities |= CAP_STATUS32;
2332 }
2333 if (ses->capabilities & CAP_DFS) {
2334 smb_buffer->Flags2 |= SMBFLG2_DFS;
2335 capabilities |= CAP_DFS;
2336 }
2337 pSMB->req.Capabilities = cpu_to_le32(capabilities);
2338
2339 pSMB->req.SecurityBlobLength = cpu_to_le16(SecurityBlobLength);
2340 bcc_ptr = pByteArea(smb_buffer);
2341 memcpy(bcc_ptr, SecurityBlob, SecurityBlobLength);
2342 bcc_ptr += SecurityBlobLength;
2343
2344 if (ses->capabilities & CAP_UNICODE) {
2345 if ((long) bcc_ptr % 2) { /* must be word aligned for Unicode strings */
2346 *bcc_ptr = 0;
2347 bcc_ptr++;
2348 }
2349 bytes_returned =
2350 cifs_strtoUCS((__le16 *) bcc_ptr, user, 100, nls_codepage);
2351 bcc_ptr += 2 * bytes_returned; /* convert num of 16 bit words to bytes */
2352 bcc_ptr += 2; /* trailing null */
2353 if (domain == NULL)
2354 bytes_returned =
2355 cifs_strtoUCS((__le16 *) bcc_ptr,
2356 "CIFS_LINUX_DOM", 32, nls_codepage);
2357 else
2358 bytes_returned =
2359 cifs_strtoUCS((__le16 *) bcc_ptr, domain, 64,
2360 nls_codepage);
2361 bcc_ptr += 2 * bytes_returned;
2362 bcc_ptr += 2;
2363 bytes_returned =
2364 cifs_strtoUCS((__le16 *) bcc_ptr, "Linux version ",
2365 32, nls_codepage);
2366 bcc_ptr += 2 * bytes_returned;
2367 bytes_returned =
2368 cifs_strtoUCS((__le16 *) bcc_ptr, system_utsname.release, 32,
2369 nls_codepage);
2370 bcc_ptr += 2 * bytes_returned;
2371 bcc_ptr += 2;
2372 bytes_returned =
2373 cifs_strtoUCS((__le16 *) bcc_ptr, CIFS_NETWORK_OPSYS,
2374 64, nls_codepage);
2375 bcc_ptr += 2 * bytes_returned;
2376 bcc_ptr += 2;
2377 } else {
2378 strncpy(bcc_ptr, user, 200);
2379 bcc_ptr += strnlen(user, 200);
2380 *bcc_ptr = 0;
2381 bcc_ptr++;
2382 if (domain == NULL) {
2383 strcpy(bcc_ptr, "CIFS_LINUX_DOM");
2384 bcc_ptr += strlen("CIFS_LINUX_DOM") + 1;
2385 } else {
2386 strncpy(bcc_ptr, domain, 64);
2387 bcc_ptr += strnlen(domain, 64);
2388 *bcc_ptr = 0;
2389 bcc_ptr++;
2390 }
2391 strcpy(bcc_ptr, "Linux version ");
2392 bcc_ptr += strlen("Linux version ");
2393 strcpy(bcc_ptr, system_utsname.release);
2394 bcc_ptr += strlen(system_utsname.release) + 1;
2395 strcpy(bcc_ptr, CIFS_NETWORK_OPSYS);
2396 bcc_ptr += strlen(CIFS_NETWORK_OPSYS) + 1;
2397 }
2398 count = (long) bcc_ptr - (long) pByteArea(smb_buffer);
2399 smb_buffer->smb_buf_length += count;
2400 pSMB->req.ByteCount = cpu_to_le16(count);
2401
2402 rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response,
2403 &bytes_returned, 1);
2404 if (rc) {
2405/* rc = map_smb_to_linux_error(smb_buffer_response); *//* done in SendReceive now */
2406 } else if ((smb_buffer_response->WordCount == 3)
2407 || (smb_buffer_response->WordCount == 4)) {
2408 __u16 action = le16_to_cpu(pSMBr->resp.Action);
2409 __u16 blob_len =
2410 le16_to_cpu(pSMBr->resp.SecurityBlobLength);
2411 if (action & GUEST_LOGIN)
2412 cFYI(1, (" Guest login")); /* BB do we want to set anything in SesInfo struct ? */
2413 if (ses) {
2414 ses->Suid = smb_buffer_response->Uid; /* UID left in wire format (le) */
2415 cFYI(1, ("UID = %d ", ses->Suid));
2416 bcc_ptr = pByteArea(smb_buffer_response); /* response can have either 3 or 4 word count - Samba sends 3 */
2417
2418 /* BB Fix below to make endian neutral !! */
2419
2420 if ((pSMBr->resp.hdr.WordCount == 3)
2421 || ((pSMBr->resp.hdr.WordCount == 4)
2422 && (blob_len <
2423 pSMBr->resp.ByteCount))) {
2424 if (pSMBr->resp.hdr.WordCount == 4) {
2425 bcc_ptr +=
2426 blob_len;
2427 cFYI(1,
2428 ("Security Blob Length %d ",
2429 blob_len));
2430 }
2431
2432 if (smb_buffer->Flags2 & SMBFLG2_UNICODE) {
2433 if ((long) (bcc_ptr) % 2) {
2434 remaining_words =
2435 (BCC(smb_buffer_response)
2436 - 1) / 2;
2437 bcc_ptr++; /* Unicode strings must be word aligned */
2438 } else {
2439 remaining_words =
2440 BCC
2441 (smb_buffer_response) / 2;
2442 }
2443 len =
2444 UniStrnlen((wchar_t *) bcc_ptr,
2445 remaining_words - 1);
2446/* We look for obvious messed up bcc or strings in response so we do not go off
2447 the end since (at least) WIN2K and Windows XP have a major bug in not null
2448 terminating last Unicode string in response */
2449 if(ses->serverOS)
2450 kfree(ses->serverOS);
2451 ses->serverOS =
2452 kzalloc(2 * (len + 1), GFP_KERNEL);
2453 cifs_strfromUCS_le(ses->serverOS,
2454 (__le16 *)
2455 bcc_ptr, len,
2456 nls_codepage);
2457 bcc_ptr += 2 * (len + 1);
2458 remaining_words -= len + 1;
2459 ses->serverOS[2 * len] = 0;
2460 ses->serverOS[1 + (2 * len)] = 0;
2461 if (remaining_words > 0) {
2462 len = UniStrnlen((wchar_t *)bcc_ptr,
2463 remaining_words
2464 - 1);
2465 if(ses->serverNOS)
2466 kfree(ses->serverNOS);
2467 ses->serverNOS =
2468 kzalloc(2 * (len + 1),
2469 GFP_KERNEL);
2470 cifs_strfromUCS_le(ses->serverNOS,
2471 (__le16 *)bcc_ptr,
2472 len,
2473 nls_codepage);
2474 bcc_ptr += 2 * (len + 1);
2475 ses->serverNOS[2 * len] = 0;
2476 ses->serverNOS[1 + (2 * len)] = 0;
2477 remaining_words -= len + 1;
2478 if (remaining_words > 0) {
2479 len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words);
2480 /* last string not null terminated (e.g.Windows XP/2000) */
2481 if(ses->serverDomain)
2482 kfree(ses->serverDomain);
2483 ses->serverDomain = kzalloc(2*(len+1),GFP_KERNEL);
2484 cifs_strfromUCS_le(ses->serverDomain,
2485 (__le16 *)bcc_ptr,
2486 len, nls_codepage);
2487 bcc_ptr += 2*(len+1);
2488 ses->serverDomain[2*len] = 0;
2489 ses->serverDomain[1+(2*len)] = 0;
2490 } /* else no more room so create dummy domain string */
2491 else {
2492 if(ses->serverDomain)
2493 kfree(ses->serverDomain);
2494 ses->serverDomain =
2495 kzalloc(2,GFP_KERNEL);
2496 }
2497 } else {/* no room use dummy domain&NOS */
2498 if(ses->serverDomain)
2499 kfree(ses->serverDomain);
2500 ses->serverDomain = kzalloc(2, GFP_KERNEL);
2501 if(ses->serverNOS)
2502 kfree(ses->serverNOS);
2503 ses->serverNOS = kzalloc(2, GFP_KERNEL);
2504 }
2505 } else { /* ASCII */
2506
2507 len = strnlen(bcc_ptr, 1024);
2508 if (((long) bcc_ptr + len) - (long)
2509 pByteArea(smb_buffer_response)
2510 <= BCC(smb_buffer_response)) {
2511 if(ses->serverOS)
2512 kfree(ses->serverOS);
2513 ses->serverOS = kzalloc(len + 1, GFP_KERNEL);
2514 strncpy(ses->serverOS, bcc_ptr, len);
2515
2516 bcc_ptr += len;
2517 bcc_ptr[0] = 0; /* null terminate the string */
2518 bcc_ptr++;
2519
2520 len = strnlen(bcc_ptr, 1024);
2521 if(ses->serverNOS)
2522 kfree(ses->serverNOS);
2523 ses->serverNOS = kzalloc(len + 1,GFP_KERNEL);
2524 strncpy(ses->serverNOS, bcc_ptr, len);
2525 bcc_ptr += len;
2526 bcc_ptr[0] = 0;
2527 bcc_ptr++;
2528
2529 len = strnlen(bcc_ptr, 1024);
2530 if(ses->serverDomain)
2531 kfree(ses->serverDomain);
2532 ses->serverDomain = kzalloc(len + 1, GFP_KERNEL);
2533 strncpy(ses->serverDomain, bcc_ptr, len);
2534 bcc_ptr += len;
2535 bcc_ptr[0] = 0;
2536 bcc_ptr++;
2537 } else
2538 cFYI(1,
2539 ("Variable field of length %d extends beyond end of smb ",
2540 len));
2541 }
2542 } else {
2543 cERROR(1,
2544 (" Security Blob Length extends beyond end of SMB"));
2545 }
2546 } else {
2547 cERROR(1, ("No session structure passed in."));
2548 }
2549 } else {
2550 cERROR(1,
2551 (" Invalid Word count %d: ",
2552 smb_buffer_response->WordCount));
2553 rc = -EIO;
2554 }
2555
2556 if (smb_buffer)
2557 cifs_buf_release(smb_buffer);
2558
2559 return rc;
2560}
2561
2562static int
2563CIFSNTLMSSPNegotiateSessSetup(unsigned int xid, 2288CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
2564 struct cifsSesInfo *ses, int * pNTLMv2_flag, 2289 struct cifsSesInfo *ses, int * pNTLMv2_flag,
2565 const struct nls_table *nls_codepage) 2290 const struct nls_table *nls_codepage)
@@ -2635,8 +2360,8 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
2635 /* NTLMSSP_NEGOTIATE_ALWAYS_SIGN | */ NTLMSSP_NEGOTIATE_128; 2360 /* NTLMSSP_NEGOTIATE_ALWAYS_SIGN | */ NTLMSSP_NEGOTIATE_128;
2636 if(sign_CIFS_PDUs) 2361 if(sign_CIFS_PDUs)
2637 negotiate_flags |= NTLMSSP_NEGOTIATE_SIGN; 2362 negotiate_flags |= NTLMSSP_NEGOTIATE_SIGN;
2638 if(ntlmv2_support) 2363/* if(ntlmv2_support)
2639 negotiate_flags |= NTLMSSP_NEGOTIATE_NTLMV2; 2364 negotiate_flags |= NTLMSSP_NEGOTIATE_NTLMV2;*/
2640 /* setup pointers to domain name and workstation name */ 2365 /* setup pointers to domain name and workstation name */
2641 bcc_ptr += SecurityBlobLength; 2366 bcc_ptr += SecurityBlobLength;
2642 2367
@@ -2783,8 +2508,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
2783 bcc_ptr, 2508 bcc_ptr,
2784 remaining_words 2509 remaining_words
2785 - 1); 2510 - 1);
2786 if(ses->serverNOS) 2511 kfree(ses->serverNOS);
2787 kfree(ses->serverNOS);
2788 ses->serverNOS = 2512 ses->serverNOS =
2789 kzalloc(2 * (len + 1), 2513 kzalloc(2 * (len + 1),
2790 GFP_KERNEL); 2514 GFP_KERNEL);
@@ -2802,8 +2526,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
2802 if (remaining_words > 0) { 2526 if (remaining_words > 0) {
2803 len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words); 2527 len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words);
2804 /* last string is not always null terminated (for e.g. for Windows XP & 2000) */ 2528 /* last string is not always null terminated (for e.g. for Windows XP & 2000) */
2805 if(ses->serverDomain) 2529 kfree(ses->serverDomain);
2806 kfree(ses->serverDomain);
2807 ses->serverDomain = 2530 ses->serverDomain =
2808 kzalloc(2 * 2531 kzalloc(2 *
2809 (len + 2532 (len +
@@ -2822,19 +2545,16 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
2822 = 0; 2545 = 0;
2823 } /* else no more room so create dummy domain string */ 2546 } /* else no more room so create dummy domain string */
2824 else { 2547 else {
2825 if(ses->serverDomain) 2548 kfree(ses->serverDomain);
2826 kfree(ses->serverDomain);
2827 ses->serverDomain = 2549 ses->serverDomain =
2828 kzalloc(2, 2550 kzalloc(2,
2829 GFP_KERNEL); 2551 GFP_KERNEL);
2830 } 2552 }
2831 } else { /* no room so create dummy domain and NOS string */ 2553 } else { /* no room so create dummy domain and NOS string */
2832 if(ses->serverDomain); 2554 kfree(ses->serverDomain);
2833 kfree(ses->serverDomain);
2834 ses->serverDomain = 2555 ses->serverDomain =
2835 kzalloc(2, GFP_KERNEL); 2556 kzalloc(2, GFP_KERNEL);
2836 if(ses->serverNOS) 2557 kfree(ses->serverNOS);
2837 kfree(ses->serverNOS);
2838 ses->serverNOS = 2558 ses->serverNOS =
2839 kzalloc(2, GFP_KERNEL); 2559 kzalloc(2, GFP_KERNEL);
2840 } 2560 }
@@ -2856,8 +2576,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
2856 bcc_ptr++; 2576 bcc_ptr++;
2857 2577
2858 len = strnlen(bcc_ptr, 1024); 2578 len = strnlen(bcc_ptr, 1024);
2859 if(ses->serverNOS) 2579 kfree(ses->serverNOS);
2860 kfree(ses->serverNOS);
2861 ses->serverNOS = 2580 ses->serverNOS =
2862 kzalloc(len + 1, 2581 kzalloc(len + 1,
2863 GFP_KERNEL); 2582 GFP_KERNEL);
@@ -2867,8 +2586,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
2867 bcc_ptr++; 2586 bcc_ptr++;
2868 2587
2869 len = strnlen(bcc_ptr, 1024); 2588 len = strnlen(bcc_ptr, 1024);
2870 if(ses->serverDomain) 2589 kfree(ses->serverDomain);
2871 kfree(ses->serverDomain);
2872 ses->serverDomain = 2590 ses->serverDomain =
2873 kzalloc(len + 1, 2591 kzalloc(len + 1,
2874 GFP_KERNEL); 2592 GFP_KERNEL);
@@ -2994,14 +2712,14 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses,
2994 SecurityBlob->LmChallengeResponse.Buffer = 0; 2712 SecurityBlob->LmChallengeResponse.Buffer = 0;
2995 2713
2996 SecurityBlob->NtChallengeResponse.Length = 2714 SecurityBlob->NtChallengeResponse.Length =
2997 cpu_to_le16(CIFS_SESSION_KEY_SIZE); 2715 cpu_to_le16(CIFS_SESS_KEY_SIZE);
2998 SecurityBlob->NtChallengeResponse.MaximumLength = 2716 SecurityBlob->NtChallengeResponse.MaximumLength =
2999 cpu_to_le16(CIFS_SESSION_KEY_SIZE); 2717 cpu_to_le16(CIFS_SESS_KEY_SIZE);
3000 memcpy(bcc_ptr, ntlm_session_key, CIFS_SESSION_KEY_SIZE); 2718 memcpy(bcc_ptr, ntlm_session_key, CIFS_SESS_KEY_SIZE);
3001 SecurityBlob->NtChallengeResponse.Buffer = 2719 SecurityBlob->NtChallengeResponse.Buffer =
3002 cpu_to_le32(SecurityBlobLength); 2720 cpu_to_le32(SecurityBlobLength);
3003 SecurityBlobLength += CIFS_SESSION_KEY_SIZE; 2721 SecurityBlobLength += CIFS_SESS_KEY_SIZE;
3004 bcc_ptr += CIFS_SESSION_KEY_SIZE; 2722 bcc_ptr += CIFS_SESS_KEY_SIZE;
3005 2723
3006 if (ses->capabilities & CAP_UNICODE) { 2724 if (ses->capabilities & CAP_UNICODE) {
3007 if (domain == NULL) { 2725 if (domain == NULL) {
@@ -3190,8 +2908,7 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses,
3190 bcc_ptr, 2908 bcc_ptr,
3191 remaining_words 2909 remaining_words
3192 - 1); 2910 - 1);
3193 if(ses->serverNOS) 2911 kfree(ses->serverNOS);
3194 kfree(ses->serverNOS);
3195 ses->serverNOS = 2912 ses->serverNOS =
3196 kzalloc(2 * (len + 1), 2913 kzalloc(2 * (len + 1),
3197 GFP_KERNEL); 2914 GFP_KERNEL);
@@ -3244,8 +2961,7 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses,
3244 if(ses->serverDomain) 2961 if(ses->serverDomain)
3245 kfree(ses->serverDomain); 2962 kfree(ses->serverDomain);
3246 ses->serverDomain = kzalloc(2, GFP_KERNEL); 2963 ses->serverDomain = kzalloc(2, GFP_KERNEL);
3247 if(ses->serverNOS) 2964 kfree(ses->serverNOS);
3248 kfree(ses->serverNOS);
3249 ses->serverNOS = kzalloc(2, GFP_KERNEL); 2965 ses->serverNOS = kzalloc(2, GFP_KERNEL);
3250 } 2966 }
3251 } else { /* ASCII */ 2967 } else { /* ASCII */
@@ -3263,8 +2979,7 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses,
3263 bcc_ptr++; 2979 bcc_ptr++;
3264 2980
3265 len = strnlen(bcc_ptr, 1024); 2981 len = strnlen(bcc_ptr, 1024);
3266 if(ses->serverNOS) 2982 kfree(ses->serverNOS);
3267 kfree(ses->serverNOS);
3268 ses->serverNOS = kzalloc(len+1,GFP_KERNEL); 2983 ses->serverNOS = kzalloc(len+1,GFP_KERNEL);
3269 strncpy(ses->serverNOS, bcc_ptr, len); 2984 strncpy(ses->serverNOS, bcc_ptr, len);
3270 bcc_ptr += len; 2985 bcc_ptr += len;
@@ -3340,22 +3055,33 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
3340 bcc_ptr = &pSMB->Password[0]; 3055 bcc_ptr = &pSMB->Password[0];
3341 if((ses->server->secMode) & SECMODE_USER) { 3056 if((ses->server->secMode) & SECMODE_USER) {
3342 pSMB->PasswordLength = cpu_to_le16(1); /* minimum */ 3057 pSMB->PasswordLength = cpu_to_le16(1); /* minimum */
3058 *bcc_ptr = 0; /* password is null byte */
3343 bcc_ptr++; /* skip password */ 3059 bcc_ptr++; /* skip password */
3060 /* already aligned so no need to do it below */
3344 } else { 3061 } else {
3345 pSMB->PasswordLength = cpu_to_le16(CIFS_SESSION_KEY_SIZE); 3062 pSMB->PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE);
3346 /* BB FIXME add code to fail this if NTLMv2 or Kerberos 3063 /* BB FIXME add code to fail this if NTLMv2 or Kerberos
3347 specified as required (when that support is added to 3064 specified as required (when that support is added to
3348 the vfs in the future) as only NTLM or the much 3065 the vfs in the future) as only NTLM or the much
3349 weaker LANMAN (which we do not send) is accepted 3066 weaker LANMAN (which we do not send by default) is accepted
3350 by Samba (not sure whether other servers allow 3067 by Samba (not sure whether other servers allow
3351 NTLMv2 password here) */ 3068 NTLMv2 password here) */
3069#ifdef CONFIG_CIFS_WEAK_PW_HASH
3070 if((extended_security & CIFSSEC_MAY_LANMAN) &&
3071 (ses->server->secType == LANMAN))
3072 calc_lanman_hash(ses, bcc_ptr);
3073 else
3074#endif /* CIFS_WEAK_PW_HASH */
3352 SMBNTencrypt(ses->password, 3075 SMBNTencrypt(ses->password,
3353 ses->server->cryptKey, 3076 ses->server->cryptKey,
3354 bcc_ptr); 3077 bcc_ptr);
3355 3078
3356 bcc_ptr += CIFS_SESSION_KEY_SIZE; 3079 bcc_ptr += CIFS_SESS_KEY_SIZE;
3357 *bcc_ptr = 0; 3080 if(ses->capabilities & CAP_UNICODE) {
3358 bcc_ptr++; /* align */ 3081 /* must align unicode strings */
3082 *bcc_ptr = 0; /* null byte password */
3083 bcc_ptr++;
3084 }
3359 } 3085 }
3360 3086
3361 if(ses->server->secMode & 3087 if(ses->server->secMode &
@@ -3429,7 +3155,10 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
3429 } 3155 }
3430 /* else do not bother copying these informational fields */ 3156 /* else do not bother copying these informational fields */
3431 } 3157 }
3432 tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport); 3158 if(smb_buffer_response->WordCount == 3)
3159 tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport);
3160 else
3161 tcon->Flags = 0;
3433 cFYI(1, ("Tcon flags: 0x%x ", tcon->Flags)); 3162 cFYI(1, ("Tcon flags: 0x%x ", tcon->Flags));
3434 } else if ((rc == 0) && tcon == NULL) { 3163 } else if ((rc == 0) && tcon == NULL) {
3435 /* all we need to save for IPC$ connection */ 3164 /* all we need to save for IPC$ connection */
@@ -3494,7 +3223,7 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
3494 struct nls_table * nls_info) 3223 struct nls_table * nls_info)
3495{ 3224{
3496 int rc = 0; 3225 int rc = 0;
3497 char ntlm_session_key[CIFS_SESSION_KEY_SIZE]; 3226 char ntlm_session_key[CIFS_SESS_KEY_SIZE];
3498 int ntlmv2_flag = FALSE; 3227 int ntlmv2_flag = FALSE;
3499 int first_time = 0; 3228 int first_time = 0;
3500 3229
@@ -3526,20 +3255,13 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
3526 pSesInfo->server->secMode, 3255 pSesInfo->server->secMode,
3527 pSesInfo->server->capabilities, 3256 pSesInfo->server->capabilities,
3528 pSesInfo->server->timeZone)); 3257 pSesInfo->server->timeZone));
3529#ifdef CONFIG_CIFS_EXPERIMENTAL 3258 if(experimEnabled < 2)
3530 if(experimEnabled > 1) 3259 rc = CIFS_SessSetup(xid, pSesInfo,
3531 rc = CIFS_SessSetup(xid, pSesInfo, CIFS_NTLM /* type */, 3260 first_time, nls_info);
3532 &ntlmv2_flag, nls_info); 3261 else if (extended_security
3533 else
3534#endif
3535 if (extended_security
3536 && (pSesInfo->capabilities & CAP_EXTENDED_SECURITY) 3262 && (pSesInfo->capabilities & CAP_EXTENDED_SECURITY)
3537 && (pSesInfo->server->secType == NTLMSSP)) { 3263 && (pSesInfo->server->secType == NTLMSSP)) {
3538 cFYI(1, ("New style sesssetup")); 3264 rc = -EOPNOTSUPP;
3539 rc = CIFSSpnegoSessSetup(xid, pSesInfo,
3540 NULL /* security blob */,
3541 0 /* blob length */,
3542 nls_info);
3543 } else if (extended_security 3265 } else if (extended_security
3544 && (pSesInfo->capabilities & CAP_EXTENDED_SECURITY) 3266 && (pSesInfo->capabilities & CAP_EXTENDED_SECURITY)
3545 && (pSesInfo->server->secType == RawNTLMSSP)) { 3267 && (pSesInfo->server->secType == RawNTLMSSP)) {
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 82315edc77..ba4cbe9b06 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -113,7 +113,7 @@ cifs_bp_rename_retry:
113 full_path[namelen+2] = 0; 113 full_path[namelen+2] = 0;
114BB remove above eight lines BB */ 114BB remove above eight lines BB */
115 115
116/* Inode operations in similar order to how they appear in the Linux file fs.h */ 116/* Inode operations in similar order to how they appear in Linux file fs.h */
117 117
118int 118int
119cifs_create(struct inode *inode, struct dentry *direntry, int mode, 119cifs_create(struct inode *inode, struct dentry *direntry, int mode,
@@ -178,11 +178,14 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
178 FreeXid(xid); 178 FreeXid(xid);
179 return -ENOMEM; 179 return -ENOMEM;
180 } 180 }
181 181 if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
182 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, 182 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
183 desiredAccess, CREATE_NOT_DIR, 183 desiredAccess, CREATE_NOT_DIR,
184 &fileHandle, &oplock, buf, cifs_sb->local_nls, 184 &fileHandle, &oplock, buf, cifs_sb->local_nls,
185 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); 185 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
186 else
187 rc = -EIO; /* no NT SMB support fall into legacy open below */
188
186 if(rc == -EIO) { 189 if(rc == -EIO) {
187 /* old server, retry the open legacy style */ 190 /* old server, retry the open legacy style */
188 rc = SMBLegacyOpen(xid, pTcon, full_path, disposition, 191 rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
@@ -191,7 +194,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
191 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); 194 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
192 } 195 }
193 if (rc) { 196 if (rc) {
194 cFYI(1, ("cifs_create returned 0x%x ", rc)); 197 cFYI(1, ("cifs_create returned 0x%x", rc));
195 } else { 198 } else {
196 /* If Open reported that we actually created a file 199 /* If Open reported that we actually created a file
197 then we now have to set the mode if possible */ 200 then we now have to set the mode if possible */
@@ -369,6 +372,10 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
369 cifs_sb->mnt_cifs_flags & 372 cifs_sb->mnt_cifs_flags &
370 CIFS_MOUNT_MAP_SPECIAL_CHR); 373 CIFS_MOUNT_MAP_SPECIAL_CHR);
371 374
375 /* BB FIXME - add handling for backlevel servers
376 which need legacy open and check for all
377 calls to SMBOpen for fallback to
378 SMBLeagcyOpen */
372 if(!rc) { 379 if(!rc) {
373 /* BB Do not bother to decode buf since no 380 /* BB Do not bother to decode buf since no
374 local inode yet to put timestamps in, 381 local inode yet to put timestamps in,
diff --git a/fs/cifs/fcntl.c b/fs/cifs/fcntl.c
index 633a938113..d91a3d44e9 100644
--- a/fs/cifs/fcntl.c
+++ b/fs/cifs/fcntl.c
@@ -91,14 +91,14 @@ int cifs_dir_notify(struct file * file, unsigned long arg)
91 if(full_path == NULL) { 91 if(full_path == NULL) {
92 rc = -ENOMEM; 92 rc = -ENOMEM;
93 } else { 93 } else {
94 cERROR(1,("cifs dir notify on file %s with arg 0x%lx",full_path,arg)); /* BB removeme BB */ 94 cFYI(1,("dir notify on file %s Arg 0x%lx",full_path,arg));
95 rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_OPEN, 95 rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_OPEN,
96 GENERIC_READ | SYNCHRONIZE, 0 /* create options */, 96 GENERIC_READ | SYNCHRONIZE, 0 /* create options */,
97 &netfid, &oplock,NULL, cifs_sb->local_nls, 97 &netfid, &oplock,NULL, cifs_sb->local_nls,
98 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); 98 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
99 /* BB fixme - add this handle to a notify handle list */ 99 /* BB fixme - add this handle to a notify handle list */
100 if(rc) { 100 if(rc) {
101 cERROR(1,("Could not open directory for notify")); /* BB remove BB */ 101 cFYI(1,("Could not open directory for notify"));
102 } else { 102 } else {
103 filter = convert_to_cifs_notify_flags(arg); 103 filter = convert_to_cifs_notify_flags(arg);
104 if(filter != 0) { 104 if(filter != 0) {
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index b4a18c1cab..e9c1573f6a 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -110,7 +110,6 @@ static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
110 &pCifsInode->openFileList); 110 &pCifsInode->openFileList);
111 } 111 }
112 write_unlock(&GlobalSMBSeslock); 112 write_unlock(&GlobalSMBSeslock);
113 write_unlock(&file->f_owner.lock);
114 if (pCifsInode->clientCanCacheRead) { 113 if (pCifsInode->clientCanCacheRead) {
115 /* we have the inode open somewhere else 114 /* we have the inode open somewhere else
116 no need to discard cache data */ 115 no need to discard cache data */
@@ -201,7 +200,7 @@ int cifs_open(struct inode *inode, struct file *file)
201 } else { 200 } else {
202 if (file->f_flags & O_EXCL) 201 if (file->f_flags & O_EXCL)
203 cERROR(1, ("could not find file instance for " 202 cERROR(1, ("could not find file instance for "
204 "new file %p ", file)); 203 "new file %p", file));
205 } 204 }
206 } 205 }
207 206
@@ -260,10 +259,15 @@ int cifs_open(struct inode *inode, struct file *file)
260 rc = -ENOMEM; 259 rc = -ENOMEM;
261 goto out; 260 goto out;
262 } 261 }
263 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess, 262
264 CREATE_NOT_DIR, &netfid, &oplock, buf, 263 if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
264 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
265 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
265 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags 266 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
266 & CIFS_MOUNT_MAP_SPECIAL_CHR); 267 & CIFS_MOUNT_MAP_SPECIAL_CHR);
268 else
269 rc = -EIO; /* no NT SMB support fall into legacy open below */
270
267 if (rc == -EIO) { 271 if (rc == -EIO) {
268 /* Old server, try legacy style OpenX */ 272 /* Old server, try legacy style OpenX */
269 rc = SMBLegacyOpen(xid, pTcon, full_path, disposition, 273 rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
@@ -272,7 +276,7 @@ int cifs_open(struct inode *inode, struct file *file)
272 & CIFS_MOUNT_MAP_SPECIAL_CHR); 276 & CIFS_MOUNT_MAP_SPECIAL_CHR);
273 } 277 }
274 if (rc) { 278 if (rc) {
275 cFYI(1, ("cifs_open returned 0x%x ", rc)); 279 cFYI(1, ("cifs_open returned 0x%x", rc));
276 goto out; 280 goto out;
277 } 281 }
278 file->private_data = 282 file->private_data =
@@ -282,7 +286,6 @@ int cifs_open(struct inode *inode, struct file *file)
282 goto out; 286 goto out;
283 } 287 }
284 pCifsFile = cifs_init_private(file->private_data, inode, file, netfid); 288 pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
285 write_lock(&file->f_owner.lock);
286 write_lock(&GlobalSMBSeslock); 289 write_lock(&GlobalSMBSeslock);
287 list_add(&pCifsFile->tlist, &pTcon->openFileList); 290 list_add(&pCifsFile->tlist, &pTcon->openFileList);
288 291
@@ -293,7 +296,6 @@ int cifs_open(struct inode *inode, struct file *file)
293 &oplock, buf, full_path, xid); 296 &oplock, buf, full_path, xid);
294 } else { 297 } else {
295 write_unlock(&GlobalSMBSeslock); 298 write_unlock(&GlobalSMBSeslock);
296 write_unlock(&file->f_owner.lock);
297 } 299 }
298 300
299 if (oplock & CIFS_CREATE_ACTION) { 301 if (oplock & CIFS_CREATE_ACTION) {
@@ -409,8 +411,8 @@ static int cifs_reopen_file(struct inode *inode, struct file *file,
409 CIFS_MOUNT_MAP_SPECIAL_CHR); 411 CIFS_MOUNT_MAP_SPECIAL_CHR);
410 if (rc) { 412 if (rc) {
411 up(&pCifsFile->fh_sem); 413 up(&pCifsFile->fh_sem);
412 cFYI(1, ("cifs_open returned 0x%x ", rc)); 414 cFYI(1, ("cifs_open returned 0x%x", rc));
413 cFYI(1, ("oplock: %d ", oplock)); 415 cFYI(1, ("oplock: %d", oplock));
414 } else { 416 } else {
415 pCifsFile->netfid = netfid; 417 pCifsFile->netfid = netfid;
416 pCifsFile->invalidHandle = FALSE; 418 pCifsFile->invalidHandle = FALSE;
@@ -472,7 +474,6 @@ int cifs_close(struct inode *inode, struct file *file)
472 pTcon = cifs_sb->tcon; 474 pTcon = cifs_sb->tcon;
473 if (pSMBFile) { 475 if (pSMBFile) {
474 pSMBFile->closePend = TRUE; 476 pSMBFile->closePend = TRUE;
475 write_lock(&file->f_owner.lock);
476 if (pTcon) { 477 if (pTcon) {
477 /* no sense reconnecting to close a file that is 478 /* no sense reconnecting to close a file that is
478 already closed */ 479 already closed */
@@ -487,23 +488,18 @@ int cifs_close(struct inode *inode, struct file *file)
487 the struct would be in each open file, 488 the struct would be in each open file,
488 but this should give enough time to 489 but this should give enough time to
489 clear the socket */ 490 clear the socket */
490 write_unlock(&file->f_owner.lock);
491 cERROR(1,("close with pending writes")); 491 cERROR(1,("close with pending writes"));
492 msleep(timeout); 492 msleep(timeout);
493 write_lock(&file->f_owner.lock);
494 timeout *= 4; 493 timeout *= 4;
495 } 494 }
496 write_unlock(&file->f_owner.lock);
497 rc = CIFSSMBClose(xid, pTcon, 495 rc = CIFSSMBClose(xid, pTcon,
498 pSMBFile->netfid); 496 pSMBFile->netfid);
499 write_lock(&file->f_owner.lock);
500 } 497 }
501 } 498 }
502 write_lock(&GlobalSMBSeslock); 499 write_lock(&GlobalSMBSeslock);
503 list_del(&pSMBFile->flist); 500 list_del(&pSMBFile->flist);
504 list_del(&pSMBFile->tlist); 501 list_del(&pSMBFile->tlist);
505 write_unlock(&GlobalSMBSeslock); 502 write_unlock(&GlobalSMBSeslock);
506 write_unlock(&file->f_owner.lock);
507 kfree(pSMBFile->search_resume_name); 503 kfree(pSMBFile->search_resume_name);
508 kfree(file->private_data); 504 kfree(file->private_data);
509 file->private_data = NULL; 505 file->private_data = NULL;
@@ -531,7 +527,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
531 (struct cifsFileInfo *)file->private_data; 527 (struct cifsFileInfo *)file->private_data;
532 char *ptmp; 528 char *ptmp;
533 529
534 cFYI(1, ("Closedir inode = 0x%p with ", inode)); 530 cFYI(1, ("Closedir inode = 0x%p", inode));
535 531
536 xid = GetXid(); 532 xid = GetXid();
537 533
@@ -605,7 +601,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
605 } 601 }
606 if (pfLock->fl_flags & FL_ACCESS) 602 if (pfLock->fl_flags & FL_ACCESS)
607 cFYI(1, ("Process suspended by mandatory locking - " 603 cFYI(1, ("Process suspended by mandatory locking - "
608 "not implemented yet ")); 604 "not implemented yet"));
609 if (pfLock->fl_flags & FL_LEASE) 605 if (pfLock->fl_flags & FL_LEASE)
610 cFYI(1, ("Lease on file - not implemented yet")); 606 cFYI(1, ("Lease on file - not implemented yet"));
611 if (pfLock->fl_flags & 607 if (pfLock->fl_flags &
@@ -1375,7 +1371,7 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
1375 1371
1376 xid = GetXid(); 1372 xid = GetXid();
1377 1373
1378 cFYI(1, ("Sync file - name: %s datasync: 0x%x ", 1374 cFYI(1, ("Sync file - name: %s datasync: 0x%x",
1379 dentry->d_name.name, datasync)); 1375 dentry->d_name.name, datasync));
1380 1376
1381 rc = filemap_fdatawrite(inode->i_mapping); 1377 rc = filemap_fdatawrite(inode->i_mapping);
@@ -1404,7 +1400,7 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
1404/* fill in rpages then 1400/* fill in rpages then
1405 result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */ 1401 result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1406 1402
1407/* cFYI(1, ("rpages is %d for sync page of Index %ld ", rpages, index)); 1403/* cFYI(1, ("rpages is %d for sync page of Index %ld", rpages, index));
1408 1404
1409#if 0 1405#if 0
1410 if (rc < 0) 1406 if (rc < 0)
@@ -1836,7 +1832,7 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
1836 if (rc < 0) 1832 if (rc < 0)
1837 goto io_error; 1833 goto io_error;
1838 else 1834 else
1839 cFYI(1, ("Bytes read %d ",rc)); 1835 cFYI(1, ("Bytes read %d",rc));
1840 1836
1841 file->f_dentry->d_inode->i_atime = 1837 file->f_dentry->d_inode->i_atime =
1842 current_fs_time(file->f_dentry->d_inode->i_sb); 1838 current_fs_time(file->f_dentry->d_inode->i_sb);
@@ -1957,3 +1953,19 @@ struct address_space_operations cifs_addr_ops = {
1957 /* .sync_page = cifs_sync_page, */ 1953 /* .sync_page = cifs_sync_page, */
1958 /* .direct_IO = */ 1954 /* .direct_IO = */
1959}; 1955};
1956
1957/*
1958 * cifs_readpages requires the server to support a buffer large enough to
1959 * contain the header plus one complete page of data. Otherwise, we need
1960 * to leave cifs_readpages out of the address space operations.
1961 */
1962struct address_space_operations cifs_addr_ops_smallbuf = {
1963 .readpage = cifs_readpage,
1964 .writepage = cifs_writepage,
1965 .writepages = cifs_writepages,
1966 .prepare_write = cifs_prepare_write,
1967 .commit_write = cifs_commit_write,
1968 .set_page_dirty = __set_page_dirty_nobuffers,
1969 /* .sync_page = cifs_sync_page, */
1970 /* .direct_IO = */
1971};
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 4093764ef4..b88147c1dc 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -41,7 +41,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
41 char *tmp_path; 41 char *tmp_path;
42 42
43 pTcon = cifs_sb->tcon; 43 pTcon = cifs_sb->tcon;
44 cFYI(1, ("Getting info on %s ", search_path)); 44 cFYI(1, ("Getting info on %s", search_path));
45 /* could have done a find first instead but this returns more info */ 45 /* could have done a find first instead but this returns more info */
46 rc = CIFSSMBUnixQPathInfo(xid, pTcon, search_path, &findData, 46 rc = CIFSSMBUnixQPathInfo(xid, pTcon, search_path, &findData,
47 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & 47 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
@@ -97,9 +97,9 @@ int cifs_get_inode_info_unix(struct inode **pinode,
97 inode = *pinode; 97 inode = *pinode;
98 cifsInfo = CIFS_I(inode); 98 cifsInfo = CIFS_I(inode);
99 99
100 cFYI(1, ("Old time %ld ", cifsInfo->time)); 100 cFYI(1, ("Old time %ld", cifsInfo->time));
101 cifsInfo->time = jiffies; 101 cifsInfo->time = jiffies;
102 cFYI(1, ("New time %ld ", cifsInfo->time)); 102 cFYI(1, ("New time %ld", cifsInfo->time));
103 /* this is ok to set on every inode revalidate */ 103 /* this is ok to set on every inode revalidate */
104 atomic_set(&cifsInfo->inUse,1); 104 atomic_set(&cifsInfo->inUse,1);
105 105
@@ -180,11 +180,12 @@ int cifs_get_inode_info_unix(struct inode **pinode,
180 else /* not direct, send byte range locks */ 180 else /* not direct, send byte range locks */
181 inode->i_fop = &cifs_file_ops; 181 inode->i_fop = &cifs_file_ops;
182 182
183 inode->i_data.a_ops = &cifs_addr_ops;
184 /* check if server can support readpages */ 183 /* check if server can support readpages */
185 if(pTcon->ses->server->maxBuf < 184 if(pTcon->ses->server->maxBuf <
186 4096 + MAX_CIFS_HDR_SIZE) 185 PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
187 inode->i_data.a_ops->readpages = NULL; 186 inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
187 else
188 inode->i_data.a_ops = &cifs_addr_ops;
188 } else if (S_ISDIR(inode->i_mode)) { 189 } else if (S_ISDIR(inode->i_mode)) {
189 cFYI(1, ("Directory inode")); 190 cFYI(1, ("Directory inode"));
190 inode->i_op = &cifs_dir_inode_ops; 191 inode->i_op = &cifs_dir_inode_ops;
@@ -421,23 +422,23 @@ int cifs_get_inode_info(struct inode **pinode,
421 inode = *pinode; 422 inode = *pinode;
422 cifsInfo = CIFS_I(inode); 423 cifsInfo = CIFS_I(inode);
423 cifsInfo->cifsAttrs = attr; 424 cifsInfo->cifsAttrs = attr;
424 cFYI(1, ("Old time %ld ", cifsInfo->time)); 425 cFYI(1, ("Old time %ld", cifsInfo->time));
425 cifsInfo->time = jiffies; 426 cifsInfo->time = jiffies;
426 cFYI(1, ("New time %ld ", cifsInfo->time)); 427 cFYI(1, ("New time %ld", cifsInfo->time));
427 428
428 /* blksize needs to be multiple of two. So safer to default to 429 /* blksize needs to be multiple of two. So safer to default to
429 blksize and blkbits set in superblock so 2**blkbits and blksize 430 blksize and blkbits set in superblock so 2**blkbits and blksize
430 will match rather than setting to: 431 will match rather than setting to:
431 (pTcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & 0xFFFFFE00;*/ 432 (pTcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & 0xFFFFFE00;*/
432 433
433 /* Linux can not store file creation time unfortunately so we ignore it */ 434 /* Linux can not store file creation time so ignore it */
434 inode->i_atime = 435 inode->i_atime =
435 cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastAccessTime)); 436 cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastAccessTime));
436 inode->i_mtime = 437 inode->i_mtime =
437 cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastWriteTime)); 438 cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastWriteTime));
438 inode->i_ctime = 439 inode->i_ctime =
439 cifs_NTtimeToUnix(le64_to_cpu(pfindData->ChangeTime)); 440 cifs_NTtimeToUnix(le64_to_cpu(pfindData->ChangeTime));
440 cFYI(0, ("Attributes came in as 0x%x ", attr)); 441 cFYI(0, ("Attributes came in as 0x%x", attr));
441 442
442 /* set default mode. will override for dirs below */ 443 /* set default mode. will override for dirs below */
443 if (atomic_read(&cifsInfo->inUse) == 0) 444 if (atomic_read(&cifsInfo->inUse) == 0)
@@ -519,10 +520,11 @@ int cifs_get_inode_info(struct inode **pinode,
519 else /* not direct, send byte range locks */ 520 else /* not direct, send byte range locks */
520 inode->i_fop = &cifs_file_ops; 521 inode->i_fop = &cifs_file_ops;
521 522
522 inode->i_data.a_ops = &cifs_addr_ops;
523 if(pTcon->ses->server->maxBuf < 523 if(pTcon->ses->server->maxBuf <
524 4096 + MAX_CIFS_HDR_SIZE) 524 PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
525 inode->i_data.a_ops->readpages = NULL; 525 inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
526 else
527 inode->i_data.a_ops = &cifs_addr_ops;
526 } else if (S_ISDIR(inode->i_mode)) { 528 } else if (S_ISDIR(inode->i_mode)) {
527 cFYI(1, ("Directory inode")); 529 cFYI(1, ("Directory inode"));
528 inode->i_op = &cifs_dir_inode_ops; 530 inode->i_op = &cifs_dir_inode_ops;
@@ -731,7 +733,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
731 rc = CIFSSMBMkDir(xid, pTcon, full_path, cifs_sb->local_nls, 733 rc = CIFSSMBMkDir(xid, pTcon, full_path, cifs_sb->local_nls,
732 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); 734 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
733 if (rc) { 735 if (rc) {
734 cFYI(1, ("cifs_mkdir returned 0x%x ", rc)); 736 cFYI(1, ("cifs_mkdir returned 0x%x", rc));
735 d_drop(direntry); 737 d_drop(direntry);
736 } else { 738 } else {
737 inode->i_nlink++; 739 inode->i_nlink++;
@@ -798,7 +800,7 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
798 char *full_path = NULL; 800 char *full_path = NULL;
799 struct cifsInodeInfo *cifsInode; 801 struct cifsInodeInfo *cifsInode;
800 802
801 cFYI(1, ("cifs_rmdir, inode = 0x%p with ", inode)); 803 cFYI(1, ("cifs_rmdir, inode = 0x%p", inode));
802 804
803 xid = GetXid(); 805 xid = GetXid();
804 806
@@ -1121,7 +1123,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
1121 1123
1122 xid = GetXid(); 1124 xid = GetXid();
1123 1125
1124 cFYI(1, ("In cifs_setattr, name = %s attrs->iavalid 0x%x ", 1126 cFYI(1, ("setattr on file %s attrs->iavalid 0x%x",
1125 direntry->d_name.name, attrs->ia_valid)); 1127 direntry->d_name.name, attrs->ia_valid));
1126 1128
1127 cifs_sb = CIFS_SB(direntry->d_inode->i_sb); 1129 cifs_sb = CIFS_SB(direntry->d_inode->i_sb);
@@ -1157,6 +1159,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
1157 when the local oplock break takes longer to flush 1159 when the local oplock break takes longer to flush
1158 writebehind data than the SMB timeout for the SetPathInfo 1160 writebehind data than the SMB timeout for the SetPathInfo
1159 request would allow */ 1161 request would allow */
1162
1160 open_file = find_writable_file(cifsInode); 1163 open_file = find_writable_file(cifsInode);
1161 if (open_file) { 1164 if (open_file) {
1162 __u16 nfid = open_file->netfid; 1165 __u16 nfid = open_file->netfid;
@@ -1289,7 +1292,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
1289 it may be useful to Windows - but we do 1292 it may be useful to Windows - but we do
1290 not want to set ctime unless some other 1293 not want to set ctime unless some other
1291 timestamp is changing */ 1294 timestamp is changing */
1292 cFYI(1, ("CIFS - CTIME changed ")); 1295 cFYI(1, ("CIFS - CTIME changed"));
1293 time_buf.ChangeTime = 1296 time_buf.ChangeTime =
1294 cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_ctime)); 1297 cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_ctime));
1295 } else 1298 } else
@@ -1356,7 +1359,7 @@ cifs_setattr_exit:
1356 1359
1357void cifs_delete_inode(struct inode *inode) 1360void cifs_delete_inode(struct inode *inode)
1358{ 1361{
1359 cFYI(1, ("In cifs_delete_inode, inode = 0x%p ", inode)); 1362 cFYI(1, ("In cifs_delete_inode, inode = 0x%p", inode));
1360 /* may have to add back in if and when safe distributed caching of 1363 /* may have to add back in if and when safe distributed caching of
1361 directories added e.g. via FindNotify */ 1364 directories added e.g. via FindNotify */
1362} 1365}
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 2ec99f8331..a57f5d6e62 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -167,7 +167,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
167 return -ENOMEM; 167 return -ENOMEM;
168 } 168 }
169 169
170 cFYI(1, ("Full path: %s ", full_path)); 170 cFYI(1, ("Full path: %s", full_path));
171 cFYI(1, ("symname is %s", symname)); 171 cFYI(1, ("symname is %s", symname));
172 172
173 /* BB what if DFS and this volume is on different share? BB */ 173 /* BB what if DFS and this volume is on different share? BB */
@@ -186,8 +186,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
186 inode->i_sb,xid); 186 inode->i_sb,xid);
187 187
188 if (rc != 0) { 188 if (rc != 0) {
189 cFYI(1, 189 cFYI(1, ("Create symlink ok, getinodeinfo fail rc = %d",
190 ("Create symlink worked but get_inode_info failed with rc = %d ",
191 rc)); 190 rc));
192 } else { 191 } else {
193 if (pTcon->nocase) 192 if (pTcon->nocase)
@@ -289,7 +288,7 @@ cifs_readlink(struct dentry *direntry, char __user *pBuffer, int buflen)
289 else { 288 else {
290 cFYI(1,("num referral: %d",num_referrals)); 289 cFYI(1,("num referral: %d",num_referrals));
291 if(referrals) { 290 if(referrals) {
292 cFYI(1,("referral string: %s ",referrals)); 291 cFYI(1,("referral string: %s",referrals));
293 strncpy(tmpbuffer, referrals, len-1); 292 strncpy(tmpbuffer, referrals, len-1);
294 } 293 }
295 } 294 }
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index fafd056426..22c937e588 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -101,6 +101,7 @@ sesInfoFree(struct cifsSesInfo *buf_to_free)
101 kfree(buf_to_free->serverDomain); 101 kfree(buf_to_free->serverDomain);
102 kfree(buf_to_free->serverNOS); 102 kfree(buf_to_free->serverNOS);
103 kfree(buf_to_free->password); 103 kfree(buf_to_free->password);
104 kfree(buf_to_free->domainName);
104 kfree(buf_to_free); 105 kfree(buf_to_free);
105} 106}
106 107
@@ -499,11 +500,12 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
499 if(pSMBr->ByteCount > sizeof(struct file_notify_information)) { 500 if(pSMBr->ByteCount > sizeof(struct file_notify_information)) {
500 data_offset = le32_to_cpu(pSMBr->DataOffset); 501 data_offset = le32_to_cpu(pSMBr->DataOffset);
501 502
502 pnotify = (struct file_notify_information *)((char *)&pSMBr->hdr.Protocol 503 pnotify = (struct file_notify_information *)
503 + data_offset); 504 ((char *)&pSMBr->hdr.Protocol + data_offset);
504 cFYI(1,("dnotify on %s with action: 0x%x",pnotify->FileName, 505 cFYI(1,("dnotify on %s Action: 0x%x",pnotify->FileName,
505 pnotify->Action)); /* BB removeme BB */ 506 pnotify->Action)); /* BB removeme BB */
506 /* cifs_dump_mem("Received notify Data is: ",buf,sizeof(struct smb_hdr)+60); */ 507 /* cifs_dump_mem("Rcvd notify Data: ",buf,
508 sizeof(struct smb_hdr)+60); */
507 return TRUE; 509 return TRUE;
508 } 510 }
509 if(pSMBr->hdr.Status.CifsError) { 511 if(pSMBr->hdr.Status.CifsError) {
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 5de74d216f..b66eff5dc6 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -84,11 +84,11 @@ static const struct smb_to_posix_error mapping_table_ERRDOS[] = {
84 84
85static const struct smb_to_posix_error mapping_table_ERRSRV[] = { 85static const struct smb_to_posix_error mapping_table_ERRSRV[] = {
86 {ERRerror, -EIO}, 86 {ERRerror, -EIO},
87 {ERRbadpw, -EPERM}, 87 {ERRbadpw, -EACCES}, /* was EPERM */
88 {ERRbadtype, -EREMOTE}, 88 {ERRbadtype, -EREMOTE},
89 {ERRaccess, -EACCES}, 89 {ERRaccess, -EACCES},
90 {ERRinvtid, -ENXIO}, 90 {ERRinvtid, -ENXIO},
91 {ERRinvnetname, -ENODEV}, 91 {ERRinvnetname, -ENXIO},
92 {ERRinvdevice, -ENXIO}, 92 {ERRinvdevice, -ENXIO},
93 {ERRqfull, -ENOSPC}, 93 {ERRqfull, -ENOSPC},
94 {ERRqtoobig, -ENOSPC}, 94 {ERRqtoobig, -ENOSPC},
diff --git a/fs/cifs/ntlmssp.c b/fs/cifs/ntlmssp.c
deleted file mode 100644
index 115359cc7a..0000000000
--- a/fs/cifs/ntlmssp.c
+++ /dev/null
@@ -1,143 +0,0 @@
1/*
2 * fs/cifs/ntlmssp.h
3 *
4 * Copyright (c) International Business Machines Corp., 2006
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 *
7 * This library is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU Lesser General Public License as published
9 * by the Free Software Foundation; either version 2.1 of the License, or
10 * (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
15 * the GNU Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public License
18 * along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include "cifspdu.h"
23#include "cifsglob.h"
24#include "cifsproto.h"
25#include "cifs_unicode.h"
26#include "cifs_debug.h"
27#include "ntlmssp.h"
28#include "nterr.h"
29
30#ifdef CONFIG_CIFS_EXPERIMENTAL
31static __u32 cifs_ssetup_hdr(struct cifsSesInfo *ses, SESSION_SETUP_ANDX *pSMB)
32{
33 __u32 capabilities = 0;
34
35 /* init fields common to all four types of SessSetup */
36 /* note that header is initialized to zero in header_assemble */
37 pSMB->req.AndXCommand = 0xFF;
38 pSMB->req.MaxBufferSize = cpu_to_le16(ses->server->maxBuf);
39 pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
40
41 /* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */
42
43 /* BB verify whether signing required on neg or just on auth frame
44 (and NTLM case) */
45
46 capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
47 CAP_LARGE_WRITE_X | CAP_LARGE_READ_X;
48
49 if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
50 pSMB->req.hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
51
52 if (ses->capabilities & CAP_UNICODE) {
53 pSMB->req.hdr.Flags2 |= SMBFLG2_UNICODE;
54 capabilities |= CAP_UNICODE;
55 }
56 if (ses->capabilities & CAP_STATUS32) {
57 pSMB->req.hdr.Flags2 |= SMBFLG2_ERR_STATUS;
58 capabilities |= CAP_STATUS32;
59 }
60 if (ses->capabilities & CAP_DFS) {
61 pSMB->req.hdr.Flags2 |= SMBFLG2_DFS;
62 capabilities |= CAP_DFS;
63 }
64
65 /* BB check whether to init vcnum BB */
66 return capabilities;
67}
68int
69CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, const int type,
70 int * pNTLMv2_flg, const struct nls_table *nls_cp)
71{
72 int rc = 0;
73 int wct;
74 struct smb_hdr *smb_buffer;
75 char *bcc_ptr;
76 SESSION_SETUP_ANDX *pSMB;
77 __u32 capabilities;
78
79 if(ses == NULL)
80 return -EINVAL;
81
82 cFYI(1,("SStp type: %d",type));
83 if(type < CIFS_NTLM) {
84#ifndef CONFIG_CIFS_WEAK_PW_HASH
85 /* LANMAN and plaintext are less secure and off by default.
86 So we make this explicitly be turned on in kconfig (in the
87 build) and turned on at runtime (changed from the default)
88 in proc/fs/cifs or via mount parm. Unfortunately this is
89 needed for old Win (e.g. Win95), some obscure NAS and OS/2 */
90 return -EOPNOTSUPP;
91#endif
92 wct = 10; /* lanman 2 style sessionsetup */
93 } else if(type < CIFS_NTLMSSP_NEG)
94 wct = 13; /* old style NTLM sessionsetup */
95 else /* same size for negotiate or auth, NTLMSSP or extended security */
96 wct = 12;
97
98 rc = small_smb_init_no_tc(SMB_COM_SESSION_SETUP_ANDX, wct, ses,
99 (void **)&smb_buffer);
100 if(rc)
101 return rc;
102
103 pSMB = (SESSION_SETUP_ANDX *)smb_buffer;
104
105 capabilities = cifs_ssetup_hdr(ses, pSMB);
106 bcc_ptr = pByteArea(smb_buffer);
107 if(type > CIFS_NTLM) {
108 pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
109 capabilities |= CAP_EXTENDED_SECURITY;
110 pSMB->req.Capabilities = cpu_to_le32(capabilities);
111 /* BB set password lengths */
112 } else if(type < CIFS_NTLM) /* lanman */ {
113 /* no capabilities flags in old lanman negotiation */
114 /* pSMB->old_req.PasswordLength = */ /* BB fixme BB */
115 } else /* type CIFS_NTLM */ {
116 pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
117 pSMB->req_no_secext.CaseInsensitivePasswordLength =
118 cpu_to_le16(CIFS_SESSION_KEY_SIZE);
119 pSMB->req_no_secext.CaseSensitivePasswordLength =
120 cpu_to_le16(CIFS_SESSION_KEY_SIZE);
121 }
122
123
124 /* copy session key */
125
126 /* if Unicode, align strings to two byte boundary */
127
128 /* copy user name */ /* BB Do we need to special case null user name? */
129
130 /* copy domain name */
131
132 /* copy Linux version */
133
134 /* copy network operating system name */
135
136 /* update bcc and smb buffer length */
137
138/* rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buf_type, 0); */
139 /* SMB request buf freed in SendReceive2 */
140
141 return rc;
142}
143#endif /* CONFIG_CIFS_EXPERIMENTAL */
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index b689c50351..03bbcb3779 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -21,6 +21,7 @@
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */ 22 */
23#include <linux/fs.h> 23#include <linux/fs.h>
24#include <linux/pagemap.h>
24#include <linux/stat.h> 25#include <linux/stat.h>
25#include <linux/smp_lock.h> 26#include <linux/smp_lock.h>
26#include "cifspdu.h" 27#include "cifspdu.h"
@@ -31,8 +32,8 @@
31#include "cifs_fs_sb.h" 32#include "cifs_fs_sb.h"
32#include "cifsfs.h" 33#include "cifsfs.h"
33 34
34/* BB fixme - add debug wrappers around this function to disable it fixme BB */ 35#ifdef CONFIG_CIFS_DEBUG2
35/* static void dump_cifs_file_struct(struct file *file, char *label) 36static void dump_cifs_file_struct(struct file *file, char *label)
36{ 37{
37 struct cifsFileInfo * cf; 38 struct cifsFileInfo * cf;
38 39
@@ -53,7 +54,8 @@
53 } 54 }
54 55
55 } 56 }
56} */ 57}
58#endif /* DEBUG2 */
57 59
58/* Returns one if new inode created (which therefore needs to be hashed) */ 60/* Returns one if new inode created (which therefore needs to be hashed) */
59/* Might check in the future if inode number changed so we can rehash inode */ 61/* Might check in the future if inode number changed so we can rehash inode */
@@ -107,32 +109,52 @@ static int construct_dentry(struct qstr *qstring, struct file *file,
107 return rc; 109 return rc;
108} 110}
109 111
110static void fill_in_inode(struct inode *tmp_inode, 112static void fill_in_inode(struct inode *tmp_inode, int new_buf_type,
111 FILE_DIRECTORY_INFO *pfindData, int *pobject_type, int isNewInode) 113 char * buf, int *pobject_type, int isNewInode)
112{ 114{
113 loff_t local_size; 115 loff_t local_size;
114 struct timespec local_mtime; 116 struct timespec local_mtime;
115 117
116 struct cifsInodeInfo *cifsInfo = CIFS_I(tmp_inode); 118 struct cifsInodeInfo *cifsInfo = CIFS_I(tmp_inode);
117 struct cifs_sb_info *cifs_sb = CIFS_SB(tmp_inode->i_sb); 119 struct cifs_sb_info *cifs_sb = CIFS_SB(tmp_inode->i_sb);
118 __u32 attr = le32_to_cpu(pfindData->ExtFileAttributes); 120 __u32 attr;
119 __u64 allocation_size = le64_to_cpu(pfindData->AllocationSize); 121 __u64 allocation_size;
120 __u64 end_of_file = le64_to_cpu(pfindData->EndOfFile); 122 __u64 end_of_file;
121
122 cifsInfo->cifsAttrs = attr;
123 cifsInfo->time = jiffies;
124 123
125 /* save mtime and size */ 124 /* save mtime and size */
126 local_mtime = tmp_inode->i_mtime; 125 local_mtime = tmp_inode->i_mtime;
127 local_size = tmp_inode->i_size; 126 local_size = tmp_inode->i_size;
128 127
128 if(new_buf_type) {
129 FILE_DIRECTORY_INFO *pfindData = (FILE_DIRECTORY_INFO *)buf;
130
131 attr = le32_to_cpu(pfindData->ExtFileAttributes);
132 allocation_size = le64_to_cpu(pfindData->AllocationSize);
133 end_of_file = le64_to_cpu(pfindData->EndOfFile);
134 tmp_inode->i_atime =
135 cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastAccessTime));
136 tmp_inode->i_mtime =
137 cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastWriteTime));
138 tmp_inode->i_ctime =
139 cifs_NTtimeToUnix(le64_to_cpu(pfindData->ChangeTime));
140 } else { /* legacy, OS2 and DOS style */
141 FIND_FILE_STANDARD_INFO * pfindData =
142 (FIND_FILE_STANDARD_INFO *)buf;
143
144 attr = le16_to_cpu(pfindData->Attributes);
145 allocation_size = le32_to_cpu(pfindData->AllocationSize);
146 end_of_file = le32_to_cpu(pfindData->DataSize);
147 tmp_inode->i_atime = CURRENT_TIME;
148 /* tmp_inode->i_mtime = BB FIXME - add dos time handling
149 tmp_inode->i_ctime = 0; BB FIXME */
150
151 }
152
129 /* Linux can not store file creation time unfortunately so ignore it */ 153 /* Linux can not store file creation time unfortunately so ignore it */
130 tmp_inode->i_atime = 154
131 cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastAccessTime)); 155 cifsInfo->cifsAttrs = attr;
132 tmp_inode->i_mtime = 156 cifsInfo->time = jiffies;
133 cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastWriteTime)); 157
134 tmp_inode->i_ctime =
135 cifs_NTtimeToUnix(le64_to_cpu(pfindData->ChangeTime));
136 /* treat dos attribute of read-only as read-only mode bit e.g. 555? */ 158 /* treat dos attribute of read-only as read-only mode bit e.g. 555? */
137 /* 2767 perms - indicate mandatory locking */ 159 /* 2767 perms - indicate mandatory locking */
138 /* BB fill in uid and gid here? with help from winbind? 160 /* BB fill in uid and gid here? with help from winbind?
@@ -215,11 +237,13 @@ static void fill_in_inode(struct inode *tmp_inode,
215 else 237 else
216 tmp_inode->i_fop = &cifs_file_ops; 238 tmp_inode->i_fop = &cifs_file_ops;
217 239
218 tmp_inode->i_data.a_ops = &cifs_addr_ops;
219 if((cifs_sb->tcon) && (cifs_sb->tcon->ses) && 240 if((cifs_sb->tcon) && (cifs_sb->tcon->ses) &&
220 (cifs_sb->tcon->ses->server->maxBuf < 241 (cifs_sb->tcon->ses->server->maxBuf <
221 4096 + MAX_CIFS_HDR_SIZE)) 242 PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE))
222 tmp_inode->i_data.a_ops->readpages = NULL; 243 tmp_inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
244 else
245 tmp_inode->i_data.a_ops = &cifs_addr_ops;
246
223 if(isNewInode) 247 if(isNewInode)
224 return; /* No sense invalidating pages for new inode 248 return; /* No sense invalidating pages for new inode
225 since have not started caching readahead file 249 since have not started caching readahead file
@@ -338,11 +362,12 @@ static void unix_fill_in_inode(struct inode *tmp_inode,
338 else 362 else
339 tmp_inode->i_fop = &cifs_file_ops; 363 tmp_inode->i_fop = &cifs_file_ops;
340 364
341 tmp_inode->i_data.a_ops = &cifs_addr_ops;
342 if((cifs_sb->tcon) && (cifs_sb->tcon->ses) && 365 if((cifs_sb->tcon) && (cifs_sb->tcon->ses) &&
343 (cifs_sb->tcon->ses->server->maxBuf < 366 (cifs_sb->tcon->ses->server->maxBuf <
344 4096 + MAX_CIFS_HDR_SIZE)) 367 PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE))
345 tmp_inode->i_data.a_ops->readpages = NULL; 368 tmp_inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
369 else
370 tmp_inode->i_data.a_ops = &cifs_addr_ops;
346 371
347 if(isNewInode) 372 if(isNewInode)
348 return; /* No sense invalidating pages for new inode since we 373 return; /* No sense invalidating pages for new inode since we
@@ -415,7 +440,10 @@ static int initiate_cifs_search(const int xid, struct file *file)
415ffirst_retry: 440ffirst_retry:
416 /* test for Unix extensions */ 441 /* test for Unix extensions */
417 if (pTcon->ses->capabilities & CAP_UNIX) { 442 if (pTcon->ses->capabilities & CAP_UNIX) {
418 cifsFile->srch_inf.info_level = SMB_FIND_FILE_UNIX; 443 cifsFile->srch_inf.info_level = SMB_FIND_FILE_UNIX;
444 } else if ((pTcon->ses->capabilities &
445 (CAP_NT_SMBS | CAP_NT_FIND)) == 0) {
446 cifsFile->srch_inf.info_level = SMB_FIND_FILE_INFO_STANDARD;
419 } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { 447 } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
420 cifsFile->srch_inf.info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO; 448 cifsFile->srch_inf.info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO;
421 } else /* not srvinos - BB fixme add check for backlevel? */ { 449 } else /* not srvinos - BB fixme add check for backlevel? */ {
@@ -451,12 +479,19 @@ static int cifs_unicode_bytelen(char *str)
451 return len << 1; 479 return len << 1;
452} 480}
453 481
454static char *nxt_dir_entry(char *old_entry, char *end_of_smb) 482static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level)
455{ 483{
456 char * new_entry; 484 char * new_entry;
457 FILE_DIRECTORY_INFO * pDirInfo = (FILE_DIRECTORY_INFO *)old_entry; 485 FILE_DIRECTORY_INFO * pDirInfo = (FILE_DIRECTORY_INFO *)old_entry;
458 486
459 new_entry = old_entry + le32_to_cpu(pDirInfo->NextEntryOffset); 487 if(level == SMB_FIND_FILE_INFO_STANDARD) {
488 FIND_FILE_STANDARD_INFO * pfData;
489 pfData = (FIND_FILE_STANDARD_INFO *)pDirInfo;
490
491 new_entry = old_entry + sizeof(FIND_FILE_STANDARD_INFO) +
492 pfData->FileNameLength;
493 } else
494 new_entry = old_entry + le32_to_cpu(pDirInfo->NextEntryOffset);
460 cFYI(1,("new entry %p old entry %p",new_entry,old_entry)); 495 cFYI(1,("new entry %p old entry %p",new_entry,old_entry));
461 /* validate that new_entry is not past end of SMB */ 496 /* validate that new_entry is not past end of SMB */
462 if(new_entry >= end_of_smb) { 497 if(new_entry >= end_of_smb) {
@@ -464,7 +499,10 @@ static char *nxt_dir_entry(char *old_entry, char *end_of_smb)
464 ("search entry %p began after end of SMB %p old entry %p", 499 ("search entry %p began after end of SMB %p old entry %p",
465 new_entry, end_of_smb, old_entry)); 500 new_entry, end_of_smb, old_entry));
466 return NULL; 501 return NULL;
467 } else if (new_entry + sizeof(FILE_DIRECTORY_INFO) > end_of_smb) { 502 } else if(((level == SMB_FIND_FILE_INFO_STANDARD) &&
503 (new_entry + sizeof(FIND_FILE_STANDARD_INFO) > end_of_smb)) ||
504 ((level != SMB_FIND_FILE_INFO_STANDARD) &&
505 (new_entry + sizeof(FILE_DIRECTORY_INFO) > end_of_smb))) {
468 cERROR(1,("search entry %p extends after end of SMB %p", 506 cERROR(1,("search entry %p extends after end of SMB %p",
469 new_entry, end_of_smb)); 507 new_entry, end_of_smb));
470 return NULL; 508 return NULL;
@@ -482,7 +520,7 @@ static int cifs_entry_is_dot(char *current_entry, struct cifsFileInfo *cfile)
482 char * filename = NULL; 520 char * filename = NULL;
483 int len = 0; 521 int len = 0;
484 522
485 if(cfile->srch_inf.info_level == 0x202) { 523 if(cfile->srch_inf.info_level == SMB_FIND_FILE_UNIX) {
486 FILE_UNIX_INFO * pFindData = (FILE_UNIX_INFO *)current_entry; 524 FILE_UNIX_INFO * pFindData = (FILE_UNIX_INFO *)current_entry;
487 filename = &pFindData->FileName[0]; 525 filename = &pFindData->FileName[0];
488 if(cfile->srch_inf.unicode) { 526 if(cfile->srch_inf.unicode) {
@@ -491,26 +529,34 @@ static int cifs_entry_is_dot(char *current_entry, struct cifsFileInfo *cfile)
491 /* BB should we make this strnlen of PATH_MAX? */ 529 /* BB should we make this strnlen of PATH_MAX? */
492 len = strnlen(filename, 5); 530 len = strnlen(filename, 5);
493 } 531 }
494 } else if(cfile->srch_inf.info_level == 0x101) { 532 } else if(cfile->srch_inf.info_level == SMB_FIND_FILE_DIRECTORY_INFO) {
495 FILE_DIRECTORY_INFO * pFindData = 533 FILE_DIRECTORY_INFO * pFindData =
496 (FILE_DIRECTORY_INFO *)current_entry; 534 (FILE_DIRECTORY_INFO *)current_entry;
497 filename = &pFindData->FileName[0]; 535 filename = &pFindData->FileName[0];
498 len = le32_to_cpu(pFindData->FileNameLength); 536 len = le32_to_cpu(pFindData->FileNameLength);
499 } else if(cfile->srch_inf.info_level == 0x102) { 537 } else if(cfile->srch_inf.info_level ==
538 SMB_FIND_FILE_FULL_DIRECTORY_INFO) {
500 FILE_FULL_DIRECTORY_INFO * pFindData = 539 FILE_FULL_DIRECTORY_INFO * pFindData =
501 (FILE_FULL_DIRECTORY_INFO *)current_entry; 540 (FILE_FULL_DIRECTORY_INFO *)current_entry;
502 filename = &pFindData->FileName[0]; 541 filename = &pFindData->FileName[0];
503 len = le32_to_cpu(pFindData->FileNameLength); 542 len = le32_to_cpu(pFindData->FileNameLength);
504 } else if(cfile->srch_inf.info_level == 0x105) { 543 } else if(cfile->srch_inf.info_level ==
544 SMB_FIND_FILE_ID_FULL_DIR_INFO) {
505 SEARCH_ID_FULL_DIR_INFO * pFindData = 545 SEARCH_ID_FULL_DIR_INFO * pFindData =
506 (SEARCH_ID_FULL_DIR_INFO *)current_entry; 546 (SEARCH_ID_FULL_DIR_INFO *)current_entry;
507 filename = &pFindData->FileName[0]; 547 filename = &pFindData->FileName[0];
508 len = le32_to_cpu(pFindData->FileNameLength); 548 len = le32_to_cpu(pFindData->FileNameLength);
509 } else if(cfile->srch_inf.info_level == 0x104) { 549 } else if(cfile->srch_inf.info_level ==
550 SMB_FIND_FILE_BOTH_DIRECTORY_INFO) {
510 FILE_BOTH_DIRECTORY_INFO * pFindData = 551 FILE_BOTH_DIRECTORY_INFO * pFindData =
511 (FILE_BOTH_DIRECTORY_INFO *)current_entry; 552 (FILE_BOTH_DIRECTORY_INFO *)current_entry;
512 filename = &pFindData->FileName[0]; 553 filename = &pFindData->FileName[0];
513 len = le32_to_cpu(pFindData->FileNameLength); 554 len = le32_to_cpu(pFindData->FileNameLength);
555 } else if(cfile->srch_inf.info_level == SMB_FIND_FILE_INFO_STANDARD) {
556 FIND_FILE_STANDARD_INFO * pFindData =
557 (FIND_FILE_STANDARD_INFO *)current_entry;
558 filename = &pFindData->FileName[0];
559 len = le32_to_cpu(pFindData->FileNameLength);
514 } else { 560 } else {
515 cFYI(1,("Unknown findfirst level %d",cfile->srch_inf.info_level)); 561 cFYI(1,("Unknown findfirst level %d",cfile->srch_inf.info_level));
516 } 562 }
@@ -597,7 +643,9 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
597 . and .. for the root of a drive and for those we need 643 . and .. for the root of a drive and for those we need
598 to start two entries earlier */ 644 to start two entries earlier */
599 645
600/* dump_cifs_file_struct(file, "In fce ");*/ 646#ifdef CONFIG_CIFS_DEBUG2
647 dump_cifs_file_struct(file, "In fce ");
648#endif
601 if(((index_to_find < cifsFile->srch_inf.index_of_last_entry) && 649 if(((index_to_find < cifsFile->srch_inf.index_of_last_entry) &&
602 is_dir_changed(file)) || 650 is_dir_changed(file)) ||
603 (index_to_find < first_entry_in_buffer)) { 651 (index_to_find < first_entry_in_buffer)) {
@@ -644,10 +692,12 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
644 first_entry_in_buffer = cifsFile->srch_inf.index_of_last_entry 692 first_entry_in_buffer = cifsFile->srch_inf.index_of_last_entry
645 - cifsFile->srch_inf.entries_in_buffer; 693 - cifsFile->srch_inf.entries_in_buffer;
646 pos_in_buf = index_to_find - first_entry_in_buffer; 694 pos_in_buf = index_to_find - first_entry_in_buffer;
647 cFYI(1,("found entry - pos_in_buf %d",pos_in_buf)); 695 cFYI(1,("found entry - pos_in_buf %d",pos_in_buf));
696
648 for(i=0;(i<(pos_in_buf)) && (current_entry != NULL);i++) { 697 for(i=0;(i<(pos_in_buf)) && (current_entry != NULL);i++) {
649 /* go entry by entry figuring out which is first */ 698 /* go entry by entry figuring out which is first */
650 current_entry = nxt_dir_entry(current_entry,end_of_smb); 699 current_entry = nxt_dir_entry(current_entry,end_of_smb,
700 cifsFile->srch_inf.info_level);
651 } 701 }
652 if((current_entry == NULL) && (i < pos_in_buf)) { 702 if((current_entry == NULL) && (i < pos_in_buf)) {
653 /* BB fixme - check if we should flag this error */ 703 /* BB fixme - check if we should flag this error */
@@ -674,7 +724,7 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
674/* inode num, inode type and filename returned */ 724/* inode num, inode type and filename returned */
675static int cifs_get_name_from_search_buf(struct qstr *pqst, 725static int cifs_get_name_from_search_buf(struct qstr *pqst,
676 char *current_entry, __u16 level, unsigned int unicode, 726 char *current_entry, __u16 level, unsigned int unicode,
677 struct cifs_sb_info * cifs_sb, ino_t *pinum) 727 struct cifs_sb_info * cifs_sb, int max_len, ino_t *pinum)
678{ 728{
679 int rc = 0; 729 int rc = 0;
680 unsigned int len = 0; 730 unsigned int len = 0;
@@ -718,10 +768,22 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst,
718 (FILE_BOTH_DIRECTORY_INFO *)current_entry; 768 (FILE_BOTH_DIRECTORY_INFO *)current_entry;
719 filename = &pFindData->FileName[0]; 769 filename = &pFindData->FileName[0];
720 len = le32_to_cpu(pFindData->FileNameLength); 770 len = le32_to_cpu(pFindData->FileNameLength);
771 } else if(level == SMB_FIND_FILE_INFO_STANDARD) {
772 FIND_FILE_STANDARD_INFO * pFindData =
773 (FIND_FILE_STANDARD_INFO *)current_entry;
774 filename = &pFindData->FileName[0];
775 /* one byte length, no name conversion */
776 len = (unsigned int)pFindData->FileNameLength;
721 } else { 777 } else {
722 cFYI(1,("Unknown findfirst level %d",level)); 778 cFYI(1,("Unknown findfirst level %d",level));
723 return -EINVAL; 779 return -EINVAL;
724 } 780 }
781
782 if(len > max_len) {
783 cERROR(1,("bad search response length %d past smb end", len));
784 return -EINVAL;
785 }
786
725 if(unicode) { 787 if(unicode) {
726 /* BB fixme - test with long names */ 788 /* BB fixme - test with long names */
727 /* Note converted filename can be longer than in unicode */ 789 /* Note converted filename can be longer than in unicode */
@@ -741,7 +803,7 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst,
741} 803}
742 804
743static int cifs_filldir(char *pfindEntry, struct file *file, 805static int cifs_filldir(char *pfindEntry, struct file *file,
744 filldir_t filldir, void *direntry, char *scratch_buf) 806 filldir_t filldir, void *direntry, char *scratch_buf, int max_len)
745{ 807{
746 int rc = 0; 808 int rc = 0;
747 struct qstr qstring; 809 struct qstr qstring;
@@ -777,6 +839,7 @@ static int cifs_filldir(char *pfindEntry, struct file *file,
777 rc = cifs_get_name_from_search_buf(&qstring,pfindEntry, 839 rc = cifs_get_name_from_search_buf(&qstring,pfindEntry,
778 pCifsF->srch_inf.info_level, 840 pCifsF->srch_inf.info_level,
779 pCifsF->srch_inf.unicode,cifs_sb, 841 pCifsF->srch_inf.unicode,cifs_sb,
842 max_len,
780 &inum /* returned */); 843 &inum /* returned */);
781 844
782 if(rc) 845 if(rc)
@@ -798,13 +861,16 @@ static int cifs_filldir(char *pfindEntry, struct file *file,
798 /* we pass in rc below, indicating whether it is a new inode, 861 /* we pass in rc below, indicating whether it is a new inode,
799 so we can figure out whether to invalidate the inode cached 862 so we can figure out whether to invalidate the inode cached
800 data if the file has changed */ 863 data if the file has changed */
801 if(pCifsF->srch_inf.info_level == SMB_FIND_FILE_UNIX) { 864 if(pCifsF->srch_inf.info_level == SMB_FIND_FILE_UNIX)
802 unix_fill_in_inode(tmp_inode, 865 unix_fill_in_inode(tmp_inode,
803 (FILE_UNIX_INFO *)pfindEntry,&obj_type, rc); 866 (FILE_UNIX_INFO *)pfindEntry,
804 } else { 867 &obj_type, rc);
805 fill_in_inode(tmp_inode, 868 else if(pCifsF->srch_inf.info_level == SMB_FIND_FILE_INFO_STANDARD)
806 (FILE_DIRECTORY_INFO *)pfindEntry,&obj_type, rc); 869 fill_in_inode(tmp_inode, 0 /* old level 1 buffer type */,
807 } 870 pfindEntry, &obj_type, rc);
871 else
872 fill_in_inode(tmp_inode, 1 /* NT */, pfindEntry, &obj_type, rc);
873
808 874
809 rc = filldir(direntry,qstring.name,qstring.len,file->f_pos, 875 rc = filldir(direntry,qstring.name,qstring.len,file->f_pos,
810 tmp_inode->i_ino,obj_type); 876 tmp_inode->i_ino,obj_type);
@@ -864,6 +930,12 @@ static int cifs_save_resume_key(const char *current_entry,
864 filename = &pFindData->FileName[0]; 930 filename = &pFindData->FileName[0];
865 len = le32_to_cpu(pFindData->FileNameLength); 931 len = le32_to_cpu(pFindData->FileNameLength);
866 cifsFile->srch_inf.resume_key = pFindData->FileIndex; 932 cifsFile->srch_inf.resume_key = pFindData->FileIndex;
933 } else if(level == SMB_FIND_FILE_INFO_STANDARD) {
934 FIND_FILE_STANDARD_INFO * pFindData =
935 (FIND_FILE_STANDARD_INFO *)current_entry;
936 filename = &pFindData->FileName[0];
937 /* one byte length, no name conversion */
938 len = (unsigned int)pFindData->FileNameLength;
867 } else { 939 } else {
868 cFYI(1,("Unknown findfirst level %d",level)); 940 cFYI(1,("Unknown findfirst level %d",level));
869 return -EINVAL; 941 return -EINVAL;
@@ -884,6 +956,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
884 int num_to_fill = 0; 956 int num_to_fill = 0;
885 char * tmp_buf = NULL; 957 char * tmp_buf = NULL;
886 char * end_of_smb; 958 char * end_of_smb;
959 int max_len;
887 960
888 xid = GetXid(); 961 xid = GetXid();
889 962
@@ -909,7 +982,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
909 case 1: 982 case 1:
910 if (filldir(direntry, "..", 2, file->f_pos, 983 if (filldir(direntry, "..", 2, file->f_pos,
911 file->f_dentry->d_parent->d_inode->i_ino, DT_DIR) < 0) { 984 file->f_dentry->d_parent->d_inode->i_ino, DT_DIR) < 0) {
912 cERROR(1, ("Filldir for parent dir failed ")); 985 cERROR(1, ("Filldir for parent dir failed"));
913 rc = -ENOMEM; 986 rc = -ENOMEM;
914 break; 987 break;
915 } 988 }
@@ -959,10 +1032,11 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
959 goto rddir2_exit; 1032 goto rddir2_exit;
960 } 1033 }
961 cFYI(1,("loop through %d times filling dir for net buf %p", 1034 cFYI(1,("loop through %d times filling dir for net buf %p",
962 num_to_fill,cifsFile->srch_inf.ntwrk_buf_start)); 1035 num_to_fill,cifsFile->srch_inf.ntwrk_buf_start));
963 end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + 1036 max_len = smbCalcSize((struct smb_hdr *)
964 smbCalcSize((struct smb_hdr *) 1037 cifsFile->srch_inf.ntwrk_buf_start);
965 cifsFile->srch_inf.ntwrk_buf_start); 1038 end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + max_len;
1039
966 /* To be safe - for UCS to UTF-8 with strings loaded 1040 /* To be safe - for UCS to UTF-8 with strings loaded
967 with the rare long characters alloc more to account for 1041 with the rare long characters alloc more to account for
968 such multibyte target UTF-8 characters. cifs_unicode.c, 1042 such multibyte target UTF-8 characters. cifs_unicode.c,
@@ -977,17 +1051,19 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
977 } 1051 }
978 /* if buggy server returns . and .. late do 1052 /* if buggy server returns . and .. late do
979 we want to check for that here? */ 1053 we want to check for that here? */
980 rc = cifs_filldir(current_entry, file, 1054 rc = cifs_filldir(current_entry, file,
981 filldir, direntry,tmp_buf); 1055 filldir, direntry, tmp_buf, max_len);
982 file->f_pos++; 1056 file->f_pos++;
983 if(file->f_pos == cifsFile->srch_inf.index_of_last_entry) { 1057 if(file->f_pos ==
1058 cifsFile->srch_inf.index_of_last_entry) {
984 cFYI(1,("last entry in buf at pos %lld %s", 1059 cFYI(1,("last entry in buf at pos %lld %s",
985 file->f_pos,tmp_buf)); /* BB removeme BB */ 1060 file->f_pos,tmp_buf));
986 cifs_save_resume_key(current_entry,cifsFile); 1061 cifs_save_resume_key(current_entry,cifsFile);
987 break; 1062 break;
988 } else 1063 } else
989 current_entry = nxt_dir_entry(current_entry, 1064 current_entry =
990 end_of_smb); 1065 nxt_dir_entry(current_entry, end_of_smb,
1066 cifsFile->srch_inf.info_level);
991 } 1067 }
992 kfree(tmp_buf); 1068 kfree(tmp_buf);
993 break; 1069 break;
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
new file mode 100644
index 0000000000..7202d534ef
--- /dev/null
+++ b/fs/cifs/sess.c
@@ -0,0 +1,538 @@
1/*
2 * fs/cifs/sess.c
3 *
4 * SMB/CIFS session setup handling routines
5 *
6 * Copyright (c) International Business Machines Corp., 2006
7 * Author(s): Steve French (sfrench@us.ibm.com)
8 *
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
13 *
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
18 *
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24#include "cifspdu.h"
25#include "cifsglob.h"
26#include "cifsproto.h"
27#include "cifs_unicode.h"
28#include "cifs_debug.h"
29#include "ntlmssp.h"
30#include "nterr.h"
31#include <linux/utsname.h>
32
33extern void SMBNTencrypt(unsigned char *passwd, unsigned char *c8,
34 unsigned char *p24);
35
36static __u32 cifs_ssetup_hdr(struct cifsSesInfo *ses, SESSION_SETUP_ANDX *pSMB)
37{
38 __u32 capabilities = 0;
39
40 /* init fields common to all four types of SessSetup */
41 /* note that header is initialized to zero in header_assemble */
42 pSMB->req.AndXCommand = 0xFF;
43 pSMB->req.MaxBufferSize = cpu_to_le16(ses->server->maxBuf);
44 pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
45
46 /* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */
47
48 /* BB verify whether signing required on neg or just on auth frame
49 (and NTLM case) */
50
51 capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
52 CAP_LARGE_WRITE_X | CAP_LARGE_READ_X;
53
54 if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
55 pSMB->req.hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
56
57 if (ses->capabilities & CAP_UNICODE) {
58 pSMB->req.hdr.Flags2 |= SMBFLG2_UNICODE;
59 capabilities |= CAP_UNICODE;
60 }
61 if (ses->capabilities & CAP_STATUS32) {
62 pSMB->req.hdr.Flags2 |= SMBFLG2_ERR_STATUS;
63 capabilities |= CAP_STATUS32;
64 }
65 if (ses->capabilities & CAP_DFS) {
66 pSMB->req.hdr.Flags2 |= SMBFLG2_DFS;
67 capabilities |= CAP_DFS;
68 }
69 if (ses->capabilities & CAP_UNIX) {
70 capabilities |= CAP_UNIX;
71 }
72
73 /* BB check whether to init vcnum BB */
74 return capabilities;
75}
76
77static void unicode_ssetup_strings(char ** pbcc_area, struct cifsSesInfo *ses,
78 const struct nls_table * nls_cp)
79{
80 char * bcc_ptr = *pbcc_area;
81 int bytes_ret = 0;
82
83 /* BB FIXME add check that strings total less
84 than 335 or will need to send them as arrays */
85
86 /* unicode strings, must be word aligned before the call */
87/* if ((long) bcc_ptr % 2) {
88 *bcc_ptr = 0;
89 bcc_ptr++;
90 } */
91 /* copy user */
92 if(ses->userName == NULL) {
93 /* BB what about null user mounts - check that we do this BB */
94 } else { /* 300 should be long enough for any conceivable user name */
95 bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName,
96 300, nls_cp);
97 }
98 bcc_ptr += 2 * bytes_ret;
99 bcc_ptr += 2; /* account for null termination */
100 /* copy domain */
101 if(ses->domainName == NULL)
102 bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr,
103 "CIFS_LINUX_DOM", 32, nls_cp);
104 else
105 bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->domainName,
106 256, nls_cp);
107 bcc_ptr += 2 * bytes_ret;
108 bcc_ptr += 2; /* account for null terminator */
109
110 /* Copy OS version */
111 bytes_ret = cifs_strtoUCS((__le16 *)bcc_ptr, "Linux version ", 32,
112 nls_cp);
113 bcc_ptr += 2 * bytes_ret;
114 bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, system_utsname.release,
115 32, nls_cp);
116 bcc_ptr += 2 * bytes_ret;
117 bcc_ptr += 2; /* trailing null */
118
119 bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, CIFS_NETWORK_OPSYS,
120 32, nls_cp);
121 bcc_ptr += 2 * bytes_ret;
122 bcc_ptr += 2; /* trailing null */
123
124 *pbcc_area = bcc_ptr;
125}
126
127static void ascii_ssetup_strings(char ** pbcc_area, struct cifsSesInfo *ses,
128 const struct nls_table * nls_cp)
129{
130 char * bcc_ptr = *pbcc_area;
131
132 /* copy user */
133 /* BB what about null user mounts - check that we do this BB */
134 /* copy user */
135 if(ses->userName == NULL) {
136 /* BB what about null user mounts - check that we do this BB */
137 } else { /* 300 should be long enough for any conceivable user name */
138 strncpy(bcc_ptr, ses->userName, 300);
139 }
140 /* BB improve check for overflow */
141 bcc_ptr += strnlen(ses->userName, 300);
142 *bcc_ptr = 0;
143 bcc_ptr++; /* account for null termination */
144
145 /* copy domain */
146
147 if(ses->domainName == NULL) {
148 strcpy(bcc_ptr, "CIFS_LINUX_DOM");
149 bcc_ptr += 14; /* strlen(CIFS_LINUX_DOM) */
150 } else {
151 strncpy(bcc_ptr, ses->domainName, 256);
152 bcc_ptr += strnlen(ses->domainName, 256);
153 }
154 *bcc_ptr = 0;
155 bcc_ptr++;
156
157 /* BB check for overflow here */
158
159 strcpy(bcc_ptr, "Linux version ");
160 bcc_ptr += strlen("Linux version ");
161 strcpy(bcc_ptr, system_utsname.release);
162 bcc_ptr += strlen(system_utsname.release) + 1;
163
164 strcpy(bcc_ptr, CIFS_NETWORK_OPSYS);
165 bcc_ptr += strlen(CIFS_NETWORK_OPSYS) + 1;
166
167 *pbcc_area = bcc_ptr;
168}
169
170static int decode_unicode_ssetup(char ** pbcc_area, int bleft, struct cifsSesInfo *ses,
171 const struct nls_table * nls_cp)
172{
173 int rc = 0;
174 int words_left, len;
175 char * data = *pbcc_area;
176
177
178
179 cFYI(1,("bleft %d",bleft));
180
181
182 /* word align, if bytes remaining is not even */
183 if(bleft % 2) {
184 bleft--;
185 data++;
186 }
187 words_left = bleft / 2;
188
189 /* save off server operating system */
190 len = UniStrnlen((wchar_t *) data, words_left);
191
192/* We look for obvious messed up bcc or strings in response so we do not go off
193 the end since (at least) WIN2K and Windows XP have a major bug in not null
194 terminating last Unicode string in response */
195 if(len >= words_left)
196 return rc;
197
198 if(ses->serverOS)
199 kfree(ses->serverOS);
200 /* UTF-8 string will not grow more than four times as big as UCS-16 */
201 ses->serverOS = kzalloc(4 * len, GFP_KERNEL);
202 if(ses->serverOS != NULL) {
203 cifs_strfromUCS_le(ses->serverOS, (__le16 *)data, len,
204 nls_cp);
205 }
206 data += 2 * (len + 1);
207 words_left -= len + 1;
208
209 /* save off server network operating system */
210 len = UniStrnlen((wchar_t *) data, words_left);
211
212 if(len >= words_left)
213 return rc;
214
215 if(ses->serverNOS)
216 kfree(ses->serverNOS);
217 ses->serverNOS = kzalloc(4 * len, GFP_KERNEL); /* BB this is wrong length FIXME BB */
218 if(ses->serverNOS != NULL) {
219 cifs_strfromUCS_le(ses->serverNOS, (__le16 *)data, len,
220 nls_cp);
221 if(strncmp(ses->serverNOS, "NT LAN Manager 4",16) == 0) {
222 cFYI(1,("NT4 server"));
223 ses->flags |= CIFS_SES_NT4;
224 }
225 }
226 data += 2 * (len + 1);
227 words_left -= len + 1;
228
229 /* save off server domain */
230 len = UniStrnlen((wchar_t *) data, words_left);
231
232 if(len > words_left)
233 return rc;
234
235 if(ses->serverDomain)
236 kfree(ses->serverDomain);
237 ses->serverDomain = kzalloc(2 * (len + 1), GFP_KERNEL); /* BB FIXME wrong length */
238 if(ses->serverDomain != NULL) {
239 cifs_strfromUCS_le(ses->serverDomain, (__le16 *)data, len,
240 nls_cp);
241 ses->serverDomain[2*len] = 0;
242 ses->serverDomain[(2*len) + 1] = 0;
243 }
244 data += 2 * (len + 1);
245 words_left -= len + 1;
246
247 cFYI(1,("words left: %d",words_left));
248
249 return rc;
250}
251
252static int decode_ascii_ssetup(char ** pbcc_area, int bleft, struct cifsSesInfo *ses,
253 const struct nls_table * nls_cp)
254{
255 int rc = 0;
256 int len;
257 char * bcc_ptr = *pbcc_area;
258
259 cFYI(1,("decode sessetup ascii. bleft %d", bleft));
260
261 len = strnlen(bcc_ptr, bleft);
262 if(len >= bleft)
263 return rc;
264
265 if(ses->serverOS)
266 kfree(ses->serverOS);
267
268 ses->serverOS = kzalloc(len + 1, GFP_KERNEL);
269 if(ses->serverOS)
270 strncpy(ses->serverOS, bcc_ptr, len);
271
272 bcc_ptr += len + 1;
273 bleft -= len + 1;
274
275 len = strnlen(bcc_ptr, bleft);
276 if(len >= bleft)
277 return rc;
278
279 if(ses->serverNOS)
280 kfree(ses->serverNOS);
281
282 ses->serverNOS = kzalloc(len + 1, GFP_KERNEL);
283 if(ses->serverNOS)
284 strncpy(ses->serverNOS, bcc_ptr, len);
285
286 bcc_ptr += len + 1;
287 bleft -= len + 1;
288
289 len = strnlen(bcc_ptr, bleft);
290 if(len > bleft)
291 return rc;
292
293 if(ses->serverDomain)
294 kfree(ses->serverDomain);
295
296 ses->serverDomain = kzalloc(len + 1, GFP_KERNEL);
297 if(ses->serverOS)
298 strncpy(ses->serverOS, bcc_ptr, len);
299
300 bcc_ptr += len + 1;
301 bleft -= len + 1;
302
303 cFYI(1,("ascii: bytes left %d",bleft));
304
305 return rc;
306}
307
308int
309CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time,
310 const struct nls_table *nls_cp)
311{
312 int rc = 0;
313 int wct;
314 struct smb_hdr *smb_buf;
315 char *bcc_ptr;
316 char *str_area;
317 SESSION_SETUP_ANDX *pSMB;
318 __u32 capabilities;
319 int count;
320 int resp_buf_type = 0;
321 struct kvec iov[2];
322 enum securityEnum type;
323 __u16 action;
324 int bytes_remaining;
325
326 if(ses == NULL)
327 return -EINVAL;
328
329 type = ses->server->secType;
330
331 cFYI(1,("sess setup type %d",type));
332 if(type == LANMAN) {
333#ifndef CONFIG_CIFS_WEAK_PW_HASH
334 /* LANMAN and plaintext are less secure and off by default.
335 So we make this explicitly be turned on in kconfig (in the
336 build) and turned on at runtime (changed from the default)
337 in proc/fs/cifs or via mount parm. Unfortunately this is
338 needed for old Win (e.g. Win95), some obscure NAS and OS/2 */
339 return -EOPNOTSUPP;
340#endif
341 wct = 10; /* lanman 2 style sessionsetup */
342 } else if((type == NTLM) || (type == NTLMv2)) {
343 /* For NTLMv2 failures eventually may need to retry NTLM */
344 wct = 13; /* old style NTLM sessionsetup */
345 } else /* same size for negotiate or auth, NTLMSSP or extended security */
346 wct = 12;
347
348 rc = small_smb_init_no_tc(SMB_COM_SESSION_SETUP_ANDX, wct, ses,
349 (void **)&smb_buf);
350 if(rc)
351 return rc;
352
353 pSMB = (SESSION_SETUP_ANDX *)smb_buf;
354
355 capabilities = cifs_ssetup_hdr(ses, pSMB);
356
357 /* we will send the SMB in two pieces,
358 a fixed length beginning part, and a
359 second part which will include the strings
360 and rest of bcc area, in order to avoid having
361 to do a large buffer 17K allocation */
362 iov[0].iov_base = (char *)pSMB;
363 iov[0].iov_len = smb_buf->smb_buf_length + 4;
364
365 /* 2000 big enough to fit max user, domain, NOS name etc. */
366 str_area = kmalloc(2000, GFP_KERNEL);
367 bcc_ptr = str_area;
368
369 if(type == LANMAN) {
370#ifdef CONFIG_CIFS_WEAK_PW_HASH
371 char lnm_session_key[CIFS_SESS_KEY_SIZE];
372
373 /* no capabilities flags in old lanman negotiation */
374
375 pSMB->old_req.PasswordLength = CIFS_SESS_KEY_SIZE;
376 /* BB calculate hash with password */
377 /* and copy into bcc */
378
379 calc_lanman_hash(ses, lnm_session_key);
380
381/* #ifdef CONFIG_CIFS_DEBUG2
382 cifs_dump_mem("cryptkey: ",ses->server->cryptKey,
383 CIFS_SESS_KEY_SIZE);
384#endif */
385 memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_SESS_KEY_SIZE);
386 bcc_ptr += CIFS_SESS_KEY_SIZE;
387
388 /* can not sign if LANMAN negotiated so no need
389 to calculate signing key? but what if server
390 changed to do higher than lanman dialect and
391 we reconnected would we ever calc signing_key? */
392
393 cFYI(1,("Negotiating LANMAN setting up strings"));
394 /* Unicode not allowed for LANMAN dialects */
395 ascii_ssetup_strings(&bcc_ptr, ses, nls_cp);
396#endif
397 } else if (type == NTLM) {
398 char ntlm_session_key[CIFS_SESS_KEY_SIZE];
399
400 pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
401 pSMB->req_no_secext.CaseInsensitivePasswordLength =
402 cpu_to_le16(CIFS_SESS_KEY_SIZE);
403 pSMB->req_no_secext.CaseSensitivePasswordLength =
404 cpu_to_le16(CIFS_SESS_KEY_SIZE);
405
406 /* calculate session key */
407 SMBNTencrypt(ses->password, ses->server->cryptKey,
408 ntlm_session_key);
409
410 if(first_time) /* should this be moved into common code
411 with similar ntlmv2 path? */
412 cifs_calculate_mac_key(ses->server->mac_signing_key,
413 ntlm_session_key, ses->password);
414 /* copy session key */
415
416 memcpy(bcc_ptr, (char *)ntlm_session_key,CIFS_SESS_KEY_SIZE);
417 bcc_ptr += CIFS_SESS_KEY_SIZE;
418 memcpy(bcc_ptr, (char *)ntlm_session_key,CIFS_SESS_KEY_SIZE);
419 bcc_ptr += CIFS_SESS_KEY_SIZE;
420 if(ses->capabilities & CAP_UNICODE) {
421 /* unicode strings must be word aligned */
422 if (iov[0].iov_len % 2) {
423 *bcc_ptr = 0;
424 bcc_ptr++;
425 }
426 unicode_ssetup_strings(&bcc_ptr, ses, nls_cp);
427 } else
428 ascii_ssetup_strings(&bcc_ptr, ses, nls_cp);
429 } else if (type == NTLMv2) {
430 char * v2_sess_key =
431 kmalloc(sizeof(struct ntlmv2_resp), GFP_KERNEL);
432
433 /* BB FIXME change all users of v2_sess_key to
434 struct ntlmv2_resp */
435
436 if(v2_sess_key == NULL) {
437 cifs_small_buf_release(smb_buf);
438 return -ENOMEM;
439 }
440
441 pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
442
443 /* LM2 password would be here if we supported it */
444 pSMB->req_no_secext.CaseInsensitivePasswordLength = 0;
445 /* cpu_to_le16(LM2_SESS_KEY_SIZE); */
446
447 pSMB->req_no_secext.CaseSensitivePasswordLength =
448 cpu_to_le16(sizeof(struct ntlmv2_resp));
449
450 /* calculate session key */
451 setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp);
452 if(first_time) /* should this be moved into common code
453 with similar ntlmv2 path? */
454 /* cifs_calculate_ntlmv2_mac_key(ses->server->mac_signing_key,
455 response BB FIXME, v2_sess_key); */
456
457 /* copy session key */
458
459 /* memcpy(bcc_ptr, (char *)ntlm_session_key,LM2_SESS_KEY_SIZE);
460 bcc_ptr += LM2_SESS_KEY_SIZE; */
461 memcpy(bcc_ptr, (char *)v2_sess_key, sizeof(struct ntlmv2_resp));
462 bcc_ptr += sizeof(struct ntlmv2_resp);
463 kfree(v2_sess_key);
464 if(ses->capabilities & CAP_UNICODE) {
465 if(iov[0].iov_len % 2) {
466 *bcc_ptr = 0;
467 } bcc_ptr++;
468 unicode_ssetup_strings(&bcc_ptr, ses, nls_cp);
469 } else
470 ascii_ssetup_strings(&bcc_ptr, ses, nls_cp);
471 } else /* NTLMSSP or SPNEGO */ {
472 pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
473 capabilities |= CAP_EXTENDED_SECURITY;
474 pSMB->req.Capabilities = cpu_to_le32(capabilities);
475 /* BB set password lengths */
476 }
477
478 count = (long) bcc_ptr - (long) str_area;
479 smb_buf->smb_buf_length += count;
480
481 BCC_LE(smb_buf) = cpu_to_le16(count);
482
483 iov[1].iov_base = str_area;
484 iov[1].iov_len = count;
485 rc = SendReceive2(xid, ses, iov, 2 /* num_iovecs */, &resp_buf_type, 0);
486 /* SMB request buf freed in SendReceive2 */
487
488 cFYI(1,("ssetup rc from sendrecv2 is %d",rc));
489 if(rc)
490 goto ssetup_exit;
491
492 pSMB = (SESSION_SETUP_ANDX *)iov[0].iov_base;
493 smb_buf = (struct smb_hdr *)iov[0].iov_base;
494
495 if((smb_buf->WordCount != 3) && (smb_buf->WordCount != 4)) {
496 rc = -EIO;
497 cERROR(1,("bad word count %d", smb_buf->WordCount));
498 goto ssetup_exit;
499 }
500 action = le16_to_cpu(pSMB->resp.Action);
501 if (action & GUEST_LOGIN)
502 cFYI(1, ("Guest login")); /* BB mark SesInfo struct? */
503 ses->Suid = smb_buf->Uid; /* UID left in wire format (le) */
504 cFYI(1, ("UID = %d ", ses->Suid));
505 /* response can have either 3 or 4 word count - Samba sends 3 */
506 /* and lanman response is 3 */
507 bytes_remaining = BCC(smb_buf);
508 bcc_ptr = pByteArea(smb_buf);
509
510 if(smb_buf->WordCount == 4) {
511 __u16 blob_len;
512 blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength);
513 bcc_ptr += blob_len;
514 if(blob_len > bytes_remaining) {
515 cERROR(1,("bad security blob length %d", blob_len));
516 rc = -EINVAL;
517 goto ssetup_exit;
518 }
519 bytes_remaining -= blob_len;
520 }
521
522 /* BB check if Unicode and decode strings */
523 if(smb_buf->Flags2 & SMBFLG2_UNICODE)
524 rc = decode_unicode_ssetup(&bcc_ptr, bytes_remaining,
525 ses, nls_cp);
526 else
527 rc = decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses,nls_cp);
528
529ssetup_exit:
530 kfree(str_area);
531 if(resp_buf_type == CIFS_SMALL_BUFFER) {
532 cFYI(1,("ssetup freeing small buf %p", iov[0].iov_base));
533 cifs_small_buf_release(iov[0].iov_base);
534 } else if(resp_buf_type == CIFS_LARGE_BUFFER)
535 cifs_buf_release(iov[0].iov_base);
536
537 return rc;
538}
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
index 6103bcdfb1..f518c5e450 100644
--- a/fs/cifs/smbencrypt.c
+++ b/fs/cifs/smbencrypt.c
@@ -30,6 +30,7 @@
30#include <linux/random.h> 30#include <linux/random.h>
31#include "cifs_unicode.h" 31#include "cifs_unicode.h"
32#include "cifspdu.h" 32#include "cifspdu.h"
33#include "cifsglob.h"
33#include "md5.h" 34#include "md5.h"
34#include "cifs_debug.h" 35#include "cifs_debug.h"
35#include "cifsencrypt.h" 36#include "cifsencrypt.h"
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 3da8040946..17ba329e2b 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -654,8 +654,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
654 654
655 if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { 655 if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
656 up(&ses->server->tcpSem); 656 up(&ses->server->tcpSem);
657 cERROR(1, 657 cERROR(1, ("Illegal length, greater than maximum frame, %d",
658 ("Illegal length, greater than maximum frame, %d ",
659 in_buf->smb_buf_length)); 658 in_buf->smb_buf_length));
660 DeleteMidQEntry(midQ); 659 DeleteMidQEntry(midQ);
661 /* If not lock req, update # of requests on wire to server */ 660 /* If not lock req, update # of requests on wire to server */
diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c
index 6c6771db36..7caee8d8ea 100644
--- a/fs/coda/psdev.c
+++ b/fs/coda/psdev.c
@@ -259,7 +259,7 @@ static ssize_t coda_psdev_read(struct file * file, char __user * buf,
259 /* If request was not a signal, enqueue and don't free */ 259 /* If request was not a signal, enqueue and don't free */
260 if (!(req->uc_flags & REQ_ASYNC)) { 260 if (!(req->uc_flags & REQ_ASYNC)) {
261 req->uc_flags |= REQ_READ; 261 req->uc_flags |= REQ_READ;
262 list_add(&(req->uc_chain), vcp->vc_processing.prev); 262 list_add_tail(&(req->uc_chain), &vcp->vc_processing);
263 goto out; 263 goto out;
264 } 264 }
265 265
diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c
index b040eba13a..a5b5e631ba 100644
--- a/fs/coda/upcall.c
+++ b/fs/coda/upcall.c
@@ -725,7 +725,7 @@ static int coda_upcall(struct coda_sb_info *sbi,
725 ((union inputArgs *)buffer)->ih.unique = req->uc_unique; 725 ((union inputArgs *)buffer)->ih.unique = req->uc_unique;
726 726
727 /* Append msg to pending queue and poke Venus. */ 727 /* Append msg to pending queue and poke Venus. */
728 list_add(&(req->uc_chain), vcommp->vc_pending.prev); 728 list_add_tail(&(req->uc_chain), &vcommp->vc_pending);
729 729
730 wake_up_interruptible(&vcommp->vc_waitq); 730 wake_up_interruptible(&vcommp->vc_waitq);
731 /* We can be interrupted while we wait for Venus to process 731 /* We can be interrupted while we wait for Venus to process
diff --git a/fs/compat.c b/fs/compat.c
index 7e7e5bc4f3..e31e9cf966 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -55,6 +55,20 @@
55 55
56extern void sigset_from_compat(sigset_t *set, compat_sigset_t *compat); 56extern void sigset_from_compat(sigset_t *set, compat_sigset_t *compat);
57 57
58int compat_log = 1;
59
60int compat_printk(const char *fmt, ...)
61{
62 va_list ap;
63 int ret;
64 if (!compat_log)
65 return 0;
66 va_start(ap, fmt);
67 ret = vprintk(fmt, ap);
68 va_end(ap);
69 return ret;
70}
71
58/* 72/*
59 * Not all architectures have sys_utime, so implement this in terms 73 * Not all architectures have sys_utime, so implement this in terms
60 * of sys_utimes. 74 * of sys_utimes.
@@ -359,7 +373,7 @@ static void compat_ioctl_error(struct file *filp, unsigned int fd,
359 sprintf(buf,"'%c'", (cmd>>24) & 0x3f); 373 sprintf(buf,"'%c'", (cmd>>24) & 0x3f);
360 if (!isprint(buf[1])) 374 if (!isprint(buf[1]))
361 sprintf(buf, "%02x", buf[1]); 375 sprintf(buf, "%02x", buf[1]);
362 printk("ioctl32(%s:%d): Unknown cmd fd(%d) " 376 compat_printk("ioctl32(%s:%d): Unknown cmd fd(%d) "
363 "cmd(%08x){%s} arg(%08x) on %s\n", 377 "cmd(%08x){%s} arg(%08x) on %s\n",
364 current->comm, current->pid, 378 current->comm, current->pid,
365 (int)fd, (unsigned int)cmd, buf, 379 (int)fd, (unsigned int)cmd, buf,
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 9eb9824dd3..d8ecfedef1 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -80,6 +80,7 @@
80#include <net/bluetooth/rfcomm.h> 80#include <net/bluetooth/rfcomm.h>
81 81
82#include <linux/capi.h> 82#include <linux/capi.h>
83#include <linux/gigaset_dev.h>
83 84
84#include <scsi/scsi.h> 85#include <scsi/scsi.h>
85#include <scsi/scsi_ioctl.h> 86#include <scsi/scsi_ioctl.h>
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 5f952187fc..207f8006fd 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -1009,8 +1009,7 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
1009 /* fallthrough */ 1009 /* fallthrough */
1010 default: 1010 default:
1011 if (filp->f_pos == 2) { 1011 if (filp->f_pos == 2) {
1012 list_del(q); 1012 list_move(q, &parent_sd->s_children);
1013 list_add(q, &parent_sd->s_children);
1014 } 1013 }
1015 for (p=q->next; p!= &parent_sd->s_children; p=p->next) { 1014 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
1016 struct configfs_dirent *next; 1015 struct configfs_dirent *next;
@@ -1033,8 +1032,7 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
1033 dt_type(next)) < 0) 1032 dt_type(next)) < 0)
1034 return 0; 1033 return 0;
1035 1034
1036 list_del(q); 1035 list_move(q, p);
1037 list_add(q, p);
1038 p = q; 1036 p = q;
1039 filp->f_pos++; 1037 filp->f_pos++;
1040 } 1038 }
diff --git a/fs/dcache.c b/fs/dcache.c
index b85fda3605..48b44a714b 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -522,8 +522,7 @@ void shrink_dcache_sb(struct super_block * sb)
522 dentry = list_entry(tmp, struct dentry, d_lru); 522 dentry = list_entry(tmp, struct dentry, d_lru);
523 if (dentry->d_sb != sb) 523 if (dentry->d_sb != sb)
524 continue; 524 continue;
525 list_del(tmp); 525 list_move(tmp, &dentry_unused);
526 list_add(tmp, &dentry_unused);
527 } 526 }
528 527
529 /* 528 /*
@@ -638,7 +637,7 @@ resume:
638 * of the unused list for prune_dcache 637 * of the unused list for prune_dcache
639 */ 638 */
640 if (!atomic_read(&dentry->d_count)) { 639 if (!atomic_read(&dentry->d_count)) {
641 list_add(&dentry->d_lru, dentry_unused.prev); 640 list_add_tail(&dentry->d_lru, &dentry_unused);
642 dentry_stat.nr_unused++; 641 dentry_stat.nr_unused++;
643 found++; 642 found++;
644 } 643 }
diff --git a/fs/dquot.c b/fs/dquot.c
index 81d87a413c..0122a27910 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -250,7 +250,7 @@ static inline struct dquot *find_dquot(unsigned int hashent, struct super_block
250/* Add a dquot to the tail of the free list */ 250/* Add a dquot to the tail of the free list */
251static inline void put_dquot_last(struct dquot *dquot) 251static inline void put_dquot_last(struct dquot *dquot)
252{ 252{
253 list_add(&dquot->dq_free, free_dquots.prev); 253 list_add_tail(&dquot->dq_free, &free_dquots);
254 dqstats.free_dquots++; 254 dqstats.free_dquots++;
255} 255}
256 256
@@ -266,7 +266,7 @@ static inline void put_inuse(struct dquot *dquot)
266{ 266{
267 /* We add to the back of inuse list so we don't have to restart 267 /* We add to the back of inuse list so we don't have to restart
268 * when traversing this list and we block */ 268 * when traversing this list and we block */
269 list_add(&dquot->dq_inuse, inuse_list.prev); 269 list_add_tail(&dquot->dq_inuse, &inuse_list);
270 dqstats.allocated_dquots++; 270 dqstats.allocated_dquots++;
271} 271}
272 272
diff --git a/fs/exec.c b/fs/exec.c
index 0b88bf6461..c8494f513e 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -666,8 +666,6 @@ static int de_thread(struct task_struct *tsk)
666 * and to assume its PID: 666 * and to assume its PID:
667 */ 667 */
668 if (!thread_group_leader(current)) { 668 if (!thread_group_leader(current)) {
669 struct dentry *proc_dentry1, *proc_dentry2;
670
671 /* 669 /*
672 * Wait for the thread group leader to be a zombie. 670 * Wait for the thread group leader to be a zombie.
673 * It should already be zombie at this point, most 671 * It should already be zombie at this point, most
@@ -689,10 +687,6 @@ static int de_thread(struct task_struct *tsk)
689 */ 687 */
690 current->start_time = leader->start_time; 688 current->start_time = leader->start_time;
691 689
692 spin_lock(&leader->proc_lock);
693 spin_lock(&current->proc_lock);
694 proc_dentry1 = proc_pid_unhash(current);
695 proc_dentry2 = proc_pid_unhash(leader);
696 write_lock_irq(&tasklist_lock); 690 write_lock_irq(&tasklist_lock);
697 691
698 BUG_ON(leader->tgid != current->tgid); 692 BUG_ON(leader->tgid != current->tgid);
@@ -713,7 +707,7 @@ static int de_thread(struct task_struct *tsk)
713 attach_pid(current, PIDTYPE_PID, current->pid); 707 attach_pid(current, PIDTYPE_PID, current->pid);
714 attach_pid(current, PIDTYPE_PGID, current->signal->pgrp); 708 attach_pid(current, PIDTYPE_PGID, current->signal->pgrp);
715 attach_pid(current, PIDTYPE_SID, current->signal->session); 709 attach_pid(current, PIDTYPE_SID, current->signal->session);
716 list_add_tail_rcu(&current->tasks, &init_task.tasks); 710 list_replace_rcu(&leader->tasks, &current->tasks);
717 711
718 current->group_leader = current; 712 current->group_leader = current;
719 leader->group_leader = current; 713 leader->group_leader = current;
@@ -721,7 +715,6 @@ static int de_thread(struct task_struct *tsk)
721 /* Reduce leader to a thread */ 715 /* Reduce leader to a thread */
722 detach_pid(leader, PIDTYPE_PGID); 716 detach_pid(leader, PIDTYPE_PGID);
723 detach_pid(leader, PIDTYPE_SID); 717 detach_pid(leader, PIDTYPE_SID);
724 list_del_init(&leader->tasks);
725 718
726 current->exit_signal = SIGCHLD; 719 current->exit_signal = SIGCHLD;
727 720
@@ -729,10 +722,6 @@ static int de_thread(struct task_struct *tsk)
729 leader->exit_state = EXIT_DEAD; 722 leader->exit_state = EXIT_DEAD;
730 723
731 write_unlock_irq(&tasklist_lock); 724 write_unlock_irq(&tasklist_lock);
732 spin_unlock(&leader->proc_lock);
733 spin_unlock(&current->proc_lock);
734 proc_pid_flush(proc_dentry1);
735 proc_pid_flush(proc_dentry2);
736 } 725 }
737 726
738 /* 727 /*
@@ -1379,67 +1368,102 @@ static void format_corename(char *corename, const char *pattern, long signr)
1379 *out_ptr = 0; 1368 *out_ptr = 0;
1380} 1369}
1381 1370
1382static void zap_threads (struct mm_struct *mm) 1371static void zap_process(struct task_struct *start)
1383{ 1372{
1384 struct task_struct *g, *p; 1373 struct task_struct *t;
1385 struct task_struct *tsk = current;
1386 struct completion *vfork_done = tsk->vfork_done;
1387 int traced = 0;
1388 1374
1389 /* 1375 start->signal->flags = SIGNAL_GROUP_EXIT;
1390 * Make sure nobody is waiting for us to release the VM, 1376 start->signal->group_stop_count = 0;
1391 * otherwise we can deadlock when we wait on each other
1392 */
1393 if (vfork_done) {
1394 tsk->vfork_done = NULL;
1395 complete(vfork_done);
1396 }
1397 1377
1398 read_lock(&tasklist_lock); 1378 t = start;
1399 do_each_thread(g,p) 1379 do {
1400 if (mm == p->mm && p != tsk) { 1380 if (t != current && t->mm) {
1401 force_sig_specific(SIGKILL, p); 1381 t->mm->core_waiters++;
1402 mm->core_waiters++; 1382 sigaddset(&t->pending.signal, SIGKILL);
1403 if (unlikely(p->ptrace) && 1383 signal_wake_up(t, 1);
1404 unlikely(p->parent->mm == mm))
1405 traced = 1;
1406 } 1384 }
1407 while_each_thread(g,p); 1385 } while ((t = next_thread(t)) != start);
1386}
1408 1387
1409 read_unlock(&tasklist_lock); 1388static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
1389 int exit_code)
1390{
1391 struct task_struct *g, *p;
1392 unsigned long flags;
1393 int err = -EAGAIN;
1394
1395 spin_lock_irq(&tsk->sighand->siglock);
1396 if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
1397 tsk->signal->group_exit_code = exit_code;
1398 zap_process(tsk);
1399 err = 0;
1400 }
1401 spin_unlock_irq(&tsk->sighand->siglock);
1402 if (err)
1403 return err;
1410 1404
1411 if (unlikely(traced)) { 1405 if (atomic_read(&mm->mm_users) == mm->core_waiters + 1)
1412 /* 1406 goto done;
1413 * We are zapping a thread and the thread it ptraces. 1407
1414 * If the tracee went into a ptrace stop for exit tracing, 1408 rcu_read_lock();
1415 * we could deadlock since the tracer is waiting for this 1409 for_each_process(g) {
1416 * coredump to finish. Detach them so they can both die. 1410 if (g == tsk->group_leader)
1417 */ 1411 continue;
1418 write_lock_irq(&tasklist_lock); 1412
1419 do_each_thread(g,p) { 1413 p = g;
1420 if (mm == p->mm && p != tsk && 1414 do {
1421 p->ptrace && p->parent->mm == mm) { 1415 if (p->mm) {
1422 __ptrace_detach(p, 0); 1416 if (p->mm == mm) {
1417 /*
1418 * p->sighand can't disappear, but
1419 * may be changed by de_thread()
1420 */
1421 lock_task_sighand(p, &flags);
1422 zap_process(p);
1423 unlock_task_sighand(p, &flags);
1424 }
1425 break;
1423 } 1426 }
1424 } while_each_thread(g,p); 1427 } while ((p = next_thread(p)) != g);
1425 write_unlock_irq(&tasklist_lock);
1426 } 1428 }
1429 rcu_read_unlock();
1430done:
1431 return mm->core_waiters;
1427} 1432}
1428 1433
1429static void coredump_wait(struct mm_struct *mm) 1434static int coredump_wait(int exit_code)
1430{ 1435{
1431 DECLARE_COMPLETION(startup_done); 1436 struct task_struct *tsk = current;
1437 struct mm_struct *mm = tsk->mm;
1438 struct completion startup_done;
1439 struct completion *vfork_done;
1432 int core_waiters; 1440 int core_waiters;
1433 1441
1442 init_completion(&mm->core_done);
1443 init_completion(&startup_done);
1434 mm->core_startup_done = &startup_done; 1444 mm->core_startup_done = &startup_done;
1435 1445
1436 zap_threads(mm); 1446 core_waiters = zap_threads(tsk, mm, exit_code);
1437 core_waiters = mm->core_waiters;
1438 up_write(&mm->mmap_sem); 1447 up_write(&mm->mmap_sem);
1439 1448
1449 if (unlikely(core_waiters < 0))
1450 goto fail;
1451
1452 /*
1453 * Make sure nobody is waiting for us to release the VM,
1454 * otherwise we can deadlock when we wait on each other
1455 */
1456 vfork_done = tsk->vfork_done;
1457 if (vfork_done) {
1458 tsk->vfork_done = NULL;
1459 complete(vfork_done);
1460 }
1461
1440 if (core_waiters) 1462 if (core_waiters)
1441 wait_for_completion(&startup_done); 1463 wait_for_completion(&startup_done);
1464fail:
1442 BUG_ON(mm->core_waiters); 1465 BUG_ON(mm->core_waiters);
1466 return core_waiters;
1443} 1467}
1444 1468
1445int do_coredump(long signr, int exit_code, struct pt_regs * regs) 1469int do_coredump(long signr, int exit_code, struct pt_regs * regs)
@@ -1473,22 +1497,9 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
1473 } 1497 }
1474 mm->dumpable = 0; 1498 mm->dumpable = 0;
1475 1499
1476 retval = -EAGAIN; 1500 retval = coredump_wait(exit_code);
1477 spin_lock_irq(&current->sighand->siglock); 1501 if (retval < 0)
1478 if (!(current->signal->flags & SIGNAL_GROUP_EXIT)) {
1479 current->signal->flags = SIGNAL_GROUP_EXIT;
1480 current->signal->group_exit_code = exit_code;
1481 current->signal->group_stop_count = 0;
1482 retval = 0;
1483 }
1484 spin_unlock_irq(&current->sighand->siglock);
1485 if (retval) {
1486 up_write(&mm->mmap_sem);
1487 goto fail; 1502 goto fail;
1488 }
1489
1490 init_completion(&mm->core_done);
1491 coredump_wait(mm);
1492 1503
1493 /* 1504 /*
1494 * Clear any false indication of pending signals that might 1505 * Clear any false indication of pending signals that might
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index b2891cc29d..b7483360a2 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -630,7 +630,7 @@ enum {
630 Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro, 630 Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
631 Opt_nouid32, Opt_nocheck, Opt_debug, Opt_oldalloc, Opt_orlov, 631 Opt_nouid32, Opt_nocheck, Opt_debug, Opt_oldalloc, Opt_orlov,
632 Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl, 632 Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
633 Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh, 633 Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh, Opt_bh,
634 Opt_commit, Opt_journal_update, Opt_journal_inum, Opt_journal_dev, 634 Opt_commit, Opt_journal_update, Opt_journal_inum, Opt_journal_dev,
635 Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback, 635 Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
636 Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota, 636 Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
@@ -666,6 +666,7 @@ static match_table_t tokens = {
666 {Opt_noreservation, "noreservation"}, 666 {Opt_noreservation, "noreservation"},
667 {Opt_noload, "noload"}, 667 {Opt_noload, "noload"},
668 {Opt_nobh, "nobh"}, 668 {Opt_nobh, "nobh"},
669 {Opt_bh, "bh"},
669 {Opt_commit, "commit=%u"}, 670 {Opt_commit, "commit=%u"},
670 {Opt_journal_update, "journal=update"}, 671 {Opt_journal_update, "journal=update"},
671 {Opt_journal_inum, "journal=%u"}, 672 {Opt_journal_inum, "journal=%u"},
@@ -1014,6 +1015,9 @@ clear_qf_name:
1014 case Opt_nobh: 1015 case Opt_nobh:
1015 set_opt(sbi->s_mount_opt, NOBH); 1016 set_opt(sbi->s_mount_opt, NOBH);
1016 break; 1017 break;
1018 case Opt_bh:
1019 clear_opt(sbi->s_mount_opt, NOBH);
1020 break;
1017 default: 1021 default:
1018 printk (KERN_ERR 1022 printk (KERN_ERR
1019 "EXT3-fs: Unrecognized mount option \"%s\" " 1023 "EXT3-fs: Unrecognized mount option \"%s\" "
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 7f96b5cb67..8c9b28dff1 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -34,6 +34,7 @@
34#include <linux/suspend.h> 34#include <linux/suspend.h>
35#include <linux/pagemap.h> 35#include <linux/pagemap.h>
36#include <linux/kthread.h> 36#include <linux/kthread.h>
37#include <linux/poison.h>
37#include <linux/proc_fs.h> 38#include <linux/proc_fs.h>
38 39
39#include <asm/uaccess.h> 40#include <asm/uaccess.h>
@@ -1675,7 +1676,7 @@ static void journal_free_journal_head(struct journal_head *jh)
1675{ 1676{
1676#ifdef CONFIG_JBD_DEBUG 1677#ifdef CONFIG_JBD_DEBUG
1677 atomic_dec(&nr_journal_heads); 1678 atomic_dec(&nr_journal_heads);
1678 memset(jh, 0x5b, sizeof(*jh)); 1679 memset(jh, JBD_POISON_FREE, sizeof(*jh));
1679#endif 1680#endif
1680 kmem_cache_free(journal_head_cache, jh); 1681 kmem_cache_free(journal_head_cache, jh);
1681} 1682}
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index 320dd48b83..9c2077e7e0 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -267,6 +267,8 @@ static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
267 } 267 }
268 268
269 rc = do_jffs2_setxattr(inode, xprefix, "", value, size, 0); 269 rc = do_jffs2_setxattr(inode, xprefix, "", value, size, 0);
270 if (!value && rc == -ENODATA)
271 rc = 0;
270 if (value) 272 if (value)
271 kfree(value); 273 kfree(value);
272 if (!rc) { 274 if (!rc) {
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index 1862e8bc10..ad0121088d 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -53,8 +53,7 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
53 if (!instr) { 53 if (!instr) {
54 printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n"); 54 printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
55 spin_lock(&c->erase_completion_lock); 55 spin_lock(&c->erase_completion_lock);
56 list_del(&jeb->list); 56 list_move(&jeb->list, &c->erase_pending_list);
57 list_add(&jeb->list, &c->erase_pending_list);
58 c->erasing_size -= c->sector_size; 57 c->erasing_size -= c->sector_size;
59 c->dirty_size += c->sector_size; 58 c->dirty_size += c->sector_size;
60 jeb->dirty_size = c->sector_size; 59 jeb->dirty_size = c->sector_size;
@@ -86,8 +85,7 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
86 /* Erase failed immediately. Refile it on the list */ 85 /* Erase failed immediately. Refile it on the list */
87 D1(printk(KERN_DEBUG "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", jeb->offset, ret)); 86 D1(printk(KERN_DEBUG "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", jeb->offset, ret));
88 spin_lock(&c->erase_completion_lock); 87 spin_lock(&c->erase_completion_lock);
89 list_del(&jeb->list); 88 list_move(&jeb->list, &c->erase_pending_list);
90 list_add(&jeb->list, &c->erase_pending_list);
91 c->erasing_size -= c->sector_size; 89 c->erasing_size -= c->sector_size;
92 c->dirty_size += c->sector_size; 90 c->dirty_size += c->sector_size;
93 jeb->dirty_size = c->sector_size; 91 jeb->dirty_size = c->sector_size;
@@ -161,8 +159,7 @@ static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblo
161{ 159{
162 D1(printk(KERN_DEBUG "Erase completed successfully at 0x%08x\n", jeb->offset)); 160 D1(printk(KERN_DEBUG "Erase completed successfully at 0x%08x\n", jeb->offset));
163 spin_lock(&c->erase_completion_lock); 161 spin_lock(&c->erase_completion_lock);
164 list_del(&jeb->list); 162 list_move_tail(&jeb->list, &c->erase_complete_list);
165 list_add_tail(&jeb->list, &c->erase_complete_list);
166 spin_unlock(&c->erase_completion_lock); 163 spin_unlock(&c->erase_completion_lock);
167 /* Ensure that kupdated calls us again to mark them clean */ 164 /* Ensure that kupdated calls us again to mark them clean */
168 jffs2_erase_pending_trigger(c); 165 jffs2_erase_pending_trigger(c);
@@ -178,8 +175,7 @@ static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock
178 if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) { 175 if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) {
179 /* We'd like to give this block another try. */ 176 /* We'd like to give this block another try. */
180 spin_lock(&c->erase_completion_lock); 177 spin_lock(&c->erase_completion_lock);
181 list_del(&jeb->list); 178 list_move(&jeb->list, &c->erase_pending_list);
182 list_add(&jeb->list, &c->erase_pending_list);
183 c->erasing_size -= c->sector_size; 179 c->erasing_size -= c->sector_size;
184 c->dirty_size += c->sector_size; 180 c->dirty_size += c->sector_size;
185 jeb->dirty_size = c->sector_size; 181 jeb->dirty_size = c->sector_size;
@@ -191,8 +187,7 @@ static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock
191 spin_lock(&c->erase_completion_lock); 187 spin_lock(&c->erase_completion_lock);
192 c->erasing_size -= c->sector_size; 188 c->erasing_size -= c->sector_size;
193 c->bad_size += c->sector_size; 189 c->bad_size += c->sector_size;
194 list_del(&jeb->list); 190 list_move(&jeb->list, &c->bad_list);
195 list_add(&jeb->list, &c->bad_list);
196 c->nr_erasing_blocks--; 191 c->nr_erasing_blocks--;
197 spin_unlock(&c->erase_completion_lock); 192 spin_unlock(&c->erase_completion_lock);
198 wake_up(&c->erase_wait); 193 wake_up(&c->erase_wait);
@@ -230,7 +225,6 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
230 at the end of the linked list. Stash it and continue 225 at the end of the linked list. Stash it and continue
231 from the beginning of the list */ 226 from the beginning of the list */
232 ic = (struct jffs2_inode_cache *)(*prev); 227 ic = (struct jffs2_inode_cache *)(*prev);
233 BUG_ON(ic->class != RAWNODE_CLASS_INODE_CACHE);
234 prev = &ic->nodes; 228 prev = &ic->nodes;
235 continue; 229 continue;
236 } 230 }
@@ -254,7 +248,8 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
254 248
255 /* PARANOIA */ 249 /* PARANOIA */
256 if (!ic) { 250 if (!ic) {
257 printk(KERN_WARNING "inode_cache not found in remove_node_refs()!!\n"); 251 JFFS2_WARNING("inode_cache/xattr_datum/xattr_ref"
252 " not found in remove_node_refs()!!\n");
258 return; 253 return;
259 } 254 }
260 255
@@ -279,8 +274,19 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
279 printk("\n"); 274 printk("\n");
280 }); 275 });
281 276
282 if (ic->nodes == (void *)ic && ic->nlink == 0) 277 switch (ic->class) {
283 jffs2_del_ino_cache(c, ic); 278#ifdef CONFIG_JFFS2_FS_XATTR
279 case RAWNODE_CLASS_XATTR_DATUM:
280 jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
281 break;
282 case RAWNODE_CLASS_XATTR_REF:
283 jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
284 break;
285#endif
286 default:
287 if (ic->nodes == (void *)ic && ic->nlink == 0)
288 jffs2_del_ino_cache(c, ic);
289 }
284} 290}
285 291
286void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) 292void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 2900ec3ec3..97caa77d60 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -227,8 +227,6 @@ void jffs2_clear_inode (struct inode *inode)
227 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); 227 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
228 228
229 D1(printk(KERN_DEBUG "jffs2_clear_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode)); 229 D1(printk(KERN_DEBUG "jffs2_clear_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode));
230
231 jffs2_xattr_delete_inode(c, f->inocache);
232 jffs2_do_clear_inode(c, f); 230 jffs2_do_clear_inode(c, f);
233} 231}
234 232
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 477c526d63..daff3341ff 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -165,6 +165,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
165 D1(printk(KERN_DEBUG "Skipping check of ino #%d with nlink zero\n", 165 D1(printk(KERN_DEBUG "Skipping check of ino #%d with nlink zero\n",
166 ic->ino)); 166 ic->ino));
167 spin_unlock(&c->inocache_lock); 167 spin_unlock(&c->inocache_lock);
168 jffs2_xattr_delete_inode(c, ic);
168 continue; 169 continue;
169 } 170 }
170 switch(ic->state) { 171 switch(ic->state) {
@@ -275,13 +276,12 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
275 * We can decide whether this node is inode or xattr by ic->class. */ 276 * We can decide whether this node is inode or xattr by ic->class. */
276 if (ic->class == RAWNODE_CLASS_XATTR_DATUM 277 if (ic->class == RAWNODE_CLASS_XATTR_DATUM
277 || ic->class == RAWNODE_CLASS_XATTR_REF) { 278 || ic->class == RAWNODE_CLASS_XATTR_REF) {
278 BUG_ON(raw->next_in_ino != (void *)ic);
279 spin_unlock(&c->erase_completion_lock); 279 spin_unlock(&c->erase_completion_lock);
280 280
281 if (ic->class == RAWNODE_CLASS_XATTR_DATUM) { 281 if (ic->class == RAWNODE_CLASS_XATTR_DATUM) {
282 ret = jffs2_garbage_collect_xattr_datum(c, (struct jffs2_xattr_datum *)ic); 282 ret = jffs2_garbage_collect_xattr_datum(c, (struct jffs2_xattr_datum *)ic, raw);
283 } else { 283 } else {
284 ret = jffs2_garbage_collect_xattr_ref(c, (struct jffs2_xattr_ref *)ic); 284 ret = jffs2_garbage_collect_xattr_ref(c, (struct jffs2_xattr_ref *)ic, raw);
285 } 285 }
286 goto release_sem; 286 goto release_sem;
287 } 287 }
diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h
index 935fec1b12..b98594992e 100644
--- a/fs/jffs2/jffs2_fs_sb.h
+++ b/fs/jffs2/jffs2_fs_sb.h
@@ -119,8 +119,11 @@ struct jffs2_sb_info {
119#ifdef CONFIG_JFFS2_FS_XATTR 119#ifdef CONFIG_JFFS2_FS_XATTR
120#define XATTRINDEX_HASHSIZE (57) 120#define XATTRINDEX_HASHSIZE (57)
121 uint32_t highest_xid; 121 uint32_t highest_xid;
122 uint32_t highest_xseqno;
122 struct list_head xattrindex[XATTRINDEX_HASHSIZE]; 123 struct list_head xattrindex[XATTRINDEX_HASHSIZE];
123 struct list_head xattr_unchecked; 124 struct list_head xattr_unchecked;
125 struct list_head xattr_dead_list;
126 struct jffs2_xattr_ref *xref_dead_list;
124 struct jffs2_xattr_ref *xref_temp; 127 struct jffs2_xattr_ref *xref_temp;
125 struct rw_semaphore xattr_sem; 128 struct rw_semaphore xattr_sem;
126 uint32_t xdatum_mem_usage; 129 uint32_t xdatum_mem_usage;
diff --git a/fs/jffs2/malloc.c b/fs/jffs2/malloc.c
index 4889d0700c..8310c95478 100644
--- a/fs/jffs2/malloc.c
+++ b/fs/jffs2/malloc.c
@@ -291,6 +291,7 @@ struct jffs2_xattr_datum *jffs2_alloc_xattr_datum(void)
291 291
292 memset(xd, 0, sizeof(struct jffs2_xattr_datum)); 292 memset(xd, 0, sizeof(struct jffs2_xattr_datum));
293 xd->class = RAWNODE_CLASS_XATTR_DATUM; 293 xd->class = RAWNODE_CLASS_XATTR_DATUM;
294 xd->node = (void *)xd;
294 INIT_LIST_HEAD(&xd->xindex); 295 INIT_LIST_HEAD(&xd->xindex);
295 return xd; 296 return xd;
296} 297}
@@ -309,6 +310,7 @@ struct jffs2_xattr_ref *jffs2_alloc_xattr_ref(void)
309 310
310 memset(ref, 0, sizeof(struct jffs2_xattr_ref)); 311 memset(ref, 0, sizeof(struct jffs2_xattr_ref));
311 ref->class = RAWNODE_CLASS_XATTR_REF; 312 ref->class = RAWNODE_CLASS_XATTR_REF;
313 ref->node = (void *)ref;
312 return ref; 314 return ref;
313} 315}
314 316
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c
index 927dfe42ba..7675b33396 100644
--- a/fs/jffs2/nodelist.c
+++ b/fs/jffs2/nodelist.c
@@ -906,6 +906,9 @@ void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old)
906{ 906{
907 struct jffs2_inode_cache **prev; 907 struct jffs2_inode_cache **prev;
908 908
909#ifdef CONFIG_JFFS2_FS_XATTR
910 BUG_ON(old->xref);
911#endif
909 dbg_inocache("del %p (ino #%u)\n", old, old->ino); 912 dbg_inocache("del %p (ino #%u)\n", old, old->ino);
910 spin_lock(&c->inocache_lock); 913 spin_lock(&c->inocache_lock);
911 914
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
index 8bedfd2ff6..d88376992e 100644
--- a/fs/jffs2/nodemgmt.c
+++ b/fs/jffs2/nodemgmt.c
@@ -211,8 +211,7 @@ static int jffs2_find_nextblock(struct jffs2_sb_info *c)
211 struct jffs2_eraseblock *ejeb; 211 struct jffs2_eraseblock *ejeb;
212 212
213 ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list); 213 ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
214 list_del(&ejeb->list); 214 list_move_tail(&ejeb->list, &c->erase_pending_list);
215 list_add_tail(&ejeb->list, &c->erase_pending_list);
216 c->nr_erasing_blocks++; 215 c->nr_erasing_blocks++;
217 jffs2_erase_pending_trigger(c); 216 jffs2_erase_pending_trigger(c);
218 D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n", 217 D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",
@@ -684,19 +683,26 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
684 spin_lock(&c->erase_completion_lock); 683 spin_lock(&c->erase_completion_lock);
685 684
686 ic = jffs2_raw_ref_to_ic(ref); 685 ic = jffs2_raw_ref_to_ic(ref);
687 /* It seems we should never call jffs2_mark_node_obsolete() for
688 XATTR nodes.... yet. Make sure we notice if/when we change
689 that :) */
690 BUG_ON(ic->class != RAWNODE_CLASS_INODE_CACHE);
691 for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino)) 686 for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
692 ; 687 ;
693 688
694 *p = ref->next_in_ino; 689 *p = ref->next_in_ino;
695 ref->next_in_ino = NULL; 690 ref->next_in_ino = NULL;
696 691
697 if (ic->nodes == (void *)ic && ic->nlink == 0) 692 switch (ic->class) {
698 jffs2_del_ino_cache(c, ic); 693#ifdef CONFIG_JFFS2_FS_XATTR
699 694 case RAWNODE_CLASS_XATTR_DATUM:
695 jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
696 break;
697 case RAWNODE_CLASS_XATTR_REF:
698 jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
699 break;
700#endif
701 default:
702 if (ic->nodes == (void *)ic && ic->nlink == 0)
703 jffs2_del_ino_cache(c, ic);
704 break;
705 }
700 spin_unlock(&c->erase_completion_lock); 706 spin_unlock(&c->erase_completion_lock);
701 } 707 }
702 708
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 5fec012b02..cc1899268c 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -968,6 +968,7 @@ void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
968 struct jffs2_full_dirent *fd, *fds; 968 struct jffs2_full_dirent *fd, *fds;
969 int deleted; 969 int deleted;
970 970
971 jffs2_xattr_delete_inode(c, f->inocache);
971 down(&f->sem); 972 down(&f->sem);
972 deleted = f->inocache && !f->inocache->nlink; 973 deleted = f->inocache && !f->inocache->nlink;
973 974
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index 61618080b8..2bfdc33752 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -317,20 +317,23 @@ static int jffs2_scan_xattr_node(struct jffs2_sb_info *c, struct jffs2_erasebloc
317 struct jffs2_summary *s) 317 struct jffs2_summary *s)
318{ 318{
319 struct jffs2_xattr_datum *xd; 319 struct jffs2_xattr_datum *xd;
320 uint32_t totlen, crc; 320 uint32_t xid, version, totlen, crc;
321 int err; 321 int err;
322 322
323 crc = crc32(0, rx, sizeof(struct jffs2_raw_xattr) - 4); 323 crc = crc32(0, rx, sizeof(struct jffs2_raw_xattr) - 4);
324 if (crc != je32_to_cpu(rx->node_crc)) { 324 if (crc != je32_to_cpu(rx->node_crc)) {
325 if (je32_to_cpu(rx->node_crc) != 0xffffffff) 325 JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
326 JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", 326 ofs, je32_to_cpu(rx->node_crc), crc);
327 ofs, je32_to_cpu(rx->node_crc), crc);
328 if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen)))) 327 if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen))))
329 return err; 328 return err;
330 return 0; 329 return 0;
331 } 330 }
332 331
333 totlen = PAD(sizeof(*rx) + rx->name_len + 1 + je16_to_cpu(rx->value_len)); 332 xid = je32_to_cpu(rx->xid);
333 version = je32_to_cpu(rx->version);
334
335 totlen = PAD(sizeof(struct jffs2_raw_xattr)
336 + rx->name_len + 1 + je16_to_cpu(rx->value_len));
334 if (totlen != je32_to_cpu(rx->totlen)) { 337 if (totlen != je32_to_cpu(rx->totlen)) {
335 JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%u\n", 338 JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%u\n",
336 ofs, je32_to_cpu(rx->totlen), totlen); 339 ofs, je32_to_cpu(rx->totlen), totlen);
@@ -339,22 +342,24 @@ static int jffs2_scan_xattr_node(struct jffs2_sb_info *c, struct jffs2_erasebloc
339 return 0; 342 return 0;
340 } 343 }
341 344
342 xd = jffs2_setup_xattr_datum(c, je32_to_cpu(rx->xid), je32_to_cpu(rx->version)); 345 xd = jffs2_setup_xattr_datum(c, xid, version);
343 if (IS_ERR(xd)) { 346 if (IS_ERR(xd))
344 if (PTR_ERR(xd) == -EEXIST) {
345 if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rx->totlen)))))
346 return err;
347 return 0;
348 }
349 return PTR_ERR(xd); 347 return PTR_ERR(xd);
350 }
351 xd->xprefix = rx->xprefix;
352 xd->name_len = rx->name_len;
353 xd->value_len = je16_to_cpu(rx->value_len);
354 xd->data_crc = je32_to_cpu(rx->data_crc);
355 348
356 xd->node = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, NULL); 349 if (xd->version > version) {
357 /* FIXME */ xd->node->next_in_ino = (void *)xd; 350 struct jffs2_raw_node_ref *raw
351 = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, NULL);
352 raw->next_in_ino = xd->node->next_in_ino;
353 xd->node->next_in_ino = raw;
354 } else {
355 xd->version = version;
356 xd->xprefix = rx->xprefix;
357 xd->name_len = rx->name_len;
358 xd->value_len = je16_to_cpu(rx->value_len);
359 xd->data_crc = je32_to_cpu(rx->data_crc);
360
361 jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, (void *)xd);
362 }
358 363
359 if (jffs2_sum_active()) 364 if (jffs2_sum_active())
360 jffs2_sum_add_xattr_mem(s, rx, ofs - jeb->offset); 365 jffs2_sum_add_xattr_mem(s, rx, ofs - jeb->offset);
@@ -373,9 +378,8 @@ static int jffs2_scan_xref_node(struct jffs2_sb_info *c, struct jffs2_eraseblock
373 378
374 crc = crc32(0, rr, sizeof(*rr) - 4); 379 crc = crc32(0, rr, sizeof(*rr) - 4);
375 if (crc != je32_to_cpu(rr->node_crc)) { 380 if (crc != je32_to_cpu(rr->node_crc)) {
376 if (je32_to_cpu(rr->node_crc) != 0xffffffff) 381 JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
377 JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", 382 ofs, je32_to_cpu(rr->node_crc), crc);
378 ofs, je32_to_cpu(rr->node_crc), crc);
379 if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rr->totlen))))) 383 if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rr->totlen)))))
380 return err; 384 return err;
381 return 0; 385 return 0;
@@ -395,6 +399,7 @@ static int jffs2_scan_xref_node(struct jffs2_sb_info *c, struct jffs2_eraseblock
395 return -ENOMEM; 399 return -ENOMEM;
396 400
397 /* BEFORE jffs2_build_xattr_subsystem() called, 401 /* BEFORE jffs2_build_xattr_subsystem() called,
402 * and AFTER xattr_ref is marked as a dead xref,
398 * ref->xid is used to store 32bit xid, xd is not used 403 * ref->xid is used to store 32bit xid, xd is not used
399 * ref->ino is used to store 32bit inode-number, ic is not used 404 * ref->ino is used to store 32bit inode-number, ic is not used
400 * Thoes variables are declared as union, thus using those 405 * Thoes variables are declared as union, thus using those
@@ -404,11 +409,13 @@ static int jffs2_scan_xref_node(struct jffs2_sb_info *c, struct jffs2_eraseblock
404 */ 409 */
405 ref->ino = je32_to_cpu(rr->ino); 410 ref->ino = je32_to_cpu(rr->ino);
406 ref->xid = je32_to_cpu(rr->xid); 411 ref->xid = je32_to_cpu(rr->xid);
412 ref->xseqno = je32_to_cpu(rr->xseqno);
413 if (ref->xseqno > c->highest_xseqno)
414 c->highest_xseqno = (ref->xseqno & ~XREF_DELETE_MARKER);
407 ref->next = c->xref_temp; 415 ref->next = c->xref_temp;
408 c->xref_temp = ref; 416 c->xref_temp = ref;
409 417
410 ref->node = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rr->totlen)), NULL); 418 jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rr->totlen)), (void *)ref);
411 /* FIXME */ ref->node->next_in_ino = (void *)ref;
412 419
413 if (jffs2_sum_active()) 420 if (jffs2_sum_active())
414 jffs2_sum_add_xref_mem(s, rr, ofs - jeb->offset); 421 jffs2_sum_add_xref_mem(s, rr, ofs - jeb->offset);
diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c
index 0b02fc79e4..c19bd476e8 100644
--- a/fs/jffs2/summary.c
+++ b/fs/jffs2/summary.c
@@ -5,7 +5,7 @@
5 * Zoltan Sogor <weth@inf.u-szeged.hu>, 5 * Zoltan Sogor <weth@inf.u-szeged.hu>,
6 * Patrik Kluba <pajko@halom.u-szeged.hu>, 6 * Patrik Kluba <pajko@halom.u-szeged.hu>,
7 * University of Szeged, Hungary 7 * University of Szeged, Hungary
8 * 2005 KaiGai Kohei <kaigai@ak.jp.nec.com> 8 * 2006 KaiGai Kohei <kaigai@ak.jp.nec.com>
9 * 9 *
10 * For licensing information, see the file 'LICENCE' in this directory. 10 * For licensing information, see the file 'LICENCE' in this directory.
11 * 11 *
@@ -43,7 +43,7 @@ int jffs2_sum_init(struct jffs2_sb_info *c)
43 return -ENOMEM; 43 return -ENOMEM;
44 } 44 }
45 45
46 dbg_summary("returned succesfully\n"); 46 dbg_summary("returned successfully\n");
47 47
48 return 0; 48 return 0;
49} 49}
@@ -310,8 +310,6 @@ int jffs2_sum_add_kvec(struct jffs2_sb_info *c, const struct kvec *invecs,
310#ifdef CONFIG_JFFS2_FS_XATTR 310#ifdef CONFIG_JFFS2_FS_XATTR
311 case JFFS2_NODETYPE_XATTR: { 311 case JFFS2_NODETYPE_XATTR: {
312 struct jffs2_sum_xattr_mem *temp; 312 struct jffs2_sum_xattr_mem *temp;
313 if (je32_to_cpu(node->x.version) == 0xffffffff)
314 return 0;
315 temp = kmalloc(sizeof(struct jffs2_sum_xattr_mem), GFP_KERNEL); 313 temp = kmalloc(sizeof(struct jffs2_sum_xattr_mem), GFP_KERNEL);
316 if (!temp) 314 if (!temp)
317 goto no_mem; 315 goto no_mem;
@@ -327,10 +325,6 @@ int jffs2_sum_add_kvec(struct jffs2_sb_info *c, const struct kvec *invecs,
327 } 325 }
328 case JFFS2_NODETYPE_XREF: { 326 case JFFS2_NODETYPE_XREF: {
329 struct jffs2_sum_xref_mem *temp; 327 struct jffs2_sum_xref_mem *temp;
330
331 if (je32_to_cpu(node->r.ino) == 0xffffffff
332 && je32_to_cpu(node->r.xid) == 0xffffffff)
333 return 0;
334 temp = kmalloc(sizeof(struct jffs2_sum_xref_mem), GFP_KERNEL); 328 temp = kmalloc(sizeof(struct jffs2_sum_xref_mem), GFP_KERNEL);
335 if (!temp) 329 if (!temp)
336 goto no_mem; 330 goto no_mem;
@@ -483,22 +477,20 @@ static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eras
483 477
484 xd = jffs2_setup_xattr_datum(c, je32_to_cpu(spx->xid), 478 xd = jffs2_setup_xattr_datum(c, je32_to_cpu(spx->xid),
485 je32_to_cpu(spx->version)); 479 je32_to_cpu(spx->version));
486 if (IS_ERR(xd)) { 480 if (IS_ERR(xd))
487 if (PTR_ERR(xd) == -EEXIST) {
488 /* a newer version of xd exists */
489 if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(spx->totlen))))
490 return err;
491 sp += JFFS2_SUMMARY_XATTR_SIZE;
492 break;
493 }
494 JFFS2_NOTICE("allocation of xattr_datum failed\n");
495 return PTR_ERR(xd); 481 return PTR_ERR(xd);
482 if (xd->version > je32_to_cpu(spx->version)) {
483 /* node is not the newest one */
484 struct jffs2_raw_node_ref *raw
485 = sum_link_node_ref(c, jeb, je32_to_cpu(spx->offset) | REF_UNCHECKED,
486 PAD(je32_to_cpu(spx->totlen)), NULL);
487 raw->next_in_ino = xd->node->next_in_ino;
488 xd->node->next_in_ino = raw;
489 } else {
490 xd->version = je32_to_cpu(spx->version);
491 sum_link_node_ref(c, jeb, je32_to_cpu(spx->offset) | REF_UNCHECKED,
492 PAD(je32_to_cpu(spx->totlen)), (void *)xd);
496 } 493 }
497
498 xd->node = sum_link_node_ref(c, jeb, je32_to_cpu(spx->offset) | REF_UNCHECKED,
499 PAD(je32_to_cpu(spx->totlen)), NULL);
500 /* FIXME */ xd->node->next_in_ino = (void *)xd;
501
502 *pseudo_random += je32_to_cpu(spx->xid); 494 *pseudo_random += je32_to_cpu(spx->xid);
503 sp += JFFS2_SUMMARY_XATTR_SIZE; 495 sp += JFFS2_SUMMARY_XATTR_SIZE;
504 496
@@ -519,14 +511,11 @@ static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eras
519 JFFS2_NOTICE("allocation of xattr_datum failed\n"); 511 JFFS2_NOTICE("allocation of xattr_datum failed\n");
520 return -ENOMEM; 512 return -ENOMEM;
521 } 513 }
522 ref->ino = 0xfffffffe;
523 ref->xid = 0xfffffffd;
524 ref->next = c->xref_temp; 514 ref->next = c->xref_temp;
525 c->xref_temp = ref; 515 c->xref_temp = ref;
526 516
527 ref->node = sum_link_node_ref(c, jeb, je32_to_cpu(spr->offset) | REF_UNCHECKED, 517 sum_link_node_ref(c, jeb, je32_to_cpu(spr->offset) | REF_UNCHECKED,
528 PAD(sizeof(struct jffs2_raw_xref)), NULL); 518 PAD(sizeof(struct jffs2_raw_xref)), (void *)ref);
529 /* FIXME */ ref->node->next_in_ino = (void *)ref;
530 519
531 *pseudo_random += ref->node->flash_offset; 520 *pseudo_random += ref->node->flash_offset;
532 sp += JFFS2_SUMMARY_XREF_SIZE; 521 sp += JFFS2_SUMMARY_XREF_SIZE;
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index a7f153f79e..b9b700730d 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -495,8 +495,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
495 /* Fix up the original jeb now it's on the bad_list */ 495 /* Fix up the original jeb now it's on the bad_list */
496 if (first_raw == jeb->first_node) { 496 if (first_raw == jeb->first_node) {
497 D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset)); 497 D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
498 list_del(&jeb->list); 498 list_move(&jeb->list, &c->erase_pending_list);
499 list_add(&jeb->list, &c->erase_pending_list);
500 c->nr_erasing_blocks++; 499 c->nr_erasing_blocks++;
501 jffs2_erase_pending_trigger(c); 500 jffs2_erase_pending_trigger(c);
502 } 501 }
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
index 2d82e250be..18e66dbf23 100644
--- a/fs/jffs2/xattr.c
+++ b/fs/jffs2/xattr.c
@@ -23,18 +23,15 @@
23 * xattr_datum_hashkey(xprefix, xname, xvalue, xsize) 23 * xattr_datum_hashkey(xprefix, xname, xvalue, xsize)
24 * is used to calcurate xdatum hashkey. The reminder of hashkey into XATTRINDEX_HASHSIZE is 24 * is used to calcurate xdatum hashkey. The reminder of hashkey into XATTRINDEX_HASHSIZE is
25 * the index of the xattr name/value pair cache (c->xattrindex). 25 * the index of the xattr name/value pair cache (c->xattrindex).
26 * is_xattr_datum_unchecked(c, xd)
27 * returns 1, if xdatum contains any unchecked raw nodes. if all raw nodes are not
28 * unchecked, it returns 0.
26 * unload_xattr_datum(c, xd) 29 * unload_xattr_datum(c, xd)
27 * is used to release xattr name/value pair and detach from c->xattrindex. 30 * is used to release xattr name/value pair and detach from c->xattrindex.
28 * reclaim_xattr_datum(c) 31 * reclaim_xattr_datum(c)
29 * is used to reclaim xattr name/value pairs on the xattr name/value pair cache when 32 * is used to reclaim xattr name/value pairs on the xattr name/value pair cache when
30 * memory usage by cache is over c->xdatum_mem_threshold. Currentry, this threshold 33 * memory usage by cache is over c->xdatum_mem_threshold. Currentry, this threshold
31 * is hard coded as 32KiB. 34 * is hard coded as 32KiB.
32 * delete_xattr_datum_node(c, xd)
33 * is used to delete a jffs2 node is dominated by xdatum. When EBS(Erase Block Summary) is
34 * enabled, it overwrites the obsolete node by myself.
35 * delete_xattr_datum(c, xd)
36 * is used to delete jffs2_xattr_datum object. It must be called with 0-value of reference
37 * counter. (It means how many jffs2_xattr_ref object refers this xdatum.)
38 * do_verify_xattr_datum(c, xd) 35 * do_verify_xattr_datum(c, xd)
39 * is used to load the xdatum informations without name/value pair from the medium. 36 * is used to load the xdatum informations without name/value pair from the medium.
40 * It's necessary once, because those informations are not collected during mounting 37 * It's necessary once, because those informations are not collected during mounting
@@ -53,8 +50,10 @@
53 * is used to write xdatum to medium. xd->version will be incremented. 50 * is used to write xdatum to medium. xd->version will be incremented.
54 * create_xattr_datum(c, xprefix, xname, xvalue, xsize) 51 * create_xattr_datum(c, xprefix, xname, xvalue, xsize)
55 * is used to create new xdatum and write to medium. 52 * is used to create new xdatum and write to medium.
53 * delete_xattr_datum(c, xd)
54 * is used to delete a xdatum. It marks xd JFFS2_XFLAGS_DEAD, and allows
55 * GC to reclaim those physical nodes.
56 * -------------------------------------------------- */ 56 * -------------------------------------------------- */
57
58static uint32_t xattr_datum_hashkey(int xprefix, const char *xname, const char *xvalue, int xsize) 57static uint32_t xattr_datum_hashkey(int xprefix, const char *xname, const char *xvalue, int xsize)
59{ 58{
60 int name_len = strlen(xname); 59 int name_len = strlen(xname);
@@ -62,6 +61,22 @@ static uint32_t xattr_datum_hashkey(int xprefix, const char *xname, const char *
62 return crc32(xprefix, xname, name_len) ^ crc32(xprefix, xvalue, xsize); 61 return crc32(xprefix, xname, name_len) ^ crc32(xprefix, xvalue, xsize);
63} 62}
64 63
64static int is_xattr_datum_unchecked(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
65{
66 struct jffs2_raw_node_ref *raw;
67 int rc = 0;
68
69 spin_lock(&c->erase_completion_lock);
70 for (raw=xd->node; raw != (void *)xd; raw=raw->next_in_ino) {
71 if (ref_flags(raw) == REF_UNCHECKED) {
72 rc = 1;
73 break;
74 }
75 }
76 spin_unlock(&c->erase_completion_lock);
77 return rc;
78}
79
65static void unload_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) 80static void unload_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
66{ 81{
67 /* must be called under down_write(xattr_sem) */ 82 /* must be called under down_write(xattr_sem) */
@@ -107,77 +122,33 @@ static void reclaim_xattr_datum(struct jffs2_sb_info *c)
107 before, c->xdatum_mem_usage, before - c->xdatum_mem_usage); 122 before, c->xdatum_mem_usage, before - c->xdatum_mem_usage);
108} 123}
109 124
110static void delete_xattr_datum_node(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
111{
112 /* must be called under down_write(xattr_sem) */
113 struct jffs2_raw_xattr rx;
114 size_t length;
115 int rc;
116
117 if (!xd->node) {
118 JFFS2_WARNING("xdatum (xid=%u) is removed twice.\n", xd->xid);
119 return;
120 }
121 if (jffs2_sum_active()) {
122 memset(&rx, 0xff, sizeof(struct jffs2_raw_xattr));
123 rc = jffs2_flash_read(c, ref_offset(xd->node),
124 sizeof(struct jffs2_unknown_node),
125 &length, (char *)&rx);
126 if (rc || length != sizeof(struct jffs2_unknown_node)) {
127 JFFS2_ERROR("jffs2_flash_read()=%d, req=%zu, read=%zu at %#08x\n",
128 rc, sizeof(struct jffs2_unknown_node),
129 length, ref_offset(xd->node));
130 }
131 rc = jffs2_flash_write(c, ref_offset(xd->node), sizeof(rx),
132 &length, (char *)&rx);
133 if (rc || length != sizeof(struct jffs2_raw_xattr)) {
134 JFFS2_ERROR("jffs2_flash_write()=%d, req=%zu, wrote=%zu ar %#08x\n",
135 rc, sizeof(rx), length, ref_offset(xd->node));
136 }
137 }
138 spin_lock(&c->erase_completion_lock);
139 xd->node->next_in_ino = NULL;
140 spin_unlock(&c->erase_completion_lock);
141 jffs2_mark_node_obsolete(c, xd->node);
142 xd->node = NULL;
143}
144
145static void delete_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
146{
147 /* must be called under down_write(xattr_sem) */
148 BUG_ON(xd->refcnt);
149
150 unload_xattr_datum(c, xd);
151 if (xd->node) {
152 delete_xattr_datum_node(c, xd);
153 xd->node = NULL;
154 }
155 jffs2_free_xattr_datum(xd);
156}
157
158static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) 125static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
159{ 126{
160 /* must be called under down_write(xattr_sem) */ 127 /* must be called under down_write(xattr_sem) */
161 struct jffs2_eraseblock *jeb; 128 struct jffs2_eraseblock *jeb;
129 struct jffs2_raw_node_ref *raw;
162 struct jffs2_raw_xattr rx; 130 struct jffs2_raw_xattr rx;
163 size_t readlen; 131 size_t readlen;
164 uint32_t crc, totlen; 132 uint32_t crc, offset, totlen;
165 int rc; 133 int rc;
166 134
167 BUG_ON(!xd->node); 135 spin_lock(&c->erase_completion_lock);
168 BUG_ON(ref_flags(xd->node) != REF_UNCHECKED); 136 offset = ref_offset(xd->node);
137 if (ref_flags(xd->node) == REF_PRISTINE)
138 goto complete;
139 spin_unlock(&c->erase_completion_lock);
169 140
170 rc = jffs2_flash_read(c, ref_offset(xd->node), sizeof(rx), &readlen, (char *)&rx); 141 rc = jffs2_flash_read(c, offset, sizeof(rx), &readlen, (char *)&rx);
171 if (rc || readlen != sizeof(rx)) { 142 if (rc || readlen != sizeof(rx)) {
172 JFFS2_WARNING("jffs2_flash_read()=%d, req=%zu, read=%zu at %#08x\n", 143 JFFS2_WARNING("jffs2_flash_read()=%d, req=%zu, read=%zu at %#08x\n",
173 rc, sizeof(rx), readlen, ref_offset(xd->node)); 144 rc, sizeof(rx), readlen, offset);
174 return rc ? rc : -EIO; 145 return rc ? rc : -EIO;
175 } 146 }
176 crc = crc32(0, &rx, sizeof(rx) - 4); 147 crc = crc32(0, &rx, sizeof(rx) - 4);
177 if (crc != je32_to_cpu(rx.node_crc)) { 148 if (crc != je32_to_cpu(rx.node_crc)) {
178 if (je32_to_cpu(rx.node_crc) != 0xffffffff) 149 JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
179 JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", 150 offset, je32_to_cpu(rx.hdr_crc), crc);
180 ref_offset(xd->node), je32_to_cpu(rx.hdr_crc), crc); 151 xd->flags |= JFFS2_XFLAGS_INVALID;
181 return EIO; 152 return EIO;
182 } 153 }
183 totlen = PAD(sizeof(rx) + rx.name_len + 1 + je16_to_cpu(rx.value_len)); 154 totlen = PAD(sizeof(rx) + rx.name_len + 1 + je16_to_cpu(rx.value_len));
@@ -188,11 +159,12 @@ static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_dat
188 || je32_to_cpu(rx.version) != xd->version) { 159 || je32_to_cpu(rx.version) != xd->version) {
189 JFFS2_ERROR("inconsistent xdatum at %#08x, magic=%#04x/%#04x, " 160 JFFS2_ERROR("inconsistent xdatum at %#08x, magic=%#04x/%#04x, "
190 "nodetype=%#04x/%#04x, totlen=%u/%u, xid=%u/%u, version=%u/%u\n", 161 "nodetype=%#04x/%#04x, totlen=%u/%u, xid=%u/%u, version=%u/%u\n",
191 ref_offset(xd->node), je16_to_cpu(rx.magic), JFFS2_MAGIC_BITMASK, 162 offset, je16_to_cpu(rx.magic), JFFS2_MAGIC_BITMASK,
192 je16_to_cpu(rx.nodetype), JFFS2_NODETYPE_XATTR, 163 je16_to_cpu(rx.nodetype), JFFS2_NODETYPE_XATTR,
193 je32_to_cpu(rx.totlen), totlen, 164 je32_to_cpu(rx.totlen), totlen,
194 je32_to_cpu(rx.xid), xd->xid, 165 je32_to_cpu(rx.xid), xd->xid,
195 je32_to_cpu(rx.version), xd->version); 166 je32_to_cpu(rx.version), xd->version);
167 xd->flags |= JFFS2_XFLAGS_INVALID;
196 return EIO; 168 return EIO;
197 } 169 }
198 xd->xprefix = rx.xprefix; 170 xd->xprefix = rx.xprefix;
@@ -200,14 +172,17 @@ static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_dat
200 xd->value_len = je16_to_cpu(rx.value_len); 172 xd->value_len = je16_to_cpu(rx.value_len);
201 xd->data_crc = je32_to_cpu(rx.data_crc); 173 xd->data_crc = je32_to_cpu(rx.data_crc);
202 174
203 /* This JFFS2_NODETYPE_XATTR node is checked */
204 jeb = &c->blocks[ref_offset(xd->node) / c->sector_size];
205 totlen = PAD(je32_to_cpu(rx.totlen));
206
207 spin_lock(&c->erase_completion_lock); 175 spin_lock(&c->erase_completion_lock);
208 c->unchecked_size -= totlen; c->used_size += totlen; 176 complete:
209 jeb->unchecked_size -= totlen; jeb->used_size += totlen; 177 for (raw=xd->node; raw != (void *)xd; raw=raw->next_in_ino) {
210 xd->node->flash_offset = ref_offset(xd->node) | REF_PRISTINE; 178 jeb = &c->blocks[ref_offset(raw) / c->sector_size];
179 totlen = PAD(ref_totlen(c, jeb, raw));
180 if (ref_flags(raw) == REF_UNCHECKED) {
181 c->unchecked_size -= totlen; c->used_size += totlen;
182 jeb->unchecked_size -= totlen; jeb->used_size += totlen;
183 }
184 raw->flash_offset = ref_offset(raw) | ((xd->node==raw) ? REF_PRISTINE : REF_NORMAL);
185 }
211 spin_unlock(&c->erase_completion_lock); 186 spin_unlock(&c->erase_completion_lock);
212 187
213 /* unchecked xdatum is chained with c->xattr_unchecked */ 188 /* unchecked xdatum is chained with c->xattr_unchecked */
@@ -227,7 +202,6 @@ static int do_load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum
227 uint32_t crc, length; 202 uint32_t crc, length;
228 int i, ret, retry = 0; 203 int i, ret, retry = 0;
229 204
230 BUG_ON(!xd->node);
231 BUG_ON(ref_flags(xd->node) != REF_PRISTINE); 205 BUG_ON(ref_flags(xd->node) != REF_PRISTINE);
232 BUG_ON(!list_empty(&xd->xindex)); 206 BUG_ON(!list_empty(&xd->xindex));
233 retry: 207 retry:
@@ -253,6 +227,7 @@ static int do_load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum
253 " at %#08x, read: 0x%08x calculated: 0x%08x\n", 227 " at %#08x, read: 0x%08x calculated: 0x%08x\n",
254 ref_offset(xd->node), xd->data_crc, crc); 228 ref_offset(xd->node), xd->data_crc, crc);
255 kfree(data); 229 kfree(data);
230 xd->flags |= JFFS2_XFLAGS_INVALID;
256 return EIO; 231 return EIO;
257 } 232 }
258 233
@@ -286,16 +261,14 @@ static int load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *x
286 * rc > 0 : Unrecoverable error, this node should be deleted. 261 * rc > 0 : Unrecoverable error, this node should be deleted.
287 */ 262 */
288 int rc = 0; 263 int rc = 0;
289 BUG_ON(xd->xname); 264
290 if (!xd->node) 265 BUG_ON(xd->flags & JFFS2_XFLAGS_DEAD);
266 if (xd->xname)
267 return 0;
268 if (xd->flags & JFFS2_XFLAGS_INVALID)
291 return EIO; 269 return EIO;
292 if (unlikely(ref_flags(xd->node) != REF_PRISTINE)) { 270 if (unlikely(is_xattr_datum_unchecked(c, xd)))
293 rc = do_verify_xattr_datum(c, xd); 271 rc = do_verify_xattr_datum(c, xd);
294 if (rc > 0) {
295 list_del_init(&xd->xindex);
296 delete_xattr_datum_node(c, xd);
297 }
298 }
299 if (!rc) 272 if (!rc)
300 rc = do_load_xattr_datum(c, xd); 273 rc = do_load_xattr_datum(c, xd);
301 return rc; 274 return rc;
@@ -304,7 +277,6 @@ static int load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *x
304static int save_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) 277static int save_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
305{ 278{
306 /* must be called under down_write(xattr_sem) */ 279 /* must be called under down_write(xattr_sem) */
307 struct jffs2_raw_node_ref *raw;
308 struct jffs2_raw_xattr rx; 280 struct jffs2_raw_xattr rx;
309 struct kvec vecs[2]; 281 struct kvec vecs[2];
310 size_t length; 282 size_t length;
@@ -312,14 +284,16 @@ static int save_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *x
312 uint32_t phys_ofs = write_ofs(c); 284 uint32_t phys_ofs = write_ofs(c);
313 285
314 BUG_ON(!xd->xname); 286 BUG_ON(!xd->xname);
287 BUG_ON(xd->flags & (JFFS2_XFLAGS_DEAD|JFFS2_XFLAGS_INVALID));
315 288
316 vecs[0].iov_base = &rx; 289 vecs[0].iov_base = &rx;
317 vecs[0].iov_len = PAD(sizeof(rx)); 290 vecs[0].iov_len = sizeof(rx);
318 vecs[1].iov_base = xd->xname; 291 vecs[1].iov_base = xd->xname;
319 vecs[1].iov_len = xd->name_len + 1 + xd->value_len; 292 vecs[1].iov_len = xd->name_len + 1 + xd->value_len;
320 totlen = vecs[0].iov_len + vecs[1].iov_len; 293 totlen = vecs[0].iov_len + vecs[1].iov_len;
321 294
322 /* Setup raw-xattr */ 295 /* Setup raw-xattr */
296 memset(&rx, 0, sizeof(rx));
323 rx.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); 297 rx.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
324 rx.nodetype = cpu_to_je16(JFFS2_NODETYPE_XATTR); 298 rx.nodetype = cpu_to_je16(JFFS2_NODETYPE_XATTR);
325 rx.totlen = cpu_to_je32(PAD(totlen)); 299 rx.totlen = cpu_to_je32(PAD(totlen));
@@ -343,14 +317,8 @@ static int save_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *x
343 317
344 return rc; 318 return rc;
345 } 319 }
346
347 /* success */ 320 /* success */
348 raw = jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(totlen), NULL); 321 jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(totlen), (void *)xd);
349 /* FIXME */ raw->next_in_ino = (void *)xd;
350
351 if (xd->node)
352 delete_xattr_datum_node(c, xd);
353 xd->node = raw;
354 322
355 dbg_xattr("success on saving xdatum (xid=%u, version=%u, xprefix=%u, xname='%s')\n", 323 dbg_xattr("success on saving xdatum (xid=%u, version=%u, xprefix=%u, xname='%s')\n",
356 xd->xid, xd->version, xd->xprefix, xd->xname); 324 xd->xid, xd->version, xd->xprefix, xd->xname);
@@ -377,7 +345,7 @@ static struct jffs2_xattr_datum *create_xattr_datum(struct jffs2_sb_info *c,
377 && xd->value_len==xsize 345 && xd->value_len==xsize
378 && !strcmp(xd->xname, xname) 346 && !strcmp(xd->xname, xname)
379 && !memcmp(xd->xvalue, xvalue, xsize)) { 347 && !memcmp(xd->xvalue, xvalue, xsize)) {
380 xd->refcnt++; 348 atomic_inc(&xd->refcnt);
381 return xd; 349 return xd;
382 } 350 }
383 } 351 }
@@ -397,7 +365,7 @@ static struct jffs2_xattr_datum *create_xattr_datum(struct jffs2_sb_info *c,
397 strcpy(data, xname); 365 strcpy(data, xname);
398 memcpy(data + name_len + 1, xvalue, xsize); 366 memcpy(data + name_len + 1, xvalue, xsize);
399 367
400 xd->refcnt = 1; 368 atomic_set(&xd->refcnt, 1);
401 xd->xid = ++c->highest_xid; 369 xd->xid = ++c->highest_xid;
402 xd->flags |= JFFS2_XFLAGS_HOT; 370 xd->flags |= JFFS2_XFLAGS_HOT;
403 xd->xprefix = xprefix; 371 xd->xprefix = xprefix;
@@ -426,20 +394,36 @@ static struct jffs2_xattr_datum *create_xattr_datum(struct jffs2_sb_info *c,
426 return xd; 394 return xd;
427} 395}
428 396
397static void delete_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
398{
399 /* must be called under down_write(xattr_sem) */
400 BUG_ON(atomic_read(&xd->refcnt));
401
402 unload_xattr_datum(c, xd);
403 xd->flags |= JFFS2_XFLAGS_DEAD;
404 spin_lock(&c->erase_completion_lock);
405 if (xd->node == (void *)xd) {
406 BUG_ON(!(xd->flags & JFFS2_XFLAGS_INVALID));
407 jffs2_free_xattr_datum(xd);
408 } else {
409 list_add(&xd->xindex, &c->xattr_dead_list);
410 }
411 spin_unlock(&c->erase_completion_lock);
412 dbg_xattr("xdatum(xid=%u, version=%u) was removed.\n", xd->xid, xd->version);
413}
414
429/* -------- xref related functions ------------------ 415/* -------- xref related functions ------------------
430 * verify_xattr_ref(c, ref) 416 * verify_xattr_ref(c, ref)
431 * is used to load xref information from medium. Because summary data does not 417 * is used to load xref information from medium. Because summary data does not
432 * contain xid/ino, it's necessary to verify once while mounting process. 418 * contain xid/ino, it's necessary to verify once while mounting process.
433 * delete_xattr_ref_node(c, ref)
434 * is used to delete a jffs2 node is dominated by xref. When EBS is enabled,
435 * it overwrites the obsolete node by myself.
436 * delete_xattr_ref(c, ref)
437 * is used to delete jffs2_xattr_ref object. If the reference counter of xdatum
438 * is refered by this xref become 0, delete_xattr_datum() is called later.
439 * save_xattr_ref(c, ref) 419 * save_xattr_ref(c, ref)
440 * is used to write xref to medium. 420 * is used to write xref to medium. If delete marker is marked, it write
421 * a delete marker of xref into medium.
441 * create_xattr_ref(c, ic, xd) 422 * create_xattr_ref(c, ic, xd)
442 * is used to create a new xref and write to medium. 423 * is used to create a new xref and write to medium.
424 * delete_xattr_ref(c, ref)
425 * is used to delete jffs2_xattr_ref. It marks xref XREF_DELETE_MARKER,
426 * and allows GC to reclaim those physical nodes.
443 * jffs2_xattr_delete_inode(c, ic) 427 * jffs2_xattr_delete_inode(c, ic)
444 * is called to remove xrefs related to obsolete inode when inode is unlinked. 428 * is called to remove xrefs related to obsolete inode when inode is unlinked.
445 * jffs2_xattr_free_inode(c, ic) 429 * jffs2_xattr_free_inode(c, ic)
@@ -450,25 +434,29 @@ static struct jffs2_xattr_datum *create_xattr_datum(struct jffs2_sb_info *c,
450static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) 434static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
451{ 435{
452 struct jffs2_eraseblock *jeb; 436 struct jffs2_eraseblock *jeb;
437 struct jffs2_raw_node_ref *raw;
453 struct jffs2_raw_xref rr; 438 struct jffs2_raw_xref rr;
454 size_t readlen; 439 size_t readlen;
455 uint32_t crc, totlen; 440 uint32_t crc, offset, totlen;
456 int rc; 441 int rc;
457 442
458 BUG_ON(ref_flags(ref->node) != REF_UNCHECKED); 443 spin_lock(&c->erase_completion_lock);
444 if (ref_flags(ref->node) != REF_UNCHECKED)
445 goto complete;
446 offset = ref_offset(ref->node);
447 spin_unlock(&c->erase_completion_lock);
459 448
460 rc = jffs2_flash_read(c, ref_offset(ref->node), sizeof(rr), &readlen, (char *)&rr); 449 rc = jffs2_flash_read(c, offset, sizeof(rr), &readlen, (char *)&rr);
461 if (rc || sizeof(rr) != readlen) { 450 if (rc || sizeof(rr) != readlen) {
462 JFFS2_WARNING("jffs2_flash_read()=%d, req=%zu, read=%zu, at %#08x\n", 451 JFFS2_WARNING("jffs2_flash_read()=%d, req=%zu, read=%zu, at %#08x\n",
463 rc, sizeof(rr), readlen, ref_offset(ref->node)); 452 rc, sizeof(rr), readlen, offset);
464 return rc ? rc : -EIO; 453 return rc ? rc : -EIO;
465 } 454 }
466 /* obsolete node */ 455 /* obsolete node */
467 crc = crc32(0, &rr, sizeof(rr) - 4); 456 crc = crc32(0, &rr, sizeof(rr) - 4);
468 if (crc != je32_to_cpu(rr.node_crc)) { 457 if (crc != je32_to_cpu(rr.node_crc)) {
469 if (je32_to_cpu(rr.node_crc) != 0xffffffff) 458 JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
470 JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", 459 offset, je32_to_cpu(rr.node_crc), crc);
471 ref_offset(ref->node), je32_to_cpu(rr.node_crc), crc);
472 return EIO; 460 return EIO;
473 } 461 }
474 if (je16_to_cpu(rr.magic) != JFFS2_MAGIC_BITMASK 462 if (je16_to_cpu(rr.magic) != JFFS2_MAGIC_BITMASK
@@ -476,22 +464,28 @@ static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref
476 || je32_to_cpu(rr.totlen) != PAD(sizeof(rr))) { 464 || je32_to_cpu(rr.totlen) != PAD(sizeof(rr))) {
477 JFFS2_ERROR("inconsistent xref at %#08x, magic=%#04x/%#04x, " 465 JFFS2_ERROR("inconsistent xref at %#08x, magic=%#04x/%#04x, "
478 "nodetype=%#04x/%#04x, totlen=%u/%zu\n", 466 "nodetype=%#04x/%#04x, totlen=%u/%zu\n",
479 ref_offset(ref->node), je16_to_cpu(rr.magic), JFFS2_MAGIC_BITMASK, 467 offset, je16_to_cpu(rr.magic), JFFS2_MAGIC_BITMASK,
480 je16_to_cpu(rr.nodetype), JFFS2_NODETYPE_XREF, 468 je16_to_cpu(rr.nodetype), JFFS2_NODETYPE_XREF,
481 je32_to_cpu(rr.totlen), PAD(sizeof(rr))); 469 je32_to_cpu(rr.totlen), PAD(sizeof(rr)));
482 return EIO; 470 return EIO;
483 } 471 }
484 ref->ino = je32_to_cpu(rr.ino); 472 ref->ino = je32_to_cpu(rr.ino);
485 ref->xid = je32_to_cpu(rr.xid); 473 ref->xid = je32_to_cpu(rr.xid);
486 474 ref->xseqno = je32_to_cpu(rr.xseqno);
487 /* fixup superblock/eraseblock info */ 475 if (ref->xseqno > c->highest_xseqno)
488 jeb = &c->blocks[ref_offset(ref->node) / c->sector_size]; 476 c->highest_xseqno = (ref->xseqno & ~XREF_DELETE_MARKER);
489 totlen = PAD(sizeof(rr));
490 477
491 spin_lock(&c->erase_completion_lock); 478 spin_lock(&c->erase_completion_lock);
492 c->unchecked_size -= totlen; c->used_size += totlen; 479 complete:
493 jeb->unchecked_size -= totlen; jeb->used_size += totlen; 480 for (raw=ref->node; raw != (void *)ref; raw=raw->next_in_ino) {
494 ref->node->flash_offset = ref_offset(ref->node) | REF_PRISTINE; 481 jeb = &c->blocks[ref_offset(raw) / c->sector_size];
482 totlen = PAD(ref_totlen(c, jeb, raw));
483 if (ref_flags(raw) == REF_UNCHECKED) {
484 c->unchecked_size -= totlen; c->used_size += totlen;
485 jeb->unchecked_size -= totlen; jeb->used_size += totlen;
486 }
487 raw->flash_offset = ref_offset(raw) | ((ref->node==raw) ? REF_PRISTINE : REF_NORMAL);
488 }
495 spin_unlock(&c->erase_completion_lock); 489 spin_unlock(&c->erase_completion_lock);
496 490
497 dbg_xattr("success on verifying xref (ino=%u, xid=%u) at %#08x\n", 491 dbg_xattr("success on verifying xref (ino=%u, xid=%u) at %#08x\n",
@@ -499,58 +493,12 @@ static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref
499 return 0; 493 return 0;
500} 494}
501 495
502static void delete_xattr_ref_node(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
503{
504 struct jffs2_raw_xref rr;
505 size_t length;
506 int rc;
507
508 if (jffs2_sum_active()) {
509 memset(&rr, 0xff, sizeof(rr));
510 rc = jffs2_flash_read(c, ref_offset(ref->node),
511 sizeof(struct jffs2_unknown_node),
512 &length, (char *)&rr);
513 if (rc || length != sizeof(struct jffs2_unknown_node)) {
514 JFFS2_ERROR("jffs2_flash_read()=%d, req=%zu, read=%zu at %#08x\n",
515 rc, sizeof(struct jffs2_unknown_node),
516 length, ref_offset(ref->node));
517 }
518 rc = jffs2_flash_write(c, ref_offset(ref->node), sizeof(rr),
519 &length, (char *)&rr);
520 if (rc || length != sizeof(struct jffs2_raw_xref)) {
521 JFFS2_ERROR("jffs2_flash_write()=%d, req=%zu, wrote=%zu at %#08x\n",
522 rc, sizeof(rr), length, ref_offset(ref->node));
523 }
524 }
525 spin_lock(&c->erase_completion_lock);
526 ref->node->next_in_ino = NULL;
527 spin_unlock(&c->erase_completion_lock);
528 jffs2_mark_node_obsolete(c, ref->node);
529 ref->node = NULL;
530}
531
532static void delete_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
533{
534 /* must be called under down_write(xattr_sem) */
535 struct jffs2_xattr_datum *xd;
536
537 BUG_ON(!ref->node);
538 delete_xattr_ref_node(c, ref);
539
540 xd = ref->xd;
541 xd->refcnt--;
542 if (!xd->refcnt)
543 delete_xattr_datum(c, xd);
544 jffs2_free_xattr_ref(ref);
545}
546
547static int save_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) 496static int save_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
548{ 497{
549 /* must be called under down_write(xattr_sem) */ 498 /* must be called under down_write(xattr_sem) */
550 struct jffs2_raw_node_ref *raw;
551 struct jffs2_raw_xref rr; 499 struct jffs2_raw_xref rr;
552 size_t length; 500 size_t length;
553 uint32_t phys_ofs = write_ofs(c); 501 uint32_t xseqno, phys_ofs = write_ofs(c);
554 int ret; 502 int ret;
555 503
556 rr.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); 504 rr.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
@@ -558,8 +506,16 @@ static int save_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
558 rr.totlen = cpu_to_je32(PAD(sizeof(rr))); 506 rr.totlen = cpu_to_je32(PAD(sizeof(rr)));
559 rr.hdr_crc = cpu_to_je32(crc32(0, &rr, sizeof(struct jffs2_unknown_node) - 4)); 507 rr.hdr_crc = cpu_to_je32(crc32(0, &rr, sizeof(struct jffs2_unknown_node) - 4));
560 508
561 rr.ino = cpu_to_je32(ref->ic->ino); 509 xseqno = (c->highest_xseqno += 2);
562 rr.xid = cpu_to_je32(ref->xd->xid); 510 if (is_xattr_ref_dead(ref)) {
511 xseqno |= XREF_DELETE_MARKER;
512 rr.ino = cpu_to_je32(ref->ino);
513 rr.xid = cpu_to_je32(ref->xid);
514 } else {
515 rr.ino = cpu_to_je32(ref->ic->ino);
516 rr.xid = cpu_to_je32(ref->xd->xid);
517 }
518 rr.xseqno = cpu_to_je32(xseqno);
563 rr.node_crc = cpu_to_je32(crc32(0, &rr, sizeof(rr) - 4)); 519 rr.node_crc = cpu_to_je32(crc32(0, &rr, sizeof(rr) - 4));
564 520
565 ret = jffs2_flash_write(c, phys_ofs, sizeof(rr), &length, (char *)&rr); 521 ret = jffs2_flash_write(c, phys_ofs, sizeof(rr), &length, (char *)&rr);
@@ -572,12 +528,9 @@ static int save_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
572 528
573 return ret; 529 return ret;
574 } 530 }
575 531 /* success */
576 raw = jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(sizeof(rr)), NULL); 532 ref->xseqno = xseqno;
577 /* FIXME */ raw->next_in_ino = (void *)ref; 533 jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(sizeof(rr)), (void *)ref);
578 if (ref->node)
579 delete_xattr_ref_node(c, ref);
580 ref->node = raw;
581 534
582 dbg_xattr("success on saving xref (ino=%u, xid=%u)\n", ref->ic->ino, ref->xd->xid); 535 dbg_xattr("success on saving xref (ino=%u, xid=%u)\n", ref->ic->ino, ref->xd->xid);
583 536
@@ -610,6 +563,27 @@ static struct jffs2_xattr_ref *create_xattr_ref(struct jffs2_sb_info *c, struct
610 return ref; /* success */ 563 return ref; /* success */
611} 564}
612 565
566static void delete_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
567{
568 /* must be called under down_write(xattr_sem) */
569 struct jffs2_xattr_datum *xd;
570
571 xd = ref->xd;
572 ref->xseqno |= XREF_DELETE_MARKER;
573 ref->ino = ref->ic->ino;
574 ref->xid = ref->xd->xid;
575 spin_lock(&c->erase_completion_lock);
576 ref->next = c->xref_dead_list;
577 c->xref_dead_list = ref;
578 spin_unlock(&c->erase_completion_lock);
579
580 dbg_xattr("xref(ino=%u, xid=%u, xseqno=%u) was removed.\n",
581 ref->ino, ref->xid, ref->xseqno);
582
583 if (atomic_dec_and_test(&xd->refcnt))
584 delete_xattr_datum(c, xd);
585}
586
613void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) 587void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
614{ 588{
615 /* It's called from jffs2_clear_inode() on inode removing. 589 /* It's called from jffs2_clear_inode() on inode removing.
@@ -638,8 +612,7 @@ void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *i
638 for (ref = ic->xref; ref; ref = _ref) { 612 for (ref = ic->xref; ref; ref = _ref) {
639 _ref = ref->next; 613 _ref = ref->next;
640 xd = ref->xd; 614 xd = ref->xd;
641 xd->refcnt--; 615 if (atomic_dec_and_test(&xd->refcnt)) {
642 if (!xd->refcnt) {
643 unload_xattr_datum(c, xd); 616 unload_xattr_datum(c, xd);
644 jffs2_free_xattr_datum(xd); 617 jffs2_free_xattr_datum(xd);
645 } 618 }
@@ -655,7 +628,7 @@ static int check_xattr_ref_inode(struct jffs2_sb_info *c, struct jffs2_inode_cac
655 * duplicate name/value pairs. If duplicate name/value pair would be found, 628 * duplicate name/value pairs. If duplicate name/value pair would be found,
656 * one will be removed. 629 * one will be removed.
657 */ 630 */
658 struct jffs2_xattr_ref *ref, *cmp, **pref; 631 struct jffs2_xattr_ref *ref, *cmp, **pref, **pcmp;
659 int rc = 0; 632 int rc = 0;
660 633
661 if (likely(ic->flags & INO_FLAGS_XATTR_CHECKED)) 634 if (likely(ic->flags & INO_FLAGS_XATTR_CHECKED))
@@ -673,13 +646,13 @@ static int check_xattr_ref_inode(struct jffs2_sb_info *c, struct jffs2_inode_cac
673 } else if (unlikely(rc < 0)) 646 } else if (unlikely(rc < 0))
674 goto out; 647 goto out;
675 } 648 }
676 for (cmp=ref->next, pref=&ref->next; cmp; pref=&cmp->next, cmp=cmp->next) { 649 for (cmp=ref->next, pcmp=&ref->next; cmp; pcmp=&cmp->next, cmp=cmp->next) {
677 if (!cmp->xd->xname) { 650 if (!cmp->xd->xname) {
678 ref->xd->flags |= JFFS2_XFLAGS_BIND; 651 ref->xd->flags |= JFFS2_XFLAGS_BIND;
679 rc = load_xattr_datum(c, cmp->xd); 652 rc = load_xattr_datum(c, cmp->xd);
680 ref->xd->flags &= ~JFFS2_XFLAGS_BIND; 653 ref->xd->flags &= ~JFFS2_XFLAGS_BIND;
681 if (unlikely(rc > 0)) { 654 if (unlikely(rc > 0)) {
682 *pref = cmp->next; 655 *pcmp = cmp->next;
683 delete_xattr_ref(c, cmp); 656 delete_xattr_ref(c, cmp);
684 goto retry; 657 goto retry;
685 } else if (unlikely(rc < 0)) 658 } else if (unlikely(rc < 0))
@@ -687,8 +660,13 @@ static int check_xattr_ref_inode(struct jffs2_sb_info *c, struct jffs2_inode_cac
687 } 660 }
688 if (ref->xd->xprefix == cmp->xd->xprefix 661 if (ref->xd->xprefix == cmp->xd->xprefix
689 && !strcmp(ref->xd->xname, cmp->xd->xname)) { 662 && !strcmp(ref->xd->xname, cmp->xd->xname)) {
690 *pref = cmp->next; 663 if (ref->xseqno > cmp->xseqno) {
691 delete_xattr_ref(c, cmp); 664 *pcmp = cmp->next;
665 delete_xattr_ref(c, cmp);
666 } else {
667 *pref = ref->next;
668 delete_xattr_ref(c, ref);
669 }
692 goto retry; 670 goto retry;
693 } 671 }
694 } 672 }
@@ -719,9 +697,13 @@ void jffs2_init_xattr_subsystem(struct jffs2_sb_info *c)
719 for (i=0; i < XATTRINDEX_HASHSIZE; i++) 697 for (i=0; i < XATTRINDEX_HASHSIZE; i++)
720 INIT_LIST_HEAD(&c->xattrindex[i]); 698 INIT_LIST_HEAD(&c->xattrindex[i]);
721 INIT_LIST_HEAD(&c->xattr_unchecked); 699 INIT_LIST_HEAD(&c->xattr_unchecked);
700 INIT_LIST_HEAD(&c->xattr_dead_list);
701 c->xref_dead_list = NULL;
722 c->xref_temp = NULL; 702 c->xref_temp = NULL;
723 703
724 init_rwsem(&c->xattr_sem); 704 init_rwsem(&c->xattr_sem);
705 c->highest_xid = 0;
706 c->highest_xseqno = 0;
725 c->xdatum_mem_usage = 0; 707 c->xdatum_mem_usage = 0;
726 c->xdatum_mem_threshold = 32 * 1024; /* Default 32KB */ 708 c->xdatum_mem_threshold = 32 * 1024; /* Default 32KB */
727} 709}
@@ -751,7 +733,11 @@ void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c)
751 _ref = ref->next; 733 _ref = ref->next;
752 jffs2_free_xattr_ref(ref); 734 jffs2_free_xattr_ref(ref);
753 } 735 }
754 c->xref_temp = NULL; 736
737 for (ref=c->xref_dead_list; ref; ref = _ref) {
738 _ref = ref->next;
739 jffs2_free_xattr_ref(ref);
740 }
755 741
756 for (i=0; i < XATTRINDEX_HASHSIZE; i++) { 742 for (i=0; i < XATTRINDEX_HASHSIZE; i++) {
757 list_for_each_entry_safe(xd, _xd, &c->xattrindex[i], xindex) { 743 list_for_each_entry_safe(xd, _xd, &c->xattrindex[i], xindex) {
@@ -761,100 +747,143 @@ void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c)
761 jffs2_free_xattr_datum(xd); 747 jffs2_free_xattr_datum(xd);
762 } 748 }
763 } 749 }
750
751 list_for_each_entry_safe(xd, _xd, &c->xattr_dead_list, xindex) {
752 list_del(&xd->xindex);
753 jffs2_free_xattr_datum(xd);
754 }
764} 755}
765 756
757#define XREF_TMPHASH_SIZE (128)
766void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c) 758void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
767{ 759{
768 struct jffs2_xattr_ref *ref, *_ref; 760 struct jffs2_xattr_ref *ref, *_ref;
761 struct jffs2_xattr_ref *xref_tmphash[XREF_TMPHASH_SIZE];
769 struct jffs2_xattr_datum *xd, *_xd; 762 struct jffs2_xattr_datum *xd, *_xd;
770 struct jffs2_inode_cache *ic; 763 struct jffs2_inode_cache *ic;
771 int i, xdatum_count =0, xdatum_unchecked_count = 0, xref_count = 0; 764 struct jffs2_raw_node_ref *raw;
765 int i, xdatum_count = 0, xdatum_unchecked_count = 0, xref_count = 0;
766 int xdatum_orphan_count = 0, xref_orphan_count = 0, xref_dead_count = 0;
772 767
773 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING)); 768 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
774 769
775 /* Phase.1 */ 770 /* Phase.1 : Merge same xref */
771 for (i=0; i < XREF_TMPHASH_SIZE; i++)
772 xref_tmphash[i] = NULL;
776 for (ref=c->xref_temp; ref; ref=_ref) { 773 for (ref=c->xref_temp; ref; ref=_ref) {
774 struct jffs2_xattr_ref *tmp;
775
777 _ref = ref->next; 776 _ref = ref->next;
778 /* checking REF_UNCHECKED nodes */
779 if (ref_flags(ref->node) != REF_PRISTINE) { 777 if (ref_flags(ref->node) != REF_PRISTINE) {
780 if (verify_xattr_ref(c, ref)) { 778 if (verify_xattr_ref(c, ref)) {
781 delete_xattr_ref_node(c, ref); 779 BUG_ON(ref->node->next_in_ino != (void *)ref);
780 ref->node->next_in_ino = NULL;
781 jffs2_mark_node_obsolete(c, ref->node);
782 jffs2_free_xattr_ref(ref); 782 jffs2_free_xattr_ref(ref);
783 continue; 783 continue;
784 } 784 }
785 } 785 }
786 /* At this point, ref->xid and ref->ino contain XID and inode number. 786
787 ref->xd and ref->ic are not valid yet. */ 787 i = (ref->ino ^ ref->xid) % XREF_TMPHASH_SIZE;
788 xd = jffs2_find_xattr_datum(c, ref->xid); 788 for (tmp=xref_tmphash[i]; tmp; tmp=tmp->next) {
789 ic = jffs2_get_ino_cache(c, ref->ino); 789 if (tmp->ino == ref->ino && tmp->xid == ref->xid)
790 if (!xd || !ic) { 790 break;
791 if (ref_flags(ref->node) != REF_UNCHECKED) 791 }
792 JFFS2_WARNING("xref(ino=%u, xid=%u) is orphan. \n", 792 if (tmp) {
793 ref->ino, ref->xid); 793 raw = ref->node;
794 delete_xattr_ref_node(c, ref); 794 if (ref->xseqno > tmp->xseqno) {
795 tmp->xseqno = ref->xseqno;
796 raw->next_in_ino = tmp->node;
797 tmp->node = raw;
798 } else {
799 raw->next_in_ino = tmp->node->next_in_ino;
800 tmp->node->next_in_ino = raw;
801 }
795 jffs2_free_xattr_ref(ref); 802 jffs2_free_xattr_ref(ref);
796 continue; 803 continue;
804 } else {
805 ref->next = xref_tmphash[i];
806 xref_tmphash[i] = ref;
797 } 807 }
798 ref->xd = xd;
799 ref->ic = ic;
800 xd->refcnt++;
801 ref->next = ic->xref;
802 ic->xref = ref;
803 xref_count++;
804 } 808 }
805 c->xref_temp = NULL; 809 c->xref_temp = NULL;
806 /* After this, ref->xid/ino are NEVER used. */
807 810
808 /* Phase.2 */ 811 /* Phase.2 : Bind xref with inode_cache and xattr_datum */
812 for (i=0; i < XREF_TMPHASH_SIZE; i++) {
813 for (ref=xref_tmphash[i]; ref; ref=_ref) {
814 xref_count++;
815 _ref = ref->next;
816 if (is_xattr_ref_dead(ref)) {
817 ref->next = c->xref_dead_list;
818 c->xref_dead_list = ref;
819 xref_dead_count++;
820 continue;
821 }
822 /* At this point, ref->xid and ref->ino contain XID and inode number.
823 ref->xd and ref->ic are not valid yet. */
824 xd = jffs2_find_xattr_datum(c, ref->xid);
825 ic = jffs2_get_ino_cache(c, ref->ino);
826 if (!xd || !ic) {
827 dbg_xattr("xref(ino=%u, xid=%u, xseqno=%u) is orphan.\n",
828 ref->ino, ref->xid, ref->xseqno);
829 ref->xseqno |= XREF_DELETE_MARKER;
830 ref->next = c->xref_dead_list;
831 c->xref_dead_list = ref;
832 xref_orphan_count++;
833 continue;
834 }
835 ref->xd = xd;
836 ref->ic = ic;
837 atomic_inc(&xd->refcnt);
838 ref->next = ic->xref;
839 ic->xref = ref;
840 }
841 }
842
843 /* Phase.3 : Link unchecked xdatum to xattr_unchecked list */
809 for (i=0; i < XATTRINDEX_HASHSIZE; i++) { 844 for (i=0; i < XATTRINDEX_HASHSIZE; i++) {
810 list_for_each_entry_safe(xd, _xd, &c->xattrindex[i], xindex) { 845 list_for_each_entry_safe(xd, _xd, &c->xattrindex[i], xindex) {
846 xdatum_count++;
811 list_del_init(&xd->xindex); 847 list_del_init(&xd->xindex);
812 if (!xd->refcnt) { 848 if (!atomic_read(&xd->refcnt)) {
813 if (ref_flags(xd->node) != REF_UNCHECKED) 849 dbg_xattr("xdatum(xid=%u, version=%u) is orphan.\n",
814 JFFS2_WARNING("orphan xdatum(xid=%u, version=%u) at %#08x\n", 850 xd->xid, xd->version);
815 xd->xid, xd->version, ref_offset(xd->node)); 851 xd->flags |= JFFS2_XFLAGS_DEAD;
816 delete_xattr_datum(c, xd); 852 list_add(&xd->xindex, &c->xattr_unchecked);
853 xdatum_orphan_count++;
817 continue; 854 continue;
818 } 855 }
819 if (ref_flags(xd->node) != REF_PRISTINE) { 856 if (is_xattr_datum_unchecked(c, xd)) {
820 dbg_xattr("unchecked xdatum(xid=%u) at %#08x\n", 857 dbg_xattr("unchecked xdatum(xid=%u, version=%u)\n",
821 xd->xid, ref_offset(xd->node)); 858 xd->xid, xd->version);
822 list_add(&xd->xindex, &c->xattr_unchecked); 859 list_add(&xd->xindex, &c->xattr_unchecked);
823 xdatum_unchecked_count++; 860 xdatum_unchecked_count++;
824 } 861 }
825 xdatum_count++;
826 } 862 }
827 } 863 }
828 /* build complete */ 864 /* build complete */
829 JFFS2_NOTICE("complete building xattr subsystem, %u of xdatum (%u unchecked) and " 865 JFFS2_NOTICE("complete building xattr subsystem, %u of xdatum"
830 "%u of xref found.\n", xdatum_count, xdatum_unchecked_count, xref_count); 866 " (%u unchecked, %u orphan) and "
867 "%u of xref (%u dead, %u orphan) found.\n",
868 xdatum_count, xdatum_unchecked_count, xdatum_orphan_count,
869 xref_count, xref_dead_count, xref_orphan_count);
831} 870}
832 871
833struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c, 872struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c,
834 uint32_t xid, uint32_t version) 873 uint32_t xid, uint32_t version)
835{ 874{
836 struct jffs2_xattr_datum *xd, *_xd; 875 struct jffs2_xattr_datum *xd;
837 876
838 _xd = jffs2_find_xattr_datum(c, xid); 877 xd = jffs2_find_xattr_datum(c, xid);
839 if (_xd) { 878 if (!xd) {
840 dbg_xattr("duplicate xdatum (xid=%u, version=%u/%u) at %#08x\n", 879 xd = jffs2_alloc_xattr_datum();
841 xid, version, _xd->version, ref_offset(_xd->node)); 880 if (!xd)
842 if (version < _xd->version) 881 return ERR_PTR(-ENOMEM);
843 return ERR_PTR(-EEXIST); 882 xd->xid = xid;
844 } 883 xd->version = version;
845 xd = jffs2_alloc_xattr_datum(); 884 if (xd->xid > c->highest_xid)
846 if (!xd) 885 c->highest_xid = xd->xid;
847 return ERR_PTR(-ENOMEM); 886 list_add_tail(&xd->xindex, &c->xattrindex[xid % XATTRINDEX_HASHSIZE]);
848 xd->xid = xid;
849 xd->version = version;
850 if (xd->xid > c->highest_xid)
851 c->highest_xid = xd->xid;
852 list_add_tail(&xd->xindex, &c->xattrindex[xid % XATTRINDEX_HASHSIZE]);
853
854 if (_xd) {
855 list_del_init(&_xd->xindex);
856 delete_xattr_datum_node(c, _xd);
857 jffs2_free_xattr_datum(_xd);
858 } 887 }
859 return xd; 888 return xd;
860} 889}
@@ -1080,9 +1109,23 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
1080 goto out; 1109 goto out;
1081 } 1110 }
1082 if (!buffer) { 1111 if (!buffer) {
1083 *pref = ref->next; 1112 ref->ino = ic->ino;
1084 delete_xattr_ref(c, ref); 1113 ref->xid = xd->xid;
1085 rc = 0; 1114 ref->xseqno |= XREF_DELETE_MARKER;
1115 rc = save_xattr_ref(c, ref);
1116 if (!rc) {
1117 *pref = ref->next;
1118 spin_lock(&c->erase_completion_lock);
1119 ref->next = c->xref_dead_list;
1120 c->xref_dead_list = ref;
1121 spin_unlock(&c->erase_completion_lock);
1122 if (atomic_dec_and_test(&xd->refcnt))
1123 delete_xattr_datum(c, xd);
1124 } else {
1125 ref->ic = ic;
1126 ref->xd = xd;
1127 ref->xseqno &= ~XREF_DELETE_MARKER;
1128 }
1086 goto out; 1129 goto out;
1087 } 1130 }
1088 goto found; 1131 goto found;
@@ -1094,7 +1137,7 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
1094 goto out; 1137 goto out;
1095 } 1138 }
1096 if (!buffer) { 1139 if (!buffer) {
1097 rc = -EINVAL; 1140 rc = -ENODATA;
1098 goto out; 1141 goto out;
1099 } 1142 }
1100 found: 1143 found:
@@ -1110,16 +1153,14 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
1110 request = PAD(sizeof(struct jffs2_raw_xref)); 1153 request = PAD(sizeof(struct jffs2_raw_xref));
1111 rc = jffs2_reserve_space(c, request, &length, 1154 rc = jffs2_reserve_space(c, request, &length,
1112 ALLOC_NORMAL, JFFS2_SUMMARY_XREF_SIZE); 1155 ALLOC_NORMAL, JFFS2_SUMMARY_XREF_SIZE);
1156 down_write(&c->xattr_sem);
1113 if (rc) { 1157 if (rc) {
1114 JFFS2_WARNING("jffs2_reserve_space()=%d, request=%u\n", rc, request); 1158 JFFS2_WARNING("jffs2_reserve_space()=%d, request=%u\n", rc, request);
1115 down_write(&c->xattr_sem); 1159 if (atomic_dec_and_test(&xd->refcnt))
1116 xd->refcnt--;
1117 if (!xd->refcnt)
1118 delete_xattr_datum(c, xd); 1160 delete_xattr_datum(c, xd);
1119 up_write(&c->xattr_sem); 1161 up_write(&c->xattr_sem);
1120 return rc; 1162 return rc;
1121 } 1163 }
1122 down_write(&c->xattr_sem);
1123 if (ref) 1164 if (ref)
1124 *pref = ref->next; 1165 *pref = ref->next;
1125 newref = create_xattr_ref(c, ic, xd); 1166 newref = create_xattr_ref(c, ic, xd);
@@ -1129,8 +1170,7 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
1129 ic->xref = ref; 1170 ic->xref = ref;
1130 } 1171 }
1131 rc = PTR_ERR(newref); 1172 rc = PTR_ERR(newref);
1132 xd->refcnt--; 1173 if (atomic_dec_and_test(&xd->refcnt))
1133 if (!xd->refcnt)
1134 delete_xattr_datum(c, xd); 1174 delete_xattr_datum(c, xd);
1135 } else if (ref) { 1175 } else if (ref) {
1136 delete_xattr_ref(c, ref); 1176 delete_xattr_ref(c, ref);
@@ -1142,38 +1182,40 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
1142} 1182}
1143 1183
1144/* -------- garbage collector functions ------------- 1184/* -------- garbage collector functions -------------
1145 * jffs2_garbage_collect_xattr_datum(c, xd) 1185 * jffs2_garbage_collect_xattr_datum(c, xd, raw)
1146 * is used to move xdatum into new node. 1186 * is used to move xdatum into new node.
1147 * jffs2_garbage_collect_xattr_ref(c, ref) 1187 * jffs2_garbage_collect_xattr_ref(c, ref, raw)
1148 * is used to move xref into new node. 1188 * is used to move xref into new node.
1149 * jffs2_verify_xattr(c) 1189 * jffs2_verify_xattr(c)
1150 * is used to call do_verify_xattr_datum() before garbage collecting. 1190 * is used to call do_verify_xattr_datum() before garbage collecting.
1191 * jffs2_release_xattr_datum(c, xd)
1192 * is used to release an in-memory object of xdatum.
1193 * jffs2_release_xattr_ref(c, ref)
1194 * is used to release an in-memory object of xref.
1151 * -------------------------------------------------- */ 1195 * -------------------------------------------------- */
1152int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) 1196int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd,
1197 struct jffs2_raw_node_ref *raw)
1153{ 1198{
1154 uint32_t totlen, length, old_ofs; 1199 uint32_t totlen, length, old_ofs;
1155 int rc = -EINVAL; 1200 int rc = 0;
1156 1201
1157 down_write(&c->xattr_sem); 1202 down_write(&c->xattr_sem);
1158 BUG_ON(!xd->node); 1203 if (xd->node != raw)
1159 1204 goto out;
1160 old_ofs = ref_offset(xd->node); 1205 if (xd->flags & (JFFS2_XFLAGS_DEAD|JFFS2_XFLAGS_INVALID))
1161 totlen = ref_totlen(c, c->gcblock, xd->node);
1162 if (totlen < sizeof(struct jffs2_raw_xattr))
1163 goto out; 1206 goto out;
1164 1207
1165 if (!xd->xname) { 1208 rc = load_xattr_datum(c, xd);
1166 rc = load_xattr_datum(c, xd); 1209 if (unlikely(rc)) {
1167 if (unlikely(rc > 0)) { 1210 rc = (rc > 0) ? 0 : rc;
1168 delete_xattr_datum_node(c, xd); 1211 goto out;
1169 rc = 0;
1170 goto out;
1171 } else if (unlikely(rc < 0))
1172 goto out;
1173 } 1212 }
1213 old_ofs = ref_offset(xd->node);
1214 totlen = PAD(sizeof(struct jffs2_raw_xattr)
1215 + xd->name_len + 1 + xd->value_len);
1174 rc = jffs2_reserve_space_gc(c, totlen, &length, JFFS2_SUMMARY_XATTR_SIZE); 1216 rc = jffs2_reserve_space_gc(c, totlen, &length, JFFS2_SUMMARY_XATTR_SIZE);
1175 if (rc || length < totlen) { 1217 if (rc) {
1176 JFFS2_WARNING("jffs2_reserve_space()=%d, request=%u\n", rc, totlen); 1218 JFFS2_WARNING("jffs2_reserve_space_gc()=%d, request=%u\n", rc, totlen);
1177 rc = rc ? rc : -EBADFD; 1219 rc = rc ? rc : -EBADFD;
1178 goto out; 1220 goto out;
1179 } 1221 }
@@ -1182,27 +1224,32 @@ int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xatt
1182 dbg_xattr("xdatum (xid=%u, version=%u) GC'ed from %#08x to %08x\n", 1224 dbg_xattr("xdatum (xid=%u, version=%u) GC'ed from %#08x to %08x\n",
1183 xd->xid, xd->version, old_ofs, ref_offset(xd->node)); 1225 xd->xid, xd->version, old_ofs, ref_offset(xd->node));
1184 out: 1226 out:
1227 if (!rc)
1228 jffs2_mark_node_obsolete(c, raw);
1185 up_write(&c->xattr_sem); 1229 up_write(&c->xattr_sem);
1186 return rc; 1230 return rc;
1187} 1231}
1188 1232
1189 1233int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref,
1190int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) 1234 struct jffs2_raw_node_ref *raw)
1191{ 1235{
1192 uint32_t totlen, length, old_ofs; 1236 uint32_t totlen, length, old_ofs;
1193 int rc = -EINVAL; 1237 int rc = 0;
1194 1238
1195 down_write(&c->xattr_sem); 1239 down_write(&c->xattr_sem);
1196 BUG_ON(!ref->node); 1240 BUG_ON(!ref->node);
1197 1241
1242 if (ref->node != raw)
1243 goto out;
1244 if (is_xattr_ref_dead(ref) && (raw->next_in_ino == (void *)ref))
1245 goto out;
1246
1198 old_ofs = ref_offset(ref->node); 1247 old_ofs = ref_offset(ref->node);
1199 totlen = ref_totlen(c, c->gcblock, ref->node); 1248 totlen = ref_totlen(c, c->gcblock, ref->node);
1200 if (totlen != sizeof(struct jffs2_raw_xref))
1201 goto out;
1202 1249
1203 rc = jffs2_reserve_space_gc(c, totlen, &length, JFFS2_SUMMARY_XREF_SIZE); 1250 rc = jffs2_reserve_space_gc(c, totlen, &length, JFFS2_SUMMARY_XREF_SIZE);
1204 if (rc || length < totlen) { 1251 if (rc) {
1205 JFFS2_WARNING("%s: jffs2_reserve_space() = %d, request = %u\n", 1252 JFFS2_WARNING("%s: jffs2_reserve_space_gc() = %d, request = %u\n",
1206 __FUNCTION__, rc, totlen); 1253 __FUNCTION__, rc, totlen);
1207 rc = rc ? rc : -EBADFD; 1254 rc = rc ? rc : -EBADFD;
1208 goto out; 1255 goto out;
@@ -1212,6 +1259,8 @@ int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_
1212 dbg_xattr("xref (ino=%u, xid=%u) GC'ed from %#08x to %08x\n", 1259 dbg_xattr("xref (ino=%u, xid=%u) GC'ed from %#08x to %08x\n",
1213 ref->ic->ino, ref->xd->xid, old_ofs, ref_offset(ref->node)); 1260 ref->ic->ino, ref->xd->xid, old_ofs, ref_offset(ref->node));
1214 out: 1261 out:
1262 if (!rc)
1263 jffs2_mark_node_obsolete(c, raw);
1215 up_write(&c->xattr_sem); 1264 up_write(&c->xattr_sem);
1216 return rc; 1265 return rc;
1217} 1266}
@@ -1219,20 +1268,59 @@ int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_
1219int jffs2_verify_xattr(struct jffs2_sb_info *c) 1268int jffs2_verify_xattr(struct jffs2_sb_info *c)
1220{ 1269{
1221 struct jffs2_xattr_datum *xd, *_xd; 1270 struct jffs2_xattr_datum *xd, *_xd;
1271 struct jffs2_eraseblock *jeb;
1272 struct jffs2_raw_node_ref *raw;
1273 uint32_t totlen;
1222 int rc; 1274 int rc;
1223 1275
1224 down_write(&c->xattr_sem); 1276 down_write(&c->xattr_sem);
1225 list_for_each_entry_safe(xd, _xd, &c->xattr_unchecked, xindex) { 1277 list_for_each_entry_safe(xd, _xd, &c->xattr_unchecked, xindex) {
1226 rc = do_verify_xattr_datum(c, xd); 1278 rc = do_verify_xattr_datum(c, xd);
1227 if (rc == 0) { 1279 if (rc < 0)
1228 list_del_init(&xd->xindex); 1280 continue;
1229 break; 1281 list_del_init(&xd->xindex);
1230 } else if (rc > 0) { 1282 spin_lock(&c->erase_completion_lock);
1231 list_del_init(&xd->xindex); 1283 for (raw=xd->node; raw != (void *)xd; raw=raw->next_in_ino) {
1232 delete_xattr_datum_node(c, xd); 1284 if (ref_flags(raw) != REF_UNCHECKED)
1285 continue;
1286 jeb = &c->blocks[ref_offset(raw) / c->sector_size];
1287 totlen = PAD(ref_totlen(c, jeb, raw));
1288 c->unchecked_size -= totlen; c->used_size += totlen;
1289 jeb->unchecked_size -= totlen; jeb->used_size += totlen;
1290 raw->flash_offset = ref_offset(raw)
1291 | ((xd->node == (void *)raw) ? REF_PRISTINE : REF_NORMAL);
1233 } 1292 }
1293 if (xd->flags & JFFS2_XFLAGS_DEAD)
1294 list_add(&xd->xindex, &c->xattr_dead_list);
1295 spin_unlock(&c->erase_completion_lock);
1234 } 1296 }
1235 up_write(&c->xattr_sem); 1297 up_write(&c->xattr_sem);
1236
1237 return list_empty(&c->xattr_unchecked) ? 1 : 0; 1298 return list_empty(&c->xattr_unchecked) ? 1 : 0;
1238} 1299}
1300
1301void jffs2_release_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
1302{
1303 /* must be called under spin_lock(&c->erase_completion_lock) */
1304 if (atomic_read(&xd->refcnt) || xd->node != (void *)xd)
1305 return;
1306
1307 list_del(&xd->xindex);
1308 jffs2_free_xattr_datum(xd);
1309}
1310
1311void jffs2_release_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
1312{
1313 /* must be called under spin_lock(&c->erase_completion_lock) */
1314 struct jffs2_xattr_ref *tmp, **ptmp;
1315
1316 if (ref->node != (void *)ref)
1317 return;
1318
1319 for (tmp=c->xref_dead_list, ptmp=&c->xref_dead_list; tmp; ptmp=&tmp->next, tmp=tmp->next) {
1320 if (ref == tmp) {
1321 *ptmp = tmp->next;
1322 break;
1323 }
1324 }
1325 jffs2_free_xattr_ref(ref);
1326}
diff --git a/fs/jffs2/xattr.h b/fs/jffs2/xattr.h
index 2c199856c5..06a5c69dcf 100644
--- a/fs/jffs2/xattr.h
+++ b/fs/jffs2/xattr.h
@@ -16,6 +16,8 @@
16 16
17#define JFFS2_XFLAGS_HOT (0x01) /* This datum is HOT */ 17#define JFFS2_XFLAGS_HOT (0x01) /* This datum is HOT */
18#define JFFS2_XFLAGS_BIND (0x02) /* This datum is not reclaimed */ 18#define JFFS2_XFLAGS_BIND (0x02) /* This datum is not reclaimed */
19#define JFFS2_XFLAGS_DEAD (0x40) /* This datum is already dead */
20#define JFFS2_XFLAGS_INVALID (0x80) /* This datum contains crc error */
19 21
20struct jffs2_xattr_datum 22struct jffs2_xattr_datum
21{ 23{
@@ -23,10 +25,10 @@ struct jffs2_xattr_datum
23 struct jffs2_raw_node_ref *node; 25 struct jffs2_raw_node_ref *node;
24 uint8_t class; 26 uint8_t class;
25 uint8_t flags; 27 uint8_t flags;
26 uint16_t xprefix; /* see JFFS2_XATTR_PREFIX_* */ 28 uint16_t xprefix; /* see JFFS2_XATTR_PREFIX_* */
27 29
28 struct list_head xindex; /* chained from c->xattrindex[n] */ 30 struct list_head xindex; /* chained from c->xattrindex[n] */
29 uint32_t refcnt; /* # of xattr_ref refers this */ 31 atomic_t refcnt; /* # of xattr_ref refers this */
30 uint32_t xid; 32 uint32_t xid;
31 uint32_t version; 33 uint32_t version;
32 34
@@ -47,6 +49,7 @@ struct jffs2_xattr_ref
47 uint8_t flags; /* Currently unused */ 49 uint8_t flags; /* Currently unused */
48 u16 unused; 50 u16 unused;
49 51
52 uint32_t xseqno;
50 union { 53 union {
51 struct jffs2_inode_cache *ic; /* reference to jffs2_inode_cache */ 54 struct jffs2_inode_cache *ic; /* reference to jffs2_inode_cache */
52 uint32_t ino; /* only used in scanning/building */ 55 uint32_t ino; /* only used in scanning/building */
@@ -58,6 +61,12 @@ struct jffs2_xattr_ref
58 struct jffs2_xattr_ref *next; /* chained from ic->xref_list */ 61 struct jffs2_xattr_ref *next; /* chained from ic->xref_list */
59}; 62};
60 63
64#define XREF_DELETE_MARKER (0x00000001)
65static inline int is_xattr_ref_dead(struct jffs2_xattr_ref *ref)
66{
67 return ((ref->xseqno & XREF_DELETE_MARKER) != 0);
68}
69
61#ifdef CONFIG_JFFS2_FS_XATTR 70#ifdef CONFIG_JFFS2_FS_XATTR
62 71
63extern void jffs2_init_xattr_subsystem(struct jffs2_sb_info *c); 72extern void jffs2_init_xattr_subsystem(struct jffs2_sb_info *c);
@@ -70,9 +79,13 @@ extern struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c
70extern void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic); 79extern void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic);
71extern void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic); 80extern void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic);
72 81
73extern int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd); 82extern int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd,
74extern int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref); 83 struct jffs2_raw_node_ref *raw);
84extern int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref,
85 struct jffs2_raw_node_ref *raw);
75extern int jffs2_verify_xattr(struct jffs2_sb_info *c); 86extern int jffs2_verify_xattr(struct jffs2_sb_info *c);
87extern void jffs2_release_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd);
88extern void jffs2_release_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref);
76 89
77extern int do_jffs2_getxattr(struct inode *inode, int xprefix, const char *xname, 90extern int do_jffs2_getxattr(struct inode *inode, int xprefix, const char *xname,
78 char *buffer, size_t size); 91 char *buffer, size_t size);
diff --git a/fs/jfs/jfs_extent.c b/fs/jfs/jfs_extent.c
index 5549378358..4d52593a5f 100644
--- a/fs/jfs/jfs_extent.c
+++ b/fs/jfs/jfs_extent.c
@@ -126,7 +126,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, boolean_t abnr)
126 126
127 /* allocate the disk blocks for the extent. initially, extBalloc() 127 /* allocate the disk blocks for the extent. initially, extBalloc()
128 * will try to allocate disk blocks for the requested size (xlen). 128 * will try to allocate disk blocks for the requested size (xlen).
129 * if this fails (xlen contigious free blocks not avaliable), it'll 129 * if this fails (xlen contiguous free blocks not avaliable), it'll
130 * try to allocate a smaller number of blocks (producing a smaller 130 * try to allocate a smaller number of blocks (producing a smaller
131 * extent), with this smaller number of blocks consisting of the 131 * extent), with this smaller number of blocks consisting of the
132 * requested number of blocks rounded down to the next smaller 132 * requested number of blocks rounded down to the next smaller
@@ -493,7 +493,7 @@ int extFill(struct inode *ip, xad_t * xp)
493 * 493 *
494 * initially, we will try to allocate disk blocks for the 494 * initially, we will try to allocate disk blocks for the
495 * requested size (nblocks). if this fails (nblocks 495 * requested size (nblocks). if this fails (nblocks
496 * contigious free blocks not avaliable), we'll try to allocate 496 * contiguous free blocks not avaliable), we'll try to allocate
497 * a smaller number of blocks (producing a smaller extent), with 497 * a smaller number of blocks (producing a smaller extent), with
498 * this smaller number of blocks consisting of the requested 498 * this smaller number of blocks consisting of the requested
499 * number of blocks rounded down to the next smaller power of 2 499 * number of blocks rounded down to the next smaller power of 2
@@ -529,7 +529,7 @@ extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno)
529 529
530 /* get the number of blocks to initially attempt to allocate. 530 /* get the number of blocks to initially attempt to allocate.
531 * we'll first try the number of blocks requested unless this 531 * we'll first try the number of blocks requested unless this
532 * number is greater than the maximum number of contigious free 532 * number is greater than the maximum number of contiguous free
533 * blocks in the map. in that case, we'll start off with the 533 * blocks in the map. in that case, we'll start off with the
534 * maximum free. 534 * maximum free.
535 */ 535 */
@@ -586,7 +586,7 @@ extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno)
586 * in place. if this fails, we'll try to move the extent 586 * in place. if this fails, we'll try to move the extent
587 * to a new set of blocks. if moving the extent, we initially 587 * to a new set of blocks. if moving the extent, we initially
588 * will try to allocate disk blocks for the requested size 588 * will try to allocate disk blocks for the requested size
589 * (nnew). if this fails (nnew contigious free blocks not 589 * (nnew). if this fails (new contiguous free blocks not
590 * avaliable), we'll try to allocate a smaller number of 590 * avaliable), we'll try to allocate a smaller number of
591 * blocks (producing a smaller extent), with this smaller 591 * blocks (producing a smaller extent), with this smaller
592 * number of blocks consisting of the requested number of 592 * number of blocks consisting of the requested number of
diff --git a/fs/libfs.c b/fs/libfs.c
index fc785d8bef..ac02ea602c 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -149,10 +149,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
149 /* fallthrough */ 149 /* fallthrough */
150 default: 150 default:
151 spin_lock(&dcache_lock); 151 spin_lock(&dcache_lock);
152 if (filp->f_pos == 2) { 152 if (filp->f_pos == 2)
153 list_del(q); 153 list_move(q, &dentry->d_subdirs);
154 list_add(q, &dentry->d_subdirs); 154
155 }
156 for (p=q->next; p != &dentry->d_subdirs; p=p->next) { 155 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
157 struct dentry *next; 156 struct dentry *next;
158 next = list_entry(p, struct dentry, d_u.d_child); 157 next = list_entry(p, struct dentry, d_u.d_child);
@@ -164,8 +163,7 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
164 return 0; 163 return 0;
165 spin_lock(&dcache_lock); 164 spin_lock(&dcache_lock);
166 /* next is still alive */ 165 /* next is still alive */
167 list_del(q); 166 list_move(q, p);
168 list_add(q, p);
169 p = q; 167 p = q;
170 filp->f_pos++; 168 filp->f_pos++;
171 } 169 }
diff --git a/fs/namespace.c b/fs/namespace.c
index 866430bb02..b3ed212ea4 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -526,10 +526,8 @@ void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
526{ 526{
527 struct vfsmount *p; 527 struct vfsmount *p;
528 528
529 for (p = mnt; p; p = next_mnt(p, mnt)) { 529 for (p = mnt; p; p = next_mnt(p, mnt))
530 list_del(&p->mnt_hash); 530 list_move(&p->mnt_hash, kill);
531 list_add(&p->mnt_hash, kill);
532 }
533 531
534 if (propagate) 532 if (propagate)
535 propagate_umount(kill); 533 propagate_umount(kill);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 402005c35a..8ca9707be6 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -909,7 +909,7 @@ int __init nfs_init_directcache(void)
909 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures 909 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
910 * 910 *
911 */ 911 */
912void __exit nfs_destroy_directcache(void) 912void nfs_destroy_directcache(void)
913{ 913{
914 if (kmem_cache_destroy(nfs_direct_cachep)) 914 if (kmem_cache_destroy(nfs_direct_cachep))
915 printk(KERN_INFO "nfs_direct_cache: not all structures were freed\n"); 915 printk(KERN_INFO "nfs_direct_cache: not all structures were freed\n");
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 51bc88b662..c5b916605f 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1132,7 +1132,7 @@ static int __init nfs_init_inodecache(void)
1132 return 0; 1132 return 0;
1133} 1133}
1134 1134
1135static void __exit nfs_destroy_inodecache(void) 1135static void nfs_destroy_inodecache(void)
1136{ 1136{
1137 if (kmem_cache_destroy(nfs_inode_cachep)) 1137 if (kmem_cache_destroy(nfs_inode_cachep))
1138 printk(KERN_INFO "nfs_inode_cache: not all structures were freed\n"); 1138 printk(KERN_INFO "nfs_inode_cache: not all structures were freed\n");
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index bd2815e2de..4fe51c1292 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -31,15 +31,15 @@ extern struct svc_version nfs4_callback_version1;
31 31
32/* pagelist.c */ 32/* pagelist.c */
33extern int __init nfs_init_nfspagecache(void); 33extern int __init nfs_init_nfspagecache(void);
34extern void __exit nfs_destroy_nfspagecache(void); 34extern void nfs_destroy_nfspagecache(void);
35extern int __init nfs_init_readpagecache(void); 35extern int __init nfs_init_readpagecache(void);
36extern void __exit nfs_destroy_readpagecache(void); 36extern void nfs_destroy_readpagecache(void);
37extern int __init nfs_init_writepagecache(void); 37extern int __init nfs_init_writepagecache(void);
38extern void __exit nfs_destroy_writepagecache(void); 38extern void nfs_destroy_writepagecache(void);
39 39
40#ifdef CONFIG_NFS_DIRECTIO 40#ifdef CONFIG_NFS_DIRECTIO
41extern int __init nfs_init_directcache(void); 41extern int __init nfs_init_directcache(void);
42extern void __exit nfs_destroy_directcache(void); 42extern void nfs_destroy_directcache(void);
43#else 43#else
44#define nfs_init_directcache() (0) 44#define nfs_init_directcache() (0)
45#define nfs_destroy_directcache() do {} while(0) 45#define nfs_destroy_directcache() do {} while(0)
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index ef9429643e..d89f6fb3b3 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -390,7 +390,7 @@ int __init nfs_init_nfspagecache(void)
390 return 0; 390 return 0;
391} 391}
392 392
393void __exit nfs_destroy_nfspagecache(void) 393void nfs_destroy_nfspagecache(void)
394{ 394{
395 if (kmem_cache_destroy(nfs_page_cachep)) 395 if (kmem_cache_destroy(nfs_page_cachep))
396 printk(KERN_INFO "nfs_page: not all structures were freed\n"); 396 printk(KERN_INFO "nfs_page: not all structures were freed\n");
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 41c2ffee24..32cf3773af 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -711,7 +711,7 @@ int __init nfs_init_readpagecache(void)
711 return 0; 711 return 0;
712} 712}
713 713
714void __exit nfs_destroy_readpagecache(void) 714void nfs_destroy_readpagecache(void)
715{ 715{
716 mempool_destroy(nfs_rdata_mempool); 716 mempool_destroy(nfs_rdata_mempool);
717 if (kmem_cache_destroy(nfs_rdata_cachep)) 717 if (kmem_cache_destroy(nfs_rdata_cachep))
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index b383fdd3a1..8fccb9cb17 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1551,7 +1551,7 @@ int __init nfs_init_writepagecache(void)
1551 return 0; 1551 return 0;
1552} 1552}
1553 1553
1554void __exit nfs_destroy_writepagecache(void) 1554void nfs_destroy_writepagecache(void)
1555{ 1555{
1556 mempool_destroy(nfs_commit_mempool); 1556 mempool_destroy(nfs_commit_mempool);
1557 mempool_destroy(nfs_wdata_mempool); 1557 mempool_destroy(nfs_wdata_mempool);
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 96c7578cbe..7c7d01672d 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -123,7 +123,7 @@ static void release_stateid(struct nfs4_stateid *stp, int flags);
123 */ 123 */
124 124
125/* recall_lock protects the del_recall_lru */ 125/* recall_lock protects the del_recall_lru */
126static spinlock_t recall_lock = SPIN_LOCK_UNLOCKED; 126static DEFINE_SPINLOCK(recall_lock);
127static struct list_head del_recall_lru; 127static struct list_head del_recall_lru;
128 128
129static void 129static void
@@ -529,8 +529,7 @@ move_to_confirmed(struct nfs4_client *clp)
529 529
530 dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp); 530 dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
531 list_del_init(&clp->cl_strhash); 531 list_del_init(&clp->cl_strhash);
532 list_del_init(&clp->cl_idhash); 532 list_move(&clp->cl_idhash, &conf_id_hashtbl[idhashval]);
533 list_add(&clp->cl_idhash, &conf_id_hashtbl[idhashval]);
534 strhashval = clientstr_hashval(clp->cl_recdir); 533 strhashval = clientstr_hashval(clp->cl_recdir);
535 list_add(&clp->cl_strhash, &conf_str_hashtbl[strhashval]); 534 list_add(&clp->cl_strhash, &conf_str_hashtbl[strhashval]);
536 renew_client(clp); 535 renew_client(clp);
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index d852ebb538..fdf7cf3dfa 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -103,8 +103,7 @@ nfsd_cache_shutdown(void)
103static void 103static void
104lru_put_end(struct svc_cacherep *rp) 104lru_put_end(struct svc_cacherep *rp)
105{ 105{
106 list_del(&rp->c_lru); 106 list_move_tail(&rp->c_lru, &lru_head);
107 list_add_tail(&rp->c_lru, &lru_head);
108} 107}
109 108
110/* 109/*
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 21f38accd0..1d26cfcd9f 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -54,7 +54,7 @@ static DECLARE_RWSEM(o2hb_callback_sem);
54 * multiple hb threads are watching multiple regions. A node is live 54 * multiple hb threads are watching multiple regions. A node is live
55 * whenever any of the threads sees activity from the node in its region. 55 * whenever any of the threads sees activity from the node in its region.
56 */ 56 */
57static spinlock_t o2hb_live_lock = SPIN_LOCK_UNLOCKED; 57static DEFINE_SPINLOCK(o2hb_live_lock);
58static struct list_head o2hb_live_slots[O2NM_MAX_NODES]; 58static struct list_head o2hb_live_slots[O2NM_MAX_NODES];
59static unsigned long o2hb_live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; 59static unsigned long o2hb_live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
60static LIST_HEAD(o2hb_node_events); 60static LIST_HEAD(o2hb_node_events);
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 0f60cc0d39..1591eb37a7 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -108,7 +108,7 @@
108 ##args); \ 108 ##args); \
109} while (0) 109} while (0)
110 110
111static rwlock_t o2net_handler_lock = RW_LOCK_UNLOCKED; 111static DEFINE_RWLOCK(o2net_handler_lock);
112static struct rb_root o2net_handler_tree = RB_ROOT; 112static struct rb_root o2net_handler_tree = RB_ROOT;
113 113
114static struct o2net_node o2net_nodes[O2NM_MAX_NODES]; 114static struct o2net_node o2net_nodes[O2NM_MAX_NODES];
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index 355593dd8e..42775e2bbe 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -197,12 +197,14 @@ static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
197 lock->ml.node == dlm->node_num ? "master" : 197 lock->ml.node == dlm->node_num ? "master" :
198 "remote"); 198 "remote");
199 memcpy(lksb->lvb, res->lvb, DLM_LVB_LEN); 199 memcpy(lksb->lvb, res->lvb, DLM_LVB_LEN);
200 } else if (lksb->flags & DLM_LKSB_PUT_LVB) {
201 mlog(0, "setting lvb from lockres for %s node\n",
202 lock->ml.node == dlm->node_num ? "master" :
203 "remote");
204 memcpy(res->lvb, lksb->lvb, DLM_LVB_LEN);
205 } 200 }
201 /* Do nothing for lvb put requests - they should be done in
202 * place when the lock is downconverted - otherwise we risk
203 * racing gets and puts which could result in old lvb data
204 * being propagated. We leave the put flag set and clear it
205 * here. In the future we might want to clear it at the time
206 * the put is actually done.
207 */
206 spin_unlock(&res->spinlock); 208 spin_unlock(&res->spinlock);
207 } 209 }
208 210
@@ -381,8 +383,7 @@ do_ast:
381 ret = DLM_NORMAL; 383 ret = DLM_NORMAL;
382 if (past->type == DLM_AST) { 384 if (past->type == DLM_AST) {
383 /* do not alter lock refcount. switching lists. */ 385 /* do not alter lock refcount. switching lists. */
384 list_del_init(&lock->list); 386 list_move_tail(&lock->list, &res->granted);
385 list_add_tail(&lock->list, &res->granted);
386 mlog(0, "ast: adding to granted list... type=%d, " 387 mlog(0, "ast: adding to granted list... type=%d, "
387 "convert_type=%d\n", lock->ml.type, lock->ml.convert_type); 388 "convert_type=%d\n", lock->ml.type, lock->ml.convert_type);
388 if (lock->ml.convert_type != LKM_IVMODE) { 389 if (lock->ml.convert_type != LKM_IVMODE) {
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index 88cc43df18..9bdc9cf659 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -37,7 +37,17 @@
37#define DLM_THREAD_SHUFFLE_INTERVAL 5 // flush everything every 5 passes 37#define DLM_THREAD_SHUFFLE_INTERVAL 5 // flush everything every 5 passes
38#define DLM_THREAD_MS 200 // flush at least every 200 ms 38#define DLM_THREAD_MS 200 // flush at least every 200 ms
39 39
40#define DLM_HASH_BUCKETS (PAGE_SIZE / sizeof(struct hlist_head)) 40#define DLM_HASH_SIZE_DEFAULT (1 << 14)
41#if DLM_HASH_SIZE_DEFAULT < PAGE_SIZE
42# define DLM_HASH_PAGES 1
43#else
44# define DLM_HASH_PAGES (DLM_HASH_SIZE_DEFAULT / PAGE_SIZE)
45#endif
46#define DLM_BUCKETS_PER_PAGE (PAGE_SIZE / sizeof(struct hlist_head))
47#define DLM_HASH_BUCKETS (DLM_HASH_PAGES * DLM_BUCKETS_PER_PAGE)
48
49/* Intended to make it easier for us to switch out hash functions */
50#define dlm_lockid_hash(_n, _l) full_name_hash(_n, _l)
41 51
42enum dlm_ast_type { 52enum dlm_ast_type {
43 DLM_AST = 0, 53 DLM_AST = 0,
@@ -61,7 +71,8 @@ static inline int dlm_is_recovery_lock(const char *lock_name, int name_len)
61 return 0; 71 return 0;
62} 72}
63 73
64#define DLM_RECO_STATE_ACTIVE 0x0001 74#define DLM_RECO_STATE_ACTIVE 0x0001
75#define DLM_RECO_STATE_FINALIZE 0x0002
65 76
66struct dlm_recovery_ctxt 77struct dlm_recovery_ctxt
67{ 78{
@@ -85,7 +96,7 @@ enum dlm_ctxt_state {
85struct dlm_ctxt 96struct dlm_ctxt
86{ 97{
87 struct list_head list; 98 struct list_head list;
88 struct hlist_head *lockres_hash; 99 struct hlist_head **lockres_hash;
89 struct list_head dirty_list; 100 struct list_head dirty_list;
90 struct list_head purge_list; 101 struct list_head purge_list;
91 struct list_head pending_asts; 102 struct list_head pending_asts;
@@ -120,6 +131,7 @@ struct dlm_ctxt
120 struct o2hb_callback_func dlm_hb_down; 131 struct o2hb_callback_func dlm_hb_down;
121 struct task_struct *dlm_thread_task; 132 struct task_struct *dlm_thread_task;
122 struct task_struct *dlm_reco_thread_task; 133 struct task_struct *dlm_reco_thread_task;
134 struct workqueue_struct *dlm_worker;
123 wait_queue_head_t dlm_thread_wq; 135 wait_queue_head_t dlm_thread_wq;
124 wait_queue_head_t dlm_reco_thread_wq; 136 wait_queue_head_t dlm_reco_thread_wq;
125 wait_queue_head_t ast_wq; 137 wait_queue_head_t ast_wq;
@@ -132,6 +144,11 @@ struct dlm_ctxt
132 struct list_head dlm_eviction_callbacks; 144 struct list_head dlm_eviction_callbacks;
133}; 145};
134 146
147static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned i)
148{
149 return dlm->lockres_hash[(i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES] + (i % DLM_BUCKETS_PER_PAGE);
150}
151
135/* these keventd work queue items are for less-frequently 152/* these keventd work queue items are for less-frequently
136 * called functions that cannot be directly called from the 153 * called functions that cannot be directly called from the
137 * net message handlers for some reason, usually because 154 * net message handlers for some reason, usually because
@@ -216,20 +233,29 @@ struct dlm_lock_resource
216 /* WARNING: Please see the comment in dlm_init_lockres before 233 /* WARNING: Please see the comment in dlm_init_lockres before
217 * adding fields here. */ 234 * adding fields here. */
218 struct hlist_node hash_node; 235 struct hlist_node hash_node;
236 struct qstr lockname;
219 struct kref refs; 237 struct kref refs;
220 238
221 /* please keep these next 3 in this order 239 /*
222 * some funcs want to iterate over all lists */ 240 * Please keep granted, converting, and blocked in this order,
241 * as some funcs want to iterate over all lists.
242 *
243 * All four lists are protected by the hash's reference.
244 */
223 struct list_head granted; 245 struct list_head granted;
224 struct list_head converting; 246 struct list_head converting;
225 struct list_head blocked; 247 struct list_head blocked;
248 struct list_head purge;
226 249
250 /*
251 * These two lists require you to hold an additional reference
252 * while they are on the list.
253 */
227 struct list_head dirty; 254 struct list_head dirty;
228 struct list_head recovering; // dlm_recovery_ctxt.resources list 255 struct list_head recovering; // dlm_recovery_ctxt.resources list
229 256
230 /* unused lock resources have their last_used stamped and are 257 /* unused lock resources have their last_used stamped and are
231 * put on a list for the dlm thread to run. */ 258 * put on a list for the dlm thread to run. */
232 struct list_head purge;
233 unsigned long last_used; 259 unsigned long last_used;
234 260
235 unsigned migration_pending:1; 261 unsigned migration_pending:1;
@@ -238,7 +264,6 @@ struct dlm_lock_resource
238 wait_queue_head_t wq; 264 wait_queue_head_t wq;
239 u8 owner; //node which owns the lock resource, or unknown 265 u8 owner; //node which owns the lock resource, or unknown
240 u16 state; 266 u16 state;
241 struct qstr lockname;
242 char lvb[DLM_LVB_LEN]; 267 char lvb[DLM_LVB_LEN];
243}; 268};
244 269
@@ -300,6 +325,15 @@ enum dlm_lockres_list {
300 DLM_BLOCKED_LIST 325 DLM_BLOCKED_LIST
301}; 326};
302 327
328static inline int dlm_lvb_is_empty(char *lvb)
329{
330 int i;
331 for (i=0; i<DLM_LVB_LEN; i++)
332 if (lvb[i])
333 return 0;
334 return 1;
335}
336
303static inline struct list_head * 337static inline struct list_head *
304dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx) 338dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx)
305{ 339{
@@ -609,7 +643,8 @@ struct dlm_finalize_reco
609{ 643{
610 u8 node_idx; 644 u8 node_idx;
611 u8 dead_node; 645 u8 dead_node;
612 __be16 pad1; 646 u8 flags;
647 u8 pad1;
613 __be32 pad2; 648 __be32 pad2;
614}; 649};
615 650
@@ -676,6 +711,7 @@ void dlm_wait_for_recovery(struct dlm_ctxt *dlm);
676void dlm_kick_recovery_thread(struct dlm_ctxt *dlm); 711void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
677int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node); 712int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node);
678int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout); 713int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
714int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout);
679 715
680void dlm_put(struct dlm_ctxt *dlm); 716void dlm_put(struct dlm_ctxt *dlm);
681struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm); 717struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm);
@@ -687,14 +723,20 @@ void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
687 struct dlm_lock_resource *res); 723 struct dlm_lock_resource *res);
688void dlm_purge_lockres(struct dlm_ctxt *dlm, 724void dlm_purge_lockres(struct dlm_ctxt *dlm,
689 struct dlm_lock_resource *lockres); 725 struct dlm_lock_resource *lockres);
690void dlm_lockres_get(struct dlm_lock_resource *res); 726static inline void dlm_lockres_get(struct dlm_lock_resource *res)
727{
728 /* This is called on every lookup, so it might be worth
729 * inlining. */
730 kref_get(&res->refs);
731}
691void dlm_lockres_put(struct dlm_lock_resource *res); 732void dlm_lockres_put(struct dlm_lock_resource *res);
692void __dlm_unhash_lockres(struct dlm_lock_resource *res); 733void __dlm_unhash_lockres(struct dlm_lock_resource *res);
693void __dlm_insert_lockres(struct dlm_ctxt *dlm, 734void __dlm_insert_lockres(struct dlm_ctxt *dlm,
694 struct dlm_lock_resource *res); 735 struct dlm_lock_resource *res);
695struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm, 736struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
696 const char *name, 737 const char *name,
697 unsigned int len); 738 unsigned int len,
739 unsigned int hash);
698struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm, 740struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
699 const char *name, 741 const char *name,
700 unsigned int len); 742 unsigned int len);
@@ -819,6 +861,7 @@ void dlm_clean_master_list(struct dlm_ctxt *dlm,
819 u8 dead_node); 861 u8 dead_node);
820int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock); 862int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
821 863
864int __dlm_lockres_unused(struct dlm_lock_resource *res);
822 865
823static inline const char * dlm_lock_mode_name(int mode) 866static inline const char * dlm_lock_mode_name(int mode)
824{ 867{
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
index 8285228d9e..c764dc8e40 100644
--- a/fs/ocfs2/dlm/dlmconvert.c
+++ b/fs/ocfs2/dlm/dlmconvert.c
@@ -214,6 +214,9 @@ grant:
214 if (lock->ml.node == dlm->node_num) 214 if (lock->ml.node == dlm->node_num)
215 mlog(0, "doing in-place convert for nonlocal lock\n"); 215 mlog(0, "doing in-place convert for nonlocal lock\n");
216 lock->ml.type = type; 216 lock->ml.type = type;
217 if (lock->lksb->flags & DLM_LKSB_PUT_LVB)
218 memcpy(res->lvb, lock->lksb->lvb, DLM_LVB_LEN);
219
217 status = DLM_NORMAL; 220 status = DLM_NORMAL;
218 *call_ast = 1; 221 *call_ast = 1;
219 goto unlock_exit; 222 goto unlock_exit;
@@ -231,8 +234,7 @@ switch_queues:
231 234
232 lock->ml.convert_type = type; 235 lock->ml.convert_type = type;
233 /* do not alter lock refcount. switching lists. */ 236 /* do not alter lock refcount. switching lists. */
234 list_del_init(&lock->list); 237 list_move_tail(&lock->list, &res->converting);
235 list_add_tail(&lock->list, &res->converting);
236 238
237unlock_exit: 239unlock_exit:
238 spin_unlock(&lock->spinlock); 240 spin_unlock(&lock->spinlock);
@@ -248,8 +250,7 @@ void dlm_revert_pending_convert(struct dlm_lock_resource *res,
248 struct dlm_lock *lock) 250 struct dlm_lock *lock)
249{ 251{
250 /* do not alter lock refcount. switching lists. */ 252 /* do not alter lock refcount. switching lists. */
251 list_del_init(&lock->list); 253 list_move_tail(&lock->list, &res->granted);
252 list_add_tail(&lock->list, &res->granted);
253 lock->ml.convert_type = LKM_IVMODE; 254 lock->ml.convert_type = LKM_IVMODE;
254 lock->lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB); 255 lock->lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB);
255} 256}
@@ -294,8 +295,7 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
294 res->state |= DLM_LOCK_RES_IN_PROGRESS; 295 res->state |= DLM_LOCK_RES_IN_PROGRESS;
295 /* move lock to local convert queue */ 296 /* move lock to local convert queue */
296 /* do not alter lock refcount. switching lists. */ 297 /* do not alter lock refcount. switching lists. */
297 list_del_init(&lock->list); 298 list_move_tail(&lock->list, &res->converting);
298 list_add_tail(&lock->list, &res->converting);
299 lock->convert_pending = 1; 299 lock->convert_pending = 1;
300 lock->ml.convert_type = type; 300 lock->ml.convert_type = type;
301 301
@@ -464,6 +464,12 @@ int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data)
464 } 464 }
465 465
466 spin_lock(&res->spinlock); 466 spin_lock(&res->spinlock);
467 status = __dlm_lockres_state_to_status(res);
468 if (status != DLM_NORMAL) {
469 spin_unlock(&res->spinlock);
470 dlm_error(status);
471 goto leave;
472 }
467 list_for_each(iter, &res->granted) { 473 list_for_each(iter, &res->granted) {
468 lock = list_entry(iter, struct dlm_lock, list); 474 lock = list_entry(iter, struct dlm_lock, list);
469 if (lock->ml.cookie == cnv->cookie && 475 if (lock->ml.cookie == cnv->cookie &&
@@ -473,6 +479,21 @@ int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data)
473 } 479 }
474 lock = NULL; 480 lock = NULL;
475 } 481 }
482 if (!lock) {
483 __dlm_print_one_lock_resource(res);
484 list_for_each(iter, &res->granted) {
485 lock = list_entry(iter, struct dlm_lock, list);
486 if (lock->ml.node == cnv->node_idx) {
487 mlog(ML_ERROR, "There is something here "
488 "for node %u, lock->ml.cookie=%llu, "
489 "cnv->cookie=%llu\n", cnv->node_idx,
490 (unsigned long long)lock->ml.cookie,
491 (unsigned long long)cnv->cookie);
492 break;
493 }
494 }
495 lock = NULL;
496 }
476 spin_unlock(&res->spinlock); 497 spin_unlock(&res->spinlock);
477 if (!lock) { 498 if (!lock) {
478 status = DLM_IVLOCKID; 499 status = DLM_IVLOCKID;
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index c7eae5d332..3f6c8d88f7 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -37,10 +37,8 @@
37 37
38#include "dlmapi.h" 38#include "dlmapi.h"
39#include "dlmcommon.h" 39#include "dlmcommon.h"
40#include "dlmdebug.h"
41 40
42#include "dlmdomain.h" 41#include "dlmdomain.h"
43#include "dlmdebug.h"
44 42
45#define MLOG_MASK_PREFIX ML_DLM 43#define MLOG_MASK_PREFIX ML_DLM
46#include "cluster/masklog.h" 44#include "cluster/masklog.h"
@@ -120,6 +118,7 @@ void dlm_print_one_lock(struct dlm_lock *lockid)
120} 118}
121EXPORT_SYMBOL_GPL(dlm_print_one_lock); 119EXPORT_SYMBOL_GPL(dlm_print_one_lock);
122 120
121#if 0
123void dlm_dump_lock_resources(struct dlm_ctxt *dlm) 122void dlm_dump_lock_resources(struct dlm_ctxt *dlm)
124{ 123{
125 struct dlm_lock_resource *res; 124 struct dlm_lock_resource *res;
@@ -136,12 +135,13 @@ void dlm_dump_lock_resources(struct dlm_ctxt *dlm)
136 135
137 spin_lock(&dlm->spinlock); 136 spin_lock(&dlm->spinlock);
138 for (i=0; i<DLM_HASH_BUCKETS; i++) { 137 for (i=0; i<DLM_HASH_BUCKETS; i++) {
139 bucket = &(dlm->lockres_hash[i]); 138 bucket = dlm_lockres_hash(dlm, i);
140 hlist_for_each_entry(res, iter, bucket, hash_node) 139 hlist_for_each_entry(res, iter, bucket, hash_node)
141 dlm_print_one_lock_resource(res); 140 dlm_print_one_lock_resource(res);
142 } 141 }
143 spin_unlock(&dlm->spinlock); 142 spin_unlock(&dlm->spinlock);
144} 143}
144#endif /* 0 */
145 145
146static const char *dlm_errnames[] = { 146static const char *dlm_errnames[] = {
147 [DLM_NORMAL] = "DLM_NORMAL", 147 [DLM_NORMAL] = "DLM_NORMAL",
diff --git a/fs/ocfs2/dlm/dlmdebug.h b/fs/ocfs2/dlm/dlmdebug.h
deleted file mode 100644
index 6858510c3c..0000000000
--- a/fs/ocfs2/dlm/dlmdebug.h
+++ /dev/null
@@ -1,30 +0,0 @@
1/* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
3 *
4 * dlmdebug.h
5 *
6 * Copyright (C) 2004 Oracle. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public
19 * License along with this program; if not, write to the
20 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
21 * Boston, MA 021110-1307, USA.
22 *
23 */
24
25#ifndef DLMDEBUG_H
26#define DLMDEBUG_H
27
28void dlm_dump_lock_resources(struct dlm_ctxt *dlm);
29
30#endif
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 8f3a9e3106..b8c23f7ba6 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -41,7 +41,6 @@
41#include "dlmapi.h" 41#include "dlmapi.h"
42#include "dlmcommon.h" 42#include "dlmcommon.h"
43 43
44#include "dlmdebug.h"
45#include "dlmdomain.h" 44#include "dlmdomain.h"
46 45
47#include "dlmver.h" 46#include "dlmver.h"
@@ -49,6 +48,33 @@
49#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_DOMAIN) 48#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_DOMAIN)
50#include "cluster/masklog.h" 49#include "cluster/masklog.h"
51 50
51static void dlm_free_pagevec(void **vec, int pages)
52{
53 while (pages--)
54 free_page((unsigned long)vec[pages]);
55 kfree(vec);
56}
57
58static void **dlm_alloc_pagevec(int pages)
59{
60 void **vec = kmalloc(pages * sizeof(void *), GFP_KERNEL);
61 int i;
62
63 if (!vec)
64 return NULL;
65
66 for (i = 0; i < pages; i++)
67 if (!(vec[i] = (void *)__get_free_page(GFP_KERNEL)))
68 goto out_free;
69
70 mlog(0, "Allocated DLM hash pagevec; %d pages (%lu expected), %lu buckets per page\n",
71 pages, DLM_HASH_PAGES, (unsigned long)DLM_BUCKETS_PER_PAGE);
72 return vec;
73out_free:
74 dlm_free_pagevec(vec, i);
75 return NULL;
76}
77
52/* 78/*
53 * 79 *
54 * spinlock lock ordering: if multiple locks are needed, obey this ordering: 80 * spinlock lock ordering: if multiple locks are needed, obey this ordering:
@@ -62,7 +88,7 @@
62 * 88 *
63 */ 89 */
64 90
65spinlock_t dlm_domain_lock = SPIN_LOCK_UNLOCKED; 91DEFINE_SPINLOCK(dlm_domain_lock);
66LIST_HEAD(dlm_domains); 92LIST_HEAD(dlm_domains);
67static DECLARE_WAIT_QUEUE_HEAD(dlm_domain_events); 93static DECLARE_WAIT_QUEUE_HEAD(dlm_domain_events);
68 94
@@ -90,8 +116,7 @@ void __dlm_insert_lockres(struct dlm_ctxt *dlm,
90 assert_spin_locked(&dlm->spinlock); 116 assert_spin_locked(&dlm->spinlock);
91 117
92 q = &res->lockname; 118 q = &res->lockname;
93 q->hash = full_name_hash(q->name, q->len); 119 bucket = dlm_lockres_hash(dlm, q->hash);
94 bucket = &(dlm->lockres_hash[q->hash % DLM_HASH_BUCKETS]);
95 120
96 /* get a reference for our hashtable */ 121 /* get a reference for our hashtable */
97 dlm_lockres_get(res); 122 dlm_lockres_get(res);
@@ -100,34 +125,32 @@ void __dlm_insert_lockres(struct dlm_ctxt *dlm,
100} 125}
101 126
102struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm, 127struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
103 const char *name, 128 const char *name,
104 unsigned int len) 129 unsigned int len,
130 unsigned int hash)
105{ 131{
106 unsigned int hash;
107 struct hlist_node *iter;
108 struct dlm_lock_resource *tmpres=NULL;
109 struct hlist_head *bucket; 132 struct hlist_head *bucket;
133 struct hlist_node *list;
110 134
111 mlog_entry("%.*s\n", len, name); 135 mlog_entry("%.*s\n", len, name);
112 136
113 assert_spin_locked(&dlm->spinlock); 137 assert_spin_locked(&dlm->spinlock);
114 138
115 hash = full_name_hash(name, len); 139 bucket = dlm_lockres_hash(dlm, hash);
116
117 bucket = &(dlm->lockres_hash[hash % DLM_HASH_BUCKETS]);
118
119 /* check for pre-existing lock */
120 hlist_for_each(iter, bucket) {
121 tmpres = hlist_entry(iter, struct dlm_lock_resource, hash_node);
122 if (tmpres->lockname.len == len &&
123 memcmp(tmpres->lockname.name, name, len) == 0) {
124 dlm_lockres_get(tmpres);
125 break;
126 }
127 140
128 tmpres = NULL; 141 hlist_for_each(list, bucket) {
142 struct dlm_lock_resource *res = hlist_entry(list,
143 struct dlm_lock_resource, hash_node);
144 if (res->lockname.name[0] != name[0])
145 continue;
146 if (unlikely(res->lockname.len != len))
147 continue;
148 if (memcmp(res->lockname.name + 1, name + 1, len - 1))
149 continue;
150 dlm_lockres_get(res);
151 return res;
129 } 152 }
130 return tmpres; 153 return NULL;
131} 154}
132 155
133struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm, 156struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
@@ -135,9 +158,10 @@ struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
135 unsigned int len) 158 unsigned int len)
136{ 159{
137 struct dlm_lock_resource *res; 160 struct dlm_lock_resource *res;
161 unsigned int hash = dlm_lockid_hash(name, len);
138 162
139 spin_lock(&dlm->spinlock); 163 spin_lock(&dlm->spinlock);
140 res = __dlm_lookup_lockres(dlm, name, len); 164 res = __dlm_lookup_lockres(dlm, name, len, hash);
141 spin_unlock(&dlm->spinlock); 165 spin_unlock(&dlm->spinlock);
142 return res; 166 return res;
143} 167}
@@ -194,7 +218,7 @@ static int dlm_wait_on_domain_helper(const char *domain)
194static void dlm_free_ctxt_mem(struct dlm_ctxt *dlm) 218static void dlm_free_ctxt_mem(struct dlm_ctxt *dlm)
195{ 219{
196 if (dlm->lockres_hash) 220 if (dlm->lockres_hash)
197 free_page((unsigned long) dlm->lockres_hash); 221 dlm_free_pagevec((void **)dlm->lockres_hash, DLM_HASH_PAGES);
198 222
199 if (dlm->name) 223 if (dlm->name)
200 kfree(dlm->name); 224 kfree(dlm->name);
@@ -278,11 +302,21 @@ int dlm_domain_fully_joined(struct dlm_ctxt *dlm)
278 return ret; 302 return ret;
279} 303}
280 304
305static void dlm_destroy_dlm_worker(struct dlm_ctxt *dlm)
306{
307 if (dlm->dlm_worker) {
308 flush_workqueue(dlm->dlm_worker);
309 destroy_workqueue(dlm->dlm_worker);
310 dlm->dlm_worker = NULL;
311 }
312}
313
281static void dlm_complete_dlm_shutdown(struct dlm_ctxt *dlm) 314static void dlm_complete_dlm_shutdown(struct dlm_ctxt *dlm)
282{ 315{
283 dlm_unregister_domain_handlers(dlm); 316 dlm_unregister_domain_handlers(dlm);
284 dlm_complete_thread(dlm); 317 dlm_complete_thread(dlm);
285 dlm_complete_recovery_thread(dlm); 318 dlm_complete_recovery_thread(dlm);
319 dlm_destroy_dlm_worker(dlm);
286 320
287 /* We've left the domain. Now we can take ourselves out of the 321 /* We've left the domain. Now we can take ourselves out of the
288 * list and allow the kref stuff to help us free the 322 * list and allow the kref stuff to help us free the
@@ -304,8 +338,8 @@ static void dlm_migrate_all_locks(struct dlm_ctxt *dlm)
304restart: 338restart:
305 spin_lock(&dlm->spinlock); 339 spin_lock(&dlm->spinlock);
306 for (i = 0; i < DLM_HASH_BUCKETS; i++) { 340 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
307 while (!hlist_empty(&dlm->lockres_hash[i])) { 341 while (!hlist_empty(dlm_lockres_hash(dlm, i))) {
308 res = hlist_entry(dlm->lockres_hash[i].first, 342 res = hlist_entry(dlm_lockres_hash(dlm, i)->first,
309 struct dlm_lock_resource, hash_node); 343 struct dlm_lock_resource, hash_node);
310 /* need reference when manually grabbing lockres */ 344 /* need reference when manually grabbing lockres */
311 dlm_lockres_get(res); 345 dlm_lockres_get(res);
@@ -1126,6 +1160,13 @@ static int dlm_join_domain(struct dlm_ctxt *dlm)
1126 goto bail; 1160 goto bail;
1127 } 1161 }
1128 1162
1163 dlm->dlm_worker = create_singlethread_workqueue("dlm_wq");
1164 if (!dlm->dlm_worker) {
1165 status = -ENOMEM;
1166 mlog_errno(status);
1167 goto bail;
1168 }
1169
1129 do { 1170 do {
1130 unsigned int backoff; 1171 unsigned int backoff;
1131 status = dlm_try_to_join_domain(dlm); 1172 status = dlm_try_to_join_domain(dlm);
@@ -1166,6 +1207,7 @@ bail:
1166 dlm_unregister_domain_handlers(dlm); 1207 dlm_unregister_domain_handlers(dlm);
1167 dlm_complete_thread(dlm); 1208 dlm_complete_thread(dlm);
1168 dlm_complete_recovery_thread(dlm); 1209 dlm_complete_recovery_thread(dlm);
1210 dlm_destroy_dlm_worker(dlm);
1169 } 1211 }
1170 1212
1171 return status; 1213 return status;
@@ -1191,7 +1233,7 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
1191 goto leave; 1233 goto leave;
1192 } 1234 }
1193 1235
1194 dlm->lockres_hash = (struct hlist_head *) __get_free_page(GFP_KERNEL); 1236 dlm->lockres_hash = (struct hlist_head **)dlm_alloc_pagevec(DLM_HASH_PAGES);
1195 if (!dlm->lockres_hash) { 1237 if (!dlm->lockres_hash) {
1196 mlog_errno(-ENOMEM); 1238 mlog_errno(-ENOMEM);
1197 kfree(dlm->name); 1239 kfree(dlm->name);
@@ -1200,8 +1242,8 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
1200 goto leave; 1242 goto leave;
1201 } 1243 }
1202 1244
1203 for (i=0; i<DLM_HASH_BUCKETS; i++) 1245 for (i = 0; i < DLM_HASH_BUCKETS; i++)
1204 INIT_HLIST_HEAD(&dlm->lockres_hash[i]); 1246 INIT_HLIST_HEAD(dlm_lockres_hash(dlm, i));
1205 1247
1206 strcpy(dlm->name, domain); 1248 strcpy(dlm->name, domain);
1207 dlm->key = key; 1249 dlm->key = key;
@@ -1231,6 +1273,7 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
1231 1273
1232 dlm->dlm_thread_task = NULL; 1274 dlm->dlm_thread_task = NULL;
1233 dlm->dlm_reco_thread_task = NULL; 1275 dlm->dlm_reco_thread_task = NULL;
1276 dlm->dlm_worker = NULL;
1234 init_waitqueue_head(&dlm->dlm_thread_wq); 1277 init_waitqueue_head(&dlm->dlm_thread_wq);
1235 init_waitqueue_head(&dlm->dlm_reco_thread_wq); 1278 init_waitqueue_head(&dlm->dlm_reco_thread_wq);
1236 init_waitqueue_head(&dlm->reco.event); 1279 init_waitqueue_head(&dlm->reco.event);
diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c
index 7273d9fa6b..033ad17012 100644
--- a/fs/ocfs2/dlm/dlmfs.c
+++ b/fs/ocfs2/dlm/dlmfs.c
@@ -116,7 +116,7 @@ static int dlmfs_file_open(struct inode *inode,
116 * doesn't make sense for LVB writes. */ 116 * doesn't make sense for LVB writes. */
117 file->f_flags &= ~O_APPEND; 117 file->f_flags &= ~O_APPEND;
118 118
119 fp = kmalloc(sizeof(*fp), GFP_KERNEL); 119 fp = kmalloc(sizeof(*fp), GFP_NOFS);
120 if (!fp) { 120 if (!fp) {
121 status = -ENOMEM; 121 status = -ENOMEM;
122 goto bail; 122 goto bail;
@@ -196,7 +196,7 @@ static ssize_t dlmfs_file_read(struct file *filp,
196 else 196 else
197 readlen = count - *ppos; 197 readlen = count - *ppos;
198 198
199 lvb_buf = kmalloc(readlen, GFP_KERNEL); 199 lvb_buf = kmalloc(readlen, GFP_NOFS);
200 if (!lvb_buf) 200 if (!lvb_buf)
201 return -ENOMEM; 201 return -ENOMEM;
202 202
@@ -240,7 +240,7 @@ static ssize_t dlmfs_file_write(struct file *filp,
240 else 240 else
241 writelen = count - *ppos; 241 writelen = count - *ppos;
242 242
243 lvb_buf = kmalloc(writelen, GFP_KERNEL); 243 lvb_buf = kmalloc(writelen, GFP_NOFS);
244 if (!lvb_buf) 244 if (!lvb_buf)
245 return -ENOMEM; 245 return -ENOMEM;
246 246
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 6fea28318d..5ca57ec650 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -53,7 +53,7 @@
53#define MLOG_MASK_PREFIX ML_DLM 53#define MLOG_MASK_PREFIX ML_DLM
54#include "cluster/masklog.h" 54#include "cluster/masklog.h"
55 55
56static spinlock_t dlm_cookie_lock = SPIN_LOCK_UNLOCKED; 56static DEFINE_SPINLOCK(dlm_cookie_lock);
57static u64 dlm_next_cookie = 1; 57static u64 dlm_next_cookie = 1;
58 58
59static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm, 59static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
@@ -201,6 +201,7 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
201 struct dlm_lock *lock, int flags) 201 struct dlm_lock *lock, int flags)
202{ 202{
203 enum dlm_status status = DLM_DENIED; 203 enum dlm_status status = DLM_DENIED;
204 int lockres_changed = 1;
204 205
205 mlog_entry("type=%d\n", lock->ml.type); 206 mlog_entry("type=%d\n", lock->ml.type);
206 mlog(0, "lockres %.*s, flags = 0x%x\n", res->lockname.len, 207 mlog(0, "lockres %.*s, flags = 0x%x\n", res->lockname.len,
@@ -226,8 +227,25 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
226 res->state &= ~DLM_LOCK_RES_IN_PROGRESS; 227 res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
227 lock->lock_pending = 0; 228 lock->lock_pending = 0;
228 if (status != DLM_NORMAL) { 229 if (status != DLM_NORMAL) {
229 if (status != DLM_NOTQUEUED) 230 if (status == DLM_RECOVERING &&
231 dlm_is_recovery_lock(res->lockname.name,
232 res->lockname.len)) {
233 /* recovery lock was mastered by dead node.
234 * we need to have calc_usage shoot down this
235 * lockres and completely remaster it. */
236 mlog(0, "%s: recovery lock was owned by "
237 "dead node %u, remaster it now.\n",
238 dlm->name, res->owner);
239 } else if (status != DLM_NOTQUEUED) {
240 /*
241 * DO NOT call calc_usage, as this would unhash
242 * the remote lockres before we ever get to use
243 * it. treat as if we never made any change to
244 * the lockres.
245 */
246 lockres_changed = 0;
230 dlm_error(status); 247 dlm_error(status);
248 }
231 dlm_revert_pending_lock(res, lock); 249 dlm_revert_pending_lock(res, lock);
232 dlm_lock_put(lock); 250 dlm_lock_put(lock);
233 } else if (dlm_is_recovery_lock(res->lockname.name, 251 } else if (dlm_is_recovery_lock(res->lockname.name,
@@ -239,12 +257,12 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
239 mlog(0, "%s: $RECOVERY lock for this node (%u) is " 257 mlog(0, "%s: $RECOVERY lock for this node (%u) is "
240 "mastered by %u; got lock, manually granting (no ast)\n", 258 "mastered by %u; got lock, manually granting (no ast)\n",
241 dlm->name, dlm->node_num, res->owner); 259 dlm->name, dlm->node_num, res->owner);
242 list_del_init(&lock->list); 260 list_move_tail(&lock->list, &res->granted);
243 list_add_tail(&lock->list, &res->granted);
244 } 261 }
245 spin_unlock(&res->spinlock); 262 spin_unlock(&res->spinlock);
246 263
247 dlm_lockres_calc_usage(dlm, res); 264 if (lockres_changed)
265 dlm_lockres_calc_usage(dlm, res);
248 266
249 wake_up(&res->wq); 267 wake_up(&res->wq);
250 return status; 268 return status;
@@ -281,6 +299,14 @@ static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
281 if (tmpret >= 0) { 299 if (tmpret >= 0) {
282 // successfully sent and received 300 // successfully sent and received
283 ret = status; // this is already a dlm_status 301 ret = status; // this is already a dlm_status
302 if (ret == DLM_REJECTED) {
303 mlog(ML_ERROR, "%s:%.*s: BUG. this is a stale lockres "
304 "no longer owned by %u. that node is coming back "
305 "up currently.\n", dlm->name, create.namelen,
306 create.name, res->owner);
307 dlm_print_one_lock_resource(res);
308 BUG();
309 }
284 } else { 310 } else {
285 mlog_errno(tmpret); 311 mlog_errno(tmpret);
286 if (dlm_is_host_down(tmpret)) { 312 if (dlm_is_host_down(tmpret)) {
@@ -382,13 +408,13 @@ struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
382 struct dlm_lock *lock; 408 struct dlm_lock *lock;
383 int kernel_allocated = 0; 409 int kernel_allocated = 0;
384 410
385 lock = kcalloc(1, sizeof(*lock), GFP_KERNEL); 411 lock = kcalloc(1, sizeof(*lock), GFP_NOFS);
386 if (!lock) 412 if (!lock)
387 return NULL; 413 return NULL;
388 414
389 if (!lksb) { 415 if (!lksb) {
390 /* zero memory only if kernel-allocated */ 416 /* zero memory only if kernel-allocated */
391 lksb = kcalloc(1, sizeof(*lksb), GFP_KERNEL); 417 lksb = kcalloc(1, sizeof(*lksb), GFP_NOFS);
392 if (!lksb) { 418 if (!lksb) {
393 kfree(lock); 419 kfree(lock);
394 return NULL; 420 return NULL;
@@ -429,11 +455,16 @@ int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data)
429 if (!dlm_grab(dlm)) 455 if (!dlm_grab(dlm))
430 return DLM_REJECTED; 456 return DLM_REJECTED;
431 457
432 mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
433 "Domain %s not fully joined!\n", dlm->name);
434
435 name = create->name; 458 name = create->name;
436 namelen = create->namelen; 459 namelen = create->namelen;
460 status = DLM_REJECTED;
461 if (!dlm_domain_fully_joined(dlm)) {
462 mlog(ML_ERROR, "Domain %s not fully joined, but node %u is "
463 "sending a create_lock message for lock %.*s!\n",
464 dlm->name, create->node_idx, namelen, name);
465 dlm_error(status);
466 goto leave;
467 }
437 468
438 status = DLM_IVBUFLEN; 469 status = DLM_IVBUFLEN;
439 if (namelen > DLM_LOCKID_NAME_MAX) { 470 if (namelen > DLM_LOCKID_NAME_MAX) {
@@ -669,18 +700,22 @@ retry_lock:
669 msleep(100); 700 msleep(100);
670 /* no waiting for dlm_reco_thread */ 701 /* no waiting for dlm_reco_thread */
671 if (recovery) { 702 if (recovery) {
672 if (status == DLM_RECOVERING) { 703 if (status != DLM_RECOVERING)
673 mlog(0, "%s: got RECOVERING " 704 goto retry_lock;
674 "for $REOCVERY lock, master " 705
675 "was %u\n", dlm->name, 706 mlog(0, "%s: got RECOVERING "
676 res->owner); 707 "for $RECOVERY lock, master "
677 dlm_wait_for_node_death(dlm, res->owner, 708 "was %u\n", dlm->name,
678 DLM_NODE_DEATH_WAIT_MAX); 709 res->owner);
679 } 710 /* wait to see the node go down, then
711 * drop down and allow the lockres to
712 * get cleaned up. need to remaster. */
713 dlm_wait_for_node_death(dlm, res->owner,
714 DLM_NODE_DEATH_WAIT_MAX);
680 } else { 715 } else {
681 dlm_wait_for_recovery(dlm); 716 dlm_wait_for_recovery(dlm);
717 goto retry_lock;
682 } 718 }
683 goto retry_lock;
684 } 719 }
685 720
686 if (status != DLM_NORMAL) { 721 if (status != DLM_NORMAL) {
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 940be4c13b..1b8346dd05 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -47,7 +47,6 @@
47 47
48#include "dlmapi.h" 48#include "dlmapi.h"
49#include "dlmcommon.h" 49#include "dlmcommon.h"
50#include "dlmdebug.h"
51#include "dlmdomain.h" 50#include "dlmdomain.h"
52 51
53#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER) 52#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
@@ -74,6 +73,7 @@ struct dlm_master_list_entry
74 wait_queue_head_t wq; 73 wait_queue_head_t wq;
75 atomic_t woken; 74 atomic_t woken;
76 struct kref mle_refs; 75 struct kref mle_refs;
76 int inuse;
77 unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; 77 unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
78 unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; 78 unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
79 unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; 79 unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
@@ -127,18 +127,30 @@ static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
127 return 1; 127 return 1;
128} 128}
129 129
130#if 0 130#define dlm_print_nodemap(m) _dlm_print_nodemap(m,#m)
131/* Code here is included but defined out as it aids debugging */ 131static void _dlm_print_nodemap(unsigned long *map, const char *mapname)
132{
133 int i;
134 printk("%s=[ ", mapname);
135 for (i=0; i<O2NM_MAX_NODES; i++)
136 if (test_bit(i, map))
137 printk("%d ", i);
138 printk("]");
139}
132 140
133void dlm_print_one_mle(struct dlm_master_list_entry *mle) 141static void dlm_print_one_mle(struct dlm_master_list_entry *mle)
134{ 142{
135 int i = 0, refs; 143 int refs;
136 char *type; 144 char *type;
137 char attached; 145 char attached;
138 u8 master; 146 u8 master;
139 unsigned int namelen; 147 unsigned int namelen;
140 const char *name; 148 const char *name;
141 struct kref *k; 149 struct kref *k;
150 unsigned long *maybe = mle->maybe_map,
151 *vote = mle->vote_map,
152 *resp = mle->response_map,
153 *node = mle->node_map;
142 154
143 k = &mle->mle_refs; 155 k = &mle->mle_refs;
144 if (mle->type == DLM_MLE_BLOCK) 156 if (mle->type == DLM_MLE_BLOCK)
@@ -159,18 +171,29 @@ void dlm_print_one_mle(struct dlm_master_list_entry *mle)
159 name = mle->u.res->lockname.name; 171 name = mle->u.res->lockname.name;
160 } 172 }
161 173
162 mlog(ML_NOTICE, " #%3d: %3s %3d %3u %3u %c (%d)%.*s\n", 174 mlog(ML_NOTICE, "%.*s: %3s refs=%3d mas=%3u new=%3u evt=%c inuse=%d ",
163 i, type, refs, master, mle->new_master, attached, 175 namelen, name, type, refs, master, mle->new_master, attached,
164 namelen, namelen, name); 176 mle->inuse);
177 dlm_print_nodemap(maybe);
178 printk(", ");
179 dlm_print_nodemap(vote);
180 printk(", ");
181 dlm_print_nodemap(resp);
182 printk(", ");
183 dlm_print_nodemap(node);
184 printk(", ");
185 printk("\n");
165} 186}
166 187
188#if 0
189/* Code here is included but defined out as it aids debugging */
190
167static void dlm_dump_mles(struct dlm_ctxt *dlm) 191static void dlm_dump_mles(struct dlm_ctxt *dlm)
168{ 192{
169 struct dlm_master_list_entry *mle; 193 struct dlm_master_list_entry *mle;
170 struct list_head *iter; 194 struct list_head *iter;
171 195
172 mlog(ML_NOTICE, "dumping all mles for domain %s:\n", dlm->name); 196 mlog(ML_NOTICE, "dumping all mles for domain %s:\n", dlm->name);
173 mlog(ML_NOTICE, " ####: type refs owner new events? lockname nodemap votemap respmap maybemap\n");
174 spin_lock(&dlm->master_lock); 197 spin_lock(&dlm->master_lock);
175 list_for_each(iter, &dlm->master_list) { 198 list_for_each(iter, &dlm->master_list) {
176 mle = list_entry(iter, struct dlm_master_list_entry, list); 199 mle = list_entry(iter, struct dlm_master_list_entry, list);
@@ -314,6 +337,31 @@ static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
314 spin_unlock(&dlm->spinlock); 337 spin_unlock(&dlm->spinlock);
315} 338}
316 339
340static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
341{
342 struct dlm_ctxt *dlm;
343 dlm = mle->dlm;
344
345 assert_spin_locked(&dlm->spinlock);
346 assert_spin_locked(&dlm->master_lock);
347 mle->inuse++;
348 kref_get(&mle->mle_refs);
349}
350
351static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
352{
353 struct dlm_ctxt *dlm;
354 dlm = mle->dlm;
355
356 spin_lock(&dlm->spinlock);
357 spin_lock(&dlm->master_lock);
358 mle->inuse--;
359 __dlm_put_mle(mle);
360 spin_unlock(&dlm->master_lock);
361 spin_unlock(&dlm->spinlock);
362
363}
364
317/* remove from list and free */ 365/* remove from list and free */
318static void __dlm_put_mle(struct dlm_master_list_entry *mle) 366static void __dlm_put_mle(struct dlm_master_list_entry *mle)
319{ 367{
@@ -322,9 +370,14 @@ static void __dlm_put_mle(struct dlm_master_list_entry *mle)
322 370
323 assert_spin_locked(&dlm->spinlock); 371 assert_spin_locked(&dlm->spinlock);
324 assert_spin_locked(&dlm->master_lock); 372 assert_spin_locked(&dlm->master_lock);
325 BUG_ON(!atomic_read(&mle->mle_refs.refcount)); 373 if (!atomic_read(&mle->mle_refs.refcount)) {
326 374 /* this may or may not crash, but who cares.
327 kref_put(&mle->mle_refs, dlm_mle_release); 375 * it's a BUG. */
376 mlog(ML_ERROR, "bad mle: %p\n", mle);
377 dlm_print_one_mle(mle);
378 BUG();
379 } else
380 kref_put(&mle->mle_refs, dlm_mle_release);
328} 381}
329 382
330 383
@@ -367,6 +420,7 @@ static void dlm_init_mle(struct dlm_master_list_entry *mle,
367 memset(mle->response_map, 0, sizeof(mle->response_map)); 420 memset(mle->response_map, 0, sizeof(mle->response_map));
368 mle->master = O2NM_MAX_NODES; 421 mle->master = O2NM_MAX_NODES;
369 mle->new_master = O2NM_MAX_NODES; 422 mle->new_master = O2NM_MAX_NODES;
423 mle->inuse = 0;
370 424
371 if (mle->type == DLM_MLE_MASTER) { 425 if (mle->type == DLM_MLE_MASTER) {
372 BUG_ON(!res); 426 BUG_ON(!res);
@@ -564,6 +618,28 @@ static void dlm_lockres_release(struct kref *kref)
564 mlog(0, "destroying lockres %.*s\n", res->lockname.len, 618 mlog(0, "destroying lockres %.*s\n", res->lockname.len,
565 res->lockname.name); 619 res->lockname.name);
566 620
621 if (!hlist_unhashed(&res->hash_node) ||
622 !list_empty(&res->granted) ||
623 !list_empty(&res->converting) ||
624 !list_empty(&res->blocked) ||
625 !list_empty(&res->dirty) ||
626 !list_empty(&res->recovering) ||
627 !list_empty(&res->purge)) {
628 mlog(ML_ERROR,
629 "Going to BUG for resource %.*s."
630 " We're on a list! [%c%c%c%c%c%c%c]\n",
631 res->lockname.len, res->lockname.name,
632 !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
633 !list_empty(&res->granted) ? 'G' : ' ',
634 !list_empty(&res->converting) ? 'C' : ' ',
635 !list_empty(&res->blocked) ? 'B' : ' ',
636 !list_empty(&res->dirty) ? 'D' : ' ',
637 !list_empty(&res->recovering) ? 'R' : ' ',
638 !list_empty(&res->purge) ? 'P' : ' ');
639
640 dlm_print_one_lock_resource(res);
641 }
642
567 /* By the time we're ready to blow this guy away, we shouldn't 643 /* By the time we're ready to blow this guy away, we shouldn't
568 * be on any lists. */ 644 * be on any lists. */
569 BUG_ON(!hlist_unhashed(&res->hash_node)); 645 BUG_ON(!hlist_unhashed(&res->hash_node));
@@ -579,11 +655,6 @@ static void dlm_lockres_release(struct kref *kref)
579 kfree(res); 655 kfree(res);
580} 656}
581 657
582void dlm_lockres_get(struct dlm_lock_resource *res)
583{
584 kref_get(&res->refs);
585}
586
587void dlm_lockres_put(struct dlm_lock_resource *res) 658void dlm_lockres_put(struct dlm_lock_resource *res)
588{ 659{
589 kref_put(&res->refs, dlm_lockres_release); 660 kref_put(&res->refs, dlm_lockres_release);
@@ -603,7 +674,7 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
603 memcpy(qname, name, namelen); 674 memcpy(qname, name, namelen);
604 675
605 res->lockname.len = namelen; 676 res->lockname.len = namelen;
606 res->lockname.hash = full_name_hash(name, namelen); 677 res->lockname.hash = dlm_lockid_hash(name, namelen);
607 678
608 init_waitqueue_head(&res->wq); 679 init_waitqueue_head(&res->wq);
609 spin_lock_init(&res->spinlock); 680 spin_lock_init(&res->spinlock);
@@ -637,11 +708,11 @@ struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
637{ 708{
638 struct dlm_lock_resource *res; 709 struct dlm_lock_resource *res;
639 710
640 res = kmalloc(sizeof(struct dlm_lock_resource), GFP_KERNEL); 711 res = kmalloc(sizeof(struct dlm_lock_resource), GFP_NOFS);
641 if (!res) 712 if (!res)
642 return NULL; 713 return NULL;
643 714
644 res->lockname.name = kmalloc(namelen, GFP_KERNEL); 715 res->lockname.name = kmalloc(namelen, GFP_NOFS);
645 if (!res->lockname.name) { 716 if (!res->lockname.name) {
646 kfree(res); 717 kfree(res);
647 return NULL; 718 return NULL;
@@ -677,19 +748,20 @@ struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
677 int blocked = 0; 748 int blocked = 0;
678 int ret, nodenum; 749 int ret, nodenum;
679 struct dlm_node_iter iter; 750 struct dlm_node_iter iter;
680 unsigned int namelen; 751 unsigned int namelen, hash;
681 int tries = 0; 752 int tries = 0;
682 int bit, wait_on_recovery = 0; 753 int bit, wait_on_recovery = 0;
683 754
684 BUG_ON(!lockid); 755 BUG_ON(!lockid);
685 756
686 namelen = strlen(lockid); 757 namelen = strlen(lockid);
758 hash = dlm_lockid_hash(lockid, namelen);
687 759
688 mlog(0, "get lockres %s (len %d)\n", lockid, namelen); 760 mlog(0, "get lockres %s (len %d)\n", lockid, namelen);
689 761
690lookup: 762lookup:
691 spin_lock(&dlm->spinlock); 763 spin_lock(&dlm->spinlock);
692 tmpres = __dlm_lookup_lockres(dlm, lockid, namelen); 764 tmpres = __dlm_lookup_lockres(dlm, lockid, namelen, hash);
693 if (tmpres) { 765 if (tmpres) {
694 spin_unlock(&dlm->spinlock); 766 spin_unlock(&dlm->spinlock);
695 mlog(0, "found in hash!\n"); 767 mlog(0, "found in hash!\n");
@@ -704,7 +776,7 @@ lookup:
704 mlog(0, "allocating a new resource\n"); 776 mlog(0, "allocating a new resource\n");
705 /* nothing found and we need to allocate one. */ 777 /* nothing found and we need to allocate one. */
706 alloc_mle = (struct dlm_master_list_entry *) 778 alloc_mle = (struct dlm_master_list_entry *)
707 kmem_cache_alloc(dlm_mle_cache, GFP_KERNEL); 779 kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
708 if (!alloc_mle) 780 if (!alloc_mle)
709 goto leave; 781 goto leave;
710 res = dlm_new_lockres(dlm, lockid, namelen); 782 res = dlm_new_lockres(dlm, lockid, namelen);
@@ -790,10 +862,11 @@ lookup:
790 * if so, the creator of the BLOCK may try to put the last 862 * if so, the creator of the BLOCK may try to put the last
791 * ref at this time in the assert master handler, so we 863 * ref at this time in the assert master handler, so we
792 * need an extra one to keep from a bad ptr deref. */ 864 * need an extra one to keep from a bad ptr deref. */
793 dlm_get_mle(mle); 865 dlm_get_mle_inuse(mle);
794 spin_unlock(&dlm->master_lock); 866 spin_unlock(&dlm->master_lock);
795 spin_unlock(&dlm->spinlock); 867 spin_unlock(&dlm->spinlock);
796 868
869redo_request:
797 while (wait_on_recovery) { 870 while (wait_on_recovery) {
798 /* any cluster changes that occurred after dropping the 871 /* any cluster changes that occurred after dropping the
799 * dlm spinlock would be detectable be a change on the mle, 872 * dlm spinlock would be detectable be a change on the mle,
@@ -812,7 +885,7 @@ lookup:
812 } 885 }
813 886
814 dlm_kick_recovery_thread(dlm); 887 dlm_kick_recovery_thread(dlm);
815 msleep(100); 888 msleep(1000);
816 dlm_wait_for_recovery(dlm); 889 dlm_wait_for_recovery(dlm);
817 890
818 spin_lock(&dlm->spinlock); 891 spin_lock(&dlm->spinlock);
@@ -825,13 +898,15 @@ lookup:
825 } else 898 } else
826 wait_on_recovery = 0; 899 wait_on_recovery = 0;
827 spin_unlock(&dlm->spinlock); 900 spin_unlock(&dlm->spinlock);
901
902 if (wait_on_recovery)
903 dlm_wait_for_node_recovery(dlm, bit, 10000);
828 } 904 }
829 905
830 /* must wait for lock to be mastered elsewhere */ 906 /* must wait for lock to be mastered elsewhere */
831 if (blocked) 907 if (blocked)
832 goto wait; 908 goto wait;
833 909
834redo_request:
835 ret = -EINVAL; 910 ret = -EINVAL;
836 dlm_node_iter_init(mle->vote_map, &iter); 911 dlm_node_iter_init(mle->vote_map, &iter);
837 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { 912 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
@@ -856,6 +931,7 @@ wait:
856 /* keep going until the response map includes all nodes */ 931 /* keep going until the response map includes all nodes */
857 ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked); 932 ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
858 if (ret < 0) { 933 if (ret < 0) {
934 wait_on_recovery = 1;
859 mlog(0, "%s:%.*s: node map changed, redo the " 935 mlog(0, "%s:%.*s: node map changed, redo the "
860 "master request now, blocked=%d\n", 936 "master request now, blocked=%d\n",
861 dlm->name, res->lockname.len, 937 dlm->name, res->lockname.len,
@@ -866,7 +942,7 @@ wait:
866 dlm->name, res->lockname.len, 942 dlm->name, res->lockname.len,
867 res->lockname.name, blocked); 943 res->lockname.name, blocked);
868 dlm_print_one_lock_resource(res); 944 dlm_print_one_lock_resource(res);
869 /* dlm_print_one_mle(mle); */ 945 dlm_print_one_mle(mle);
870 tries = 0; 946 tries = 0;
871 } 947 }
872 goto redo_request; 948 goto redo_request;
@@ -880,7 +956,7 @@ wait:
880 dlm_mle_detach_hb_events(dlm, mle); 956 dlm_mle_detach_hb_events(dlm, mle);
881 dlm_put_mle(mle); 957 dlm_put_mle(mle);
882 /* put the extra ref */ 958 /* put the extra ref */
883 dlm_put_mle(mle); 959 dlm_put_mle_inuse(mle);
884 960
885wake_waiters: 961wake_waiters:
886 spin_lock(&res->spinlock); 962 spin_lock(&res->spinlock);
@@ -921,12 +997,14 @@ recheck:
921 spin_unlock(&res->spinlock); 997 spin_unlock(&res->spinlock);
922 /* this will cause the master to re-assert across 998 /* this will cause the master to re-assert across
923 * the whole cluster, freeing up mles */ 999 * the whole cluster, freeing up mles */
924 ret = dlm_do_master_request(mle, res->owner); 1000 if (res->owner != dlm->node_num) {
925 if (ret < 0) { 1001 ret = dlm_do_master_request(mle, res->owner);
926 /* give recovery a chance to run */ 1002 if (ret < 0) {
927 mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret); 1003 /* give recovery a chance to run */
928 msleep(500); 1004 mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
929 goto recheck; 1005 msleep(500);
1006 goto recheck;
1007 }
930 } 1008 }
931 ret = 0; 1009 ret = 0;
932 goto leave; 1010 goto leave;
@@ -962,6 +1040,12 @@ recheck:
962 "rechecking now\n", dlm->name, res->lockname.len, 1040 "rechecking now\n", dlm->name, res->lockname.len,
963 res->lockname.name); 1041 res->lockname.name);
964 goto recheck; 1042 goto recheck;
1043 } else {
1044 if (!voting_done) {
1045 mlog(0, "map not changed and voting not done "
1046 "for %s:%.*s\n", dlm->name, res->lockname.len,
1047 res->lockname.name);
1048 }
965 } 1049 }
966 1050
967 if (m != O2NM_MAX_NODES) { 1051 if (m != O2NM_MAX_NODES) {
@@ -1129,18 +1213,6 @@ static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
1129 set_bit(node, mle->vote_map); 1213 set_bit(node, mle->vote_map);
1130 } else { 1214 } else {
1131 mlog(ML_ERROR, "node down! %d\n", node); 1215 mlog(ML_ERROR, "node down! %d\n", node);
1132
1133 /* if the node wasn't involved in mastery skip it,
1134 * but clear it out from the maps so that it will
1135 * not affect mastery of this lockres */
1136 clear_bit(node, mle->response_map);
1137 clear_bit(node, mle->vote_map);
1138 if (!test_bit(node, mle->maybe_map))
1139 goto next;
1140
1141 /* if we're already blocked on lock mastery, and the
1142 * dead node wasn't the expected master, or there is
1143 * another node in the maybe_map, keep waiting */
1144 if (blocked) { 1216 if (blocked) {
1145 int lowest = find_next_bit(mle->maybe_map, 1217 int lowest = find_next_bit(mle->maybe_map,
1146 O2NM_MAX_NODES, 0); 1218 O2NM_MAX_NODES, 0);
@@ -1148,54 +1220,53 @@ static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
1148 /* act like it was never there */ 1220 /* act like it was never there */
1149 clear_bit(node, mle->maybe_map); 1221 clear_bit(node, mle->maybe_map);
1150 1222
1151 if (node != lowest) 1223 if (node == lowest) {
1152 goto next; 1224 mlog(0, "expected master %u died"
1153 1225 " while this node was blocked "
1154 mlog(ML_ERROR, "expected master %u died while " 1226 "waiting on it!\n", node);
1155 "this node was blocked waiting on it!\n", 1227 lowest = find_next_bit(mle->maybe_map,
1156 node); 1228 O2NM_MAX_NODES,
1157 lowest = find_next_bit(mle->maybe_map, 1229 lowest+1);
1158 O2NM_MAX_NODES, 1230 if (lowest < O2NM_MAX_NODES) {
1159 lowest+1); 1231 mlog(0, "%s:%.*s:still "
1160 if (lowest < O2NM_MAX_NODES) { 1232 "blocked. waiting on %u "
1161 mlog(0, "still blocked. waiting " 1233 "now\n", dlm->name,
1162 "on %u now\n", lowest); 1234 res->lockname.len,
1163 goto next; 1235 res->lockname.name,
1236 lowest);
1237 } else {
1238 /* mle is an MLE_BLOCK, but
1239 * there is now nothing left to
1240 * block on. we need to return
1241 * all the way back out and try
1242 * again with an MLE_MASTER.
1243 * dlm_do_local_recovery_cleanup
1244 * has already run, so the mle
1245 * refcount is ok */
1246 mlog(0, "%s:%.*s: no "
1247 "longer blocking. try to "
1248 "master this here\n",
1249 dlm->name,
1250 res->lockname.len,
1251 res->lockname.name);
1252 mle->type = DLM_MLE_MASTER;
1253 mle->u.res = res;
1254 }
1164 } 1255 }
1165
1166 /* mle is an MLE_BLOCK, but there is now
1167 * nothing left to block on. we need to return
1168 * all the way back out and try again with
1169 * an MLE_MASTER. dlm_do_local_recovery_cleanup
1170 * has already run, so the mle refcount is ok */
1171 mlog(0, "no longer blocking. we can "
1172 "try to master this here\n");
1173 mle->type = DLM_MLE_MASTER;
1174 memset(mle->maybe_map, 0,
1175 sizeof(mle->maybe_map));
1176 memset(mle->response_map, 0,
1177 sizeof(mle->maybe_map));
1178 memcpy(mle->vote_map, mle->node_map,
1179 sizeof(mle->node_map));
1180 mle->u.res = res;
1181 set_bit(dlm->node_num, mle->maybe_map);
1182
1183 ret = -EAGAIN;
1184 goto next;
1185 } 1256 }
1186 1257
1187 clear_bit(node, mle->maybe_map); 1258 /* now blank out everything, as if we had never
1188 if (node > dlm->node_num) 1259 * contacted anyone */
1189 goto next; 1260 memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
1190 1261 memset(mle->response_map, 0, sizeof(mle->response_map));
1191 mlog(0, "dead node in map!\n"); 1262 /* reset the vote_map to the current node_map */
1192 /* yuck. go back and re-contact all nodes 1263 memcpy(mle->vote_map, mle->node_map,
1193 * in the vote_map, removing this node. */ 1264 sizeof(mle->node_map));
1194 memset(mle->response_map, 0, 1265 /* put myself into the maybe map */
1195 sizeof(mle->response_map)); 1266 if (mle->type != DLM_MLE_BLOCK)
1267 set_bit(dlm->node_num, mle->maybe_map);
1196 } 1268 }
1197 ret = -EAGAIN; 1269 ret = -EAGAIN;
1198next:
1199 node = dlm_bitmap_diff_iter_next(&bdi, &sc); 1270 node = dlm_bitmap_diff_iter_next(&bdi, &sc);
1200 } 1271 }
1201 return ret; 1272 return ret;
@@ -1316,7 +1387,7 @@ int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data)
1316 struct dlm_master_request *request = (struct dlm_master_request *) msg->buf; 1387 struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
1317 struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL; 1388 struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
1318 char *name; 1389 char *name;
1319 unsigned int namelen; 1390 unsigned int namelen, hash;
1320 int found, ret; 1391 int found, ret;
1321 int set_maybe; 1392 int set_maybe;
1322 int dispatch_assert = 0; 1393 int dispatch_assert = 0;
@@ -1331,6 +1402,7 @@ int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data)
1331 1402
1332 name = request->name; 1403 name = request->name;
1333 namelen = request->namelen; 1404 namelen = request->namelen;
1405 hash = dlm_lockid_hash(name, namelen);
1334 1406
1335 if (namelen > DLM_LOCKID_NAME_MAX) { 1407 if (namelen > DLM_LOCKID_NAME_MAX) {
1336 response = DLM_IVBUFLEN; 1408 response = DLM_IVBUFLEN;
@@ -1339,7 +1411,7 @@ int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data)
1339 1411
1340way_up_top: 1412way_up_top:
1341 spin_lock(&dlm->spinlock); 1413 spin_lock(&dlm->spinlock);
1342 res = __dlm_lookup_lockres(dlm, name, namelen); 1414 res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1343 if (res) { 1415 if (res) {
1344 spin_unlock(&dlm->spinlock); 1416 spin_unlock(&dlm->spinlock);
1345 1417
@@ -1459,21 +1531,18 @@ way_up_top:
1459 spin_unlock(&dlm->spinlock); 1531 spin_unlock(&dlm->spinlock);
1460 1532
1461 mle = (struct dlm_master_list_entry *) 1533 mle = (struct dlm_master_list_entry *)
1462 kmem_cache_alloc(dlm_mle_cache, GFP_KERNEL); 1534 kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
1463 if (!mle) { 1535 if (!mle) {
1464 response = DLM_MASTER_RESP_ERROR; 1536 response = DLM_MASTER_RESP_ERROR;
1465 mlog_errno(-ENOMEM); 1537 mlog_errno(-ENOMEM);
1466 goto send_response; 1538 goto send_response;
1467 } 1539 }
1468 spin_lock(&dlm->spinlock);
1469 dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL,
1470 name, namelen);
1471 spin_unlock(&dlm->spinlock);
1472 goto way_up_top; 1540 goto way_up_top;
1473 } 1541 }
1474 1542
1475 // mlog(0, "this is second time thru, already allocated, " 1543 // mlog(0, "this is second time thru, already allocated, "
1476 // "add the block.\n"); 1544 // "add the block.\n");
1545 dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
1477 set_bit(request->node_idx, mle->maybe_map); 1546 set_bit(request->node_idx, mle->maybe_map);
1478 list_add(&mle->list, &dlm->master_list); 1547 list_add(&mle->list, &dlm->master_list);
1479 response = DLM_MASTER_RESP_NO; 1548 response = DLM_MASTER_RESP_NO;
@@ -1556,6 +1625,8 @@ again:
1556 dlm_node_iter_init(nodemap, &iter); 1625 dlm_node_iter_init(nodemap, &iter);
1557 while ((to = dlm_node_iter_next(&iter)) >= 0) { 1626 while ((to = dlm_node_iter_next(&iter)) >= 0) {
1558 int r = 0; 1627 int r = 0;
1628 struct dlm_master_list_entry *mle = NULL;
1629
1559 mlog(0, "sending assert master to %d (%.*s)\n", to, 1630 mlog(0, "sending assert master to %d (%.*s)\n", to,
1560 namelen, lockname); 1631 namelen, lockname);
1561 memset(&assert, 0, sizeof(assert)); 1632 memset(&assert, 0, sizeof(assert));
@@ -1567,20 +1638,28 @@ again:
1567 tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key, 1638 tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
1568 &assert, sizeof(assert), to, &r); 1639 &assert, sizeof(assert), to, &r);
1569 if (tmpret < 0) { 1640 if (tmpret < 0) {
1570 mlog(ML_ERROR, "assert_master returned %d!\n", tmpret); 1641 mlog(0, "assert_master returned %d!\n", tmpret);
1571 if (!dlm_is_host_down(tmpret)) { 1642 if (!dlm_is_host_down(tmpret)) {
1572 mlog(ML_ERROR, "unhandled error!\n"); 1643 mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
1573 BUG(); 1644 BUG();
1574 } 1645 }
1575 /* a node died. finish out the rest of the nodes. */ 1646 /* a node died. finish out the rest of the nodes. */
1576 mlog(ML_ERROR, "link to %d went down!\n", to); 1647 mlog(0, "link to %d went down!\n", to);
1577 /* any nonzero status return will do */ 1648 /* any nonzero status return will do */
1578 ret = tmpret; 1649 ret = tmpret;
1579 } else if (r < 0) { 1650 } else if (r < 0) {
1580 /* ok, something horribly messed. kill thyself. */ 1651 /* ok, something horribly messed. kill thyself. */
1581 mlog(ML_ERROR,"during assert master of %.*s to %u, " 1652 mlog(ML_ERROR,"during assert master of %.*s to %u, "
1582 "got %d.\n", namelen, lockname, to, r); 1653 "got %d.\n", namelen, lockname, to, r);
1583 dlm_dump_lock_resources(dlm); 1654 spin_lock(&dlm->spinlock);
1655 spin_lock(&dlm->master_lock);
1656 if (dlm_find_mle(dlm, &mle, (char *)lockname,
1657 namelen)) {
1658 dlm_print_one_mle(mle);
1659 __dlm_put_mle(mle);
1660 }
1661 spin_unlock(&dlm->master_lock);
1662 spin_unlock(&dlm->spinlock);
1584 BUG(); 1663 BUG();
1585 } else if (r == EAGAIN) { 1664 } else if (r == EAGAIN) {
1586 mlog(0, "%.*s: node %u create mles on other " 1665 mlog(0, "%.*s: node %u create mles on other "
@@ -1612,7 +1691,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data)
1612 struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf; 1691 struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
1613 struct dlm_lock_resource *res = NULL; 1692 struct dlm_lock_resource *res = NULL;
1614 char *name; 1693 char *name;
1615 unsigned int namelen; 1694 unsigned int namelen, hash;
1616 u32 flags; 1695 u32 flags;
1617 int master_request = 0; 1696 int master_request = 0;
1618 int ret = 0; 1697 int ret = 0;
@@ -1622,6 +1701,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data)
1622 1701
1623 name = assert->name; 1702 name = assert->name;
1624 namelen = assert->namelen; 1703 namelen = assert->namelen;
1704 hash = dlm_lockid_hash(name, namelen);
1625 flags = be32_to_cpu(assert->flags); 1705 flags = be32_to_cpu(assert->flags);
1626 1706
1627 if (namelen > DLM_LOCKID_NAME_MAX) { 1707 if (namelen > DLM_LOCKID_NAME_MAX) {
@@ -1646,7 +1726,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data)
1646 if (bit >= O2NM_MAX_NODES) { 1726 if (bit >= O2NM_MAX_NODES) {
1647 /* not necessarily an error, though less likely. 1727 /* not necessarily an error, though less likely.
1648 * could be master just re-asserting. */ 1728 * could be master just re-asserting. */
1649 mlog(ML_ERROR, "no bits set in the maybe_map, but %u " 1729 mlog(0, "no bits set in the maybe_map, but %u "
1650 "is asserting! (%.*s)\n", assert->node_idx, 1730 "is asserting! (%.*s)\n", assert->node_idx,
1651 namelen, name); 1731 namelen, name);
1652 } else if (bit != assert->node_idx) { 1732 } else if (bit != assert->node_idx) {
@@ -1658,19 +1738,36 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data)
1658 * number winning the mastery will respond 1738 * number winning the mastery will respond
1659 * YES to mastery requests, but this node 1739 * YES to mastery requests, but this node
1660 * had no way of knowing. let it pass. */ 1740 * had no way of knowing. let it pass. */
1661 mlog(ML_ERROR, "%u is the lowest node, " 1741 mlog(0, "%u is the lowest node, "
1662 "%u is asserting. (%.*s) %u must " 1742 "%u is asserting. (%.*s) %u must "
1663 "have begun after %u won.\n", bit, 1743 "have begun after %u won.\n", bit,
1664 assert->node_idx, namelen, name, bit, 1744 assert->node_idx, namelen, name, bit,
1665 assert->node_idx); 1745 assert->node_idx);
1666 } 1746 }
1667 } 1747 }
1748 if (mle->type == DLM_MLE_MIGRATION) {
1749 if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1750 mlog(0, "%s:%.*s: got cleanup assert"
1751 " from %u for migration\n",
1752 dlm->name, namelen, name,
1753 assert->node_idx);
1754 } else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
1755 mlog(0, "%s:%.*s: got unrelated assert"
1756 " from %u for migration, ignoring\n",
1757 dlm->name, namelen, name,
1758 assert->node_idx);
1759 __dlm_put_mle(mle);
1760 spin_unlock(&dlm->master_lock);
1761 spin_unlock(&dlm->spinlock);
1762 goto done;
1763 }
1764 }
1668 } 1765 }
1669 spin_unlock(&dlm->master_lock); 1766 spin_unlock(&dlm->master_lock);
1670 1767
1671 /* ok everything checks out with the MLE 1768 /* ok everything checks out with the MLE
1672 * now check to see if there is a lockres */ 1769 * now check to see if there is a lockres */
1673 res = __dlm_lookup_lockres(dlm, name, namelen); 1770 res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1674 if (res) { 1771 if (res) {
1675 spin_lock(&res->spinlock); 1772 spin_lock(&res->spinlock);
1676 if (res->state & DLM_LOCK_RES_RECOVERING) { 1773 if (res->state & DLM_LOCK_RES_RECOVERING) {
@@ -1679,7 +1776,8 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data)
1679 goto kill; 1776 goto kill;
1680 } 1777 }
1681 if (!mle) { 1778 if (!mle) {
1682 if (res->owner != assert->node_idx) { 1779 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
1780 res->owner != assert->node_idx) {
1683 mlog(ML_ERROR, "assert_master from " 1781 mlog(ML_ERROR, "assert_master from "
1684 "%u, but current owner is " 1782 "%u, but current owner is "
1685 "%u! (%.*s)\n", 1783 "%u! (%.*s)\n",
@@ -1732,6 +1830,7 @@ ok:
1732 if (mle) { 1830 if (mle) {
1733 int extra_ref = 0; 1831 int extra_ref = 0;
1734 int nn = -1; 1832 int nn = -1;
1833 int rr, err = 0;
1735 1834
1736 spin_lock(&mle->spinlock); 1835 spin_lock(&mle->spinlock);
1737 if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION) 1836 if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
@@ -1751,27 +1850,64 @@ ok:
1751 wake_up(&mle->wq); 1850 wake_up(&mle->wq);
1752 spin_unlock(&mle->spinlock); 1851 spin_unlock(&mle->spinlock);
1753 1852
1754 if (mle->type == DLM_MLE_MIGRATION && res) { 1853 if (res) {
1755 mlog(0, "finishing off migration of lockres %.*s, "
1756 "from %u to %u\n",
1757 res->lockname.len, res->lockname.name,
1758 dlm->node_num, mle->new_master);
1759 spin_lock(&res->spinlock); 1854 spin_lock(&res->spinlock);
1760 res->state &= ~DLM_LOCK_RES_MIGRATING; 1855 if (mle->type == DLM_MLE_MIGRATION) {
1761 dlm_change_lockres_owner(dlm, res, mle->new_master); 1856 mlog(0, "finishing off migration of lockres %.*s, "
1762 BUG_ON(res->state & DLM_LOCK_RES_DIRTY); 1857 "from %u to %u\n",
1858 res->lockname.len, res->lockname.name,
1859 dlm->node_num, mle->new_master);
1860 res->state &= ~DLM_LOCK_RES_MIGRATING;
1861 dlm_change_lockres_owner(dlm, res, mle->new_master);
1862 BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
1863 } else {
1864 dlm_change_lockres_owner(dlm, res, mle->master);
1865 }
1763 spin_unlock(&res->spinlock); 1866 spin_unlock(&res->spinlock);
1764 } 1867 }
1765 /* master is known, detach if not already detached */ 1868
1766 dlm_mle_detach_hb_events(dlm, mle); 1869 /* master is known, detach if not already detached.
1767 dlm_put_mle(mle); 1870 * ensures that only one assert_master call will happen
1768 1871 * on this mle. */
1872 spin_lock(&dlm->spinlock);
1873 spin_lock(&dlm->master_lock);
1874
1875 rr = atomic_read(&mle->mle_refs.refcount);
1876 if (mle->inuse > 0) {
1877 if (extra_ref && rr < 3)
1878 err = 1;
1879 else if (!extra_ref && rr < 2)
1880 err = 1;
1881 } else {
1882 if (extra_ref && rr < 2)
1883 err = 1;
1884 else if (!extra_ref && rr < 1)
1885 err = 1;
1886 }
1887 if (err) {
1888 mlog(ML_ERROR, "%s:%.*s: got assert master from %u "
1889 "that will mess up this node, refs=%d, extra=%d, "
1890 "inuse=%d\n", dlm->name, namelen, name,
1891 assert->node_idx, rr, extra_ref, mle->inuse);
1892 dlm_print_one_mle(mle);
1893 }
1894 list_del_init(&mle->list);
1895 __dlm_mle_detach_hb_events(dlm, mle);
1896 __dlm_put_mle(mle);
1769 if (extra_ref) { 1897 if (extra_ref) {
1770 /* the assert master message now balances the extra 1898 /* the assert master message now balances the extra
1771 * ref given by the master / migration request message. 1899 * ref given by the master / migration request message.
1772 * if this is the last put, it will be removed 1900 * if this is the last put, it will be removed
1773 * from the list. */ 1901 * from the list. */
1774 dlm_put_mle(mle); 1902 __dlm_put_mle(mle);
1903 }
1904 spin_unlock(&dlm->master_lock);
1905 spin_unlock(&dlm->spinlock);
1906 } else if (res) {
1907 if (res->owner != assert->node_idx) {
1908 mlog(0, "assert_master from %u, but current "
1909 "owner is %u (%.*s), no mle\n", assert->node_idx,
1910 res->owner, namelen, name);
1775 } 1911 }
1776 } 1912 }
1777 1913
@@ -1788,12 +1924,12 @@ done:
1788 1924
1789kill: 1925kill:
1790 /* kill the caller! */ 1926 /* kill the caller! */
1927 mlog(ML_ERROR, "Bad message received from another node. Dumping state "
1928 "and killing the other node now! This node is OK and can continue.\n");
1929 __dlm_print_one_lock_resource(res);
1791 spin_unlock(&res->spinlock); 1930 spin_unlock(&res->spinlock);
1792 spin_unlock(&dlm->spinlock); 1931 spin_unlock(&dlm->spinlock);
1793 dlm_lockres_put(res); 1932 dlm_lockres_put(res);
1794 mlog(ML_ERROR, "Bad message received from another node. Dumping state "
1795 "and killing the other node now! This node is OK and can continue.\n");
1796 dlm_dump_lock_resources(dlm);
1797 dlm_put(dlm); 1933 dlm_put(dlm);
1798 return -EINVAL; 1934 return -EINVAL;
1799} 1935}
@@ -1803,7 +1939,7 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
1803 int ignore_higher, u8 request_from, u32 flags) 1939 int ignore_higher, u8 request_from, u32 flags)
1804{ 1940{
1805 struct dlm_work_item *item; 1941 struct dlm_work_item *item;
1806 item = kcalloc(1, sizeof(*item), GFP_KERNEL); 1942 item = kcalloc(1, sizeof(*item), GFP_NOFS);
1807 if (!item) 1943 if (!item)
1808 return -ENOMEM; 1944 return -ENOMEM;
1809 1945
@@ -1825,7 +1961,7 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
1825 list_add_tail(&item->list, &dlm->work_list); 1961 list_add_tail(&item->list, &dlm->work_list);
1826 spin_unlock(&dlm->work_lock); 1962 spin_unlock(&dlm->work_lock);
1827 1963
1828 schedule_work(&dlm->dispatched_work); 1964 queue_work(dlm->dlm_worker, &dlm->dispatched_work);
1829 return 0; 1965 return 0;
1830} 1966}
1831 1967
@@ -1866,6 +2002,23 @@ static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
1866 } 2002 }
1867 } 2003 }
1868 2004
2005 /*
2006 * If we're migrating this lock to someone else, we are no
2007 * longer allowed to assert out own mastery. OTOH, we need to
2008 * prevent migration from starting while we're still asserting
2009 * our dominance. The reserved ast delays migration.
2010 */
2011 spin_lock(&res->spinlock);
2012 if (res->state & DLM_LOCK_RES_MIGRATING) {
2013 mlog(0, "Someone asked us to assert mastery, but we're "
2014 "in the middle of migration. Skipping assert, "
2015 "the new master will handle that.\n");
2016 spin_unlock(&res->spinlock);
2017 goto put;
2018 } else
2019 __dlm_lockres_reserve_ast(res);
2020 spin_unlock(&res->spinlock);
2021
1869 /* this call now finishes out the nodemap 2022 /* this call now finishes out the nodemap
1870 * even if one or more nodes die */ 2023 * even if one or more nodes die */
1871 mlog(0, "worker about to master %.*s here, this=%u\n", 2024 mlog(0, "worker about to master %.*s here, this=%u\n",
@@ -1875,9 +2028,14 @@ static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
1875 nodemap, flags); 2028 nodemap, flags);
1876 if (ret < 0) { 2029 if (ret < 0) {
1877 /* no need to restart, we are done */ 2030 /* no need to restart, we are done */
1878 mlog_errno(ret); 2031 if (!dlm_is_host_down(ret))
2032 mlog_errno(ret);
1879 } 2033 }
1880 2034
2035 /* Ok, we've asserted ourselves. Let's let migration start. */
2036 dlm_lockres_release_ast(dlm, res);
2037
2038put:
1881 dlm_lockres_put(res); 2039 dlm_lockres_put(res);
1882 2040
1883 mlog(0, "finished with dlm_assert_master_worker\n"); 2041 mlog(0, "finished with dlm_assert_master_worker\n");
@@ -1916,6 +2074,7 @@ static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
1916 BUG(); 2074 BUG();
1917 /* host is down, so answer for that node would be 2075 /* host is down, so answer for that node would be
1918 * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */ 2076 * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */
2077 ret = 0;
1919 } 2078 }
1920 2079
1921 if (master != DLM_LOCK_RES_OWNER_UNKNOWN) { 2080 if (master != DLM_LOCK_RES_OWNER_UNKNOWN) {
@@ -2016,14 +2175,14 @@ int dlm_migrate_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
2016 */ 2175 */
2017 2176
2018 ret = -ENOMEM; 2177 ret = -ENOMEM;
2019 mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_KERNEL); 2178 mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS);
2020 if (!mres) { 2179 if (!mres) {
2021 mlog_errno(ret); 2180 mlog_errno(ret);
2022 goto leave; 2181 goto leave;
2023 } 2182 }
2024 2183
2025 mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache, 2184 mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
2026 GFP_KERNEL); 2185 GFP_NOFS);
2027 if (!mle) { 2186 if (!mle) {
2028 mlog_errno(ret); 2187 mlog_errno(ret);
2029 goto leave; 2188 goto leave;
@@ -2117,7 +2276,7 @@ fail:
2117 * take both dlm->spinlock and dlm->master_lock */ 2276 * take both dlm->spinlock and dlm->master_lock */
2118 spin_lock(&dlm->spinlock); 2277 spin_lock(&dlm->spinlock);
2119 spin_lock(&dlm->master_lock); 2278 spin_lock(&dlm->master_lock);
2120 dlm_get_mle(mle); 2279 dlm_get_mle_inuse(mle);
2121 spin_unlock(&dlm->master_lock); 2280 spin_unlock(&dlm->master_lock);
2122 spin_unlock(&dlm->spinlock); 2281 spin_unlock(&dlm->spinlock);
2123 2282
@@ -2134,7 +2293,10 @@ fail:
2134 /* migration failed, detach and clean up mle */ 2293 /* migration failed, detach and clean up mle */
2135 dlm_mle_detach_hb_events(dlm, mle); 2294 dlm_mle_detach_hb_events(dlm, mle);
2136 dlm_put_mle(mle); 2295 dlm_put_mle(mle);
2137 dlm_put_mle(mle); 2296 dlm_put_mle_inuse(mle);
2297 spin_lock(&res->spinlock);
2298 res->state &= ~DLM_LOCK_RES_MIGRATING;
2299 spin_unlock(&res->spinlock);
2138 goto leave; 2300 goto leave;
2139 } 2301 }
2140 2302
@@ -2164,8 +2326,8 @@ fail:
2164 /* avoid hang during shutdown when migrating lockres 2326 /* avoid hang during shutdown when migrating lockres
2165 * to a node which also goes down */ 2327 * to a node which also goes down */
2166 if (dlm_is_node_dead(dlm, target)) { 2328 if (dlm_is_node_dead(dlm, target)) {
2167 mlog(0, "%s:%.*s: expected migration target %u " 2329 mlog(0, "%s:%.*s: expected migration "
2168 "is no longer up. restarting.\n", 2330 "target %u is no longer up, restarting\n",
2169 dlm->name, res->lockname.len, 2331 dlm->name, res->lockname.len,
2170 res->lockname.name, target); 2332 res->lockname.name, target);
2171 ret = -ERESTARTSYS; 2333 ret = -ERESTARTSYS;
@@ -2175,7 +2337,10 @@ fail:
2175 /* migration failed, detach and clean up mle */ 2337 /* migration failed, detach and clean up mle */
2176 dlm_mle_detach_hb_events(dlm, mle); 2338 dlm_mle_detach_hb_events(dlm, mle);
2177 dlm_put_mle(mle); 2339 dlm_put_mle(mle);
2178 dlm_put_mle(mle); 2340 dlm_put_mle_inuse(mle);
2341 spin_lock(&res->spinlock);
2342 res->state &= ~DLM_LOCK_RES_MIGRATING;
2343 spin_unlock(&res->spinlock);
2179 goto leave; 2344 goto leave;
2180 } 2345 }
2181 /* TODO: if node died: stop, clean up, return error */ 2346 /* TODO: if node died: stop, clean up, return error */
@@ -2191,7 +2356,7 @@ fail:
2191 2356
2192 /* master is known, detach if not already detached */ 2357 /* master is known, detach if not already detached */
2193 dlm_mle_detach_hb_events(dlm, mle); 2358 dlm_mle_detach_hb_events(dlm, mle);
2194 dlm_put_mle(mle); 2359 dlm_put_mle_inuse(mle);
2195 ret = 0; 2360 ret = 0;
2196 2361
2197 dlm_lockres_calc_usage(dlm, res); 2362 dlm_lockres_calc_usage(dlm, res);
@@ -2462,7 +2627,7 @@ int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data)
2462 struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf; 2627 struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
2463 struct dlm_master_list_entry *mle = NULL, *oldmle = NULL; 2628 struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
2464 const char *name; 2629 const char *name;
2465 unsigned int namelen; 2630 unsigned int namelen, hash;
2466 int ret = 0; 2631 int ret = 0;
2467 2632
2468 if (!dlm_grab(dlm)) 2633 if (!dlm_grab(dlm))
@@ -2470,10 +2635,11 @@ int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data)
2470 2635
2471 name = migrate->name; 2636 name = migrate->name;
2472 namelen = migrate->namelen; 2637 namelen = migrate->namelen;
2638 hash = dlm_lockid_hash(name, namelen);
2473 2639
2474 /* preallocate.. if this fails, abort */ 2640 /* preallocate.. if this fails, abort */
2475 mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache, 2641 mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
2476 GFP_KERNEL); 2642 GFP_NOFS);
2477 2643
2478 if (!mle) { 2644 if (!mle) {
2479 ret = -ENOMEM; 2645 ret = -ENOMEM;
@@ -2482,7 +2648,7 @@ int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data)
2482 2648
2483 /* check for pre-existing lock */ 2649 /* check for pre-existing lock */
2484 spin_lock(&dlm->spinlock); 2650 spin_lock(&dlm->spinlock);
2485 res = __dlm_lookup_lockres(dlm, name, namelen); 2651 res = __dlm_lookup_lockres(dlm, name, namelen, hash);
2486 spin_lock(&dlm->master_lock); 2652 spin_lock(&dlm->master_lock);
2487 2653
2488 if (res) { 2654 if (res) {
@@ -2580,6 +2746,7 @@ static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
2580 /* remove it from the list so that only one 2746 /* remove it from the list so that only one
2581 * mle will be found */ 2747 * mle will be found */
2582 list_del_init(&tmp->list); 2748 list_del_init(&tmp->list);
2749 __dlm_mle_detach_hb_events(dlm, mle);
2583 } 2750 }
2584 spin_unlock(&tmp->spinlock); 2751 spin_unlock(&tmp->spinlock);
2585 } 2752 }
@@ -2601,6 +2768,7 @@ void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
2601 struct list_head *iter, *iter2; 2768 struct list_head *iter, *iter2;
2602 struct dlm_master_list_entry *mle; 2769 struct dlm_master_list_entry *mle;
2603 struct dlm_lock_resource *res; 2770 struct dlm_lock_resource *res;
2771 unsigned int hash;
2604 2772
2605 mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node); 2773 mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node);
2606top: 2774top:
@@ -2640,7 +2808,7 @@ top:
2640 * may result in the mle being unlinked and 2808 * may result in the mle being unlinked and
2641 * freed, but there may still be a process 2809 * freed, but there may still be a process
2642 * waiting in the dlmlock path which is fine. */ 2810 * waiting in the dlmlock path which is fine. */
2643 mlog(ML_ERROR, "node %u was expected master\n", 2811 mlog(0, "node %u was expected master\n",
2644 dead_node); 2812 dead_node);
2645 atomic_set(&mle->woken, 1); 2813 atomic_set(&mle->woken, 1);
2646 spin_unlock(&mle->spinlock); 2814 spin_unlock(&mle->spinlock);
@@ -2673,19 +2841,21 @@ top:
2673 2841
2674 /* remove from the list early. NOTE: unlinking 2842 /* remove from the list early. NOTE: unlinking
2675 * list_head while in list_for_each_safe */ 2843 * list_head while in list_for_each_safe */
2844 __dlm_mle_detach_hb_events(dlm, mle);
2676 spin_lock(&mle->spinlock); 2845 spin_lock(&mle->spinlock);
2677 list_del_init(&mle->list); 2846 list_del_init(&mle->list);
2678 atomic_set(&mle->woken, 1); 2847 atomic_set(&mle->woken, 1);
2679 spin_unlock(&mle->spinlock); 2848 spin_unlock(&mle->spinlock);
2680 wake_up(&mle->wq); 2849 wake_up(&mle->wq);
2681 2850
2682 mlog(0, "node %u died during migration from " 2851 mlog(0, "%s: node %u died during migration from "
2683 "%u to %u!\n", dead_node, 2852 "%u to %u!\n", dlm->name, dead_node,
2684 mle->master, mle->new_master); 2853 mle->master, mle->new_master);
2685 /* if there is a lockres associated with this 2854 /* if there is a lockres associated with this
2686 * mle, find it and set its owner to UNKNOWN */ 2855 * mle, find it and set its owner to UNKNOWN */
2856 hash = dlm_lockid_hash(mle->u.name.name, mle->u.name.len);
2687 res = __dlm_lookup_lockres(dlm, mle->u.name.name, 2857 res = __dlm_lookup_lockres(dlm, mle->u.name.name,
2688 mle->u.name.len); 2858 mle->u.name.len, hash);
2689 if (res) { 2859 if (res) {
2690 /* unfortunately if we hit this rare case, our 2860 /* unfortunately if we hit this rare case, our
2691 * lock ordering is messed. we need to drop 2861 * lock ordering is messed. we need to drop
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 805cbabac0..29b2845f37 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -98,8 +98,8 @@ static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
98 98
99static u64 dlm_get_next_mig_cookie(void); 99static u64 dlm_get_next_mig_cookie(void);
100 100
101static spinlock_t dlm_reco_state_lock = SPIN_LOCK_UNLOCKED; 101static DEFINE_SPINLOCK(dlm_reco_state_lock);
102static spinlock_t dlm_mig_cookie_lock = SPIN_LOCK_UNLOCKED; 102static DEFINE_SPINLOCK(dlm_mig_cookie_lock);
103static u64 dlm_mig_cookie = 1; 103static u64 dlm_mig_cookie = 1;
104 104
105static u64 dlm_get_next_mig_cookie(void) 105static u64 dlm_get_next_mig_cookie(void)
@@ -115,12 +115,37 @@ static u64 dlm_get_next_mig_cookie(void)
115 return c; 115 return c;
116} 116}
117 117
118static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm,
119 u8 dead_node)
120{
121 assert_spin_locked(&dlm->spinlock);
122 if (dlm->reco.dead_node != dead_node)
123 mlog(0, "%s: changing dead_node from %u to %u\n",
124 dlm->name, dlm->reco.dead_node, dead_node);
125 dlm->reco.dead_node = dead_node;
126}
127
128static inline void dlm_set_reco_master(struct dlm_ctxt *dlm,
129 u8 master)
130{
131 assert_spin_locked(&dlm->spinlock);
132 mlog(0, "%s: changing new_master from %u to %u\n",
133 dlm->name, dlm->reco.new_master, master);
134 dlm->reco.new_master = master;
135}
136
137static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm)
138{
139 assert_spin_locked(&dlm->spinlock);
140 clear_bit(dlm->reco.dead_node, dlm->recovery_map);
141 dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
142 dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
143}
144
118static inline void dlm_reset_recovery(struct dlm_ctxt *dlm) 145static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
119{ 146{
120 spin_lock(&dlm->spinlock); 147 spin_lock(&dlm->spinlock);
121 clear_bit(dlm->reco.dead_node, dlm->recovery_map); 148 __dlm_reset_recovery(dlm);
122 dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
123 dlm->reco.new_master = O2NM_INVALID_NODE_NUM;
124 spin_unlock(&dlm->spinlock); 149 spin_unlock(&dlm->spinlock);
125} 150}
126 151
@@ -132,12 +157,21 @@ void dlm_dispatch_work(void *data)
132 struct list_head *iter, *iter2; 157 struct list_head *iter, *iter2;
133 struct dlm_work_item *item; 158 struct dlm_work_item *item;
134 dlm_workfunc_t *workfunc; 159 dlm_workfunc_t *workfunc;
160 int tot=0;
161
162 if (!dlm_joined(dlm))
163 return;
135 164
136 spin_lock(&dlm->work_lock); 165 spin_lock(&dlm->work_lock);
137 list_splice_init(&dlm->work_list, &tmp_list); 166 list_splice_init(&dlm->work_list, &tmp_list);
138 spin_unlock(&dlm->work_lock); 167 spin_unlock(&dlm->work_lock);
139 168
140 list_for_each_safe(iter, iter2, &tmp_list) { 169 list_for_each_safe(iter, iter2, &tmp_list) {
170 tot++;
171 }
172 mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);
173
174 list_for_each_safe(iter, iter2, &tmp_list) {
141 item = list_entry(iter, struct dlm_work_item, list); 175 item = list_entry(iter, struct dlm_work_item, list);
142 workfunc = item->func; 176 workfunc = item->func;
143 list_del_init(&item->list); 177 list_del_init(&item->list);
@@ -220,6 +254,52 @@ void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
220 * 254 *
221 */ 255 */
222 256
257static void dlm_print_reco_node_status(struct dlm_ctxt *dlm)
258{
259 struct dlm_reco_node_data *ndata;
260 struct dlm_lock_resource *res;
261
262 mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",
263 dlm->name, dlm->dlm_reco_thread_task->pid,
264 dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive",
265 dlm->reco.dead_node, dlm->reco.new_master);
266
267 list_for_each_entry(ndata, &dlm->reco.node_data, list) {
268 char *st = "unknown";
269 switch (ndata->state) {
270 case DLM_RECO_NODE_DATA_INIT:
271 st = "init";
272 break;
273 case DLM_RECO_NODE_DATA_REQUESTING:
274 st = "requesting";
275 break;
276 case DLM_RECO_NODE_DATA_DEAD:
277 st = "dead";
278 break;
279 case DLM_RECO_NODE_DATA_RECEIVING:
280 st = "receiving";
281 break;
282 case DLM_RECO_NODE_DATA_REQUESTED:
283 st = "requested";
284 break;
285 case DLM_RECO_NODE_DATA_DONE:
286 st = "done";
287 break;
288 case DLM_RECO_NODE_DATA_FINALIZE_SENT:
289 st = "finalize-sent";
290 break;
291 default:
292 st = "bad";
293 break;
294 }
295 mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",
296 dlm->name, ndata->node_num, st);
297 }
298 list_for_each_entry(res, &dlm->reco.resources, recovering) {
299 mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",
300 dlm->name, res->lockname.len, res->lockname.name);
301 }
302}
223 303
224#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000) 304#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)
225 305
@@ -267,11 +347,23 @@ int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
267{ 347{
268 int dead; 348 int dead;
269 spin_lock(&dlm->spinlock); 349 spin_lock(&dlm->spinlock);
270 dead = test_bit(node, dlm->domain_map); 350 dead = !test_bit(node, dlm->domain_map);
271 spin_unlock(&dlm->spinlock); 351 spin_unlock(&dlm->spinlock);
272 return dead; 352 return dead;
273} 353}
274 354
355/* returns true if node is no longer in the domain
356 * could be dead or just not joined */
357static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
358{
359 int recovered;
360 spin_lock(&dlm->spinlock);
361 recovered = !test_bit(node, dlm->recovery_map);
362 spin_unlock(&dlm->spinlock);
363 return recovered;
364}
365
366
275int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout) 367int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
276{ 368{
277 if (timeout) { 369 if (timeout) {
@@ -290,6 +382,24 @@ int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
290 return 0; 382 return 0;
291} 383}
292 384
385int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
386{
387 if (timeout) {
388 mlog(0, "%s: waiting %dms for notification of "
389 "recovery of node %u\n", dlm->name, timeout, node);
390 wait_event_timeout(dlm->dlm_reco_thread_wq,
391 dlm_is_node_recovered(dlm, node),
392 msecs_to_jiffies(timeout));
393 } else {
394 mlog(0, "%s: waiting indefinitely for notification "
395 "of recovery of node %u\n", dlm->name, node);
396 wait_event(dlm->dlm_reco_thread_wq,
397 dlm_is_node_recovered(dlm, node));
398 }
399 /* for now, return 0 */
400 return 0;
401}
402
293/* callers of the top-level api calls (dlmlock/dlmunlock) should 403/* callers of the top-level api calls (dlmlock/dlmunlock) should
294 * block on the dlm->reco.event when recovery is in progress. 404 * block on the dlm->reco.event when recovery is in progress.
295 * the dlm recovery thread will set this state when it begins 405 * the dlm recovery thread will set this state when it begins
@@ -308,6 +418,13 @@ static int dlm_in_recovery(struct dlm_ctxt *dlm)
308 418
309void dlm_wait_for_recovery(struct dlm_ctxt *dlm) 419void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
310{ 420{
421 if (dlm_in_recovery(dlm)) {
422 mlog(0, "%s: reco thread %d in recovery: "
423 "state=%d, master=%u, dead=%u\n",
424 dlm->name, dlm->dlm_reco_thread_task->pid,
425 dlm->reco.state, dlm->reco.new_master,
426 dlm->reco.dead_node);
427 }
311 wait_event(dlm->reco.event, !dlm_in_recovery(dlm)); 428 wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
312} 429}
313 430
@@ -341,7 +458,7 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
341 mlog(0, "new master %u died while recovering %u!\n", 458 mlog(0, "new master %u died while recovering %u!\n",
342 dlm->reco.new_master, dlm->reco.dead_node); 459 dlm->reco.new_master, dlm->reco.dead_node);
343 /* unset the new_master, leave dead_node */ 460 /* unset the new_master, leave dead_node */
344 dlm->reco.new_master = O2NM_INVALID_NODE_NUM; 461 dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
345 } 462 }
346 463
347 /* select a target to recover */ 464 /* select a target to recover */
@@ -350,14 +467,14 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
350 467
351 bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES+1, 0); 468 bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES+1, 0);
352 if (bit >= O2NM_MAX_NODES || bit < 0) 469 if (bit >= O2NM_MAX_NODES || bit < 0)
353 dlm->reco.dead_node = O2NM_INVALID_NODE_NUM; 470 dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
354 else 471 else
355 dlm->reco.dead_node = bit; 472 dlm_set_reco_dead_node(dlm, bit);
356 } else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) { 473 } else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
357 /* BUG? */ 474 /* BUG? */
358 mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n", 475 mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
359 dlm->reco.dead_node); 476 dlm->reco.dead_node);
360 dlm->reco.dead_node = O2NM_INVALID_NODE_NUM; 477 dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
361 } 478 }
362 479
363 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { 480 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
@@ -366,7 +483,8 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
366 /* return to main thread loop and sleep. */ 483 /* return to main thread loop and sleep. */
367 return 0; 484 return 0;
368 } 485 }
369 mlog(0, "recovery thread found node %u in the recovery map!\n", 486 mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n",
487 dlm->name, dlm->dlm_reco_thread_task->pid,
370 dlm->reco.dead_node); 488 dlm->reco.dead_node);
371 spin_unlock(&dlm->spinlock); 489 spin_unlock(&dlm->spinlock);
372 490
@@ -389,8 +507,8 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
389 } 507 }
390 mlog(0, "another node will master this recovery session.\n"); 508 mlog(0, "another node will master this recovery session.\n");
391 } 509 }
392 mlog(0, "dlm=%s, new_master=%u, this node=%u, dead_node=%u\n", 510 mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n",
393 dlm->name, dlm->reco.new_master, 511 dlm->name, dlm->dlm_reco_thread_task->pid, dlm->reco.new_master,
394 dlm->node_num, dlm->reco.dead_node); 512 dlm->node_num, dlm->reco.dead_node);
395 513
396 /* it is safe to start everything back up here 514 /* it is safe to start everything back up here
@@ -402,11 +520,13 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
402 return 0; 520 return 0;
403 521
404master_here: 522master_here:
405 mlog(0, "mastering recovery of %s:%u here(this=%u)!\n", 523 mlog(0, "(%d) mastering recovery of %s:%u here(this=%u)!\n",
524 dlm->dlm_reco_thread_task->pid,
406 dlm->name, dlm->reco.dead_node, dlm->node_num); 525 dlm->name, dlm->reco.dead_node, dlm->node_num);
407 526
408 status = dlm_remaster_locks(dlm, dlm->reco.dead_node); 527 status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
409 if (status < 0) { 528 if (status < 0) {
529 /* we should never hit this anymore */
410 mlog(ML_ERROR, "error %d remastering locks for node %u, " 530 mlog(ML_ERROR, "error %d remastering locks for node %u, "
411 "retrying.\n", status, dlm->reco.dead_node); 531 "retrying.\n", status, dlm->reco.dead_node);
412 /* yield a bit to allow any final network messages 532 /* yield a bit to allow any final network messages
@@ -433,9 +553,16 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
433 int destroy = 0; 553 int destroy = 0;
434 int pass = 0; 554 int pass = 0;
435 555
436 status = dlm_init_recovery_area(dlm, dead_node); 556 do {
437 if (status < 0) 557 /* we have become recovery master. there is no escaping
438 goto leave; 558 * this, so just keep trying until we get it. */
559 status = dlm_init_recovery_area(dlm, dead_node);
560 if (status < 0) {
561 mlog(ML_ERROR, "%s: failed to alloc recovery area, "
562 "retrying\n", dlm->name);
563 msleep(1000);
564 }
565 } while (status != 0);
439 566
440 /* safe to access the node data list without a lock, since this 567 /* safe to access the node data list without a lock, since this
441 * process is the only one to change the list */ 568 * process is the only one to change the list */
@@ -452,16 +579,36 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
452 continue; 579 continue;
453 } 580 }
454 581
455 status = dlm_request_all_locks(dlm, ndata->node_num, dead_node); 582 do {
456 if (status < 0) { 583 status = dlm_request_all_locks(dlm, ndata->node_num,
457 mlog_errno(status); 584 dead_node);
458 if (dlm_is_host_down(status)) 585 if (status < 0) {
459 ndata->state = DLM_RECO_NODE_DATA_DEAD; 586 mlog_errno(status);
460 else { 587 if (dlm_is_host_down(status)) {
461 destroy = 1; 588 /* node died, ignore it for recovery */
462 goto leave; 589 status = 0;
590 ndata->state = DLM_RECO_NODE_DATA_DEAD;
591 /* wait for the domain map to catch up
592 * with the network state. */
593 wait_event_timeout(dlm->dlm_reco_thread_wq,
594 dlm_is_node_dead(dlm,
595 ndata->node_num),
596 msecs_to_jiffies(1000));
597 mlog(0, "waited 1 sec for %u, "
598 "dead? %s\n", ndata->node_num,
599 dlm_is_node_dead(dlm, ndata->node_num) ?
600 "yes" : "no");
601 } else {
602 /* -ENOMEM on the other node */
603 mlog(0, "%s: node %u returned "
604 "%d during recovery, retrying "
605 "after a short wait\n",
606 dlm->name, ndata->node_num,
607 status);
608 msleep(100);
609 }
463 } 610 }
464 } 611 } while (status != 0);
465 612
466 switch (ndata->state) { 613 switch (ndata->state) {
467 case DLM_RECO_NODE_DATA_INIT: 614 case DLM_RECO_NODE_DATA_INIT:
@@ -473,10 +620,9 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
473 mlog(0, "node %u died after requesting " 620 mlog(0, "node %u died after requesting "
474 "recovery info for node %u\n", 621 "recovery info for node %u\n",
475 ndata->node_num, dead_node); 622 ndata->node_num, dead_node);
476 // start all over 623 /* fine. don't need this node's info.
477 destroy = 1; 624 * continue without it. */
478 status = -EAGAIN; 625 break;
479 goto leave;
480 case DLM_RECO_NODE_DATA_REQUESTING: 626 case DLM_RECO_NODE_DATA_REQUESTING:
481 ndata->state = DLM_RECO_NODE_DATA_REQUESTED; 627 ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
482 mlog(0, "now receiving recovery data from " 628 mlog(0, "now receiving recovery data from "
@@ -520,35 +666,26 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
520 BUG(); 666 BUG();
521 break; 667 break;
522 case DLM_RECO_NODE_DATA_DEAD: 668 case DLM_RECO_NODE_DATA_DEAD:
523 mlog(ML_NOTICE, "node %u died after " 669 mlog(0, "node %u died after "
524 "requesting recovery info for " 670 "requesting recovery info for "
525 "node %u\n", ndata->node_num, 671 "node %u\n", ndata->node_num,
526 dead_node); 672 dead_node);
527 spin_unlock(&dlm_reco_state_lock); 673 break;
528 // start all over
529 destroy = 1;
530 status = -EAGAIN;
531 /* instead of spinning like crazy here,
532 * wait for the domain map to catch up
533 * with the network state. otherwise this
534 * can be hit hundreds of times before
535 * the node is really seen as dead. */
536 wait_event_timeout(dlm->dlm_reco_thread_wq,
537 dlm_is_node_dead(dlm,
538 ndata->node_num),
539 msecs_to_jiffies(1000));
540 mlog(0, "waited 1 sec for %u, "
541 "dead? %s\n", ndata->node_num,
542 dlm_is_node_dead(dlm, ndata->node_num) ?
543 "yes" : "no");
544 goto leave;
545 case DLM_RECO_NODE_DATA_RECEIVING: 674 case DLM_RECO_NODE_DATA_RECEIVING:
546 case DLM_RECO_NODE_DATA_REQUESTED: 675 case DLM_RECO_NODE_DATA_REQUESTED:
676 mlog(0, "%s: node %u still in state %s\n",
677 dlm->name, ndata->node_num,
678 ndata->state==DLM_RECO_NODE_DATA_RECEIVING ?
679 "receiving" : "requested");
547 all_nodes_done = 0; 680 all_nodes_done = 0;
548 break; 681 break;
549 case DLM_RECO_NODE_DATA_DONE: 682 case DLM_RECO_NODE_DATA_DONE:
683 mlog(0, "%s: node %u state is done\n",
684 dlm->name, ndata->node_num);
550 break; 685 break;
551 case DLM_RECO_NODE_DATA_FINALIZE_SENT: 686 case DLM_RECO_NODE_DATA_FINALIZE_SENT:
687 mlog(0, "%s: node %u state is finalize\n",
688 dlm->name, ndata->node_num);
552 break; 689 break;
553 } 690 }
554 } 691 }
@@ -578,7 +715,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
578 jiffies, dlm->reco.dead_node, 715 jiffies, dlm->reco.dead_node,
579 dlm->node_num, dlm->reco.new_master); 716 dlm->node_num, dlm->reco.new_master);
580 destroy = 1; 717 destroy = 1;
581 status = ret; 718 status = 0;
582 /* rescan everything marked dirty along the way */ 719 /* rescan everything marked dirty along the way */
583 dlm_kick_thread(dlm, NULL); 720 dlm_kick_thread(dlm, NULL);
584 break; 721 break;
@@ -591,7 +728,6 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
591 728
592 } 729 }
593 730
594leave:
595 if (destroy) 731 if (destroy)
596 dlm_destroy_recovery_area(dlm, dead_node); 732 dlm_destroy_recovery_area(dlm, dead_node);
597 733
@@ -617,7 +753,7 @@ static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
617 } 753 }
618 BUG_ON(num == dead_node); 754 BUG_ON(num == dead_node);
619 755
620 ndata = kcalloc(1, sizeof(*ndata), GFP_KERNEL); 756 ndata = kcalloc(1, sizeof(*ndata), GFP_NOFS);
621 if (!ndata) { 757 if (!ndata) {
622 dlm_destroy_recovery_area(dlm, dead_node); 758 dlm_destroy_recovery_area(dlm, dead_node);
623 return -ENOMEM; 759 return -ENOMEM;
@@ -691,16 +827,25 @@ int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data)
691 if (!dlm_grab(dlm)) 827 if (!dlm_grab(dlm))
692 return -EINVAL; 828 return -EINVAL;
693 829
830 if (lr->dead_node != dlm->reco.dead_node) {
831 mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
832 "dead_node is %u\n", dlm->name, lr->node_idx,
833 lr->dead_node, dlm->reco.dead_node);
834 dlm_print_reco_node_status(dlm);
835 /* this is a hack */
836 dlm_put(dlm);
837 return -ENOMEM;
838 }
694 BUG_ON(lr->dead_node != dlm->reco.dead_node); 839 BUG_ON(lr->dead_node != dlm->reco.dead_node);
695 840
696 item = kcalloc(1, sizeof(*item), GFP_KERNEL); 841 item = kcalloc(1, sizeof(*item), GFP_NOFS);
697 if (!item) { 842 if (!item) {
698 dlm_put(dlm); 843 dlm_put(dlm);
699 return -ENOMEM; 844 return -ENOMEM;
700 } 845 }
701 846
702 /* this will get freed by dlm_request_all_locks_worker */ 847 /* this will get freed by dlm_request_all_locks_worker */
703 buf = (char *) __get_free_page(GFP_KERNEL); 848 buf = (char *) __get_free_page(GFP_NOFS);
704 if (!buf) { 849 if (!buf) {
705 kfree(item); 850 kfree(item);
706 dlm_put(dlm); 851 dlm_put(dlm);
@@ -715,7 +860,7 @@ int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data)
715 spin_lock(&dlm->work_lock); 860 spin_lock(&dlm->work_lock);
716 list_add_tail(&item->list, &dlm->work_list); 861 list_add_tail(&item->list, &dlm->work_list);
717 spin_unlock(&dlm->work_lock); 862 spin_unlock(&dlm->work_lock);
718 schedule_work(&dlm->dispatched_work); 863 queue_work(dlm->dlm_worker, &dlm->dispatched_work);
719 864
720 dlm_put(dlm); 865 dlm_put(dlm);
721 return 0; 866 return 0;
@@ -730,32 +875,34 @@ static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
730 struct list_head *iter; 875 struct list_head *iter;
731 int ret; 876 int ret;
732 u8 dead_node, reco_master; 877 u8 dead_node, reco_master;
878 int skip_all_done = 0;
733 879
734 dlm = item->dlm; 880 dlm = item->dlm;
735 dead_node = item->u.ral.dead_node; 881 dead_node = item->u.ral.dead_node;
736 reco_master = item->u.ral.reco_master; 882 reco_master = item->u.ral.reco_master;
737 mres = (struct dlm_migratable_lockres *)data; 883 mres = (struct dlm_migratable_lockres *)data;
738 884
885 mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
886 dlm->name, dead_node, reco_master);
887
739 if (dead_node != dlm->reco.dead_node || 888 if (dead_node != dlm->reco.dead_node ||
740 reco_master != dlm->reco.new_master) { 889 reco_master != dlm->reco.new_master) {
741 /* show extra debug info if the recovery state is messed */ 890 /* worker could have been created before the recovery master
742 mlog(ML_ERROR, "%s: bad reco state: reco(dead=%u, master=%u), " 891 * died. if so, do not continue, but do not error. */
743 "request(dead=%u, master=%u)\n", 892 if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
744 dlm->name, dlm->reco.dead_node, dlm->reco.new_master, 893 mlog(ML_NOTICE, "%s: will not send recovery state, "
745 dead_node, reco_master); 894 "recovery master %u died, thread=(dead=%u,mas=%u)"
746 mlog(ML_ERROR, "%s: name=%.*s master=%u locks=%u/%u flags=%u " 895 " current=(dead=%u,mas=%u)\n", dlm->name,
747 "entry[0]={c=%u:%llu,l=%u,f=%u,t=%d,ct=%d,hb=%d,n=%u}\n", 896 reco_master, dead_node, reco_master,
748 dlm->name, mres->lockname_len, mres->lockname, mres->master, 897 dlm->reco.dead_node, dlm->reco.new_master);
749 mres->num_locks, mres->total_locks, mres->flags, 898 } else {
750 dlm_get_lock_cookie_node(mres->ml[0].cookie), 899 mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
751 dlm_get_lock_cookie_seq(mres->ml[0].cookie), 900 "master=%u), request(dead=%u, master=%u)\n",
752 mres->ml[0].list, mres->ml[0].flags, 901 dlm->name, dlm->reco.dead_node,
753 mres->ml[0].type, mres->ml[0].convert_type, 902 dlm->reco.new_master, dead_node, reco_master);
754 mres->ml[0].highest_blocked, mres->ml[0].node); 903 }
755 BUG(); 904 goto leave;
756 } 905 }
757 BUG_ON(dead_node != dlm->reco.dead_node);
758 BUG_ON(reco_master != dlm->reco.new_master);
759 906
760 /* lock resources should have already been moved to the 907 /* lock resources should have already been moved to the
761 * dlm->reco.resources list. now move items from that list 908 * dlm->reco.resources list. now move items from that list
@@ -766,12 +913,20 @@ static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
766 dlm_move_reco_locks_to_list(dlm, &resources, dead_node); 913 dlm_move_reco_locks_to_list(dlm, &resources, dead_node);
767 914
768 /* now we can begin blasting lockreses without the dlm lock */ 915 /* now we can begin blasting lockreses without the dlm lock */
916
917 /* any errors returned will be due to the new_master dying,
918 * the dlm_reco_thread should detect this */
769 list_for_each(iter, &resources) { 919 list_for_each(iter, &resources) {
770 res = list_entry (iter, struct dlm_lock_resource, recovering); 920 res = list_entry (iter, struct dlm_lock_resource, recovering);
771 ret = dlm_send_one_lockres(dlm, res, mres, reco_master, 921 ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
772 DLM_MRES_RECOVERY); 922 DLM_MRES_RECOVERY);
773 if (ret < 0) 923 if (ret < 0) {
774 mlog_errno(ret); 924 mlog(ML_ERROR, "%s: node %u went down while sending "
925 "recovery state for dead node %u, ret=%d\n", dlm->name,
926 reco_master, dead_node, ret);
927 skip_all_done = 1;
928 break;
929 }
775 } 930 }
776 931
777 /* move the resources back to the list */ 932 /* move the resources back to the list */
@@ -779,10 +934,15 @@ static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
779 list_splice_init(&resources, &dlm->reco.resources); 934 list_splice_init(&resources, &dlm->reco.resources);
780 spin_unlock(&dlm->spinlock); 935 spin_unlock(&dlm->spinlock);
781 936
782 ret = dlm_send_all_done_msg(dlm, dead_node, reco_master); 937 if (!skip_all_done) {
783 if (ret < 0) 938 ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
784 mlog_errno(ret); 939 if (ret < 0) {
785 940 mlog(ML_ERROR, "%s: node %u went down while sending "
941 "recovery all-done for dead node %u, ret=%d\n",
942 dlm->name, reco_master, dead_node, ret);
943 }
944 }
945leave:
786 free_page((unsigned long)data); 946 free_page((unsigned long)data);
787} 947}
788 948
@@ -801,8 +961,14 @@ static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
801 961
802 ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg, 962 ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
803 sizeof(done_msg), send_to, &tmpret); 963 sizeof(done_msg), send_to, &tmpret);
804 /* negative status is ignored by the caller */ 964 if (ret < 0) {
805 if (ret >= 0) 965 if (!dlm_is_host_down(ret)) {
966 mlog_errno(ret);
967 mlog(ML_ERROR, "%s: unknown error sending data-done "
968 "to %u\n", dlm->name, send_to);
969 BUG();
970 }
971 } else
806 ret = tmpret; 972 ret = tmpret;
807 return ret; 973 return ret;
808} 974}
@@ -822,7 +988,11 @@ int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data)
822 mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, " 988 mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
823 "node_idx=%u, this node=%u\n", done->dead_node, 989 "node_idx=%u, this node=%u\n", done->dead_node,
824 dlm->reco.dead_node, done->node_idx, dlm->node_num); 990 dlm->reco.dead_node, done->node_idx, dlm->node_num);
825 BUG_ON(done->dead_node != dlm->reco.dead_node); 991
992 mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
993 "Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
994 "node_idx=%u, this node=%u\n", done->dead_node,
995 dlm->reco.dead_node, done->node_idx, dlm->node_num);
826 996
827 spin_lock(&dlm_reco_state_lock); 997 spin_lock(&dlm_reco_state_lock);
828 list_for_each(iter, &dlm->reco.node_data) { 998 list_for_each(iter, &dlm->reco.node_data) {
@@ -905,13 +1075,11 @@ static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
905 mlog(0, "found lockres owned by dead node while " 1075 mlog(0, "found lockres owned by dead node while "
906 "doing recovery for node %u. sending it.\n", 1076 "doing recovery for node %u. sending it.\n",
907 dead_node); 1077 dead_node);
908 list_del_init(&res->recovering); 1078 list_move_tail(&res->recovering, list);
909 list_add_tail(&res->recovering, list);
910 } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { 1079 } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
911 mlog(0, "found UNKNOWN owner while doing recovery " 1080 mlog(0, "found UNKNOWN owner while doing recovery "
912 "for node %u. sending it.\n", dead_node); 1081 "for node %u. sending it.\n", dead_node);
913 list_del_init(&res->recovering); 1082 list_move_tail(&res->recovering, list);
914 list_add_tail(&res->recovering, list);
915 } 1083 }
916 } 1084 }
917 spin_unlock(&dlm->spinlock); 1085 spin_unlock(&dlm->spinlock);
@@ -1023,8 +1191,9 @@ static int dlm_add_lock_to_array(struct dlm_lock *lock,
1023 ml->type == LKM_PRMODE) { 1191 ml->type == LKM_PRMODE) {
1024 /* if it is already set, this had better be a PR 1192 /* if it is already set, this had better be a PR
1025 * and it has to match */ 1193 * and it has to match */
1026 if (mres->lvb[0] && (ml->type == LKM_EXMODE || 1194 if (!dlm_lvb_is_empty(mres->lvb) &&
1027 memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) { 1195 (ml->type == LKM_EXMODE ||
1196 memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) {
1028 mlog(ML_ERROR, "mismatched lvbs!\n"); 1197 mlog(ML_ERROR, "mismatched lvbs!\n");
1029 __dlm_print_one_lock_resource(lock->lockres); 1198 __dlm_print_one_lock_resource(lock->lockres);
1030 BUG(); 1199 BUG();
@@ -1083,22 +1252,25 @@ int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1083 * we must send it immediately. */ 1252 * we must send it immediately. */
1084 ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, 1253 ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
1085 res, total_locks); 1254 res, total_locks);
1086 if (ret < 0) { 1255 if (ret < 0)
1087 // TODO 1256 goto error;
1088 mlog(ML_ERROR, "dlm_send_mig_lockres_msg "
1089 "returned %d, TODO\n", ret);
1090 BUG();
1091 }
1092 } 1257 }
1093 } 1258 }
1094 /* flush any remaining locks */ 1259 /* flush any remaining locks */
1095 ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks); 1260 ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
1096 if (ret < 0) { 1261 if (ret < 0)
1097 // TODO 1262 goto error;
1098 mlog(ML_ERROR, "dlm_send_mig_lockres_msg returned %d, " 1263 return ret;
1099 "TODO\n", ret); 1264
1265error:
1266 mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
1267 dlm->name, ret);
1268 if (!dlm_is_host_down(ret))
1100 BUG(); 1269 BUG();
1101 } 1270 mlog(0, "%s: node %u went down while sending %s "
1271 "lockres %.*s\n", dlm->name, send_to,
1272 flags & DLM_MRES_RECOVERY ? "recovery" : "migration",
1273 res->lockname.len, res->lockname.name);
1102 return ret; 1274 return ret;
1103} 1275}
1104 1276
@@ -1146,8 +1318,8 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data)
1146 mlog(0, "all done flag. all lockres data received!\n"); 1318 mlog(0, "all done flag. all lockres data received!\n");
1147 1319
1148 ret = -ENOMEM; 1320 ret = -ENOMEM;
1149 buf = kmalloc(be16_to_cpu(msg->data_len), GFP_KERNEL); 1321 buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
1150 item = kcalloc(1, sizeof(*item), GFP_KERNEL); 1322 item = kcalloc(1, sizeof(*item), GFP_NOFS);
1151 if (!buf || !item) 1323 if (!buf || !item)
1152 goto leave; 1324 goto leave;
1153 1325
@@ -1238,7 +1410,7 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data)
1238 spin_lock(&dlm->work_lock); 1410 spin_lock(&dlm->work_lock);
1239 list_add_tail(&item->list, &dlm->work_list); 1411 list_add_tail(&item->list, &dlm->work_list);
1240 spin_unlock(&dlm->work_lock); 1412 spin_unlock(&dlm->work_lock);
1241 schedule_work(&dlm->dispatched_work); 1413 queue_work(dlm->dlm_worker, &dlm->dispatched_work);
1242 1414
1243leave: 1415leave:
1244 dlm_put(dlm); 1416 dlm_put(dlm);
@@ -1406,6 +1578,7 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data)
1406 struct dlm_ctxt *dlm = data; 1578 struct dlm_ctxt *dlm = data;
1407 struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf; 1579 struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
1408 struct dlm_lock_resource *res = NULL; 1580 struct dlm_lock_resource *res = NULL;
1581 unsigned int hash;
1409 int master = DLM_LOCK_RES_OWNER_UNKNOWN; 1582 int master = DLM_LOCK_RES_OWNER_UNKNOWN;
1410 u32 flags = DLM_ASSERT_MASTER_REQUERY; 1583 u32 flags = DLM_ASSERT_MASTER_REQUERY;
1411 1584
@@ -1415,8 +1588,10 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data)
1415 return master; 1588 return master;
1416 } 1589 }
1417 1590
1591 hash = dlm_lockid_hash(req->name, req->namelen);
1592
1418 spin_lock(&dlm->spinlock); 1593 spin_lock(&dlm->spinlock);
1419 res = __dlm_lookup_lockres(dlm, req->name, req->namelen); 1594 res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
1420 if (res) { 1595 if (res) {
1421 spin_lock(&res->spinlock); 1596 spin_lock(&res->spinlock);
1422 master = res->owner; 1597 master = res->owner;
@@ -1483,7 +1658,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1483 struct dlm_lock *newlock = NULL; 1658 struct dlm_lock *newlock = NULL;
1484 struct dlm_lockstatus *lksb = NULL; 1659 struct dlm_lockstatus *lksb = NULL;
1485 int ret = 0; 1660 int ret = 0;
1486 int i; 1661 int i, bad;
1487 struct list_head *iter; 1662 struct list_head *iter;
1488 struct dlm_lock *lock = NULL; 1663 struct dlm_lock *lock = NULL;
1489 1664
@@ -1529,8 +1704,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1529 1704
1530 /* move the lock to its proper place */ 1705 /* move the lock to its proper place */
1531 /* do not alter lock refcount. switching lists. */ 1706 /* do not alter lock refcount. switching lists. */
1532 list_del_init(&lock->list); 1707 list_move_tail(&lock->list, queue);
1533 list_add_tail(&lock->list, queue);
1534 spin_unlock(&res->spinlock); 1708 spin_unlock(&res->spinlock);
1535 1709
1536 mlog(0, "just reordered a local lock!\n"); 1710 mlog(0, "just reordered a local lock!\n");
@@ -1553,28 +1727,48 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1553 } 1727 }
1554 lksb->flags |= (ml->flags & 1728 lksb->flags |= (ml->flags &
1555 (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB)); 1729 (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
1556 1730
1557 if (mres->lvb[0]) { 1731 if (ml->type == LKM_NLMODE)
1732 goto skip_lvb;
1733
1734 if (!dlm_lvb_is_empty(mres->lvb)) {
1558 if (lksb->flags & DLM_LKSB_PUT_LVB) { 1735 if (lksb->flags & DLM_LKSB_PUT_LVB) {
1559 /* other node was trying to update 1736 /* other node was trying to update
1560 * lvb when node died. recreate the 1737 * lvb when node died. recreate the
1561 * lksb with the updated lvb. */ 1738 * lksb with the updated lvb. */
1562 memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN); 1739 memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
1740 /* the lock resource lvb update must happen
1741 * NOW, before the spinlock is dropped.
1742 * we no longer wait for the AST to update
1743 * the lvb. */
1744 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1563 } else { 1745 } else {
1564 /* otherwise, the node is sending its 1746 /* otherwise, the node is sending its
1565 * most recent valid lvb info */ 1747 * most recent valid lvb info */
1566 BUG_ON(ml->type != LKM_EXMODE && 1748 BUG_ON(ml->type != LKM_EXMODE &&
1567 ml->type != LKM_PRMODE); 1749 ml->type != LKM_PRMODE);
1568 if (res->lvb[0] && (ml->type == LKM_EXMODE || 1750 if (!dlm_lvb_is_empty(res->lvb) &&
1569 memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) { 1751 (ml->type == LKM_EXMODE ||
1570 mlog(ML_ERROR, "received bad lvb!\n"); 1752 memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
1571 __dlm_print_one_lock_resource(res); 1753 int i;
1572 BUG(); 1754 mlog(ML_ERROR, "%s:%.*s: received bad "
1755 "lvb! type=%d\n", dlm->name,
1756 res->lockname.len,
1757 res->lockname.name, ml->type);
1758 printk("lockres lvb=[");
1759 for (i=0; i<DLM_LVB_LEN; i++)
1760 printk("%02x", res->lvb[i]);
1761 printk("]\nmigrated lvb=[");
1762 for (i=0; i<DLM_LVB_LEN; i++)
1763 printk("%02x", mres->lvb[i]);
1764 printk("]\n");
1765 dlm_print_one_lock_resource(res);
1766 BUG();
1573 } 1767 }
1574 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN); 1768 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1575 } 1769 }
1576 } 1770 }
1577 1771skip_lvb:
1578 1772
1579 /* NOTE: 1773 /* NOTE:
1580 * wrt lock queue ordering and recovery: 1774 * wrt lock queue ordering and recovery:
@@ -1592,9 +1786,33 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1592 * relative to each other, but clearly *not* 1786 * relative to each other, but clearly *not*
1593 * preserved relative to locks from other nodes. 1787 * preserved relative to locks from other nodes.
1594 */ 1788 */
1789 bad = 0;
1595 spin_lock(&res->spinlock); 1790 spin_lock(&res->spinlock);
1596 dlm_lock_get(newlock); 1791 list_for_each_entry(lock, queue, list) {
1597 list_add_tail(&newlock->list, queue); 1792 if (lock->ml.cookie == ml->cookie) {
1793 u64 c = lock->ml.cookie;
1794 mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
1795 "exists on this lockres!\n", dlm->name,
1796 res->lockname.len, res->lockname.name,
1797 dlm_get_lock_cookie_node(c),
1798 dlm_get_lock_cookie_seq(c));
1799
1800 mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, "
1801 "node=%u, cookie=%u:%llu, queue=%d\n",
1802 ml->type, ml->convert_type, ml->node,
1803 dlm_get_lock_cookie_node(ml->cookie),
1804 dlm_get_lock_cookie_seq(ml->cookie),
1805 ml->list);
1806
1807 __dlm_print_one_lock_resource(res);
1808 bad = 1;
1809 break;
1810 }
1811 }
1812 if (!bad) {
1813 dlm_lock_get(newlock);
1814 list_add_tail(&newlock->list, queue);
1815 }
1598 spin_unlock(&res->spinlock); 1816 spin_unlock(&res->spinlock);
1599 } 1817 }
1600 mlog(0, "done running all the locks\n"); 1818 mlog(0, "done running all the locks\n");
@@ -1618,8 +1836,14 @@ void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
1618 struct dlm_lock *lock; 1836 struct dlm_lock *lock;
1619 1837
1620 res->state |= DLM_LOCK_RES_RECOVERING; 1838 res->state |= DLM_LOCK_RES_RECOVERING;
1621 if (!list_empty(&res->recovering)) 1839 if (!list_empty(&res->recovering)) {
1840 mlog(0,
1841 "Recovering res %s:%.*s, is already on recovery list!\n",
1842 dlm->name, res->lockname.len, res->lockname.name);
1622 list_del_init(&res->recovering); 1843 list_del_init(&res->recovering);
1844 }
1845 /* We need to hold a reference while on the recovery list */
1846 dlm_lockres_get(res);
1623 list_add_tail(&res->recovering, &dlm->reco.resources); 1847 list_add_tail(&res->recovering, &dlm->reco.resources);
1624 1848
1625 /* find any pending locks and put them back on proper list */ 1849 /* find any pending locks and put them back on proper list */
@@ -1708,9 +1932,11 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
1708 spin_lock(&res->spinlock); 1932 spin_lock(&res->spinlock);
1709 dlm_change_lockres_owner(dlm, res, new_master); 1933 dlm_change_lockres_owner(dlm, res, new_master);
1710 res->state &= ~DLM_LOCK_RES_RECOVERING; 1934 res->state &= ~DLM_LOCK_RES_RECOVERING;
1711 __dlm_dirty_lockres(dlm, res); 1935 if (!__dlm_lockres_unused(res))
1936 __dlm_dirty_lockres(dlm, res);
1712 spin_unlock(&res->spinlock); 1937 spin_unlock(&res->spinlock);
1713 wake_up(&res->wq); 1938 wake_up(&res->wq);
1939 dlm_lockres_put(res);
1714 } 1940 }
1715 } 1941 }
1716 1942
@@ -1719,7 +1945,7 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
1719 * the RECOVERING state and set the owner 1945 * the RECOVERING state and set the owner
1720 * if necessary */ 1946 * if necessary */
1721 for (i = 0; i < DLM_HASH_BUCKETS; i++) { 1947 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
1722 bucket = &(dlm->lockres_hash[i]); 1948 bucket = dlm_lockres_hash(dlm, i);
1723 hlist_for_each_entry(res, hash_iter, bucket, hash_node) { 1949 hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
1724 if (res->state & DLM_LOCK_RES_RECOVERING) { 1950 if (res->state & DLM_LOCK_RES_RECOVERING) {
1725 if (res->owner == dead_node) { 1951 if (res->owner == dead_node) {
@@ -1743,11 +1969,13 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
1743 dlm->name, res->lockname.len, 1969 dlm->name, res->lockname.len,
1744 res->lockname.name, res->owner); 1970 res->lockname.name, res->owner);
1745 list_del_init(&res->recovering); 1971 list_del_init(&res->recovering);
1972 dlm_lockres_put(res);
1746 } 1973 }
1747 spin_lock(&res->spinlock); 1974 spin_lock(&res->spinlock);
1748 dlm_change_lockres_owner(dlm, res, new_master); 1975 dlm_change_lockres_owner(dlm, res, new_master);
1749 res->state &= ~DLM_LOCK_RES_RECOVERING; 1976 res->state &= ~DLM_LOCK_RES_RECOVERING;
1750 __dlm_dirty_lockres(dlm, res); 1977 if (!__dlm_lockres_unused(res))
1978 __dlm_dirty_lockres(dlm, res);
1751 spin_unlock(&res->spinlock); 1979 spin_unlock(&res->spinlock);
1752 wake_up(&res->wq); 1980 wake_up(&res->wq);
1753 } 1981 }
@@ -1884,7 +2112,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
1884 * need to be fired as a result. 2112 * need to be fired as a result.
1885 */ 2113 */
1886 for (i = 0; i < DLM_HASH_BUCKETS; i++) { 2114 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
1887 bucket = &(dlm->lockres_hash[i]); 2115 bucket = dlm_lockres_hash(dlm, i);
1888 hlist_for_each_entry(res, iter, bucket, hash_node) { 2116 hlist_for_each_entry(res, iter, bucket, hash_node) {
1889 /* always prune any $RECOVERY entries for dead nodes, 2117 /* always prune any $RECOVERY entries for dead nodes,
1890 * otherwise hangs can occur during later recovery */ 2118 * otherwise hangs can occur during later recovery */
@@ -1924,6 +2152,20 @@ static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
1924{ 2152{
1925 assert_spin_locked(&dlm->spinlock); 2153 assert_spin_locked(&dlm->spinlock);
1926 2154
2155 if (dlm->reco.new_master == idx) {
2156 mlog(0, "%s: recovery master %d just died\n",
2157 dlm->name, idx);
2158 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2159 /* finalize1 was reached, so it is safe to clear
2160 * the new_master and dead_node. that recovery
2161 * is complete. */
2162 mlog(0, "%s: dead master %d had reached "
2163 "finalize1 state, clearing\n", dlm->name, idx);
2164 dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
2165 __dlm_reset_recovery(dlm);
2166 }
2167 }
2168
1927 /* check to see if the node is already considered dead */ 2169 /* check to see if the node is already considered dead */
1928 if (!test_bit(idx, dlm->live_nodes_map)) { 2170 if (!test_bit(idx, dlm->live_nodes_map)) {
1929 mlog(0, "for domain %s, node %d is already dead. " 2171 mlog(0, "for domain %s, node %d is already dead. "
@@ -2087,7 +2329,7 @@ again:
2087 2329
2088 /* set the new_master to this node */ 2330 /* set the new_master to this node */
2089 spin_lock(&dlm->spinlock); 2331 spin_lock(&dlm->spinlock);
2090 dlm->reco.new_master = dlm->node_num; 2332 dlm_set_reco_master(dlm, dlm->node_num);
2091 spin_unlock(&dlm->spinlock); 2333 spin_unlock(&dlm->spinlock);
2092 } 2334 }
2093 2335
@@ -2125,6 +2367,10 @@ again:
2125 mlog(0, "%s: reco master %u is ready to recover %u\n", 2367 mlog(0, "%s: reco master %u is ready to recover %u\n",
2126 dlm->name, dlm->reco.new_master, dlm->reco.dead_node); 2368 dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
2127 status = -EEXIST; 2369 status = -EEXIST;
2370 } else if (ret == DLM_RECOVERING) {
2371 mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n",
2372 dlm->name, dlm->node_num);
2373 goto again;
2128 } else { 2374 } else {
2129 struct dlm_lock_resource *res; 2375 struct dlm_lock_resource *res;
2130 2376
@@ -2156,7 +2402,7 @@ static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
2156 2402
2157 mlog_entry("%u\n", dead_node); 2403 mlog_entry("%u\n", dead_node);
2158 2404
2159 mlog(0, "dead node is %u\n", dead_node); 2405 mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);
2160 2406
2161 spin_lock(&dlm->spinlock); 2407 spin_lock(&dlm->spinlock);
2162 dlm_node_iter_init(dlm->domain_map, &iter); 2408 dlm_node_iter_init(dlm->domain_map, &iter);
@@ -2214,6 +2460,14 @@ retry:
2214 * another ENOMEM */ 2460 * another ENOMEM */
2215 msleep(100); 2461 msleep(100);
2216 goto retry; 2462 goto retry;
2463 } else if (ret == EAGAIN) {
2464 mlog(0, "%s: trying to start recovery of node "
2465 "%u, but node %u is waiting for last recovery "
2466 "to complete, backoff for a bit\n", dlm->name,
2467 dead_node, nodenum);
2468 /* TODO Look into replacing msleep with cond_resched() */
2469 msleep(100);
2470 goto retry;
2217 } 2471 }
2218 } 2472 }
2219 2473
@@ -2229,8 +2483,20 @@ int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data)
2229 if (!dlm_grab(dlm)) 2483 if (!dlm_grab(dlm))
2230 return 0; 2484 return 0;
2231 2485
2232 mlog(0, "node %u wants to recover node %u\n", 2486 spin_lock(&dlm->spinlock);
2233 br->node_idx, br->dead_node); 2487 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2488 mlog(0, "%s: node %u wants to recover node %u (%u:%u) "
2489 "but this node is in finalize state, waiting on finalize2\n",
2490 dlm->name, br->node_idx, br->dead_node,
2491 dlm->reco.dead_node, dlm->reco.new_master);
2492 spin_unlock(&dlm->spinlock);
2493 return EAGAIN;
2494 }
2495 spin_unlock(&dlm->spinlock);
2496
2497 mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n",
2498 dlm->name, br->node_idx, br->dead_node,
2499 dlm->reco.dead_node, dlm->reco.new_master);
2234 2500
2235 dlm_fire_domain_eviction_callbacks(dlm, br->dead_node); 2501 dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);
2236 2502
@@ -2252,8 +2518,8 @@ int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data)
2252 "node %u changing it to %u\n", dlm->name, 2518 "node %u changing it to %u\n", dlm->name,
2253 dlm->reco.dead_node, br->node_idx, br->dead_node); 2519 dlm->reco.dead_node, br->node_idx, br->dead_node);
2254 } 2520 }
2255 dlm->reco.new_master = br->node_idx; 2521 dlm_set_reco_master(dlm, br->node_idx);
2256 dlm->reco.dead_node = br->dead_node; 2522 dlm_set_reco_dead_node(dlm, br->dead_node);
2257 if (!test_bit(br->dead_node, dlm->recovery_map)) { 2523 if (!test_bit(br->dead_node, dlm->recovery_map)) {
2258 mlog(0, "recovery master %u sees %u as dead, but this " 2524 mlog(0, "recovery master %u sees %u as dead, but this "
2259 "node has not yet. marking %u as dead\n", 2525 "node has not yet. marking %u as dead\n",
@@ -2272,10 +2538,16 @@ int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data)
2272 spin_unlock(&dlm->spinlock); 2538 spin_unlock(&dlm->spinlock);
2273 2539
2274 dlm_kick_recovery_thread(dlm); 2540 dlm_kick_recovery_thread(dlm);
2541
2542 mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n",
2543 dlm->name, br->node_idx, br->dead_node,
2544 dlm->reco.dead_node, dlm->reco.new_master);
2545
2275 dlm_put(dlm); 2546 dlm_put(dlm);
2276 return 0; 2547 return 0;
2277} 2548}
2278 2549
2550#define DLM_FINALIZE_STAGE2 0x01
2279static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm) 2551static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
2280{ 2552{
2281 int ret = 0; 2553 int ret = 0;
@@ -2283,25 +2555,31 @@ static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
2283 struct dlm_node_iter iter; 2555 struct dlm_node_iter iter;
2284 int nodenum; 2556 int nodenum;
2285 int status; 2557 int status;
2558 int stage = 1;
2286 2559
2287 mlog(0, "finishing recovery for node %s:%u\n", 2560 mlog(0, "finishing recovery for node %s:%u, "
2288 dlm->name, dlm->reco.dead_node); 2561 "stage %d\n", dlm->name, dlm->reco.dead_node, stage);
2289 2562
2290 spin_lock(&dlm->spinlock); 2563 spin_lock(&dlm->spinlock);
2291 dlm_node_iter_init(dlm->domain_map, &iter); 2564 dlm_node_iter_init(dlm->domain_map, &iter);
2292 spin_unlock(&dlm->spinlock); 2565 spin_unlock(&dlm->spinlock);
2293 2566
2567stage2:
2294 memset(&fr, 0, sizeof(fr)); 2568 memset(&fr, 0, sizeof(fr));
2295 fr.node_idx = dlm->node_num; 2569 fr.node_idx = dlm->node_num;
2296 fr.dead_node = dlm->reco.dead_node; 2570 fr.dead_node = dlm->reco.dead_node;
2571 if (stage == 2)
2572 fr.flags |= DLM_FINALIZE_STAGE2;
2297 2573
2298 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { 2574 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2299 if (nodenum == dlm->node_num) 2575 if (nodenum == dlm->node_num)
2300 continue; 2576 continue;
2301 ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key, 2577 ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
2302 &fr, sizeof(fr), nodenum, &status); 2578 &fr, sizeof(fr), nodenum, &status);
2303 if (ret >= 0) { 2579 if (ret >= 0)
2304 ret = status; 2580 ret = status;
2581 if (ret < 0) {
2582 mlog_errno(ret);
2305 if (dlm_is_host_down(ret)) { 2583 if (dlm_is_host_down(ret)) {
2306 /* this has no effect on this recovery 2584 /* this has no effect on this recovery
2307 * session, so set the status to zero to 2585 * session, so set the status to zero to
@@ -2309,13 +2587,17 @@ static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
2309 mlog(ML_ERROR, "node %u went down after this " 2587 mlog(ML_ERROR, "node %u went down after this "
2310 "node finished recovery.\n", nodenum); 2588 "node finished recovery.\n", nodenum);
2311 ret = 0; 2589 ret = 0;
2590 continue;
2312 } 2591 }
2313 }
2314 if (ret < 0) {
2315 mlog_errno(ret);
2316 break; 2592 break;
2317 } 2593 }
2318 } 2594 }
2595 if (stage == 1) {
2596 /* reset the node_iter back to the top and send finalize2 */
2597 iter.curnode = -1;
2598 stage = 2;
2599 goto stage2;
2600 }
2319 2601
2320 return ret; 2602 return ret;
2321} 2603}
@@ -2324,14 +2606,19 @@ int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data)
2324{ 2606{
2325 struct dlm_ctxt *dlm = data; 2607 struct dlm_ctxt *dlm = data;
2326 struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf; 2608 struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
2609 int stage = 1;
2327 2610
2328 /* ok to return 0, domain has gone away */ 2611 /* ok to return 0, domain has gone away */
2329 if (!dlm_grab(dlm)) 2612 if (!dlm_grab(dlm))
2330 return 0; 2613 return 0;
2331 2614
2332 mlog(0, "node %u finalizing recovery of node %u\n", 2615 if (fr->flags & DLM_FINALIZE_STAGE2)
2333 fr->node_idx, fr->dead_node); 2616 stage = 2;
2334 2617
2618 mlog(0, "%s: node %u finalizing recovery stage%d of "
2619 "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
2620 fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);
2621
2335 spin_lock(&dlm->spinlock); 2622 spin_lock(&dlm->spinlock);
2336 2623
2337 if (dlm->reco.new_master != fr->node_idx) { 2624 if (dlm->reco.new_master != fr->node_idx) {
@@ -2347,13 +2634,41 @@ int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data)
2347 BUG(); 2634 BUG();
2348 } 2635 }
2349 2636
2350 dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx); 2637 switch (stage) {
2351 2638 case 1:
2352 spin_unlock(&dlm->spinlock); 2639 dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);
2640 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2641 mlog(ML_ERROR, "%s: received finalize1 from "
2642 "new master %u for dead node %u, but "
2643 "this node has already received it!\n",
2644 dlm->name, fr->node_idx, fr->dead_node);
2645 dlm_print_reco_node_status(dlm);
2646 BUG();
2647 }
2648 dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
2649 spin_unlock(&dlm->spinlock);
2650 break;
2651 case 2:
2652 if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) {
2653 mlog(ML_ERROR, "%s: received finalize2 from "
2654 "new master %u for dead node %u, but "
2655 "this node did not have finalize1!\n",
2656 dlm->name, fr->node_idx, fr->dead_node);
2657 dlm_print_reco_node_status(dlm);
2658 BUG();
2659 }
2660 dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
2661 spin_unlock(&dlm->spinlock);
2662 dlm_reset_recovery(dlm);
2663 dlm_kick_recovery_thread(dlm);
2664 break;
2665 default:
2666 BUG();
2667 }
2353 2668
2354 dlm_reset_recovery(dlm); 2669 mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n",
2670 dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master);
2355 2671
2356 dlm_kick_recovery_thread(dlm);
2357 dlm_put(dlm); 2672 dlm_put(dlm);
2358 return 0; 2673 return 0;
2359} 2674}
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 5be9d14f12..0c822f3ffb 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -39,6 +39,7 @@
39#include <linux/inet.h> 39#include <linux/inet.h>
40#include <linux/timer.h> 40#include <linux/timer.h>
41#include <linux/kthread.h> 41#include <linux/kthread.h>
42#include <linux/delay.h>
42 43
43 44
44#include "cluster/heartbeat.h" 45#include "cluster/heartbeat.h"
@@ -53,6 +54,8 @@
53#include "cluster/masklog.h" 54#include "cluster/masklog.h"
54 55
55static int dlm_thread(void *data); 56static int dlm_thread(void *data);
57static void dlm_purge_lockres_now(struct dlm_ctxt *dlm,
58 struct dlm_lock_resource *lockres);
56 59
57static void dlm_flush_asts(struct dlm_ctxt *dlm); 60static void dlm_flush_asts(struct dlm_ctxt *dlm);
58 61
@@ -80,7 +83,7 @@ repeat:
80} 83}
81 84
82 85
83static int __dlm_lockres_unused(struct dlm_lock_resource *res) 86int __dlm_lockres_unused(struct dlm_lock_resource *res)
84{ 87{
85 if (list_empty(&res->granted) && 88 if (list_empty(&res->granted) &&
86 list_empty(&res->converting) && 89 list_empty(&res->converting) &&
@@ -103,6 +106,20 @@ void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
103 assert_spin_locked(&res->spinlock); 106 assert_spin_locked(&res->spinlock);
104 107
105 if (__dlm_lockres_unused(res)){ 108 if (__dlm_lockres_unused(res)){
109 /* For now, just keep any resource we master */
110 if (res->owner == dlm->node_num)
111 {
112 if (!list_empty(&res->purge)) {
113 mlog(0, "we master %s:%.*s, but it is on "
114 "the purge list. Removing\n",
115 dlm->name, res->lockname.len,
116 res->lockname.name);
117 list_del_init(&res->purge);
118 dlm->purge_count--;
119 }
120 return;
121 }
122
106 if (list_empty(&res->purge)) { 123 if (list_empty(&res->purge)) {
107 mlog(0, "putting lockres %.*s from purge list\n", 124 mlog(0, "putting lockres %.*s from purge list\n",
108 res->lockname.len, res->lockname.name); 125 res->lockname.len, res->lockname.name);
@@ -110,10 +127,23 @@ void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
110 res->last_used = jiffies; 127 res->last_used = jiffies;
111 list_add_tail(&res->purge, &dlm->purge_list); 128 list_add_tail(&res->purge, &dlm->purge_list);
112 dlm->purge_count++; 129 dlm->purge_count++;
130
131 /* if this node is not the owner, there is
132 * no way to keep track of who the owner could be.
133 * unhash it to avoid serious problems. */
134 if (res->owner != dlm->node_num) {
135 mlog(0, "%s:%.*s: doing immediate "
136 "purge of lockres owned by %u\n",
137 dlm->name, res->lockname.len,
138 res->lockname.name, res->owner);
139
140 dlm_purge_lockres_now(dlm, res);
141 }
113 } 142 }
114 } else if (!list_empty(&res->purge)) { 143 } else if (!list_empty(&res->purge)) {
115 mlog(0, "removing lockres %.*s from purge list\n", 144 mlog(0, "removing lockres %.*s from purge list, "
116 res->lockname.len, res->lockname.name); 145 "owner=%u\n", res->lockname.len, res->lockname.name,
146 res->owner);
117 147
118 list_del_init(&res->purge); 148 list_del_init(&res->purge);
119 dlm->purge_count--; 149 dlm->purge_count--;
@@ -165,6 +195,7 @@ again:
165 } else if (ret < 0) { 195 } else if (ret < 0) {
166 mlog(ML_NOTICE, "lockres %.*s: migrate failed, retrying\n", 196 mlog(ML_NOTICE, "lockres %.*s: migrate failed, retrying\n",
167 lockres->lockname.len, lockres->lockname.name); 197 lockres->lockname.len, lockres->lockname.name);
198 msleep(100);
168 goto again; 199 goto again;
169 } 200 }
170 201
@@ -178,6 +209,24 @@ finish:
178 __dlm_unhash_lockres(lockres); 209 __dlm_unhash_lockres(lockres);
179} 210}
180 211
212/* make an unused lockres go away immediately.
213 * as soon as the dlm spinlock is dropped, this lockres
214 * will not be found. kfree still happens on last put. */
215static void dlm_purge_lockres_now(struct dlm_ctxt *dlm,
216 struct dlm_lock_resource *lockres)
217{
218 assert_spin_locked(&dlm->spinlock);
219 assert_spin_locked(&lockres->spinlock);
220
221 BUG_ON(!__dlm_lockres_unused(lockres));
222
223 if (!list_empty(&lockres->purge)) {
224 list_del_init(&lockres->purge);
225 dlm->purge_count--;
226 }
227 __dlm_unhash_lockres(lockres);
228}
229
181static void dlm_run_purge_list(struct dlm_ctxt *dlm, 230static void dlm_run_purge_list(struct dlm_ctxt *dlm,
182 int purge_now) 231 int purge_now)
183{ 232{
@@ -318,8 +367,7 @@ converting:
318 367
319 target->ml.type = target->ml.convert_type; 368 target->ml.type = target->ml.convert_type;
320 target->ml.convert_type = LKM_IVMODE; 369 target->ml.convert_type = LKM_IVMODE;
321 list_del_init(&target->list); 370 list_move_tail(&target->list, &res->granted);
322 list_add_tail(&target->list, &res->granted);
323 371
324 BUG_ON(!target->lksb); 372 BUG_ON(!target->lksb);
325 target->lksb->status = DLM_NORMAL; 373 target->lksb->status = DLM_NORMAL;
@@ -380,8 +428,7 @@ blocked:
380 target->ml.type, target->ml.node); 428 target->ml.type, target->ml.node);
381 429
382 // target->ml.type is already correct 430 // target->ml.type is already correct
383 list_del_init(&target->list); 431 list_move_tail(&target->list, &res->granted);
384 list_add_tail(&target->list, &res->granted);
385 432
386 BUG_ON(!target->lksb); 433 BUG_ON(!target->lksb);
387 target->lksb->status = DLM_NORMAL; 434 target->lksb->status = DLM_NORMAL;
@@ -422,6 +469,8 @@ void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
422 /* don't shuffle secondary queues */ 469 /* don't shuffle secondary queues */
423 if ((res->owner == dlm->node_num) && 470 if ((res->owner == dlm->node_num) &&
424 !(res->state & DLM_LOCK_RES_DIRTY)) { 471 !(res->state & DLM_LOCK_RES_DIRTY)) {
472 /* ref for dirty_list */
473 dlm_lockres_get(res);
425 list_add_tail(&res->dirty, &dlm->dirty_list); 474 list_add_tail(&res->dirty, &dlm->dirty_list);
426 res->state |= DLM_LOCK_RES_DIRTY; 475 res->state |= DLM_LOCK_RES_DIRTY;
427 } 476 }
@@ -606,6 +655,8 @@ static int dlm_thread(void *data)
606 list_del_init(&res->dirty); 655 list_del_init(&res->dirty);
607 spin_unlock(&res->spinlock); 656 spin_unlock(&res->spinlock);
608 spin_unlock(&dlm->spinlock); 657 spin_unlock(&dlm->spinlock);
658 /* Drop dirty_list ref */
659 dlm_lockres_put(res);
609 660
610 /* lockres can be re-dirtied/re-added to the 661 /* lockres can be re-dirtied/re-added to the
611 * dirty_list in this gap, but that is ok */ 662 * dirty_list in this gap, but that is ok */
@@ -642,8 +693,9 @@ static int dlm_thread(void *data)
642 * spinlock and do NOT have the dlm lock. 693 * spinlock and do NOT have the dlm lock.
643 * safe to reserve/queue asts and run the lists. */ 694 * safe to reserve/queue asts and run the lists. */
644 695
645 mlog(0, "calling dlm_shuffle_lists with dlm=%p, " 696 mlog(0, "calling dlm_shuffle_lists with dlm=%s, "
646 "res=%p\n", dlm, res); 697 "res=%.*s\n", dlm->name,
698 res->lockname.len, res->lockname.name);
647 699
648 /* called while holding lockres lock */ 700 /* called while holding lockres lock */
649 dlm_shuffle_lists(dlm, res); 701 dlm_shuffle_lists(dlm, res);
@@ -657,6 +709,8 @@ in_progress:
657 /* if the lock was in-progress, stick 709 /* if the lock was in-progress, stick
658 * it on the back of the list */ 710 * it on the back of the list */
659 if (delay) { 711 if (delay) {
712 /* ref for dirty_list */
713 dlm_lockres_get(res);
660 spin_lock(&res->spinlock); 714 spin_lock(&res->spinlock);
661 list_add_tail(&res->dirty, &dlm->dirty_list); 715 list_add_tail(&res->dirty, &dlm->dirty_list);
662 res->state |= DLM_LOCK_RES_DIRTY; 716 res->state |= DLM_LOCK_RES_DIRTY;
@@ -677,7 +731,7 @@ in_progress:
677 731
678 /* yield and continue right away if there is more work to do */ 732 /* yield and continue right away if there is more work to do */
679 if (!n) { 733 if (!n) {
680 yield(); 734 cond_resched();
681 continue; 735 continue;
682 } 736 }
683 737
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index 7b1a275426..b0c3134f4f 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -271,8 +271,7 @@ void dlm_commit_pending_unlock(struct dlm_lock_resource *res,
271void dlm_commit_pending_cancel(struct dlm_lock_resource *res, 271void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
272 struct dlm_lock *lock) 272 struct dlm_lock *lock)
273{ 273{
274 list_del_init(&lock->list); 274 list_move_tail(&lock->list, &res->granted);
275 list_add_tail(&lock->list, &res->granted);
276 lock->ml.convert_type = LKM_IVMODE; 275 lock->ml.convert_type = LKM_IVMODE;
277} 276}
278 277
@@ -319,6 +318,16 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
319 318
320 mlog_entry("%.*s\n", res->lockname.len, res->lockname.name); 319 mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
321 320
321 if (owner == dlm->node_num) {
322 /* ended up trying to contact ourself. this means
323 * that the lockres had been remote but became local
324 * via a migration. just retry it, now as local */
325 mlog(0, "%s:%.*s: this node became the master due to a "
326 "migration, re-evaluate now\n", dlm->name,
327 res->lockname.len, res->lockname.name);
328 return DLM_FORWARD;
329 }
330
322 memset(&unlock, 0, sizeof(unlock)); 331 memset(&unlock, 0, sizeof(unlock));
323 unlock.node_idx = dlm->node_num; 332 unlock.node_idx = dlm->node_num;
324 unlock.flags = cpu_to_be32(flags); 333 unlock.flags = cpu_to_be32(flags);
diff --git a/fs/ocfs2/dlm/userdlm.c b/fs/ocfs2/dlm/userdlm.c
index 74ca4e5f97..e641b084b3 100644
--- a/fs/ocfs2/dlm/userdlm.c
+++ b/fs/ocfs2/dlm/userdlm.c
@@ -672,7 +672,7 @@ struct dlm_ctxt *user_dlm_register_context(struct qstr *name)
672 u32 dlm_key; 672 u32 dlm_key;
673 char *domain; 673 char *domain;
674 674
675 domain = kmalloc(name->len + 1, GFP_KERNEL); 675 domain = kmalloc(name->len + 1, GFP_NOFS);
676 if (!domain) { 676 if (!domain) {
677 mlog_errno(-ENOMEM); 677 mlog_errno(-ENOMEM);
678 return ERR_PTR(-ENOMEM); 678 return ERR_PTR(-ENOMEM);
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 64cd52860c..4acd37286b 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -242,7 +242,7 @@ static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
242 mlog_exit_void(); 242 mlog_exit_void();
243} 243}
244 244
245static spinlock_t ocfs2_dlm_tracking_lock = SPIN_LOCK_UNLOCKED; 245static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);
246 246
247static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res, 247static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res,
248 struct ocfs2_dlm_debug *dlm_debug) 248 struct ocfs2_dlm_debug *dlm_debug)
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index eebc3cfa6b..910a601b2e 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -49,7 +49,7 @@
49 49
50#include "buffer_head_io.h" 50#include "buffer_head_io.h"
51 51
52spinlock_t trans_inc_lock = SPIN_LOCK_UNLOCKED; 52DEFINE_SPINLOCK(trans_inc_lock);
53 53
54static int ocfs2_force_read_journal(struct inode *inode); 54static int ocfs2_force_read_journal(struct inode *inode);
55static int ocfs2_recover_node(struct ocfs2_super *osb, 55static int ocfs2_recover_node(struct ocfs2_super *osb,
@@ -222,8 +222,7 @@ void ocfs2_handle_add_inode(struct ocfs2_journal_handle *handle,
222 BUG_ON(!list_empty(&OCFS2_I(inode)->ip_handle_list)); 222 BUG_ON(!list_empty(&OCFS2_I(inode)->ip_handle_list));
223 223
224 OCFS2_I(inode)->ip_handle = handle; 224 OCFS2_I(inode)->ip_handle = handle;
225 list_del(&(OCFS2_I(inode)->ip_handle_list)); 225 list_move_tail(&(OCFS2_I(inode)->ip_handle_list), &(handle->inode_list));
226 list_add_tail(&(OCFS2_I(inode)->ip_handle_list), &(handle->inode_list));
227} 226}
228 227
229static void ocfs2_handle_unlock_inodes(struct ocfs2_journal_handle *handle) 228static void ocfs2_handle_unlock_inodes(struct ocfs2_journal_handle *handle)
diff --git a/fs/ocfs2/vote.c b/fs/ocfs2/vote.c
index ee42765a85..cf70fe2075 100644
--- a/fs/ocfs2/vote.c
+++ b/fs/ocfs2/vote.c
@@ -988,9 +988,7 @@ int ocfs2_request_mount_vote(struct ocfs2_super *osb)
988 } 988 }
989 989
990bail: 990bail:
991 if (request) 991 kfree(request);
992 kfree(request);
993
994 return status; 992 return status;
995} 993}
996 994
@@ -1021,9 +1019,7 @@ int ocfs2_request_umount_vote(struct ocfs2_super *osb)
1021 } 1019 }
1022 1020
1023bail: 1021bail:
1024 if (request) 1022 kfree(request);
1025 kfree(request);
1026
1027 return status; 1023 return status;
1028} 1024}
1029 1025
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index efc7c91128..93a56bd4a2 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -1,5 +1,4 @@
1/* $Id: inode.c,v 1.15 2001/11/12 09:43:39 davem Exp $ 1/* inode.c: /proc/openprom handling routines
2 * openpromfs.c: /proc/openprom handling routines
3 * 2 *
4 * Copyright (C) 1996-1999 Jakub Jelinek (jakub@redhat.com) 3 * Copyright (C) 1996-1999 Jakub Jelinek (jakub@redhat.com)
5 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be) 4 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
@@ -12,762 +11,245 @@
12#include <linux/openprom_fs.h> 11#include <linux/openprom_fs.h>
13#include <linux/init.h> 12#include <linux/init.h>
14#include <linux/slab.h> 13#include <linux/slab.h>
15#include <linux/smp_lock.h> 14#include <linux/seq_file.h>
16 15
17#include <asm/openprom.h> 16#include <asm/openprom.h>
18#include <asm/oplib.h> 17#include <asm/oplib.h>
18#include <asm/prom.h>
19#include <asm/uaccess.h> 19#include <asm/uaccess.h>
20 20
21#define ALIASES_NNODES 64 21static DEFINE_MUTEX(op_mutex);
22
23typedef struct {
24 u16 parent;
25 u16 next;
26 u16 child;
27 u16 first_prop;
28 u32 node;
29} openpromfs_node;
30
31typedef struct {
32#define OPP_STRING 0x10
33#define OPP_STRINGLIST 0x20
34#define OPP_BINARY 0x40
35#define OPP_HEXSTRING 0x80
36#define OPP_DIRTY 0x01
37#define OPP_QUOTED 0x02
38#define OPP_NOTQUOTED 0x04
39#define OPP_ASCIIZ 0x08
40 u32 flag;
41 u32 alloclen;
42 u32 len;
43 char *value;
44 char name[8];
45} openprom_property;
46
47static openpromfs_node *nodes;
48static int alloced;
49static u16 last_node;
50static u16 first_prop;
51static u16 options = 0xffff;
52static u16 aliases = 0xffff;
53static int aliases_nodes;
54static char *alias_names [ALIASES_NNODES];
55
56#define OPENPROM_ROOT_INO 16
57#define OPENPROM_FIRST_INO OPENPROM_ROOT_INO
58#define NODE(ino) nodes[ino - OPENPROM_FIRST_INO]
59#define NODE2INO(node) (node + OPENPROM_FIRST_INO)
60#define NODEP2INO(no) (no + OPENPROM_FIRST_INO + last_node)
61
62static int openpromfs_create (struct inode *, struct dentry *, int, struct nameidata *);
63static int openpromfs_readdir(struct file *, void *, filldir_t);
64static struct dentry *openpromfs_lookup(struct inode *, struct dentry *dentry, struct nameidata *nd);
65static int openpromfs_unlink (struct inode *, struct dentry *dentry);
66 22
67static inline u16 ptr_nod(void *p) 23#define OPENPROM_ROOT_INO 0
68{
69 return (long)p & 0xFFFF;
70}
71 24
72static ssize_t nodenum_read(struct file *file, char __user *buf, 25enum op_inode_type {
73 size_t count, loff_t *ppos) 26 op_inode_node,
27 op_inode_prop,
28};
29
30union op_inode_data {
31 struct device_node *node;
32 struct property *prop;
33};
34
35struct op_inode_info {
36 struct inode vfs_inode;
37 enum op_inode_type type;
38 union op_inode_data u;
39};
40
41static inline struct op_inode_info *OP_I(struct inode *inode)
74{ 42{
75 struct inode *inode = file->f_dentry->d_inode; 43 return container_of(inode, struct op_inode_info, vfs_inode);
76 char buffer[10];
77
78 if (count < 0 || !inode->u.generic_ip)
79 return -EINVAL;
80 sprintf (buffer, "%8.8lx\n", (long)inode->u.generic_ip);
81 if (file->f_pos >= 9)
82 return 0;
83 if (count > 9 - file->f_pos)
84 count = 9 - file->f_pos;
85 if (copy_to_user(buf, buffer + file->f_pos, count))
86 return -EFAULT;
87 *ppos += count;
88 return count;
89} 44}
90 45
91static ssize_t property_read(struct file *filp, char __user *buf, 46static int is_string(unsigned char *p, int len)
92 size_t count, loff_t *ppos)
93{ 47{
94 struct inode *inode = filp->f_dentry->d_inode; 48 int i;
95 int i, j, k;
96 u32 node;
97 char *p, *s;
98 u32 *q;
99 openprom_property *op;
100 char buffer[64];
101
102 if (!filp->private_data) {
103 node = nodes[ptr_nod(inode->u.generic_ip)].node;
104 i = ((u32)(long)inode->u.generic_ip) >> 16;
105 if (ptr_nod(inode->u.generic_ip) == aliases) {
106 if (i >= aliases_nodes)
107 p = NULL;
108 else
109 p = alias_names [i];
110 } else
111 for (p = prom_firstprop (node, buffer);
112 i && p && *p;
113 p = prom_nextprop (node, p, buffer), i--)
114 /* nothing */ ;
115 if (!p || !*p)
116 return -EIO;
117 i = prom_getproplen (node, p);
118 if (i < 0) {
119 if (ptr_nod(inode->u.generic_ip) == aliases)
120 i = 0;
121 else
122 return -EIO;
123 }
124 k = i;
125 if (i < 64) i = 64;
126 filp->private_data = kmalloc (sizeof (openprom_property)
127 + (j = strlen (p)) + 2 * i,
128 GFP_KERNEL);
129 if (!filp->private_data)
130 return -ENOMEM;
131 op = filp->private_data;
132 op->flag = 0;
133 op->alloclen = 2 * i;
134 strcpy (op->name, p);
135 op->value = (char *)(((unsigned long)(op->name + j + 4)) & ~3);
136 op->len = k;
137 if (k && prom_getproperty (node, p, op->value, i) < 0)
138 return -EIO;
139 op->value [k] = 0;
140 if (k) {
141 for (s = NULL, p = op->value; p < op->value + k; p++) {
142 if ((*p >= ' ' && *p <= '~') || *p == '\n') {
143 op->flag |= OPP_STRING;
144 s = p;
145 continue;
146 }
147 if (p > op->value && !*p && s == p - 1) {
148 if (p < op->value + k - 1)
149 op->flag |= OPP_STRINGLIST;
150 else
151 op->flag |= OPP_ASCIIZ;
152 continue;
153 }
154 if (k == 1 && !*p) {
155 op->flag |= (OPP_STRING|OPP_ASCIIZ);
156 break;
157 }
158 op->flag &= ~(OPP_STRING|OPP_STRINGLIST);
159 if (k & 3)
160 op->flag |= OPP_HEXSTRING;
161 else
162 op->flag |= OPP_BINARY;
163 break;
164 }
165 if (op->flag & OPP_STRINGLIST)
166 op->flag &= ~(OPP_STRING);
167 if (op->flag & OPP_ASCIIZ)
168 op->len--;
169 }
170 } else
171 op = filp->private_data;
172 if (!count || !(op->len || (op->flag & OPP_ASCIIZ)))
173 return 0;
174 if (*ppos >= 0xffffff || count >= 0xffffff)
175 return -EINVAL;
176 if (op->flag & OPP_STRINGLIST) {
177 for (k = 0, p = op->value; p < op->value + op->len; p++)
178 if (!*p)
179 k++;
180 i = op->len + 4 * k + 3;
181 } else if (op->flag & OPP_STRING) {
182 i = op->len + 3;
183 } else if (op->flag & OPP_BINARY) {
184 i = (op->len * 9) >> 2;
185 } else {
186 i = (op->len << 1) + 1;
187 }
188 k = *ppos;
189 if (k >= i) return 0;
190 if (count > i - k) count = i - k;
191 if (op->flag & OPP_STRING) {
192 if (!k) {
193 if (put_user('\'', buf))
194 return -EFAULT;
195 k++;
196 count--;
197 }
198 49
199 if (k + count >= i - 2) 50 for (i = 0; i < len; i++) {
200 j = i - 2 - k; 51 unsigned char val = p[i];
201 else
202 j = count;
203
204 if (j >= 0) {
205 if (copy_to_user(buf + k - *ppos,
206 op->value + k - 1, j))
207 return -EFAULT;
208 count -= j;
209 k += j;
210 }
211 52
212 if (count) { 53 if ((i && !val) ||
213 if (put_user('\'', &buf [k++ - *ppos])) 54 (val >= ' ' && val <= '~'))
214 return -EFAULT; 55 continue;
215 }
216 if (count > 1) {
217 if (put_user('\n', &buf [k++ - *ppos]))
218 return -EFAULT;
219 }
220 } else if (op->flag & OPP_STRINGLIST) {
221 char *tmp;
222
223 tmp = kmalloc (i, GFP_KERNEL);
224 if (!tmp)
225 return -ENOMEM;
226
227 s = tmp;
228 *s++ = '\'';
229 for (p = op->value; p < op->value + op->len; p++) {
230 if (!*p) {
231 strcpy(s, "' + '");
232 s += 5;
233 continue;
234 }
235 *s++ = *p;
236 }
237 strcpy(s, "'\n");
238
239 if (copy_to_user(buf, tmp + k, count))
240 return -EFAULT;
241
242 kfree(tmp);
243 k += count;
244
245 } else if (op->flag & OPP_BINARY) {
246 char buffer[10];
247 u32 *first, *last;
248 int first_off, last_cnt;
249
250 first = ((u32 *)op->value) + k / 9;
251 first_off = k % 9;
252 last = ((u32 *)op->value) + (k + count - 1) / 9;
253 last_cnt = (k + count) % 9;
254 if (!last_cnt) last_cnt = 9;
255
256 if (first == last) {
257 sprintf (buffer, "%08x.", *first);
258 if (copy_to_user(buf, buffer + first_off,
259 last_cnt - first_off))
260 return -EFAULT;
261 buf += last_cnt - first_off;
262 } else {
263 for (q = first; q <= last; q++) {
264 sprintf (buffer, "%08x.", *q);
265 if (q == first) {
266 if (copy_to_user(buf, buffer + first_off,
267 9 - first_off))
268 return -EFAULT;
269 buf += 9 - first_off;
270 } else if (q == last) {
271 if (copy_to_user(buf, buffer, last_cnt))
272 return -EFAULT;
273 buf += last_cnt;
274 } else {
275 if (copy_to_user(buf, buffer, 9))
276 return -EFAULT;
277 buf += 9;
278 }
279 }
280 }
281 56
282 if (last == (u32 *)(op->value + op->len - 4) && last_cnt == 9) { 57 return 0;
283 if (put_user('\n', (buf - 1))) 58 }
284 return -EFAULT;
285 }
286 59
287 k += count; 60 return 1;
61}
288 62
289 } else if (op->flag & OPP_HEXSTRING) { 63static int property_show(struct seq_file *f, void *v)
290 char buffer[3]; 64{
65 struct property *prop = f->private;
66 void *pval;
67 int len;
291 68
292 if ((k < i - 1) && (k & 1)) { 69 len = prop->length;
293 sprintf (buffer, "%02x", 70 pval = prop->value;
294 (unsigned char) *(op->value + (k >> 1)) & 0xff);
295 if (put_user(buffer[1], &buf[k++ - *ppos]))
296 return -EFAULT;
297 count--;
298 }
299 71
300 for (; (count > 1) && (k < i - 1); k += 2) { 72 if (is_string(pval, len)) {
301 sprintf (buffer, "%02x", 73 while (len > 0) {
302 (unsigned char) *(op->value + (k >> 1)) & 0xff); 74 int n = strlen(pval);
303 if (copy_to_user(buf + k - *ppos, buffer, 2))
304 return -EFAULT;
305 count -= 2;
306 }
307 75
308 if (count && (k < i - 1)) { 76 seq_printf(f, "%s", (char *) pval);
309 sprintf (buffer, "%02x",
310 (unsigned char) *(op->value + (k >> 1)) & 0xff);
311 if (put_user(buffer[0], &buf[k++ - *ppos]))
312 return -EFAULT;
313 count--;
314 }
315 77
316 if (count) { 78 /* Skip over the NULL byte too. */
317 if (put_user('\n', &buf [k++ - *ppos])) 79 pval += n + 1;
318 return -EFAULT; 80 len -= n + 1;
319 }
320 }
321 count = k - *ppos;
322 *ppos = k;
323 return count;
324}
325 81
326static ssize_t property_write(struct file *filp, const char __user *buf, 82 if (len > 0)
327 size_t count, loff_t *ppos) 83 seq_printf(f, " + ");
328{
329 int i, j, k;
330 char *p;
331 u32 *q;
332 void *b;
333 openprom_property *op;
334
335 if (*ppos >= 0xffffff || count >= 0xffffff)
336 return -EINVAL;
337 if (!filp->private_data) {
338 i = property_read (filp, NULL, 0, NULL);
339 if (i)
340 return i;
341 }
342 k = *ppos;
343 op = filp->private_data;
344 if (!(op->flag & OPP_STRING)) {
345 u32 *first, *last;
346 int first_off, last_cnt;
347 u32 mask, mask2;
348 char tmp [9];
349 int forcelen = 0;
350
351 j = k % 9;
352 for (i = 0; i < count; i++, j++) {
353 if (j == 9) j = 0;
354 if (!j) {
355 char ctmp;
356 if (get_user(ctmp, &buf[i]))
357 return -EFAULT;
358 if (ctmp != '.') {
359 if (ctmp != '\n') {
360 if (op->flag & OPP_BINARY)
361 return -EINVAL;
362 else
363 goto write_try_string;
364 } else {
365 count = i + 1;
366 forcelen = 1;
367 break;
368 }
369 }
370 } else {
371 char ctmp;
372 if (get_user(ctmp, &buf[i]))
373 return -EFAULT;
374 if (ctmp < '0' ||
375 (ctmp > '9' && ctmp < 'A') ||
376 (ctmp > 'F' && ctmp < 'a') ||
377 ctmp > 'f') {
378 if (op->flag & OPP_BINARY)
379 return -EINVAL;
380 else
381 goto write_try_string;
382 }
383 }
384 }
385 op->flag |= OPP_BINARY;
386 tmp [8] = 0;
387 i = ((count + k + 8) / 9) << 2;
388 if (op->alloclen <= i) {
389 b = kmalloc (sizeof (openprom_property) + 2 * i,
390 GFP_KERNEL);
391 if (!b)
392 return -ENOMEM;
393 memcpy (b, filp->private_data,
394 sizeof (openprom_property)
395 + strlen (op->name) + op->alloclen);
396 memset (b + sizeof (openprom_property)
397 + strlen (op->name) + op->alloclen,
398 0, 2 * i - op->alloclen);
399 op = b;
400 op->alloclen = 2*i;
401 b = filp->private_data;
402 filp->private_data = op;
403 kfree (b);
404 } 84 }
405 first = ((u32 *)op->value) + (k / 9); 85 } else {
406 first_off = k % 9; 86 if (len & 3) {
407 last = (u32 *)(op->value + i); 87 while (len) {
408 last_cnt = (k + count) % 9; 88 len--;
409 if (first + 1 == last) { 89 if (len)
410 memset (tmp, '0', 8); 90 seq_printf(f, "%02x.",
411 if (copy_from_user(tmp + first_off, buf, 91 *(unsigned char *) pval);
412 (count + first_off > 8) ? 92 else
413 8 - first_off : count)) 93 seq_printf(f, "%02x",
414 return -EFAULT; 94 *(unsigned char *) pval);
415 mask = 0xffffffff; 95 pval++;
416 mask2 = 0xffffffff;
417 for (j = 0; j < first_off; j++)
418 mask >>= 1;
419 for (j = 8 - count - first_off; j > 0; j--)
420 mask2 <<= 1;
421 mask &= mask2;
422 if (mask) {
423 *first &= ~mask;
424 *first |= simple_strtoul (tmp, NULL, 16);
425 op->flag |= OPP_DIRTY;
426 } 96 }
427 } else { 97 } else {
428 op->flag |= OPP_DIRTY; 98 while (len >= 4) {
429 for (q = first; q < last; q++) { 99 len -= 4;
430 if (q == first) { 100
431 if (first_off < 8) { 101 if (len)
432 memset (tmp, '0', 8); 102 seq_printf(f, "%08x.",
433 if (copy_from_user(tmp + first_off, 103 *(unsigned int *) pval);
434 buf, 104 else
435 8 - first_off)) 105 seq_printf(f, "%08x",
436 return -EFAULT; 106 *(unsigned int *) pval);
437 mask = 0xffffffff; 107 pval += 4;
438 for (j = 0; j < first_off; j++)
439 mask >>= 1;
440 *q &= ~mask;
441 *q |= simple_strtoul (tmp,NULL,16);
442 }
443 buf += 9;
444 } else if ((q == last - 1) && last_cnt
445 && (last_cnt < 8)) {
446 memset (tmp, '0', 8);
447 if (copy_from_user(tmp, buf, last_cnt))
448 return -EFAULT;
449 mask = 0xffffffff;
450 for (j = 0; j < 8 - last_cnt; j++)
451 mask <<= 1;
452 *q &= ~mask;
453 *q |= simple_strtoul (tmp, NULL, 16);
454 buf += last_cnt;
455 } else {
456 char tchars[2 * sizeof(long) + 1];
457
458 if (copy_from_user(tchars, buf, sizeof(tchars) - 1))
459 return -EFAULT;
460 tchars[sizeof(tchars) - 1] = '\0';
461 *q = simple_strtoul (tchars, NULL, 16);
462 buf += 9;
463 }
464 }
465 }
466 if (!forcelen) {
467 if (op->len < i)
468 op->len = i;
469 } else
470 op->len = i;
471 *ppos += count;
472 }
473write_try_string:
474 if (!(op->flag & OPP_BINARY)) {
475 if (!(op->flag & (OPP_QUOTED | OPP_NOTQUOTED))) {
476 char ctmp;
477
478 /* No way, if somebody starts writing from the middle,
479 * we don't know whether he uses quotes around or not
480 */
481 if (k > 0)
482 return -EINVAL;
483 if (get_user(ctmp, buf))
484 return -EFAULT;
485 if (ctmp == '\'') {
486 op->flag |= OPP_QUOTED;
487 buf++;
488 count--;
489 (*ppos)++;
490 if (!count) {
491 op->flag |= OPP_STRING;
492 return 1;
493 }
494 } else
495 op->flag |= OPP_NOTQUOTED;
496 }
497 op->flag |= OPP_STRING;
498 if (op->alloclen <= count + *ppos) {
499 b = kmalloc (sizeof (openprom_property)
500 + 2 * (count + *ppos), GFP_KERNEL);
501 if (!b)
502 return -ENOMEM;
503 memcpy (b, filp->private_data,
504 sizeof (openprom_property)
505 + strlen (op->name) + op->alloclen);
506 memset (b + sizeof (openprom_property)
507 + strlen (op->name) + op->alloclen,
508 0, 2*(count - *ppos) - op->alloclen);
509 op = b;
510 op->alloclen = 2*(count + *ppos);
511 b = filp->private_data;
512 filp->private_data = op;
513 kfree (b);
514 }
515 p = op->value + *ppos - ((op->flag & OPP_QUOTED) ? 1 : 0);
516 if (copy_from_user(p, buf, count))
517 return -EFAULT;
518 op->flag |= OPP_DIRTY;
519 for (i = 0; i < count; i++, p++)
520 if (*p == '\n') {
521 *p = 0;
522 break;
523 } 108 }
524 if (i < count) {
525 op->len = p - op->value;
526 *ppos += i + 1;
527 if ((p > op->value) && (op->flag & OPP_QUOTED)
528 && (*(p - 1) == '\''))
529 op->len--;
530 } else {
531 if (p - op->value > op->len)
532 op->len = p - op->value;
533 *ppos += count;
534 } 109 }
535 } 110 }
536 return *ppos - k; 111 seq_printf(f, "\n");
112
113 return 0;
537} 114}
538 115
539int property_release (struct inode *inode, struct file *filp) 116static void *property_start(struct seq_file *f, loff_t *pos)
540{ 117{
541 openprom_property *op = filp->private_data; 118 if (*pos == 0)
542 int error; 119 return pos;
543 u32 node; 120 return NULL;
544 121}
545 if (!op) 122
546 return 0; 123static void *property_next(struct seq_file *f, void *v, loff_t *pos)
547 lock_kernel(); 124{
548 node = nodes[ptr_nod(inode->u.generic_ip)].node; 125 (*pos)++;
549 if (ptr_nod(inode->u.generic_ip) == aliases) { 126 return NULL;
550 if ((op->flag & OPP_DIRTY) && (op->flag & OPP_STRING)) { 127}
551 char *p = op->name; 128
552 int i = (op->value - op->name) - strlen (op->name) - 1; 129static void property_stop(struct seq_file *f, void *v)
553 op->value [op->len] = 0; 130{
554 *(op->value - 1) = ' '; 131 /* Nothing to do */
555 if (i) { 132}
556 for (p = op->value - i - 2; p >= op->name; p--) 133
557 p[i] = *p; 134static struct seq_operations property_op = {
558 p = op->name + i; 135 .start = property_start,
559 } 136 .next = property_next,
560 memcpy (p - 8, "nvalias ", 8); 137 .stop = property_stop,
561 prom_feval (p - 8); 138 .show = property_show
562 } 139};
563 } else if (op->flag & OPP_DIRTY) { 140
564 if (op->flag & OPP_STRING) { 141static int property_open(struct inode *inode, struct file *file)
565 op->value [op->len] = 0; 142{
566 error = prom_setprop (node, op->name, 143 struct op_inode_info *oi = OP_I(inode);
567 op->value, op->len + 1); 144 int ret;
568 if (error <= 0) 145
569 printk (KERN_WARNING "openpromfs: " 146 BUG_ON(oi->type != op_inode_prop);
570 "Couldn't write property %s\n", 147
571 op->name); 148 ret = seq_open(file, &property_op);
572 } else if ((op->flag & OPP_BINARY) || !op->len) { 149 if (!ret) {
573 error = prom_setprop (node, op->name, 150 struct seq_file *m = file->private_data;
574 op->value, op->len); 151 m->private = oi->u.prop;
575 if (error <= 0)
576 printk (KERN_WARNING "openpromfs: "
577 "Couldn't write property %s\n",
578 op->name);
579 } else {
580 printk (KERN_WARNING "openpromfs: "
581 "Unknown property type of %s\n",
582 op->name);
583 }
584 } 152 }
585 unlock_kernel(); 153 return ret;
586 kfree (filp->private_data);
587 return 0;
588} 154}
589 155
590static const struct file_operations openpromfs_prop_ops = { 156static const struct file_operations openpromfs_prop_ops = {
591 .read = property_read, 157 .open = property_open,
592 .write = property_write, 158 .read = seq_read,
593 .release = property_release, 159 .llseek = seq_lseek,
160 .release = seq_release,
594}; 161};
595 162
596static const struct file_operations openpromfs_nodenum_ops = { 163static int openpromfs_readdir(struct file *, void *, filldir_t);
597 .read = nodenum_read,
598};
599 164
600static const struct file_operations openprom_operations = { 165static const struct file_operations openprom_operations = {
601 .read = generic_read_dir, 166 .read = generic_read_dir,
602 .readdir = openpromfs_readdir, 167 .readdir = openpromfs_readdir,
603}; 168};
604 169
605static struct inode_operations openprom_alias_inode_operations = { 170static struct dentry *openpromfs_lookup(struct inode *, struct dentry *, struct nameidata *);
606 .create = openpromfs_create,
607 .lookup = openpromfs_lookup,
608 .unlink = openpromfs_unlink,
609};
610 171
611static struct inode_operations openprom_inode_operations = { 172static struct inode_operations openprom_inode_operations = {
612 .lookup = openpromfs_lookup, 173 .lookup = openpromfs_lookup,
613}; 174};
614 175
615static int lookup_children(u16 n, const char * name, int len) 176static struct dentry *openpromfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
616{ 177{
617 int ret; 178 struct op_inode_info *ent_oi, *oi = OP_I(dir);
618 u16 node; 179 struct device_node *dp, *child;
619 for (; n != 0xffff; n = nodes[n].next) { 180 struct property *prop;
620 node = nodes[n].child; 181 enum op_inode_type ent_type;
621 if (node != 0xffff) { 182 union op_inode_data ent_data;
622 char buffer[128];
623 int i;
624 char *p;
625
626 while (node != 0xffff) {
627 if (prom_getname (nodes[node].node,
628 buffer, 128) >= 0) {
629 i = strlen (buffer);
630 if ((len == i)
631 && !strncmp (buffer, name, len))
632 return NODE2INO(node);
633 p = strchr (buffer, '@');
634 if (p && (len == p - buffer)
635 && !strncmp (buffer, name, len))
636 return NODE2INO(node);
637 }
638 node = nodes[node].next;
639 }
640 } else
641 continue;
642 ret = lookup_children (nodes[n].child, name, len);
643 if (ret) return ret;
644 }
645 return 0;
646}
647
648static struct dentry *openpromfs_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd)
649{
650 int ino = 0;
651#define OPFSL_DIR 0
652#define OPFSL_PROPERTY 1
653#define OPFSL_NODENUM 2
654 int type = 0;
655 char buffer[128];
656 char *p;
657 const char *name; 183 const char *name;
658 u32 n;
659 u16 dirnode;
660 unsigned int len;
661 int i;
662 struct inode *inode; 184 struct inode *inode;
663 char buffer2[64]; 185 unsigned int ino;
186 int len;
664 187
665 inode = NULL; 188 BUG_ON(oi->type != op_inode_node);
189
190 dp = oi->u.node;
191
666 name = dentry->d_name.name; 192 name = dentry->d_name.name;
667 len = dentry->d_name.len; 193 len = dentry->d_name.len;
668 lock_kernel(); 194
669 if (name [0] == '.' && len == 5 && !strncmp (name + 1, "node", 4)) { 195 mutex_lock(&op_mutex);
670 ino = NODEP2INO(NODE(dir->i_ino).first_prop); 196
671 type = OPFSL_NODENUM; 197 child = dp->child;
672 } 198 while (child) {
673 if (!ino) { 199 int n = strlen(child->path_component_name);
674 u16 node = NODE(dir->i_ino).child; 200
675 while (node != 0xffff) { 201 if (len == n &&
676 if (prom_getname (nodes[node].node, buffer, 128) >= 0) { 202 !strncmp(child->path_component_name, name, len)) {
677 i = strlen (buffer); 203 ent_type = op_inode_node;
678 if (len == i && !strncmp (buffer, name, len)) { 204 ent_data.node = child;
679 ino = NODE2INO(node); 205 ino = child->unique_id;
680 type = OPFSL_DIR; 206 goto found;
681 break;
682 }
683 p = strchr (buffer, '@');
684 if (p && (len == p - buffer)
685 && !strncmp (buffer, name, len)) {
686 ino = NODE2INO(node);
687 type = OPFSL_DIR;
688 break;
689 }
690 }
691 node = nodes[node].next;
692 }
693 }
694 n = NODE(dir->i_ino).node;
695 dirnode = dir->i_ino - OPENPROM_FIRST_INO;
696 if (!ino) {
697 int j = NODEP2INO(NODE(dir->i_ino).first_prop);
698 if (dirnode != aliases) {
699 for (p = prom_firstprop (n, buffer2);
700 p && *p;
701 p = prom_nextprop (n, p, buffer2)) {
702 j++;
703 if ((len == strlen (p))
704 && !strncmp (p, name, len)) {
705 ino = j;
706 type = OPFSL_PROPERTY;
707 break;
708 }
709 }
710 } else {
711 int k;
712 for (k = 0; k < aliases_nodes; k++) {
713 j++;
714 if (alias_names [k]
715 && (len == strlen (alias_names [k]))
716 && !strncmp (alias_names [k], name, len)) {
717 ino = j;
718 type = OPFSL_PROPERTY;
719 break;
720 }
721 }
722 } 207 }
208 child = child->sibling;
723 } 209 }
724 if (!ino) { 210
725 ino = lookup_children (NODE(dir->i_ino).child, name, len); 211 prop = dp->properties;
726 if (ino) 212 while (prop) {
727 type = OPFSL_DIR; 213 int n = strlen(prop->name);
728 else { 214
729 unlock_kernel(); 215 if (len == n && !strncmp(prop->name, name, len)) {
730 return ERR_PTR(-ENOENT); 216 ent_type = op_inode_prop;
217 ent_data.prop = prop;
218 ino = prop->unique_id;
219 goto found;
731 } 220 }
221
222 prop = prop->next;
732 } 223 }
733 inode = iget (dir->i_sb, ino); 224
734 unlock_kernel(); 225 mutex_unlock(&op_mutex);
226 return ERR_PTR(-ENOENT);
227
228found:
229 inode = iget(dir->i_sb, ino);
230 mutex_unlock(&op_mutex);
735 if (!inode) 231 if (!inode)
736 return ERR_PTR(-EINVAL); 232 return ERR_PTR(-EINVAL);
737 switch (type) { 233 ent_oi = OP_I(inode);
738 case OPFSL_DIR: 234 ent_oi->type = ent_type;
235 ent_oi->u = ent_data;
236
237 switch (ent_type) {
238 case op_inode_node:
739 inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO; 239 inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
740 if (ino == OPENPROM_FIRST_INO + aliases) { 240 inode->i_op = &openprom_inode_operations;
741 inode->i_mode |= S_IWUSR;
742 inode->i_op = &openprom_alias_inode_operations;
743 } else
744 inode->i_op = &openprom_inode_operations;
745 inode->i_fop = &openprom_operations; 241 inode->i_fop = &openprom_operations;
746 inode->i_nlink = 2; 242 inode->i_nlink = 2;
747 break; 243 break;
748 case OPFSL_NODENUM: 244 case op_inode_prop:
749 inode->i_mode = S_IFREG | S_IRUGO; 245 if (!strcmp(dp->name, "options") && (len == 17) &&
750 inode->i_fop = &openpromfs_nodenum_ops; 246 !strncmp (name, "security-password", 17))
751 inode->i_nlink = 1;
752 inode->u.generic_ip = (void *)(long)(n);
753 break;
754 case OPFSL_PROPERTY:
755 if ((dirnode == options) && (len == 17)
756 && !strncmp (name, "security-password", 17))
757 inode->i_mode = S_IFREG | S_IRUSR | S_IWUSR; 247 inode->i_mode = S_IFREG | S_IRUSR | S_IWUSR;
758 else { 248 else
759 inode->i_mode = S_IFREG | S_IRUGO; 249 inode->i_mode = S_IFREG | S_IRUGO;
760 if (dirnode == options || dirnode == aliases) {
761 if (len != 4 || strncmp (name, "name", 4))
762 inode->i_mode |= S_IWUSR;
763 }
764 }
765 inode->i_fop = &openpromfs_prop_ops; 250 inode->i_fop = &openpromfs_prop_ops;
766 inode->i_nlink = 1; 251 inode->i_nlink = 1;
767 if (inode->i_size < 0) 252 inode->i_size = ent_oi->u.prop->length;
768 inode->i_size = 0;
769 inode->u.generic_ip = (void *)(long)(((u16)dirnode) |
770 (((u16)(ino - NODEP2INO(NODE(dir->i_ino).first_prop) - 1)) << 16));
771 break; 253 break;
772 } 254 }
773 255
@@ -781,237 +263,89 @@ static struct dentry *openpromfs_lookup(struct inode * dir, struct dentry *dentr
781static int openpromfs_readdir(struct file * filp, void * dirent, filldir_t filldir) 263static int openpromfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
782{ 264{
783 struct inode *inode = filp->f_dentry->d_inode; 265 struct inode *inode = filp->f_dentry->d_inode;
266 struct op_inode_info *oi = OP_I(inode);
267 struct device_node *dp = oi->u.node;
268 struct device_node *child;
269 struct property *prop;
784 unsigned int ino; 270 unsigned int ino;
785 u32 n; 271 int i;
786 int i, j; 272
787 char buffer[128]; 273 mutex_lock(&op_mutex);
788 u16 node;
789 char *p;
790 char buffer2[64];
791
792 lock_kernel();
793 274
794 ino = inode->i_ino; 275 ino = inode->i_ino;
795 i = filp->f_pos; 276 i = filp->f_pos;
796 switch (i) { 277 switch (i) {
797 case 0: 278 case 0:
798 if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0) goto out; 279 if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
280 goto out;
799 i++; 281 i++;
800 filp->f_pos++; 282 filp->f_pos++;
801 /* fall thru */ 283 /* fall thru */
802 case 1: 284 case 1:
803 if (filldir(dirent, "..", 2, i, 285 if (filldir(dirent, "..", 2, i,
804 (NODE(ino).parent == 0xffff) ? 286 (dp->parent == NULL ?
805 OPENPROM_ROOT_INO : NODE2INO(NODE(ino).parent), DT_DIR) < 0) 287 OPENPROM_ROOT_INO :
288 dp->parent->unique_id), DT_DIR) < 0)
806 goto out; 289 goto out;
807 i++; 290 i++;
808 filp->f_pos++; 291 filp->f_pos++;
809 /* fall thru */ 292 /* fall thru */
810 default: 293 default:
811 i -= 2; 294 i -= 2;
812 node = NODE(ino).child; 295
813 while (i && node != 0xffff) { 296 /* First, the children nodes as directories. */
814 node = nodes[node].next; 297 child = dp->child;
298 while (i && child) {
299 child = child->sibling;
815 i--; 300 i--;
816 } 301 }
817 while (node != 0xffff) { 302 while (child) {
818 if (prom_getname (nodes[node].node, buffer, 128) < 0) 303 if (filldir(dirent,
819 goto out; 304 child->path_component_name,
820 if (filldir(dirent, buffer, strlen(buffer), 305 strlen(child->path_component_name),
821 filp->f_pos, NODE2INO(node), DT_DIR) < 0) 306 filp->f_pos, child->unique_id, DT_DIR) < 0)
822 goto out; 307 goto out;
308
823 filp->f_pos++; 309 filp->f_pos++;
824 node = nodes[node].next; 310 child = child->sibling;
825 } 311 }
826 j = NODEP2INO(NODE(ino).first_prop); 312
827 if (!i) { 313 /* Next, the properties as files. */
828 if (filldir(dirent, ".node", 5, filp->f_pos, j, DT_REG) < 0) 314 prop = dp->properties;
315 while (i && prop) {
316 prop = prop->next;
317 i--;
318 }
319 while (prop) {
320 if (filldir(dirent, prop->name, strlen(prop->name),
321 filp->f_pos, prop->unique_id, DT_REG) < 0)
829 goto out; 322 goto out;
323
830 filp->f_pos++; 324 filp->f_pos++;
831 } else 325 prop = prop->next;
832 i--;
833 n = NODE(ino).node;
834 if (ino == OPENPROM_FIRST_INO + aliases) {
835 for (j++; i < aliases_nodes; i++, j++) {
836 if (alias_names [i]) {
837 if (filldir (dirent, alias_names [i],
838 strlen (alias_names [i]),
839 filp->f_pos, j, DT_REG) < 0) goto out;
840 filp->f_pos++;
841 }
842 }
843 } else {
844 for (p = prom_firstprop (n, buffer2);
845 p && *p;
846 p = prom_nextprop (n, p, buffer2)) {
847 j++;
848 if (i) i--;
849 else {
850 if (filldir(dirent, p, strlen(p),
851 filp->f_pos, j, DT_REG) < 0)
852 goto out;
853 filp->f_pos++;
854 }
855 }
856 } 326 }
857 } 327 }
858out: 328out:
859 unlock_kernel(); 329 mutex_unlock(&op_mutex);
860 return 0;
861}
862
863static int openpromfs_create (struct inode *dir, struct dentry *dentry, int mode,
864 struct nameidata *nd)
865{
866 char *p;
867 struct inode *inode;
868
869 if (!dir)
870 return -ENOENT;
871 if (dentry->d_name.len > 256)
872 return -EINVAL;
873 p = kmalloc (dentry->d_name.len + 1, GFP_KERNEL);
874 if (!p)
875 return -ENOMEM;
876 strncpy (p, dentry->d_name.name, dentry->d_name.len);
877 p [dentry->d_name.len] = 0;
878 lock_kernel();
879 if (aliases_nodes == ALIASES_NNODES) {
880 kfree(p);
881 unlock_kernel();
882 return -EIO;
883 }
884 alias_names [aliases_nodes++] = p;
885 inode = iget (dir->i_sb,
886 NODEP2INO(NODE(dir->i_ino).first_prop) + aliases_nodes);
887 if (!inode) {
888 unlock_kernel();
889 return -EINVAL;
890 }
891 inode->i_mode = S_IFREG | S_IRUGO | S_IWUSR;
892 inode->i_fop = &openpromfs_prop_ops;
893 inode->i_nlink = 1;
894 if (inode->i_size < 0) inode->i_size = 0;
895 inode->u.generic_ip = (void *)(long)(((u16)aliases) |
896 (((u16)(aliases_nodes - 1)) << 16));
897 unlock_kernel();
898 d_instantiate(dentry, inode);
899 return 0; 330 return 0;
900} 331}
901 332
902static int openpromfs_unlink (struct inode *dir, struct dentry *dentry) 333static kmem_cache_t *op_inode_cachep;
903{
904 unsigned int len;
905 char *p;
906 const char *name;
907 int i;
908
909 name = dentry->d_name.name;
910 len = dentry->d_name.len;
911 lock_kernel();
912 for (i = 0; i < aliases_nodes; i++)
913 if ((strlen (alias_names [i]) == len)
914 && !strncmp (name, alias_names[i], len)) {
915 char buffer[512];
916
917 p = alias_names [i];
918 alias_names [i] = NULL;
919 kfree (p);
920 strcpy (buffer, "nvunalias ");
921 memcpy (buffer + 10, name, len);
922 buffer [10 + len] = 0;
923 prom_feval (buffer);
924 }
925 unlock_kernel();
926 return 0;
927}
928 334
929/* {{{ init section */ 335static struct inode *openprom_alloc_inode(struct super_block *sb)
930static int __init check_space (u16 n)
931{ 336{
932 unsigned long pages; 337 struct op_inode_info *oi;
933 338
934 if ((1 << alloced) * PAGE_SIZE < (n + 2) * sizeof(openpromfs_node)) { 339 oi = kmem_cache_alloc(op_inode_cachep, SLAB_KERNEL);
935 pages = __get_free_pages (GFP_KERNEL, alloced + 1); 340 if (!oi)
936 if (!pages) 341 return NULL;
937 return -1;
938 342
939 if (nodes) { 343 return &oi->vfs_inode;
940 memcpy ((char *)pages, nodes,
941 (1 << alloced) * PAGE_SIZE);
942 free_pages ((unsigned long)nodes, alloced);
943 }
944 alloced++;
945 nodes = (openpromfs_node *)pages;
946 }
947 return 0;
948} 344}
949 345
950static u16 __init get_nodes (u16 parent, u32 node) 346static void openprom_destroy_inode(struct inode *inode)
951{ 347{
952 char *p; 348 kmem_cache_free(op_inode_cachep, OP_I(inode));
953 u16 n = last_node++, i;
954 char buffer[64];
955
956 if (check_space (n) < 0)
957 return 0xffff;
958 nodes[n].parent = parent;
959 nodes[n].node = node;
960 nodes[n].next = 0xffff;
961 nodes[n].child = 0xffff;
962 nodes[n].first_prop = first_prop++;
963 if (!parent) {
964 char buffer[8];
965 int j;
966
967 if ((j = prom_getproperty (node, "name", buffer, 8)) >= 0) {
968 buffer[j] = 0;
969 if (!strcmp (buffer, "options"))
970 options = n;
971 else if (!strcmp (buffer, "aliases"))
972 aliases = n;
973 }
974 }
975 if (n != aliases)
976 for (p = prom_firstprop (node, buffer);
977 p && p != (char *)-1 && *p;
978 p = prom_nextprop (node, p, buffer))
979 first_prop++;
980 else {
981 char *q;
982 for (p = prom_firstprop (node, buffer);
983 p && p != (char *)-1 && *p;
984 p = prom_nextprop (node, p, buffer)) {
985 if (aliases_nodes == ALIASES_NNODES)
986 break;
987 for (i = 0; i < aliases_nodes; i++)
988 if (!strcmp (p, alias_names [i]))
989 break;
990 if (i < aliases_nodes)
991 continue;
992 q = kmalloc (strlen (p) + 1, GFP_KERNEL);
993 if (!q)
994 return 0xffff;
995 strcpy (q, p);
996 alias_names [aliases_nodes++] = q;
997 }
998 first_prop += ALIASES_NNODES;
999 }
1000 node = prom_getchild (node);
1001 if (node) {
1002 parent = get_nodes (n, node);
1003 if (parent == 0xffff)
1004 return 0xffff;
1005 nodes[n].child = parent;
1006 while ((node = prom_getsibling (node)) != 0) {
1007 i = get_nodes (n, node);
1008 if (i == 0xffff)
1009 return 0xffff;
1010 nodes[parent].next = i;
1011 parent = i;
1012 }
1013 }
1014 return n;
1015} 349}
1016 350
1017static void openprom_read_inode(struct inode * inode) 351static void openprom_read_inode(struct inode * inode)
@@ -1031,6 +365,8 @@ static int openprom_remount(struct super_block *sb, int *flags, char *data)
1031} 365}
1032 366
1033static struct super_operations openprom_sops = { 367static struct super_operations openprom_sops = {
368 .alloc_inode = openprom_alloc_inode,
369 .destroy_inode = openprom_destroy_inode,
1034 .read_inode = openprom_read_inode, 370 .read_inode = openprom_read_inode,
1035 .statfs = simple_statfs, 371 .statfs = simple_statfs,
1036 .remount_fs = openprom_remount, 372 .remount_fs = openprom_remount,
@@ -1038,7 +374,8 @@ static struct super_operations openprom_sops = {
1038 374
1039static int openprom_fill_super(struct super_block *s, void *data, int silent) 375static int openprom_fill_super(struct super_block *s, void *data, int silent)
1040{ 376{
1041 struct inode * root_inode; 377 struct inode *root_inode;
378 struct op_inode_info *oi;
1042 379
1043 s->s_flags |= MS_NOATIME; 380 s->s_flags |= MS_NOATIME;
1044 s->s_blocksize = 1024; 381 s->s_blocksize = 1024;
@@ -1049,6 +386,11 @@ static int openprom_fill_super(struct super_block *s, void *data, int silent)
1049 root_inode = iget(s, OPENPROM_ROOT_INO); 386 root_inode = iget(s, OPENPROM_ROOT_INO);
1050 if (!root_inode) 387 if (!root_inode)
1051 goto out_no_root; 388 goto out_no_root;
389
390 oi = OP_I(root_inode);
391 oi->type = op_inode_node;
392 oi->u.node = of_find_node_by_path("/");
393
1052 s->s_root = d_alloc_root(root_inode); 394 s->s_root = d_alloc_root(root_inode);
1053 if (!s->s_root) 395 if (!s->s_root)
1054 goto out_no_root; 396 goto out_no_root;
@@ -1073,29 +415,39 @@ static struct file_system_type openprom_fs_type = {
1073 .kill_sb = kill_anon_super, 415 .kill_sb = kill_anon_super,
1074}; 416};
1075 417
418static void op_inode_init_once(void *data, kmem_cache_t * cachep, unsigned long flags)
419{
420 struct op_inode_info *oi = (struct op_inode_info *) data;
421
422 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
423 SLAB_CTOR_CONSTRUCTOR)
424 inode_init_once(&oi->vfs_inode);
425}
426
1076static int __init init_openprom_fs(void) 427static int __init init_openprom_fs(void)
1077{ 428{
1078 nodes = (openpromfs_node *)__get_free_pages(GFP_KERNEL, 0); 429 int err;
1079 if (!nodes) { 430
1080 printk (KERN_WARNING "openpromfs: can't get free page\n"); 431 op_inode_cachep = kmem_cache_create("op_inode_cache",
1081 return -EIO; 432 sizeof(struct op_inode_info),
1082 } 433 0,
1083 if (get_nodes (0xffff, prom_root_node) == 0xffff) { 434 (SLAB_RECLAIM_ACCOUNT |
1084 printk (KERN_WARNING "openpromfs: couldn't setup tree\n"); 435 SLAB_MEM_SPREAD),
1085 return -EIO; 436 op_inode_init_once, NULL);
1086 } 437 if (!op_inode_cachep)
1087 nodes[last_node].first_prop = first_prop; 438 return -ENOMEM;
1088 return register_filesystem(&openprom_fs_type); 439
440 err = register_filesystem(&openprom_fs_type);
441 if (err)
442 kmem_cache_destroy(op_inode_cachep);
443
444 return err;
1089} 445}
1090 446
1091static void __exit exit_openprom_fs(void) 447static void __exit exit_openprom_fs(void)
1092{ 448{
1093 int i;
1094 unregister_filesystem(&openprom_fs_type); 449 unregister_filesystem(&openprom_fs_type);
1095 free_pages ((unsigned long)nodes, alloced); 450 kmem_cache_destroy(op_inode_cachep);
1096 for (i = 0; i < aliases_nodes; i++)
1097 kfree (alias_names [i]);
1098 nodes = NULL;
1099} 451}
1100 452
1101module_init(init_openprom_fs) 453module_init(init_openprom_fs)
diff --git a/fs/pnode.c b/fs/pnode.c
index 37b568ed0e..da42ee61c1 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -53,8 +53,7 @@ static int do_make_slave(struct vfsmount *mnt)
53 if (master) { 53 if (master) {
54 list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave) 54 list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave)
55 slave_mnt->mnt_master = master; 55 slave_mnt->mnt_master = master;
56 list_del(&mnt->mnt_slave); 56 list_move(&mnt->mnt_slave, &master->mnt_slave_list);
57 list_add(&mnt->mnt_slave, &master->mnt_slave_list);
58 list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev); 57 list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev);
59 INIT_LIST_HEAD(&mnt->mnt_slave_list); 58 INIT_LIST_HEAD(&mnt->mnt_slave_list);
60 } else { 59 } else {
@@ -283,10 +282,8 @@ static void __propagate_umount(struct vfsmount *mnt)
283 * umount the child only if the child has no 282 * umount the child only if the child has no
284 * other children 283 * other children
285 */ 284 */
286 if (child && list_empty(&child->mnt_mounts)) { 285 if (child && list_empty(&child->mnt_mounts))
287 list_del(&child->mnt_hash); 286 list_move_tail(&child->mnt_hash, &mnt->mnt_hash);
288 list_add_tail(&child->mnt_hash, &mnt->mnt_hash);
289 }
290 } 287 }
291} 288}
292 289
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 6afff725a8..6ba7785319 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -74,6 +74,16 @@
74#include <linux/poll.h> 74#include <linux/poll.h>
75#include "internal.h" 75#include "internal.h"
76 76
77/* NOTE:
78 * Implementing inode permission operations in /proc is almost
79 * certainly an error. Permission checks need to happen during
80 * each system call not at open time. The reason is that most of
81 * what we wish to check for permissions in /proc varies at runtime.
82 *
83 * The classic example of a problem is opening file descriptors
84 * in /proc for a task before it execs a suid executable.
85 */
86
77/* 87/*
78 * For hysterical raisins we keep the same inumbers as in the old procfs. 88 * For hysterical raisins we keep the same inumbers as in the old procfs.
79 * Feel free to change the macro below - just keep the range distinct from 89 * Feel free to change the macro below - just keep the range distinct from
@@ -121,6 +131,8 @@ enum pid_directory_inos {
121 PROC_TGID_ATTR_PREV, 131 PROC_TGID_ATTR_PREV,
122 PROC_TGID_ATTR_EXEC, 132 PROC_TGID_ATTR_EXEC,
123 PROC_TGID_ATTR_FSCREATE, 133 PROC_TGID_ATTR_FSCREATE,
134 PROC_TGID_ATTR_KEYCREATE,
135 PROC_TGID_ATTR_SOCKCREATE,
124#endif 136#endif
125#ifdef CONFIG_AUDITSYSCALL 137#ifdef CONFIG_AUDITSYSCALL
126 PROC_TGID_LOGINUID, 138 PROC_TGID_LOGINUID,
@@ -162,6 +174,8 @@ enum pid_directory_inos {
162 PROC_TID_ATTR_PREV, 174 PROC_TID_ATTR_PREV,
163 PROC_TID_ATTR_EXEC, 175 PROC_TID_ATTR_EXEC,
164 PROC_TID_ATTR_FSCREATE, 176 PROC_TID_ATTR_FSCREATE,
177 PROC_TID_ATTR_KEYCREATE,
178 PROC_TID_ATTR_SOCKCREATE,
165#endif 179#endif
166#ifdef CONFIG_AUDITSYSCALL 180#ifdef CONFIG_AUDITSYSCALL
167 PROC_TID_LOGINUID, 181 PROC_TID_LOGINUID,
@@ -173,6 +187,9 @@ enum pid_directory_inos {
173 PROC_TID_FD_DIR = 0x8000, /* 0x8000-0xffff */ 187 PROC_TID_FD_DIR = 0x8000, /* 0x8000-0xffff */
174}; 188};
175 189
190/* Worst case buffer size needed for holding an integer. */
191#define PROC_NUMBUF 10
192
176struct pid_entry { 193struct pid_entry {
177 int type; 194 int type;
178 int len; 195 int len;
@@ -275,6 +292,8 @@ static struct pid_entry tgid_attr_stuff[] = {
275 E(PROC_TGID_ATTR_PREV, "prev", S_IFREG|S_IRUGO), 292 E(PROC_TGID_ATTR_PREV, "prev", S_IFREG|S_IRUGO),
276 E(PROC_TGID_ATTR_EXEC, "exec", S_IFREG|S_IRUGO|S_IWUGO), 293 E(PROC_TGID_ATTR_EXEC, "exec", S_IFREG|S_IRUGO|S_IWUGO),
277 E(PROC_TGID_ATTR_FSCREATE, "fscreate", S_IFREG|S_IRUGO|S_IWUGO), 294 E(PROC_TGID_ATTR_FSCREATE, "fscreate", S_IFREG|S_IRUGO|S_IWUGO),
295 E(PROC_TGID_ATTR_KEYCREATE, "keycreate", S_IFREG|S_IRUGO|S_IWUGO),
296 E(PROC_TGID_ATTR_SOCKCREATE, "sockcreate", S_IFREG|S_IRUGO|S_IWUGO),
278 {0,0,NULL,0} 297 {0,0,NULL,0}
279}; 298};
280static struct pid_entry tid_attr_stuff[] = { 299static struct pid_entry tid_attr_stuff[] = {
@@ -282,6 +301,8 @@ static struct pid_entry tid_attr_stuff[] = {
282 E(PROC_TID_ATTR_PREV, "prev", S_IFREG|S_IRUGO), 301 E(PROC_TID_ATTR_PREV, "prev", S_IFREG|S_IRUGO),
283 E(PROC_TID_ATTR_EXEC, "exec", S_IFREG|S_IRUGO|S_IWUGO), 302 E(PROC_TID_ATTR_EXEC, "exec", S_IFREG|S_IRUGO|S_IWUGO),
284 E(PROC_TID_ATTR_FSCREATE, "fscreate", S_IFREG|S_IRUGO|S_IWUGO), 303 E(PROC_TID_ATTR_FSCREATE, "fscreate", S_IFREG|S_IRUGO|S_IWUGO),
304 E(PROC_TID_ATTR_KEYCREATE, "keycreate", S_IFREG|S_IRUGO|S_IWUGO),
305 E(PROC_TID_ATTR_SOCKCREATE, "sockcreate", S_IFREG|S_IRUGO|S_IWUGO),
285 {0,0,NULL,0} 306 {0,0,NULL,0}
286}; 307};
287#endif 308#endif
@@ -290,12 +311,15 @@ static struct pid_entry tid_attr_stuff[] = {
290 311
291static int proc_fd_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt) 312static int proc_fd_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
292{ 313{
293 struct task_struct *task = proc_task(inode); 314 struct task_struct *task = get_proc_task(inode);
294 struct files_struct *files; 315 struct files_struct *files = NULL;
295 struct file *file; 316 struct file *file;
296 int fd = proc_type(inode) - PROC_TID_FD_DIR; 317 int fd = proc_fd(inode);
297 318
298 files = get_files_struct(task); 319 if (task) {
320 files = get_files_struct(task);
321 put_task_struct(task);
322 }
299 if (files) { 323 if (files) {
300 /* 324 /*
301 * We are not taking a ref to the file structure, so we must 325 * We are not taking a ref to the file structure, so we must
@@ -327,29 +351,33 @@ static struct fs_struct *get_fs_struct(struct task_struct *task)
327 return fs; 351 return fs;
328} 352}
329 353
330static int proc_cwd_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt) 354static int get_nr_threads(struct task_struct *tsk)
331{ 355{
332 struct fs_struct *fs = get_fs_struct(proc_task(inode)); 356 /* Must be called with the rcu_read_lock held */
333 int result = -ENOENT; 357 unsigned long flags;
334 if (fs) { 358 int count = 0;
335 read_lock(&fs->lock); 359
336 *mnt = mntget(fs->pwdmnt); 360 if (lock_task_sighand(tsk, &flags)) {
337 *dentry = dget(fs->pwd); 361 count = atomic_read(&tsk->signal->count);
338 read_unlock(&fs->lock); 362 unlock_task_sighand(tsk, &flags);
339 result = 0;
340 put_fs_struct(fs);
341 } 363 }
342 return result; 364 return count;
343} 365}
344 366
345static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt) 367static int proc_cwd_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
346{ 368{
347 struct fs_struct *fs = get_fs_struct(proc_task(inode)); 369 struct task_struct *task = get_proc_task(inode);
370 struct fs_struct *fs = NULL;
348 int result = -ENOENT; 371 int result = -ENOENT;
372
373 if (task) {
374 fs = get_fs_struct(task);
375 put_task_struct(task);
376 }
349 if (fs) { 377 if (fs) {
350 read_lock(&fs->lock); 378 read_lock(&fs->lock);
351 *mnt = mntget(fs->rootmnt); 379 *mnt = mntget(fs->pwdmnt);
352 *dentry = dget(fs->root); 380 *dentry = dget(fs->pwd);
353 read_unlock(&fs->lock); 381 read_unlock(&fs->lock);
354 result = 0; 382 result = 0;
355 put_fs_struct(fs); 383 put_fs_struct(fs);
@@ -357,42 +385,16 @@ static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vf
357 return result; 385 return result;
358} 386}
359 387
360 388static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
361/* Same as proc_root_link, but this addionally tries to get fs from other
362 * threads in the group */
363static int proc_task_root_link(struct inode *inode, struct dentry **dentry,
364 struct vfsmount **mnt)
365{ 389{
366 struct fs_struct *fs; 390 struct task_struct *task = get_proc_task(inode);
391 struct fs_struct *fs = NULL;
367 int result = -ENOENT; 392 int result = -ENOENT;
368 struct task_struct *leader = proc_task(inode);
369 393
370 task_lock(leader); 394 if (task) {
371 fs = leader->fs; 395 fs = get_fs_struct(task);
372 if (fs) { 396 put_task_struct(task);
373 atomic_inc(&fs->count);
374 task_unlock(leader);
375 } else {
376 /* Try to get fs from other threads */
377 task_unlock(leader);
378 read_lock(&tasklist_lock);
379 if (pid_alive(leader)) {
380 struct task_struct *task = leader;
381
382 while ((task = next_thread(task)) != leader) {
383 task_lock(task);
384 fs = task->fs;
385 if (fs) {
386 atomic_inc(&fs->count);
387 task_unlock(task);
388 break;
389 }
390 task_unlock(task);
391 }
392 }
393 read_unlock(&tasklist_lock);
394 } 397 }
395
396 if (fs) { 398 if (fs) {
397 read_lock(&fs->lock); 399 read_lock(&fs->lock);
398 *mnt = mntget(fs->rootmnt); 400 *mnt = mntget(fs->rootmnt);
@@ -404,7 +406,6 @@ static int proc_task_root_link(struct inode *inode, struct dentry **dentry,
404 return result; 406 return result;
405} 407}
406 408
407
408#define MAY_PTRACE(task) \ 409#define MAY_PTRACE(task) \
409 (task == current || \ 410 (task == current || \
410 (task->parent == current && \ 411 (task->parent == current && \
@@ -535,142 +536,22 @@ static int proc_oom_score(struct task_struct *task, char *buffer)
535/************************************************************************/ 536/************************************************************************/
536 537
537/* permission checks */ 538/* permission checks */
538 539static int proc_fd_access_allowed(struct inode *inode)
539/* If the process being read is separated by chroot from the reading process,
540 * don't let the reader access the threads.
541 *
542 * note: this does dput(root) and mntput(vfsmnt) on exit.
543 */
544static int proc_check_chroot(struct dentry *root, struct vfsmount *vfsmnt)
545{
546 struct dentry *de, *base;
547 struct vfsmount *our_vfsmnt, *mnt;
548 int res = 0;
549
550 read_lock(&current->fs->lock);
551 our_vfsmnt = mntget(current->fs->rootmnt);
552 base = dget(current->fs->root);
553 read_unlock(&current->fs->lock);
554
555 spin_lock(&vfsmount_lock);
556 de = root;
557 mnt = vfsmnt;
558
559 while (mnt != our_vfsmnt) {
560 if (mnt == mnt->mnt_parent)
561 goto out;
562 de = mnt->mnt_mountpoint;
563 mnt = mnt->mnt_parent;
564 }
565
566 if (!is_subdir(de, base))
567 goto out;
568 spin_unlock(&vfsmount_lock);
569
570exit:
571 dput(base);
572 mntput(our_vfsmnt);
573 dput(root);
574 mntput(vfsmnt);
575 return res;
576out:
577 spin_unlock(&vfsmount_lock);
578 res = -EACCES;
579 goto exit;
580}
581
582static int proc_check_root(struct inode *inode)
583{
584 struct dentry *root;
585 struct vfsmount *vfsmnt;
586
587 if (proc_root_link(inode, &root, &vfsmnt)) /* Ewww... */
588 return -ENOENT;
589 return proc_check_chroot(root, vfsmnt);
590}
591
592static int proc_permission(struct inode *inode, int mask, struct nameidata *nd)
593{
594 if (generic_permission(inode, mask, NULL) != 0)
595 return -EACCES;
596 return proc_check_root(inode);
597}
598
599static int proc_task_permission(struct inode *inode, int mask, struct nameidata *nd)
600{
601 struct dentry *root;
602 struct vfsmount *vfsmnt;
603
604 if (generic_permission(inode, mask, NULL) != 0)
605 return -EACCES;
606
607 if (proc_task_root_link(inode, &root, &vfsmnt))
608 return -ENOENT;
609
610 return proc_check_chroot(root, vfsmnt);
611}
612
613extern struct seq_operations proc_pid_maps_op;
614static int maps_open(struct inode *inode, struct file *file)
615{
616 struct task_struct *task = proc_task(inode);
617 int ret = seq_open(file, &proc_pid_maps_op);
618 if (!ret) {
619 struct seq_file *m = file->private_data;
620 m->private = task;
621 }
622 return ret;
623}
624
625static struct file_operations proc_maps_operations = {
626 .open = maps_open,
627 .read = seq_read,
628 .llseek = seq_lseek,
629 .release = seq_release,
630};
631
632#ifdef CONFIG_NUMA
633extern struct seq_operations proc_pid_numa_maps_op;
634static int numa_maps_open(struct inode *inode, struct file *file)
635{
636 struct task_struct *task = proc_task(inode);
637 int ret = seq_open(file, &proc_pid_numa_maps_op);
638 if (!ret) {
639 struct seq_file *m = file->private_data;
640 m->private = task;
641 }
642 return ret;
643}
644
645static struct file_operations proc_numa_maps_operations = {
646 .open = numa_maps_open,
647 .read = seq_read,
648 .llseek = seq_lseek,
649 .release = seq_release,
650};
651#endif
652
653#ifdef CONFIG_MMU
654extern struct seq_operations proc_pid_smaps_op;
655static int smaps_open(struct inode *inode, struct file *file)
656{ 540{
657 struct task_struct *task = proc_task(inode); 541 struct task_struct *task;
658 int ret = seq_open(file, &proc_pid_smaps_op); 542 int allowed = 0;
659 if (!ret) { 543 /* Allow access to a task's file descriptors if it is us or we
660 struct seq_file *m = file->private_data; 544 * may use ptrace attach to the process and find out that
661 m->private = task; 545 * information.
546 */
547 task = get_proc_task(inode);
548 if (task) {
549 allowed = ptrace_may_attach(task);
550 put_task_struct(task);
662 } 551 }
663 return ret; 552 return allowed;
664} 553}
665 554
666static struct file_operations proc_smaps_operations = {
667 .open = smaps_open,
668 .read = seq_read,
669 .llseek = seq_lseek,
670 .release = seq_release,
671};
672#endif
673
674extern struct seq_operations mounts_op; 555extern struct seq_operations mounts_op;
675struct proc_mounts { 556struct proc_mounts {
676 struct seq_file m; 557 struct seq_file m;
@@ -679,16 +560,19 @@ struct proc_mounts {
679 560
680static int mounts_open(struct inode *inode, struct file *file) 561static int mounts_open(struct inode *inode, struct file *file)
681{ 562{
682 struct task_struct *task = proc_task(inode); 563 struct task_struct *task = get_proc_task(inode);
683 struct namespace *namespace; 564 struct namespace *namespace = NULL;
684 struct proc_mounts *p; 565 struct proc_mounts *p;
685 int ret = -EINVAL; 566 int ret = -EINVAL;
686 567
687 task_lock(task); 568 if (task) {
688 namespace = task->namespace; 569 task_lock(task);
689 if (namespace) 570 namespace = task->namespace;
690 get_namespace(namespace); 571 if (namespace)
691 task_unlock(task); 572 get_namespace(namespace);
573 task_unlock(task);
574 put_task_struct(task);
575 }
692 576
693 if (namespace) { 577 if (namespace) {
694 ret = -ENOMEM; 578 ret = -ENOMEM;
@@ -745,17 +629,21 @@ static struct file_operations proc_mounts_operations = {
745extern struct seq_operations mountstats_op; 629extern struct seq_operations mountstats_op;
746static int mountstats_open(struct inode *inode, struct file *file) 630static int mountstats_open(struct inode *inode, struct file *file)
747{ 631{
748 struct task_struct *task = proc_task(inode);
749 int ret = seq_open(file, &mountstats_op); 632 int ret = seq_open(file, &mountstats_op);
750 633
751 if (!ret) { 634 if (!ret) {
752 struct seq_file *m = file->private_data; 635 struct seq_file *m = file->private_data;
753 struct namespace *namespace; 636 struct namespace *namespace = NULL;
754 task_lock(task); 637 struct task_struct *task = get_proc_task(inode);
755 namespace = task->namespace; 638
756 if (namespace) 639 if (task) {
757 get_namespace(namespace); 640 task_lock(task);
758 task_unlock(task); 641 namespace = task->namespace;
642 if (namespace)
643 get_namespace(namespace);
644 task_unlock(task);
645 put_task_struct(task);
646 }
759 647
760 if (namespace) 648 if (namespace)
761 m->private = namespace; 649 m->private = namespace;
@@ -782,18 +670,27 @@ static ssize_t proc_info_read(struct file * file, char __user * buf,
782 struct inode * inode = file->f_dentry->d_inode; 670 struct inode * inode = file->f_dentry->d_inode;
783 unsigned long page; 671 unsigned long page;
784 ssize_t length; 672 ssize_t length;
785 struct task_struct *task = proc_task(inode); 673 struct task_struct *task = get_proc_task(inode);
674
675 length = -ESRCH;
676 if (!task)
677 goto out_no_task;
786 678
787 if (count > PROC_BLOCK_SIZE) 679 if (count > PROC_BLOCK_SIZE)
788 count = PROC_BLOCK_SIZE; 680 count = PROC_BLOCK_SIZE;
681
682 length = -ENOMEM;
789 if (!(page = __get_free_page(GFP_KERNEL))) 683 if (!(page = __get_free_page(GFP_KERNEL)))
790 return -ENOMEM; 684 goto out;
791 685
792 length = PROC_I(inode)->op.proc_read(task, (char*)page); 686 length = PROC_I(inode)->op.proc_read(task, (char*)page);
793 687
794 if (length >= 0) 688 if (length >= 0)
795 length = simple_read_from_buffer(buf, count, ppos, (char *)page, length); 689 length = simple_read_from_buffer(buf, count, ppos, (char *)page, length);
796 free_page(page); 690 free_page(page);
691out:
692 put_task_struct(task);
693out_no_task:
797 return length; 694 return length;
798} 695}
799 696
@@ -810,12 +707,15 @@ static int mem_open(struct inode* inode, struct file* file)
810static ssize_t mem_read(struct file * file, char __user * buf, 707static ssize_t mem_read(struct file * file, char __user * buf,
811 size_t count, loff_t *ppos) 708 size_t count, loff_t *ppos)
812{ 709{
813 struct task_struct *task = proc_task(file->f_dentry->d_inode); 710 struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
814 char *page; 711 char *page;
815 unsigned long src = *ppos; 712 unsigned long src = *ppos;
816 int ret = -ESRCH; 713 int ret = -ESRCH;
817 struct mm_struct *mm; 714 struct mm_struct *mm;
818 715
716 if (!task)
717 goto out_no_task;
718
819 if (!MAY_PTRACE(task) || !ptrace_may_attach(task)) 719 if (!MAY_PTRACE(task) || !ptrace_may_attach(task))
820 goto out; 720 goto out;
821 721
@@ -865,6 +765,8 @@ out_put:
865out_free: 765out_free:
866 free_page((unsigned long) page); 766 free_page((unsigned long) page);
867out: 767out:
768 put_task_struct(task);
769out_no_task:
868 return ret; 770 return ret;
869} 771}
870 772
@@ -877,15 +779,20 @@ static ssize_t mem_write(struct file * file, const char * buf,
877{ 779{
878 int copied = 0; 780 int copied = 0;
879 char *page; 781 char *page;
880 struct task_struct *task = proc_task(file->f_dentry->d_inode); 782 struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
881 unsigned long dst = *ppos; 783 unsigned long dst = *ppos;
882 784
785 copied = -ESRCH;
786 if (!task)
787 goto out_no_task;
788
883 if (!MAY_PTRACE(task) || !ptrace_may_attach(task)) 789 if (!MAY_PTRACE(task) || !ptrace_may_attach(task))
884 return -ESRCH; 790 goto out;
885 791
792 copied = -ENOMEM;
886 page = (char *)__get_free_page(GFP_USER); 793 page = (char *)__get_free_page(GFP_USER);
887 if (!page) 794 if (!page)
888 return -ENOMEM; 795 goto out;
889 796
890 while (count > 0) { 797 while (count > 0) {
891 int this_len, retval; 798 int this_len, retval;
@@ -908,6 +815,9 @@ static ssize_t mem_write(struct file * file, const char * buf,
908 } 815 }
909 *ppos = dst; 816 *ppos = dst;
910 free_page((unsigned long) page); 817 free_page((unsigned long) page);
818out:
819 put_task_struct(task);
820out_no_task:
911 return copied; 821 return copied;
912} 822}
913#endif 823#endif
@@ -938,13 +848,18 @@ static struct file_operations proc_mem_operations = {
938static ssize_t oom_adjust_read(struct file *file, char __user *buf, 848static ssize_t oom_adjust_read(struct file *file, char __user *buf,
939 size_t count, loff_t *ppos) 849 size_t count, loff_t *ppos)
940{ 850{
941 struct task_struct *task = proc_task(file->f_dentry->d_inode); 851 struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
942 char buffer[8]; 852 char buffer[PROC_NUMBUF];
943 size_t len; 853 size_t len;
944 int oom_adjust = task->oomkilladj; 854 int oom_adjust;
945 loff_t __ppos = *ppos; 855 loff_t __ppos = *ppos;
946 856
947 len = sprintf(buffer, "%i\n", oom_adjust); 857 if (!task)
858 return -ESRCH;
859 oom_adjust = task->oomkilladj;
860 put_task_struct(task);
861
862 len = snprintf(buffer, sizeof(buffer), "%i\n", oom_adjust);
948 if (__ppos >= len) 863 if (__ppos >= len)
949 return 0; 864 return 0;
950 if (count > len-__ppos) 865 if (count > len-__ppos)
@@ -958,15 +873,15 @@ static ssize_t oom_adjust_read(struct file *file, char __user *buf,
958static ssize_t oom_adjust_write(struct file *file, const char __user *buf, 873static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
959 size_t count, loff_t *ppos) 874 size_t count, loff_t *ppos)
960{ 875{
961 struct task_struct *task = proc_task(file->f_dentry->d_inode); 876 struct task_struct *task;
962 char buffer[8], *end; 877 char buffer[PROC_NUMBUF], *end;
963 int oom_adjust; 878 int oom_adjust;
964 879
965 if (!capable(CAP_SYS_RESOURCE)) 880 if (!capable(CAP_SYS_RESOURCE))
966 return -EPERM; 881 return -EPERM;
967 memset(buffer, 0, 8); 882 memset(buffer, 0, sizeof(buffer));
968 if (count > 6) 883 if (count > sizeof(buffer) - 1)
969 count = 6; 884 count = sizeof(buffer) - 1;
970 if (copy_from_user(buffer, buf, count)) 885 if (copy_from_user(buffer, buf, count))
971 return -EFAULT; 886 return -EFAULT;
972 oom_adjust = simple_strtol(buffer, &end, 0); 887 oom_adjust = simple_strtol(buffer, &end, 0);
@@ -974,7 +889,11 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
974 return -EINVAL; 889 return -EINVAL;
975 if (*end == '\n') 890 if (*end == '\n')
976 end++; 891 end++;
892 task = get_proc_task(file->f_dentry->d_inode);
893 if (!task)
894 return -ESRCH;
977 task->oomkilladj = oom_adjust; 895 task->oomkilladj = oom_adjust;
896 put_task_struct(task);
978 if (end - buffer == 0) 897 if (end - buffer == 0)
979 return -EIO; 898 return -EIO;
980 return end - buffer; 899 return end - buffer;
@@ -985,22 +904,21 @@ static struct file_operations proc_oom_adjust_operations = {
985 .write = oom_adjust_write, 904 .write = oom_adjust_write,
986}; 905};
987 906
988static struct inode_operations proc_mem_inode_operations = {
989 .permission = proc_permission,
990};
991
992#ifdef CONFIG_AUDITSYSCALL 907#ifdef CONFIG_AUDITSYSCALL
993#define TMPBUFLEN 21 908#define TMPBUFLEN 21
994static ssize_t proc_loginuid_read(struct file * file, char __user * buf, 909static ssize_t proc_loginuid_read(struct file * file, char __user * buf,
995 size_t count, loff_t *ppos) 910 size_t count, loff_t *ppos)
996{ 911{
997 struct inode * inode = file->f_dentry->d_inode; 912 struct inode * inode = file->f_dentry->d_inode;
998 struct task_struct *task = proc_task(inode); 913 struct task_struct *task = get_proc_task(inode);
999 ssize_t length; 914 ssize_t length;
1000 char tmpbuf[TMPBUFLEN]; 915 char tmpbuf[TMPBUFLEN];
1001 916
917 if (!task)
918 return -ESRCH;
1002 length = scnprintf(tmpbuf, TMPBUFLEN, "%u", 919 length = scnprintf(tmpbuf, TMPBUFLEN, "%u",
1003 audit_get_loginuid(task->audit_context)); 920 audit_get_loginuid(task->audit_context));
921 put_task_struct(task);
1004 return simple_read_from_buffer(buf, count, ppos, tmpbuf, length); 922 return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
1005} 923}
1006 924
@@ -1010,13 +928,12 @@ static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
1010 struct inode * inode = file->f_dentry->d_inode; 928 struct inode * inode = file->f_dentry->d_inode;
1011 char *page, *tmp; 929 char *page, *tmp;
1012 ssize_t length; 930 ssize_t length;
1013 struct task_struct *task = proc_task(inode);
1014 uid_t loginuid; 931 uid_t loginuid;
1015 932
1016 if (!capable(CAP_AUDIT_CONTROL)) 933 if (!capable(CAP_AUDIT_CONTROL))
1017 return -EPERM; 934 return -EPERM;
1018 935
1019 if (current != task) 936 if (current != pid_task(proc_pid(inode), PIDTYPE_PID))
1020 return -EPERM; 937 return -EPERM;
1021 938
1022 if (count >= PAGE_SIZE) 939 if (count >= PAGE_SIZE)
@@ -1040,7 +957,7 @@ static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
1040 goto out_free_page; 957 goto out_free_page;
1041 958
1042 } 959 }
1043 length = audit_set_loginuid(task, loginuid); 960 length = audit_set_loginuid(current, loginuid);
1044 if (likely(length == 0)) 961 if (likely(length == 0))
1045 length = count; 962 length = count;
1046 963
@@ -1059,13 +976,16 @@ static struct file_operations proc_loginuid_operations = {
1059static ssize_t seccomp_read(struct file *file, char __user *buf, 976static ssize_t seccomp_read(struct file *file, char __user *buf,
1060 size_t count, loff_t *ppos) 977 size_t count, loff_t *ppos)
1061{ 978{
1062 struct task_struct *tsk = proc_task(file->f_dentry->d_inode); 979 struct task_struct *tsk = get_proc_task(file->f_dentry->d_inode);
1063 char __buf[20]; 980 char __buf[20];
1064 loff_t __ppos = *ppos; 981 loff_t __ppos = *ppos;
1065 size_t len; 982 size_t len;
1066 983
984 if (!tsk)
985 return -ESRCH;
1067 /* no need to print the trailing zero, so use only len */ 986 /* no need to print the trailing zero, so use only len */
1068 len = sprintf(__buf, "%u\n", tsk->seccomp.mode); 987 len = sprintf(__buf, "%u\n", tsk->seccomp.mode);
988 put_task_struct(tsk);
1069 if (__ppos >= len) 989 if (__ppos >= len)
1070 return 0; 990 return 0;
1071 if (count > len - __ppos) 991 if (count > len - __ppos)
@@ -1079,29 +999,43 @@ static ssize_t seccomp_read(struct file *file, char __user *buf,
1079static ssize_t seccomp_write(struct file *file, const char __user *buf, 999static ssize_t seccomp_write(struct file *file, const char __user *buf,
1080 size_t count, loff_t *ppos) 1000 size_t count, loff_t *ppos)
1081{ 1001{
1082 struct task_struct *tsk = proc_task(file->f_dentry->d_inode); 1002 struct task_struct *tsk = get_proc_task(file->f_dentry->d_inode);
1083 char __buf[20], *end; 1003 char __buf[20], *end;
1084 unsigned int seccomp_mode; 1004 unsigned int seccomp_mode;
1005 ssize_t result;
1006
1007 result = -ESRCH;
1008 if (!tsk)
1009 goto out_no_task;
1085 1010
1086 /* can set it only once to be even more secure */ 1011 /* can set it only once to be even more secure */
1012 result = -EPERM;
1087 if (unlikely(tsk->seccomp.mode)) 1013 if (unlikely(tsk->seccomp.mode))
1088 return -EPERM; 1014 goto out;
1089 1015
1016 result = -EFAULT;
1090 memset(__buf, 0, sizeof(__buf)); 1017 memset(__buf, 0, sizeof(__buf));
1091 count = min(count, sizeof(__buf) - 1); 1018 count = min(count, sizeof(__buf) - 1);
1092 if (copy_from_user(__buf, buf, count)) 1019 if (copy_from_user(__buf, buf, count))
1093 return -EFAULT; 1020 goto out;
1021
1094 seccomp_mode = simple_strtoul(__buf, &end, 0); 1022 seccomp_mode = simple_strtoul(__buf, &end, 0);
1095 if (*end == '\n') 1023 if (*end == '\n')
1096 end++; 1024 end++;
1025 result = -EINVAL;
1097 if (seccomp_mode && seccomp_mode <= NR_SECCOMP_MODES) { 1026 if (seccomp_mode && seccomp_mode <= NR_SECCOMP_MODES) {
1098 tsk->seccomp.mode = seccomp_mode; 1027 tsk->seccomp.mode = seccomp_mode;
1099 set_tsk_thread_flag(tsk, TIF_SECCOMP); 1028 set_tsk_thread_flag(tsk, TIF_SECCOMP);
1100 } else 1029 } else
1101 return -EINVAL; 1030 goto out;
1031 result = -EIO;
1102 if (unlikely(!(end - __buf))) 1032 if (unlikely(!(end - __buf)))
1103 return -EIO; 1033 goto out;
1104 return end - __buf; 1034 result = end - __buf;
1035out:
1036 put_task_struct(tsk);
1037out_no_task:
1038 return result;
1105} 1039}
1106 1040
1107static struct file_operations proc_seccomp_operations = { 1041static struct file_operations proc_seccomp_operations = {
@@ -1118,10 +1052,8 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
1118 /* We don't need a base pointer in the /proc filesystem */ 1052 /* We don't need a base pointer in the /proc filesystem */
1119 path_release(nd); 1053 path_release(nd);
1120 1054
1121 if (current->fsuid != inode->i_uid && !capable(CAP_DAC_OVERRIDE)) 1055 /* Are we allowed to snoop on the tasks file descriptors? */
1122 goto out; 1056 if (!proc_fd_access_allowed(inode))
1123 error = proc_check_root(inode);
1124 if (error)
1125 goto out; 1057 goto out;
1126 1058
1127 error = PROC_I(inode)->op.proc_get_link(inode, &nd->dentry, &nd->mnt); 1059 error = PROC_I(inode)->op.proc_get_link(inode, &nd->dentry, &nd->mnt);
@@ -1163,12 +1095,8 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
1163 struct dentry *de; 1095 struct dentry *de;
1164 struct vfsmount *mnt = NULL; 1096 struct vfsmount *mnt = NULL;
1165 1097
1166 lock_kernel(); 1098 /* Are we allowed to snoop on the tasks file descriptors? */
1167 1099 if (!proc_fd_access_allowed(inode))
1168 if (current->fsuid != inode->i_uid && !capable(CAP_DAC_OVERRIDE))
1169 goto out;
1170 error = proc_check_root(inode);
1171 if (error)
1172 goto out; 1100 goto out;
1173 1101
1174 error = PROC_I(inode)->op.proc_get_link(inode, &de, &mnt); 1102 error = PROC_I(inode)->op.proc_get_link(inode, &de, &mnt);
@@ -1179,7 +1107,6 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
1179 dput(de); 1107 dput(de);
1180 mntput(mnt); 1108 mntput(mnt);
1181out: 1109out:
1182 unlock_kernel();
1183 return error; 1110 return error;
1184} 1111}
1185 1112
@@ -1188,21 +1115,20 @@ static struct inode_operations proc_pid_link_inode_operations = {
1188 .follow_link = proc_pid_follow_link 1115 .follow_link = proc_pid_follow_link
1189}; 1116};
1190 1117
1191#define NUMBUF 10
1192
1193static int proc_readfd(struct file * filp, void * dirent, filldir_t filldir) 1118static int proc_readfd(struct file * filp, void * dirent, filldir_t filldir)
1194{ 1119{
1195 struct inode *inode = filp->f_dentry->d_inode; 1120 struct dentry *dentry = filp->f_dentry;
1196 struct task_struct *p = proc_task(inode); 1121 struct inode *inode = dentry->d_inode;
1122 struct task_struct *p = get_proc_task(inode);
1197 unsigned int fd, tid, ino; 1123 unsigned int fd, tid, ino;
1198 int retval; 1124 int retval;
1199 char buf[NUMBUF]; 1125 char buf[PROC_NUMBUF];
1200 struct files_struct * files; 1126 struct files_struct * files;
1201 struct fdtable *fdt; 1127 struct fdtable *fdt;
1202 1128
1203 retval = -ENOENT; 1129 retval = -ENOENT;
1204 if (!pid_alive(p)) 1130 if (!p)
1205 goto out; 1131 goto out_no_task;
1206 retval = 0; 1132 retval = 0;
1207 tid = p->pid; 1133 tid = p->pid;
1208 1134
@@ -1213,7 +1139,7 @@ static int proc_readfd(struct file * filp, void * dirent, filldir_t filldir)
1213 goto out; 1139 goto out;
1214 filp->f_pos++; 1140 filp->f_pos++;
1215 case 1: 1141 case 1:
1216 ino = fake_ino(tid, PROC_TID_INO); 1142 ino = parent_ino(dentry);
1217 if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0) 1143 if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0)
1218 goto out; 1144 goto out;
1219 filp->f_pos++; 1145 filp->f_pos++;
@@ -1232,7 +1158,7 @@ static int proc_readfd(struct file * filp, void * dirent, filldir_t filldir)
1232 continue; 1158 continue;
1233 rcu_read_unlock(); 1159 rcu_read_unlock();
1234 1160
1235 j = NUMBUF; 1161 j = PROC_NUMBUF;
1236 i = fd; 1162 i = fd;
1237 do { 1163 do {
1238 j--; 1164 j--;
@@ -1241,7 +1167,7 @@ static int proc_readfd(struct file * filp, void * dirent, filldir_t filldir)
1241 } while (i); 1167 } while (i);
1242 1168
1243 ino = fake_ino(tid, PROC_TID_FD_DIR + fd); 1169 ino = fake_ino(tid, PROC_TID_FD_DIR + fd);
1244 if (filldir(dirent, buf+j, NUMBUF-j, fd+2, ino, DT_LNK) < 0) { 1170 if (filldir(dirent, buf+j, PROC_NUMBUF-j, fd+2, ino, DT_LNK) < 0) {
1245 rcu_read_lock(); 1171 rcu_read_lock();
1246 break; 1172 break;
1247 } 1173 }
@@ -1251,6 +1177,8 @@ static int proc_readfd(struct file * filp, void * dirent, filldir_t filldir)
1251 put_files_struct(files); 1177 put_files_struct(files);
1252 } 1178 }
1253out: 1179out:
1180 put_task_struct(p);
1181out_no_task:
1254 return retval; 1182 return retval;
1255} 1183}
1256 1184
@@ -1262,16 +1190,18 @@ static int proc_pident_readdir(struct file *filp,
1262 int pid; 1190 int pid;
1263 struct dentry *dentry = filp->f_dentry; 1191 struct dentry *dentry = filp->f_dentry;
1264 struct inode *inode = dentry->d_inode; 1192 struct inode *inode = dentry->d_inode;
1193 struct task_struct *task = get_proc_task(inode);
1265 struct pid_entry *p; 1194 struct pid_entry *p;
1266 ino_t ino; 1195 ino_t ino;
1267 int ret; 1196 int ret;
1268 1197
1269 ret = -ENOENT; 1198 ret = -ENOENT;
1270 if (!pid_alive(proc_task(inode))) 1199 if (!task)
1271 goto out; 1200 goto out;
1272 1201
1273 ret = 0; 1202 ret = 0;
1274 pid = proc_task(inode)->pid; 1203 pid = task->pid;
1204 put_task_struct(task);
1275 i = filp->f_pos; 1205 i = filp->f_pos;
1276 switch (i) { 1206 switch (i) {
1277 case 0: 1207 case 0:
@@ -1354,22 +1284,19 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st
1354 1284
1355 /* Common stuff */ 1285 /* Common stuff */
1356 ei = PROC_I(inode); 1286 ei = PROC_I(inode);
1357 ei->task = NULL;
1358 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; 1287 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
1359 inode->i_ino = fake_ino(task->pid, ino); 1288 inode->i_ino = fake_ino(task->pid, ino);
1360 1289
1361 if (!pid_alive(task))
1362 goto out_unlock;
1363
1364 /* 1290 /*
1365 * grab the reference to task. 1291 * grab the reference to task.
1366 */ 1292 */
1367 get_task_struct(task); 1293 ei->pid = get_pid(task->pids[PIDTYPE_PID].pid);
1368 ei->task = task; 1294 if (!ei->pid)
1369 ei->type = ino; 1295 goto out_unlock;
1296
1370 inode->i_uid = 0; 1297 inode->i_uid = 0;
1371 inode->i_gid = 0; 1298 inode->i_gid = 0;
1372 if (ino == PROC_TGID_INO || ino == PROC_TID_INO || task_dumpable(task)) { 1299 if (task_dumpable(task)) {
1373 inode->i_uid = task->euid; 1300 inode->i_uid = task->euid;
1374 inode->i_gid = task->egid; 1301 inode->i_gid = task->egid;
1375 } 1302 }
@@ -1379,7 +1306,6 @@ out:
1379 return inode; 1306 return inode;
1380 1307
1381out_unlock: 1308out_unlock:
1382 ei->pde = NULL;
1383 iput(inode); 1309 iput(inode);
1384 return NULL; 1310 return NULL;
1385} 1311}
@@ -1393,13 +1319,21 @@ out_unlock:
1393 * 1319 *
1394 * Rewrite the inode's ownerships here because the owning task may have 1320 * Rewrite the inode's ownerships here because the owning task may have
1395 * performed a setuid(), etc. 1321 * performed a setuid(), etc.
1322 *
1323 * Before the /proc/pid/status file was created the only way to read
1324 * the effective uid of a /process was to stat /proc/pid. Reading
1325 * /proc/pid/status is slow enough that procps and other packages
1326 * kept stating /proc/pid. To keep the rules in /proc simple I have
1327 * made this apply to all per process world readable and executable
1328 * directories.
1396 */ 1329 */
1397static int pid_revalidate(struct dentry *dentry, struct nameidata *nd) 1330static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
1398{ 1331{
1399 struct inode *inode = dentry->d_inode; 1332 struct inode *inode = dentry->d_inode;
1400 struct task_struct *task = proc_task(inode); 1333 struct task_struct *task = get_proc_task(inode);
1401 if (pid_alive(task)) { 1334 if (task) {
1402 if (proc_type(inode) == PROC_TGID_INO || proc_type(inode) == PROC_TID_INO || task_dumpable(task)) { 1335 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
1336 task_dumpable(task)) {
1403 inode->i_uid = task->euid; 1337 inode->i_uid = task->euid;
1404 inode->i_gid = task->egid; 1338 inode->i_gid = task->egid;
1405 } else { 1339 } else {
@@ -1407,59 +1341,75 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
1407 inode->i_gid = 0; 1341 inode->i_gid = 0;
1408 } 1342 }
1409 security_task_to_inode(task, inode); 1343 security_task_to_inode(task, inode);
1344 put_task_struct(task);
1410 return 1; 1345 return 1;
1411 } 1346 }
1412 d_drop(dentry); 1347 d_drop(dentry);
1413 return 0; 1348 return 0;
1414} 1349}
1415 1350
1351static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
1352{
1353 struct inode *inode = dentry->d_inode;
1354 struct task_struct *task;
1355 generic_fillattr(inode, stat);
1356
1357 rcu_read_lock();
1358 stat->uid = 0;
1359 stat->gid = 0;
1360 task = pid_task(proc_pid(inode), PIDTYPE_PID);
1361 if (task) {
1362 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
1363 task_dumpable(task)) {
1364 stat->uid = task->euid;
1365 stat->gid = task->egid;
1366 }
1367 }
1368 rcu_read_unlock();
1369 return 0;
1370}
1371
1416static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd) 1372static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
1417{ 1373{
1418 struct inode *inode = dentry->d_inode; 1374 struct inode *inode = dentry->d_inode;
1419 struct task_struct *task = proc_task(inode); 1375 struct task_struct *task = get_proc_task(inode);
1420 int fd = proc_type(inode) - PROC_TID_FD_DIR; 1376 int fd = proc_fd(inode);
1421 struct files_struct *files; 1377 struct files_struct *files;
1422 1378
1423 files = get_files_struct(task); 1379 if (task) {
1424 if (files) { 1380 files = get_files_struct(task);
1425 rcu_read_lock(); 1381 if (files) {
1426 if (fcheck_files(files, fd)) { 1382 rcu_read_lock();
1383 if (fcheck_files(files, fd)) {
1384 rcu_read_unlock();
1385 put_files_struct(files);
1386 if (task_dumpable(task)) {
1387 inode->i_uid = task->euid;
1388 inode->i_gid = task->egid;
1389 } else {
1390 inode->i_uid = 0;
1391 inode->i_gid = 0;
1392 }
1393 security_task_to_inode(task, inode);
1394 put_task_struct(task);
1395 return 1;
1396 }
1427 rcu_read_unlock(); 1397 rcu_read_unlock();
1428 put_files_struct(files); 1398 put_files_struct(files);
1429 if (task_dumpable(task)) {
1430 inode->i_uid = task->euid;
1431 inode->i_gid = task->egid;
1432 } else {
1433 inode->i_uid = 0;
1434 inode->i_gid = 0;
1435 }
1436 security_task_to_inode(task, inode);
1437 return 1;
1438 } 1399 }
1439 rcu_read_unlock(); 1400 put_task_struct(task);
1440 put_files_struct(files);
1441 } 1401 }
1442 d_drop(dentry); 1402 d_drop(dentry);
1443 return 0; 1403 return 0;
1444} 1404}
1445 1405
1446static void pid_base_iput(struct dentry *dentry, struct inode *inode)
1447{
1448 struct task_struct *task = proc_task(inode);
1449 spin_lock(&task->proc_lock);
1450 if (task->proc_dentry == dentry)
1451 task->proc_dentry = NULL;
1452 spin_unlock(&task->proc_lock);
1453 iput(inode);
1454}
1455
1456static int pid_delete_dentry(struct dentry * dentry) 1406static int pid_delete_dentry(struct dentry * dentry)
1457{ 1407{
1458 /* Is the task we represent dead? 1408 /* Is the task we represent dead?
1459 * If so, then don't put the dentry on the lru list, 1409 * If so, then don't put the dentry on the lru list,
1460 * kill it immediately. 1410 * kill it immediately.
1461 */ 1411 */
1462 return !pid_alive(proc_task(dentry->d_inode)); 1412 return !proc_pid(dentry->d_inode)->tasks[PIDTYPE_PID].first;
1463} 1413}
1464 1414
1465static struct dentry_operations tid_fd_dentry_operations = 1415static struct dentry_operations tid_fd_dentry_operations =
@@ -1474,13 +1424,6 @@ static struct dentry_operations pid_dentry_operations =
1474 .d_delete = pid_delete_dentry, 1424 .d_delete = pid_delete_dentry,
1475}; 1425};
1476 1426
1477static struct dentry_operations pid_base_dentry_operations =
1478{
1479 .d_revalidate = pid_revalidate,
1480 .d_iput = pid_base_iput,
1481 .d_delete = pid_delete_dentry,
1482};
1483
1484/* Lookups */ 1427/* Lookups */
1485 1428
1486static unsigned name_to_int(struct dentry *dentry) 1429static unsigned name_to_int(struct dentry *dentry)
@@ -1508,22 +1451,24 @@ out:
1508/* SMP-safe */ 1451/* SMP-safe */
1509static struct dentry *proc_lookupfd(struct inode * dir, struct dentry * dentry, struct nameidata *nd) 1452static struct dentry *proc_lookupfd(struct inode * dir, struct dentry * dentry, struct nameidata *nd)
1510{ 1453{
1511 struct task_struct *task = proc_task(dir); 1454 struct task_struct *task = get_proc_task(dir);
1512 unsigned fd = name_to_int(dentry); 1455 unsigned fd = name_to_int(dentry);
1456 struct dentry *result = ERR_PTR(-ENOENT);
1513 struct file * file; 1457 struct file * file;
1514 struct files_struct * files; 1458 struct files_struct * files;
1515 struct inode *inode; 1459 struct inode *inode;
1516 struct proc_inode *ei; 1460 struct proc_inode *ei;
1517 1461
1462 if (!task)
1463 goto out_no_task;
1518 if (fd == ~0U) 1464 if (fd == ~0U)
1519 goto out; 1465 goto out;
1520 if (!pid_alive(task))
1521 goto out;
1522 1466
1523 inode = proc_pid_make_inode(dir->i_sb, task, PROC_TID_FD_DIR+fd); 1467 inode = proc_pid_make_inode(dir->i_sb, task, PROC_TID_FD_DIR+fd);
1524 if (!inode) 1468 if (!inode)
1525 goto out; 1469 goto out;
1526 ei = PROC_I(inode); 1470 ei = PROC_I(inode);
1471 ei->fd = fd;
1527 files = get_files_struct(task); 1472 files = get_files_struct(task);
1528 if (!files) 1473 if (!files)
1529 goto out_unlock; 1474 goto out_unlock;
@@ -1548,19 +1493,25 @@ static struct dentry *proc_lookupfd(struct inode * dir, struct dentry * dentry,
1548 ei->op.proc_get_link = proc_fd_link; 1493 ei->op.proc_get_link = proc_fd_link;
1549 dentry->d_op = &tid_fd_dentry_operations; 1494 dentry->d_op = &tid_fd_dentry_operations;
1550 d_add(dentry, inode); 1495 d_add(dentry, inode);
1551 return NULL; 1496 /* Close the race of the process dying before we return the dentry */
1497 if (tid_fd_revalidate(dentry, NULL))
1498 result = NULL;
1499out:
1500 put_task_struct(task);
1501out_no_task:
1502 return result;
1552 1503
1553out_unlock2: 1504out_unlock2:
1554 spin_unlock(&files->file_lock); 1505 spin_unlock(&files->file_lock);
1555 put_files_struct(files); 1506 put_files_struct(files);
1556out_unlock: 1507out_unlock:
1557 iput(inode); 1508 iput(inode);
1558out: 1509 goto out;
1559 return ERR_PTR(-ENOENT);
1560} 1510}
1561 1511
1562static int proc_task_readdir(struct file * filp, void * dirent, filldir_t filldir); 1512static int proc_task_readdir(struct file * filp, void * dirent, filldir_t filldir);
1563static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd); 1513static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd);
1514static int proc_task_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
1564 1515
1565static struct file_operations proc_fd_operations = { 1516static struct file_operations proc_fd_operations = {
1566 .read = generic_read_dir, 1517 .read = generic_read_dir,
@@ -1577,12 +1528,11 @@ static struct file_operations proc_task_operations = {
1577 */ 1528 */
1578static struct inode_operations proc_fd_inode_operations = { 1529static struct inode_operations proc_fd_inode_operations = {
1579 .lookup = proc_lookupfd, 1530 .lookup = proc_lookupfd,
1580 .permission = proc_permission,
1581}; 1531};
1582 1532
1583static struct inode_operations proc_task_inode_operations = { 1533static struct inode_operations proc_task_inode_operations = {
1584 .lookup = proc_task_lookup, 1534 .lookup = proc_task_lookup,
1585 .permission = proc_task_permission, 1535 .getattr = proc_task_getattr,
1586}; 1536};
1587 1537
1588#ifdef CONFIG_SECURITY 1538#ifdef CONFIG_SECURITY
@@ -1592,12 +1542,17 @@ static ssize_t proc_pid_attr_read(struct file * file, char __user * buf,
1592 struct inode * inode = file->f_dentry->d_inode; 1542 struct inode * inode = file->f_dentry->d_inode;
1593 unsigned long page; 1543 unsigned long page;
1594 ssize_t length; 1544 ssize_t length;
1595 struct task_struct *task = proc_task(inode); 1545 struct task_struct *task = get_proc_task(inode);
1546
1547 length = -ESRCH;
1548 if (!task)
1549 goto out_no_task;
1596 1550
1597 if (count > PAGE_SIZE) 1551 if (count > PAGE_SIZE)
1598 count = PAGE_SIZE; 1552 count = PAGE_SIZE;
1553 length = -ENOMEM;
1599 if (!(page = __get_free_page(GFP_KERNEL))) 1554 if (!(page = __get_free_page(GFP_KERNEL)))
1600 return -ENOMEM; 1555 goto out;
1601 1556
1602 length = security_getprocattr(task, 1557 length = security_getprocattr(task,
1603 (char*)file->f_dentry->d_name.name, 1558 (char*)file->f_dentry->d_name.name,
@@ -1605,6 +1560,9 @@ static ssize_t proc_pid_attr_read(struct file * file, char __user * buf,
1605 if (length >= 0) 1560 if (length >= 0)
1606 length = simple_read_from_buffer(buf, count, ppos, (char *)page, length); 1561 length = simple_read_from_buffer(buf, count, ppos, (char *)page, length);
1607 free_page(page); 1562 free_page(page);
1563out:
1564 put_task_struct(task);
1565out_no_task:
1608 return length; 1566 return length;
1609} 1567}
1610 1568
@@ -1614,26 +1572,36 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
1614 struct inode * inode = file->f_dentry->d_inode; 1572 struct inode * inode = file->f_dentry->d_inode;
1615 char *page; 1573 char *page;
1616 ssize_t length; 1574 ssize_t length;
1617 struct task_struct *task = proc_task(inode); 1575 struct task_struct *task = get_proc_task(inode);
1618 1576
1577 length = -ESRCH;
1578 if (!task)
1579 goto out_no_task;
1619 if (count > PAGE_SIZE) 1580 if (count > PAGE_SIZE)
1620 count = PAGE_SIZE; 1581 count = PAGE_SIZE;
1621 if (*ppos != 0) { 1582
1622 /* No partial writes. */ 1583 /* No partial writes. */
1623 return -EINVAL; 1584 length = -EINVAL;
1624 } 1585 if (*ppos != 0)
1586 goto out;
1587
1588 length = -ENOMEM;
1625 page = (char*)__get_free_page(GFP_USER); 1589 page = (char*)__get_free_page(GFP_USER);
1626 if (!page) 1590 if (!page)
1627 return -ENOMEM; 1591 goto out;
1592
1628 length = -EFAULT; 1593 length = -EFAULT;
1629 if (copy_from_user(page, buf, count)) 1594 if (copy_from_user(page, buf, count))
1630 goto out; 1595 goto out_free;
1631 1596
1632 length = security_setprocattr(task, 1597 length = security_setprocattr(task,
1633 (char*)file->f_dentry->d_name.name, 1598 (char*)file->f_dentry->d_name.name,
1634 (void*)page, count); 1599 (void*)page, count);
1635out: 1600out_free:
1636 free_page((unsigned long) page); 1601 free_page((unsigned long) page);
1602out:
1603 put_task_struct(task);
1604out_no_task:
1637 return length; 1605 return length;
1638} 1606}
1639 1607
@@ -1648,24 +1616,22 @@ static struct file_operations proc_tgid_attr_operations;
1648static struct inode_operations proc_tgid_attr_inode_operations; 1616static struct inode_operations proc_tgid_attr_inode_operations;
1649#endif 1617#endif
1650 1618
1651static int get_tid_list(int index, unsigned int *tids, struct inode *dir);
1652
1653/* SMP-safe */ 1619/* SMP-safe */
1654static struct dentry *proc_pident_lookup(struct inode *dir, 1620static struct dentry *proc_pident_lookup(struct inode *dir,
1655 struct dentry *dentry, 1621 struct dentry *dentry,
1656 struct pid_entry *ents) 1622 struct pid_entry *ents)
1657{ 1623{
1658 struct inode *inode; 1624 struct inode *inode;
1659 int error; 1625 struct dentry *error;
1660 struct task_struct *task = proc_task(dir); 1626 struct task_struct *task = get_proc_task(dir);
1661 struct pid_entry *p; 1627 struct pid_entry *p;
1662 struct proc_inode *ei; 1628 struct proc_inode *ei;
1663 1629
1664 error = -ENOENT; 1630 error = ERR_PTR(-ENOENT);
1665 inode = NULL; 1631 inode = NULL;
1666 1632
1667 if (!pid_alive(task)) 1633 if (!task)
1668 goto out; 1634 goto out_no_task;
1669 1635
1670 for (p = ents; p->name; p++) { 1636 for (p = ents; p->name; p++) {
1671 if (p->len != dentry->d_name.len) 1637 if (p->len != dentry->d_name.len)
@@ -1676,7 +1642,7 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
1676 if (!p->name) 1642 if (!p->name)
1677 goto out; 1643 goto out;
1678 1644
1679 error = -EINVAL; 1645 error = ERR_PTR(-EINVAL);
1680 inode = proc_pid_make_inode(dir->i_sb, task, p->type); 1646 inode = proc_pid_make_inode(dir->i_sb, task, p->type);
1681 if (!inode) 1647 if (!inode)
1682 goto out; 1648 goto out;
@@ -1689,7 +1655,7 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
1689 */ 1655 */
1690 switch(p->type) { 1656 switch(p->type) {
1691 case PROC_TGID_TASK: 1657 case PROC_TGID_TASK:
1692 inode->i_nlink = 2 + get_tid_list(2, NULL, dir); 1658 inode->i_nlink = 2;
1693 inode->i_op = &proc_task_inode_operations; 1659 inode->i_op = &proc_task_inode_operations;
1694 inode->i_fop = &proc_task_operations; 1660 inode->i_fop = &proc_task_operations;
1695 break; 1661 break;
@@ -1759,7 +1725,6 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
1759#endif 1725#endif
1760 case PROC_TID_MEM: 1726 case PROC_TID_MEM:
1761 case PROC_TGID_MEM: 1727 case PROC_TGID_MEM:
1762 inode->i_op = &proc_mem_inode_operations;
1763 inode->i_fop = &proc_mem_operations; 1728 inode->i_fop = &proc_mem_operations;
1764 break; 1729 break;
1765#ifdef CONFIG_SECCOMP 1730#ifdef CONFIG_SECCOMP
@@ -1801,6 +1766,10 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
1801 case PROC_TGID_ATTR_EXEC: 1766 case PROC_TGID_ATTR_EXEC:
1802 case PROC_TID_ATTR_FSCREATE: 1767 case PROC_TID_ATTR_FSCREATE:
1803 case PROC_TGID_ATTR_FSCREATE: 1768 case PROC_TGID_ATTR_FSCREATE:
1769 case PROC_TID_ATTR_KEYCREATE:
1770 case PROC_TGID_ATTR_KEYCREATE:
1771 case PROC_TID_ATTR_SOCKCREATE:
1772 case PROC_TGID_ATTR_SOCKCREATE:
1804 inode->i_fop = &proc_pid_attr_operations; 1773 inode->i_fop = &proc_pid_attr_operations;
1805 break; 1774 break;
1806#endif 1775#endif
@@ -1842,14 +1811,18 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
1842 default: 1811 default:
1843 printk("procfs: impossible type (%d)",p->type); 1812 printk("procfs: impossible type (%d)",p->type);
1844 iput(inode); 1813 iput(inode);
1845 return ERR_PTR(-EINVAL); 1814 error = ERR_PTR(-EINVAL);
1815 goto out;
1846 } 1816 }
1847 dentry->d_op = &pid_dentry_operations; 1817 dentry->d_op = &pid_dentry_operations;
1848 d_add(dentry, inode); 1818 d_add(dentry, inode);
1849 return NULL; 1819 /* Close the race of the process dying before we return the dentry */
1850 1820 if (pid_revalidate(dentry, NULL))
1821 error = NULL;
1851out: 1822out:
1852 return ERR_PTR(error); 1823 put_task_struct(task);
1824out_no_task:
1825 return error;
1853} 1826}
1854 1827
1855static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd){ 1828static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd){
@@ -1872,10 +1845,12 @@ static struct file_operations proc_tid_base_operations = {
1872 1845
1873static struct inode_operations proc_tgid_base_inode_operations = { 1846static struct inode_operations proc_tgid_base_inode_operations = {
1874 .lookup = proc_tgid_base_lookup, 1847 .lookup = proc_tgid_base_lookup,
1848 .getattr = pid_getattr,
1875}; 1849};
1876 1850
1877static struct inode_operations proc_tid_base_inode_operations = { 1851static struct inode_operations proc_tid_base_inode_operations = {
1878 .lookup = proc_tid_base_lookup, 1852 .lookup = proc_tid_base_lookup,
1853 .getattr = pid_getattr,
1879}; 1854};
1880 1855
1881#ifdef CONFIG_SECURITY 1856#ifdef CONFIG_SECURITY
@@ -1917,10 +1892,12 @@ static struct dentry *proc_tid_attr_lookup(struct inode *dir,
1917 1892
1918static struct inode_operations proc_tgid_attr_inode_operations = { 1893static struct inode_operations proc_tgid_attr_inode_operations = {
1919 .lookup = proc_tgid_attr_lookup, 1894 .lookup = proc_tgid_attr_lookup,
1895 .getattr = pid_getattr,
1920}; 1896};
1921 1897
1922static struct inode_operations proc_tid_attr_inode_operations = { 1898static struct inode_operations proc_tid_attr_inode_operations = {
1923 .lookup = proc_tid_attr_lookup, 1899 .lookup = proc_tid_attr_lookup,
1900 .getattr = pid_getattr,
1924}; 1901};
1925#endif 1902#endif
1926 1903
@@ -1930,14 +1907,14 @@ static struct inode_operations proc_tid_attr_inode_operations = {
1930static int proc_self_readlink(struct dentry *dentry, char __user *buffer, 1907static int proc_self_readlink(struct dentry *dentry, char __user *buffer,
1931 int buflen) 1908 int buflen)
1932{ 1909{
1933 char tmp[30]; 1910 char tmp[PROC_NUMBUF];
1934 sprintf(tmp, "%d", current->tgid); 1911 sprintf(tmp, "%d", current->tgid);
1935 return vfs_readlink(dentry,buffer,buflen,tmp); 1912 return vfs_readlink(dentry,buffer,buflen,tmp);
1936} 1913}
1937 1914
1938static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd) 1915static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
1939{ 1916{
1940 char tmp[30]; 1917 char tmp[PROC_NUMBUF];
1941 sprintf(tmp, "%d", current->tgid); 1918 sprintf(tmp, "%d", current->tgid);
1942 return ERR_PTR(vfs_follow_link(nd,tmp)); 1919 return ERR_PTR(vfs_follow_link(nd,tmp));
1943} 1920}
@@ -1948,67 +1925,80 @@ static struct inode_operations proc_self_inode_operations = {
1948}; 1925};
1949 1926
1950/** 1927/**
1951 * proc_pid_unhash - Unhash /proc/@pid entry from the dcache. 1928 * proc_flush_task - Remove dcache entries for @task from the /proc dcache.
1952 * @p: task that should be flushed. 1929 *
1930 * @task: task that should be flushed.
1931 *
1932 * Looks in the dcache for
1933 * /proc/@pid
1934 * /proc/@tgid/task/@pid
1935 * if either directory is present flushes it and all of it'ts children
1936 * from the dcache.
1953 * 1937 *
1954 * Drops the /proc/@pid dcache entry from the hash chains. 1938 * It is safe and reasonable to cache /proc entries for a task until
1939 * that task exits. After that they just clog up the dcache with
1940 * useless entries, possibly causing useful dcache entries to be
1941 * flushed instead. This routine is proved to flush those useless
1942 * dcache entries at process exit time.
1955 * 1943 *
1956 * Dropping /proc/@pid entries and detach_pid must be synchroneous, 1944 * NOTE: This routine is just an optimization so it does not guarantee
1957 * otherwise e.g. /proc/@pid/exe might point to the wrong executable, 1945 * that no dcache entries will exist at process exit time it
1958 * if the pid value is immediately reused. This is enforced by 1946 * just makes it very unlikely that any will persist.
1959 * - caller must acquire spin_lock(p->proc_lock)
1960 * - must be called before detach_pid()
1961 * - proc_pid_lookup acquires proc_lock, and checks that
1962 * the target is not dead by looking at the attach count
1963 * of PIDTYPE_PID.
1964 */ 1947 */
1965 1948void proc_flush_task(struct task_struct *task)
1966struct dentry *proc_pid_unhash(struct task_struct *p)
1967{ 1949{
1968 struct dentry *proc_dentry; 1950 struct dentry *dentry, *leader, *dir;
1951 char buf[PROC_NUMBUF];
1952 struct qstr name;
1953
1954 name.name = buf;
1955 name.len = snprintf(buf, sizeof(buf), "%d", task->pid);
1956 dentry = d_hash_and_lookup(proc_mnt->mnt_root, &name);
1957 if (dentry) {
1958 shrink_dcache_parent(dentry);
1959 d_drop(dentry);
1960 dput(dentry);
1961 }
1969 1962
1970 proc_dentry = p->proc_dentry; 1963 if (thread_group_leader(task))
1971 if (proc_dentry != NULL) { 1964 goto out;
1972 1965
1973 spin_lock(&dcache_lock); 1966 name.name = buf;
1974 spin_lock(&proc_dentry->d_lock); 1967 name.len = snprintf(buf, sizeof(buf), "%d", task->tgid);
1975 if (!d_unhashed(proc_dentry)) { 1968 leader = d_hash_and_lookup(proc_mnt->mnt_root, &name);
1976 dget_locked(proc_dentry); 1969 if (!leader)
1977 __d_drop(proc_dentry); 1970 goto out;
1978 spin_unlock(&proc_dentry->d_lock);
1979 } else {
1980 spin_unlock(&proc_dentry->d_lock);
1981 proc_dentry = NULL;
1982 }
1983 spin_unlock(&dcache_lock);
1984 }
1985 return proc_dentry;
1986}
1987 1971
1988/** 1972 name.name = "task";
1989 * proc_pid_flush - recover memory used by stale /proc/@pid/x entries 1973 name.len = strlen(name.name);
1990 * @proc_dentry: directoy to prune. 1974 dir = d_hash_and_lookup(leader, &name);
1991 * 1975 if (!dir)
1992 * Shrink the /proc directory that was used by the just killed thread. 1976 goto out_put_leader;
1993 */ 1977
1994 1978 name.name = buf;
1995void proc_pid_flush(struct dentry *proc_dentry) 1979 name.len = snprintf(buf, sizeof(buf), "%d", task->pid);
1996{ 1980 dentry = d_hash_and_lookup(dir, &name);
1997 might_sleep(); 1981 if (dentry) {
1998 if(proc_dentry != NULL) { 1982 shrink_dcache_parent(dentry);
1999 shrink_dcache_parent(proc_dentry); 1983 d_drop(dentry);
2000 dput(proc_dentry); 1984 dput(dentry);
2001 } 1985 }
1986
1987 dput(dir);
1988out_put_leader:
1989 dput(leader);
1990out:
1991 return;
2002} 1992}
2003 1993
2004/* SMP-safe */ 1994/* SMP-safe */
2005struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd) 1995struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
2006{ 1996{
1997 struct dentry *result = ERR_PTR(-ENOENT);
2007 struct task_struct *task; 1998 struct task_struct *task;
2008 struct inode *inode; 1999 struct inode *inode;
2009 struct proc_inode *ei; 2000 struct proc_inode *ei;
2010 unsigned tgid; 2001 unsigned tgid;
2011 int died;
2012 2002
2013 if (dentry->d_name.len == 4 && !memcmp(dentry->d_name.name,"self",4)) { 2003 if (dentry->d_name.len == 4 && !memcmp(dentry->d_name.name,"self",4)) {
2014 inode = new_inode(dir->i_sb); 2004 inode = new_inode(dir->i_sb);
@@ -2029,21 +2019,18 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
2029 if (tgid == ~0U) 2019 if (tgid == ~0U)
2030 goto out; 2020 goto out;
2031 2021
2032 read_lock(&tasklist_lock); 2022 rcu_read_lock();
2033 task = find_task_by_pid(tgid); 2023 task = find_task_by_pid(tgid);
2034 if (task) 2024 if (task)
2035 get_task_struct(task); 2025 get_task_struct(task);
2036 read_unlock(&tasklist_lock); 2026 rcu_read_unlock();
2037 if (!task) 2027 if (!task)
2038 goto out; 2028 goto out;
2039 2029
2040 inode = proc_pid_make_inode(dir->i_sb, task, PROC_TGID_INO); 2030 inode = proc_pid_make_inode(dir->i_sb, task, PROC_TGID_INO);
2031 if (!inode)
2032 goto out_put_task;
2041 2033
2042
2043 if (!inode) {
2044 put_task_struct(task);
2045 goto out;
2046 }
2047 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO; 2034 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
2048 inode->i_op = &proc_tgid_base_inode_operations; 2035 inode->i_op = &proc_tgid_base_inode_operations;
2049 inode->i_fop = &proc_tgid_base_operations; 2036 inode->i_fop = &proc_tgid_base_operations;
@@ -2054,45 +2041,40 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
2054 inode->i_nlink = 4; 2041 inode->i_nlink = 4;
2055#endif 2042#endif
2056 2043
2057 dentry->d_op = &pid_base_dentry_operations; 2044 dentry->d_op = &pid_dentry_operations;
2058 2045
2059 died = 0;
2060 d_add(dentry, inode); 2046 d_add(dentry, inode);
2061 spin_lock(&task->proc_lock); 2047 /* Close the race of the process dying before we return the dentry */
2062 task->proc_dentry = dentry; 2048 if (pid_revalidate(dentry, NULL))
2063 if (!pid_alive(task)) { 2049 result = NULL;
2064 dentry = proc_pid_unhash(task);
2065 died = 1;
2066 }
2067 spin_unlock(&task->proc_lock);
2068 2050
2051out_put_task:
2069 put_task_struct(task); 2052 put_task_struct(task);
2070 if (died) {
2071 proc_pid_flush(dentry);
2072 goto out;
2073 }
2074 return NULL;
2075out: 2053out:
2076 return ERR_PTR(-ENOENT); 2054 return result;
2077} 2055}
2078 2056
2079/* SMP-safe */ 2057/* SMP-safe */
2080static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd) 2058static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
2081{ 2059{
2060 struct dentry *result = ERR_PTR(-ENOENT);
2082 struct task_struct *task; 2061 struct task_struct *task;
2083 struct task_struct *leader = proc_task(dir); 2062 struct task_struct *leader = get_proc_task(dir);
2084 struct inode *inode; 2063 struct inode *inode;
2085 unsigned tid; 2064 unsigned tid;
2086 2065
2066 if (!leader)
2067 goto out_no_task;
2068
2087 tid = name_to_int(dentry); 2069 tid = name_to_int(dentry);
2088 if (tid == ~0U) 2070 if (tid == ~0U)
2089 goto out; 2071 goto out;
2090 2072
2091 read_lock(&tasklist_lock); 2073 rcu_read_lock();
2092 task = find_task_by_pid(tid); 2074 task = find_task_by_pid(tid);
2093 if (task) 2075 if (task)
2094 get_task_struct(task); 2076 get_task_struct(task);
2095 read_unlock(&tasklist_lock); 2077 rcu_read_unlock();
2096 if (!task) 2078 if (!task)
2097 goto out; 2079 goto out;
2098 if (leader->tgid != task->tgid) 2080 if (leader->tgid != task->tgid)
@@ -2113,101 +2095,95 @@ static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry
2113 inode->i_nlink = 3; 2095 inode->i_nlink = 3;
2114#endif 2096#endif
2115 2097
2116 dentry->d_op = &pid_base_dentry_operations; 2098 dentry->d_op = &pid_dentry_operations;
2117 2099
2118 d_add(dentry, inode); 2100 d_add(dentry, inode);
2101 /* Close the race of the process dying before we return the dentry */
2102 if (pid_revalidate(dentry, NULL))
2103 result = NULL;
2119 2104
2120 put_task_struct(task);
2121 return NULL;
2122out_drop_task: 2105out_drop_task:
2123 put_task_struct(task); 2106 put_task_struct(task);
2124out: 2107out:
2125 return ERR_PTR(-ENOENT); 2108 put_task_struct(leader);
2109out_no_task:
2110 return result;
2126} 2111}
2127 2112
2128#define PROC_NUMBUF 10
2129#define PROC_MAXPIDS 20
2130
2131/* 2113/*
2132 * Get a few tgid's to return for filldir - we need to hold the 2114 * Find the first tgid to return to user space.
2133 * tasklist lock while doing this, and we must release it before 2115 *
2134 * we actually do the filldir itself, so we use a temp buffer.. 2116 * Usually this is just whatever follows &init_task, but if the users
2117 * buffer was too small to hold the full list or there was a seek into
2118 * the middle of the directory we have more work to do.
2119 *
2120 * In the case of a short read we start with find_task_by_pid.
2121 *
2122 * In the case of a seek we start with &init_task and walk nr
2123 * threads past it.
2135 */ 2124 */
2136static int get_tgid_list(int index, unsigned long version, unsigned int *tgids) 2125static struct task_struct *first_tgid(int tgid, unsigned int nr)
2137{ 2126{
2138 struct task_struct *p; 2127 struct task_struct *pos;
2139 int nr_tgids = 0; 2128 rcu_read_lock();
2140 2129 if (tgid && nr) {
2141 index--; 2130 pos = find_task_by_pid(tgid);
2142 read_lock(&tasklist_lock); 2131 if (pos && thread_group_leader(pos))
2143 p = NULL; 2132 goto found;
2144 if (version) {
2145 p = find_task_by_pid(version);
2146 if (p && !thread_group_leader(p))
2147 p = NULL;
2148 } 2133 }
2134 /* If nr exceeds the number of processes get out quickly */
2135 pos = NULL;
2136 if (nr && nr >= nr_processes())
2137 goto done;
2149 2138
2150 if (p) 2139 /* If we haven't found our starting place yet start with
2151 index = 0; 2140 * the init_task and walk nr tasks forward.
2152 else 2141 */
2153 p = next_task(&init_task); 2142 for (pos = next_task(&init_task); nr > 0; --nr) {
2154 2143 pos = next_task(pos);
2155 for ( ; p != &init_task; p = next_task(p)) { 2144 if (pos == &init_task) {
2156 int tgid = p->pid; 2145 pos = NULL;
2157 if (!pid_alive(p)) 2146 goto done;
2158 continue; 2147 }
2159 if (--index >= 0)
2160 continue;
2161 tgids[nr_tgids] = tgid;
2162 nr_tgids++;
2163 if (nr_tgids >= PROC_MAXPIDS)
2164 break;
2165 } 2148 }
2166 read_unlock(&tasklist_lock); 2149found:
2167 return nr_tgids; 2150 get_task_struct(pos);
2151done:
2152 rcu_read_unlock();
2153 return pos;
2168} 2154}
2169 2155
2170/* 2156/*
2171 * Get a few tid's to return for filldir - we need to hold the 2157 * Find the next task in the task list.
2172 * tasklist lock while doing this, and we must release it before 2158 * Return NULL if we loop or there is any error.
2173 * we actually do the filldir itself, so we use a temp buffer.. 2159 *
2160 * The reference to the input task_struct is released.
2174 */ 2161 */
2175static int get_tid_list(int index, unsigned int *tids, struct inode *dir) 2162static struct task_struct *next_tgid(struct task_struct *start)
2176{ 2163{
2177 struct task_struct *leader_task = proc_task(dir); 2164 struct task_struct *pos;
2178 struct task_struct *task = leader_task; 2165 rcu_read_lock();
2179 int nr_tids = 0; 2166 pos = start;
2180 2167 if (pid_alive(start))
2181 index -= 2; 2168 pos = next_task(start);
2182 read_lock(&tasklist_lock); 2169 if (pid_alive(pos) && (pos != &init_task)) {
2183 /* 2170 get_task_struct(pos);
2184 * The starting point task (leader_task) might be an already 2171 goto done;
2185 * unlinked task, which cannot be used to access the task-list 2172 }
2186 * via next_thread(). 2173 pos = NULL;
2187 */ 2174done:
2188 if (pid_alive(task)) do { 2175 rcu_read_unlock();
2189 int tid = task->pid; 2176 put_task_struct(start);
2190 2177 return pos;
2191 if (--index >= 0)
2192 continue;
2193 if (tids != NULL)
2194 tids[nr_tids] = tid;
2195 nr_tids++;
2196 if (nr_tids >= PROC_MAXPIDS)
2197 break;
2198 } while ((task = next_thread(task)) != leader_task);
2199 read_unlock(&tasklist_lock);
2200 return nr_tids;
2201} 2178}
2202 2179
2203/* for the /proc/ directory itself, after non-process stuff has been done */ 2180/* for the /proc/ directory itself, after non-process stuff has been done */
2204int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir) 2181int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
2205{ 2182{
2206 unsigned int tgid_array[PROC_MAXPIDS];
2207 char buf[PROC_NUMBUF]; 2183 char buf[PROC_NUMBUF];
2208 unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY; 2184 unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY;
2209 unsigned int nr_tgids, i; 2185 struct task_struct *task;
2210 int next_tgid; 2186 int tgid;
2211 2187
2212 if (!nr) { 2188 if (!nr) {
2213 ino_t ino = fake_ino(0,PROC_TGID_INO); 2189 ino_t ino = fake_ino(0,PROC_TGID_INO);
@@ -2216,63 +2192,116 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
2216 filp->f_pos++; 2192 filp->f_pos++;
2217 nr++; 2193 nr++;
2218 } 2194 }
2195 nr -= 1;
2219 2196
2220 /* f_version caches the tgid value that the last readdir call couldn't 2197 /* f_version caches the tgid value that the last readdir call couldn't
2221 * return. lseek aka telldir automagically resets f_version to 0. 2198 * return. lseek aka telldir automagically resets f_version to 0.
2222 */ 2199 */
2223 next_tgid = filp->f_version; 2200 tgid = filp->f_version;
2224 filp->f_version = 0; 2201 filp->f_version = 0;
2225 for (;;) { 2202 for (task = first_tgid(tgid, nr);
2226 nr_tgids = get_tgid_list(nr, next_tgid, tgid_array); 2203 task;
2227 if (!nr_tgids) { 2204 task = next_tgid(task), filp->f_pos++) {
2228 /* no more entries ! */ 2205 int len;
2206 ino_t ino;
2207 tgid = task->pid;
2208 len = snprintf(buf, sizeof(buf), "%d", tgid);
2209 ino = fake_ino(tgid, PROC_TGID_INO);
2210 if (filldir(dirent, buf, len, filp->f_pos, ino, DT_DIR) < 0) {
2211 /* returning this tgid failed, save it as the first
2212 * pid for the next readir call */
2213 filp->f_version = tgid;
2214 put_task_struct(task);
2229 break; 2215 break;
2230 } 2216 }
2231 next_tgid = 0; 2217 }
2218 return 0;
2219}
2232 2220
2233 /* do not use the last found pid, reserve it for next_tgid */ 2221/*
2234 if (nr_tgids == PROC_MAXPIDS) { 2222 * Find the first tid of a thread group to return to user space.
2235 nr_tgids--; 2223 *
2236 next_tgid = tgid_array[nr_tgids]; 2224 * Usually this is just the thread group leader, but if the users
2237 } 2225 * buffer was too small or there was a seek into the middle of the
2226 * directory we have more work todo.
2227 *
2228 * In the case of a short read we start with find_task_by_pid.
2229 *
2230 * In the case of a seek we start with the leader and walk nr
2231 * threads past it.
2232 */
2233static struct task_struct *first_tid(struct task_struct *leader,
2234 int tid, int nr)
2235{
2236 struct task_struct *pos;
2238 2237
2239 for (i=0;i<nr_tgids;i++) { 2238 rcu_read_lock();
2240 int tgid = tgid_array[i]; 2239 /* Attempt to start with the pid of a thread */
2241 ino_t ino = fake_ino(tgid,PROC_TGID_INO); 2240 if (tid && (nr > 0)) {
2242 unsigned long j = PROC_NUMBUF; 2241 pos = find_task_by_pid(tid);
2242 if (pos && (pos->group_leader == leader))
2243 goto found;
2244 }
2243 2245
2244 do 2246 /* If nr exceeds the number of threads there is nothing todo */
2245 buf[--j] = '0' + (tgid % 10); 2247 pos = NULL;
2246 while ((tgid /= 10) != 0); 2248 if (nr && nr >= get_nr_threads(leader))
2249 goto out;
2247 2250
2248 if (filldir(dirent, buf+j, PROC_NUMBUF-j, filp->f_pos, ino, DT_DIR) < 0) { 2251 /* If we haven't found our starting place yet start
2249 /* returning this tgid failed, save it as the first 2252 * with the leader and walk nr threads forward.
2250 * pid for the next readir call */ 2253 */
2251 filp->f_version = tgid_array[i]; 2254 for (pos = leader; nr > 0; --nr) {
2252 goto out; 2255 pos = next_thread(pos);
2253 } 2256 if (pos == leader) {
2254 filp->f_pos++; 2257 pos = NULL;
2255 nr++; 2258 goto out;
2256 } 2259 }
2257 } 2260 }
2261found:
2262 get_task_struct(pos);
2258out: 2263out:
2259 return 0; 2264 rcu_read_unlock();
2265 return pos;
2266}
2267
2268/*
2269 * Find the next thread in the thread list.
2270 * Return NULL if there is an error or no next thread.
2271 *
2272 * The reference to the input task_struct is released.
2273 */
2274static struct task_struct *next_tid(struct task_struct *start)
2275{
2276 struct task_struct *pos = NULL;
2277 rcu_read_lock();
2278 if (pid_alive(start)) {
2279 pos = next_thread(start);
2280 if (thread_group_leader(pos))
2281 pos = NULL;
2282 else
2283 get_task_struct(pos);
2284 }
2285 rcu_read_unlock();
2286 put_task_struct(start);
2287 return pos;
2260} 2288}
2261 2289
2262/* for the /proc/TGID/task/ directories */ 2290/* for the /proc/TGID/task/ directories */
2263static int proc_task_readdir(struct file * filp, void * dirent, filldir_t filldir) 2291static int proc_task_readdir(struct file * filp, void * dirent, filldir_t filldir)
2264{ 2292{
2265 unsigned int tid_array[PROC_MAXPIDS];
2266 char buf[PROC_NUMBUF]; 2293 char buf[PROC_NUMBUF];
2267 unsigned int nr_tids, i;
2268 struct dentry *dentry = filp->f_dentry; 2294 struct dentry *dentry = filp->f_dentry;
2269 struct inode *inode = dentry->d_inode; 2295 struct inode *inode = dentry->d_inode;
2296 struct task_struct *leader = get_proc_task(inode);
2297 struct task_struct *task;
2270 int retval = -ENOENT; 2298 int retval = -ENOENT;
2271 ino_t ino; 2299 ino_t ino;
2300 int tid;
2272 unsigned long pos = filp->f_pos; /* avoiding "long long" filp->f_pos */ 2301 unsigned long pos = filp->f_pos; /* avoiding "long long" filp->f_pos */
2273 2302
2274 if (!pid_alive(proc_task(inode))) 2303 if (!leader)
2275 goto out; 2304 goto out_no_task;
2276 retval = 0; 2305 retval = 0;
2277 2306
2278 switch (pos) { 2307 switch (pos) {
@@ -2290,24 +2319,45 @@ static int proc_task_readdir(struct file * filp, void * dirent, filldir_t filldi
2290 /* fall through */ 2319 /* fall through */
2291 } 2320 }
2292 2321
2293 nr_tids = get_tid_list(pos, tid_array, inode); 2322 /* f_version caches the tgid value that the last readdir call couldn't
2294 inode->i_nlink = pos + nr_tids; 2323 * return. lseek aka telldir automagically resets f_version to 0.
2295 2324 */
2296 for (i = 0; i < nr_tids; i++) { 2325 tid = filp->f_version;
2297 unsigned long j = PROC_NUMBUF; 2326 filp->f_version = 0;
2298 int tid = tid_array[i]; 2327 for (task = first_tid(leader, tid, pos - 2);
2299 2328 task;
2300 ino = fake_ino(tid,PROC_TID_INO); 2329 task = next_tid(task), pos++) {
2301 2330 int len;
2302 do 2331 tid = task->pid;
2303 buf[--j] = '0' + (tid % 10); 2332 len = snprintf(buf, sizeof(buf), "%d", tid);
2304 while ((tid /= 10) != 0); 2333 ino = fake_ino(tid, PROC_TID_INO);
2305 2334 if (filldir(dirent, buf, len, pos, ino, DT_DIR < 0)) {
2306 if (filldir(dirent, buf+j, PROC_NUMBUF-j, pos, ino, DT_DIR) < 0) 2335 /* returning this tgid failed, save it as the first
2336 * pid for the next readir call */
2337 filp->f_version = tid;
2338 put_task_struct(task);
2307 break; 2339 break;
2308 pos++; 2340 }
2309 } 2341 }
2310out: 2342out:
2311 filp->f_pos = pos; 2343 filp->f_pos = pos;
2344 put_task_struct(leader);
2345out_no_task:
2312 return retval; 2346 return retval;
2313} 2347}
2348
2349static int proc_task_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
2350{
2351 struct inode *inode = dentry->d_inode;
2352 struct task_struct *p = get_proc_task(inode);
2353 generic_fillattr(inode, stat);
2354
2355 if (p) {
2356 rcu_read_lock();
2357 stat->nlink += get_nr_threads(p);
2358 rcu_read_unlock();
2359 put_task_struct(p);
2360 }
2361
2362 return 0;
2363}
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 722b9c4631..6dcef089e1 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -58,14 +58,11 @@ static void de_put(struct proc_dir_entry *de)
58static void proc_delete_inode(struct inode *inode) 58static void proc_delete_inode(struct inode *inode)
59{ 59{
60 struct proc_dir_entry *de; 60 struct proc_dir_entry *de;
61 struct task_struct *tsk;
62 61
63 truncate_inode_pages(&inode->i_data, 0); 62 truncate_inode_pages(&inode->i_data, 0);
64 63
65 /* Let go of any associated process */ 64 /* Stop tracking associated processes */
66 tsk = PROC_I(inode)->task; 65 put_pid(PROC_I(inode)->pid);
67 if (tsk)
68 put_task_struct(tsk);
69 66
70 /* Let go of any associated proc directory entry */ 67 /* Let go of any associated proc directory entry */
71 de = PROC_I(inode)->pde; 68 de = PROC_I(inode)->pde;
@@ -94,8 +91,8 @@ static struct inode *proc_alloc_inode(struct super_block *sb)
94 ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, SLAB_KERNEL); 91 ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, SLAB_KERNEL);
95 if (!ei) 92 if (!ei)
96 return NULL; 93 return NULL;
97 ei->task = NULL; 94 ei->pid = NULL;
98 ei->type = 0; 95 ei->fd = 0;
99 ei->op.proc_get_link = NULL; 96 ei->op.proc_get_link = NULL;
100 ei->pde = NULL; 97 ei->pde = NULL;
101 inode = &ei->vfs_inode; 98 inode = &ei->vfs_inode;
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 0502f17b86..146a434ba9 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -37,16 +37,30 @@ extern int proc_tgid_stat(struct task_struct *, char *);
37extern int proc_pid_status(struct task_struct *, char *); 37extern int proc_pid_status(struct task_struct *, char *);
38extern int proc_pid_statm(struct task_struct *, char *); 38extern int proc_pid_statm(struct task_struct *, char *);
39 39
40extern struct file_operations proc_maps_operations;
41extern struct file_operations proc_numa_maps_operations;
42extern struct file_operations proc_smaps_operations;
43
44extern struct file_operations proc_maps_operations;
45extern struct file_operations proc_numa_maps_operations;
46extern struct file_operations proc_smaps_operations;
47
48
40void free_proc_entry(struct proc_dir_entry *de); 49void free_proc_entry(struct proc_dir_entry *de);
41 50
42int proc_init_inodecache(void); 51int proc_init_inodecache(void);
43 52
44static inline struct task_struct *proc_task(struct inode *inode) 53static inline struct pid *proc_pid(struct inode *inode)
54{
55 return PROC_I(inode)->pid;
56}
57
58static inline struct task_struct *get_proc_task(struct inode *inode)
45{ 59{
46 return PROC_I(inode)->task; 60 return get_pid_task(proc_pid(inode), PIDTYPE_PID);
47} 61}
48 62
49static inline int proc_type(struct inode *inode) 63static inline int proc_fd(struct inode *inode)
50{ 64{
51 return PROC_I(inode)->type; 65 return PROC_I(inode)->fd;
52} 66}
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 91b7c15ab3..0a163a4f77 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -75,9 +75,13 @@ int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount *
75{ 75{
76 struct vm_area_struct * vma; 76 struct vm_area_struct * vma;
77 int result = -ENOENT; 77 int result = -ENOENT;
78 struct task_struct *task = proc_task(inode); 78 struct task_struct *task = get_proc_task(inode);
79 struct mm_struct * mm = get_task_mm(task); 79 struct mm_struct * mm = NULL;
80 80
81 if (task) {
82 mm = get_task_mm(task);
83 put_task_struct(task);
84 }
81 if (!mm) 85 if (!mm)
82 goto out; 86 goto out;
83 down_read(&mm->mmap_sem); 87 down_read(&mm->mmap_sem);
@@ -118,9 +122,15 @@ struct mem_size_stats
118 unsigned long private_dirty; 122 unsigned long private_dirty;
119}; 123};
120 124
125__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
126{
127 return NULL;
128}
129
121static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss) 130static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
122{ 131{
123 struct task_struct *task = m->private; 132 struct proc_maps_private *priv = m->private;
133 struct task_struct *task = priv->task;
124 struct vm_area_struct *vma = v; 134 struct vm_area_struct *vma = v;
125 struct mm_struct *mm = vma->vm_mm; 135 struct mm_struct *mm = vma->vm_mm;
126 struct file *file = vma->vm_file; 136 struct file *file = vma->vm_file;
@@ -153,22 +163,23 @@ static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats
153 pad_len_spaces(m, len); 163 pad_len_spaces(m, len);
154 seq_path(m, file->f_vfsmnt, file->f_dentry, "\n"); 164 seq_path(m, file->f_vfsmnt, file->f_dentry, "\n");
155 } else { 165 } else {
156 if (mm) { 166 const char *name = arch_vma_name(vma);
157 if (vma->vm_start <= mm->start_brk && 167 if (!name) {
168 if (mm) {
169 if (vma->vm_start <= mm->start_brk &&
158 vma->vm_end >= mm->brk) { 170 vma->vm_end >= mm->brk) {
159 pad_len_spaces(m, len); 171 name = "[heap]";
160 seq_puts(m, "[heap]"); 172 } else if (vma->vm_start <= mm->start_stack &&
161 } else { 173 vma->vm_end >= mm->start_stack) {
162 if (vma->vm_start <= mm->start_stack && 174 name = "[stack]";
163 vma->vm_end >= mm->start_stack) {
164
165 pad_len_spaces(m, len);
166 seq_puts(m, "[stack]");
167 } 175 }
176 } else {
177 name = "[vdso]";
168 } 178 }
169 } else { 179 }
180 if (name) {
170 pad_len_spaces(m, len); 181 pad_len_spaces(m, len);
171 seq_puts(m, "[vdso]"); 182 seq_puts(m, name);
172 } 183 }
173 } 184 }
174 seq_putc(m, '\n'); 185 seq_putc(m, '\n');
@@ -295,12 +306,16 @@ static int show_smap(struct seq_file *m, void *v)
295 306
296static void *m_start(struct seq_file *m, loff_t *pos) 307static void *m_start(struct seq_file *m, loff_t *pos)
297{ 308{
298 struct task_struct *task = m->private; 309 struct proc_maps_private *priv = m->private;
299 unsigned long last_addr = m->version; 310 unsigned long last_addr = m->version;
300 struct mm_struct *mm; 311 struct mm_struct *mm;
301 struct vm_area_struct *vma, *tail_vma; 312 struct vm_area_struct *vma, *tail_vma = NULL;
302 loff_t l = *pos; 313 loff_t l = *pos;
303 314
315 /* Clear the per syscall fields in priv */
316 priv->task = NULL;
317 priv->tail_vma = NULL;
318
304 /* 319 /*
305 * We remember last_addr rather than next_addr to hit with 320 * We remember last_addr rather than next_addr to hit with
306 * mmap_cache most of the time. We have zero last_addr at 321 * mmap_cache most of the time. We have zero last_addr at
@@ -311,11 +326,15 @@ static void *m_start(struct seq_file *m, loff_t *pos)
311 if (last_addr == -1UL) 326 if (last_addr == -1UL)
312 return NULL; 327 return NULL;
313 328
314 mm = get_task_mm(task); 329 priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
330 if (!priv->task)
331 return NULL;
332
333 mm = get_task_mm(priv->task);
315 if (!mm) 334 if (!mm)
316 return NULL; 335 return NULL;
317 336
318 tail_vma = get_gate_vma(task); 337 priv->tail_vma = tail_vma = get_gate_vma(priv->task);
319 down_read(&mm->mmap_sem); 338 down_read(&mm->mmap_sem);
320 339
321 /* Start with last addr hint */ 340 /* Start with last addr hint */
@@ -350,11 +369,9 @@ out:
350 return tail_vma; 369 return tail_vma;
351} 370}
352 371
353static void m_stop(struct seq_file *m, void *v) 372static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
354{ 373{
355 struct task_struct *task = m->private; 374 if (vma && vma != priv->tail_vma) {
356 struct vm_area_struct *vma = v;
357 if (vma && vma != get_gate_vma(task)) {
358 struct mm_struct *mm = vma->vm_mm; 375 struct mm_struct *mm = vma->vm_mm;
359 up_read(&mm->mmap_sem); 376 up_read(&mm->mmap_sem);
360 mmput(mm); 377 mmput(mm);
@@ -363,38 +380,103 @@ static void m_stop(struct seq_file *m, void *v)
363 380
364static void *m_next(struct seq_file *m, void *v, loff_t *pos) 381static void *m_next(struct seq_file *m, void *v, loff_t *pos)
365{ 382{
366 struct task_struct *task = m->private; 383 struct proc_maps_private *priv = m->private;
367 struct vm_area_struct *vma = v; 384 struct vm_area_struct *vma = v;
368 struct vm_area_struct *tail_vma = get_gate_vma(task); 385 struct vm_area_struct *tail_vma = priv->tail_vma;
369 386
370 (*pos)++; 387 (*pos)++;
371 if (vma && (vma != tail_vma) && vma->vm_next) 388 if (vma && (vma != tail_vma) && vma->vm_next)
372 return vma->vm_next; 389 return vma->vm_next;
373 m_stop(m, v); 390 vma_stop(priv, vma);
374 return (vma != tail_vma)? tail_vma: NULL; 391 return (vma != tail_vma)? tail_vma: NULL;
375} 392}
376 393
377struct seq_operations proc_pid_maps_op = { 394static void m_stop(struct seq_file *m, void *v)
395{
396 struct proc_maps_private *priv = m->private;
397 struct vm_area_struct *vma = v;
398
399 vma_stop(priv, vma);
400 if (priv->task)
401 put_task_struct(priv->task);
402}
403
404static struct seq_operations proc_pid_maps_op = {
378 .start = m_start, 405 .start = m_start,
379 .next = m_next, 406 .next = m_next,
380 .stop = m_stop, 407 .stop = m_stop,
381 .show = show_map 408 .show = show_map
382}; 409};
383 410
384struct seq_operations proc_pid_smaps_op = { 411static struct seq_operations proc_pid_smaps_op = {
385 .start = m_start, 412 .start = m_start,
386 .next = m_next, 413 .next = m_next,
387 .stop = m_stop, 414 .stop = m_stop,
388 .show = show_smap 415 .show = show_smap
389}; 416};
390 417
418static int do_maps_open(struct inode *inode, struct file *file,
419 struct seq_operations *ops)
420{
421 struct proc_maps_private *priv;
422 int ret = -ENOMEM;
423 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
424 if (priv) {
425 priv->pid = proc_pid(inode);
426 ret = seq_open(file, ops);
427 if (!ret) {
428 struct seq_file *m = file->private_data;
429 m->private = priv;
430 } else {
431 kfree(priv);
432 }
433 }
434 return ret;
435}
436
437static int maps_open(struct inode *inode, struct file *file)
438{
439 return do_maps_open(inode, file, &proc_pid_maps_op);
440}
441
442struct file_operations proc_maps_operations = {
443 .open = maps_open,
444 .read = seq_read,
445 .llseek = seq_lseek,
446 .release = seq_release_private,
447};
448
391#ifdef CONFIG_NUMA 449#ifdef CONFIG_NUMA
392extern int show_numa_map(struct seq_file *m, void *v); 450extern int show_numa_map(struct seq_file *m, void *v);
393 451
394struct seq_operations proc_pid_numa_maps_op = { 452static struct seq_operations proc_pid_numa_maps_op = {
395 .start = m_start, 453 .start = m_start,
396 .next = m_next, 454 .next = m_next,
397 .stop = m_stop, 455 .stop = m_stop,
398 .show = show_numa_map 456 .show = show_numa_map
399}; 457};
458
459static int numa_maps_open(struct inode *inode, struct file *file)
460{
461 return do_maps_open(inode, file, &proc_pid_numa_maps_op);
462}
463
464struct file_operations proc_numa_maps_operations = {
465 .open = numa_maps_open,
466 .read = seq_read,
467 .llseek = seq_lseek,
468 .release = seq_release_private,
469};
400#endif 470#endif
471
472static int smaps_open(struct inode *inode, struct file *file)
473{
474 return do_maps_open(inode, file, &proc_pid_smaps_op);
475}
476
477struct file_operations proc_smaps_operations = {
478 .open = smaps_open,
479 .read = seq_read,
480 .llseek = seq_lseek,
481 .release = seq_release_private,
482};
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 8f68827ed1..af69f28277 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -156,9 +156,28 @@ static void *m_next(struct seq_file *m, void *v, loff_t *pos)
156{ 156{
157 return NULL; 157 return NULL;
158} 158}
159struct seq_operations proc_pid_maps_op = { 159static struct seq_operations proc_pid_maps_op = {
160 .start = m_start, 160 .start = m_start,
161 .next = m_next, 161 .next = m_next,
162 .stop = m_stop, 162 .stop = m_stop,
163 .show = show_map 163 .show = show_map
164}; 164};
165
166static int maps_open(struct inode *inode, struct file *file)
167{
168 int ret;
169 ret = seq_open(file, &proc_pid_maps_op);
170 if (!ret) {
171 struct seq_file *m = file->private_data;
172 m->private = NULL;
173 }
174 return ret;
175}
176
177struct file_operations proc_maps_operations = {
178 .open = maps_open,
179 .read = seq_read,
180 .llseek = seq_lseek,
181 .release = seq_release,
182};
183
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index cf6e1cf403..752cea12e3 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -1560,12 +1560,6 @@ static ssize_t reiserfs_file_write(struct file *file, /* the file we are going t
1560 return res; 1560 return res;
1561} 1561}
1562 1562
1563static ssize_t reiserfs_aio_write(struct kiocb *iocb, const char __user * buf,
1564 size_t count, loff_t pos)
1565{
1566 return generic_file_aio_write(iocb, buf, count, pos);
1567}
1568
1569const struct file_operations reiserfs_file_operations = { 1563const struct file_operations reiserfs_file_operations = {
1570 .read = generic_file_read, 1564 .read = generic_file_read,
1571 .write = reiserfs_file_write, 1565 .write = reiserfs_file_write,
@@ -1575,7 +1569,7 @@ const struct file_operations reiserfs_file_operations = {
1575 .fsync = reiserfs_sync_file, 1569 .fsync = reiserfs_sync_file,
1576 .sendfile = generic_file_sendfile, 1570 .sendfile = generic_file_sendfile,
1577 .aio_read = generic_file_aio_read, 1571 .aio_read = generic_file_aio_read,
1578 .aio_write = reiserfs_aio_write, 1572 .aio_write = generic_file_aio_write,
1579 .splice_read = generic_file_splice_read, 1573 .splice_read = generic_file_splice_read,
1580 .splice_write = generic_file_splice_write, 1574 .splice_write = generic_file_splice_write,
1581}; 1575};
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 1b73529b80..49d1a53dbe 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -834,8 +834,7 @@ static int write_ordered_buffers(spinlock_t * lock,
834 get_bh(bh); 834 get_bh(bh);
835 if (test_set_buffer_locked(bh)) { 835 if (test_set_buffer_locked(bh)) {
836 if (!buffer_dirty(bh)) { 836 if (!buffer_dirty(bh)) {
837 list_del_init(&jh->list); 837 list_move(&jh->list, &tmp);
838 list_add(&jh->list, &tmp);
839 goto loop_next; 838 goto loop_next;
840 } 839 }
841 spin_unlock(lock); 840 spin_unlock(lock);
@@ -855,8 +854,7 @@ static int write_ordered_buffers(spinlock_t * lock,
855 ret = -EIO; 854 ret = -EIO;
856 } 855 }
857 if (buffer_dirty(bh)) { 856 if (buffer_dirty(bh)) {
858 list_del_init(&jh->list); 857 list_move(&jh->list, &tmp);
859 list_add(&jh->list, &tmp);
860 add_to_chunk(&chunk, bh, lock, write_ordered_chunk); 858 add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
861 } else { 859 } else {
862 reiserfs_free_jh(bh); 860 reiserfs_free_jh(bh);
diff --git a/fs/smbfs/request.c b/fs/smbfs/request.c
index c71dd2760d..c8e96195b9 100644
--- a/fs/smbfs/request.c
+++ b/fs/smbfs/request.c
@@ -400,8 +400,7 @@ static int smb_request_send_req(struct smb_request *req)
400 if (!(req->rq_flags & SMB_REQ_TRANSMITTED)) 400 if (!(req->rq_flags & SMB_REQ_TRANSMITTED))
401 goto out; 401 goto out;
402 402
403 list_del_init(&req->rq_queue); 403 list_move_tail(&req->rq_queue, &server->recvq);
404 list_add_tail(&req->rq_queue, &server->recvq);
405 result = 1; 404 result = 1;
406out: 405out:
407 return result; 406 return result;
@@ -435,8 +434,7 @@ int smb_request_send_server(struct smb_sb_info *server)
435 result = smb_request_send_req(req); 434 result = smb_request_send_req(req);
436 if (result < 0) { 435 if (result < 0) {
437 server->conn_error = result; 436 server->conn_error = result;
438 list_del_init(&req->rq_queue); 437 list_move(&req->rq_queue, &server->xmitq);
439 list_add(&req->rq_queue, &server->xmitq);
440 result = -EIO; 438 result = -EIO;
441 goto out; 439 goto out;
442 } 440 }
diff --git a/fs/smbfs/smbiod.c b/fs/smbfs/smbiod.c
index 3f71384020..24577e2c48 100644
--- a/fs/smbfs/smbiod.c
+++ b/fs/smbfs/smbiod.c
@@ -193,8 +193,7 @@ int smbiod_retry(struct smb_sb_info *server)
193 if (req->rq_flags & SMB_REQ_RETRY) { 193 if (req->rq_flags & SMB_REQ_RETRY) {
194 /* must move the request to the xmitq */ 194 /* must move the request to the xmitq */
195 VERBOSE("retrying request %p on recvq\n", req); 195 VERBOSE("retrying request %p on recvq\n", req);
196 list_del(&req->rq_queue); 196 list_move(&req->rq_queue, &server->xmitq);
197 list_add(&req->rq_queue, &server->xmitq);
198 continue; 197 continue;
199 } 198 }
200#endif 199#endif
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 610b5bdbe7..61c42430cb 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -430,10 +430,9 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
430 i++; 430 i++;
431 /* fallthrough */ 431 /* fallthrough */
432 default: 432 default:
433 if (filp->f_pos == 2) { 433 if (filp->f_pos == 2)
434 list_del(q); 434 list_move(q, &parent_sd->s_children);
435 list_add(q, &parent_sd->s_children); 435
436 }
437 for (p=q->next; p!= &parent_sd->s_children; p=p->next) { 436 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
438 struct sysfs_dirent *next; 437 struct sysfs_dirent *next;
439 const char * name; 438 const char * name;
@@ -455,8 +454,7 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
455 dt_type(next)) < 0) 454 dt_type(next)) < 0)
456 return 0; 455 return 0;
457 456
458 list_del(q); 457 list_move(q, p);
459 list_add(q, p);
460 p = q; 458 p = q;
461 filp->f_pos++; 459 filp->f_pos++;
462 } 460 }
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index f2dbdf5a87..259bd19609 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -605,39 +605,12 @@ static void ufs_set_inode_ops(struct inode *inode)
605 ufs_get_inode_dev(inode->i_sb, UFS_I(inode))); 605 ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
606} 606}
607 607
608void ufs_read_inode (struct inode * inode) 608static void ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
609{ 609{
610 struct ufs_inode_info *ufsi = UFS_I(inode); 610 struct ufs_inode_info *ufsi = UFS_I(inode);
611 struct super_block * sb; 611 struct super_block *sb = inode->i_sb;
612 struct ufs_sb_private_info * uspi;
613 struct ufs_inode * ufs_inode;
614 struct ufs2_inode *ufs2_inode;
615 struct buffer_head * bh;
616 mode_t mode; 612 mode_t mode;
617 unsigned i; 613 unsigned i;
618 unsigned flags;
619
620 UFSD("ENTER, ino %lu\n", inode->i_ino);
621
622 sb = inode->i_sb;
623 uspi = UFS_SB(sb)->s_uspi;
624 flags = UFS_SB(sb)->s_flags;
625
626 if (inode->i_ino < UFS_ROOTINO ||
627 inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
628 ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino);
629 goto bad_inode;
630 }
631
632 bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
633 if (!bh) {
634 ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
635 goto bad_inode;
636 }
637 if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
638 goto ufs2_inode;
639
640 ufs_inode = (struct ufs_inode *) (bh->b_data + sizeof(struct ufs_inode) * ufs_inotofsbo(inode->i_ino));
641 614
642 /* 615 /*
643 * Copy data to the in-core inode. 616 * Copy data to the in-core inode.
@@ -661,14 +634,11 @@ void ufs_read_inode (struct inode * inode)
661 inode->i_atime.tv_nsec = 0; 634 inode->i_atime.tv_nsec = 0;
662 inode->i_ctime.tv_nsec = 0; 635 inode->i_ctime.tv_nsec = 0;
663 inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks); 636 inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
664 inode->i_blksize = PAGE_SIZE; /* This is the optimal IO size (for stat) */
665 inode->i_version++;
666 ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags); 637 ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
667 ufsi->i_gen = fs32_to_cpu(sb, ufs_inode->ui_gen); 638 ufsi->i_gen = fs32_to_cpu(sb, ufs_inode->ui_gen);
668 ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow); 639 ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
669 ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag); 640 ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
670 ufsi->i_lastfrag = (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift; 641
671 ufsi->i_dir_start_lookup = 0;
672 642
673 if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) { 643 if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
674 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++) 644 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
@@ -677,24 +647,16 @@ void ufs_read_inode (struct inode * inode)
677 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++) 647 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
678 ufsi->i_u1.i_symlink[i] = ufs_inode->ui_u2.ui_symlink[i]; 648 ufsi->i_u1.i_symlink[i] = ufs_inode->ui_u2.ui_symlink[i];
679 } 649 }
680 ufsi->i_osync = 0; 650}
681
682 ufs_set_inode_ops(inode);
683
684 brelse (bh);
685
686 UFSD("EXIT\n");
687 return;
688 651
689bad_inode: 652static void ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
690 make_bad_inode(inode); 653{
691 return; 654 struct ufs_inode_info *ufsi = UFS_I(inode);
655 struct super_block *sb = inode->i_sb;
656 mode_t mode;
657 unsigned i;
692 658
693ufs2_inode :
694 UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino); 659 UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
695
696 ufs2_inode = (struct ufs2_inode *)(bh->b_data + sizeof(struct ufs2_inode) * ufs_inotofsbo(inode->i_ino));
697
698 /* 660 /*
699 * Copy data to the in-core inode. 661 * Copy data to the in-core inode.
700 */ 662 */
@@ -717,26 +679,64 @@ ufs2_inode :
717 inode->i_atime.tv_nsec = 0; 679 inode->i_atime.tv_nsec = 0;
718 inode->i_ctime.tv_nsec = 0; 680 inode->i_ctime.tv_nsec = 0;
719 inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks); 681 inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
720 inode->i_blksize = PAGE_SIZE; /*This is the optimal IO size(for stat)*/
721
722 inode->i_version++;
723 ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags); 682 ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
724 ufsi->i_gen = fs32_to_cpu(sb, ufs2_inode->ui_gen); 683 ufsi->i_gen = fs32_to_cpu(sb, ufs2_inode->ui_gen);
725 /* 684 /*
726 ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow); 685 ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
727 ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag); 686 ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
728 */ 687 */
729 ufsi->i_lastfrag= (inode->i_size + uspi->s_fsize- 1) >> uspi->s_fshift;
730 688
731 if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) { 689 if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
732 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++) 690 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
733 ufsi->i_u1.u2_i_data[i] = 691 ufsi->i_u1.u2_i_data[i] =
734 ufs2_inode->ui_u2.ui_addr.ui_db[i]; 692 ufs2_inode->ui_u2.ui_addr.ui_db[i];
735 } 693 } else {
736 else {
737 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++) 694 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
738 ufsi->i_u1.i_symlink[i] = ufs2_inode->ui_u2.ui_symlink[i]; 695 ufsi->i_u1.i_symlink[i] = ufs2_inode->ui_u2.ui_symlink[i];
739 } 696 }
697}
698
699void ufs_read_inode(struct inode * inode)
700{
701 struct ufs_inode_info *ufsi = UFS_I(inode);
702 struct super_block * sb;
703 struct ufs_sb_private_info * uspi;
704 struct buffer_head * bh;
705
706 UFSD("ENTER, ino %lu\n", inode->i_ino);
707
708 sb = inode->i_sb;
709 uspi = UFS_SB(sb)->s_uspi;
710
711 if (inode->i_ino < UFS_ROOTINO ||
712 inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
713 ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n",
714 inode->i_ino);
715 goto bad_inode;
716 }
717
718 bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
719 if (!bh) {
720 ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n",
721 inode->i_ino);
722 goto bad_inode;
723 }
724 if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
725 struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;
726
727 ufs2_read_inode(inode,
728 ufs2_inode + ufs_inotofsbo(inode->i_ino));
729 } else {
730 struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;
731
732 ufs1_read_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
733 }
734
735 inode->i_blksize = PAGE_SIZE;/*This is the optimal IO size (for stat)*/
736 inode->i_version++;
737 ufsi->i_lastfrag =
738 (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
739 ufsi->i_dir_start_lookup = 0;
740 ufsi->i_osync = 0; 740 ufsi->i_osync = 0;
741 741
742 ufs_set_inode_ops(inode); 742 ufs_set_inode_ops(inode);
@@ -745,6 +745,9 @@ ufs2_inode :
745 745
746 UFSD("EXIT\n"); 746 UFSD("EXIT\n");
747 return; 747 return;
748
749bad_inode:
750 make_bad_inode(inode);
748} 751}
749 752
750static int ufs_update_inode(struct inode * inode, int do_sync) 753static int ufs_update_inode(struct inode * inode, int do_sync)
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 12810baeb5..d9180020de 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -419,16 +419,15 @@ xfs_vn_link(
419 int error; 419 int error;
420 420
421 ip = old_dentry->d_inode; /* inode being linked to */ 421 ip = old_dentry->d_inode; /* inode being linked to */
422 if (S_ISDIR(ip->i_mode))
423 return -EPERM;
424
425 tdvp = vn_from_inode(dir); 422 tdvp = vn_from_inode(dir);
426 vp = vn_from_inode(ip); 423 vp = vn_from_inode(ip);
427 424
425 VN_HOLD(vp);
428 error = bhv_vop_link(tdvp, vp, dentry, NULL); 426 error = bhv_vop_link(tdvp, vp, dentry, NULL);
429 if (likely(!error)) { 427 if (unlikely(error)) {
428 VN_RELE(vp);
429 } else {
430 VMODIFY(tdvp); 430 VMODIFY(tdvp);
431 VN_HOLD(vp);
432 xfs_validate_fields(ip, &vattr); 431 xfs_validate_fields(ip, &vattr);
433 d_instantiate(dentry, ip); 432 d_instantiate(dentry, ip);
434 } 433 }
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index aa26ab906c..028eb17ec2 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -140,9 +140,7 @@ BUFFER_FNS(PrivateStart, unwritten);
140#define current_pid() (current->pid) 140#define current_pid() (current->pid)
141#define current_fsuid(cred) (current->fsuid) 141#define current_fsuid(cred) (current->fsuid)
142#define current_fsgid(cred) (current->fsgid) 142#define current_fsgid(cred) (current->fsgid)
143#define current_set_flags(f) (current->flags |= (f))
144#define current_test_flags(f) (current->flags & (f)) 143#define current_test_flags(f) (current->flags & (f))
145#define current_clear_flags(f) (current->flags & ~(f))
146#define current_set_flags_nested(sp, f) \ 144#define current_set_flags_nested(sp, f) \
147 (*(sp) = current->flags, current->flags |= (f)) 145 (*(sp) = current->flags, current->flags |= (f))
148#define current_clear_flags_nested(sp, f) \ 146#define current_clear_flags_nested(sp, f) \
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h
index 35c6a01963..c42b3221b2 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.h
+++ b/fs/xfs/linux-2.6/xfs_vnode.h
@@ -93,7 +93,7 @@ typedef enum {
93 */ 93 */
94static inline struct bhv_vnode *vn_from_inode(struct inode *inode) 94static inline struct bhv_vnode *vn_from_inode(struct inode *inode)
95{ 95{
96 return (bhv_vnode_t *)list_entry(inode, bhv_vnode_t, v_inode); 96 return container_of(inode, bhv_vnode_t, v_inode);
97} 97}
98static inline struct inode *vn_to_inode(struct bhv_vnode *vnode) 98static inline struct inode *vn_to_inode(struct bhv_vnode *vnode)
99{ 99{
diff --git a/fs/xfs/xfs_behavior.h b/fs/xfs/xfs_behavior.h
index 1d8ff10320..6e6e56fb35 100644
--- a/fs/xfs/xfs_behavior.h
+++ b/fs/xfs/xfs_behavior.h
@@ -78,15 +78,12 @@
78 * 78 *
79 */ 79 */
80 80
81struct bhv_head_lock;
82
83/* 81/*
84 * Behavior head. Head of the chain of behaviors. 82 * Behavior head. Head of the chain of behaviors.
85 * Contained within each virtualized object data structure. 83 * Contained within each virtualized object data structure.
86 */ 84 */
87typedef struct bhv_head { 85typedef struct bhv_head {
88 struct bhv_desc *bh_first; /* first behavior in chain */ 86 struct bhv_desc *bh_first; /* first behavior in chain */
89 struct bhv_head_lock *bh_lockp; /* pointer to lock info struct */
90} bhv_head_t; 87} bhv_head_t;
91 88
92/* 89/*
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 5fa0adb7e1..86c1bf0bba 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1961,9 +1961,9 @@ xfs_iunlink_remove(
1961 xfs_agino_t agino; 1961 xfs_agino_t agino;
1962 xfs_agino_t next_agino; 1962 xfs_agino_t next_agino;
1963 xfs_buf_t *last_ibp; 1963 xfs_buf_t *last_ibp;
1964 xfs_dinode_t *last_dip; 1964 xfs_dinode_t *last_dip = NULL;
1965 short bucket_index; 1965 short bucket_index;
1966 int offset, last_offset; 1966 int offset, last_offset = 0;
1967 int error; 1967 int error;
1968 int agi_ok; 1968 int agi_ok;
1969 1969
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index d8f5d4cbe8..e730328636 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1740,10 +1740,10 @@ xlog_write(xfs_mount_t * mp,
1740 xlog_in_core_t **commit_iclog, 1740 xlog_in_core_t **commit_iclog,
1741 uint flags) 1741 uint flags)
1742{ 1742{
1743 xlog_t *log = mp->m_log; 1743 xlog_t *log = mp->m_log;
1744 xlog_ticket_t *ticket = (xlog_ticket_t *)tic; 1744 xlog_ticket_t *ticket = (xlog_ticket_t *)tic;
1745 xlog_in_core_t *iclog = NULL; /* ptr to current in-core log */
1745 xlog_op_header_t *logop_head; /* ptr to log operation header */ 1746 xlog_op_header_t *logop_head; /* ptr to log operation header */
1746 xlog_in_core_t *iclog; /* ptr to current in-core log */
1747 __psint_t ptr; /* copy address into data region */ 1747 __psint_t ptr; /* copy address into data region */
1748 int len; /* # xlog_write() bytes 2 still copy */ 1748 int len; /* # xlog_write() bytes 2 still copy */
1749 int index; /* region index currently copying */ 1749 int index; /* region index currently copying */
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 55b4237c21..3cb678e3a1 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -990,6 +990,8 @@ xlog_find_zeroed(
990 xfs_daddr_t num_scan_bblks; 990 xfs_daddr_t num_scan_bblks;
991 int error, log_bbnum = log->l_logBBsize; 991 int error, log_bbnum = log->l_logBBsize;
992 992
993 *blk_no = 0;
994
993 /* check totally zeroed log */ 995 /* check totally zeroed log */
994 bp = xlog_get_bp(log, 1); 996 bp = xlog_get_bp(log, 1);
995 if (!bp) 997 if (!bp)
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 10dbf203c6..4be5c0b2d2 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1721,15 +1721,14 @@ xfs_mount_log_sbunit(
1721 * is present to prevent thrashing). 1721 * is present to prevent thrashing).
1722 */ 1722 */
1723 1723
1724#ifdef CONFIG_HOTPLUG_CPU
1724/* 1725/*
1725 * hot-plug CPU notifier support. 1726 * hot-plug CPU notifier support.
1726 * 1727 *
1727 * We cannot use the hotcpu_register() function because it does 1728 * We need a notifier per filesystem as we need to be able to identify
1728 * not allow notifier instances. We need a notifier per filesystem 1729 * the filesystem to balance the counters out. This is achieved by
1729 * as we need to be able to identify the filesystem to balance 1730 * having a notifier block embedded in the xfs_mount_t and doing pointer
1730 * the counters out. This is achieved by having a notifier block 1731 * magic to get the mount pointer from the notifier block address.
1731 * embedded in the xfs_mount_t and doing pointer magic to get the
1732 * mount pointer from the notifier block address.
1733 */ 1732 */
1734STATIC int 1733STATIC int
1735xfs_icsb_cpu_notify( 1734xfs_icsb_cpu_notify(
@@ -1779,6 +1778,7 @@ xfs_icsb_cpu_notify(
1779 1778
1780 return NOTIFY_OK; 1779 return NOTIFY_OK;
1781} 1780}
1781#endif /* CONFIG_HOTPLUG_CPU */
1782 1782
1783int 1783int
1784xfs_icsb_init_counters( 1784xfs_icsb_init_counters(
@@ -1791,9 +1791,11 @@ xfs_icsb_init_counters(
1791 if (mp->m_sb_cnts == NULL) 1791 if (mp->m_sb_cnts == NULL)
1792 return -ENOMEM; 1792 return -ENOMEM;
1793 1793
1794#ifdef CONFIG_HOTPLUG_CPU
1794 mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify; 1795 mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify;
1795 mp->m_icsb_notifier.priority = 0; 1796 mp->m_icsb_notifier.priority = 0;
1796 register_cpu_notifier(&mp->m_icsb_notifier); 1797 register_hotcpu_notifier(&mp->m_icsb_notifier);
1798#endif /* CONFIG_HOTPLUG_CPU */
1797 1799
1798 for_each_online_cpu(i) { 1800 for_each_online_cpu(i) {
1799 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); 1801 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
@@ -1812,7 +1814,7 @@ xfs_icsb_destroy_counters(
1812 xfs_mount_t *mp) 1814 xfs_mount_t *mp)
1813{ 1815{
1814 if (mp->m_sb_cnts) { 1816 if (mp->m_sb_cnts) {
1815 unregister_cpu_notifier(&mp->m_icsb_notifier); 1817 unregister_hotcpu_notifier(&mp->m_icsb_notifier);
1816 free_percpu(mp->m_sb_cnts); 1818 free_percpu(mp->m_sb_cnts);
1817 } 1819 }
1818} 1820}
@@ -2026,7 +2028,7 @@ xfs_icsb_balance_counter(
2026 xfs_sb_field_t field, 2028 xfs_sb_field_t field,
2027 int flags) 2029 int flags)
2028{ 2030{
2029 uint64_t count, resid = 0; 2031 uint64_t count, resid;
2030 int weight = num_online_cpus(); 2032 int weight = num_online_cpus();
2031 int s; 2033 int s;
2032 2034
@@ -2058,6 +2060,7 @@ xfs_icsb_balance_counter(
2058 break; 2060 break;
2059 default: 2061 default:
2060 BUG(); 2062 BUG();
2063 count = resid = 0; /* quiet, gcc */
2061 break; 2064 break;
2062 } 2065 }
2063 2066
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 0c1e42b037..5a0b678956 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -1929,7 +1929,7 @@ xfs_growfs_rt(
1929 /* 1929 /*
1930 * Initial error checking. 1930 * Initial error checking.
1931 */ 1931 */
1932 if (mp->m_rtdev_targp || mp->m_rbmip == NULL || 1932 if (mp->m_rtdev_targp == NULL || mp->m_rbmip == NULL ||
1933 (nrblocks = in->newblocks) <= sbp->sb_rblocks || 1933 (nrblocks = in->newblocks) <= sbp->sb_rblocks ||
1934 (sbp->sb_rblocks && (in->extsize != sbp->sb_rextsize))) 1934 (sbp->sb_rblocks && (in->extsize != sbp->sb_rextsize)))
1935 return XFS_ERROR(EINVAL); 1935 return XFS_ERROR(EINVAL);
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index cb65c3a603..9dc88b3806 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -338,8 +338,6 @@ typedef void (*xfs_trans_callback_t)(struct xfs_trans *, void *);
338typedef struct xfs_trans { 338typedef struct xfs_trans {
339 unsigned int t_magic; /* magic number */ 339 unsigned int t_magic; /* magic number */
340 xfs_log_callback_t t_logcb; /* log callback struct */ 340 xfs_log_callback_t t_logcb; /* log callback struct */
341 struct xfs_trans *t_forw; /* async list pointers */
342 struct xfs_trans *t_back; /* async list pointers */
343 unsigned int t_type; /* transaction type */ 341 unsigned int t_type; /* transaction type */
344 unsigned int t_log_res; /* amt of log space resvd */ 342 unsigned int t_log_res; /* amt of log space resvd */
345 unsigned int t_log_count; /* count for perm log res */ 343 unsigned int t_log_count; /* count for perm log res */
@@ -364,9 +362,11 @@ typedef struct xfs_trans {
364 long t_res_fdblocks_delta; /* on-disk only chg */ 362 long t_res_fdblocks_delta; /* on-disk only chg */
365 long t_frextents_delta;/* superblock freextents chg*/ 363 long t_frextents_delta;/* superblock freextents chg*/
366 long t_res_frextents_delta; /* on-disk only chg */ 364 long t_res_frextents_delta; /* on-disk only chg */
365#ifdef DEBUG
367 long t_ag_freeblks_delta; /* debugging counter */ 366 long t_ag_freeblks_delta; /* debugging counter */
368 long t_ag_flist_delta; /* debugging counter */ 367 long t_ag_flist_delta; /* debugging counter */
369 long t_ag_btree_delta; /* debugging counter */ 368 long t_ag_btree_delta; /* debugging counter */
369#endif
370 long t_dblocks_delta;/* superblock dblocks change */ 370 long t_dblocks_delta;/* superblock dblocks change */
371 long t_agcount_delta;/* superblock agcount change */ 371 long t_agcount_delta;/* superblock agcount change */
372 long t_imaxpct_delta;/* superblock imaxpct change */ 372 long t_imaxpct_delta;/* superblock imaxpct change */
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 00a6b7dc24..23cfa58377 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -2603,8 +2603,7 @@ xfs_link(
2603 vn_trace_entry(src_vp, __FUNCTION__, (inst_t *)__return_address); 2603 vn_trace_entry(src_vp, __FUNCTION__, (inst_t *)__return_address);
2604 2604
2605 target_namelen = VNAMELEN(dentry); 2605 target_namelen = VNAMELEN(dentry);
2606 if (VN_ISDIR(src_vp)) 2606 ASSERT(!VN_ISDIR(src_vp));
2607 return XFS_ERROR(EPERM);
2608 2607
2609 sip = xfs_vtoi(src_vp); 2608 sip = xfs_vtoi(src_vp);
2610 tdp = XFS_BHVTOI(target_dir_bdp); 2609 tdp = XFS_BHVTOI(target_dir_bdp);
@@ -2699,9 +2698,8 @@ xfs_link(
2699 xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE); 2698 xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
2700 2699
2701 error = xfs_bumplink(tp, sip); 2700 error = xfs_bumplink(tp, sip);
2702 if (error) { 2701 if (error)
2703 goto abort_return; 2702 goto abort_return;
2704 }
2705 2703
2706 /* 2704 /*
2707 * If this is a synchronous mount, make sure that the 2705 * If this is a synchronous mount, make sure that the
@@ -2719,9 +2717,8 @@ xfs_link(
2719 } 2717 }
2720 2718
2721 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); 2719 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
2722 if (error) { 2720 if (error)
2723 goto std_return; 2721 goto std_return;
2724 }
2725 2722
2726 /* Fall through to std_return with error = 0. */ 2723 /* Fall through to std_return with error = 0. */
2727std_return: 2724std_return:
@@ -2742,6 +2739,8 @@ std_return:
2742 xfs_trans_cancel(tp, cancel_flags); 2739 xfs_trans_cancel(tp, cancel_flags);
2743 goto std_return; 2740 goto std_return;
2744} 2741}
2742
2743
2745/* 2744/*
2746 * xfs_mkdir 2745 * xfs_mkdir
2747 * 2746 *
diff --git a/include/asm-alpha/core_t2.h b/include/asm-alpha/core_t2.h
index dba70c62a1..457c34b6eb 100644
--- a/include/asm-alpha/core_t2.h
+++ b/include/asm-alpha/core_t2.h
@@ -435,7 +435,7 @@ static inline void t2_outl(u32 b, unsigned long addr)
435 set_hae(msb); \ 435 set_hae(msb); \
436} 436}
437 437
438static spinlock_t t2_hae_lock = SPIN_LOCK_UNLOCKED; 438static DEFINE_SPINLOCK(t2_hae_lock);
439 439
440__EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr) 440__EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr)
441{ 441{
diff --git a/include/asm-arm/arch-s3c2410/regs-nand.h b/include/asm-arm/arch-s3c2410/regs-nand.h
index 7cff235e66..c1470c695c 100644
--- a/include/asm-arm/arch-s3c2410/regs-nand.h
+++ b/include/asm-arm/arch-s3c2410/regs-nand.h
@@ -39,10 +39,19 @@
39#define S3C2440_NFESTAT1 S3C2410_NFREG(0x28) 39#define S3C2440_NFESTAT1 S3C2410_NFREG(0x28)
40#define S3C2440_NFMECC0 S3C2410_NFREG(0x2C) 40#define S3C2440_NFMECC0 S3C2410_NFREG(0x2C)
41#define S3C2440_NFMECC1 S3C2410_NFREG(0x30) 41#define S3C2440_NFMECC1 S3C2410_NFREG(0x30)
42#define S3C2440_NFSECC S3C2410_NFREG(0x34) 42#define S3C2440_NFSECC S3C24E10_NFREG(0x34)
43#define S3C2440_NFSBLK S3C2410_NFREG(0x38) 43#define S3C2440_NFSBLK S3C2410_NFREG(0x38)
44#define S3C2440_NFEBLK S3C2410_NFREG(0x3C) 44#define S3C2440_NFEBLK S3C2410_NFREG(0x3C)
45 45
46#define S3C2412_NFSBLK S3C2410_NFREG(0x20)
47#define S3C2412_NFEBLK S3C2410_NFREG(0x24)
48#define S3C2412_NFSTAT S3C2410_NFREG(0x28)
49#define S3C2412_NFMECC_ERR0 S3C2410_NFREG(0x2C)
50#define S3C2412_NFMECC_ERR1 S3C2410_NFREG(0x30)
51#define S3C2412_NFMECC0 S3C2410_NFREG(0x34)
52#define S3C2412_NFMECC1 S3C2410_NFREG(0x38)
53#define S3C2412_NFSECC S3C2410_NFREG(0x3C)
54
46#define S3C2410_NFCONF_EN (1<<15) 55#define S3C2410_NFCONF_EN (1<<15)
47#define S3C2410_NFCONF_512BYTE (1<<14) 56#define S3C2410_NFCONF_512BYTE (1<<14)
48#define S3C2410_NFCONF_4STEP (1<<13) 57#define S3C2410_NFCONF_4STEP (1<<13)
@@ -77,5 +86,42 @@
77#define S3C2440_NFSTAT_RnB_CHANGE (1<<2) 86#define S3C2440_NFSTAT_RnB_CHANGE (1<<2)
78#define S3C2440_NFSTAT_ILLEGAL_ACCESS (1<<3) 87#define S3C2440_NFSTAT_ILLEGAL_ACCESS (1<<3)
79 88
89#define S3C2412_NFCONF_NANDBOOT (1<<31)
90#define S3C2412_NFCONF_ECCCLKCON (1<<30)
91#define S3C2412_NFCONF_ECC_MLC (1<<24)
92#define S3C2412_NFCONF_TACLS_MASK (7<<12) /* 1 extra bit of Tacls */
93
94#define S3C2412_NFCONT_ECC4_DIRWR (1<<18)
95#define S3C2412_NFCONT_LOCKTIGHT (1<<17)
96#define S3C2412_NFCONT_SOFTLOCK (1<<16)
97#define S3C2412_NFCONT_ECC4_ENCINT (1<<13)
98#define S3C2412_NFCONT_ECC4_DECINT (1<<12)
99#define S3C2412_NFCONT_MAIN_ECC_LOCK (1<<7)
100#define S3C2412_NFCONT_INIT_MAIN_ECC (1<<5)
101#define S3C2412_NFCONT_nFCE1 (1<<2)
102#define S3C2412_NFCONT_nFCE0 (1<<1)
103
104#define S3C2412_NFSTAT_ECC_ENCDONE (1<<7)
105#define S3C2412_NFSTAT_ECC_DECDONE (1<<6)
106#define S3C2412_NFSTAT_ILLEGAL_ACCESS (1<<5)
107#define S3C2412_NFSTAT_RnB_CHANGE (1<<4)
108#define S3C2412_NFSTAT_nFCE1 (1<<3)
109#define S3C2412_NFSTAT_nFCE0 (1<<2)
110#define S3C2412_NFSTAT_Res1 (1<<1)
111#define S3C2412_NFSTAT_READY (1<<0)
112
113#define S3C2412_NFECCERR_SERRDATA(x) (((x) >> 21) & 0xf)
114#define S3C2412_NFECCERR_SERRBIT(x) (((x) >> 18) & 0x7)
115#define S3C2412_NFECCERR_MERRDATA(x) (((x) >> 7) & 0x3ff)
116#define S3C2412_NFECCERR_MERRBIT(x) (((x) >> 4) & 0x7)
117#define S3C2412_NFECCERR_SPARE_ERR(x) (((x) >> 2) & 0x3)
118#define S3C2412_NFECCERR_MAIN_ERR(x) (((x) >> 2) & 0x3)
119#define S3C2412_NFECCERR_NONE (0)
120#define S3C2412_NFECCERR_1BIT (1)
121#define S3C2412_NFECCERR_MULTIBIT (2)
122#define S3C2412_NFECCERR_ECCAREA (3)
123
124
125
80#endif /* __ASM_ARM_REGS_NAND */ 126#endif /* __ASM_ARM_REGS_NAND */
81 127
diff --git a/include/asm-arm/assembler.h b/include/asm-arm/assembler.h
index d53bafa9bf..fce8328208 100644
--- a/include/asm-arm/assembler.h
+++ b/include/asm-arm/assembler.h
@@ -55,30 +55,6 @@
55#define PLD(code...) 55#define PLD(code...)
56#endif 56#endif
57 57
58#define MODE_USR USR_MODE
59#define MODE_FIQ FIQ_MODE
60#define MODE_IRQ IRQ_MODE
61#define MODE_SVC SVC_MODE
62
63#define DEFAULT_FIQ MODE_FIQ
64
65/*
66 * LOADREGS - ldm with PC in register list (eg, ldmfd sp!, {pc})
67 */
68#ifdef __STDC__
69#define LOADREGS(cond, base, reglist...)\
70 ldm##cond base,reglist
71#else
72#define LOADREGS(cond, base, reglist...)\
73 ldm/**/cond base,reglist
74#endif
75
76/*
77 * Build a return instruction for this processor type.
78 */
79#define RETINSTR(instr, regs...)\
80 instr regs
81
82/* 58/*
83 * Enable and disable interrupts 59 * Enable and disable interrupts
84 */ 60 */
@@ -117,18 +93,6 @@
117 msr cpsr_c, \oldcpsr 93 msr cpsr_c, \oldcpsr
118 .endm 94 .endm
119 95
120/*
121 * These two are used to save LR/restore PC over a user-based access.
122 * The old 26-bit architecture requires that we do. On 32-bit
123 * architecture, we can safely ignore this requirement.
124 */
125 .macro save_lr
126 .endm
127
128 .macro restore_pc
129 mov pc, lr
130 .endm
131
132#define USER(x...) \ 96#define USER(x...) \
1339999: x; \ 979999: x; \
134 .section __ex_table,"a"; \ 98 .section __ex_table,"a"; \
diff --git a/include/asm-arm/hardware/locomo.h b/include/asm-arm/hardware/locomo.h
index 5f10048ec5..22dfb17377 100644
--- a/include/asm-arm/hardware/locomo.h
+++ b/include/asm-arm/hardware/locomo.h
@@ -111,6 +111,8 @@
111#define LOCOMO_ALS 0x00 /* Adjust light cycle */ 111#define LOCOMO_ALS 0x00 /* Adjust light cycle */
112#define LOCOMO_ALD 0x04 /* Adjust light duty */ 112#define LOCOMO_ALD 0x04 /* Adjust light duty */
113 113
114#define LOCOMO_ALC_EN 0x8000
115
114/* Backlight controller: TFT signal */ 116/* Backlight controller: TFT signal */
115#define LOCOMO_BACKLIGHT 0x38 117#define LOCOMO_BACKLIGHT 0x38
116#define LOCOMO_TC 0x00 /* TFT control signal */ 118#define LOCOMO_TC 0x00 /* TFT control signal */
@@ -203,4 +205,7 @@ void locomo_gpio_write(struct locomo_dev *ldev, unsigned int bits, unsigned int
203/* M62332 control function */ 205/* M62332 control function */
204void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int channel); 206void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int channel);
205 207
208/* Frontlight control */
209void locomo_frontlight_set(struct locomo_dev *dev, int duty, int vr, int bpwf);
210
206#endif 211#endif
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 845cb67ad8..8ceab7bcd8 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -51,4 +51,10 @@
51 __ret; \ 51 __ret; \
52}) 52})
53 53
54#ifdef CONFIG_SMP
55# define WARN_ON_SMP(x) WARN_ON(x)
56#else
57# define WARN_ON_SMP(x) do { } while (0)
58#endif
59
54#endif 60#endif
diff --git a/include/asm-i386/alternative.h b/include/asm-i386/alternative.h
index d79e9ee10f..c61bd1a17f 100644
--- a/include/asm-i386/alternative.h
+++ b/include/asm-i386/alternative.h
@@ -5,6 +5,8 @@
5 5
6#include <asm/types.h> 6#include <asm/types.h>
7 7
8#include <linux/types.h>
9
8struct alt_instr { 10struct alt_instr {
9 u8 *instr; /* original instruction */ 11 u8 *instr; /* original instruction */
10 u8 *replacement; 12 u8 *replacement;
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
index 1d8362cb2c..2c1e371ceb 100644
--- a/include/asm-i386/apic.h
+++ b/include/asm-i386/apic.h
@@ -111,24 +111,12 @@ extern void init_apic_mappings (void);
111extern void smp_local_timer_interrupt (struct pt_regs * regs); 111extern void smp_local_timer_interrupt (struct pt_regs * regs);
112extern void setup_boot_APIC_clock (void); 112extern void setup_boot_APIC_clock (void);
113extern void setup_secondary_APIC_clock (void); 113extern void setup_secondary_APIC_clock (void);
114extern void setup_apic_nmi_watchdog (void);
115extern int reserve_lapic_nmi(void);
116extern void release_lapic_nmi(void);
117extern void disable_timer_nmi_watchdog(void);
118extern void enable_timer_nmi_watchdog(void);
119extern void nmi_watchdog_tick (struct pt_regs * regs);
120extern int APIC_init_uniprocessor (void); 114extern int APIC_init_uniprocessor (void);
121extern void disable_APIC_timer(void); 115extern void disable_APIC_timer(void);
122extern void enable_APIC_timer(void); 116extern void enable_APIC_timer(void);
123 117
124extern void enable_NMI_through_LVT0 (void * dummy); 118extern void enable_NMI_through_LVT0 (void * dummy);
125 119
126extern unsigned int nmi_watchdog;
127#define NMI_NONE 0
128#define NMI_IO_APIC 1
129#define NMI_LOCAL_APIC 2
130#define NMI_INVALID 3
131
132extern int disable_timer_pin_1; 120extern int disable_timer_pin_1;
133 121
134void smp_send_timer_broadcast_ipi(struct pt_regs *regs); 122void smp_send_timer_broadcast_ipi(struct pt_regs *regs);
diff --git a/include/asm-i386/cpu.h b/include/asm-i386/cpu.h
index e7252c216c..b1bc7b1b64 100644
--- a/include/asm-i386/cpu.h
+++ b/include/asm-i386/cpu.h
@@ -7,8 +7,6 @@
7#include <linux/nodemask.h> 7#include <linux/nodemask.h>
8#include <linux/percpu.h> 8#include <linux/percpu.h>
9 9
10#include <asm/node.h>
11
12struct i386_cpu { 10struct i386_cpu {
13 struct cpu cpu; 11 struct cpu cpu;
14}; 12};
diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h
index 3ecedbafa8..d314ebb3d5 100644
--- a/include/asm-i386/cpufeature.h
+++ b/include/asm-i386/cpufeature.h
@@ -72,6 +72,7 @@
72#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */ 72#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
73#define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ 73#define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */
74#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */ 74#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */
75#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
75 76
76/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ 77/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
77#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ 78#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
diff --git a/include/asm-i386/delay.h b/include/asm-i386/delay.h
index 456db8501c..b1c7650dc7 100644
--- a/include/asm-i386/delay.h
+++ b/include/asm-i386/delay.h
@@ -23,4 +23,6 @@ extern void __delay(unsigned long loops);
23 ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \ 23 ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
24 __ndelay(n)) 24 __ndelay(n))
25 25
26void use_tsc_delay(void);
27
26#endif /* defined(_I386_DELAY_H) */ 28#endif /* defined(_I386_DELAY_H) */
diff --git a/include/asm-i386/dwarf2.h b/include/asm-i386/dwarf2.h
new file mode 100644
index 0000000000..2280f6272f
--- /dev/null
+++ b/include/asm-i386/dwarf2.h
@@ -0,0 +1,54 @@
1#ifndef _DWARF2_H
2#define _DWARF2_H
3
4#include <linux/config.h>
5
6#ifndef __ASSEMBLY__
7#warning "asm/dwarf2.h should be only included in pure assembly files"
8#endif
9
10/*
11 Macros for dwarf2 CFI unwind table entries.
12 See "as.info" for details on these pseudo ops. Unfortunately
13 they are only supported in very new binutils, so define them
14 away for older version.
15 */
16
17#ifdef CONFIG_UNWIND_INFO
18
19#define CFI_STARTPROC .cfi_startproc
20#define CFI_ENDPROC .cfi_endproc
21#define CFI_DEF_CFA .cfi_def_cfa
22#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
23#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
24#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
25#define CFI_OFFSET .cfi_offset
26#define CFI_REL_OFFSET .cfi_rel_offset
27#define CFI_REGISTER .cfi_register
28#define CFI_RESTORE .cfi_restore
29#define CFI_REMEMBER_STATE .cfi_remember_state
30#define CFI_RESTORE_STATE .cfi_restore_state
31
32#else
33
34/* Due to the structure of pre-exisiting code, don't use assembler line
35 comment character # to ignore the arguments. Instead, use a dummy macro. */
36.macro ignore a=0, b=0, c=0, d=0
37.endm
38
39#define CFI_STARTPROC ignore
40#define CFI_ENDPROC ignore
41#define CFI_DEF_CFA ignore
42#define CFI_DEF_CFA_REGISTER ignore
43#define CFI_DEF_CFA_OFFSET ignore
44#define CFI_ADJUST_CFA_OFFSET ignore
45#define CFI_OFFSET ignore
46#define CFI_REL_OFFSET ignore
47#define CFI_REGISTER ignore
48#define CFI_RESTORE ignore
49#define CFI_REMEMBER_STATE ignore
50#define CFI_RESTORE_STATE ignore
51
52#endif
53
54#endif
diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h
index 4153d80e4d..1eac92cb5b 100644
--- a/include/asm-i386/elf.h
+++ b/include/asm-i386/elf.h
@@ -10,6 +10,7 @@
10#include <asm/processor.h> 10#include <asm/processor.h>
11#include <asm/system.h> /* for savesegment */ 11#include <asm/system.h> /* for savesegment */
12#include <asm/auxvec.h> 12#include <asm/auxvec.h>
13#include <asm/desc.h>
13 14
14#include <linux/utsname.h> 15#include <linux/utsname.h>
15 16
@@ -129,15 +130,41 @@ extern int dump_task_extended_fpu (struct task_struct *, struct user_fxsr_struct
129#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs) 130#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
130#define ELF_CORE_COPY_XFPREGS(tsk, elf_xfpregs) dump_task_extended_fpu(tsk, elf_xfpregs) 131#define ELF_CORE_COPY_XFPREGS(tsk, elf_xfpregs) dump_task_extended_fpu(tsk, elf_xfpregs)
131 132
132#define VSYSCALL_BASE (__fix_to_virt(FIX_VSYSCALL)) 133#define VDSO_HIGH_BASE (__fix_to_virt(FIX_VDSO))
133#define VSYSCALL_EHDR ((const struct elfhdr *) VSYSCALL_BASE) 134#define VDSO_BASE ((unsigned long)current->mm->context.vdso)
134#define VSYSCALL_ENTRY ((unsigned long) &__kernel_vsyscall) 135
136#ifdef CONFIG_COMPAT_VDSO
137# define VDSO_COMPAT_BASE VDSO_HIGH_BASE
138# define VDSO_PRELINK VDSO_HIGH_BASE
139#else
140# define VDSO_COMPAT_BASE VDSO_BASE
141# define VDSO_PRELINK 0
142#endif
143
144#define VDSO_COMPAT_SYM(x) \
145 (VDSO_COMPAT_BASE + (unsigned long)(x) - VDSO_PRELINK)
146
147#define VDSO_SYM(x) \
148 (VDSO_BASE + (unsigned long)(x) - VDSO_PRELINK)
149
150#define VDSO_HIGH_EHDR ((const struct elfhdr *) VDSO_HIGH_BASE)
151#define VDSO_EHDR ((const struct elfhdr *) VDSO_COMPAT_BASE)
152
135extern void __kernel_vsyscall; 153extern void __kernel_vsyscall;
136 154
155#define VDSO_ENTRY VDSO_SYM(&__kernel_vsyscall)
156
157#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
158struct linux_binprm;
159extern int arch_setup_additional_pages(struct linux_binprm *bprm,
160 int executable_stack);
161
162extern unsigned int vdso_enabled;
163
137#define ARCH_DLINFO \ 164#define ARCH_DLINFO \
138do { \ 165do if (vdso_enabled) { \
139 NEW_AUX_ENT(AT_SYSINFO, VSYSCALL_ENTRY); \ 166 NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \
140 NEW_AUX_ENT(AT_SYSINFO_EHDR, VSYSCALL_BASE); \ 167 NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_COMPAT_BASE); \
141} while (0) 168} while (0)
142 169
143/* 170/*
@@ -148,15 +175,15 @@ do { \
148 * Dumping its extra ELF program headers includes all the other information 175 * Dumping its extra ELF program headers includes all the other information
149 * a debugger needs to easily find how the vsyscall DSO was being used. 176 * a debugger needs to easily find how the vsyscall DSO was being used.
150 */ 177 */
151#define ELF_CORE_EXTRA_PHDRS (VSYSCALL_EHDR->e_phnum) 178#define ELF_CORE_EXTRA_PHDRS (VDSO_HIGH_EHDR->e_phnum)
152#define ELF_CORE_WRITE_EXTRA_PHDRS \ 179#define ELF_CORE_WRITE_EXTRA_PHDRS \
153do { \ 180do { \
154 const struct elf_phdr *const vsyscall_phdrs = \ 181 const struct elf_phdr *const vsyscall_phdrs = \
155 (const struct elf_phdr *) (VSYSCALL_BASE \ 182 (const struct elf_phdr *) (VDSO_HIGH_BASE \
156 + VSYSCALL_EHDR->e_phoff); \ 183 + VDSO_HIGH_EHDR->e_phoff); \
157 int i; \ 184 int i; \
158 Elf32_Off ofs = 0; \ 185 Elf32_Off ofs = 0; \
159 for (i = 0; i < VSYSCALL_EHDR->e_phnum; ++i) { \ 186 for (i = 0; i < VDSO_HIGH_EHDR->e_phnum; ++i) { \
160 struct elf_phdr phdr = vsyscall_phdrs[i]; \ 187 struct elf_phdr phdr = vsyscall_phdrs[i]; \
161 if (phdr.p_type == PT_LOAD) { \ 188 if (phdr.p_type == PT_LOAD) { \
162 BUG_ON(ofs != 0); \ 189 BUG_ON(ofs != 0); \
@@ -174,10 +201,10 @@ do { \
174#define ELF_CORE_WRITE_EXTRA_DATA \ 201#define ELF_CORE_WRITE_EXTRA_DATA \
175do { \ 202do { \
176 const struct elf_phdr *const vsyscall_phdrs = \ 203 const struct elf_phdr *const vsyscall_phdrs = \
177 (const struct elf_phdr *) (VSYSCALL_BASE \ 204 (const struct elf_phdr *) (VDSO_HIGH_BASE \
178 + VSYSCALL_EHDR->e_phoff); \ 205 + VDSO_HIGH_EHDR->e_phoff); \
179 int i; \ 206 int i; \
180 for (i = 0; i < VSYSCALL_EHDR->e_phnum; ++i) { \ 207 for (i = 0; i < VDSO_HIGH_EHDR->e_phnum; ++i) { \
181 if (vsyscall_phdrs[i].p_type == PT_LOAD) \ 208 if (vsyscall_phdrs[i].p_type == PT_LOAD) \
182 DUMP_WRITE((void *) vsyscall_phdrs[i].p_vaddr, \ 209 DUMP_WRITE((void *) vsyscall_phdrs[i].p_vaddr, \
183 PAGE_ALIGN(vsyscall_phdrs[i].p_memsz)); \ 210 PAGE_ALIGN(vsyscall_phdrs[i].p_memsz)); \
diff --git a/include/asm-i386/fixmap.h b/include/asm-i386/fixmap.h
index f7e068f4d2..a48cc3f7cc 100644
--- a/include/asm-i386/fixmap.h
+++ b/include/asm-i386/fixmap.h
@@ -51,7 +51,7 @@
51 */ 51 */
52enum fixed_addresses { 52enum fixed_addresses {
53 FIX_HOLE, 53 FIX_HOLE,
54 FIX_VSYSCALL, 54 FIX_VDSO,
55#ifdef CONFIG_X86_LOCAL_APIC 55#ifdef CONFIG_X86_LOCAL_APIC
56 FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ 56 FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
57#endif 57#endif
@@ -115,14 +115,6 @@ extern void __set_fixmap (enum fixed_addresses idx,
115#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) 115#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
116#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) 116#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
117 117
118/*
119 * This is the range that is readable by user mode, and things
120 * acting like user mode such as get_user_pages.
121 */
122#define FIXADDR_USER_START (__fix_to_virt(FIX_VSYSCALL))
123#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
124
125
126extern void __this_fixmap_does_not_exist(void); 118extern void __this_fixmap_does_not_exist(void);
127 119
128/* 120/*
diff --git a/include/asm-i386/hw_irq.h b/include/asm-i386/hw_irq.h
index 95d3fd0902..a4c0a5a9ff 100644
--- a/include/asm-i386/hw_irq.h
+++ b/include/asm-i386/hw_irq.h
@@ -19,6 +19,8 @@
19 19
20struct hw_interrupt_type; 20struct hw_interrupt_type;
21 21
22#define NMI_VECTOR 0x02
23
22/* 24/*
23 * Various low-level irq details needed by irq.c, process.c, 25 * Various low-level irq details needed by irq.c, process.c,
24 * time.c, io_apic.c and smp.c 26 * time.c, io_apic.c and smp.c
diff --git a/include/asm-i386/intel_arch_perfmon.h b/include/asm-i386/intel_arch_perfmon.h
new file mode 100644
index 0000000000..134ea9cc52
--- /dev/null
+++ b/include/asm-i386/intel_arch_perfmon.h
@@ -0,0 +1,19 @@
1#ifndef X86_INTEL_ARCH_PERFMON_H
2#define X86_INTEL_ARCH_PERFMON_H 1
3
4#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
5#define MSR_ARCH_PERFMON_PERFCTR1 0xc2
6
7#define MSR_ARCH_PERFMON_EVENTSEL0 0x186
8#define MSR_ARCH_PERFMON_EVENTSEL1 0x187
9
10#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22)
11#define ARCH_PERFMON_EVENTSEL_INT (1 << 20)
12#define ARCH_PERFMON_EVENTSEL_OS (1 << 17)
13#define ARCH_PERFMON_EVENTSEL_USR (1 << 16)
14
15#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL (0x3c)
16#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
17#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT (1 << 0)
18
19#endif /* X86_INTEL_ARCH_PERFMON_H */
diff --git a/include/asm-i386/k8.h b/include/asm-i386/k8.h
new file mode 100644
index 0000000000..dfd88a6e60
--- /dev/null
+++ b/include/asm-i386/k8.h
@@ -0,0 +1 @@
#include <asm-x86_64/k8.h>
diff --git a/include/asm-i386/kdebug.h b/include/asm-i386/kdebug.h
index 96d0828ce0..d18cdb9fc9 100644
--- a/include/asm-i386/kdebug.h
+++ b/include/asm-i386/kdebug.h
@@ -19,6 +19,8 @@ struct die_args {
19 19
20extern int register_die_notifier(struct notifier_block *); 20extern int register_die_notifier(struct notifier_block *);
21extern int unregister_die_notifier(struct notifier_block *); 21extern int unregister_die_notifier(struct notifier_block *);
22extern int register_page_fault_notifier(struct notifier_block *);
23extern int unregister_page_fault_notifier(struct notifier_block *);
22extern struct atomic_notifier_head i386die_chain; 24extern struct atomic_notifier_head i386die_chain;
23 25
24 26
diff --git a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h
index 57d157c5cf..0730a20f6d 100644
--- a/include/asm-i386/kprobes.h
+++ b/include/asm-i386/kprobes.h
@@ -44,6 +44,7 @@ typedef u8 kprobe_opcode_t;
44 44
45#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry 45#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry
46#define ARCH_SUPPORTS_KRETPROBES 46#define ARCH_SUPPORTS_KRETPROBES
47#define ARCH_INACTIVE_KPROBE_COUNT 0
47 48
48void arch_remove_kprobe(struct kprobe *p); 49void arch_remove_kprobe(struct kprobe *p);
49void kretprobe_trampoline(void); 50void kretprobe_trampoline(void);
diff --git a/include/asm-i386/local.h b/include/asm-i386/local.h
index e67fa08260..3b4998c51d 100644
--- a/include/asm-i386/local.h
+++ b/include/asm-i386/local.h
@@ -55,12 +55,26 @@ static __inline__ void local_sub(long i, local_t *v)
55 * much more efficient than these naive implementations. Note they take 55 * much more efficient than these naive implementations. Note they take
56 * a variable, not an address. 56 * a variable, not an address.
57 */ 57 */
58#define cpu_local_read(v) local_read(&__get_cpu_var(v)) 58
59#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i)) 59/* Need to disable preemption for the cpu local counters otherwise we could
60#define cpu_local_inc(v) local_inc(&__get_cpu_var(v)) 60 still access a variable of a previous CPU in a non atomic way. */
61#define cpu_local_dec(v) local_dec(&__get_cpu_var(v)) 61#define cpu_local_wrap_v(v) \
62#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v)) 62 ({ local_t res__; \
63#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v)) 63 preempt_disable(); \
64 res__ = (v); \
65 preempt_enable(); \
66 res__; })
67#define cpu_local_wrap(v) \
68 ({ preempt_disable(); \
69 v; \
70 preempt_enable(); }) \
71
72#define cpu_local_read(v) cpu_local_wrap_v(local_read(&__get_cpu_var(v)))
73#define cpu_local_set(v, i) cpu_local_wrap(local_set(&__get_cpu_var(v), (i)))
74#define cpu_local_inc(v) cpu_local_wrap(local_inc(&__get_cpu_var(v)))
75#define cpu_local_dec(v) cpu_local_wrap(local_dec(&__get_cpu_var(v)))
76#define cpu_local_add(i, v) cpu_local_wrap(local_add((i), &__get_cpu_var(v)))
77#define cpu_local_sub(i, v) cpu_local_wrap(local_sub((i), &__get_cpu_var(v)))
64 78
65#define __cpu_local_inc(v) cpu_local_inc(v) 79#define __cpu_local_inc(v) cpu_local_inc(v)
66#define __cpu_local_dec(v) cpu_local_dec(v) 80#define __cpu_local_dec(v) cpu_local_dec(v)
diff --git a/include/asm-i386/mach-default/mach_ipi.h b/include/asm-i386/mach-default/mach_ipi.h
index a1d0072e36..0dba244c86 100644
--- a/include/asm-i386/mach-default/mach_ipi.h
+++ b/include/asm-i386/mach-default/mach_ipi.h
@@ -1,6 +1,9 @@
1#ifndef __ASM_MACH_IPI_H 1#ifndef __ASM_MACH_IPI_H
2#define __ASM_MACH_IPI_H 2#define __ASM_MACH_IPI_H
3 3
4/* Avoid include hell */
5#define NMI_VECTOR 0x02
6
4void send_IPI_mask_bitmask(cpumask_t mask, int vector); 7void send_IPI_mask_bitmask(cpumask_t mask, int vector);
5void __send_IPI_shortcut(unsigned int shortcut, int vector); 8void __send_IPI_shortcut(unsigned int shortcut, int vector);
6 9
@@ -13,7 +16,7 @@ static inline void send_IPI_mask(cpumask_t mask, int vector)
13 16
14static inline void __local_send_IPI_allbutself(int vector) 17static inline void __local_send_IPI_allbutself(int vector)
15{ 18{
16 if (no_broadcast) { 19 if (no_broadcast || vector == NMI_VECTOR) {
17 cpumask_t mask = cpu_online_map; 20 cpumask_t mask = cpu_online_map;
18 21
19 cpu_clear(smp_processor_id(), mask); 22 cpu_clear(smp_processor_id(), mask);
@@ -24,7 +27,7 @@ static inline void __local_send_IPI_allbutself(int vector)
24 27
25static inline void __local_send_IPI_all(int vector) 28static inline void __local_send_IPI_all(int vector)
26{ 29{
27 if (no_broadcast) 30 if (no_broadcast || vector == NMI_VECTOR)
28 send_IPI_mask(cpu_online_map, vector); 31 send_IPI_mask(cpu_online_map, vector);
29 else 32 else
30 __send_IPI_shortcut(APIC_DEST_ALLINC, vector); 33 __send_IPI_shortcut(APIC_DEST_ALLINC, vector);
diff --git a/include/asm-i386/mach-default/mach_timer.h b/include/asm-i386/mach-default/mach_timer.h
index 4b9703bb02..807992fd41 100644
--- a/include/asm-i386/mach-default/mach_timer.h
+++ b/include/asm-i386/mach-default/mach_timer.h
@@ -15,7 +15,9 @@
15#ifndef _MACH_TIMER_H 15#ifndef _MACH_TIMER_H
16#define _MACH_TIMER_H 16#define _MACH_TIMER_H
17 17
18#define CALIBRATE_LATCH (5 * LATCH) 18#define CALIBRATE_TIME_MSEC 30 /* 30 msecs */
19#define CALIBRATE_LATCH \
20 ((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000)
19 21
20static inline void mach_prepare_counter(void) 22static inline void mach_prepare_counter(void)
21{ 23{
diff --git a/include/asm-i386/mach-summit/mach_mpparse.h b/include/asm-i386/mach-summit/mach_mpparse.h
index 1cce2b924a..9426839917 100644
--- a/include/asm-i386/mach-summit/mach_mpparse.h
+++ b/include/asm-i386/mach-summit/mach_mpparse.h
@@ -2,6 +2,7 @@
2#define __ASM_MACH_MPPARSE_H 2#define __ASM_MACH_MPPARSE_H
3 3
4#include <mach_apic.h> 4#include <mach_apic.h>
5#include <asm/tsc.h>
5 6
6extern int use_cyclone; 7extern int use_cyclone;
7 8
@@ -29,6 +30,7 @@ static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
29 (!strncmp(productid, "VIGIL SMP", 9) 30 (!strncmp(productid, "VIGIL SMP", 9)
30 || !strncmp(productid, "EXA", 3) 31 || !strncmp(productid, "EXA", 3)
31 || !strncmp(productid, "RUTHLESS SMP", 12))){ 32 || !strncmp(productid, "RUTHLESS SMP", 12))){
33 mark_tsc_unstable();
32 use_cyclone = 1; /*enable cyclone-timer*/ 34 use_cyclone = 1; /*enable cyclone-timer*/
33 setup_summit(); 35 setup_summit();
34 return 1; 36 return 1;
@@ -42,6 +44,7 @@ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
42 if (!strncmp(oem_id, "IBM", 3) && 44 if (!strncmp(oem_id, "IBM", 3) &&
43 (!strncmp(oem_table_id, "SERVIGIL", 8) 45 (!strncmp(oem_table_id, "SERVIGIL", 8)
44 || !strncmp(oem_table_id, "EXA", 3))){ 46 || !strncmp(oem_table_id, "EXA", 3))){
47 mark_tsc_unstable();
45 use_cyclone = 1; /*enable cyclone-timer*/ 48 use_cyclone = 1; /*enable cyclone-timer*/
46 setup_summit(); 49 setup_summit();
47 return 1; 50 return 1;
diff --git a/include/asm-i386/mmu.h b/include/asm-i386/mmu.h
index f431a0b86d..8358dd3df7 100644
--- a/include/asm-i386/mmu.h
+++ b/include/asm-i386/mmu.h
@@ -12,6 +12,7 @@ typedef struct {
12 int size; 12 int size;
13 struct semaphore sem; 13 struct semaphore sem;
14 void *ldt; 14 void *ldt;
15 void *vdso;
15} mm_context_t; 16} mm_context_t;
16 17
17#endif 18#endif
diff --git a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h
index 21f16638fc..67d9947999 100644
--- a/include/asm-i386/nmi.h
+++ b/include/asm-i386/nmi.h
@@ -5,24 +5,38 @@
5#define ASM_NMI_H 5#define ASM_NMI_H
6 6
7#include <linux/pm.h> 7#include <linux/pm.h>
8 8
9struct pt_regs; 9struct pt_regs;
10 10
11typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu); 11typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu);
12 12
13/** 13/**
14 * set_nmi_callback 14 * set_nmi_callback
15 * 15 *
16 * Set a handler for an NMI. Only one handler may be 16 * Set a handler for an NMI. Only one handler may be
17 * set. Return 1 if the NMI was handled. 17 * set. Return 1 if the NMI was handled.
18 */ 18 */
19void set_nmi_callback(nmi_callback_t callback); 19void set_nmi_callback(nmi_callback_t callback);
20 20
21/** 21/**
22 * unset_nmi_callback 22 * unset_nmi_callback
23 * 23 *
24 * Remove the handler previously set. 24 * Remove the handler previously set.
25 */ 25 */
26void unset_nmi_callback(void); 26void unset_nmi_callback(void);
27 27
28extern void setup_apic_nmi_watchdog (void);
29extern int reserve_lapic_nmi(void);
30extern void release_lapic_nmi(void);
31extern void disable_timer_nmi_watchdog(void);
32extern void enable_timer_nmi_watchdog(void);
33extern void nmi_watchdog_tick (struct pt_regs * regs);
34
35extern unsigned int nmi_watchdog;
36#define NMI_DEFAULT -1
37#define NMI_NONE 0
38#define NMI_IO_APIC 1
39#define NMI_LOCAL_APIC 2
40#define NMI_INVALID 3
41
28#endif /* ASM_NMI_H */ 42#endif /* ASM_NMI_H */
diff --git a/include/asm-i386/node.h b/include/asm-i386/node.h
deleted file mode 100644
index e13c6ffa72..0000000000
--- a/include/asm-i386/node.h
+++ /dev/null
@@ -1,29 +0,0 @@
1#ifndef _ASM_I386_NODE_H_
2#define _ASM_I386_NODE_H_
3
4#include <linux/device.h>
5#include <linux/mmzone.h>
6#include <linux/node.h>
7#include <linux/topology.h>
8#include <linux/nodemask.h>
9
10struct i386_node {
11 struct node node;
12};
13extern struct i386_node node_devices[MAX_NUMNODES];
14
15static inline int arch_register_node(int num){
16 int p_node;
17 struct node *parent = NULL;
18
19 if (!node_online(num))
20 return 0;
21 p_node = parent_node(num);
22
23 if (p_node != num)
24 parent = &node_devices[p_node].node;
25
26 return register_node(&node_devices[num].node, num, parent);
27}
28
29#endif /* _ASM_I386_NODE_H_ */
diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h
index e3a552fa55..f5bf544c72 100644
--- a/include/asm-i386/page.h
+++ b/include/asm-i386/page.h
@@ -96,6 +96,8 @@ typedef struct { unsigned long pgprot; } pgprot_t;
96 96
97#ifndef __ASSEMBLY__ 97#ifndef __ASSEMBLY__
98 98
99struct vm_area_struct;
100
99/* 101/*
100 * This much address space is reserved for vmalloc() and iomap() 102 * This much address space is reserved for vmalloc() and iomap()
101 * as well as fixmap mappings. 103 * as well as fixmap mappings.
@@ -139,6 +141,7 @@ extern int page_is_ram(unsigned long pagenr);
139#include <asm-generic/memory_model.h> 141#include <asm-generic/memory_model.h>
140#include <asm-generic/page.h> 142#include <asm-generic/page.h>
141 143
144#define __HAVE_ARCH_GATE_AREA 1
142#endif /* __KERNEL__ */ 145#endif /* __KERNEL__ */
143 146
144#endif /* _I386_PAGE_H */ 147#endif /* _I386_PAGE_H */
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 0c83cf12ee..b32346d62e 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -71,8 +71,12 @@ struct cpuinfo_x86 {
71 cpumask_t llc_shared_map; /* cpus sharing the last level cache */ 71 cpumask_t llc_shared_map; /* cpus sharing the last level cache */
72#endif 72#endif
73 unsigned char x86_max_cores; /* cpuid returned max cores value */ 73 unsigned char x86_max_cores; /* cpuid returned max cores value */
74 unsigned char booted_cores; /* number of cores as seen by OS */
75 unsigned char apicid; 74 unsigned char apicid;
75#ifdef CONFIG_SMP
76 unsigned char booted_cores; /* number of cores as seen by OS */
77 __u8 phys_proc_id; /* Physical processor id. */
78 __u8 cpu_core_id; /* Core id */
79#endif
76} __attribute__((__aligned__(SMP_CACHE_BYTES))); 80} __attribute__((__aligned__(SMP_CACHE_BYTES)));
77 81
78#define X86_VENDOR_INTEL 0 82#define X86_VENDOR_INTEL 0
@@ -104,14 +108,13 @@ extern struct cpuinfo_x86 cpu_data[];
104#define current_cpu_data boot_cpu_data 108#define current_cpu_data boot_cpu_data
105#endif 109#endif
106 110
107extern int phys_proc_id[NR_CPUS];
108extern int cpu_core_id[NR_CPUS];
109extern int cpu_llc_id[NR_CPUS]; 111extern int cpu_llc_id[NR_CPUS];
110extern char ignore_fpu_irq; 112extern char ignore_fpu_irq;
111 113
112extern void identify_cpu(struct cpuinfo_x86 *); 114extern void identify_cpu(struct cpuinfo_x86 *);
113extern void print_cpu_info(struct cpuinfo_x86 *); 115extern void print_cpu_info(struct cpuinfo_x86 *);
114extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); 116extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
117extern unsigned short num_cache_leaves;
115 118
116#ifdef CONFIG_X86_HT 119#ifdef CONFIG_X86_HT
117extern void detect_ht(struct cpuinfo_x86 *c); 120extern void detect_ht(struct cpuinfo_x86 *c);
@@ -554,7 +557,7 @@ extern void prepare_to_copy(struct task_struct *tsk);
554extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); 557extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
555 558
556extern unsigned long thread_saved_pc(struct task_struct *tsk); 559extern unsigned long thread_saved_pc(struct task_struct *tsk);
557void show_trace(struct task_struct *task, unsigned long *stack); 560void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack);
558 561
559unsigned long get_wchan(struct task_struct *p); 562unsigned long get_wchan(struct task_struct *p);
560 563
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 0249f912a2..cab0180567 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -427,7 +427,7 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l
427 * does not enforce ordering, since there is no data dependency between 427 * does not enforce ordering, since there is no data dependency between
428 * the read of "a" and the read of "b". Therefore, on some CPUs, such 428 * the read of "a" and the read of "b". Therefore, on some CPUs, such
429 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() 429 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
430 * in cases like thiswhere there are no data dependencies. 430 * in cases like this where there are no data dependencies.
431 **/ 431 **/
432 432
433#define read_barrier_depends() do { } while(0) 433#define read_barrier_depends() do { } while(0)
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index 8420ed1249..2833fa2c0d 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -37,6 +37,7 @@ struct thread_info {
37 0-0xBFFFFFFF for user-thead 37 0-0xBFFFFFFF for user-thead
38 0-0xFFFFFFFF for kernel-thread 38 0-0xFFFFFFFF for kernel-thread
39 */ 39 */
40 void *sysenter_return;
40 struct restart_block restart_block; 41 struct restart_block restart_block;
41 42
42 unsigned long previous_esp; /* ESP of the previous stack in case 43 unsigned long previous_esp; /* ESP of the previous stack in case
@@ -83,17 +84,15 @@ struct thread_info {
83#define init_stack (init_thread_union.stack) 84#define init_stack (init_thread_union.stack)
84 85
85 86
87/* how to get the current stack pointer from C */
88register unsigned long current_stack_pointer asm("esp") __attribute_used__;
89
86/* how to get the thread information struct from C */ 90/* how to get the thread information struct from C */
87static inline struct thread_info *current_thread_info(void) 91static inline struct thread_info *current_thread_info(void)
88{ 92{
89 struct thread_info *ti; 93 return (struct thread_info *)(current_stack_pointer & ~(THREAD_SIZE - 1));
90 __asm__("andl %%esp,%0; ":"=r" (ti) : "0" (~(THREAD_SIZE - 1)));
91 return ti;
92} 94}
93 95
94/* how to get the current stack pointer from C */
95register unsigned long current_stack_pointer asm("esp") __attribute_used__;
96
97/* thread information allocation */ 96/* thread information allocation */
98#ifdef CONFIG_DEBUG_STACK_USAGE 97#ifdef CONFIG_DEBUG_STACK_USAGE
99#define alloc_thread_info(tsk) \ 98#define alloc_thread_info(tsk) \
@@ -140,8 +139,7 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
140#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ 139#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
141#define TIF_SECCOMP 8 /* secure computing */ 140#define TIF_SECCOMP 8 /* secure computing */
142#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */ 141#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
143#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ 142#define TIF_MEMDIE 16
144#define TIF_MEMDIE 17
145 143
146#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 144#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
147#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 145#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -153,7 +151,6 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
153#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) 151#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
154#define _TIF_SECCOMP (1<<TIF_SECCOMP) 152#define _TIF_SECCOMP (1<<TIF_SECCOMP)
155#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) 153#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
156#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
157 154
158/* work to do on interrupt/exception return */ 155/* work to do on interrupt/exception return */
159#define _TIF_WORK_MASK \ 156#define _TIF_WORK_MASK \
@@ -170,6 +167,9 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
170 * have to worry about atomic accesses. 167 * have to worry about atomic accesses.
171 */ 168 */
172#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */ 169#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
170#define TS_POLLING 0x0002 /* True if in idle loop and not sleeping */
171
172#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING)
173 173
174#endif /* __KERNEL__ */ 174#endif /* __KERNEL__ */
175 175
diff --git a/include/asm-i386/timer.h b/include/asm-i386/timer.h
index aed1643747..d0ebd05f85 100644
--- a/include/asm-i386/timer.h
+++ b/include/asm-i386/timer.h
@@ -3,68 +3,11 @@
3#include <linux/init.h> 3#include <linux/init.h>
4#include <linux/pm.h> 4#include <linux/pm.h>
5 5
6/**
7 * struct timer_ops - used to define a timer source
8 *
9 * @name: name of the timer.
10 * @init: Probes and initializes the timer. Takes clock= override
11 * string as an argument. Returns 0 on success, anything else
12 * on failure.
13 * @mark_offset: called by the timer interrupt.
14 * @get_offset: called by gettimeofday(). Returns the number of microseconds
15 * since the last timer interupt.
16 * @monotonic_clock: returns the number of nanoseconds since the init of the
17 * timer.
18 * @delay: delays this many clock cycles.
19 */
20struct timer_opts {
21 char* name;
22 void (*mark_offset)(void);
23 unsigned long (*get_offset)(void);
24 unsigned long long (*monotonic_clock)(void);
25 void (*delay)(unsigned long);
26 unsigned long (*read_timer)(void);
27 int (*suspend)(pm_message_t state);
28 int (*resume)(void);
29};
30
31struct init_timer_opts {
32 int (*init)(char *override);
33 struct timer_opts *opts;
34};
35
36#define TICK_SIZE (tick_nsec / 1000) 6#define TICK_SIZE (tick_nsec / 1000)
37
38extern struct timer_opts* __init select_timer(void);
39extern void clock_fallback(void);
40void setup_pit_timer(void); 7void setup_pit_timer(void);
41
42/* Modifiers for buggy PIT handling */ 8/* Modifiers for buggy PIT handling */
43
44extern int pit_latch_buggy; 9extern int pit_latch_buggy;
45
46extern struct timer_opts *cur_timer;
47extern int timer_ack; 10extern int timer_ack;
48
49/* list of externed timers */
50extern struct timer_opts timer_none;
51extern struct timer_opts timer_pit;
52extern struct init_timer_opts timer_pit_init;
53extern struct init_timer_opts timer_tsc_init;
54#ifdef CONFIG_X86_CYCLONE_TIMER
55extern struct init_timer_opts timer_cyclone_init;
56#endif
57
58extern unsigned long calibrate_tsc(void);
59extern unsigned long read_timer_tsc(void);
60extern void init_cpu_khz(void);
61extern int recalibrate_cpu_khz(void); 11extern int recalibrate_cpu_khz(void);
62#ifdef CONFIG_HPET_TIMER
63extern struct init_timer_opts timer_hpet_init;
64extern unsigned long calibrate_tsc_hpet(unsigned long *tsc_hpet_quotient_ptr);
65#endif
66 12
67#ifdef CONFIG_X86_PM_TIMER
68extern struct init_timer_opts timer_pmtmr_init;
69#endif
70#endif 13#endif
diff --git a/include/asm-i386/timex.h b/include/asm-i386/timex.h
index d434984303..3666044409 100644
--- a/include/asm-i386/timex.h
+++ b/include/asm-i386/timex.h
@@ -7,6 +7,7 @@
7#define _ASMi386_TIMEX_H 7#define _ASMi386_TIMEX_H
8 8
9#include <asm/processor.h> 9#include <asm/processor.h>
10#include <asm/tsc.h>
10 11
11#ifdef CONFIG_X86_ELAN 12#ifdef CONFIG_X86_ELAN
12# define CLOCK_TICK_RATE 1189200 /* AMD Elan has different frequency! */ 13# define CLOCK_TICK_RATE 1189200 /* AMD Elan has different frequency! */
@@ -15,39 +16,6 @@
15#endif 16#endif
16 17
17 18
18/*
19 * Standard way to access the cycle counter on i586+ CPUs.
20 * Currently only used on SMP.
21 *
22 * If you really have a SMP machine with i486 chips or older,
23 * compile for that, and this will just always return zero.
24 * That's ok, it just means that the nicer scheduling heuristics
25 * won't work for you.
26 *
27 * We only use the low 32 bits, and we'd simply better make sure
28 * that we reschedule before that wraps. Scheduling at least every
29 * four billion cycles just basically sounds like a good idea,
30 * regardless of how fast the machine is.
31 */
32typedef unsigned long long cycles_t;
33
34static inline cycles_t get_cycles (void)
35{
36 unsigned long long ret=0;
37
38#ifndef CONFIG_X86_TSC
39 if (!cpu_has_tsc)
40 return 0;
41#endif
42
43#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC)
44 rdtscll(ret);
45#endif
46 return ret;
47}
48
49extern unsigned int cpu_khz;
50
51extern int read_current_timer(unsigned long *timer_value); 19extern int read_current_timer(unsigned long *timer_value);
52#define ARCH_HAS_READ_CURRENT_TIMER 1 20#define ARCH_HAS_READ_CURRENT_TIMER 1
53 21
diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h
index b94e5eeef9..6adbd9b1ae 100644
--- a/include/asm-i386/topology.h
+++ b/include/asm-i386/topology.h
@@ -28,10 +28,8 @@
28#define _ASM_I386_TOPOLOGY_H 28#define _ASM_I386_TOPOLOGY_H
29 29
30#ifdef CONFIG_X86_HT 30#ifdef CONFIG_X86_HT
31#define topology_physical_package_id(cpu) \ 31#define topology_physical_package_id(cpu) (cpu_data[cpu].phys_proc_id)
32 (phys_proc_id[cpu] == BAD_APICID ? -1 : phys_proc_id[cpu]) 32#define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id)
33#define topology_core_id(cpu) \
34 (cpu_core_id[cpu] == BAD_APICID ? 0 : cpu_core_id[cpu])
35#define topology_core_siblings(cpu) (cpu_core_map[cpu]) 33#define topology_core_siblings(cpu) (cpu_core_map[cpu])
36#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) 34#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
37#endif 35#endif
@@ -114,4 +112,9 @@ extern unsigned long node_remap_size[];
114 112
115extern cpumask_t cpu_coregroup_map(int cpu); 113extern cpumask_t cpu_coregroup_map(int cpu);
116 114
115#ifdef CONFIG_SMP
116#define mc_capable() (boot_cpu_data.x86_max_cores > 1)
117#define smt_capable() (smp_num_siblings > 1)
118#endif
119
117#endif /* _ASM_I386_TOPOLOGY_H */ 120#endif /* _ASM_I386_TOPOLOGY_H */
diff --git a/include/asm-i386/tsc.h b/include/asm-i386/tsc.h
new file mode 100644
index 0000000000..97b828ce31
--- /dev/null
+++ b/include/asm-i386/tsc.h
@@ -0,0 +1,49 @@
1/*
2 * linux/include/asm-i386/tsc.h
3 *
4 * i386 TSC related functions
5 */
6#ifndef _ASM_i386_TSC_H
7#define _ASM_i386_TSC_H
8
9#include <linux/config.h>
10#include <asm/processor.h>
11
12/*
13 * Standard way to access the cycle counter on i586+ CPUs.
14 * Currently only used on SMP.
15 *
16 * If you really have a SMP machine with i486 chips or older,
17 * compile for that, and this will just always return zero.
18 * That's ok, it just means that the nicer scheduling heuristics
19 * won't work for you.
20 *
21 * We only use the low 32 bits, and we'd simply better make sure
22 * that we reschedule before that wraps. Scheduling at least every
23 * four billion cycles just basically sounds like a good idea,
24 * regardless of how fast the machine is.
25 */
26typedef unsigned long long cycles_t;
27
28extern unsigned int cpu_khz;
29extern unsigned int tsc_khz;
30
31static inline cycles_t get_cycles(void)
32{
33 unsigned long long ret = 0;
34
35#ifndef CONFIG_X86_TSC
36 if (!cpu_has_tsc)
37 return 0;
38#endif
39
40#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC)
41 rdtscll(ret);
42#endif
43 return ret;
44}
45
46extern void tsc_init(void);
47extern void mark_tsc_unstable(void);
48
49#endif
diff --git a/include/asm-i386/unwind.h b/include/asm-i386/unwind.h
new file mode 100644
index 0000000000..69f0f1df67
--- /dev/null
+++ b/include/asm-i386/unwind.h
@@ -0,0 +1,98 @@
1#ifndef _ASM_I386_UNWIND_H
2#define _ASM_I386_UNWIND_H
3
4/*
5 * Copyright (C) 2002-2006 Novell, Inc.
6 * Jan Beulich <jbeulich@novell.com>
7 * This code is released under version 2 of the GNU GPL.
8 */
9
10#ifdef CONFIG_STACK_UNWIND
11
12#include <linux/sched.h>
13#include <asm/fixmap.h>
14#include <asm/ptrace.h>
15#include <asm/uaccess.h>
16
17struct unwind_frame_info
18{
19 struct pt_regs regs;
20 struct task_struct *task;
21};
22
23#define UNW_PC(frame) (frame)->regs.eip
24#define UNW_SP(frame) (frame)->regs.esp
25#ifdef CONFIG_FRAME_POINTER
26#define UNW_FP(frame) (frame)->regs.ebp
27#define FRAME_RETADDR_OFFSET 4
28#define FRAME_LINK_OFFSET 0
29#define STACK_BOTTOM(tsk) STACK_LIMIT((tsk)->thread.esp0)
30#define STACK_TOP(tsk) ((tsk)->thread.esp0)
31#endif
32#define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1))
33
34#define UNW_REGISTER_INFO \
35 PTREGS_INFO(eax), \
36 PTREGS_INFO(ecx), \
37 PTREGS_INFO(edx), \
38 PTREGS_INFO(ebx), \
39 PTREGS_INFO(esp), \
40 PTREGS_INFO(ebp), \
41 PTREGS_INFO(esi), \
42 PTREGS_INFO(edi), \
43 PTREGS_INFO(eip)
44
45static inline void arch_unw_init_frame_info(struct unwind_frame_info *info,
46 /*const*/ struct pt_regs *regs)
47{
48 if (user_mode_vm(regs))
49 info->regs = *regs;
50 else {
51 memcpy(&info->regs, regs, offsetof(struct pt_regs, esp));
52 info->regs.esp = (unsigned long)&regs->esp;
53 info->regs.xss = __KERNEL_DS;
54 }
55}
56
57static inline void arch_unw_init_blocked(struct unwind_frame_info *info)
58{
59 memset(&info->regs, 0, sizeof(info->regs));
60 info->regs.eip = info->task->thread.eip;
61 info->regs.xcs = __KERNEL_CS;
62 __get_user(info->regs.ebp, (long *)info->task->thread.esp);
63 info->regs.esp = info->task->thread.esp;
64 info->regs.xss = __KERNEL_DS;
65 info->regs.xds = __USER_DS;
66 info->regs.xes = __USER_DS;
67}
68
69extern asmlinkage int arch_unwind_init_running(struct unwind_frame_info *,
70 asmlinkage int (*callback)(struct unwind_frame_info *,
71 void *arg),
72 void *arg);
73
74static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
75{
76#if 0 /* This can only work when selector register and EFLAGS saves/restores
77 are properly annotated (and tracked in UNW_REGISTER_INFO). */
78 return user_mode_vm(&info->regs);
79#else
80 return info->regs.eip < PAGE_OFFSET
81 || (info->regs.eip >= __fix_to_virt(FIX_VDSO)
82 && info->regs.eip < __fix_to_virt(FIX_VDSO) + PAGE_SIZE)
83 || info->regs.esp < PAGE_OFFSET;
84#endif
85}
86
87#else
88
89#define UNW_PC(frame) ((void)(frame), 0)
90
91static inline int arch_unw_user_mode(const void *info)
92{
93 return 0;
94}
95
96#endif
97
98#endif /* _ASM_I386_UNWIND_H */
diff --git a/include/asm-ia64/kdebug.h b/include/asm-ia64/kdebug.h
index c195a9ad12..aed7142f9e 100644
--- a/include/asm-ia64/kdebug.h
+++ b/include/asm-ia64/kdebug.h
@@ -40,6 +40,8 @@ struct die_args {
40 40
41extern int register_die_notifier(struct notifier_block *); 41extern int register_die_notifier(struct notifier_block *);
42extern int unregister_die_notifier(struct notifier_block *); 42extern int unregister_die_notifier(struct notifier_block *);
43extern int register_page_fault_notifier(struct notifier_block *);
44extern int unregister_page_fault_notifier(struct notifier_block *);
43extern struct atomic_notifier_head ia64die_chain; 45extern struct atomic_notifier_head ia64die_chain;
44 46
45enum die_val { 47enum die_val {
diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h
index 8c0fc227f0..2418a787c4 100644
--- a/include/asm-ia64/kprobes.h
+++ b/include/asm-ia64/kprobes.h
@@ -82,6 +82,7 @@ struct kprobe_ctlblk {
82#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry 82#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry
83 83
84#define ARCH_SUPPORTS_KRETPROBES 84#define ARCH_SUPPORTS_KRETPROBES
85#define ARCH_INACTIVE_KPROBE_COUNT 1
85 86
86#define SLOT0_OPCODE_SHIFT (37) 87#define SLOT0_OPCODE_SHIFT (37)
87#define SLOT1_p1_OPCODE_SHIFT (37 - (64-46)) 88#define SLOT1_p1_OPCODE_SHIFT (37 - (64-46))
diff --git a/include/asm-ia64/nodedata.h b/include/asm-ia64/nodedata.h
index a140310bf8..2fb337b0e9 100644
--- a/include/asm-ia64/nodedata.h
+++ b/include/asm-ia64/nodedata.h
@@ -46,6 +46,18 @@ struct ia64_node_data {
46 */ 46 */
47#define NODE_DATA(nid) (local_node_data->pg_data_ptrs[nid]) 47#define NODE_DATA(nid) (local_node_data->pg_data_ptrs[nid])
48 48
49/*
50 * LOCAL_DATA_ADDR - This is to calculate the address of other node's
51 * "local_node_data" at hot-plug phase. The local_node_data
52 * is pointed by per_cpu_page. Kernel usually use it for
53 * just executing cpu. However, when new node is hot-added,
54 * the addresses of local data for other nodes are necessary
55 * to update all of them.
56 */
57#define LOCAL_DATA_ADDR(pgdat) \
58 ((struct ia64_node_data *)((u64)(pgdat) + \
59 L1_CACHE_ALIGN(sizeof(struct pglist_data))))
60
49#endif /* CONFIG_NUMA */ 61#endif /* CONFIG_NUMA */
50 62
51#endif /* _ASM_IA64_NODEDATA_H */ 63#endif /* _ASM_IA64_NODEDATA_H */
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index e5392c4d30..8bc9869e57 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -27,6 +27,7 @@ struct thread_info {
27 __u32 flags; /* thread_info flags (see TIF_*) */ 27 __u32 flags; /* thread_info flags (see TIF_*) */
28 __u32 cpu; /* current CPU */ 28 __u32 cpu; /* current CPU */
29 __u32 last_cpu; /* Last CPU thread ran on */ 29 __u32 last_cpu; /* Last CPU thread ran on */
30 __u32 status; /* Thread synchronous flags */
30 mm_segment_t addr_limit; /* user-level address space limit */ 31 mm_segment_t addr_limit; /* user-level address space limit */
31 int preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */ 32 int preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */
32 struct restart_block restart_block; 33 struct restart_block restart_block;
@@ -103,4 +104,8 @@ struct thread_info {
103/* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */ 104/* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */
104#define TIF_WORK_MASK (TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)) 105#define TIF_WORK_MASK (TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT))
105 106
107#define TS_POLLING 1 /* true if in idle loop and not sleeping */
108
109#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING)
110
106#endif /* _ASM_IA64_THREAD_INFO_H */ 111#endif /* _ASM_IA64_THREAD_INFO_H */
diff --git a/include/asm-ia64/topology.h b/include/asm-ia64/topology.h
index 616b5ed2aa..937c212575 100644
--- a/include/asm-ia64/topology.h
+++ b/include/asm-ia64/topology.h
@@ -112,6 +112,7 @@ void build_cpu_to_node_map(void);
112#define topology_core_id(cpu) (cpu_data(cpu)->core_id) 112#define topology_core_id(cpu) (cpu_data(cpu)->core_id)
113#define topology_core_siblings(cpu) (cpu_core_map[cpu]) 113#define topology_core_siblings(cpu) (cpu_core_map[cpu])
114#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) 114#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
115#define smt_capable() (smp_num_siblings > 1)
115#endif 116#endif
116 117
117#include <asm-generic/topology.h> 118#include <asm-generic/topology.h>
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
index 33567e8bfe..66c4742f09 100644
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -318,7 +318,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
318 * does not enforce ordering, since there is no data dependency between 318 * does not enforce ordering, since there is no data dependency between
319 * the read of "a" and the read of "b". Therefore, on some CPUs, such 319 * the read of "a" and the read of "b". Therefore, on some CPUs, such
320 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() 320 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
321 * in cases like thiswhere there are no data dependencies. 321 * in cases like this where there are no data dependencies.
322 **/ 322 **/
323 323
324#define read_barrier_depends() do { } while (0) 324#define read_barrier_depends() do { } while (0)
diff --git a/include/asm-m68knommu/page_offset.h b/include/asm-m68knommu/page_offset.h
index 8ed6d7b7d9..d4e73e0ba6 100644
--- a/include/asm-m68knommu/page_offset.h
+++ b/include/asm-m68knommu/page_offset.h
@@ -1,46 +1,5 @@
1 1
2 2
3/* This handles the memory map.. */ 3/* This handles the memory map.. */
4 4#define PAGE_OFFSET_RAW CONFIG_RAMBASE
5#ifdef CONFIG_COLDFIRE
6#if defined(CONFIG_SMALL)
7#define PAGE_OFFSET_RAW 0x30020000
8#elif defined(CONFIG_CFV240)
9#define PAGE_OFFSET_RAW 0x02000000
10#else
11#define PAGE_OFFSET_RAW 0x00000000
12#endif
13#endif
14
15#ifdef CONFIG_M68360
16#define PAGE_OFFSET_RAW 0x00000000
17#endif
18
19#ifdef CONFIG_PILOT
20#ifdef CONFIG_M68328
21#define PAGE_OFFSET_RAW 0x10000000
22#endif
23#ifdef CONFIG_M68EZ328
24#define PAGE_OFFSET_RAW 0x00000000
25#endif
26#endif
27#ifdef CONFIG_UCSIMM
28#define PAGE_OFFSET_RAW 0x00000000
29#endif
30
31#if defined(CONFIG_UCDIMM) || defined(CONFIG_DRAGEN2)
32#ifdef CONFIG_M68VZ328
33#define PAGE_OFFSET_RAW 0x00000000
34#endif /* CONFIG_M68VZ328 */
35#endif /* CONFIG_UCDIMM */
36
37#ifdef CONFIG_M68EZ328ADS
38#define PAGE_OFFSET_RAW 0x00000000
39#endif
40#ifdef CONFIG_ALMA_ANS
41#define PAGE_OFFSET_RAW 0x00000000
42#endif
43#ifdef CONFIG_M68EN302
44#define PAGE_OFFSET_RAW 0x00000000
45#endif
46 5
diff --git a/include/asm-m68knommu/ptrace.h b/include/asm-m68knommu/ptrace.h
index 1e19c457de..47258e86e8 100644
--- a/include/asm-m68knommu/ptrace.h
+++ b/include/asm-m68knommu/ptrace.h
@@ -46,11 +46,9 @@ struct pt_regs {
46#else 46#else
47 unsigned short sr; 47 unsigned short sr;
48 unsigned long pc; 48 unsigned long pc;
49#ifndef NO_FORMAT_VEC
50 unsigned format : 4; /* frame format specifier */ 49 unsigned format : 4; /* frame format specifier */
51 unsigned vector : 12; /* vector offset */ 50 unsigned vector : 12; /* vector offset */
52#endif 51#endif
53#endif
54}; 52};
55 53
56/* 54/*
diff --git a/include/asm-powerpc/kdebug.h b/include/asm-powerpc/kdebug.h
index c01786ab5f..532bfee934 100644
--- a/include/asm-powerpc/kdebug.h
+++ b/include/asm-powerpc/kdebug.h
@@ -18,6 +18,8 @@ struct die_args {
18 18
19extern int register_die_notifier(struct notifier_block *); 19extern int register_die_notifier(struct notifier_block *);
20extern int unregister_die_notifier(struct notifier_block *); 20extern int unregister_die_notifier(struct notifier_block *);
21extern int register_page_fault_notifier(struct notifier_block *);
22extern int unregister_page_fault_notifier(struct notifier_block *);
21extern struct atomic_notifier_head powerpc_die_chain; 23extern struct atomic_notifier_head powerpc_die_chain;
22 24
23/* Grossly misnamed. */ 25/* Grossly misnamed. */
diff --git a/include/asm-powerpc/kprobes.h b/include/asm-powerpc/kprobes.h
index f466bc804f..2d0af52c82 100644
--- a/include/asm-powerpc/kprobes.h
+++ b/include/asm-powerpc/kprobes.h
@@ -50,6 +50,8 @@ typedef unsigned int kprobe_opcode_t;
50 IS_TWI(instr) || IS_TDI(instr)) 50 IS_TWI(instr) || IS_TDI(instr))
51 51
52#define ARCH_SUPPORTS_KRETPROBES 52#define ARCH_SUPPORTS_KRETPROBES
53#define ARCH_INACTIVE_KPROBE_COUNT 1
54
53void kretprobe_trampoline(void); 55void kretprobe_trampoline(void);
54extern void arch_remove_kprobe(struct kprobe *p); 56extern void arch_remove_kprobe(struct kprobe *p);
55 57
diff --git a/include/asm-powerpc/topology.h b/include/asm-powerpc/topology.h
index 92f3e5507d..bbc3844b08 100644
--- a/include/asm-powerpc/topology.h
+++ b/include/asm-powerpc/topology.h
@@ -93,5 +93,10 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev,
93 93
94#endif /* CONFIG_NUMA */ 94#endif /* CONFIG_NUMA */
95 95
96#ifdef CONFIG_SMP
97#include <asm/cputable.h>
98#define smt_capable() (cpu_has_feature(CPU_FTR_SMT))
99#endif
100
96#endif /* __KERNEL__ */ 101#endif /* __KERNEL__ */
97#endif /* _ASM_POWERPC_TOPOLOGY_H */ 102#endif /* _ASM_POWERPC_TOPOLOGY_H */
diff --git a/include/asm-sparc/io.h b/include/asm-sparc/io.h
index a42df208d5..cab0b851b8 100644
--- a/include/asm-sparc/io.h
+++ b/include/asm-sparc/io.h
@@ -249,6 +249,22 @@ extern void __iomem *ioremap(unsigned long offset, unsigned long size);
249#define ioremap_nocache(X,Y) ioremap((X),(Y)) 249#define ioremap_nocache(X,Y) ioremap((X),(Y))
250extern void iounmap(volatile void __iomem *addr); 250extern void iounmap(volatile void __iomem *addr);
251 251
252#define ioread8(X) readb(X)
253#define ioread16(X) readw(X)
254#define ioread32(X) readl(X)
255#define iowrite8(val,X) writeb(val,X)
256#define iowrite16(val,X) writew(val,X)
257#define iowrite32(val,X) writel(val,X)
258
259/* Create a virtual mapping cookie for an IO port range */
260extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
261extern void ioport_unmap(void __iomem *);
262
263/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
264struct pci_dev;
265extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
266extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
267
252/* 268/*
253 * Bus number may be in res->flags... somewhere. 269 * Bus number may be in res->flags... somewhere.
254 */ 270 */
diff --git a/include/asm-sparc/prom.h b/include/asm-sparc/prom.h
index c5e3d26eab..f9cf44c071 100644
--- a/include/asm-sparc/prom.h
+++ b/include/asm-sparc/prom.h
@@ -35,6 +35,8 @@ struct property {
35 int length; 35 int length;
36 void *value; 36 void *value;
37 struct property *next; 37 struct property *next;
38 unsigned long _flags;
39 unsigned int unique_id;
38}; 40};
39 41
40struct device_node { 42struct device_node {
@@ -58,8 +60,15 @@ struct device_node {
58 struct kref kref; 60 struct kref kref;
59 unsigned long _flags; 61 unsigned long _flags;
60 void *data; 62 void *data;
63 unsigned int unique_id;
61}; 64};
62 65
66/* flag descriptions */
67#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */
68
69#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
70#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
71
63static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_entry *de) 72static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_entry *de)
64{ 73{
65 dn->pde = de; 74 dn->pde = de;
@@ -88,6 +97,7 @@ extern struct property *of_find_property(struct device_node *np,
88extern int of_device_is_compatible(struct device_node *device, const char *); 97extern int of_device_is_compatible(struct device_node *device, const char *);
89extern void *of_get_property(struct device_node *node, const char *name, 98extern void *of_get_property(struct device_node *node, const char *name,
90 int *lenp); 99 int *lenp);
100extern int of_set_property(struct device_node *node, const char *name, void *val, int len);
91extern int of_getintprop_default(struct device_node *np, 101extern int of_getintprop_default(struct device_node *np,
92 const char *name, 102 const char *name,
93 int def); 103 int def);
diff --git a/include/asm-sparc64/dma-mapping.h b/include/asm-sparc64/dma-mapping.h
index 3c2b5bc865..0f5b89c932 100644
--- a/include/asm-sparc64/dma-mapping.h
+++ b/include/asm-sparc64/dma-mapping.h
@@ -162,4 +162,47 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
162 162
163#endif /* PCI */ 163#endif /* PCI */
164 164
165
166/* Now for the API extensions over the pci_ one */
167
168#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
169#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
170#define dma_is_consistent(d) (1)
171
172static inline int
173dma_get_cache_alignment(void)
174{
175 /* no easy way to get cache size on all processors, so return
176 * the maximum possible, to be safe */
177 return (1 << INTERNODE_CACHE_SHIFT);
178}
179
180static inline void
181dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
182 unsigned long offset, size_t size,
183 enum dma_data_direction direction)
184{
185 /* just sync everything, that's all the pci API can do */
186 dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
187}
188
189static inline void
190dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
191 unsigned long offset, size_t size,
192 enum dma_data_direction direction)
193{
194 /* just sync everything, that's all the pci API can do */
195 dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
196}
197
198static inline void
199dma_cache_sync(void *vaddr, size_t size,
200 enum dma_data_direction direction)
201{
202 /* could define this in terms of the dma_cache ... operations,
203 * but if you get this on a platform, you should convert the platform
204 * to using the generic device DMA API */
205 BUG();
206}
207
165#endif /* _ASM_SPARC64_DMA_MAPPING_H */ 208#endif /* _ASM_SPARC64_DMA_MAPPING_H */
diff --git a/include/asm-sparc64/floppy.h b/include/asm-sparc64/floppy.h
index f8d57bb557..b591d0e8d8 100644
--- a/include/asm-sparc64/floppy.h
+++ b/include/asm-sparc64/floppy.h
@@ -208,7 +208,55 @@ static void sun_fd_enable_dma(void)
208 pdma_areasize = pdma_size; 208 pdma_areasize = pdma_size;
209} 209}
210 210
211extern irqreturn_t sparc_floppy_irq(int, void *, struct pt_regs *); 211irqreturn_t sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
212{
213 if (likely(doing_pdma)) {
214 void __iomem *stat = (void __iomem *) fdc_status;
215 unsigned char *vaddr = pdma_vaddr;
216 unsigned long size = pdma_size;
217 u8 val;
218
219 while (size) {
220 val = readb(stat);
221 if (unlikely(!(val & 0x80))) {
222 pdma_vaddr = vaddr;
223 pdma_size = size;
224 return IRQ_HANDLED;
225 }
226 if (unlikely(!(val & 0x20))) {
227 pdma_vaddr = vaddr;
228 pdma_size = size;
229 doing_pdma = 0;
230 goto main_interrupt;
231 }
232 if (val & 0x40) {
233 /* read */
234 *vaddr++ = readb(stat + 1);
235 } else {
236 unsigned char data = *vaddr++;
237
238 /* write */
239 writeb(data, stat + 1);
240 }
241 size--;
242 }
243
244 pdma_vaddr = vaddr;
245 pdma_size = size;
246
247 /* Send Terminal Count pulse to floppy controller. */
248 val = readb(auxio_register);
249 val |= AUXIO_AUX1_FTCNT;
250 writeb(val, auxio_register);
251 val &= ~AUXIO_AUX1_FTCNT;
252 writeb(val, auxio_register);
253
254 doing_pdma = 0;
255 }
256
257main_interrupt:
258 return floppy_interrupt(irq, dev_cookie, regs);
259}
212 260
213static int sun_fd_request_irq(void) 261static int sun_fd_request_irq(void)
214{ 262{
diff --git a/include/asm-sparc64/kdebug.h b/include/asm-sparc64/kdebug.h
index 4040d127ac..11251bdd00 100644
--- a/include/asm-sparc64/kdebug.h
+++ b/include/asm-sparc64/kdebug.h
@@ -17,6 +17,8 @@ struct die_args {
17 17
18extern int register_die_notifier(struct notifier_block *); 18extern int register_die_notifier(struct notifier_block *);
19extern int unregister_die_notifier(struct notifier_block *); 19extern int unregister_die_notifier(struct notifier_block *);
20extern int register_page_fault_notifier(struct notifier_block *);
21extern int unregister_page_fault_notifier(struct notifier_block *);
20extern struct atomic_notifier_head sparc64die_chain; 22extern struct atomic_notifier_head sparc64die_chain;
21 23
22extern void bad_trap(struct pt_regs *, long); 24extern void bad_trap(struct pt_regs *, long);
diff --git a/include/asm-sparc64/kprobes.h b/include/asm-sparc64/kprobes.h
index e9bb26f770..15065af566 100644
--- a/include/asm-sparc64/kprobes.h
+++ b/include/asm-sparc64/kprobes.h
@@ -12,6 +12,7 @@ typedef u32 kprobe_opcode_t;
12 12
13#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry 13#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry
14#define arch_remove_kprobe(p) do {} while (0) 14#define arch_remove_kprobe(p) do {} while (0)
15#define ARCH_INACTIVE_KPROBE_COUNT 0
15 16
16/* Architecture specific copy of original instruction*/ 17/* Architecture specific copy of original instruction*/
17struct arch_specific_insn { 18struct arch_specific_insn {
diff --git a/include/asm-sparc64/prom.h b/include/asm-sparc64/prom.h
index 6d1556c0c2..265614d497 100644
--- a/include/asm-sparc64/prom.h
+++ b/include/asm-sparc64/prom.h
@@ -35,6 +35,8 @@ struct property {
35 int length; 35 int length;
36 void *value; 36 void *value;
37 struct property *next; 37 struct property *next;
38 unsigned long _flags;
39 unsigned int unique_id;
38}; 40};
39 41
40struct device_node { 42struct device_node {
@@ -58,8 +60,15 @@ struct device_node {
58 struct kref kref; 60 struct kref kref;
59 unsigned long _flags; 61 unsigned long _flags;
60 void *data; 62 void *data;
63 unsigned int unique_id;
61}; 64};
62 65
66/* flag descriptions */
67#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */
68
69#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
70#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
71
63static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_entry *de) 72static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_entry *de)
64{ 73{
65 dn->pde = de; 74 dn->pde = de;
@@ -88,6 +97,7 @@ extern struct property *of_find_property(struct device_node *np,
88extern int of_device_is_compatible(struct device_node *device, const char *); 97extern int of_device_is_compatible(struct device_node *device, const char *);
89extern void *of_get_property(struct device_node *node, const char *name, 98extern void *of_get_property(struct device_node *node, const char *name,
90 int *lenp); 99 int *lenp);
100extern int of_set_property(struct device_node *node, const char *name, void *val, int len);
91extern int of_getintprop_default(struct device_node *np, 101extern int of_getintprop_default(struct device_node *np,
92 const char *name, 102 const char *name,
93 int def); 103 int def);
diff --git a/include/asm-sparc64/topology.h b/include/asm-sparc64/topology.h
index 0e234e201b..98a6c61358 100644
--- a/include/asm-sparc64/topology.h
+++ b/include/asm-sparc64/topology.h
@@ -1,6 +1,9 @@
1#ifndef _ASM_SPARC64_TOPOLOGY_H 1#ifndef _ASM_SPARC64_TOPOLOGY_H
2#define _ASM_SPARC64_TOPOLOGY_H 2#define _ASM_SPARC64_TOPOLOGY_H
3 3
4#include <asm/spitfire.h>
5#define smt_capable() (tlb_type == hypervisor)
6
4#include <asm-generic/topology.h> 7#include <asm-generic/topology.h>
5 8
6#endif /* _ASM_SPARC64_TOPOLOGY_H */ 9#endif /* _ASM_SPARC64_TOPOLOGY_H */
diff --git a/include/asm-x86_64/alternative.h b/include/asm-x86_64/alternative.h
new file mode 100644
index 0000000000..387c8f66af
--- /dev/null
+++ b/include/asm-x86_64/alternative.h
@@ -0,0 +1,146 @@
1#ifndef _X86_64_ALTERNATIVE_H
2#define _X86_64_ALTERNATIVE_H
3
4#ifdef __KERNEL__
5
6#include <linux/types.h>
7
8struct alt_instr {
9 u8 *instr; /* original instruction */
10 u8 *replacement;
11 u8 cpuid; /* cpuid bit set for replacement */
12 u8 instrlen; /* length of original instruction */
13 u8 replacementlen; /* length of new instruction, <= instrlen */
14 u8 pad[5];
15};
16
17extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
18
19struct module;
20extern void alternatives_smp_module_add(struct module *mod, char *name,
21 void *locks, void *locks_end,
22 void *text, void *text_end);
23extern void alternatives_smp_module_del(struct module *mod);
24extern void alternatives_smp_switch(int smp);
25
26#endif
27
28/*
29 * Alternative instructions for different CPU types or capabilities.
30 *
31 * This allows to use optimized instructions even on generic binary
32 * kernels.
33 *
34 * length of oldinstr must be longer or equal the length of newinstr
35 * It can be padded with nops as needed.
36 *
37 * For non barrier like inlines please define new variants
38 * without volatile and memory clobber.
39 */
40#define alternative(oldinstr, newinstr, feature) \
41 asm volatile ("661:\n\t" oldinstr "\n662:\n" \
42 ".section .altinstructions,\"a\"\n" \
43 " .align 8\n" \
44 " .quad 661b\n" /* label */ \
45 " .quad 663f\n" /* new instruction */ \
46 " .byte %c0\n" /* feature bit */ \
47 " .byte 662b-661b\n" /* sourcelen */ \
48 " .byte 664f-663f\n" /* replacementlen */ \
49 ".previous\n" \
50 ".section .altinstr_replacement,\"ax\"\n" \
51 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
52 ".previous" :: "i" (feature) : "memory")
53
54/*
55 * Alternative inline assembly with input.
56 *
57 * Pecularities:
58 * No memory clobber here.
59 * Argument numbers start with 1.
60 * Best is to use constraints that are fixed size (like (%1) ... "r")
61 * If you use variable sized constraints like "m" or "g" in the
62 * replacement make sure to pad to the worst case length.
63 */
64#define alternative_input(oldinstr, newinstr, feature, input...) \
65 asm volatile ("661:\n\t" oldinstr "\n662:\n" \
66 ".section .altinstructions,\"a\"\n" \
67 " .align 8\n" \
68 " .quad 661b\n" /* label */ \
69 " .quad 663f\n" /* new instruction */ \
70 " .byte %c0\n" /* feature bit */ \
71 " .byte 662b-661b\n" /* sourcelen */ \
72 " .byte 664f-663f\n" /* replacementlen */ \
73 ".previous\n" \
74 ".section .altinstr_replacement,\"ax\"\n" \
75 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
76 ".previous" :: "i" (feature), ##input)
77
78/* Like alternative_input, but with a single output argument */
79#define alternative_io(oldinstr, newinstr, feature, output, input...) \
80 asm volatile ("661:\n\t" oldinstr "\n662:\n" \
81 ".section .altinstructions,\"a\"\n" \
82 " .align 8\n" \
83 " .quad 661b\n" /* label */ \
84 " .quad 663f\n" /* new instruction */ \
85 " .byte %c[feat]\n" /* feature bit */ \
86 " .byte 662b-661b\n" /* sourcelen */ \
87 " .byte 664f-663f\n" /* replacementlen */ \
88 ".previous\n" \
89 ".section .altinstr_replacement,\"ax\"\n" \
90 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
91 ".previous" : output : [feat] "i" (feature), ##input)
92
93/*
94 * Alternative inline assembly for SMP.
95 *
96 * alternative_smp() takes two versions (SMP first, UP second) and is
97 * for more complex stuff such as spinlocks.
98 *
99 * The LOCK_PREFIX macro defined here replaces the LOCK and
100 * LOCK_PREFIX macros used everywhere in the source tree.
101 *
102 * SMP alternatives use the same data structures as the other
103 * alternatives and the X86_FEATURE_UP flag to indicate the case of a
104 * UP system running a SMP kernel. The existing apply_alternatives()
105 * works fine for patching a SMP kernel for UP.
106 *
107 * The SMP alternative tables can be kept after boot and contain both
108 * UP and SMP versions of the instructions to allow switching back to
109 * SMP at runtime, when hotplugging in a new CPU, which is especially
110 * useful in virtualized environments.
111 *
112 * The very common lock prefix is handled as special case in a
113 * separate table which is a pure address list without replacement ptr
114 * and size information. That keeps the table sizes small.
115 */
116
117#ifdef CONFIG_SMP
118#define alternative_smp(smpinstr, upinstr, args...) \
119 asm volatile ("661:\n\t" smpinstr "\n662:\n" \
120 ".section .smp_altinstructions,\"a\"\n" \
121 " .align 8\n" \
122 " .quad 661b\n" /* label */ \
123 " .quad 663f\n" /* new instruction */ \
124 " .byte 0x66\n" /* X86_FEATURE_UP */ \
125 " .byte 662b-661b\n" /* sourcelen */ \
126 " .byte 664f-663f\n" /* replacementlen */ \
127 ".previous\n" \
128 ".section .smp_altinstr_replacement,\"awx\"\n" \
129 "663:\n\t" upinstr "\n" /* replacement */ \
130 "664:\n\t.fill 662b-661b,1,0x42\n" /* space for original */ \
131 ".previous" : args)
132
133#define LOCK_PREFIX \
134 ".section .smp_locks,\"a\"\n" \
135 " .align 8\n" \
136 " .quad 661f\n" /* address */ \
137 ".previous\n" \
138 "661:\n\tlock; "
139
140#else /* ! CONFIG_SMP */
141#define alternative_smp(smpinstr, upinstr, args...) \
142 asm volatile (upinstr : args)
143#define LOCK_PREFIX ""
144#endif
145
146#endif /* _X86_64_ALTERNATIVE_H */
diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h
index a731be2204..9c96a0a8d1 100644
--- a/include/asm-x86_64/apic.h
+++ b/include/asm-x86_64/apic.h
@@ -49,7 +49,8 @@ static __inline unsigned int apic_read(unsigned long reg)
49 49
50static __inline__ void apic_wait_icr_idle(void) 50static __inline__ void apic_wait_icr_idle(void)
51{ 51{
52 while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY ); 52 while (apic_read( APIC_ICR ) & APIC_ICR_BUSY)
53 cpu_relax();
53} 54}
54 55
55static inline void ack_APIC_irq(void) 56static inline void ack_APIC_irq(void)
@@ -79,30 +80,23 @@ extern void init_apic_mappings (void);
79extern void smp_local_timer_interrupt (struct pt_regs * regs); 80extern void smp_local_timer_interrupt (struct pt_regs * regs);
80extern void setup_boot_APIC_clock (void); 81extern void setup_boot_APIC_clock (void);
81extern void setup_secondary_APIC_clock (void); 82extern void setup_secondary_APIC_clock (void);
82extern void setup_apic_nmi_watchdog (void);
83extern int reserve_lapic_nmi(void);
84extern void release_lapic_nmi(void);
85extern void disable_timer_nmi_watchdog(void);
86extern void enable_timer_nmi_watchdog(void);
87extern void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
88extern int APIC_init_uniprocessor (void); 83extern int APIC_init_uniprocessor (void);
89extern void disable_APIC_timer(void); 84extern void disable_APIC_timer(void);
90extern void enable_APIC_timer(void); 85extern void enable_APIC_timer(void);
91extern void clustered_apic_check(void); 86extern void clustered_apic_check(void);
92 87
93extern void nmi_watchdog_default(void); 88extern void setup_APIC_extened_lvt(unsigned char lvt_off, unsigned char vector,
94extern int setup_nmi_watchdog(char *); 89 unsigned char msg_type, unsigned char mask);
95 90
96extern unsigned int nmi_watchdog; 91#define K8_APIC_EXT_LVT_BASE 0x500
97#define NMI_DEFAULT -1 92#define K8_APIC_EXT_INT_MSG_FIX 0x0
98#define NMI_NONE 0 93#define K8_APIC_EXT_INT_MSG_SMI 0x2
99#define NMI_IO_APIC 1 94#define K8_APIC_EXT_INT_MSG_NMI 0x4
100#define NMI_LOCAL_APIC 2 95#define K8_APIC_EXT_INT_MSG_EXT 0x7
101#define NMI_INVALID 3 96#define K8_APIC_EXT_LVT_ENTRY_THRESHOLD 0
102 97
103extern int disable_timer_pin_1; 98extern int disable_timer_pin_1;
104 99
105extern void setup_threshold_lvt(unsigned long lvt_off);
106 100
107void smp_send_timer_broadcast_ipi(void); 101void smp_send_timer_broadcast_ipi(void);
108void switch_APIC_timer_to_ipi(void *cpumask); 102void switch_APIC_timer_to_ipi(void *cpumask);
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index bd3fa67ed8..007e88d6d4 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -1,7 +1,7 @@
1#ifndef __ARCH_X86_64_ATOMIC__ 1#ifndef __ARCH_X86_64_ATOMIC__
2#define __ARCH_X86_64_ATOMIC__ 2#define __ARCH_X86_64_ATOMIC__
3 3
4#include <asm/types.h> 4#include <asm/alternative.h>
5 5
6/* atomic_t should be 32 bit signed type */ 6/* atomic_t should be 32 bit signed type */
7 7
@@ -52,7 +52,7 @@ typedef struct { volatile int counter; } atomic_t;
52static __inline__ void atomic_add(int i, atomic_t *v) 52static __inline__ void atomic_add(int i, atomic_t *v)
53{ 53{
54 __asm__ __volatile__( 54 __asm__ __volatile__(
55 LOCK "addl %1,%0" 55 LOCK_PREFIX "addl %1,%0"
56 :"=m" (v->counter) 56 :"=m" (v->counter)
57 :"ir" (i), "m" (v->counter)); 57 :"ir" (i), "m" (v->counter));
58} 58}
@@ -67,7 +67,7 @@ static __inline__ void atomic_add(int i, atomic_t *v)
67static __inline__ void atomic_sub(int i, atomic_t *v) 67static __inline__ void atomic_sub(int i, atomic_t *v)
68{ 68{
69 __asm__ __volatile__( 69 __asm__ __volatile__(
70 LOCK "subl %1,%0" 70 LOCK_PREFIX "subl %1,%0"
71 :"=m" (v->counter) 71 :"=m" (v->counter)
72 :"ir" (i), "m" (v->counter)); 72 :"ir" (i), "m" (v->counter));
73} 73}
@@ -86,7 +86,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
86 unsigned char c; 86 unsigned char c;
87 87
88 __asm__ __volatile__( 88 __asm__ __volatile__(
89 LOCK "subl %2,%0; sete %1" 89 LOCK_PREFIX "subl %2,%0; sete %1"
90 :"=m" (v->counter), "=qm" (c) 90 :"=m" (v->counter), "=qm" (c)
91 :"ir" (i), "m" (v->counter) : "memory"); 91 :"ir" (i), "m" (v->counter) : "memory");
92 return c; 92 return c;
@@ -101,7 +101,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
101static __inline__ void atomic_inc(atomic_t *v) 101static __inline__ void atomic_inc(atomic_t *v)
102{ 102{
103 __asm__ __volatile__( 103 __asm__ __volatile__(
104 LOCK "incl %0" 104 LOCK_PREFIX "incl %0"
105 :"=m" (v->counter) 105 :"=m" (v->counter)
106 :"m" (v->counter)); 106 :"m" (v->counter));
107} 107}
@@ -115,7 +115,7 @@ static __inline__ void atomic_inc(atomic_t *v)
115static __inline__ void atomic_dec(atomic_t *v) 115static __inline__ void atomic_dec(atomic_t *v)
116{ 116{
117 __asm__ __volatile__( 117 __asm__ __volatile__(
118 LOCK "decl %0" 118 LOCK_PREFIX "decl %0"
119 :"=m" (v->counter) 119 :"=m" (v->counter)
120 :"m" (v->counter)); 120 :"m" (v->counter));
121} 121}
@@ -133,7 +133,7 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
133 unsigned char c; 133 unsigned char c;
134 134
135 __asm__ __volatile__( 135 __asm__ __volatile__(
136 LOCK "decl %0; sete %1" 136 LOCK_PREFIX "decl %0; sete %1"
137 :"=m" (v->counter), "=qm" (c) 137 :"=m" (v->counter), "=qm" (c)
138 :"m" (v->counter) : "memory"); 138 :"m" (v->counter) : "memory");
139 return c != 0; 139 return c != 0;
@@ -152,7 +152,7 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
152 unsigned char c; 152 unsigned char c;
153 153
154 __asm__ __volatile__( 154 __asm__ __volatile__(
155 LOCK "incl %0; sete %1" 155 LOCK_PREFIX "incl %0; sete %1"
156 :"=m" (v->counter), "=qm" (c) 156 :"=m" (v->counter), "=qm" (c)
157 :"m" (v->counter) : "memory"); 157 :"m" (v->counter) : "memory");
158 return c != 0; 158 return c != 0;
@@ -172,7 +172,7 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
172 unsigned char c; 172 unsigned char c;
173 173
174 __asm__ __volatile__( 174 __asm__ __volatile__(
175 LOCK "addl %2,%0; sets %1" 175 LOCK_PREFIX "addl %2,%0; sets %1"
176 :"=m" (v->counter), "=qm" (c) 176 :"=m" (v->counter), "=qm" (c)
177 :"ir" (i), "m" (v->counter) : "memory"); 177 :"ir" (i), "m" (v->counter) : "memory");
178 return c; 178 return c;
@@ -189,7 +189,7 @@ static __inline__ int atomic_add_return(int i, atomic_t *v)
189{ 189{
190 int __i = i; 190 int __i = i;
191 __asm__ __volatile__( 191 __asm__ __volatile__(
192 LOCK "xaddl %0, %1;" 192 LOCK_PREFIX "xaddl %0, %1;"
193 :"=r"(i) 193 :"=r"(i)
194 :"m"(v->counter), "0"(i)); 194 :"m"(v->counter), "0"(i));
195 return i + __i; 195 return i + __i;
@@ -237,7 +237,7 @@ typedef struct { volatile long counter; } atomic64_t;
237static __inline__ void atomic64_add(long i, atomic64_t *v) 237static __inline__ void atomic64_add(long i, atomic64_t *v)
238{ 238{
239 __asm__ __volatile__( 239 __asm__ __volatile__(
240 LOCK "addq %1,%0" 240 LOCK_PREFIX "addq %1,%0"
241 :"=m" (v->counter) 241 :"=m" (v->counter)
242 :"ir" (i), "m" (v->counter)); 242 :"ir" (i), "m" (v->counter));
243} 243}
@@ -252,7 +252,7 @@ static __inline__ void atomic64_add(long i, atomic64_t *v)
252static __inline__ void atomic64_sub(long i, atomic64_t *v) 252static __inline__ void atomic64_sub(long i, atomic64_t *v)
253{ 253{
254 __asm__ __volatile__( 254 __asm__ __volatile__(
255 LOCK "subq %1,%0" 255 LOCK_PREFIX "subq %1,%0"
256 :"=m" (v->counter) 256 :"=m" (v->counter)
257 :"ir" (i), "m" (v->counter)); 257 :"ir" (i), "m" (v->counter));
258} 258}
@@ -271,7 +271,7 @@ static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v)
271 unsigned char c; 271 unsigned char c;
272 272
273 __asm__ __volatile__( 273 __asm__ __volatile__(
274 LOCK "subq %2,%0; sete %1" 274 LOCK_PREFIX "subq %2,%0; sete %1"
275 :"=m" (v->counter), "=qm" (c) 275 :"=m" (v->counter), "=qm" (c)
276 :"ir" (i), "m" (v->counter) : "memory"); 276 :"ir" (i), "m" (v->counter) : "memory");
277 return c; 277 return c;
@@ -286,7 +286,7 @@ static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v)
286static __inline__ void atomic64_inc(atomic64_t *v) 286static __inline__ void atomic64_inc(atomic64_t *v)
287{ 287{
288 __asm__ __volatile__( 288 __asm__ __volatile__(
289 LOCK "incq %0" 289 LOCK_PREFIX "incq %0"
290 :"=m" (v->counter) 290 :"=m" (v->counter)
291 :"m" (v->counter)); 291 :"m" (v->counter));
292} 292}
@@ -300,7 +300,7 @@ static __inline__ void atomic64_inc(atomic64_t *v)
300static __inline__ void atomic64_dec(atomic64_t *v) 300static __inline__ void atomic64_dec(atomic64_t *v)
301{ 301{
302 __asm__ __volatile__( 302 __asm__ __volatile__(
303 LOCK "decq %0" 303 LOCK_PREFIX "decq %0"
304 :"=m" (v->counter) 304 :"=m" (v->counter)
305 :"m" (v->counter)); 305 :"m" (v->counter));
306} 306}
@@ -318,7 +318,7 @@ static __inline__ int atomic64_dec_and_test(atomic64_t *v)
318 unsigned char c; 318 unsigned char c;
319 319
320 __asm__ __volatile__( 320 __asm__ __volatile__(
321 LOCK "decq %0; sete %1" 321 LOCK_PREFIX "decq %0; sete %1"
322 :"=m" (v->counter), "=qm" (c) 322 :"=m" (v->counter), "=qm" (c)
323 :"m" (v->counter) : "memory"); 323 :"m" (v->counter) : "memory");
324 return c != 0; 324 return c != 0;
@@ -337,7 +337,7 @@ static __inline__ int atomic64_inc_and_test(atomic64_t *v)
337 unsigned char c; 337 unsigned char c;
338 338
339 __asm__ __volatile__( 339 __asm__ __volatile__(
340 LOCK "incq %0; sete %1" 340 LOCK_PREFIX "incq %0; sete %1"
341 :"=m" (v->counter), "=qm" (c) 341 :"=m" (v->counter), "=qm" (c)
342 :"m" (v->counter) : "memory"); 342 :"m" (v->counter) : "memory");
343 return c != 0; 343 return c != 0;
@@ -357,7 +357,7 @@ static __inline__ int atomic64_add_negative(long i, atomic64_t *v)
357 unsigned char c; 357 unsigned char c;
358 358
359 __asm__ __volatile__( 359 __asm__ __volatile__(
360 LOCK "addq %2,%0; sets %1" 360 LOCK_PREFIX "addq %2,%0; sets %1"
361 :"=m" (v->counter), "=qm" (c) 361 :"=m" (v->counter), "=qm" (c)
362 :"ir" (i), "m" (v->counter) : "memory"); 362 :"ir" (i), "m" (v->counter) : "memory");
363 return c; 363 return c;
@@ -374,7 +374,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t *v)
374{ 374{
375 long __i = i; 375 long __i = i;
376 __asm__ __volatile__( 376 __asm__ __volatile__(
377 LOCK "xaddq %0, %1;" 377 LOCK_PREFIX "xaddq %0, %1;"
378 :"=r"(i) 378 :"=r"(i)
379 :"m"(v->counter), "0"(i)); 379 :"m"(v->counter), "0"(i));
380 return i + __i; 380 return i + __i;
@@ -418,11 +418,11 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
418 418
419/* These are x86-specific, used by some header files */ 419/* These are x86-specific, used by some header files */
420#define atomic_clear_mask(mask, addr) \ 420#define atomic_clear_mask(mask, addr) \
421__asm__ __volatile__(LOCK "andl %0,%1" \ 421__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
422: : "r" (~(mask)),"m" (*addr) : "memory") 422: : "r" (~(mask)),"m" (*addr) : "memory")
423 423
424#define atomic_set_mask(mask, addr) \ 424#define atomic_set_mask(mask, addr) \
425__asm__ __volatile__(LOCK "orl %0,%1" \ 425__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
426: : "r" ((unsigned)mask),"m" (*(addr)) : "memory") 426: : "r" ((unsigned)mask),"m" (*(addr)) : "memory")
427 427
428/* Atomic operations are already serializing on x86 */ 428/* Atomic operations are already serializing on x86 */
diff --git a/include/asm-x86_64/bitops.h b/include/asm-x86_64/bitops.h
index e9bf933d25..f7ba57b1cc 100644
--- a/include/asm-x86_64/bitops.h
+++ b/include/asm-x86_64/bitops.h
@@ -5,12 +5,7 @@
5 * Copyright 1992, Linus Torvalds. 5 * Copyright 1992, Linus Torvalds.
6 */ 6 */
7 7
8 8#include <asm/alternative.h>
9#ifdef CONFIG_SMP
10#define LOCK_PREFIX "lock ; "
11#else
12#define LOCK_PREFIX ""
13#endif
14 9
15#define ADDR (*(volatile long *) addr) 10#define ADDR (*(volatile long *) addr)
16 11
diff --git a/include/asm-x86_64/calgary.h b/include/asm-x86_64/calgary.h
new file mode 100644
index 0000000000..6e1654f309
--- /dev/null
+++ b/include/asm-x86_64/calgary.h
@@ -0,0 +1,66 @@
1/*
2 * Derived from include/asm-powerpc/iommu.h
3 *
4 * Copyright (C) 2006 Jon Mason <jdmason@us.ibm.com>, IBM Corporation
5 * Copyright (C) 2006 Muli Ben-Yehuda <muli@il.ibm.com>, IBM Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#ifndef _ASM_X86_64_CALGARY_H
23#define _ASM_X86_64_CALGARY_H
24
25#include <linux/config.h>
26#include <linux/spinlock.h>
27#include <linux/device.h>
28#include <linux/dma-mapping.h>
29#include <asm/types.h>
30
31struct iommu_table {
32 unsigned long it_base; /* mapped address of tce table */
33 unsigned long it_hint; /* Hint for next alloc */
34 unsigned long *it_map; /* A simple allocation bitmap for now */
35 spinlock_t it_lock; /* Protects it_map */
36 unsigned int it_size; /* Size of iommu table in entries */
37 unsigned char it_busno; /* Bus number this table belongs to */
38 void __iomem *bbar;
39 u64 tar_val;
40 struct timer_list watchdog_timer;
41};
42
43#define TCE_TABLE_SIZE_UNSPECIFIED ~0
44#define TCE_TABLE_SIZE_64K 0
45#define TCE_TABLE_SIZE_128K 1
46#define TCE_TABLE_SIZE_256K 2
47#define TCE_TABLE_SIZE_512K 3
48#define TCE_TABLE_SIZE_1M 4
49#define TCE_TABLE_SIZE_2M 5
50#define TCE_TABLE_SIZE_4M 6
51#define TCE_TABLE_SIZE_8M 7
52
53#ifdef CONFIG_CALGARY_IOMMU
54extern int calgary_iommu_init(void);
55extern void detect_calgary(void);
56#else
57static inline int calgary_iommu_init(void) { return 1; }
58static inline void detect_calgary(void) { return; }
59#endif
60
61static inline unsigned int bus_to_phb(unsigned char busno)
62{
63 return ((busno % 15 == 0) ? 0 : busno / 2 + 1);
64}
65
66#endif /* _ASM_X86_64_CALGARY_H */
diff --git a/include/asm-x86_64/cpufeature.h b/include/asm-x86_64/cpufeature.h
index 662964b74e..ee792faaca 100644
--- a/include/asm-x86_64/cpufeature.h
+++ b/include/asm-x86_64/cpufeature.h
@@ -46,6 +46,7 @@
46#define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */ 46#define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */
47#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ 47#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */
48#define X86_FEATURE_FXSR_OPT (1*32+25) /* FXSR optimizations */ 48#define X86_FEATURE_FXSR_OPT (1*32+25) /* FXSR optimizations */
49#define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */
49#define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */ 50#define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */
50#define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */ 51#define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */
51#define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! */ 52#define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! */
@@ -65,6 +66,8 @@
65#define X86_FEATURE_CONSTANT_TSC (3*32+5) /* TSC runs at constant rate */ 66#define X86_FEATURE_CONSTANT_TSC (3*32+5) /* TSC runs at constant rate */
66#define X86_FEATURE_SYNC_RDTSC (3*32+6) /* RDTSC syncs CPU core */ 67#define X86_FEATURE_SYNC_RDTSC (3*32+6) /* RDTSC syncs CPU core */
67#define X86_FEATURE_FXSAVE_LEAK (3*32+7) /* FIP/FOP/FDP leaks through FXSAVE */ 68#define X86_FEATURE_FXSAVE_LEAK (3*32+7) /* FIP/FOP/FDP leaks through FXSAVE */
69#define X86_FEATURE_UP (3*32+8) /* SMP kernel running on UP */
70#define X86_FEATURE_ARCH_PERFMON (3*32+9) /* Intel Architectural PerfMon */
68 71
69/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ 72/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
70#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ 73#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
diff --git a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h
index 498f66df36..b6da83dcc7 100644
--- a/include/asm-x86_64/dma-mapping.h
+++ b/include/asm-x86_64/dma-mapping.h
@@ -55,6 +55,13 @@ extern dma_addr_t bad_dma_address;
55extern struct dma_mapping_ops* dma_ops; 55extern struct dma_mapping_ops* dma_ops;
56extern int iommu_merge; 56extern int iommu_merge;
57 57
58static inline int valid_dma_direction(int dma_direction)
59{
60 return ((dma_direction == DMA_BIDIRECTIONAL) ||
61 (dma_direction == DMA_TO_DEVICE) ||
62 (dma_direction == DMA_FROM_DEVICE));
63}
64
58static inline int dma_mapping_error(dma_addr_t dma_addr) 65static inline int dma_mapping_error(dma_addr_t dma_addr)
59{ 66{
60 if (dma_ops->mapping_error) 67 if (dma_ops->mapping_error)
@@ -72,6 +79,7 @@ static inline dma_addr_t
72dma_map_single(struct device *hwdev, void *ptr, size_t size, 79dma_map_single(struct device *hwdev, void *ptr, size_t size,
73 int direction) 80 int direction)
74{ 81{
82 BUG_ON(!valid_dma_direction(direction));
75 return dma_ops->map_single(hwdev, ptr, size, direction); 83 return dma_ops->map_single(hwdev, ptr, size, direction);
76} 84}
77 85
@@ -79,6 +87,7 @@ static inline void
79dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size, 87dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
80 int direction) 88 int direction)
81{ 89{
90 BUG_ON(!valid_dma_direction(direction));
82 dma_ops->unmap_single(dev, addr, size, direction); 91 dma_ops->unmap_single(dev, addr, size, direction);
83} 92}
84 93
@@ -91,6 +100,7 @@ static inline void
91dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle, 100dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
92 size_t size, int direction) 101 size_t size, int direction)
93{ 102{
103 BUG_ON(!valid_dma_direction(direction));
94 if (dma_ops->sync_single_for_cpu) 104 if (dma_ops->sync_single_for_cpu)
95 dma_ops->sync_single_for_cpu(hwdev, dma_handle, size, 105 dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
96 direction); 106 direction);
@@ -101,6 +111,7 @@ static inline void
101dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle, 111dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
102 size_t size, int direction) 112 size_t size, int direction)
103{ 113{
114 BUG_ON(!valid_dma_direction(direction));
104 if (dma_ops->sync_single_for_device) 115 if (dma_ops->sync_single_for_device)
105 dma_ops->sync_single_for_device(hwdev, dma_handle, size, 116 dma_ops->sync_single_for_device(hwdev, dma_handle, size,
106 direction); 117 direction);
@@ -111,6 +122,7 @@ static inline void
111dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle, 122dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
112 unsigned long offset, size_t size, int direction) 123 unsigned long offset, size_t size, int direction)
113{ 124{
125 BUG_ON(!valid_dma_direction(direction));
114 if (dma_ops->sync_single_range_for_cpu) { 126 if (dma_ops->sync_single_range_for_cpu) {
115 dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction); 127 dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction);
116 } 128 }
@@ -122,6 +134,7 @@ static inline void
122dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle, 134dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
123 unsigned long offset, size_t size, int direction) 135 unsigned long offset, size_t size, int direction)
124{ 136{
137 BUG_ON(!valid_dma_direction(direction));
125 if (dma_ops->sync_single_range_for_device) 138 if (dma_ops->sync_single_range_for_device)
126 dma_ops->sync_single_range_for_device(hwdev, dma_handle, 139 dma_ops->sync_single_range_for_device(hwdev, dma_handle,
127 offset, size, direction); 140 offset, size, direction);
@@ -133,6 +146,7 @@ static inline void
133dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, 146dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
134 int nelems, int direction) 147 int nelems, int direction)
135{ 148{
149 BUG_ON(!valid_dma_direction(direction));
136 if (dma_ops->sync_sg_for_cpu) 150 if (dma_ops->sync_sg_for_cpu)
137 dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction); 151 dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
138 flush_write_buffers(); 152 flush_write_buffers();
@@ -142,6 +156,7 @@ static inline void
142dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, 156dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
143 int nelems, int direction) 157 int nelems, int direction)
144{ 158{
159 BUG_ON(!valid_dma_direction(direction));
145 if (dma_ops->sync_sg_for_device) { 160 if (dma_ops->sync_sg_for_device) {
146 dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction); 161 dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
147 } 162 }
@@ -152,6 +167,7 @@ dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
152static inline int 167static inline int
153dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction) 168dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction)
154{ 169{
170 BUG_ON(!valid_dma_direction(direction));
155 return dma_ops->map_sg(hwdev, sg, nents, direction); 171 return dma_ops->map_sg(hwdev, sg, nents, direction);
156} 172}
157 173
@@ -159,6 +175,7 @@ static inline void
159dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, 175dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
160 int direction) 176 int direction)
161{ 177{
178 BUG_ON(!valid_dma_direction(direction));
162 dma_ops->unmap_sg(hwdev, sg, nents, direction); 179 dma_ops->unmap_sg(hwdev, sg, nents, direction);
163} 180}
164 181
diff --git a/include/asm-x86_64/dma.h b/include/asm-x86_64/dma.h
index c556208d3d..a37c16f062 100644
--- a/include/asm-x86_64/dma.h
+++ b/include/asm-x86_64/dma.h
@@ -1,4 +1,4 @@
1/* $Id: dma.h,v 1.1.1.1 2001/04/19 20:00:38 ak Exp $ 1/*
2 * linux/include/asm/dma.h: Defines for using and allocating dma channels. 2 * linux/include/asm/dma.h: Defines for using and allocating dma channels.
3 * Written by Hennus Bergman, 1992. 3 * Written by Hennus Bergman, 1992.
4 * High DMA channel support & info by Hannu Savolainen 4 * High DMA channel support & info by Hannu Savolainen
diff --git a/include/asm-x86_64/gart-mapping.h b/include/asm-x86_64/gart-mapping.h
deleted file mode 100644
index ada497b0b5..0000000000
--- a/include/asm-x86_64/gart-mapping.h
+++ /dev/null
@@ -1,16 +0,0 @@
1#ifndef _X8664_GART_MAPPING_H
2#define _X8664_GART_MAPPING_H 1
3
4#include <linux/types.h>
5#include <asm/types.h>
6
7struct device;
8
9extern void*
10gart_alloc_coherent(struct device *dev, size_t size,
11 dma_addr_t *dma_handle, gfp_t gfp);
12
13extern int
14gart_dma_supported(struct device *hwdev, u64 mask);
15
16#endif /* _X8664_GART_MAPPING_H */
diff --git a/include/asm-x86_64/hpet.h b/include/asm-x86_64/hpet.h
index 18ff7ee9e7..b39098408b 100644
--- a/include/asm-x86_64/hpet.h
+++ b/include/asm-x86_64/hpet.h
@@ -55,7 +55,7 @@
55 55
56extern int is_hpet_enabled(void); 56extern int is_hpet_enabled(void);
57extern int hpet_rtc_timer_init(void); 57extern int hpet_rtc_timer_init(void);
58extern int oem_force_hpet_timer(void); 58extern int apic_is_clustered_box(void);
59 59
60extern int hpet_use_timer; 60extern int hpet_use_timer;
61 61
diff --git a/include/asm-x86_64/hw_irq.h b/include/asm-x86_64/hw_irq.h
index 3de96fd86a..9318774627 100644
--- a/include/asm-x86_64/hw_irq.h
+++ b/include/asm-x86_64/hw_irq.h
@@ -12,8 +12,6 @@
12 * <tomsoft@informatik.tu-chemnitz.de> 12 * <tomsoft@informatik.tu-chemnitz.de>
13 * 13 *
14 * hacked by Andi Kleen for x86-64. 14 * hacked by Andi Kleen for x86-64.
15 *
16 * $Id: hw_irq.h,v 1.24 2001/09/14 20:55:03 vojtech Exp $
17 */ 15 */
18 16
19#ifndef __ASSEMBLY__ 17#ifndef __ASSEMBLY__
@@ -126,7 +124,7 @@ asmlinkage void IRQ_NAME(nr); \
126__asm__( \ 124__asm__( \
127"\n.p2align\n" \ 125"\n.p2align\n" \
128"IRQ" #nr "_interrupt:\n\t" \ 126"IRQ" #nr "_interrupt:\n\t" \
129 "push $" #nr "-256 ; " \ 127 "push $~(" #nr ") ; " \
130 "jmp common_interrupt"); 128 "jmp common_interrupt");
131 129
132#if defined(CONFIG_X86_IO_APIC) 130#if defined(CONFIG_X86_IO_APIC)
diff --git a/include/asm-x86_64/ia32_unistd.h b/include/asm-x86_64/ia32_unistd.h
index b4f4b172b1..5b52ce5073 100644
--- a/include/asm-x86_64/ia32_unistd.h
+++ b/include/asm-x86_64/ia32_unistd.h
@@ -4,317 +4,15 @@
4/* 4/*
5 * This file contains the system call numbers of the ia32 port, 5 * This file contains the system call numbers of the ia32 port,
6 * this is for the kernel only. 6 * this is for the kernel only.
7 * Only add syscalls here where some part of the kernel needs to know
8 * the number. This should be otherwise in sync with asm-i386/unistd.h. -AK
7 */ 9 */
8 10
9#define __NR_ia32_restart_syscall 0 11#define __NR_ia32_restart_syscall 0
10#define __NR_ia32_exit 1 12#define __NR_ia32_exit 1
11#define __NR_ia32_fork 2
12#define __NR_ia32_read 3 13#define __NR_ia32_read 3
13#define __NR_ia32_write 4 14#define __NR_ia32_write 4
14#define __NR_ia32_open 5 15#define __NR_ia32_sigreturn 119
15#define __NR_ia32_close 6
16#define __NR_ia32_waitpid 7
17#define __NR_ia32_creat 8
18#define __NR_ia32_link 9
19#define __NR_ia32_unlink 10
20#define __NR_ia32_execve 11
21#define __NR_ia32_chdir 12
22#define __NR_ia32_time 13
23#define __NR_ia32_mknod 14
24#define __NR_ia32_chmod 15
25#define __NR_ia32_lchown 16
26#define __NR_ia32_break 17
27#define __NR_ia32_oldstat 18
28#define __NR_ia32_lseek 19
29#define __NR_ia32_getpid 20
30#define __NR_ia32_mount 21
31#define __NR_ia32_umount 22
32#define __NR_ia32_setuid 23
33#define __NR_ia32_getuid 24
34#define __NR_ia32_stime 25
35#define __NR_ia32_ptrace 26
36#define __NR_ia32_alarm 27
37#define __NR_ia32_oldfstat 28
38#define __NR_ia32_pause 29
39#define __NR_ia32_utime 30
40#define __NR_ia32_stty 31
41#define __NR_ia32_gtty 32
42#define __NR_ia32_access 33
43#define __NR_ia32_nice 34
44#define __NR_ia32_ftime 35
45#define __NR_ia32_sync 36
46#define __NR_ia32_kill 37
47#define __NR_ia32_rename 38
48#define __NR_ia32_mkdir 39
49#define __NR_ia32_rmdir 40
50#define __NR_ia32_dup 41
51#define __NR_ia32_pipe 42
52#define __NR_ia32_times 43
53#define __NR_ia32_prof 44
54#define __NR_ia32_brk 45
55#define __NR_ia32_setgid 46
56#define __NR_ia32_getgid 47
57#define __NR_ia32_signal 48
58#define __NR_ia32_geteuid 49
59#define __NR_ia32_getegid 50
60#define __NR_ia32_acct 51
61#define __NR_ia32_umount2 52
62#define __NR_ia32_lock 53
63#define __NR_ia32_ioctl 54
64#define __NR_ia32_fcntl 55
65#define __NR_ia32_mpx 56
66#define __NR_ia32_setpgid 57
67#define __NR_ia32_ulimit 58
68#define __NR_ia32_oldolduname 59
69#define __NR_ia32_umask 60
70#define __NR_ia32_chroot 61
71#define __NR_ia32_ustat 62
72#define __NR_ia32_dup2 63
73#define __NR_ia32_getppid 64
74#define __NR_ia32_getpgrp 65
75#define __NR_ia32_setsid 66
76#define __NR_ia32_sigaction 67
77#define __NR_ia32_sgetmask 68
78#define __NR_ia32_ssetmask 69
79#define __NR_ia32_setreuid 70
80#define __NR_ia32_setregid 71
81#define __NR_ia32_sigsuspend 72
82#define __NR_ia32_sigpending 73
83#define __NR_ia32_sethostname 74
84#define __NR_ia32_setrlimit 75
85#define __NR_ia32_getrlimit 76 /* Back compatible 2Gig limited rlimit */
86#define __NR_ia32_getrusage 77
87#define __NR_ia32_gettimeofday 78
88#define __NR_ia32_settimeofday 79
89#define __NR_ia32_getgroups 80
90#define __NR_ia32_setgroups 81
91#define __NR_ia32_select 82
92#define __NR_ia32_symlink 83
93#define __NR_ia32_oldlstat 84
94#define __NR_ia32_readlink 85
95#define __NR_ia32_uselib 86
96#define __NR_ia32_swapon 87
97#define __NR_ia32_reboot 88
98#define __NR_ia32_readdir 89
99#define __NR_ia32_mmap 90
100#define __NR_ia32_munmap 91
101#define __NR_ia32_truncate 92
102#define __NR_ia32_ftruncate 93
103#define __NR_ia32_fchmod 94
104#define __NR_ia32_fchown 95
105#define __NR_ia32_getpriority 96
106#define __NR_ia32_setpriority 97
107#define __NR_ia32_profil 98
108#define __NR_ia32_statfs 99
109#define __NR_ia32_fstatfs 100
110#define __NR_ia32_ioperm 101
111#define __NR_ia32_socketcall 102
112#define __NR_ia32_syslog 103
113#define __NR_ia32_setitimer 104
114#define __NR_ia32_getitimer 105
115#define __NR_ia32_stat 106
116#define __NR_ia32_lstat 107
117#define __NR_ia32_fstat 108
118#define __NR_ia32_olduname 109
119#define __NR_ia32_iopl 110
120#define __NR_ia32_vhangup 111
121#define __NR_ia32_idle 112
122#define __NR_ia32_vm86old 113
123#define __NR_ia32_wait4 114
124#define __NR_ia32_swapoff 115
125#define __NR_ia32_sysinfo 116
126#define __NR_ia32_ipc 117
127#define __NR_ia32_fsync 118
128#define __NR_ia32_sigreturn 119
129#define __NR_ia32_clone 120
130#define __NR_ia32_setdomainname 121
131#define __NR_ia32_uname 122
132#define __NR_ia32_modify_ldt 123
133#define __NR_ia32_adjtimex 124
134#define __NR_ia32_mprotect 125
135#define __NR_ia32_sigprocmask 126
136#define __NR_ia32_create_module 127
137#define __NR_ia32_init_module 128
138#define __NR_ia32_delete_module 129
139#define __NR_ia32_get_kernel_syms 130
140#define __NR_ia32_quotactl 131
141#define __NR_ia32_getpgid 132
142#define __NR_ia32_fchdir 133
143#define __NR_ia32_bdflush 134
144#define __NR_ia32_sysfs 135
145#define __NR_ia32_personality 136
146#define __NR_ia32_afs_syscall 137 /* Syscall for Andrew File System */
147#define __NR_ia32_setfsuid 138
148#define __NR_ia32_setfsgid 139
149#define __NR_ia32__llseek 140
150#define __NR_ia32_getdents 141
151#define __NR_ia32__newselect 142
152#define __NR_ia32_flock 143
153#define __NR_ia32_msync 144
154#define __NR_ia32_readv 145
155#define __NR_ia32_writev 146
156#define __NR_ia32_getsid 147
157#define __NR_ia32_fdatasync 148
158#define __NR_ia32__sysctl 149
159#define __NR_ia32_mlock 150
160#define __NR_ia32_munlock 151
161#define __NR_ia32_mlockall 152
162#define __NR_ia32_munlockall 153
163#define __NR_ia32_sched_setparam 154
164#define __NR_ia32_sched_getparam 155
165#define __NR_ia32_sched_setscheduler 156
166#define __NR_ia32_sched_getscheduler 157
167#define __NR_ia32_sched_yield 158
168#define __NR_ia32_sched_get_priority_max 159
169#define __NR_ia32_sched_get_priority_min 160
170#define __NR_ia32_sched_rr_get_interval 161
171#define __NR_ia32_nanosleep 162
172#define __NR_ia32_mremap 163
173#define __NR_ia32_setresuid 164
174#define __NR_ia32_getresuid 165
175#define __NR_ia32_vm86 166
176#define __NR_ia32_query_module 167
177#define __NR_ia32_poll 168
178#define __NR_ia32_nfsservctl 169
179#define __NR_ia32_setresgid 170
180#define __NR_ia32_getresgid 171
181#define __NR_ia32_prctl 172
182#define __NR_ia32_rt_sigreturn 173 16#define __NR_ia32_rt_sigreturn 173
183#define __NR_ia32_rt_sigaction 174
184#define __NR_ia32_rt_sigprocmask 175
185#define __NR_ia32_rt_sigpending 176
186#define __NR_ia32_rt_sigtimedwait 177
187#define __NR_ia32_rt_sigqueueinfo 178
188#define __NR_ia32_rt_sigsuspend 179
189#define __NR_ia32_pread 180
190#define __NR_ia32_pwrite 181
191#define __NR_ia32_chown 182
192#define __NR_ia32_getcwd 183
193#define __NR_ia32_capget 184
194#define __NR_ia32_capset 185
195#define __NR_ia32_sigaltstack 186
196#define __NR_ia32_sendfile 187
197#define __NR_ia32_getpmsg 188 /* some people actually want streams */
198#define __NR_ia32_putpmsg 189 /* some people actually want streams */
199#define __NR_ia32_vfork 190
200#define __NR_ia32_ugetrlimit 191 /* SuS compliant getrlimit */
201#define __NR_ia32_mmap2 192
202#define __NR_ia32_truncate64 193
203#define __NR_ia32_ftruncate64 194
204#define __NR_ia32_stat64 195
205#define __NR_ia32_lstat64 196
206#define __NR_ia32_fstat64 197
207#define __NR_ia32_lchown32 198
208#define __NR_ia32_getuid32 199
209#define __NR_ia32_getgid32 200
210#define __NR_ia32_geteuid32 201
211#define __NR_ia32_getegid32 202
212#define __NR_ia32_setreuid32 203
213#define __NR_ia32_setregid32 204
214#define __NR_ia32_getgroups32 205
215#define __NR_ia32_setgroups32 206
216#define __NR_ia32_fchown32 207
217#define __NR_ia32_setresuid32 208
218#define __NR_ia32_getresuid32 209
219#define __NR_ia32_setresgid32 210
220#define __NR_ia32_getresgid32 211
221#define __NR_ia32_chown32 212
222#define __NR_ia32_setuid32 213
223#define __NR_ia32_setgid32 214
224#define __NR_ia32_setfsuid32 215
225#define __NR_ia32_setfsgid32 216
226#define __NR_ia32_pivot_root 217
227#define __NR_ia32_mincore 218
228#define __NR_ia32_madvise 219
229#define __NR_ia32_madvise1 219 /* delete when C lib stub is removed */
230#define __NR_ia32_getdents64 220
231#define __NR_ia32_fcntl64 221
232#define __NR_ia32_tuxcall 222
233#define __NR_ia32_security 223
234#define __NR_ia32_gettid 224
235#define __NR_ia32_readahead 225
236#define __NR_ia32_setxattr 226
237#define __NR_ia32_lsetxattr 227
238#define __NR_ia32_fsetxattr 228
239#define __NR_ia32_getxattr 229
240#define __NR_ia32_lgetxattr 230
241#define __NR_ia32_fgetxattr 231
242#define __NR_ia32_listxattr 232
243#define __NR_ia32_llistxattr 233
244#define __NR_ia32_flistxattr 234
245#define __NR_ia32_removexattr 235
246#define __NR_ia32_lremovexattr 236
247#define __NR_ia32_fremovexattr 237
248#define __NR_ia32_tkill 238
249#define __NR_ia32_sendfile64 239
250#define __NR_ia32_futex 240
251#define __NR_ia32_sched_setaffinity 241
252#define __NR_ia32_sched_getaffinity 242
253#define __NR_ia32_set_thread_area 243
254#define __NR_ia32_get_thread_area 244
255#define __NR_ia32_io_setup 245
256#define __NR_ia32_io_destroy 246
257#define __NR_ia32_io_getevents 247
258#define __NR_ia32_io_submit 248
259#define __NR_ia32_io_cancel 249
260#define __NR_ia32_exit_group 252
261#define __NR_ia32_lookup_dcookie 253
262#define __NR_ia32_sys_epoll_create 254
263#define __NR_ia32_sys_epoll_ctl 255
264#define __NR_ia32_sys_epoll_wait 256
265#define __NR_ia32_remap_file_pages 257
266#define __NR_ia32_set_tid_address 258
267#define __NR_ia32_timer_create 259
268#define __NR_ia32_timer_settime (__NR_ia32_timer_create+1)
269#define __NR_ia32_timer_gettime (__NR_ia32_timer_create+2)
270#define __NR_ia32_timer_getoverrun (__NR_ia32_timer_create+3)
271#define __NR_ia32_timer_delete (__NR_ia32_timer_create+4)
272#define __NR_ia32_clock_settime (__NR_ia32_timer_create+5)
273#define __NR_ia32_clock_gettime (__NR_ia32_timer_create+6)
274#define __NR_ia32_clock_getres (__NR_ia32_timer_create+7)
275#define __NR_ia32_clock_nanosleep (__NR_ia32_timer_create+8)
276#define __NR_ia32_statfs64 268
277#define __NR_ia32_fstatfs64 269
278#define __NR_ia32_tgkill 270
279#define __NR_ia32_utimes 271
280#define __NR_ia32_fadvise64_64 272
281#define __NR_ia32_vserver 273
282#define __NR_ia32_mbind 274
283#define __NR_ia32_get_mempolicy 275
284#define __NR_ia32_set_mempolicy 276
285#define __NR_ia32_mq_open 277
286#define __NR_ia32_mq_unlink (__NR_ia32_mq_open+1)
287#define __NR_ia32_mq_timedsend (__NR_ia32_mq_open+2)
288#define __NR_ia32_mq_timedreceive (__NR_ia32_mq_open+3)
289#define __NR_ia32_mq_notify (__NR_ia32_mq_open+4)
290#define __NR_ia32_mq_getsetattr (__NR_ia32_mq_open+5)
291#define __NR_ia32_kexec 283
292#define __NR_ia32_waitid 284
293/* #define __NR_sys_setaltroot 285 */
294#define __NR_ia32_add_key 286
295#define __NR_ia32_request_key 287
296#define __NR_ia32_keyctl 288
297#define __NR_ia32_ioprio_set 289
298#define __NR_ia32_ioprio_get 290
299#define __NR_ia32_inotify_init 291
300#define __NR_ia32_inotify_add_watch 292
301#define __NR_ia32_inotify_rm_watch 293
302#define __NR_ia32_migrate_pages 294
303#define __NR_ia32_openat 295
304#define __NR_ia32_mkdirat 296
305#define __NR_ia32_mknodat 297
306#define __NR_ia32_fchownat 298
307#define __NR_ia32_futimesat 299
308#define __NR_ia32_fstatat64 300
309#define __NR_ia32_unlinkat 301
310#define __NR_ia32_renameat 302
311#define __NR_ia32_linkat 303
312#define __NR_ia32_symlinkat 304
313#define __NR_ia32_readlinkat 305
314#define __NR_ia32_fchmodat 306
315#define __NR_ia32_faccessat 307
316#define __NR_ia32_pselect6 308
317#define __NR_ia32_ppoll 309
318#define __NR_ia32_unshare 310
319 17
320#endif /* _ASM_X86_64_IA32_UNISTD_H_ */ 18#endif /* _ASM_X86_64_IA32_UNISTD_H_ */
diff --git a/include/asm-x86_64/intel_arch_perfmon.h b/include/asm-x86_64/intel_arch_perfmon.h
new file mode 100644
index 0000000000..59c3964315
--- /dev/null
+++ b/include/asm-x86_64/intel_arch_perfmon.h
@@ -0,0 +1,19 @@
1#ifndef X86_64_INTEL_ARCH_PERFMON_H
2#define X86_64_INTEL_ARCH_PERFMON_H 1
3
4#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
5#define MSR_ARCH_PERFMON_PERFCTR1 0xc2
6
7#define MSR_ARCH_PERFMON_EVENTSEL0 0x186
8#define MSR_ARCH_PERFMON_EVENTSEL1 0x187
9
10#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22)
11#define ARCH_PERFMON_EVENTSEL_INT (1 << 20)
12#define ARCH_PERFMON_EVENTSEL_OS (1 << 17)
13#define ARCH_PERFMON_EVENTSEL_USR (1 << 16)
14
15#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL (0x3c)
16#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
17#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT (1 << 0)
18
19#endif /* X86_64_INTEL_ARCH_PERFMON_H */
diff --git a/include/asm-x86_64/k8.h b/include/asm-x86_64/k8.h
new file mode 100644
index 0000000000..699dd6961e
--- /dev/null
+++ b/include/asm-x86_64/k8.h
@@ -0,0 +1,14 @@
1#ifndef _ASM_K8_H
2#define _ASM_K8_H 1
3
4#include <linux/pci.h>
5
6extern struct pci_device_id k8_nb_ids[];
7
8extern int early_is_k8_nb(u32 value);
9extern struct pci_dev **k8_northbridges;
10extern int num_k8_northbridges;
11extern int cache_k8_northbridges(void);
12extern void k8_flush_garts(void);
13
14#endif
diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h
index cf795631d9..cd52c7f33b 100644
--- a/include/asm-x86_64/kdebug.h
+++ b/include/asm-x86_64/kdebug.h
@@ -15,6 +15,8 @@ struct die_args {
15 15
16extern int register_die_notifier(struct notifier_block *); 16extern int register_die_notifier(struct notifier_block *);
17extern int unregister_die_notifier(struct notifier_block *); 17extern int unregister_die_notifier(struct notifier_block *);
18extern int register_page_fault_notifier(struct notifier_block *);
19extern int unregister_page_fault_notifier(struct notifier_block *);
18extern struct atomic_notifier_head die_chain; 20extern struct atomic_notifier_head die_chain;
19 21
20/* Grossly misnamed. */ 22/* Grossly misnamed. */
diff --git a/include/asm-x86_64/kprobes.h b/include/asm-x86_64/kprobes.h
index 98a1e95ddb..d36febd9bb 100644
--- a/include/asm-x86_64/kprobes.h
+++ b/include/asm-x86_64/kprobes.h
@@ -43,6 +43,7 @@ typedef u8 kprobe_opcode_t;
43 43
44#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry 44#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry
45#define ARCH_SUPPORTS_KRETPROBES 45#define ARCH_SUPPORTS_KRETPROBES
46#define ARCH_INACTIVE_KPROBE_COUNT 1
46 47
47void kretprobe_trampoline(void); 48void kretprobe_trampoline(void);
48extern void arch_remove_kprobe(struct kprobe *p); 49extern void arch_remove_kprobe(struct kprobe *p);
diff --git a/include/asm-x86_64/local.h b/include/asm-x86_64/local.h
index cd17945bf2..e769e62002 100644
--- a/include/asm-x86_64/local.h
+++ b/include/asm-x86_64/local.h
@@ -59,12 +59,26 @@ static inline void local_sub(long i, local_t *v)
59 * This could be done better if we moved the per cpu data directly 59 * This could be done better if we moved the per cpu data directly
60 * after GS. 60 * after GS.
61 */ 61 */
62#define cpu_local_read(v) local_read(&__get_cpu_var(v)) 62
63#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i)) 63/* Need to disable preemption for the cpu local counters otherwise we could
64#define cpu_local_inc(v) local_inc(&__get_cpu_var(v)) 64 still access a variable of a previous CPU in a non atomic way. */
65#define cpu_local_dec(v) local_dec(&__get_cpu_var(v)) 65#define cpu_local_wrap_v(v) \
66#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v)) 66 ({ local_t res__; \
67#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v)) 67 preempt_disable(); \
68 res__ = (v); \
69 preempt_enable(); \
70 res__; })
71#define cpu_local_wrap(v) \
72 ({ preempt_disable(); \
73 v; \
74 preempt_enable(); }) \
75
76#define cpu_local_read(v) cpu_local_wrap_v(local_read(&__get_cpu_var(v)))
77#define cpu_local_set(v, i) cpu_local_wrap(local_set(&__get_cpu_var(v), (i)))
78#define cpu_local_inc(v) cpu_local_wrap(local_inc(&__get_cpu_var(v)))
79#define cpu_local_dec(v) cpu_local_wrap(local_dec(&__get_cpu_var(v)))
80#define cpu_local_add(i, v) cpu_local_wrap(local_add((i), &__get_cpu_var(v)))
81#define cpu_local_sub(i, v) cpu_local_wrap(local_sub((i), &__get_cpu_var(v)))
68 82
69#define __cpu_local_inc(v) cpu_local_inc(v) 83#define __cpu_local_inc(v) cpu_local_inc(v)
70#define __cpu_local_dec(v) cpu_local_dec(v) 84#define __cpu_local_dec(v) cpu_local_dec(v)
diff --git a/include/asm-x86_64/mce.h b/include/asm-x86_64/mce.h
index 7229785094..d13687dfd6 100644
--- a/include/asm-x86_64/mce.h
+++ b/include/asm-x86_64/mce.h
@@ -67,13 +67,22 @@ struct mce_log {
67/* Software defined banks */ 67/* Software defined banks */
68#define MCE_EXTENDED_BANK 128 68#define MCE_EXTENDED_BANK 128
69#define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0 69#define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0
70#define MCE_THRESHOLD_BASE MCE_EXTENDED_BANK + 1 /* MCE_AMD */ 70
71#define MCE_THRESHOLD_DRAM_ECC MCE_THRESHOLD_BASE + 4 71#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1) /* MCE_AMD */
72#define K8_MCE_THRESHOLD_BANK_0 (MCE_THRESHOLD_BASE + 0 * 9)
73#define K8_MCE_THRESHOLD_BANK_1 (MCE_THRESHOLD_BASE + 1 * 9)
74#define K8_MCE_THRESHOLD_BANK_2 (MCE_THRESHOLD_BASE + 2 * 9)
75#define K8_MCE_THRESHOLD_BANK_3 (MCE_THRESHOLD_BASE + 3 * 9)
76#define K8_MCE_THRESHOLD_BANK_4 (MCE_THRESHOLD_BASE + 4 * 9)
77#define K8_MCE_THRESHOLD_BANK_5 (MCE_THRESHOLD_BASE + 5 * 9)
78#define K8_MCE_THRESHOLD_DRAM_ECC (MCE_THRESHOLD_BANK_4 + 0)
72 79
73#ifdef __KERNEL__ 80#ifdef __KERNEL__
74#include <asm/atomic.h> 81#include <asm/atomic.h>
75 82
76void mce_log(struct mce *m); 83void mce_log(struct mce *m);
84DECLARE_PER_CPU(struct sys_device, device_mce);
85
77#ifdef CONFIG_X86_MCE_INTEL 86#ifdef CONFIG_X86_MCE_INTEL
78void mce_intel_feature_init(struct cpuinfo_x86 *c); 87void mce_intel_feature_init(struct cpuinfo_x86 *c);
79#else 88#else
diff --git a/include/asm-x86_64/mutex.h b/include/asm-x86_64/mutex.h
index 11fbee2bd6..06fab6de2a 100644
--- a/include/asm-x86_64/mutex.h
+++ b/include/asm-x86_64/mutex.h
@@ -24,7 +24,7 @@ do { \
24 typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \ 24 typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
25 \ 25 \
26 __asm__ __volatile__( \ 26 __asm__ __volatile__( \
27 LOCK " decl (%%rdi) \n" \ 27 LOCK_PREFIX " decl (%%rdi) \n" \
28 " js 2f \n" \ 28 " js 2f \n" \
29 "1: \n" \ 29 "1: \n" \
30 \ 30 \
@@ -74,7 +74,7 @@ do { \
74 typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \ 74 typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
75 \ 75 \
76 __asm__ __volatile__( \ 76 __asm__ __volatile__( \
77 LOCK " incl (%%rdi) \n" \ 77 LOCK_PREFIX " incl (%%rdi) \n" \
78 " jle 2f \n" \ 78 " jle 2f \n" \
79 "1: \n" \ 79 "1: \n" \
80 \ 80 \
diff --git a/include/asm-x86_64/nmi.h b/include/asm-x86_64/nmi.h
index d3abfc6a8f..efb45c894d 100644
--- a/include/asm-x86_64/nmi.h
+++ b/include/asm-x86_64/nmi.h
@@ -5,26 +5,27 @@
5#define ASM_NMI_H 5#define ASM_NMI_H
6 6
7#include <linux/pm.h> 7#include <linux/pm.h>
8#include <asm/io.h>
8 9
9struct pt_regs; 10struct pt_regs;
10 11
11typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu); 12typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu);
12 13
13/** 14/**
14 * set_nmi_callback 15 * set_nmi_callback
15 * 16 *
16 * Set a handler for an NMI. Only one handler may be 17 * Set a handler for an NMI. Only one handler may be
17 * set. Return 1 if the NMI was handled. 18 * set. Return 1 if the NMI was handled.
18 */ 19 */
19void set_nmi_callback(nmi_callback_t callback); 20void set_nmi_callback(nmi_callback_t callback);
20 21
21/** 22/**
22 * unset_nmi_callback 23 * unset_nmi_callback
23 * 24 *
24 * Remove the handler previously set. 25 * Remove the handler previously set.
25 */ 26 */
26void unset_nmi_callback(void); 27void unset_nmi_callback(void);
27 28
28#ifdef CONFIG_PM 29#ifdef CONFIG_PM
29 30
30/** Replace the PM callback routine for NMI. */ 31/** Replace the PM callback routine for NMI. */
@@ -56,4 +57,21 @@ extern int unknown_nmi_panic;
56 57
57extern int check_nmi_watchdog(void); 58extern int check_nmi_watchdog(void);
58 59
60extern void setup_apic_nmi_watchdog (void);
61extern int reserve_lapic_nmi(void);
62extern void release_lapic_nmi(void);
63extern void disable_timer_nmi_watchdog(void);
64extern void enable_timer_nmi_watchdog(void);
65extern void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
66
67extern void nmi_watchdog_default(void);
68extern int setup_nmi_watchdog(char *);
69
70extern unsigned int nmi_watchdog;
71#define NMI_DEFAULT -1
72#define NMI_NONE 0
73#define NMI_IO_APIC 1
74#define NMI_LOCAL_APIC 2
75#define NMI_INVALID 3
76
59#endif /* ASM_NMI_H */ 77#endif /* ASM_NMI_H */
diff --git a/include/asm-x86_64/pci.h b/include/asm-x86_64/pci.h
index 2db0620d54..49c5e92805 100644
--- a/include/asm-x86_64/pci.h
+++ b/include/asm-x86_64/pci.h
@@ -39,8 +39,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
39#include <asm/scatterlist.h> 39#include <asm/scatterlist.h>
40#include <linux/string.h> 40#include <linux/string.h>
41#include <asm/page.h> 41#include <asm/page.h>
42#include <linux/dma-mapping.h> /* for have_iommu */
43 42
43extern void pci_iommu_alloc(void);
44extern int iommu_setup(char *opt); 44extern int iommu_setup(char *opt);
45 45
46/* The PCI address space does equal the physical memory 46/* The PCI address space does equal the physical memory
@@ -52,7 +52,7 @@ extern int iommu_setup(char *opt);
52 */ 52 */
53#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys) 53#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
54 54
55#ifdef CONFIG_GART_IOMMU 55#if defined(CONFIG_IOMMU) || defined(CONFIG_CALGARY_IOMMU)
56 56
57/* 57/*
58 * x86-64 always supports DAC, but sometimes it is useful to force 58 * x86-64 always supports DAC, but sometimes it is useful to force
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 31e83c3bd0..a31ab4e68a 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -337,14 +337,8 @@ static inline int pmd_large(pmd_t pte) {
337/* to find an entry in a page-table-directory. */ 337/* to find an entry in a page-table-directory. */
338#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) 338#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
339#define pud_offset(pgd, address) ((pud_t *) pgd_page(*(pgd)) + pud_index(address)) 339#define pud_offset(pgd, address) ((pud_t *) pgd_page(*(pgd)) + pud_index(address))
340#define pud_offset_k(pgd, addr) pud_offset(pgd, addr)
341#define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT) 340#define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT)
342 341
343static inline pud_t *__pud_offset_k(pud_t *pud, unsigned long address)
344{
345 return pud + pud_index(address);
346}
347
348/* PMD - Level 2 access */ 342/* PMD - Level 2 access */
349#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK)) 343#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
350#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) 344#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 3061a38a3b..3b3c1217fe 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -69,7 +69,11 @@ struct cpuinfo_x86 {
69 cpumask_t llc_shared_map; /* cpus sharing the last level cache */ 69 cpumask_t llc_shared_map; /* cpus sharing the last level cache */
70#endif 70#endif
71 __u8 apicid; 71 __u8 apicid;
72#ifdef CONFIG_SMP
72 __u8 booted_cores; /* number of cores as seen by OS */ 73 __u8 booted_cores; /* number of cores as seen by OS */
74 __u8 phys_proc_id; /* Physical Processor id. */
75 __u8 cpu_core_id; /* Core id. */
76#endif
73} ____cacheline_aligned; 77} ____cacheline_aligned;
74 78
75#define X86_VENDOR_INTEL 0 79#define X86_VENDOR_INTEL 0
@@ -96,6 +100,7 @@ extern char ignore_irq13;
96extern void identify_cpu(struct cpuinfo_x86 *); 100extern void identify_cpu(struct cpuinfo_x86 *);
97extern void print_cpu_info(struct cpuinfo_x86 *); 101extern void print_cpu_info(struct cpuinfo_x86 *);
98extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); 102extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
103extern unsigned short num_cache_leaves;
99 104
100/* 105/*
101 * EFLAGS bits 106 * EFLAGS bits
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index 8abf2a43c9..038fe1f47e 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -37,7 +37,6 @@ extern void ia32_sysenter_target(void);
37 37
38extern void config_acpi_tables(void); 38extern void config_acpi_tables(void);
39extern void ia32_syscall(void); 39extern void ia32_syscall(void);
40extern void iommu_hole_init(void);
41 40
42extern int pmtimer_mark_offset(void); 41extern int pmtimer_mark_offset(void);
43extern void pmtimer_resume(void); 42extern void pmtimer_resume(void);
@@ -75,7 +74,7 @@ extern void main_timer_handler(struct pt_regs *regs);
75 74
76extern unsigned long end_pfn_map; 75extern unsigned long end_pfn_map;
77 76
78extern void show_trace(unsigned long * rsp); 77extern void show_trace(struct task_struct *, struct pt_regs *, unsigned long * rsp);
79extern void show_registers(struct pt_regs *regs); 78extern void show_registers(struct pt_regs *regs);
80 79
81extern void exception_table_check(void); 80extern void exception_table_check(void);
@@ -101,13 +100,9 @@ extern int unsynchronized_tsc(void);
101 100
102extern void select_idle_routine(const struct cpuinfo_x86 *c); 101extern void select_idle_routine(const struct cpuinfo_x86 *c);
103 102
104extern void gart_parse_options(char *);
105extern void __init no_iommu_init(void);
106
107extern unsigned long table_start, table_end; 103extern unsigned long table_start, table_end;
108 104
109extern int exception_trace; 105extern int exception_trace;
110extern int force_iommu, no_iommu;
111extern int using_apic_timer; 106extern int using_apic_timer;
112extern int disable_apic; 107extern int disable_apic;
113extern unsigned cpu_khz; 108extern unsigned cpu_khz;
@@ -116,7 +111,13 @@ extern int skip_ioapic_setup;
116extern int acpi_ht; 111extern int acpi_ht;
117extern int acpi_disabled; 112extern int acpi_disabled;
118 113
119#ifdef CONFIG_GART_IOMMU 114extern void no_iommu_init(void);
115extern int force_iommu, no_iommu;
116extern int iommu_detected;
117#ifdef CONFIG_IOMMU
118extern void gart_iommu_init(void);
119extern void gart_parse_options(char *);
120extern void iommu_hole_init(void);
120extern int fallback_aper_order; 121extern int fallback_aper_order;
121extern int fallback_aper_force; 122extern int fallback_aper_force;
122extern int iommu_aperture; 123extern int iommu_aperture;
diff --git a/include/asm-x86_64/rwlock.h b/include/asm-x86_64/rwlock.h
index 9942cc3930..dea0e94592 100644
--- a/include/asm-x86_64/rwlock.h
+++ b/include/asm-x86_64/rwlock.h
@@ -24,7 +24,7 @@
24#define RW_LOCK_BIAS_STR "0x01000000" 24#define RW_LOCK_BIAS_STR "0x01000000"
25 25
26#define __build_read_lock_ptr(rw, helper) \ 26#define __build_read_lock_ptr(rw, helper) \
27 asm volatile(LOCK "subl $1,(%0)\n\t" \ 27 asm volatile(LOCK_PREFIX "subl $1,(%0)\n\t" \
28 "js 2f\n" \ 28 "js 2f\n" \
29 "1:\n" \ 29 "1:\n" \
30 LOCK_SECTION_START("") \ 30 LOCK_SECTION_START("") \
@@ -34,7 +34,7 @@
34 ::"a" (rw) : "memory") 34 ::"a" (rw) : "memory")
35 35
36#define __build_read_lock_const(rw, helper) \ 36#define __build_read_lock_const(rw, helper) \
37 asm volatile(LOCK "subl $1,%0\n\t" \ 37 asm volatile(LOCK_PREFIX "subl $1,%0\n\t" \
38 "js 2f\n" \ 38 "js 2f\n" \
39 "1:\n" \ 39 "1:\n" \
40 LOCK_SECTION_START("") \ 40 LOCK_SECTION_START("") \
@@ -54,7 +54,7 @@
54 } while (0) 54 } while (0)
55 55
56#define __build_write_lock_ptr(rw, helper) \ 56#define __build_write_lock_ptr(rw, helper) \
57 asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \ 57 asm volatile(LOCK_PREFIX "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
58 "jnz 2f\n" \ 58 "jnz 2f\n" \
59 "1:\n" \ 59 "1:\n" \
60 LOCK_SECTION_START("") \ 60 LOCK_SECTION_START("") \
@@ -64,7 +64,7 @@
64 ::"a" (rw) : "memory") 64 ::"a" (rw) : "memory")
65 65
66#define __build_write_lock_const(rw, helper) \ 66#define __build_write_lock_const(rw, helper) \
67 asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",%0\n\t" \ 67 asm volatile(LOCK_PREFIX "subl $" RW_LOCK_BIAS_STR ",%0\n\t" \
68 "jnz 2f\n" \ 68 "jnz 2f\n" \
69 "1:\n" \ 69 "1:\n" \
70 LOCK_SECTION_START("") \ 70 LOCK_SECTION_START("") \
diff --git a/include/asm-x86_64/semaphore.h b/include/asm-x86_64/semaphore.h
index a389aa6fe8..064df08b9a 100644
--- a/include/asm-x86_64/semaphore.h
+++ b/include/asm-x86_64/semaphore.h
@@ -106,7 +106,7 @@ static inline void down(struct semaphore * sem)
106 106
107 __asm__ __volatile__( 107 __asm__ __volatile__(
108 "# atomic down operation\n\t" 108 "# atomic down operation\n\t"
109 LOCK "decl %0\n\t" /* --sem->count */ 109 LOCK_PREFIX "decl %0\n\t" /* --sem->count */
110 "js 2f\n" 110 "js 2f\n"
111 "1:\n" 111 "1:\n"
112 LOCK_SECTION_START("") 112 LOCK_SECTION_START("")
@@ -130,7 +130,7 @@ static inline int down_interruptible(struct semaphore * sem)
130 130
131 __asm__ __volatile__( 131 __asm__ __volatile__(
132 "# atomic interruptible down operation\n\t" 132 "# atomic interruptible down operation\n\t"
133 LOCK "decl %1\n\t" /* --sem->count */ 133 LOCK_PREFIX "decl %1\n\t" /* --sem->count */
134 "js 2f\n\t" 134 "js 2f\n\t"
135 "xorl %0,%0\n" 135 "xorl %0,%0\n"
136 "1:\n" 136 "1:\n"
@@ -154,7 +154,7 @@ static inline int down_trylock(struct semaphore * sem)
154 154
155 __asm__ __volatile__( 155 __asm__ __volatile__(
156 "# atomic interruptible down operation\n\t" 156 "# atomic interruptible down operation\n\t"
157 LOCK "decl %1\n\t" /* --sem->count */ 157 LOCK_PREFIX "decl %1\n\t" /* --sem->count */
158 "js 2f\n\t" 158 "js 2f\n\t"
159 "xorl %0,%0\n" 159 "xorl %0,%0\n"
160 "1:\n" 160 "1:\n"
@@ -178,7 +178,7 @@ static inline void up(struct semaphore * sem)
178{ 178{
179 __asm__ __volatile__( 179 __asm__ __volatile__(
180 "# atomic up operation\n\t" 180 "# atomic up operation\n\t"
181 LOCK "incl %0\n\t" /* ++sem->count */ 181 LOCK_PREFIX "incl %0\n\t" /* ++sem->count */
182 "jle 2f\n" 182 "jle 2f\n"
183 "1:\n" 183 "1:\n"
184 LOCK_SECTION_START("") 184 LOCK_SECTION_START("")
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index 7686b9b25a..6805e1feb3 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -53,8 +53,6 @@ extern int smp_call_function_single(int cpuid, void (*func) (void *info),
53 53
54extern cpumask_t cpu_sibling_map[NR_CPUS]; 54extern cpumask_t cpu_sibling_map[NR_CPUS];
55extern cpumask_t cpu_core_map[NR_CPUS]; 55extern cpumask_t cpu_core_map[NR_CPUS];
56extern u8 phys_proc_id[NR_CPUS];
57extern u8 cpu_core_id[NR_CPUS];
58extern u8 cpu_llc_id[NR_CPUS]; 56extern u8 cpu_llc_id[NR_CPUS];
59 57
60#define SMP_TRAMPOLINE_BASE 0x6000 58#define SMP_TRAMPOLINE_BASE 0x6000
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
index 5d8a5e3589..8d3421996f 100644
--- a/include/asm-x86_64/spinlock.h
+++ b/include/asm-x86_64/spinlock.h
@@ -31,15 +31,19 @@
31 "jmp 1b\n" \ 31 "jmp 1b\n" \
32 LOCK_SECTION_END 32 LOCK_SECTION_END
33 33
34#define __raw_spin_lock_string_up \
35 "\n\tdecl %0"
36
34#define __raw_spin_unlock_string \ 37#define __raw_spin_unlock_string \
35 "movl $1,%0" \ 38 "movl $1,%0" \
36 :"=m" (lock->slock) : : "memory" 39 :"=m" (lock->slock) : : "memory"
37 40
38static inline void __raw_spin_lock(raw_spinlock_t *lock) 41static inline void __raw_spin_lock(raw_spinlock_t *lock)
39{ 42{
40 __asm__ __volatile__( 43 alternative_smp(
41 __raw_spin_lock_string 44 __raw_spin_lock_string,
42 :"=m" (lock->slock) : : "memory"); 45 __raw_spin_lock_string_up,
46 "=m" (lock->slock) : : "memory");
43} 47}
44 48
45#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 49#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
diff --git a/include/asm-x86_64/string.h b/include/asm-x86_64/string.h
index ee6bf27534..9505d9f4be 100644
--- a/include/asm-x86_64/string.h
+++ b/include/asm-x86_64/string.h
@@ -6,7 +6,8 @@
6/* Written 2002 by Andi Kleen */ 6/* Written 2002 by Andi Kleen */
7 7
8/* Only used for special circumstances. Stolen from i386/string.h */ 8/* Only used for special circumstances. Stolen from i386/string.h */
9static inline void * __inline_memcpy(void * to, const void * from, size_t n) 9static __always_inline void *
10__inline_memcpy(void * to, const void * from, size_t n)
10{ 11{
11unsigned long d0, d1, d2; 12unsigned long d0, d1, d2;
12__asm__ __volatile__( 13__asm__ __volatile__(
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index f48e0dad8b..68e559f363 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -3,15 +3,10 @@
3 3
4#include <linux/kernel.h> 4#include <linux/kernel.h>
5#include <asm/segment.h> 5#include <asm/segment.h>
6#include <asm/alternative.h>
6 7
7#ifdef __KERNEL__ 8#ifdef __KERNEL__
8 9
9#ifdef CONFIG_SMP
10#define LOCK_PREFIX "lock ; "
11#else
12#define LOCK_PREFIX ""
13#endif
14
15#define __STR(x) #x 10#define __STR(x) #x
16#define STR(x) __STR(x) 11#define STR(x) __STR(x)
17 12
@@ -34,7 +29,7 @@
34 "thread_return:\n\t" \ 29 "thread_return:\n\t" \
35 "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \ 30 "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \
36 "movq %P[thread_info](%%rsi),%%r8\n\t" \ 31 "movq %P[thread_info](%%rsi),%%r8\n\t" \
37 LOCK "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \ 32 LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
38 "movq %%rax,%%rdi\n\t" \ 33 "movq %%rax,%%rdi\n\t" \
39 "jc ret_from_fork\n\t" \ 34 "jc ret_from_fork\n\t" \
40 RESTORE_CONTEXT \ 35 RESTORE_CONTEXT \
@@ -69,82 +64,6 @@ extern void load_gs_index(unsigned);
69 ".previous" \ 64 ".previous" \
70 : :"r" (value), "r" (0)) 65 : :"r" (value), "r" (0))
71 66
72#ifdef __KERNEL__
73struct alt_instr {
74 __u8 *instr; /* original instruction */
75 __u8 *replacement;
76 __u8 cpuid; /* cpuid bit set for replacement */
77 __u8 instrlen; /* length of original instruction */
78 __u8 replacementlen; /* length of new instruction, <= instrlen */
79 __u8 pad[5];
80};
81#endif
82
83/*
84 * Alternative instructions for different CPU types or capabilities.
85 *
86 * This allows to use optimized instructions even on generic binary
87 * kernels.
88 *
89 * length of oldinstr must be longer or equal the length of newinstr
90 * It can be padded with nops as needed.
91 *
92 * For non barrier like inlines please define new variants
93 * without volatile and memory clobber.
94 */
95#define alternative(oldinstr, newinstr, feature) \
96 asm volatile ("661:\n\t" oldinstr "\n662:\n" \
97 ".section .altinstructions,\"a\"\n" \
98 " .align 8\n" \
99 " .quad 661b\n" /* label */ \
100 " .quad 663f\n" /* new instruction */ \
101 " .byte %c0\n" /* feature bit */ \
102 " .byte 662b-661b\n" /* sourcelen */ \
103 " .byte 664f-663f\n" /* replacementlen */ \
104 ".previous\n" \
105 ".section .altinstr_replacement,\"ax\"\n" \
106 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
107 ".previous" :: "i" (feature) : "memory")
108
109/*
110 * Alternative inline assembly with input.
111 *
112 * Peculiarities:
113 * No memory clobber here.
114 * Argument numbers start with 1.
115 * Best is to use constraints that are fixed size (like (%1) ... "r")
116 * If you use variable sized constraints like "m" or "g" in the
117 * replacement make sure to pad to the worst case length.
118 */
119#define alternative_input(oldinstr, newinstr, feature, input...) \
120 asm volatile ("661:\n\t" oldinstr "\n662:\n" \
121 ".section .altinstructions,\"a\"\n" \
122 " .align 8\n" \
123 " .quad 661b\n" /* label */ \
124 " .quad 663f\n" /* new instruction */ \
125 " .byte %c0\n" /* feature bit */ \
126 " .byte 662b-661b\n" /* sourcelen */ \
127 " .byte 664f-663f\n" /* replacementlen */ \
128 ".previous\n" \
129 ".section .altinstr_replacement,\"ax\"\n" \
130 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
131 ".previous" :: "i" (feature), ##input)
132
133/* Like alternative_input, but with a single output argument */
134#define alternative_io(oldinstr, newinstr, feature, output, input...) \
135 asm volatile ("661:\n\t" oldinstr "\n662:\n" \
136 ".section .altinstructions,\"a\"\n" \
137 " .align 8\n" \
138 " .quad 661b\n" /* label */ \
139 " .quad 663f\n" /* new instruction */ \
140 " .byte %c[feat]\n" /* feature bit */ \
141 " .byte 662b-661b\n" /* sourcelen */ \
142 " .byte 664f-663f\n" /* replacementlen */ \
143 ".previous\n" \
144 ".section .altinstr_replacement,\"ax\"\n" \
145 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
146 ".previous" : output : [feat] "i" (feature), ##input)
147
148/* 67/*
149 * Clear and set 'TS' bit respectively 68 * Clear and set 'TS' bit respectively
150 */ 69 */
@@ -366,5 +285,6 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
366void cpu_idle_wait(void); 285void cpu_idle_wait(void);
367 286
368extern unsigned long arch_align_stack(unsigned long sp); 287extern unsigned long arch_align_stack(unsigned long sp);
288extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
369 289
370#endif 290#endif
diff --git a/include/asm-x86_64/tce.h b/include/asm-x86_64/tce.h
new file mode 100644
index 0000000000..ee51d31528
--- /dev/null
+++ b/include/asm-x86_64/tce.h
@@ -0,0 +1,47 @@
1/*
2 * Copyright (C) 2006 Muli Ben-Yehuda <muli@il.ibm.com>, IBM Corporation
3 * Copyright (C) 2006 Jon Mason <jdmason@us.ibm.com>, IBM Corporation
4 *
5 * This file is derived from asm-powerpc/tce.h.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#ifndef _ASM_X86_64_TCE_H
23#define _ASM_X86_64_TCE_H
24
25extern void* tce_table_kva[];
26extern unsigned int specified_table_size;
27struct iommu_table;
28
29#define TCE_ENTRY_SIZE 8 /* in bytes */
30
31#define TCE_READ_SHIFT 0
32#define TCE_WRITE_SHIFT 1
33#define TCE_HUBID_SHIFT 2 /* unused */
34#define TCE_RSVD_SHIFT 8 /* unused */
35#define TCE_RPN_SHIFT 12
36#define TCE_UNUSED_SHIFT 48 /* unused */
37
38#define TCE_RPN_MASK 0x0000fffffffff000ULL
39
40extern void tce_build(struct iommu_table *tbl, unsigned long index,
41 unsigned int npages, unsigned long uaddr, int direction);
42extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages);
43extern void* alloc_tce_table(void);
44extern void free_tce_table(void *tbl);
45extern int build_tce_table(struct pci_dev *dev, void __iomem *bbar);
46
47#endif /* _ASM_X86_64_TCE_H */
diff --git a/include/asm-x86_64/thread_info.h b/include/asm-x86_64/thread_info.h
index 4ac0e0a369..2029b00351 100644
--- a/include/asm-x86_64/thread_info.h
+++ b/include/asm-x86_64/thread_info.h
@@ -73,8 +73,21 @@ static inline struct thread_info *stack_thread_info(void)
73} 73}
74 74
75/* thread information allocation */ 75/* thread information allocation */
76#ifdef CONFIG_DEBUG_STACK_USAGE
77#define alloc_thread_info(tsk) \
78 ({ \
79 struct thread_info *ret; \
80 \
81 ret = ((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER)); \
82 if (ret) \
83 memset(ret, 0, THREAD_SIZE); \
84 ret; \
85 })
86#else
76#define alloc_thread_info(tsk) \ 87#define alloc_thread_info(tsk) \
77 ((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER)) 88 ((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER))
89#endif
90
78#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER) 91#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
79 92
80#else /* !__ASSEMBLY__ */ 93#else /* !__ASSEMBLY__ */
@@ -101,7 +114,7 @@ static inline struct thread_info *stack_thread_info(void)
101#define TIF_IRET 5 /* force IRET */ 114#define TIF_IRET 5 /* force IRET */
102#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ 115#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
103#define TIF_SECCOMP 8 /* secure computing */ 116#define TIF_SECCOMP 8 /* secure computing */
104#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ 117/* 16 free */
105#define TIF_IA32 17 /* 32bit process */ 118#define TIF_IA32 17 /* 32bit process */
106#define TIF_FORK 18 /* ret_from_fork */ 119#define TIF_FORK 18 /* ret_from_fork */
107#define TIF_ABI_PENDING 19 120#define TIF_ABI_PENDING 19
@@ -115,7 +128,6 @@ static inline struct thread_info *stack_thread_info(void)
115#define _TIF_IRET (1<<TIF_IRET) 128#define _TIF_IRET (1<<TIF_IRET)
116#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) 129#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
117#define _TIF_SECCOMP (1<<TIF_SECCOMP) 130#define _TIF_SECCOMP (1<<TIF_SECCOMP)
118#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
119#define _TIF_IA32 (1<<TIF_IA32) 131#define _TIF_IA32 (1<<TIF_IA32)
120#define _TIF_FORK (1<<TIF_FORK) 132#define _TIF_FORK (1<<TIF_FORK)
121#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) 133#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
@@ -137,6 +149,9 @@ static inline struct thread_info *stack_thread_info(void)
137 */ 149 */
138#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */ 150#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
139#define TS_COMPAT 0x0002 /* 32bit syscall active */ 151#define TS_COMPAT 0x0002 /* 32bit syscall active */
152#define TS_POLLING 0x0004 /* true if in idle loop and not sleeping */
153
154#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING)
140 155
141#endif /* __KERNEL__ */ 156#endif /* __KERNEL__ */
142 157
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h
index 80c4e44d01..6e7a2e976b 100644
--- a/include/asm-x86_64/topology.h
+++ b/include/asm-x86_64/topology.h
@@ -7,8 +7,6 @@
7#include <asm/mpspec.h> 7#include <asm/mpspec.h>
8#include <asm/bitops.h> 8#include <asm/bitops.h>
9 9
10/* Map the K8 CPU local memory controllers to a simple 1:1 CPU:NODE topology */
11
12extern cpumask_t cpu_online_map; 10extern cpumask_t cpu_online_map;
13 11
14extern unsigned char cpu_to_node[]; 12extern unsigned char cpu_to_node[];
@@ -57,12 +55,12 @@ extern int __node_distance(int, int);
57#endif 55#endif
58 56
59#ifdef CONFIG_SMP 57#ifdef CONFIG_SMP
60#define topology_physical_package_id(cpu) \ 58#define topology_physical_package_id(cpu) (cpu_data[cpu].phys_proc_id)
61 (phys_proc_id[cpu] == BAD_APICID ? -1 : phys_proc_id[cpu]) 59#define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id)
62#define topology_core_id(cpu) \
63 (cpu_core_id[cpu] == BAD_APICID ? 0 : cpu_core_id[cpu])
64#define topology_core_siblings(cpu) (cpu_core_map[cpu]) 60#define topology_core_siblings(cpu) (cpu_core_map[cpu])
65#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) 61#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
62#define mc_capable() (boot_cpu_data.x86_max_cores > 1)
63#define smt_capable() (smp_num_siblings > 1)
66#endif 64#endif
67 65
68#include <asm-generic/topology.h> 66#include <asm-generic/topology.h>
diff --git a/include/asm-x86_64/unwind.h b/include/asm-x86_64/unwind.h
new file mode 100644
index 0000000000..f3e7124eff
--- /dev/null
+++ b/include/asm-x86_64/unwind.h
@@ -0,0 +1,106 @@
1#ifndef _ASM_X86_64_UNWIND_H
2#define _ASM_X86_64_UNWIND_H
3
4/*
5 * Copyright (C) 2002-2006 Novell, Inc.
6 * Jan Beulich <jbeulich@novell.com>
7 * This code is released under version 2 of the GNU GPL.
8 */
9
10#ifdef CONFIG_STACK_UNWIND
11
12#include <linux/sched.h>
13#include <asm/ptrace.h>
14#include <asm/uaccess.h>
15#include <asm/vsyscall.h>
16
17struct unwind_frame_info
18{
19 struct pt_regs regs;
20 struct task_struct *task;
21};
22
23#define UNW_PC(frame) (frame)->regs.rip
24#define UNW_SP(frame) (frame)->regs.rsp
25#ifdef CONFIG_FRAME_POINTER
26#define UNW_FP(frame) (frame)->regs.rbp
27#define FRAME_RETADDR_OFFSET 8
28#define FRAME_LINK_OFFSET 0
29#define STACK_BOTTOM(tsk) (((tsk)->thread.rsp0 - 1) & ~(THREAD_SIZE - 1))
30#define STACK_TOP(tsk) ((tsk)->thread.rsp0)
31#endif
32/* Might need to account for the special exception and interrupt handling
33 stacks here, since normally
34 EXCEPTION_STACK_ORDER < THREAD_ORDER < IRQSTACK_ORDER,
35 but the construct is needed only for getting across the stack switch to
36 the interrupt stack - thus considering the IRQ stack itself is unnecessary,
37 and the overhead of comparing against all exception handling stacks seems
38 not desirable. */
39#define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1))
40
41#define UNW_REGISTER_INFO \
42 PTREGS_INFO(rax), \
43 PTREGS_INFO(rdx), \
44 PTREGS_INFO(rcx), \
45 PTREGS_INFO(rbx), \
46 PTREGS_INFO(rsi), \
47 PTREGS_INFO(rdi), \
48 PTREGS_INFO(rbp), \
49 PTREGS_INFO(rsp), \
50 PTREGS_INFO(r8), \
51 PTREGS_INFO(r9), \
52 PTREGS_INFO(r10), \
53 PTREGS_INFO(r11), \
54 PTREGS_INFO(r12), \
55 PTREGS_INFO(r13), \
56 PTREGS_INFO(r14), \
57 PTREGS_INFO(r15), \
58 PTREGS_INFO(rip)
59
60static inline void arch_unw_init_frame_info(struct unwind_frame_info *info,
61 /*const*/ struct pt_regs *regs)
62{
63 info->regs = *regs;
64}
65
66static inline void arch_unw_init_blocked(struct unwind_frame_info *info)
67{
68 extern const char thread_return[];
69
70 memset(&info->regs, 0, sizeof(info->regs));
71 info->regs.rip = (unsigned long)thread_return;
72 info->regs.cs = __KERNEL_CS;
73 __get_user(info->regs.rbp, (unsigned long *)info->task->thread.rsp);
74 info->regs.rsp = info->task->thread.rsp;
75 info->regs.ss = __KERNEL_DS;
76}
77
78extern int arch_unwind_init_running(struct unwind_frame_info *,
79 int (*callback)(struct unwind_frame_info *,
80 void *arg),
81 void *arg);
82
83static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
84{
85#if 0 /* This can only work when selector register saves/restores
86 are properly annotated (and tracked in UNW_REGISTER_INFO). */
87 return user_mode(&info->regs);
88#else
89 return (long)info->regs.rip >= 0
90 || (info->regs.rip >= VSYSCALL_START && info->regs.rip < VSYSCALL_END)
91 || (long)info->regs.rsp >= 0;
92#endif
93}
94
95#else
96
97#define UNW_PC(frame) ((void)(frame), 0)
98
99static inline int arch_unw_user_mode(const void *info)
100{
101 return 0;
102}
103
104#endif
105
106#endif /* _ASM_X86_64_UNWIND_H */
diff --git a/include/keys/user-type.h b/include/keys/user-type.h
index a3dae1803f..c37c34275a 100644
--- a/include/keys/user-type.h
+++ b/include/keys/user-type.h
@@ -37,6 +37,7 @@ extern struct key_type key_type_user;
37extern int user_instantiate(struct key *key, const void *data, size_t datalen); 37extern int user_instantiate(struct key *key, const void *data, size_t datalen);
38extern int user_update(struct key *key, const void *data, size_t datalen); 38extern int user_update(struct key *key, const void *data, size_t datalen);
39extern int user_match(const struct key *key, const void *criterion); 39extern int user_match(const struct key *key, const void *criterion);
40extern void user_revoke(struct key *key);
40extern void user_destroy(struct key *key); 41extern void user_destroy(struct key *key);
41extern void user_describe(const struct key *user, struct seq_file *m); 42extern void user_describe(const struct key *user, struct seq_file *m);
42extern long user_read(const struct key *key, 43extern long user_read(const struct key *key,
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 90d6df1551..88b5dfd8ee 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -528,12 +528,18 @@ static inline void acpi_set_cstate_limit(unsigned int new_limit) { return; }
528 528
529#ifdef CONFIG_ACPI_NUMA 529#ifdef CONFIG_ACPI_NUMA
530int acpi_get_pxm(acpi_handle handle); 530int acpi_get_pxm(acpi_handle handle);
531int acpi_get_node(acpi_handle *handle);
531#else 532#else
532static inline int acpi_get_pxm(acpi_handle handle) 533static inline int acpi_get_pxm(acpi_handle handle)
533{ 534{
534 return 0; 535 return 0;
535} 536}
537static inline int acpi_get_node(acpi_handle *handle)
538{
539 return 0;
540}
536#endif 541#endif
542extern int acpi_paddr_to_node(u64 start_addr, u64 size);
537 543
538extern int pnpacpi_disabled; 544extern int pnpacpi_disabled;
539 545
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index d9ed279698..dcc5de7cc4 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -24,6 +24,9 @@
24 * The available bitmap operations and their rough meaning in the 24 * The available bitmap operations and their rough meaning in the
25 * case that the bitmap is a single unsigned long are thus: 25 * case that the bitmap is a single unsigned long are thus:
26 * 26 *
27 * Note that nbits should be always a compile time evaluable constant.
28 * Otherwise many inlines will generate horrible code.
29 *
27 * bitmap_zero(dst, nbits) *dst = 0UL 30 * bitmap_zero(dst, nbits) *dst = 0UL
28 * bitmap_fill(dst, nbits) *dst = ~0UL 31 * bitmap_fill(dst, nbits) *dst = ~0UL
29 * bitmap_copy(dst, src, nbits) *dst = *src 32 * bitmap_copy(dst, src, nbits) *dst = *src
@@ -244,6 +247,8 @@ static inline int bitmap_full(const unsigned long *src, int nbits)
244 247
245static inline int bitmap_weight(const unsigned long *src, int nbits) 248static inline int bitmap_weight(const unsigned long *src, int nbits)
246{ 249{
250 if (nbits <= BITS_PER_LONG)
251 return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
247 return __bitmap_weight(src, nbits); 252 return __bitmap_weight(src, nbits);
248} 253}
249 254
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index fb7e9b7ccb..737e407d0c 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -149,7 +149,6 @@ void create_empty_buffers(struct page *, unsigned long,
149 unsigned long b_state); 149 unsigned long b_state);
150void end_buffer_read_sync(struct buffer_head *bh, int uptodate); 150void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
151void end_buffer_write_sync(struct buffer_head *bh, int uptodate); 151void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
152void end_buffer_async_write(struct buffer_head *bh, int uptodate);
153 152
154/* Things to do with buffers at mapping->private_list */ 153/* Things to do with buffers at mapping->private_list */
155void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode); 154void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
@@ -214,6 +213,7 @@ int nobh_truncate_page(struct address_space *, loff_t);
214int nobh_writepage(struct page *page, get_block_t *get_block, 213int nobh_writepage(struct page *page, get_block_t *get_block,
215 struct writeback_control *wbc); 214 struct writeback_control *wbc);
216 215
216void buffer_init(void);
217 217
218/* 218/*
219 * inline definitions 219 * inline definitions
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
new file mode 100644
index 0000000000..d852024ed0
--- /dev/null
+++ b/include/linux/clocksource.h
@@ -0,0 +1,185 @@
1/* linux/include/linux/clocksource.h
2 *
3 * This file contains the structure definitions for clocksources.
4 *
5 * If you are not a clocksource, or timekeeping code, you should
6 * not be including this file!
7 */
8#ifndef _LINUX_CLOCKSOURCE_H
9#define _LINUX_CLOCKSOURCE_H
10
11#include <linux/types.h>
12#include <linux/timex.h>
13#include <linux/time.h>
14#include <linux/list.h>
15#include <asm/div64.h>
16#include <asm/io.h>
17
18/* clocksource cycle base type */
19typedef u64 cycle_t;
20
21/**
22 * struct clocksource - hardware abstraction for a free running counter
23 * Provides mostly state-free accessors to the underlying hardware.
24 *
25 * @name: ptr to clocksource name
26 * @list: list head for registration
27 * @rating: rating value for selection (higher is better)
28 * To avoid rating inflation the following
29 * list should give you a guide as to how
30 * to assign your clocksource a rating
31 * 1-99: Unfit for real use
32 * Only available for bootup and testing purposes.
33 * 100-199: Base level usability.
34 * Functional for real use, but not desired.
35 * 200-299: Good.
36 * A correct and usable clocksource.
37 * 300-399: Desired.
38 * A reasonably fast and accurate clocksource.
39 * 400-499: Perfect
40 * The ideal clocksource. A must-use where
41 * available.
42 * @read: returns a cycle value
43 * @mask: bitmask for two's complement
44 * subtraction of non 64 bit counters
45 * @mult: cycle to nanosecond multiplier
46 * @shift: cycle to nanosecond divisor (power of two)
47 * @update_callback: called when safe to alter clocksource values
48 * @is_continuous: defines if clocksource is free-running.
49 * @cycle_interval: Used internally by timekeeping core, please ignore.
50 * @xtime_interval: Used internally by timekeeping core, please ignore.
51 */
52struct clocksource {
53 char *name;
54 struct list_head list;
55 int rating;
56 cycle_t (*read)(void);
57 cycle_t mask;
58 u32 mult;
59 u32 shift;
60 int (*update_callback)(void);
61 int is_continuous;
62
63 /* timekeeping specific data, ignore */
64 cycle_t cycle_last, cycle_interval;
65 u64 xtime_nsec, xtime_interval;
66 s64 error;
67};
68
69/* simplify initialization of mask field */
70#define CLOCKSOURCE_MASK(bits) (cycle_t)(bits<64 ? ((1ULL<<bits)-1) : -1)
71
72/**
73 * clocksource_khz2mult - calculates mult from khz and shift
74 * @khz: Clocksource frequency in KHz
75 * @shift_constant: Clocksource shift factor
76 *
77 * Helper functions that converts a khz counter frequency to a timsource
78 * multiplier, given the clocksource shift value
79 */
80static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
81{
82 /* khz = cyc/(Million ns)
83 * mult/2^shift = ns/cyc
84 * mult = ns/cyc * 2^shift
85 * mult = 1Million/khz * 2^shift
86 * mult = 1000000 * 2^shift / khz
87 * mult = (1000000<<shift) / khz
88 */
89 u64 tmp = ((u64)1000000) << shift_constant;
90
91 tmp += khz/2; /* round for do_div */
92 do_div(tmp, khz);
93
94 return (u32)tmp;
95}
96
97/**
98 * clocksource_hz2mult - calculates mult from hz and shift
99 * @hz: Clocksource frequency in Hz
100 * @shift_constant: Clocksource shift factor
101 *
102 * Helper functions that converts a hz counter
103 * frequency to a timsource multiplier, given the
104 * clocksource shift value
105 */
106static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
107{
108 /* hz = cyc/(Billion ns)
109 * mult/2^shift = ns/cyc
110 * mult = ns/cyc * 2^shift
111 * mult = 1Billion/hz * 2^shift
112 * mult = 1000000000 * 2^shift / hz
113 * mult = (1000000000<<shift) / hz
114 */
115 u64 tmp = ((u64)1000000000) << shift_constant;
116
117 tmp += hz/2; /* round for do_div */
118 do_div(tmp, hz);
119
120 return (u32)tmp;
121}
122
123/**
124 * clocksource_read: - Access the clocksource's current cycle value
125 * @cs: pointer to clocksource being read
126 *
127 * Uses the clocksource to return the current cycle_t value
128 */
129static inline cycle_t clocksource_read(struct clocksource *cs)
130{
131 return cs->read();
132}
133
134/**
135 * cyc2ns - converts clocksource cycles to nanoseconds
136 * @cs: Pointer to clocksource
137 * @cycles: Cycles
138 *
139 * Uses the clocksource and ntp ajdustment to convert cycle_ts to nanoseconds.
140 *
141 * XXX - This could use some mult_lxl_ll() asm optimization
142 */
143static inline s64 cyc2ns(struct clocksource *cs, cycle_t cycles)
144{
145 u64 ret = (u64)cycles;
146 ret = (ret * cs->mult) >> cs->shift;
147 return ret;
148}
149
150/**
151 * clocksource_calculate_interval - Calculates a clocksource interval struct
152 *
153 * @c: Pointer to clocksource.
154 * @length_nsec: Desired interval length in nanoseconds.
155 *
156 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
157 * pair and interval request.
158 *
159 * Unless you're the timekeeping code, you should not be using this!
160 */
161static inline void clocksource_calculate_interval(struct clocksource *c,
162 unsigned long length_nsec)
163{
164 u64 tmp;
165
166 /* XXX - All of this could use a whole lot of optimization */
167 tmp = length_nsec;
168 tmp <<= c->shift;
169 tmp += c->mult/2;
170 do_div(tmp, c->mult);
171
172 c->cycle_interval = (cycle_t)tmp;
173 if (c->cycle_interval == 0)
174 c->cycle_interval = 1;
175
176 c->xtime_interval = (u64)c->cycle_interval * c->mult;
177}
178
179
180/* used to install a new clocksource */
181int clocksource_register(struct clocksource*);
182void clocksource_reselect(void);
183struct clocksource* clocksource_get_next(void);
184
185#endif /* _LINUX_CLOCKSOURCE_H */
diff --git a/include/linux/compat.h b/include/linux/compat.h
index dda1697ec7..9760753e66 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -226,5 +226,7 @@ static inline int compat_timespec_compare(struct compat_timespec *lhs,
226 226
227asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp); 227asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
228 228
229extern int compat_printk(const char *fmt, ...);
230
229#endif /* CONFIG_COMPAT */ 231#endif /* CONFIG_COMPAT */
230#endif /* _LINUX_COMPAT_H */ 232#endif /* _LINUX_COMPAT_H */
diff --git a/include/linux/compat_ioctl.h b/include/linux/compat_ioctl.h
index 89ab677cb9..917d62e414 100644
--- a/include/linux/compat_ioctl.h
+++ b/include/linux/compat_ioctl.h
@@ -673,6 +673,11 @@ COMPATIBLE_IOCTL(CAPI_SET_FLAGS)
673COMPATIBLE_IOCTL(CAPI_CLR_FLAGS) 673COMPATIBLE_IOCTL(CAPI_CLR_FLAGS)
674COMPATIBLE_IOCTL(CAPI_NCCI_OPENCOUNT) 674COMPATIBLE_IOCTL(CAPI_NCCI_OPENCOUNT)
675COMPATIBLE_IOCTL(CAPI_NCCI_GETUNIT) 675COMPATIBLE_IOCTL(CAPI_NCCI_GETUNIT)
676/* Siemens Gigaset */
677COMPATIBLE_IOCTL(GIGASET_REDIR)
678COMPATIBLE_IOCTL(GIGASET_CONFIG)
679COMPATIBLE_IOCTL(GIGASET_BRKCHARS)
680COMPATIBLE_IOCTL(GIGASET_VERSION)
676/* Misc. */ 681/* Misc. */
677COMPATIBLE_IOCTL(0x41545900) /* ATYIO_CLKR */ 682COMPATIBLE_IOCTL(0x41545900) /* ATYIO_CLKR */
678COMPATIBLE_IOCTL(0x41545901) /* ATYIO_CLKW */ 683COMPATIBLE_IOCTL(0x41545901) /* ATYIO_CLKW */
diff --git a/include/linux/console.h b/include/linux/console.h
index d0f8a80094..3bdf2155e5 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -63,9 +63,11 @@ extern const struct consw vga_con; /* VGA text console */
63extern const struct consw newport_con; /* SGI Newport console */ 63extern const struct consw newport_con; /* SGI Newport console */
64extern const struct consw prom_con; /* SPARC PROM console */ 64extern const struct consw prom_con; /* SPARC PROM console */
65 65
66int con_is_bound(const struct consw *csw);
67int register_con_driver(const struct consw *csw, int first, int last);
68int unregister_con_driver(const struct consw *csw);
66int take_over_console(const struct consw *sw, int first, int last, int deflt); 69int take_over_console(const struct consw *sw, int first, int last, int deflt);
67void give_up_console(const struct consw *sw); 70void give_up_console(const struct consw *sw);
68
69/* scroll */ 71/* scroll */
70#define SM_UP (1) 72#define SM_UP (1)
71#define SM_DOWN (2) 73#define SM_DOWN (2)
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 08d50c53aa..a3caf6866b 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -31,17 +31,23 @@ struct cpu {
31 struct sys_device sysdev; 31 struct sys_device sysdev;
32}; 32};
33 33
34extern int register_cpu(struct cpu *, int, struct node *); 34extern int register_cpu(struct cpu *cpu, int num);
35extern struct sys_device *get_cpu_sysdev(unsigned cpu); 35extern struct sys_device *get_cpu_sysdev(unsigned cpu);
36#ifdef CONFIG_HOTPLUG_CPU 36#ifdef CONFIG_HOTPLUG_CPU
37extern void unregister_cpu(struct cpu *, struct node *); 37extern void unregister_cpu(struct cpu *cpu);
38#endif 38#endif
39struct notifier_block; 39struct notifier_block;
40 40
41#ifdef CONFIG_SMP 41#ifdef CONFIG_SMP
42/* Need to know about CPUs going up/down? */ 42/* Need to know about CPUs going up/down? */
43extern int register_cpu_notifier(struct notifier_block *nb); 43extern int register_cpu_notifier(struct notifier_block *nb);
44#ifdef CONFIG_HOTPLUG_CPU
44extern void unregister_cpu_notifier(struct notifier_block *nb); 45extern void unregister_cpu_notifier(struct notifier_block *nb);
46#else
47static inline void unregister_cpu_notifier(struct notifier_block *nb)
48{
49}
50#endif
45extern int current_in_cpu_hotplug(void); 51extern int current_in_cpu_hotplug(void);
46 52
47int cpu_up(unsigned int cpu); 53int cpu_up(unsigned int cpu);
@@ -73,6 +79,8 @@ extern int lock_cpu_hotplug_interruptible(void);
73 { .notifier_call = fn, .priority = pri }; \ 79 { .notifier_call = fn, .priority = pri }; \
74 register_cpu_notifier(&fn##_nb); \ 80 register_cpu_notifier(&fn##_nb); \
75} 81}
82#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
83#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
76int cpu_down(unsigned int cpu); 84int cpu_down(unsigned int cpu);
77#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) 85#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
78#else 86#else
@@ -80,6 +88,8 @@ int cpu_down(unsigned int cpu);
80#define unlock_cpu_hotplug() do { } while (0) 88#define unlock_cpu_hotplug() do { } while (0)
81#define lock_cpu_hotplug_interruptible() 0 89#define lock_cpu_hotplug_interruptible() 0
82#define hotcpu_notifier(fn, pri) 90#define hotcpu_notifier(fn, pri)
91#define register_hotcpu_notifier(nb)
92#define unregister_hotcpu_notifier(nb)
83 93
84/* CPUs don't go offline once they're online w/o CONFIG_HOTPLUG_CPU */ 94/* CPUs don't go offline once they're online w/o CONFIG_HOTPLUG_CPU */
85static inline int cpu_is_offline(int cpu) { return 0; } 95static inline int cpu_is_offline(int cpu) { return 0; }
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 5a0470e361..7f946241b8 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -66,7 +66,7 @@ struct crypto_tfm;
66 66
67struct cipher_desc { 67struct cipher_desc {
68 struct crypto_tfm *tfm; 68 struct crypto_tfm *tfm;
69 void (*crfn)(void *ctx, u8 *dst, const u8 *src); 69 void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
70 unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst, 70 unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst,
71 const u8 *src, unsigned int nbytes); 71 const u8 *src, unsigned int nbytes);
72 void *info; 72 void *info;
@@ -79,10 +79,10 @@ struct cipher_desc {
79struct cipher_alg { 79struct cipher_alg {
80 unsigned int cia_min_keysize; 80 unsigned int cia_min_keysize;
81 unsigned int cia_max_keysize; 81 unsigned int cia_max_keysize;
82 int (*cia_setkey)(void *ctx, const u8 *key, 82 int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
83 unsigned int keylen, u32 *flags); 83 unsigned int keylen, u32 *flags);
84 void (*cia_encrypt)(void *ctx, u8 *dst, const u8 *src); 84 void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
85 void (*cia_decrypt)(void *ctx, u8 *dst, const u8 *src); 85 void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
86 86
87 unsigned int (*cia_encrypt_ecb)(const struct cipher_desc *desc, 87 unsigned int (*cia_encrypt_ecb)(const struct cipher_desc *desc,
88 u8 *dst, const u8 *src, 88 u8 *dst, const u8 *src,
@@ -100,20 +100,19 @@ struct cipher_alg {
100 100
101struct digest_alg { 101struct digest_alg {
102 unsigned int dia_digestsize; 102 unsigned int dia_digestsize;
103 void (*dia_init)(void *ctx); 103 void (*dia_init)(struct crypto_tfm *tfm);
104 void (*dia_update)(void *ctx, const u8 *data, unsigned int len); 104 void (*dia_update)(struct crypto_tfm *tfm, const u8 *data,
105 void (*dia_final)(void *ctx, u8 *out); 105 unsigned int len);
106 int (*dia_setkey)(void *ctx, const u8 *key, 106 void (*dia_final)(struct crypto_tfm *tfm, u8 *out);
107 int (*dia_setkey)(struct crypto_tfm *tfm, const u8 *key,
107 unsigned int keylen, u32 *flags); 108 unsigned int keylen, u32 *flags);
108}; 109};
109 110
110struct compress_alg { 111struct compress_alg {
111 int (*coa_init)(void *ctx); 112 int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
112 void (*coa_exit)(void *ctx); 113 unsigned int slen, u8 *dst, unsigned int *dlen);
113 int (*coa_compress)(void *ctx, const u8 *src, unsigned int slen, 114 int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
114 u8 *dst, unsigned int *dlen); 115 unsigned int slen, u8 *dst, unsigned int *dlen);
115 int (*coa_decompress)(void *ctx, const u8 *src, unsigned int slen,
116 u8 *dst, unsigned int *dlen);
117}; 116};
118 117
119#define cra_cipher cra_u.cipher 118#define cra_cipher cra_u.cipher
@@ -129,14 +128,17 @@ struct crypto_alg {
129 128
130 int cra_priority; 129 int cra_priority;
131 130
132 const char cra_name[CRYPTO_MAX_ALG_NAME]; 131 char cra_name[CRYPTO_MAX_ALG_NAME];
133 const char cra_driver_name[CRYPTO_MAX_ALG_NAME]; 132 char cra_driver_name[CRYPTO_MAX_ALG_NAME];
134 133
135 union { 134 union {
136 struct cipher_alg cipher; 135 struct cipher_alg cipher;
137 struct digest_alg digest; 136 struct digest_alg digest;
138 struct compress_alg compress; 137 struct compress_alg compress;
139 } cra_u; 138 } cra_u;
139
140 int (*cra_init)(struct crypto_tfm *tfm);
141 void (*cra_exit)(struct crypto_tfm *tfm);
140 142
141 struct module *cra_module; 143 struct module *cra_module;
142}; 144};
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index aee10b2ea4..e3d1c33d15 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -8,9 +8,12 @@
8#ifndef _LINUX_DEVICE_MAPPER_H 8#ifndef _LINUX_DEVICE_MAPPER_H
9#define _LINUX_DEVICE_MAPPER_H 9#define _LINUX_DEVICE_MAPPER_H
10 10
11#ifdef __KERNEL__
12
11struct dm_target; 13struct dm_target;
12struct dm_table; 14struct dm_table;
13struct dm_dev; 15struct dm_dev;
16struct mapped_device;
14 17
15typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t; 18typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
16 19
@@ -78,7 +81,7 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d);
78struct target_type { 81struct target_type {
79 const char *name; 82 const char *name;
80 struct module *module; 83 struct module *module;
81 unsigned version[3]; 84 unsigned version[3];
82 dm_ctr_fn ctr; 85 dm_ctr_fn ctr;
83 dm_dtr_fn dtr; 86 dm_dtr_fn dtr;
84 dm_map_fn map; 87 dm_map_fn map;
@@ -128,4 +131,108 @@ struct dm_target {
128int dm_register_target(struct target_type *t); 131int dm_register_target(struct target_type *t);
129int dm_unregister_target(struct target_type *t); 132int dm_unregister_target(struct target_type *t);
130 133
131#endif /* _LINUX_DEVICE_MAPPER_H */ 134
135/*-----------------------------------------------------------------
136 * Functions for creating and manipulating mapped devices.
137 * Drop the reference with dm_put when you finish with the object.
138 *---------------------------------------------------------------*/
139
140/*
141 * DM_ANY_MINOR chooses the next available minor number.
142 */
143#define DM_ANY_MINOR (-1)
144int dm_create(int minor, struct mapped_device **md);
145
146/*
147 * Reference counting for md.
148 */
149struct mapped_device *dm_get_md(dev_t dev);
150void dm_get(struct mapped_device *md);
151void dm_put(struct mapped_device *md);
152
153/*
154 * An arbitrary pointer may be stored alongside a mapped device.
155 */
156void dm_set_mdptr(struct mapped_device *md, void *ptr);
157void *dm_get_mdptr(struct mapped_device *md);
158
159/*
160 * A device can still be used while suspended, but I/O is deferred.
161 */
162int dm_suspend(struct mapped_device *md, int with_lockfs);
163int dm_resume(struct mapped_device *md);
164
165/*
166 * Event functions.
167 */
168uint32_t dm_get_event_nr(struct mapped_device *md);
169int dm_wait_event(struct mapped_device *md, int event_nr);
170
171/*
172 * Info functions.
173 */
174const char *dm_device_name(struct mapped_device *md);
175struct gendisk *dm_disk(struct mapped_device *md);
176int dm_suspended(struct mapped_device *md);
177
178/*
179 * Geometry functions.
180 */
181int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
182int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
183
184
185/*-----------------------------------------------------------------
186 * Functions for manipulating device-mapper tables.
187 *---------------------------------------------------------------*/
188
189/*
190 * First create an empty table.
191 */
192int dm_table_create(struct dm_table **result, int mode,
193 unsigned num_targets, struct mapped_device *md);
194
195/*
196 * Then call this once for each target.
197 */
198int dm_table_add_target(struct dm_table *t, const char *type,
199 sector_t start, sector_t len, char *params);
200
201/*
202 * Finally call this to make the table ready for use.
203 */
204int dm_table_complete(struct dm_table *t);
205
206/*
207 * Table reference counting.
208 */
209struct dm_table *dm_get_table(struct mapped_device *md);
210void dm_table_get(struct dm_table *t);
211void dm_table_put(struct dm_table *t);
212
213/*
214 * Queries
215 */
216sector_t dm_table_get_size(struct dm_table *t);
217unsigned int dm_table_get_num_targets(struct dm_table *t);
218int dm_table_get_mode(struct dm_table *t);
219struct mapped_device *dm_table_get_md(struct dm_table *t);
220
221/*
222 * Trigger an event.
223 */
224void dm_table_event(struct dm_table *t);
225
226/*
227 * The device must be suspended before calling this method.
228 */
229int dm_swap_table(struct mapped_device *md, struct dm_table *t);
230
231/*
232 * Prepare a table for a device that will error all I/O.
233 * To make it active, call dm_suspend(), dm_swap_table() then dm_resume().
234 */
235int dm_create_error_table(struct dm_table **result, struct mapped_device *md);
236
237#endif /* __KERNEL__ */
238#endif /* _LINUX_DEVICE_MAPPER_H */
diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h
index c67c678661..9623bb6250 100644
--- a/include/linux/dm-ioctl.h
+++ b/include/linux/dm-ioctl.h
@@ -285,9 +285,9 @@ typedef char ioctl_struct[308];
285#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) 285#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
286 286
287#define DM_VERSION_MAJOR 4 287#define DM_VERSION_MAJOR 4
288#define DM_VERSION_MINOR 6 288#define DM_VERSION_MINOR 7
289#define DM_VERSION_PATCHLEVEL 0 289#define DM_VERSION_PATCHLEVEL 0
290#define DM_VERSION_EXTRA "-ioctl (2006-02-17)" 290#define DM_VERSION_EXTRA "-ioctl (2006-06-24)"
291 291
292/* Status bits */ 292/* Status bits */
293#define DM_READONLY_FLAG (1 << 0) /* In/Out */ 293#define DM_READONLY_FLAG (1 << 0) /* In/Out */
@@ -314,7 +314,7 @@ typedef char ioctl_struct[308];
314#define DM_BUFFER_FULL_FLAG (1 << 8) /* Out */ 314#define DM_BUFFER_FULL_FLAG (1 << 8) /* Out */
315 315
316/* 316/*
317 * Set this to improve performance when you aren't going to use open_count. 317 * This flag is now ignored.
318 */ 318 */
319#define DM_SKIP_BDGET_FLAG (1 << 9) /* In */ 319#define DM_SKIP_BDGET_FLAG (1 << 9) /* In */
320 320
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 78b236ca04..272010a607 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -20,7 +20,7 @@
20 */ 20 */
21#ifndef DMAENGINE_H 21#ifndef DMAENGINE_H
22#define DMAENGINE_H 22#define DMAENGINE_H
23#include <linux/config.h> 23
24#ifdef CONFIG_DMA_ENGINE 24#ifdef CONFIG_DMA_ENGINE
25 25
26#include <linux/device.h> 26#include <linux/device.h>
diff --git a/include/linux/fb.h b/include/linux/fb.h
index f1281687e5..07a08e92bc 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -504,23 +504,19 @@ struct fb_cursor_user {
504#define FB_EVENT_MODE_DELETE 0x04 504#define FB_EVENT_MODE_DELETE 0x04
505/* A driver registered itself */ 505/* A driver registered itself */
506#define FB_EVENT_FB_REGISTERED 0x05 506#define FB_EVENT_FB_REGISTERED 0x05
507/* A driver unregistered itself */
508#define FB_EVENT_FB_UNREGISTERED 0x06
507/* CONSOLE-SPECIFIC: get console to framebuffer mapping */ 509/* CONSOLE-SPECIFIC: get console to framebuffer mapping */
508#define FB_EVENT_GET_CONSOLE_MAP 0x06 510#define FB_EVENT_GET_CONSOLE_MAP 0x07
509/* CONSOLE-SPECIFIC: set console to framebuffer mapping */ 511/* CONSOLE-SPECIFIC: set console to framebuffer mapping */
510#define FB_EVENT_SET_CONSOLE_MAP 0x07 512#define FB_EVENT_SET_CONSOLE_MAP 0x08
511/* A display blank is requested */ 513/* A display blank is requested */
512#define FB_EVENT_BLANK 0x08 514#define FB_EVENT_BLANK 0x09
513/* Private modelist is to be replaced */ 515/* Private modelist is to be replaced */
514#define FB_EVENT_NEW_MODELIST 0x09 516#define FB_EVENT_NEW_MODELIST 0x0A
515/* The resolution of the passed in fb_info about to change and 517/* The resolution of the passed in fb_info about to change and
516 all vc's should be changed */ 518 all vc's should be changed */
517#define FB_EVENT_MODE_CHANGE_ALL 0x0A 519#define FB_EVENT_MODE_CHANGE_ALL 0x0B
518/* CONSOLE-SPECIFIC: set console rotation */
519#define FB_EVENT_SET_CON_ROTATE 0x0B
520/* CONSOLE-SPECIFIC: get console rotation */
521#define FB_EVENT_GET_CON_ROTATE 0x0C
522/* CONSOLE-SPECIFIC: rotate all consoles */
523#define FB_EVENT_SET_CON_ROTATE_ALL 0x0D
524 520
525struct fb_event { 521struct fb_event {
526 struct fb_info *info; 522 struct fb_info *info;
@@ -892,7 +888,6 @@ extern int fb_get_color_depth(struct fb_var_screeninfo *var,
892 struct fb_fix_screeninfo *fix); 888 struct fb_fix_screeninfo *fix);
893extern int fb_get_options(char *name, char **option); 889extern int fb_get_options(char *name, char **option);
894extern int fb_new_modelist(struct fb_info *info); 890extern int fb_new_modelist(struct fb_info *info);
895extern int fb_con_duit(struct fb_info *info, int event, void *data);
896 891
897extern struct fb_info *registered_fb[FB_MAX]; 892extern struct fb_info *registered_fb[FB_MAX];
898extern int num_registered_fb; 893extern int num_registered_fb;
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 966a5b3da4..34c3a215f2 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -12,6 +12,9 @@
12#define FUTEX_REQUEUE 3 12#define FUTEX_REQUEUE 3
13#define FUTEX_CMP_REQUEUE 4 13#define FUTEX_CMP_REQUEUE 4
14#define FUTEX_WAKE_OP 5 14#define FUTEX_WAKE_OP 5
15#define FUTEX_LOCK_PI 6
16#define FUTEX_UNLOCK_PI 7
17#define FUTEX_TRYLOCK_PI 8
15 18
16/* 19/*
17 * Support for robust futexes: the kernel cleans up held futexes at 20 * Support for robust futexes: the kernel cleans up held futexes at
@@ -90,18 +93,21 @@ struct robust_list_head {
90 */ 93 */
91#define ROBUST_LIST_LIMIT 2048 94#define ROBUST_LIST_LIMIT 2048
92 95
93long do_futex(unsigned long uaddr, int op, int val, 96long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout,
94 unsigned long timeout, unsigned long uaddr2, int val2, 97 u32 __user *uaddr2, u32 val2, u32 val3);
95 int val3);
96 98
97extern int handle_futex_death(u32 __user *uaddr, struct task_struct *curr); 99extern int handle_futex_death(u32 __user *uaddr, struct task_struct *curr);
98 100
99#ifdef CONFIG_FUTEX 101#ifdef CONFIG_FUTEX
100extern void exit_robust_list(struct task_struct *curr); 102extern void exit_robust_list(struct task_struct *curr);
103extern void exit_pi_state_list(struct task_struct *curr);
101#else 104#else
102static inline void exit_robust_list(struct task_struct *curr) 105static inline void exit_robust_list(struct task_struct *curr)
103{ 106{
104} 107}
108static inline void exit_pi_state_list(struct task_struct *curr)
109{
110}
105#endif 111#endif
106 112
107#define FUTEX_OP_SET 0 /* *(int *)UADDR2 = OPARG; */ 113#define FUTEX_OP_SET 0 /* *(int *)UADDR2 = OPARG; */
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
new file mode 100644
index 0000000000..21ea7610e1
--- /dev/null
+++ b/include/linux/hw_random.h
@@ -0,0 +1,50 @@
1/*
2 Hardware Random Number Generator
3
4 Please read Documentation/hw_random.txt for details on use.
5
6 ----------------------------------------------------------
7 This software may be used and distributed according to the terms
8 of the GNU General Public License, incorporated herein by reference.
9
10 */
11
12#ifndef LINUX_HWRANDOM_H_
13#define LINUX_HWRANDOM_H_
14#ifdef __KERNEL__
15
16#include <linux/types.h>
17#include <linux/list.h>
18
19/**
20 * struct hwrng - Hardware Random Number Generator driver
21 * @name: Unique RNG name.
22 * @init: Initialization callback (can be NULL).
23 * @cleanup: Cleanup callback (can be NULL).
24 * @data_present: Callback to determine if data is available
25 * on the RNG. If NULL, it is assumed that
26 * there is always data available.
27 * @data_read: Read data from the RNG device.
28 * Returns the number of lower random bytes in "data".
29 * Must not be NULL.
30 * @priv: Private data, for use by the RNG driver.
31 */
32struct hwrng {
33 const char *name;
34 int (*init)(struct hwrng *rng);
35 void (*cleanup)(struct hwrng *rng);
36 int (*data_present)(struct hwrng *rng);
37 int (*data_read)(struct hwrng *rng, u32 *data);
38 unsigned long priv;
39
40 /* internal. */
41 struct list_head list;
42};
43
44/** Register a new Hardware Random Number Generator driver. */
45extern int hwrng_register(struct hwrng *rng);
46/** Unregister a Hardware Random Number Generator driver. */
47extern void hwrng_unregister(struct hwrng *rng);
48
49#endif /* __KERNEL__ */
50#endif /* LINUX_HWRANDOM_H_ */
diff --git a/include/linux/idr.h b/include/linux/idr.h
index d37c8d808b..f559a719db 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -78,6 +78,7 @@ void *idr_find(struct idr *idp, int id);
78int idr_pre_get(struct idr *idp, gfp_t gfp_mask); 78int idr_pre_get(struct idr *idp, gfp_t gfp_mask);
79int idr_get_new(struct idr *idp, void *ptr, int *id); 79int idr_get_new(struct idr *idp, void *ptr, int *id);
80int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id); 80int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
81void *idr_replace(struct idr *idp, void *ptr, int id);
81void idr_remove(struct idr *idp, int id); 82void idr_remove(struct idr *idp, int id);
82void idr_destroy(struct idr *idp); 83void idr_destroy(struct idr *idp);
83void idr_init(struct idr *idp); 84void idr_init(struct idr *idp);
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 41ecbb847f..3a256957fb 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -87,6 +87,7 @@ extern struct group_info init_groups;
87 .lock_depth = -1, \ 87 .lock_depth = -1, \
88 .prio = MAX_PRIO-20, \ 88 .prio = MAX_PRIO-20, \
89 .static_prio = MAX_PRIO-20, \ 89 .static_prio = MAX_PRIO-20, \
90 .normal_prio = MAX_PRIO-20, \
90 .policy = SCHED_NORMAL, \ 91 .policy = SCHED_NORMAL, \
91 .cpus_allowed = CPU_MASK_ALL, \ 92 .cpus_allowed = CPU_MASK_ALL, \
92 .mm = NULL, \ 93 .mm = NULL, \
@@ -119,10 +120,11 @@ extern struct group_info init_groups;
119 .signal = {{0}}}, \ 120 .signal = {{0}}}, \
120 .blocked = {{0}}, \ 121 .blocked = {{0}}, \
121 .alloc_lock = SPIN_LOCK_UNLOCKED, \ 122 .alloc_lock = SPIN_LOCK_UNLOCKED, \
122 .proc_lock = SPIN_LOCK_UNLOCKED, \
123 .journal_info = NULL, \ 123 .journal_info = NULL, \
124 .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ 124 .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
125 .fs_excl = ATOMIC_INIT(0), \ 125 .fs_excl = ATOMIC_INIT(0), \
126 .pi_lock = SPIN_LOCK_UNLOCKED, \
127 INIT_RT_MUTEXES(tsk) \
126} 128}
127 129
128 130
diff --git a/include/linux/input.h b/include/linux/input.h
index b32c2b6e53..56f1e0e1e5 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -232,7 +232,8 @@ struct input_absinfo {
232#define KEY_PAUSE 119 232#define KEY_PAUSE 119
233 233
234#define KEY_KPCOMMA 121 234#define KEY_KPCOMMA 121
235#define KEY_HANGUEL 122 235#define KEY_HANGEUL 122
236#define KEY_HANGUEL KEY_HANGEUL
236#define KEY_HANJA 123 237#define KEY_HANJA 123
237#define KEY_YEN 124 238#define KEY_YEN 124
238#define KEY_LEFTMETA 125 239#define KEY_LEFTMETA 125
@@ -1005,6 +1006,7 @@ static inline void init_input_dev(struct input_dev *dev)
1005} 1006}
1006 1007
1007struct input_dev *input_allocate_device(void); 1008struct input_dev *input_allocate_device(void);
1009void input_free_device(struct input_dev *dev);
1008 1010
1009static inline struct input_dev *input_get_device(struct input_dev *dev) 1011static inline struct input_dev *input_get_device(struct input_dev *dev)
1010{ 1012{
@@ -1016,12 +1018,6 @@ static inline void input_put_device(struct input_dev *dev)
1016 class_device_put(&dev->cdev); 1018 class_device_put(&dev->cdev);
1017} 1019}
1018 1020
1019static inline void input_free_device(struct input_dev *dev)
1020{
1021 if (dev)
1022 input_put_device(dev);
1023}
1024
1025int input_register_device(struct input_dev *); 1021int input_register_device(struct input_dev *);
1026void input_unregister_device(struct input_dev *); 1022void input_unregister_device(struct input_dev *);
1027 1023
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index cd6bd001ba..edfc733b15 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -105,6 +105,9 @@ extern int allocate_resource(struct resource *root, struct resource *new,
105int adjust_resource(struct resource *res, unsigned long start, 105int adjust_resource(struct resource *res, unsigned long start,
106 unsigned long size); 106 unsigned long size);
107 107
108/* get registered SYSTEM_RAM resources in specified area */
109extern int find_next_system_ram(struct resource *res);
110
108/* Convenience shorthand with allocation */ 111/* Convenience shorthand with allocation */
109#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name)) 112#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name))
110#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name)) 113#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name))
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index 5653b2f23b..d09fbeabf1 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -210,11 +210,7 @@ struct kernel_ipmi_msg
210#include <linux/list.h> 210#include <linux/list.h>
211#include <linux/module.h> 211#include <linux/module.h>
212#include <linux/device.h> 212#include <linux/device.h>
213
214#ifdef CONFIG_PROC_FS
215#include <linux/proc_fs.h> 213#include <linux/proc_fs.h>
216extern struct proc_dir_entry *proc_ipmi_root;
217#endif /* CONFIG_PROC_FS */
218 214
219/* Opaque type for a IPMI message user. One of these is needed to 215/* Opaque type for a IPMI message user. One of these is needed to
220 send and receive messages. */ 216 send and receive messages. */
diff --git a/include/linux/jffs2.h b/include/linux/jffs2.h
index c6f70660b3..c9c760700b 100644
--- a/include/linux/jffs2.h
+++ b/include/linux/jffs2.h
@@ -186,6 +186,7 @@ struct jffs2_raw_xref
186 jint32_t hdr_crc; 186 jint32_t hdr_crc;
187 jint32_t ino; /* inode number */ 187 jint32_t ino; /* inode number */
188 jint32_t xid; /* XATTR identifier number */ 188 jint32_t xid; /* XATTR identifier number */
189 jint32_t xseqno; /* xref sequencial number */
189 jint32_t node_crc; 190 jint32_t node_crc;
190} __attribute__((packed)); 191} __attribute__((packed));
191 192
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 3c5e4c2e51..5c1ec1f84e 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -32,6 +32,7 @@ extern const char linux_banner[];
32 32
33#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) 33#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
34#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1)) 34#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
35#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
35 36
36#define KERN_EMERG "<0>" /* system is unusable */ 37#define KERN_EMERG "<0>" /* system is unusable */
37#define KERN_ALERT "<1>" /* action must be taken immediately */ 38#define KERN_ALERT "<1>" /* action must be taken immediately */
@@ -336,6 +337,12 @@ struct sysinfo {
336/* Force a compilation error if condition is true */ 337/* Force a compilation error if condition is true */
337#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) 338#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
338 339
340/* Force a compilation error if condition is true, but also produce a
341 result (of value 0 and type size_t), so the expression can be used
342 e.g. in a structure initializer (or where-ever else comma expressions
343 aren't permitted). */
344#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1)
345
339/* Trap pasters of __FUNCTION__ at compile-time */ 346/* Trap pasters of __FUNCTION__ at compile-time */
340#define __FUNCTION__ (__func__) 347#define __FUNCTION__ (__func__)
341 348
diff --git a/include/linux/key.h b/include/linux/key.h
index e81ebf910d..e693e729bc 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -248,7 +248,14 @@ extern struct key *key_alloc(struct key_type *type,
248 const char *desc, 248 const char *desc,
249 uid_t uid, gid_t gid, 249 uid_t uid, gid_t gid,
250 struct task_struct *ctx, 250 struct task_struct *ctx,
251 key_perm_t perm, int not_in_quota); 251 key_perm_t perm,
252 unsigned long flags);
253
254
255#define KEY_ALLOC_IN_QUOTA 0x0000 /* add to quota, reject if would overrun */
256#define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */
257#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */
258
252extern int key_payload_reserve(struct key *key, size_t datalen); 259extern int key_payload_reserve(struct key *key, size_t datalen);
253extern int key_instantiate_and_link(struct key *key, 260extern int key_instantiate_and_link(struct key *key,
254 const void *data, 261 const void *data,
@@ -285,7 +292,7 @@ extern key_ref_t key_create_or_update(key_ref_t keyring,
285 const char *description, 292 const char *description,
286 const void *payload, 293 const void *payload,
287 size_t plen, 294 size_t plen,
288 int not_in_quota); 295 unsigned long flags);
289 296
290extern int key_update(key_ref_t key, 297extern int key_update(key_ref_t key,
291 const void *payload, 298 const void *payload,
@@ -299,7 +306,7 @@ extern int key_unlink(struct key *keyring,
299 306
300extern struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid, 307extern struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
301 struct task_struct *ctx, 308 struct task_struct *ctx,
302 int not_in_quota, 309 unsigned long flags,
303 struct key *dest); 310 struct key *dest);
304 311
305extern int keyring_clear(struct key *keyring); 312extern int keyring_clear(struct key *keyring);
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 20b1cf527c..f4284bf897 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -30,6 +30,7 @@
30#include <linux/interrupt.h> 30#include <linux/interrupt.h>
31#include <linux/pci.h> 31#include <linux/pci.h>
32#include <linux/dma-mapping.h> 32#include <linux/dma-mapping.h>
33#include <asm/scatterlist.h>
33#include <asm/io.h> 34#include <asm/io.h>
34#include <linux/ata.h> 35#include <linux/ata.h>
35#include <linux/workqueue.h> 36#include <linux/workqueue.h>
@@ -887,6 +888,9 @@ static inline unsigned int ata_tag_internal(unsigned int tag)
887 return tag == ATA_MAX_QUEUE - 1; 888 return tag == ATA_MAX_QUEUE - 1;
888} 889}
889 890
891/*
892 * device helpers
893 */
890static inline unsigned int ata_class_enabled(unsigned int class) 894static inline unsigned int ata_class_enabled(unsigned int class)
891{ 895{
892 return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI; 896 return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI;
@@ -917,6 +921,17 @@ static inline unsigned int ata_dev_absent(const struct ata_device *dev)
917 return ata_class_absent(dev->class); 921 return ata_class_absent(dev->class);
918} 922}
919 923
924/*
925 * port helpers
926 */
927static inline int ata_port_max_devices(const struct ata_port *ap)
928{
929 if (ap->flags & ATA_FLAG_SLAVE_POSS)
930 return 2;
931 return 1;
932}
933
934
920static inline u8 ata_chk_status(struct ata_port *ap) 935static inline u8 ata_chk_status(struct ata_port *ap)
921{ 936{
922 return ap->ops->check_status(ap); 937 return ap->ops->check_status(ap);
diff --git a/include/linux/license.h b/include/linux/license.h
new file mode 100644
index 0000000000..decdbf43cb
--- /dev/null
+++ b/include/linux/license.h
@@ -0,0 +1,14 @@
1#ifndef __LICENSE_H
2#define __LICENSE_H
3
4static inline int license_is_gpl_compatible(const char *license)
5{
6 return (strcmp(license, "GPL") == 0
7 || strcmp(license, "GPL v2") == 0
8 || strcmp(license, "GPL and additional rights") == 0
9 || strcmp(license, "Dual BSD/GPL") == 0
10 || strcmp(license, "Dual MIT/GPL") == 0
11 || strcmp(license, "Dual MPL/GPL") == 0);
12}
13
14#endif
diff --git a/include/linux/list.h b/include/linux/list.h
index 37ca31b21b..6b74adf529 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -4,18 +4,11 @@
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5 5
6#include <linux/stddef.h> 6#include <linux/stddef.h>
7#include <linux/poison.h>
7#include <linux/prefetch.h> 8#include <linux/prefetch.h>
8#include <asm/system.h> 9#include <asm/system.h>
9 10
10/* 11/*
11 * These are non-NULL pointers that will result in page faults
12 * under normal circumstances, used to verify that nobody uses
13 * non-initialized list entries.
14 */
15#define LIST_POISON1 ((void *) 0x00100100)
16#define LIST_POISON2 ((void *) 0x00200200)
17
18/*
19 * Simple doubly linked list implementation. 12 * Simple doubly linked list implementation.
20 * 13 *
21 * Some of the internal functions ("__xxx") are useful when 14 * Some of the internal functions ("__xxx") are useful when
diff --git a/include/linux/loop.h b/include/linux/loop.h
index bf3d2345ce..e76c7611d6 100644
--- a/include/linux/loop.h
+++ b/include/linux/loop.h
@@ -59,7 +59,7 @@ struct loop_device {
59 struct bio *lo_bio; 59 struct bio *lo_bio;
60 struct bio *lo_biotail; 60 struct bio *lo_biotail;
61 int lo_state; 61 int lo_state;
62 struct task_struct *lo_thread; 62 struct completion lo_done;
63 struct completion lo_bh_done; 63 struct completion lo_bh_done;
64 struct mutex lo_ctl_mutex; 64 struct mutex lo_ctl_mutex;
65 int lo_pending; 65 int lo_pending;
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 9112063861..218501cfae 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -63,6 +63,76 @@ extern int online_pages(unsigned long, unsigned long);
63/* reasonably generic interface to expand the physical pages in a zone */ 63/* reasonably generic interface to expand the physical pages in a zone */
64extern int __add_pages(struct zone *zone, unsigned long start_pfn, 64extern int __add_pages(struct zone *zone, unsigned long start_pfn,
65 unsigned long nr_pages); 65 unsigned long nr_pages);
66
67#ifdef CONFIG_NUMA
68extern int memory_add_physaddr_to_nid(u64 start);
69#else
70static inline int memory_add_physaddr_to_nid(u64 start)
71{
72 return 0;
73}
74#endif
75
76#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
77/*
78 * For supporting node-hotadd, we have to allocate a new pgdat.
79 *
80 * If an arch has generic style NODE_DATA(),
81 * node_data[nid] = kzalloc() works well. But it depends on the architecture.
82 *
83 * In general, generic_alloc_nodedata() is used.
84 * Now, arch_free_nodedata() is just defined for error path of node_hot_add.
85 *
86 */
87extern pg_data_t *arch_alloc_nodedata(int nid);
88extern void arch_free_nodedata(pg_data_t *pgdat);
89extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
90
91#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
92
93#define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid)
94#define arch_free_nodedata(pgdat) generic_free_nodedata(pgdat)
95
96#ifdef CONFIG_NUMA
97/*
98 * If ARCH_HAS_NODEDATA_EXTENSION=n, this func is used to allocate pgdat.
99 * XXX: kmalloc_node() can't work well to get new node's memory at this time.
100 * Because, pgdat for the new node is not allocated/initialized yet itself.
101 * To use new node's memory, more consideration will be necessary.
102 */
103#define generic_alloc_nodedata(nid) \
104({ \
105 kzalloc(sizeof(pg_data_t), GFP_KERNEL); \
106})
107/*
108 * This definition is just for error path in node hotadd.
109 * For node hotremove, we have to replace this.
110 */
111#define generic_free_nodedata(pgdat) kfree(pgdat)
112
113extern pg_data_t *node_data[];
114static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
115{
116 node_data[nid] = pgdat;
117}
118
119#else /* !CONFIG_NUMA */
120
121/* never called */
122static inline pg_data_t *generic_alloc_nodedata(int nid)
123{
124 BUG();
125 return NULL;
126}
127static inline void generic_free_nodedata(pg_data_t *pgdat)
128{
129}
130static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
131{
132}
133#endif /* CONFIG_NUMA */
134#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
135
66#else /* ! CONFIG_MEMORY_HOTPLUG */ 136#else /* ! CONFIG_MEMORY_HOTPLUG */
67/* 137/*
68 * Stub functions for when hotplug is off 138 * Stub functions for when hotplug is off
@@ -99,7 +169,8 @@ static inline int __remove_pages(struct zone *zone, unsigned long start_pfn,
99 return -ENOSYS; 169 return -ENOSYS;
100} 170}
101 171
102extern int add_memory(u64 start, u64 size); 172extern int add_memory(int nid, u64 start, u64 size);
173extern int arch_add_memory(int nid, u64 start, u64 size);
103extern int remove_memory(u64 start, u64 size); 174extern int remove_memory(u64 start, u64 size);
104 175
105#endif /* __LINUX_MEMORY_HOTPLUG_H */ 176#endif /* __LINUX_MEMORY_HOTPLUG_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a929ea197e..c41a1299b8 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1030,13 +1030,20 @@ static inline void vm_stat_account(struct mm_struct *mm,
1030} 1030}
1031#endif /* CONFIG_PROC_FS */ 1031#endif /* CONFIG_PROC_FS */
1032 1032
1033static inline void
1034debug_check_no_locks_freed(const void *from, unsigned long len)
1035{
1036 mutex_debug_check_no_locks_freed(from, len);
1037 rt_mutex_debug_check_no_locks_freed(from, len);
1038}
1039
1033#ifndef CONFIG_DEBUG_PAGEALLOC 1040#ifndef CONFIG_DEBUG_PAGEALLOC
1034static inline void 1041static inline void
1035kernel_map_pages(struct page *page, int numpages, int enable) 1042kernel_map_pages(struct page *page, int numpages, int enable)
1036{ 1043{
1037 if (!PageHighMem(page) && !enable) 1044 if (!PageHighMem(page) && !enable)
1038 mutex_debug_check_no_locks_freed(page_address(page), 1045 debug_check_no_locks_freed(page_address(page),
1039 numpages * PAGE_SIZE); 1046 numpages * PAGE_SIZE);
1040} 1047}
1041#endif 1048#endif
1042 1049
@@ -1065,5 +1072,7 @@ void drop_slab(void);
1065extern int randomize_va_space; 1072extern int randomize_va_space;
1066#endif 1073#endif
1067 1074
1075const char *arch_vma_name(struct vm_area_struct *vma);
1076
1068#endif /* __KERNEL__ */ 1077#endif /* __KERNEL__ */
1069#endif /* _LINUX_MM_H */ 1078#endif /* _LINUX_MM_H */
diff --git a/include/linux/module.h b/include/linux/module.h
index 2d366098ea..9ebbb74b7b 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -285,6 +285,9 @@ struct module
285 /* The size of the executable code in each section. */ 285 /* The size of the executable code in each section. */
286 unsigned long init_text_size, core_text_size; 286 unsigned long init_text_size, core_text_size;
287 287
288 /* The handle returned from unwind_add_table. */
289 void *unwind_info;
290
288 /* Arch-specific module values */ 291 /* Arch-specific module values */
289 struct mod_arch_specific arch; 292 struct mod_arch_specific arch;
290 293
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index bc747e5d71..03cd7551a7 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -699,7 +699,6 @@ extern int dev_hard_start_xmit(struct sk_buff *skb,
699 699
700extern void dev_init(void); 700extern void dev_init(void);
701 701
702extern int netdev_nit;
703extern int netdev_budget; 702extern int netdev_budget;
704 703
705/* Called by rtnetlink.c:rtnl_unlock() */ 704/* Called by rtnetlink.c:rtnl_unlock() */
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index ca5a873300..1efe60c5c0 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -31,6 +31,7 @@ struct netpoll_info {
31 int rx_flags; 31 int rx_flags;
32 spinlock_t rx_lock; 32 spinlock_t rx_lock;
33 struct netpoll *rx_np; /* netpoll that registered an rx_hook */ 33 struct netpoll *rx_np; /* netpoll that registered an rx_hook */
34 struct sk_buff_head arp_tx; /* list of arp requests to reply to */
34}; 35};
35 36
36void netpoll_poll(struct netpoll *np); 37void netpoll_poll(struct netpoll *np);
diff --git a/include/linux/node.h b/include/linux/node.h
index 254dc3de65..81dcec84cd 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -26,8 +26,25 @@ struct node {
26 struct sys_device sysdev; 26 struct sys_device sysdev;
27}; 27};
28 28
29extern struct node node_devices[];
30
29extern int register_node(struct node *, int, struct node *); 31extern int register_node(struct node *, int, struct node *);
30extern void unregister_node(struct node *node); 32extern void unregister_node(struct node *node);
33extern int register_one_node(int nid);
34extern void unregister_one_node(int nid);
35#ifdef CONFIG_NUMA
36extern int register_cpu_under_node(unsigned int cpu, unsigned int nid);
37extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
38#else
39static inline int register_cpu_under_node(unsigned int cpu, unsigned int nid)
40{
41 return 0;
42}
43static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
44{
45 return 0;
46}
47#endif
31 48
32#define to_node(sys_device) container_of(sys_device, struct node, sysdev) 49#define to_node(sys_device) container_of(sys_device, struct node, sysdev)
33 50
diff --git a/include/linux/nsc_gpio.h b/include/linux/nsc_gpio.h
new file mode 100644
index 0000000000..135742cfad
--- /dev/null
+++ b/include/linux/nsc_gpio.h
@@ -0,0 +1,42 @@
1/**
2 nsc_gpio.c
3
4 National Semiconductor GPIO common access methods.
5
6 struct nsc_gpio_ops abstracts the low-level access
7 operations for the GPIO units on 2 NSC chip families; the GEODE
8 integrated CPU, and the PC-8736[03456] integrated PC-peripheral
9 chips.
10
11 The GPIO units on these chips have the same pin architecture, but
12 the access methods differ. Thus, scx200_gpio and pc8736x_gpio
13 implement their own versions of these routines; and use the common
14 file-operations routines implemented in nsc_gpio module.
15
16 Copyright (c) 2005 Jim Cromie <jim.cromie@gmail.com>
17
18 NB: this work was tested on the Geode SC-1100 and PC-87366 chips.
19 NSC sold the GEODE line to AMD, and the PC-8736x line to Winbond.
20*/
21
22struct nsc_gpio_ops {
23 struct module* owner;
24 u32 (*gpio_config) (unsigned iminor, u32 mask, u32 bits);
25 void (*gpio_dump) (struct nsc_gpio_ops *amp, unsigned iminor);
26 int (*gpio_get) (unsigned iminor);
27 void (*gpio_set) (unsigned iminor, int state);
28 void (*gpio_set_high)(unsigned iminor);
29 void (*gpio_set_low) (unsigned iminor);
30 void (*gpio_change) (unsigned iminor);
31 int (*gpio_current) (unsigned iminor);
32 struct device* dev; /* for dev_dbg() support, set in init */
33};
34
35extern ssize_t nsc_gpio_write(struct file *file, const char __user *data,
36 size_t len, loff_t *ppos);
37
38extern ssize_t nsc_gpio_read(struct file *file, char __user *buf,
39 size_t len, loff_t *ppos);
40
41extern void nsc_gpio_dump(struct nsc_gpio_ops *amp, unsigned index);
42
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index c2fd2d1993..9ae6b1a753 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1202,6 +1202,7 @@
1202#define PCI_DEVICE_ID_NVIDIA_NVENET_19 0x03EF 1202#define PCI_DEVICE_ID_NVIDIA_NVENET_19 0x03EF
1203#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2 0x03F6 1203#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2 0x03F6
1204#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3 0x03F7 1204#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3 0x03F7
1205#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE 0x0448
1205#define PCI_DEVICE_ID_NVIDIA_NVENET_20 0x0450 1206#define PCI_DEVICE_ID_NVIDIA_NVENET_20 0x0450
1206#define PCI_DEVICE_ID_NVIDIA_NVENET_21 0x0451 1207#define PCI_DEVICE_ID_NVIDIA_NVENET_21 0x0451
1207#define PCI_DEVICE_ID_NVIDIA_NVENET_22 0x0452 1208#define PCI_DEVICE_ID_NVIDIA_NVENET_22 0x0452
@@ -2170,7 +2171,6 @@
2170#define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815 2171#define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815
2171#define PCI_DEVICE_ID_INTEL_ICH8_5 0x283e 2172#define PCI_DEVICE_ID_INTEL_ICH8_5 0x283e
2172#define PCI_DEVICE_ID_INTEL_ICH8_6 0x2850 2173#define PCI_DEVICE_ID_INTEL_ICH8_6 0x2850
2173#define PCI_DEVICE_ID_INTEL_GD31244 0x3200
2174#define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340 2174#define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340
2175#define PCI_DEVICE_ID_INTEL_82830_HB 0x3575 2175#define PCI_DEVICE_ID_INTEL_82830_HB 0x3575
2176#define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577 2176#define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577
diff --git a/include/linux/plist.h b/include/linux/plist.h
new file mode 100644
index 0000000000..3404faef54
--- /dev/null
+++ b/include/linux/plist.h
@@ -0,0 +1,247 @@
1/*
2 * Descending-priority-sorted double-linked list
3 *
4 * (C) 2002-2003 Intel Corp
5 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>.
6 *
7 * 2001-2005 (c) MontaVista Software, Inc.
8 * Daniel Walker <dwalker@mvista.com>
9 *
10 * (C) 2005 Thomas Gleixner <tglx@linutronix.de>
11 *
12 * Simplifications of the original code by
13 * Oleg Nesterov <oleg@tv-sign.ru>
14 *
15 * Licensed under the FSF's GNU Public License v2 or later.
16 *
17 * Based on simple lists (include/linux/list.h).
18 *
19 * This is a priority-sorted list of nodes; each node has a
20 * priority from INT_MIN (highest) to INT_MAX (lowest).
21 *
22 * Addition is O(K), removal is O(1), change of priority of a node is
23 * O(K) and K is the number of RT priority levels used in the system.
24 * (1 <= K <= 99)
25 *
26 * This list is really a list of lists:
27 *
28 * - The tier 1 list is the prio_list, different priority nodes.
29 *
30 * - The tier 2 list is the node_list, serialized nodes.
31 *
32 * Simple ASCII art explanation:
33 *
34 * |HEAD |
35 * | |
36 * |prio_list.prev|<------------------------------------|
37 * |prio_list.next|<->|pl|<->|pl|<--------------->|pl|<-|
38 * |10 | |10| |21| |21| |21| |40| (prio)
39 * | | | | | | | | | | | |
40 * | | | | | | | | | | | |
41 * |node_list.next|<->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<-|
42 * |node_list.prev|<------------------------------------|
43 *
44 * The nodes on the prio_list list are sorted by priority to simplify
45 * the insertion of new nodes. There are no nodes with duplicate
 46 * priorities on the list.
47 *
 48 * The nodes on the node_list are ordered by priority and can contain
49 * entries which have the same priority. Those entries are ordered
50 * FIFO
51 *
52 * Addition means: look for the prio_list node in the prio_list
53 * for the priority of the node and insert it before the node_list
54 * entry of the next prio_list node. If it is the first node of
55 * that priority, add it to the prio_list in the right position and
56 * insert it into the serialized node_list list
57 *
58 * Removal means remove it from the node_list and remove it from
59 * the prio_list if the node_list list_head is non empty. In case
60 * of removal from the prio_list it must be checked whether other
61 * entries of the same priority are on the list or not. If there
62 * is another entry of the same priority then this entry has to
63 * replace the removed entry on the prio_list. If the entry which
64 * is removed is the only entry of this priority then a simple
65 * remove from both list is sufficient.
66 *
67 * INT_MIN is the highest priority, 0 is the medium highest, INT_MAX
68 * is lowest priority.
69 *
70 * No locking is done, up to the caller.
71 *
72 */
73#ifndef _LINUX_PLIST_H_
74#define _LINUX_PLIST_H_
75
76#include <linux/list.h>
77#include <linux/spinlock_types.h>
78
79struct plist_head {
80 struct list_head prio_list;
81 struct list_head node_list;
82#ifdef CONFIG_DEBUG_PI_LIST
83 spinlock_t *lock;
84#endif
85};
86
87struct plist_node {
88 int prio;
89 struct plist_head plist;
90};
91
92#ifdef CONFIG_DEBUG_PI_LIST
93# define PLIST_HEAD_LOCK_INIT(_lock) .lock = _lock
94#else
95# define PLIST_HEAD_LOCK_INIT(_lock)
96#endif
97
98/**
99 * #PLIST_HEAD_INIT - static struct plist_head initializer
100 *
101 * @head: struct plist_head variable name
102 */
103#define PLIST_HEAD_INIT(head, _lock) \
104{ \
105 .prio_list = LIST_HEAD_INIT((head).prio_list), \
106 .node_list = LIST_HEAD_INIT((head).node_list), \
107 PLIST_HEAD_LOCK_INIT(&(_lock)) \
108}
109
110/**
111 * #PLIST_NODE_INIT - static struct plist_node initializer
112 *
113 * @node: struct plist_node variable name
114 * @__prio: initial node priority
115 */
116#define PLIST_NODE_INIT(node, __prio) \
117{ \
118 .prio = (__prio), \
119 .plist = PLIST_HEAD_INIT((node).plist, NULL), \
120}
121
122/**
123 * plist_head_init - dynamic struct plist_head initializer
124 *
125 * @head: &struct plist_head pointer
126 */
127static inline void
128plist_head_init(struct plist_head *head, spinlock_t *lock)
129{
130 INIT_LIST_HEAD(&head->prio_list);
131 INIT_LIST_HEAD(&head->node_list);
132#ifdef CONFIG_DEBUG_PI_LIST
133 head->lock = lock;
134#endif
135}
136
137/**
138 * plist_node_init - Dynamic struct plist_node initializer
139 *
140 * @node: &struct plist_node pointer
141 * @prio: initial node priority
142 */
143static inline void plist_node_init(struct plist_node *node, int prio)
144{
145 node->prio = prio;
146 plist_head_init(&node->plist, NULL);
147}
148
149extern void plist_add(struct plist_node *node, struct plist_head *head);
150extern void plist_del(struct plist_node *node, struct plist_head *head);
151
152/**
153 * plist_for_each - iterate over the plist
154 *
 155 * @pos: the type * to use as a loop counter.
156 * @head: the head for your list.
157 */
158#define plist_for_each(pos, head) \
159 list_for_each_entry(pos, &(head)->node_list, plist.node_list)
160
161/**
 162 * plist_for_each_safe - iterate over a plist of given type safe
163 * against removal of list entry
164 *
 165 * @pos: the type * to use as a loop counter.
 166 * @n: another type * to use as temporary storage
167 * @head: the head for your list.
168 */
169#define plist_for_each_safe(pos, n, head) \
170 list_for_each_entry_safe(pos, n, &(head)->node_list, plist.node_list)
171
172/**
173 * plist_for_each_entry - iterate over list of given type
174 *
175 * @pos: the type * to use as a loop counter.
176 * @head: the head for your list.
 177 * @mem: the name of the list_struct within the struct.
178 */
179#define plist_for_each_entry(pos, head, mem) \
180 list_for_each_entry(pos, &(head)->node_list, mem.plist.node_list)
181
182/**
183 * plist_for_each_entry_safe - iterate over list of given type safe against
184 * removal of list entry
185 *
186 * @pos: the type * to use as a loop counter.
187 * @n: another type * to use as temporary storage
188 * @head: the head for your list.
189 * @m: the name of the list_struct within the struct.
190 */
191#define plist_for_each_entry_safe(pos, n, head, m) \
192 list_for_each_entry_safe(pos, n, &(head)->node_list, m.plist.node_list)
193
194/**
195 * plist_head_empty - return !0 if a plist_head is empty
196 *
197 * @head: &struct plist_head pointer
198 */
199static inline int plist_head_empty(const struct plist_head *head)
200{
201 return list_empty(&head->node_list);
202}
203
204/**
205 * plist_node_empty - return !0 if plist_node is not on a list
206 *
207 * @node: &struct plist_node pointer
208 */
209static inline int plist_node_empty(const struct plist_node *node)
210{
211 return plist_head_empty(&node->plist);
212}
213
214/* All functions below assume the plist_head is not empty. */
215
216/**
217 * plist_first_entry - get the struct for the first entry
218 *
219 * @ptr: the &struct plist_head pointer.
220 * @type: the type of the struct this is embedded in.
221 * @member: the name of the list_struct within the struct.
222 */
223#ifdef CONFIG_DEBUG_PI_LIST
224# define plist_first_entry(head, type, member) \
225({ \
226 WARN_ON(plist_head_empty(head)); \
227 container_of(plist_first(head), type, member); \
228})
229#else
230# define plist_first_entry(head, type, member) \
231 container_of(plist_first(head), type, member)
232#endif
233
234/**
235 * plist_first - return the first node (and thus, highest priority)
236 *
237 * @head: the &struct plist_head pointer
238 *
239 * Assumes the plist is _not_ empty.
240 */
241static inline struct plist_node* plist_first(const struct plist_head *head)
242{
243 return list_entry(head->node_list.next,
244 struct plist_node, plist.node_list);
245}
246
247#endif
diff --git a/include/linux/poison.h b/include/linux/poison.h
new file mode 100644
index 0000000000..a5347c0243
--- /dev/null
+++ b/include/linux/poison.h
@@ -0,0 +1,58 @@
1#ifndef _LINUX_POISON_H
2#define _LINUX_POISON_H
3
4/********** include/linux/list.h **********/
5/*
6 * These are non-NULL pointers that will result in page faults
7 * under normal circumstances, used to verify that nobody uses
8 * non-initialized list entries.
9 */
10#define LIST_POISON1 ((void *) 0x00100100)
11#define LIST_POISON2 ((void *) 0x00200200)
12
13/********** mm/slab.c **********/
14/*
15 * Magic nums for obj red zoning.
16 * Placed in the first word before and the first word after an obj.
17 */
18#define RED_INACTIVE 0x5A2CF071UL /* when obj is inactive */
19#define RED_ACTIVE 0x170FC2A5UL /* when obj is active */
20
21/* ...and for poisoning */
22#define POISON_INUSE 0x5a /* for use-uninitialised poisoning */
23#define POISON_FREE 0x6b /* for use-after-free poisoning */
24#define POISON_END 0xa5 /* end-byte of poisoning */
25
26/********** arch/$ARCH/mm/init.c **********/
27#define POISON_FREE_INITMEM 0xcc
28
29/********** arch/x86_64/mm/init.c **********/
30#define POISON_FREE_INITDATA 0xba
31
32/********** arch/ia64/hp/common/sba_iommu.c **********/
33/*
34 * arch/ia64/hp/common/sba_iommu.c uses a 16-byte poison string with a
35 * value of "SBAIOMMU POISON\0" for spill-over poisoning.
36 */
37
38/********** fs/jbd/journal.c **********/
39#define JBD_POISON_FREE 0x5b
40
41/********** drivers/base/dmapool.c **********/
42#define POOL_POISON_FREED 0xa7 /* !inuse */
43#define POOL_POISON_ALLOCATED 0xa9 /* !initted */
44
45/********** drivers/atm/ **********/
46#define ATM_POISON_FREE 0x12
47
48/********** kernel/mutexes **********/
49#define MUTEX_DEBUG_INIT 0x11
50#define MUTEX_DEBUG_FREE 0x22
51
52/********** security/ **********/
53#define KEY_DESTROY 0xbd
54
55/********** sound/oss/ **********/
56#define OSS_POISON_FREE 0xAB
57
58#endif
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 5810d28fbe..17e75783e3 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -99,9 +99,8 @@ extern void proc_misc_init(void);
99 99
100struct mm_struct; 100struct mm_struct;
101 101
102void proc_flush_task(struct task_struct *task);
102struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *); 103struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *);
103struct dentry *proc_pid_unhash(struct task_struct *p);
104void proc_pid_flush(struct dentry *proc_dentry);
105int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir); 104int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir);
106unsigned long task_vsize(struct mm_struct *); 105unsigned long task_vsize(struct mm_struct *);
107int task_statm(struct mm_struct *, int *, int *, int *, int *); 106int task_statm(struct mm_struct *, int *, int *, int *, int *);
@@ -211,8 +210,7 @@ static inline void proc_net_remove(const char *name)
211#define proc_net_create(name, mode, info) ({ (void)(mode), NULL; }) 210#define proc_net_create(name, mode, info) ({ (void)(mode), NULL; })
212static inline void proc_net_remove(const char *name) {} 211static inline void proc_net_remove(const char *name) {}
213 212
214static inline struct dentry *proc_pid_unhash(struct task_struct *p) { return NULL; } 213static inline void proc_flush_task(struct task_struct *task) { }
215static inline void proc_pid_flush(struct dentry *proc_dentry) { }
216 214
217static inline struct proc_dir_entry *create_proc_entry(const char *name, 215static inline struct proc_dir_entry *create_proc_entry(const char *name,
218 mode_t mode, struct proc_dir_entry *parent) { return NULL; } 216 mode_t mode, struct proc_dir_entry *parent) { return NULL; }
@@ -248,8 +246,8 @@ extern void kclist_add(struct kcore_list *, void *, size_t);
248#endif 246#endif
249 247
250struct proc_inode { 248struct proc_inode {
251 struct task_struct *task; 249 struct pid *pid;
252 int type; 250 int fd;
253 union { 251 union {
254 int (*proc_get_link)(struct inode *, struct dentry **, struct vfsmount **); 252 int (*proc_get_link)(struct inode *, struct dentry **, struct vfsmount **);
255 int (*proc_read)(struct task_struct *task, char *page); 253 int (*proc_read)(struct task_struct *task, char *page);
@@ -268,4 +266,10 @@ static inline struct proc_dir_entry *PDE(const struct inode *inode)
268 return PROC_I(inode)->pde; 266 return PROC_I(inode)->pde;
269} 267}
270 268
269struct proc_maps_private {
270 struct pid *pid;
271 struct task_struct *task;
272 struct vm_area_struct *tail_vma;
273};
274
271#endif /* _LINUX_PROC_FS_H */ 275#endif /* _LINUX_PROC_FS_H */
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index ee918bc6e1..8b2749a259 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -88,7 +88,6 @@ extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __us
88extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); 88extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
89extern int ptrace_attach(struct task_struct *tsk); 89extern int ptrace_attach(struct task_struct *tsk);
90extern int ptrace_detach(struct task_struct *, unsigned int); 90extern int ptrace_detach(struct task_struct *, unsigned int);
91extern void __ptrace_detach(struct task_struct *, unsigned int);
92extern void ptrace_disable(struct task_struct *); 91extern void ptrace_disable(struct task_struct *);
93extern int ptrace_check_attach(struct task_struct *task, int kill); 92extern int ptrace_check_attach(struct task_struct *task, int kill);
94extern int ptrace_request(struct task_struct *child, long request, long addr, long data); 93extern int ptrace_request(struct task_struct *child, long request, long addr, long data);
diff --git a/include/linux/raid/bitmap.h b/include/linux/raid/bitmap.h
index 899437802a..63df898fe2 100644
--- a/include/linux/raid/bitmap.h
+++ b/include/linux/raid/bitmap.h
@@ -140,6 +140,7 @@ typedef __u16 bitmap_counter_t;
140enum bitmap_state { 140enum bitmap_state {
141 BITMAP_ACTIVE = 0x001, /* the bitmap is in use */ 141 BITMAP_ACTIVE = 0x001, /* the bitmap is in use */
142 BITMAP_STALE = 0x002, /* the bitmap file is out of date or had -EIO */ 142 BITMAP_STALE = 0x002, /* the bitmap file is out of date or had -EIO */
143 BITMAP_WRITE_ERROR = 0x004, /* A write error has occurred */
143 BITMAP_HOSTENDIAN = 0x8000, 144 BITMAP_HOSTENDIAN = 0x8000,
144}; 145};
145 146
@@ -244,15 +245,9 @@ struct bitmap {
244 unsigned long daemon_lastrun; /* jiffies of last run */ 245 unsigned long daemon_lastrun; /* jiffies of last run */
245 unsigned long daemon_sleep; /* how many seconds between updates? */ 246 unsigned long daemon_sleep; /* how many seconds between updates? */
246 247
247 /* 248 atomic_t pending_writes; /* pending writes to the bitmap file */
248 * bitmap_writeback_daemon waits for file-pages that have been written,
249 * as there is no way to get a call-back when a page write completes.
250 */
251 mdk_thread_t *writeback_daemon;
252 spinlock_t write_lock;
253 wait_queue_head_t write_wait; 249 wait_queue_head_t write_wait;
254 struct list_head complete_pages; 250
255 mempool_t *write_pool;
256}; 251};
257 252
258/* the bitmap API */ 253/* the bitmap API */
diff --git a/include/linux/raid/linear.h b/include/linux/raid/linear.h
index 7eaf290e10..ba15469daf 100644
--- a/include/linux/raid/linear.h
+++ b/include/linux/raid/linear.h
@@ -13,8 +13,10 @@ typedef struct dev_info dev_info_t;
13 13
14struct linear_private_data 14struct linear_private_data
15{ 15{
16 struct linear_private_data *prev; /* earlier version */
16 dev_info_t **hash_table; 17 dev_info_t **hash_table;
17 sector_t hash_spacing; 18 sector_t hash_spacing;
19 sector_t array_size;
18 int preshift; /* shift before dividing by hash_spacing */ 20 int preshift; /* shift before dividing by hash_spacing */
19 dev_info_t disks[0]; 21 dev_info_t disks[0];
20}; 22};
diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h
index 66b44e5e0d..eb3e547c8f 100644
--- a/include/linux/raid/md.h
+++ b/include/linux/raid/md.h
@@ -85,8 +85,6 @@ extern void md_done_sync(mddev_t *mddev, int blocks, int ok);
85extern void md_error (mddev_t *mddev, mdk_rdev_t *rdev); 85extern void md_error (mddev_t *mddev, mdk_rdev_t *rdev);
86extern void md_unplug_mddev(mddev_t *mddev); 86extern void md_unplug_mddev(mddev_t *mddev);
87 87
88extern void md_print_devices (void);
89
90extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, 88extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
91 sector_t sector, int size, struct page *page); 89 sector_t sector, int size, struct page *page);
92extern void md_super_wait(mddev_t *mddev); 90extern void md_super_wait(mddev_t *mddev);
@@ -97,7 +95,5 @@ extern void md_new_event(mddev_t *mddev);
97 95
98extern void md_update_sb(mddev_t * mddev); 96extern void md_update_sb(mddev_t * mddev);
99 97
100#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
101
102#endif 98#endif
103 99
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
index e2df61f5b0..c1e0ac55ba 100644
--- a/include/linux/raid/md_k.h
+++ b/include/linux/raid/md_k.h
@@ -40,7 +40,8 @@ typedef struct mdk_rdev_s mdk_rdev_t;
40 * options passed in raidrun: 40 * options passed in raidrun:
41 */ 41 */
42 42
 43#define MAX_CHUNK_SIZE (4096*1024) 43/* Currently this must fit in an 'int' */
44#define MAX_CHUNK_SIZE (1<<30)
44 45
45/* 46/*
46 * MD's 'extended' device 47 * MD's 'extended' device
@@ -57,6 +58,7 @@ struct mdk_rdev_s
57 58
58 struct page *sb_page; 59 struct page *sb_page;
59 int sb_loaded; 60 int sb_loaded;
61 __u64 sb_events;
60 sector_t data_offset; /* start of data in array */ 62 sector_t data_offset; /* start of data in array */
61 sector_t sb_offset; 63 sector_t sb_offset;
62 int sb_size; /* bytes in the superblock */ 64 int sb_size; /* bytes in the superblock */
@@ -87,6 +89,10 @@ struct mdk_rdev_s
87 * array and could again if we did a partial 89 * array and could again if we did a partial
88 * resync from the bitmap 90 * resync from the bitmap
89 */ 91 */
92 sector_t recovery_offset;/* If this device has been partially
93 * recovered, this is where we were
94 * up to.
95 */
90 96
91 atomic_t nr_pending; /* number of pending requests. 97 atomic_t nr_pending; /* number of pending requests.
92 * only maintained for arrays that 98 * only maintained for arrays that
@@ -182,6 +188,8 @@ struct mddev_s
182#define MD_RECOVERY_REQUESTED 6 188#define MD_RECOVERY_REQUESTED 6
183#define MD_RECOVERY_CHECK 7 189#define MD_RECOVERY_CHECK 7
184#define MD_RECOVERY_RESHAPE 8 190#define MD_RECOVERY_RESHAPE 8
191#define MD_RECOVERY_FROZEN 9
192
185 unsigned long recovery; 193 unsigned long recovery;
186 194
187 int in_sync; /* know to not need resync */ 195 int in_sync; /* know to not need resync */
diff --git a/include/linux/raid/md_p.h b/include/linux/raid/md_p.h
index f1fbae7e39..b6ebc69bae 100644
--- a/include/linux/raid/md_p.h
+++ b/include/linux/raid/md_p.h
@@ -265,9 +265,12 @@ struct mdp_superblock_1 {
265 265
266/* feature_map bits */ 266/* feature_map bits */
267#define MD_FEATURE_BITMAP_OFFSET 1 267#define MD_FEATURE_BITMAP_OFFSET 1
268#define MD_FEATURE_RECOVERY_OFFSET 2 /* recovery_offset is present and
269 * must be honoured
270 */
268#define MD_FEATURE_RESHAPE_ACTIVE 4 271#define MD_FEATURE_RESHAPE_ACTIVE 4
269 272
270#define MD_FEATURE_ALL 5 273#define MD_FEATURE_ALL (1|2|4)
271 274
272#endif 275#endif
273 276
diff --git a/include/linux/raid/raid10.h b/include/linux/raid/raid10.h
index b1103298a8..c41e56a7c0 100644
--- a/include/linux/raid/raid10.h
+++ b/include/linux/raid/raid10.h
@@ -24,11 +24,16 @@ struct r10_private_data_s {
 24 int far_copies; /* number of copies laid out 24 int far_copies; /* number of copies laid out
25 * at large strides across drives 25 * at large strides across drives
26 */ 26 */
27 int far_offset; /* far_copies are offset by 1 stripe
28 * instead of many
29 */
27 int copies; /* near_copies * far_copies. 30 int copies; /* near_copies * far_copies.
28 * must be <= raid_disks 31 * must be <= raid_disks
29 */ 32 */
30 sector_t stride; /* distance between far copies. 33 sector_t stride; /* distance between far copies.
31 * This is size / far_copies 34 * This is size / far_copies unless
35 * far_offset, in which case it is
36 * 1 stripe.
32 */ 37 */
33 38
34 int chunk_shift; /* shift from chunks to sectors */ 39 int chunk_shift; /* shift from chunks to sectors */
diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h
index 914af66704..20ed4c9976 100644
--- a/include/linux/raid/raid5.h
+++ b/include/linux/raid/raid5.h
@@ -212,6 +212,7 @@ struct raid5_private_data {
212 mddev_t *mddev; 212 mddev_t *mddev;
213 struct disk_info *spare; 213 struct disk_info *spare;
214 int chunk_size, level, algorithm; 214 int chunk_size, level, algorithm;
215 int max_degraded;
215 int raid_disks, working_disks, failed_disks; 216 int raid_disks, working_disks, failed_disks;
216 int max_nr_stripes; 217 int max_nr_stripes;
217 218
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 6312758393..48dfe00070 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -258,6 +258,7 @@ extern void rcu_init(void);
258extern void rcu_check_callbacks(int cpu, int user); 258extern void rcu_check_callbacks(int cpu, int user);
259extern void rcu_restart_cpu(int cpu); 259extern void rcu_restart_cpu(int cpu);
260extern long rcu_batches_completed(void); 260extern long rcu_batches_completed(void);
261extern long rcu_batches_completed_bh(void);
261 262
262/* Exported interfaces */ 263/* Exported interfaces */
263extern void FASTCALL(call_rcu(struct rcu_head *head, 264extern void FASTCALL(call_rcu(struct rcu_head *head,
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
new file mode 100644
index 0000000000..fa4a3b82ba
--- /dev/null
+++ b/include/linux/rtmutex.h
@@ -0,0 +1,117 @@
1/*
2 * RT Mutexes: blocking mutual exclusion locks with PI support
3 *
4 * started by Ingo Molnar and Thomas Gleixner:
5 *
6 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
7 * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
8 *
9 * This file contains the public data structure and API definitions.
10 */
11
12#ifndef __LINUX_RT_MUTEX_H
13#define __LINUX_RT_MUTEX_H
14
15#include <linux/linkage.h>
16#include <linux/plist.h>
17#include <linux/spinlock_types.h>
18
19/*
20 * The rt_mutex structure
21 *
22 * @wait_lock: spinlock to protect the structure
 23 * @wait_list: plist head to enqueue waiters in priority order
24 * @owner: the mutex owner
25 */
26struct rt_mutex {
27 spinlock_t wait_lock;
28 struct plist_head wait_list;
29 struct task_struct *owner;
30#ifdef CONFIG_DEBUG_RT_MUTEXES
31 int save_state;
32 struct list_head held_list_entry;
33 unsigned long acquire_ip;
34 const char *name, *file;
35 int line;
36 void *magic;
37#endif
38};
39
40struct rt_mutex_waiter;
41struct hrtimer_sleeper;
42
43#ifdef CONFIG_DEBUG_RT_MUTEXES
44 extern int rt_mutex_debug_check_no_locks_freed(const void *from,
45 unsigned long len);
46 extern void rt_mutex_debug_check_no_locks_held(struct task_struct *task);
47#else
48 static inline int rt_mutex_debug_check_no_locks_freed(const void *from,
49 unsigned long len)
50 {
51 return 0;
52 }
53# define rt_mutex_debug_check_no_locks_held(task) do { } while (0)
54#endif
55
56#ifdef CONFIG_DEBUG_RT_MUTEXES
57# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
58 , .name = #mutexname, .file = __FILE__, .line = __LINE__
59# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __FUNCTION__)
60 extern void rt_mutex_debug_task_free(struct task_struct *tsk);
61#else
62# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
63# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL)
64# define rt_mutex_debug_task_free(t) do { } while (0)
65#endif
66
67#define __RT_MUTEX_INITIALIZER(mutexname) \
68 { .wait_lock = SPIN_LOCK_UNLOCKED \
69 , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \
70 , .owner = NULL \
71 __DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
72
73#define DEFINE_RT_MUTEX(mutexname) \
74 struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
75
76/***
77 * rt_mutex_is_locked - is the mutex locked
78 * @lock: the mutex to be queried
79 *
80 * Returns 1 if the mutex is locked, 0 if unlocked.
81 */
82static inline int rt_mutex_is_locked(struct rt_mutex *lock)
83{
84 return lock->owner != NULL;
85}
86
87extern void __rt_mutex_init(struct rt_mutex *lock, const char *name);
88extern void rt_mutex_destroy(struct rt_mutex *lock);
89
90extern void rt_mutex_lock(struct rt_mutex *lock);
91extern int rt_mutex_lock_interruptible(struct rt_mutex *lock,
92 int detect_deadlock);
93extern int rt_mutex_timed_lock(struct rt_mutex *lock,
94 struct hrtimer_sleeper *timeout,
95 int detect_deadlock);
96
97extern int rt_mutex_trylock(struct rt_mutex *lock);
98
99extern void rt_mutex_unlock(struct rt_mutex *lock);
100
101#ifdef CONFIG_DEBUG_RT_MUTEXES
102# define INIT_RT_MUTEX_DEBUG(tsk) \
103 .held_list_head = LIST_HEAD_INIT(tsk.held_list_head), \
104 .held_list_lock = SPIN_LOCK_UNLOCKED
105#else
106# define INIT_RT_MUTEX_DEBUG(tsk)
107#endif
108
109#ifdef CONFIG_RT_MUTEXES
110# define INIT_RT_MUTEXES(tsk) \
111 .pi_waiters = PLIST_HEAD_INIT(tsk.pi_waiters, tsk.pi_lock), \
112 INIT_RT_MUTEX_DEBUG(tsk)
113#else
114# define INIT_RT_MUTEXES(tsk)
115#endif
116
117#endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8d11d9310d..821f0481eb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -73,6 +73,7 @@ struct sched_param {
73#include <linux/seccomp.h> 73#include <linux/seccomp.h>
74#include <linux/rcupdate.h> 74#include <linux/rcupdate.h>
75#include <linux/futex.h> 75#include <linux/futex.h>
76#include <linux/rtmutex.h>
76 77
77#include <linux/time.h> 78#include <linux/time.h>
78#include <linux/param.h> 79#include <linux/param.h>
@@ -83,6 +84,7 @@ struct sched_param {
83#include <asm/processor.h> 84#include <asm/processor.h>
84 85
85struct exec_domain; 86struct exec_domain;
87struct futex_pi_state;
86 88
87/* 89/*
88 * List of flags we want to share for kernel threads, 90 * List of flags we want to share for kernel threads,
@@ -123,6 +125,7 @@ extern unsigned long nr_running(void);
123extern unsigned long nr_uninterruptible(void); 125extern unsigned long nr_uninterruptible(void);
124extern unsigned long nr_active(void); 126extern unsigned long nr_active(void);
125extern unsigned long nr_iowait(void); 127extern unsigned long nr_iowait(void);
128extern unsigned long weighted_cpuload(const int cpu);
126 129
127 130
128/* 131/*
@@ -494,8 +497,11 @@ struct signal_struct {
494 497
495#define MAX_PRIO (MAX_RT_PRIO + 40) 498#define MAX_PRIO (MAX_RT_PRIO + 40)
496 499
497#define rt_task(p) (unlikely((p)->prio < MAX_RT_PRIO)) 500#define rt_prio(prio) unlikely((prio) < MAX_RT_PRIO)
501#define rt_task(p) rt_prio((p)->prio)
498#define batch_task(p) (unlikely((p)->policy == SCHED_BATCH)) 502#define batch_task(p) (unlikely((p)->policy == SCHED_BATCH))
503#define has_rt_policy(p) \
504 unlikely((p)->policy != SCHED_NORMAL && (p)->policy != SCHED_BATCH)
499 505
500/* 506/*
501 * Some day this will be a full-fledged user tracking system.. 507 * Some day this will be a full-fledged user tracking system..
@@ -558,9 +564,9 @@ enum idle_type
558/* 564/*
559 * sched-domains (multiprocessor balancing) declarations: 565 * sched-domains (multiprocessor balancing) declarations:
560 */ 566 */
561#ifdef CONFIG_SMP
562#define SCHED_LOAD_SCALE 128UL /* increase resolution of load */ 567#define SCHED_LOAD_SCALE 128UL /* increase resolution of load */
563 568
569#ifdef CONFIG_SMP
564#define SD_LOAD_BALANCE 1 /* Do load balancing on this domain. */ 570#define SD_LOAD_BALANCE 1 /* Do load balancing on this domain. */
565#define SD_BALANCE_NEWIDLE 2 /* Balance when about to become idle */ 571#define SD_BALANCE_NEWIDLE 2 /* Balance when about to become idle */
566#define SD_BALANCE_EXEC 4 /* Balance on exec */ 572#define SD_BALANCE_EXEC 4 /* Balance on exec */
@@ -569,6 +575,11 @@ enum idle_type
569#define SD_WAKE_AFFINE 32 /* Wake task to waking CPU */ 575#define SD_WAKE_AFFINE 32 /* Wake task to waking CPU */
570#define SD_WAKE_BALANCE 64 /* Perform balancing at task wakeup */ 576#define SD_WAKE_BALANCE 64 /* Perform balancing at task wakeup */
571#define SD_SHARE_CPUPOWER 128 /* Domain members share cpu power */ 577#define SD_SHARE_CPUPOWER 128 /* Domain members share cpu power */
578#define SD_POWERSAVINGS_BALANCE 256 /* Balance for power savings */
579
580#define BALANCE_FOR_POWER ((sched_mc_power_savings || sched_smt_power_savings) \
581 ? SD_POWERSAVINGS_BALANCE : 0)
582
572 583
573struct sched_group { 584struct sched_group {
574 struct sched_group *next; /* Must be a circular list */ 585 struct sched_group *next; /* Must be a circular list */
@@ -638,7 +649,7 @@ struct sched_domain {
638#endif 649#endif
639}; 650};
640 651
641extern void partition_sched_domains(cpumask_t *partition1, 652extern int partition_sched_domains(cpumask_t *partition1,
642 cpumask_t *partition2); 653 cpumask_t *partition2);
643 654
644/* 655/*
@@ -713,10 +724,13 @@ struct task_struct {
713 724
714 int lock_depth; /* BKL lock depth */ 725 int lock_depth; /* BKL lock depth */
715 726
716#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) 727#ifdef CONFIG_SMP
728#ifdef __ARCH_WANT_UNLOCKED_CTXSW
717 int oncpu; 729 int oncpu;
718#endif 730#endif
719 int prio, static_prio; 731#endif
732 int load_weight; /* for niceness load balancing purposes */
733 int prio, static_prio, normal_prio;
720 struct list_head run_list; 734 struct list_head run_list;
721 prio_array_t *array; 735 prio_array_t *array;
722 736
@@ -842,8 +856,20 @@ struct task_struct {
842 u32 self_exec_id; 856 u32 self_exec_id;
843/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */ 857/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
844 spinlock_t alloc_lock; 858 spinlock_t alloc_lock;
845/* Protection of proc_dentry: nesting proc_lock, dcache_lock, write_lock_irq(&tasklist_lock); */ 859
846 spinlock_t proc_lock; 860 /* Protection of the PI data structures: */
861 spinlock_t pi_lock;
862
863#ifdef CONFIG_RT_MUTEXES
864 /* PI waiters blocked on a rt_mutex held by this task */
865 struct plist_head pi_waiters;
866 /* Deadlock detection and priority inheritance handling */
867 struct rt_mutex_waiter *pi_blocked_on;
868# ifdef CONFIG_DEBUG_RT_MUTEXES
869 spinlock_t held_list_lock;
870 struct list_head held_list_head;
871# endif
872#endif
847 873
848#ifdef CONFIG_DEBUG_MUTEXES 874#ifdef CONFIG_DEBUG_MUTEXES
849 /* mutex deadlock detection */ 875 /* mutex deadlock detection */
@@ -856,7 +882,6 @@ struct task_struct {
856/* VM state */ 882/* VM state */
857 struct reclaim_state *reclaim_state; 883 struct reclaim_state *reclaim_state;
858 884
859 struct dentry *proc_dentry;
860 struct backing_dev_info *backing_dev_info; 885 struct backing_dev_info *backing_dev_info;
861 886
862 struct io_context *io_context; 887 struct io_context *io_context;
@@ -891,6 +916,8 @@ struct task_struct {
891#ifdef CONFIG_COMPAT 916#ifdef CONFIG_COMPAT
892 struct compat_robust_list_head __user *compat_robust_list; 917 struct compat_robust_list_head __user *compat_robust_list;
893#endif 918#endif
919 struct list_head pi_state_list;
920 struct futex_pi_state *pi_state_cache;
894 921
895 atomic_t fs_excl; /* holding fs exclusive resources */ 922 atomic_t fs_excl; /* holding fs exclusive resources */
896 struct rcu_head rcu; 923 struct rcu_head rcu;
@@ -958,6 +985,7 @@ static inline void put_task_struct(struct task_struct *t)
958#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ 985#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */
959#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */ 986#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */
960#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ 987#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
988#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
961 989
962/* 990/*
963 * Only the _current_ task can read/write to tsk->flags, but other 991 * Only the _current_ task can read/write to tsk->flags, but other
@@ -1012,6 +1040,19 @@ static inline void idle_task_exit(void) {}
1012#endif 1040#endif
1013 1041
1014extern void sched_idle_next(void); 1042extern void sched_idle_next(void);
1043
1044#ifdef CONFIG_RT_MUTEXES
1045extern int rt_mutex_getprio(task_t *p);
1046extern void rt_mutex_setprio(task_t *p, int prio);
1047extern void rt_mutex_adjust_pi(task_t *p);
1048#else
1049static inline int rt_mutex_getprio(task_t *p)
1050{
1051 return p->normal_prio;
1052}
1053# define rt_mutex_adjust_pi(p) do { } while (0)
1054#endif
1055
1015extern void set_user_nice(task_t *p, long nice); 1056extern void set_user_nice(task_t *p, long nice);
1016extern int task_prio(const task_t *p); 1057extern int task_prio(const task_t *p);
1017extern int task_nice(const task_t *p); 1058extern int task_nice(const task_t *p);
@@ -1411,6 +1452,11 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm)
1411extern long sched_setaffinity(pid_t pid, cpumask_t new_mask); 1452extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
1412extern long sched_getaffinity(pid_t pid, cpumask_t *mask); 1453extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
1413 1454
1455#include <linux/sysdev.h>
1456extern int sched_mc_power_savings, sched_smt_power_savings;
1457extern struct sysdev_attribute attr_sched_mc_power_savings, attr_sched_smt_power_savings;
1458extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls);
1459
1414extern void normalize_rt_tasks(void); 1460extern void normalize_rt_tasks(void);
1415 1461
1416#ifdef CONFIG_PM 1462#ifdef CONFIG_PM
diff --git a/include/linux/scx200.h b/include/linux/scx200.h
index a22f9e173a..693c0557e7 100644
--- a/include/linux/scx200.h
+++ b/include/linux/scx200.h
@@ -49,10 +49,3 @@ extern unsigned scx200_cb_base;
49#define SCx200_REV 0x3d /* Revision Register */ 49#define SCx200_REV 0x3d /* Revision Register */
50#define SCx200_CBA 0x3e /* Configuration Base Address Register */ 50#define SCx200_CBA 0x3e /* Configuration Base Address Register */
51#define SCx200_CBA_SCRATCH 0x64 /* Configuration Base Address Scratchpad */ 51#define SCx200_CBA_SCRATCH 0x64 /* Configuration Base Address Scratchpad */
52
53/*
54 Local variables:
55 compile-command: "make -C ../.. bzImage modules"
56 c-basic-offset: 8
57 End:
58*/
diff --git a/include/linux/scx200_gpio.h b/include/linux/scx200_gpio.h
index 30cdd648ba..90dd069cc1 100644
--- a/include/linux/scx200_gpio.h
+++ b/include/linux/scx200_gpio.h
@@ -1,6 +1,6 @@
1#include <linux/spinlock.h> 1#include <linux/spinlock.h>
2 2
3u32 scx200_gpio_configure(int index, u32 set, u32 clear); 3u32 scx200_gpio_configure(unsigned index, u32 set, u32 clear);
4 4
5extern unsigned scx200_gpio_base; 5extern unsigned scx200_gpio_base;
6extern long scx200_gpio_shadow[2]; 6extern long scx200_gpio_shadow[2];
@@ -17,7 +17,7 @@ extern long scx200_gpio_shadow[2];
17 17
18/* returns the value of the GPIO pin */ 18/* returns the value of the GPIO pin */
19 19
20static inline int scx200_gpio_get(int index) { 20static inline int scx200_gpio_get(unsigned index) {
21 __SCx200_GPIO_BANK; 21 __SCx200_GPIO_BANK;
22 __SCx200_GPIO_IOADDR + 0x04; 22 __SCx200_GPIO_IOADDR + 0x04;
23 __SCx200_GPIO_INDEX; 23 __SCx200_GPIO_INDEX;
@@ -29,7 +29,7 @@ static inline int scx200_gpio_get(int index) {
29 driven if the GPIO is configured as an output, it might not be the 29 driven if the GPIO is configured as an output, it might not be the
30 state of the GPIO right now if the GPIO is configured as an input) */ 30 state of the GPIO right now if the GPIO is configured as an input) */
31 31
32static inline int scx200_gpio_current(int index) { 32static inline int scx200_gpio_current(unsigned index) {
33 __SCx200_GPIO_BANK; 33 __SCx200_GPIO_BANK;
34 __SCx200_GPIO_INDEX; 34 __SCx200_GPIO_INDEX;
35 35
@@ -38,7 +38,7 @@ static inline int scx200_gpio_current(int index) {
38 38
39/* drive the GPIO signal high */ 39/* drive the GPIO signal high */
40 40
41static inline void scx200_gpio_set_high(int index) { 41static inline void scx200_gpio_set_high(unsigned index) {
42 __SCx200_GPIO_BANK; 42 __SCx200_GPIO_BANK;
43 __SCx200_GPIO_IOADDR; 43 __SCx200_GPIO_IOADDR;
44 __SCx200_GPIO_SHADOW; 44 __SCx200_GPIO_SHADOW;
@@ -49,7 +49,7 @@ static inline void scx200_gpio_set_high(int index) {
49 49
50/* drive the GPIO signal low */ 50/* drive the GPIO signal low */
51 51
52static inline void scx200_gpio_set_low(int index) { 52static inline void scx200_gpio_set_low(unsigned index) {
53 __SCx200_GPIO_BANK; 53 __SCx200_GPIO_BANK;
54 __SCx200_GPIO_IOADDR; 54 __SCx200_GPIO_IOADDR;
55 __SCx200_GPIO_SHADOW; 55 __SCx200_GPIO_SHADOW;
@@ -60,7 +60,7 @@ static inline void scx200_gpio_set_low(int index) {
60 60
61/* drive the GPIO signal to state */ 61/* drive the GPIO signal to state */
62 62
63static inline void scx200_gpio_set(int index, int state) { 63static inline void scx200_gpio_set(unsigned index, int state) {
64 __SCx200_GPIO_BANK; 64 __SCx200_GPIO_BANK;
65 __SCx200_GPIO_IOADDR; 65 __SCx200_GPIO_IOADDR;
66 __SCx200_GPIO_SHADOW; 66 __SCx200_GPIO_SHADOW;
@@ -73,7 +73,7 @@ static inline void scx200_gpio_set(int index, int state) {
73} 73}
74 74
75/* toggle the GPIO signal */ 75/* toggle the GPIO signal */
76static inline void scx200_gpio_change(int index) { 76static inline void scx200_gpio_change(unsigned index) {
77 __SCx200_GPIO_BANK; 77 __SCx200_GPIO_BANK;
78 __SCx200_GPIO_IOADDR; 78 __SCx200_GPIO_IOADDR;
79 __SCx200_GPIO_SHADOW; 79 __SCx200_GPIO_SHADOW;
@@ -87,10 +87,3 @@ static inline void scx200_gpio_change(int index) {
87#undef __SCx200_GPIO_SHADOW 87#undef __SCx200_GPIO_SHADOW
88#undef __SCx200_GPIO_INDEX 88#undef __SCx200_GPIO_INDEX
89#undef __SCx200_GPIO_OUT 89#undef __SCx200_GPIO_OUT
90
91/*
92 Local variables:
93 compile-command: "make -C ../.. bzImage modules"
94 c-basic-offset: 8
95 End:
96*/
diff --git a/include/linux/security.h b/include/linux/security.h
index d2c17bd91a..51805806f9 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -862,6 +862,7 @@ struct swap_info_struct;
862 * Permit allocation of a key and assign security data. Note that key does 862 * Permit allocation of a key and assign security data. Note that key does
863 * not have a serial number assigned at this point. 863 * not have a serial number assigned at this point.
864 * @key points to the key. 864 * @key points to the key.
865 * @flags is the allocation flags
865 * Return 0 if permission is granted, -ve error otherwise. 866 * Return 0 if permission is granted, -ve error otherwise.
866 * @key_free: 867 * @key_free:
867 * Notification of destruction; free security data. 868 * Notification of destruction; free security data.
@@ -1324,7 +1325,7 @@ struct security_operations {
1324 1325
1325 /* key management security hooks */ 1326 /* key management security hooks */
1326#ifdef CONFIG_KEYS 1327#ifdef CONFIG_KEYS
1327 int (*key_alloc)(struct key *key, struct task_struct *tsk); 1328 int (*key_alloc)(struct key *key, struct task_struct *tsk, unsigned long flags);
1328 void (*key_free)(struct key *key); 1329 void (*key_free)(struct key *key);
1329 int (*key_permission)(key_ref_t key_ref, 1330 int (*key_permission)(key_ref_t key_ref,
1330 struct task_struct *context, 1331 struct task_struct *context,
@@ -3040,9 +3041,10 @@ static inline int security_xfrm_policy_lookup(struct xfrm_policy *xp, u32 sk_sid
3040#ifdef CONFIG_KEYS 3041#ifdef CONFIG_KEYS
3041#ifdef CONFIG_SECURITY 3042#ifdef CONFIG_SECURITY
3042static inline int security_key_alloc(struct key *key, 3043static inline int security_key_alloc(struct key *key,
3043 struct task_struct *tsk) 3044 struct task_struct *tsk,
3045 unsigned long flags)
3044{ 3046{
3045 return security_ops->key_alloc(key, tsk); 3047 return security_ops->key_alloc(key, tsk, flags);
3046} 3048}
3047 3049
3048static inline void security_key_free(struct key *key) 3050static inline void security_key_free(struct key *key)
@@ -3060,7 +3062,8 @@ static inline int security_key_permission(key_ref_t key_ref,
3060#else 3062#else
3061 3063
3062static inline int security_key_alloc(struct key *key, 3064static inline int security_key_alloc(struct key *key,
3063 struct task_struct *tsk) 3065 struct task_struct *tsk,
3066 unsigned long flags)
3064{ 3067{
3065 return 0; 3068 return 0;
3066} 3069}
diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
index 9b8bcf125c..6e112cc5cd 100644
--- a/include/linux/sunrpc/gss_api.h
+++ b/include/linux/sunrpc/gss_api.h
@@ -126,7 +126,7 @@ struct gss_api_mech *gss_mech_get_by_pseudoflavor(u32);
126/* Just increments the mechanism's reference count and returns its input: */ 126/* Just increments the mechanism's reference count and returns its input: */
127struct gss_api_mech * gss_mech_get(struct gss_api_mech *); 127struct gss_api_mech * gss_mech_get(struct gss_api_mech *);
128 128
129/* For every succesful gss_mech_get or gss_mech_get_by_* call there must be a 129/* For every successful gss_mech_get or gss_mech_get_by_* call there must be a
130 * corresponding call to gss_mech_put. */ 130 * corresponding call to gss_mech_put. */
131void gss_mech_put(struct gss_api_mech *); 131void gss_mech_put(struct gss_api_mech *);
132 132
diff --git a/include/linux/swap.h b/include/linux/swap.h
index dc3f3aa0c8..c41e2d6d1a 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -199,6 +199,8 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
199} 199}
200#endif 200#endif
201 201
202extern int kswapd_run(int nid);
203
202#ifdef CONFIG_MMU 204#ifdef CONFIG_MMU
203/* linux/mm/shmem.c */ 205/* linux/mm/shmem.c */
204extern int shmem_unuse(swp_entry_t entry, struct page *page); 206extern int shmem_unuse(swp_entry_t entry, struct page *page);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 33785b79d5..008f04c567 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -174,9 +174,9 @@ asmlinkage long sys_waitid(int which, pid_t pid,
174 int options, struct rusage __user *ru); 174 int options, struct rusage __user *ru);
175asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options); 175asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options);
176asmlinkage long sys_set_tid_address(int __user *tidptr); 176asmlinkage long sys_set_tid_address(int __user *tidptr);
177asmlinkage long sys_futex(u32 __user *uaddr, int op, int val, 177asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
178 struct timespec __user *utime, u32 __user *uaddr2, 178 struct timespec __user *utime, u32 __user *uaddr2,
179 int val3); 179 u32 val3);
180 180
181asmlinkage long sys_init_module(void __user *umod, unsigned long len, 181asmlinkage long sys_init_module(void __user *umod, unsigned long len,
182 const char __user *uargs); 182 const char __user *uargs);
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 6a60770984..46e4d8f277 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -148,9 +148,12 @@ enum
148 KERN_SPIN_RETRY=70, /* int: number of spinlock retries */ 148 KERN_SPIN_RETRY=70, /* int: number of spinlock retries */
149 KERN_ACPI_VIDEO_FLAGS=71, /* int: flags for setting up video after ACPI sleep */ 149 KERN_ACPI_VIDEO_FLAGS=71, /* int: flags for setting up video after ACPI sleep */
150 KERN_IA64_UNALIGNED=72, /* int: ia64 unaligned userland trap enable */ 150 KERN_IA64_UNALIGNED=72, /* int: ia64 unaligned userland trap enable */
151 KERN_COMPAT_LOG=73, /* int: print compat layer messages */
152 KERN_MAX_LOCK_DEPTH=74,
151}; 153};
152 154
153 155
156
154/* CTL_VM names: */ 157/* CTL_VM names: */
155enum 158enum
156{ 159{
@@ -187,6 +190,7 @@ enum
187 VM_ZONE_RECLAIM_MODE=31, /* reclaim local zone memory before going off node */ 190 VM_ZONE_RECLAIM_MODE=31, /* reclaim local zone memory before going off node */
188 VM_ZONE_RECLAIM_INTERVAL=32, /* time period to wait after reclaim failure */ 191 VM_ZONE_RECLAIM_INTERVAL=32, /* time period to wait after reclaim failure */
189 VM_PANIC_ON_OOM=33, /* panic at out-of-memory */ 192 VM_PANIC_ON_OOM=33, /* panic at out-of-memory */
193 VM_VDSO_ENABLED=34, /* map VDSO into new processes? */
190}; 194};
191 195
192 196
diff --git a/include/linux/time.h b/include/linux/time.h
index 0cd696cee9..c05f8bb9a3 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -28,10 +28,13 @@ struct timezone {
28#ifdef __KERNEL__ 28#ifdef __KERNEL__
29 29
30/* Parameters used to convert the timespec values: */ 30/* Parameters used to convert the timespec values: */
31#define MSEC_PER_SEC 1000L 31#define MSEC_PER_SEC 1000L
32#define USEC_PER_SEC 1000000L 32#define USEC_PER_MSEC 1000L
33#define NSEC_PER_SEC 1000000000L 33#define NSEC_PER_USEC 1000L
34#define NSEC_PER_USEC 1000L 34#define NSEC_PER_MSEC 1000000L
35#define USEC_PER_SEC 1000000L
36#define NSEC_PER_SEC 1000000000L
37#define FSEC_PER_SEC 1000000000000000L
35 38
36static inline int timespec_equal(struct timespec *a, struct timespec *b) 39static inline int timespec_equal(struct timespec *a, struct timespec *b)
37{ 40{
@@ -77,6 +80,8 @@ extern struct timespec xtime;
77extern struct timespec wall_to_monotonic; 80extern struct timespec wall_to_monotonic;
78extern seqlock_t xtime_lock; 81extern seqlock_t xtime_lock;
79 82
83void timekeeping_init(void);
84
80static inline unsigned long get_seconds(void) 85static inline unsigned long get_seconds(void)
81{ 86{
82 return xtime.tv_sec; 87 return xtime.tv_sec;
@@ -100,6 +105,7 @@ extern int do_getitimer(int which, struct itimerval *value);
100extern void getnstimeofday(struct timespec *tv); 105extern void getnstimeofday(struct timespec *tv);
101 106
102extern struct timespec timespec_trunc(struct timespec t, unsigned gran); 107extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
108extern int timekeeping_is_continuous(void);
103 109
104/** 110/**
105 * timespec_to_ns - Convert timespec to nanoseconds 111 * timespec_to_ns - Convert timespec to nanoseconds
@@ -142,6 +148,20 @@ extern struct timespec ns_to_timespec(const s64 nsec);
142 */ 148 */
143extern struct timeval ns_to_timeval(const s64 nsec); 149extern struct timeval ns_to_timeval(const s64 nsec);
144 150
151/**
152 * timespec_add_ns - Adds nanoseconds to a timespec
153 * @a: pointer to timespec to be incremented
154 * @ns: unsigned nanoseconds value to be added
155 */
156static inline void timespec_add_ns(struct timespec *a, u64 ns)
157{
158 ns += a->tv_nsec;
159 while(unlikely(ns >= NSEC_PER_SEC)) {
160 ns -= NSEC_PER_SEC;
161 a->tv_sec++;
162 }
163 a->tv_nsec = ns;
164}
145#endif /* __KERNEL__ */ 165#endif /* __KERNEL__ */
146 166
147#define NFDBITS __NFDBITS 167#define NFDBITS __NFDBITS
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 34d3ccff7b..19bb6538b4 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -303,6 +303,8 @@ time_interpolator_reset(void)
303 303
304#endif /* !CONFIG_TIME_INTERPOLATION */ 304#endif /* !CONFIG_TIME_INTERPOLATION */
305 305
306#define TICK_LENGTH_SHIFT 32
307
306/* Returns how long ticks are at present, in ns / 2^(SHIFT_SCALE-10). */ 308/* Returns how long ticks are at present, in ns / 2^(SHIFT_SCALE-10). */
307extern u64 current_tick_length(void); 309extern u64 current_tick_length(void);
308 310
diff --git a/include/linux/topology.h b/include/linux/topology.h
index a305ae2e44..ec1eca8529 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -134,7 +134,8 @@
134 .flags = SD_LOAD_BALANCE \ 134 .flags = SD_LOAD_BALANCE \
135 | SD_BALANCE_NEWIDLE \ 135 | SD_BALANCE_NEWIDLE \
136 | SD_BALANCE_EXEC \ 136 | SD_BALANCE_EXEC \
137 | SD_WAKE_AFFINE, \ 137 | SD_WAKE_AFFINE \
138 | BALANCE_FOR_POWER, \
138 .last_balance = jiffies, \ 139 .last_balance = jiffies, \
139 .balance_interval = 1, \ 140 .balance_interval = 1, \
140 .nr_balance_failed = 0, \ 141 .nr_balance_failed = 0, \
diff --git a/include/linux/unwind.h b/include/linux/unwind.h
new file mode 100644
index 0000000000..ce48e2cd37
--- /dev/null
+++ b/include/linux/unwind.h
@@ -0,0 +1,127 @@
1#ifndef _LINUX_UNWIND_H
2#define _LINUX_UNWIND_H
3
4/*
5 * Copyright (C) 2002-2006 Novell, Inc.
6 * Jan Beulich <jbeulich@novell.com>
7 * This code is released under version 2 of the GNU GPL.
8 *
9 * A simple API for unwinding kernel stacks. This is used for
10 * debugging and error reporting purposes. The kernel doesn't need
11 * full-blown stack unwinding with all the bells and whistles, so there
12 * is not much point in implementing the full Dwarf2 unwind API.
13 */
14
15#include <linux/config.h>
16
17struct module;
18
19#ifdef CONFIG_STACK_UNWIND
20
21#include <asm/unwind.h>
22
23#ifndef ARCH_UNWIND_SECTION_NAME
24#define ARCH_UNWIND_SECTION_NAME ".eh_frame"
25#endif
26
27/*
28 * Initialize unwind support.
29 */
30extern void unwind_init(void);
31
32#ifdef CONFIG_MODULES
33
34extern void *unwind_add_table(struct module *,
35 const void *table_start,
36 unsigned long table_size);
37
38extern void unwind_remove_table(void *handle, int init_only);
39
40#endif
41
42extern int unwind_init_frame_info(struct unwind_frame_info *,
43 struct task_struct *,
44 /*const*/ struct pt_regs *);
45
46/*
47 * Prepare to unwind a blocked task.
48 */
49extern int unwind_init_blocked(struct unwind_frame_info *,
50 struct task_struct *);
51
52/*
53 * Prepare to unwind the currently running thread.
54 */
55extern int unwind_init_running(struct unwind_frame_info *,
56 asmlinkage int (*callback)(struct unwind_frame_info *,
57 void *arg),
58 void *arg);
59
60/*
61 * Unwind to previous to frame. Returns 0 if successful, negative
62 * number in case of an error.
63 */
64extern int unwind(struct unwind_frame_info *);
65
66/*
67 * Unwind until the return pointer is in user-land (or until an error
68 * occurs). Returns 0 if successful, negative number in case of
69 * error.
70 */
71extern int unwind_to_user(struct unwind_frame_info *);
72
73#else
74
75struct unwind_frame_info {};
76
77static inline void unwind_init(void) {}
78
79#ifdef CONFIG_MODULES
80
81static inline void *unwind_add_table(struct module *mod,
82 const void *table_start,
83 unsigned long table_size)
84{
85 return NULL;
86}
87
88#endif
89
90static inline void unwind_remove_table(void *handle, int init_only)
91{
92}
93
94static inline int unwind_init_frame_info(struct unwind_frame_info *info,
95 struct task_struct *tsk,
96 const struct pt_regs *regs)
97{
98 return -ENOSYS;
99}
100
101static inline int unwind_init_blocked(struct unwind_frame_info *info,
102 struct task_struct *tsk)
103{
104 return -ENOSYS;
105}
106
107static inline int unwind_init_running(struct unwind_frame_info *info,
108 asmlinkage int (*cb)(struct unwind_frame_info *,
109 void *arg),
110 void *arg)
111{
112 return -ENOSYS;
113}
114
115static inline int unwind(struct unwind_frame_info *info)
116{
117 return -ENOSYS;
118}
119
120static inline int unwind_to_user(struct unwind_frame_info *info)
121{
122 return -ENOSYS;
123}
124
125#endif
126
127#endif /* _LINUX_UNWIND_H */
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 4f428547ec..a62673dad7 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -245,6 +245,7 @@ struct v4l2_pix_format
245#define V4L2_PIX_FMT_YUV420 v4l2_fourcc('Y','U','1','2') /* 12 YUV 4:2:0 */ 245#define V4L2_PIX_FMT_YUV420 v4l2_fourcc('Y','U','1','2') /* 12 YUV 4:2:0 */
246#define V4L2_PIX_FMT_YYUV v4l2_fourcc('Y','Y','U','V') /* 16 YUV 4:2:2 */ 246#define V4L2_PIX_FMT_YYUV v4l2_fourcc('Y','Y','U','V') /* 16 YUV 4:2:2 */
247#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H','I','2','4') /* 8 8-bit color */ 247#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H','I','2','4') /* 8 8-bit color */
248#define V4L2_PIX_FMT_HM12 v4l2_fourcc('H','M','1','2') /* 8 YUV 4:1:1 16x16 macroblocks */
248 249
249/* see http://www.siliconimaging.com/RGB%20Bayer.htm */ 250/* see http://www.siliconimaging.com/RGB%20Bayer.htm */
250#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B','A','8','1') /* 8 BGBG.. GRGR.. */ 251#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B','A','8','1') /* 8 BGBG.. GRGR.. */
@@ -821,6 +822,11 @@ enum v4l2_mpeg_stream_type {
821#define V4L2_CID_MPEG_STREAM_PID_PCR (V4L2_CID_MPEG_BASE+4) 822#define V4L2_CID_MPEG_STREAM_PID_PCR (V4L2_CID_MPEG_BASE+4)
822#define V4L2_CID_MPEG_STREAM_PES_ID_AUDIO (V4L2_CID_MPEG_BASE+5) 823#define V4L2_CID_MPEG_STREAM_PES_ID_AUDIO (V4L2_CID_MPEG_BASE+5)
823#define V4L2_CID_MPEG_STREAM_PES_ID_VIDEO (V4L2_CID_MPEG_BASE+6) 824#define V4L2_CID_MPEG_STREAM_PES_ID_VIDEO (V4L2_CID_MPEG_BASE+6)
825#define V4L2_CID_MPEG_STREAM_VBI_FMT (V4L2_CID_MPEG_BASE+7)
826enum v4l2_mpeg_stream_vbi_fmt {
827 V4L2_MPEG_STREAM_VBI_FMT_NONE = 0, /* No VBI in the MPEG stream */
828 V4L2_MPEG_STREAM_VBI_FMT_IVTV = 1, /* VBI in private packets, IVTV format */
829};
824 830
825/* MPEG audio */ 831/* MPEG audio */
826#define V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ (V4L2_CID_MPEG_BASE+100) 832#define V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ (V4L2_CID_MPEG_BASE+100)
diff --git a/include/media/cx2341x.h b/include/media/cx2341x.h
index 51fb06b4c3..d91d88f93c 100644
--- a/include/media/cx2341x.h
+++ b/include/media/cx2341x.h
@@ -25,8 +25,13 @@ enum cx2341x_port {
25 CX2341X_PORT_SERIAL = 2 25 CX2341X_PORT_SERIAL = 2
26}; 26};
27 27
28enum cx2341x_cap {
29 CX2341X_CAP_HAS_SLICED_VBI = 1 << 0,
30};
31
28struct cx2341x_mpeg_params { 32struct cx2341x_mpeg_params {
29 /* misc */ 33 /* misc */
34 u32 capabilities;
30 enum cx2341x_port port; 35 enum cx2341x_port port;
31 u16 width; 36 u16 width;
32 u16 height; 37 u16 height;
@@ -34,6 +39,7 @@ struct cx2341x_mpeg_params {
34 39
35 /* stream */ 40 /* stream */
36 enum v4l2_mpeg_stream_type stream_type; 41 enum v4l2_mpeg_stream_type stream_type;
42 enum v4l2_mpeg_stream_vbi_fmt stream_vbi_fmt;
37 43
38 /* audio */ 44 /* audio */
39 enum v4l2_mpeg_audio_sampling_freq audio_sampling_freq; 45 enum v4l2_mpeg_audio_sampling_freq audio_sampling_freq;
@@ -83,9 +89,9 @@ int cx2341x_ctrl_query(struct cx2341x_mpeg_params *params,
83 struct v4l2_queryctrl *qctrl); 89 struct v4l2_queryctrl *qctrl);
84const char **cx2341x_ctrl_get_menu(u32 id); 90const char **cx2341x_ctrl_get_menu(u32 id);
85int cx2341x_ext_ctrls(struct cx2341x_mpeg_params *params, 91int cx2341x_ext_ctrls(struct cx2341x_mpeg_params *params,
86 struct v4l2_ext_controls *ctrls, int cmd); 92 struct v4l2_ext_controls *ctrls, unsigned int cmd);
87void cx2341x_fill_defaults(struct cx2341x_mpeg_params *p); 93void cx2341x_fill_defaults(struct cx2341x_mpeg_params *p);
88void cx2341x_log_status(struct cx2341x_mpeg_params *p, int cardid); 94void cx2341x_log_status(struct cx2341x_mpeg_params *p, const char *prefix);
89 95
90/* Firmware names */ 96/* Firmware names */
91#define CX2341X_FIRM_ENC_FILENAME "v4l-cx2341x-enc.fw" 97#define CX2341X_FIRM_ENC_FILENAME "v4l-cx2341x-enc.fw"
diff --git a/include/net/tipc/tipc_bearer.h b/include/net/tipc/tipc_bearer.h
index 098607cd4b..e07136d74c 100644
--- a/include/net/tipc/tipc_bearer.h
+++ b/include/net/tipc/tipc_bearer.h
@@ -49,10 +49,18 @@
49 49
50#define TIPC_MEDIA_TYPE_ETH 1 50#define TIPC_MEDIA_TYPE_ETH 1
51 51
52/*
53 * Destination address structure used by TIPC bearers when sending messages
54 *
55 * IMPORTANT: The fields of this structure MUST be stored using the specified
56 * byte order indicated below, as the structure is exchanged between nodes
57 * as part of a link setup process.
58 */
59
52struct tipc_media_addr { 60struct tipc_media_addr {
53 __u32 type; 61 __u32 type; /* bearer type (network byte order) */
54 union { 62 union {
55 __u8 eth_addr[6]; /* Ethernet bearer */ 63 __u8 eth_addr[6]; /* 48 bit Ethernet addr (byte array) */
56#if 0 64#if 0
57 /* Prototypes for other possible bearer types */ 65 /* Prototypes for other possible bearer types */
58 66
diff --git a/init/Kconfig b/init/Kconfig
index e0358f3946..f70f2fd273 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1,3 +1,11 @@
1config DEFCONFIG_LIST
2 string
3 option defconfig_list
4 default "/lib/modules/$UNAME_RELEASE/.config"
5 default "/etc/kernel-config"
6 default "/boot/config-$UNAME_RELEASE"
7 default "arch/$ARCH/defconfig"
8
1menu "Code maturity level options" 9menu "Code maturity level options"
2 10
3config EXPERIMENTAL 11config EXPERIMENTAL
@@ -236,16 +244,6 @@ config UID16
236 help 244 help
237 This enables the legacy 16-bit UID syscall wrappers. 245 This enables the legacy 16-bit UID syscall wrappers.
238 246
239config VM86
240 depends X86
241 default y
242 bool "Enable VM86 support" if EMBEDDED
243 help
244 This option is required by programs like DOSEMU to run 16-bit legacy
245 code on X86 processors. It also may be needed by software like
246 XFree86 to initialize some video cards via BIOS. Disabling this
247 option saves about 6k.
248
249config CC_OPTIMIZE_FOR_SIZE 247config CC_OPTIMIZE_FOR_SIZE
250 bool "Optimize for size (Look out for broken compilers!)" 248 bool "Optimize for size (Look out for broken compilers!)"
251 default y 249 default y
@@ -341,9 +339,14 @@ config BASE_FULL
341 kernel data structures. This saves memory on small machines, 339 kernel data structures. This saves memory on small machines,
342 but may reduce performance. 340 but may reduce performance.
343 341
342config RT_MUTEXES
343 boolean
344 select PLIST
345
344config FUTEX 346config FUTEX
345 bool "Enable futex support" if EMBEDDED 347 bool "Enable futex support" if EMBEDDED
346 default y 348 default y
349 select RT_MUTEXES
347 help 350 help
348 Disabling this option will cause the kernel to be built without 351 Disabling this option will cause the kernel to be built without
349 support for "fast userspace mutexes". The resulting kernel may not 352 support for "fast userspace mutexes". The resulting kernel may not
diff --git a/init/initramfs.c b/init/initramfs.c
index f81cfa40a7..d28c1094d7 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -30,6 +30,7 @@ static void __init free(void *where)
30 30
31static __initdata struct hash { 31static __initdata struct hash {
32 int ino, minor, major; 32 int ino, minor, major;
33 mode_t mode;
33 struct hash *next; 34 struct hash *next;
34 char name[N_ALIGN(PATH_MAX)]; 35 char name[N_ALIGN(PATH_MAX)];
35} *head[32]; 36} *head[32];
@@ -41,7 +42,8 @@ static inline int hash(int major, int minor, int ino)
41 return tmp & 31; 42 return tmp & 31;
42} 43}
43 44
44static char __init *find_link(int major, int minor, int ino, char *name) 45static char __init *find_link(int major, int minor, int ino,
46 mode_t mode, char *name)
45{ 47{
46 struct hash **p, *q; 48 struct hash **p, *q;
47 for (p = head + hash(major, minor, ino); *p; p = &(*p)->next) { 49 for (p = head + hash(major, minor, ino); *p; p = &(*p)->next) {
@@ -51,14 +53,17 @@ static char __init *find_link(int major, int minor, int ino, char *name)
51 continue; 53 continue;
52 if ((*p)->major != major) 54 if ((*p)->major != major)
53 continue; 55 continue;
56 if (((*p)->mode ^ mode) & S_IFMT)
57 continue;
54 return (*p)->name; 58 return (*p)->name;
55 } 59 }
56 q = (struct hash *)malloc(sizeof(struct hash)); 60 q = (struct hash *)malloc(sizeof(struct hash));
57 if (!q) 61 if (!q)
58 panic("can't allocate link hash entry"); 62 panic("can't allocate link hash entry");
59 q->ino = ino;
60 q->minor = minor;
61 q->major = major; 63 q->major = major;
64 q->minor = minor;
65 q->ino = ino;
66 q->mode = mode;
62 strcpy(q->name, name); 67 strcpy(q->name, name);
63 q->next = NULL; 68 q->next = NULL;
64 *p = q; 69 *p = q;
@@ -229,13 +234,25 @@ static int __init do_reset(void)
229static int __init maybe_link(void) 234static int __init maybe_link(void)
230{ 235{
231 if (nlink >= 2) { 236 if (nlink >= 2) {
232 char *old = find_link(major, minor, ino, collected); 237 char *old = find_link(major, minor, ino, mode, collected);
233 if (old) 238 if (old)
234 return (sys_link(old, collected) < 0) ? -1 : 1; 239 return (sys_link(old, collected) < 0) ? -1 : 1;
235 } 240 }
236 return 0; 241 return 0;
237} 242}
238 243
244static void __init clean_path(char *path, mode_t mode)
245{
246 struct stat st;
247
248 if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
249 if (S_ISDIR(st.st_mode))
250 sys_rmdir(path);
251 else
252 sys_unlink(path);
253 }
254}
255
239static __initdata int wfd; 256static __initdata int wfd;
240 257
241static int __init do_name(void) 258static int __init do_name(void)
@@ -248,9 +265,15 @@ static int __init do_name(void)
248 } 265 }
249 if (dry_run) 266 if (dry_run)
250 return 0; 267 return 0;
268 clean_path(collected, mode);
251 if (S_ISREG(mode)) { 269 if (S_ISREG(mode)) {
252 if (maybe_link() >= 0) { 270 int ml = maybe_link();
253 wfd = sys_open(collected, O_WRONLY|O_CREAT, mode); 271 if (ml >= 0) {
272 int openflags = O_WRONLY|O_CREAT;
273 if (ml != 1)
274 openflags |= O_TRUNC;
275 wfd = sys_open(collected, openflags, mode);
276
254 if (wfd >= 0) { 277 if (wfd >= 0) {
255 sys_fchown(wfd, uid, gid); 278 sys_fchown(wfd, uid, gid);
256 sys_fchmod(wfd, mode); 279 sys_fchmod(wfd, mode);
@@ -291,6 +314,7 @@ static int __init do_copy(void)
291static int __init do_symlink(void) 314static int __init do_symlink(void)
292{ 315{
293 collected[N_ALIGN(name_len) + body_len] = '\0'; 316 collected[N_ALIGN(name_len) + body_len] = '\0';
317 clean_path(collected, 0);
294 sys_symlink(collected + N_ALIGN(name_len), collected); 318 sys_symlink(collected + N_ALIGN(name_len), collected);
295 sys_lchown(collected, uid, gid); 319 sys_lchown(collected, uid, gid);
296 state = SkipIt; 320 state = SkipIt;
diff --git a/init/main.c b/init/main.c
index f715b9b897..0d57f6ccb6 100644
--- a/init/main.c
+++ b/init/main.c
@@ -47,6 +47,8 @@
47#include <linux/rmap.h> 47#include <linux/rmap.h>
48#include <linux/mempolicy.h> 48#include <linux/mempolicy.h>
49#include <linux/key.h> 49#include <linux/key.h>
50#include <linux/unwind.h>
51#include <linux/buffer_head.h>
50 52
51#include <asm/io.h> 53#include <asm/io.h>
52#include <asm/bugs.h> 54#include <asm/bugs.h>
@@ -79,7 +81,6 @@ extern void mca_init(void);
79extern void sbus_init(void); 81extern void sbus_init(void);
80extern void sysctl_init(void); 82extern void sysctl_init(void);
81extern void signals_init(void); 83extern void signals_init(void);
82extern void buffer_init(void);
83extern void pidhash_init(void); 84extern void pidhash_init(void);
84extern void pidmap_init(void); 85extern void pidmap_init(void);
85extern void prio_tree_init(void); 86extern void prio_tree_init(void);
@@ -482,6 +483,7 @@ asmlinkage void __init start_kernel(void)
482 __stop___param - __start___param, 483 __stop___param - __start___param,
483 &unknown_bootoption); 484 &unknown_bootoption);
484 sort_main_extable(); 485 sort_main_extable();
486 unwind_init();
485 trap_init(); 487 trap_init();
486 rcu_init(); 488 rcu_init();
487 init_IRQ(); 489 init_IRQ();
@@ -490,6 +492,7 @@ asmlinkage void __init start_kernel(void)
490 hrtimers_init(); 492 hrtimers_init();
491 softirq_init(); 493 softirq_init();
492 time_init(); 494 time_init();
495 timekeeping_init();
493 496
494 /* 497 /*
495 * HACK ALERT! This is early. We're enabling the console before 498 * HACK ALERT! This is early. We're enabling the console before
diff --git a/kernel/Makefile b/kernel/Makefile
index f6ef00f4f9..82fb182f6f 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -10,17 +10,22 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
10 kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ 10 kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
11 hrtimer.o 11 hrtimer.o
12 12
13obj-y += time/
13obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o 14obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
14obj-$(CONFIG_FUTEX) += futex.o 15obj-$(CONFIG_FUTEX) += futex.o
15ifeq ($(CONFIG_COMPAT),y) 16ifeq ($(CONFIG_COMPAT),y)
16obj-$(CONFIG_FUTEX) += futex_compat.o 17obj-$(CONFIG_FUTEX) += futex_compat.o
17endif 18endif
19obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
20obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
21obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
18obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o 22obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
19obj-$(CONFIG_SMP) += cpu.o spinlock.o 23obj-$(CONFIG_SMP) += cpu.o spinlock.o
20obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o 24obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
21obj-$(CONFIG_UID16) += uid16.o 25obj-$(CONFIG_UID16) += uid16.o
22obj-$(CONFIG_MODULES) += module.o 26obj-$(CONFIG_MODULES) += module.o
23obj-$(CONFIG_KALLSYMS) += kallsyms.o 27obj-$(CONFIG_KALLSYMS) += kallsyms.o
28obj-$(CONFIG_STACK_UNWIND) += unwind.o
24obj-$(CONFIG_PM) += power/ 29obj-$(CONFIG_PM) += power/
25obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o 30obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o
26obj-$(CONFIG_KEXEC) += kexec.o 31obj-$(CONFIG_KEXEC) += kexec.o
diff --git a/kernel/acct.c b/kernel/acct.c
index 368c4f03fe..126ca43d5d 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -521,6 +521,7 @@ static void do_acct_process(struct file *file)
521 521
522/** 522/**
523 * acct_init_pacct - initialize a new pacct_struct 523 * acct_init_pacct - initialize a new pacct_struct
524 * @pacct: per-process accounting info struct to initialize
524 */ 525 */
525void acct_init_pacct(struct pacct_struct *pacct) 526void acct_init_pacct(struct pacct_struct *pacct)
526{ 527{
@@ -576,7 +577,7 @@ void acct_collect(long exitcode, int group_dead)
576 * 577 *
577 * handles process accounting for an exiting task 578 * handles process accounting for an exiting task
578 */ 579 */
579void acct_process() 580void acct_process(void)
580{ 581{
581 struct file *file = NULL; 582 struct file *file = NULL;
582 583
diff --git a/kernel/audit.c b/kernel/audit.c
index 7dfac7031b..82443fb433 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -818,7 +818,7 @@ err:
818 */ 818 */
819unsigned int audit_serial(void) 819unsigned int audit_serial(void)
820{ 820{
821 static spinlock_t serial_lock = SPIN_LOCK_UNLOCKED; 821 static DEFINE_SPINLOCK(serial_lock);
822 static unsigned int serial = 0; 822 static unsigned int serial = 0;
823 823
824 unsigned long flags; 824 unsigned long flags;
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 9ebd96fda2..dc5e3f01ef 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -658,8 +658,7 @@ static void audit_log_task_context(struct audit_buffer *ab)
658 return; 658 return;
659 659
660error_path: 660error_path:
661 if (ctx) 661 kfree(ctx);
662 kfree(ctx);
663 audit_panic("error in audit_log_task_context"); 662 audit_panic("error in audit_log_task_context");
664 return; 663 return;
665} 664}
@@ -1367,7 +1366,7 @@ int __audit_mq_open(int oflag, mode_t mode, struct mq_attr __user *u_attr)
1367 * @mqdes: MQ descriptor 1366 * @mqdes: MQ descriptor
1368 * @msg_len: Message length 1367 * @msg_len: Message length
1369 * @msg_prio: Message priority 1368 * @msg_prio: Message priority
1370 * @abs_timeout: Message timeout in absolute time 1369 * @u_abs_timeout: Message timeout in absolute time
1371 * 1370 *
1372 * Returns 0 for success or NULL context or < 0 on error. 1371 * Returns 0 for success or NULL context or < 0 on error.
1373 */ 1372 */
@@ -1409,8 +1408,8 @@ int __audit_mq_timedsend(mqd_t mqdes, size_t msg_len, unsigned int msg_prio,
1409 * __audit_mq_timedreceive - record audit data for a POSIX MQ timed receive 1408 * __audit_mq_timedreceive - record audit data for a POSIX MQ timed receive
1410 * @mqdes: MQ descriptor 1409 * @mqdes: MQ descriptor
1411 * @msg_len: Message length 1410 * @msg_len: Message length
1412 * @msg_prio: Message priority 1411 * @u_msg_prio: Message priority
1413 * @abs_timeout: Message timeout in absolute time 1412 * @u_abs_timeout: Message timeout in absolute time
1414 * 1413 *
1415 * Returns 0 for success or NULL context or < 0 on error. 1414 * Returns 0 for success or NULL context or < 0 on error.
1416 */ 1415 */
@@ -1558,7 +1557,6 @@ int __audit_ipc_obj(struct kern_ipc_perm *ipcp)
1558 * @uid: msgq user id 1557 * @uid: msgq user id
1559 * @gid: msgq group id 1558 * @gid: msgq group id
1560 * @mode: msgq mode (permissions) 1559 * @mode: msgq mode (permissions)
1561 * @ipcp: in-kernel IPC permissions
1562 * 1560 *
1563 * Returns 0 for success or NULL context or < 0 on error. 1561 * Returns 0 for success or NULL context or < 0 on error.
1564 */ 1562 */
diff --git a/kernel/cpu.c b/kernel/cpu.c
index fe2b8d0bfe..70fbf2e837 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -13,12 +13,12 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/kthread.h> 14#include <linux/kthread.h>
15#include <linux/stop_machine.h> 15#include <linux/stop_machine.h>
16#include <asm/semaphore.h> 16#include <linux/mutex.h>
17 17
18/* This protects CPUs going up and down... */ 18/* This protects CPUs going up and down... */
19static DECLARE_MUTEX(cpucontrol); 19static DEFINE_MUTEX(cpucontrol);
20 20
21static BLOCKING_NOTIFIER_HEAD(cpu_chain); 21static __cpuinitdata BLOCKING_NOTIFIER_HEAD(cpu_chain);
22 22
23#ifdef CONFIG_HOTPLUG_CPU 23#ifdef CONFIG_HOTPLUG_CPU
24static struct task_struct *lock_cpu_hotplug_owner; 24static struct task_struct *lock_cpu_hotplug_owner;
@@ -30,9 +30,9 @@ static int __lock_cpu_hotplug(int interruptible)
30 30
31 if (lock_cpu_hotplug_owner != current) { 31 if (lock_cpu_hotplug_owner != current) {
32 if (interruptible) 32 if (interruptible)
33 ret = down_interruptible(&cpucontrol); 33 ret = mutex_lock_interruptible(&cpucontrol);
34 else 34 else
35 down(&cpucontrol); 35 mutex_lock(&cpucontrol);
36 } 36 }
37 37
38 /* 38 /*
@@ -56,7 +56,7 @@ void unlock_cpu_hotplug(void)
56{ 56{
57 if (--lock_cpu_hotplug_depth == 0) { 57 if (--lock_cpu_hotplug_depth == 0) {
58 lock_cpu_hotplug_owner = NULL; 58 lock_cpu_hotplug_owner = NULL;
59 up(&cpucontrol); 59 mutex_unlock(&cpucontrol);
60 } 60 }
61} 61}
62EXPORT_SYMBOL_GPL(unlock_cpu_hotplug); 62EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);
@@ -69,10 +69,13 @@ EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible);
69#endif /* CONFIG_HOTPLUG_CPU */ 69#endif /* CONFIG_HOTPLUG_CPU */
70 70
71/* Need to know about CPUs going up/down? */ 71/* Need to know about CPUs going up/down? */
72int register_cpu_notifier(struct notifier_block *nb) 72int __cpuinit register_cpu_notifier(struct notifier_block *nb)
73{ 73{
74 return blocking_notifier_chain_register(&cpu_chain, nb); 74 return blocking_notifier_chain_register(&cpu_chain, nb);
75} 75}
76
77#ifdef CONFIG_HOTPLUG_CPU
78
76EXPORT_SYMBOL(register_cpu_notifier); 79EXPORT_SYMBOL(register_cpu_notifier);
77 80
78void unregister_cpu_notifier(struct notifier_block *nb) 81void unregister_cpu_notifier(struct notifier_block *nb)
@@ -81,7 +84,6 @@ void unregister_cpu_notifier(struct notifier_block *nb)
81} 84}
82EXPORT_SYMBOL(unregister_cpu_notifier); 85EXPORT_SYMBOL(unregister_cpu_notifier);
83 86
84#ifdef CONFIG_HOTPLUG_CPU
85static inline void check_for_tasks(int cpu) 87static inline void check_for_tasks(int cpu)
86{ 88{
87 struct task_struct *p; 89 struct task_struct *p;
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index b602f73fb3..1535af3a91 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2442,31 +2442,43 @@ void __cpuset_memory_pressure_bump(void)
2442 */ 2442 */
2443static int proc_cpuset_show(struct seq_file *m, void *v) 2443static int proc_cpuset_show(struct seq_file *m, void *v)
2444{ 2444{
2445 struct pid *pid;
2445 struct task_struct *tsk; 2446 struct task_struct *tsk;
2446 char *buf; 2447 char *buf;
2447 int retval = 0; 2448 int retval;
2448 2449
2450 retval = -ENOMEM;
2449 buf = kmalloc(PAGE_SIZE, GFP_KERNEL); 2451 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2450 if (!buf) 2452 if (!buf)
2451 return -ENOMEM; 2453 goto out;
2454
2455 retval = -ESRCH;
2456 pid = m->private;
2457 tsk = get_pid_task(pid, PIDTYPE_PID);
2458 if (!tsk)
2459 goto out_free;
2452 2460
2453 tsk = m->private; 2461 retval = -EINVAL;
2454 mutex_lock(&manage_mutex); 2462 mutex_lock(&manage_mutex);
2463
2455 retval = cpuset_path(tsk->cpuset, buf, PAGE_SIZE); 2464 retval = cpuset_path(tsk->cpuset, buf, PAGE_SIZE);
2456 if (retval < 0) 2465 if (retval < 0)
2457 goto out; 2466 goto out_unlock;
2458 seq_puts(m, buf); 2467 seq_puts(m, buf);
2459 seq_putc(m, '\n'); 2468 seq_putc(m, '\n');
2460out: 2469out_unlock:
2461 mutex_unlock(&manage_mutex); 2470 mutex_unlock(&manage_mutex);
2471 put_task_struct(tsk);
2472out_free:
2462 kfree(buf); 2473 kfree(buf);
2474out:
2463 return retval; 2475 return retval;
2464} 2476}
2465 2477
2466static int cpuset_open(struct inode *inode, struct file *file) 2478static int cpuset_open(struct inode *inode, struct file *file)
2467{ 2479{
2468 struct task_struct *tsk = PROC_I(inode)->task; 2480 struct pid *pid = PROC_I(inode)->pid;
2469 return single_open(file, proc_cpuset_show, tsk); 2481 return single_open(file, proc_cpuset_show, pid);
2470} 2482}
2471 2483
2472struct file_operations proc_cpuset_operations = { 2484struct file_operations proc_cpuset_operations = {
diff --git a/kernel/exit.c b/kernel/exit.c
index e76bd02e93..ab06b9f88f 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -137,12 +137,8 @@ void release_task(struct task_struct * p)
137{ 137{
138 int zap_leader; 138 int zap_leader;
139 task_t *leader; 139 task_t *leader;
140 struct dentry *proc_dentry;
141
142repeat: 140repeat:
143 atomic_dec(&p->user->processes); 141 atomic_dec(&p->user->processes);
144 spin_lock(&p->proc_lock);
145 proc_dentry = proc_pid_unhash(p);
146 write_lock_irq(&tasklist_lock); 142 write_lock_irq(&tasklist_lock);
147 ptrace_unlink(p); 143 ptrace_unlink(p);
148 BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children)); 144 BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
@@ -171,8 +167,7 @@ repeat:
171 167
172 sched_exit(p); 168 sched_exit(p);
173 write_unlock_irq(&tasklist_lock); 169 write_unlock_irq(&tasklist_lock);
174 spin_unlock(&p->proc_lock); 170 proc_flush_task(p);
175 proc_pid_flush(proc_dentry);
176 release_thread(p); 171 release_thread(p);
177 call_rcu(&p->rcu, delayed_put_task_struct); 172 call_rcu(&p->rcu, delayed_put_task_struct);
178 173
@@ -931,9 +926,18 @@ fastcall NORET_TYPE void do_exit(long code)
931 tsk->mempolicy = NULL; 926 tsk->mempolicy = NULL;
932#endif 927#endif
933 /* 928 /*
929 * This must happen late, after the PID is not
930 * hashed anymore:
931 */
932 if (unlikely(!list_empty(&tsk->pi_state_list)))
933 exit_pi_state_list(tsk);
934 if (unlikely(current->pi_state_cache))
935 kfree(current->pi_state_cache);
936 /*
934 * If DEBUG_MUTEXES is on, make sure we are holding no locks: 937 * If DEBUG_MUTEXES is on, make sure we are holding no locks:
935 */ 938 */
936 mutex_debug_check_no_locks_held(tsk); 939 mutex_debug_check_no_locks_held(tsk);
940 rt_mutex_debug_check_no_locks_held(tsk);
937 941
938 if (tsk->io_context) 942 if (tsk->io_context)
939 exit_io_context(); 943 exit_io_context();
diff --git a/kernel/fork.c b/kernel/fork.c
index dfd10cb370..628198a4f2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -104,6 +104,7 @@ static kmem_cache_t *mm_cachep;
104void free_task(struct task_struct *tsk) 104void free_task(struct task_struct *tsk)
105{ 105{
106 free_thread_info(tsk->thread_info); 106 free_thread_info(tsk->thread_info);
107 rt_mutex_debug_task_free(tsk);
107 free_task_struct(tsk); 108 free_task_struct(tsk);
108} 109}
109EXPORT_SYMBOL(free_task); 110EXPORT_SYMBOL(free_task);
@@ -913,6 +914,19 @@ asmlinkage long sys_set_tid_address(int __user *tidptr)
913 return current->pid; 914 return current->pid;
914} 915}
915 916
917static inline void rt_mutex_init_task(struct task_struct *p)
918{
919#ifdef CONFIG_RT_MUTEXES
920 spin_lock_init(&p->pi_lock);
921 plist_head_init(&p->pi_waiters, &p->pi_lock);
922 p->pi_blocked_on = NULL;
923# ifdef CONFIG_DEBUG_RT_MUTEXES
924 spin_lock_init(&p->held_list_lock);
925 INIT_LIST_HEAD(&p->held_list_head);
926# endif
927#endif
928}
929
916/* 930/*
917 * This creates a new process as a copy of the old one, 931 * This creates a new process as a copy of the old one,
918 * but does not actually start it yet. 932 * but does not actually start it yet.
@@ -993,13 +1007,10 @@ static task_t *copy_process(unsigned long clone_flags,
993 if (put_user(p->pid, parent_tidptr)) 1007 if (put_user(p->pid, parent_tidptr))
994 goto bad_fork_cleanup; 1008 goto bad_fork_cleanup;
995 1009
996 p->proc_dentry = NULL;
997
998 INIT_LIST_HEAD(&p->children); 1010 INIT_LIST_HEAD(&p->children);
999 INIT_LIST_HEAD(&p->sibling); 1011 INIT_LIST_HEAD(&p->sibling);
1000 p->vfork_done = NULL; 1012 p->vfork_done = NULL;
1001 spin_lock_init(&p->alloc_lock); 1013 spin_lock_init(&p->alloc_lock);
1002 spin_lock_init(&p->proc_lock);
1003 1014
1004 clear_tsk_thread_flag(p, TIF_SIGPENDING); 1015 clear_tsk_thread_flag(p, TIF_SIGPENDING);
1005 init_sigpending(&p->pending); 1016 init_sigpending(&p->pending);
@@ -1037,6 +1048,8 @@ static task_t *copy_process(unsigned long clone_flags,
1037 mpol_fix_fork_child_flag(p); 1048 mpol_fix_fork_child_flag(p);
1038#endif 1049#endif
1039 1050
1051 rt_mutex_init_task(p);
1052
1040#ifdef CONFIG_DEBUG_MUTEXES 1053#ifdef CONFIG_DEBUG_MUTEXES
1041 p->blocked_on = NULL; /* not blocked yet */ 1054 p->blocked_on = NULL; /* not blocked yet */
1042#endif 1055#endif
@@ -1079,6 +1092,9 @@ static task_t *copy_process(unsigned long clone_flags,
1079#ifdef CONFIG_COMPAT 1092#ifdef CONFIG_COMPAT
1080 p->compat_robust_list = NULL; 1093 p->compat_robust_list = NULL;
1081#endif 1094#endif
1095 INIT_LIST_HEAD(&p->pi_state_list);
1096 p->pi_state_cache = NULL;
1097
1082 /* 1098 /*
1083 * sigaltstack should be cleared when sharing the same VM 1099 * sigaltstack should be cleared when sharing the same VM
1084 */ 1100 */
@@ -1159,18 +1175,6 @@ static task_t *copy_process(unsigned long clone_flags,
1159 } 1175 }
1160 1176
1161 if (clone_flags & CLONE_THREAD) { 1177 if (clone_flags & CLONE_THREAD) {
1162 /*
1163 * Important: if an exit-all has been started then
1164 * do not create this new thread - the whole thread
1165 * group is supposed to exit anyway.
1166 */
1167 if (current->signal->flags & SIGNAL_GROUP_EXIT) {
1168 spin_unlock(&current->sighand->siglock);
1169 write_unlock_irq(&tasklist_lock);
1170 retval = -EAGAIN;
1171 goto bad_fork_cleanup_namespace;
1172 }
1173
1174 p->group_leader = current->group_leader; 1178 p->group_leader = current->group_leader;
1175 list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group); 1179 list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
1176 1180
diff --git a/kernel/futex.c b/kernel/futex.c
index e1a380c77a..6c91f93800 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -12,6 +12,10 @@
12 * (C) Copyright 2006 Red Hat Inc, All Rights Reserved 12 * (C) Copyright 2006 Red Hat Inc, All Rights Reserved
13 * Thanks to Thomas Gleixner for suggestions, analysis and fixes. 13 * Thanks to Thomas Gleixner for suggestions, analysis and fixes.
14 * 14 *
15 * PI-futex support started by Ingo Molnar and Thomas Gleixner
16 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
17 * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
18 *
15 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly 19 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
16 * enough at me, Linus for the original (flawed) idea, Matthew 20 * enough at me, Linus for the original (flawed) idea, Matthew
17 * Kirkwood for proof-of-concept implementation. 21 * Kirkwood for proof-of-concept implementation.
@@ -46,6 +50,8 @@
46#include <linux/signal.h> 50#include <linux/signal.h>
47#include <asm/futex.h> 51#include <asm/futex.h>
48 52
53#include "rtmutex_common.h"
54
49#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8) 55#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
50 56
51/* 57/*
@@ -63,7 +69,7 @@ union futex_key {
63 int offset; 69 int offset;
64 } shared; 70 } shared;
65 struct { 71 struct {
66 unsigned long uaddr; 72 unsigned long address;
67 struct mm_struct *mm; 73 struct mm_struct *mm;
68 int offset; 74 int offset;
69 } private; 75 } private;
@@ -75,6 +81,27 @@ union futex_key {
75}; 81};
76 82
77/* 83/*
84 * Priority Inheritance state:
85 */
86struct futex_pi_state {
87 /*
88 * list of 'owned' pi_state instances - these have to be
89 * cleaned up in do_exit() if the task exits prematurely:
90 */
91 struct list_head list;
92
93 /*
94 * The PI object:
95 */
96 struct rt_mutex pi_mutex;
97
98 struct task_struct *owner;
99 atomic_t refcount;
100
101 union futex_key key;
102};
103
104/*
78 * We use this hashed waitqueue instead of a normal wait_queue_t, so 105 * We use this hashed waitqueue instead of a normal wait_queue_t, so
79 * we can wake only the relevant ones (hashed queues may be shared). 106 * we can wake only the relevant ones (hashed queues may be shared).
80 * 107 *
@@ -87,15 +114,19 @@ struct futex_q {
87 struct list_head list; 114 struct list_head list;
88 wait_queue_head_t waiters; 115 wait_queue_head_t waiters;
89 116
90 /* Which hash list lock to use. */ 117 /* Which hash list lock to use: */
91 spinlock_t *lock_ptr; 118 spinlock_t *lock_ptr;
92 119
93 /* Key which the futex is hashed on. */ 120 /* Key which the futex is hashed on: */
94 union futex_key key; 121 union futex_key key;
95 122
96 /* For fd, sigio sent using these. */ 123 /* For fd, sigio sent using these: */
97 int fd; 124 int fd;
98 struct file *filp; 125 struct file *filp;
126
127 /* Optional priority inheritance state: */
128 struct futex_pi_state *pi_state;
129 struct task_struct *task;
99}; 130};
100 131
101/* 132/*
@@ -144,8 +175,9 @@ static inline int match_futex(union futex_key *key1, union futex_key *key2)
144 * 175 *
145 * Should be called with &current->mm->mmap_sem but NOT any spinlocks. 176 * Should be called with &current->mm->mmap_sem but NOT any spinlocks.
146 */ 177 */
147static int get_futex_key(unsigned long uaddr, union futex_key *key) 178static int get_futex_key(u32 __user *uaddr, union futex_key *key)
148{ 179{
180 unsigned long address = (unsigned long)uaddr;
149 struct mm_struct *mm = current->mm; 181 struct mm_struct *mm = current->mm;
150 struct vm_area_struct *vma; 182 struct vm_area_struct *vma;
151 struct page *page; 183 struct page *page;
@@ -154,16 +186,16 @@ static int get_futex_key(unsigned long uaddr, union futex_key *key)
154 /* 186 /*
155 * The futex address must be "naturally" aligned. 187 * The futex address must be "naturally" aligned.
156 */ 188 */
157 key->both.offset = uaddr % PAGE_SIZE; 189 key->both.offset = address % PAGE_SIZE;
158 if (unlikely((key->both.offset % sizeof(u32)) != 0)) 190 if (unlikely((key->both.offset % sizeof(u32)) != 0))
159 return -EINVAL; 191 return -EINVAL;
160 uaddr -= key->both.offset; 192 address -= key->both.offset;
161 193
162 /* 194 /*
163 * The futex is hashed differently depending on whether 195 * The futex is hashed differently depending on whether
164 * it's in a shared or private mapping. So check vma first. 196 * it's in a shared or private mapping. So check vma first.
165 */ 197 */
166 vma = find_extend_vma(mm, uaddr); 198 vma = find_extend_vma(mm, address);
167 if (unlikely(!vma)) 199 if (unlikely(!vma))
168 return -EFAULT; 200 return -EFAULT;
169 201
@@ -184,7 +216,7 @@ static int get_futex_key(unsigned long uaddr, union futex_key *key)
184 */ 216 */
185 if (likely(!(vma->vm_flags & VM_MAYSHARE))) { 217 if (likely(!(vma->vm_flags & VM_MAYSHARE))) {
186 key->private.mm = mm; 218 key->private.mm = mm;
187 key->private.uaddr = uaddr; 219 key->private.address = address;
188 return 0; 220 return 0;
189 } 221 }
190 222
@@ -194,7 +226,7 @@ static int get_futex_key(unsigned long uaddr, union futex_key *key)
194 key->shared.inode = vma->vm_file->f_dentry->d_inode; 226 key->shared.inode = vma->vm_file->f_dentry->d_inode;
195 key->both.offset++; /* Bit 0 of offset indicates inode-based key. */ 227 key->both.offset++; /* Bit 0 of offset indicates inode-based key. */
196 if (likely(!(vma->vm_flags & VM_NONLINEAR))) { 228 if (likely(!(vma->vm_flags & VM_NONLINEAR))) {
197 key->shared.pgoff = (((uaddr - vma->vm_start) >> PAGE_SHIFT) 229 key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT)
198 + vma->vm_pgoff); 230 + vma->vm_pgoff);
199 return 0; 231 return 0;
200 } 232 }
@@ -205,7 +237,7 @@ static int get_futex_key(unsigned long uaddr, union futex_key *key)
205 * from swap. But that's a lot of code to duplicate here 237 * from swap. But that's a lot of code to duplicate here
206 * for a rare case, so we simply fetch the page. 238 * for a rare case, so we simply fetch the page.
207 */ 239 */
208 err = get_user_pages(current, mm, uaddr, 1, 0, 0, &page, NULL); 240 err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL);
209 if (err >= 0) { 241 if (err >= 0) {
210 key->shared.pgoff = 242 key->shared.pgoff =
211 page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 243 page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -246,18 +278,244 @@ static void drop_key_refs(union futex_key *key)
246 } 278 }
247} 279}
248 280
249static inline int get_futex_value_locked(int *dest, int __user *from) 281static inline int get_futex_value_locked(u32 *dest, u32 __user *from)
250{ 282{
251 int ret; 283 int ret;
252 284
253 inc_preempt_count(); 285 inc_preempt_count();
254 ret = __copy_from_user_inatomic(dest, from, sizeof(int)); 286 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
255 dec_preempt_count(); 287 dec_preempt_count();
256 288
257 return ret ? -EFAULT : 0; 289 return ret ? -EFAULT : 0;
258} 290}
259 291
260/* 292/*
293 * Fault handling. Called with current->mm->mmap_sem held.
294 */
295static int futex_handle_fault(unsigned long address, int attempt)
296{
297 struct vm_area_struct * vma;
298 struct mm_struct *mm = current->mm;
299
300 if (attempt >= 2 || !(vma = find_vma(mm, address)) ||
301 vma->vm_start > address || !(vma->vm_flags & VM_WRITE))
302 return -EFAULT;
303
304 switch (handle_mm_fault(mm, vma, address, 1)) {
305 case VM_FAULT_MINOR:
306 current->min_flt++;
307 break;
308 case VM_FAULT_MAJOR:
309 current->maj_flt++;
310 break;
311 default:
312 return -EFAULT;
313 }
314 return 0;
315}
316
317/*
318 * PI code:
319 */
320static int refill_pi_state_cache(void)
321{
322 struct futex_pi_state *pi_state;
323
324 if (likely(current->pi_state_cache))
325 return 0;
326
327 pi_state = kmalloc(sizeof(*pi_state), GFP_KERNEL);
328
329 if (!pi_state)
330 return -ENOMEM;
331
332 memset(pi_state, 0, sizeof(*pi_state));
333 INIT_LIST_HEAD(&pi_state->list);
334 /* pi_mutex gets initialized later */
335 pi_state->owner = NULL;
336 atomic_set(&pi_state->refcount, 1);
337
338 current->pi_state_cache = pi_state;
339
340 return 0;
341}
342
343static struct futex_pi_state * alloc_pi_state(void)
344{
345 struct futex_pi_state *pi_state = current->pi_state_cache;
346
347 WARN_ON(!pi_state);
348 current->pi_state_cache = NULL;
349
350 return pi_state;
351}
352
353static void free_pi_state(struct futex_pi_state *pi_state)
354{
355 if (!atomic_dec_and_test(&pi_state->refcount))
356 return;
357
358 /*
359 * If pi_state->owner is NULL, the owner is most probably dying
360 * and has cleaned up the pi_state already
361 */
362 if (pi_state->owner) {
363 spin_lock_irq(&pi_state->owner->pi_lock);
364 list_del_init(&pi_state->list);
365 spin_unlock_irq(&pi_state->owner->pi_lock);
366
367 rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
368 }
369
370 if (current->pi_state_cache)
371 kfree(pi_state);
372 else {
373 /*
374 * pi_state->list is already empty.
375 * clear pi_state->owner.
376 * refcount is at 0 - put it back to 1.
377 */
378 pi_state->owner = NULL;
379 atomic_set(&pi_state->refcount, 1);
380 current->pi_state_cache = pi_state;
381 }
382}
383
384/*
385 * Look up the task based on what TID userspace gave us.
386 * We dont trust it.
387 */
388static struct task_struct * futex_find_get_task(pid_t pid)
389{
390 struct task_struct *p;
391
392 read_lock(&tasklist_lock);
393 p = find_task_by_pid(pid);
394 if (!p)
395 goto out_unlock;
396 if ((current->euid != p->euid) && (current->euid != p->uid)) {
397 p = NULL;
398 goto out_unlock;
399 }
400 if (p->state == EXIT_ZOMBIE || p->exit_state == EXIT_ZOMBIE) {
401 p = NULL;
402 goto out_unlock;
403 }
404 get_task_struct(p);
405out_unlock:
406 read_unlock(&tasklist_lock);
407
408 return p;
409}
410
411/*
412 * This task is holding PI mutexes at exit time => bad.
413 * Kernel cleans up PI-state, but userspace is likely hosed.
414 * (Robust-futex cleanup is separate and might save the day for userspace.)
415 */
416void exit_pi_state_list(struct task_struct *curr)
417{
418 struct futex_hash_bucket *hb;
419 struct list_head *next, *head = &curr->pi_state_list;
420 struct futex_pi_state *pi_state;
421 union futex_key key;
422
423 /*
424 * We are a ZOMBIE and nobody can enqueue itself on
425 * pi_state_list anymore, but we have to be careful
426 * versus waiters unqueueing themselfs
427 */
428 spin_lock_irq(&curr->pi_lock);
429 while (!list_empty(head)) {
430
431 next = head->next;
432 pi_state = list_entry(next, struct futex_pi_state, list);
433 key = pi_state->key;
434 spin_unlock_irq(&curr->pi_lock);
435
436 hb = hash_futex(&key);
437 spin_lock(&hb->lock);
438
439 spin_lock_irq(&curr->pi_lock);
440 if (head->next != next) {
441 spin_unlock(&hb->lock);
442 continue;
443 }
444
445 list_del_init(&pi_state->list);
446
447 WARN_ON(pi_state->owner != curr);
448
449 pi_state->owner = NULL;
450 spin_unlock_irq(&curr->pi_lock);
451
452 rt_mutex_unlock(&pi_state->pi_mutex);
453
454 spin_unlock(&hb->lock);
455
456 spin_lock_irq(&curr->pi_lock);
457 }
458 spin_unlock_irq(&curr->pi_lock);
459}
460
461static int
462lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
463{
464 struct futex_pi_state *pi_state = NULL;
465 struct futex_q *this, *next;
466 struct list_head *head;
467 struct task_struct *p;
468 pid_t pid;
469
470 head = &hb->chain;
471
472 list_for_each_entry_safe(this, next, head, list) {
473 if (match_futex (&this->key, &me->key)) {
474 /*
475 * Another waiter already exists - bump up
476 * the refcount and return its pi_state:
477 */
478 pi_state = this->pi_state;
479 atomic_inc(&pi_state->refcount);
480 me->pi_state = pi_state;
481
482 return 0;
483 }
484 }
485
486 /*
487 * We are the first waiter - try to look up the real owner and
488 * attach the new pi_state to it:
489 */
490 pid = uval & FUTEX_TID_MASK;
491 p = futex_find_get_task(pid);
492 if (!p)
493 return -ESRCH;
494
495 pi_state = alloc_pi_state();
496
497 /*
498 * Initialize the pi_mutex in locked state and make 'p'
499 * the owner of it:
500 */
501 rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
502
503 /* Store the key for possible exit cleanups: */
504 pi_state->key = me->key;
505
506 spin_lock_irq(&p->pi_lock);
507 list_add(&pi_state->list, &p->pi_state_list);
508 pi_state->owner = p;
509 spin_unlock_irq(&p->pi_lock);
510
511 put_task_struct(p);
512
513 me->pi_state = pi_state;
514
515 return 0;
516}
517
518/*
261 * The hash bucket lock must be held when this is called. 519 * The hash bucket lock must be held when this is called.
262 * Afterwards, the futex_q must not be accessed. 520 * Afterwards, the futex_q must not be accessed.
263 */ 521 */
@@ -284,16 +542,80 @@ static void wake_futex(struct futex_q *q)
284 q->lock_ptr = NULL; 542 q->lock_ptr = NULL;
285} 543}
286 544
545static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
546{
547 struct task_struct *new_owner;
548 struct futex_pi_state *pi_state = this->pi_state;
549 u32 curval, newval;
550
551 if (!pi_state)
552 return -EINVAL;
553
554 new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
555
556 /*
557 * This happens when we have stolen the lock and the original
558 * pending owner did not enqueue itself back on the rt_mutex.
559 * Thats not a tragedy. We know that way, that a lock waiter
560 * is on the fly. We make the futex_q waiter the pending owner.
561 */
562 if (!new_owner)
563 new_owner = this->task;
564
565 /*
566 * We pass it to the next owner. (The WAITERS bit is always
567 * kept enabled while there is PI state around. We must also
568 * preserve the owner died bit.)
569 */
570 newval = (uval & FUTEX_OWNER_DIED) | FUTEX_WAITERS | new_owner->pid;
571
572 inc_preempt_count();
573 curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
574 dec_preempt_count();
575
576 if (curval == -EFAULT)
577 return -EFAULT;
578 if (curval != uval)
579 return -EINVAL;
580
581 list_del_init(&pi_state->owner->pi_state_list);
582 list_add(&pi_state->list, &new_owner->pi_state_list);
583 pi_state->owner = new_owner;
584 rt_mutex_unlock(&pi_state->pi_mutex);
585
586 return 0;
587}
588
589static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
590{
591 u32 oldval;
592
593 /*
594 * There is no waiter, so we unlock the futex. The owner died
595 * bit has not to be preserved here. We are the owner:
596 */
597 inc_preempt_count();
598 oldval = futex_atomic_cmpxchg_inatomic(uaddr, uval, 0);
599 dec_preempt_count();
600
601 if (oldval == -EFAULT)
602 return oldval;
603 if (oldval != uval)
604 return -EAGAIN;
605
606 return 0;
607}
608
287/* 609/*
288 * Wake up all waiters hashed on the physical page that is mapped 610 * Wake up all waiters hashed on the physical page that is mapped
289 * to this virtual address: 611 * to this virtual address:
290 */ 612 */
291static int futex_wake(unsigned long uaddr, int nr_wake) 613static int futex_wake(u32 __user *uaddr, int nr_wake)
292{ 614{
293 union futex_key key; 615 struct futex_hash_bucket *hb;
294 struct futex_hash_bucket *bh;
295 struct list_head *head;
296 struct futex_q *this, *next; 616 struct futex_q *this, *next;
617 struct list_head *head;
618 union futex_key key;
297 int ret; 619 int ret;
298 620
299 down_read(&current->mm->mmap_sem); 621 down_read(&current->mm->mmap_sem);
@@ -302,19 +624,21 @@ static int futex_wake(unsigned long uaddr, int nr_wake)
302 if (unlikely(ret != 0)) 624 if (unlikely(ret != 0))
303 goto out; 625 goto out;
304 626
305 bh = hash_futex(&key); 627 hb = hash_futex(&key);
306 spin_lock(&bh->lock); 628 spin_lock(&hb->lock);
307 head = &bh->chain; 629 head = &hb->chain;
308 630
309 list_for_each_entry_safe(this, next, head, list) { 631 list_for_each_entry_safe(this, next, head, list) {
310 if (match_futex (&this->key, &key)) { 632 if (match_futex (&this->key, &key)) {
633 if (this->pi_state)
634 return -EINVAL;
311 wake_futex(this); 635 wake_futex(this);
312 if (++ret >= nr_wake) 636 if (++ret >= nr_wake)
313 break; 637 break;
314 } 638 }
315 } 639 }
316 640
317 spin_unlock(&bh->lock); 641 spin_unlock(&hb->lock);
318out: 642out:
319 up_read(&current->mm->mmap_sem); 643 up_read(&current->mm->mmap_sem);
320 return ret; 644 return ret;
@@ -324,10 +648,12 @@ out:
324 * Wake up all waiters hashed on the physical page that is mapped 648 * Wake up all waiters hashed on the physical page that is mapped
325 * to this virtual address: 649 * to this virtual address:
326 */ 650 */
327static int futex_wake_op(unsigned long uaddr1, unsigned long uaddr2, int nr_wake, int nr_wake2, int op) 651static int
652futex_wake_op(u32 __user *uaddr1, u32 __user *uaddr2,
653 int nr_wake, int nr_wake2, int op)
328{ 654{
329 union futex_key key1, key2; 655 union futex_key key1, key2;
330 struct futex_hash_bucket *bh1, *bh2; 656 struct futex_hash_bucket *hb1, *hb2;
331 struct list_head *head; 657 struct list_head *head;
332 struct futex_q *this, *next; 658 struct futex_q *this, *next;
333 int ret, op_ret, attempt = 0; 659 int ret, op_ret, attempt = 0;
@@ -342,27 +668,29 @@ retryfull:
342 if (unlikely(ret != 0)) 668 if (unlikely(ret != 0))
343 goto out; 669 goto out;
344 670
345 bh1 = hash_futex(&key1); 671 hb1 = hash_futex(&key1);
346 bh2 = hash_futex(&key2); 672 hb2 = hash_futex(&key2);
347 673
348retry: 674retry:
349 if (bh1 < bh2) 675 if (hb1 < hb2)
350 spin_lock(&bh1->lock); 676 spin_lock(&hb1->lock);
351 spin_lock(&bh2->lock); 677 spin_lock(&hb2->lock);
352 if (bh1 > bh2) 678 if (hb1 > hb2)
353 spin_lock(&bh1->lock); 679 spin_lock(&hb1->lock);
354 680
355 op_ret = futex_atomic_op_inuser(op, (int __user *)uaddr2); 681 op_ret = futex_atomic_op_inuser(op, uaddr2);
356 if (unlikely(op_ret < 0)) { 682 if (unlikely(op_ret < 0)) {
357 int dummy; 683 u32 dummy;
358 684
359 spin_unlock(&bh1->lock); 685 spin_unlock(&hb1->lock);
360 if (bh1 != bh2) 686 if (hb1 != hb2)
361 spin_unlock(&bh2->lock); 687 spin_unlock(&hb2->lock);
362 688
363#ifndef CONFIG_MMU 689#ifndef CONFIG_MMU
364 /* we don't get EFAULT from MMU faults if we don't have an MMU, 690 /*
365 * but we might get them from range checking */ 691 * we don't get EFAULT from MMU faults if we don't have an MMU,
692 * but we might get them from range checking
693 */
366 ret = op_ret; 694 ret = op_ret;
367 goto out; 695 goto out;
368#endif 696#endif
@@ -372,47 +700,34 @@ retry:
372 goto out; 700 goto out;
373 } 701 }
374 702
375 /* futex_atomic_op_inuser needs to both read and write 703 /*
704 * futex_atomic_op_inuser needs to both read and write
376 * *(int __user *)uaddr2, but we can't modify it 705 * *(int __user *)uaddr2, but we can't modify it
377 * non-atomically. Therefore, if get_user below is not 706 * non-atomically. Therefore, if get_user below is not
378 * enough, we need to handle the fault ourselves, while 707 * enough, we need to handle the fault ourselves, while
379 * still holding the mmap_sem. */ 708 * still holding the mmap_sem.
709 */
380 if (attempt++) { 710 if (attempt++) {
381 struct vm_area_struct * vma; 711 if (futex_handle_fault((unsigned long)uaddr2,
382 struct mm_struct *mm = current->mm; 712 attempt))
383
384 ret = -EFAULT;
385 if (attempt >= 2 ||
386 !(vma = find_vma(mm, uaddr2)) ||
387 vma->vm_start > uaddr2 ||
388 !(vma->vm_flags & VM_WRITE))
389 goto out;
390
391 switch (handle_mm_fault(mm, vma, uaddr2, 1)) {
392 case VM_FAULT_MINOR:
393 current->min_flt++;
394 break;
395 case VM_FAULT_MAJOR:
396 current->maj_flt++;
397 break;
398 default:
399 goto out; 713 goto out;
400 }
401 goto retry; 714 goto retry;
402 } 715 }
403 716
404 /* If we would have faulted, release mmap_sem, 717 /*
405 * fault it in and start all over again. */ 718 * If we would have faulted, release mmap_sem,
719 * fault it in and start all over again.
720 */
406 up_read(&current->mm->mmap_sem); 721 up_read(&current->mm->mmap_sem);
407 722
408 ret = get_user(dummy, (int __user *)uaddr2); 723 ret = get_user(dummy, uaddr2);
409 if (ret) 724 if (ret)
410 return ret; 725 return ret;
411 726
412 goto retryfull; 727 goto retryfull;
413 } 728 }
414 729
415 head = &bh1->chain; 730 head = &hb1->chain;
416 731
417 list_for_each_entry_safe(this, next, head, list) { 732 list_for_each_entry_safe(this, next, head, list) {
418 if (match_futex (&this->key, &key1)) { 733 if (match_futex (&this->key, &key1)) {
@@ -423,7 +738,7 @@ retry:
423 } 738 }
424 739
425 if (op_ret > 0) { 740 if (op_ret > 0) {
426 head = &bh2->chain; 741 head = &hb2->chain;
427 742
428 op_ret = 0; 743 op_ret = 0;
429 list_for_each_entry_safe(this, next, head, list) { 744 list_for_each_entry_safe(this, next, head, list) {
@@ -436,9 +751,9 @@ retry:
436 ret += op_ret; 751 ret += op_ret;
437 } 752 }
438 753
439 spin_unlock(&bh1->lock); 754 spin_unlock(&hb1->lock);
440 if (bh1 != bh2) 755 if (hb1 != hb2)
441 spin_unlock(&bh2->lock); 756 spin_unlock(&hb2->lock);
442out: 757out:
443 up_read(&current->mm->mmap_sem); 758 up_read(&current->mm->mmap_sem);
444 return ret; 759 return ret;
@@ -448,11 +763,11 @@ out:
448 * Requeue all waiters hashed on one physical page to another 763 * Requeue all waiters hashed on one physical page to another
449 * physical page. 764 * physical page.
450 */ 765 */
451static int futex_requeue(unsigned long uaddr1, unsigned long uaddr2, 766static int futex_requeue(u32 __user *uaddr1, u32 __user *uaddr2,
452 int nr_wake, int nr_requeue, int *valp) 767 int nr_wake, int nr_requeue, u32 *cmpval)
453{ 768{
454 union futex_key key1, key2; 769 union futex_key key1, key2;
455 struct futex_hash_bucket *bh1, *bh2; 770 struct futex_hash_bucket *hb1, *hb2;
456 struct list_head *head1; 771 struct list_head *head1;
457 struct futex_q *this, *next; 772 struct futex_q *this, *next;
458 int ret, drop_count = 0; 773 int ret, drop_count = 0;
@@ -467,68 +782,72 @@ static int futex_requeue(unsigned long uaddr1, unsigned long uaddr2,
467 if (unlikely(ret != 0)) 782 if (unlikely(ret != 0))
468 goto out; 783 goto out;
469 784
470 bh1 = hash_futex(&key1); 785 hb1 = hash_futex(&key1);
471 bh2 = hash_futex(&key2); 786 hb2 = hash_futex(&key2);
472 787
473 if (bh1 < bh2) 788 if (hb1 < hb2)
474 spin_lock(&bh1->lock); 789 spin_lock(&hb1->lock);
475 spin_lock(&bh2->lock); 790 spin_lock(&hb2->lock);
476 if (bh1 > bh2) 791 if (hb1 > hb2)
477 spin_lock(&bh1->lock); 792 spin_lock(&hb1->lock);
478 793
479 if (likely(valp != NULL)) { 794 if (likely(cmpval != NULL)) {
480 int curval; 795 u32 curval;
481 796
482 ret = get_futex_value_locked(&curval, (int __user *)uaddr1); 797 ret = get_futex_value_locked(&curval, uaddr1);
483 798
484 if (unlikely(ret)) { 799 if (unlikely(ret)) {
485 spin_unlock(&bh1->lock); 800 spin_unlock(&hb1->lock);
486 if (bh1 != bh2) 801 if (hb1 != hb2)
487 spin_unlock(&bh2->lock); 802 spin_unlock(&hb2->lock);
488 803
489 /* If we would have faulted, release mmap_sem, fault 804 /*
805 * If we would have faulted, release mmap_sem, fault
490 * it in and start all over again. 806 * it in and start all over again.
491 */ 807 */
492 up_read(&current->mm->mmap_sem); 808 up_read(&current->mm->mmap_sem);
493 809
494 ret = get_user(curval, (int __user *)uaddr1); 810 ret = get_user(curval, uaddr1);
495 811
496 if (!ret) 812 if (!ret)
497 goto retry; 813 goto retry;
498 814
499 return ret; 815 return ret;
500 } 816 }
501 if (curval != *valp) { 817 if (curval != *cmpval) {
502 ret = -EAGAIN; 818 ret = -EAGAIN;
503 goto out_unlock; 819 goto out_unlock;
504 } 820 }
505 } 821 }
506 822
507 head1 = &bh1->chain; 823 head1 = &hb1->chain;
508 list_for_each_entry_safe(this, next, head1, list) { 824 list_for_each_entry_safe(this, next, head1, list) {
509 if (!match_futex (&this->key, &key1)) 825 if (!match_futex (&this->key, &key1))
510 continue; 826 continue;
511 if (++ret <= nr_wake) { 827 if (++ret <= nr_wake) {
512 wake_futex(this); 828 wake_futex(this);
513 } else { 829 } else {
514 list_move_tail(&this->list, &bh2->chain); 830 /*
515 this->lock_ptr = &bh2->lock; 831 * If key1 and key2 hash to the same bucket, no need to
832 * requeue.
833 */
834 if (likely(head1 != &hb2->chain)) {
835 list_move_tail(&this->list, &hb2->chain);
836 this->lock_ptr = &hb2->lock;
837 }
516 this->key = key2; 838 this->key = key2;
517 get_key_refs(&key2); 839 get_key_refs(&key2);
518 drop_count++; 840 drop_count++;
519 841
520 if (ret - nr_wake >= nr_requeue) 842 if (ret - nr_wake >= nr_requeue)
521 break; 843 break;
522 /* Make sure to stop if key1 == key2 */
523 if (head1 == &bh2->chain && head1 != &next->list)
524 head1 = &this->list;
525 } 844 }
526 } 845 }
527 846
528out_unlock: 847out_unlock:
529 spin_unlock(&bh1->lock); 848 spin_unlock(&hb1->lock);
530 if (bh1 != bh2) 849 if (hb1 != hb2)
531 spin_unlock(&bh2->lock); 850 spin_unlock(&hb2->lock);
532 851
533 /* drop_key_refs() must be called outside the spinlocks. */ 852 /* drop_key_refs() must be called outside the spinlocks. */
534 while (--drop_count >= 0) 853 while (--drop_count >= 0)
@@ -543,7 +862,7 @@ out:
543static inline struct futex_hash_bucket * 862static inline struct futex_hash_bucket *
544queue_lock(struct futex_q *q, int fd, struct file *filp) 863queue_lock(struct futex_q *q, int fd, struct file *filp)
545{ 864{
546 struct futex_hash_bucket *bh; 865 struct futex_hash_bucket *hb;
547 866
548 q->fd = fd; 867 q->fd = fd;
549 q->filp = filp; 868 q->filp = filp;
@@ -551,23 +870,24 @@ queue_lock(struct futex_q *q, int fd, struct file *filp)
551 init_waitqueue_head(&q->waiters); 870 init_waitqueue_head(&q->waiters);
552 871
553 get_key_refs(&q->key); 872 get_key_refs(&q->key);
554 bh = hash_futex(&q->key); 873 hb = hash_futex(&q->key);
555 q->lock_ptr = &bh->lock; 874 q->lock_ptr = &hb->lock;
556 875
557 spin_lock(&bh->lock); 876 spin_lock(&hb->lock);
558 return bh; 877 return hb;
559} 878}
560 879
561static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *bh) 880static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
562{ 881{
563 list_add_tail(&q->list, &bh->chain); 882 list_add_tail(&q->list, &hb->chain);
564 spin_unlock(&bh->lock); 883 q->task = current;
884 spin_unlock(&hb->lock);
565} 885}
566 886
567static inline void 887static inline void
568queue_unlock(struct futex_q *q, struct futex_hash_bucket *bh) 888queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
569{ 889{
570 spin_unlock(&bh->lock); 890 spin_unlock(&hb->lock);
571 drop_key_refs(&q->key); 891 drop_key_refs(&q->key);
572} 892}
573 893
@@ -579,16 +899,17 @@ queue_unlock(struct futex_q *q, struct futex_hash_bucket *bh)
579/* The key must be already stored in q->key. */ 899/* The key must be already stored in q->key. */
580static void queue_me(struct futex_q *q, int fd, struct file *filp) 900static void queue_me(struct futex_q *q, int fd, struct file *filp)
581{ 901{
582 struct futex_hash_bucket *bh; 902 struct futex_hash_bucket *hb;
583 bh = queue_lock(q, fd, filp); 903
584 __queue_me(q, bh); 904 hb = queue_lock(q, fd, filp);
905 __queue_me(q, hb);
585} 906}
586 907
587/* Return 1 if we were still queued (ie. 0 means we were woken) */ 908/* Return 1 if we were still queued (ie. 0 means we were woken) */
588static int unqueue_me(struct futex_q *q) 909static int unqueue_me(struct futex_q *q)
589{ 910{
590 int ret = 0;
591 spinlock_t *lock_ptr; 911 spinlock_t *lock_ptr;
912 int ret = 0;
592 913
593 /* In the common case we don't take the spinlock, which is nice. */ 914 /* In the common case we don't take the spinlock, which is nice. */
594 retry: 915 retry:
@@ -614,6 +935,9 @@ static int unqueue_me(struct futex_q *q)
614 } 935 }
615 WARN_ON(list_empty(&q->list)); 936 WARN_ON(list_empty(&q->list));
616 list_del(&q->list); 937 list_del(&q->list);
938
939 BUG_ON(q->pi_state);
940
617 spin_unlock(lock_ptr); 941 spin_unlock(lock_ptr);
618 ret = 1; 942 ret = 1;
619 } 943 }
@@ -622,21 +946,42 @@ static int unqueue_me(struct futex_q *q)
622 return ret; 946 return ret;
623} 947}
624 948
625static int futex_wait(unsigned long uaddr, int val, unsigned long time) 949/*
950 * PI futexes can not be requeued and must remove themself from the
951 * hash bucket. The hash bucket lock is held on entry and dropped here.
952 */
953static void unqueue_me_pi(struct futex_q *q, struct futex_hash_bucket *hb)
626{ 954{
627 DECLARE_WAITQUEUE(wait, current); 955 WARN_ON(list_empty(&q->list));
628 int ret, curval; 956 list_del(&q->list);
957
958 BUG_ON(!q->pi_state);
959 free_pi_state(q->pi_state);
960 q->pi_state = NULL;
961
962 spin_unlock(&hb->lock);
963
964 drop_key_refs(&q->key);
965}
966
967static int futex_wait(u32 __user *uaddr, u32 val, unsigned long time)
968{
969 struct task_struct *curr = current;
970 DECLARE_WAITQUEUE(wait, curr);
971 struct futex_hash_bucket *hb;
629 struct futex_q q; 972 struct futex_q q;
630 struct futex_hash_bucket *bh; 973 u32 uval;
974 int ret;
631 975
976 q.pi_state = NULL;
632 retry: 977 retry:
633 down_read(&current->mm->mmap_sem); 978 down_read(&curr->mm->mmap_sem);
634 979
635 ret = get_futex_key(uaddr, &q.key); 980 ret = get_futex_key(uaddr, &q.key);
636 if (unlikely(ret != 0)) 981 if (unlikely(ret != 0))
637 goto out_release_sem; 982 goto out_release_sem;
638 983
639 bh = queue_lock(&q, -1, NULL); 984 hb = queue_lock(&q, -1, NULL);
640 985
641 /* 986 /*
642 * Access the page AFTER the futex is queued. 987 * Access the page AFTER the futex is queued.
@@ -658,37 +1003,35 @@ static int futex_wait(unsigned long uaddr, int val, unsigned long time)
658 * We hold the mmap semaphore, so the mapping cannot have changed 1003 * We hold the mmap semaphore, so the mapping cannot have changed
659 * since we looked it up in get_futex_key. 1004 * since we looked it up in get_futex_key.
660 */ 1005 */
661 1006 ret = get_futex_value_locked(&uval, uaddr);
662 ret = get_futex_value_locked(&curval, (int __user *)uaddr);
663 1007
664 if (unlikely(ret)) { 1008 if (unlikely(ret)) {
665 queue_unlock(&q, bh); 1009 queue_unlock(&q, hb);
666 1010
667 /* If we would have faulted, release mmap_sem, fault it in and 1011 /*
1012 * If we would have faulted, release mmap_sem, fault it in and
668 * start all over again. 1013 * start all over again.
669 */ 1014 */
670 up_read(&current->mm->mmap_sem); 1015 up_read(&curr->mm->mmap_sem);
671 1016
672 ret = get_user(curval, (int __user *)uaddr); 1017 ret = get_user(uval, uaddr);
673 1018
674 if (!ret) 1019 if (!ret)
675 goto retry; 1020 goto retry;
676 return ret; 1021 return ret;
677 } 1022 }
678 if (curval != val) { 1023 ret = -EWOULDBLOCK;
679 ret = -EWOULDBLOCK; 1024 if (uval != val)
680 queue_unlock(&q, bh); 1025 goto out_unlock_release_sem;
681 goto out_release_sem;
682 }
683 1026
684 /* Only actually queue if *uaddr contained val. */ 1027 /* Only actually queue if *uaddr contained val. */
685 __queue_me(&q, bh); 1028 __queue_me(&q, hb);
686 1029
687 /* 1030 /*
688 * Now the futex is queued and we have checked the data, we 1031 * Now the futex is queued and we have checked the data, we
689 * don't want to hold mmap_sem while we sleep. 1032 * don't want to hold mmap_sem while we sleep.
690 */ 1033 */
691 up_read(&current->mm->mmap_sem); 1034 up_read(&curr->mm->mmap_sem);
692 1035
693 /* 1036 /*
694 * There might have been scheduling since the queue_me(), as we 1037 * There might have been scheduling since the queue_me(), as we
@@ -720,12 +1063,421 @@ static int futex_wait(unsigned long uaddr, int val, unsigned long time)
720 return 0; 1063 return 0;
721 if (time == 0) 1064 if (time == 0)
722 return -ETIMEDOUT; 1065 return -ETIMEDOUT;
723 /* We expect signal_pending(current), but another thread may 1066 /*
724 * have handled it for us already. */ 1067 * We expect signal_pending(current), but another thread may
1068 * have handled it for us already.
1069 */
725 return -EINTR; 1070 return -EINTR;
726 1071
1072 out_unlock_release_sem:
1073 queue_unlock(&q, hb);
1074
727 out_release_sem: 1075 out_release_sem:
1076 up_read(&curr->mm->mmap_sem);
1077 return ret;
1078}
1079
1080/*
1081 * Userspace tried a 0 -> TID atomic transition of the futex value
1082 * and failed. The kernel side here does the whole locking operation:
1083 * if there are waiters then it will block, it does PI, etc. (Due to
1084 * races the kernel might see a 0 value of the futex too.)
1085 */
1086static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
1087 struct hrtimer_sleeper *to)
1088{
1089 struct task_struct *curr = current;
1090 struct futex_hash_bucket *hb;
1091 u32 uval, newval, curval;
1092 struct futex_q q;
1093 int ret, attempt = 0;
1094
1095 if (refill_pi_state_cache())
1096 return -ENOMEM;
1097
1098 q.pi_state = NULL;
1099 retry:
1100 down_read(&curr->mm->mmap_sem);
1101
1102 ret = get_futex_key(uaddr, &q.key);
1103 if (unlikely(ret != 0))
1104 goto out_release_sem;
1105
1106 hb = queue_lock(&q, -1, NULL);
1107
1108 retry_locked:
1109 /*
1110 * To avoid races, we attempt to take the lock here again
1111 * (by doing a 0 -> TID atomic cmpxchg), while holding all
1112 * the locks. It will most likely not succeed.
1113 */
1114 newval = current->pid;
1115
1116 inc_preempt_count();
1117 curval = futex_atomic_cmpxchg_inatomic(uaddr, 0, newval);
1118 dec_preempt_count();
1119
1120 if (unlikely(curval == -EFAULT))
1121 goto uaddr_faulted;
1122
1123 /* We own the lock already */
1124 if (unlikely((curval & FUTEX_TID_MASK) == current->pid)) {
1125 if (!detect && 0)
1126 force_sig(SIGKILL, current);
1127 ret = -EDEADLK;
1128 goto out_unlock_release_sem;
1129 }
1130
1131 /*
1132 * Surprise - we got the lock. Just return
1133 * to userspace:
1134 */
1135 if (unlikely(!curval))
1136 goto out_unlock_release_sem;
1137
1138 uval = curval;
1139 newval = uval | FUTEX_WAITERS;
1140
1141 inc_preempt_count();
1142 curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
1143 dec_preempt_count();
1144
1145 if (unlikely(curval == -EFAULT))
1146 goto uaddr_faulted;
1147 if (unlikely(curval != uval))
1148 goto retry_locked;
1149
1150 /*
1151 * We dont have the lock. Look up the PI state (or create it if
1152 * we are the first waiter):
1153 */
1154 ret = lookup_pi_state(uval, hb, &q);
1155
1156 if (unlikely(ret)) {
1157 /*
1158 * There were no waiters and the owner task lookup
1159 * failed. When the OWNER_DIED bit is set, then we
1160 * know that this is a robust futex and we actually
1161 * take the lock. This is safe as we are protected by
1162 * the hash bucket lock. We also set the waiters bit
1163 * unconditionally here, to simplify glibc handling of
1164 * multiple tasks racing to acquire the lock and
1165 * cleanup the problems which were left by the dead
1166 * owner.
1167 */
1168 if (curval & FUTEX_OWNER_DIED) {
1169 uval = newval;
1170 newval = current->pid |
1171 FUTEX_OWNER_DIED | FUTEX_WAITERS;
1172
1173 inc_preempt_count();
1174 curval = futex_atomic_cmpxchg_inatomic(uaddr,
1175 uval, newval);
1176 dec_preempt_count();
1177
1178 if (unlikely(curval == -EFAULT))
1179 goto uaddr_faulted;
1180 if (unlikely(curval != uval))
1181 goto retry_locked;
1182 ret = 0;
1183 }
1184 goto out_unlock_release_sem;
1185 }
1186
1187 /*
1188 * Only actually queue now that the atomic ops are done:
1189 */
1190 __queue_me(&q, hb);
1191
1192 /*
1193 * Now the futex is queued and we have checked the data, we
1194 * don't want to hold mmap_sem while we sleep.
1195 */
1196 up_read(&curr->mm->mmap_sem);
1197
1198 WARN_ON(!q.pi_state);
1199 /*
1200 * Block on the PI mutex:
1201 */
1202 if (!trylock)
1203 ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
1204 else {
1205 ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
1206 /* Fixup the trylock return value: */
1207 ret = ret ? 0 : -EWOULDBLOCK;
1208 }
1209
1210 down_read(&curr->mm->mmap_sem);
1211 hb = queue_lock(&q, -1, NULL);
1212
1213 /*
1214 * Got the lock. We might not be the anticipated owner if we
1215 * did a lock-steal - fix up the PI-state in that case.
1216 */
1217 if (!ret && q.pi_state->owner != curr) {
1218 u32 newtid = current->pid | FUTEX_WAITERS;
1219
1220 /* Owner died? */
1221 if (q.pi_state->owner != NULL) {
1222 spin_lock_irq(&q.pi_state->owner->pi_lock);
1223 list_del_init(&q.pi_state->list);
1224 spin_unlock_irq(&q.pi_state->owner->pi_lock);
1225 } else
1226 newtid |= FUTEX_OWNER_DIED;
1227
1228 q.pi_state->owner = current;
1229
1230 spin_lock_irq(&current->pi_lock);
1231 list_add(&q.pi_state->list, &current->pi_state_list);
1232 spin_unlock_irq(&current->pi_lock);
1233
1234 /* Unqueue and drop the lock */
1235 unqueue_me_pi(&q, hb);
1236 up_read(&curr->mm->mmap_sem);
1237 /*
1238 * We own it, so we have to replace the pending owner
1239 * TID. This must be atomic as we have preserve the
1240 * owner died bit here.
1241 */
1242 ret = get_user(uval, uaddr);
1243 while (!ret) {
1244 newval = (uval & FUTEX_OWNER_DIED) | newtid;
1245 curval = futex_atomic_cmpxchg_inatomic(uaddr,
1246 uval, newval);
1247 if (curval == -EFAULT)
1248 ret = -EFAULT;
1249 if (curval == uval)
1250 break;
1251 uval = curval;
1252 }
1253 } else {
1254 /*
1255 * Catch the rare case, where the lock was released
1256 * when we were on the way back before we locked
1257 * the hash bucket.
1258 */
1259 if (ret && q.pi_state->owner == curr) {
1260 if (rt_mutex_trylock(&q.pi_state->pi_mutex))
1261 ret = 0;
1262 }
1263 /* Unqueue and drop the lock */
1264 unqueue_me_pi(&q, hb);
1265 up_read(&curr->mm->mmap_sem);
1266 }
1267
1268 if (!detect && ret == -EDEADLK && 0)
1269 force_sig(SIGKILL, current);
1270
1271 return ret;
1272
1273 out_unlock_release_sem:
1274 queue_unlock(&q, hb);
1275
1276 out_release_sem:
1277 up_read(&curr->mm->mmap_sem);
1278 return ret;
1279
1280 uaddr_faulted:
1281 /*
1282 * We have to r/w *(int __user *)uaddr, but we can't modify it
1283 * non-atomically. Therefore, if get_user below is not
1284 * enough, we need to handle the fault ourselves, while
1285 * still holding the mmap_sem.
1286 */
1287 if (attempt++) {
1288 if (futex_handle_fault((unsigned long)uaddr, attempt))
1289 goto out_unlock_release_sem;
1290
1291 goto retry_locked;
1292 }
1293
1294 queue_unlock(&q, hb);
1295 up_read(&curr->mm->mmap_sem);
1296
1297 ret = get_user(uval, uaddr);
1298 if (!ret && (uval != -EFAULT))
1299 goto retry;
1300
1301 return ret;
1302}
1303
1304/*
1305 * Restart handler
1306 */
1307static long futex_lock_pi_restart(struct restart_block *restart)
1308{
1309 struct hrtimer_sleeper timeout, *to = NULL;
1310 int ret;
1311
1312 restart->fn = do_no_restart_syscall;
1313
1314 if (restart->arg2 || restart->arg3) {
1315 to = &timeout;
1316 hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_ABS);
1317 hrtimer_init_sleeper(to, current);
1318 to->timer.expires.tv64 = ((u64)restart->arg1 << 32) |
1319 (u64) restart->arg0;
1320 }
1321
1322 pr_debug("lock_pi restart: %p, %d (%d)\n",
1323 (u32 __user *)restart->arg0, current->pid);
1324
1325 ret = do_futex_lock_pi((u32 __user *)restart->arg0, restart->arg1,
1326 0, to);
1327
1328 if (ret != -EINTR)
1329 return ret;
1330
1331 restart->fn = futex_lock_pi_restart;
1332
1333 /* The other values are filled in */
1334 return -ERESTART_RESTARTBLOCK;
1335}
1336
1337/*
1338 * Called from the syscall entry below.
1339 */
1340static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
1341 long nsec, int trylock)
1342{
1343 struct hrtimer_sleeper timeout, *to = NULL;
1344 struct restart_block *restart;
1345 int ret;
1346
1347 if (sec != MAX_SCHEDULE_TIMEOUT) {
1348 to = &timeout;
1349 hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_ABS);
1350 hrtimer_init_sleeper(to, current);
1351 to->timer.expires = ktime_set(sec, nsec);
1352 }
1353
1354 ret = do_futex_lock_pi(uaddr, detect, trylock, to);
1355
1356 if (ret != -EINTR)
1357 return ret;
1358
1359 pr_debug("lock_pi interrupted: %p, %d (%d)\n", uaddr, current->pid);
1360
1361 restart = &current_thread_info()->restart_block;
1362 restart->fn = futex_lock_pi_restart;
1363 restart->arg0 = (unsigned long) uaddr;
1364 restart->arg1 = detect;
1365 if (to) {
1366 restart->arg2 = to->timer.expires.tv64 & 0xFFFFFFFF;
1367 restart->arg3 = to->timer.expires.tv64 >> 32;
1368 } else
1369 restart->arg2 = restart->arg3 = 0;
1370
1371 return -ERESTART_RESTARTBLOCK;
1372}
1373
1374/*
1375 * Userspace attempted a TID -> 0 atomic transition, and failed.
1376 * This is the in-kernel slowpath: we look up the PI state (if any),
1377 * and do the rt-mutex unlock.
1378 */
1379static int futex_unlock_pi(u32 __user *uaddr)
1380{
1381 struct futex_hash_bucket *hb;
1382 struct futex_q *this, *next;
1383 u32 uval;
1384 struct list_head *head;
1385 union futex_key key;
1386 int ret, attempt = 0;
1387
1388retry:
1389 if (get_user(uval, uaddr))
1390 return -EFAULT;
1391 /*
1392 * We release only a lock we actually own:
1393 */
1394 if ((uval & FUTEX_TID_MASK) != current->pid)
1395 return -EPERM;
1396 /*
1397 * First take all the futex related locks:
1398 */
1399 down_read(&current->mm->mmap_sem);
1400
1401 ret = get_futex_key(uaddr, &key);
1402 if (unlikely(ret != 0))
1403 goto out;
1404
1405 hb = hash_futex(&key);
1406 spin_lock(&hb->lock);
1407
1408retry_locked:
1409 /*
1410 * To avoid races, try to do the TID -> 0 atomic transition
1411 * again. If it succeeds then we can return without waking
1412 * anyone else up:
1413 */
1414 inc_preempt_count();
1415 uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0);
1416 dec_preempt_count();
1417
1418 if (unlikely(uval == -EFAULT))
1419 goto pi_faulted;
1420 /*
1421 * Rare case: we managed to release the lock atomically,
1422 * no need to wake anyone else up:
1423 */
1424 if (unlikely(uval == current->pid))
1425 goto out_unlock;
1426
1427 /*
1428 * Ok, other tasks may need to be woken up - check waiters
1429 * and do the wakeup if necessary:
1430 */
1431 head = &hb->chain;
1432
1433 list_for_each_entry_safe(this, next, head, list) {
1434 if (!match_futex (&this->key, &key))
1435 continue;
1436 ret = wake_futex_pi(uaddr, uval, this);
1437 /*
1438 * The atomic access to the futex value
1439 * generated a pagefault, so retry the
1440 * user-access and the wakeup:
1441 */
1442 if (ret == -EFAULT)
1443 goto pi_faulted;
1444 goto out_unlock;
1445 }
1446 /*
1447 * No waiters - kernel unlocks the futex:
1448 */
1449 ret = unlock_futex_pi(uaddr, uval);
1450 if (ret == -EFAULT)
1451 goto pi_faulted;
1452
1453out_unlock:
1454 spin_unlock(&hb->lock);
1455out:
728 up_read(&current->mm->mmap_sem); 1456 up_read(&current->mm->mmap_sem);
1457
1458 return ret;
1459
1460pi_faulted:
1461 /*
1462 * We have to r/w *(int __user *)uaddr, but we can't modify it
1463 * non-atomically. Therefore, if get_user below is not
1464 * enough, we need to handle the fault ourselves, while
1465 * still holding the mmap_sem.
1466 */
1467 if (attempt++) {
1468 if (futex_handle_fault((unsigned long)uaddr, attempt))
1469 goto out_unlock;
1470
1471 goto retry_locked;
1472 }
1473
1474 spin_unlock(&hb->lock);
1475 up_read(&current->mm->mmap_sem);
1476
1477 ret = get_user(uval, uaddr);
1478 if (!ret && (uval != -EFAULT))
1479 goto retry;
1480
729 return ret; 1481 return ret;
730} 1482}
731 1483
@@ -735,6 +1487,7 @@ static int futex_close(struct inode *inode, struct file *filp)
735 1487
736 unqueue_me(q); 1488 unqueue_me(q);
737 kfree(q); 1489 kfree(q);
1490
738 return 0; 1491 return 0;
739} 1492}
740 1493
@@ -766,7 +1519,7 @@ static struct file_operations futex_fops = {
766 * Signal allows caller to avoid the race which would occur if they 1519 * Signal allows caller to avoid the race which would occur if they
767 * set the sigio stuff up afterwards. 1520 * set the sigio stuff up afterwards.
768 */ 1521 */
769static int futex_fd(unsigned long uaddr, int signal) 1522static int futex_fd(u32 __user *uaddr, int signal)
770{ 1523{
771 struct futex_q *q; 1524 struct futex_q *q;
772 struct file *filp; 1525 struct file *filp;
@@ -803,6 +1556,7 @@ static int futex_fd(unsigned long uaddr, int signal)
803 err = -ENOMEM; 1556 err = -ENOMEM;
804 goto error; 1557 goto error;
805 } 1558 }
1559 q->pi_state = NULL;
806 1560
807 down_read(&current->mm->mmap_sem); 1561 down_read(&current->mm->mmap_sem);
808 err = get_futex_key(uaddr, &q->key); 1562 err = get_futex_key(uaddr, &q->key);
@@ -840,7 +1594,7 @@ error:
840 * Implementation: user-space maintains a per-thread list of locks it 1594 * Implementation: user-space maintains a per-thread list of locks it
841 * is holding. Upon do_exit(), the kernel carefully walks this list, 1595 * is holding. Upon do_exit(), the kernel carefully walks this list,
842 * and marks all locks that are owned by this thread with the 1596 * and marks all locks that are owned by this thread with the
843 * FUTEX_OWNER_DEAD bit, and wakes up a waiter (if any). The list is 1597 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
844 * always manipulated with the lock held, so the list is private and 1598 * always manipulated with the lock held, so the list is private and
845 * per-thread. Userspace also maintains a per-thread 'list_op_pending' 1599 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
846 * field, to allow the kernel to clean up if the thread dies after 1600 * field, to allow the kernel to clean up if the thread dies after
@@ -915,7 +1669,7 @@ err_unlock:
915 */ 1669 */
916int handle_futex_death(u32 __user *uaddr, struct task_struct *curr) 1670int handle_futex_death(u32 __user *uaddr, struct task_struct *curr)
917{ 1671{
918 u32 uval; 1672 u32 uval, nval;
919 1673
920retry: 1674retry:
921 if (get_user(uval, uaddr)) 1675 if (get_user(uval, uaddr))
@@ -932,12 +1686,16 @@ retry:
932 * thread-death.) The rest of the cleanup is done in 1686 * thread-death.) The rest of the cleanup is done in
933 * userspace. 1687 * userspace.
934 */ 1688 */
935 if (futex_atomic_cmpxchg_inatomic(uaddr, uval, 1689 nval = futex_atomic_cmpxchg_inatomic(uaddr, uval,
936 uval | FUTEX_OWNER_DIED) != uval) 1690 uval | FUTEX_OWNER_DIED);
1691 if (nval == -EFAULT)
1692 return -1;
1693
1694 if (nval != uval)
937 goto retry; 1695 goto retry;
938 1696
939 if (uval & FUTEX_WAITERS) 1697 if (uval & FUTEX_WAITERS)
940 futex_wake((unsigned long)uaddr, 1); 1698 futex_wake(uaddr, 1);
941 } 1699 }
942 return 0; 1700 return 0;
943} 1701}
@@ -978,7 +1736,7 @@ void exit_robust_list(struct task_struct *curr)
978 while (entry != &head->list) { 1736 while (entry != &head->list) {
979 /* 1737 /*
980 * A pending lock might already be on the list, so 1738 * A pending lock might already be on the list, so
981 * dont process it twice: 1739 * don't process it twice:
982 */ 1740 */
983 if (entry != pending) 1741 if (entry != pending)
984 if (handle_futex_death((void *)entry + futex_offset, 1742 if (handle_futex_death((void *)entry + futex_offset,
@@ -999,8 +1757,8 @@ void exit_robust_list(struct task_struct *curr)
999 } 1757 }
1000} 1758}
1001 1759
1002long do_futex(unsigned long uaddr, int op, int val, unsigned long timeout, 1760long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout,
1003 unsigned long uaddr2, int val2, int val3) 1761 u32 __user *uaddr2, u32 val2, u32 val3)
1004{ 1762{
1005 int ret; 1763 int ret;
1006 1764
@@ -1024,6 +1782,15 @@ long do_futex(unsigned long uaddr, int op, int val, unsigned long timeout,
1024 case FUTEX_WAKE_OP: 1782 case FUTEX_WAKE_OP:
1025 ret = futex_wake_op(uaddr, uaddr2, val, val2, val3); 1783 ret = futex_wake_op(uaddr, uaddr2, val, val2, val3);
1026 break; 1784 break;
1785 case FUTEX_LOCK_PI:
1786 ret = futex_lock_pi(uaddr, val, timeout, val2, 0);
1787 break;
1788 case FUTEX_UNLOCK_PI:
1789 ret = futex_unlock_pi(uaddr);
1790 break;
1791 case FUTEX_TRYLOCK_PI:
1792 ret = futex_lock_pi(uaddr, 0, timeout, val2, 1);
1793 break;
1027 default: 1794 default:
1028 ret = -ENOSYS; 1795 ret = -ENOSYS;
1029 } 1796 }
@@ -1031,29 +1798,33 @@ long do_futex(unsigned long uaddr, int op, int val, unsigned long timeout,
1031} 1798}
1032 1799
1033 1800
1034asmlinkage long sys_futex(u32 __user *uaddr, int op, int val, 1801asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
1035 struct timespec __user *utime, u32 __user *uaddr2, 1802 struct timespec __user *utime, u32 __user *uaddr2,
1036 int val3) 1803 u32 val3)
1037{ 1804{
1038 struct timespec t; 1805 struct timespec t;
1039 unsigned long timeout = MAX_SCHEDULE_TIMEOUT; 1806 unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
1040 int val2 = 0; 1807 u32 val2 = 0;
1041 1808
1042 if (utime && (op == FUTEX_WAIT)) { 1809 if (utime && (op == FUTEX_WAIT || op == FUTEX_LOCK_PI)) {
1043 if (copy_from_user(&t, utime, sizeof(t)) != 0) 1810 if (copy_from_user(&t, utime, sizeof(t)) != 0)
1044 return -EFAULT; 1811 return -EFAULT;
1045 if (!timespec_valid(&t)) 1812 if (!timespec_valid(&t))
1046 return -EINVAL; 1813 return -EINVAL;
1047 timeout = timespec_to_jiffies(&t) + 1; 1814 if (op == FUTEX_WAIT)
1815 timeout = timespec_to_jiffies(&t) + 1;
1816 else {
1817 timeout = t.tv_sec;
1818 val2 = t.tv_nsec;
1819 }
1048 } 1820 }
1049 /* 1821 /*
1050 * requeue parameter in 'utime' if op == FUTEX_REQUEUE. 1822 * requeue parameter in 'utime' if op == FUTEX_REQUEUE.
1051 */ 1823 */
1052 if (op >= FUTEX_REQUEUE) 1824 if (op == FUTEX_REQUEUE || op == FUTEX_CMP_REQUEUE)
1053 val2 = (int) (unsigned long) utime; 1825 val2 = (u32) (unsigned long) utime;
1054 1826
1055 return do_futex((unsigned long)uaddr, op, val, timeout, 1827 return do_futex(uaddr, op, val, timeout, uaddr2, val2, val3);
1056 (unsigned long)uaddr2, val2, val3);
1057} 1828}
1058 1829
1059static int futexfs_get_sb(struct file_system_type *fs_type, 1830static int futexfs_get_sb(struct file_system_type *fs_type,
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index 1ab6a0ea3d..d1d92b441f 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -129,16 +129,20 @@ asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
129 unsigned long timeout = MAX_SCHEDULE_TIMEOUT; 129 unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
130 int val2 = 0; 130 int val2 = 0;
131 131
132 if (utime && (op == FUTEX_WAIT)) { 132 if (utime && (op == FUTEX_WAIT || op == FUTEX_LOCK_PI)) {
133 if (get_compat_timespec(&t, utime)) 133 if (get_compat_timespec(&t, utime))
134 return -EFAULT; 134 return -EFAULT;
135 if (!timespec_valid(&t)) 135 if (!timespec_valid(&t))
136 return -EINVAL; 136 return -EINVAL;
137 timeout = timespec_to_jiffies(&t) + 1; 137 if (op == FUTEX_WAIT)
138 timeout = timespec_to_jiffies(&t) + 1;
139 else {
140 timeout = t.tv_sec;
141 val2 = t.tv_nsec;
142 }
138 } 143 }
139 if (op >= FUTEX_REQUEUE) 144 if (op == FUTEX_REQUEUE || op == FUTEX_CMP_REQUEUE)
140 val2 = (int) (unsigned long) utime; 145 val2 = (int) (unsigned long) utime;
141 146
142 return do_futex((unsigned long)uaddr, op, val, timeout, 147 return do_futex(uaddr, op, val, timeout, uaddr2, val2, val3);
143 (unsigned long)uaddr2, val2, val3);
144} 148}
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 55601b3ce6..8d3dc29ef4 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -833,7 +833,7 @@ static void migrate_hrtimers(int cpu)
833} 833}
834#endif /* CONFIG_HOTPLUG_CPU */ 834#endif /* CONFIG_HOTPLUG_CPU */
835 835
836static int hrtimer_cpu_notify(struct notifier_block *self, 836static int __devinit hrtimer_cpu_notify(struct notifier_block *self,
837 unsigned long action, void *hcpu) 837 unsigned long action, void *hcpu)
838{ 838{
839 long cpu = (long)hcpu; 839 long cpu = (long)hcpu;
@@ -857,7 +857,7 @@ static int hrtimer_cpu_notify(struct notifier_block *self,
857 return NOTIFY_OK; 857 return NOTIFY_OK;
858} 858}
859 859
860static struct notifier_block hrtimers_nb = { 860static struct notifier_block __devinitdata hrtimers_nb = {
861 .notifier_call = hrtimer_cpu_notify, 861 .notifier_call = hrtimer_cpu_notify,
862}; 862};
863 863
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 1fbf466a29..64aab08115 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -47,11 +47,17 @@
47 47
48static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE]; 48static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
49static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE]; 49static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
50static atomic_t kprobe_count;
50 51
51DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */ 52DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
52DEFINE_SPINLOCK(kretprobe_lock); /* Protects kretprobe_inst_table */ 53DEFINE_SPINLOCK(kretprobe_lock); /* Protects kretprobe_inst_table */
53static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; 54static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
54 55
56static struct notifier_block kprobe_page_fault_nb = {
57 .notifier_call = kprobe_exceptions_notify,
58 .priority = 0x7fffffff /* we need to notified first */
59};
60
55#ifdef __ARCH_WANT_KPROBES_INSN_SLOT 61#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
56/* 62/*
57 * kprobe->ainsn.insn points to the copy of the instruction to be 63 * kprobe->ainsn.insn points to the copy of the instruction to be
@@ -368,16 +374,15 @@ static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
368*/ 374*/
369static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p) 375static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
370{ 376{
371 struct kprobe *kp;
372
373 if (p->break_handler) { 377 if (p->break_handler) {
374 list_for_each_entry_rcu(kp, &old_p->list, list) { 378 if (old_p->break_handler)
375 if (kp->break_handler) 379 return -EEXIST;
376 return -EEXIST;
377 }
378 list_add_tail_rcu(&p->list, &old_p->list); 380 list_add_tail_rcu(&p->list, &old_p->list);
381 old_p->break_handler = aggr_break_handler;
379 } else 382 } else
380 list_add_rcu(&p->list, &old_p->list); 383 list_add_rcu(&p->list, &old_p->list);
384 if (p->post_handler && !old_p->post_handler)
385 old_p->post_handler = aggr_post_handler;
381 return 0; 386 return 0;
382} 387}
383 388
@@ -390,9 +395,11 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
390 copy_kprobe(p, ap); 395 copy_kprobe(p, ap);
391 ap->addr = p->addr; 396 ap->addr = p->addr;
392 ap->pre_handler = aggr_pre_handler; 397 ap->pre_handler = aggr_pre_handler;
393 ap->post_handler = aggr_post_handler;
394 ap->fault_handler = aggr_fault_handler; 398 ap->fault_handler = aggr_fault_handler;
395 ap->break_handler = aggr_break_handler; 399 if (p->post_handler)
400 ap->post_handler = aggr_post_handler;
401 if (p->break_handler)
402 ap->break_handler = aggr_break_handler;
396 403
397 INIT_LIST_HEAD(&ap->list); 404 INIT_LIST_HEAD(&ap->list);
398 list_add_rcu(&p->list, &ap->list); 405 list_add_rcu(&p->list, &ap->list);
@@ -464,6 +471,8 @@ static int __kprobes __register_kprobe(struct kprobe *p,
464 old_p = get_kprobe(p->addr); 471 old_p = get_kprobe(p->addr);
465 if (old_p) { 472 if (old_p) {
466 ret = register_aggr_kprobe(old_p, p); 473 ret = register_aggr_kprobe(old_p, p);
474 if (!ret)
475 atomic_inc(&kprobe_count);
467 goto out; 476 goto out;
468 } 477 }
469 478
@@ -474,6 +483,10 @@ static int __kprobes __register_kprobe(struct kprobe *p,
474 hlist_add_head_rcu(&p->hlist, 483 hlist_add_head_rcu(&p->hlist,
475 &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); 484 &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
476 485
486 if (atomic_add_return(1, &kprobe_count) == \
487 (ARCH_INACTIVE_KPROBE_COUNT + 1))
488 register_page_fault_notifier(&kprobe_page_fault_nb);
489
477 arch_arm_kprobe(p); 490 arch_arm_kprobe(p);
478 491
479out: 492out:
@@ -536,14 +549,40 @@ valid_p:
536 kfree(old_p); 549 kfree(old_p);
537 } 550 }
538 arch_remove_kprobe(p); 551 arch_remove_kprobe(p);
552 } else {
553 mutex_lock(&kprobe_mutex);
554 if (p->break_handler)
555 old_p->break_handler = NULL;
556 if (p->post_handler){
557 list_for_each_entry_rcu(list_p, &old_p->list, list){
558 if (list_p->post_handler){
559 cleanup_p = 2;
560 break;
561 }
562 }
563 if (cleanup_p == 0)
564 old_p->post_handler = NULL;
565 }
566 mutex_unlock(&kprobe_mutex);
539 } 567 }
568
569 /* Call unregister_page_fault_notifier()
570 * if no probes are active
571 */
572 mutex_lock(&kprobe_mutex);
573 if (atomic_add_return(-1, &kprobe_count) == \
574 ARCH_INACTIVE_KPROBE_COUNT)
575 unregister_page_fault_notifier(&kprobe_page_fault_nb);
576 mutex_unlock(&kprobe_mutex);
577 return;
540} 578}
541 579
542static struct notifier_block kprobe_exceptions_nb = { 580static struct notifier_block kprobe_exceptions_nb = {
543 .notifier_call = kprobe_exceptions_notify, 581 .notifier_call = kprobe_exceptions_notify,
544 .priority = 0x7fffffff /* we need to notified first */ 582 .priority = 0x7fffffff /* we need to be notified first */
545}; 583};
546 584
585
547int __kprobes register_jprobe(struct jprobe *jp) 586int __kprobes register_jprobe(struct jprobe *jp)
548{ 587{
549 /* Todo: Verify probepoint is a function entry point */ 588 /* Todo: Verify probepoint is a function entry point */
@@ -652,6 +691,7 @@ static int __init init_kprobes(void)
652 INIT_HLIST_HEAD(&kprobe_table[i]); 691 INIT_HLIST_HEAD(&kprobe_table[i]);
653 INIT_HLIST_HEAD(&kretprobe_inst_table[i]); 692 INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
654 } 693 }
694 atomic_set(&kprobe_count, 0);
655 695
656 err = arch_init_kprobes(); 696 err = arch_init_kprobes();
657 if (!err) 697 if (!err)
diff --git a/kernel/module.c b/kernel/module.c
index d75275de1c..10e5b872ad 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -40,9 +40,11 @@
40#include <linux/string.h> 40#include <linux/string.h>
41#include <linux/sched.h> 41#include <linux/sched.h>
42#include <linux/mutex.h> 42#include <linux/mutex.h>
43#include <linux/unwind.h>
43#include <asm/uaccess.h> 44#include <asm/uaccess.h>
44#include <asm/semaphore.h> 45#include <asm/semaphore.h>
45#include <asm/cacheflush.h> 46#include <asm/cacheflush.h>
47#include <linux/license.h>
46 48
47#if 0 49#if 0
48#define DEBUGP printk 50#define DEBUGP printk
@@ -1051,6 +1053,8 @@ static void free_module(struct module *mod)
1051 remove_sect_attrs(mod); 1053 remove_sect_attrs(mod);
1052 mod_kobject_remove(mod); 1054 mod_kobject_remove(mod);
1053 1055
1056 unwind_remove_table(mod->unwind_info, 0);
1057
1054 /* Arch-specific cleanup. */ 1058 /* Arch-specific cleanup. */
1055 module_arch_cleanup(mod); 1059 module_arch_cleanup(mod);
1056 1060
@@ -1248,16 +1252,6 @@ static void layout_sections(struct module *mod,
1248 } 1252 }
1249} 1253}
1250 1254
1251static inline int license_is_gpl_compatible(const char *license)
1252{
1253 return (strcmp(license, "GPL") == 0
1254 || strcmp(license, "GPL v2") == 0
1255 || strcmp(license, "GPL and additional rights") == 0
1256 || strcmp(license, "Dual BSD/GPL") == 0
1257 || strcmp(license, "Dual MIT/GPL") == 0
1258 || strcmp(license, "Dual MPL/GPL") == 0);
1259}
1260
1261static void set_license(struct module *mod, const char *license) 1255static void set_license(struct module *mod, const char *license)
1262{ 1256{
1263 if (!license) 1257 if (!license)
@@ -1412,7 +1406,7 @@ static struct module *load_module(void __user *umod,
1412 unsigned int i, symindex = 0, strindex = 0, setupindex, exindex, 1406 unsigned int i, symindex = 0, strindex = 0, setupindex, exindex,
1413 exportindex, modindex, obsparmindex, infoindex, gplindex, 1407 exportindex, modindex, obsparmindex, infoindex, gplindex,
1414 crcindex, gplcrcindex, versindex, pcpuindex, gplfutureindex, 1408 crcindex, gplcrcindex, versindex, pcpuindex, gplfutureindex,
1415 gplfuturecrcindex; 1409 gplfuturecrcindex, unwindex = 0;
1416 struct module *mod; 1410 struct module *mod;
1417 long err = 0; 1411 long err = 0;
1418 void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */ 1412 void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */
@@ -1502,6 +1496,9 @@ static struct module *load_module(void __user *umod,
1502 versindex = find_sec(hdr, sechdrs, secstrings, "__versions"); 1496 versindex = find_sec(hdr, sechdrs, secstrings, "__versions");
1503 infoindex = find_sec(hdr, sechdrs, secstrings, ".modinfo"); 1497 infoindex = find_sec(hdr, sechdrs, secstrings, ".modinfo");
1504 pcpuindex = find_pcpusec(hdr, sechdrs, secstrings); 1498 pcpuindex = find_pcpusec(hdr, sechdrs, secstrings);
1499#ifdef ARCH_UNWIND_SECTION_NAME
1500 unwindex = find_sec(hdr, sechdrs, secstrings, ARCH_UNWIND_SECTION_NAME);
1501#endif
1505 1502
1506 /* Don't keep modinfo section */ 1503 /* Don't keep modinfo section */
1507 sechdrs[infoindex].sh_flags &= ~(unsigned long)SHF_ALLOC; 1504 sechdrs[infoindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
@@ -1510,6 +1507,8 @@ static struct module *load_module(void __user *umod,
1510 sechdrs[symindex].sh_flags |= SHF_ALLOC; 1507 sechdrs[symindex].sh_flags |= SHF_ALLOC;
1511 sechdrs[strindex].sh_flags |= SHF_ALLOC; 1508 sechdrs[strindex].sh_flags |= SHF_ALLOC;
1512#endif 1509#endif
1510 if (unwindex)
1511 sechdrs[unwindex].sh_flags |= SHF_ALLOC;
1513 1512
1514 /* Check module struct version now, before we try to use module. */ 1513 /* Check module struct version now, before we try to use module. */
1515 if (!check_modstruct_version(sechdrs, versindex, mod)) { 1514 if (!check_modstruct_version(sechdrs, versindex, mod)) {
@@ -1738,6 +1737,11 @@ static struct module *load_module(void __user *umod,
1738 goto arch_cleanup; 1737 goto arch_cleanup;
1739 add_sect_attrs(mod, hdr->e_shnum, secstrings, sechdrs); 1738 add_sect_attrs(mod, hdr->e_shnum, secstrings, sechdrs);
1740 1739
1740 /* Size of section 0 is 0, so this works well if no unwind info. */
1741 mod->unwind_info = unwind_add_table(mod,
1742 (void *)sechdrs[unwindex].sh_addr,
1743 sechdrs[unwindex].sh_size);
1744
1741 /* Get rid of temporary copy */ 1745 /* Get rid of temporary copy */
1742 vfree(hdr); 1746 vfree(hdr);
1743 1747
@@ -1836,6 +1840,7 @@ sys_init_module(void __user *umod,
1836 mod->state = MODULE_STATE_LIVE; 1840 mod->state = MODULE_STATE_LIVE;
1837 /* Drop initial reference. */ 1841 /* Drop initial reference. */
1838 module_put(mod); 1842 module_put(mod);
1843 unwind_remove_table(mod->unwind_info, 1);
1839 module_free(mod, mod->module_init); 1844 module_free(mod, mod->module_init);
1840 mod->module_init = NULL; 1845 mod->module_init = NULL;
1841 mod->init_size = 0; 1846 mod->init_size = 0;
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
index f4913c3769..e38e4bac97 100644
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -16,6 +16,7 @@
16#include <linux/sched.h> 16#include <linux/sched.h>
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/poison.h>
19#include <linux/spinlock.h> 20#include <linux/spinlock.h>
20#include <linux/kallsyms.h> 21#include <linux/kallsyms.h>
21#include <linux/interrupt.h> 22#include <linux/interrupt.h>
@@ -153,13 +154,13 @@ next:
153 continue; 154 continue;
154 count++; 155 count++;
155 cursor = curr->next; 156 cursor = curr->next;
156 debug_spin_lock_restore(&debug_mutex_lock, flags); 157 debug_spin_unlock_restore(&debug_mutex_lock, flags);
157 158
158 printk("\n#%03d: ", count); 159 printk("\n#%03d: ", count);
159 printk_lock(lock, filter ? 0 : 1); 160 printk_lock(lock, filter ? 0 : 1);
160 goto next; 161 goto next;
161 } 162 }
162 debug_spin_lock_restore(&debug_mutex_lock, flags); 163 debug_spin_unlock_restore(&debug_mutex_lock, flags);
163 printk("\n"); 164 printk("\n");
164} 165}
165 166
@@ -316,7 +317,7 @@ void mutex_debug_check_no_locks_held(struct task_struct *task)
316 continue; 317 continue;
317 list_del_init(curr); 318 list_del_init(curr);
318 DEBUG_OFF(); 319 DEBUG_OFF();
319 debug_spin_lock_restore(&debug_mutex_lock, flags); 320 debug_spin_unlock_restore(&debug_mutex_lock, flags);
320 321
321 printk("BUG: %s/%d, lock held at task exit time!\n", 322 printk("BUG: %s/%d, lock held at task exit time!\n",
322 task->comm, task->pid); 323 task->comm, task->pid);
@@ -325,7 +326,7 @@ void mutex_debug_check_no_locks_held(struct task_struct *task)
325 printk("exiting task is not even the owner??\n"); 326 printk("exiting task is not even the owner??\n");
326 return; 327 return;
327 } 328 }
328 debug_spin_lock_restore(&debug_mutex_lock, flags); 329 debug_spin_unlock_restore(&debug_mutex_lock, flags);
329} 330}
330 331
331/* 332/*
@@ -352,7 +353,7 @@ void mutex_debug_check_no_locks_freed(const void *from, unsigned long len)
352 continue; 353 continue;
353 list_del_init(curr); 354 list_del_init(curr);
354 DEBUG_OFF(); 355 DEBUG_OFF();
355 debug_spin_lock_restore(&debug_mutex_lock, flags); 356 debug_spin_unlock_restore(&debug_mutex_lock, flags);
356 357
357 printk("BUG: %s/%d, active lock [%p(%p-%p)] freed!\n", 358 printk("BUG: %s/%d, active lock [%p(%p-%p)] freed!\n",
358 current->comm, current->pid, lock, from, to); 359 current->comm, current->pid, lock, from, to);
@@ -362,7 +363,7 @@ void mutex_debug_check_no_locks_freed(const void *from, unsigned long len)
362 printk("freeing task is not even the owner??\n"); 363 printk("freeing task is not even the owner??\n");
363 return; 364 return;
364 } 365 }
365 debug_spin_lock_restore(&debug_mutex_lock, flags); 366 debug_spin_unlock_restore(&debug_mutex_lock, flags);
366} 367}
367 368
368/* 369/*
@@ -381,7 +382,7 @@ void debug_mutex_set_owner(struct mutex *lock,
381 382
382void debug_mutex_init_waiter(struct mutex_waiter *waiter) 383void debug_mutex_init_waiter(struct mutex_waiter *waiter)
383{ 384{
384 memset(waiter, 0x11, sizeof(*waiter)); 385 memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter));
385 waiter->magic = waiter; 386 waiter->magic = waiter;
386 INIT_LIST_HEAD(&waiter->list); 387 INIT_LIST_HEAD(&waiter->list);
387} 388}
@@ -397,7 +398,7 @@ void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter)
397void debug_mutex_free_waiter(struct mutex_waiter *waiter) 398void debug_mutex_free_waiter(struct mutex_waiter *waiter)
398{ 399{
399 DEBUG_WARN_ON(!list_empty(&waiter->list)); 400 DEBUG_WARN_ON(!list_empty(&waiter->list));
400 memset(waiter, 0x22, sizeof(*waiter)); 401 memset(waiter, MUTEX_DEBUG_FREE, sizeof(*waiter));
401} 402}
402 403
403void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, 404void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
index fd384050ac..a5196c36a5 100644
--- a/kernel/mutex-debug.h
+++ b/kernel/mutex-debug.h
@@ -46,21 +46,6 @@ extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
46extern void debug_mutex_unlock(struct mutex *lock); 46extern void debug_mutex_unlock(struct mutex *lock);
47extern void debug_mutex_init(struct mutex *lock, const char *name); 47extern void debug_mutex_init(struct mutex *lock, const char *name);
48 48
49#define debug_spin_lock(lock) \
50 do { \
51 local_irq_disable(); \
52 if (debug_mutex_on) \
53 spin_lock(lock); \
54 } while (0)
55
56#define debug_spin_unlock(lock) \
57 do { \
58 if (debug_mutex_on) \
59 spin_unlock(lock); \
60 local_irq_enable(); \
61 preempt_check_resched(); \
62 } while (0)
63
64#define debug_spin_lock_save(lock, flags) \ 49#define debug_spin_lock_save(lock, flags) \
65 do { \ 50 do { \
66 local_irq_save(flags); \ 51 local_irq_save(flags); \
@@ -68,7 +53,7 @@ extern void debug_mutex_init(struct mutex *lock, const char *name);
68 spin_lock(lock); \ 53 spin_lock(lock); \
69 } while (0) 54 } while (0)
70 55
71#define debug_spin_lock_restore(lock, flags) \ 56#define debug_spin_unlock_restore(lock, flags) \
72 do { \ 57 do { \
73 if (debug_mutex_on) \ 58 if (debug_mutex_on) \
74 spin_unlock(lock); \ 59 spin_unlock(lock); \
@@ -76,20 +61,20 @@ extern void debug_mutex_init(struct mutex *lock, const char *name);
76 preempt_check_resched(); \ 61 preempt_check_resched(); \
77 } while (0) 62 } while (0)
78 63
79#define spin_lock_mutex(lock) \ 64#define spin_lock_mutex(lock, flags) \
80 do { \ 65 do { \
81 struct mutex *l = container_of(lock, struct mutex, wait_lock); \ 66 struct mutex *l = container_of(lock, struct mutex, wait_lock); \
82 \ 67 \
83 DEBUG_WARN_ON(in_interrupt()); \ 68 DEBUG_WARN_ON(in_interrupt()); \
84 debug_spin_lock(&debug_mutex_lock); \ 69 debug_spin_lock_save(&debug_mutex_lock, flags); \
85 spin_lock(lock); \ 70 spin_lock(lock); \
86 DEBUG_WARN_ON(l->magic != l); \ 71 DEBUG_WARN_ON(l->magic != l); \
87 } while (0) 72 } while (0)
88 73
89#define spin_unlock_mutex(lock) \ 74#define spin_unlock_mutex(lock, flags) \
90 do { \ 75 do { \
91 spin_unlock(lock); \ 76 spin_unlock(lock); \
92 debug_spin_unlock(&debug_mutex_lock); \ 77 debug_spin_unlock_restore(&debug_mutex_lock, flags); \
93 } while (0) 78 } while (0)
94 79
95#define DEBUG_OFF() \ 80#define DEBUG_OFF() \
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 5449b210d9..7043db21bb 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -125,10 +125,11 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
125 struct task_struct *task = current; 125 struct task_struct *task = current;
126 struct mutex_waiter waiter; 126 struct mutex_waiter waiter;
127 unsigned int old_val; 127 unsigned int old_val;
128 unsigned long flags;
128 129
129 debug_mutex_init_waiter(&waiter); 130 debug_mutex_init_waiter(&waiter);
130 131
131 spin_lock_mutex(&lock->wait_lock); 132 spin_lock_mutex(&lock->wait_lock, flags);
132 133
133 debug_mutex_add_waiter(lock, &waiter, task->thread_info, ip); 134 debug_mutex_add_waiter(lock, &waiter, task->thread_info, ip);
134 135
@@ -157,7 +158,7 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
157 if (unlikely(state == TASK_INTERRUPTIBLE && 158 if (unlikely(state == TASK_INTERRUPTIBLE &&
158 signal_pending(task))) { 159 signal_pending(task))) {
159 mutex_remove_waiter(lock, &waiter, task->thread_info); 160 mutex_remove_waiter(lock, &waiter, task->thread_info);
160 spin_unlock_mutex(&lock->wait_lock); 161 spin_unlock_mutex(&lock->wait_lock, flags);
161 162
162 debug_mutex_free_waiter(&waiter); 163 debug_mutex_free_waiter(&waiter);
163 return -EINTR; 164 return -EINTR;
@@ -165,9 +166,9 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
165 __set_task_state(task, state); 166 __set_task_state(task, state);
166 167
167 /* didnt get the lock, go to sleep: */ 168 /* didnt get the lock, go to sleep: */
168 spin_unlock_mutex(&lock->wait_lock); 169 spin_unlock_mutex(&lock->wait_lock, flags);
169 schedule(); 170 schedule();
170 spin_lock_mutex(&lock->wait_lock); 171 spin_lock_mutex(&lock->wait_lock, flags);
171 } 172 }
172 173
173 /* got the lock - rejoice! */ 174 /* got the lock - rejoice! */
@@ -178,7 +179,7 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
178 if (likely(list_empty(&lock->wait_list))) 179 if (likely(list_empty(&lock->wait_list)))
179 atomic_set(&lock->count, 0); 180 atomic_set(&lock->count, 0);
180 181
181 spin_unlock_mutex(&lock->wait_lock); 182 spin_unlock_mutex(&lock->wait_lock, flags);
182 183
183 debug_mutex_free_waiter(&waiter); 184 debug_mutex_free_waiter(&waiter);
184 185
@@ -203,10 +204,11 @@ static fastcall noinline void
203__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__) 204__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
204{ 205{
205 struct mutex *lock = container_of(lock_count, struct mutex, count); 206 struct mutex *lock = container_of(lock_count, struct mutex, count);
207 unsigned long flags;
206 208
207 DEBUG_WARN_ON(lock->owner != current_thread_info()); 209 DEBUG_WARN_ON(lock->owner != current_thread_info());
208 210
209 spin_lock_mutex(&lock->wait_lock); 211 spin_lock_mutex(&lock->wait_lock, flags);
210 212
211 /* 213 /*
212 * some architectures leave the lock unlocked in the fastpath failure 214 * some architectures leave the lock unlocked in the fastpath failure
@@ -231,7 +233,7 @@ __mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
231 233
232 debug_mutex_clear_owner(lock); 234 debug_mutex_clear_owner(lock);
233 235
234 spin_unlock_mutex(&lock->wait_lock); 236 spin_unlock_mutex(&lock->wait_lock, flags);
235} 237}
236 238
237/* 239/*
@@ -276,9 +278,10 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__)
276static inline int __mutex_trylock_slowpath(atomic_t *lock_count) 278static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
277{ 279{
278 struct mutex *lock = container_of(lock_count, struct mutex, count); 280 struct mutex *lock = container_of(lock_count, struct mutex, count);
281 unsigned long flags;
279 int prev; 282 int prev;
280 283
281 spin_lock_mutex(&lock->wait_lock); 284 spin_lock_mutex(&lock->wait_lock, flags);
282 285
283 prev = atomic_xchg(&lock->count, -1); 286 prev = atomic_xchg(&lock->count, -1);
284 if (likely(prev == 1)) 287 if (likely(prev == 1))
@@ -287,7 +290,7 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
287 if (likely(list_empty(&lock->wait_list))) 290 if (likely(list_empty(&lock->wait_list)))
288 atomic_set(&lock->count, 0); 291 atomic_set(&lock->count, 0);
289 292
290 spin_unlock_mutex(&lock->wait_lock); 293 spin_unlock_mutex(&lock->wait_lock, flags);
291 294
292 return prev == 1; 295 return prev == 1;
293} 296}
diff --git a/kernel/mutex.h b/kernel/mutex.h
index 00fe84e7b6..0691899472 100644
--- a/kernel/mutex.h
+++ b/kernel/mutex.h
@@ -9,8 +9,10 @@
9 * !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs: 9 * !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs:
10 */ 10 */
11 11
12#define spin_lock_mutex(lock) spin_lock(lock) 12#define spin_lock_mutex(lock, flags) \
13#define spin_unlock_mutex(lock) spin_unlock(lock) 13 do { spin_lock(lock); (void)(flags); } while (0)
14#define spin_unlock_mutex(lock, flags) \
15 do { spin_unlock(lock); (void)(flags); } while (0)
14#define mutex_remove_waiter(lock, waiter, ti) \ 16#define mutex_remove_waiter(lock, waiter, ti) \
15 __list_del((waiter)->list.prev, (waiter)->list.next) 17 __list_del((waiter)->list.prev, (waiter)->list.next)
16 18
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index fc311a4673..857b4fa091 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -38,13 +38,22 @@ config PM_DEBUG
38 38
39config PM_TRACE 39config PM_TRACE
40 bool "Suspend/resume event tracing" 40 bool "Suspend/resume event tracing"
41 depends on PM && PM_DEBUG && X86_32 41 depends on PM && PM_DEBUG && X86_32 && EXPERIMENTAL
42 default y 42 default n
43 ---help--- 43 ---help---
44 This enables some cheesy code to save the last PM event point in the 44 This enables some cheesy code to save the last PM event point in the
45 RTC across reboots, so that you can debug a machine that just hangs 45 RTC across reboots, so that you can debug a machine that just hangs
46 during suspend (or more commonly, during resume). 46 during suspend (or more commonly, during resume).
47 47
48 To use this debugging feature you should attempt to suspend the machine,
49 then reboot it, then run
50
51 dmesg -s 1000000 | grep 'hash matches'
52
53 CAUTION: this option will cause your machine's real-time clock to be
54 set to an invalid time after a resume.
55
56
48config SOFTWARE_SUSPEND 57config SOFTWARE_SUSPEND
49 bool "Software Suspend" 58 bool "Software Suspend"
50 depends on PM && SWAP && (X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP) 59 depends on PM && SWAP && (X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP)
diff --git a/kernel/profile.c b/kernel/profile.c
index 68afe121e5..5a730fdb1a 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -299,7 +299,7 @@ out:
299} 299}
300 300
301#ifdef CONFIG_HOTPLUG_CPU 301#ifdef CONFIG_HOTPLUG_CPU
302static int profile_cpu_callback(struct notifier_block *info, 302static int __devinit profile_cpu_callback(struct notifier_block *info,
303 unsigned long action, void *__cpu) 303 unsigned long action, void *__cpu)
304{ 304{
305 int node, cpu = (unsigned long)__cpu; 305 int node, cpu = (unsigned long)__cpu;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 921c22ad16..335c5b932e 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -120,8 +120,18 @@ int ptrace_check_attach(struct task_struct *child, int kill)
120 120
121static int may_attach(struct task_struct *task) 121static int may_attach(struct task_struct *task)
122{ 122{
123 if (!task->mm) 123 /* May we inspect the given task?
124 return -EPERM; 124 * This check is used both for attaching with ptrace
125 * and for allowing access to sensitive information in /proc.
126 *
127 * ptrace_attach denies several cases that /proc allows
128 * because setting up the necessary parent/child relationship
129 * or halting the specified task is impossible.
130 */
131 int dumpable = 0;
132 /* Don't let security modules deny introspection */
133 if (task == current)
134 return 0;
125 if (((current->uid != task->euid) || 135 if (((current->uid != task->euid) ||
126 (current->uid != task->suid) || 136 (current->uid != task->suid) ||
127 (current->uid != task->uid) || 137 (current->uid != task->uid) ||
@@ -130,7 +140,9 @@ static int may_attach(struct task_struct *task)
130 (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE)) 140 (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
131 return -EPERM; 141 return -EPERM;
132 smp_rmb(); 142 smp_rmb();
133 if (!task->mm->dumpable && !capable(CAP_SYS_PTRACE)) 143 if (task->mm)
144 dumpable = task->mm->dumpable;
145 if (!dumpable && !capable(CAP_SYS_PTRACE))
134 return -EPERM; 146 return -EPERM;
135 147
136 return security_ptrace(current, task); 148 return security_ptrace(current, task);
@@ -176,6 +188,8 @@ repeat:
176 goto repeat; 188 goto repeat;
177 } 189 }
178 190
191 if (!task->mm)
192 goto bad;
179 /* the same process cannot be attached many times */ 193 /* the same process cannot be attached many times */
180 if (task->ptrace & PT_PTRACED) 194 if (task->ptrace & PT_PTRACED)
181 goto bad; 195 goto bad;
@@ -200,7 +214,7 @@ out:
200 return retval; 214 return retval;
201} 215}
202 216
203void __ptrace_detach(struct task_struct *child, unsigned int data) 217static inline void __ptrace_detach(struct task_struct *child, unsigned int data)
204{ 218{
205 child->exit_code = data; 219 child->exit_code = data;
206 /* .. re-parent .. */ 220 /* .. re-parent .. */
@@ -219,6 +233,7 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
219 ptrace_disable(child); 233 ptrace_disable(child);
220 234
221 write_lock_irq(&tasklist_lock); 235 write_lock_irq(&tasklist_lock);
236 /* protect against de_thread()->release_task() */
222 if (child->ptrace) 237 if (child->ptrace)
223 __ptrace_detach(child, data); 238 __ptrace_detach(child, data);
224 write_unlock_irq(&tasklist_lock); 239 write_unlock_irq(&tasklist_lock);
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 20e9710fc2..f464f5ae3f 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -182,6 +182,15 @@ long rcu_batches_completed(void)
182 return rcu_ctrlblk.completed; 182 return rcu_ctrlblk.completed;
183} 183}
184 184
185/*
186 * Return the number of RCU batches processed thus far. Useful
187 * for debug and statistics.
188 */
189long rcu_batches_completed_bh(void)
190{
191 return rcu_bh_ctrlblk.completed;
192}
193
185static void rcu_barrier_callback(struct rcu_head *notused) 194static void rcu_barrier_callback(struct rcu_head *notused)
186{ 195{
187 if (atomic_dec_and_test(&rcu_barrier_cpu_count)) 196 if (atomic_dec_and_test(&rcu_barrier_cpu_count))
@@ -539,7 +548,7 @@ static void __devinit rcu_online_cpu(int cpu)
539 tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL); 548 tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL);
540} 549}
541 550
542static int rcu_cpu_notify(struct notifier_block *self, 551static int __devinit rcu_cpu_notify(struct notifier_block *self,
543 unsigned long action, void *hcpu) 552 unsigned long action, void *hcpu)
544{ 553{
545 long cpu = (long)hcpu; 554 long cpu = (long)hcpu;
@@ -556,7 +565,7 @@ static int rcu_cpu_notify(struct notifier_block *self,
556 return NOTIFY_OK; 565 return NOTIFY_OK;
557} 566}
558 567
559static struct notifier_block rcu_nb = { 568static struct notifier_block __devinitdata rcu_nb = {
560 .notifier_call = rcu_cpu_notify, 569 .notifier_call = rcu_cpu_notify,
561}; 570};
562 571
@@ -619,6 +628,7 @@ module_param(qlowmark, int, 0);
619module_param(rsinterval, int, 0); 628module_param(rsinterval, int, 0);
620#endif 629#endif
621EXPORT_SYMBOL_GPL(rcu_batches_completed); 630EXPORT_SYMBOL_GPL(rcu_batches_completed);
631EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
622EXPORT_SYMBOL_GPL(call_rcu); 632EXPORT_SYMBOL_GPL(call_rcu);
623EXPORT_SYMBOL_GPL(call_rcu_bh); 633EXPORT_SYMBOL_GPL(call_rcu_bh);
624EXPORT_SYMBOL_GPL(synchronize_rcu); 634EXPORT_SYMBOL_GPL(synchronize_rcu);
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 8154e7589d..4d1c3d2471 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Read-Copy Update /proc-based torture test facility 2 * Read-Copy Update module-based torture test facility
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
@@ -53,6 +53,7 @@ static int stat_interval; /* Interval between stats, in seconds. */
53static int verbose; /* Print more debug info. */ 53static int verbose; /* Print more debug info. */
54static int test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */ 54static int test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */
55static int shuffle_interval = 5; /* Interval between shuffles (in sec)*/ 55static int shuffle_interval = 5; /* Interval between shuffles (in sec)*/
56static char *torture_type = "rcu"; /* What to torture. */
56 57
57module_param(nreaders, int, 0); 58module_param(nreaders, int, 0);
58MODULE_PARM_DESC(nreaders, "Number of RCU reader threads"); 59MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
@@ -64,13 +65,16 @@ module_param(test_no_idle_hz, bool, 0);
64MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs"); 65MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
65module_param(shuffle_interval, int, 0); 66module_param(shuffle_interval, int, 0);
66MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles"); 67MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
67#define TORTURE_FLAG "rcutorture: " 68module_param(torture_type, charp, 0);
69MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh)");
70
71#define TORTURE_FLAG "-torture:"
68#define PRINTK_STRING(s) \ 72#define PRINTK_STRING(s) \
69 do { printk(KERN_ALERT TORTURE_FLAG s "\n"); } while (0) 73 do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
70#define VERBOSE_PRINTK_STRING(s) \ 74#define VERBOSE_PRINTK_STRING(s) \
71 do { if (verbose) printk(KERN_ALERT TORTURE_FLAG s "\n"); } while (0) 75 do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
72#define VERBOSE_PRINTK_ERRSTRING(s) \ 76#define VERBOSE_PRINTK_ERRSTRING(s) \
73 do { if (verbose) printk(KERN_ALERT TORTURE_FLAG "!!! " s "\n"); } while (0) 77 do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)
74 78
75static char printk_buf[4096]; 79static char printk_buf[4096];
76 80
@@ -139,28 +143,6 @@ rcu_torture_free(struct rcu_torture *p)
139 spin_unlock_bh(&rcu_torture_lock); 143 spin_unlock_bh(&rcu_torture_lock);
140} 144}
141 145
142static void
143rcu_torture_cb(struct rcu_head *p)
144{
145 int i;
146 struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
147
148 if (fullstop) {
149 /* Test is ending, just drop callbacks on the floor. */
150 /* The next initialization will pick up the pieces. */
151 return;
152 }
153 i = rp->rtort_pipe_count;
154 if (i > RCU_TORTURE_PIPE_LEN)
155 i = RCU_TORTURE_PIPE_LEN;
156 atomic_inc(&rcu_torture_wcount[i]);
157 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
158 rp->rtort_mbtest = 0;
159 rcu_torture_free(rp);
160 } else
161 call_rcu(p, rcu_torture_cb);
162}
163
164struct rcu_random_state { 146struct rcu_random_state {
165 unsigned long rrs_state; 147 unsigned long rrs_state;
166 unsigned long rrs_count; 148 unsigned long rrs_count;
@@ -191,6 +173,119 @@ rcu_random(struct rcu_random_state *rrsp)
191} 173}
192 174
193/* 175/*
176 * Operations vector for selecting different types of tests.
177 */
178
179struct rcu_torture_ops {
180 void (*init)(void);
181 void (*cleanup)(void);
182 int (*readlock)(void);
183 void (*readunlock)(int idx);
184 int (*completed)(void);
185 void (*deferredfree)(struct rcu_torture *p);
186 int (*stats)(char *page);
187 char *name;
188};
189static struct rcu_torture_ops *cur_ops = NULL;
190
191/*
192 * Definitions for rcu torture testing.
193 */
194
195static int rcu_torture_read_lock(void)
196{
197 rcu_read_lock();
198 return 0;
199}
200
201static void rcu_torture_read_unlock(int idx)
202{
203 rcu_read_unlock();
204}
205
206static int rcu_torture_completed(void)
207{
208 return rcu_batches_completed();
209}
210
211static void
212rcu_torture_cb(struct rcu_head *p)
213{
214 int i;
215 struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
216
217 if (fullstop) {
218 /* Test is ending, just drop callbacks on the floor. */
219 /* The next initialization will pick up the pieces. */
220 return;
221 }
222 i = rp->rtort_pipe_count;
223 if (i > RCU_TORTURE_PIPE_LEN)
224 i = RCU_TORTURE_PIPE_LEN;
225 atomic_inc(&rcu_torture_wcount[i]);
226 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
227 rp->rtort_mbtest = 0;
228 rcu_torture_free(rp);
229 } else
230 cur_ops->deferredfree(rp);
231}
232
233static void rcu_torture_deferred_free(struct rcu_torture *p)
234{
235 call_rcu(&p->rtort_rcu, rcu_torture_cb);
236}
237
238static struct rcu_torture_ops rcu_ops = {
239 .init = NULL,
240 .cleanup = NULL,
241 .readlock = rcu_torture_read_lock,
242 .readunlock = rcu_torture_read_unlock,
243 .completed = rcu_torture_completed,
244 .deferredfree = rcu_torture_deferred_free,
245 .stats = NULL,
246 .name = "rcu"
247};
248
249/*
250 * Definitions for rcu_bh torture testing.
251 */
252
253static int rcu_bh_torture_read_lock(void)
254{
255 rcu_read_lock_bh();
256 return 0;
257}
258
259static void rcu_bh_torture_read_unlock(int idx)
260{
261 rcu_read_unlock_bh();
262}
263
264static int rcu_bh_torture_completed(void)
265{
266 return rcu_batches_completed_bh();
267}
268
269static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
270{
271 call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
272}
273
274static struct rcu_torture_ops rcu_bh_ops = {
275 .init = NULL,
276 .cleanup = NULL,
277 .readlock = rcu_bh_torture_read_lock,
278 .readunlock = rcu_bh_torture_read_unlock,
279 .completed = rcu_bh_torture_completed,
280 .deferredfree = rcu_bh_torture_deferred_free,
281 .stats = NULL,
282 .name = "rcu_bh"
283};
284
285static struct rcu_torture_ops *torture_ops[] =
286 { &rcu_ops, &rcu_bh_ops, NULL };
287
288/*
194 * RCU torture writer kthread. Repeatedly substitutes a new structure 289 * RCU torture writer kthread. Repeatedly substitutes a new structure
195 * for that pointed to by rcu_torture_current, freeing the old structure 290 * for that pointed to by rcu_torture_current, freeing the old structure
196 * after a series of grace periods (the "pipeline"). 291 * after a series of grace periods (the "pipeline").
@@ -209,8 +304,6 @@ rcu_torture_writer(void *arg)
209 304
210 do { 305 do {
211 schedule_timeout_uninterruptible(1); 306 schedule_timeout_uninterruptible(1);
212 if (rcu_batches_completed() == oldbatch)
213 continue;
214 if ((rp = rcu_torture_alloc()) == NULL) 307 if ((rp = rcu_torture_alloc()) == NULL)
215 continue; 308 continue;
216 rp->rtort_pipe_count = 0; 309 rp->rtort_pipe_count = 0;
@@ -225,10 +318,10 @@ rcu_torture_writer(void *arg)
225 i = RCU_TORTURE_PIPE_LEN; 318 i = RCU_TORTURE_PIPE_LEN;
226 atomic_inc(&rcu_torture_wcount[i]); 319 atomic_inc(&rcu_torture_wcount[i]);
227 old_rp->rtort_pipe_count++; 320 old_rp->rtort_pipe_count++;
228 call_rcu(&old_rp->rtort_rcu, rcu_torture_cb); 321 cur_ops->deferredfree(old_rp);
229 } 322 }
230 rcu_torture_current_version++; 323 rcu_torture_current_version++;
231 oldbatch = rcu_batches_completed(); 324 oldbatch = cur_ops->completed();
232 } while (!kthread_should_stop() && !fullstop); 325 } while (!kthread_should_stop() && !fullstop);
233 VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping"); 326 VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
234 while (!kthread_should_stop()) 327 while (!kthread_should_stop())
@@ -246,6 +339,7 @@ static int
246rcu_torture_reader(void *arg) 339rcu_torture_reader(void *arg)
247{ 340{
248 int completed; 341 int completed;
342 int idx;
249 DEFINE_RCU_RANDOM(rand); 343 DEFINE_RCU_RANDOM(rand);
250 struct rcu_torture *p; 344 struct rcu_torture *p;
251 int pipe_count; 345 int pipe_count;
@@ -254,12 +348,12 @@ rcu_torture_reader(void *arg)
254 set_user_nice(current, 19); 348 set_user_nice(current, 19);
255 349
256 do { 350 do {
257 rcu_read_lock(); 351 idx = cur_ops->readlock();
258 completed = rcu_batches_completed(); 352 completed = cur_ops->completed();
259 p = rcu_dereference(rcu_torture_current); 353 p = rcu_dereference(rcu_torture_current);
260 if (p == NULL) { 354 if (p == NULL) {
261 /* Wait for rcu_torture_writer to get underway */ 355 /* Wait for rcu_torture_writer to get underway */
262 rcu_read_unlock(); 356 cur_ops->readunlock(idx);
263 schedule_timeout_interruptible(HZ); 357 schedule_timeout_interruptible(HZ);
264 continue; 358 continue;
265 } 359 }
@@ -273,14 +367,14 @@ rcu_torture_reader(void *arg)
273 pipe_count = RCU_TORTURE_PIPE_LEN; 367 pipe_count = RCU_TORTURE_PIPE_LEN;
274 } 368 }
275 ++__get_cpu_var(rcu_torture_count)[pipe_count]; 369 ++__get_cpu_var(rcu_torture_count)[pipe_count];
276 completed = rcu_batches_completed() - completed; 370 completed = cur_ops->completed() - completed;
277 if (completed > RCU_TORTURE_PIPE_LEN) { 371 if (completed > RCU_TORTURE_PIPE_LEN) {
278 /* Should not happen, but... */ 372 /* Should not happen, but... */
279 completed = RCU_TORTURE_PIPE_LEN; 373 completed = RCU_TORTURE_PIPE_LEN;
280 } 374 }
281 ++__get_cpu_var(rcu_torture_batch)[completed]; 375 ++__get_cpu_var(rcu_torture_batch)[completed];
282 preempt_enable(); 376 preempt_enable();
283 rcu_read_unlock(); 377 cur_ops->readunlock(idx);
284 schedule(); 378 schedule();
285 } while (!kthread_should_stop() && !fullstop); 379 } while (!kthread_should_stop() && !fullstop);
286 VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping"); 380 VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
@@ -311,7 +405,7 @@ rcu_torture_printk(char *page)
311 if (pipesummary[i] != 0) 405 if (pipesummary[i] != 0)
312 break; 406 break;
313 } 407 }
314 cnt += sprintf(&page[cnt], "rcutorture: "); 408 cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
315 cnt += sprintf(&page[cnt], 409 cnt += sprintf(&page[cnt],
316 "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d " 410 "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d "
317 "rtmbe: %d", 411 "rtmbe: %d",
@@ -324,7 +418,7 @@ rcu_torture_printk(char *page)
324 atomic_read(&n_rcu_torture_mberror)); 418 atomic_read(&n_rcu_torture_mberror));
325 if (atomic_read(&n_rcu_torture_mberror) != 0) 419 if (atomic_read(&n_rcu_torture_mberror) != 0)
326 cnt += sprintf(&page[cnt], " !!!"); 420 cnt += sprintf(&page[cnt], " !!!");
327 cnt += sprintf(&page[cnt], "\nrcutorture: "); 421 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
328 if (i > 1) { 422 if (i > 1) {
329 cnt += sprintf(&page[cnt], "!!! "); 423 cnt += sprintf(&page[cnt], "!!! ");
330 atomic_inc(&n_rcu_torture_error); 424 atomic_inc(&n_rcu_torture_error);
@@ -332,17 +426,19 @@ rcu_torture_printk(char *page)
332 cnt += sprintf(&page[cnt], "Reader Pipe: "); 426 cnt += sprintf(&page[cnt], "Reader Pipe: ");
333 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 427 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
334 cnt += sprintf(&page[cnt], " %ld", pipesummary[i]); 428 cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
335 cnt += sprintf(&page[cnt], "\nrcutorture: "); 429 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
336 cnt += sprintf(&page[cnt], "Reader Batch: "); 430 cnt += sprintf(&page[cnt], "Reader Batch: ");
337 for (i = 0; i < RCU_TORTURE_PIPE_LEN; i++) 431 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
338 cnt += sprintf(&page[cnt], " %ld", batchsummary[i]); 432 cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
339 cnt += sprintf(&page[cnt], "\nrcutorture: "); 433 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
340 cnt += sprintf(&page[cnt], "Free-Block Circulation: "); 434 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
341 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 435 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
342 cnt += sprintf(&page[cnt], " %d", 436 cnt += sprintf(&page[cnt], " %d",
343 atomic_read(&rcu_torture_wcount[i])); 437 atomic_read(&rcu_torture_wcount[i]));
344 } 438 }
345 cnt += sprintf(&page[cnt], "\n"); 439 cnt += sprintf(&page[cnt], "\n");
440 if (cur_ops->stats != NULL)
441 cnt += cur_ops->stats(&page[cnt]);
346 return cnt; 442 return cnt;
347} 443}
348 444
@@ -444,11 +540,11 @@ rcu_torture_shuffle(void *arg)
444static inline void 540static inline void
445rcu_torture_print_module_parms(char *tag) 541rcu_torture_print_module_parms(char *tag)
446{ 542{
447 printk(KERN_ALERT TORTURE_FLAG "--- %s: nreaders=%d " 543 printk(KERN_ALERT "%s" TORTURE_FLAG "--- %s: nreaders=%d "
448 "stat_interval=%d verbose=%d test_no_idle_hz=%d " 544 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
449 "shuffle_interval = %d\n", 545 "shuffle_interval = %d\n",
450 tag, nrealreaders, stat_interval, verbose, test_no_idle_hz, 546 torture_type, tag, nrealreaders, stat_interval, verbose,
451 shuffle_interval); 547 test_no_idle_hz, shuffle_interval);
452} 548}
453 549
454static void 550static void
@@ -493,6 +589,9 @@ rcu_torture_cleanup(void)
493 rcu_barrier(); 589 rcu_barrier();
494 590
495 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */ 591 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
592
593 if (cur_ops->cleanup != NULL)
594 cur_ops->cleanup();
496 if (atomic_read(&n_rcu_torture_error)) 595 if (atomic_read(&n_rcu_torture_error))
497 rcu_torture_print_module_parms("End of test: FAILURE"); 596 rcu_torture_print_module_parms("End of test: FAILURE");
498 else 597 else
@@ -508,6 +607,20 @@ rcu_torture_init(void)
508 607
509 /* Process args and tell the world that the torturer is on the job. */ 608 /* Process args and tell the world that the torturer is on the job. */
510 609
610 for (i = 0; cur_ops = torture_ops[i], cur_ops != NULL; i++) {
611 cur_ops = torture_ops[i];
612 if (strcmp(torture_type, cur_ops->name) == 0) {
613 break;
614 }
615 }
616 if (cur_ops == NULL) {
617 printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
618 torture_type);
619 return (-EINVAL);
620 }
621 if (cur_ops->init != NULL)
622 cur_ops->init(); /* no "goto unwind" prior to this point!!! */
623
511 if (nreaders >= 0) 624 if (nreaders >= 0)
512 nrealreaders = nreaders; 625 nrealreaders = nreaders;
513 else 626 else
diff --git a/kernel/resource.c b/kernel/resource.c
index e3080fcc66..2404f9b0bc 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -232,6 +232,44 @@ int release_resource(struct resource *old)
232 232
233EXPORT_SYMBOL(release_resource); 233EXPORT_SYMBOL(release_resource);
234 234
235#ifdef CONFIG_MEMORY_HOTPLUG
236/*
237 * Finds the lowest memory reosurce exists within [res->start.res->end)
238 * the caller must specify res->start, res->end, res->flags.
239 * If found, returns 0, res is overwritten, if not found, returns -1.
240 */
241int find_next_system_ram(struct resource *res)
242{
243 resource_size_t start, end;
244 struct resource *p;
245
246 BUG_ON(!res);
247
248 start = res->start;
249 end = res->end;
250
251 read_lock(&resource_lock);
252 for (p = iomem_resource.child; p ; p = p->sibling) {
253 /* system ram is just marked as IORESOURCE_MEM */
254 if (p->flags != res->flags)
255 continue;
256 if (p->start > end) {
257 p = NULL;
258 break;
259 }
260 if (p->start >= start)
261 break;
262 }
263 read_unlock(&resource_lock);
264 if (!p)
265 return -1;
266 /* copy data */
267 res->start = p->start;
268 res->end = p->end;
269 return 0;
270}
271#endif
272
235/* 273/*
236 * Find empty slot in the resource tree given range and alignment. 274 * Find empty slot in the resource tree given range and alignment.
237 */ 275 */
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c
new file mode 100644
index 0000000000..4aa8a2c9f4
--- /dev/null
+++ b/kernel/rtmutex-debug.c
@@ -0,0 +1,513 @@
1/*
2 * RT-Mutexes: blocking mutual exclusion locks with PI support
3 *
4 * started by Ingo Molnar and Thomas Gleixner:
5 *
6 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
7 * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
8 *
9 * This code is based on the rt.c implementation in the preempt-rt tree.
10 * Portions of said code are
11 *
12 * Copyright (C) 2004 LynuxWorks, Inc., Igor Manyilov, Bill Huey
13 * Copyright (C) 2006 Esben Nielsen
14 * Copyright (C) 2006 Kihon Technologies Inc.,
15 * Steven Rostedt <rostedt@goodmis.org>
16 *
17 * See rt.c in preempt-rt for proper credits and further information
18 */
19#include <linux/config.h>
20#include <linux/sched.h>
21#include <linux/delay.h>
22#include <linux/module.h>
23#include <linux/spinlock.h>
24#include <linux/kallsyms.h>
25#include <linux/syscalls.h>
26#include <linux/interrupt.h>
27#include <linux/plist.h>
28#include <linux/fs.h>
29
30#include "rtmutex_common.h"
31
32#ifdef CONFIG_DEBUG_RT_MUTEXES
33# include "rtmutex-debug.h"
34#else
35# include "rtmutex.h"
36#endif
37
38# define TRACE_WARN_ON(x) WARN_ON(x)
39# define TRACE_BUG_ON(x) BUG_ON(x)
40
41# define TRACE_OFF() \
42do { \
43 if (rt_trace_on) { \
44 rt_trace_on = 0; \
45 console_verbose(); \
46 if (spin_is_locked(&current->pi_lock)) \
47 spin_unlock(&current->pi_lock); \
48 if (spin_is_locked(&current->held_list_lock)) \
49 spin_unlock(&current->held_list_lock); \
50 } \
51} while (0)
52
53# define TRACE_OFF_NOLOCK() \
54do { \
55 if (rt_trace_on) { \
56 rt_trace_on = 0; \
57 console_verbose(); \
58 } \
59} while (0)
60
61# define TRACE_BUG_LOCKED() \
62do { \
63 TRACE_OFF(); \
64 BUG(); \
65} while (0)
66
67# define TRACE_WARN_ON_LOCKED(c) \
68do { \
69 if (unlikely(c)) { \
70 TRACE_OFF(); \
71 WARN_ON(1); \
72 } \
73} while (0)
74
75# define TRACE_BUG_ON_LOCKED(c) \
76do { \
77 if (unlikely(c)) \
78 TRACE_BUG_LOCKED(); \
79} while (0)
80
81#ifdef CONFIG_SMP
82# define SMP_TRACE_BUG_ON_LOCKED(c) TRACE_BUG_ON_LOCKED(c)
83#else
84# define SMP_TRACE_BUG_ON_LOCKED(c) do { } while (0)
85#endif
86
87/*
88 * deadlock detection flag. We turn it off when we detect
89 * the first problem because we dont want to recurse back
90 * into the tracing code when doing error printk or
91 * executing a BUG():
92 */
93int rt_trace_on = 1;
94
95void deadlock_trace_off(void)
96{
97 rt_trace_on = 0;
98}
99
100static void printk_task(task_t *p)
101{
102 if (p)
103 printk("%16s:%5d [%p, %3d]", p->comm, p->pid, p, p->prio);
104 else
105 printk("<none>");
106}
107
108static void printk_task_short(task_t *p)
109{
110 if (p)
111 printk("%s/%d [%p, %3d]", p->comm, p->pid, p, p->prio);
112 else
113 printk("<none>");
114}
115
116static void printk_lock(struct rt_mutex *lock, int print_owner)
117{
118 if (lock->name)
119 printk(" [%p] {%s}\n",
120 lock, lock->name);
121 else
122 printk(" [%p] {%s:%d}\n",
123 lock, lock->file, lock->line);
124
125 if (print_owner && rt_mutex_owner(lock)) {
126 printk(".. ->owner: %p\n", lock->owner);
127 printk(".. held by: ");
128 printk_task(rt_mutex_owner(lock));
129 printk("\n");
130 }
131 if (rt_mutex_owner(lock)) {
132 printk("... acquired at: ");
133 print_symbol("%s\n", lock->acquire_ip);
134 }
135}
136
137static void printk_waiter(struct rt_mutex_waiter *w)
138{
139 printk("-------------------------\n");
140 printk("| waiter struct %p:\n", w);
141 printk("| w->list_entry: [DP:%p/%p|SP:%p/%p|PRI:%d]\n",
142 w->list_entry.plist.prio_list.prev, w->list_entry.plist.prio_list.next,
143 w->list_entry.plist.node_list.prev, w->list_entry.plist.node_list.next,
144 w->list_entry.prio);
145 printk("| w->pi_list_entry: [DP:%p/%p|SP:%p/%p|PRI:%d]\n",
146 w->pi_list_entry.plist.prio_list.prev, w->pi_list_entry.plist.prio_list.next,
147 w->pi_list_entry.plist.node_list.prev, w->pi_list_entry.plist.node_list.next,
148 w->pi_list_entry.prio);
149 printk("\n| lock:\n");
150 printk_lock(w->lock, 1);
151 printk("| w->ti->task:\n");
152 printk_task(w->task);
153 printk("| blocked at: ");
154 print_symbol("%s\n", w->ip);
155 printk("-------------------------\n");
156}
157
158static void show_task_locks(task_t *p)
159{
160 switch (p->state) {
161 case TASK_RUNNING: printk("R"); break;
162 case TASK_INTERRUPTIBLE: printk("S"); break;
163 case TASK_UNINTERRUPTIBLE: printk("D"); break;
164 case TASK_STOPPED: printk("T"); break;
165 case EXIT_ZOMBIE: printk("Z"); break;
166 case EXIT_DEAD: printk("X"); break;
167 default: printk("?"); break;
168 }
169 printk_task(p);
170 if (p->pi_blocked_on) {
171 struct rt_mutex *lock = p->pi_blocked_on->lock;
172
173 printk(" blocked on:");
174 printk_lock(lock, 1);
175 } else
176 printk(" (not blocked)\n");
177}
178
179void rt_mutex_show_held_locks(task_t *task, int verbose)
180{
181 struct list_head *curr, *cursor = NULL;
182 struct rt_mutex *lock;
183 task_t *t;
184 unsigned long flags;
185 int count = 0;
186
187 if (!rt_trace_on)
188 return;
189
190 if (verbose) {
191 printk("------------------------------\n");
192 printk("| showing all locks held by: | (");
193 printk_task_short(task);
194 printk("):\n");
195 printk("------------------------------\n");
196 }
197
198next:
199 spin_lock_irqsave(&task->held_list_lock, flags);
200 list_for_each(curr, &task->held_list_head) {
201 if (cursor && curr != cursor)
202 continue;
203 lock = list_entry(curr, struct rt_mutex, held_list_entry);
204 t = rt_mutex_owner(lock);
205 WARN_ON(t != task);
206 count++;
207 cursor = curr->next;
208 spin_unlock_irqrestore(&task->held_list_lock, flags);
209
210 printk("\n#%03d: ", count);
211 printk_lock(lock, 0);
212 goto next;
213 }
214 spin_unlock_irqrestore(&task->held_list_lock, flags);
215
216 printk("\n");
217}
218
219void rt_mutex_show_all_locks(void)
220{
221 task_t *g, *p;
222 int count = 10;
223 int unlock = 1;
224
225 printk("\n");
226 printk("----------------------\n");
227 printk("| showing all tasks: |\n");
228 printk("----------------------\n");
229
230 /*
231 * Here we try to get the tasklist_lock as hard as possible,
232 * if not successful after 2 seconds we ignore it (but keep
233 * trying). This is to enable a debug printout even if a
234 * tasklist_lock-holding task deadlocks or crashes.
235 */
236retry:
237 if (!read_trylock(&tasklist_lock)) {
238 if (count == 10)
239 printk("hm, tasklist_lock locked, retrying... ");
240 if (count) {
241 count--;
242 printk(" #%d", 10-count);
243 mdelay(200);
244 goto retry;
245 }
246 printk(" ignoring it.\n");
247 unlock = 0;
248 }
249 if (count != 10)
250 printk(" locked it.\n");
251
252 do_each_thread(g, p) {
253 show_task_locks(p);
254 if (!unlock)
255 if (read_trylock(&tasklist_lock))
256 unlock = 1;
257 } while_each_thread(g, p);
258
259 printk("\n");
260
261 printk("-----------------------------------------\n");
262 printk("| showing all locks held in the system: |\n");
263 printk("-----------------------------------------\n");
264
265 do_each_thread(g, p) {
266 rt_mutex_show_held_locks(p, 0);
267 if (!unlock)
268 if (read_trylock(&tasklist_lock))
269 unlock = 1;
270 } while_each_thread(g, p);
271
272
273 printk("=============================================\n\n");
274
275 if (unlock)
276 read_unlock(&tasklist_lock);
277}
278
279void rt_mutex_debug_check_no_locks_held(task_t *task)
280{
281 struct rt_mutex_waiter *w;
282 struct list_head *curr;
283 struct rt_mutex *lock;
284
285 if (!rt_trace_on)
286 return;
287 if (!rt_prio(task->normal_prio) && rt_prio(task->prio)) {
288 printk("BUG: PI priority boost leaked!\n");
289 printk_task(task);
290 printk("\n");
291 }
292 if (list_empty(&task->held_list_head))
293 return;
294
295 spin_lock(&task->pi_lock);
296 plist_for_each_entry(w, &task->pi_waiters, pi_list_entry) {
297 TRACE_OFF();
298
299 printk("hm, PI interest held at exit time? Task:\n");
300 printk_task(task);
301 printk_waiter(w);
302 return;
303 }
304 spin_unlock(&task->pi_lock);
305
306 list_for_each(curr, &task->held_list_head) {
307 lock = list_entry(curr, struct rt_mutex, held_list_entry);
308
309 printk("BUG: %s/%d, lock held at task exit time!\n",
310 task->comm, task->pid);
311 printk_lock(lock, 1);
312 if (rt_mutex_owner(lock) != task)
313 printk("exiting task is not even the owner??\n");
314 }
315}
316
317int rt_mutex_debug_check_no_locks_freed(const void *from, unsigned long len)
318{
319 const void *to = from + len;
320 struct list_head *curr;
321 struct rt_mutex *lock;
322 unsigned long flags;
323 void *lock_addr;
324
325 if (!rt_trace_on)
326 return 0;
327
328 spin_lock_irqsave(&current->held_list_lock, flags);
329 list_for_each(curr, &current->held_list_head) {
330 lock = list_entry(curr, struct rt_mutex, held_list_entry);
331 lock_addr = lock;
332 if (lock_addr < from || lock_addr >= to)
333 continue;
334 TRACE_OFF();
335
336 printk("BUG: %s/%d, active lock [%p(%p-%p)] freed!\n",
337 current->comm, current->pid, lock, from, to);
338 dump_stack();
339 printk_lock(lock, 1);
340 if (rt_mutex_owner(lock) != current)
341 printk("freeing task is not even the owner??\n");
342 return 1;
343 }
344 spin_unlock_irqrestore(&current->held_list_lock, flags);
345
346 return 0;
347}
348
349void rt_mutex_debug_task_free(struct task_struct *task)
350{
351 WARN_ON(!plist_head_empty(&task->pi_waiters));
352 WARN_ON(task->pi_blocked_on);
353}
354
355/*
356 * We fill out the fields in the waiter to store the information about
357 * the deadlock. We print when we return. act_waiter can be NULL in
358 * case of a remove waiter operation.
359 */
360void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *act_waiter,
361 struct rt_mutex *lock)
362{
363 struct task_struct *task;
364
365 if (!rt_trace_on || detect || !act_waiter)
366 return;
367
368 task = rt_mutex_owner(act_waiter->lock);
369 if (task && task != current) {
370 act_waiter->deadlock_task_pid = task->pid;
371 act_waiter->deadlock_lock = lock;
372 }
373}
374
375void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
376{
377 struct task_struct *task;
378
379 if (!waiter->deadlock_lock || !rt_trace_on)
380 return;
381
382 task = find_task_by_pid(waiter->deadlock_task_pid);
383 if (!task)
384 return;
385
386 TRACE_OFF_NOLOCK();
387
388 printk("\n============================================\n");
389 printk( "[ BUG: circular locking deadlock detected! ]\n");
390 printk( "--------------------------------------------\n");
391 printk("%s/%d is deadlocking current task %s/%d\n\n",
392 task->comm, task->pid, current->comm, current->pid);
393
394 printk("\n1) %s/%d is trying to acquire this lock:\n",
395 current->comm, current->pid);
396 printk_lock(waiter->lock, 1);
397
398 printk("... trying at: ");
399 print_symbol("%s\n", waiter->ip);
400
401 printk("\n2) %s/%d is blocked on this lock:\n", task->comm, task->pid);
402 printk_lock(waiter->deadlock_lock, 1);
403
404 rt_mutex_show_held_locks(current, 1);
405 rt_mutex_show_held_locks(task, 1);
406
407 printk("\n%s/%d's [blocked] stackdump:\n\n", task->comm, task->pid);
408 show_stack(task, NULL);
409 printk("\n%s/%d's [current] stackdump:\n\n",
410 current->comm, current->pid);
411 dump_stack();
412 rt_mutex_show_all_locks();
413 printk("[ turning off deadlock detection."
414 "Please report this trace. ]\n\n");
415 local_irq_disable();
416}
417
418void debug_rt_mutex_lock(struct rt_mutex *lock __IP_DECL__)
419{
420 unsigned long flags;
421
422 if (rt_trace_on) {
423 TRACE_WARN_ON_LOCKED(!list_empty(&lock->held_list_entry));
424
425 spin_lock_irqsave(&current->held_list_lock, flags);
426 list_add_tail(&lock->held_list_entry, &current->held_list_head);
427 spin_unlock_irqrestore(&current->held_list_lock, flags);
428
429 lock->acquire_ip = ip;
430 }
431}
432
433void debug_rt_mutex_unlock(struct rt_mutex *lock)
434{
435 unsigned long flags;
436
437 if (rt_trace_on) {
438 TRACE_WARN_ON_LOCKED(rt_mutex_owner(lock) != current);
439 TRACE_WARN_ON_LOCKED(list_empty(&lock->held_list_entry));
440
441 spin_lock_irqsave(&current->held_list_lock, flags);
442 list_del_init(&lock->held_list_entry);
443 spin_unlock_irqrestore(&current->held_list_lock, flags);
444 }
445}
446
447void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
448 struct task_struct *powner __IP_DECL__)
449{
450 unsigned long flags;
451
452 if (rt_trace_on) {
453 TRACE_WARN_ON_LOCKED(!list_empty(&lock->held_list_entry));
454
455 spin_lock_irqsave(&powner->held_list_lock, flags);
456 list_add_tail(&lock->held_list_entry, &powner->held_list_head);
457 spin_unlock_irqrestore(&powner->held_list_lock, flags);
458
459 lock->acquire_ip = ip;
460 }
461}
462
463void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock)
464{
465 unsigned long flags;
466
467 if (rt_trace_on) {
468 struct task_struct *owner = rt_mutex_owner(lock);
469
470 TRACE_WARN_ON_LOCKED(!owner);
471 TRACE_WARN_ON_LOCKED(list_empty(&lock->held_list_entry));
472
473 spin_lock_irqsave(&owner->held_list_lock, flags);
474 list_del_init(&lock->held_list_entry);
475 spin_unlock_irqrestore(&owner->held_list_lock, flags);
476 }
477}
478
479void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
480{
481 memset(waiter, 0x11, sizeof(*waiter));
482 plist_node_init(&waiter->list_entry, MAX_PRIO);
483 plist_node_init(&waiter->pi_list_entry, MAX_PRIO);
484}
485
486void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
487{
488 TRACE_WARN_ON(!plist_node_empty(&waiter->list_entry));
489 TRACE_WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
490 TRACE_WARN_ON(waiter->task);
491 memset(waiter, 0x22, sizeof(*waiter));
492}
493
494void debug_rt_mutex_init(struct rt_mutex *lock, const char *name)
495{
496 void *addr = lock;
497
498 if (rt_trace_on) {
499 rt_mutex_debug_check_no_locks_freed(addr,
500 sizeof(struct rt_mutex));
501 INIT_LIST_HEAD(&lock->held_list_entry);
502 lock->name = name;
503 }
504}
505
506void rt_mutex_deadlock_account_lock(struct rt_mutex *lock, task_t *task)
507{
508}
509
510void rt_mutex_deadlock_account_unlock(struct task_struct *task)
511{
512}
513
diff --git a/kernel/rtmutex-debug.h b/kernel/rtmutex-debug.h
new file mode 100644
index 0000000000..7612fbc62d
--- /dev/null
+++ b/kernel/rtmutex-debug.h
@@ -0,0 +1,37 @@
/*
 * RT-Mutexes: blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner:
 *
 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 * This file contains macros used solely by rtmutex.c. Debug version.
 */

/*
 * In the debug build the lock entry points carry the caller's
 * instruction pointer as an extra trailing argument, so the debug code
 * can record where a lock was acquired.
 */
#define __IP_DECL__		, unsigned long ip
#define __IP__			, ip
#define __RET_IP__		, (unsigned long)__builtin_return_address(0)

extern void
rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task);
extern void rt_mutex_deadlock_account_unlock(struct task_struct *task);
extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter);
extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name);
extern void debug_rt_mutex_lock(struct rt_mutex *lock __IP_DECL__);
extern void debug_rt_mutex_unlock(struct rt_mutex *lock);
extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
				      struct task_struct *powner __IP_DECL__);
extern void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock);
extern void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *waiter,
				    struct rt_mutex *lock);
extern void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter);
# define debug_rt_mutex_reset_waiter(w)			\
	do { (w)->deadlock_lock = NULL; } while (0)

/*
 * Debug builds do deadlock detection whenever the caller actually has a
 * waiter, regardless of the @detect request.
 */
static inline int debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
						 int detect)
{
	return (waiter != NULL);
}
diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
new file mode 100644
index 0000000000..e82c2f8482
--- /dev/null
+++ b/kernel/rtmutex-tester.c
@@ -0,0 +1,440 @@
1/*
2 * RT-Mutex-tester: scriptable tester for rt mutexes
3 *
4 * started by Thomas Gleixner:
5 *
6 * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
7 *
8 */
9#include <linux/config.h>
10#include <linux/kthread.h>
11#include <linux/module.h>
12#include <linux/sched.h>
13#include <linux/smp_lock.h>
14#include <linux/spinlock.h>
15#include <linux/sysdev.h>
16#include <linux/timer.h>
17
18#include "rtmutex.h"
19
#define MAX_RT_TEST_THREADS	8
#define MAX_RT_TEST_MUTEXES	8

static spinlock_t rttest_lock;
/* Global event counter, incremented around every lock/unlock step */
static atomic_t rttest_event;

/* Per test-thread state, exported through one sysdev per thread */
struct test_thread_data {
	int		opcode;			/* current/last opcode, or its result */
	int		opdata;			/* opcode argument (lock index, prio, ...) */
	int		mutexes[MAX_RT_TEST_MUTEXES]; /* per-mutex test state */
	int		bkl;			/* BKL test state */
	int		event;			/* rttest_event snapshot at last step */
	struct sys_device sysdev;
};

static struct test_thread_data thread_data[MAX_RT_TEST_THREADS];
static task_t *threads[MAX_RT_TEST_THREADS];
static struct rt_mutex mutexes[MAX_RT_TEST_MUTEXES];

enum test_opcodes {
	RTTEST_NOP = 0,
	RTTEST_SCHEDOT,		/* 1 Sched other, data = nice */
	RTTEST_SCHEDRT,		/* 2 Sched fifo, data = prio */
	RTTEST_LOCK,		/* 3 Lock uninterruptible, data = lockindex */
	RTTEST_LOCKNOWAIT,	/* 4 Lock uninterruptible no wait in wakeup, data = lockindex */
	RTTEST_LOCKINT,		/* 5 Lock interruptible, data = lockindex */
	RTTEST_LOCKINTNOWAIT,	/* 6 Lock interruptible no wait in wakeup, data = lockindex */
	RTTEST_LOCKCONT,	/* 7 Continue locking after the wakeup delay */
	RTTEST_UNLOCK,		/* 8 Unlock, data = lockindex */
	RTTEST_LOCKBKL,		/* 9 Lock BKL */
	RTTEST_UNLOCKBKL,	/* 10 Unlock BKL */
	RTTEST_SIGNAL,		/* 11 Signal other test thread, data = thread id */
	RTTEST_RESETEVENT = 98,	/* 98 Reset event counter */
	RTTEST_RESET = 99,	/* 99 Reset all pending operations */
};
55
56static int handle_op(struct test_thread_data *td, int lockwakeup)
57{
58 int i, id, ret = -EINVAL;
59
60 switch(td->opcode) {
61
62 case RTTEST_NOP:
63 return 0;
64
65 case RTTEST_LOCKCONT:
66 td->mutexes[td->opdata] = 1;
67 td->event = atomic_add_return(1, &rttest_event);
68 return 0;
69
70 case RTTEST_RESET:
71 for (i = 0; i < MAX_RT_TEST_MUTEXES; i++) {
72 if (td->mutexes[i] == 4) {
73 rt_mutex_unlock(&mutexes[i]);
74 td->mutexes[i] = 0;
75 }
76 }
77
78 if (!lockwakeup && td->bkl == 4) {
79 unlock_kernel();
80 td->bkl = 0;
81 }
82 return 0;
83
84 case RTTEST_RESETEVENT:
85 atomic_set(&rttest_event, 0);
86 return 0;
87
88 default:
89 if (lockwakeup)
90 return ret;
91 }
92
93 switch(td->opcode) {
94
95 case RTTEST_LOCK:
96 case RTTEST_LOCKNOWAIT:
97 id = td->opdata;
98 if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
99 return ret;
100
101 td->mutexes[id] = 1;
102 td->event = atomic_add_return(1, &rttest_event);
103 rt_mutex_lock(&mutexes[id]);
104 td->event = atomic_add_return(1, &rttest_event);
105 td->mutexes[id] = 4;
106 return 0;
107
108 case RTTEST_LOCKINT:
109 case RTTEST_LOCKINTNOWAIT:
110 id = td->opdata;
111 if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
112 return ret;
113
114 td->mutexes[id] = 1;
115 td->event = atomic_add_return(1, &rttest_event);
116 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
117 td->event = atomic_add_return(1, &rttest_event);
118 td->mutexes[id] = ret ? 0 : 4;
119 return ret ? -EINTR : 0;
120
121 case RTTEST_UNLOCK:
122 id = td->opdata;
123 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
124 return ret;
125
126 td->event = atomic_add_return(1, &rttest_event);
127 rt_mutex_unlock(&mutexes[id]);
128 td->event = atomic_add_return(1, &rttest_event);
129 td->mutexes[id] = 0;
130 return 0;
131
132 case RTTEST_LOCKBKL:
133 if (td->bkl)
134 return 0;
135 td->bkl = 1;
136 lock_kernel();
137 td->bkl = 4;
138 return 0;
139
140 case RTTEST_UNLOCKBKL:
141 if (td->bkl != 4)
142 break;
143 unlock_kernel();
144 td->bkl = 0;
145 return 0;
146
147 default:
148 break;
149 }
150 return ret;
151}
152
/*
 * Schedule replacement for rtsem_down(). Only called for threads with
 * PF_MUTEX_TESTER set.
 *
 * This allows us to have finegrained control over the event flow.
 *
 */
void schedule_rt_mutex_test(struct rt_mutex *mutex)
{
	int tid, op, dat;
	struct test_thread_data *td;

	/* We have to lookup the task */
	for (tid = 0; tid < MAX_RT_TEST_THREADS; tid++) {
		if (threads[tid] == current)
			break;
	}

	BUG_ON(tid == MAX_RT_TEST_THREADS);

	td = &thread_data[tid];

	op = td->opcode;
	dat = td->opdata;

	/* Before blocking: advance state 1 -> 2 for the lock being waited on */
	switch (op) {
	case RTTEST_LOCK:
	case RTTEST_LOCKINT:
	case RTTEST_LOCKNOWAIT:
	case RTTEST_LOCKINTNOWAIT:
		if (mutex != &mutexes[dat])
			break;

		if (td->mutexes[dat] != 1)
			break;

		td->mutexes[dat] = 2;
		td->event = atomic_add_return(1, &rttest_event);
		break;

	case RTTEST_LOCKBKL:
	default:
		break;
	}

	schedule();


	/* After wakeup: the NOWAIT variants return immediately (state back
	 * to 1), the waiting variants go to state 3 and then accept further
	 * commands below until RTTEST_LOCKCONT resumes the lock attempt. */
	switch (op) {
	case RTTEST_LOCK:
	case RTTEST_LOCKINT:
		if (mutex != &mutexes[dat])
			return;

		if (td->mutexes[dat] != 2)
			return;

		td->mutexes[dat] = 3;
		td->event = atomic_add_return(1, &rttest_event);
		break;

	case RTTEST_LOCKNOWAIT:
	case RTTEST_LOCKINTNOWAIT:
		if (mutex != &mutexes[dat])
			return;

		if (td->mutexes[dat] != 2)
			return;

		td->mutexes[dat] = 1;
		td->event = atomic_add_return(1, &rttest_event);
		return;

	case RTTEST_LOCKBKL:
		return;
	default:
		return;
	}

	td->opcode = 0;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (td->opcode > 0) {
			int ret;

			set_current_state(TASK_RUNNING);
			ret = handle_op(td, 1);
			set_current_state(TASK_INTERRUPTIBLE);
			if (td->opcode == RTTEST_LOCKCONT)
				break;
			td->opcode = ret;
		}

		/* Wait for the next command to be executed */
		schedule();
	}

	/* Restore previous command and data */
	td->opcode = op;
	td->opdata = dat;
}
256
/*
 * Main loop of each tester kthread: sleep interruptibly, execute the
 * opcode posted via sysfs when woken, store the result back in
 * td->opcode, and exit when the kthread is stopped.  SIGHUP is allowed
 * so RTTEST_SIGNAL can interrupt an interruptible lock attempt.
 */
static int test_func(void *data)
{
	struct test_thread_data *td = data;
	int ret;

	/* Mark us so schedule_rt_mutex_test() is used instead of schedule() */
	current->flags |= PF_MUTEX_TESTER;
	allow_signal(SIGHUP);

	for(;;) {

		set_current_state(TASK_INTERRUPTIBLE);

		if (td->opcode > 0) {
			set_current_state(TASK_RUNNING);
			ret = handle_op(td, 0);
			set_current_state(TASK_INTERRUPTIBLE);
			td->opcode = ret;
		}

		/* Wait for the next command to be executed */
		schedule();

		if (signal_pending(current))
			flush_signals(current);

		if(kthread_should_stop())
			break;
	}
	return 0;
}
287
288/**
289 * sysfs_test_command - interface for test commands
290 * @dev: thread reference
291 * @buf: command for actual step
292 * @count: length of buffer
293 *
294 * command syntax:
295 *
296 * opcode:data
297 */
298static ssize_t sysfs_test_command(struct sys_device *dev, const char *buf,
299 size_t count)
300{
301 struct sched_param schedpar;
302 struct test_thread_data *td;
303 char cmdbuf[32];
304 int op, dat, tid, ret;
305
306 td = container_of(dev, struct test_thread_data, sysdev);
307 tid = td->sysdev.id;
308
309 /* strings from sysfs write are not 0 terminated! */
310 if (count >= sizeof(cmdbuf))
311 return -EINVAL;
312
313 /* strip of \n: */
314 if (buf[count-1] == '\n')
315 count--;
316 if (count < 1)
317 return -EINVAL;
318
319 memcpy(cmdbuf, buf, count);
320 cmdbuf[count] = 0;
321
322 if (sscanf(cmdbuf, "%d:%d", &op, &dat) != 2)
323 return -EINVAL;
324
325 switch (op) {
326 case RTTEST_SCHEDOT:
327 schedpar.sched_priority = 0;
328 ret = sched_setscheduler(threads[tid], SCHED_NORMAL, &schedpar);
329 if (ret)
330 return ret;
331 set_user_nice(current, 0);
332 break;
333
334 case RTTEST_SCHEDRT:
335 schedpar.sched_priority = dat;
336 ret = sched_setscheduler(threads[tid], SCHED_FIFO, &schedpar);
337 if (ret)
338 return ret;
339 break;
340
341 case RTTEST_SIGNAL:
342 send_sig(SIGHUP, threads[tid], 0);
343 break;
344
345 default:
346 if (td->opcode > 0)
347 return -EBUSY;
348 td->opdata = dat;
349 td->opcode = op;
350 wake_up_process(threads[tid]);
351 }
352
353 return count;
354}
355
/**
 * sysfs_test_status - sysfs interface for rt tester
 * @dev: thread to query
 * @buf: char buffer to be filled with thread status info
 *
 * Emits one line: opcode, event count, task state, (inverted) prio and
 * normal_prio, pi_blocked_on pointer, BKL state, per-mutex state digits
 * (highest index first), then the task and lock-owner pointers.
 * NOTE(review): sprintf into @buf is unbounded here - presumably relies
 * on the sysfs-provided page-sized buffer; verify against the sysdev core.
 */
static ssize_t sysfs_test_status(struct sys_device *dev, char *buf)
{
	struct test_thread_data *td;
	char *curr = buf;
	task_t *tsk;
	int i;

	td = container_of(dev, struct test_thread_data, sysdev);
	tsk = threads[td->sysdev.id];

	spin_lock(&rttest_lock);

	curr += sprintf(curr,
		"O: %4d, E:%8d, S: 0x%08lx, P: %4d, N: %4d, B: %p, K: %d, M:",
		td->opcode, td->event, tsk->state,
			(MAX_RT_PRIO - 1) - tsk->prio,
			(MAX_RT_PRIO - 1) - tsk->normal_prio,
		tsk->pi_blocked_on, td->bkl);

	for (i = MAX_RT_TEST_MUTEXES - 1; i >=0 ; i--)
		curr += sprintf(curr, "%d", td->mutexes[i]);

	spin_unlock(&rttest_lock);

	curr += sprintf(curr, ", T: %p, R: %p\n", tsk,
			mutexes[td->sysdev.id].owner);

	return curr - buf;
}
390
/* Per-thread sysfs files (root-only): status read-out and command input */
static SYSDEV_ATTR(status, 0600, sysfs_test_status, NULL);
static SYSDEV_ATTR(command, 0600, NULL, sysfs_test_command);

/* Shows up as /sys/devices/system/rttest */
static struct sysdev_class rttest_sysclass = {
	set_kset_name("rttest"),
};
397
/*
 * Start tester kthread @id and register its sysdev.  Returns 0 on
 * success or a negative errno from kthread_run()/sysdev_register().
 */
static int init_test_thread(int id)
{
	thread_data[id].sysdev.cls = &rttest_sysclass;
	thread_data[id].sysdev.id = id;

	threads[id] = kthread_run(test_func, &thread_data[id], "rt-test-%d", id);
	if (IS_ERR(threads[id]))
		return PTR_ERR(threads[id]);

	return sysdev_register(&thread_data[id].sysdev);
}
409
/*
 * Module init: set up the test mutexes, register the sysdev class and
 * spawn one tester thread (with status/command files) per slot.  Stops
 * at the first failure; already-created threads/files are left in place.
 */
static int init_rttest(void)
{
	int ret, i;

	spin_lock_init(&rttest_lock);

	for (i = 0; i < MAX_RT_TEST_MUTEXES; i++)
		rt_mutex_init(&mutexes[i]);

	ret = sysdev_class_register(&rttest_sysclass);
	if (ret)
		return ret;

	for (i = 0; i < MAX_RT_TEST_THREADS; i++) {
		ret = init_test_thread(i);
		if (ret)
			break;
		ret = sysdev_create_file(&thread_data[i].sysdev, &attr_status);
		if (ret)
			break;
		ret = sysdev_create_file(&thread_data[i].sysdev, &attr_command);
		if (ret)
			break;
	}

	printk("Initializing RT-Tester: %s\n", ret ? "Failed" : "OK" );

	return ret;
}
439
440device_initcall(init_rttest);
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
new file mode 100644
index 0000000000..45d61016da
--- /dev/null
+++ b/kernel/rtmutex.c
@@ -0,0 +1,990 @@
1/*
2 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
3 *
4 * started by Ingo Molnar and Thomas Gleixner.
5 *
6 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
7 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
8 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
9 * Copyright (C) 2006 Esben Nielsen
10 */
11#include <linux/spinlock.h>
12#include <linux/module.h>
13#include <linux/sched.h>
14#include <linux/timer.h>
15
16#include "rtmutex_common.h"
17
18#ifdef CONFIG_DEBUG_RT_MUTEXES
19# include "rtmutex-debug.h"
20#else
21# include "rtmutex.h"
22#endif
23
24/*
25 * lock->owner state tracking:
26 *
27 * lock->owner holds the task_struct pointer of the owner. Bit 0 and 1
28 * are used to keep track of the "owner is pending" and "lock has
29 * waiters" state.
30 *
31 * owner bit1 bit0
32 * NULL 0 0 lock is free (fast acquire possible)
33 * NULL 0 1 invalid state
34 * NULL 1 0 Transitional State*
35 * NULL 1 1 invalid state
36 * taskpointer 0 0 lock is held (fast release possible)
37 * taskpointer 0 1 task is pending owner
38 * taskpointer 1 0 lock is held and has waiters
39 * taskpointer 1 1 task is pending owner and lock has more waiters
40 *
41 * Pending ownership is assigned to the top (highest priority)
42 * waiter of the lock, when the lock is released. The thread is woken
43 * up and can now take the lock. Until the lock is taken (bit 0
44 * cleared) a competing higher priority thread can steal the lock
45 * which puts the woken up thread back on the waiters list.
46 *
47 * The fast atomic compare exchange based acquire and release is only
48 * possible when bit 0 and 1 of lock->owner are 0.
49 *
50 * (*) There's a small time where the owner can be NULL and the
51 * "lock has waiters" bit is set. This can happen when grabbing the lock.
52 * To prevent a cmpxchg of the owner releasing the lock, we need to set this
53 * bit before looking at the lock, hence the reason this is a transitional
54 * state.
55 */
56
/*
 * Set lock->owner to @owner with the extra state @mask (e.g.
 * RT_MUTEX_OWNER_PENDING) folded into the low bits, preserving the
 * "has waiters" bit when waiters exist.  See the state table above.
 */
static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner,
		   unsigned long mask)
{
	unsigned long val = (unsigned long)owner | mask;

	if (rt_mutex_has_waiters(lock))
		val |= RT_MUTEX_HAS_WAITERS;

	lock->owner = (struct task_struct *)val;
}
68
/* Clear the "has waiters" bit in lock->owner unconditionally */
static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}
74
/*
 * Clear the waiters bit when the wait list is actually empty -
 * try_to_take_rt_mutex() sets the bit unconditionally, so this fixes
 * it up afterwards.
 */
static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		clear_rt_mutex_waiters(lock);
}
80
/*
 * We can speed up the acquire/release, if the architecture
 * supports cmpxchg and if there's no debugging state to be set up
 */
#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
# define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
/* Atomically set RT_MUTEX_HAS_WAITERS, retrying if owner changes */
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	do {
		owner = *p;
	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}
#else
# define rt_mutex_cmpxchg(l,c,n)	(0)
/* Non-atomic variant: callers hold lock->wait_lock, which serializes */
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}
#endif
103
/*
 * Calculate task priority from the waiter list priority
 *
 * Return task->normal_prio when the waiter list is empty or when
 * the waiter is not allowed to do priority boosting
 *
 * (Lower numeric prio is higher priority, hence min().)
 */
int rt_mutex_getprio(struct task_struct *task)
{
	if (likely(!task_has_pi_waiters(task)))
		return task->normal_prio;

	return min(task_top_pi_waiter(task)->pi_list_entry.prio,
		   task->normal_prio);
}
118
/*
 * Adjust the priority of a task, after its pi_waiters got modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
	int prio = rt_mutex_getprio(task);

	/* Only touch the scheduler when something actually changed */
	if (task->prio != prio)
		rt_mutex_setprio(task, prio);
}
131
/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of task. We do not use the spin_xx_mutex() variants here as we are
 * outside of the debug path.)
 */
static void rt_mutex_adjust_prio(struct task_struct *task)
{
	unsigned long flags;

	spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	spin_unlock_irqrestore(&task->pi_lock, flags);
}
149
/*
 * Max number of times we'll walk the boosting chain:
 * (non-static: presumably admin-tunable - see the prev_max handling in
 * rt_mutex_adjust_prio_chain())
 */
int max_lock_depth = 1024;
154
/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 * Returns 0 or -EDEADLK.
 *
 * The caller must hold a reference on @task (taken via
 * get_task_struct()); it is always dropped here, on every exit path.
 */
static int rt_mutex_adjust_prio_chain(task_t *task,
				      int deadlock_detect,
				      struct rt_mutex *orig_lock,
				      struct rt_mutex_waiter *orig_waiter,
				      struct task_struct *top_task
				      __IP_DECL__)
{
	struct rt_mutex *lock;
	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
	int detect_deadlock, ret = 0, depth = 0;
	unsigned long flags;

	detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
							 deadlock_detect);

	/*
	 * The (de)boosting is a step by step approach with a lot of
	 * pitfalls. We want this to be preemptible and we want hold a
	 * maximum of two locks per step. So we have to check
	 * carefully whether things change under us.
	 */
 again:
	if (++depth > max_lock_depth) {
		static int prev_max;

		/*
		 * Print this only once. If the admin changes the limit,
		 * print a new message when reaching the limit again.
		 */
		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached "
			       "task: %s (%d)\n", max_lock_depth,
			       top_task->comm, top_task->pid);
		}
		put_task_struct(task);

		return deadlock_detect ? -EDEADLK : 0;
	}
 retry:
	/*
	 * Task can not go away as we did a get_task() before !
	 */
	spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	/*
	 * Check whether the end of the boosting chain has been
	 * reached or the state of the chain has changed while we
	 * dropped the locks.
	 */
	if (!waiter || !waiter->task)
		goto out_unlock_pi;

	if (top_waiter && (!task_has_pi_waiters(task) ||
			   top_waiter != task_top_pi_waiter(task)))
		goto out_unlock_pi;

	/*
	 * When deadlock detection is off then we check, if further
	 * priority adjustment is necessary.
	 */
	if (!detect_deadlock && waiter->list_entry.prio == task->prio)
		goto out_unlock_pi;

	/*
	 * Trylock on the wait_lock to respect the lock ordering
	 * (pi_lock nests inside); spin-retry on contention.
	 */
	lock = waiter->lock;
	if (!spin_trylock(&lock->wait_lock)) {
		spin_unlock_irqrestore(&task->pi_lock, flags);
		cpu_relax();
		goto retry;
	}

	/* Deadlock detection */
	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
		spin_unlock(&lock->wait_lock);
		ret = deadlock_detect ? -EDEADLK : 0;
		goto out_unlock_pi;
	}

	top_waiter = rt_mutex_top_waiter(lock);

	/* Requeue the waiter */
	plist_del(&waiter->list_entry, &lock->wait_list);
	waiter->list_entry.prio = task->prio;
	plist_add(&waiter->list_entry, &lock->wait_list);

	/* Release the task */
	spin_unlock_irqrestore(&task->pi_lock, flags);
	put_task_struct(task);

	/* Grab the next task */
	task = rt_mutex_owner(lock);
	spin_lock_irqsave(&task->pi_lock, flags);

	if (waiter == rt_mutex_top_waiter(lock)) {
		/* Boost the owner */
		plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);

	} else if (top_waiter == waiter) {
		/* Deboost the owner */
		plist_del(&waiter->pi_list_entry, &task->pi_waiters);
		waiter = rt_mutex_top_waiter(lock);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);
	}

	/* Hold the new owner across the next iteration */
	get_task_struct(task);
	spin_unlock_irqrestore(&task->pi_lock, flags);

	top_waiter = rt_mutex_top_waiter(lock);
	spin_unlock(&lock->wait_lock);

	if (!detect_deadlock && waiter != top_waiter)
		goto out_put_task;

	goto again;

 out_unlock_pi:
	spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
	put_task_struct(task);
	return ret;
}
288
/*
 * Optimization: check if we can steal the lock from the
 * assigned pending owner [which might not have taken the
 * lock yet]:
 *
 * Returns 1 when current may take the lock (it either is the pending
 * owner, or has higher priority than it), 0 otherwise.
 * Called with lock->wait_lock held.
 */
static inline int try_to_steal_lock(struct rt_mutex *lock)
{
	struct task_struct *pendowner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *next;
	unsigned long flags;

	if (!rt_mutex_owner_pending(lock))
		return 0;

	if (pendowner == current)
		return 1;

	spin_lock_irqsave(&pendowner->pi_lock, flags);
	/* Lower numeric prio = higher priority; equal prio does not steal */
	if (current->prio >= pendowner->prio) {
		spin_unlock_irqrestore(&pendowner->pi_lock, flags);
		return 0;
	}

	/*
	 * Check if a waiter is enqueued on the pending owners
	 * pi_waiters list. Remove it and readjust pending owners
	 * priority.
	 */
	if (likely(!rt_mutex_has_waiters(lock))) {
		spin_unlock_irqrestore(&pendowner->pi_lock, flags);
		return 1;
	}

	/* No chain handling, pending owner is not blocked on anything: */
	next = rt_mutex_top_waiter(lock);
	plist_del(&next->pi_list_entry, &pendowner->pi_waiters);
	__rt_mutex_adjust_prio(pendowner);
	spin_unlock_irqrestore(&pendowner->pi_lock, flags);

	/*
	 * We are going to steal the lock and a waiter was
	 * enqueued on the pending owners pi_waiters queue. So
	 * we have to enqueue this waiter into
	 * current->pi_waiters list. This covers the case,
	 * where current is boosted because it holds another
	 * lock and gets unboosted because the booster is
	 * interrupted, so we would delay a waiter with higher
	 * priority as current->normal_prio.
	 *
	 * Note: in the rare case of a SCHED_OTHER task changing
	 * its priority and thus stealing the lock, next->task
	 * might be current:
	 */
	if (likely(next->task != current)) {
		spin_lock_irqsave(&current->pi_lock, flags);
		plist_add(&next->pi_list_entry, &current->pi_waiters);
		__rt_mutex_adjust_prio(current);
		spin_unlock_irqrestore(&current->pi_lock, flags);
	}
	return 1;
}
350
/*
 * Try to take an rt-mutex
 *
 * This fails
 *  - when the lock has a real owner
 *  - when a different pending owner exists and has higher priority than current
 *
 * Must be called with lock->wait_lock held.
 *
 * Returns 1 when the lock was taken (current becomes owner), 0 otherwise.
 * Note: may leave RT_MUTEX_HAS_WAITERS set spuriously on failure; the
 * callers clean that up via fixup_rt_mutex_waiters().
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock __IP_DECL__)
{
	/*
	 * We have to be careful here if the atomic speedups are
	 * enabled, such that, when
	 *  - no other waiter is on the lock
	 *  - the lock has been released since we did the cmpxchg
	 * the lock can be released or taken while we are doing the
	 * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
	 *
	 * The atomic acquire/release aware variant of
	 * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
	 * the WAITERS bit, the atomic release / acquire can not
	 * happen anymore and lock->wait_lock protects us from the
	 * non-atomic case.
	 *
	 * Note, that this might set lock->owner =
	 * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
	 * any more. This is fixed up when we take the ownership.
	 * This is the transitional state explained at the top of this file.
	 */
	mark_rt_mutex_waiters(lock);

	if (rt_mutex_owner(lock) && !try_to_steal_lock(lock))
		return 0;

	/* We got the lock. */
	debug_rt_mutex_lock(lock __IP__);

	rt_mutex_set_owner(lock, current, 0);

	rt_mutex_deadlock_account_lock(lock, current);

	return 1;
}
395
/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 *
 * Returns 0, or -EDEADLK from rt_mutex_adjust_prio_chain() when
 * deadlock detection triggers.  Note: lock->wait_lock is dropped and
 * reacquired around the chain walk.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
				   struct rt_mutex_waiter *waiter,
				   int detect_deadlock
				   __IP_DECL__)
{
	struct rt_mutex_waiter *top_waiter = waiter;
	task_t *owner = rt_mutex_owner(lock);
	int boost = 0, res;
	unsigned long flags;

	/* Initialize the waiter and queue it on the lock's wait list */
	spin_lock_irqsave(&current->pi_lock, flags);
	__rt_mutex_adjust_prio(current);
	waiter->task = current;
	waiter->lock = lock;
	plist_node_init(&waiter->list_entry, current->prio);
	plist_node_init(&waiter->pi_list_entry, current->prio);

	/* Get the top priority waiter on the lock */
	if (rt_mutex_has_waiters(lock))
		top_waiter = rt_mutex_top_waiter(lock);
	plist_add(&waiter->list_entry, &lock->wait_list);

	current->pi_blocked_on = waiter;

	spin_unlock_irqrestore(&current->pi_lock, flags);

	/* If we became the top waiter, requeue on the owner's pi_waiters */
	if (waiter == rt_mutex_top_waiter(lock)) {
		spin_lock_irqsave(&owner->pi_lock, flags);
		plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
		plist_add(&waiter->pi_list_entry, &owner->pi_waiters);

		__rt_mutex_adjust_prio(owner);
		if (owner->pi_blocked_on) {
			boost = 1;
			/* gets dropped in rt_mutex_adjust_prio_chain()! */
			get_task_struct(owner);
		}
		spin_unlock_irqrestore(&owner->pi_lock, flags);
	}
	else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
		spin_lock_irqsave(&owner->pi_lock, flags);
		if (owner->pi_blocked_on) {
			boost = 1;
			/* gets dropped in rt_mutex_adjust_prio_chain()! */
			get_task_struct(owner);
		}
		spin_unlock_irqrestore(&owner->pi_lock, flags);
	}
	if (!boost)
		return 0;

	/* Walk the chain without wait_lock - see lock order comment there */
	spin_unlock(&lock->wait_lock);

	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
					 current __IP__);

	spin_lock(&lock->wait_lock);

	return res;
}
463
/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current tasks waiter list and from
 * the lock waiter list. Set it as pending owner. Then wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock)
{
	struct rt_mutex_waiter *waiter;
	struct task_struct *pendowner;
	unsigned long flags;

	spin_lock_irqsave(&current->pi_lock, flags);

	waiter = rt_mutex_top_waiter(lock);
	plist_del(&waiter->list_entry, &lock->wait_list);

	/*
	 * Remove it from current->pi_waiters. We do not adjust a
	 * possible priority boost right now. We execute wakeup in the
	 * boosted mode and go back to normal after releasing
	 * lock->wait_lock.
	 */
	plist_del(&waiter->pi_list_entry, &current->pi_waiters);
	pendowner = waiter->task;
	/* waiter->task == NULL signals "woken / no longer queued" */
	waiter->task = NULL;

	rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING);

	spin_unlock_irqrestore(&current->pi_lock, flags);

	/*
	 * Clear the pi_blocked_on variable and enqueue a possible
	 * waiter into the pi_waiters list of the pending owner. This
	 * prevents that in case the pending owner gets unboosted a
	 * waiter with higher priority than pending-owner->normal_prio
	 * is blocked on the unboosted (pending) owner.
	 */
	spin_lock_irqsave(&pendowner->pi_lock, flags);

	WARN_ON(!pendowner->pi_blocked_on);
	WARN_ON(pendowner->pi_blocked_on != waiter);
	WARN_ON(pendowner->pi_blocked_on->lock != lock);

	pendowner->pi_blocked_on = NULL;

	if (rt_mutex_has_waiters(lock)) {
		struct rt_mutex_waiter *next;

		next = rt_mutex_top_waiter(lock);
		plist_add(&next->pi_list_entry, &pendowner->pi_waiters);
	}
	spin_unlock_irqrestore(&pendowner->pi_lock, flags);

	wake_up_process(pendowner);
}
522
/*
 * Remove a waiter from a lock
 *
 * Must be called with lock->wait_lock held
 *
 * When the removed waiter was the top waiter of a lock held by another
 * task, the owner may need deboosting; in that case the pi chain is
 * re-walked (wait_lock is dropped and reacquired around the walk).
 */
static void remove_waiter(struct rt_mutex *lock,
			  struct rt_mutex_waiter *waiter __IP_DECL__)
{
	int first = (waiter == rt_mutex_top_waiter(lock));
	int boost = 0;
	task_t *owner = rt_mutex_owner(lock);
	unsigned long flags;

	spin_lock_irqsave(&current->pi_lock, flags);
	plist_del(&waiter->list_entry, &lock->wait_list);
	waiter->task = NULL;
	current->pi_blocked_on = NULL;
	spin_unlock_irqrestore(&current->pi_lock, flags);

	if (first && owner != current) {

		spin_lock_irqsave(&owner->pi_lock, flags);

		plist_del(&waiter->pi_list_entry, &owner->pi_waiters);

		/* Promote the new top waiter onto the owner's pi_waiters */
		if (rt_mutex_has_waiters(lock)) {
			struct rt_mutex_waiter *next;

			next = rt_mutex_top_waiter(lock);
			plist_add(&next->pi_list_entry, &owner->pi_waiters);
		}
		__rt_mutex_adjust_prio(owner);

		if (owner->pi_blocked_on) {
			boost = 1;
			/* gets dropped in rt_mutex_adjust_prio_chain()! */
			get_task_struct(owner);
		}
		spin_unlock_irqrestore(&owner->pi_lock, flags);
	}

	WARN_ON(!plist_node_empty(&waiter->pi_list_entry));

	if (!boost)
		return;

	spin_unlock(&lock->wait_lock);

	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current __IP__);

	spin_lock(&lock->wait_lock);
}
575
/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	unsigned long flags;

	spin_lock_irqsave(&task->pi_lock, flags);

	/* Nothing to do when not blocked, or when the prio already matches */
	waiter = task->pi_blocked_on;
	if (!waiter || waiter->list_entry.prio == task->prio) {
		spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);
	spin_unlock_irqrestore(&task->pi_lock, flags);

	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task __RET_IP__);
}
600
/*
 * Slow path lock function:
 *
 * @state: TASK_UNINTERRUPTIBLE or TASK_INTERRUPTIBLE
 * @timeout: optional hrtimer sleeper (absolute expiry), NULL for none
 * @detect_deadlock: request deadlock detection on the chain walk
 *
 * Returns 0 on acquisition, -EINTR/-ETIMEDOUT (interruptible mode
 * only) or -EDEADLK.
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  int detect_deadlock __IP_DECL__)
{
	struct rt_mutex_waiter waiter;
	int ret = 0;

	debug_rt_mutex_init_waiter(&waiter);
	waiter.task = NULL;

	spin_lock(&lock->wait_lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock __IP__)) {
		spin_unlock(&lock->wait_lock);
		return 0;
	}

	set_current_state(state);

	/* Setup the timer, when timeout != NULL */
	if (unlikely(timeout))
		hrtimer_start(&timeout->timer, timeout->timer.expires,
			      HRTIMER_ABS);

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock __IP__))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		/*
		 * waiter.task is NULL the first time we come here and
		 * when we have been woken up by the previous owner
		 * but the lock got stolen by a higher prio task.
		 */
		if (!waiter.task) {
			ret = task_blocks_on_rt_mutex(lock, &waiter,
						      detect_deadlock __IP__);
			/*
			 * If we got woken up by the owner then start loop
			 * all over without going into schedule to try
			 * to get the lock now:
			 */
			if (unlikely(!waiter.task))
				continue;

			if (unlikely(ret))
				break;
		}

		/* Sleep without wait_lock; wakeup comes from the unlocker */
		spin_unlock(&lock->wait_lock);

		debug_rt_mutex_print_deadlock(&waiter);

		if (waiter.task)
			schedule_rt_mutex(lock);

		spin_lock(&lock->wait_lock);
		set_current_state(state);
	}

	set_current_state(TASK_RUNNING);

	/* Still enqueued means we gave up (signal/timeout/deadlock) */
	if (unlikely(waiter.task))
		remove_waiter(lock, &waiter __IP__);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	spin_unlock(&lock->wait_lock);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	/*
	 * Readjust priority, when we did not get the lock. We might
	 * have been the pending owner and boosted. Since we did not
	 * take the lock, the PI boost has to go.
	 */
	if (unlikely(ret))
		rt_mutex_adjust_prio(current);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}
709
710/*
711 * Slow path try-lock function:
712 */
713static inline int
714rt_mutex_slowtrylock(struct rt_mutex *lock __IP_DECL__)
715{
716 int ret = 0;
717
718 spin_lock(&lock->wait_lock);
719
720 if (likely(rt_mutex_owner(lock) != current)) {
721
722 ret = try_to_take_rt_mutex(lock __IP__);
723 /*
724 * try_to_take_rt_mutex() sets the lock waiters
725 * bit unconditionally. Clean this up.
726 */
727 fixup_rt_mutex_waiters(lock);
728 }
729
730 spin_unlock(&lock->wait_lock);
731
732 return ret;
733}
734
/*
 * Slow path to release a rt-mutex:
 *
 * With no waiters the owner field is simply cleared; otherwise the
 * top-priority waiter is woken and handed the lock, after which our
 * own (possibly boosted) priority is re-evaluated.
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
	spin_lock(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	rt_mutex_deadlock_account_unlock(current);

	/* Uncontended: just clear the owner and we are done. */
	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		spin_unlock(&lock->wait_lock);
		return;
	}

	/* Hand the lock over to the highest-priority waiter. */
	wakeup_next_waiter(lock);

	spin_unlock(&lock->wait_lock);

	/* Undo pi boosting if necessary: */
	rt_mutex_adjust_prio(current);
}
760
761/*
762 * debug aware fast / slowpath lock,trylock,unlock
763 *
764 * The atomic acquire/release ops are compiled away, when either the
765 * architecture does not support cmpxchg or when debugging is enabled.
766 */
767static inline int
768rt_mutex_fastlock(struct rt_mutex *lock, int state,
769 int detect_deadlock,
770 int (*slowfn)(struct rt_mutex *lock, int state,
771 struct hrtimer_sleeper *timeout,
772 int detect_deadlock __IP_DECL__))
773{
774 if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
775 rt_mutex_deadlock_account_lock(lock, current);
776 return 0;
777 } else
778 return slowfn(lock, state, NULL, detect_deadlock __RET_IP__);
779}
780
781static inline int
782rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
783 struct hrtimer_sleeper *timeout, int detect_deadlock,
784 int (*slowfn)(struct rt_mutex *lock, int state,
785 struct hrtimer_sleeper *timeout,
786 int detect_deadlock __IP_DECL__))
787{
788 if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
789 rt_mutex_deadlock_account_lock(lock, current);
790 return 0;
791 } else
792 return slowfn(lock, state, timeout, detect_deadlock __RET_IP__);
793}
794
795static inline int
796rt_mutex_fasttrylock(struct rt_mutex *lock,
797 int (*slowfn)(struct rt_mutex *lock __IP_DECL__))
798{
799 if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
800 rt_mutex_deadlock_account_lock(lock, current);
801 return 1;
802 }
803 return slowfn(lock __RET_IP__);
804}
805
806static inline void
807rt_mutex_fastunlock(struct rt_mutex *lock,
808 void (*slowfn)(struct rt_mutex *lock))
809{
810 if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
811 rt_mutex_deadlock_account_unlock(current);
812 else
813 slowfn(lock);
814}
815
/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 *
 * Blocks uninterruptibly until the lock is acquired; may sleep,
 * hence the might_sleep() annotation.
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	might_sleep();

	/* No deadlock detection, no timeout. */
	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
828
/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock: the rt_mutex to be locked
 * @detect_deadlock: deadlock detection on/off
 *
 * May sleep. The sleep is interruptible by signals.
 *
 * Returns:
 *  0 on success
 * -EINTR when interrupted by a signal
 * -EDEADLK when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
						 int detect_deadlock)
{
	might_sleep();

	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
				 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
849
/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptible
 *			 the timeout structure is provided
 *			 by the caller
 *
 * @lock: the rt_mutex to be locked
 * @timeout: timeout structure or NULL (no timeout)
 * @detect_deadlock: deadlock detection on/off
 *
 * Returns:
 *  0 on success
 * -EINTR when interrupted by a signal
 * -ETIMEDOUT when the timeout expired
 * -EDEADLK when the lock would deadlock (when deadlock detection is on)
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
		    int detect_deadlock)
{
	might_sleep();

	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
				       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
875
/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 *
 * Does not sleep: uses the atomic fast path, or a single non-blocking
 * attempt under the wait_lock.
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
888
/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 *
 * The slow path wakes the next waiter and undoes any PI boost the
 * owner accumulated.
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
899
/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
	WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
	/* Poison the magic so the debug code catches later use. */
	lock->magic = NULL;
#endif
}

EXPORT_SYMBOL_GPL(rt_mutex_destroy);
917
/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 * @name: debug name for the lock, passed through to debug_rt_mutex_init()
 *	  (may be NULL)
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing of a locked rt lock is not allowed
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
	lock->owner = NULL;
	spin_lock_init(&lock->wait_lock);
	plist_head_init(&lock->wait_list, &lock->wait_lock);

	debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
936
/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner:the task to set as owner
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				struct task_struct *proxy_owner)
{
	__rt_mutex_init(lock, NULL);
	debug_rt_mutex_proxy_lock(lock, proxy_owner __RET_IP__);
	/* Owner is set directly; third argument 0 clears the state bits. */
	rt_mutex_set_owner(lock, proxy_owner, 0);
	rt_mutex_deadlock_account_lock(lock, proxy_owner);
}
955
/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:	the rt_mutex to be unlocked
 * @proxy_owner:the task which currently owns @lock and gets the
 *		unlock accounted to it
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
			   struct task_struct *proxy_owner)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL, 0);
	rt_mutex_deadlock_account_unlock(proxy_owner);
}
971
972/**
973 * rt_mutex_next_owner - return the next owner of the lock
974 *
975 * @lock: the rt lock query
976 *
977 * Returns the next owner of the lock or NULL
978 *
979 * Caller has to serialize against other accessors to the lock
980 * itself.
981 *
982 * Special API call for PI-futex support
983 */
984struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
985{
986 if (!rt_mutex_has_waiters(lock))
987 return NULL;
988
989 return rt_mutex_top_waiter(lock)->task;
990}
diff --git a/kernel/rtmutex.h b/kernel/rtmutex.h
new file mode 100644
index 0000000000..1e0fca13ff
--- /dev/null
+++ b/kernel/rtmutex.h
@@ -0,0 +1,29 @@
/*
 * RT-Mutexes: blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner:
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 * This file contains macros used solely by rtmutex.c.
 * Non-debug version.
 */

/* Call-site instrumentation arguments: empty in the non-debug build. */
#define __IP_DECL__
#define __IP__
#define __RET_IP__

/* Deadlock checking and lock accounting: compiled out here. */
#define rt_mutex_deadlock_check(l)			(0)
#define rt_mutex_deadlock_account_lock(m, t)		do { } while (0)
#define rt_mutex_deadlock_account_unlock(l)		do { } while (0)

/* Debug hooks: all no-ops in the non-debug build. */
#define debug_rt_mutex_init_waiter(w)			do { } while (0)
#define debug_rt_mutex_free_waiter(w)			do { } while (0)
#define debug_rt_mutex_lock(l)				do { } while (0)
#define debug_rt_mutex_proxy_lock(l,p)			do { } while (0)
#define debug_rt_mutex_proxy_unlock(l)			do { } while (0)
#define debug_rt_mutex_unlock(l)			do { } while (0)
#define debug_rt_mutex_init(m, n)			do { } while (0)
#define debug_rt_mutex_deadlock(d, a ,l)		do { } while (0)
#define debug_rt_mutex_print_deadlock(w)		do { } while (0)
/* Without debugging, deadlock detection is purely caller-requested. */
#define debug_rt_mutex_detect_deadlock(w,d)		(d)
#define debug_rt_mutex_reset_waiter(w)			do { } while (0)
diff --git a/kernel/rtmutex_common.h b/kernel/rtmutex_common.h
new file mode 100644
index 0000000000..9c75856e79
--- /dev/null
+++ b/kernel/rtmutex_common.h
@@ -0,0 +1,123 @@
/*
 * RT Mutexes: blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner:
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 * This file contains the private data structure and API definitions.
 */

#ifndef __KERNEL_RTMUTEX_COMMON_H
#define __KERNEL_RTMUTEX_COMMON_H

#include <linux/rtmutex.h>

/*
 * The rtmutex in kernel tester is independent of rtmutex debugging. We
 * call schedule_rt_mutex_test() instead of schedule() for the tasks which
 * belong to the tester. That way we can delay the wakeup path of those
 * threads to provoke lock stealing and testing of complex boosting scenarios.
 */
#ifdef CONFIG_RT_MUTEX_TESTER

extern void schedule_rt_mutex_test(struct rt_mutex *lock);

/* Tester tasks are flagged PF_MUTEX_TESTER and get the test hook. */
#define schedule_rt_mutex(_lock)				\
  do {								\
	if (!(current->flags & PF_MUTEX_TESTER))		\
		schedule();					\
	else							\
		schedule_rt_mutex_test(_lock);			\
  } while (0)

#else
# define schedule_rt_mutex(_lock)			schedule()
#endif
38
/*
 * This is the control structure for tasks blocked on a rt_mutex,
 * which is allocated on the kernel stack of the blocked task.
 *
 * @list_entry:		pi node to enqueue into the mutex waiters list
 * @pi_list_entry:	pi node to enqueue into the mutex owner waiters list
 * @task:		task reference to the blocked task
 * @lock:		the rt_mutex this waiter is blocked on
 */
struct rt_mutex_waiter {
	struct plist_node	list_entry;
	struct plist_node	pi_list_entry;
	struct task_struct	*task;
	struct rt_mutex		*lock;
#ifdef CONFIG_DEBUG_RT_MUTEXES
	/* Deadlock-debugging state, recorded by the debug build only. */
	unsigned long		ip;
	pid_t			deadlock_task_pid;
	struct rt_mutex		*deadlock_lock;
#endif
};
58
/*
 * Various helpers to access the waiters-plist:
 */

/* True when at least one task is blocked on @lock. */
static inline int rt_mutex_has_waiters(struct rt_mutex *lock)
{
	return !plist_head_empty(&lock->wait_list);
}

/*
 * Return the highest-priority waiter queued on @lock.
 * Only valid when rt_mutex_has_waiters(lock) is true.
 */
static inline struct rt_mutex_waiter *
rt_mutex_top_waiter(struct rt_mutex *lock)
{
	struct rt_mutex_waiter *w;

	w = plist_first_entry(&lock->wait_list, struct rt_mutex_waiter,
			       list_entry);
	/* A queued waiter must reference the lock it is queued on. */
	BUG_ON(w->lock != lock);

	return w;
}

/* True when @p's pi_waiters plist is non-empty. */
static inline int task_has_pi_waiters(struct task_struct *p)
{
	return !plist_head_empty(&p->pi_waiters);
}

/*
 * Return the highest-priority entry on @p's pi_waiters plist
 * (the pi_list_entry nodes enqueued per the struct comment above).
 */
static inline struct rt_mutex_waiter *
task_top_pi_waiter(struct task_struct *p)
{
	return plist_first_entry(&p->pi_waiters, struct rt_mutex_waiter,
				  pi_list_entry);
}
90
/*
 * lock->owner state tracking:
 *
 * The two low bits of the owner pointer are used as state flags and
 * must be masked off before the pointer is dereferenced.
 */
#define RT_MUTEX_OWNER_PENDING	1UL
#define RT_MUTEX_HAS_WAITERS	2UL
#define RT_MUTEX_OWNER_MASKALL	3UL

/* Owner task pointer with both state bits stripped (may be NULL). */
static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
{
	return (struct task_struct *)
		((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
}

/*
 * Masks only the HAS_WAITERS bit, so the PENDING bit stays in the
 * returned value and a pending owner compares unequal to the plain
 * task pointer. NOTE(review): callers appear to rely on that
 * distinction — confirm before changing the mask.
 */
static inline struct task_struct *rt_mutex_real_owner(struct rt_mutex *lock)
{
	return (struct task_struct *)
		((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

/* Nonzero when the PENDING bit is set in lock->owner. */
static inline unsigned long rt_mutex_owner_pending(struct rt_mutex *lock)
{
	return (unsigned long)lock->owner & RT_MUTEX_OWNER_PENDING;
}

/*
 * PI-futex support (proxy locking functions, etc.):
 */
extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				       struct task_struct *proxy_owner);
extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
				  struct task_struct *proxy_owner);
#endif
diff --git a/kernel/sched.c b/kernel/sched.c
index f06d059ede..2629c1711f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -168,15 +168,21 @@
168 */ 168 */
169 169
170#define SCALE_PRIO(x, prio) \ 170#define SCALE_PRIO(x, prio) \
171 max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO/2), MIN_TIMESLICE) 171 max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)
172 172
173static unsigned int task_timeslice(task_t *p) 173static unsigned int static_prio_timeslice(int static_prio)
174{ 174{
175 if (p->static_prio < NICE_TO_PRIO(0)) 175 if (static_prio < NICE_TO_PRIO(0))
176 return SCALE_PRIO(DEF_TIMESLICE*4, p->static_prio); 176 return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio);
177 else 177 else
178 return SCALE_PRIO(DEF_TIMESLICE, p->static_prio); 178 return SCALE_PRIO(DEF_TIMESLICE, static_prio);
179} 179}
180
181static inline unsigned int task_timeslice(task_t *p)
182{
183 return static_prio_timeslice(p->static_prio);
184}
185
180#define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran) \ 186#define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran) \
181 < (long long) (sd)->cache_hot_time) 187 < (long long) (sd)->cache_hot_time)
182 188
@@ -184,13 +190,11 @@ static unsigned int task_timeslice(task_t *p)
184 * These are the runqueue data structures: 190 * These are the runqueue data structures:
185 */ 191 */
186 192
187#define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
188
189typedef struct runqueue runqueue_t; 193typedef struct runqueue runqueue_t;
190 194
191struct prio_array { 195struct prio_array {
192 unsigned int nr_active; 196 unsigned int nr_active;
193 unsigned long bitmap[BITMAP_SIZE]; 197 DECLARE_BITMAP(bitmap, MAX_PRIO+1); /* include 1 bit for delimiter */
194 struct list_head queue[MAX_PRIO]; 198 struct list_head queue[MAX_PRIO];
195}; 199};
196 200
@@ -209,6 +213,7 @@ struct runqueue {
209 * remote CPUs use both these fields when doing load calculation. 213 * remote CPUs use both these fields when doing load calculation.
210 */ 214 */
211 unsigned long nr_running; 215 unsigned long nr_running;
216 unsigned long raw_weighted_load;
212#ifdef CONFIG_SMP 217#ifdef CONFIG_SMP
213 unsigned long cpu_load[3]; 218 unsigned long cpu_load[3];
214#endif 219#endif
@@ -239,7 +244,6 @@ struct runqueue {
239 244
240 task_t *migration_thread; 245 task_t *migration_thread;
241 struct list_head migration_queue; 246 struct list_head migration_queue;
242 int cpu;
243#endif 247#endif
244 248
245#ifdef CONFIG_SCHEDSTATS 249#ifdef CONFIG_SCHEDSTATS
@@ -351,11 +355,30 @@ static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
351#endif /* __ARCH_WANT_UNLOCKED_CTXSW */ 355#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
352 356
353/* 357/*
358 * __task_rq_lock - lock the runqueue a given task resides on.
359 * Must be called interrupts disabled.
360 */
361static inline runqueue_t *__task_rq_lock(task_t *p)
362 __acquires(rq->lock)
363{
364 struct runqueue *rq;
365
366repeat_lock_task:
367 rq = task_rq(p);
368 spin_lock(&rq->lock);
369 if (unlikely(rq != task_rq(p))) {
370 spin_unlock(&rq->lock);
371 goto repeat_lock_task;
372 }
373 return rq;
374}
375
376/*
354 * task_rq_lock - lock the runqueue a given task resides on and disable 377 * task_rq_lock - lock the runqueue a given task resides on and disable
355 * interrupts. Note the ordering: we can safely lookup the task_rq without 378 * interrupts. Note the ordering: we can safely lookup the task_rq without
356 * explicitly disabling preemption. 379 * explicitly disabling preemption.
357 */ 380 */
358static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags) 381static runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
359 __acquires(rq->lock) 382 __acquires(rq->lock)
360{ 383{
361 struct runqueue *rq; 384 struct runqueue *rq;
@@ -371,6 +394,12 @@ repeat_lock_task:
371 return rq; 394 return rq;
372} 395}
373 396
397static inline void __task_rq_unlock(runqueue_t *rq)
398 __releases(rq->lock)
399{
400 spin_unlock(&rq->lock);
401}
402
374static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags) 403static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
375 __releases(rq->lock) 404 __releases(rq->lock)
376{ 405{
@@ -634,7 +663,7 @@ static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array)
634} 663}
635 664
636/* 665/*
637 * effective_prio - return the priority that is based on the static 666 * __normal_prio - return the priority that is based on the static
638 * priority but is modified by bonuses/penalties. 667 * priority but is modified by bonuses/penalties.
639 * 668 *
640 * We scale the actual sleep average [0 .... MAX_SLEEP_AVG] 669 * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
@@ -647,13 +676,11 @@ static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array)
647 * 676 *
648 * Both properties are important to certain workloads. 677 * Both properties are important to certain workloads.
649 */ 678 */
650static int effective_prio(task_t *p) 679
680static inline int __normal_prio(task_t *p)
651{ 681{
652 int bonus, prio; 682 int bonus, prio;
653 683
654 if (rt_task(p))
655 return p->prio;
656
657 bonus = CURRENT_BONUS(p) - MAX_BONUS / 2; 684 bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
658 685
659 prio = p->static_prio - bonus; 686 prio = p->static_prio - bonus;
@@ -665,6 +692,106 @@ static int effective_prio(task_t *p)
665} 692}
666 693
667/* 694/*
695 * To aid in avoiding the subversion of "niceness" due to uneven distribution
696 * of tasks with abnormal "nice" values across CPUs the contribution that
697 * each task makes to its run queue's load is weighted according to its
698 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
699 * scaled version of the new time slice allocation that they receive on time
700 * slice expiry etc.
701 */
702
703/*
704 * Assume: static_prio_timeslice(NICE_TO_PRIO(0)) == DEF_TIMESLICE
705 * If static_prio_timeslice() is ever changed to break this assumption then
706 * this code will need modification
707 */
708#define TIME_SLICE_NICE_ZERO DEF_TIMESLICE
709#define LOAD_WEIGHT(lp) \
710 (((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO)
711#define PRIO_TO_LOAD_WEIGHT(prio) \
712 LOAD_WEIGHT(static_prio_timeslice(prio))
713#define RTPRIO_TO_LOAD_WEIGHT(rp) \
714 (PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp))
715
716static void set_load_weight(task_t *p)
717{
718 if (has_rt_policy(p)) {
719#ifdef CONFIG_SMP
720 if (p == task_rq(p)->migration_thread)
721 /*
722 * The migration thread does the actual balancing.
723 * Giving its load any weight will skew balancing
724 * adversely.
725 */
726 p->load_weight = 0;
727 else
728#endif
729 p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority);
730 } else
731 p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);
732}
733
734static inline void inc_raw_weighted_load(runqueue_t *rq, const task_t *p)
735{
736 rq->raw_weighted_load += p->load_weight;
737}
738
739static inline void dec_raw_weighted_load(runqueue_t *rq, const task_t *p)
740{
741 rq->raw_weighted_load -= p->load_weight;
742}
743
744static inline void inc_nr_running(task_t *p, runqueue_t *rq)
745{
746 rq->nr_running++;
747 inc_raw_weighted_load(rq, p);
748}
749
750static inline void dec_nr_running(task_t *p, runqueue_t *rq)
751{
752 rq->nr_running--;
753 dec_raw_weighted_load(rq, p);
754}
755
756/*
757 * Calculate the expected normal priority: i.e. priority
758 * without taking RT-inheritance into account. Might be
759 * boosted by interactivity modifiers. Changes upon fork,
760 * setprio syscalls, and whenever the interactivity
761 * estimator recalculates.
762 */
763static inline int normal_prio(task_t *p)
764{
765 int prio;
766
767 if (has_rt_policy(p))
768 prio = MAX_RT_PRIO-1 - p->rt_priority;
769 else
770 prio = __normal_prio(p);
771 return prio;
772}
773
774/*
775 * Calculate the current priority, i.e. the priority
776 * taken into account by the scheduler. This value might
777 * be boosted by RT tasks, or might be boosted by
778 * interactivity modifiers. Will be RT if the task got
779 * RT-boosted. If not then it returns p->normal_prio.
780 */
781static int effective_prio(task_t *p)
782{
783 p->normal_prio = normal_prio(p);
784 /*
785 * If we are RT tasks or we were boosted to RT priority,
786 * keep the priority unchanged. Otherwise, update priority
787 * to the normal priority:
788 */
789 if (!rt_prio(p->prio))
790 return p->normal_prio;
791 return p->prio;
792}
793
794/*
668 * __activate_task - move a task to the runqueue. 795 * __activate_task - move a task to the runqueue.
669 */ 796 */
670static void __activate_task(task_t *p, runqueue_t *rq) 797static void __activate_task(task_t *p, runqueue_t *rq)
@@ -674,7 +801,7 @@ static void __activate_task(task_t *p, runqueue_t *rq)
674 if (batch_task(p)) 801 if (batch_task(p))
675 target = rq->expired; 802 target = rq->expired;
676 enqueue_task(p, target); 803 enqueue_task(p, target);
677 rq->nr_running++; 804 inc_nr_running(p, rq);
678} 805}
679 806
680/* 807/*
@@ -683,39 +810,45 @@ static void __activate_task(task_t *p, runqueue_t *rq)
683static inline void __activate_idle_task(task_t *p, runqueue_t *rq) 810static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
684{ 811{
685 enqueue_task_head(p, rq->active); 812 enqueue_task_head(p, rq->active);
686 rq->nr_running++; 813 inc_nr_running(p, rq);
687} 814}
688 815
816/*
817 * Recalculate p->normal_prio and p->prio after having slept,
818 * updating the sleep-average too:
819 */
689static int recalc_task_prio(task_t *p, unsigned long long now) 820static int recalc_task_prio(task_t *p, unsigned long long now)
690{ 821{
691 /* Caller must always ensure 'now >= p->timestamp' */ 822 /* Caller must always ensure 'now >= p->timestamp' */
692 unsigned long long __sleep_time = now - p->timestamp; 823 unsigned long sleep_time = now - p->timestamp;
693 unsigned long sleep_time;
694 824
695 if (batch_task(p)) 825 if (batch_task(p))
696 sleep_time = 0; 826 sleep_time = 0;
697 else {
698 if (__sleep_time > NS_MAX_SLEEP_AVG)
699 sleep_time = NS_MAX_SLEEP_AVG;
700 else
701 sleep_time = (unsigned long)__sleep_time;
702 }
703 827
704 if (likely(sleep_time > 0)) { 828 if (likely(sleep_time > 0)) {
705 /* 829 /*
706 * User tasks that sleep a long time are categorised as 830 * This ceiling is set to the lowest priority that would allow
707 * idle. They will only have their sleep_avg increased to a 831 * a task to be reinserted into the active array on timeslice
708 * level that makes them just interactive priority to stay 832 * completion.
709 * active yet prevent them suddenly becoming cpu hogs and
710 * starving other processes.
711 */ 833 */
712 if (p->mm && sleep_time > INTERACTIVE_SLEEP(p)) { 834 unsigned long ceiling = INTERACTIVE_SLEEP(p);
713 unsigned long ceiling;
714 835
715 ceiling = JIFFIES_TO_NS(MAX_SLEEP_AVG - 836 if (p->mm && sleep_time > ceiling && p->sleep_avg < ceiling) {
716 DEF_TIMESLICE); 837 /*
717 if (p->sleep_avg < ceiling) 838 * Prevents user tasks from achieving best priority
718 p->sleep_avg = ceiling; 839 * with one single large enough sleep.
840 */
841 p->sleep_avg = ceiling;
842 /*
843 * Using INTERACTIVE_SLEEP() as a ceiling places a
844 * nice(0) task 1ms sleep away from promotion, and
845 * gives it 700ms to round-robin with no chance of
846 * being demoted. This is more than generous, so
847 * mark this sleep as non-interactive to prevent the
848 * on-runqueue bonus logic from intervening should
849 * this task not receive cpu immediately.
850 */
851 p->sleep_type = SLEEP_NONINTERACTIVE;
719 } else { 852 } else {
720 /* 853 /*
721 * Tasks waking from uninterruptible sleep are 854 * Tasks waking from uninterruptible sleep are
@@ -723,12 +856,12 @@ static int recalc_task_prio(task_t *p, unsigned long long now)
723 * are likely to be waiting on I/O 856 * are likely to be waiting on I/O
724 */ 857 */
725 if (p->sleep_type == SLEEP_NONINTERACTIVE && p->mm) { 858 if (p->sleep_type == SLEEP_NONINTERACTIVE && p->mm) {
726 if (p->sleep_avg >= INTERACTIVE_SLEEP(p)) 859 if (p->sleep_avg >= ceiling)
727 sleep_time = 0; 860 sleep_time = 0;
728 else if (p->sleep_avg + sleep_time >= 861 else if (p->sleep_avg + sleep_time >=
729 INTERACTIVE_SLEEP(p)) { 862 ceiling) {
730 p->sleep_avg = INTERACTIVE_SLEEP(p); 863 p->sleep_avg = ceiling;
731 sleep_time = 0; 864 sleep_time = 0;
732 } 865 }
733 } 866 }
734 867
@@ -742,9 +875,9 @@ static int recalc_task_prio(task_t *p, unsigned long long now)
742 */ 875 */
743 p->sleep_avg += sleep_time; 876 p->sleep_avg += sleep_time;
744 877
745 if (p->sleep_avg > NS_MAX_SLEEP_AVG)
746 p->sleep_avg = NS_MAX_SLEEP_AVG;
747 } 878 }
879 if (p->sleep_avg > NS_MAX_SLEEP_AVG)
880 p->sleep_avg = NS_MAX_SLEEP_AVG;
748 } 881 }
749 882
750 return effective_prio(p); 883 return effective_prio(p);
@@ -805,7 +938,7 @@ static void activate_task(task_t *p, runqueue_t *rq, int local)
805 */ 938 */
806static void deactivate_task(struct task_struct *p, runqueue_t *rq) 939static void deactivate_task(struct task_struct *p, runqueue_t *rq)
807{ 940{
808 rq->nr_running--; 941 dec_nr_running(p, rq);
809 dequeue_task(p, p->array); 942 dequeue_task(p, p->array);
810 p->array = NULL; 943 p->array = NULL;
811} 944}
@@ -818,6 +951,11 @@ static void deactivate_task(struct task_struct *p, runqueue_t *rq)
818 * the target CPU. 951 * the target CPU.
819 */ 952 */
820#ifdef CONFIG_SMP 953#ifdef CONFIG_SMP
954
955#ifndef tsk_is_polling
956#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
957#endif
958
821static void resched_task(task_t *p) 959static void resched_task(task_t *p)
822{ 960{
823 int cpu; 961 int cpu;
@@ -833,9 +971,9 @@ static void resched_task(task_t *p)
833 if (cpu == smp_processor_id()) 971 if (cpu == smp_processor_id())
834 return; 972 return;
835 973
836 /* NEED_RESCHED must be visible before we test POLLING_NRFLAG */ 974 /* NEED_RESCHED must be visible before we test polling */
837 smp_mb(); 975 smp_mb();
838 if (!test_tsk_thread_flag(p, TIF_POLLING_NRFLAG)) 976 if (!tsk_is_polling(p))
839 smp_send_reschedule(cpu); 977 smp_send_reschedule(cpu);
840} 978}
841#else 979#else
@@ -855,6 +993,12 @@ inline int task_curr(const task_t *p)
855 return cpu_curr(task_cpu(p)) == p; 993 return cpu_curr(task_cpu(p)) == p;
856} 994}
857 995
996/* Used instead of source_load when we know the type == 0 */
997unsigned long weighted_cpuload(const int cpu)
998{
999 return cpu_rq(cpu)->raw_weighted_load;
1000}
1001
858#ifdef CONFIG_SMP 1002#ifdef CONFIG_SMP
859typedef struct { 1003typedef struct {
860 struct list_head list; 1004 struct list_head list;
@@ -944,7 +1088,8 @@ void kick_process(task_t *p)
944} 1088}
945 1089
946/* 1090/*
947 * Return a low guess at the load of a migration-source cpu. 1091 * Return a low guess at the load of a migration-source cpu weighted
1092 * according to the scheduling class and "nice" value.
948 * 1093 *
949 * We want to under-estimate the load of migration sources, to 1094 * We want to under-estimate the load of migration sources, to
950 * balance conservatively. 1095 * balance conservatively.
@@ -952,24 +1097,36 @@ void kick_process(task_t *p)
952static inline unsigned long source_load(int cpu, int type) 1097static inline unsigned long source_load(int cpu, int type)
953{ 1098{
954 runqueue_t *rq = cpu_rq(cpu); 1099 runqueue_t *rq = cpu_rq(cpu);
955 unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE; 1100
956 if (type == 0) 1101 if (type == 0)
957 return load_now; 1102 return rq->raw_weighted_load;
958 1103
959 return min(rq->cpu_load[type-1], load_now); 1104 return min(rq->cpu_load[type-1], rq->raw_weighted_load);
960} 1105}
961 1106
962/* 1107/*
963 * Return a high guess at the load of a migration-target cpu 1108 * Return a high guess at the load of a migration-target cpu weighted
1109 * according to the scheduling class and "nice" value.
964 */ 1110 */
965static inline unsigned long target_load(int cpu, int type) 1111static inline unsigned long target_load(int cpu, int type)
966{ 1112{
967 runqueue_t *rq = cpu_rq(cpu); 1113 runqueue_t *rq = cpu_rq(cpu);
968 unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE; 1114
969 if (type == 0) 1115 if (type == 0)
970 return load_now; 1116 return rq->raw_weighted_load;
1117
1118 return max(rq->cpu_load[type-1], rq->raw_weighted_load);
1119}
1120
1121/*
1122 * Return the average load per task on the cpu's run queue
1123 */
1124static inline unsigned long cpu_avg_load_per_task(int cpu)
1125{
1126 runqueue_t *rq = cpu_rq(cpu);
1127 unsigned long n = rq->nr_running;
971 1128
972 return max(rq->cpu_load[type-1], load_now); 1129 return n ? rq->raw_weighted_load / n : SCHED_LOAD_SCALE;
973} 1130}
974 1131
975/* 1132/*
@@ -1042,7 +1199,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
1042 cpus_and(tmp, group->cpumask, p->cpus_allowed); 1199 cpus_and(tmp, group->cpumask, p->cpus_allowed);
1043 1200
1044 for_each_cpu_mask(i, tmp) { 1201 for_each_cpu_mask(i, tmp) {
1045 load = source_load(i, 0); 1202 load = weighted_cpuload(i);
1046 1203
1047 if (load < min_load || (load == min_load && i == this_cpu)) { 1204 if (load < min_load || (load == min_load && i == this_cpu)) {
1048 min_load = load; 1205 min_load = load;
@@ -1069,9 +1226,15 @@ static int sched_balance_self(int cpu, int flag)
1069 struct task_struct *t = current; 1226 struct task_struct *t = current;
1070 struct sched_domain *tmp, *sd = NULL; 1227 struct sched_domain *tmp, *sd = NULL;
1071 1228
1072 for_each_domain(cpu, tmp) 1229 for_each_domain(cpu, tmp) {
1230 /*
1231 * If power savings logic is enabled for a domain, stop there.
1232 */
1233 if (tmp->flags & SD_POWERSAVINGS_BALANCE)
1234 break;
1073 if (tmp->flags & flag) 1235 if (tmp->flags & flag)
1074 sd = tmp; 1236 sd = tmp;
1237 }
1075 1238
1076 while (sd) { 1239 while (sd) {
1077 cpumask_t span; 1240 cpumask_t span;
@@ -1221,17 +1384,19 @@ static int try_to_wake_up(task_t *p, unsigned int state, int sync)
1221 1384
1222 if (this_sd->flags & SD_WAKE_AFFINE) { 1385 if (this_sd->flags & SD_WAKE_AFFINE) {
1223 unsigned long tl = this_load; 1386 unsigned long tl = this_load;
1387 unsigned long tl_per_task = cpu_avg_load_per_task(this_cpu);
1388
1224 /* 1389 /*
1225 * If sync wakeup then subtract the (maximum possible) 1390 * If sync wakeup then subtract the (maximum possible)
1226 * effect of the currently running task from the load 1391 * effect of the currently running task from the load
1227 * of the current CPU: 1392 * of the current CPU:
1228 */ 1393 */
1229 if (sync) 1394 if (sync)
1230 tl -= SCHED_LOAD_SCALE; 1395 tl -= current->load_weight;
1231 1396
1232 if ((tl <= load && 1397 if ((tl <= load &&
1233 tl + target_load(cpu, idx) <= SCHED_LOAD_SCALE) || 1398 tl + target_load(cpu, idx) <= tl_per_task) ||
1234 100*(tl + SCHED_LOAD_SCALE) <= imbalance*load) { 1399 100*(tl + p->load_weight) <= imbalance*load) {
1235 /* 1400 /*
1236 * This domain has SD_WAKE_AFFINE and 1401 * This domain has SD_WAKE_AFFINE and
1237 * p is cache cold in this domain, and 1402 * p is cache cold in this domain, and
@@ -1348,6 +1513,12 @@ void fastcall sched_fork(task_t *p, int clone_flags)
1348 * event cannot wake it up and insert it on the runqueue either. 1513 * event cannot wake it up and insert it on the runqueue either.
1349 */ 1514 */
1350 p->state = TASK_RUNNING; 1515 p->state = TASK_RUNNING;
1516
1517 /*
1518 * Make sure we do not leak PI boosting priority to the child:
1519 */
1520 p->prio = current->normal_prio;
1521
1351 INIT_LIST_HEAD(&p->run_list); 1522 INIT_LIST_HEAD(&p->run_list);
1352 p->array = NULL; 1523 p->array = NULL;
1353#ifdef CONFIG_SCHEDSTATS 1524#ifdef CONFIG_SCHEDSTATS
@@ -1427,10 +1598,11 @@ void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags)
1427 __activate_task(p, rq); 1598 __activate_task(p, rq);
1428 else { 1599 else {
1429 p->prio = current->prio; 1600 p->prio = current->prio;
1601 p->normal_prio = current->normal_prio;
1430 list_add_tail(&p->run_list, &current->run_list); 1602 list_add_tail(&p->run_list, &current->run_list);
1431 p->array = current->array; 1603 p->array = current->array;
1432 p->array->nr_active++; 1604 p->array->nr_active++;
1433 rq->nr_running++; 1605 inc_nr_running(p, rq);
1434 } 1606 }
1435 set_need_resched(); 1607 set_need_resched();
1436 } else 1608 } else
@@ -1648,7 +1820,8 @@ unsigned long nr_uninterruptible(void)
1648 1820
1649unsigned long long nr_context_switches(void) 1821unsigned long long nr_context_switches(void)
1650{ 1822{
1651 unsigned long long i, sum = 0; 1823 int i;
1824 unsigned long long sum = 0;
1652 1825
1653 for_each_possible_cpu(i) 1826 for_each_possible_cpu(i)
1654 sum += cpu_rq(i)->nr_switches; 1827 sum += cpu_rq(i)->nr_switches;
@@ -1686,9 +1859,6 @@ unsigned long nr_active(void)
1686/* 1859/*
1687 * double_rq_lock - safely lock two runqueues 1860 * double_rq_lock - safely lock two runqueues
1688 * 1861 *
1689 * We must take them in cpu order to match code in
1690 * dependent_sleeper and wake_dependent_sleeper.
1691 *
1692 * Note this does not disable interrupts like task_rq_lock, 1862 * Note this does not disable interrupts like task_rq_lock,
1693 * you need to do so manually before calling. 1863 * you need to do so manually before calling.
1694 */ 1864 */
@@ -1700,7 +1870,7 @@ static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
1700 spin_lock(&rq1->lock); 1870 spin_lock(&rq1->lock);
1701 __acquire(rq2->lock); /* Fake it out ;) */ 1871 __acquire(rq2->lock); /* Fake it out ;) */
1702 } else { 1872 } else {
1703 if (rq1->cpu < rq2->cpu) { 1873 if (rq1 < rq2) {
1704 spin_lock(&rq1->lock); 1874 spin_lock(&rq1->lock);
1705 spin_lock(&rq2->lock); 1875 spin_lock(&rq2->lock);
1706 } else { 1876 } else {
@@ -1736,7 +1906,7 @@ static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest)
1736 __acquires(this_rq->lock) 1906 __acquires(this_rq->lock)
1737{ 1907{
1738 if (unlikely(!spin_trylock(&busiest->lock))) { 1908 if (unlikely(!spin_trylock(&busiest->lock))) {
1739 if (busiest->cpu < this_rq->cpu) { 1909 if (busiest < this_rq) {
1740 spin_unlock(&this_rq->lock); 1910 spin_unlock(&this_rq->lock);
1741 spin_lock(&busiest->lock); 1911 spin_lock(&busiest->lock);
1742 spin_lock(&this_rq->lock); 1912 spin_lock(&this_rq->lock);
@@ -1799,9 +1969,9 @@ void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
1799 runqueue_t *this_rq, prio_array_t *this_array, int this_cpu) 1969 runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
1800{ 1970{
1801 dequeue_task(p, src_array); 1971 dequeue_task(p, src_array);
1802 src_rq->nr_running--; 1972 dec_nr_running(p, src_rq);
1803 set_task_cpu(p, this_cpu); 1973 set_task_cpu(p, this_cpu);
1804 this_rq->nr_running++; 1974 inc_nr_running(p, this_rq);
1805 enqueue_task(p, this_array); 1975 enqueue_task(p, this_array);
1806 p->timestamp = (p->timestamp - src_rq->timestamp_last_tick) 1976 p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
1807 + this_rq->timestamp_last_tick; 1977 + this_rq->timestamp_last_tick;
@@ -1848,26 +2018,42 @@ int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
1848 return 1; 2018 return 1;
1849} 2019}
1850 2020
2021#define rq_best_prio(rq) min((rq)->curr->prio, (rq)->best_expired_prio)
1851/* 2022/*
1852 * move_tasks tries to move up to max_nr_move tasks from busiest to this_rq, 2023 * move_tasks tries to move up to max_nr_move tasks and max_load_move weighted
1853 * as part of a balancing operation within "domain". Returns the number of 2024 * load from busiest to this_rq, as part of a balancing operation within
1854 * tasks moved. 2025 * "domain". Returns the number of tasks moved.
1855 * 2026 *
1856 * Called with both runqueues locked. 2027 * Called with both runqueues locked.
1857 */ 2028 */
1858static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest, 2029static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
1859 unsigned long max_nr_move, struct sched_domain *sd, 2030 unsigned long max_nr_move, unsigned long max_load_move,
1860 enum idle_type idle, int *all_pinned) 2031 struct sched_domain *sd, enum idle_type idle,
2032 int *all_pinned)
1861{ 2033{
1862 prio_array_t *array, *dst_array; 2034 prio_array_t *array, *dst_array;
1863 struct list_head *head, *curr; 2035 struct list_head *head, *curr;
1864 int idx, pulled = 0, pinned = 0; 2036 int idx, pulled = 0, pinned = 0, this_best_prio, busiest_best_prio;
2037 int busiest_best_prio_seen;
2038 int skip_for_load; /* skip the task based on weighted load issues */
2039 long rem_load_move;
1865 task_t *tmp; 2040 task_t *tmp;
1866 2041
1867 if (max_nr_move == 0) 2042 if (max_nr_move == 0 || max_load_move == 0)
1868 goto out; 2043 goto out;
1869 2044
2045 rem_load_move = max_load_move;
1870 pinned = 1; 2046 pinned = 1;
2047 this_best_prio = rq_best_prio(this_rq);
2048 busiest_best_prio = rq_best_prio(busiest);
2049 /*
2050 * Enable handling of the case where there is more than one task
2051 * with the best priority. If the current running task is one
2052 * of those with prio==busiest_best_prio we know it won't be moved
2053 * and therefore it's safe to override the skip (based on load) of
2054 * any task we find with that prio.
2055 */
2056 busiest_best_prio_seen = busiest_best_prio == busiest->curr->prio;
1871 2057
1872 /* 2058 /*
1873 * We first consider expired tasks. Those will likely not be 2059 * We first consider expired tasks. Those will likely not be
@@ -1907,7 +2093,17 @@ skip_queue:
1907 2093
1908 curr = curr->prev; 2094 curr = curr->prev;
1909 2095
1910 if (!can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) { 2096 /*
2097 * To help distribute high priority tasks accross CPUs we don't
2098 * skip a task if it will be the highest priority task (i.e. smallest
2099 * prio value) on its new queue regardless of its load weight
2100 */
2101 skip_for_load = tmp->load_weight > rem_load_move;
2102 if (skip_for_load && idx < this_best_prio)
2103 skip_for_load = !busiest_best_prio_seen && idx == busiest_best_prio;
2104 if (skip_for_load ||
2105 !can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) {
2106 busiest_best_prio_seen |= idx == busiest_best_prio;
1911 if (curr != head) 2107 if (curr != head)
1912 goto skip_queue; 2108 goto skip_queue;
1913 idx++; 2109 idx++;
@@ -1921,9 +2117,15 @@ skip_queue:
1921 2117
1922 pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu); 2118 pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu);
1923 pulled++; 2119 pulled++;
2120 rem_load_move -= tmp->load_weight;
1924 2121
1925 /* We only want to steal up to the prescribed number of tasks. */ 2122 /*
1926 if (pulled < max_nr_move) { 2123 * We only want to steal up to the prescribed number of tasks
2124 * and the prescribed amount of weighted load.
2125 */
2126 if (pulled < max_nr_move && rem_load_move > 0) {
2127 if (idx < this_best_prio)
2128 this_best_prio = idx;
1927 if (curr != head) 2129 if (curr != head)
1928 goto skip_queue; 2130 goto skip_queue;
1929 idx++; 2131 idx++;
@@ -1944,7 +2146,7 @@ out:
1944 2146
1945/* 2147/*
1946 * find_busiest_group finds and returns the busiest CPU group within the 2148 * find_busiest_group finds and returns the busiest CPU group within the
1947 * domain. It calculates and returns the number of tasks which should be 2149 * domain. It calculates and returns the amount of weighted load which should be
1948 * moved to restore balance via the imbalance parameter. 2150 * moved to restore balance via the imbalance parameter.
1949 */ 2151 */
1950static struct sched_group * 2152static struct sched_group *
@@ -1954,9 +2156,19 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
1954 struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; 2156 struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
1955 unsigned long max_load, avg_load, total_load, this_load, total_pwr; 2157 unsigned long max_load, avg_load, total_load, this_load, total_pwr;
1956 unsigned long max_pull; 2158 unsigned long max_pull;
2159 unsigned long busiest_load_per_task, busiest_nr_running;
2160 unsigned long this_load_per_task, this_nr_running;
1957 int load_idx; 2161 int load_idx;
2162#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
2163 int power_savings_balance = 1;
2164 unsigned long leader_nr_running = 0, min_load_per_task = 0;
2165 unsigned long min_nr_running = ULONG_MAX;
2166 struct sched_group *group_min = NULL, *group_leader = NULL;
2167#endif
1958 2168
1959 max_load = this_load = total_load = total_pwr = 0; 2169 max_load = this_load = total_load = total_pwr = 0;
2170 busiest_load_per_task = busiest_nr_running = 0;
2171 this_load_per_task = this_nr_running = 0;
1960 if (idle == NOT_IDLE) 2172 if (idle == NOT_IDLE)
1961 load_idx = sd->busy_idx; 2173 load_idx = sd->busy_idx;
1962 else if (idle == NEWLY_IDLE) 2174 else if (idle == NEWLY_IDLE)
@@ -1965,16 +2177,19 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
1965 load_idx = sd->idle_idx; 2177 load_idx = sd->idle_idx;
1966 2178
1967 do { 2179 do {
1968 unsigned long load; 2180 unsigned long load, group_capacity;
1969 int local_group; 2181 int local_group;
1970 int i; 2182 int i;
2183 unsigned long sum_nr_running, sum_weighted_load;
1971 2184
1972 local_group = cpu_isset(this_cpu, group->cpumask); 2185 local_group = cpu_isset(this_cpu, group->cpumask);
1973 2186
1974 /* Tally up the load of all CPUs in the group */ 2187 /* Tally up the load of all CPUs in the group */
1975 avg_load = 0; 2188 sum_weighted_load = sum_nr_running = avg_load = 0;
1976 2189
1977 for_each_cpu_mask(i, group->cpumask) { 2190 for_each_cpu_mask(i, group->cpumask) {
2191 runqueue_t *rq = cpu_rq(i);
2192
1978 if (*sd_idle && !idle_cpu(i)) 2193 if (*sd_idle && !idle_cpu(i))
1979 *sd_idle = 0; 2194 *sd_idle = 0;
1980 2195
@@ -1985,6 +2200,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
1985 load = source_load(i, load_idx); 2200 load = source_load(i, load_idx);
1986 2201
1987 avg_load += load; 2202 avg_load += load;
2203 sum_nr_running += rq->nr_running;
2204 sum_weighted_load += rq->raw_weighted_load;
1988 } 2205 }
1989 2206
1990 total_load += avg_load; 2207 total_load += avg_load;
@@ -1993,17 +2210,80 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
1993 /* Adjust by relative CPU power of the group */ 2210 /* Adjust by relative CPU power of the group */
1994 avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power; 2211 avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
1995 2212
2213 group_capacity = group->cpu_power / SCHED_LOAD_SCALE;
2214
1996 if (local_group) { 2215 if (local_group) {
1997 this_load = avg_load; 2216 this_load = avg_load;
1998 this = group; 2217 this = group;
1999 } else if (avg_load > max_load) { 2218 this_nr_running = sum_nr_running;
2219 this_load_per_task = sum_weighted_load;
2220 } else if (avg_load > max_load &&
2221 sum_nr_running > group_capacity) {
2000 max_load = avg_load; 2222 max_load = avg_load;
2001 busiest = group; 2223 busiest = group;
2224 busiest_nr_running = sum_nr_running;
2225 busiest_load_per_task = sum_weighted_load;
2002 } 2226 }
2227
2228#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
2229 /*
2230 * Busy processors will not participate in power savings
2231 * balance.
2232 */
2233 if (idle == NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
2234 goto group_next;
2235
2236 /*
2237 * If the local group is idle or completely loaded
2238 * no need to do power savings balance at this domain
2239 */
2240 if (local_group && (this_nr_running >= group_capacity ||
2241 !this_nr_running))
2242 power_savings_balance = 0;
2243
2244 /*
2245 * If a group is already running at full capacity or idle,
2246 * don't include that group in power savings calculations
2247 */
2248 if (!power_savings_balance || sum_nr_running >= group_capacity
2249 || !sum_nr_running)
2250 goto group_next;
2251
2252 /*
2253 * Calculate the group which has the least non-idle load.
2254 * This is the group from where we need to pick up the load
2255 * for saving power
2256 */
2257 if ((sum_nr_running < min_nr_running) ||
2258 (sum_nr_running == min_nr_running &&
2259 first_cpu(group->cpumask) <
2260 first_cpu(group_min->cpumask))) {
2261 group_min = group;
2262 min_nr_running = sum_nr_running;
2263 min_load_per_task = sum_weighted_load /
2264 sum_nr_running;
2265 }
2266
2267 /*
2268 * Calculate the group which is almost near its
2269 * capacity but still has some space to pick up some load
2270 * from other group and save more power
2271 */
2272 if (sum_nr_running <= group_capacity - 1)
2273 if (sum_nr_running > leader_nr_running ||
2274 (sum_nr_running == leader_nr_running &&
2275 first_cpu(group->cpumask) >
2276 first_cpu(group_leader->cpumask))) {
2277 group_leader = group;
2278 leader_nr_running = sum_nr_running;
2279 }
2280
2281group_next:
2282#endif
2003 group = group->next; 2283 group = group->next;
2004 } while (group != sd->groups); 2284 } while (group != sd->groups);
2005 2285
2006 if (!busiest || this_load >= max_load || max_load <= SCHED_LOAD_SCALE) 2286 if (!busiest || this_load >= max_load || busiest_nr_running == 0)
2007 goto out_balanced; 2287 goto out_balanced;
2008 2288
2009 avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr; 2289 avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
@@ -2012,6 +2292,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
2012 100*max_load <= sd->imbalance_pct*this_load) 2292 100*max_load <= sd->imbalance_pct*this_load)
2013 goto out_balanced; 2293 goto out_balanced;
2014 2294
2295 busiest_load_per_task /= busiest_nr_running;
2015 /* 2296 /*
2016 * We're trying to get all the cpus to the average_load, so we don't 2297 * We're trying to get all the cpus to the average_load, so we don't
2017 * want to push ourselves above the average load, nor do we wish to 2298 * want to push ourselves above the average load, nor do we wish to
@@ -2023,21 +2304,50 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
2023 * by pulling tasks to us. Be careful of negative numbers as they'll 2304 * by pulling tasks to us. Be careful of negative numbers as they'll
2024 * appear as very large values with unsigned longs. 2305 * appear as very large values with unsigned longs.
2025 */ 2306 */
2307 if (max_load <= busiest_load_per_task)
2308 goto out_balanced;
2309
2310 /*
2311 * In the presence of smp nice balancing, certain scenarios can have
2312 * max load less than avg load(as we skip the groups at or below
2313 * its cpu_power, while calculating max_load..)
2314 */
2315 if (max_load < avg_load) {
2316 *imbalance = 0;
2317 goto small_imbalance;
2318 }
2026 2319
2027 /* Don't want to pull so many tasks that a group would go idle */ 2320 /* Don't want to pull so many tasks that a group would go idle */
2028 max_pull = min(max_load - avg_load, max_load - SCHED_LOAD_SCALE); 2321 max_pull = min(max_load - avg_load, max_load - busiest_load_per_task);
2029 2322
2030 /* How much load to actually move to equalise the imbalance */ 2323 /* How much load to actually move to equalise the imbalance */
2031 *imbalance = min(max_pull * busiest->cpu_power, 2324 *imbalance = min(max_pull * busiest->cpu_power,
2032 (avg_load - this_load) * this->cpu_power) 2325 (avg_load - this_load) * this->cpu_power)
2033 / SCHED_LOAD_SCALE; 2326 / SCHED_LOAD_SCALE;
2034 2327
2035 if (*imbalance < SCHED_LOAD_SCALE) { 2328 /*
2036 unsigned long pwr_now = 0, pwr_move = 0; 2329 * if *imbalance is less than the average load per runnable task
2330 * there is no gaurantee that any tasks will be moved so we'll have
2331 * a think about bumping its value to force at least one task to be
2332 * moved
2333 */
2334 if (*imbalance < busiest_load_per_task) {
2335 unsigned long pwr_now, pwr_move;
2037 unsigned long tmp; 2336 unsigned long tmp;
2337 unsigned int imbn;
2338
2339small_imbalance:
2340 pwr_move = pwr_now = 0;
2341 imbn = 2;
2342 if (this_nr_running) {
2343 this_load_per_task /= this_nr_running;
2344 if (busiest_load_per_task > this_load_per_task)
2345 imbn = 1;
2346 } else
2347 this_load_per_task = SCHED_LOAD_SCALE;
2038 2348
2039 if (max_load - this_load >= SCHED_LOAD_SCALE*2) { 2349 if (max_load - this_load >= busiest_load_per_task * imbn) {
2040 *imbalance = 1; 2350 *imbalance = busiest_load_per_task;
2041 return busiest; 2351 return busiest;
2042 } 2352 }
2043 2353
@@ -2047,39 +2357,47 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
2047 * moving them. 2357 * moving them.
2048 */ 2358 */
2049 2359
2050 pwr_now += busiest->cpu_power*min(SCHED_LOAD_SCALE, max_load); 2360 pwr_now += busiest->cpu_power *
2051 pwr_now += this->cpu_power*min(SCHED_LOAD_SCALE, this_load); 2361 min(busiest_load_per_task, max_load);
2362 pwr_now += this->cpu_power *
2363 min(this_load_per_task, this_load);
2052 pwr_now /= SCHED_LOAD_SCALE; 2364 pwr_now /= SCHED_LOAD_SCALE;
2053 2365
2054 /* Amount of load we'd subtract */ 2366 /* Amount of load we'd subtract */
2055 tmp = SCHED_LOAD_SCALE*SCHED_LOAD_SCALE/busiest->cpu_power; 2367 tmp = busiest_load_per_task*SCHED_LOAD_SCALE/busiest->cpu_power;
2056 if (max_load > tmp) 2368 if (max_load > tmp)
2057 pwr_move += busiest->cpu_power*min(SCHED_LOAD_SCALE, 2369 pwr_move += busiest->cpu_power *
2058 max_load - tmp); 2370 min(busiest_load_per_task, max_load - tmp);
2059 2371
2060 /* Amount of load we'd add */ 2372 /* Amount of load we'd add */
2061 if (max_load*busiest->cpu_power < 2373 if (max_load*busiest->cpu_power <
2062 SCHED_LOAD_SCALE*SCHED_LOAD_SCALE) 2374 busiest_load_per_task*SCHED_LOAD_SCALE)
2063 tmp = max_load*busiest->cpu_power/this->cpu_power; 2375 tmp = max_load*busiest->cpu_power/this->cpu_power;
2064 else 2376 else
2065 tmp = SCHED_LOAD_SCALE*SCHED_LOAD_SCALE/this->cpu_power; 2377 tmp = busiest_load_per_task*SCHED_LOAD_SCALE/this->cpu_power;
2066 pwr_move += this->cpu_power*min(SCHED_LOAD_SCALE, this_load + tmp); 2378 pwr_move += this->cpu_power*min(this_load_per_task, this_load + tmp);
2067 pwr_move /= SCHED_LOAD_SCALE; 2379 pwr_move /= SCHED_LOAD_SCALE;
2068 2380
2069 /* Move if we gain throughput */ 2381 /* Move if we gain throughput */
2070 if (pwr_move <= pwr_now) 2382 if (pwr_move <= pwr_now)
2071 goto out_balanced; 2383 goto out_balanced;
2072 2384
2073 *imbalance = 1; 2385 *imbalance = busiest_load_per_task;
2074 return busiest;
2075 } 2386 }
2076 2387
2077 /* Get rid of the scaling factor, rounding down as we divide */
2078 *imbalance = *imbalance / SCHED_LOAD_SCALE;
2079 return busiest; 2388 return busiest;
2080 2389
2081out_balanced: 2390out_balanced:
2391#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
2392 if (idle == NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
2393 goto ret;
2082 2394
2395 if (this == group_leader && group_leader != group_min) {
2396 *imbalance = min_load_per_task;
2397 return group_min;
2398 }
2399ret:
2400#endif
2083 *imbalance = 0; 2401 *imbalance = 0;
2084 return NULL; 2402 return NULL;
2085} 2403}
@@ -2088,18 +2406,21 @@ out_balanced:
2088 * find_busiest_queue - find the busiest runqueue among the cpus in group. 2406 * find_busiest_queue - find the busiest runqueue among the cpus in group.
2089 */ 2407 */
2090static runqueue_t *find_busiest_queue(struct sched_group *group, 2408static runqueue_t *find_busiest_queue(struct sched_group *group,
2091 enum idle_type idle) 2409 enum idle_type idle, unsigned long imbalance)
2092{ 2410{
2093 unsigned long load, max_load = 0; 2411 unsigned long max_load = 0;
2094 runqueue_t *busiest = NULL; 2412 runqueue_t *busiest = NULL, *rqi;
2095 int i; 2413 int i;
2096 2414
2097 for_each_cpu_mask(i, group->cpumask) { 2415 for_each_cpu_mask(i, group->cpumask) {
2098 load = source_load(i, 0); 2416 rqi = cpu_rq(i);
2099 2417
2100 if (load > max_load) { 2418 if (rqi->nr_running == 1 && rqi->raw_weighted_load > imbalance)
2101 max_load = load; 2419 continue;
2102 busiest = cpu_rq(i); 2420
2421 if (rqi->raw_weighted_load > max_load) {
2422 max_load = rqi->raw_weighted_load;
2423 busiest = rqi;
2103 } 2424 }
2104 } 2425 }
2105 2426
@@ -2112,6 +2433,7 @@ static runqueue_t *find_busiest_queue(struct sched_group *group,
2112 */ 2433 */
2113#define MAX_PINNED_INTERVAL 512 2434#define MAX_PINNED_INTERVAL 512
2114 2435
2436#define minus_1_or_zero(n) ((n) > 0 ? (n) - 1 : 0)
2115/* 2437/*
2116 * Check this_cpu to ensure it is balanced within domain. Attempt to move 2438 * Check this_cpu to ensure it is balanced within domain. Attempt to move
2117 * tasks if there is an imbalance. 2439 * tasks if there is an imbalance.
@@ -2128,7 +2450,8 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
2128 int active_balance = 0; 2450 int active_balance = 0;
2129 int sd_idle = 0; 2451 int sd_idle = 0;
2130 2452
2131 if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER) 2453 if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
2454 !sched_smt_power_savings)
2132 sd_idle = 1; 2455 sd_idle = 1;
2133 2456
2134 schedstat_inc(sd, lb_cnt[idle]); 2457 schedstat_inc(sd, lb_cnt[idle]);
@@ -2139,7 +2462,7 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
2139 goto out_balanced; 2462 goto out_balanced;
2140 } 2463 }
2141 2464
2142 busiest = find_busiest_queue(group, idle); 2465 busiest = find_busiest_queue(group, idle, imbalance);
2143 if (!busiest) { 2466 if (!busiest) {
2144 schedstat_inc(sd, lb_nobusyq[idle]); 2467 schedstat_inc(sd, lb_nobusyq[idle]);
2145 goto out_balanced; 2468 goto out_balanced;
@@ -2159,6 +2482,7 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
2159 */ 2482 */
2160 double_rq_lock(this_rq, busiest); 2483 double_rq_lock(this_rq, busiest);
2161 nr_moved = move_tasks(this_rq, this_cpu, busiest, 2484 nr_moved = move_tasks(this_rq, this_cpu, busiest,
2485 minus_1_or_zero(busiest->nr_running),
2162 imbalance, sd, idle, &all_pinned); 2486 imbalance, sd, idle, &all_pinned);
2163 double_rq_unlock(this_rq, busiest); 2487 double_rq_unlock(this_rq, busiest);
2164 2488
@@ -2216,7 +2540,8 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
2216 sd->balance_interval *= 2; 2540 sd->balance_interval *= 2;
2217 } 2541 }
2218 2542
2219 if (!nr_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER) 2543 if (!nr_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
2544 !sched_smt_power_savings)
2220 return -1; 2545 return -1;
2221 return nr_moved; 2546 return nr_moved;
2222 2547
@@ -2231,7 +2556,7 @@ out_one_pinned:
2231 (sd->balance_interval < sd->max_interval)) 2556 (sd->balance_interval < sd->max_interval))
2232 sd->balance_interval *= 2; 2557 sd->balance_interval *= 2;
2233 2558
2234 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER) 2559 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && !sched_smt_power_savings)
2235 return -1; 2560 return -1;
2236 return 0; 2561 return 0;
2237} 2562}
@@ -2252,7 +2577,7 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
2252 int nr_moved = 0; 2577 int nr_moved = 0;
2253 int sd_idle = 0; 2578 int sd_idle = 0;
2254 2579
2255 if (sd->flags & SD_SHARE_CPUPOWER) 2580 if (sd->flags & SD_SHARE_CPUPOWER && !sched_smt_power_savings)
2256 sd_idle = 1; 2581 sd_idle = 1;
2257 2582
2258 schedstat_inc(sd, lb_cnt[NEWLY_IDLE]); 2583 schedstat_inc(sd, lb_cnt[NEWLY_IDLE]);
@@ -2262,7 +2587,7 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
2262 goto out_balanced; 2587 goto out_balanced;
2263 } 2588 }
2264 2589
2265 busiest = find_busiest_queue(group, NEWLY_IDLE); 2590 busiest = find_busiest_queue(group, NEWLY_IDLE, imbalance);
2266 if (!busiest) { 2591 if (!busiest) {
2267 schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]); 2592 schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
2268 goto out_balanced; 2593 goto out_balanced;
@@ -2277,6 +2602,7 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
2277 /* Attempt to move tasks */ 2602 /* Attempt to move tasks */
2278 double_lock_balance(this_rq, busiest); 2603 double_lock_balance(this_rq, busiest);
2279 nr_moved = move_tasks(this_rq, this_cpu, busiest, 2604 nr_moved = move_tasks(this_rq, this_cpu, busiest,
2605 minus_1_or_zero(busiest->nr_running),
2280 imbalance, sd, NEWLY_IDLE, NULL); 2606 imbalance, sd, NEWLY_IDLE, NULL);
2281 spin_unlock(&busiest->lock); 2607 spin_unlock(&busiest->lock);
2282 } 2608 }
@@ -2292,7 +2618,7 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
2292 2618
2293out_balanced: 2619out_balanced:
2294 schedstat_inc(sd, lb_balanced[NEWLY_IDLE]); 2620 schedstat_inc(sd, lb_balanced[NEWLY_IDLE]);
2295 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER) 2621 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && !sched_smt_power_savings)
2296 return -1; 2622 return -1;
2297 sd->nr_balance_failed = 0; 2623 sd->nr_balance_failed = 0;
2298 return 0; 2624 return 0;
@@ -2347,17 +2673,19 @@ static void active_load_balance(runqueue_t *busiest_rq, int busiest_cpu)
2347 double_lock_balance(busiest_rq, target_rq); 2673 double_lock_balance(busiest_rq, target_rq);
2348 2674
2349 /* Search for an sd spanning us and the target CPU. */ 2675 /* Search for an sd spanning us and the target CPU. */
2350 for_each_domain(target_cpu, sd) 2676 for_each_domain(target_cpu, sd) {
2351 if ((sd->flags & SD_LOAD_BALANCE) && 2677 if ((sd->flags & SD_LOAD_BALANCE) &&
2352 cpu_isset(busiest_cpu, sd->span)) 2678 cpu_isset(busiest_cpu, sd->span))
2353 break; 2679 break;
2680 }
2354 2681
2355 if (unlikely(sd == NULL)) 2682 if (unlikely(sd == NULL))
2356 goto out; 2683 goto out;
2357 2684
2358 schedstat_inc(sd, alb_cnt); 2685 schedstat_inc(sd, alb_cnt);
2359 2686
2360 if (move_tasks(target_rq, target_cpu, busiest_rq, 1, sd, SCHED_IDLE, NULL)) 2687 if (move_tasks(target_rq, target_cpu, busiest_rq, 1,
2688 RTPRIO_TO_LOAD_WEIGHT(100), sd, SCHED_IDLE, NULL))
2361 schedstat_inc(sd, alb_pushed); 2689 schedstat_inc(sd, alb_pushed);
2362 else 2690 else
2363 schedstat_inc(sd, alb_failed); 2691 schedstat_inc(sd, alb_failed);
@@ -2385,7 +2713,7 @@ static void rebalance_tick(int this_cpu, runqueue_t *this_rq,
2385 struct sched_domain *sd; 2713 struct sched_domain *sd;
2386 int i; 2714 int i;
2387 2715
2388 this_load = this_rq->nr_running * SCHED_LOAD_SCALE; 2716 this_load = this_rq->raw_weighted_load;
2389 /* Update our load */ 2717 /* Update our load */
2390 for (i = 0; i < 3; i++) { 2718 for (i = 0; i < 3; i++) {
2391 unsigned long new_load = this_load; 2719 unsigned long new_load = this_load;
@@ -2686,48 +3014,35 @@ static inline void wakeup_busy_runqueue(runqueue_t *rq)
2686 resched_task(rq->idle); 3014 resched_task(rq->idle);
2687} 3015}
2688 3016
2689static void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq) 3017/*
3018 * Called with interrupt disabled and this_rq's runqueue locked.
3019 */
3020static void wake_sleeping_dependent(int this_cpu)
2690{ 3021{
2691 struct sched_domain *tmp, *sd = NULL; 3022 struct sched_domain *tmp, *sd = NULL;
2692 cpumask_t sibling_map;
2693 int i; 3023 int i;
2694 3024
2695 for_each_domain(this_cpu, tmp) 3025 for_each_domain(this_cpu, tmp) {
2696 if (tmp->flags & SD_SHARE_CPUPOWER) 3026 if (tmp->flags & SD_SHARE_CPUPOWER) {
2697 sd = tmp; 3027 sd = tmp;
3028 break;
3029 }
3030 }
2698 3031
2699 if (!sd) 3032 if (!sd)
2700 return; 3033 return;
2701 3034
2702 /* 3035 for_each_cpu_mask(i, sd->span) {
2703 * Unlock the current runqueue because we have to lock in
2704 * CPU order to avoid deadlocks. Caller knows that we might
2705 * unlock. We keep IRQs disabled.
2706 */
2707 spin_unlock(&this_rq->lock);
2708
2709 sibling_map = sd->span;
2710
2711 for_each_cpu_mask(i, sibling_map)
2712 spin_lock(&cpu_rq(i)->lock);
2713 /*
2714 * We clear this CPU from the mask. This both simplifies the
2715 * inner loop and keps this_rq locked when we exit:
2716 */
2717 cpu_clear(this_cpu, sibling_map);
2718
2719 for_each_cpu_mask(i, sibling_map) {
2720 runqueue_t *smt_rq = cpu_rq(i); 3036 runqueue_t *smt_rq = cpu_rq(i);
2721 3037
3038 if (i == this_cpu)
3039 continue;
3040 if (unlikely(!spin_trylock(&smt_rq->lock)))
3041 continue;
3042
2722 wakeup_busy_runqueue(smt_rq); 3043 wakeup_busy_runqueue(smt_rq);
3044 spin_unlock(&smt_rq->lock);
2723 } 3045 }
2724
2725 for_each_cpu_mask(i, sibling_map)
2726 spin_unlock(&cpu_rq(i)->lock);
2727 /*
2728 * We exit with this_cpu's rq still held and IRQs
2729 * still disabled:
2730 */
2731} 3046}
2732 3047
2733/* 3048/*
@@ -2740,52 +3055,46 @@ static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd)
2740 return p->time_slice * (100 - sd->per_cpu_gain) / 100; 3055 return p->time_slice * (100 - sd->per_cpu_gain) / 100;
2741} 3056}
2742 3057
2743static int dependent_sleeper(int this_cpu, runqueue_t *this_rq) 3058/*
3059 * To minimise lock contention and not have to drop this_rq's runlock we only
3060 * trylock the sibling runqueues and bypass those runqueues if we fail to
3061 * acquire their lock. As we only trylock the normal locking order does not
3062 * need to be obeyed.
3063 */
3064static int dependent_sleeper(int this_cpu, runqueue_t *this_rq, task_t *p)
2744{ 3065{
2745 struct sched_domain *tmp, *sd = NULL; 3066 struct sched_domain *tmp, *sd = NULL;
2746 cpumask_t sibling_map;
2747 prio_array_t *array;
2748 int ret = 0, i; 3067 int ret = 0, i;
2749 task_t *p;
2750 3068
2751 for_each_domain(this_cpu, tmp) 3069 /* kernel/rt threads do not participate in dependent sleeping */
2752 if (tmp->flags & SD_SHARE_CPUPOWER) 3070 if (!p->mm || rt_task(p))
3071 return 0;
3072
3073 for_each_domain(this_cpu, tmp) {
3074 if (tmp->flags & SD_SHARE_CPUPOWER) {
2753 sd = tmp; 3075 sd = tmp;
3076 break;
3077 }
3078 }
2754 3079
2755 if (!sd) 3080 if (!sd)
2756 return 0; 3081 return 0;
2757 3082
2758 /* 3083 for_each_cpu_mask(i, sd->span) {
2759 * The same locking rules and details apply as for 3084 runqueue_t *smt_rq;
2760 * wake_sleeping_dependent(): 3085 task_t *smt_curr;
2761 */
2762 spin_unlock(&this_rq->lock);
2763 sibling_map = sd->span;
2764 for_each_cpu_mask(i, sibling_map)
2765 spin_lock(&cpu_rq(i)->lock);
2766 cpu_clear(this_cpu, sibling_map);
2767 3086
2768 /* 3087 if (i == this_cpu)
2769 * Establish next task to be run - it might have gone away because 3088 continue;
2770 * we released the runqueue lock above:
2771 */
2772 if (!this_rq->nr_running)
2773 goto out_unlock;
2774 array = this_rq->active;
2775 if (!array->nr_active)
2776 array = this_rq->expired;
2777 BUG_ON(!array->nr_active);
2778 3089
2779 p = list_entry(array->queue[sched_find_first_bit(array->bitmap)].next, 3090 smt_rq = cpu_rq(i);
2780 task_t, run_list); 3091 if (unlikely(!spin_trylock(&smt_rq->lock)))
3092 continue;
2781 3093
2782 for_each_cpu_mask(i, sibling_map) { 3094 smt_curr = smt_rq->curr;
2783 runqueue_t *smt_rq = cpu_rq(i);
2784 task_t *smt_curr = smt_rq->curr;
2785 3095
2786 /* Kernel threads do not participate in dependent sleeping */ 3096 if (!smt_curr->mm)
2787 if (!p->mm || !smt_curr->mm || rt_task(p)) 3097 goto unlock;
2788 goto check_smt_task;
2789 3098
2790 /* 3099 /*
2791 * If a user task with lower static priority than the 3100 * If a user task with lower static priority than the
@@ -2803,49 +3112,24 @@ static int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
2803 if ((jiffies % DEF_TIMESLICE) > 3112 if ((jiffies % DEF_TIMESLICE) >
2804 (sd->per_cpu_gain * DEF_TIMESLICE / 100)) 3113 (sd->per_cpu_gain * DEF_TIMESLICE / 100))
2805 ret = 1; 3114 ret = 1;
2806 } else 3115 } else {
2807 if (smt_curr->static_prio < p->static_prio && 3116 if (smt_curr->static_prio < p->static_prio &&
2808 !TASK_PREEMPTS_CURR(p, smt_rq) && 3117 !TASK_PREEMPTS_CURR(p, smt_rq) &&
2809 smt_slice(smt_curr, sd) > task_timeslice(p)) 3118 smt_slice(smt_curr, sd) > task_timeslice(p))
2810 ret = 1; 3119 ret = 1;
2811
2812check_smt_task:
2813 if ((!smt_curr->mm && smt_curr != smt_rq->idle) ||
2814 rt_task(smt_curr))
2815 continue;
2816 if (!p->mm) {
2817 wakeup_busy_runqueue(smt_rq);
2818 continue;
2819 }
2820
2821 /*
2822 * Reschedule a lower priority task on the SMT sibling for
2823 * it to be put to sleep, or wake it up if it has been put to
2824 * sleep for priority reasons to see if it should run now.
2825 */
2826 if (rt_task(p)) {
2827 if ((jiffies % DEF_TIMESLICE) >
2828 (sd->per_cpu_gain * DEF_TIMESLICE / 100))
2829 resched_task(smt_curr);
2830 } else {
2831 if (TASK_PREEMPTS_CURR(p, smt_rq) &&
2832 smt_slice(p, sd) > task_timeslice(smt_curr))
2833 resched_task(smt_curr);
2834 else
2835 wakeup_busy_runqueue(smt_rq);
2836 } 3120 }
3121unlock:
3122 spin_unlock(&smt_rq->lock);
2837 } 3123 }
2838out_unlock:
2839 for_each_cpu_mask(i, sibling_map)
2840 spin_unlock(&cpu_rq(i)->lock);
2841 return ret; 3124 return ret;
2842} 3125}
2843#else 3126#else
2844static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq) 3127static inline void wake_sleeping_dependent(int this_cpu)
2845{ 3128{
2846} 3129}
2847 3130
2848static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq) 3131static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq,
3132 task_t *p)
2849{ 3133{
2850 return 0; 3134 return 0;
2851} 3135}
@@ -2967,32 +3251,13 @@ need_resched_nonpreemptible:
2967 3251
2968 cpu = smp_processor_id(); 3252 cpu = smp_processor_id();
2969 if (unlikely(!rq->nr_running)) { 3253 if (unlikely(!rq->nr_running)) {
2970go_idle:
2971 idle_balance(cpu, rq); 3254 idle_balance(cpu, rq);
2972 if (!rq->nr_running) { 3255 if (!rq->nr_running) {
2973 next = rq->idle; 3256 next = rq->idle;
2974 rq->expired_timestamp = 0; 3257 rq->expired_timestamp = 0;
2975 wake_sleeping_dependent(cpu, rq); 3258 wake_sleeping_dependent(cpu);
2976 /*
2977 * wake_sleeping_dependent() might have released
2978 * the runqueue, so break out if we got new
2979 * tasks meanwhile:
2980 */
2981 if (!rq->nr_running)
2982 goto switch_tasks;
2983 }
2984 } else {
2985 if (dependent_sleeper(cpu, rq)) {
2986 next = rq->idle;
2987 goto switch_tasks; 3259 goto switch_tasks;
2988 } 3260 }
2989 /*
2990 * dependent_sleeper() releases and reacquires the runqueue
2991 * lock, hence go into the idle loop if the rq went
2992 * empty meanwhile:
2993 */
2994 if (unlikely(!rq->nr_running))
2995 goto go_idle;
2996 } 3261 }
2997 3262
2998 array = rq->active; 3263 array = rq->active;
@@ -3030,6 +3295,8 @@ go_idle:
3030 } 3295 }
3031 } 3296 }
3032 next->sleep_type = SLEEP_NORMAL; 3297 next->sleep_type = SLEEP_NORMAL;
3298 if (dependent_sleeper(cpu, rq, next))
3299 next = rq->idle;
3033switch_tasks: 3300switch_tasks:
3034 if (next == rq->idle) 3301 if (next == rq->idle)
3035 schedstat_inc(rq, sched_goidle); 3302 schedstat_inc(rq, sched_goidle);
@@ -3473,12 +3740,65 @@ long fastcall __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
3473 3740
3474EXPORT_SYMBOL(sleep_on_timeout); 3741EXPORT_SYMBOL(sleep_on_timeout);
3475 3742
3743#ifdef CONFIG_RT_MUTEXES
3744
3745/*
3746 * rt_mutex_setprio - set the current priority of a task
3747 * @p: task
3748 * @prio: prio value (kernel-internal form)
3749 *
3750 * This function changes the 'effective' priority of a task. It does
3751 * not touch ->normal_prio like __setscheduler().
3752 *
3753 * Used by the rt_mutex code to implement priority inheritance logic.
3754 */
3755void rt_mutex_setprio(task_t *p, int prio)
3756{
3757 unsigned long flags;
3758 prio_array_t *array;
3759 runqueue_t *rq;
3760 int oldprio;
3761
3762 BUG_ON(prio < 0 || prio > MAX_PRIO);
3763
3764 rq = task_rq_lock(p, &flags);
3765
3766 oldprio = p->prio;
3767 array = p->array;
3768 if (array)
3769 dequeue_task(p, array);
3770 p->prio = prio;
3771
3772 if (array) {
3773 /*
3774 * If changing to an RT priority then queue it
3775 * in the active array!
3776 */
3777 if (rt_task(p))
3778 array = rq->active;
3779 enqueue_task(p, array);
3780 /*
3781 * Reschedule if we are currently running on this runqueue and
3782 * our priority decreased, or if we are not currently running on
3783 * this runqueue and our priority is higher than the current's
3784 */
3785 if (task_running(rq, p)) {
3786 if (p->prio > oldprio)
3787 resched_task(rq->curr);
3788 } else if (TASK_PREEMPTS_CURR(p, rq))
3789 resched_task(rq->curr);
3790 }
3791 task_rq_unlock(rq, &flags);
3792}
3793
3794#endif
3795
3476void set_user_nice(task_t *p, long nice) 3796void set_user_nice(task_t *p, long nice)
3477{ 3797{
3478 unsigned long flags; 3798 unsigned long flags;
3479 prio_array_t *array; 3799 prio_array_t *array;
3480 runqueue_t *rq; 3800 runqueue_t *rq;
3481 int old_prio, new_prio, delta; 3801 int old_prio, delta;
3482 3802
3483 if (TASK_NICE(p) == nice || nice < -20 || nice > 19) 3803 if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
3484 return; 3804 return;
@@ -3493,22 +3813,25 @@ void set_user_nice(task_t *p, long nice)
3493 * it wont have any effect on scheduling until the task is 3813 * it wont have any effect on scheduling until the task is
3494 * not SCHED_NORMAL/SCHED_BATCH: 3814 * not SCHED_NORMAL/SCHED_BATCH:
3495 */ 3815 */
3496 if (rt_task(p)) { 3816 if (has_rt_policy(p)) {
3497 p->static_prio = NICE_TO_PRIO(nice); 3817 p->static_prio = NICE_TO_PRIO(nice);
3498 goto out_unlock; 3818 goto out_unlock;
3499 } 3819 }
3500 array = p->array; 3820 array = p->array;
3501 if (array) 3821 if (array) {
3502 dequeue_task(p, array); 3822 dequeue_task(p, array);
3823 dec_raw_weighted_load(rq, p);
3824 }
3503 3825
3504 old_prio = p->prio;
3505 new_prio = NICE_TO_PRIO(nice);
3506 delta = new_prio - old_prio;
3507 p->static_prio = NICE_TO_PRIO(nice); 3826 p->static_prio = NICE_TO_PRIO(nice);
3508 p->prio += delta; 3827 set_load_weight(p);
3828 old_prio = p->prio;
3829 p->prio = effective_prio(p);
3830 delta = p->prio - old_prio;
3509 3831
3510 if (array) { 3832 if (array) {
3511 enqueue_task(p, array); 3833 enqueue_task(p, array);
3834 inc_raw_weighted_load(rq, p);
3512 /* 3835 /*
3513 * If the task increased its priority or is running and 3836 * If the task increased its priority or is running and
3514 * lowered its priority, then reschedule its CPU: 3837 * lowered its priority, then reschedule its CPU:
@@ -3519,7 +3842,6 @@ void set_user_nice(task_t *p, long nice)
3519out_unlock: 3842out_unlock:
3520 task_rq_unlock(rq, &flags); 3843 task_rq_unlock(rq, &flags);
3521} 3844}
3522
3523EXPORT_SYMBOL(set_user_nice); 3845EXPORT_SYMBOL(set_user_nice);
3524 3846
3525/* 3847/*
@@ -3634,16 +3956,15 @@ static void __setscheduler(struct task_struct *p, int policy, int prio)
3634 BUG_ON(p->array); 3956 BUG_ON(p->array);
3635 p->policy = policy; 3957 p->policy = policy;
3636 p->rt_priority = prio; 3958 p->rt_priority = prio;
3637 if (policy != SCHED_NORMAL && policy != SCHED_BATCH) { 3959 p->normal_prio = normal_prio(p);
3638 p->prio = MAX_RT_PRIO-1 - p->rt_priority; 3960 /* we are holding p->pi_lock already */
3639 } else { 3961 p->prio = rt_mutex_getprio(p);
3640 p->prio = p->static_prio; 3962 /*
3641 /* 3963 * SCHED_BATCH tasks are treated as perpetual CPU hogs:
3642 * SCHED_BATCH tasks are treated as perpetual CPU hogs: 3964 */
3643 */ 3965 if (policy == SCHED_BATCH)
3644 if (policy == SCHED_BATCH) 3966 p->sleep_avg = 0;
3645 p->sleep_avg = 0; 3967 set_load_weight(p);
3646 }
3647} 3968}
3648 3969
3649/** 3970/**
@@ -3662,6 +3983,8 @@ int sched_setscheduler(struct task_struct *p, int policy,
3662 unsigned long flags; 3983 unsigned long flags;
3663 runqueue_t *rq; 3984 runqueue_t *rq;
3664 3985
3986 /* may grab non-irq protected spin_locks */
3987 BUG_ON(in_interrupt());
3665recheck: 3988recheck:
3666 /* double check policy once rq lock held */ 3989 /* double check policy once rq lock held */
3667 if (policy < 0) 3990 if (policy < 0)
@@ -3710,14 +4033,20 @@ recheck:
3710 if (retval) 4033 if (retval)
3711 return retval; 4034 return retval;
3712 /* 4035 /*
4036 * make sure no PI-waiters arrive (or leave) while we are
4037 * changing the priority of the task:
4038 */
4039 spin_lock_irqsave(&p->pi_lock, flags);
4040 /*
3713 * To be able to change p->policy safely, the apropriate 4041 * To be able to change p->policy safely, the apropriate
3714 * runqueue lock must be held. 4042 * runqueue lock must be held.
3715 */ 4043 */
3716 rq = task_rq_lock(p, &flags); 4044 rq = __task_rq_lock(p);
3717 /* recheck policy now with rq lock held */ 4045 /* recheck policy now with rq lock held */
3718 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { 4046 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
3719 policy = oldpolicy = -1; 4047 policy = oldpolicy = -1;
3720 task_rq_unlock(rq, &flags); 4048 __task_rq_unlock(rq);
4049 spin_unlock_irqrestore(&p->pi_lock, flags);
3721 goto recheck; 4050 goto recheck;
3722 } 4051 }
3723 array = p->array; 4052 array = p->array;
@@ -3738,7 +4067,11 @@ recheck:
3738 } else if (TASK_PREEMPTS_CURR(p, rq)) 4067 } else if (TASK_PREEMPTS_CURR(p, rq))
3739 resched_task(rq->curr); 4068 resched_task(rq->curr);
3740 } 4069 }
3741 task_rq_unlock(rq, &flags); 4070 __task_rq_unlock(rq);
4071 spin_unlock_irqrestore(&p->pi_lock, flags);
4072
4073 rt_mutex_adjust_pi(p);
4074
3742 return 0; 4075 return 0;
3743} 4076}
3744EXPORT_SYMBOL_GPL(sched_setscheduler); 4077EXPORT_SYMBOL_GPL(sched_setscheduler);
@@ -3760,8 +4093,10 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
3760 read_unlock_irq(&tasklist_lock); 4093 read_unlock_irq(&tasklist_lock);
3761 return -ESRCH; 4094 return -ESRCH;
3762 } 4095 }
3763 retval = sched_setscheduler(p, policy, &lparam); 4096 get_task_struct(p);
3764 read_unlock_irq(&tasklist_lock); 4097 read_unlock_irq(&tasklist_lock);
4098 retval = sched_setscheduler(p, policy, &lparam);
4099 put_task_struct(p);
3765 return retval; 4100 return retval;
3766} 4101}
3767 4102
@@ -4247,7 +4582,7 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
4247 if (retval) 4582 if (retval)
4248 goto out_unlock; 4583 goto out_unlock;
4249 4584
4250 jiffies_to_timespec(p->policy & SCHED_FIFO ? 4585 jiffies_to_timespec(p->policy == SCHED_FIFO ?
4251 0 : task_timeslice(p), &t); 4586 0 : task_timeslice(p), &t);
4252 read_unlock(&tasklist_lock); 4587 read_unlock(&tasklist_lock);
4253 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; 4588 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
@@ -4373,7 +4708,7 @@ void __devinit init_idle(task_t *idle, int cpu)
4373 idle->timestamp = sched_clock(); 4708 idle->timestamp = sched_clock();
4374 idle->sleep_avg = 0; 4709 idle->sleep_avg = 0;
4375 idle->array = NULL; 4710 idle->array = NULL;
4376 idle->prio = MAX_PRIO; 4711 idle->prio = idle->normal_prio = MAX_PRIO;
4377 idle->state = TASK_RUNNING; 4712 idle->state = TASK_RUNNING;
4378 idle->cpus_allowed = cpumask_of_cpu(cpu); 4713 idle->cpus_allowed = cpumask_of_cpu(cpu);
4379 set_task_cpu(idle, cpu); 4714 set_task_cpu(idle, cpu);
@@ -4469,13 +4804,16 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed);
4469 * 4804 *
4470 * So we race with normal scheduler movements, but that's OK, as long 4805 * So we race with normal scheduler movements, but that's OK, as long
4471 * as the task is no longer on this CPU. 4806 * as the task is no longer on this CPU.
4807 *
4808 * Returns non-zero if task was successfully migrated.
4472 */ 4809 */
4473static void __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) 4810static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
4474{ 4811{
4475 runqueue_t *rq_dest, *rq_src; 4812 runqueue_t *rq_dest, *rq_src;
4813 int ret = 0;
4476 4814
4477 if (unlikely(cpu_is_offline(dest_cpu))) 4815 if (unlikely(cpu_is_offline(dest_cpu)))
4478 return; 4816 return ret;
4479 4817
4480 rq_src = cpu_rq(src_cpu); 4818 rq_src = cpu_rq(src_cpu);
4481 rq_dest = cpu_rq(dest_cpu); 4819 rq_dest = cpu_rq(dest_cpu);
@@ -4503,9 +4841,10 @@ static void __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
4503 if (TASK_PREEMPTS_CURR(p, rq_dest)) 4841 if (TASK_PREEMPTS_CURR(p, rq_dest))
4504 resched_task(rq_dest->curr); 4842 resched_task(rq_dest->curr);
4505 } 4843 }
4506 4844 ret = 1;
4507out: 4845out:
4508 double_rq_unlock(rq_src, rq_dest); 4846 double_rq_unlock(rq_src, rq_dest);
4847 return ret;
4509} 4848}
4510 4849
4511/* 4850/*
@@ -4575,9 +4914,12 @@ wait_to_die:
4575/* Figure out where task on dead CPU should go, use force if neccessary. */ 4914/* Figure out where task on dead CPU should go, use force if neccessary. */
4576static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk) 4915static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk)
4577{ 4916{
4917 runqueue_t *rq;
4918 unsigned long flags;
4578 int dest_cpu; 4919 int dest_cpu;
4579 cpumask_t mask; 4920 cpumask_t mask;
4580 4921
4922restart:
4581 /* On same node? */ 4923 /* On same node? */
4582 mask = node_to_cpumask(cpu_to_node(dead_cpu)); 4924 mask = node_to_cpumask(cpu_to_node(dead_cpu));
4583 cpus_and(mask, mask, tsk->cpus_allowed); 4925 cpus_and(mask, mask, tsk->cpus_allowed);
@@ -4589,8 +4931,10 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk)
4589 4931
4590 /* No more Mr. Nice Guy. */ 4932 /* No more Mr. Nice Guy. */
4591 if (dest_cpu == NR_CPUS) { 4933 if (dest_cpu == NR_CPUS) {
4934 rq = task_rq_lock(tsk, &flags);
4592 cpus_setall(tsk->cpus_allowed); 4935 cpus_setall(tsk->cpus_allowed);
4593 dest_cpu = any_online_cpu(tsk->cpus_allowed); 4936 dest_cpu = any_online_cpu(tsk->cpus_allowed);
4937 task_rq_unlock(rq, &flags);
4594 4938
4595 /* 4939 /*
4596 * Don't tell them about moving exiting tasks or 4940 * Don't tell them about moving exiting tasks or
@@ -4602,7 +4946,8 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk)
4602 "longer affine to cpu%d\n", 4946 "longer affine to cpu%d\n",
4603 tsk->pid, tsk->comm, dead_cpu); 4947 tsk->pid, tsk->comm, dead_cpu);
4604 } 4948 }
4605 __migrate_task(tsk, dead_cpu, dest_cpu); 4949 if (!__migrate_task(tsk, dead_cpu, dest_cpu))
4950 goto restart;
4606} 4951}
4607 4952
4608/* 4953/*
@@ -4729,8 +5074,9 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
4729 * migration_call - callback that gets triggered when a CPU is added. 5074 * migration_call - callback that gets triggered when a CPU is added.
4730 * Here we can start up the necessary migration thread for the new CPU. 5075 * Here we can start up the necessary migration thread for the new CPU.
4731 */ 5076 */
4732static int migration_call(struct notifier_block *nfb, unsigned long action, 5077static int __cpuinit migration_call(struct notifier_block *nfb,
4733 void *hcpu) 5078 unsigned long action,
5079 void *hcpu)
4734{ 5080{
4735 int cpu = (long)hcpu; 5081 int cpu = (long)hcpu;
4736 struct task_struct *p; 5082 struct task_struct *p;
@@ -4800,7 +5146,7 @@ static int migration_call(struct notifier_block *nfb, unsigned long action,
4800/* Register at highest priority so that task migration (migrate_all_tasks) 5146/* Register at highest priority so that task migration (migrate_all_tasks)
4801 * happens before everything else. 5147 * happens before everything else.
4802 */ 5148 */
4803static struct notifier_block migration_notifier = { 5149static struct notifier_block __cpuinitdata migration_notifier = {
4804 .notifier_call = migration_call, 5150 .notifier_call = migration_call,
4805 .priority = 10 5151 .priority = 10
4806}; 5152};
@@ -5601,6 +5947,7 @@ static cpumask_t sched_domain_node_span(int node)
5601} 5947}
5602#endif 5948#endif
5603 5949
5950int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
5604/* 5951/*
5605 * At the moment, CONFIG_SCHED_SMT is never defined, but leave it in so we 5952 * At the moment, CONFIG_SCHED_SMT is never defined, but leave it in so we
5606 * can switch it on easily if needed. 5953 * can switch it on easily if needed.
@@ -5616,7 +5963,7 @@ static int cpu_to_cpu_group(int cpu)
5616 5963
5617#ifdef CONFIG_SCHED_MC 5964#ifdef CONFIG_SCHED_MC
5618static DEFINE_PER_CPU(struct sched_domain, core_domains); 5965static DEFINE_PER_CPU(struct sched_domain, core_domains);
5619static struct sched_group sched_group_core[NR_CPUS]; 5966static struct sched_group *sched_group_core_bycpu[NR_CPUS];
5620#endif 5967#endif
5621 5968
5622#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) 5969#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
@@ -5632,7 +5979,7 @@ static int cpu_to_core_group(int cpu)
5632#endif 5979#endif
5633 5980
5634static DEFINE_PER_CPU(struct sched_domain, phys_domains); 5981static DEFINE_PER_CPU(struct sched_domain, phys_domains);
5635static struct sched_group sched_group_phys[NR_CPUS]; 5982static struct sched_group *sched_group_phys_bycpu[NR_CPUS];
5636static int cpu_to_phys_group(int cpu) 5983static int cpu_to_phys_group(int cpu)
5637{ 5984{
5638#if defined(CONFIG_SCHED_MC) 5985#if defined(CONFIG_SCHED_MC)
@@ -5689,13 +6036,74 @@ next_sg:
5689} 6036}
5690#endif 6037#endif
5691 6038
6039/* Free memory allocated for various sched_group structures */
6040static void free_sched_groups(const cpumask_t *cpu_map)
6041{
6042 int cpu;
6043#ifdef CONFIG_NUMA
6044 int i;
6045
6046 for_each_cpu_mask(cpu, *cpu_map) {
6047 struct sched_group *sched_group_allnodes
6048 = sched_group_allnodes_bycpu[cpu];
6049 struct sched_group **sched_group_nodes
6050 = sched_group_nodes_bycpu[cpu];
6051
6052 if (sched_group_allnodes) {
6053 kfree(sched_group_allnodes);
6054 sched_group_allnodes_bycpu[cpu] = NULL;
6055 }
6056
6057 if (!sched_group_nodes)
6058 continue;
6059
6060 for (i = 0; i < MAX_NUMNODES; i++) {
6061 cpumask_t nodemask = node_to_cpumask(i);
6062 struct sched_group *oldsg, *sg = sched_group_nodes[i];
6063
6064 cpus_and(nodemask, nodemask, *cpu_map);
6065 if (cpus_empty(nodemask))
6066 continue;
6067
6068 if (sg == NULL)
6069 continue;
6070 sg = sg->next;
6071next_sg:
6072 oldsg = sg;
6073 sg = sg->next;
6074 kfree(oldsg);
6075 if (oldsg != sched_group_nodes[i])
6076 goto next_sg;
6077 }
6078 kfree(sched_group_nodes);
6079 sched_group_nodes_bycpu[cpu] = NULL;
6080 }
6081#endif
6082 for_each_cpu_mask(cpu, *cpu_map) {
6083 if (sched_group_phys_bycpu[cpu]) {
6084 kfree(sched_group_phys_bycpu[cpu]);
6085 sched_group_phys_bycpu[cpu] = NULL;
6086 }
6087#ifdef CONFIG_SCHED_MC
6088 if (sched_group_core_bycpu[cpu]) {
6089 kfree(sched_group_core_bycpu[cpu]);
6090 sched_group_core_bycpu[cpu] = NULL;
6091 }
6092#endif
6093 }
6094}
6095
5692/* 6096/*
5693 * Build sched domains for a given set of cpus and attach the sched domains 6097 * Build sched domains for a given set of cpus and attach the sched domains
5694 * to the individual cpus 6098 * to the individual cpus
5695 */ 6099 */
5696void build_sched_domains(const cpumask_t *cpu_map) 6100static int build_sched_domains(const cpumask_t *cpu_map)
5697{ 6101{
5698 int i; 6102 int i;
6103 struct sched_group *sched_group_phys = NULL;
6104#ifdef CONFIG_SCHED_MC
6105 struct sched_group *sched_group_core = NULL;
6106#endif
5699#ifdef CONFIG_NUMA 6107#ifdef CONFIG_NUMA
5700 struct sched_group **sched_group_nodes = NULL; 6108 struct sched_group **sched_group_nodes = NULL;
5701 struct sched_group *sched_group_allnodes = NULL; 6109 struct sched_group *sched_group_allnodes = NULL;
@@ -5703,11 +6111,11 @@ void build_sched_domains(const cpumask_t *cpu_map)
5703 /* 6111 /*
5704 * Allocate the per-node list of sched groups 6112 * Allocate the per-node list of sched groups
5705 */ 6113 */
5706 sched_group_nodes = kmalloc(sizeof(struct sched_group*)*MAX_NUMNODES, 6114 sched_group_nodes = kzalloc(sizeof(struct sched_group*)*MAX_NUMNODES,
5707 GFP_ATOMIC); 6115 GFP_KERNEL);
5708 if (!sched_group_nodes) { 6116 if (!sched_group_nodes) {
5709 printk(KERN_WARNING "Can not alloc sched group node list\n"); 6117 printk(KERN_WARNING "Can not alloc sched group node list\n");
5710 return; 6118 return -ENOMEM;
5711 } 6119 }
5712 sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes; 6120 sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
5713#endif 6121#endif
@@ -5733,7 +6141,7 @@ void build_sched_domains(const cpumask_t *cpu_map)
5733 if (!sched_group_allnodes) { 6141 if (!sched_group_allnodes) {
5734 printk(KERN_WARNING 6142 printk(KERN_WARNING
5735 "Can not alloc allnodes sched group\n"); 6143 "Can not alloc allnodes sched group\n");
5736 break; 6144 goto error;
5737 } 6145 }
5738 sched_group_allnodes_bycpu[i] 6146 sched_group_allnodes_bycpu[i]
5739 = sched_group_allnodes; 6147 = sched_group_allnodes;
@@ -5754,6 +6162,18 @@ void build_sched_domains(const cpumask_t *cpu_map)
5754 cpus_and(sd->span, sd->span, *cpu_map); 6162 cpus_and(sd->span, sd->span, *cpu_map);
5755#endif 6163#endif
5756 6164
6165 if (!sched_group_phys) {
6166 sched_group_phys
6167 = kmalloc(sizeof(struct sched_group) * NR_CPUS,
6168 GFP_KERNEL);
6169 if (!sched_group_phys) {
6170 printk (KERN_WARNING "Can not alloc phys sched"
6171 "group\n");
6172 goto error;
6173 }
6174 sched_group_phys_bycpu[i] = sched_group_phys;
6175 }
6176
5757 p = sd; 6177 p = sd;
5758 sd = &per_cpu(phys_domains, i); 6178 sd = &per_cpu(phys_domains, i);
5759 group = cpu_to_phys_group(i); 6179 group = cpu_to_phys_group(i);
@@ -5763,6 +6183,18 @@ void build_sched_domains(const cpumask_t *cpu_map)
5763 sd->groups = &sched_group_phys[group]; 6183 sd->groups = &sched_group_phys[group];
5764 6184
5765#ifdef CONFIG_SCHED_MC 6185#ifdef CONFIG_SCHED_MC
6186 if (!sched_group_core) {
6187 sched_group_core
6188 = kmalloc(sizeof(struct sched_group) * NR_CPUS,
6189 GFP_KERNEL);
6190 if (!sched_group_core) {
6191 printk (KERN_WARNING "Can not alloc core sched"
6192 "group\n");
6193 goto error;
6194 }
6195 sched_group_core_bycpu[i] = sched_group_core;
6196 }
6197
5766 p = sd; 6198 p = sd;
5767 sd = &per_cpu(core_domains, i); 6199 sd = &per_cpu(core_domains, i);
5768 group = cpu_to_core_group(i); 6200 group = cpu_to_core_group(i);
@@ -5846,24 +6278,21 @@ void build_sched_domains(const cpumask_t *cpu_map)
5846 domainspan = sched_domain_node_span(i); 6278 domainspan = sched_domain_node_span(i);
5847 cpus_and(domainspan, domainspan, *cpu_map); 6279 cpus_and(domainspan, domainspan, *cpu_map);
5848 6280
5849 sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL); 6281 sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i);
6282 if (!sg) {
6283 printk(KERN_WARNING "Can not alloc domain group for "
6284 "node %d\n", i);
6285 goto error;
6286 }
5850 sched_group_nodes[i] = sg; 6287 sched_group_nodes[i] = sg;
5851 for_each_cpu_mask(j, nodemask) { 6288 for_each_cpu_mask(j, nodemask) {
5852 struct sched_domain *sd; 6289 struct sched_domain *sd;
5853 sd = &per_cpu(node_domains, j); 6290 sd = &per_cpu(node_domains, j);
5854 sd->groups = sg; 6291 sd->groups = sg;
5855 if (sd->groups == NULL) {
5856 /* Turn off balancing if we have no groups */
5857 sd->flags = 0;
5858 }
5859 }
5860 if (!sg) {
5861 printk(KERN_WARNING
5862 "Can not alloc domain group for node %d\n", i);
5863 continue;
5864 } 6292 }
5865 sg->cpu_power = 0; 6293 sg->cpu_power = 0;
5866 sg->cpumask = nodemask; 6294 sg->cpumask = nodemask;
6295 sg->next = sg;
5867 cpus_or(covered, covered, nodemask); 6296 cpus_or(covered, covered, nodemask);
5868 prev = sg; 6297 prev = sg;
5869 6298
@@ -5882,54 +6311,90 @@ void build_sched_domains(const cpumask_t *cpu_map)
5882 if (cpus_empty(tmp)) 6311 if (cpus_empty(tmp))
5883 continue; 6312 continue;
5884 6313
5885 sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL); 6314 sg = kmalloc_node(sizeof(struct sched_group),
6315 GFP_KERNEL, i);
5886 if (!sg) { 6316 if (!sg) {
5887 printk(KERN_WARNING 6317 printk(KERN_WARNING
5888 "Can not alloc domain group for node %d\n", j); 6318 "Can not alloc domain group for node %d\n", j);
5889 break; 6319 goto error;
5890 } 6320 }
5891 sg->cpu_power = 0; 6321 sg->cpu_power = 0;
5892 sg->cpumask = tmp; 6322 sg->cpumask = tmp;
6323 sg->next = prev->next;
5893 cpus_or(covered, covered, tmp); 6324 cpus_or(covered, covered, tmp);
5894 prev->next = sg; 6325 prev->next = sg;
5895 prev = sg; 6326 prev = sg;
5896 } 6327 }
5897 prev->next = sched_group_nodes[i];
5898 } 6328 }
5899#endif 6329#endif
5900 6330
5901 /* Calculate CPU power for physical packages and nodes */ 6331 /* Calculate CPU power for physical packages and nodes */
6332#ifdef CONFIG_SCHED_SMT
5902 for_each_cpu_mask(i, *cpu_map) { 6333 for_each_cpu_mask(i, *cpu_map) {
5903 int power;
5904 struct sched_domain *sd; 6334 struct sched_domain *sd;
5905#ifdef CONFIG_SCHED_SMT
5906 sd = &per_cpu(cpu_domains, i); 6335 sd = &per_cpu(cpu_domains, i);
5907 power = SCHED_LOAD_SCALE; 6336 sd->groups->cpu_power = SCHED_LOAD_SCALE;
5908 sd->groups->cpu_power = power; 6337 }
5909#endif 6338#endif
5910#ifdef CONFIG_SCHED_MC 6339#ifdef CONFIG_SCHED_MC
6340 for_each_cpu_mask(i, *cpu_map) {
6341 int power;
6342 struct sched_domain *sd;
5911 sd = &per_cpu(core_domains, i); 6343 sd = &per_cpu(core_domains, i);
5912 power = SCHED_LOAD_SCALE + (cpus_weight(sd->groups->cpumask)-1) 6344 if (sched_smt_power_savings)
6345 power = SCHED_LOAD_SCALE * cpus_weight(sd->groups->cpumask);
6346 else
6347 power = SCHED_LOAD_SCALE + (cpus_weight(sd->groups->cpumask)-1)
5913 * SCHED_LOAD_SCALE / 10; 6348 * SCHED_LOAD_SCALE / 10;
5914 sd->groups->cpu_power = power; 6349 sd->groups->cpu_power = power;
6350 }
6351#endif
5915 6352
6353 for_each_cpu_mask(i, *cpu_map) {
6354 struct sched_domain *sd;
6355#ifdef CONFIG_SCHED_MC
5916 sd = &per_cpu(phys_domains, i); 6356 sd = &per_cpu(phys_domains, i);
6357 if (i != first_cpu(sd->groups->cpumask))
6358 continue;
5917 6359
5918 /* 6360 sd->groups->cpu_power = 0;
5919 * This has to be < 2 * SCHED_LOAD_SCALE 6361 if (sched_mc_power_savings || sched_smt_power_savings) {
5920 * Lets keep it SCHED_LOAD_SCALE, so that 6362 int j;
5921 * while calculating NUMA group's cpu_power 6363
5922 * we can simply do 6364 for_each_cpu_mask(j, sd->groups->cpumask) {
5923 * numa_group->cpu_power += phys_group->cpu_power; 6365 struct sched_domain *sd1;
5924 * 6366 sd1 = &per_cpu(core_domains, j);
5925 * See "only add power once for each physical pkg" 6367 /*
5926 * comment below 6368 * for each core we will add once
5927 */ 6369 * to the group in physical domain
5928 sd->groups->cpu_power = SCHED_LOAD_SCALE; 6370 */
6371 if (j != first_cpu(sd1->groups->cpumask))
6372 continue;
6373
6374 if (sched_smt_power_savings)
6375 sd->groups->cpu_power += sd1->groups->cpu_power;
6376 else
6377 sd->groups->cpu_power += SCHED_LOAD_SCALE;
6378 }
6379 } else
6380 /*
6381 * This has to be < 2 * SCHED_LOAD_SCALE
6382 * Lets keep it SCHED_LOAD_SCALE, so that
6383 * while calculating NUMA group's cpu_power
6384 * we can simply do
6385 * numa_group->cpu_power += phys_group->cpu_power;
6386 *
6387 * See "only add power once for each physical pkg"
6388 * comment below
6389 */
6390 sd->groups->cpu_power = SCHED_LOAD_SCALE;
5929#else 6391#else
6392 int power;
5930 sd = &per_cpu(phys_domains, i); 6393 sd = &per_cpu(phys_domains, i);
5931 power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE * 6394 if (sched_smt_power_savings)
5932 (cpus_weight(sd->groups->cpumask)-1) / 10; 6395 power = SCHED_LOAD_SCALE * cpus_weight(sd->groups->cpumask);
6396 else
6397 power = SCHED_LOAD_SCALE;
5933 sd->groups->cpu_power = power; 6398 sd->groups->cpu_power = power;
5934#endif 6399#endif
5935 } 6400 }
@@ -5957,13 +6422,20 @@ void build_sched_domains(const cpumask_t *cpu_map)
5957 * Tune cache-hot values: 6422 * Tune cache-hot values:
5958 */ 6423 */
5959 calibrate_migration_costs(cpu_map); 6424 calibrate_migration_costs(cpu_map);
6425
6426 return 0;
6427
6428error:
6429 free_sched_groups(cpu_map);
6430 return -ENOMEM;
5960} 6431}
5961/* 6432/*
5962 * Set up scheduler domains and groups. Callers must hold the hotplug lock. 6433 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
5963 */ 6434 */
5964static void arch_init_sched_domains(const cpumask_t *cpu_map) 6435static int arch_init_sched_domains(const cpumask_t *cpu_map)
5965{ 6436{
5966 cpumask_t cpu_default_map; 6437 cpumask_t cpu_default_map;
6438 int err;
5967 6439
5968 /* 6440 /*
5969 * Setup mask for cpus without special case scheduling requirements. 6441 * Setup mask for cpus without special case scheduling requirements.
@@ -5972,51 +6444,14 @@ static void arch_init_sched_domains(const cpumask_t *cpu_map)
5972 */ 6444 */
5973 cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map); 6445 cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map);
5974 6446
5975 build_sched_domains(&cpu_default_map); 6447 err = build_sched_domains(&cpu_default_map);
6448
6449 return err;
5976} 6450}
5977 6451
5978static void arch_destroy_sched_domains(const cpumask_t *cpu_map) 6452static void arch_destroy_sched_domains(const cpumask_t *cpu_map)
5979{ 6453{
5980#ifdef CONFIG_NUMA 6454 free_sched_groups(cpu_map);
5981 int i;
5982 int cpu;
5983
5984 for_each_cpu_mask(cpu, *cpu_map) {
5985 struct sched_group *sched_group_allnodes
5986 = sched_group_allnodes_bycpu[cpu];
5987 struct sched_group **sched_group_nodes
5988 = sched_group_nodes_bycpu[cpu];
5989
5990 if (sched_group_allnodes) {
5991 kfree(sched_group_allnodes);
5992 sched_group_allnodes_bycpu[cpu] = NULL;
5993 }
5994
5995 if (!sched_group_nodes)
5996 continue;
5997
5998 for (i = 0; i < MAX_NUMNODES; i++) {
5999 cpumask_t nodemask = node_to_cpumask(i);
6000 struct sched_group *oldsg, *sg = sched_group_nodes[i];
6001
6002 cpus_and(nodemask, nodemask, *cpu_map);
6003 if (cpus_empty(nodemask))
6004 continue;
6005
6006 if (sg == NULL)
6007 continue;
6008 sg = sg->next;
6009next_sg:
6010 oldsg = sg;
6011 sg = sg->next;
6012 kfree(oldsg);
6013 if (oldsg != sched_group_nodes[i])
6014 goto next_sg;
6015 }
6016 kfree(sched_group_nodes);
6017 sched_group_nodes_bycpu[cpu] = NULL;
6018 }
6019#endif
6020} 6455}
6021 6456
6022/* 6457/*
@@ -6041,9 +6476,10 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
6041 * correct sched domains 6476 * correct sched domains
6042 * Call with hotplug lock held 6477 * Call with hotplug lock held
6043 */ 6478 */
6044void partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2) 6479int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
6045{ 6480{
6046 cpumask_t change_map; 6481 cpumask_t change_map;
6482 int err = 0;
6047 6483
6048 cpus_and(*partition1, *partition1, cpu_online_map); 6484 cpus_and(*partition1, *partition1, cpu_online_map);
6049 cpus_and(*partition2, *partition2, cpu_online_map); 6485 cpus_and(*partition2, *partition2, cpu_online_map);
@@ -6052,10 +6488,86 @@ void partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
6052 /* Detach sched domains from all of the affected cpus */ 6488 /* Detach sched domains from all of the affected cpus */
6053 detach_destroy_domains(&change_map); 6489 detach_destroy_domains(&change_map);
6054 if (!cpus_empty(*partition1)) 6490 if (!cpus_empty(*partition1))
6055 build_sched_domains(partition1); 6491 err = build_sched_domains(partition1);
6056 if (!cpus_empty(*partition2)) 6492 if (!err && !cpus_empty(*partition2))
6057 build_sched_domains(partition2); 6493 err = build_sched_domains(partition2);
6494
6495 return err;
6496}
6497
6498#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
6499int arch_reinit_sched_domains(void)
6500{
6501 int err;
6502
6503 lock_cpu_hotplug();
6504 detach_destroy_domains(&cpu_online_map);
6505 err = arch_init_sched_domains(&cpu_online_map);
6506 unlock_cpu_hotplug();
6507
6508 return err;
6509}
6510
6511static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
6512{
6513 int ret;
6514
6515 if (buf[0] != '0' && buf[0] != '1')
6516 return -EINVAL;
6517
6518 if (smt)
6519 sched_smt_power_savings = (buf[0] == '1');
6520 else
6521 sched_mc_power_savings = (buf[0] == '1');
6522
6523 ret = arch_reinit_sched_domains();
6524
6525 return ret ? ret : count;
6526}
6527
6528int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
6529{
6530 int err = 0;
6531#ifdef CONFIG_SCHED_SMT
6532 if (smt_capable())
6533 err = sysfs_create_file(&cls->kset.kobj,
6534 &attr_sched_smt_power_savings.attr);
6535#endif
6536#ifdef CONFIG_SCHED_MC
6537 if (!err && mc_capable())
6538 err = sysfs_create_file(&cls->kset.kobj,
6539 &attr_sched_mc_power_savings.attr);
6540#endif
6541 return err;
6542}
6543#endif
6544
6545#ifdef CONFIG_SCHED_MC
6546static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page)
6547{
6548 return sprintf(page, "%u\n", sched_mc_power_savings);
6549}
6550static ssize_t sched_mc_power_savings_store(struct sys_device *dev, const char *buf, size_t count)
6551{
6552 return sched_power_savings_store(buf, count, 0);
6553}
6554SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show,
6555 sched_mc_power_savings_store);
6556#endif
6557
6558#ifdef CONFIG_SCHED_SMT
6559static ssize_t sched_smt_power_savings_show(struct sys_device *dev, char *page)
6560{
6561 return sprintf(page, "%u\n", sched_smt_power_savings);
6562}
6563static ssize_t sched_smt_power_savings_store(struct sys_device *dev, const char *buf, size_t count)
6564{
6565 return sched_power_savings_store(buf, count, 1);
6058} 6566}
6567SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show,
6568 sched_smt_power_savings_store);
6569#endif
6570
6059 6571
6060#ifdef CONFIG_HOTPLUG_CPU 6572#ifdef CONFIG_HOTPLUG_CPU
6061/* 6573/*
@@ -6138,7 +6650,6 @@ void __init sched_init(void)
6138 rq->push_cpu = 0; 6650 rq->push_cpu = 0;
6139 rq->migration_thread = NULL; 6651 rq->migration_thread = NULL;
6140 INIT_LIST_HEAD(&rq->migration_queue); 6652 INIT_LIST_HEAD(&rq->migration_queue);
6141 rq->cpu = i;
6142#endif 6653#endif
6143 atomic_set(&rq->nr_iowait, 0); 6654 atomic_set(&rq->nr_iowait, 0);
6144 6655
@@ -6153,6 +6664,7 @@ void __init sched_init(void)
6153 } 6664 }
6154 } 6665 }
6155 6666
6667 set_load_weight(&init_task);
6156 /* 6668 /*
6157 * The boot idle thread does lazy MMU switching as well: 6669 * The boot idle thread does lazy MMU switching as well:
6158 */ 6670 */
@@ -6199,11 +6711,12 @@ void normalize_rt_tasks(void)
6199 runqueue_t *rq; 6711 runqueue_t *rq;
6200 6712
6201 read_lock_irq(&tasklist_lock); 6713 read_lock_irq(&tasklist_lock);
6202 for_each_process (p) { 6714 for_each_process(p) {
6203 if (!rt_task(p)) 6715 if (!rt_task(p))
6204 continue; 6716 continue;
6205 6717
6206 rq = task_rq_lock(p, &flags); 6718 spin_lock_irqsave(&p->pi_lock, flags);
6719 rq = __task_rq_lock(p);
6207 6720
6208 array = p->array; 6721 array = p->array;
6209 if (array) 6722 if (array)
@@ -6214,7 +6727,8 @@ void normalize_rt_tasks(void)
6214 resched_task(rq->curr); 6727 resched_task(rq->curr);
6215 } 6728 }
6216 6729
6217 task_rq_unlock(rq, &flags); 6730 __task_rq_unlock(rq);
6731 spin_unlock_irqrestore(&p->pi_lock, flags);
6218 } 6732 }
6219 read_unlock_irq(&tasklist_lock); 6733 read_unlock_irq(&tasklist_lock);
6220} 6734}
diff --git a/kernel/signal.c b/kernel/signal.c
index 1b3c921737..52adf53929 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1531,6 +1531,35 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1531 spin_unlock_irqrestore(&sighand->siglock, flags); 1531 spin_unlock_irqrestore(&sighand->siglock, flags);
1532} 1532}
1533 1533
1534static inline int may_ptrace_stop(void)
1535{
1536 if (!likely(current->ptrace & PT_PTRACED))
1537 return 0;
1538
1539 if (unlikely(current->parent == current->real_parent &&
1540 (current->ptrace & PT_ATTACHED)))
1541 return 0;
1542
1543 if (unlikely(current->signal == current->parent->signal) &&
1544 unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))
1545 return 0;
1546
1547 /*
1548 * Are we in the middle of do_coredump?
1549 * If so and our tracer is also part of the coredump stopping
1550 * is a deadlock situation, and pointless because our tracer
1551 * is dead so don't allow us to stop.
1552 * If SIGKILL was already sent before the caller unlocked
1553 * ->siglock we must see ->core_waiters != 0. Otherwise it
1554 * is safe to enter schedule().
1555 */
1556 if (unlikely(current->mm->core_waiters) &&
1557 unlikely(current->mm == current->parent->mm))
1558 return 0;
1559
1560 return 1;
1561}
1562
1534/* 1563/*
1535 * This must be called with current->sighand->siglock held. 1564 * This must be called with current->sighand->siglock held.
1536 * 1565 *
@@ -1559,11 +1588,7 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
1559 spin_unlock_irq(&current->sighand->siglock); 1588 spin_unlock_irq(&current->sighand->siglock);
1560 try_to_freeze(); 1589 try_to_freeze();
1561 read_lock(&tasklist_lock); 1590 read_lock(&tasklist_lock);
1562 if (likely(current->ptrace & PT_PTRACED) && 1591 if (may_ptrace_stop()) {
1563 likely(current->parent != current->real_parent ||
1564 !(current->ptrace & PT_ATTACHED)) &&
1565 (likely(current->parent->signal != current->signal) ||
1566 !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
1567 do_notify_parent_cldstop(current, CLD_TRAPPED); 1592 do_notify_parent_cldstop(current, CLD_TRAPPED);
1568 read_unlock(&tasklist_lock); 1593 read_unlock(&tasklist_lock);
1569 schedule(); 1594 schedule();
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 9e2f1c6e73..8f03e3b89b 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -446,7 +446,7 @@ static void takeover_tasklets(unsigned int cpu)
446} 446}
447#endif /* CONFIG_HOTPLUG_CPU */ 447#endif /* CONFIG_HOTPLUG_CPU */
448 448
449static int cpu_callback(struct notifier_block *nfb, 449static int __devinit cpu_callback(struct notifier_block *nfb,
450 unsigned long action, 450 unsigned long action,
451 void *hcpu) 451 void *hcpu)
452{ 452{
@@ -486,7 +486,7 @@ static int cpu_callback(struct notifier_block *nfb,
486 return NOTIFY_OK; 486 return NOTIFY_OK;
487} 487}
488 488
489static struct notifier_block cpu_nfb = { 489static struct notifier_block __devinitdata cpu_nfb = {
490 .notifier_call = cpu_callback 490 .notifier_call = cpu_callback
491}; 491};
492 492
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index b5c3b94e01..6b76caa229 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -104,7 +104,7 @@ static int watchdog(void * __bind_cpu)
104/* 104/*
105 * Create/destroy watchdog threads as CPUs come and go: 105 * Create/destroy watchdog threads as CPUs come and go:
106 */ 106 */
107static int 107static int __devinit
108cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) 108cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
109{ 109{
110 int hotcpu = (unsigned long)hcpu; 110 int hotcpu = (unsigned long)hcpu;
@@ -142,7 +142,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
142 return NOTIFY_OK; 142 return NOTIFY_OK;
143} 143}
144 144
145static struct notifier_block cpu_nfb = { 145static struct notifier_block __devinitdata cpu_nfb = {
146 .notifier_call = cpu_callback 146 .notifier_call = cpu_callback
147}; 147};
148 148
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 2c0e658194..93a2c53986 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -73,6 +73,7 @@ extern int printk_ratelimit_burst;
73extern int pid_max_min, pid_max_max; 73extern int pid_max_min, pid_max_max;
74extern int sysctl_drop_caches; 74extern int sysctl_drop_caches;
75extern int percpu_pagelist_fraction; 75extern int percpu_pagelist_fraction;
76extern int compat_log;
76 77
77#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86) 78#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86)
78int unknown_nmi_panic; 79int unknown_nmi_panic;
@@ -132,6 +133,10 @@ extern int acct_parm[];
132extern int no_unaligned_warning; 133extern int no_unaligned_warning;
133#endif 134#endif
134 135
136#ifdef CONFIG_RT_MUTEXES
137extern int max_lock_depth;
138#endif
139
135static int parse_table(int __user *, int, void __user *, size_t __user *, void __user *, size_t, 140static int parse_table(int __user *, int, void __user *, size_t __user *, void __user *, size_t,
136 ctl_table *, void **); 141 ctl_table *, void **);
137static int proc_doutsstring(ctl_table *table, int write, struct file *filp, 142static int proc_doutsstring(ctl_table *table, int write, struct file *filp,
@@ -677,6 +682,27 @@ static ctl_table kern_table[] = {
677 .proc_handler = &proc_dointvec, 682 .proc_handler = &proc_dointvec,
678 }, 683 },
679#endif 684#endif
685#ifdef CONFIG_COMPAT
686 {
687 .ctl_name = KERN_COMPAT_LOG,
688 .procname = "compat-log",
689 .data = &compat_log,
690 .maxlen = sizeof (int),
691 .mode = 0644,
692 .proc_handler = &proc_dointvec,
693 },
694#endif
695#ifdef CONFIG_RT_MUTEXES
696 {
697 .ctl_name = KERN_MAX_LOCK_DEPTH,
698 .procname = "max_lock_depth",
699 .data = &max_lock_depth,
700 .maxlen = sizeof(int),
701 .mode = 0644,
702 .proc_handler = &proc_dointvec,
703 },
704#endif
705
680 { .ctl_name = 0 } 706 { .ctl_name = 0 }
681}; 707};
682 708
@@ -917,6 +943,18 @@ static ctl_table vm_table[] = {
917 .strategy = &sysctl_jiffies, 943 .strategy = &sysctl_jiffies,
918 }, 944 },
919#endif 945#endif
946#ifdef CONFIG_X86_32
947 {
948 .ctl_name = VM_VDSO_ENABLED,
949 .procname = "vdso_enabled",
950 .data = &vdso_enabled,
951 .maxlen = sizeof(vdso_enabled),
952 .mode = 0644,
953 .proc_handler = &proc_dointvec,
954 .strategy = &sysctl_intvec,
955 .extra1 = &zero,
956 },
957#endif
920 { .ctl_name = 0 } 958 { .ctl_name = 0 }
921}; 959};
922 960
diff --git a/kernel/time.c b/kernel/time.c
index b00ddc71ce..5bd4897476 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -523,6 +523,7 @@ EXPORT_SYMBOL(do_gettimeofday);
523 523
524 524
525#else 525#else
526#ifndef CONFIG_GENERIC_TIME
526/* 527/*
527 * Simulate gettimeofday using do_gettimeofday which only allows a timeval 528 * Simulate gettimeofday using do_gettimeofday which only allows a timeval
528 * and therefore only yields usec accuracy 529 * and therefore only yields usec accuracy
@@ -537,6 +538,7 @@ void getnstimeofday(struct timespec *tv)
537} 538}
538EXPORT_SYMBOL_GPL(getnstimeofday); 539EXPORT_SYMBOL_GPL(getnstimeofday);
539#endif 540#endif
541#endif
540 542
541/* Converts Gregorian date to seconds since 1970-01-01 00:00:00. 543/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
542 * Assumes input in normal date format, i.e. 1980-12-31 23:59:59 544 * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
new file mode 100644
index 0000000000..e1dfd8e86c
--- /dev/null
+++ b/kernel/time/Makefile
@@ -0,0 +1 @@
obj-y += clocksource.o jiffies.o
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
new file mode 100644
index 0000000000..74eca5939b
--- /dev/null
+++ b/kernel/time/clocksource.c
@@ -0,0 +1,349 @@
1/*
2 * linux/kernel/time/clocksource.c
3 *
4 * This file contains the functions which manage clocksource drivers.
5 *
6 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 * TODO WishList:
23 * o Allow clocksource drivers to be unregistered
24 * o get rid of clocksource_jiffies extern
25 */
26
27#include <linux/clocksource.h>
28#include <linux/sysdev.h>
29#include <linux/init.h>
30#include <linux/module.h>
31
32/* XXX - Would like a better way for initializing curr_clocksource */
33extern struct clocksource clocksource_jiffies;
34
35/*[Clocksource internal variables]---------
36 * curr_clocksource:
37 * currently selected clocksource. Initialized to clocksource_jiffies.
38 * next_clocksource:
39 * pending next selected clocksource.
40 * clocksource_list:
41 * linked list with the registered clocksources
42 * clocksource_lock:
43 * protects manipulations to curr_clocksource and next_clocksource
44 * and the clocksource_list
45 * override_name:
46 * Name of the user-specified clocksource.
47 */
48static struct clocksource *curr_clocksource = &clocksource_jiffies;
49static struct clocksource *next_clocksource;
50static LIST_HEAD(clocksource_list);
51static DEFINE_SPINLOCK(clocksource_lock);
52static char override_name[32];
53static int finished_booting;
54
55/* clocksource_done_booting - Called near the end of bootup
56 *
57 * Hack to avoid lots of clocksource churn at boot time
58 */
59static int __init clocksource_done_booting(void)
60{
61 finished_booting = 1;
62 return 0;
63}
64
65late_initcall(clocksource_done_booting);
66
67/**
68 * clocksource_get_next - Returns the selected clocksource
69 *
70 */
71struct clocksource *clocksource_get_next(void)
72{
73 unsigned long flags;
74
75 spin_lock_irqsave(&clocksource_lock, flags);
76 if (next_clocksource && finished_booting) {
77 curr_clocksource = next_clocksource;
78 next_clocksource = NULL;
79 }
80 spin_unlock_irqrestore(&clocksource_lock, flags);
81
82 return curr_clocksource;
83}
84
85/**
86 * select_clocksource - Finds the best registered clocksource.
87 *
88 * Private function. Must hold clocksource_lock when called.
89 *
90 * Looks through the list of registered clocksources, returning
91 * the one with the highest rating value. If there is a clocksource
92 * name that matches the override string, it returns that clocksource.
93 */
94static struct clocksource *select_clocksource(void)
95{
96 struct clocksource *best = NULL;
97 struct list_head *tmp;
98
99 list_for_each(tmp, &clocksource_list) {
100 struct clocksource *src;
101
102 src = list_entry(tmp, struct clocksource, list);
103 if (!best)
104 best = src;
105
106 /* check for override: */
107 if (strlen(src->name) == strlen(override_name) &&
108 !strcmp(src->name, override_name)) {
109 best = src;
110 break;
111 }
112 /* pick the highest rating: */
113 if (src->rating > best->rating)
114 best = src;
115 }
116
117 return best;
118}
119
120/**
121 * is_registered_source - Checks if clocksource is registered
122 * @c: pointer to a clocksource
123 *
124 * Private helper function. Must hold clocksource_lock when called.
125 *
126 * Returns one if the clocksource is already registered, zero otherwise.
127 */
128static int is_registered_source(struct clocksource *c)
129{
130 int len = strlen(c->name);
131 struct list_head *tmp;
132
133 list_for_each(tmp, &clocksource_list) {
134 struct clocksource *src;
135
136 src = list_entry(tmp, struct clocksource, list);
137 if (strlen(src->name) == len && !strcmp(src->name, c->name))
138 return 1;
139 }
140
141 return 0;
142}
143
144/**
145 * clocksource_register - Used to install new clocksources
146 * @t: clocksource to be registered
147 *
148 * Returns -EBUSY if registration fails, zero otherwise.
149 */
150int clocksource_register(struct clocksource *c)
151{
152 int ret = 0;
153 unsigned long flags;
154
155 spin_lock_irqsave(&clocksource_lock, flags);
156 /* check if clocksource is already registered */
157 if (is_registered_source(c)) {
158 printk("register_clocksource: Cannot register %s. "
159 "Already registered!", c->name);
160 ret = -EBUSY;
161 } else {
162 /* register it */
163 list_add(&c->list, &clocksource_list);
164 /* scan the registered clocksources, and pick the best one */
165 next_clocksource = select_clocksource();
166 }
167 spin_unlock_irqrestore(&clocksource_lock, flags);
168 return ret;
169}
170EXPORT_SYMBOL(clocksource_register);
171
172/**
173 * clocksource_reselect - Rescan list for next clocksource
174 *
175 * A quick helper function to be used if a clocksource changes its
176 * rating. Forces the clocksource list to be re-scanned for the best
177 * clocksource.
178 */
179void clocksource_reselect(void)
180{
181 unsigned long flags;
182
183 spin_lock_irqsave(&clocksource_lock, flags);
184 next_clocksource = select_clocksource();
185 spin_unlock_irqrestore(&clocksource_lock, flags);
186}
187EXPORT_SYMBOL(clocksource_reselect);
188
189/**
190 * sysfs_show_current_clocksources - sysfs interface for current clocksource
191 * @dev: unused
192 * @buf: char buffer to be filled with clocksource list
193 *
194 * Provides sysfs interface for listing current clocksource.
195 */
196static ssize_t
197sysfs_show_current_clocksources(struct sys_device *dev, char *buf)
198{
199 char *curr = buf;
200
201 spin_lock_irq(&clocksource_lock);
202 curr += sprintf(curr, "%s ", curr_clocksource->name);
203 spin_unlock_irq(&clocksource_lock);
204
205 curr += sprintf(curr, "\n");
206
207 return curr - buf;
208}
209
210/**
211 * sysfs_override_clocksource - interface for manually overriding clocksource
212 * @dev: unused
213 * @buf: name of override clocksource
214 * @count: length of buffer
215 *
216 * Takes input from sysfs interface for manually overriding the default
217 * clocksource selction.
218 */
219static ssize_t sysfs_override_clocksource(struct sys_device *dev,
220 const char *buf, size_t count)
221{
222 size_t ret = count;
223 /* strings from sysfs write are not 0 terminated! */
224 if (count >= sizeof(override_name))
225 return -EINVAL;
226
227 /* strip of \n: */
228 if (buf[count-1] == '\n')
229 count--;
230 if (count < 1)
231 return -EINVAL;
232
233 spin_lock_irq(&clocksource_lock);
234
235 /* copy the name given: */
236 memcpy(override_name, buf, count);
237 override_name[count] = 0;
238
239 /* try to select it: */
240 next_clocksource = select_clocksource();
241
242 spin_unlock_irq(&clocksource_lock);
243
244 return ret;
245}
246
247/**
248 * sysfs_show_available_clocksources - sysfs interface for listing clocksource
249 * @dev: unused
250 * @buf: char buffer to be filled with clocksource list
251 *
252 * Provides sysfs interface for listing registered clocksources
253 */
254static ssize_t
255sysfs_show_available_clocksources(struct sys_device *dev, char *buf)
256{
257 struct list_head *tmp;
258 char *curr = buf;
259
260 spin_lock_irq(&clocksource_lock);
261 list_for_each(tmp, &clocksource_list) {
262 struct clocksource *src;
263
264 src = list_entry(tmp, struct clocksource, list);
265 curr += sprintf(curr, "%s ", src->name);
266 }
267 spin_unlock_irq(&clocksource_lock);
268
269 curr += sprintf(curr, "\n");
270
271 return curr - buf;
272}
273
274/*
275 * Sysfs setup bits:
276 */
277static SYSDEV_ATTR(current_clocksource, 0600, sysfs_show_current_clocksources,
278 sysfs_override_clocksource);
279
280static SYSDEV_ATTR(available_clocksource, 0600,
281 sysfs_show_available_clocksources, NULL);
282
283static struct sysdev_class clocksource_sysclass = {
284 set_kset_name("clocksource"),
285};
286
287static struct sys_device device_clocksource = {
288 .id = 0,
289 .cls = &clocksource_sysclass,
290};
291
292static int __init init_clocksource_sysfs(void)
293{
294 int error = sysdev_class_register(&clocksource_sysclass);
295
296 if (!error)
297 error = sysdev_register(&device_clocksource);
298 if (!error)
299 error = sysdev_create_file(
300 &device_clocksource,
301 &attr_current_clocksource);
302 if (!error)
303 error = sysdev_create_file(
304 &device_clocksource,
305 &attr_available_clocksource);
306 return error;
307}
308
309device_initcall(init_clocksource_sysfs);
310
311/**
312 * boot_override_clocksource - boot clock override
313 * @str: override name
314 *
315 * Takes a clocksource= boot argument and uses it
316 * as the clocksource override name.
317 */
318static int __init boot_override_clocksource(char* str)
319{
320 unsigned long flags;
321 spin_lock_irqsave(&clocksource_lock, flags);
322 if (str)
323 strlcpy(override_name, str, sizeof(override_name));
324 spin_unlock_irqrestore(&clocksource_lock, flags);
325 return 1;
326}
327
328__setup("clocksource=", boot_override_clocksource);
329
330/**
331 * boot_override_clock - Compatibility layer for deprecated boot option
332 * @str: override name
333 *
334 * DEPRECATED! Takes a clock= boot argument and uses it
335 * as the clocksource override name
336 */
337static int __init boot_override_clock(char* str)
338{
339 if (!strcmp(str, "pmtmr")) {
340 printk("Warning: clock=pmtmr is deprecated. "
341 "Use clocksource=acpi_pm.\n");
342 return boot_override_clocksource("acpi_pm");
343 }
344 printk("Warning! clock= boot option is deprecated. "
345 "Use clocksource=xyz\n");
346 return boot_override_clocksource(str);
347}
348
349__setup("clock=", boot_override_clock);
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
new file mode 100644
index 0000000000..126bb30c4a
--- /dev/null
+++ b/kernel/time/jiffies.c
@@ -0,0 +1,73 @@
1/***********************************************************************
2* linux/kernel/time/jiffies.c
3*
4* This file contains the jiffies based clocksource.
5*
6* Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
7*
8* This program is free software; you can redistribute it and/or modify
9* it under the terms of the GNU General Public License as published by
10* the Free Software Foundation; either version 2 of the License, or
11* (at your option) any later version.
12*
13* This program is distributed in the hope that it will be useful,
14* but WITHOUT ANY WARRANTY; without even the implied warranty of
15* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16* GNU General Public License for more details.
17*
18* You should have received a copy of the GNU General Public License
19* along with this program; if not, write to the Free Software
20* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21*
22************************************************************************/
23#include <linux/clocksource.h>
24#include <linux/jiffies.h>
25#include <linux/init.h>
26
27/* The Jiffies based clocksource is the lowest common
28 * denominator clock source which should function on
29 * all systems. It has the same coarse resolution as
30 * the timer interrupt frequency HZ and it suffers
31 * inaccuracies caused by missed or lost timer
32 * interrupts and the inability for the timer
33 * interrupt hardware to accuratly tick at the
34 * requested HZ value. It is also not reccomended
35 * for "tick-less" systems.
36 */
37#define NSEC_PER_JIFFY ((u32)((((u64)NSEC_PER_SEC)<<8)/ACTHZ))
38
39/* Since jiffies uses a simple NSEC_PER_JIFFY multiplier
40 * conversion, the .shift value could be zero. However
41 * this would make NTP adjustments impossible as they are
42 * in units of 1/2^.shift. Thus we use JIFFIES_SHIFT to
43 * shift both the nominator and denominator the same
44 * amount, and give ntp adjustments in units of 1/2^8
45 *
46 * The value 8 is somewhat carefully chosen, as anything
47 * larger can result in overflows. NSEC_PER_JIFFY grows as
48 * HZ shrinks, so values greater then 8 overflow 32bits when
49 * HZ=100.
50 */
51#define JIFFIES_SHIFT 8
52
53static cycle_t jiffies_read(void)
54{
55 return (cycle_t) jiffies;
56}
57
58struct clocksource clocksource_jiffies = {
59 .name = "jiffies",
60 .rating = 0, /* lowest rating*/
61 .read = jiffies_read,
62 .mask = 0xffffffff, /*32bits*/
63 .mult = NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */
64 .shift = JIFFIES_SHIFT,
65 .is_continuous = 0, /* tick based, not free running */
66};
67
68static int __init init_jiffies_clocksource(void)
69{
70 return clocksource_register(&clocksource_jiffies);
71}
72
73module_init(init_jiffies_clocksource);
diff --git a/kernel/timer.c b/kernel/timer.c
index eb97371b87..5a89602530 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -597,7 +597,6 @@ long time_tolerance = MAXFREQ; /* frequency tolerance (ppm) */
597long time_precision = 1; /* clock precision (us) */ 597long time_precision = 1; /* clock precision (us) */
598long time_maxerror = NTP_PHASE_LIMIT; /* maximum error (us) */ 598long time_maxerror = NTP_PHASE_LIMIT; /* maximum error (us) */
599long time_esterror = NTP_PHASE_LIMIT; /* estimated error (us) */ 599long time_esterror = NTP_PHASE_LIMIT; /* estimated error (us) */
600static long time_phase; /* phase offset (scaled us) */
601long time_freq = (((NSEC_PER_SEC + HZ/2) % HZ - HZ/2) << SHIFT_USEC) / NSEC_PER_USEC; 600long time_freq = (((NSEC_PER_SEC + HZ/2) % HZ - HZ/2) << SHIFT_USEC) / NSEC_PER_USEC;
602 /* frequency offset (scaled ppm)*/ 601 /* frequency offset (scaled ppm)*/
603static long time_adj; /* tick adjust (scaled 1 / HZ) */ 602static long time_adj; /* tick adjust (scaled 1 / HZ) */
@@ -747,27 +746,14 @@ static long adjtime_adjustment(void)
747} 746}
748 747
749/* in the NTP reference this is called "hardclock()" */ 748/* in the NTP reference this is called "hardclock()" */
750static void update_wall_time_one_tick(void) 749static void update_ntp_one_tick(void)
751{ 750{
752 long time_adjust_step, delta_nsec; 751 long time_adjust_step;
753 752
754 time_adjust_step = adjtime_adjustment(); 753 time_adjust_step = adjtime_adjustment();
755 if (time_adjust_step) 754 if (time_adjust_step)
756 /* Reduce by this step the amount of time left */ 755 /* Reduce by this step the amount of time left */
757 time_adjust -= time_adjust_step; 756 time_adjust -= time_adjust_step;
758 delta_nsec = tick_nsec + time_adjust_step * 1000;
759 /*
760 * Advance the phase, once it gets to one microsecond, then
761 * advance the tick more.
762 */
763 time_phase += time_adj;
764 if ((time_phase >= FINENSEC) || (time_phase <= -FINENSEC)) {
765 long ltemp = shift_right(time_phase, (SHIFT_SCALE - 10));
766 time_phase -= ltemp << (SHIFT_SCALE - 10);
767 delta_nsec += ltemp;
768 }
769 xtime.tv_nsec += delta_nsec;
770 time_interpolator_update(delta_nsec);
771 757
772 /* Changes by adjtime() do not take effect till next tick. */ 758 /* Changes by adjtime() do not take effect till next tick. */
773 if (time_next_adjust != 0) { 759 if (time_next_adjust != 0) {
@@ -780,36 +766,378 @@ static void update_wall_time_one_tick(void)
780 * Return how long ticks are at the moment, that is, how much time 766 * Return how long ticks are at the moment, that is, how much time
781 * update_wall_time_one_tick will add to xtime next time we call it 767 * update_wall_time_one_tick will add to xtime next time we call it
782 * (assuming no calls to do_adjtimex in the meantime). 768 * (assuming no calls to do_adjtimex in the meantime).
783 * The return value is in fixed-point nanoseconds with SHIFT_SCALE-10 769 * The return value is in fixed-point nanoseconds shifted by the
784 * bits to the right of the binary point. 770 * specified number of bits to the right of the binary point.
785 * This function has no side-effects. 771 * This function has no side-effects.
786 */ 772 */
787u64 current_tick_length(void) 773u64 current_tick_length(void)
788{ 774{
789 long delta_nsec; 775 long delta_nsec;
776 u64 ret;
790 777
778 /* calculate the finest interval NTP will allow.
779 * ie: nanosecond value shifted by (SHIFT_SCALE - 10)
780 */
791 delta_nsec = tick_nsec + adjtime_adjustment() * 1000; 781 delta_nsec = tick_nsec + adjtime_adjustment() * 1000;
792 return ((u64) delta_nsec << (SHIFT_SCALE - 10)) + time_adj; 782 ret = (u64)delta_nsec << TICK_LENGTH_SHIFT;
783 ret += (s64)time_adj << (TICK_LENGTH_SHIFT - (SHIFT_SCALE - 10));
784
785 return ret;
793} 786}
794 787
795/* 788/* XXX - all of this timekeeping code should be later moved to time.c */
796 * Using a loop looks inefficient, but "ticks" is 789#include <linux/clocksource.h>
797 * usually just one (we shouldn't be losing ticks, 790static struct clocksource *clock; /* pointer to current clocksource */
798 * we're doing this this way mainly for interrupt 791
799 * latency reasons, not because we think we'll 792#ifdef CONFIG_GENERIC_TIME
800 * have lots of lost timer ticks 793/**
794 * __get_nsec_offset - Returns nanoseconds since last call to periodic_hook
795 *
796 * private function, must hold xtime_lock lock when being
797 * called. Returns the number of nanoseconds since the
798 * last call to update_wall_time() (adjusted by NTP scaling)
799 */
800static inline s64 __get_nsec_offset(void)
801{
802 cycle_t cycle_now, cycle_delta;
803 s64 ns_offset;
804
805 /* read clocksource: */
806 cycle_now = clocksource_read(clock);
807
808 /* calculate the delta since the last update_wall_time: */
809 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
810
811 /* convert to nanoseconds: */
812 ns_offset = cyc2ns(clock, cycle_delta);
813
814 return ns_offset;
815}
816
817/**
818 * __get_realtime_clock_ts - Returns the time of day in a timespec
819 * @ts: pointer to the timespec to be set
820 *
821 * Returns the time of day in a timespec. Used by
822 * do_gettimeofday() and get_realtime_clock_ts().
801 */ 823 */
802static void update_wall_time(unsigned long ticks) 824static inline void __get_realtime_clock_ts(struct timespec *ts)
803{ 825{
826 unsigned long seq;
827 s64 nsecs;
828
829 do {
830 seq = read_seqbegin(&xtime_lock);
831
832 *ts = xtime;
833 nsecs = __get_nsec_offset();
834
835 } while (read_seqretry(&xtime_lock, seq));
836
837 timespec_add_ns(ts, nsecs);
838}
839
840/**
841 * getnstimeofday - Returns the time of day in a timespec
842 * @ts: pointer to the timespec to be set
843 *
844 * Returns the time of day in a timespec.
845 */
846void getnstimeofday(struct timespec *ts)
847{
848 __get_realtime_clock_ts(ts);
849}
850
851EXPORT_SYMBOL(getnstimeofday);
852
853/**
854 * do_gettimeofday - Returns the time of day in a timeval
855 * @tv: pointer to the timeval to be set
856 *
857 * NOTE: Users should be converted to using get_realtime_clock_ts()
858 */
859void do_gettimeofday(struct timeval *tv)
860{
861 struct timespec now;
862
863 __get_realtime_clock_ts(&now);
864 tv->tv_sec = now.tv_sec;
865 tv->tv_usec = now.tv_nsec/1000;
866}
867
868EXPORT_SYMBOL(do_gettimeofday);
869/**
870 * do_settimeofday - Sets the time of day
871 * @tv: pointer to the timespec variable containing the new time
872 *
873 * Sets the time of day to the new time and update NTP and notify hrtimers
874 */
875int do_settimeofday(struct timespec *tv)
876{
877 unsigned long flags;
878 time_t wtm_sec, sec = tv->tv_sec;
879 long wtm_nsec, nsec = tv->tv_nsec;
880
881 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
882 return -EINVAL;
883
884 write_seqlock_irqsave(&xtime_lock, flags);
885
886 nsec -= __get_nsec_offset();
887
888 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
889 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
890
891 set_normalized_timespec(&xtime, sec, nsec);
892 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
893
894 ntp_clear();
895
896 write_sequnlock_irqrestore(&xtime_lock, flags);
897
898 /* signal hrtimers about time change */
899 clock_was_set();
900
901 return 0;
902}
903
904EXPORT_SYMBOL(do_settimeofday);
905
906/**
907 * change_clocksource - Swaps clocksources if a new one is available
908 *
909 * Accumulates current time interval and initializes new clocksource
910 */
911static int change_clocksource(void)
912{
913 struct clocksource *new;
914 cycle_t now;
915 u64 nsec;
916 new = clocksource_get_next();
917 if (clock != new) {
918 now = clocksource_read(new);
919 nsec = __get_nsec_offset();
920 timespec_add_ns(&xtime, nsec);
921
922 clock = new;
923 clock->cycle_last = now;
924 printk(KERN_INFO "Time: %s clocksource has been installed.\n",
925 clock->name);
926 return 1;
927 } else if (clock->update_callback) {
928 return clock->update_callback();
929 }
930 return 0;
931}
932#else
933#define change_clocksource() (0)
934#endif
935
936/**
937 * timeofday_is_continuous - check to see if timekeeping is free running
938 */
939int timekeeping_is_continuous(void)
940{
941 unsigned long seq;
942 int ret;
943
804 do { 944 do {
805 ticks--; 945 seq = read_seqbegin(&xtime_lock);
806 update_wall_time_one_tick(); 946
807 if (xtime.tv_nsec >= 1000000000) { 947 ret = clock->is_continuous;
808 xtime.tv_nsec -= 1000000000; 948
949 } while (read_seqretry(&xtime_lock, seq));
950
951 return ret;
952}
953
954/*
955 * timekeeping_init - Initializes the clocksource and common timekeeping values
956 */
957void __init timekeeping_init(void)
958{
959 unsigned long flags;
960
961 write_seqlock_irqsave(&xtime_lock, flags);
962 clock = clocksource_get_next();
963 clocksource_calculate_interval(clock, tick_nsec);
964 clock->cycle_last = clocksource_read(clock);
965 ntp_clear();
966 write_sequnlock_irqrestore(&xtime_lock, flags);
967}
968
969
970/*
971 * timekeeping_resume - Resumes the generic timekeeping subsystem.
972 * @dev: unused
973 *
974 * This is for the generic clocksource timekeeping.
975 * xtime/wall_to_monotonic/jiffies/wall_jiffies/etc are
976 * still managed by arch specific suspend/resume code.
977 */
978static int timekeeping_resume(struct sys_device *dev)
979{
980 unsigned long flags;
981
982 write_seqlock_irqsave(&xtime_lock, flags);
983 /* restart the last cycle value */
984 clock->cycle_last = clocksource_read(clock);
985 write_sequnlock_irqrestore(&xtime_lock, flags);
986 return 0;
987}
988
989/* sysfs resume/suspend bits for timekeeping */
990static struct sysdev_class timekeeping_sysclass = {
991 .resume = timekeeping_resume,
992 set_kset_name("timekeeping"),
993};
994
995static struct sys_device device_timer = {
996 .id = 0,
997 .cls = &timekeeping_sysclass,
998};
999
1000static int __init timekeeping_init_device(void)
1001{
1002 int error = sysdev_class_register(&timekeeping_sysclass);
1003 if (!error)
1004 error = sysdev_register(&device_timer);
1005 return error;
1006}
1007
1008device_initcall(timekeeping_init_device);
1009
1010/*
1011 * If the error is already larger, we look ahead another tick,
1012 * to compensate for late or lost adjustments.
1013 */
1014static __always_inline int clocksource_bigadjust(int sign, s64 error, s64 *interval, s64 *offset)
1015{
1016 int adj;
1017
1018 /*
1019 * As soon as the machine is synchronized to the external time
1020 * source this should be the common case.
1021 */
1022 error >>= 2;
1023 if (likely(sign > 0 ? error <= *interval : error >= *interval))
1024 return sign;
1025
1026 /*
1027 * An extra look ahead dampens the effect of the current error,
1028 * which can grow quite large with continously late updates, as
1029 * it would dominate the adjustment value and can lead to
1030 * oscillation.
1031 */
1032 error += current_tick_length() >> (TICK_LENGTH_SHIFT - clock->shift + 1);
1033 error -= clock->xtime_interval >> 1;
1034
1035 adj = 0;
1036 while (1) {
1037 error >>= 1;
1038 if (sign > 0 ? error <= *interval : error >= *interval)
1039 break;
1040 adj++;
1041 }
1042
1043 /*
1044 * Add the current adjustments to the error and take the offset
1045 * into account, the latter can cause the error to be hardly
1046 * reduced at the next tick. Check the error again if there's
1047 * room for another adjustment, thus further reducing the error
1048 * which otherwise had to be corrected at the next update.
1049 */
1050 error = (error << 1) - *interval + *offset;
1051 if (sign > 0 ? error > *interval : error < *interval)
1052 adj++;
1053
1054 *interval <<= adj;
1055 *offset <<= adj;
1056 return sign << adj;
1057}
1058
1059/*
1060 * Adjust the multiplier to reduce the error value,
1061 * this is optimized for the most common adjustments of -1,0,1,
1062 * for other values we can do a bit more work.
1063 */
1064static void clocksource_adjust(struct clocksource *clock, s64 offset)
1065{
1066 s64 error, interval = clock->cycle_interval;
1067 int adj;
1068
1069 error = clock->error >> (TICK_LENGTH_SHIFT - clock->shift - 1);
1070 if (error > interval) {
1071 adj = clocksource_bigadjust(1, error, &interval, &offset);
1072 } else if (error < -interval) {
1073 interval = -interval;
1074 offset = -offset;
1075 adj = clocksource_bigadjust(-1, error, &interval, &offset);
1076 } else
1077 return;
1078
1079 clock->mult += adj;
1080 clock->xtime_interval += interval;
1081 clock->xtime_nsec -= offset;
1082 clock->error -= (interval - offset) << (TICK_LENGTH_SHIFT - clock->shift);
1083}
1084
1085/*
1086 * update_wall_time - Uses the current clocksource to increment the wall time
1087 *
1088 * Called from the timer interrupt, must hold a write on xtime_lock.
1089 */
1090static void update_wall_time(void)
1091{
1092 cycle_t offset;
1093
1094 clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift;
1095
1096#ifdef CONFIG_GENERIC_TIME
1097 offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask;
1098#else
1099 offset = clock->cycle_interval;
1100#endif
1101
1102 /* normally this loop will run just once, however in the
1103 * case of lost or late ticks, it will accumulate correctly.
1104 */
1105 while (offset >= clock->cycle_interval) {
1106 /* accumulate one interval */
1107 clock->xtime_nsec += clock->xtime_interval;
1108 clock->cycle_last += clock->cycle_interval;
1109 offset -= clock->cycle_interval;
1110
1111 if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
1112 clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
809 xtime.tv_sec++; 1113 xtime.tv_sec++;
810 second_overflow(); 1114 second_overflow();
811 } 1115 }
812 } while (ticks); 1116
1117 /* interpolator bits */
1118 time_interpolator_update(clock->xtime_interval
1119 >> clock->shift);
1120 /* increment the NTP state machine */
1121 update_ntp_one_tick();
1122
1123 /* accumulate error between NTP and clock interval */
1124 clock->error += current_tick_length();
1125 clock->error -= clock->xtime_interval << (TICK_LENGTH_SHIFT - clock->shift);
1126 }
1127
1128 /* correct the clock when NTP error is too big */
1129 clocksource_adjust(clock, offset);
1130
1131 /* store full nanoseconds into xtime */
1132 xtime.tv_nsec = clock->xtime_nsec >> clock->shift;
1133 clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
1134
1135 /* check to see if there is a new clocksource to use */
1136 if (change_clocksource()) {
1137 clock->error = 0;
1138 clock->xtime_nsec = 0;
1139 clocksource_calculate_interval(clock, tick_nsec);
1140 }
813} 1141}
814 1142
815/* 1143/*
@@ -915,10 +1243,8 @@ static inline void update_times(void)
915 unsigned long ticks; 1243 unsigned long ticks;
916 1244
917 ticks = jiffies - wall_jiffies; 1245 ticks = jiffies - wall_jiffies;
918 if (ticks) { 1246 wall_jiffies += ticks;
919 wall_jiffies += ticks; 1247 update_wall_time();
920 update_wall_time(ticks);
921 }
922 calc_load(ticks); 1248 calc_load(ticks);
923} 1249}
924 1250
@@ -1326,7 +1652,7 @@ static void __devinit migrate_timers(int cpu)
1326} 1652}
1327#endif /* CONFIG_HOTPLUG_CPU */ 1653#endif /* CONFIG_HOTPLUG_CPU */
1328 1654
1329static int timer_cpu_notify(struct notifier_block *self, 1655static int __devinit timer_cpu_notify(struct notifier_block *self,
1330 unsigned long action, void *hcpu) 1656 unsigned long action, void *hcpu)
1331{ 1657{
1332 long cpu = (long)hcpu; 1658 long cpu = (long)hcpu;
@@ -1346,7 +1672,7 @@ static int timer_cpu_notify(struct notifier_block *self,
1346 return NOTIFY_OK; 1672 return NOTIFY_OK;
1347} 1673}
1348 1674
1349static struct notifier_block timers_nb = { 1675static struct notifier_block __devinitdata timers_nb = {
1350 .notifier_call = timer_cpu_notify, 1676 .notifier_call = timer_cpu_notify,
1351}; 1677};
1352 1678
diff --git a/kernel/unwind.c b/kernel/unwind.c
new file mode 100644
index 0000000000..f69c804c8e
--- /dev/null
+++ b/kernel/unwind.c
@@ -0,0 +1,918 @@
1/*
2 * Copyright (C) 2002-2006 Novell, Inc.
3 * Jan Beulich <jbeulich@novell.com>
4 * This code is released under version 2 of the GNU GPL.
5 *
6 * A simple API for unwinding kernel stacks. This is used for
7 * debugging and error reporting purposes. The kernel doesn't need
8 * full-blown stack unwinding with all the bells and whistles, so there
9 * is not much point in implementing the full Dwarf2 unwind API.
10 */
11
12#include <linux/unwind.h>
13#include <linux/module.h>
14#include <linux/delay.h>
15#include <linux/stop_machine.h>
16#include <asm/sections.h>
17#include <asm/uaccess.h>
18#include <asm/unaligned.h>
19
20extern char __start_unwind[], __end_unwind[];
21
22#define MAX_STACK_DEPTH 8
23
24#define EXTRA_INFO(f) { \
25 BUILD_BUG_ON_ZERO(offsetof(struct unwind_frame_info, f) \
26 % FIELD_SIZEOF(struct unwind_frame_info, f)) \
27 + offsetof(struct unwind_frame_info, f) \
28 / FIELD_SIZEOF(struct unwind_frame_info, f), \
29 FIELD_SIZEOF(struct unwind_frame_info, f) \
30 }
31#define PTREGS_INFO(f) EXTRA_INFO(regs.f)
32
33static const struct {
34 unsigned offs:BITS_PER_LONG / 2;
35 unsigned width:BITS_PER_LONG / 2;
36} reg_info[] = {
37 UNW_REGISTER_INFO
38};
39
40#undef PTREGS_INFO
41#undef EXTRA_INFO
42
43#ifndef REG_INVALID
44#define REG_INVALID(r) (reg_info[r].width == 0)
45#endif
46
47#define DW_CFA_nop 0x00
48#define DW_CFA_set_loc 0x01
49#define DW_CFA_advance_loc1 0x02
50#define DW_CFA_advance_loc2 0x03
51#define DW_CFA_advance_loc4 0x04
52#define DW_CFA_offset_extended 0x05
53#define DW_CFA_restore_extended 0x06
54#define DW_CFA_undefined 0x07
55#define DW_CFA_same_value 0x08
56#define DW_CFA_register 0x09
57#define DW_CFA_remember_state 0x0a
58#define DW_CFA_restore_state 0x0b
59#define DW_CFA_def_cfa 0x0c
60#define DW_CFA_def_cfa_register 0x0d
61#define DW_CFA_def_cfa_offset 0x0e
62#define DW_CFA_def_cfa_expression 0x0f
63#define DW_CFA_expression 0x10
64#define DW_CFA_offset_extended_sf 0x11
65#define DW_CFA_def_cfa_sf 0x12
66#define DW_CFA_def_cfa_offset_sf 0x13
67#define DW_CFA_val_offset 0x14
68#define DW_CFA_val_offset_sf 0x15
69#define DW_CFA_val_expression 0x16
70#define DW_CFA_lo_user 0x1c
71#define DW_CFA_GNU_window_save 0x2d
72#define DW_CFA_GNU_args_size 0x2e
73#define DW_CFA_GNU_negative_offset_extended 0x2f
74#define DW_CFA_hi_user 0x3f
75
76#define DW_EH_PE_FORM 0x07
77#define DW_EH_PE_native 0x00
78#define DW_EH_PE_leb128 0x01
79#define DW_EH_PE_data2 0x02
80#define DW_EH_PE_data4 0x03
81#define DW_EH_PE_data8 0x04
82#define DW_EH_PE_signed 0x08
83#define DW_EH_PE_ADJUST 0x70
84#define DW_EH_PE_abs 0x00
85#define DW_EH_PE_pcrel 0x10
86#define DW_EH_PE_textrel 0x20
87#define DW_EH_PE_datarel 0x30
88#define DW_EH_PE_funcrel 0x40
89#define DW_EH_PE_aligned 0x50
90#define DW_EH_PE_indirect 0x80
91#define DW_EH_PE_omit 0xff
92
93typedef unsigned long uleb128_t;
94typedef signed long sleb128_t;
95
96static struct unwind_table {
97 struct {
98 unsigned long pc;
99 unsigned long range;
100 } core, init;
101 const void *address;
102 unsigned long size;
103 struct unwind_table *link;
104 const char *name;
105} root_table, *last_table;
106
107struct unwind_item {
108 enum item_location {
109 Nowhere,
110 Memory,
111 Register,
112 Value
113 } where;
114 uleb128_t value;
115};
116
117struct unwind_state {
118 uleb128_t loc, org;
119 const u8 *cieStart, *cieEnd;
120 uleb128_t codeAlign;
121 sleb128_t dataAlign;
122 struct cfa {
123 uleb128_t reg, offs;
124 } cfa;
125 struct unwind_item regs[ARRAY_SIZE(reg_info)];
126 unsigned stackDepth:8;
127 unsigned version:8;
128 const u8 *label;
129 const u8 *stack[MAX_STACK_DEPTH];
130};
131
132static const struct cfa badCFA = { ARRAY_SIZE(reg_info), 1 };
133
134static struct unwind_table *find_table(unsigned long pc)
135{
136 struct unwind_table *table;
137
138 for (table = &root_table; table; table = table->link)
139 if ((pc >= table->core.pc
140 && pc < table->core.pc + table->core.range)
141 || (pc >= table->init.pc
142 && pc < table->init.pc + table->init.range))
143 break;
144
145 return table;
146}
147
148static void init_unwind_table(struct unwind_table *table,
149 const char *name,
150 const void *core_start,
151 unsigned long core_size,
152 const void *init_start,
153 unsigned long init_size,
154 const void *table_start,
155 unsigned long table_size)
156{
157 table->core.pc = (unsigned long)core_start;
158 table->core.range = core_size;
159 table->init.pc = (unsigned long)init_start;
160 table->init.range = init_size;
161 table->address = table_start;
162 table->size = table_size;
163 table->link = NULL;
164 table->name = name;
165}
166
167void __init unwind_init(void)
168{
169 init_unwind_table(&root_table, "kernel",
170 _text, _end - _text,
171 NULL, 0,
172 __start_unwind, __end_unwind - __start_unwind);
173}
174
175#ifdef CONFIG_MODULES
176
177/* Must be called with module_mutex held. */
178void *unwind_add_table(struct module *module,
179 const void *table_start,
180 unsigned long table_size)
181{
182 struct unwind_table *table;
183
184 if (table_size <= 0)
185 return NULL;
186
187 table = kmalloc(sizeof(*table), GFP_KERNEL);
188 if (!table)
189 return NULL;
190
191 init_unwind_table(table, module->name,
192 module->module_core, module->core_size,
193 module->module_init, module->init_size,
194 table_start, table_size);
195
196 if (last_table)
197 last_table->link = table;
198 else
199 root_table.link = table;
200 last_table = table;
201
202 return table;
203}
204
205struct unlink_table_info
206{
207 struct unwind_table *table;
208 int init_only;
209};
210
211static int unlink_table(void *arg)
212{
213 struct unlink_table_info *info = arg;
214 struct unwind_table *table = info->table, *prev;
215
216 for (prev = &root_table; prev->link && prev->link != table; prev = prev->link)
217 ;
218
219 if (prev->link) {
220 if (info->init_only) {
221 table->init.pc = 0;
222 table->init.range = 0;
223 info->table = NULL;
224 } else {
225 prev->link = table->link;
226 if (!prev->link)
227 last_table = prev;
228 }
229 } else
230 info->table = NULL;
231
232 return 0;
233}
234
235/* Must be called with module_mutex held. */
236void unwind_remove_table(void *handle, int init_only)
237{
238 struct unwind_table *table = handle;
239 struct unlink_table_info info;
240
241 if (!table || table == &root_table)
242 return;
243
244 if (init_only && table == last_table) {
245 table->init.pc = 0;
246 table->init.range = 0;
247 return;
248 }
249
250 info.table = table;
251 info.init_only = init_only;
252 stop_machine_run(unlink_table, &info, NR_CPUS);
253
254 if (info.table)
255 kfree(table);
256}
257
258#endif /* CONFIG_MODULES */
259
260static uleb128_t get_uleb128(const u8 **pcur, const u8 *end)
261{
262 const u8 *cur = *pcur;
263 uleb128_t value;
264 unsigned shift;
265
266 for (shift = 0, value = 0; cur < end; shift += 7) {
267 if (shift + 7 > 8 * sizeof(value)
268 && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) {
269 cur = end + 1;
270 break;
271 }
272 value |= (uleb128_t)(*cur & 0x7f) << shift;
273 if (!(*cur++ & 0x80))
274 break;
275 }
276 *pcur = cur;
277
278 return value;
279}
280
281static sleb128_t get_sleb128(const u8 **pcur, const u8 *end)
282{
283 const u8 *cur = *pcur;
284 sleb128_t value;
285 unsigned shift;
286
287 for (shift = 0, value = 0; cur < end; shift += 7) {
288 if (shift + 7 > 8 * sizeof(value)
289 && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) {
290 cur = end + 1;
291 break;
292 }
293 value |= (sleb128_t)(*cur & 0x7f) << shift;
294 if (!(*cur & 0x80)) {
295 value |= -(*cur++ & 0x40) << shift;
296 break;
297 }
298 }
299 *pcur = cur;
300
301 return value;
302}
303
304static unsigned long read_pointer(const u8 **pLoc,
305 const void *end,
306 signed ptrType)
307{
308 unsigned long value = 0;
309 union {
310 const u8 *p8;
311 const u16 *p16u;
312 const s16 *p16s;
313 const u32 *p32u;
314 const s32 *p32s;
315 const unsigned long *pul;
316 } ptr;
317
318 if (ptrType < 0 || ptrType == DW_EH_PE_omit)
319 return 0;
320 ptr.p8 = *pLoc;
321 switch(ptrType & DW_EH_PE_FORM) {
322 case DW_EH_PE_data2:
323 if (end < (const void *)(ptr.p16u + 1))
324 return 0;
325 if(ptrType & DW_EH_PE_signed)
326 value = get_unaligned(ptr.p16s++);
327 else
328 value = get_unaligned(ptr.p16u++);
329 break;
330 case DW_EH_PE_data4:
331#ifdef CONFIG_64BIT
332 if (end < (const void *)(ptr.p32u + 1))
333 return 0;
334 if(ptrType & DW_EH_PE_signed)
335 value = get_unaligned(ptr.p32s++);
336 else
337 value = get_unaligned(ptr.p32u++);
338 break;
339 case DW_EH_PE_data8:
340 BUILD_BUG_ON(sizeof(u64) != sizeof(value));
341#else
342 BUILD_BUG_ON(sizeof(u32) != sizeof(value));
343#endif
344 case DW_EH_PE_native:
345 if (end < (const void *)(ptr.pul + 1))
346 return 0;
347 value = get_unaligned(ptr.pul++);
348 break;
349 case DW_EH_PE_leb128:
350 BUILD_BUG_ON(sizeof(uleb128_t) > sizeof(value));
351 value = ptrType & DW_EH_PE_signed
352 ? get_sleb128(&ptr.p8, end)
353 : get_uleb128(&ptr.p8, end);
354 if ((const void *)ptr.p8 > end)
355 return 0;
356 break;
357 default:
358 return 0;
359 }
360 switch(ptrType & DW_EH_PE_ADJUST) {
361 case DW_EH_PE_abs:
362 break;
363 case DW_EH_PE_pcrel:
364 value += (unsigned long)*pLoc;
365 break;
366 default:
367 return 0;
368 }
369 if ((ptrType & DW_EH_PE_indirect)
370 && __get_user(value, (unsigned long *)value))
371 return 0;
372 *pLoc = ptr.p8;
373
374 return value;
375}
376
377static signed fde_pointer_type(const u32 *cie)
378{
379 const u8 *ptr = (const u8 *)(cie + 2);
380 unsigned version = *ptr;
381
382 if (version != 1)
383 return -1; /* unsupported */
384 if (*++ptr) {
385 const char *aug;
386 const u8 *end = (const u8 *)(cie + 1) + *cie;
387 uleb128_t len;
388
389 /* check if augmentation size is first (and thus present) */
390 if (*ptr != 'z')
391 return -1;
392 /* check if augmentation string is nul-terminated */
393 if ((ptr = memchr(aug = (const void *)ptr, 0, end - ptr)) == NULL)
394 return -1;
395 ++ptr; /* skip terminator */
396 get_uleb128(&ptr, end); /* skip code alignment */
397 get_sleb128(&ptr, end); /* skip data alignment */
398 /* skip return address column */
399 version <= 1 ? (void)++ptr : (void)get_uleb128(&ptr, end);
400 len = get_uleb128(&ptr, end); /* augmentation length */
401 if (ptr + len < ptr || ptr + len > end)
402 return -1;
403 end = ptr + len;
404 while (*++aug) {
405 if (ptr >= end)
406 return -1;
407 switch(*aug) {
408 case 'L':
409 ++ptr;
410 break;
411 case 'P': {
412 signed ptrType = *ptr++;
413
414 if (!read_pointer(&ptr, end, ptrType) || ptr > end)
415 return -1;
416 }
417 break;
418 case 'R':
419 return *ptr;
420 default:
421 return -1;
422 }
423 }
424 }
425 return DW_EH_PE_native|DW_EH_PE_abs;
426}
427
428static int advance_loc(unsigned long delta, struct unwind_state *state)
429{
430 state->loc += delta * state->codeAlign;
431
432 return delta > 0;
433}
434
435static void set_rule(uleb128_t reg,
436 enum item_location where,
437 uleb128_t value,
438 struct unwind_state *state)
439{
440 if (reg < ARRAY_SIZE(state->regs)) {
441 state->regs[reg].where = where;
442 state->regs[reg].value = value;
443 }
444}
445
446static int processCFI(const u8 *start,
447 const u8 *end,
448 unsigned long targetLoc,
449 signed ptrType,
450 struct unwind_state *state)
451{
452 union {
453 const u8 *p8;
454 const u16 *p16;
455 const u32 *p32;
456 } ptr;
457 int result = 1;
458
459 if (start != state->cieStart) {
460 state->loc = state->org;
461 result = processCFI(state->cieStart, state->cieEnd, 0, ptrType, state);
462 if (targetLoc == 0 && state->label == NULL)
463 return result;
464 }
465 for (ptr.p8 = start; result && ptr.p8 < end; ) {
466 switch(*ptr.p8 >> 6) {
467 uleb128_t value;
468
469 case 0:
470 switch(*ptr.p8++) {
471 case DW_CFA_nop:
472 break;
473 case DW_CFA_set_loc:
474 if ((state->loc = read_pointer(&ptr.p8, end, ptrType)) == 0)
475 result = 0;
476 break;
477 case DW_CFA_advance_loc1:
478 result = ptr.p8 < end && advance_loc(*ptr.p8++, state);
479 break;
480 case DW_CFA_advance_loc2:
481 result = ptr.p8 <= end + 2
482 && advance_loc(*ptr.p16++, state);
483 break;
484 case DW_CFA_advance_loc4:
485 result = ptr.p8 <= end + 4
486 && advance_loc(*ptr.p32++, state);
487 break;
488 case DW_CFA_offset_extended:
489 value = get_uleb128(&ptr.p8, end);
490 set_rule(value, Memory, get_uleb128(&ptr.p8, end), state);
491 break;
492 case DW_CFA_val_offset:
493 value = get_uleb128(&ptr.p8, end);
494 set_rule(value, Value, get_uleb128(&ptr.p8, end), state);
495 break;
496 case DW_CFA_offset_extended_sf:
497 value = get_uleb128(&ptr.p8, end);
498 set_rule(value, Memory, get_sleb128(&ptr.p8, end), state);
499 break;
500 case DW_CFA_val_offset_sf:
501 value = get_uleb128(&ptr.p8, end);
502 set_rule(value, Value, get_sleb128(&ptr.p8, end), state);
503 break;
504 case DW_CFA_restore_extended:
505 case DW_CFA_undefined:
506 case DW_CFA_same_value:
507 set_rule(get_uleb128(&ptr.p8, end), Nowhere, 0, state);
508 break;
509 case DW_CFA_register:
510 value = get_uleb128(&ptr.p8, end);
511 set_rule(value,
512 Register,
513 get_uleb128(&ptr.p8, end), state);
514 break;
515 case DW_CFA_remember_state:
516 if (ptr.p8 == state->label) {
517 state->label = NULL;
518 return 1;
519 }
520 if (state->stackDepth >= MAX_STACK_DEPTH)
521 return 0;
522 state->stack[state->stackDepth++] = ptr.p8;
523 break;
524 case DW_CFA_restore_state:
525 if (state->stackDepth) {
526 const uleb128_t loc = state->loc;
527 const u8 *label = state->label;
528
529 state->label = state->stack[state->stackDepth - 1];
530 memcpy(&state->cfa, &badCFA, sizeof(state->cfa));
531 memset(state->regs, 0, sizeof(state->regs));
532 state->stackDepth = 0;
533 result = processCFI(start, end, 0, ptrType, state);
534 state->loc = loc;
535 state->label = label;
536 } else
537 return 0;
538 break;
539 case DW_CFA_def_cfa:
540 state->cfa.reg = get_uleb128(&ptr.p8, end);
541 /*nobreak*/
542 case DW_CFA_def_cfa_offset:
543 state->cfa.offs = get_uleb128(&ptr.p8, end);
544 break;
545 case DW_CFA_def_cfa_sf:
546 state->cfa.reg = get_uleb128(&ptr.p8, end);
547 /*nobreak*/
548 case DW_CFA_def_cfa_offset_sf:
549 state->cfa.offs = get_sleb128(&ptr.p8, end)
550 * state->dataAlign;
551 break;
552 case DW_CFA_def_cfa_register:
553 state->cfa.reg = get_uleb128(&ptr.p8, end);
554 break;
555 /*todo case DW_CFA_def_cfa_expression: */
556 /*todo case DW_CFA_expression: */
557 /*todo case DW_CFA_val_expression: */
558 case DW_CFA_GNU_args_size:
559 get_uleb128(&ptr.p8, end);
560 break;
561 case DW_CFA_GNU_negative_offset_extended:
562 value = get_uleb128(&ptr.p8, end);
563 set_rule(value,
564 Memory,
565 (uleb128_t)0 - get_uleb128(&ptr.p8, end), state);
566 break;
567 case DW_CFA_GNU_window_save:
568 default:
569 result = 0;
570 break;
571 }
572 break;
573 case 1:
574 result = advance_loc(*ptr.p8++ & 0x3f, state);
575 break;
576 case 2:
577 value = *ptr.p8++ & 0x3f;
578 set_rule(value, Memory, get_uleb128(&ptr.p8, end), state);
579 break;
580 case 3:
581 set_rule(*ptr.p8++ & 0x3f, Nowhere, 0, state);
582 break;
583 }
584 if (ptr.p8 > end)
585 result = 0;
586 if (result && targetLoc != 0 && targetLoc < state->loc)
587 return 1;
588 }
589
590 return result
591 && ptr.p8 == end
592 && (targetLoc == 0
593 || (/*todo While in theory this should apply, gcc in practice omits
594 everything past the function prolog, and hence the location
595 never reaches the end of the function.
596 targetLoc < state->loc &&*/ state->label == NULL));
597}
598
599/* Unwind to previous to frame. Returns 0 if successful, negative
600 * number in case of an error. */
601int unwind(struct unwind_frame_info *frame)
602{
603#define FRAME_REG(r, t) (((t *)frame)[reg_info[r].offs])
604 const u32 *fde = NULL, *cie = NULL;
605 const u8 *ptr = NULL, *end = NULL;
606 unsigned long startLoc = 0, endLoc = 0, cfa;
607 unsigned i;
608 signed ptrType = -1;
609 uleb128_t retAddrReg = 0;
610 struct unwind_table *table;
611 struct unwind_state state;
612
613 if (UNW_PC(frame) == 0)
614 return -EINVAL;
615 if ((table = find_table(UNW_PC(frame))) != NULL
616 && !(table->size & (sizeof(*fde) - 1))) {
617 unsigned long tableSize = table->size;
618
619 for (fde = table->address;
620 tableSize > sizeof(*fde) && tableSize - sizeof(*fde) >= *fde;
621 tableSize -= sizeof(*fde) + *fde,
622 fde += 1 + *fde / sizeof(*fde)) {
623 if (!*fde || (*fde & (sizeof(*fde) - 1)))
624 break;
625 if (!fde[1])
626 continue; /* this is a CIE */
627 if ((fde[1] & (sizeof(*fde) - 1))
628 || fde[1] > (unsigned long)(fde + 1)
629 - (unsigned long)table->address)
630 continue; /* this is not a valid FDE */
631 cie = fde + 1 - fde[1] / sizeof(*fde);
632 if (*cie <= sizeof(*cie) + 4
633 || *cie >= fde[1] - sizeof(*fde)
634 || (*cie & (sizeof(*cie) - 1))
635 || cie[1]
636 || (ptrType = fde_pointer_type(cie)) < 0) {
637 cie = NULL; /* this is not a (valid) CIE */
638 continue;
639 }
640 ptr = (const u8 *)(fde + 2);
641 startLoc = read_pointer(&ptr,
642 (const u8 *)(fde + 1) + *fde,
643 ptrType);
644 endLoc = startLoc
645 + read_pointer(&ptr,
646 (const u8 *)(fde + 1) + *fde,
647 ptrType & DW_EH_PE_indirect
648 ? ptrType
649 : ptrType & (DW_EH_PE_FORM|DW_EH_PE_signed));
650 if (UNW_PC(frame) >= startLoc && UNW_PC(frame) < endLoc)
651 break;
652 cie = NULL;
653 }
654 }
655 if (cie != NULL) {
656 memset(&state, 0, sizeof(state));
657 state.cieEnd = ptr; /* keep here temporarily */
658 ptr = (const u8 *)(cie + 2);
659 end = (const u8 *)(cie + 1) + *cie;
660 if ((state.version = *ptr) != 1)
661 cie = NULL; /* unsupported version */
662 else if (*++ptr) {
663 /* check if augmentation size is first (and thus present) */
664 if (*ptr == 'z') {
665 /* check for ignorable (or already handled)
666 * nul-terminated augmentation string */
667 while (++ptr < end && *ptr)
668 if (strchr("LPR", *ptr) == NULL)
669 break;
670 }
671 if (ptr >= end || *ptr)
672 cie = NULL;
673 }
674 ++ptr;
675 }
676 if (cie != NULL) {
677 /* get code aligment factor */
678 state.codeAlign = get_uleb128(&ptr, end);
679 /* get data aligment factor */
680 state.dataAlign = get_sleb128(&ptr, end);
681 if (state.codeAlign == 0 || state.dataAlign == 0 || ptr >= end)
682 cie = NULL;
683 else {
684 retAddrReg = state.version <= 1 ? *ptr++ : get_uleb128(&ptr, end);
685 /* skip augmentation */
686 if (((const char *)(cie + 2))[1] == 'z')
687 ptr += get_uleb128(&ptr, end);
688 if (ptr > end
689 || retAddrReg >= ARRAY_SIZE(reg_info)
690 || REG_INVALID(retAddrReg)
691 || reg_info[retAddrReg].width != sizeof(unsigned long))
692 cie = NULL;
693 }
694 }
695 if (cie != NULL) {
696 state.cieStart = ptr;
697 ptr = state.cieEnd;
698 state.cieEnd = end;
699 end = (const u8 *)(fde + 1) + *fde;
700 /* skip augmentation */
701 if (((const char *)(cie + 2))[1] == 'z') {
702 uleb128_t augSize = get_uleb128(&ptr, end);
703
704 if ((ptr += augSize) > end)
705 fde = NULL;
706 }
707 }
708 if (cie == NULL || fde == NULL) {
709#ifdef CONFIG_FRAME_POINTER
710 unsigned long top, bottom;
711#endif
712
713#ifdef CONFIG_FRAME_POINTER
714 top = STACK_TOP(frame->task);
715 bottom = STACK_BOTTOM(frame->task);
716# if FRAME_RETADDR_OFFSET < 0
717 if (UNW_SP(frame) < top
718 && UNW_FP(frame) <= UNW_SP(frame)
719 && bottom < UNW_FP(frame)
720# else
721 if (UNW_SP(frame) > top
722 && UNW_FP(frame) >= UNW_SP(frame)
723 && bottom > UNW_FP(frame)
724# endif
725 && !((UNW_SP(frame) | UNW_FP(frame))
726 & (sizeof(unsigned long) - 1))) {
727 unsigned long link;
728
729 if (!__get_user(link,
730 (unsigned long *)(UNW_FP(frame)
731 + FRAME_LINK_OFFSET))
732# if FRAME_RETADDR_OFFSET < 0
733 && link > bottom && link < UNW_FP(frame)
734# else
735 && link > UNW_FP(frame) && link < bottom
736# endif
737 && !(link & (sizeof(link) - 1))
738 && !__get_user(UNW_PC(frame),
739 (unsigned long *)(UNW_FP(frame)
740 + FRAME_RETADDR_OFFSET))) {
741 UNW_SP(frame) = UNW_FP(frame) + FRAME_RETADDR_OFFSET
742# if FRAME_RETADDR_OFFSET < 0
743 -
744# else
745 +
746# endif
747 sizeof(UNW_PC(frame));
748 UNW_FP(frame) = link;
749 return 0;
750 }
751 }
752#endif
753 return -ENXIO;
754 }
755 state.org = startLoc;
756 memcpy(&state.cfa, &badCFA, sizeof(state.cfa));
757 /* process instructions */
758 if (!processCFI(ptr, end, UNW_PC(frame), ptrType, &state)
759 || state.loc > endLoc
760 || state.regs[retAddrReg].where == Nowhere
761 || state.cfa.reg >= ARRAY_SIZE(reg_info)
762 || reg_info[state.cfa.reg].width != sizeof(unsigned long)
763 || state.cfa.offs % sizeof(unsigned long))
764 return -EIO;
765 /* update frame */
766 cfa = FRAME_REG(state.cfa.reg, unsigned long) + state.cfa.offs;
767 startLoc = min((unsigned long)UNW_SP(frame), cfa);
768 endLoc = max((unsigned long)UNW_SP(frame), cfa);
769 if (STACK_LIMIT(startLoc) != STACK_LIMIT(endLoc)) {
770 startLoc = min(STACK_LIMIT(cfa), cfa);
771 endLoc = max(STACK_LIMIT(cfa), cfa);
772 }
773#ifndef CONFIG_64BIT
774# define CASES CASE(8); CASE(16); CASE(32)
775#else
776# define CASES CASE(8); CASE(16); CASE(32); CASE(64)
777#endif
778 for (i = 0; i < ARRAY_SIZE(state.regs); ++i) {
779 if (REG_INVALID(i)) {
780 if (state.regs[i].where == Nowhere)
781 continue;
782 return -EIO;
783 }
784 switch(state.regs[i].where) {
785 default:
786 break;
787 case Register:
788 if (state.regs[i].value >= ARRAY_SIZE(reg_info)
789 || REG_INVALID(state.regs[i].value)
790 || reg_info[i].width > reg_info[state.regs[i].value].width)
791 return -EIO;
792 switch(reg_info[state.regs[i].value].width) {
793#define CASE(n) \
794 case sizeof(u##n): \
795 state.regs[i].value = FRAME_REG(state.regs[i].value, \
796 const u##n); \
797 break
798 CASES;
799#undef CASE
800 default:
801 return -EIO;
802 }
803 break;
804 }
805 }
806 for (i = 0; i < ARRAY_SIZE(state.regs); ++i) {
807 if (REG_INVALID(i))
808 continue;
809 switch(state.regs[i].where) {
810 case Nowhere:
811 if (reg_info[i].width != sizeof(UNW_SP(frame))
812 || &FRAME_REG(i, __typeof__(UNW_SP(frame)))
813 != &UNW_SP(frame))
814 continue;
815 UNW_SP(frame) = cfa;
816 break;
817 case Register:
818 switch(reg_info[i].width) {
819#define CASE(n) case sizeof(u##n): \
820 FRAME_REG(i, u##n) = state.regs[i].value; \
821 break
822 CASES;
823#undef CASE
824 default:
825 return -EIO;
826 }
827 break;
828 case Value:
829 if (reg_info[i].width != sizeof(unsigned long))
830 return -EIO;
831 FRAME_REG(i, unsigned long) = cfa + state.regs[i].value
832 * state.dataAlign;
833 break;
834 case Memory: {
835 unsigned long addr = cfa + state.regs[i].value
836 * state.dataAlign;
837
838 if ((state.regs[i].value * state.dataAlign)
839 % sizeof(unsigned long)
840 || addr < startLoc
841 || addr + sizeof(unsigned long) < addr
842 || addr + sizeof(unsigned long) > endLoc)
843 return -EIO;
844 switch(reg_info[i].width) {
845#define CASE(n) case sizeof(u##n): \
846 __get_user(FRAME_REG(i, u##n), (u##n *)addr); \
847 break
848 CASES;
849#undef CASE
850 default:
851 return -EIO;
852 }
853 }
854 break;
855 }
856 }
857
858 return 0;
859#undef CASES
860#undef FRAME_REG
861}
862EXPORT_SYMBOL(unwind);
863
864int unwind_init_frame_info(struct unwind_frame_info *info,
865 struct task_struct *tsk,
866 /*const*/ struct pt_regs *regs)
867{
868 info->task = tsk;
869 arch_unw_init_frame_info(info, regs);
870
871 return 0;
872}
873EXPORT_SYMBOL(unwind_init_frame_info);
874
875/*
876 * Prepare to unwind a blocked task.
877 */
878int unwind_init_blocked(struct unwind_frame_info *info,
879 struct task_struct *tsk)
880{
881 info->task = tsk;
882 arch_unw_init_blocked(info);
883
884 return 0;
885}
886EXPORT_SYMBOL(unwind_init_blocked);
887
888/*
889 * Prepare to unwind the currently running thread.
890 */
891int unwind_init_running(struct unwind_frame_info *info,
892 asmlinkage int (*callback)(struct unwind_frame_info *,
893 void *arg),
894 void *arg)
895{
896 info->task = current;
897
898 return arch_unwind_init_running(info, callback, arg);
899}
900EXPORT_SYMBOL(unwind_init_running);
901
/*
 * Unwind until the return pointer is in user-land (or until an error
 * occurs).  Returns 0 if successful, negative number in case of
 * error.
 */
int unwind_to_user(struct unwind_frame_info *info)
{
	int err;

	for (;;) {
		/* Done as soon as the frame's PC points into user space. */
		if (arch_unw_user_mode(info))
			return 0;

		err = unwind(info);
		if (err < 0)
			return err;
	}
}
EXPORT_SYMBOL(unwind_to_user);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 565cf7a1fe..59f0b42bd8 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -559,7 +559,7 @@ static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
559} 559}
560 560
561/* We're holding the cpucontrol mutex here */ 561/* We're holding the cpucontrol mutex here */
562static int workqueue_cpu_callback(struct notifier_block *nfb, 562static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
563 unsigned long action, 563 unsigned long action,
564 void *hcpu) 564 void *hcpu)
565{ 565{
diff --git a/lib/Kconfig b/lib/Kconfig
index 3de93357f5..f6299342b8 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -86,4 +86,10 @@ config TEXTSEARCH_BM
86config TEXTSEARCH_FSM 86config TEXTSEARCH_FSM
87 tristate 87 tristate
88 88
89#
90# plist support is select#ed if needed
91#
92config PLIST
93 boolean
94
89endmenu 95endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ccb0c1fdf1..5330911ebd 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -107,6 +107,24 @@ config DEBUG_MUTEXES
107 This allows mutex semantics violations and mutex related deadlocks 107 This allows mutex semantics violations and mutex related deadlocks
108 (lockups) to be detected and reported automatically. 108 (lockups) to be detected and reported automatically.
109 109
110config DEBUG_RT_MUTEXES
111 bool "RT Mutex debugging, deadlock detection"
112 depends on DEBUG_KERNEL && RT_MUTEXES
113 help
114 This allows rt mutex semantics violations and rt mutex related
115 deadlocks (lockups) to be detected and reported automatically.
116
117config DEBUG_PI_LIST
118 bool
119 default y
120 depends on DEBUG_RT_MUTEXES
121
122config RT_MUTEX_TESTER
123 bool "Built-in scriptable tester for rt-mutexes"
124 depends on DEBUG_KERNEL && RT_MUTEXES
125 help
126 This option enables a rt-mutex tester.
127
110config DEBUG_SPINLOCK 128config DEBUG_SPINLOCK
111 bool "Spinlock debugging" 129 bool "Spinlock debugging"
112 depends on DEBUG_KERNEL 130 depends on DEBUG_KERNEL
@@ -188,14 +206,22 @@ config FRAME_POINTER
188 206
189config UNWIND_INFO 207config UNWIND_INFO
190 bool "Compile the kernel with frame unwind information" 208 bool "Compile the kernel with frame unwind information"
191 depends on !IA64 209 depends on !IA64 && !PARISC
192 depends on !MODULES || !(MIPS || PARISC || PPC || SUPERH || V850) 210 depends on !MODULES || !(MIPS || PPC || SUPERH || V850)
193 help 211 help
194 If you say Y here the resulting kernel image will be slightly larger 212 If you say Y here the resulting kernel image will be slightly larger
195 but not slower, and it will give very useful debugging information. 213 but not slower, and it will give very useful debugging information.
196 If you don't debug the kernel, you can say N, but we may not be able 214 If you don't debug the kernel, you can say N, but we may not be able
197 to solve problems without frame unwind information or frame pointers. 215 to solve problems without frame unwind information or frame pointers.
198 216
217config STACK_UNWIND
218 bool "Stack unwind support"
219 depends on UNWIND_INFO
220 depends on X86
221 help
222 This enables more precise stack traces, omitting all unrelated
223 occurrences of pointers into kernel code from the dump.
224
199config FORCED_INLINING 225config FORCED_INLINING
200 bool "Force gcc to inline functions marked 'inline'" 226 bool "Force gcc to inline functions marked 'inline'"
201 depends on DEBUG_KERNEL 227 depends on DEBUG_KERNEL
diff --git a/lib/Makefile b/lib/Makefile
index 79358ad1f1..10c13c9d78 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -25,6 +25,7 @@ lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o
25lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o 25lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
26lib-$(CONFIG_GENERIC_HWEIGHT) += hweight.o 26lib-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
27obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o 27obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
28obj-$(CONFIG_PLIST) += plist.o
28obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o 29obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
29 30
30ifneq ($(CONFIG_HAVE_DEC_LOCK),y) 31ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
diff --git a/lib/idr.c b/lib/idr.c
index de19030a99..4d09681951 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -29,6 +29,7 @@
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/module.h> 30#include <linux/module.h>
31#endif 31#endif
32#include <linux/err.h>
32#include <linux/string.h> 33#include <linux/string.h>
33#include <linux/idr.h> 34#include <linux/idr.h>
34 35
@@ -398,6 +399,48 @@ void *idr_find(struct idr *idp, int id)
398} 399}
399EXPORT_SYMBOL(idr_find); 400EXPORT_SYMBOL(idr_find);
400 401
/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A -ENOENT return indicates that @id was not found.
 * A -EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	/* Total number of id bits covered by the current tree height. */
	n = idp->layers * IDR_BITS;
	p = idp->top;

	id &= MAX_ID_MASK;

	/* An id beyond the tree's reach cannot be present. */
	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	/* Descend one layer per iteration, indexing by the relevant
	 * IDR_BITS-wide slice of @id; @p may become NULL on a hole. */
	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	/* Leaf slot index; the bitmap records which slots are occupied. */
	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	p->ary[n] = ptr;

	return old_p;
}
EXPORT_SYMBOL(idr_replace);
443
401static void idr_cache_ctor(void * idr_layer, kmem_cache_t *idr_layer_cache, 444static void idr_cache_ctor(void * idr_layer, kmem_cache_t *idr_layer_cache,
402 unsigned long flags) 445 unsigned long flags)
403{ 446{
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index cb5490ec00..e713e86811 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -14,7 +14,7 @@
14 * The 'big kernel semaphore' 14 * The 'big kernel semaphore'
15 * 15 *
16 * This mutex is taken and released recursively by lock_kernel() 16 * This mutex is taken and released recursively by lock_kernel()
17 * and unlock_kernel(). It is transparently dropped and reaquired 17 * and unlock_kernel(). It is transparently dropped and reacquired
18 * over schedule(). It is used to protect legacy code that hasn't 18 * over schedule(). It is used to protect legacy code that hasn't
19 * been migrated to a proper locking design yet. 19 * been migrated to a proper locking design yet.
20 * 20 *
@@ -92,7 +92,7 @@ void __lockfunc unlock_kernel(void)
92 * The 'big kernel lock' 92 * The 'big kernel lock'
93 * 93 *
94 * This spinlock is taken and released recursively by lock_kernel() 94 * This spinlock is taken and released recursively by lock_kernel()
95 * and unlock_kernel(). It is transparently dropped and reaquired 95 * and unlock_kernel(). It is transparently dropped and reacquired
96 * over schedule(). It is used to protect legacy code that hasn't 96 * over schedule(). It is used to protect legacy code that hasn't
97 * been migrated to a proper locking design yet. 97 * been migrated to a proper locking design yet.
98 * 98 *
diff --git a/lib/plist.c b/lib/plist.c
new file mode 100644
index 0000000000..3074a02272
--- /dev/null
+++ b/lib/plist.c
@@ -0,0 +1,118 @@
1/*
2 * lib/plist.c
3 *
4 * Descending-priority-sorted double-linked list
5 *
6 * (C) 2002-2003 Intel Corp
7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>.
8 *
9 * 2001-2005 (c) MontaVista Software, Inc.
10 * Daniel Walker <dwalker@mvista.com>
11 *
12 * (C) 2005 Thomas Gleixner <tglx@linutronix.de>
13 *
14 * Simplifications of the original code by
15 * Oleg Nesterov <oleg@tv-sign.ru>
16 *
17 * Licensed under the FSF's GNU Public License v2 or later.
18 *
19 * Based on simple lists (include/linux/list.h).
20 *
21 * This file contains the add / del functions which are considered to
22 * be too large to inline. See include/linux/plist.h for further
23 * information.
24 */
25
26#include <linux/plist.h>
27#include <linux/spinlock.h>
28
29#ifdef CONFIG_DEBUG_PI_LIST
30
31static void plist_check_prev_next(struct list_head *t, struct list_head *p,
32 struct list_head *n)
33{
34 if (n->prev != p || p->next != n) {
35 printk("top: %p, n: %p, p: %p\n", t, t->next, t->prev);
36 printk("prev: %p, n: %p, p: %p\n", p, p->next, p->prev);
37 printk("next: %p, n: %p, p: %p\n", n, n->next, n->prev);
38 WARN_ON(1);
39 }
40}
41
42static void plist_check_list(struct list_head *top)
43{
44 struct list_head *prev = top, *next = top->next;
45
46 plist_check_prev_next(top, prev, next);
47 while (next != top) {
48 prev = next;
49 next = prev->next;
50 plist_check_prev_next(top, prev, next);
51 }
52}
53
54static void plist_check_head(struct plist_head *head)
55{
56 WARN_ON(!head->lock);
57 if (head->lock)
58 WARN_ON_SMP(!spin_is_locked(head->lock));
59 plist_check_list(&head->prio_list);
60 plist_check_list(&head->node_list);
61}
62
63#else
64# define plist_check_head(h) do { } while (0)
65#endif
66
/**
 * plist_add - add @node to @head
 *
 * @node: &struct plist_node pointer
 * @head: &struct plist_head pointer
 *
 * In debug builds plist_check_head() verifies that @head->lock is set
 * and held; @node must not already be queued (checked by the WARN_ON).
 */
void plist_add(struct plist_node *node, struct plist_head *head)
{
	struct plist_node *iter;

	plist_check_head(head);
	WARN_ON(!plist_node_empty(node));

	/*
	 * Scan the list of distinct priority levels for the insertion
	 * point: stop at the first level with a larger prio value, or
	 * splice into an existing level of equal prio.  If the loop
	 * completes without a goto, @iter refers to the list head itself,
	 * so the tail-adds below append @node at the very end.
	 */
	list_for_each_entry(iter, &head->prio_list, plist.prio_list) {
		if (node->prio < iter->prio)
			goto lt_prio;
		else if (node->prio == iter->prio) {
			/* Equal prio: step past the level's first node so
			 * only the node_list insertion happens below. */
			iter = list_entry(iter->plist.prio_list.next,
					struct plist_node, plist.prio_list);
			goto eq_prio;
		}
	}

lt_prio:
	/* New priority level: link @node into the prio list before @iter,
	 * then deliberately fall through to the node_list insertion. */
	list_add_tail(&node->plist.prio_list, &iter->plist.prio_list);
eq_prio:
	list_add_tail(&node->plist.node_list, &iter->plist.node_list);

	plist_check_head(head);
}
97
/**
 * plist_del - Remove a @node from plist.
 *
 * @node: &struct plist_node pointer - entry to be removed
 * @head: &struct plist_head pointer - list head
 */
void plist_del(struct plist_node *node, struct plist_head *head)
{
	plist_check_head(head);

	/*
	 * A non-empty prio_list means @node is the representative of its
	 * priority level on the prio list.  Promote the following node
	 * (presumably the next entry of the same level -- verify against
	 * include/linux/plist.h) into that role before unlinking @node,
	 * so the level stays reachable.
	 */
	if (!list_empty(&node->plist.prio_list)) {
		struct plist_node *next = plist_first(&node->plist);

		list_move_tail(&next->plist.prio_list, &node->plist.prio_list);
		list_del_init(&node->plist.prio_list);
	}

	/* Always drop @node from the full node list. */
	list_del_init(&node->plist.node_list);

	plist_check_head(head);
}
diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c
index 02a16eacb7..d84560c076 100644
--- a/lib/zlib_inflate/inffast.c
+++ b/lib/zlib_inflate/inffast.c
@@ -63,10 +63,10 @@
63 bytes, which is the maximum length that can be coded. inflate_fast() 63 bytes, which is the maximum length that can be coded. inflate_fast()
64 requires strm->avail_out >= 258 for each loop to avoid checking for 64 requires strm->avail_out >= 258 for each loop to avoid checking for
65 output space. 65 output space.
66
67 - @start: inflate()'s starting value for strm->avail_out
66 */ 68 */
67void inflate_fast(strm, start) 69void inflate_fast(z_streamp strm, unsigned start)
68z_streamp strm;
69unsigned start; /* inflate()'s starting value for strm->avail_out */
70{ 70{
71 struct inflate_state *state; 71 struct inflate_state *state;
72 unsigned char *in; /* local strm->next_in */ 72 unsigned char *in; /* local strm->next_in */
diff --git a/lib/zlib_inflate/inftrees.c b/lib/zlib_inflate/inftrees.c
index 62343c53bf..3fe6ce5b53 100644
--- a/lib/zlib_inflate/inftrees.c
+++ b/lib/zlib_inflate/inftrees.c
@@ -8,15 +8,6 @@
8 8
9#define MAXBITS 15 9#define MAXBITS 15
10 10
11const char inflate_copyright[] =
12 " inflate 1.2.3 Copyright 1995-2005 Mark Adler ";
13/*
14 If you use the zlib library in a product, an acknowledgment is welcome
15 in the documentation of your product. If for some reason you cannot
16 include such an acknowledgment, I would appreciate that you keep this
17 copyright string in the executable of your product.
18 */
19
20/* 11/*
21 Build a set of tables to decode the provided canonical Huffman code. 12 Build a set of tables to decode the provided canonical Huffman code.
22 The code lengths are lens[0..codes-1]. The result starts at *table, 13 The code lengths are lens[0..codes-1]. The result starts at *table,
@@ -29,13 +20,8 @@ const char inflate_copyright[] =
29 table index bits. It will differ if the request is greater than the 20 table index bits. It will differ if the request is greater than the
30 longest code or if it is less than the shortest code. 21 longest code or if it is less than the shortest code.
31 */ 22 */
32int zlib_inflate_table(type, lens, codes, table, bits, work) 23int zlib_inflate_table(codetype type, unsigned short *lens, unsigned codes,
33codetype type; 24 code **table, unsigned *bits, unsigned short *work)
34unsigned short *lens;
35unsigned codes;
36code **table;
37unsigned *bits;
38unsigned short *work;
39{ 25{
40 unsigned len; /* a code's length in bits */ 26 unsigned len; /* a code's length in bits */
41 unsigned sym; /* index of code symbols */ 27 unsigned sym; /* index of code symbols */
diff --git a/mm/Kconfig b/mm/Kconfig
index 66e65ab394..e76c023eb0 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -116,6 +116,7 @@ config SPARSEMEM_EXTREME
116config MEMORY_HOTPLUG 116config MEMORY_HOTPLUG
117 bool "Allow for memory hot-add" 117 bool "Allow for memory hot-add"
118 depends on SPARSEMEM && HOTPLUG && !SOFTWARE_SUSPEND 118 depends on SPARSEMEM && HOTPLUG && !SOFTWARE_SUSPEND
119 depends on (IA64 || X86 || PPC64)
119 120
120comment "Memory hotplug is currently incompatible with Software Suspend" 121comment "Memory hotplug is currently incompatible with Software Suspend"
121 depends on SPARSEMEM && HOTPLUG && SOFTWARE_SUSPEND 122 depends on SPARSEMEM && HOTPLUG && SOFTWARE_SUSPEND
diff --git a/mm/filemap.c b/mm/filemap.c
index 9c7334bafd..d504d6e988 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2095,14 +2095,21 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
2095 do { 2095 do {
2096 unsigned long index; 2096 unsigned long index;
2097 unsigned long offset; 2097 unsigned long offset;
2098 unsigned long maxlen;
2099 size_t copied; 2098 size_t copied;
2100 2099
2101 offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */ 2100 offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
2102 index = pos >> PAGE_CACHE_SHIFT; 2101 index = pos >> PAGE_CACHE_SHIFT;
2103 bytes = PAGE_CACHE_SIZE - offset; 2102 bytes = PAGE_CACHE_SIZE - offset;
2104 if (bytes > count) 2103
2105 bytes = count; 2104 /* Limit the size of the copy to the caller's write size */
2105 bytes = min(bytes, count);
2106
2107 /*
2108 * Limit the size of the copy to that of the current segment,
2109 * because fault_in_pages_readable() doesn't know how to walk
2110 * segments.
2111 */
2112 bytes = min(bytes, cur_iov->iov_len - iov_base);
2106 2113
2107 /* 2114 /*
2108 * Bring in the user page that we will copy from _first_. 2115 * Bring in the user page that we will copy from _first_.
@@ -2110,10 +2117,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
2110 * same page as we're writing to, without it being marked 2117 * same page as we're writing to, without it being marked
2111 * up-to-date. 2118 * up-to-date.
2112 */ 2119 */
2113 maxlen = cur_iov->iov_len - iov_base; 2120 fault_in_pages_readable(buf, bytes);
2114 if (maxlen > bytes)
2115 maxlen = bytes;
2116 fault_in_pages_readable(buf, maxlen);
2117 2121
2118 page = __grab_cache_page(mapping,index,&cached_page,&lru_pvec); 2122 page = __grab_cache_page(mapping,index,&cached_page,&lru_pvec);
2119 if (!page) { 2123 if (!page) {
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 841a077d5a..ea4038838b 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -21,6 +21,7 @@
21#include <linux/memory_hotplug.h> 21#include <linux/memory_hotplug.h>
22#include <linux/highmem.h> 22#include <linux/highmem.h>
23#include <linux/vmalloc.h> 23#include <linux/vmalloc.h>
24#include <linux/ioport.h>
24 25
25#include <asm/tlbflush.h> 26#include <asm/tlbflush.h>
26 27
@@ -126,6 +127,9 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
126 unsigned long i; 127 unsigned long i;
127 unsigned long flags; 128 unsigned long flags;
128 unsigned long onlined_pages = 0; 129 unsigned long onlined_pages = 0;
130 struct resource res;
131 u64 section_end;
132 unsigned long start_pfn;
129 struct zone *zone; 133 struct zone *zone;
130 int need_zonelists_rebuild = 0; 134 int need_zonelists_rebuild = 0;
131 135
@@ -148,10 +152,27 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
148 if (!populated_zone(zone)) 152 if (!populated_zone(zone))
149 need_zonelists_rebuild = 1; 153 need_zonelists_rebuild = 1;
150 154
151 for (i = 0; i < nr_pages; i++) { 155 res.start = (u64)pfn << PAGE_SHIFT;
152 struct page *page = pfn_to_page(pfn + i); 156 res.end = res.start + ((u64)nr_pages << PAGE_SHIFT) - 1;
153 online_page(page); 157 res.flags = IORESOURCE_MEM; /* we just need system ram */
154 onlined_pages++; 158 section_end = res.end;
159
160 while (find_next_system_ram(&res) >= 0) {
161 start_pfn = (unsigned long)(res.start >> PAGE_SHIFT);
162 nr_pages = (unsigned long)
163 ((res.end + 1 - res.start) >> PAGE_SHIFT);
164
165 if (PageReserved(pfn_to_page(start_pfn))) {
166 /* this region's page is not onlined now */
167 for (i = 0; i < nr_pages; i++) {
168 struct page *page = pfn_to_page(start_pfn + i);
169 online_page(page);
170 onlined_pages++;
171 }
172 }
173
174 res.start = res.end + 1;
175 res.end = section_end;
155 } 176 }
156 zone->present_pages += onlined_pages; 177 zone->present_pages += onlined_pages;
157 zone->zone_pgdat->node_present_pages += onlined_pages; 178 zone->zone_pgdat->node_present_pages += onlined_pages;
@@ -163,3 +184,100 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
163 vm_total_pages = nr_free_pagecache_pages(); 184 vm_total_pages = nr_free_pagecache_pages();
164 return 0; 185 return 0;
165} 186}
187
/*
 * Allocate and minimally initialize a pgdat for a node coming online.
 * @nid:   node id the pgdat is for
 * @start: physical start address of the node's first memory
 *
 * Returns the new pgdat, or NULL if the arch allocation failed.
 */
static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};
	unsigned long start_pfn = start >> PAGE_SHIFT;

	pgdat = arch_alloc_nodedata(nid);
	if (!pgdat)
		return NULL;

	/* Publish the pgdat so NODE_DATA(nid) works from here on. */
	arch_refresh_nodedata(nid, pgdat);

	/* we can use NODE_DATA(nid) from here */

	/* init node's zones as empty zones, we don't have any present pages.*/
	free_area_init_node(nid, pgdat, zones_size, start_pfn, zholes_size);

	return pgdat;
}
208
209static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
210{
211 arch_refresh_nodedata(nid, NULL);
212 arch_free_nodedata(pgdat);
213 return;
214}
215
216/* add this memory to iomem resource */
217static void register_memory_resource(u64 start, u64 size)
218{
219 struct resource *res;
220
221 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
222 BUG_ON(!res);
223
224 res->name = "System RAM";
225 res->start = start;
226 res->end = start + size - 1;
227 res->flags = IORESOURCE_MEM;
228 if (request_resource(&iomem_resource, res) < 0) {
229 printk("System RAM resource %llx - %llx cannot be added\n",
230 (unsigned long long)res->start, (unsigned long long)res->end);
231 kfree(res);
232 }
233}
234
235
236
/*
 * add_memory - hot-add a memory range to the given node
 * @nid:   node id the memory should belong to
 * @start: physical start address of the range
 * @size:  size of the range in bytes
 *
 * Creates the node's pgdat (and its kswapd) if the node is not yet
 * online, asks the architecture to add the memory, marks the node
 * online, and registers the range as a "System RAM" iomem resource.
 * Returns 0 on success or a negative errno.
 */
int add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat = NULL;
	int new_pgdat = 0;
	int ret;

	if (!node_online(nid)) {
		/* First memory for this node: build its pgdat and kswapd. */
		pgdat = hotadd_new_pgdat(nid, start);
		if (!pgdat)
			return -ENOMEM;
		new_pgdat = 1;
		ret = kswapd_run(nid);
		if (ret)
			goto error;
	}

	/* call arch's memory hotadd */
	ret = arch_add_memory(nid, start, size);

	if (ret < 0)
		goto error;

	/* we online node here. we can't roll back from here. */
	node_set_online(nid);

	if (new_pgdat) {
		ret = register_one_node(nid);
		/*
		 * If the sysfs file of the new node cannot be created,
		 * CPUs on that node cannot be hot-added later.  There is
		 * no rollback path at this point, so catch the failure
		 * with BUG_ON() reluctantly.
		 */
		BUG_ON(ret);
	}

	/* register this memory as resource */
	register_memory_resource(start, size);

	return ret;
error:
	/* rollback pgdat allocation and others */
	if (new_pgdat)
		rollback_node_hotadd(nid, pgdat);

	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 73e0f23b7f..6b9740bbf4 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1821,7 +1821,7 @@ static inline void check_huge_range(struct vm_area_struct *vma,
1821 1821
1822int show_numa_map(struct seq_file *m, void *v) 1822int show_numa_map(struct seq_file *m, void *v)
1823{ 1823{
1824 struct task_struct *task = m->private; 1824 struct proc_maps_private *priv = m->private;
1825 struct vm_area_struct *vma = v; 1825 struct vm_area_struct *vma = v;
1826 struct numa_maps *md; 1826 struct numa_maps *md;
1827 struct file *file = vma->vm_file; 1827 struct file *file = vma->vm_file;
@@ -1837,7 +1837,7 @@ int show_numa_map(struct seq_file *m, void *v)
1837 return 0; 1837 return 0;
1838 1838
1839 mpol_to_str(buffer, sizeof(buffer), 1839 mpol_to_str(buffer, sizeof(buffer),
1840 get_vma_policy(task, vma, vma->vm_start)); 1840 get_vma_policy(priv->task, vma, vma->vm_start));
1841 1841
1842 seq_printf(m, "%08lx %s", vma->vm_start, buffer); 1842 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1843 1843
@@ -1891,7 +1891,7 @@ out:
1891 kfree(md); 1891 kfree(md);
1892 1892
1893 if (m->count < m->size) 1893 if (m->count < m->size)
1894 m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0; 1894 m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
1895 return 0; 1895 return 0;
1896} 1896}
1897 1897
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 8ccf6f1b14..4ec7026c7b 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -516,14 +516,14 @@ static void set_ratelimit(void)
516 ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE; 516 ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
517} 517}
518 518
519static int 519static int __cpuinit
520ratelimit_handler(struct notifier_block *self, unsigned long u, void *v) 520ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
521{ 521{
522 set_ratelimit(); 522 set_ratelimit();
523 return 0; 523 return 0;
524} 524}
525 525
526static struct notifier_block ratelimit_nb = { 526static struct notifier_block __cpuinitdata ratelimit_nb = {
527 .notifier_call = ratelimit_handler, 527 .notifier_call = ratelimit_handler,
528 .next = NULL, 528 .next = NULL,
529}; 529};
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6c1174fcf5..084a2de7e5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -266,7 +266,7 @@ static inline void rmv_page_order(struct page *page)
266 * satisfies the following equation: 266 * satisfies the following equation:
267 * P = B & ~(1 << O) 267 * P = B & ~(1 << O)
268 * 268 *
269 * Assumption: *_mem_map is contigious at least up to MAX_ORDER 269 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
270 */ 270 */
271static inline struct page * 271static inline struct page *
272__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order) 272__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
@@ -446,8 +446,8 @@ static void __free_pages_ok(struct page *page, unsigned int order)
446 446
447 arch_free_page(page, order); 447 arch_free_page(page, order);
448 if (!PageHighMem(page)) 448 if (!PageHighMem(page))
449 mutex_debug_check_no_locks_freed(page_address(page), 449 debug_check_no_locks_freed(page_address(page),
450 PAGE_SIZE<<order); 450 PAGE_SIZE<<order);
451 451
452 for (i = 0 ; i < (1 << order) ; ++i) 452 for (i = 0 ; i < (1 << order) ; ++i)
453 reserved += free_pages_check(page + i); 453 reserved += free_pages_check(page + i);
@@ -2009,7 +2009,7 @@ static inline void free_zone_pagesets(int cpu)
2009 } 2009 }
2010} 2010}
2011 2011
2012static int pageset_cpuup_callback(struct notifier_block *nfb, 2012static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
2013 unsigned long action, 2013 unsigned long action,
2014 void *hcpu) 2014 void *hcpu)
2015{ 2015{
@@ -2031,7 +2031,7 @@ static int pageset_cpuup_callback(struct notifier_block *nfb,
2031 return ret; 2031 return ret;
2032} 2032}
2033 2033
2034static struct notifier_block pageset_notifier = 2034static struct notifier_block __cpuinitdata pageset_notifier =
2035 { &pageset_cpuup_callback, NULL, 0 }; 2035 { &pageset_cpuup_callback, NULL, 0 };
2036 2036
2037void __init setup_per_cpu_pageset(void) 2037void __init setup_per_cpu_pageset(void)
diff --git a/mm/readahead.c b/mm/readahead.c
index e39e416860..aa7ec42465 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -390,8 +390,8 @@ int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
390 * Read 'nr_to_read' pages starting at page 'offset'. If the flag 'block' 390 * Read 'nr_to_read' pages starting at page 'offset'. If the flag 'block'
391 * is set wait till the read completes. Otherwise attempt to read without 391 * is set wait till the read completes. Otherwise attempt to read without
392 * blocking. 392 * blocking.
393 * Returns 1 meaning 'success' if read is succesfull without switching off 393 * Returns 1 meaning 'success' if read is successful without switching off
394 * readhaead mode. Otherwise return failure. 394 * readahead mode. Otherwise return failure.
395 */ 395 */
396static int 396static int
397blockable_page_cache_readahead(struct address_space *mapping, struct file *filp, 397blockable_page_cache_readahead(struct address_space *mapping, struct file *filp,
diff --git a/mm/slab.c b/mm/slab.c
index 98ac20bc0d..233e39d14c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -89,6 +89,7 @@
89#include <linux/config.h> 89#include <linux/config.h>
90#include <linux/slab.h> 90#include <linux/slab.h>
91#include <linux/mm.h> 91#include <linux/mm.h>
92#include <linux/poison.h>
92#include <linux/swap.h> 93#include <linux/swap.h>
93#include <linux/cache.h> 94#include <linux/cache.h>
94#include <linux/interrupt.h> 95#include <linux/interrupt.h>
@@ -106,6 +107,7 @@
106#include <linux/nodemask.h> 107#include <linux/nodemask.h>
107#include <linux/mempolicy.h> 108#include <linux/mempolicy.h>
108#include <linux/mutex.h> 109#include <linux/mutex.h>
110#include <linux/rtmutex.h>
109 111
110#include <asm/uaccess.h> 112#include <asm/uaccess.h>
111#include <asm/cacheflush.h> 113#include <asm/cacheflush.h>
@@ -492,17 +494,6 @@ struct kmem_cache {
492#endif 494#endif
493 495
494#if DEBUG 496#if DEBUG
495/*
496 * Magic nums for obj red zoning.
497 * Placed in the first word before and the first word after an obj.
498 */
499#define RED_INACTIVE 0x5A2CF071UL /* when obj is inactive */
500#define RED_ACTIVE 0x170FC2A5UL /* when obj is active */
501
502/* ...and for poisoning */
503#define POISON_INUSE 0x5a /* for use-uninitialised poisoning */
504#define POISON_FREE 0x6b /* for use-after-free poisoning */
505#define POISON_END 0xa5 /* end-byte of poisoning */
506 497
507/* 498/*
508 * memory layout of objects: 499 * memory layout of objects:
@@ -1083,7 +1074,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
1083 1074
1084#endif 1075#endif
1085 1076
1086static int cpuup_callback(struct notifier_block *nfb, 1077static int __devinit cpuup_callback(struct notifier_block *nfb,
1087 unsigned long action, void *hcpu) 1078 unsigned long action, void *hcpu)
1088{ 1079{
1089 long cpu = (long)hcpu; 1080 long cpu = (long)hcpu;
@@ -1265,7 +1256,9 @@ bad:
1265 return NOTIFY_BAD; 1256 return NOTIFY_BAD;
1266} 1257}
1267 1258
1268static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 }; 1259static struct notifier_block __cpuinitdata cpucache_notifier = {
1260 &cpuup_callback, NULL, 0
1261};
1269 1262
1270/* 1263/*
1271 * swap the static kmem_list3 with kmalloced memory 1264 * swap the static kmem_list3 with kmalloced memory
@@ -3405,7 +3398,7 @@ void kfree(const void *objp)
3405 local_irq_save(flags); 3398 local_irq_save(flags);
3406 kfree_debugcheck(objp); 3399 kfree_debugcheck(objp);
3407 c = virt_to_cache(objp); 3400 c = virt_to_cache(objp);
3408 mutex_debug_check_no_locks_freed(objp, obj_size(c)); 3401 debug_check_no_locks_freed(objp, obj_size(c));
3409 __cache_free(c, (void *)objp); 3402 __cache_free(c, (void *)objp);
3410 local_irq_restore(flags); 3403 local_irq_restore(flags);
3411} 3404}
diff --git a/mm/sparse.c b/mm/sparse.c
index e0a3fe48aa..c7a2b3a0e4 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -45,7 +45,7 @@ static struct mem_section *sparse_index_alloc(int nid)
45 45
46static int sparse_index_init(unsigned long section_nr, int nid) 46static int sparse_index_init(unsigned long section_nr, int nid)
47{ 47{
48 static spinlock_t index_init_lock = SPIN_LOCK_UNLOCKED; 48 static DEFINE_SPINLOCK(index_init_lock);
49 unsigned long root = SECTION_NR_TO_ROOT(section_nr); 49 unsigned long root = SECTION_NR_TO_ROOT(section_nr);
50 struct mem_section *section; 50 struct mem_section *section;
51 int ret = 0; 51 int ret = 0;
diff --git a/mm/swap.c b/mm/swap.c
index 03ae2076f9..990868afc1 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -86,8 +86,7 @@ int rotate_reclaimable_page(struct page *page)
86 zone = page_zone(page); 86 zone = page_zone(page);
87 spin_lock_irqsave(&zone->lru_lock, flags); 87 spin_lock_irqsave(&zone->lru_lock, flags);
88 if (PageLRU(page) && !PageActive(page)) { 88 if (PageLRU(page) && !PageActive(page)) {
89 list_del(&page->lru); 89 list_move_tail(&page->lru, &zone->inactive_list);
90 list_add_tail(&page->lru, &zone->inactive_list);
91 inc_page_state(pgrotated); 90 inc_page_state(pgrotated);
92 } 91 }
93 if (!test_clear_page_writeback(page)) 92 if (!test_clear_page_writeback(page))
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 72babac71d..eeacb0d695 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -34,6 +34,7 @@
34#include <linux/notifier.h> 34#include <linux/notifier.h>
35#include <linux/rwsem.h> 35#include <linux/rwsem.h>
36#include <linux/delay.h> 36#include <linux/delay.h>
37#include <linux/kthread.h>
37 38
38#include <asm/tlbflush.h> 39#include <asm/tlbflush.h>
39#include <asm/div64.h> 40#include <asm/div64.h>
@@ -1223,7 +1224,6 @@ static int kswapd(void *p)
1223 }; 1224 };
1224 cpumask_t cpumask; 1225 cpumask_t cpumask;
1225 1226
1226 daemonize("kswapd%d", pgdat->node_id);
1227 cpumask = node_to_cpumask(pgdat->node_id); 1227 cpumask = node_to_cpumask(pgdat->node_id);
1228 if (!cpus_empty(cpumask)) 1228 if (!cpus_empty(cpumask))
1229 set_cpus_allowed(tsk, cpumask); 1229 set_cpus_allowed(tsk, cpumask);
@@ -1450,7 +1450,7 @@ out:
1450 not required for correctness. So if the last cpu in a node goes 1450 not required for correctness. So if the last cpu in a node goes
1451 away, we get changed to run anywhere: as the first one comes back, 1451 away, we get changed to run anywhere: as the first one comes back,
1452 restore their cpu bindings. */ 1452 restore their cpu bindings. */
1453static int cpu_callback(struct notifier_block *nfb, 1453static int __devinit cpu_callback(struct notifier_block *nfb,
1454 unsigned long action, void *hcpu) 1454 unsigned long action, void *hcpu)
1455{ 1455{
1456 pg_data_t *pgdat; 1456 pg_data_t *pgdat;
@@ -1468,20 +1468,35 @@ static int cpu_callback(struct notifier_block *nfb,
1468} 1468}
1469#endif /* CONFIG_HOTPLUG_CPU */ 1469#endif /* CONFIG_HOTPLUG_CPU */
1470 1470
1471/*
1472 * This kswapd start function will be called by init and node-hot-add.
1473 * On node-hot-add, kswapd will moved to proper cpus if cpus are hot-added.
1474 */
1475int kswapd_run(int nid)
1476{
1477 pg_data_t *pgdat = NODE_DATA(nid);
1478 int ret = 0;
1479
1480 if (pgdat->kswapd)
1481 return 0;
1482
1483 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
1484 if (IS_ERR(pgdat->kswapd)) {
1485 /* failure at boot is fatal */
1486 BUG_ON(system_state == SYSTEM_BOOTING);
1487 printk("Failed to start kswapd on node %d\n",nid);
1488 ret = -1;
1489 }
1490 return ret;
1491}
1492
1471static int __init kswapd_init(void) 1493static int __init kswapd_init(void)
1472{ 1494{
1473 pg_data_t *pgdat; 1495 int nid;
1474 1496
1475 swap_setup(); 1497 swap_setup();
1476 for_each_online_pgdat(pgdat) { 1498 for_each_online_node(nid)
1477 pid_t pid; 1499 kswapd_run(nid);
1478
1479 pid = kernel_thread(kswapd, pgdat, CLONE_KERNEL);
1480 BUG_ON(pid < 0);
1481 read_lock(&tasklist_lock);
1482 pgdat->kswapd = find_task_by_pid(pid);
1483 read_unlock(&tasklist_lock);
1484 }
1485 hotcpu_notifier(cpu_callback, 0); 1500 hotcpu_notifier(cpu_callback, 0);
1486 return 0; 1501 return 0;
1487} 1502}
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index a48a5d5804..5fe77df001 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -1113,10 +1113,9 @@ static void check_qos_and_open_shortcut(struct k_message *msg, struct mpoa_clien
1113 1113
1114static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc) 1114static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1115{ 1115{
1116 unsigned char *ip;
1117
1118 uint32_t dst_ip = msg->content.in_info.in_dst_ip; 1116 uint32_t dst_ip = msg->content.in_info.in_dst_ip;
1119 in_cache_entry *entry = mpc->in_ops->get(dst_ip, mpc); 1117 in_cache_entry *entry = mpc->in_ops->get(dst_ip, mpc);
1118
1120 dprintk("mpoa: (%s) MPOA_res_reply_rcvd: ip %u.%u.%u.%u\n", mpc->dev->name, NIPQUAD(dst_ip)); 1119 dprintk("mpoa: (%s) MPOA_res_reply_rcvd: ip %u.%u.%u.%u\n", mpc->dev->name, NIPQUAD(dst_ip));
1121 ddprintk("mpoa: (%s) MPOA_res_reply_rcvd() entry = %p", mpc->dev->name, entry); 1120 ddprintk("mpoa: (%s) MPOA_res_reply_rcvd() entry = %p", mpc->dev->name, entry);
1122 if(entry == NULL){ 1121 if(entry == NULL){
diff --git a/net/core/dev.c b/net/core/dev.c
index ea2469398b..f1c52cbd6e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -230,7 +230,7 @@ extern void netdev_unregister_sysfs(struct net_device *);
230 * For efficiency 230 * For efficiency
231 */ 231 */
232 232
233int netdev_nit; 233static int netdev_nit;
234 234
235/* 235/*
236 * Add a protocol ID to the list. Now that the input handler is 236 * Add a protocol ID to the list. Now that the input handler is
@@ -1325,9 +1325,12 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
1325 nskb->next = NULL; 1325 nskb->next = NULL;
1326 rc = dev->hard_start_xmit(nskb, dev); 1326 rc = dev->hard_start_xmit(nskb, dev);
1327 if (unlikely(rc)) { 1327 if (unlikely(rc)) {
1328 nskb->next = skb->next;
1328 skb->next = nskb; 1329 skb->next = nskb;
1329 return rc; 1330 return rc;
1330 } 1331 }
1332 if (unlikely(netif_queue_stopped(dev) && skb->next))
1333 return NETDEV_TX_BUSY;
1331 } while (skb->next); 1334 } while (skb->next);
1332 1335
1333 skb->destructor = DEV_GSO_CB(skb)->destructor; 1336 skb->destructor = DEV_GSO_CB(skb)->destructor;
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 9cb7818303..471da451cd 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -54,6 +54,7 @@ static atomic_t trapped;
54 sizeof(struct iphdr) + sizeof(struct ethhdr)) 54 sizeof(struct iphdr) + sizeof(struct ethhdr))
55 55
56static void zap_completion_queue(void); 56static void zap_completion_queue(void);
57static void arp_reply(struct sk_buff *skb);
57 58
58static void queue_process(void *p) 59static void queue_process(void *p)
59{ 60{
@@ -153,6 +154,22 @@ static void poll_napi(struct netpoll *np)
153 } 154 }
154} 155}
155 156
157static void service_arp_queue(struct netpoll_info *npi)
158{
159 struct sk_buff *skb;
160
161 if (unlikely(!npi))
162 return;
163
164 skb = skb_dequeue(&npi->arp_tx);
165
166 while (skb != NULL) {
167 arp_reply(skb);
168 skb = skb_dequeue(&npi->arp_tx);
169 }
170 return;
171}
172
156void netpoll_poll(struct netpoll *np) 173void netpoll_poll(struct netpoll *np)
157{ 174{
158 if(!np->dev || !netif_running(np->dev) || !np->dev->poll_controller) 175 if(!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
@@ -163,6 +180,8 @@ void netpoll_poll(struct netpoll *np)
163 if (np->dev->poll) 180 if (np->dev->poll)
164 poll_napi(np); 181 poll_napi(np);
165 182
183 service_arp_queue(np->dev->npinfo);
184
166 zap_completion_queue(); 185 zap_completion_queue();
167} 186}
168 187
@@ -279,14 +298,10 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
279 * network drivers do not expect to be called if the queue is 298 * network drivers do not expect to be called if the queue is
280 * stopped. 299 * stopped.
281 */ 300 */
282 if (netif_queue_stopped(np->dev)) { 301 status = NETDEV_TX_BUSY;
283 netif_tx_unlock(np->dev); 302 if (!netif_queue_stopped(np->dev))
284 netpoll_poll(np); 303 status = np->dev->hard_start_xmit(skb, np->dev);
285 udelay(50);
286 continue;
287 }
288 304
289 status = np->dev->hard_start_xmit(skb, np->dev);
290 netif_tx_unlock(np->dev); 305 netif_tx_unlock(np->dev);
291 306
292 /* success */ 307 /* success */
@@ -446,7 +461,9 @@ int __netpoll_rx(struct sk_buff *skb)
446 int proto, len, ulen; 461 int proto, len, ulen;
447 struct iphdr *iph; 462 struct iphdr *iph;
448 struct udphdr *uh; 463 struct udphdr *uh;
449 struct netpoll *np = skb->dev->npinfo->rx_np; 464 struct netpoll_info *npi = skb->dev->npinfo;
465 struct netpoll *np = npi->rx_np;
466
450 467
451 if (!np) 468 if (!np)
452 goto out; 469 goto out;
@@ -456,7 +473,7 @@ int __netpoll_rx(struct sk_buff *skb)
456 /* check if netpoll clients need ARP */ 473 /* check if netpoll clients need ARP */
457 if (skb->protocol == __constant_htons(ETH_P_ARP) && 474 if (skb->protocol == __constant_htons(ETH_P_ARP) &&
458 atomic_read(&trapped)) { 475 atomic_read(&trapped)) {
459 arp_reply(skb); 476 skb_queue_tail(&npi->arp_tx, skb);
460 return 1; 477 return 1;
461 } 478 }
462 479
@@ -651,6 +668,7 @@ int netpoll_setup(struct netpoll *np)
651 npinfo->poll_owner = -1; 668 npinfo->poll_owner = -1;
652 npinfo->tries = MAX_RETRIES; 669 npinfo->tries = MAX_RETRIES;
653 spin_lock_init(&npinfo->rx_lock); 670 spin_lock_init(&npinfo->rx_lock);
671 skb_queue_head_init(&npinfo->arp_tx);
654 } else 672 } else
655 npinfo = ndev->npinfo; 673 npinfo = ndev->npinfo;
656 674
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 8e5044ba3a..6edbb90cbc 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1739,12 +1739,15 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
1739 unsigned int to, struct ts_config *config, 1739 unsigned int to, struct ts_config *config,
1740 struct ts_state *state) 1740 struct ts_state *state)
1741{ 1741{
1742 unsigned int ret;
1743
1742 config->get_next_block = skb_ts_get_next_block; 1744 config->get_next_block = skb_ts_get_next_block;
1743 config->finish = skb_ts_finish; 1745 config->finish = skb_ts_finish;
1744 1746
1745 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state)); 1747 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
1746 1748
1747 return textsearch_find(config, state); 1749 ret = textsearch_find(config, state);
1750 return (ret <= to - from ? ret : UINT_MAX);
1748} 1751}
1749 1752
1750/** 1753/**
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0e029c4e29..c04176be7e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2166,7 +2166,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int sg)
2166 if (!pskb_may_pull(skb, thlen)) 2166 if (!pskb_may_pull(skb, thlen))
2167 goto out; 2167 goto out;
2168 2168
2169 oldlen = ~htonl(skb->len); 2169 oldlen = (u16)~skb->len;
2170 __skb_pull(skb, thlen); 2170 __skb_pull(skb, thlen);
2171 2171
2172 segs = skb_segment(skb, sg); 2172 segs = skb_segment(skb, sg);
@@ -2174,7 +2174,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int sg)
2174 goto out; 2174 goto out;
2175 2175
2176 len = skb_shinfo(skb)->gso_size; 2176 len = skb_shinfo(skb)->gso_size;
2177 delta = csum_add(oldlen, htonl(thlen + len)); 2177 delta = htonl(oldlen + (thlen + len));
2178 2178
2179 skb = segs; 2179 skb = segs;
2180 th = skb->h.th; 2180 th = skb->h.th;
@@ -2183,10 +2183,10 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int sg)
2183 do { 2183 do {
2184 th->fin = th->psh = 0; 2184 th->fin = th->psh = 0;
2185 2185
2186 if (skb->ip_summed == CHECKSUM_NONE) { 2186 th->check = ~csum_fold(th->check + delta);
2187 th->check = csum_fold(csum_partial( 2187 if (skb->ip_summed != CHECKSUM_HW)
2188 skb->h.raw, thlen, csum_add(skb->csum, delta))); 2188 th->check = csum_fold(csum_partial(skb->h.raw, thlen,
2189 } 2189 skb->csum));
2190 2190
2191 seq += len; 2191 seq += len;
2192 skb = skb->next; 2192 skb = skb->next;
@@ -2196,11 +2196,11 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int sg)
2196 th->cwr = 0; 2196 th->cwr = 0;
2197 } while (skb->next); 2197 } while (skb->next);
2198 2198
2199 if (skb->ip_summed == CHECKSUM_NONE) { 2199 delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
2200 delta = csum_add(oldlen, htonl(skb->tail - skb->h.raw)); 2200 th->check = ~csum_fold(th->check + delta);
2201 th->check = csum_fold(csum_partial( 2201 if (skb->ip_summed != CHECKSUM_HW)
2202 skb->h.raw, thlen, csum_add(skb->csum, delta))); 2202 th->check = csum_fold(csum_partial(skb->h.raw, thlen,
2203 } 2203 skb->csum));
2204 2204
2205out: 2205out:
2206 return segs; 2206 return segs;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 8a77793278..e728980160 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -349,7 +349,7 @@ static struct rt6_info *rt6_select(struct rt6_info **head, int oif,
349 (strict & RT6_SELECT_F_REACHABLE) && 349 (strict & RT6_SELECT_F_REACHABLE) &&
350 last && last != rt0) { 350 last && last != rt0) {
351 /* no entries matched; do round-robin */ 351 /* no entries matched; do round-robin */
352 static spinlock_t lock = SPIN_LOCK_UNLOCKED; 352 static DEFINE_SPINLOCK(lock);
353 spin_lock(&lock); 353 spin_lock(&lock);
354 *head = rt0->u.next; 354 *head = rt0->u.next;
355 rt0->u.next = last->u.next; 355 rt0->u.next = last->u.next;
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index b3b9097c87..c11737f472 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -725,15 +725,17 @@ void nr_link_failed(ax25_cb *ax25, int reason)
725 struct nr_node *nr_node = NULL; 725 struct nr_node *nr_node = NULL;
726 726
727 spin_lock_bh(&nr_neigh_list_lock); 727 spin_lock_bh(&nr_neigh_list_lock);
728 nr_neigh_for_each(s, node, &nr_neigh_list) 728 nr_neigh_for_each(s, node, &nr_neigh_list) {
729 if (s->ax25 == ax25) { 729 if (s->ax25 == ax25) {
730 nr_neigh_hold(s); 730 nr_neigh_hold(s);
731 nr_neigh = s; 731 nr_neigh = s;
732 break; 732 break;
733 } 733 }
734 }
734 spin_unlock_bh(&nr_neigh_list_lock); 735 spin_unlock_bh(&nr_neigh_list_lock);
735 736
736 if (nr_neigh == NULL) return; 737 if (nr_neigh == NULL)
738 return;
737 739
738 nr_neigh->ax25 = NULL; 740 nr_neigh->ax25 = NULL;
739 ax25_cb_put(ax25); 741 ax25_cb_put(ax25);
@@ -743,11 +745,13 @@ void nr_link_failed(ax25_cb *ax25, int reason)
743 return; 745 return;
744 } 746 }
745 spin_lock_bh(&nr_node_list_lock); 747 spin_lock_bh(&nr_node_list_lock);
746 nr_node_for_each(nr_node, node, &nr_node_list) 748 nr_node_for_each(nr_node, node, &nr_node_list) {
747 nr_node_lock(nr_node); 749 nr_node_lock(nr_node);
748 if (nr_node->which < nr_node->count && nr_node->routes[nr_node->which].neighbour == nr_neigh) 750 if (nr_node->which < nr_node->count &&
751 nr_node->routes[nr_node->which].neighbour == nr_neigh)
749 nr_node->which++; 752 nr_node->which++;
750 nr_node_unlock(nr_node); 753 nr_node_unlock(nr_node);
754 }
751 spin_unlock_bh(&nr_node_list_lock); 755 spin_unlock_bh(&nr_node_list_lock);
752 nr_neigh_put(nr_neigh); 756 nr_neigh_put(nr_neigh);
753} 757}
diff --git a/net/rxrpc/call.c b/net/rxrpc/call.c
index c4aeb7d402..d07122b57e 100644
--- a/net/rxrpc/call.c
+++ b/net/rxrpc/call.c
@@ -1098,8 +1098,7 @@ static void rxrpc_call_receive_data_packet(struct rxrpc_call *call,
1098 1098
1099 call->app_ready_seq = pmsg->seq; 1099 call->app_ready_seq = pmsg->seq;
1100 call->app_ready_qty += pmsg->dsize; 1100 call->app_ready_qty += pmsg->dsize;
1101 list_del_init(&pmsg->link); 1101 list_move_tail(&pmsg->link, &call->app_readyq);
1102 list_add_tail(&pmsg->link, &call->app_readyq);
1103 } 1102 }
1104 1103
1105 /* see if we've got the last packet yet */ 1104 /* see if we've got the last packet yet */
diff --git a/net/rxrpc/connection.c b/net/rxrpc/connection.c
index 0e0a455349..573b572f8f 100644
--- a/net/rxrpc/connection.c
+++ b/net/rxrpc/connection.c
@@ -402,8 +402,7 @@ void rxrpc_put_connection(struct rxrpc_connection *conn)
402 402
403 /* move to graveyard queue */ 403 /* move to graveyard queue */
404 _debug("burying connection: {%08x}", ntohl(conn->conn_id)); 404 _debug("burying connection: {%08x}", ntohl(conn->conn_id));
405 list_del(&conn->link); 405 list_move_tail(&conn->link, &peer->conn_graveyard);
406 list_add_tail(&conn->link, &peer->conn_graveyard);
407 406
408 rxrpc_krxtimod_add_timer(&conn->timeout, rxrpc_conn_timeout * HZ); 407 rxrpc_krxtimod_add_timer(&conn->timeout, rxrpc_conn_timeout * HZ);
409 408
diff --git a/net/rxrpc/krxsecd.c b/net/rxrpc/krxsecd.c
index 1aadd026d3..cea4eb5e24 100644
--- a/net/rxrpc/krxsecd.c
+++ b/net/rxrpc/krxsecd.c
@@ -160,8 +160,7 @@ void rxrpc_krxsecd_clear_transport(struct rxrpc_transport *trans)
160 list_for_each_safe(_p, _n, &rxrpc_krxsecd_initmsgq) { 160 list_for_each_safe(_p, _n, &rxrpc_krxsecd_initmsgq) {
161 msg = list_entry(_p, struct rxrpc_message, link); 161 msg = list_entry(_p, struct rxrpc_message, link);
162 if (msg->trans == trans) { 162 if (msg->trans == trans) {
163 list_del(&msg->link); 163 list_move_tail(&msg->link, &tmp);
164 list_add_tail(&msg->link, &tmp);
165 atomic_dec(&rxrpc_krxsecd_qcount); 164 atomic_dec(&rxrpc_krxsecd_qcount);
166 } 165 }
167 } 166 }
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 129e2bd36a..b8714a87b3 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -169,7 +169,7 @@ gss_import_sec_context_kerberos(const void *p,
169 } 169 }
170 170
171 ctx_id->internal_ctx_id = ctx; 171 ctx_id->internal_ctx_id = ctx;
172 dprintk("RPC: Succesfully imported new context.\n"); 172 dprintk("RPC: Successfully imported new context.\n");
173 return 0; 173 return 0;
174 174
175out_err_free_key2: 175out_err_free_key2:
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
index f43311221a..2f312164d6 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -70,7 +70,7 @@
70# define RPCDBG_FACILITY RPCDBG_AUTH 70# define RPCDBG_FACILITY RPCDBG_AUTH
71#endif 71#endif
72 72
73spinlock_t krb5_seq_lock = SPIN_LOCK_UNLOCKED; 73DEFINE_SPINLOCK(krb5_seq_lock);
74 74
75u32 75u32
76gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text, 76gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
index 5bf11ccba7..3d0432aa45 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c
@@ -201,7 +201,7 @@ gss_import_sec_context_spkm3(const void *p, size_t len,
201 201
202 ctx_id->internal_ctx_id = ctx; 202 ctx_id->internal_ctx_id = ctx;
203 203
204 dprintk("Succesfully imported new spkm context.\n"); 204 dprintk("Successfully imported new spkm context.\n");
205 return 0; 205 return 0;
206 206
207out_err_free_key2: 207out_err_free_key2:
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 2c4ecbe500..1bb75703f3 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -49,13 +49,19 @@
49#include "name_table.h" 49#include "name_table.h"
50#include "bcast.h" 50#include "bcast.h"
51 51
52
53#define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */ 52#define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */
54 53
55#define BCLINK_WIN_DEFAULT 20 /* bcast link window size (default) */ 54#define BCLINK_WIN_DEFAULT 20 /* bcast link window size (default) */
56 55
57#define BCLINK_LOG_BUF_SIZE 0 56#define BCLINK_LOG_BUF_SIZE 0
58 57
58/*
59 * Loss rate for incoming broadcast frames; used to test retransmission code.
60 * Set to N to cause every N'th frame to be discarded; 0 => don't discard any.
61 */
62
63#define TIPC_BCAST_LOSS_RATE 0
64
59/** 65/**
60 * struct bcbearer_pair - a pair of bearers used by broadcast link 66 * struct bcbearer_pair - a pair of bearers used by broadcast link
61 * @primary: pointer to primary bearer 67 * @primary: pointer to primary bearer
@@ -75,7 +81,14 @@ struct bcbearer_pair {
75 * @bearer: (non-standard) broadcast bearer structure 81 * @bearer: (non-standard) broadcast bearer structure
76 * @media: (non-standard) broadcast media structure 82 * @media: (non-standard) broadcast media structure
77 * @bpairs: array of bearer pairs 83 * @bpairs: array of bearer pairs
78 * @bpairs_temp: array of bearer pairs used during creation of "bpairs" 84 * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
85 * @remains: temporary node map used by tipc_bcbearer_send()
86 * @remains_new: temporary node map used tipc_bcbearer_send()
87 *
88 * Note: The fields labelled "temporary" are incorporated into the bearer
89 * to avoid consuming potentially limited stack space through the use of
90 * large local variables within multicast routines. Concurrent access is
91 * prevented through use of the spinlock "bc_lock".
79 */ 92 */
80 93
81struct bcbearer { 94struct bcbearer {
@@ -83,6 +96,8 @@ struct bcbearer {
83 struct media media; 96 struct media media;
84 struct bcbearer_pair bpairs[MAX_BEARERS]; 97 struct bcbearer_pair bpairs[MAX_BEARERS];
85 struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1]; 98 struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
99 struct node_map remains;
100 struct node_map remains_new;
86}; 101};
87 102
88/** 103/**
@@ -102,7 +117,7 @@ struct bclink {
102static struct bcbearer *bcbearer = NULL; 117static struct bcbearer *bcbearer = NULL;
103static struct bclink *bclink = NULL; 118static struct bclink *bclink = NULL;
104static struct link *bcl = NULL; 119static struct link *bcl = NULL;
105static spinlock_t bc_lock = SPIN_LOCK_UNLOCKED; 120static DEFINE_SPINLOCK(bc_lock);
106 121
107char tipc_bclink_name[] = "multicast-link"; 122char tipc_bclink_name[] = "multicast-link";
108 123
@@ -165,21 +180,18 @@ static int bclink_ack_allowed(u32 n)
165 * @after: sequence number of last packet to *not* retransmit 180 * @after: sequence number of last packet to *not* retransmit
166 * @to: sequence number of last packet to retransmit 181 * @to: sequence number of last packet to retransmit
167 * 182 *
168 * Called with 'node' locked, bc_lock unlocked 183 * Called with bc_lock locked
169 */ 184 */
170 185
171static void bclink_retransmit_pkt(u32 after, u32 to) 186static void bclink_retransmit_pkt(u32 after, u32 to)
172{ 187{
173 struct sk_buff *buf; 188 struct sk_buff *buf;
174 189
175 spin_lock_bh(&bc_lock);
176 buf = bcl->first_out; 190 buf = bcl->first_out;
177 while (buf && less_eq(buf_seqno(buf), after)) { 191 while (buf && less_eq(buf_seqno(buf), after)) {
178 buf = buf->next; 192 buf = buf->next;
179 } 193 }
180 if (buf != NULL) 194 tipc_link_retransmit(bcl, buf, mod(to - after));
181 tipc_link_retransmit(bcl, buf, mod(to - after));
182 spin_unlock_bh(&bc_lock);
183} 195}
184 196
185/** 197/**
@@ -346,8 +358,10 @@ static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 g
346 for (; buf; buf = buf->next) { 358 for (; buf; buf = buf->next) {
347 u32 seqno = buf_seqno(buf); 359 u32 seqno = buf_seqno(buf);
348 360
349 if (mod(seqno - prev) != 1) 361 if (mod(seqno - prev) != 1) {
350 buf = NULL; 362 buf = NULL;
363 break;
364 }
351 if (seqno == gap_after) 365 if (seqno == gap_after)
352 break; 366 break;
353 prev = seqno; 367 prev = seqno;
@@ -399,7 +413,10 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
399 */ 413 */
400 414
401void tipc_bclink_recv_pkt(struct sk_buff *buf) 415void tipc_bclink_recv_pkt(struct sk_buff *buf)
402{ 416{
417#if (TIPC_BCAST_LOSS_RATE)
418 static int rx_count = 0;
419#endif
403 struct tipc_msg *msg = buf_msg(buf); 420 struct tipc_msg *msg = buf_msg(buf);
404 struct node* node = tipc_node_find(msg_prevnode(msg)); 421 struct node* node = tipc_node_find(msg_prevnode(msg));
405 u32 next_in; 422 u32 next_in;
@@ -420,9 +437,13 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
420 tipc_node_lock(node); 437 tipc_node_lock(node);
421 tipc_bclink_acknowledge(node, msg_bcast_ack(msg)); 438 tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
422 tipc_node_unlock(node); 439 tipc_node_unlock(node);
440 spin_lock_bh(&bc_lock);
423 bcl->stats.recv_nacks++; 441 bcl->stats.recv_nacks++;
442 bcl->owner->next = node; /* remember requestor */
424 bclink_retransmit_pkt(msg_bcgap_after(msg), 443 bclink_retransmit_pkt(msg_bcgap_after(msg),
425 msg_bcgap_to(msg)); 444 msg_bcgap_to(msg));
445 bcl->owner->next = NULL;
446 spin_unlock_bh(&bc_lock);
426 } else { 447 } else {
427 tipc_bclink_peek_nack(msg_destnode(msg), 448 tipc_bclink_peek_nack(msg_destnode(msg),
428 msg_bcast_tag(msg), 449 msg_bcast_tag(msg),
@@ -433,6 +454,14 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
433 return; 454 return;
434 } 455 }
435 456
457#if (TIPC_BCAST_LOSS_RATE)
458 if (++rx_count == TIPC_BCAST_LOSS_RATE) {
459 rx_count = 0;
460 buf_discard(buf);
461 return;
462 }
463#endif
464
436 tipc_node_lock(node); 465 tipc_node_lock(node);
437receive: 466receive:
438 deferred = node->bclink.deferred_head; 467 deferred = node->bclink.deferred_head;
@@ -531,12 +560,8 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
531{ 560{
532 static int send_count = 0; 561 static int send_count = 0;
533 562
534 struct node_map *remains;
535 struct node_map *remains_new;
536 struct node_map *remains_tmp;
537 int bp_index; 563 int bp_index;
538 int swap_time; 564 int swap_time;
539 int err;
540 565
541 /* Prepare buffer for broadcasting (if first time trying to send it) */ 566 /* Prepare buffer for broadcasting (if first time trying to send it) */
542 567
@@ -557,9 +582,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
557 582
558 /* Send buffer over bearers until all targets reached */ 583 /* Send buffer over bearers until all targets reached */
559 584
560 remains = kmalloc(sizeof(struct node_map), GFP_ATOMIC); 585 bcbearer->remains = tipc_cltr_bcast_nodes;
561 remains_new = kmalloc(sizeof(struct node_map), GFP_ATOMIC);
562 *remains = tipc_cltr_bcast_nodes;
563 586
564 for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) { 587 for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
565 struct bearer *p = bcbearer->bpairs[bp_index].primary; 588 struct bearer *p = bcbearer->bpairs[bp_index].primary;
@@ -568,8 +591,8 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
568 if (!p) 591 if (!p)
569 break; /* no more bearers to try */ 592 break; /* no more bearers to try */
570 593
571 tipc_nmap_diff(remains, &p->nodes, remains_new); 594 tipc_nmap_diff(&bcbearer->remains, &p->nodes, &bcbearer->remains_new);
572 if (remains_new->count == remains->count) 595 if (bcbearer->remains_new.count == bcbearer->remains.count)
573 continue; /* bearer pair doesn't add anything */ 596 continue; /* bearer pair doesn't add anything */
574 597
575 if (!p->publ.blocked && 598 if (!p->publ.blocked &&
@@ -587,27 +610,17 @@ swap:
587 bcbearer->bpairs[bp_index].primary = s; 610 bcbearer->bpairs[bp_index].primary = s;
588 bcbearer->bpairs[bp_index].secondary = p; 611 bcbearer->bpairs[bp_index].secondary = p;
589update: 612update:
590 if (remains_new->count == 0) { 613 if (bcbearer->remains_new.count == 0)
591 err = TIPC_OK; 614 return TIPC_OK;
592 goto out;
593 }
594 615
595 /* swap map */ 616 bcbearer->remains = bcbearer->remains_new;
596 remains_tmp = remains;
597 remains = remains_new;
598 remains_new = remains_tmp;
599 } 617 }
600 618
601 /* Unable to reach all targets */ 619 /* Unable to reach all targets */
602 620
603 bcbearer->bearer.publ.blocked = 1; 621 bcbearer->bearer.publ.blocked = 1;
604 bcl->stats.bearer_congs++; 622 bcl->stats.bearer_congs++;
605 err = ~TIPC_OK; 623 return ~TIPC_OK;
606
607 out:
608 kfree(remains_new);
609 kfree(remains);
610 return err;
611} 624}
612 625
613/** 626/**
@@ -765,7 +778,7 @@ int tipc_bclink_init(void)
765 bclink = kmalloc(sizeof(*bclink), GFP_ATOMIC); 778 bclink = kmalloc(sizeof(*bclink), GFP_ATOMIC);
766 if (!bcbearer || !bclink) { 779 if (!bcbearer || !bclink) {
767 nomem: 780 nomem:
768 warn("Memory squeeze; Failed to create multicast link\n"); 781 warn("Multicast link creation failed, no memory\n");
769 kfree(bcbearer); 782 kfree(bcbearer);
770 bcbearer = NULL; 783 bcbearer = NULL;
771 kfree(bclink); 784 kfree(bclink);
@@ -783,7 +796,7 @@ int tipc_bclink_init(void)
783 memset(bclink, 0, sizeof(struct bclink)); 796 memset(bclink, 0, sizeof(struct bclink));
784 INIT_LIST_HEAD(&bcl->waiting_ports); 797 INIT_LIST_HEAD(&bcl->waiting_ports);
785 bcl->next_out_no = 1; 798 bcl->next_out_no = 1;
786 bclink->node.lock = SPIN_LOCK_UNLOCKED; 799 spin_lock_init(&bclink->node.lock);
787 bcl->owner = &bclink->node; 800 bcl->owner = &bclink->node;
788 bcl->max_pkt = MAX_PKT_DEFAULT_MCAST; 801 bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
789 tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT); 802 tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 0e3be2ab33..b243d9d495 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -180,7 +180,7 @@ static inline void tipc_port_list_add(struct port_list *pl_ptr, u32 port)
180 if (!item->next) { 180 if (!item->next) {
181 item->next = kmalloc(sizeof(*item), GFP_ATOMIC); 181 item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
182 if (!item->next) { 182 if (!item->next) {
183 warn("Memory squeeze: multicast destination port list is incomplete\n"); 183 warn("Incomplete multicast delivery, no memory\n");
184 return; 184 return;
185 } 185 }
186 item->next->next = NULL; 186 item->next->next = NULL;
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index e213a8e548..7ef17a449c 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -112,39 +112,42 @@ int tipc_register_media(u32 media_type,
112 goto exit; 112 goto exit;
113 113
114 if (!media_name_valid(name)) { 114 if (!media_name_valid(name)) {
115 warn("Media registration error: illegal name <%s>\n", name); 115 warn("Media <%s> rejected, illegal name\n", name);
116 goto exit; 116 goto exit;
117 } 117 }
118 if (!bcast_addr) { 118 if (!bcast_addr) {
119 warn("Media registration error: no broadcast address supplied\n"); 119 warn("Media <%s> rejected, no broadcast address\n", name);
120 goto exit; 120 goto exit;
121 } 121 }
122 if ((bearer_priority < TIPC_MIN_LINK_PRI) && 122 if ((bearer_priority < TIPC_MIN_LINK_PRI) &&
123 (bearer_priority > TIPC_MAX_LINK_PRI)) { 123 (bearer_priority > TIPC_MAX_LINK_PRI)) {
124 warn("Media registration error: priority %u\n", bearer_priority); 124 warn("Media <%s> rejected, illegal priority (%u)\n", name,
125 bearer_priority);
125 goto exit; 126 goto exit;
126 } 127 }
127 if ((link_tolerance < TIPC_MIN_LINK_TOL) || 128 if ((link_tolerance < TIPC_MIN_LINK_TOL) ||
128 (link_tolerance > TIPC_MAX_LINK_TOL)) { 129 (link_tolerance > TIPC_MAX_LINK_TOL)) {
129 warn("Media registration error: tolerance %u\n", link_tolerance); 130 warn("Media <%s> rejected, illegal tolerance (%u)\n", name,
131 link_tolerance);
130 goto exit; 132 goto exit;
131 } 133 }
132 134
133 media_id = media_count++; 135 media_id = media_count++;
134 if (media_id >= MAX_MEDIA) { 136 if (media_id >= MAX_MEDIA) {
135 warn("Attempt to register more than %u media\n", MAX_MEDIA); 137 warn("Media <%s> rejected, media limit reached (%u)\n", name,
138 MAX_MEDIA);
136 media_count--; 139 media_count--;
137 goto exit; 140 goto exit;
138 } 141 }
139 for (i = 0; i < media_id; i++) { 142 for (i = 0; i < media_id; i++) {
140 if (media_list[i].type_id == media_type) { 143 if (media_list[i].type_id == media_type) {
141 warn("Attempt to register second media with type %u\n", 144 warn("Media <%s> rejected, duplicate type (%u)\n", name,
142 media_type); 145 media_type);
143 media_count--; 146 media_count--;
144 goto exit; 147 goto exit;
145 } 148 }
146 if (!strcmp(name, media_list[i].name)) { 149 if (!strcmp(name, media_list[i].name)) {
147 warn("Attempt to re-register media name <%s>\n", name); 150 warn("Media <%s> rejected, duplicate name\n", name);
148 media_count--; 151 media_count--;
149 goto exit; 152 goto exit;
150 } 153 }
@@ -283,6 +286,9 @@ static struct bearer *bearer_find(const char *name)
283 struct bearer *b_ptr; 286 struct bearer *b_ptr;
284 u32 i; 287 u32 i;
285 288
289 if (tipc_mode != TIPC_NET_MODE)
290 return NULL;
291
286 for (i = 0, b_ptr = tipc_bearers; i < MAX_BEARERS; i++, b_ptr++) { 292 for (i = 0, b_ptr = tipc_bearers; i < MAX_BEARERS; i++, b_ptr++) {
287 if (b_ptr->active && (!strcmp(b_ptr->publ.name, name))) 293 if (b_ptr->active && (!strcmp(b_ptr->publ.name, name)))
288 return b_ptr; 294 return b_ptr;
@@ -475,26 +481,33 @@ int tipc_enable_bearer(const char *name, u32 bcast_scope, u32 priority)
475 u32 i; 481 u32 i;
476 int res = -EINVAL; 482 int res = -EINVAL;
477 483
478 if (tipc_mode != TIPC_NET_MODE) 484 if (tipc_mode != TIPC_NET_MODE) {
485 warn("Bearer <%s> rejected, not supported in standalone mode\n",
486 name);
479 return -ENOPROTOOPT; 487 return -ENOPROTOOPT;
480 488 }
481 if (!bearer_name_validate(name, &b_name) || 489 if (!bearer_name_validate(name, &b_name)) {
482 !tipc_addr_domain_valid(bcast_scope) || 490 warn("Bearer <%s> rejected, illegal name\n", name);
483 !in_scope(bcast_scope, tipc_own_addr))
484 return -EINVAL; 491 return -EINVAL;
485 492 }
493 if (!tipc_addr_domain_valid(bcast_scope) ||
494 !in_scope(bcast_scope, tipc_own_addr)) {
495 warn("Bearer <%s> rejected, illegal broadcast scope\n", name);
496 return -EINVAL;
497 }
486 if ((priority < TIPC_MIN_LINK_PRI || 498 if ((priority < TIPC_MIN_LINK_PRI ||
487 priority > TIPC_MAX_LINK_PRI) && 499 priority > TIPC_MAX_LINK_PRI) &&
488 (priority != TIPC_MEDIA_LINK_PRI)) 500 (priority != TIPC_MEDIA_LINK_PRI)) {
501 warn("Bearer <%s> rejected, illegal priority\n", name);
489 return -EINVAL; 502 return -EINVAL;
503 }
490 504
491 write_lock_bh(&tipc_net_lock); 505 write_lock_bh(&tipc_net_lock);
492 if (!tipc_bearers)
493 goto failed;
494 506
495 m_ptr = media_find(b_name.media_name); 507 m_ptr = media_find(b_name.media_name);
496 if (!m_ptr) { 508 if (!m_ptr) {
497 warn("No media <%s>\n", b_name.media_name); 509 warn("Bearer <%s> rejected, media <%s> not registered\n", name,
510 b_name.media_name);
498 goto failed; 511 goto failed;
499 } 512 }
500 513
@@ -510,23 +523,24 @@ restart:
510 continue; 523 continue;
511 } 524 }
512 if (!strcmp(name, tipc_bearers[i].publ.name)) { 525 if (!strcmp(name, tipc_bearers[i].publ.name)) {
513 warn("Bearer <%s> already enabled\n", name); 526 warn("Bearer <%s> rejected, already enabled\n", name);
514 goto failed; 527 goto failed;
515 } 528 }
516 if ((tipc_bearers[i].priority == priority) && 529 if ((tipc_bearers[i].priority == priority) &&
517 (++with_this_prio > 2)) { 530 (++with_this_prio > 2)) {
518 if (priority-- == 0) { 531 if (priority-- == 0) {
519 warn("Third bearer <%s> with priority %u, unable to lower to %u\n", 532 warn("Bearer <%s> rejected, duplicate priority\n",
520 name, priority + 1, priority); 533 name);
521 goto failed; 534 goto failed;
522 } 535 }
523 warn("Third bearer <%s> with priority %u, lowering to %u\n", 536 warn("Bearer <%s> priority adjustment required %u->%u\n",
524 name, priority + 1, priority); 537 name, priority + 1, priority);
525 goto restart; 538 goto restart;
526 } 539 }
527 } 540 }
528 if (bearer_id >= MAX_BEARERS) { 541 if (bearer_id >= MAX_BEARERS) {
529 warn("Attempt to enable more than %d bearers\n", MAX_BEARERS); 542 warn("Bearer <%s> rejected, bearer limit reached (%u)\n",
543 name, MAX_BEARERS);
530 goto failed; 544 goto failed;
531 } 545 }
532 546
@@ -536,7 +550,7 @@ restart:
536 strcpy(b_ptr->publ.name, name); 550 strcpy(b_ptr->publ.name, name);
537 res = m_ptr->enable_bearer(&b_ptr->publ); 551 res = m_ptr->enable_bearer(&b_ptr->publ);
538 if (res) { 552 if (res) {
539 warn("Failed to enable bearer <%s>\n", name); 553 warn("Bearer <%s> rejected, enable failure (%d)\n", name, -res);
540 goto failed; 554 goto failed;
541 } 555 }
542 556
@@ -552,7 +566,7 @@ restart:
552 b_ptr->link_req = tipc_disc_init_link_req(b_ptr, &m_ptr->bcast_addr, 566 b_ptr->link_req = tipc_disc_init_link_req(b_ptr, &m_ptr->bcast_addr,
553 bcast_scope, 2); 567 bcast_scope, 2);
554 } 568 }
555 b_ptr->publ.lock = SPIN_LOCK_UNLOCKED; 569 spin_lock_init(&b_ptr->publ.lock);
556 write_unlock_bh(&tipc_net_lock); 570 write_unlock_bh(&tipc_net_lock);
557 info("Enabled bearer <%s>, discovery domain %s, priority %u\n", 571 info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
558 name, addr_string_fill(addr_string, bcast_scope), priority); 572 name, addr_string_fill(addr_string, bcast_scope), priority);
@@ -573,9 +587,6 @@ int tipc_block_bearer(const char *name)
573 struct link *l_ptr; 587 struct link *l_ptr;
574 struct link *temp_l_ptr; 588 struct link *temp_l_ptr;
575 589
576 if (tipc_mode != TIPC_NET_MODE)
577 return -ENOPROTOOPT;
578
579 read_lock_bh(&tipc_net_lock); 590 read_lock_bh(&tipc_net_lock);
580 b_ptr = bearer_find(name); 591 b_ptr = bearer_find(name);
581 if (!b_ptr) { 592 if (!b_ptr) {
@@ -584,6 +595,7 @@ int tipc_block_bearer(const char *name)
584 return -EINVAL; 595 return -EINVAL;
585 } 596 }
586 597
598 info("Blocking bearer <%s>\n", name);
587 spin_lock_bh(&b_ptr->publ.lock); 599 spin_lock_bh(&b_ptr->publ.lock);
588 b_ptr->publ.blocked = 1; 600 b_ptr->publ.blocked = 1;
589 list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) { 601 list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
@@ -595,7 +607,6 @@ int tipc_block_bearer(const char *name)
595 } 607 }
596 spin_unlock_bh(&b_ptr->publ.lock); 608 spin_unlock_bh(&b_ptr->publ.lock);
597 read_unlock_bh(&tipc_net_lock); 609 read_unlock_bh(&tipc_net_lock);
598 info("Blocked bearer <%s>\n", name);
599 return TIPC_OK; 610 return TIPC_OK;
600} 611}
601 612
@@ -611,15 +622,13 @@ static int bearer_disable(const char *name)
611 struct link *l_ptr; 622 struct link *l_ptr;
612 struct link *temp_l_ptr; 623 struct link *temp_l_ptr;
613 624
614 if (tipc_mode != TIPC_NET_MODE)
615 return -ENOPROTOOPT;
616
617 b_ptr = bearer_find(name); 625 b_ptr = bearer_find(name);
618 if (!b_ptr) { 626 if (!b_ptr) {
619 warn("Attempt to disable unknown bearer <%s>\n", name); 627 warn("Attempt to disable unknown bearer <%s>\n", name);
620 return -EINVAL; 628 return -EINVAL;
621 } 629 }
622 630
631 info("Disabling bearer <%s>\n", name);
623 tipc_disc_stop_link_req(b_ptr->link_req); 632 tipc_disc_stop_link_req(b_ptr->link_req);
624 spin_lock_bh(&b_ptr->publ.lock); 633 spin_lock_bh(&b_ptr->publ.lock);
625 b_ptr->link_req = NULL; 634 b_ptr->link_req = NULL;
@@ -635,7 +644,6 @@ static int bearer_disable(const char *name)
635 tipc_link_delete(l_ptr); 644 tipc_link_delete(l_ptr);
636 } 645 }
637 spin_unlock_bh(&b_ptr->publ.lock); 646 spin_unlock_bh(&b_ptr->publ.lock);
638 info("Disabled bearer <%s>\n", name);
639 memset(b_ptr, 0, sizeof(struct bearer)); 647 memset(b_ptr, 0, sizeof(struct bearer));
640 return TIPC_OK; 648 return TIPC_OK;
641} 649}
diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c
index 1aed81584e..1dcb6940e3 100644
--- a/net/tipc/cluster.c
+++ b/net/tipc/cluster.c
@@ -60,8 +60,10 @@ struct cluster *tipc_cltr_create(u32 addr)
60 int alloc; 60 int alloc;
61 61
62 c_ptr = (struct cluster *)kmalloc(sizeof(*c_ptr), GFP_ATOMIC); 62 c_ptr = (struct cluster *)kmalloc(sizeof(*c_ptr), GFP_ATOMIC);
63 if (c_ptr == NULL) 63 if (c_ptr == NULL) {
64 warn("Cluster creation failure, no memory\n");
64 return NULL; 65 return NULL;
66 }
65 memset(c_ptr, 0, sizeof(*c_ptr)); 67 memset(c_ptr, 0, sizeof(*c_ptr));
66 68
67 c_ptr->addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0); 69 c_ptr->addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
@@ -70,30 +72,32 @@ struct cluster *tipc_cltr_create(u32 addr)
70 else 72 else
71 max_nodes = tipc_max_nodes + 1; 73 max_nodes = tipc_max_nodes + 1;
72 alloc = sizeof(void *) * (max_nodes + 1); 74 alloc = sizeof(void *) * (max_nodes + 1);
75
73 c_ptr->nodes = (struct node **)kmalloc(alloc, GFP_ATOMIC); 76 c_ptr->nodes = (struct node **)kmalloc(alloc, GFP_ATOMIC);
74 if (c_ptr->nodes == NULL) { 77 if (c_ptr->nodes == NULL) {
78 warn("Cluster creation failure, no memory for node area\n");
75 kfree(c_ptr); 79 kfree(c_ptr);
76 return NULL; 80 return NULL;
77 } 81 }
78 memset(c_ptr->nodes, 0, alloc); 82 memset(c_ptr->nodes, 0, alloc);
83
79 if (in_own_cluster(addr)) 84 if (in_own_cluster(addr))
80 tipc_local_nodes = c_ptr->nodes; 85 tipc_local_nodes = c_ptr->nodes;
81 c_ptr->highest_slave = LOWEST_SLAVE - 1; 86 c_ptr->highest_slave = LOWEST_SLAVE - 1;
82 c_ptr->highest_node = 0; 87 c_ptr->highest_node = 0;
83 88
84 z_ptr = tipc_zone_find(tipc_zone(addr)); 89 z_ptr = tipc_zone_find(tipc_zone(addr));
85 if (z_ptr == NULL) { 90 if (!z_ptr) {
86 z_ptr = tipc_zone_create(addr); 91 z_ptr = tipc_zone_create(addr);
87 } 92 }
88 if (z_ptr != NULL) { 93 if (!z_ptr) {
89 tipc_zone_attach_cluster(z_ptr, c_ptr); 94 kfree(c_ptr->nodes);
90 c_ptr->owner = z_ptr;
91 }
92 else {
93 kfree(c_ptr); 95 kfree(c_ptr);
94 c_ptr = NULL; 96 return NULL;
95 } 97 }
96 98
99 tipc_zone_attach_cluster(z_ptr, c_ptr);
100 c_ptr->owner = z_ptr;
97 return c_ptr; 101 return c_ptr;
98} 102}
99 103
diff --git a/net/tipc/config.c b/net/tipc/config.c
index 48b5de2dbe..285e1bc2d8 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -63,7 +63,7 @@ struct manager {
63 63
64static struct manager mng = { 0}; 64static struct manager mng = { 0};
65 65
66static spinlock_t config_lock = SPIN_LOCK_UNLOCKED; 66static DEFINE_SPINLOCK(config_lock);
67 67
68static const void *req_tlv_area; /* request message TLV area */ 68static const void *req_tlv_area; /* request message TLV area */
69static int req_tlv_space; /* request message TLV area size */ 69static int req_tlv_space; /* request message TLV area size */
@@ -291,13 +291,22 @@ static struct sk_buff *cfg_set_own_addr(void)
291 if (!tipc_addr_node_valid(addr)) 291 if (!tipc_addr_node_valid(addr))
292 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 292 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
293 " (node address)"); 293 " (node address)");
294 if (tipc_own_addr) 294 if (tipc_mode == TIPC_NET_MODE)
295 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 295 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
296 " (cannot change node address once assigned)"); 296 " (cannot change node address once assigned)");
297 tipc_own_addr = addr;
298
299 /*
300 * Must release all spinlocks before calling start_net() because
301 * Linux version of TIPC calls eth_media_start() which calls
302 * register_netdevice_notifier() which may block!
303 *
304 * Temporarily releasing the lock should be harmless for non-Linux TIPC,
305 * but Linux version of eth_media_start() should really be reworked
306 * so that it can be called with spinlocks held.
307 */
297 308
298 spin_unlock_bh(&config_lock); 309 spin_unlock_bh(&config_lock);
299 tipc_core_stop_net();
300 tipc_own_addr = addr;
301 tipc_core_start_net(); 310 tipc_core_start_net();
302 spin_lock_bh(&config_lock); 311 spin_lock_bh(&config_lock);
303 return tipc_cfg_reply_none(); 312 return tipc_cfg_reply_none();
@@ -350,50 +359,21 @@ static struct sk_buff *cfg_set_max_subscriptions(void)
350 359
351static struct sk_buff *cfg_set_max_ports(void) 360static struct sk_buff *cfg_set_max_ports(void)
352{ 361{
353 int orig_mode;
354 u32 value; 362 u32 value;
355 363
356 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) 364 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
357 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 365 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
358 value = *(u32 *)TLV_DATA(req_tlv_area); 366 value = *(u32 *)TLV_DATA(req_tlv_area);
359 value = ntohl(value); 367 value = ntohl(value);
368 if (value == tipc_max_ports)
369 return tipc_cfg_reply_none();
360 if (value != delimit(value, 127, 65535)) 370 if (value != delimit(value, 127, 65535))
361 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 371 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
362 " (max ports must be 127-65535)"); 372 " (max ports must be 127-65535)");
363 373 if (tipc_mode != TIPC_NOT_RUNNING)
364 if (value == tipc_max_ports)
365 return tipc_cfg_reply_none();
366
367 if (atomic_read(&tipc_user_count) > 2)
368 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 374 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
369 " (cannot change max ports while TIPC users exist)"); 375 " (cannot change max ports while TIPC is active)");
370
371 spin_unlock_bh(&config_lock);
372 orig_mode = tipc_get_mode();
373 if (orig_mode == TIPC_NET_MODE)
374 tipc_core_stop_net();
375 tipc_core_stop();
376 tipc_max_ports = value; 376 tipc_max_ports = value;
377 tipc_core_start();
378 if (orig_mode == TIPC_NET_MODE)
379 tipc_core_start_net();
380 spin_lock_bh(&config_lock);
381 return tipc_cfg_reply_none();
382}
383
384static struct sk_buff *set_net_max(int value, int *parameter)
385{
386 int orig_mode;
387
388 if (value != *parameter) {
389 orig_mode = tipc_get_mode();
390 if (orig_mode == TIPC_NET_MODE)
391 tipc_core_stop_net();
392 *parameter = value;
393 if (orig_mode == TIPC_NET_MODE)
394 tipc_core_start_net();
395 }
396
397 return tipc_cfg_reply_none(); 377 return tipc_cfg_reply_none();
398} 378}
399 379
@@ -405,10 +385,16 @@ static struct sk_buff *cfg_set_max_zones(void)
405 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 385 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
406 value = *(u32 *)TLV_DATA(req_tlv_area); 386 value = *(u32 *)TLV_DATA(req_tlv_area);
407 value = ntohl(value); 387 value = ntohl(value);
388 if (value == tipc_max_zones)
389 return tipc_cfg_reply_none();
408 if (value != delimit(value, 1, 255)) 390 if (value != delimit(value, 1, 255))
409 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 391 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
410 " (max zones must be 1-255)"); 392 " (max zones must be 1-255)");
411 return set_net_max(value, &tipc_max_zones); 393 if (tipc_mode == TIPC_NET_MODE)
394 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
395 " (cannot change max zones once TIPC has joined a network)");
396 tipc_max_zones = value;
397 return tipc_cfg_reply_none();
412} 398}
413 399
414static struct sk_buff *cfg_set_max_clusters(void) 400static struct sk_buff *cfg_set_max_clusters(void)
@@ -419,8 +405,8 @@ static struct sk_buff *cfg_set_max_clusters(void)
419 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 405 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
420 value = *(u32 *)TLV_DATA(req_tlv_area); 406 value = *(u32 *)TLV_DATA(req_tlv_area);
421 value = ntohl(value); 407 value = ntohl(value);
422 if (value != 1) 408 if (value != delimit(value, 1, 1))
423 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 409 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
424 " (max clusters fixed at 1)"); 410 " (max clusters fixed at 1)");
425 return tipc_cfg_reply_none(); 411 return tipc_cfg_reply_none();
426} 412}
@@ -433,10 +419,16 @@ static struct sk_buff *cfg_set_max_nodes(void)
433 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 419 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
434 value = *(u32 *)TLV_DATA(req_tlv_area); 420 value = *(u32 *)TLV_DATA(req_tlv_area);
435 value = ntohl(value); 421 value = ntohl(value);
422 if (value == tipc_max_nodes)
423 return tipc_cfg_reply_none();
436 if (value != delimit(value, 8, 2047)) 424 if (value != delimit(value, 8, 2047))
437 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 425 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
438 " (max nodes must be 8-2047)"); 426 " (max nodes must be 8-2047)");
439 return set_net_max(value, &tipc_max_nodes); 427 if (tipc_mode == TIPC_NET_MODE)
428 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
429 " (cannot change max nodes once TIPC has joined a network)");
430 tipc_max_nodes = value;
431 return tipc_cfg_reply_none();
440} 432}
441 433
442static struct sk_buff *cfg_set_max_slaves(void) 434static struct sk_buff *cfg_set_max_slaves(void)
@@ -461,15 +453,16 @@ static struct sk_buff *cfg_set_netid(void)
461 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 453 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
462 value = *(u32 *)TLV_DATA(req_tlv_area); 454 value = *(u32 *)TLV_DATA(req_tlv_area);
463 value = ntohl(value); 455 value = ntohl(value);
456 if (value == tipc_net_id)
457 return tipc_cfg_reply_none();
464 if (value != delimit(value, 1, 9999)) 458 if (value != delimit(value, 1, 9999))
465 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 459 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
466 " (network id must be 1-9999)"); 460 " (network id must be 1-9999)");
467 461 if (tipc_mode == TIPC_NET_MODE)
468 if (tipc_own_addr)
469 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 462 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
470 " (cannot change network id once part of network)"); 463 " (cannot change network id once TIPC has joined a network)");
471 464 tipc_net_id = value;
472 return set_net_max(value, &tipc_net_id); 465 return tipc_cfg_reply_none();
473} 466}
474 467
475struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area, 468struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area,
@@ -649,7 +642,7 @@ static void cfg_named_msg_event(void *userdata,
649 if ((size < sizeof(*req_hdr)) || 642 if ((size < sizeof(*req_hdr)) ||
650 (size != TCM_ALIGN(ntohl(req_hdr->tcm_len))) || 643 (size != TCM_ALIGN(ntohl(req_hdr->tcm_len))) ||
651 (ntohs(req_hdr->tcm_flags) != TCM_F_REQUEST)) { 644 (ntohs(req_hdr->tcm_flags) != TCM_F_REQUEST)) {
652 warn("discarded invalid configuration message\n"); 645 warn("Invalid configuration message discarded\n");
653 return; 646 return;
654 } 647 }
655 648
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 3d0a8ee4e1..5003acb159 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -2,7 +2,7 @@
2 * net/tipc/core.c: TIPC module code 2 * net/tipc/core.c: TIPC module code
3 * 3 *
4 * Copyright (c) 2003-2006, Ericsson AB 4 * Copyright (c) 2003-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005-2006, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -57,7 +57,7 @@ void tipc_socket_stop(void);
57int tipc_netlink_start(void); 57int tipc_netlink_start(void);
58void tipc_netlink_stop(void); 58void tipc_netlink_stop(void);
59 59
60#define MOD_NAME "tipc_start: " 60#define TIPC_MOD_VER "1.6.1"
61 61
62#ifndef CONFIG_TIPC_ZONES 62#ifndef CONFIG_TIPC_ZONES
63#define CONFIG_TIPC_ZONES 3 63#define CONFIG_TIPC_ZONES 3
@@ -198,7 +198,7 @@ static int __init tipc_init(void)
198 tipc_max_publications = 10000; 198 tipc_max_publications = 10000;
199 tipc_max_subscriptions = 2000; 199 tipc_max_subscriptions = 2000;
200 tipc_max_ports = delimit(CONFIG_TIPC_PORTS, 127, 65536); 200 tipc_max_ports = delimit(CONFIG_TIPC_PORTS, 127, 65536);
201 tipc_max_zones = delimit(CONFIG_TIPC_ZONES, 1, 511); 201 tipc_max_zones = delimit(CONFIG_TIPC_ZONES, 1, 255);
202 tipc_max_clusters = delimit(CONFIG_TIPC_CLUSTERS, 1, 1); 202 tipc_max_clusters = delimit(CONFIG_TIPC_CLUSTERS, 1, 1);
203 tipc_max_nodes = delimit(CONFIG_TIPC_NODES, 8, 2047); 203 tipc_max_nodes = delimit(CONFIG_TIPC_NODES, 8, 2047);
204 tipc_max_slaves = delimit(CONFIG_TIPC_SLAVE_NODES, 0, 2047); 204 tipc_max_slaves = delimit(CONFIG_TIPC_SLAVE_NODES, 0, 2047);
@@ -224,6 +224,7 @@ module_exit(tipc_exit);
224 224
225MODULE_DESCRIPTION("TIPC: Transparent Inter Process Communication"); 225MODULE_DESCRIPTION("TIPC: Transparent Inter Process Communication");
226MODULE_LICENSE("Dual BSD/GPL"); 226MODULE_LICENSE("Dual BSD/GPL");
227MODULE_VERSION(TIPC_MOD_VER);
227 228
228/* Native TIPC API for kernel-space applications (see tipc.h) */ 229/* Native TIPC API for kernel-space applications (see tipc.h) */
229 230
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 1f2e8b27a1..86f54f3512 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -2,7 +2,7 @@
2 * net/tipc/core.h: Include file for TIPC global declarations 2 * net/tipc/core.h: Include file for TIPC global declarations
3 * 3 *
4 * Copyright (c) 2005-2006, Ericsson AB 4 * Copyright (c) 2005-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005-2006, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -111,10 +111,6 @@ void tipc_dump(struct print_buf*,const char *fmt, ...);
111 111
112#else 112#else
113 113
114#ifndef DBG_OUTPUT
115#define DBG_OUTPUT NULL
116#endif
117
118/* 114/*
119 * TIPC debug support not included: 115 * TIPC debug support not included:
120 * - system messages are printed to system console 116 * - system messages are printed to system console
@@ -129,6 +125,19 @@ void tipc_dump(struct print_buf*,const char *fmt, ...);
129#define msg_dbg(msg,txt) do {} while (0) 125#define msg_dbg(msg,txt) do {} while (0)
130#define dump(fmt,arg...) do {} while (0) 126#define dump(fmt,arg...) do {} while (0)
131 127
128
129/*
130 * TIPC_OUTPUT is defined to be the system console, while DBG_OUTPUT is
131 * the null print buffer. Thes ensures that any system or debug messages
132 * that are generated without using the above macros are handled correctly.
133 */
134
135#undef TIPC_OUTPUT
136#define TIPC_OUTPUT TIPC_CONS
137
138#undef DBG_OUTPUT
139#define DBG_OUTPUT NULL
140
132#endif 141#endif
133 142
134 143
@@ -309,7 +318,7 @@ static inline struct sk_buff *buf_acquire(u32 size)
309 * buf_discard - frees a TIPC message buffer 318 * buf_discard - frees a TIPC message buffer
310 * @skb: message buffer 319 * @skb: message buffer
311 * 320 *
312 * Frees a new buffer. If passed NULL, just returns. 321 * Frees a message buffer. If passed NULL, just returns.
313 */ 322 */
314 323
315static inline void buf_discard(struct sk_buff *skb) 324static inline void buf_discard(struct sk_buff *skb)
diff --git a/net/tipc/dbg.c b/net/tipc/dbg.c
index 26ef95d5fe..55130655e1 100644
--- a/net/tipc/dbg.c
+++ b/net/tipc/dbg.c
@@ -41,7 +41,7 @@
41#define MAX_STRING 512 41#define MAX_STRING 512
42 42
43static char print_string[MAX_STRING]; 43static char print_string[MAX_STRING];
44static spinlock_t print_lock = SPIN_LOCK_UNLOCKED; 44static DEFINE_SPINLOCK(print_lock);
45 45
46static struct print_buf cons_buf = { NULL, 0, NULL, NULL }; 46static struct print_buf cons_buf = { NULL, 0, NULL, NULL };
47struct print_buf *TIPC_CONS = &cons_buf; 47struct print_buf *TIPC_CONS = &cons_buf;
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 92601385e5..2b84412031 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -2,7 +2,7 @@
2 * net/tipc/discover.c 2 * net/tipc/discover.c
3 * 3 *
4 * Copyright (c) 2003-2006, Ericsson AB 4 * Copyright (c) 2003-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005-2006, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -176,7 +176,6 @@ void tipc_disc_recv_msg(struct sk_buff *buf)
176 n_ptr = tipc_node_create(orig); 176 n_ptr = tipc_node_create(orig);
177 } 177 }
178 if (n_ptr == NULL) { 178 if (n_ptr == NULL) {
179 warn("Memory squeeze; Failed to create node\n");
180 return; 179 return;
181 } 180 }
182 spin_lock_bh(&n_ptr->lock); 181 spin_lock_bh(&n_ptr->lock);
@@ -191,10 +190,8 @@ void tipc_disc_recv_msg(struct sk_buff *buf)
191 } 190 }
192 addr = &link->media_addr; 191 addr = &link->media_addr;
193 if (memcmp(addr, &media_addr, sizeof(*addr))) { 192 if (memcmp(addr, &media_addr, sizeof(*addr))) {
194 char addr_string[16]; 193 warn("Resetting link <%s>, peer interface address changed\n",
195 194 link->name);
196 warn("New bearer address for %s\n",
197 addr_string_fill(addr_string, orig));
198 memcpy(addr, &media_addr, sizeof(*addr)); 195 memcpy(addr, &media_addr, sizeof(*addr));
199 tipc_link_reset(link); 196 tipc_link_reset(link);
200 } 197 }
@@ -270,8 +267,8 @@ static void disc_timeout(struct link_req *req)
270 /* leave timer interval "as is" if already at a "normal" rate */ 267 /* leave timer interval "as is" if already at a "normal" rate */
271 } else { 268 } else {
272 req->timer_intv *= 2; 269 req->timer_intv *= 2;
273 if (req->timer_intv > TIPC_LINK_REQ_SLOW) 270 if (req->timer_intv > TIPC_LINK_REQ_FAST)
274 req->timer_intv = TIPC_LINK_REQ_SLOW; 271 req->timer_intv = TIPC_LINK_REQ_FAST;
275 if ((req->timer_intv == TIPC_LINK_REQ_FAST) && 272 if ((req->timer_intv == TIPC_LINK_REQ_FAST) &&
276 (req->bearer->nodes.count)) 273 (req->bearer->nodes.count))
277 req->timer_intv = TIPC_LINK_REQ_SLOW; 274 req->timer_intv = TIPC_LINK_REQ_SLOW;
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 7a252785f7..682da4a280 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -2,7 +2,7 @@
2 * net/tipc/eth_media.c: Ethernet bearer support for TIPC 2 * net/tipc/eth_media.c: Ethernet bearer support for TIPC
3 * 3 *
4 * Copyright (c) 2001-2006, Ericsson AB 4 * Copyright (c) 2001-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005-2006, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -98,17 +98,19 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev,
98 u32 size; 98 u32 size;
99 99
100 if (likely(eb_ptr->bearer)) { 100 if (likely(eb_ptr->bearer)) {
101 size = msg_size((struct tipc_msg *)buf->data); 101 if (likely(!dev->promiscuity) ||
102 skb_trim(buf, size); 102 !memcmp(buf->mac.raw,dev->dev_addr,ETH_ALEN) ||
103 if (likely(buf->len == size)) { 103 !memcmp(buf->mac.raw,dev->broadcast,ETH_ALEN)) {
104 buf->next = NULL; 104 size = msg_size((struct tipc_msg *)buf->data);
105 tipc_recv_msg(buf, eb_ptr->bearer); 105 skb_trim(buf, size);
106 } else { 106 if (likely(buf->len == size)) {
107 kfree_skb(buf); 107 buf->next = NULL;
108 tipc_recv_msg(buf, eb_ptr->bearer);
109 return TIPC_OK;
110 }
108 } 111 }
109 } else {
110 kfree_skb(buf);
111 } 112 }
113 kfree_skb(buf);
112 return TIPC_OK; 114 return TIPC_OK;
113} 115}
114 116
@@ -125,8 +127,7 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
125 127
126 /* Find device with specified name */ 128 /* Find device with specified name */
127 129
128 while (dev && dev->name && 130 while (dev && dev->name && strncmp(dev->name, driver_name, IFNAMSIZ)) {
129 (memcmp(dev->name, driver_name, strlen(dev->name)))) {
130 dev = dev->next; 131 dev = dev->next;
131 } 132 }
132 if (!dev) 133 if (!dev)
@@ -252,7 +253,9 @@ int tipc_eth_media_start(void)
252 if (eth_started) 253 if (eth_started)
253 return -EINVAL; 254 return -EINVAL;
254 255
255 memset(&bcast_addr, 0xff, sizeof(bcast_addr)); 256 bcast_addr.type = htonl(TIPC_MEDIA_TYPE_ETH);
257 memset(&bcast_addr.dev_addr, 0xff, ETH_ALEN);
258
256 memset(eth_bearers, 0, sizeof(eth_bearers)); 259 memset(eth_bearers, 0, sizeof(eth_bearers));
257 260
258 res = tipc_register_media(TIPC_MEDIA_TYPE_ETH, "eth", 261 res = tipc_register_media(TIPC_MEDIA_TYPE_ETH, "eth",
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
index 966f70a1b6..ae6ddf00a1 100644
--- a/net/tipc/handler.c
+++ b/net/tipc/handler.c
@@ -44,7 +44,7 @@ struct queue_item {
44 44
45static kmem_cache_t *tipc_queue_item_cache; 45static kmem_cache_t *tipc_queue_item_cache;
46static struct list_head signal_queue_head; 46static struct list_head signal_queue_head;
47static spinlock_t qitem_lock = SPIN_LOCK_UNLOCKED; 47static DEFINE_SPINLOCK(qitem_lock);
48static int handler_enabled = 0; 48static int handler_enabled = 0;
49 49
50static void process_signal_queue(unsigned long dummy); 50static void process_signal_queue(unsigned long dummy);
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 784b24b6d1..d646580537 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -419,7 +419,7 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
419 419
420 l_ptr = (struct link *)kmalloc(sizeof(*l_ptr), GFP_ATOMIC); 420 l_ptr = (struct link *)kmalloc(sizeof(*l_ptr), GFP_ATOMIC);
421 if (!l_ptr) { 421 if (!l_ptr) {
422 warn("Memory squeeze; Failed to create link\n"); 422 warn("Link creation failed, no memory\n");
423 return NULL; 423 return NULL;
424 } 424 }
425 memset(l_ptr, 0, sizeof(*l_ptr)); 425 memset(l_ptr, 0, sizeof(*l_ptr));
@@ -469,7 +469,7 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
469 469
470 if (!pb) { 470 if (!pb) {
471 kfree(l_ptr); 471 kfree(l_ptr);
472 warn("Memory squeeze; Failed to create link\n"); 472 warn("Link creation failed, no memory for print buffer\n");
473 return NULL; 473 return NULL;
474 } 474 }
475 tipc_printbuf_init(&l_ptr->print_buf, pb, LINK_LOG_BUF_SIZE); 475 tipc_printbuf_init(&l_ptr->print_buf, pb, LINK_LOG_BUF_SIZE);
@@ -574,7 +574,6 @@ void tipc_link_wakeup_ports(struct link *l_ptr, int all)
574 break; 574 break;
575 list_del_init(&p_ptr->wait_list); 575 list_del_init(&p_ptr->wait_list);
576 p_ptr->congested_link = NULL; 576 p_ptr->congested_link = NULL;
577 assert(p_ptr->wakeup);
578 spin_lock_bh(p_ptr->publ.lock); 577 spin_lock_bh(p_ptr->publ.lock);
579 p_ptr->publ.congested = 0; 578 p_ptr->publ.congested = 0;
580 p_ptr->wakeup(&p_ptr->publ); 579 p_ptr->wakeup(&p_ptr->publ);
@@ -691,6 +690,7 @@ void tipc_link_reset(struct link *l_ptr)
691 struct sk_buff *buf; 690 struct sk_buff *buf;
692 u32 prev_state = l_ptr->state; 691 u32 prev_state = l_ptr->state;
693 u32 checkpoint = l_ptr->next_in_no; 692 u32 checkpoint = l_ptr->next_in_no;
693 int was_active_link = tipc_link_is_active(l_ptr);
694 694
695 msg_set_session(l_ptr->pmsg, msg_session(l_ptr->pmsg) + 1); 695 msg_set_session(l_ptr->pmsg, msg_session(l_ptr->pmsg) + 1);
696 696
@@ -712,7 +712,7 @@ void tipc_link_reset(struct link *l_ptr)
712 tipc_printf(TIPC_CONS, "\nReset link <%s>\n", l_ptr->name); 712 tipc_printf(TIPC_CONS, "\nReset link <%s>\n", l_ptr->name);
713 dbg_link_dump(); 713 dbg_link_dump();
714#endif 714#endif
715 if (tipc_node_has_active_links(l_ptr->owner) && 715 if (was_active_link && tipc_node_has_active_links(l_ptr->owner) &&
716 l_ptr->owner->permit_changeover) { 716 l_ptr->owner->permit_changeover) {
717 l_ptr->reset_checkpoint = checkpoint; 717 l_ptr->reset_checkpoint = checkpoint;
718 l_ptr->exp_msg_count = START_CHANGEOVER; 718 l_ptr->exp_msg_count = START_CHANGEOVER;
@@ -755,7 +755,7 @@ void tipc_link_reset(struct link *l_ptr)
755 755
756static void link_activate(struct link *l_ptr) 756static void link_activate(struct link *l_ptr)
757{ 757{
758 l_ptr->next_in_no = 1; 758 l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
759 tipc_node_link_up(l_ptr->owner, l_ptr); 759 tipc_node_link_up(l_ptr->owner, l_ptr);
760 tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr); 760 tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
761 link_send_event(tipc_cfg_link_event, l_ptr, 1); 761 link_send_event(tipc_cfg_link_event, l_ptr, 1);
@@ -820,6 +820,8 @@ static void link_state_event(struct link *l_ptr, unsigned event)
820 break; 820 break;
821 case RESET_MSG: 821 case RESET_MSG:
822 dbg_link("RES -> RR\n"); 822 dbg_link("RES -> RR\n");
823 info("Resetting link <%s>, requested by peer\n",
824 l_ptr->name);
823 tipc_link_reset(l_ptr); 825 tipc_link_reset(l_ptr);
824 l_ptr->state = RESET_RESET; 826 l_ptr->state = RESET_RESET;
825 l_ptr->fsm_msg_cnt = 0; 827 l_ptr->fsm_msg_cnt = 0;
@@ -844,6 +846,8 @@ static void link_state_event(struct link *l_ptr, unsigned event)
844 break; 846 break;
845 case RESET_MSG: 847 case RESET_MSG:
846 dbg_link("RES -> RR\n"); 848 dbg_link("RES -> RR\n");
849 info("Resetting link <%s>, requested by peer "
850 "while probing\n", l_ptr->name);
847 tipc_link_reset(l_ptr); 851 tipc_link_reset(l_ptr);
848 l_ptr->state = RESET_RESET; 852 l_ptr->state = RESET_RESET;
849 l_ptr->fsm_msg_cnt = 0; 853 l_ptr->fsm_msg_cnt = 0;
@@ -875,6 +879,8 @@ static void link_state_event(struct link *l_ptr, unsigned event)
875 } else { /* Link has failed */ 879 } else { /* Link has failed */
876 dbg_link("-> RU (%u probes unanswered)\n", 880 dbg_link("-> RU (%u probes unanswered)\n",
877 l_ptr->fsm_msg_cnt); 881 l_ptr->fsm_msg_cnt);
882 warn("Resetting link <%s>, peer not responding\n",
883 l_ptr->name);
878 tipc_link_reset(l_ptr); 884 tipc_link_reset(l_ptr);
879 l_ptr->state = RESET_UNKNOWN; 885 l_ptr->state = RESET_UNKNOWN;
880 l_ptr->fsm_msg_cnt = 0; 886 l_ptr->fsm_msg_cnt = 0;
@@ -1050,7 +1056,7 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
1050 msg_dbg(msg, "TIPC: Congestion, throwing away\n"); 1056 msg_dbg(msg, "TIPC: Congestion, throwing away\n");
1051 buf_discard(buf); 1057 buf_discard(buf);
1052 if (imp > CONN_MANAGER) { 1058 if (imp > CONN_MANAGER) {
1053 warn("Resetting <%s>, send queue full", l_ptr->name); 1059 warn("Resetting link <%s>, send queue full", l_ptr->name);
1054 tipc_link_reset(l_ptr); 1060 tipc_link_reset(l_ptr);
1055 } 1061 }
1056 return dsz; 1062 return dsz;
@@ -1135,9 +1141,13 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
1135 if (n_ptr) { 1141 if (n_ptr) {
1136 tipc_node_lock(n_ptr); 1142 tipc_node_lock(n_ptr);
1137 l_ptr = n_ptr->active_links[selector & 1]; 1143 l_ptr = n_ptr->active_links[selector & 1];
1138 dbg("tipc_link_send: found link %x for dest %x\n", l_ptr, dest);
1139 if (l_ptr) { 1144 if (l_ptr) {
1145 dbg("tipc_link_send: found link %x for dest %x\n", l_ptr, dest);
1140 res = tipc_link_send_buf(l_ptr, buf); 1146 res = tipc_link_send_buf(l_ptr, buf);
1147 } else {
1148 dbg("Attempt to send msg to unreachable node:\n");
1149 msg_dbg(buf_msg(buf),">>>");
1150 buf_discard(buf);
1141 } 1151 }
1142 tipc_node_unlock(n_ptr); 1152 tipc_node_unlock(n_ptr);
1143 } else { 1153 } else {
@@ -1242,8 +1252,6 @@ int tipc_link_send_sections_fast(struct port *sender,
1242 int res; 1252 int res;
1243 u32 selector = msg_origport(hdr) & 1; 1253 u32 selector = msg_origport(hdr) & 1;
1244 1254
1245 assert(destaddr != tipc_own_addr);
1246
1247again: 1255again:
1248 /* 1256 /*
1249 * Try building message using port's max_pkt hint. 1257 * Try building message using port's max_pkt hint.
@@ -1604,40 +1612,121 @@ void tipc_link_push_queue(struct link *l_ptr)
1604 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr); 1612 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1605} 1613}
1606 1614
1615static void link_reset_all(unsigned long addr)
1616{
1617 struct node *n_ptr;
1618 char addr_string[16];
1619 u32 i;
1620
1621 read_lock_bh(&tipc_net_lock);
1622 n_ptr = tipc_node_find((u32)addr);
1623 if (!n_ptr) {
1624 read_unlock_bh(&tipc_net_lock);
1625 return; /* node no longer exists */
1626 }
1627
1628 tipc_node_lock(n_ptr);
1629
1630 warn("Resetting all links to %s\n",
1631 addr_string_fill(addr_string, n_ptr->addr));
1632
1633 for (i = 0; i < MAX_BEARERS; i++) {
1634 if (n_ptr->links[i]) {
1635 link_print(n_ptr->links[i], TIPC_OUTPUT,
1636 "Resetting link\n");
1637 tipc_link_reset(n_ptr->links[i]);
1638 }
1639 }
1640
1641 tipc_node_unlock(n_ptr);
1642 read_unlock_bh(&tipc_net_lock);
1643}
1644
1645static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf)
1646{
1647 struct tipc_msg *msg = buf_msg(buf);
1648
1649 warn("Retransmission failure on link <%s>\n", l_ptr->name);
1650 tipc_msg_print(TIPC_OUTPUT, msg, ">RETR-FAIL>");
1651
1652 if (l_ptr->addr) {
1653
1654 /* Handle failure on standard link */
1655
1656 link_print(l_ptr, TIPC_OUTPUT, "Resetting link\n");
1657 tipc_link_reset(l_ptr);
1658
1659 } else {
1660
1661 /* Handle failure on broadcast link */
1662
1663 struct node *n_ptr;
1664 char addr_string[16];
1665
1666 tipc_printf(TIPC_OUTPUT, "Msg seq number: %u, ", msg_seqno(msg));
1667 tipc_printf(TIPC_OUTPUT, "Outstanding acks: %u\n", (u32)TIPC_SKB_CB(buf)->handle);
1668
1669 n_ptr = l_ptr->owner->next;
1670 tipc_node_lock(n_ptr);
1671
1672 addr_string_fill(addr_string, n_ptr->addr);
1673 tipc_printf(TIPC_OUTPUT, "Multicast link info for %s\n", addr_string);
1674 tipc_printf(TIPC_OUTPUT, "Supported: %d, ", n_ptr->bclink.supported);
1675 tipc_printf(TIPC_OUTPUT, "Acked: %u\n", n_ptr->bclink.acked);
1676 tipc_printf(TIPC_OUTPUT, "Last in: %u, ", n_ptr->bclink.last_in);
1677 tipc_printf(TIPC_OUTPUT, "Gap after: %u, ", n_ptr->bclink.gap_after);
1678 tipc_printf(TIPC_OUTPUT, "Gap to: %u\n", n_ptr->bclink.gap_to);
1679 tipc_printf(TIPC_OUTPUT, "Nack sync: %u\n\n", n_ptr->bclink.nack_sync);
1680
1681 tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);
1682
1683 tipc_node_unlock(n_ptr);
1684
1685 l_ptr->stale_count = 0;
1686 }
1687}
1688
1607void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf, 1689void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
1608 u32 retransmits) 1690 u32 retransmits)
1609{ 1691{
1610 struct tipc_msg *msg; 1692 struct tipc_msg *msg;
1611 1693
1694 if (!buf)
1695 return;
1696
1697 msg = buf_msg(buf);
1698
1612 dbg("Retransmitting %u in link %x\n", retransmits, l_ptr); 1699 dbg("Retransmitting %u in link %x\n", retransmits, l_ptr);
1613 1700
1614 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr) && buf && !skb_cloned(buf)) { 1701 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
1615 msg_dbg(buf_msg(buf), ">NO_RETR->BCONG>"); 1702 if (!skb_cloned(buf)) {
1616 dbg_print_link(l_ptr, " "); 1703 msg_dbg(msg, ">NO_RETR->BCONG>");
1617 l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf)); 1704 dbg_print_link(l_ptr, " ");
1618 l_ptr->retransm_queue_size = retransmits; 1705 l_ptr->retransm_queue_head = msg_seqno(msg);
1619 return; 1706 l_ptr->retransm_queue_size = retransmits;
1707 return;
1708 } else {
1709 /* Don't retransmit if driver already has the buffer */
1710 }
1711 } else {
1712 /* Detect repeated retransmit failures on uncongested bearer */
1713
1714 if (l_ptr->last_retransmitted == msg_seqno(msg)) {
1715 if (++l_ptr->stale_count > 100) {
1716 link_retransmit_failure(l_ptr, buf);
1717 return;
1718 }
1719 } else {
1720 l_ptr->last_retransmitted = msg_seqno(msg);
1721 l_ptr->stale_count = 1;
1722 }
1620 } 1723 }
1724
1621 while (retransmits && (buf != l_ptr->next_out) && buf && !skb_cloned(buf)) { 1725 while (retransmits && (buf != l_ptr->next_out) && buf && !skb_cloned(buf)) {
1622 msg = buf_msg(buf); 1726 msg = buf_msg(buf);
1623 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); 1727 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1624 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 1728 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1625 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { 1729 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1626 /* Catch if retransmissions fail repeatedly: */
1627 if (l_ptr->last_retransmitted == msg_seqno(msg)) {
1628 if (++l_ptr->stale_count > 100) {
1629 tipc_msg_print(TIPC_CONS, buf_msg(buf), ">RETR>");
1630 info("...Retransmitted %u times\n",
1631 l_ptr->stale_count);
1632 link_print(l_ptr, TIPC_CONS, "Resetting Link\n");
1633 tipc_link_reset(l_ptr);
1634 break;
1635 }
1636 } else {
1637 l_ptr->stale_count = 0;
1638 }
1639 l_ptr->last_retransmitted = msg_seqno(msg);
1640
1641 msg_dbg(buf_msg(buf), ">RETR>"); 1730 msg_dbg(buf_msg(buf), ">RETR>");
1642 buf = buf->next; 1731 buf = buf->next;
1643 retransmits--; 1732 retransmits--;
@@ -1650,6 +1739,7 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
1650 return; 1739 return;
1651 } 1740 }
1652 } 1741 }
1742
1653 l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0; 1743 l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
1654} 1744}
1655 1745
@@ -1720,6 +1810,11 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
1720 link_recv_non_seq(buf); 1810 link_recv_non_seq(buf);
1721 continue; 1811 continue;
1722 } 1812 }
1813
1814 if (unlikely(!msg_short(msg) &&
1815 (msg_destnode(msg) != tipc_own_addr)))
1816 goto cont;
1817
1723 n_ptr = tipc_node_find(msg_prevnode(msg)); 1818 n_ptr = tipc_node_find(msg_prevnode(msg));
1724 if (unlikely(!n_ptr)) 1819 if (unlikely(!n_ptr))
1725 goto cont; 1820 goto cont;
@@ -2140,7 +2235,7 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
2140 2235
2141 if (msg_linkprio(msg) && 2236 if (msg_linkprio(msg) &&
2142 (msg_linkprio(msg) != l_ptr->priority)) { 2237 (msg_linkprio(msg) != l_ptr->priority)) {
2143 warn("Changing prio <%s>: %u->%u\n", 2238 warn("Resetting link <%s>, priority change %u->%u\n",
2144 l_ptr->name, l_ptr->priority, msg_linkprio(msg)); 2239 l_ptr->name, l_ptr->priority, msg_linkprio(msg));
2145 l_ptr->priority = msg_linkprio(msg); 2240 l_ptr->priority = msg_linkprio(msg);
2146 tipc_link_reset(l_ptr); /* Enforce change to take effect */ 2241 tipc_link_reset(l_ptr); /* Enforce change to take effect */
@@ -2209,17 +2304,22 @@ void tipc_link_tunnel(struct link *l_ptr,
2209 u32 length = msg_size(msg); 2304 u32 length = msg_size(msg);
2210 2305
2211 tunnel = l_ptr->owner->active_links[selector & 1]; 2306 tunnel = l_ptr->owner->active_links[selector & 1];
2212 if (!tipc_link_is_up(tunnel)) 2307 if (!tipc_link_is_up(tunnel)) {
2308 warn("Link changeover error, "
2309 "tunnel link no longer available\n");
2213 return; 2310 return;
2311 }
2214 msg_set_size(tunnel_hdr, length + INT_H_SIZE); 2312 msg_set_size(tunnel_hdr, length + INT_H_SIZE);
2215 buf = buf_acquire(length + INT_H_SIZE); 2313 buf = buf_acquire(length + INT_H_SIZE);
2216 if (!buf) 2314 if (!buf) {
2315 warn("Link changeover error, "
2316 "unable to send tunnel msg\n");
2217 return; 2317 return;
2318 }
2218 memcpy(buf->data, (unchar *)tunnel_hdr, INT_H_SIZE); 2319 memcpy(buf->data, (unchar *)tunnel_hdr, INT_H_SIZE);
2219 memcpy(buf->data + INT_H_SIZE, (unchar *)msg, length); 2320 memcpy(buf->data + INT_H_SIZE, (unchar *)msg, length);
2220 dbg("%c->%c:", l_ptr->b_ptr->net_plane, tunnel->b_ptr->net_plane); 2321 dbg("%c->%c:", l_ptr->b_ptr->net_plane, tunnel->b_ptr->net_plane);
2221 msg_dbg(buf_msg(buf), ">SEND>"); 2322 msg_dbg(buf_msg(buf), ">SEND>");
2222 assert(tunnel);
2223 tipc_link_send_buf(tunnel, buf); 2323 tipc_link_send_buf(tunnel, buf);
2224} 2324}
2225 2325
@@ -2235,23 +2335,27 @@ void tipc_link_changeover(struct link *l_ptr)
2235 u32 msgcount = l_ptr->out_queue_size; 2335 u32 msgcount = l_ptr->out_queue_size;
2236 struct sk_buff *crs = l_ptr->first_out; 2336 struct sk_buff *crs = l_ptr->first_out;
2237 struct link *tunnel = l_ptr->owner->active_links[0]; 2337 struct link *tunnel = l_ptr->owner->active_links[0];
2238 int split_bundles = tipc_node_has_redundant_links(l_ptr->owner);
2239 struct tipc_msg tunnel_hdr; 2338 struct tipc_msg tunnel_hdr;
2339 int split_bundles;
2240 2340
2241 if (!tunnel) 2341 if (!tunnel)
2242 return; 2342 return;
2243 2343
2244 if (!l_ptr->owner->permit_changeover) 2344 if (!l_ptr->owner->permit_changeover) {
2345 warn("Link changeover error, "
2346 "peer did not permit changeover\n");
2245 return; 2347 return;
2348 }
2246 2349
2247 msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, 2350 msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
2248 ORIGINAL_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr); 2351 ORIGINAL_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
2249 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); 2352 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2250 msg_set_msgcnt(&tunnel_hdr, msgcount); 2353 msg_set_msgcnt(&tunnel_hdr, msgcount);
2354 dbg("Link changeover requires %u tunnel messages\n", msgcount);
2355
2251 if (!l_ptr->first_out) { 2356 if (!l_ptr->first_out) {
2252 struct sk_buff *buf; 2357 struct sk_buff *buf;
2253 2358
2254 assert(!msgcount);
2255 buf = buf_acquire(INT_H_SIZE); 2359 buf = buf_acquire(INT_H_SIZE);
2256 if (buf) { 2360 if (buf) {
2257 memcpy(buf->data, (unchar *)&tunnel_hdr, INT_H_SIZE); 2361 memcpy(buf->data, (unchar *)&tunnel_hdr, INT_H_SIZE);
@@ -2261,10 +2365,15 @@ void tipc_link_changeover(struct link *l_ptr)
2261 msg_dbg(&tunnel_hdr, "EMPTY>SEND>"); 2365 msg_dbg(&tunnel_hdr, "EMPTY>SEND>");
2262 tipc_link_send_buf(tunnel, buf); 2366 tipc_link_send_buf(tunnel, buf);
2263 } else { 2367 } else {
2264 warn("Memory squeeze; link changeover failed\n"); 2368 warn("Link changeover error, "
2369 "unable to send changeover msg\n");
2265 } 2370 }
2266 return; 2371 return;
2267 } 2372 }
2373
2374 split_bundles = (l_ptr->owner->active_links[0] !=
2375 l_ptr->owner->active_links[1]);
2376
2268 while (crs) { 2377 while (crs) {
2269 struct tipc_msg *msg = buf_msg(crs); 2378 struct tipc_msg *msg = buf_msg(crs);
2270 2379
@@ -2310,7 +2419,8 @@ void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel)
2310 msg_set_size(&tunnel_hdr, length + INT_H_SIZE); 2419 msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
2311 outbuf = buf_acquire(length + INT_H_SIZE); 2420 outbuf = buf_acquire(length + INT_H_SIZE);
2312 if (outbuf == NULL) { 2421 if (outbuf == NULL) {
2313 warn("Memory squeeze; buffer duplication failed\n"); 2422 warn("Link changeover error, "
2423 "unable to send duplicate msg\n");
2314 return; 2424 return;
2315 } 2425 }
2316 memcpy(outbuf->data, (unchar *)&tunnel_hdr, INT_H_SIZE); 2426 memcpy(outbuf->data, (unchar *)&tunnel_hdr, INT_H_SIZE);
@@ -2364,11 +2474,15 @@ static int link_recv_changeover_msg(struct link **l_ptr,
2364 u32 msg_count = msg_msgcnt(tunnel_msg); 2474 u32 msg_count = msg_msgcnt(tunnel_msg);
2365 2475
2366 dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)]; 2476 dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)];
2367 assert(dest_link != *l_ptr);
2368 if (!dest_link) { 2477 if (!dest_link) {
2369 msg_dbg(tunnel_msg, "NOLINK/<REC<"); 2478 msg_dbg(tunnel_msg, "NOLINK/<REC<");
2370 goto exit; 2479 goto exit;
2371 } 2480 }
2481 if (dest_link == *l_ptr) {
2482 err("Unexpected changeover message on link <%s>\n",
2483 (*l_ptr)->name);
2484 goto exit;
2485 }
2372 dbg("%c<-%c:", dest_link->b_ptr->net_plane, 2486 dbg("%c<-%c:", dest_link->b_ptr->net_plane,
2373 (*l_ptr)->b_ptr->net_plane); 2487 (*l_ptr)->b_ptr->net_plane);
2374 *l_ptr = dest_link; 2488 *l_ptr = dest_link;
@@ -2381,7 +2495,7 @@ static int link_recv_changeover_msg(struct link **l_ptr,
2381 } 2495 }
2382 *buf = buf_extract(tunnel_buf,INT_H_SIZE); 2496 *buf = buf_extract(tunnel_buf,INT_H_SIZE);
2383 if (*buf == NULL) { 2497 if (*buf == NULL) {
2384 warn("Memory squeeze; failed to extract msg\n"); 2498 warn("Link changeover error, duplicate msg dropped\n");
2385 goto exit; 2499 goto exit;
2386 } 2500 }
2387 msg_dbg(tunnel_msg, "TNL<REC<"); 2501 msg_dbg(tunnel_msg, "TNL<REC<");
@@ -2393,13 +2507,17 @@ static int link_recv_changeover_msg(struct link **l_ptr,
2393 2507
2394 if (tipc_link_is_up(dest_link)) { 2508 if (tipc_link_is_up(dest_link)) {
2395 msg_dbg(tunnel_msg, "UP/FIRST/<REC<"); 2509 msg_dbg(tunnel_msg, "UP/FIRST/<REC<");
2510 info("Resetting link <%s>, changeover initiated by peer\n",
2511 dest_link->name);
2396 tipc_link_reset(dest_link); 2512 tipc_link_reset(dest_link);
2397 dest_link->exp_msg_count = msg_count; 2513 dest_link->exp_msg_count = msg_count;
2514 dbg("Expecting %u tunnelled messages\n", msg_count);
2398 if (!msg_count) 2515 if (!msg_count)
2399 goto exit; 2516 goto exit;
2400 } else if (dest_link->exp_msg_count == START_CHANGEOVER) { 2517 } else if (dest_link->exp_msg_count == START_CHANGEOVER) {
2401 msg_dbg(tunnel_msg, "BLK/FIRST/<REC<"); 2518 msg_dbg(tunnel_msg, "BLK/FIRST/<REC<");
2402 dest_link->exp_msg_count = msg_count; 2519 dest_link->exp_msg_count = msg_count;
2520 dbg("Expecting %u tunnelled messages\n", msg_count);
2403 if (!msg_count) 2521 if (!msg_count)
2404 goto exit; 2522 goto exit;
2405 } 2523 }
@@ -2407,6 +2525,8 @@ static int link_recv_changeover_msg(struct link **l_ptr,
2407 /* Receive original message */ 2525 /* Receive original message */
2408 2526
2409 if (dest_link->exp_msg_count == 0) { 2527 if (dest_link->exp_msg_count == 0) {
2528 warn("Link switchover error, "
2529 "got too many tunnelled messages\n");
2410 msg_dbg(tunnel_msg, "OVERDUE/DROP/<REC<"); 2530 msg_dbg(tunnel_msg, "OVERDUE/DROP/<REC<");
2411 dbg_print_link(dest_link, "LINK:"); 2531 dbg_print_link(dest_link, "LINK:");
2412 goto exit; 2532 goto exit;
@@ -2422,7 +2542,7 @@ static int link_recv_changeover_msg(struct link **l_ptr,
2422 buf_discard(tunnel_buf); 2542 buf_discard(tunnel_buf);
2423 return 1; 2543 return 1;
2424 } else { 2544 } else {
2425 warn("Memory squeeze; dropped incoming msg\n"); 2545 warn("Link changeover error, original msg dropped\n");
2426 } 2546 }
2427 } 2547 }
2428exit: 2548exit:
@@ -2444,13 +2564,8 @@ void tipc_link_recv_bundle(struct sk_buff *buf)
2444 while (msgcount--) { 2564 while (msgcount--) {
2445 obuf = buf_extract(buf, pos); 2565 obuf = buf_extract(buf, pos);
2446 if (obuf == NULL) { 2566 if (obuf == NULL) {
2447 char addr_string[16]; 2567 warn("Link unable to unbundle message(s)\n");
2448 2568 break;
2449 warn("Buffer allocation failure;\n");
2450 warn(" incoming message(s) from %s lost\n",
2451 addr_string_fill(addr_string,
2452 msg_orignode(buf_msg(buf))));
2453 return;
2454 }; 2569 };
2455 pos += align(msg_size(buf_msg(obuf))); 2570 pos += align(msg_size(buf_msg(obuf)));
2456 msg_dbg(buf_msg(obuf), " /"); 2571 msg_dbg(buf_msg(obuf), " /");
@@ -2508,7 +2623,7 @@ int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
2508 } 2623 }
2509 fragm = buf_acquire(fragm_sz + INT_H_SIZE); 2624 fragm = buf_acquire(fragm_sz + INT_H_SIZE);
2510 if (fragm == NULL) { 2625 if (fragm == NULL) {
2511 warn("Memory squeeze; failed to fragment msg\n"); 2626 warn("Link unable to fragment message\n");
2512 dsz = -ENOMEM; 2627 dsz = -ENOMEM;
2513 goto exit; 2628 goto exit;
2514 } 2629 }
@@ -2623,7 +2738,7 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2623 set_fragm_size(pbuf,fragm_sz); 2738 set_fragm_size(pbuf,fragm_sz);
2624 set_expected_frags(pbuf,exp_fragm_cnt - 1); 2739 set_expected_frags(pbuf,exp_fragm_cnt - 1);
2625 } else { 2740 } else {
2626 warn("Memory squeeze; got no defragmenting buffer\n"); 2741 warn("Link unable to reassemble fragmented message\n");
2627 } 2742 }
2628 buf_discard(fbuf); 2743 buf_discard(fbuf);
2629 return 0; 2744 return 0;
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index a3bbc891f9..f0b063bcc2 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -127,7 +127,7 @@ void tipc_named_publish(struct publication *publ)
127 127
128 buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0); 128 buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
129 if (!buf) { 129 if (!buf) {
130 warn("Memory squeeze; failed to distribute publication\n"); 130 warn("Publication distribution failure\n");
131 return; 131 return;
132 } 132 }
133 133
@@ -151,7 +151,7 @@ void tipc_named_withdraw(struct publication *publ)
151 151
152 buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0); 152 buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
153 if (!buf) { 153 if (!buf) {
154 warn("Memory squeeze; failed to distribute withdrawal\n"); 154 warn("Withdrawl distribution failure\n");
155 return; 155 return;
156 } 156 }
157 157
@@ -174,7 +174,6 @@ void tipc_named_node_up(unsigned long node)
174 u32 rest; 174 u32 rest;
175 u32 max_item_buf; 175 u32 max_item_buf;
176 176
177 assert(in_own_cluster(node));
178 read_lock_bh(&tipc_nametbl_lock); 177 read_lock_bh(&tipc_nametbl_lock);
179 max_item_buf = TIPC_MAX_USER_MSG_SIZE / ITEM_SIZE; 178 max_item_buf = TIPC_MAX_USER_MSG_SIZE / ITEM_SIZE;
180 max_item_buf *= ITEM_SIZE; 179 max_item_buf *= ITEM_SIZE;
@@ -185,8 +184,8 @@ void tipc_named_node_up(unsigned long node)
185 left = (rest <= max_item_buf) ? rest : max_item_buf; 184 left = (rest <= max_item_buf) ? rest : max_item_buf;
186 rest -= left; 185 rest -= left;
187 buf = named_prepare_buf(PUBLICATION, left, node); 186 buf = named_prepare_buf(PUBLICATION, left, node);
188 if (buf == NULL) { 187 if (!buf) {
189 warn("Memory Squeeze; could not send publication\n"); 188 warn("Bulk publication distribution failure\n");
190 goto exit; 189 goto exit;
191 } 190 }
192 item = (struct distr_item *)msg_data(buf_msg(buf)); 191 item = (struct distr_item *)msg_data(buf_msg(buf));
@@ -221,15 +220,24 @@ exit:
221static void node_is_down(struct publication *publ) 220static void node_is_down(struct publication *publ)
222{ 221{
223 struct publication *p; 222 struct publication *p;
223
224 write_lock_bh(&tipc_nametbl_lock); 224 write_lock_bh(&tipc_nametbl_lock);
225 dbg("node_is_down: withdrawing %u, %u, %u\n", 225 dbg("node_is_down: withdrawing %u, %u, %u\n",
226 publ->type, publ->lower, publ->upper); 226 publ->type, publ->lower, publ->upper);
227 publ->key += 1222345; 227 publ->key += 1222345;
228 p = tipc_nametbl_remove_publ(publ->type, publ->lower, 228 p = tipc_nametbl_remove_publ(publ->type, publ->lower,
229 publ->node, publ->ref, publ->key); 229 publ->node, publ->ref, publ->key);
230 assert(p == publ);
231 write_unlock_bh(&tipc_nametbl_lock); 230 write_unlock_bh(&tipc_nametbl_lock);
232 kfree(publ); 231
232 if (p != publ) {
233 err("Unable to remove publication from failed node\n"
234 "(type=%u, lower=%u, node=0x%x, ref=%u, key=%u)\n",
235 publ->type, publ->lower, publ->node, publ->ref, publ->key);
236 }
237
238 if (p) {
239 kfree(p);
240 }
233} 241}
234 242
235/** 243/**
@@ -275,9 +283,15 @@ void tipc_named_recv(struct sk_buff *buf)
275 if (publ) { 283 if (publ) {
276 tipc_nodesub_unsubscribe(&publ->subscr); 284 tipc_nodesub_unsubscribe(&publ->subscr);
277 kfree(publ); 285 kfree(publ);
286 } else {
287 err("Unable to remove publication by node 0x%x\n"
288 "(type=%u, lower=%u, ref=%u, key=%u)\n",
289 msg_orignode(msg),
290 ntohl(item->type), ntohl(item->lower),
291 ntohl(item->ref), ntohl(item->key));
278 } 292 }
279 } else { 293 } else {
280 warn("tipc_named_recv: unknown msg\n"); 294 warn("Unrecognized name table message received\n");
281 } 295 }
282 item++; 296 item++;
283 } 297 }
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index d129422fc5..a6926ff07b 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -71,7 +71,7 @@ struct sub_seq {
71 * @sseq: pointer to dynamically-sized array of sub-sequences of this 'type'; 71 * @sseq: pointer to dynamically-sized array of sub-sequences of this 'type';
72 * sub-sequences are sorted in ascending order 72 * sub-sequences are sorted in ascending order
73 * @alloc: number of sub-sequences currently in array 73 * @alloc: number of sub-sequences currently in array
74 * @first_free: upper bound of highest sub-sequence + 1 74 * @first_free: array index of first unused sub-sequence entry
75 * @ns_list: links to adjacent name sequences in hash chain 75 * @ns_list: links to adjacent name sequences in hash chain
76 * @subscriptions: list of subscriptions for this 'type' 76 * @subscriptions: list of subscriptions for this 'type'
77 * @lock: spinlock controlling access to name sequence structure 77 * @lock: spinlock controlling access to name sequence structure
@@ -101,7 +101,7 @@ struct name_table {
101 101
102static struct name_table table = { NULL } ; 102static struct name_table table = { NULL } ;
103static atomic_t rsv_publ_ok = ATOMIC_INIT(0); 103static atomic_t rsv_publ_ok = ATOMIC_INIT(0);
104rwlock_t tipc_nametbl_lock = RW_LOCK_UNLOCKED; 104DEFINE_RWLOCK(tipc_nametbl_lock);
105 105
106 106
107static int hash(int x) 107static int hash(int x)
@@ -120,7 +120,7 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper,
120 struct publication *publ = 120 struct publication *publ =
121 (struct publication *)kmalloc(sizeof(*publ), GFP_ATOMIC); 121 (struct publication *)kmalloc(sizeof(*publ), GFP_ATOMIC);
122 if (publ == NULL) { 122 if (publ == NULL) {
123 warn("Memory squeeze; failed to create publication\n"); 123 warn("Publication creation failure, no memory\n");
124 return NULL; 124 return NULL;
125 } 125 }
126 126
@@ -165,17 +165,17 @@ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_hea
165 struct sub_seq *sseq = tipc_subseq_alloc(1); 165 struct sub_seq *sseq = tipc_subseq_alloc(1);
166 166
167 if (!nseq || !sseq) { 167 if (!nseq || !sseq) {
168 warn("Memory squeeze; failed to create name sequence\n"); 168 warn("Name sequence creation failed, no memory\n");
169 kfree(nseq); 169 kfree(nseq);
170 kfree(sseq); 170 kfree(sseq);
171 return NULL; 171 return NULL;
172 } 172 }
173 173
174 memset(nseq, 0, sizeof(*nseq)); 174 memset(nseq, 0, sizeof(*nseq));
175 nseq->lock = SPIN_LOCK_UNLOCKED; 175 spin_lock_init(&nseq->lock);
176 nseq->type = type; 176 nseq->type = type;
177 nseq->sseqs = sseq; 177 nseq->sseqs = sseq;
178 dbg("tipc_nameseq_create() nseq = %x type %u, ssseqs %x, ff: %u\n", 178 dbg("tipc_nameseq_create(): nseq = %p, type %u, ssseqs %p, ff: %u\n",
179 nseq, type, nseq->sseqs, nseq->first_free); 179 nseq, type, nseq->sseqs, nseq->first_free);
180 nseq->alloc = 1; 180 nseq->alloc = 1;
181 INIT_HLIST_NODE(&nseq->ns_list); 181 INIT_HLIST_NODE(&nseq->ns_list);
@@ -253,16 +253,16 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
253 struct sub_seq *sseq; 253 struct sub_seq *sseq;
254 int created_subseq = 0; 254 int created_subseq = 0;
255 255
256 assert(nseq->first_free <= nseq->alloc);
257 sseq = nameseq_find_subseq(nseq, lower); 256 sseq = nameseq_find_subseq(nseq, lower);
258 dbg("nameseq_ins: for seq %x,<%u,%u>, found sseq %x\n", 257 dbg("nameseq_ins: for seq %p, {%u,%u}, found sseq %p\n",
259 nseq, type, lower, sseq); 258 nseq, type, lower, sseq);
260 if (sseq) { 259 if (sseq) {
261 260
262 /* Lower end overlaps existing entry => need an exact match */ 261 /* Lower end overlaps existing entry => need an exact match */
263 262
264 if ((sseq->lower != lower) || (sseq->upper != upper)) { 263 if ((sseq->lower != lower) || (sseq->upper != upper)) {
265 warn("Overlapping publ <%u,%u,%u>\n", type, lower, upper); 264 warn("Cannot publish {%u,%u,%u}, overlap error\n",
265 type, lower, upper);
266 return NULL; 266 return NULL;
267 } 267 }
268 } else { 268 } else {
@@ -277,25 +277,27 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
277 277
278 if ((inspos < nseq->first_free) && 278 if ((inspos < nseq->first_free) &&
279 (upper >= nseq->sseqs[inspos].lower)) { 279 (upper >= nseq->sseqs[inspos].lower)) {
280 warn("Overlapping publ <%u,%u,%u>\n", type, lower, upper); 280 warn("Cannot publish {%u,%u,%u}, overlap error\n",
281 type, lower, upper);
281 return NULL; 282 return NULL;
282 } 283 }
283 284
284 /* Ensure there is space for new sub-sequence */ 285 /* Ensure there is space for new sub-sequence */
285 286
286 if (nseq->first_free == nseq->alloc) { 287 if (nseq->first_free == nseq->alloc) {
287 struct sub_seq *sseqs = nseq->sseqs; 288 struct sub_seq *sseqs = tipc_subseq_alloc(nseq->alloc * 2);
288 nseq->sseqs = tipc_subseq_alloc(nseq->alloc * 2); 289
289 if (nseq->sseqs != NULL) { 290 if (!sseqs) {
290 memcpy(nseq->sseqs, sseqs, 291 warn("Cannot publish {%u,%u,%u}, no memory\n",
291 nseq->alloc * sizeof (struct sub_seq)); 292 type, lower, upper);
292 kfree(sseqs);
293 dbg("Allocated %u sseqs\n", nseq->alloc);
294 nseq->alloc *= 2;
295 } else {
296 warn("Memory squeeze; failed to create sub-sequence\n");
297 return NULL; 293 return NULL;
298 } 294 }
295 dbg("Allocated %u more sseqs\n", nseq->alloc);
296 memcpy(sseqs, nseq->sseqs,
297 nseq->alloc * sizeof(struct sub_seq));
298 kfree(nseq->sseqs);
299 nseq->sseqs = sseqs;
300 nseq->alloc *= 2;
299 } 301 }
300 dbg("Have %u sseqs for type %u\n", nseq->alloc, type); 302 dbg("Have %u sseqs for type %u\n", nseq->alloc, type);
301 303
@@ -311,7 +313,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
311 sseq->upper = upper; 313 sseq->upper = upper;
312 created_subseq = 1; 314 created_subseq = 1;
313 } 315 }
314 dbg("inserting (%u %u %u) from %x:%u into sseq %x(%u,%u) of seq %x\n", 316 dbg("inserting {%u,%u,%u} from <0x%x:%u> into sseq %p(%u,%u) of seq %p\n",
315 type, lower, upper, node, port, sseq, 317 type, lower, upper, node, port, sseq,
316 sseq->lower, sseq->upper, nseq); 318 sseq->lower, sseq->upper, nseq);
317 319
@@ -320,7 +322,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
320 publ = publ_create(type, lower, upper, scope, node, port, key); 322 publ = publ_create(type, lower, upper, scope, node, port, key);
321 if (!publ) 323 if (!publ)
322 return NULL; 324 return NULL;
323 dbg("inserting publ %x, node=%x publ->node=%x, subscr->node=%x\n", 325 dbg("inserting publ %p, node=0x%x publ->node=0x%x, subscr->node=%p\n",
324 publ, node, publ->node, publ->subscr.node); 326 publ, node, publ->node, publ->subscr.node);
325 327
326 if (!sseq->zone_list) 328 if (!sseq->zone_list)
@@ -367,45 +369,47 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
367 369
368/** 370/**
369 * tipc_nameseq_remove_publ - 371 * tipc_nameseq_remove_publ -
372 *
373 * NOTE: There may be cases where TIPC is asked to remove a publication
374 * that is not in the name table. For example, if another node issues a
375 * publication for a name sequence that overlaps an existing name sequence
376 * the publication will not be recorded, which means the publication won't
377 * be found when the name sequence is later withdrawn by that node.
378 * A failed withdraw request simply returns a failure indication and lets the
379 * caller issue any error or warning messages associated with such a problem.
370 */ 380 */
371 381
372static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 inst, 382static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 inst,
373 u32 node, u32 ref, u32 key) 383 u32 node, u32 ref, u32 key)
374{ 384{
375 struct publication *publ; 385 struct publication *publ;
386 struct publication *curr;
376 struct publication *prev; 387 struct publication *prev;
377 struct sub_seq *sseq = nameseq_find_subseq(nseq, inst); 388 struct sub_seq *sseq = nameseq_find_subseq(nseq, inst);
378 struct sub_seq *free; 389 struct sub_seq *free;
379 struct subscription *s, *st; 390 struct subscription *s, *st;
380 int removed_subseq = 0; 391 int removed_subseq = 0;
381 392
382 assert(nseq); 393 if (!sseq)
383
384 if (!sseq) {
385 int i;
386
387 warn("Withdraw unknown <%u,%u>?\n", nseq->type, inst);
388 assert(nseq->sseqs);
389 dbg("Dumping subseqs %x for %x, alloc = %u,ff=%u\n",
390 nseq->sseqs, nseq, nseq->alloc,
391 nseq->first_free);
392 for (i = 0; i < nseq->first_free; i++) {
393 dbg("Subseq %u(%x): lower = %u,upper = %u\n",
394 i, &nseq->sseqs[i], nseq->sseqs[i].lower,
395 nseq->sseqs[i].upper);
396 }
397 return NULL; 394 return NULL;
398 } 395
399 dbg("nameseq_remove: seq: %x, sseq %x, <%u,%u> key %u\n", 396 dbg("tipc_nameseq_remove_publ: seq: %p, sseq %p, {%u,%u}, key %u\n",
400 nseq, sseq, nseq->type, inst, key); 397 nseq, sseq, nseq->type, inst, key);
401 398
399 /* Remove publication from zone scope list */
400
402 prev = sseq->zone_list; 401 prev = sseq->zone_list;
403 publ = sseq->zone_list->zone_list_next; 402 publ = sseq->zone_list->zone_list_next;
404 while ((publ->key != key) || (publ->ref != ref) || 403 while ((publ->key != key) || (publ->ref != ref) ||
405 (publ->node && (publ->node != node))) { 404 (publ->node && (publ->node != node))) {
406 prev = publ; 405 prev = publ;
407 publ = publ->zone_list_next; 406 publ = publ->zone_list_next;
408 assert(prev != sseq->zone_list); 407 if (prev == sseq->zone_list) {
408
409 /* Prevent endless loop if publication not found */
410
411 return NULL;
412 }
409 } 413 }
410 if (publ != sseq->zone_list) 414 if (publ != sseq->zone_list)
411 prev->zone_list_next = publ->zone_list_next; 415 prev->zone_list_next = publ->zone_list_next;
@@ -416,14 +420,24 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
416 sseq->zone_list = NULL; 420 sseq->zone_list = NULL;
417 } 421 }
418 422
423 /* Remove publication from cluster scope list, if present */
424
419 if (in_own_cluster(node)) { 425 if (in_own_cluster(node)) {
420 prev = sseq->cluster_list; 426 prev = sseq->cluster_list;
421 publ = sseq->cluster_list->cluster_list_next; 427 curr = sseq->cluster_list->cluster_list_next;
422 while ((publ->key != key) || (publ->ref != ref) || 428 while (curr != publ) {
423 (publ->node && (publ->node != node))) { 429 prev = curr;
424 prev = publ; 430 curr = curr->cluster_list_next;
425 publ = publ->cluster_list_next; 431 if (prev == sseq->cluster_list) {
426 assert(prev != sseq->cluster_list); 432
433 /* Prevent endless loop for malformed list */
434
435 err("Unable to de-list cluster publication\n"
436 "{%u%u}, node=0x%x, ref=%u, key=%u)\n",
437 publ->type, publ->lower, publ->node,
438 publ->ref, publ->key);
439 goto end_cluster;
440 }
427 } 441 }
428 if (publ != sseq->cluster_list) 442 if (publ != sseq->cluster_list)
429 prev->cluster_list_next = publ->cluster_list_next; 443 prev->cluster_list_next = publ->cluster_list_next;
@@ -434,15 +448,26 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
434 sseq->cluster_list = NULL; 448 sseq->cluster_list = NULL;
435 } 449 }
436 } 450 }
451end_cluster:
452
453 /* Remove publication from node scope list, if present */
437 454
438 if (node == tipc_own_addr) { 455 if (node == tipc_own_addr) {
439 prev = sseq->node_list; 456 prev = sseq->node_list;
440 publ = sseq->node_list->node_list_next; 457 curr = sseq->node_list->node_list_next;
441 while ((publ->key != key) || (publ->ref != ref) || 458 while (curr != publ) {
442 (publ->node && (publ->node != node))) { 459 prev = curr;
443 prev = publ; 460 curr = curr->node_list_next;
444 publ = publ->node_list_next; 461 if (prev == sseq->node_list) {
445 assert(prev != sseq->node_list); 462
463 /* Prevent endless loop for malformed list */
464
465 err("Unable to de-list node publication\n"
466 "{%u%u}, node=0x%x, ref=%u, key=%u)\n",
467 publ->type, publ->lower, publ->node,
468 publ->ref, publ->key);
469 goto end_node;
470 }
446 } 471 }
447 if (publ != sseq->node_list) 472 if (publ != sseq->node_list)
448 prev->node_list_next = publ->node_list_next; 473 prev->node_list_next = publ->node_list_next;
@@ -453,22 +478,18 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
453 sseq->node_list = NULL; 478 sseq->node_list = NULL;
454 } 479 }
455 } 480 }
456 assert(!publ->node || (publ->node == node)); 481end_node:
457 assert(publ->ref == ref);
458 assert(publ->key == key);
459 482
460 /* 483 /* Contract subseq list if no more publications for that subseq */
461 * Contract subseq list if no more publications: 484
462 */ 485 if (!sseq->zone_list) {
463 if (!sseq->node_list && !sseq->cluster_list && !sseq->zone_list) {
464 free = &nseq->sseqs[nseq->first_free--]; 486 free = &nseq->sseqs[nseq->first_free--];
465 memmove(sseq, sseq + 1, (free - (sseq + 1)) * sizeof (*sseq)); 487 memmove(sseq, sseq + 1, (free - (sseq + 1)) * sizeof (*sseq));
466 removed_subseq = 1; 488 removed_subseq = 1;
467 } 489 }
468 490
469 /* 491 /* Notify any waiting subscriptions */
470 * Any subscriptions waiting ? 492
471 */
472 list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) { 493 list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
473 tipc_subscr_report_overlap(s, 494 tipc_subscr_report_overlap(s,
474 publ->lower, 495 publ->lower,
@@ -478,6 +499,7 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
478 publ->node, 499 publ->node,
479 removed_subseq); 500 removed_subseq);
480 } 501 }
502
481 return publ; 503 return publ;
482} 504}
483 505
@@ -530,7 +552,7 @@ static struct name_seq *nametbl_find_seq(u32 type)
530 seq_head = &table.types[hash(type)]; 552 seq_head = &table.types[hash(type)];
531 hlist_for_each_entry(ns, seq_node, seq_head, ns_list) { 553 hlist_for_each_entry(ns, seq_node, seq_head, ns_list) {
532 if (ns->type == type) { 554 if (ns->type == type) {
533 dbg("found %x\n", ns); 555 dbg("found %p\n", ns);
534 return ns; 556 return ns;
535 } 557 }
536 } 558 }
@@ -543,22 +565,21 @@ struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
543{ 565{
544 struct name_seq *seq = nametbl_find_seq(type); 566 struct name_seq *seq = nametbl_find_seq(type);
545 567
546 dbg("ins_publ: <%u,%x,%x> found %x\n", type, lower, upper, seq); 568 dbg("tipc_nametbl_insert_publ: {%u,%u,%u} found %p\n", type, lower, upper, seq);
547 if (lower > upper) { 569 if (lower > upper) {
548 warn("Failed to publish illegal <%u,%u,%u>\n", 570 warn("Failed to publish illegal {%u,%u,%u}\n",
549 type, lower, upper); 571 type, lower, upper);
550 return NULL; 572 return NULL;
551 } 573 }
552 574
553 dbg("Publishing <%u,%u,%u> from %x\n", type, lower, upper, node); 575 dbg("Publishing {%u,%u,%u} from 0x%x\n", type, lower, upper, node);
554 if (!seq) { 576 if (!seq) {
555 seq = tipc_nameseq_create(type, &table.types[hash(type)]); 577 seq = tipc_nameseq_create(type, &table.types[hash(type)]);
556 dbg("tipc_nametbl_insert_publ: created %x\n", seq); 578 dbg("tipc_nametbl_insert_publ: created %p\n", seq);
557 } 579 }
558 if (!seq) 580 if (!seq)
559 return NULL; 581 return NULL;
560 582
561 assert(seq->type == type);
562 return tipc_nameseq_insert_publ(seq, type, lower, upper, 583 return tipc_nameseq_insert_publ(seq, type, lower, upper,
563 scope, node, port, key); 584 scope, node, port, key);
564} 585}
@@ -572,7 +593,7 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
572 if (!seq) 593 if (!seq)
573 return NULL; 594 return NULL;
574 595
575 dbg("Withdrawing <%u,%u> from %x\n", type, lower, node); 596 dbg("Withdrawing {%u,%u} from 0x%x\n", type, lower, node);
576 publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key); 597 publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key);
577 598
578 if (!seq->first_free && list_empty(&seq->subscriptions)) { 599 if (!seq->first_free && list_empty(&seq->subscriptions)) {
@@ -738,12 +759,12 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
738 struct publication *publ; 759 struct publication *publ;
739 760
740 if (table.local_publ_count >= tipc_max_publications) { 761 if (table.local_publ_count >= tipc_max_publications) {
741 warn("Failed publish: max %u local publication\n", 762 warn("Publication failed, local publication limit reached (%u)\n",
742 tipc_max_publications); 763 tipc_max_publications);
743 return NULL; 764 return NULL;
744 } 765 }
745 if ((type < TIPC_RESERVED_TYPES) && !atomic_read(&rsv_publ_ok)) { 766 if ((type < TIPC_RESERVED_TYPES) && !atomic_read(&rsv_publ_ok)) {
746 warn("Failed to publish reserved name <%u,%u,%u>\n", 767 warn("Publication failed, reserved name {%u,%u,%u}\n",
747 type, lower, upper); 768 type, lower, upper);
748 return NULL; 769 return NULL;
749 } 770 }
@@ -767,10 +788,10 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
767{ 788{
768 struct publication *publ; 789 struct publication *publ;
769 790
770 dbg("tipc_nametbl_withdraw:<%d,%d,%d>\n", type, lower, key); 791 dbg("tipc_nametbl_withdraw: {%u,%u}, key=%u\n", type, lower, key);
771 write_lock_bh(&tipc_nametbl_lock); 792 write_lock_bh(&tipc_nametbl_lock);
772 publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key); 793 publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
773 if (publ) { 794 if (likely(publ)) {
774 table.local_publ_count--; 795 table.local_publ_count--;
775 if (publ->scope != TIPC_NODE_SCOPE) 796 if (publ->scope != TIPC_NODE_SCOPE)
776 tipc_named_withdraw(publ); 797 tipc_named_withdraw(publ);
@@ -780,6 +801,9 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
780 return 1; 801 return 1;
781 } 802 }
782 write_unlock_bh(&tipc_nametbl_lock); 803 write_unlock_bh(&tipc_nametbl_lock);
804 err("Unable to remove local publication\n"
805 "(type=%u, lower=%u, ref=%u, key=%u)\n",
806 type, lower, ref, key);
783 return 0; 807 return 0;
784} 808}
785 809
@@ -787,8 +811,7 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
787 * tipc_nametbl_subscribe - add a subscription object to the name table 811 * tipc_nametbl_subscribe - add a subscription object to the name table
788 */ 812 */
789 813
790void 814void tipc_nametbl_subscribe(struct subscription *s)
791tipc_nametbl_subscribe(struct subscription *s)
792{ 815{
793 u32 type = s->seq.type; 816 u32 type = s->seq.type;
794 struct name_seq *seq; 817 struct name_seq *seq;
@@ -800,11 +823,13 @@ tipc_nametbl_subscribe(struct subscription *s)
800 } 823 }
801 if (seq){ 824 if (seq){
802 spin_lock_bh(&seq->lock); 825 spin_lock_bh(&seq->lock);
803 dbg("tipc_nametbl_subscribe:found %x for <%u,%u,%u>\n", 826 dbg("tipc_nametbl_subscribe:found %p for {%u,%u,%u}\n",
804 seq, type, s->seq.lower, s->seq.upper); 827 seq, type, s->seq.lower, s->seq.upper);
805 assert(seq->type == type);
806 tipc_nameseq_subscribe(seq, s); 828 tipc_nameseq_subscribe(seq, s);
807 spin_unlock_bh(&seq->lock); 829 spin_unlock_bh(&seq->lock);
830 } else {
831 warn("Failed to create subscription for {%u,%u,%u}\n",
832 s->seq.type, s->seq.lower, s->seq.upper);
808 } 833 }
809 write_unlock_bh(&tipc_nametbl_lock); 834 write_unlock_bh(&tipc_nametbl_lock);
810} 835}
@@ -813,8 +838,7 @@ tipc_nametbl_subscribe(struct subscription *s)
813 * tipc_nametbl_unsubscribe - remove a subscription object from name table 838 * tipc_nametbl_unsubscribe - remove a subscription object from name table
814 */ 839 */
815 840
816void 841void tipc_nametbl_unsubscribe(struct subscription *s)
817tipc_nametbl_unsubscribe(struct subscription *s)
818{ 842{
819 struct name_seq *seq; 843 struct name_seq *seq;
820 844
@@ -1049,35 +1073,20 @@ int tipc_nametbl_init(void)
1049 1073
1050void tipc_nametbl_stop(void) 1074void tipc_nametbl_stop(void)
1051{ 1075{
1052 struct hlist_head *seq_head;
1053 struct hlist_node *seq_node;
1054 struct hlist_node *tmp;
1055 struct name_seq *seq;
1056 u32 i; 1076 u32 i;
1057 1077
1058 if (!table.types) 1078 if (!table.types)
1059 return; 1079 return;
1060 1080
1081 /* Verify name table is empty, then release it */
1082
1061 write_lock_bh(&tipc_nametbl_lock); 1083 write_lock_bh(&tipc_nametbl_lock);
1062 for (i = 0; i < tipc_nametbl_size; i++) { 1084 for (i = 0; i < tipc_nametbl_size; i++) {
1063 seq_head = &table.types[i]; 1085 if (!hlist_empty(&table.types[i]))
1064 hlist_for_each_entry_safe(seq, seq_node, tmp, seq_head, ns_list) { 1086 err("tipc_nametbl_stop(): hash chain %u is non-null\n", i);
1065 struct sub_seq *sseq = seq->sseqs;
1066
1067 for (; sseq != &seq->sseqs[seq->first_free]; sseq++) {
1068 struct publication *publ = sseq->zone_list;
1069 assert(publ);
1070 do {
1071 struct publication *next =
1072 publ->zone_list_next;
1073 kfree(publ);
1074 publ = next;
1075 }
1076 while (publ != sseq->zone_list);
1077 }
1078 }
1079 } 1087 }
1080 kfree(table.types); 1088 kfree(table.types);
1081 table.types = NULL; 1089 table.types = NULL;
1082 write_unlock_bh(&tipc_nametbl_lock); 1090 write_unlock_bh(&tipc_nametbl_lock);
1083} 1091}
1092
diff --git a/net/tipc/net.c b/net/tipc/net.c
index f7c8223ddf..e5a359ab49 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -115,7 +115,7 @@
115 * - A local spin_lock protecting the queue of subscriber events. 115 * - A local spin_lock protecting the queue of subscriber events.
116*/ 116*/
117 117
118rwlock_t tipc_net_lock = RW_LOCK_UNLOCKED; 118DEFINE_RWLOCK(tipc_net_lock);
119struct network tipc_net = { NULL }; 119struct network tipc_net = { NULL };
120 120
121struct node *tipc_net_select_remote_node(u32 addr, u32 ref) 121struct node *tipc_net_select_remote_node(u32 addr, u32 ref)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 0d5db06e20..861322b935 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -61,34 +61,37 @@ struct node *tipc_node_create(u32 addr)
61 struct node **curr_node; 61 struct node **curr_node;
62 62
63 n_ptr = kmalloc(sizeof(*n_ptr),GFP_ATOMIC); 63 n_ptr = kmalloc(sizeof(*n_ptr),GFP_ATOMIC);
64 if (n_ptr != NULL) { 64 if (!n_ptr) {
65 memset(n_ptr, 0, sizeof(*n_ptr)); 65 warn("Node creation failed, no memory\n");
66 n_ptr->addr = addr; 66 return NULL;
67 n_ptr->lock = SPIN_LOCK_UNLOCKED; 67 }
68 INIT_LIST_HEAD(&n_ptr->nsub); 68
69 69 c_ptr = tipc_cltr_find(addr);
70 c_ptr = tipc_cltr_find(addr); 70 if (!c_ptr) {
71 if (c_ptr == NULL) 71 c_ptr = tipc_cltr_create(addr);
72 c_ptr = tipc_cltr_create(addr); 72 }
73 if (c_ptr != NULL) { 73 if (!c_ptr) {
74 n_ptr->owner = c_ptr; 74 kfree(n_ptr);
75 tipc_cltr_attach_node(c_ptr, n_ptr); 75 return NULL;
76 n_ptr->last_router = -1; 76 }
77 77
78 /* Insert node into ordered list */ 78 memset(n_ptr, 0, sizeof(*n_ptr));
79 for (curr_node = &tipc_nodes; *curr_node; 79 n_ptr->addr = addr;
80 curr_node = &(*curr_node)->next) { 80 spin_lock_init(&n_ptr->lock);
81 if (addr < (*curr_node)->addr) { 81 INIT_LIST_HEAD(&n_ptr->nsub);
82 n_ptr->next = *curr_node; 82 n_ptr->owner = c_ptr;
83 break; 83 tipc_cltr_attach_node(c_ptr, n_ptr);
84 } 84 n_ptr->last_router = -1;
85 } 85
86 (*curr_node) = n_ptr; 86 /* Insert node into ordered list */
87 } else { 87 for (curr_node = &tipc_nodes; *curr_node;
88 kfree(n_ptr); 88 curr_node = &(*curr_node)->next) {
89 n_ptr = NULL; 89 if (addr < (*curr_node)->addr) {
90 } 90 n_ptr->next = *curr_node;
91 } 91 break;
92 }
93 }
94 (*curr_node) = n_ptr;
92 return n_ptr; 95 return n_ptr;
93} 96}
94 97
@@ -122,6 +125,8 @@ void tipc_node_link_up(struct node *n_ptr, struct link *l_ptr)
122{ 125{
123 struct link **active = &n_ptr->active_links[0]; 126 struct link **active = &n_ptr->active_links[0];
124 127
128 n_ptr->working_links++;
129
125 info("Established link <%s> on network plane %c\n", 130 info("Established link <%s> on network plane %c\n",
126 l_ptr->name, l_ptr->b_ptr->net_plane); 131 l_ptr->name, l_ptr->b_ptr->net_plane);
127 132
@@ -132,7 +137,7 @@ void tipc_node_link_up(struct node *n_ptr, struct link *l_ptr)
132 return; 137 return;
133 } 138 }
134 if (l_ptr->priority < active[0]->priority) { 139 if (l_ptr->priority < active[0]->priority) {
135 info("Link is standby\n"); 140 info("New link <%s> becomes standby\n", l_ptr->name);
136 return; 141 return;
137 } 142 }
138 tipc_link_send_duplicate(active[0], l_ptr); 143 tipc_link_send_duplicate(active[0], l_ptr);
@@ -140,8 +145,9 @@ void tipc_node_link_up(struct node *n_ptr, struct link *l_ptr)
140 active[0] = l_ptr; 145 active[0] = l_ptr;
141 return; 146 return;
142 } 147 }
143 info("Link <%s> on network plane %c becomes standby\n", 148 info("Old link <%s> becomes standby\n", active[0]->name);
144 active[0]->name, active[0]->b_ptr->net_plane); 149 if (active[1] != active[0])
150 info("Old link <%s> becomes standby\n", active[1]->name);
145 active[0] = active[1] = l_ptr; 151 active[0] = active[1] = l_ptr;
146} 152}
147 153
@@ -181,6 +187,8 @@ void tipc_node_link_down(struct node *n_ptr, struct link *l_ptr)
181{ 187{
182 struct link **active; 188 struct link **active;
183 189
190 n_ptr->working_links--;
191
184 if (!tipc_link_is_active(l_ptr)) { 192 if (!tipc_link_is_active(l_ptr)) {
185 info("Lost standby link <%s> on network plane %c\n", 193 info("Lost standby link <%s> on network plane %c\n",
186 l_ptr->name, l_ptr->b_ptr->net_plane); 194 l_ptr->name, l_ptr->b_ptr->net_plane);
@@ -210,8 +218,7 @@ int tipc_node_has_active_links(struct node *n_ptr)
210 218
211int tipc_node_has_redundant_links(struct node *n_ptr) 219int tipc_node_has_redundant_links(struct node *n_ptr)
212{ 220{
213 return (tipc_node_has_active_links(n_ptr) && 221 return (n_ptr->working_links > 1);
214 (n_ptr->active_links[0] != n_ptr->active_links[1]));
215} 222}
216 223
217static int tipc_node_has_active_routes(struct node *n_ptr) 224static int tipc_node_has_active_routes(struct node *n_ptr)
@@ -234,7 +241,6 @@ struct node *tipc_node_attach_link(struct link *l_ptr)
234 u32 bearer_id = l_ptr->b_ptr->identity; 241 u32 bearer_id = l_ptr->b_ptr->identity;
235 char addr_string[16]; 242 char addr_string[16];
236 243
237 assert(bearer_id < MAX_BEARERS);
238 if (n_ptr->link_cnt >= 2) { 244 if (n_ptr->link_cnt >= 2) {
239 char addr_string[16]; 245 char addr_string[16];
240 246
@@ -249,7 +255,7 @@ struct node *tipc_node_attach_link(struct link *l_ptr)
249 n_ptr->link_cnt++; 255 n_ptr->link_cnt++;
250 return n_ptr; 256 return n_ptr;
251 } 257 }
252 err("Attempt to establish second link on <%s> to <%s> \n", 258 err("Attempt to establish second link on <%s> to %s \n",
253 l_ptr->b_ptr->publ.name, 259 l_ptr->b_ptr->publ.name,
254 addr_string_fill(addr_string, l_ptr->addr)); 260 addr_string_fill(addr_string, l_ptr->addr));
255 } 261 }
@@ -314,7 +320,7 @@ static void node_established_contact(struct node *n_ptr)
314 struct cluster *c_ptr; 320 struct cluster *c_ptr;
315 321
316 dbg("node_established_contact:-> %x\n", n_ptr->addr); 322 dbg("node_established_contact:-> %x\n", n_ptr->addr);
317 if (!tipc_node_has_active_routes(n_ptr)) { 323 if (!tipc_node_has_active_routes(n_ptr) && in_own_cluster(n_ptr->addr)) {
318 tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr); 324 tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr);
319 } 325 }
320 326
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 781126e084..a07cc79ea6 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -51,6 +51,7 @@
51 * @nsub: list of "node down" subscriptions monitoring node 51 * @nsub: list of "node down" subscriptions monitoring node
52 * @active_links: pointers to active links to node 52 * @active_links: pointers to active links to node
53 * @links: pointers to all links to node 53 * @links: pointers to all links to node
54 * @working_links: number of working links to node (both active and standby)
54 * @link_cnt: number of links to node 55 * @link_cnt: number of links to node
55 * @permit_changeover: non-zero if node has redundant links to this system 56 * @permit_changeover: non-zero if node has redundant links to this system
56 * @routers: bitmap (used for multicluster communication) 57 * @routers: bitmap (used for multicluster communication)
@@ -76,6 +77,7 @@ struct node {
76 struct link *active_links[2]; 77 struct link *active_links[2];
77 struct link *links[MAX_BEARERS]; 78 struct link *links[MAX_BEARERS];
78 int link_cnt; 79 int link_cnt;
80 int working_links;
79 int permit_changeover; 81 int permit_changeover;
80 u32 routers[512/32]; 82 u32 routers[512/32];
81 int last_router; 83 int last_router;
diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c
index cff4068cc7..cc3fff3dec 100644
--- a/net/tipc/node_subscr.c
+++ b/net/tipc/node_subscr.c
@@ -47,18 +47,19 @@
47void tipc_nodesub_subscribe(struct node_subscr *node_sub, u32 addr, 47void tipc_nodesub_subscribe(struct node_subscr *node_sub, u32 addr,
48 void *usr_handle, net_ev_handler handle_down) 48 void *usr_handle, net_ev_handler handle_down)
49{ 49{
50 node_sub->node = NULL; 50 if (addr == tipc_own_addr) {
51 if (addr == tipc_own_addr) 51 node_sub->node = NULL;
52 return; 52 return;
53 if (!tipc_addr_node_valid(addr)) { 53 }
54 warn("node_subscr with illegal %x\n", addr); 54
55 node_sub->node = tipc_node_find(addr);
56 if (!node_sub->node) {
57 warn("Node subscription rejected, unknown node 0x%x\n", addr);
55 return; 58 return;
56 } 59 }
57
58 node_sub->handle_node_down = handle_down; 60 node_sub->handle_node_down = handle_down;
59 node_sub->usr_handle = usr_handle; 61 node_sub->usr_handle = usr_handle;
60 node_sub->node = tipc_node_find(addr); 62
61 assert(node_sub->node);
62 tipc_node_lock(node_sub->node); 63 tipc_node_lock(node_sub->node);
63 list_add_tail(&node_sub->nodesub_list, &node_sub->node->nsub); 64 list_add_tail(&node_sub->nodesub_list, &node_sub->node->nsub);
64 tipc_node_unlock(node_sub->node); 65 tipc_node_unlock(node_sub->node);
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 67e96cb1e8..3251c8d8e5 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -57,8 +57,8 @@
57static struct sk_buff *msg_queue_head = NULL; 57static struct sk_buff *msg_queue_head = NULL;
58static struct sk_buff *msg_queue_tail = NULL; 58static struct sk_buff *msg_queue_tail = NULL;
59 59
60spinlock_t tipc_port_list_lock = SPIN_LOCK_UNLOCKED; 60DEFINE_SPINLOCK(tipc_port_list_lock);
61static spinlock_t queue_lock = SPIN_LOCK_UNLOCKED; 61static DEFINE_SPINLOCK(queue_lock);
62 62
63static LIST_HEAD(ports); 63static LIST_HEAD(ports);
64static void port_handle_node_down(unsigned long ref); 64static void port_handle_node_down(unsigned long ref);
@@ -168,7 +168,6 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
168 struct port_list *item = dp; 168 struct port_list *item = dp;
169 int cnt = 0; 169 int cnt = 0;
170 170
171 assert(buf);
172 msg = buf_msg(buf); 171 msg = buf_msg(buf);
173 172
174 /* Create destination port list, if one wasn't supplied */ 173 /* Create destination port list, if one wasn't supplied */
@@ -196,7 +195,7 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
196 struct sk_buff *b = skb_clone(buf, GFP_ATOMIC); 195 struct sk_buff *b = skb_clone(buf, GFP_ATOMIC);
197 196
198 if (b == NULL) { 197 if (b == NULL) {
199 warn("Buffer allocation failure\n"); 198 warn("Unable to deliver multicast message(s)\n");
200 msg_dbg(msg, "LOST:"); 199 msg_dbg(msg, "LOST:");
201 goto exit; 200 goto exit;
202 } 201 }
@@ -228,14 +227,14 @@ u32 tipc_createport_raw(void *usr_handle,
228 u32 ref; 227 u32 ref;
229 228
230 p_ptr = kmalloc(sizeof(*p_ptr), GFP_ATOMIC); 229 p_ptr = kmalloc(sizeof(*p_ptr), GFP_ATOMIC);
231 if (p_ptr == NULL) { 230 if (!p_ptr) {
232 warn("Memory squeeze; failed to create port\n"); 231 warn("Port creation failed, no memory\n");
233 return 0; 232 return 0;
234 } 233 }
235 memset(p_ptr, 0, sizeof(*p_ptr)); 234 memset(p_ptr, 0, sizeof(*p_ptr));
236 ref = tipc_ref_acquire(p_ptr, &p_ptr->publ.lock); 235 ref = tipc_ref_acquire(p_ptr, &p_ptr->publ.lock);
237 if (!ref) { 236 if (!ref) {
238 warn("Reference Table Exhausted\n"); 237 warn("Port creation failed, reference table exhausted\n");
239 kfree(p_ptr); 238 kfree(p_ptr);
240 return 0; 239 return 0;
241 } 240 }
@@ -810,18 +809,20 @@ static void port_dispatcher_sigh(void *dummy)
810 void *usr_handle; 809 void *usr_handle;
811 int connected; 810 int connected;
812 int published; 811 int published;
812 u32 message_type;
813 813
814 struct sk_buff *next = buf->next; 814 struct sk_buff *next = buf->next;
815 struct tipc_msg *msg = buf_msg(buf); 815 struct tipc_msg *msg = buf_msg(buf);
816 u32 dref = msg_destport(msg); 816 u32 dref = msg_destport(msg);
817 817
818 message_type = msg_type(msg);
819 if (message_type > TIPC_DIRECT_MSG)
820 goto reject; /* Unsupported message type */
821
818 p_ptr = tipc_port_lock(dref); 822 p_ptr = tipc_port_lock(dref);
819 if (!p_ptr) { 823 if (!p_ptr)
820 /* Port deleted while msg in queue */ 824 goto reject; /* Port deleted while msg in queue */
821 tipc_reject_msg(buf, TIPC_ERR_NO_PORT); 825
822 buf = next;
823 continue;
824 }
825 orig.ref = msg_origport(msg); 826 orig.ref = msg_origport(msg);
826 orig.node = msg_orignode(msg); 827 orig.node = msg_orignode(msg);
827 up_ptr = p_ptr->user_port; 828 up_ptr = p_ptr->user_port;
@@ -832,7 +833,7 @@ static void port_dispatcher_sigh(void *dummy)
832 if (unlikely(msg_errcode(msg))) 833 if (unlikely(msg_errcode(msg)))
833 goto err; 834 goto err;
834 835
835 switch (msg_type(msg)) { 836 switch (message_type) {
836 837
837 case TIPC_CONN_MSG:{ 838 case TIPC_CONN_MSG:{
838 tipc_conn_msg_event cb = up_ptr->conn_msg_cb; 839 tipc_conn_msg_event cb = up_ptr->conn_msg_cb;
@@ -874,6 +875,7 @@ static void port_dispatcher_sigh(void *dummy)
874 &orig); 875 &orig);
875 break; 876 break;
876 } 877 }
878 case TIPC_MCAST_MSG:
877 case TIPC_NAMED_MSG:{ 879 case TIPC_NAMED_MSG:{
878 tipc_named_msg_event cb = up_ptr->named_msg_cb; 880 tipc_named_msg_event cb = up_ptr->named_msg_cb;
879 881
@@ -886,7 +888,8 @@ static void port_dispatcher_sigh(void *dummy)
886 goto reject; 888 goto reject;
887 dseq.type = msg_nametype(msg); 889 dseq.type = msg_nametype(msg);
888 dseq.lower = msg_nameinst(msg); 890 dseq.lower = msg_nameinst(msg);
889 dseq.upper = dseq.lower; 891 dseq.upper = (message_type == TIPC_NAMED_MSG)
892 ? dseq.lower : msg_nameupper(msg);
890 skb_pull(buf, msg_hdr_sz(msg)); 893 skb_pull(buf, msg_hdr_sz(msg));
891 cb(usr_handle, dref, &buf, msg_data(msg), 894 cb(usr_handle, dref, &buf, msg_data(msg),
892 msg_data_sz(msg), msg_importance(msg), 895 msg_data_sz(msg), msg_importance(msg),
@@ -899,7 +902,7 @@ static void port_dispatcher_sigh(void *dummy)
899 buf = next; 902 buf = next;
900 continue; 903 continue;
901err: 904err:
902 switch (msg_type(msg)) { 905 switch (message_type) {
903 906
904 case TIPC_CONN_MSG:{ 907 case TIPC_CONN_MSG:{
905 tipc_conn_shutdown_event cb = 908 tipc_conn_shutdown_event cb =
@@ -931,6 +934,7 @@ err:
931 msg_data_sz(msg), msg_errcode(msg), &orig); 934 msg_data_sz(msg), msg_errcode(msg), &orig);
932 break; 935 break;
933 } 936 }
937 case TIPC_MCAST_MSG:
934 case TIPC_NAMED_MSG:{ 938 case TIPC_NAMED_MSG:{
935 tipc_named_msg_err_event cb = 939 tipc_named_msg_err_event cb =
936 up_ptr->named_err_cb; 940 up_ptr->named_err_cb;
@@ -940,7 +944,8 @@ err:
940 break; 944 break;
941 dseq.type = msg_nametype(msg); 945 dseq.type = msg_nametype(msg);
942 dseq.lower = msg_nameinst(msg); 946 dseq.lower = msg_nameinst(msg);
943 dseq.upper = dseq.lower; 947 dseq.upper = (message_type == TIPC_NAMED_MSG)
948 ? dseq.lower : msg_nameupper(msg);
944 skb_pull(buf, msg_hdr_sz(msg)); 949 skb_pull(buf, msg_hdr_sz(msg));
945 cb(usr_handle, dref, &buf, msg_data(msg), 950 cb(usr_handle, dref, &buf, msg_data(msg),
946 msg_data_sz(msg), msg_errcode(msg), &dseq); 951 msg_data_sz(msg), msg_errcode(msg), &dseq);
@@ -1054,7 +1059,8 @@ int tipc_createport(u32 user_ref,
1054 u32 ref; 1059 u32 ref;
1055 1060
1056 up_ptr = (struct user_port *)kmalloc(sizeof(*up_ptr), GFP_ATOMIC); 1061 up_ptr = (struct user_port *)kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
1057 if (up_ptr == NULL) { 1062 if (!up_ptr) {
1063 warn("Port creation failed, no memory\n");
1058 return -ENOMEM; 1064 return -ENOMEM;
1059 } 1065 }
1060 ref = tipc_createport_raw(NULL, port_dispatcher, port_wakeup, importance); 1066 ref = tipc_createport_raw(NULL, port_dispatcher, port_wakeup, importance);
@@ -1165,8 +1171,6 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
1165 p_ptr = tipc_port_lock(ref); 1171 p_ptr = tipc_port_lock(ref);
1166 if (!p_ptr) 1172 if (!p_ptr)
1167 return -EINVAL; 1173 return -EINVAL;
1168 if (!p_ptr->publ.published)
1169 goto exit;
1170 if (!seq) { 1174 if (!seq) {
1171 list_for_each_entry_safe(publ, tpubl, 1175 list_for_each_entry_safe(publ, tpubl,
1172 &p_ptr->publications, pport_list) { 1176 &p_ptr->publications, pport_list) {
@@ -1193,7 +1197,6 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
1193 } 1197 }
1194 if (list_empty(&p_ptr->publications)) 1198 if (list_empty(&p_ptr->publications))
1195 p_ptr->publ.published = 0; 1199 p_ptr->publ.published = 0;
1196exit:
1197 tipc_port_unlock(p_ptr); 1200 tipc_port_unlock(p_ptr);
1198 return res; 1201 return res;
1199} 1202}
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 33bbf50950..596d3c8ff7 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -63,7 +63,7 @@
63 63
64struct ref_table tipc_ref_table = { NULL }; 64struct ref_table tipc_ref_table = { NULL };
65 65
66static rwlock_t ref_table_lock = RW_LOCK_UNLOCKED; 66static DEFINE_RWLOCK(ref_table_lock);
67 67
68/** 68/**
69 * tipc_ref_table_init - create reference table for objects 69 * tipc_ref_table_init - create reference table for objects
@@ -87,7 +87,7 @@ int tipc_ref_table_init(u32 requested_size, u32 start)
87 index_mask = sz - 1; 87 index_mask = sz - 1;
88 for (i = sz - 1; i >= 0; i--) { 88 for (i = sz - 1; i >= 0; i--) {
89 table[i].object = NULL; 89 table[i].object = NULL;
90 table[i].lock = SPIN_LOCK_UNLOCKED; 90 spin_lock_init(&table[i].lock);
91 table[i].data.next_plus_upper = (start & ~index_mask) + i - 1; 91 table[i].data.next_plus_upper = (start & ~index_mask) + i - 1;
92 } 92 }
93 tipc_ref_table.entries = table; 93 tipc_ref_table.entries = table;
@@ -127,7 +127,14 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
127 u32 next_plus_upper; 127 u32 next_plus_upper;
128 u32 reference = 0; 128 u32 reference = 0;
129 129
130 assert(tipc_ref_table.entries && object); 130 if (!object) {
131 err("Attempt to acquire reference to non-existent object\n");
132 return 0;
133 }
134 if (!tipc_ref_table.entries) {
135 err("Reference table not found during acquisition attempt\n");
136 return 0;
137 }
131 138
132 write_lock_bh(&ref_table_lock); 139 write_lock_bh(&ref_table_lock);
133 if (tipc_ref_table.first_free) { 140 if (tipc_ref_table.first_free) {
@@ -162,15 +169,28 @@ void tipc_ref_discard(u32 ref)
162 u32 index; 169 u32 index;
163 u32 index_mask; 170 u32 index_mask;
164 171
165 assert(tipc_ref_table.entries); 172 if (!ref) {
166 assert(ref != 0); 173 err("Attempt to discard reference 0\n");
174 return;
175 }
176 if (!tipc_ref_table.entries) {
177 err("Reference table not found during discard attempt\n");
178 return;
179 }
167 180
168 write_lock_bh(&ref_table_lock); 181 write_lock_bh(&ref_table_lock);
169 index_mask = tipc_ref_table.index_mask; 182 index_mask = tipc_ref_table.index_mask;
170 index = ref & index_mask; 183 index = ref & index_mask;
171 entry = &(tipc_ref_table.entries[index]); 184 entry = &(tipc_ref_table.entries[index]);
172 assert(entry->object != 0); 185
173 assert(entry->data.reference == ref); 186 if (!entry->object) {
187 err("Attempt to discard reference to non-existent object\n");
188 goto exit;
189 }
190 if (entry->data.reference != ref) {
191 err("Attempt to discard non-existent reference\n");
192 goto exit;
193 }
174 194
175 /* mark entry as unused */ 195 /* mark entry as unused */
176 entry->object = NULL; 196 entry->object = NULL;
@@ -184,6 +204,7 @@ void tipc_ref_discard(u32 ref)
184 204
185 /* increment upper bits of entry to invalidate subsequent references */ 205 /* increment upper bits of entry to invalidate subsequent references */
186 entry->data.next_plus_upper = (ref & ~index_mask) + (index_mask + 1); 206 entry->data.next_plus_upper = (ref & ~index_mask) + (index_mask + 1);
207exit:
187 write_unlock_bh(&ref_table_lock); 208 write_unlock_bh(&ref_table_lock);
188} 209}
189 210
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 648a734e60..32d778448a 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -169,12 +169,6 @@ static int tipc_create(struct socket *sock, int protocol)
169 struct sock *sk; 169 struct sock *sk;
170 u32 ref; 170 u32 ref;
171 171
172 if ((sock->type != SOCK_STREAM) &&
173 (sock->type != SOCK_SEQPACKET) &&
174 (sock->type != SOCK_DGRAM) &&
175 (sock->type != SOCK_RDM))
176 return -EPROTOTYPE;
177
178 if (unlikely(protocol != 0)) 172 if (unlikely(protocol != 0))
179 return -EPROTONOSUPPORT; 173 return -EPROTONOSUPPORT;
180 174
@@ -199,6 +193,9 @@ static int tipc_create(struct socket *sock, int protocol)
199 sock->ops = &msg_ops; 193 sock->ops = &msg_ops;
200 sock->state = SS_READY; 194 sock->state = SS_READY;
201 break; 195 break;
196 default:
197 tipc_deleteport(ref);
198 return -EPROTOTYPE;
202 } 199 }
203 200
204 sk = sk_alloc(AF_TIPC, GFP_KERNEL, &tipc_proto, 1); 201 sk = sk_alloc(AF_TIPC, GFP_KERNEL, &tipc_proto, 1);
@@ -426,7 +423,7 @@ static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
426 423
427 if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr))) 424 if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr)))
428 return -EFAULT; 425 return -EFAULT;
429 if ((ntohs(hdr.tcm_type) & 0xC000) & (!capable(CAP_NET_ADMIN))) 426 if ((ntohs(hdr.tcm_type) & 0xC000) && (!capable(CAP_NET_ADMIN)))
430 return -EACCES; 427 return -EACCES;
431 428
432 return 0; 429 return 0;
@@ -437,7 +434,7 @@ static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
437 * @iocb: (unused) 434 * @iocb: (unused)
438 * @sock: socket structure 435 * @sock: socket structure
439 * @m: message to send 436 * @m: message to send
440 * @total_len: (unused) 437 * @total_len: length of message
441 * 438 *
442 * Message must have an destination specified explicitly. 439 * Message must have an destination specified explicitly.
443 * Used for SOCK_RDM and SOCK_DGRAM messages, 440 * Used for SOCK_RDM and SOCK_DGRAM messages,
@@ -458,7 +455,8 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
458 455
459 if (unlikely(!dest)) 456 if (unlikely(!dest))
460 return -EDESTADDRREQ; 457 return -EDESTADDRREQ;
461 if (unlikely(dest->family != AF_TIPC)) 458 if (unlikely((m->msg_namelen < sizeof(*dest)) ||
459 (dest->family != AF_TIPC)))
462 return -EINVAL; 460 return -EINVAL;
463 461
464 needs_conn = (sock->state != SS_READY); 462 needs_conn = (sock->state != SS_READY);
@@ -470,6 +468,10 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
470 if ((tsock->p->published) || 468 if ((tsock->p->published) ||
471 ((sock->type == SOCK_STREAM) && (total_len != 0))) 469 ((sock->type == SOCK_STREAM) && (total_len != 0)))
472 return -EOPNOTSUPP; 470 return -EOPNOTSUPP;
471 if (dest->addrtype == TIPC_ADDR_NAME) {
472 tsock->p->conn_type = dest->addr.name.name.type;
473 tsock->p->conn_instance = dest->addr.name.name.instance;
474 }
473 } 475 }
474 476
475 if (down_interruptible(&tsock->sem)) 477 if (down_interruptible(&tsock->sem))
@@ -538,7 +540,7 @@ exit:
538 * @iocb: (unused) 540 * @iocb: (unused)
539 * @sock: socket structure 541 * @sock: socket structure
540 * @m: message to send 542 * @m: message to send
541 * @total_len: (unused) 543 * @total_len: length of message
542 * 544 *
543 * Used for SOCK_SEQPACKET messages and SOCK_STREAM data. 545 * Used for SOCK_SEQPACKET messages and SOCK_STREAM data.
544 * 546 *
@@ -561,15 +563,15 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
561 return -ERESTARTSYS; 563 return -ERESTARTSYS;
562 } 564 }
563 565
564 if (unlikely(sock->state != SS_CONNECTED)) {
565 if (sock->state == SS_DISCONNECTING)
566 res = -EPIPE;
567 else
568 res = -ENOTCONN;
569 goto exit;
570 }
571
572 do { 566 do {
567 if (unlikely(sock->state != SS_CONNECTED)) {
568 if (sock->state == SS_DISCONNECTING)
569 res = -EPIPE;
570 else
571 res = -ENOTCONN;
572 goto exit;
573 }
574
573 res = tipc_send(tsock->p->ref, m->msg_iovlen, m->msg_iov); 575 res = tipc_send(tsock->p->ref, m->msg_iovlen, m->msg_iov);
574 if (likely(res != -ELINKCONG)) { 576 if (likely(res != -ELINKCONG)) {
575exit: 577exit:
@@ -597,7 +599,8 @@ exit:
597 * 599 *
598 * Used for SOCK_STREAM data. 600 * Used for SOCK_STREAM data.
599 * 601 *
600 * Returns the number of bytes sent on success, or errno otherwise 602 * Returns the number of bytes sent on success (or partial success),
603 * or errno if no data sent
601 */ 604 */
602 605
603 606
@@ -611,6 +614,7 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
611 char __user *curr_start; 614 char __user *curr_start;
612 int curr_left; 615 int curr_left;
613 int bytes_to_send; 616 int bytes_to_send;
617 int bytes_sent;
614 int res; 618 int res;
615 619
616 if (likely(total_len <= TIPC_MAX_USER_MSG_SIZE)) 620 if (likely(total_len <= TIPC_MAX_USER_MSG_SIZE))
@@ -633,11 +637,11 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
633 * of small iovec entries into send_packet(). 637 * of small iovec entries into send_packet().
634 */ 638 */
635 639
636 my_msg = *m; 640 curr_iov = m->msg_iov;
637 curr_iov = my_msg.msg_iov; 641 curr_iovlen = m->msg_iovlen;
638 curr_iovlen = my_msg.msg_iovlen;
639 my_msg.msg_iov = &my_iov; 642 my_msg.msg_iov = &my_iov;
640 my_msg.msg_iovlen = 1; 643 my_msg.msg_iovlen = 1;
644 bytes_sent = 0;
641 645
642 while (curr_iovlen--) { 646 while (curr_iovlen--) {
643 curr_start = curr_iov->iov_base; 647 curr_start = curr_iov->iov_base;
@@ -648,16 +652,18 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
648 ? curr_left : TIPC_MAX_USER_MSG_SIZE; 652 ? curr_left : TIPC_MAX_USER_MSG_SIZE;
649 my_iov.iov_base = curr_start; 653 my_iov.iov_base = curr_start;
650 my_iov.iov_len = bytes_to_send; 654 my_iov.iov_len = bytes_to_send;
651 if ((res = send_packet(iocb, sock, &my_msg, 0)) < 0) 655 if ((res = send_packet(iocb, sock, &my_msg, 0)) < 0) {
652 return res; 656 return bytes_sent ? bytes_sent : res;
657 }
653 curr_left -= bytes_to_send; 658 curr_left -= bytes_to_send;
654 curr_start += bytes_to_send; 659 curr_start += bytes_to_send;
660 bytes_sent += bytes_to_send;
655 } 661 }
656 662
657 curr_iov++; 663 curr_iov++;
658 } 664 }
659 665
660 return total_len; 666 return bytes_sent;
661} 667}
662 668
663/** 669/**
@@ -727,6 +733,7 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
727 u32 anc_data[3]; 733 u32 anc_data[3];
728 u32 err; 734 u32 err;
729 u32 dest_type; 735 u32 dest_type;
736 int has_name;
730 int res; 737 int res;
731 738
732 if (likely(m->msg_controllen == 0)) 739 if (likely(m->msg_controllen == 0))
@@ -738,10 +745,10 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
738 if (unlikely(err)) { 745 if (unlikely(err)) {
739 anc_data[0] = err; 746 anc_data[0] = err;
740 anc_data[1] = msg_data_sz(msg); 747 anc_data[1] = msg_data_sz(msg);
741 if ((res = put_cmsg(m, SOL_SOCKET, TIPC_ERRINFO, 8, anc_data))) 748 if ((res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data)))
742 return res; 749 return res;
743 if (anc_data[1] && 750 if (anc_data[1] &&
744 (res = put_cmsg(m, SOL_SOCKET, TIPC_RETDATA, anc_data[1], 751 (res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
745 msg_data(msg)))) 752 msg_data(msg))))
746 return res; 753 return res;
747 } 754 }
@@ -751,25 +758,28 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
751 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG; 758 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
752 switch (dest_type) { 759 switch (dest_type) {
753 case TIPC_NAMED_MSG: 760 case TIPC_NAMED_MSG:
761 has_name = 1;
754 anc_data[0] = msg_nametype(msg); 762 anc_data[0] = msg_nametype(msg);
755 anc_data[1] = msg_namelower(msg); 763 anc_data[1] = msg_namelower(msg);
756 anc_data[2] = msg_namelower(msg); 764 anc_data[2] = msg_namelower(msg);
757 break; 765 break;
758 case TIPC_MCAST_MSG: 766 case TIPC_MCAST_MSG:
767 has_name = 1;
759 anc_data[0] = msg_nametype(msg); 768 anc_data[0] = msg_nametype(msg);
760 anc_data[1] = msg_namelower(msg); 769 anc_data[1] = msg_namelower(msg);
761 anc_data[2] = msg_nameupper(msg); 770 anc_data[2] = msg_nameupper(msg);
762 break; 771 break;
763 case TIPC_CONN_MSG: 772 case TIPC_CONN_MSG:
773 has_name = (tport->conn_type != 0);
764 anc_data[0] = tport->conn_type; 774 anc_data[0] = tport->conn_type;
765 anc_data[1] = tport->conn_instance; 775 anc_data[1] = tport->conn_instance;
766 anc_data[2] = tport->conn_instance; 776 anc_data[2] = tport->conn_instance;
767 break; 777 break;
768 default: 778 default:
769 anc_data[0] = 0; 779 has_name = 0;
770 } 780 }
771 if (anc_data[0] && 781 if (has_name &&
772 (res = put_cmsg(m, SOL_SOCKET, TIPC_DESTNAME, 12, anc_data))) 782 (res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data)))
773 return res; 783 return res;
774 784
775 return 0; 785 return 0;
@@ -960,7 +970,7 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
960restart: 970restart:
961 if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) && 971 if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) &&
962 (flags & MSG_DONTWAIT))) { 972 (flags & MSG_DONTWAIT))) {
963 res = (sz_copied == 0) ? -EWOULDBLOCK : 0; 973 res = -EWOULDBLOCK;
964 goto exit; 974 goto exit;
965 } 975 }
966 976
@@ -1051,7 +1061,7 @@ restart:
1051 1061
1052exit: 1062exit:
1053 up(&tsock->sem); 1063 up(&tsock->sem);
1054 return res ? res : sz_copied; 1064 return sz_copied ? sz_copied : res;
1055} 1065}
1056 1066
1057/** 1067/**
@@ -1236,7 +1246,8 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1236 if (sock->state == SS_READY) 1246 if (sock->state == SS_READY)
1237 return -EOPNOTSUPP; 1247 return -EOPNOTSUPP;
1238 1248
1239 /* MOVE THE REST OF THIS ERROR CHECKING TO send_msg()? */ 1249 /* Issue Posix-compliant error code if socket is in the wrong state */
1250
1240 if (sock->state == SS_LISTENING) 1251 if (sock->state == SS_LISTENING)
1241 return -EOPNOTSUPP; 1252 return -EOPNOTSUPP;
1242 if (sock->state == SS_CONNECTING) 1253 if (sock->state == SS_CONNECTING)
@@ -1244,13 +1255,20 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1244 if (sock->state != SS_UNCONNECTED) 1255 if (sock->state != SS_UNCONNECTED)
1245 return -EISCONN; 1256 return -EISCONN;
1246 1257
1247 if ((dst->family != AF_TIPC) || 1258 /*
1248 ((dst->addrtype != TIPC_ADDR_NAME) && (dst->addrtype != TIPC_ADDR_ID))) 1259 * Reject connection attempt using multicast address
1260 *
1261 * Note: send_msg() validates the rest of the address fields,
1262 * so there's no need to do it here
1263 */
1264
1265 if (dst->addrtype == TIPC_ADDR_MCAST)
1249 return -EINVAL; 1266 return -EINVAL;
1250 1267
1251 /* Send a 'SYN-' to destination */ 1268 /* Send a 'SYN-' to destination */
1252 1269
1253 m.msg_name = dest; 1270 m.msg_name = dest;
1271 m.msg_namelen = destlen;
1254 if ((res = send_msg(NULL, sock, &m, 0)) < 0) { 1272 if ((res = send_msg(NULL, sock, &m, 0)) < 0) {
1255 sock->state = SS_DISCONNECTING; 1273 sock->state = SS_DISCONNECTING;
1256 return res; 1274 return res;
@@ -1269,10 +1287,6 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1269 msg = buf_msg(buf); 1287 msg = buf_msg(buf);
1270 res = auto_connect(sock, tsock, msg); 1288 res = auto_connect(sock, tsock, msg);
1271 if (!res) { 1289 if (!res) {
1272 if (dst->addrtype == TIPC_ADDR_NAME) {
1273 tsock->p->conn_type = dst->addr.name.name.type;
1274 tsock->p->conn_instance = dst->addr.name.name.instance;
1275 }
1276 if (!msg_data_sz(msg)) 1290 if (!msg_data_sz(msg))
1277 advance_queue(tsock); 1291 advance_queue(tsock);
1278 } 1292 }
@@ -1386,7 +1400,7 @@ exit:
1386/** 1400/**
1387 * shutdown - shutdown socket connection 1401 * shutdown - shutdown socket connection
1388 * @sock: socket structure 1402 * @sock: socket structure
1389 * @how: direction to close (always treated as read + write) 1403 * @how: direction to close (unused; always treated as read + write)
1390 * 1404 *
1391 * Terminates connection (if necessary), then purges socket's receive queue. 1405 * Terminates connection (if necessary), then purges socket's receive queue.
1392 * 1406 *
@@ -1469,7 +1483,8 @@ restart:
1469 * Returns 0 on success, errno otherwise 1483 * Returns 0 on success, errno otherwise
1470 */ 1484 */
1471 1485
1472static int setsockopt(struct socket *sock, int lvl, int opt, char *ov, int ol) 1486static int setsockopt(struct socket *sock,
1487 int lvl, int opt, char __user *ov, int ol)
1473{ 1488{
1474 struct tipc_sock *tsock = tipc_sk(sock->sk); 1489 struct tipc_sock *tsock = tipc_sk(sock->sk);
1475 u32 value; 1490 u32 value;
@@ -1525,7 +1540,8 @@ static int setsockopt(struct socket *sock, int lvl, int opt, char *ov, int ol)
1525 * Returns 0 on success, errno otherwise 1540 * Returns 0 on success, errno otherwise
1526 */ 1541 */
1527 1542
1528static int getsockopt(struct socket *sock, int lvl, int opt, char *ov, int *ol) 1543static int getsockopt(struct socket *sock,
1544 int lvl, int opt, char __user *ov, int *ol)
1529{ 1545{
1530 struct tipc_sock *tsock = tipc_sk(sock->sk); 1546 struct tipc_sock *tsock = tipc_sk(sock->sk);
1531 int len; 1547 int len;
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index c5f026c7fd..e19b4bcd67 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -266,7 +266,8 @@ static void subscr_subscribe(struct tipc_subscr *s,
266 /* Refuse subscription if global limit exceeded */ 266 /* Refuse subscription if global limit exceeded */
267 267
268 if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) { 268 if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) {
269 warn("Failed: max %u subscriptions\n", tipc_max_subscriptions); 269 warn("Subscription rejected, subscription limit reached (%u)\n",
270 tipc_max_subscriptions);
270 subscr_terminate(subscriber); 271 subscr_terminate(subscriber);
271 return; 272 return;
272 } 273 }
@@ -274,8 +275,8 @@ static void subscr_subscribe(struct tipc_subscr *s,
274 /* Allocate subscription object */ 275 /* Allocate subscription object */
275 276
276 sub = kmalloc(sizeof(*sub), GFP_ATOMIC); 277 sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
277 if (sub == NULL) { 278 if (!sub) {
278 warn("Memory squeeze; ignoring subscription\n"); 279 warn("Subscription rejected, no memory\n");
279 subscr_terminate(subscriber); 280 subscr_terminate(subscriber);
280 return; 281 return;
281 } 282 }
@@ -298,8 +299,7 @@ static void subscr_subscribe(struct tipc_subscr *s,
298 if ((((sub->filter != TIPC_SUB_PORTS) 299 if ((((sub->filter != TIPC_SUB_PORTS)
299 && (sub->filter != TIPC_SUB_SERVICE))) 300 && (sub->filter != TIPC_SUB_SERVICE)))
300 || (sub->seq.lower > sub->seq.upper)) { 301 || (sub->seq.lower > sub->seq.upper)) {
301 warn("Rejecting illegal subscription %u,%u,%u\n", 302 warn("Subscription rejected, illegal request\n");
302 sub->seq.type, sub->seq.lower, sub->seq.upper);
303 kfree(sub); 303 kfree(sub);
304 subscr_terminate(subscriber); 304 subscr_terminate(subscriber);
305 return; 305 return;
@@ -387,7 +387,7 @@ static void subscr_named_msg_event(void *usr_handle,
387 dbg("subscr_named_msg_event: orig = %x own = %x,\n", 387 dbg("subscr_named_msg_event: orig = %x own = %x,\n",
388 orig->node, tipc_own_addr); 388 orig->node, tipc_own_addr);
389 if (size && (size != sizeof(struct tipc_subscr))) { 389 if (size && (size != sizeof(struct tipc_subscr))) {
390 warn("Received tipc_subscr of invalid size\n"); 390 warn("Subscriber rejected, invalid subscription size\n");
391 return; 391 return;
392 } 392 }
393 393
@@ -395,7 +395,7 @@ static void subscr_named_msg_event(void *usr_handle,
395 395
396 subscriber = kmalloc(sizeof(struct subscriber), GFP_ATOMIC); 396 subscriber = kmalloc(sizeof(struct subscriber), GFP_ATOMIC);
397 if (subscriber == NULL) { 397 if (subscriber == NULL) {
398 warn("Memory squeeze; ignoring subscriber setup\n"); 398 warn("Subscriber rejected, no memory\n");
399 return; 399 return;
400 } 400 }
401 memset(subscriber, 0, sizeof(struct subscriber)); 401 memset(subscriber, 0, sizeof(struct subscriber));
@@ -403,7 +403,7 @@ static void subscr_named_msg_event(void *usr_handle,
403 INIT_LIST_HEAD(&subscriber->subscriber_list); 403 INIT_LIST_HEAD(&subscriber->subscriber_list);
404 subscriber->ref = tipc_ref_acquire(subscriber, &subscriber->lock); 404 subscriber->ref = tipc_ref_acquire(subscriber, &subscriber->lock);
405 if (subscriber->ref == 0) { 405 if (subscriber->ref == 0) {
406 warn("Failed to acquire subscriber reference\n"); 406 warn("Subscriber rejected, reference table exhausted\n");
407 kfree(subscriber); 407 kfree(subscriber);
408 return; 408 return;
409 } 409 }
@@ -422,7 +422,7 @@ static void subscr_named_msg_event(void *usr_handle,
422 NULL, 422 NULL,
423 &subscriber->port_ref); 423 &subscriber->port_ref);
424 if (subscriber->port_ref == 0) { 424 if (subscriber->port_ref == 0) {
425 warn("Memory squeeze; failed to create subscription port\n"); 425 warn("Subscriber rejected, unable to create port\n");
426 tipc_ref_discard(subscriber->ref); 426 tipc_ref_discard(subscriber->ref);
427 kfree(subscriber); 427 kfree(subscriber);
428 return; 428 return;
@@ -457,7 +457,7 @@ int tipc_subscr_start(void)
457 int res = -1; 457 int res = -1;
458 458
459 memset(&topsrv, 0, sizeof (topsrv)); 459 memset(&topsrv, 0, sizeof (topsrv));
460 topsrv.lock = SPIN_LOCK_UNLOCKED; 460 spin_lock_init(&topsrv.lock);
461 INIT_LIST_HEAD(&topsrv.subscriber_list); 461 INIT_LIST_HEAD(&topsrv.subscriber_list);
462 462
463 spin_lock_bh(&topsrv.lock); 463 spin_lock_bh(&topsrv.lock);
diff --git a/net/tipc/user_reg.c b/net/tipc/user_reg.c
index 3f3f933976..1e3ae57c72 100644
--- a/net/tipc/user_reg.c
+++ b/net/tipc/user_reg.c
@@ -67,7 +67,7 @@ struct tipc_user {
67 67
68static struct tipc_user *users = NULL; 68static struct tipc_user *users = NULL;
69static u32 next_free_user = MAX_USERID + 1; 69static u32 next_free_user = MAX_USERID + 1;
70static spinlock_t reg_lock = SPIN_LOCK_UNLOCKED; 70static DEFINE_SPINLOCK(reg_lock);
71 71
72/** 72/**
73 * reg_init - create TIPC user registry (but don't activate it) 73 * reg_init - create TIPC user registry (but don't activate it)
diff --git a/net/tipc/zone.c b/net/tipc/zone.c
index 2803e1b4f1..316c4872ff 100644
--- a/net/tipc/zone.c
+++ b/net/tipc/zone.c
@@ -44,19 +44,24 @@
44 44
45struct _zone *tipc_zone_create(u32 addr) 45struct _zone *tipc_zone_create(u32 addr)
46{ 46{
47 struct _zone *z_ptr = NULL; 47 struct _zone *z_ptr;
48 u32 z_num; 48 u32 z_num;
49 49
50 if (!tipc_addr_domain_valid(addr)) 50 if (!tipc_addr_domain_valid(addr)) {
51 err("Zone creation failed, invalid domain 0x%x\n", addr);
51 return NULL; 52 return NULL;
53 }
52 54
53 z_ptr = (struct _zone *)kmalloc(sizeof(*z_ptr), GFP_ATOMIC); 55 z_ptr = (struct _zone *)kmalloc(sizeof(*z_ptr), GFP_ATOMIC);
54 if (z_ptr != NULL) { 56 if (!z_ptr) {
55 memset(z_ptr, 0, sizeof(*z_ptr)); 57 warn("Zone creation failed, insufficient memory\n");
56 z_num = tipc_zone(addr); 58 return NULL;
57 z_ptr->addr = tipc_addr(z_num, 0, 0);
58 tipc_net.zones[z_num] = z_ptr;
59 } 59 }
60
61 memset(z_ptr, 0, sizeof(*z_ptr));
62 z_num = tipc_zone(addr);
63 z_ptr->addr = tipc_addr(z_num, 0, 0);
64 tipc_net.zones[z_num] = z_ptr;
60 return z_ptr; 65 return z_ptr;
61} 66}
62 67
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index e48e60da30..02a7eea5fd 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -8,7 +8,7 @@ PHONY := __build
8__build: 8__build:
9 9
10# Read .config if it exist, otherwise ignore 10# Read .config if it exist, otherwise ignore
11-include .config 11-include include/config/auto.conf
12 12
13include scripts/Kbuild.include 13include scripts/Kbuild.include
14 14
@@ -140,6 +140,15 @@ cmd_cc_i_c = $(CPP) $(c_flags) -o $@ $<
140%.i: %.c FORCE 140%.i: %.c FORCE
141 $(call if_changed_dep,cc_i_c) 141 $(call if_changed_dep,cc_i_c)
142 142
143quiet_cmd_cc_symtypes_c = SYM $(quiet_modtag) $@
144cmd_cc_symtypes_c = \
145 $(CPP) -D__GENKSYMS__ $(c_flags) $< \
146 | $(GENKSYMS) -T $@ >/dev/null; \
147 test -s $@ || rm -f $@
148
149%.symtypes : %.c FORCE
150 $(call if_changed_dep,cc_symtypes_c)
151
143# C (.c) files 152# C (.c) files
144# The C file is compiled and updated dependency information is generated. 153# The C file is compiled and updated dependency information is generated.
145# (See cmd_cc_o_c + relevant part of rule_cc_o_c) 154# (See cmd_cc_o_c + relevant part of rule_cc_o_c)
@@ -166,7 +175,8 @@ cmd_cc_o_c = $(CC) $(c_flags) -c -o $(@D)/.tmp_$(@F) $<
166cmd_modversions = \ 175cmd_modversions = \
167 if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then \ 176 if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then \
168 $(CPP) -D__GENKSYMS__ $(c_flags) $< \ 177 $(CPP) -D__GENKSYMS__ $(c_flags) $< \
169 | $(GENKSYMS) -a $(ARCH) \ 178 | $(GENKSYMS) $(if $(KBUILD_SYMTYPES), \
179 -T $(@D)/$(@F:.o=.symtypes)) -a $(ARCH) \
170 > $(@D)/.tmp_$(@F:.o=.ver); \ 180 > $(@D)/.tmp_$(@F:.o=.ver); \
171 \ 181 \
172 $(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F) \ 182 $(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F) \
diff --git a/scripts/Makefile.host b/scripts/Makefile.host
index 2d519704b8..2b066d12af 100644
--- a/scripts/Makefile.host
+++ b/scripts/Makefile.host
@@ -33,8 +33,8 @@
33__hostprogs := $(sort $(hostprogs-y)$(hostprogs-m)) 33__hostprogs := $(sort $(hostprogs-y)$(hostprogs-m))
34 34
35# hostprogs-y := tools/build may have been specified. Retreive directory 35# hostprogs-y := tools/build may have been specified. Retreive directory
36obj-dirs += $(foreach f,$(__hostprogs), $(if $(dir $(f)),$(dir $(f)))) 36host-objdirs := $(foreach f,$(__hostprogs), $(if $(dir $(f)),$(dir $(f))))
37obj-dirs := $(strip $(sort $(filter-out ./,$(obj-dirs)))) 37host-objdirs := $(strip $(sort $(filter-out ./,$(host-objdirs))))
38 38
39 39
40# C code 40# C code
@@ -73,7 +73,9 @@ host-cxxmulti := $(addprefix $(obj)/,$(host-cxxmulti))
73host-cxxobjs := $(addprefix $(obj)/,$(host-cxxobjs)) 73host-cxxobjs := $(addprefix $(obj)/,$(host-cxxobjs))
74host-cshlib := $(addprefix $(obj)/,$(host-cshlib)) 74host-cshlib := $(addprefix $(obj)/,$(host-cshlib))
75host-cshobjs := $(addprefix $(obj)/,$(host-cshobjs)) 75host-cshobjs := $(addprefix $(obj)/,$(host-cshobjs))
76obj-dirs := $(addprefix $(obj)/,$(obj-dirs)) 76host-objdirs := $(addprefix $(obj)/,$(host-objdirs))
77
78obj-dirs += $(host-objdirs)
77 79
78##### 80#####
79# Handle options to gcc. Support building with separate output directory 81# Handle options to gcc. Support building with separate output directory
diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst
index 2686dd5dce..f0ff248f5e 100644
--- a/scripts/Makefile.modinst
+++ b/scripts/Makefile.modinst
@@ -17,7 +17,7 @@ __modinst: $(modules)
17 @: 17 @:
18 18
19quiet_cmd_modules_install = INSTALL $@ 19quiet_cmd_modules_install = INSTALL $@
20 cmd_modules_install = mkdir -p $(2); cp $@ $(2) 20 cmd_modules_install = mkdir -p $(2); cp $@ $(2) ; $(mod_strip_cmd) $(2)/$(notdir $@)
21 21
22# Modules built outside the kernel source tree go into extra by default 22# Modules built outside the kernel source tree go into extra by default
23INSTALL_MOD_DIR ?= extra 23INSTALL_MOD_DIR ?= extra
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 0e056cffff..576cce5e38 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -35,7 +35,7 @@
35PHONY := _modpost 35PHONY := _modpost
36_modpost: __modpost 36_modpost: __modpost
37 37
38include .config 38include include/config/auto.conf
39include scripts/Kbuild.include 39include scripts/Kbuild.include
40include scripts/Makefile.lib 40include scripts/Makefile.lib
41 41
diff --git a/scripts/basic/Makefile b/scripts/basic/Makefile
index f22e94c3a2..2f60070f97 100644
--- a/scripts/basic/Makefile
+++ b/scripts/basic/Makefile
@@ -1,17 +1,15 @@
1### 1###
2# Makefile.basic list the most basic programs used during the build process. 2# Makefile.basic list the most basic programs used during the build process.
3# The programs listed herein is what is needed to do the basic stuff, 3# The programs listed herein is what is needed to do the basic stuff,
4# such as splitting .config and fix dependency file. 4# such as fix dependency file.
5# This initial step is needed to avoid files to be recompiled 5# This initial step is needed to avoid files to be recompiled
6# when kernel configuration changes (which is what happens when 6# when kernel configuration changes (which is what happens when
7# .config is included by main Makefile. 7# .config is included by main Makefile.
8# --------------------------------------------------------------------------- 8# ---------------------------------------------------------------------------
9# fixdep: Used to generate dependency information during build process 9# fixdep: Used to generate dependency information during build process
10# split-include: Divide all config symbols up in a number of files in
11# include/config/...
12# docproc: Used in Documentation/docbook 10# docproc: Used in Documentation/docbook
13 11
14hostprogs-y := fixdep split-include docproc 12hostprogs-y := fixdep docproc
15always := $(hostprogs-y) 13always := $(hostprogs-y)
16 14
17# fixdep is needed to compile other host programs 15# fixdep is needed to compile other host programs
diff --git a/scripts/basic/split-include.c b/scripts/basic/split-include.c
deleted file mode 100644
index 459c45276c..0000000000
--- a/scripts/basic/split-include.c
+++ /dev/null
@@ -1,226 +0,0 @@
1/*
2 * split-include.c
3 *
4 * Copyright abandoned, Michael Chastain, <mailto:mec@shout.net>.
5 * This is a C version of syncdep.pl by Werner Almesberger.
6 *
7 * This program takes autoconf.h as input and outputs a directory full
8 * of one-line include files, merging onto the old values.
9 *
10 * Think of the configuration options as key-value pairs. Then there
11 * are five cases:
12 *
13 * key old value new value action
14 *
15 * KEY-1 VALUE-1 VALUE-1 leave file alone
16 * KEY-2 VALUE-2A VALUE-2B write VALUE-2B into file
17 * KEY-3 - VALUE-3 write VALUE-3 into file
18 * KEY-4 VALUE-4 - write an empty file
19 * KEY-5 (empty) - leave old empty file alone
20 */
21
22#include <sys/stat.h>
23#include <sys/types.h>
24
25#include <ctype.h>
26#include <errno.h>
27#include <fcntl.h>
28#include <stdio.h>
29#include <stdlib.h>
30#include <string.h>
31#include <unistd.h>
32
33#define ERROR_EXIT(strExit) \
34 { \
35 const int errnoSave = errno; \
36 fprintf(stderr, "%s: ", str_my_name); \
37 errno = errnoSave; \
38 perror((strExit)); \
39 exit(1); \
40 }
41
42
43
44int main(int argc, const char * argv [])
45{
46 const char * str_my_name;
47 const char * str_file_autoconf;
48 const char * str_dir_config;
49
50 FILE * fp_config;
51 FILE * fp_target;
52 FILE * fp_find;
53
54 int buffer_size;
55
56 char * line;
57 char * old_line;
58 char * list_target;
59 char * ptarget;
60
61 struct stat stat_buf;
62
63 /* Check arg count. */
64 if (argc != 3)
65 {
66 fprintf(stderr, "%s: wrong number of arguments.\n", argv[0]);
67 exit(1);
68 }
69
70 str_my_name = argv[0];
71 str_file_autoconf = argv[1];
72 str_dir_config = argv[2];
73
74 /* Find a buffer size. */
75 if (stat(str_file_autoconf, &stat_buf) != 0)
76 ERROR_EXIT(str_file_autoconf);
77 buffer_size = 2 * stat_buf.st_size + 4096;
78
79 /* Allocate buffers. */
80 if ( (line = malloc(buffer_size)) == NULL
81 || (old_line = malloc(buffer_size)) == NULL
82 || (list_target = malloc(buffer_size)) == NULL )
83 ERROR_EXIT(str_file_autoconf);
84
85 /* Open autoconfig file. */
86 if ((fp_config = fopen(str_file_autoconf, "r")) == NULL)
87 ERROR_EXIT(str_file_autoconf);
88
89 /* Make output directory if needed. */
90 if (stat(str_dir_config, &stat_buf) != 0)
91 {
92 if (mkdir(str_dir_config, 0755) != 0)
93 ERROR_EXIT(str_dir_config);
94 }
95
96 /* Change to output directory. */
97 if (chdir(str_dir_config) != 0)
98 ERROR_EXIT(str_dir_config);
99
100 /* Put initial separator into target list. */
101 ptarget = list_target;
102 *ptarget++ = '\n';
103
104 /* Read config lines. */
105 while (fgets(line, buffer_size, fp_config))
106 {
107 const char * str_config;
108 int is_same;
109 int itarget;
110
111 if (line[0] != '#')
112 continue;
113 if ((str_config = strstr(line, "CONFIG_")) == NULL)
114 continue;
115
116 /* Make the output file name. */
117 str_config += sizeof("CONFIG_") - 1;
118 for (itarget = 0; !isspace(str_config[itarget]); itarget++)
119 {
120 int c = (unsigned char) str_config[itarget];
121 if (isupper(c)) c = tolower(c);
122 if (c == '_') c = '/';
123 ptarget[itarget] = c;
124 }
125 ptarget[itarget++] = '.';
126 ptarget[itarget++] = 'h';
127 ptarget[itarget++] = '\0';
128
129 /* Check for existing file. */
130 is_same = 0;
131 if ((fp_target = fopen(ptarget, "r")) != NULL)
132 {
133 fgets(old_line, buffer_size, fp_target);
134 if (fclose(fp_target) != 0)
135 ERROR_EXIT(ptarget);
136 if (!strcmp(line, old_line))
137 is_same = 1;
138 }
139
140 if (!is_same)
141 {
142 /* Auto-create directories. */
143 int islash;
144 for (islash = 0; islash < itarget; islash++)
145 {
146 if (ptarget[islash] == '/')
147 {
148 ptarget[islash] = '\0';
149 if (stat(ptarget, &stat_buf) != 0
150 && mkdir(ptarget, 0755) != 0)
151 ERROR_EXIT( ptarget );
152 ptarget[islash] = '/';
153 }
154 }
155
156 /* Write the file. */
157 if ((fp_target = fopen(ptarget, "w" )) == NULL)
158 ERROR_EXIT(ptarget);
159 fputs(line, fp_target);
160 if (ferror(fp_target) || fclose(fp_target) != 0)
161 ERROR_EXIT(ptarget);
162 }
163
164 /* Update target list */
165 ptarget += itarget;
166 *(ptarget-1) = '\n';
167 }
168
169 /*
170 * Close autoconfig file.
171 * Terminate the target list.
172 */
173 if (fclose(fp_config) != 0)
174 ERROR_EXIT(str_file_autoconf);
175 *ptarget = '\0';
176
177 /*
178 * Fix up existing files which have no new value.
179 * This is Case 4 and Case 5.
180 *
181 * I re-read the tree and filter it against list_target.
182 * This is crude. But it avoids data copies. Also, list_target
183 * is compact and contiguous, so it easily fits into cache.
184 *
185 * Notice that list_target contains strings separated by \n,
186 * with a \n before the first string and after the last.
187 * fgets gives the incoming names a terminating \n.
188 * So by having an initial \n, strstr will find exact matches.
189 */
190
191 fp_find = popen("find * -type f -name \"*.h\" -print", "r");
192 if (fp_find == 0)
193 ERROR_EXIT( "find" );
194
195 line[0] = '\n';
196 while (fgets(line+1, buffer_size, fp_find))
197 {
198 if (strstr(list_target, line) == NULL)
199 {
200 /*
201 * This is an old file with no CONFIG_* flag in autoconf.h.
202 */
203
204 /* First strip the \n. */
205 line[strlen(line)-1] = '\0';
206
207 /* Grab size. */
208 if (stat(line+1, &stat_buf) != 0)
209 ERROR_EXIT(line);
210
211 /* If file is not empty, make it empty and give it a fresh date. */
212 if (stat_buf.st_size != 0)
213 {
214 if ((fp_target = fopen(line+1, "w")) == NULL)
215 ERROR_EXIT(line);
216 if (fclose(fp_target) != 0)
217 ERROR_EXIT(line);
218 }
219 }
220 }
221
222 if (pclose(fp_find) != 0)
223 ERROR_EXIT("find");
224
225 return 0;
226}
diff --git a/scripts/export_report.pl b/scripts/export_report.pl
new file mode 100644
index 0000000000..9ed00d9bb0
--- /dev/null
+++ b/scripts/export_report.pl
@@ -0,0 +1,169 @@
1#!/usr/bin/perl -w
2#
3# (C) Copyright IBM Corporation 2006.
4# Released under GPL v2.
5# Author : Ram Pai (linuxram@us.ibm.com)
6#
7# Usage: export_report.pl -k Module.symvers [-o report_file ] -f *.mod.c
8#
9
10use Getopt::Std;
11use strict;
12
13sub numerically {
14 my $no1 = (split /\s+/, $a)[1];
15 my $no2 = (split /\s+/, $b)[1];
16 return $no1 <=> $no2;
17}
18
19sub alphabetically {
20 my ($module1, $value1) = @{$a};
21 my ($module2, $value2) = @{$b};
22 return $value1 <=> $value2 || $module2 cmp $module1;
23}
24
25sub print_depends_on {
26 my ($href) = @_;
27 print "\n";
28 while (my ($mod, $list) = each %$href) {
29 print "\t$mod:\n";
30 foreach my $sym (sort numerically @{$list}) {
31 my ($symbol, $no) = split /\s+/, $sym;
32 printf("\t\t%-25s\t%-25d\n", $symbol, $no);
33 }
34 print "\n";
35 }
36 print "\n";
37 print "~"x80 , "\n";
38}
39
40sub usage {
41 print "Usage: @_ -h -k Module.symvers [ -o outputfile ] \n",
42 "\t-f: treat all the non-option argument as .mod.c files. ",
43 "Recommend using this as the last option\n",
44 "\t-h: print detailed help\n",
45 "\t-k: the path to Module.symvers file. By default uses ",
46 "the file from the current directory\n",
47 "\t-o outputfile: output the report to outputfile\n";
48 exit 0;
49}
50
51sub collectcfiles {
52 my @file = `cat .tmp_versions/*.mod | grep '.*\.ko\$'`;
53 @file = grep {s/\.ko/.mod.c/} @file;
54 chomp @file;
55 return @file;
56}
57
58my (%SYMBOL, %MODULE, %opt, @allcfiles);
59
60if (not getopts('hk:o:f',\%opt) or defined $opt{'h'}) {
61 usage($0);
62}
63
64if (defined $opt{'f'}) {
65 @allcfiles = @ARGV;
66} else {
67 @allcfiles = collectcfiles();
68}
69
70if (not defined $opt{'k'}) {
71 $opt{'k'} = "Module.symvers";
72}
73
74unless (open(MODULE_SYMVERS, $opt{'k'})) {
75 die "Sorry, cannot open $opt{'k'}: $!\n";
76}
77
78if (defined $opt{'o'}) {
79 unless (open(OUTPUT_HANDLE, ">$opt{'o'}")) {
80 die "Sorry, cannot open $opt{'o'} $!\n";
81 }
82 select OUTPUT_HANDLE;
83}
84#
85# collect all the symbols and their attributes from the
86# Module.symvers file
87#
88while ( <MODULE_SYMVERS> ) {
89 chomp;
90 my (undef, $symbol, $module, $gpl) = split;
91 $SYMBOL { $symbol } = [ $module , "0" , $symbol, $gpl];
92}
93close(MODULE_SYMVERS);
94
95#
96# collect the usage count of each symbol.
97#
98foreach my $thismod (@allcfiles) {
99 unless (open(MODULE_MODULE, $thismod)) {
100 print "Sorry, cannot open $thismod: $!\n";
101 next;
102 }
103 my $state=0;
104 while ( <MODULE_MODULE> ) {
105 chomp;
106 if ($state eq 0) {
107 $state = 1 if ($_ =~ /static const struct modversion_info/);
108 next;
109 }
110 if ($state eq 1) {
111 $state = 2 if ($_ =~ /__attribute__\(\(section\("__versions"\)\)\)/);
112 next;
113 }
114 if ($state eq 2) {
115 if ( $_ !~ /0x[0-9a-f]{7,8},/ ) {
116 next;
117 }
118 my $sym = (split /([,"])/,)[4];
119 my ($module, $value, $symbol, $gpl) = @{$SYMBOL{$sym}};
120 $SYMBOL{ $sym } = [ $module, $value+1, $symbol, $gpl];
121 push(@{$MODULE{$thismod}} , $sym);
122 }
123 }
124 if ($state ne 2) {
125 print "WARNING:$thismod is not built with CONFIG_MODVERSION enabled\n";
126 }
127 close(MODULE_MODULE);
128}
129
130print "\tThis file reports the exported symbols usage patterns by in-tree\n",
131 "\t\t\t\tmodules\n";
132printf("%s\n\n\n","x"x80);
133printf("\t\t\t\tINDEX\n\n\n");
134printf("SECTION 1: Usage counts of all exported symbols\n");
135printf("SECTION 2: List of modules and the exported symbols they use\n");
136printf("%s\n\n\n","x"x80);
137printf("SECTION 1:\tThe exported symbols and their usage count\n\n");
138printf("%-25s\t%-25s\t%-5s\t%-25s\n", "Symbol", "Module", "Usage count",
139 "export type");
140
141#
142# print the list of unused exported symbols
143#
144foreach my $list (sort alphabetically values(%SYMBOL)) {
145 my ($module, $value, $symbol, $gpl) = @{$list};
146 printf("%-25s\t%-25s\t%-10s\t", $symbol, $module, $value);
147 if (defined $gpl) {
148 printf("%-25s\n",$gpl);
149 } else {
150 printf("\n");
151 }
152}
153printf("%s\n\n\n","x"x80);
154
155printf("SECTION 2:\n\tThis section reports export-symbol-usage of in-kernel
156modules. Each module lists the modules, and the symbols from that module that
157it uses. Each listed symbol reports the number of modules using it\n");
158
159print "~"x80 , "\n";
160while (my ($thismod, $list) = each %MODULE) {
161 my %depends;
162 $thismod =~ s/\.mod\.c/.ko/;
163 print "\t\t\t$thismod\n";
164 foreach my $symbol (@{$list}) {
165 my ($module, $value, undef, $gpl) = @{$SYMBOL{$symbol}};
166 push (@{$depends{"$module"}}, "$symbol $value");
167 }
168 print_depends_on(\%depends);
169}
diff --git a/scripts/genksyms/genksyms.c b/scripts/genksyms/genksyms.c
index 5b0344e20d..b0381823e4 100644
--- a/scripts/genksyms/genksyms.c
+++ b/scripts/genksyms/genksyms.c
@@ -42,7 +42,7 @@ static FILE *debugfile;
42int cur_line = 1; 42int cur_line = 1;
43char *cur_filename; 43char *cur_filename;
44 44
45static int flag_debug, flag_dump_defs, flag_warnings; 45static int flag_debug, flag_dump_defs, flag_dump_types, flag_warnings;
46static const char *arch = ""; 46static const char *arch = "";
47static const char *mod_prefix = ""; 47static const char *mod_prefix = "";
48 48
@@ -50,6 +50,7 @@ static int errors;
50static int nsyms; 50static int nsyms;
51 51
52static struct symbol *expansion_trail; 52static struct symbol *expansion_trail;
53static struct symbol *visited_symbols;
53 54
54static const char *const symbol_type_name[] = { 55static const char *const symbol_type_name[] = {
55 "normal", "typedef", "enum", "struct", "union" 56 "normal", "typedef", "enum", "struct", "union"
@@ -176,6 +177,7 @@ struct symbol *add_symbol(const char *name, enum symbol_type type,
176 sym->type = type; 177 sym->type = type;
177 sym->defn = defn; 178 sym->defn = defn;
178 sym->expansion_trail = NULL; 179 sym->expansion_trail = NULL;
180 sym->visited = NULL;
179 sym->is_extern = is_extern; 181 sym->is_extern = is_extern;
180 182
181 sym->hash_next = symtab[h]; 183 sym->hash_next = symtab[h];
@@ -236,26 +238,11 @@ static int equal_list(struct string_list *a, struct string_list *b)
236 238
237static void print_node(FILE * f, struct string_list *list) 239static void print_node(FILE * f, struct string_list *list)
238{ 240{
239 switch (list->tag) { 241 if (list->tag != SYM_NORMAL) {
240 case SYM_STRUCT: 242 putc(symbol_type_name[list->tag][0], f);
241 putc('s', f);
242 goto printit;
243 case SYM_UNION:
244 putc('u', f);
245 goto printit;
246 case SYM_ENUM:
247 putc('e', f);
248 goto printit;
249 case SYM_TYPEDEF:
250 putc('t', f);
251 goto printit;
252
253 printit:
254 putc('#', f); 243 putc('#', f);
255 case SYM_NORMAL:
256 fputs(list->string, f);
257 break;
258 } 244 }
245 fputs(list->string, f);
259} 246}
260 247
261static void print_list(FILE * f, struct string_list *list) 248static void print_list(FILE * f, struct string_list *list)
@@ -287,9 +274,9 @@ static void print_list(FILE * f, struct string_list *list)
287 } 274 }
288} 275}
289 276
290static unsigned long expand_and_crc_list(struct string_list *list, 277static unsigned long expand_and_crc_sym(struct symbol *sym, unsigned long crc)
291 unsigned long crc)
292{ 278{
279 struct string_list *list = sym->defn;
293 struct string_list **e, **b; 280 struct string_list **e, **b;
294 struct string_list *tmp, **tmp2; 281 struct string_list *tmp, **tmp2;
295 int elem = 1; 282 int elem = 1;
@@ -332,7 +319,7 @@ static unsigned long expand_and_crc_list(struct string_list *list,
332 } else { 319 } else {
333 subsym->expansion_trail = expansion_trail; 320 subsym->expansion_trail = expansion_trail;
334 expansion_trail = subsym; 321 expansion_trail = subsym;
335 crc = expand_and_crc_list(subsym->defn, crc); 322 crc = expand_and_crc_sym(subsym, crc);
336 } 323 }
337 break; 324 break;
338 325
@@ -382,12 +369,22 @@ static unsigned long expand_and_crc_list(struct string_list *list,
382 } else { 369 } else {
383 subsym->expansion_trail = expansion_trail; 370 subsym->expansion_trail = expansion_trail;
384 expansion_trail = subsym; 371 expansion_trail = subsym;
385 crc = expand_and_crc_list(subsym->defn, crc); 372 crc = expand_and_crc_sym(subsym, crc);
386 } 373 }
387 break; 374 break;
388 } 375 }
389 } 376 }
390 377
378 {
379 static struct symbol **end = &visited_symbols;
380
381 if (!sym->visited) {
382 *end = sym;
383 end = &sym->visited;
384 sym->visited = (struct symbol *)-1L;
385 }
386 }
387
391 return crc; 388 return crc;
392} 389}
393 390
@@ -406,7 +403,7 @@ void export_symbol(const char *name)
406 403
407 expansion_trail = (struct symbol *)-1L; 404 expansion_trail = (struct symbol *)-1L;
408 405
409 crc = expand_and_crc_list(sym->defn, 0xffffffff) ^ 0xffffffff; 406 crc = expand_and_crc_sym(sym, 0xffffffff) ^ 0xffffffff;
410 407
411 sym = expansion_trail; 408 sym = expansion_trail;
412 while (sym != (struct symbol *)-1L) { 409 while (sym != (struct symbol *)-1L) {
@@ -464,6 +461,7 @@ static void genksyms_usage(void)
464 461
465int main(int argc, char **argv) 462int main(int argc, char **argv)
466{ 463{
464 FILE *dumpfile = NULL;
467 int o; 465 int o;
468 466
469#ifdef __GNU_LIBRARY__ 467#ifdef __GNU_LIBRARY__
@@ -473,15 +471,16 @@ int main(int argc, char **argv)
473 {"warnings", 0, 0, 'w'}, 471 {"warnings", 0, 0, 'w'},
474 {"quiet", 0, 0, 'q'}, 472 {"quiet", 0, 0, 'q'},
475 {"dump", 0, 0, 'D'}, 473 {"dump", 0, 0, 'D'},
474 {"dump-types", 1, 0, 'T'},
476 {"version", 0, 0, 'V'}, 475 {"version", 0, 0, 'V'},
477 {"help", 0, 0, 'h'}, 476 {"help", 0, 0, 'h'},
478 {0, 0, 0, 0} 477 {0, 0, 0, 0}
479 }; 478 };
480 479
481 while ((o = getopt_long(argc, argv, "a:dwqVDk:p:", 480 while ((o = getopt_long(argc, argv, "a:dwqVDT:k:p:",
482 &long_opts[0], NULL)) != EOF) 481 &long_opts[0], NULL)) != EOF)
483#else /* __GNU_LIBRARY__ */ 482#else /* __GNU_LIBRARY__ */
484 while ((o = getopt(argc, argv, "a:dwqVDk:p:")) != EOF) 483 while ((o = getopt(argc, argv, "a:dwqVDT:k:p:")) != EOF)
485#endif /* __GNU_LIBRARY__ */ 484#endif /* __GNU_LIBRARY__ */
486 switch (o) { 485 switch (o) {
487 case 'a': 486 case 'a':
@@ -502,6 +501,14 @@ int main(int argc, char **argv)
502 case 'D': 501 case 'D':
503 flag_dump_defs = 1; 502 flag_dump_defs = 1;
504 break; 503 break;
504 case 'T':
505 flag_dump_types = 1;
506 dumpfile = fopen(optarg, "w");
507 if (!dumpfile) {
508 perror(optarg);
509 return 1;
510 }
511 break;
505 case 'h': 512 case 'h':
506 genksyms_usage(); 513 genksyms_usage();
507 return 0; 514 return 0;
@@ -524,6 +531,24 @@ int main(int argc, char **argv)
524 531
525 yyparse(); 532 yyparse();
526 533
534 if (flag_dump_types && visited_symbols) {
535 while (visited_symbols != (struct symbol *)-1L) {
536 struct symbol *sym = visited_symbols;
537
538 if (sym->type != SYM_NORMAL) {
539 putc(symbol_type_name[sym->type][0], dumpfile);
540 putc('#', dumpfile);
541 }
542 fputs(sym->name, dumpfile);
543 putc(' ', dumpfile);
544 print_list(dumpfile, sym->defn);
545 putc('\n', dumpfile);
546
547 visited_symbols = sym->visited;
548 sym->visited = NULL;
549 }
550 }
551
527 if (flag_debug) { 552 if (flag_debug) {
528 fprintf(debugfile, "Hash table occupancy %d/%d = %g\n", 553 fprintf(debugfile, "Hash table occupancy %d/%d = %g\n",
529 nsyms, HASH_BUCKETS, 554 nsyms, HASH_BUCKETS,
diff --git a/scripts/genksyms/genksyms.h b/scripts/genksyms/genksyms.h
index ab6f34f387..2668287aa4 100644
--- a/scripts/genksyms/genksyms.h
+++ b/scripts/genksyms/genksyms.h
@@ -41,6 +41,7 @@ struct symbol {
41 enum symbol_type type; 41 enum symbol_type type;
42 struct string_list *defn; 42 struct string_list *defn;
43 struct symbol *expansion_trail; 43 struct symbol *expansion_trail;
44 struct symbol *visited;
44 int is_extern; 45 int is_extern;
45}; 46};
46 47
diff --git a/scripts/genksyms/lex.c_shipped b/scripts/genksyms/lex.c_shipped
index 1218053ee9..37ba98241b 100644
--- a/scripts/genksyms/lex.c_shipped
+++ b/scripts/genksyms/lex.c_shipped
@@ -2023,7 +2023,7 @@ repeat:
2023 break; 2023 break;
2024 2024
2025 default: 2025 default:
2026 abort(); 2026 exit(1);
2027 } 2027 }
2028fini: 2028fini:
2029 2029
diff --git a/scripts/genksyms/lex.l b/scripts/genksyms/lex.l
index fe0dfeedf0..5e544a0667 100644
--- a/scripts/genksyms/lex.l
+++ b/scripts/genksyms/lex.l
@@ -392,7 +392,7 @@ repeat:
392 break; 392 break;
393 393
394 default: 394 default:
395 abort(); 395 exit(1);
396 } 396 }
397fini: 397fini:
398 398
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index 8012d10768..4dcb8867b5 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -539,6 +539,7 @@ int main(int ac, char **av)
539 name = av[i]; 539 name = av[i];
540 if (!name) { 540 if (!name) {
541 printf(_("%s: Kconfig file missing\n"), av[0]); 541 printf(_("%s: Kconfig file missing\n"), av[0]);
542 exit(1);
542 } 543 }
543 conf_parse(name); 544 conf_parse(name);
544 //zconfdump(stdout); 545 //zconfdump(stdout);
@@ -573,7 +574,7 @@ int main(int ac, char **av)
573 case set_random: 574 case set_random:
574 name = getenv("KCONFIG_ALLCONFIG"); 575 name = getenv("KCONFIG_ALLCONFIG");
575 if (name && !stat(name, &tmpstat)) { 576 if (name && !stat(name, &tmpstat)) {
576 conf_read_simple(name); 577 conf_read_simple(name, S_DEF_USER);
577 break; 578 break;
578 } 579 }
579 switch (input_mode) { 580 switch (input_mode) {
@@ -584,9 +585,9 @@ int main(int ac, char **av)
584 default: break; 585 default: break;
585 } 586 }
586 if (!stat(name, &tmpstat)) 587 if (!stat(name, &tmpstat))
587 conf_read_simple(name); 588 conf_read_simple(name, S_DEF_USER);
588 else if (!stat("all.config", &tmpstat)) 589 else if (!stat("all.config", &tmpstat))
589 conf_read_simple("all.config"); 590 conf_read_simple("all.config", S_DEF_USER);
590 break; 591 break;
591 default: 592 default:
592 break; 593 break;
@@ -599,7 +600,15 @@ int main(int ac, char **av)
599 input_mode = ask_silent; 600 input_mode = ask_silent;
600 valid_stdin = 1; 601 valid_stdin = 1;
601 } 602 }
602 } 603 } else if (sym_change_count) {
604 name = getenv("KCONFIG_NOSILENTUPDATE");
605 if (name && *name) {
606 fprintf(stderr, _("\n*** Kernel configuration requires explicit update.\n\n"));
607 return 1;
608 }
609 } else
610 goto skip_check;
611
603 do { 612 do {
604 conf_cnt = 0; 613 conf_cnt = 0;
605 check_conf(&rootmenu); 614 check_conf(&rootmenu);
@@ -608,5 +617,11 @@ int main(int ac, char **av)
608 fprintf(stderr, _("\n*** Error during writing of the kernel configuration.\n\n")); 617 fprintf(stderr, _("\n*** Error during writing of the kernel configuration.\n\n"));
609 return 1; 618 return 1;
610 } 619 }
620skip_check:
621 if (input_mode == ask_silent && conf_write_autoconf()) {
622 fprintf(stderr, _("\n*** Error during writing of the kernel configuration.\n\n"));
623 return 1;
624 }
625
611 return 0; 626 return 0;
612} 627}
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index 1b5df589f3..2ee48c377b 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -5,6 +5,7 @@
5 5
6#include <sys/stat.h> 6#include <sys/stat.h>
7#include <ctype.h> 7#include <ctype.h>
8#include <fcntl.h>
8#include <stdio.h> 9#include <stdio.h>
9#include <stdlib.h> 10#include <stdlib.h>
10#include <string.h> 11#include <string.h>
@@ -20,19 +21,8 @@ static void conf_warning(const char *fmt, ...)
20static const char *conf_filename; 21static const char *conf_filename;
21static int conf_lineno, conf_warnings, conf_unsaved; 22static int conf_lineno, conf_warnings, conf_unsaved;
22 23
23const char conf_def_filename[] = ".config";
24
25const char conf_defname[] = "arch/$ARCH/defconfig"; 24const char conf_defname[] = "arch/$ARCH/defconfig";
26 25
27const char *conf_confnames[] = {
28 ".config",
29 "/lib/modules/$UNAME_RELEASE/.config",
30 "/etc/kernel-config",
31 "/boot/config-$UNAME_RELEASE",
32 conf_defname,
33 NULL,
34};
35
36static void conf_warning(const char *fmt, ...) 26static void conf_warning(const char *fmt, ...)
37{ 27{
38 va_list ap; 28 va_list ap;
@@ -44,6 +34,13 @@ static void conf_warning(const char *fmt, ...)
44 conf_warnings++; 34 conf_warnings++;
45} 35}
46 36
37const char *conf_get_configname(void)
38{
39 char *name = getenv("KCONFIG_CONFIG");
40
41 return name ? name : ".config";
42}
43
47static char *conf_expand_value(const char *in) 44static char *conf_expand_value(const char *in)
48{ 45{
49 struct symbol *sym; 46 struct symbol *sym;
@@ -86,51 +83,65 @@ char *conf_get_default_confname(void)
86 return name; 83 return name;
87} 84}
88 85
89int conf_read_simple(const char *name) 86int conf_read_simple(const char *name, int def)
90{ 87{
91 FILE *in = NULL; 88 FILE *in = NULL;
92 char line[1024]; 89 char line[1024];
93 char *p, *p2; 90 char *p, *p2;
94 struct symbol *sym; 91 struct symbol *sym;
95 int i; 92 int i, def_flags;
96 93
97 if (name) { 94 if (name) {
98 in = zconf_fopen(name); 95 in = zconf_fopen(name);
99 } else { 96 } else {
100 const char **names = conf_confnames; 97 struct property *prop;
101 while ((name = *names++)) { 98
102 name = conf_expand_value(name); 99 name = conf_get_configname();
100 in = zconf_fopen(name);
101 if (in)
102 goto load;
103 sym_change_count++;
104 if (!sym_defconfig_list)
105 return 1;
106
107 for_all_defaults(sym_defconfig_list, prop) {
108 if (expr_calc_value(prop->visible.expr) == no ||
109 prop->expr->type != E_SYMBOL)
110 continue;
111 name = conf_expand_value(prop->expr->left.sym->name);
103 in = zconf_fopen(name); 112 in = zconf_fopen(name);
104 if (in) { 113 if (in) {
105 printf(_("#\n" 114 printf(_("#\n"
106 "# using defaults found in %s\n" 115 "# using defaults found in %s\n"
107 "#\n"), name); 116 "#\n"), name);
108 break; 117 goto load;
109 } 118 }
110 } 119 }
111 } 120 }
112 if (!in) 121 if (!in)
113 return 1; 122 return 1;
114 123
124load:
115 conf_filename = name; 125 conf_filename = name;
116 conf_lineno = 0; 126 conf_lineno = 0;
117 conf_warnings = 0; 127 conf_warnings = 0;
118 conf_unsaved = 0; 128 conf_unsaved = 0;
119 129
130 def_flags = SYMBOL_DEF << def;
120 for_all_symbols(i, sym) { 131 for_all_symbols(i, sym) {
121 sym->flags |= SYMBOL_NEW | SYMBOL_CHANGED; 132 sym->flags |= SYMBOL_CHANGED;
133 sym->flags &= ~(def_flags|SYMBOL_VALID);
122 if (sym_is_choice(sym)) 134 if (sym_is_choice(sym))
123 sym->flags &= ~SYMBOL_NEW; 135 sym->flags |= def_flags;
124 sym->flags &= ~SYMBOL_VALID;
125 switch (sym->type) { 136 switch (sym->type) {
126 case S_INT: 137 case S_INT:
127 case S_HEX: 138 case S_HEX:
128 case S_STRING: 139 case S_STRING:
129 if (sym->user.val) 140 if (sym->def[def].val)
130 free(sym->user.val); 141 free(sym->def[def].val);
131 default: 142 default:
132 sym->user.val = NULL; 143 sym->def[def].val = NULL;
133 sym->user.tri = no; 144 sym->def[def].tri = no;
134 } 145 }
135 } 146 }
136 147
@@ -147,19 +158,26 @@ int conf_read_simple(const char *name)
147 *p++ = 0; 158 *p++ = 0;
148 if (strncmp(p, "is not set", 10)) 159 if (strncmp(p, "is not set", 10))
149 continue; 160 continue;
150 sym = sym_find(line + 9); 161 if (def == S_DEF_USER) {
151 if (!sym) { 162 sym = sym_find(line + 9);
152 conf_warning("trying to assign nonexistent symbol %s", line + 9); 163 if (!sym) {
153 break; 164 conf_warning("trying to assign nonexistent symbol %s", line + 9);
154 } else if (!(sym->flags & SYMBOL_NEW)) { 165 break;
166 }
167 } else {
168 sym = sym_lookup(line + 9, 0);
169 if (sym->type == S_UNKNOWN)
170 sym->type = S_BOOLEAN;
171 }
172 if (sym->flags & def_flags) {
155 conf_warning("trying to reassign symbol %s", sym->name); 173 conf_warning("trying to reassign symbol %s", sym->name);
156 break; 174 break;
157 } 175 }
158 switch (sym->type) { 176 switch (sym->type) {
159 case S_BOOLEAN: 177 case S_BOOLEAN:
160 case S_TRISTATE: 178 case S_TRISTATE:
161 sym->user.tri = no; 179 sym->def[def].tri = no;
162 sym->flags &= ~SYMBOL_NEW; 180 sym->flags |= def_flags;
163 break; 181 break;
164 default: 182 default:
165 ; 183 ;
@@ -177,34 +195,48 @@ int conf_read_simple(const char *name)
177 p2 = strchr(p, '\n'); 195 p2 = strchr(p, '\n');
178 if (p2) 196 if (p2)
179 *p2 = 0; 197 *p2 = 0;
180 sym = sym_find(line + 7); 198 if (def == S_DEF_USER) {
181 if (!sym) { 199 sym = sym_find(line + 7);
182 conf_warning("trying to assign nonexistent symbol %s", line + 7); 200 if (!sym) {
183 break; 201 conf_warning("trying to assign nonexistent symbol %s", line + 7);
184 } else if (!(sym->flags & SYMBOL_NEW)) { 202 break;
203 }
204 } else {
205 sym = sym_lookup(line + 7, 0);
206 if (sym->type == S_UNKNOWN)
207 sym->type = S_OTHER;
208 }
209 if (sym->flags & def_flags) {
185 conf_warning("trying to reassign symbol %s", sym->name); 210 conf_warning("trying to reassign symbol %s", sym->name);
186 break; 211 break;
187 } 212 }
188 switch (sym->type) { 213 switch (sym->type) {
189 case S_TRISTATE: 214 case S_TRISTATE:
190 if (p[0] == 'm') { 215 if (p[0] == 'm') {
191 sym->user.tri = mod; 216 sym->def[def].tri = mod;
192 sym->flags &= ~SYMBOL_NEW; 217 sym->flags |= def_flags;
193 break; 218 break;
194 } 219 }
195 case S_BOOLEAN: 220 case S_BOOLEAN:
196 if (p[0] == 'y') { 221 if (p[0] == 'y') {
197 sym->user.tri = yes; 222 sym->def[def].tri = yes;
198 sym->flags &= ~SYMBOL_NEW; 223 sym->flags |= def_flags;
199 break; 224 break;
200 } 225 }
201 if (p[0] == 'n') { 226 if (p[0] == 'n') {
202 sym->user.tri = no; 227 sym->def[def].tri = no;
203 sym->flags &= ~SYMBOL_NEW; 228 sym->flags |= def_flags;
204 break; 229 break;
205 } 230 }
206 conf_warning("symbol value '%s' invalid for %s", p, sym->name); 231 conf_warning("symbol value '%s' invalid for %s", p, sym->name);
207 break; 232 break;
233 case S_OTHER:
234 if (*p != '"') {
235 for (p2 = p; *p2 && !isspace(*p2); p2++)
236 ;
237 sym->type = S_STRING;
238 goto done;
239 }
208 case S_STRING: 240 case S_STRING:
209 if (*p++ != '"') 241 if (*p++ != '"')
210 break; 242 break;
@@ -221,9 +253,10 @@ int conf_read_simple(const char *name)
221 } 253 }
222 case S_INT: 254 case S_INT:
223 case S_HEX: 255 case S_HEX:
256 done:
224 if (sym_string_valid(sym, p)) { 257 if (sym_string_valid(sym, p)) {
225 sym->user.val = strdup(p); 258 sym->def[def].val = strdup(p);
226 sym->flags &= ~SYMBOL_NEW; 259 sym->flags |= def_flags;
227 } else { 260 } else {
228 conf_warning("symbol value '%s' invalid for %s", p, sym->name); 261 conf_warning("symbol value '%s' invalid for %s", p, sym->name);
229 continue; 262 continue;
@@ -241,24 +274,24 @@ int conf_read_simple(const char *name)
241 } 274 }
242 if (sym && sym_is_choice_value(sym)) { 275 if (sym && sym_is_choice_value(sym)) {
243 struct symbol *cs = prop_get_symbol(sym_get_choice_prop(sym)); 276 struct symbol *cs = prop_get_symbol(sym_get_choice_prop(sym));
244 switch (sym->user.tri) { 277 switch (sym->def[def].tri) {
245 case no: 278 case no:
246 break; 279 break;
247 case mod: 280 case mod:
248 if (cs->user.tri == yes) { 281 if (cs->def[def].tri == yes) {
249 conf_warning("%s creates inconsistent choice state", sym->name); 282 conf_warning("%s creates inconsistent choice state", sym->name);
250 cs->flags |= SYMBOL_NEW; 283 cs->flags &= ~def_flags;
251 } 284 }
252 break; 285 break;
253 case yes: 286 case yes:
254 if (cs->user.tri != no) { 287 if (cs->def[def].tri != no) {
255 conf_warning("%s creates inconsistent choice state", sym->name); 288 conf_warning("%s creates inconsistent choice state", sym->name);
256 cs->flags |= SYMBOL_NEW; 289 cs->flags &= ~def_flags;
257 } else 290 } else
258 cs->user.val = sym; 291 cs->def[def].val = sym;
259 break; 292 break;
260 } 293 }
261 cs->user.tri = E_OR(cs->user.tri, sym->user.tri); 294 cs->def[def].tri = E_OR(cs->def[def].tri, sym->def[def].tri);
262 } 295 }
263 } 296 }
264 fclose(in); 297 fclose(in);
@@ -273,9 +306,11 @@ int conf_read(const char *name)
273 struct symbol *sym; 306 struct symbol *sym;
274 struct property *prop; 307 struct property *prop;
275 struct expr *e; 308 struct expr *e;
276 int i; 309 int i, flags;
310
311 sym_change_count = 0;
277 312
278 if (conf_read_simple(name)) 313 if (conf_read_simple(name, S_DEF_USER))
279 return 1; 314 return 1;
280 315
281 for_all_symbols(i, sym) { 316 for_all_symbols(i, sym) {
@@ -287,12 +322,12 @@ int conf_read(const char *name)
287 switch (sym->type) { 322 switch (sym->type) {
288 case S_BOOLEAN: 323 case S_BOOLEAN:
289 case S_TRISTATE: 324 case S_TRISTATE:
290 if (sym->user.tri != sym_get_tristate_value(sym)) 325 if (sym->def[S_DEF_USER].tri != sym_get_tristate_value(sym))
291 break; 326 break;
292 if (!sym_is_choice(sym)) 327 if (!sym_is_choice(sym))
293 goto sym_ok; 328 goto sym_ok;
294 default: 329 default:
295 if (!strcmp(sym->curr.val, sym->user.val)) 330 if (!strcmp(sym->curr.val, sym->def[S_DEF_USER].val))
296 goto sym_ok; 331 goto sym_ok;
297 break; 332 break;
298 } 333 }
@@ -304,15 +339,13 @@ int conf_read(const char *name)
304 sym_ok: 339 sym_ok:
305 if (sym_has_value(sym) && !sym_is_choice_value(sym)) { 340 if (sym_has_value(sym) && !sym_is_choice_value(sym)) {
306 if (sym->visible == no) 341 if (sym->visible == no)
307 sym->flags |= SYMBOL_NEW; 342 sym->flags &= ~SYMBOL_DEF_USER;
308 switch (sym->type) { 343 switch (sym->type) {
309 case S_STRING: 344 case S_STRING:
310 case S_INT: 345 case S_INT:
311 case S_HEX: 346 case S_HEX:
312 if (!sym_string_within_range(sym, sym->user.val)) { 347 if (!sym_string_within_range(sym, sym->def[S_DEF_USER].val))
313 sym->flags |= SYMBOL_NEW; 348 sym->flags &= ~(SYMBOL_VALID|SYMBOL_DEF_USER);
314 sym->flags &= ~SYMBOL_VALID;
315 }
316 default: 349 default:
317 break; 350 break;
318 } 351 }
@@ -320,19 +353,21 @@ int conf_read(const char *name)
320 if (!sym_is_choice(sym)) 353 if (!sym_is_choice(sym))
321 continue; 354 continue;
322 prop = sym_get_choice_prop(sym); 355 prop = sym_get_choice_prop(sym);
356 flags = sym->flags;
323 for (e = prop->expr; e; e = e->left.expr) 357 for (e = prop->expr; e; e = e->left.expr)
324 if (e->right.sym->visible != no) 358 if (e->right.sym->visible != no)
325 sym->flags |= e->right.sym->flags & SYMBOL_NEW; 359 flags &= e->right.sym->flags;
360 sym->flags |= flags & SYMBOL_DEF_USER;
326 } 361 }
327 362
328 sym_change_count = conf_warnings || conf_unsaved; 363 sym_change_count += conf_warnings || conf_unsaved;
329 364
330 return 0; 365 return 0;
331} 366}
332 367
333int conf_write(const char *name) 368int conf_write(const char *name)
334{ 369{
335 FILE *out, *out_h; 370 FILE *out;
336 struct symbol *sym; 371 struct symbol *sym;
337 struct menu *menu; 372 struct menu *menu;
338 const char *basename; 373 const char *basename;
@@ -351,7 +386,7 @@ int conf_write(const char *name)
351 if (!stat(name, &st) && S_ISDIR(st.st_mode)) { 386 if (!stat(name, &st) && S_ISDIR(st.st_mode)) {
352 strcpy(dirname, name); 387 strcpy(dirname, name);
353 strcat(dirname, "/"); 388 strcat(dirname, "/");
354 basename = conf_def_filename; 389 basename = conf_get_configname();
355 } else if ((slash = strrchr(name, '/'))) { 390 } else if ((slash = strrchr(name, '/'))) {
356 int size = slash - name + 1; 391 int size = slash - name + 1;
357 memcpy(dirname, name, size); 392 memcpy(dirname, name, size);
@@ -359,23 +394,24 @@ int conf_write(const char *name)
359 if (slash[1]) 394 if (slash[1])
360 basename = slash + 1; 395 basename = slash + 1;
361 else 396 else
362 basename = conf_def_filename; 397 basename = conf_get_configname();
363 } else 398 } else
364 basename = name; 399 basename = name;
365 } else 400 } else
366 basename = conf_def_filename; 401 basename = conf_get_configname();
367 402
368 sprintf(newname, "%s.tmpconfig.%d", dirname, (int)getpid()); 403 sprintf(newname, "%s%s", dirname, basename);
369 out = fopen(newname, "w"); 404 env = getenv("KCONFIG_OVERWRITECONFIG");
405 if (!env || !*env) {
406 sprintf(tmpname, "%s.tmpconfig.%d", dirname, (int)getpid());
407 out = fopen(tmpname, "w");
408 } else {
409 *tmpname = 0;
410 out = fopen(newname, "w");
411 }
370 if (!out) 412 if (!out)
371 return 1; 413 return 1;
372 out_h = NULL; 414
373 if (!name) {
374 out_h = fopen(".tmpconfig.h", "w");
375 if (!out_h)
376 return 1;
377 file_write_dep(NULL);
378 }
379 sym = sym_lookup("KERNELVERSION", 0); 415 sym = sym_lookup("KERNELVERSION", 0);
380 sym_calc_value(sym); 416 sym_calc_value(sym);
381 time(&now); 417 time(&now);
@@ -391,16 +427,6 @@ int conf_write(const char *name)
391 sym_get_string_value(sym), 427 sym_get_string_value(sym),
392 use_timestamp ? "# " : "", 428 use_timestamp ? "# " : "",
393 use_timestamp ? ctime(&now) : ""); 429 use_timestamp ? ctime(&now) : "");
394 if (out_h)
395 fprintf(out_h, "/*\n"
396 " * Automatically generated C config: don't edit\n"
397 " * Linux kernel version: %s\n"
398 "%s%s"
399 " */\n"
400 "#define AUTOCONF_INCLUDED\n",
401 sym_get_string_value(sym),
402 use_timestamp ? " * " : "",
403 use_timestamp ? ctime(&now) : "");
404 430
405 if (!sym_change_count) 431 if (!sym_change_count)
406 sym_clear_all_valid(); 432 sym_clear_all_valid();
@@ -416,11 +442,6 @@ int conf_write(const char *name)
416 "#\n" 442 "#\n"
417 "# %s\n" 443 "# %s\n"
418 "#\n", str); 444 "#\n", str);
419 if (out_h)
420 fprintf(out_h, "\n"
421 "/*\n"
422 " * %s\n"
423 " */\n", str);
424 } else if (!(sym->flags & SYMBOL_CHOICE)) { 445 } else if (!(sym->flags & SYMBOL_CHOICE)) {
425 sym_calc_value(sym); 446 sym_calc_value(sym);
426 if (!(sym->flags & SYMBOL_WRITE)) 447 if (!(sym->flags & SYMBOL_WRITE))
@@ -438,59 +459,39 @@ int conf_write(const char *name)
438 switch (sym_get_tristate_value(sym)) { 459 switch (sym_get_tristate_value(sym)) {
439 case no: 460 case no:
440 fprintf(out, "# CONFIG_%s is not set\n", sym->name); 461 fprintf(out, "# CONFIG_%s is not set\n", sym->name);
441 if (out_h)
442 fprintf(out_h, "#undef CONFIG_%s\n", sym->name);
443 break; 462 break;
444 case mod: 463 case mod:
445 fprintf(out, "CONFIG_%s=m\n", sym->name); 464 fprintf(out, "CONFIG_%s=m\n", sym->name);
446 if (out_h)
447 fprintf(out_h, "#define CONFIG_%s_MODULE 1\n", sym->name);
448 break; 465 break;
449 case yes: 466 case yes:
450 fprintf(out, "CONFIG_%s=y\n", sym->name); 467 fprintf(out, "CONFIG_%s=y\n", sym->name);
451 if (out_h)
452 fprintf(out_h, "#define CONFIG_%s 1\n", sym->name);
453 break; 468 break;
454 } 469 }
455 break; 470 break;
456 case S_STRING: 471 case S_STRING:
457 // fix me
458 str = sym_get_string_value(sym); 472 str = sym_get_string_value(sym);
459 fprintf(out, "CONFIG_%s=\"", sym->name); 473 fprintf(out, "CONFIG_%s=\"", sym->name);
460 if (out_h) 474 while (1) {
461 fprintf(out_h, "#define CONFIG_%s \"", sym->name);
462 do {
463 l = strcspn(str, "\"\\"); 475 l = strcspn(str, "\"\\");
464 if (l) { 476 if (l) {
465 fwrite(str, l, 1, out); 477 fwrite(str, l, 1, out);
466 if (out_h) 478 str += l;
467 fwrite(str, l, 1, out_h);
468 }
469 str += l;
470 while (*str == '\\' || *str == '"') {
471 fprintf(out, "\\%c", *str);
472 if (out_h)
473 fprintf(out_h, "\\%c", *str);
474 str++;
475 } 479 }
476 } while (*str); 480 if (!*str)
481 break;
482 fprintf(out, "\\%c", *str++);
483 }
477 fputs("\"\n", out); 484 fputs("\"\n", out);
478 if (out_h)
479 fputs("\"\n", out_h);
480 break; 485 break;
481 case S_HEX: 486 case S_HEX:
482 str = sym_get_string_value(sym); 487 str = sym_get_string_value(sym);
483 if (str[0] != '0' || (str[1] != 'x' && str[1] != 'X')) { 488 if (str[0] != '0' || (str[1] != 'x' && str[1] != 'X')) {
484 fprintf(out, "CONFIG_%s=%s\n", sym->name, str); 489 fprintf(out, "CONFIG_%s=%s\n", sym->name, str);
485 if (out_h)
486 fprintf(out_h, "#define CONFIG_%s 0x%s\n", sym->name, str);
487 break; 490 break;
488 } 491 }
489 case S_INT: 492 case S_INT:
490 str = sym_get_string_value(sym); 493 str = sym_get_string_value(sym);
491 fprintf(out, "CONFIG_%s=%s\n", sym->name, str); 494 fprintf(out, "CONFIG_%s=%s\n", sym->name, str);
492 if (out_h)
493 fprintf(out_h, "#define CONFIG_%s %s\n", sym->name, str);
494 break; 495 break;
495 } 496 }
496 } 497 }
@@ -510,21 +511,253 @@ int conf_write(const char *name)
510 } 511 }
511 } 512 }
512 fclose(out); 513 fclose(out);
513 if (out_h) { 514
514 fclose(out_h); 515 if (*tmpname) {
515 rename(".tmpconfig.h", "include/linux/autoconf.h"); 516 strcat(dirname, name ? name : conf_get_configname());
517 strcat(dirname, ".old");
518 rename(newname, dirname);
519 if (rename(tmpname, newname))
520 return 1;
516 } 521 }
517 if (!name || basename != conf_def_filename) { 522
518 if (!name) 523 printf(_("#\n"
519 name = conf_def_filename; 524 "# configuration written to %s\n"
520 sprintf(tmpname, "%s.old", name); 525 "#\n"), newname);
521 rename(name, tmpname); 526
527 sym_change_count = 0;
528
529 return 0;
530}
531
532int conf_split_config(void)
533{
534 char *name, path[128];
535 char *s, *d, c;
536 struct symbol *sym;
537 struct stat sb;
538 int res, i, fd;
539
540 name = getenv("KCONFIG_AUTOCONFIG");
541 if (!name)
542 name = "include/config/auto.conf";
543 conf_read_simple(name, S_DEF_AUTO);
544
545 if (chdir("include/config"))
546 return 1;
547
548 res = 0;
549 for_all_symbols(i, sym) {
550 sym_calc_value(sym);
551 if ((sym->flags & SYMBOL_AUTO) || !sym->name)
552 continue;
553 if (sym->flags & SYMBOL_WRITE) {
554 if (sym->flags & SYMBOL_DEF_AUTO) {
555 /*
556 * symbol has old and new value,
557 * so compare them...
558 */
559 switch (sym->type) {
560 case S_BOOLEAN:
561 case S_TRISTATE:
562 if (sym_get_tristate_value(sym) ==
563 sym->def[S_DEF_AUTO].tri)
564 continue;
565 break;
566 case S_STRING:
567 case S_HEX:
568 case S_INT:
569 if (!strcmp(sym_get_string_value(sym),
570 sym->def[S_DEF_AUTO].val))
571 continue;
572 break;
573 default:
574 break;
575 }
576 } else {
577 /*
578 * If there is no old value, only 'no' (unset)
579 * is allowed as new value.
580 */
581 switch (sym->type) {
582 case S_BOOLEAN:
583 case S_TRISTATE:
584 if (sym_get_tristate_value(sym) == no)
585 continue;
586 break;
587 default:
588 break;
589 }
590 }
591 } else if (!(sym->flags & SYMBOL_DEF_AUTO))
592 /* There is neither an old nor a new value. */
593 continue;
594 /* else
595 * There is an old value, but no new value ('no' (unset)
596 * isn't saved in auto.conf, so the old value is always
597 * different from 'no').
598 */
599
600 /* Replace all '_' and append ".h" */
601 s = sym->name;
602 d = path;
603 while ((c = *s++)) {
604 c = tolower(c);
605 *d++ = (c == '_') ? '/' : c;
606 }
607 strcpy(d, ".h");
608
609 /* Assume directory path already exists. */
610 fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, 0644);
611 if (fd == -1) {
612 if (errno != ENOENT) {
613 res = 1;
614 break;
615 }
616 /*
617 * Create directory components,
618 * unless they exist already.
619 */
620 d = path;
621 while ((d = strchr(d, '/'))) {
622 *d = 0;
623 if (stat(path, &sb) && mkdir(path, 0755)) {
624 res = 1;
625 goto out;
626 }
627 *d++ = '/';
628 }
629 /* Try it again. */
630 fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, 0644);
631 if (fd == -1) {
632 res = 1;
633 break;
634 }
635 }
636 close(fd);
522 } 637 }
523 sprintf(tmpname, "%s%s", dirname, basename); 638out:
524 if (rename(newname, tmpname)) 639 if (chdir("../.."))
525 return 1; 640 return 1;
526 641
527 sym_change_count = 0; 642 return res;
643}
644
645int conf_write_autoconf(void)
646{
647 struct symbol *sym;
648 const char *str;
649 char *name;
650 FILE *out, *out_h;
651 time_t now;
652 int i, l;
653
654 sym_clear_all_valid();
655
656 file_write_dep("include/config/auto.conf.cmd");
657
658 if (conf_split_config())
659 return 1;
660
661 out = fopen(".tmpconfig", "w");
662 if (!out)
663 return 1;
664
665 out_h = fopen(".tmpconfig.h", "w");
666 if (!out_h) {
667 fclose(out);
668 return 1;
669 }
670
671 sym = sym_lookup("KERNELVERSION", 0);
672 sym_calc_value(sym);
673 time(&now);
674 fprintf(out, "#\n"
675 "# Automatically generated make config: don't edit\n"
676 "# Linux kernel version: %s\n"
677 "# %s"
678 "#\n",
679 sym_get_string_value(sym), ctime(&now));
680 fprintf(out_h, "/*\n"
681 " * Automatically generated C config: don't edit\n"
682 " * Linux kernel version: %s\n"
683 " * %s"
684 " */\n"
685 "#define AUTOCONF_INCLUDED\n",
686 sym_get_string_value(sym), ctime(&now));
687
688 for_all_symbols(i, sym) {
689 sym_calc_value(sym);
690 if (!(sym->flags & SYMBOL_WRITE) || !sym->name)
691 continue;
692 switch (sym->type) {
693 case S_BOOLEAN:
694 case S_TRISTATE:
695 switch (sym_get_tristate_value(sym)) {
696 case no:
697 break;
698 case mod:
699 fprintf(out, "CONFIG_%s=m\n", sym->name);
700 fprintf(out_h, "#define CONFIG_%s_MODULE 1\n", sym->name);
701 break;
702 case yes:
703 fprintf(out, "CONFIG_%s=y\n", sym->name);
704 fprintf(out_h, "#define CONFIG_%s 1\n", sym->name);
705 break;
706 }
707 break;
708 case S_STRING:
709 str = sym_get_string_value(sym);
710 fprintf(out, "CONFIG_%s=\"", sym->name);
711 fprintf(out_h, "#define CONFIG_%s \"", sym->name);
712 while (1) {
713 l = strcspn(str, "\"\\");
714 if (l) {
715 fwrite(str, l, 1, out);
716 fwrite(str, l, 1, out_h);
717 str += l;
718 }
719 if (!*str)
720 break;
721 fprintf(out, "\\%c", *str);
722 fprintf(out_h, "\\%c", *str);
723 str++;
724 }
725 fputs("\"\n", out);
726 fputs("\"\n", out_h);
727 break;
728 case S_HEX:
729 str = sym_get_string_value(sym);
730 if (str[0] != '0' || (str[1] != 'x' && str[1] != 'X')) {
731 fprintf(out, "CONFIG_%s=%s\n", sym->name, str);
732 fprintf(out_h, "#define CONFIG_%s 0x%s\n", sym->name, str);
733 break;
734 }
735 case S_INT:
736 str = sym_get_string_value(sym);
737 fprintf(out, "CONFIG_%s=%s\n", sym->name, str);
738 fprintf(out_h, "#define CONFIG_%s %s\n", sym->name, str);
739 break;
740 default:
741 break;
742 }
743 }
744 fclose(out);
745 fclose(out_h);
746
747 name = getenv("KCONFIG_AUTOHEADER");
748 if (!name)
749 name = "include/linux/autoconf.h";
750 if (rename(".tmpconfig.h", name))
751 return 1;
752 name = getenv("KCONFIG_AUTOCONFIG");
753 if (!name)
754 name = "include/config/auto.conf";
755 /*
756 * This must be the last step, kbuild has a dependency on auto.conf
757 * and this marks the successful completion of the previous steps.
758 */
759 if (rename(".tmpconfig", name))
760 return 1;
528 761
529 return 0; 762 return 0;
530} 763}
diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
index 30e4f9d69c..6f98dbfe70 100644
--- a/scripts/kconfig/expr.c
+++ b/scripts/kconfig/expr.c
@@ -145,7 +145,8 @@ static void __expr_eliminate_eq(enum expr_type type, struct expr **ep1, struct e
145 return; 145 return;
146 } 146 }
147 if (e1->type == E_SYMBOL && e2->type == E_SYMBOL && 147 if (e1->type == E_SYMBOL && e2->type == E_SYMBOL &&
148 e1->left.sym == e2->left.sym && (e1->left.sym->flags & (SYMBOL_YES|SYMBOL_NO))) 148 e1->left.sym == e2->left.sym &&
149 (e1->left.sym == &symbol_yes || e1->left.sym == &symbol_no))
149 return; 150 return;
150 if (!expr_eq(e1, e2)) 151 if (!expr_eq(e1, e2))
151 return; 152 return;
@@ -1012,73 +1013,73 @@ int expr_compare_type(enum expr_type t1, enum expr_type t2)
1012#endif 1013#endif
1013} 1014}
1014 1015
1015void expr_print(struct expr *e, void (*fn)(void *, const char *), void *data, int prevtoken) 1016void expr_print(struct expr *e, void (*fn)(void *, struct symbol *, const char *), void *data, int prevtoken)
1016{ 1017{
1017 if (!e) { 1018 if (!e) {
1018 fn(data, "y"); 1019 fn(data, NULL, "y");
1019 return; 1020 return;
1020 } 1021 }
1021 1022
1022 if (expr_compare_type(prevtoken, e->type) > 0) 1023 if (expr_compare_type(prevtoken, e->type) > 0)
1023 fn(data, "("); 1024 fn(data, NULL, "(");
1024 switch (e->type) { 1025 switch (e->type) {
1025 case E_SYMBOL: 1026 case E_SYMBOL:
1026 if (e->left.sym->name) 1027 if (e->left.sym->name)
1027 fn(data, e->left.sym->name); 1028 fn(data, e->left.sym, e->left.sym->name);
1028 else 1029 else
1029 fn(data, "<choice>"); 1030 fn(data, NULL, "<choice>");
1030 break; 1031 break;
1031 case E_NOT: 1032 case E_NOT:
1032 fn(data, "!"); 1033 fn(data, NULL, "!");
1033 expr_print(e->left.expr, fn, data, E_NOT); 1034 expr_print(e->left.expr, fn, data, E_NOT);
1034 break; 1035 break;
1035 case E_EQUAL: 1036 case E_EQUAL:
1036 fn(data, e->left.sym->name); 1037 fn(data, e->left.sym, e->left.sym->name);
1037 fn(data, "="); 1038 fn(data, NULL, "=");
1038 fn(data, e->right.sym->name); 1039 fn(data, e->right.sym, e->right.sym->name);
1039 break; 1040 break;
1040 case E_UNEQUAL: 1041 case E_UNEQUAL:
1041 fn(data, e->left.sym->name); 1042 fn(data, e->left.sym, e->left.sym->name);
1042 fn(data, "!="); 1043 fn(data, NULL, "!=");
1043 fn(data, e->right.sym->name); 1044 fn(data, e->right.sym, e->right.sym->name);
1044 break; 1045 break;
1045 case E_OR: 1046 case E_OR:
1046 expr_print(e->left.expr, fn, data, E_OR); 1047 expr_print(e->left.expr, fn, data, E_OR);
1047 fn(data, " || "); 1048 fn(data, NULL, " || ");
1048 expr_print(e->right.expr, fn, data, E_OR); 1049 expr_print(e->right.expr, fn, data, E_OR);
1049 break; 1050 break;
1050 case E_AND: 1051 case E_AND:
1051 expr_print(e->left.expr, fn, data, E_AND); 1052 expr_print(e->left.expr, fn, data, E_AND);
1052 fn(data, " && "); 1053 fn(data, NULL, " && ");
1053 expr_print(e->right.expr, fn, data, E_AND); 1054 expr_print(e->right.expr, fn, data, E_AND);
1054 break; 1055 break;
1055 case E_CHOICE: 1056 case E_CHOICE:
1056 fn(data, e->right.sym->name); 1057 fn(data, e->right.sym, e->right.sym->name);
1057 if (e->left.expr) { 1058 if (e->left.expr) {
1058 fn(data, " ^ "); 1059 fn(data, NULL, " ^ ");
1059 expr_print(e->left.expr, fn, data, E_CHOICE); 1060 expr_print(e->left.expr, fn, data, E_CHOICE);
1060 } 1061 }
1061 break; 1062 break;
1062 case E_RANGE: 1063 case E_RANGE:
1063 fn(data, "["); 1064 fn(data, NULL, "[");
1064 fn(data, e->left.sym->name); 1065 fn(data, e->left.sym, e->left.sym->name);
1065 fn(data, " "); 1066 fn(data, NULL, " ");
1066 fn(data, e->right.sym->name); 1067 fn(data, e->right.sym, e->right.sym->name);
1067 fn(data, "]"); 1068 fn(data, NULL, "]");
1068 break; 1069 break;
1069 default: 1070 default:
1070 { 1071 {
1071 char buf[32]; 1072 char buf[32];
1072 sprintf(buf, "<unknown type %d>", e->type); 1073 sprintf(buf, "<unknown type %d>", e->type);
1073 fn(data, buf); 1074 fn(data, NULL, buf);
1074 break; 1075 break;
1075 } 1076 }
1076 } 1077 }
1077 if (expr_compare_type(prevtoken, e->type) > 0) 1078 if (expr_compare_type(prevtoken, e->type) > 0)
1078 fn(data, ")"); 1079 fn(data, NULL, ")");
1079} 1080}
1080 1081
1081static void expr_print_file_helper(void *data, const char *str) 1082static void expr_print_file_helper(void *data, struct symbol *sym, const char *str)
1082{ 1083{
1083 fwrite(str, strlen(str), 1, data); 1084 fwrite(str, strlen(str), 1, data);
1084} 1085}
@@ -1088,7 +1089,7 @@ void expr_fprint(struct expr *e, FILE *out)
1088 expr_print(e, expr_print_file_helper, out, E_NONE); 1089 expr_print(e, expr_print_file_helper, out, E_NONE);
1089} 1090}
1090 1091
1091static void expr_print_gstr_helper(void *data, const char *str) 1092static void expr_print_gstr_helper(void *data, struct symbol *sym, const char *str)
1092{ 1093{
1093 str_append((struct gstr*)data, str); 1094 str_append((struct gstr*)data, str);
1094} 1095}
diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
index 1b36ef18c4..6084525f60 100644
--- a/scripts/kconfig/expr.h
+++ b/scripts/kconfig/expr.h
@@ -63,12 +63,18 @@ enum symbol_type {
63 S_UNKNOWN, S_BOOLEAN, S_TRISTATE, S_INT, S_HEX, S_STRING, S_OTHER 63 S_UNKNOWN, S_BOOLEAN, S_TRISTATE, S_INT, S_HEX, S_STRING, S_OTHER
64}; 64};
65 65
66enum {
67 S_DEF_USER, /* main user value */
68 S_DEF_AUTO,
69};
70
66struct symbol { 71struct symbol {
67 struct symbol *next; 72 struct symbol *next;
68 char *name; 73 char *name;
69 char *help; 74 char *help;
70 enum symbol_type type; 75 enum symbol_type type;
71 struct symbol_value curr, user; 76 struct symbol_value curr;
77 struct symbol_value def[4];
72 tristate visible; 78 tristate visible;
73 int flags; 79 int flags;
74 struct property *prop; 80 struct property *prop;
@@ -78,10 +84,7 @@ struct symbol {
78 84
79#define for_all_symbols(i, sym) for (i = 0; i < 257; i++) for (sym = symbol_hash[i]; sym; sym = sym->next) if (sym->type != S_OTHER) 85#define for_all_symbols(i, sym) for (i = 0; i < 257; i++) for (sym = symbol_hash[i]; sym; sym = sym->next) if (sym->type != S_OTHER)
80 86
81#define SYMBOL_YES 0x0001 87#define SYMBOL_CONST 0x0001
82#define SYMBOL_MOD 0x0002
83#define SYMBOL_NO 0x0004
84#define SYMBOL_CONST 0x0007
85#define SYMBOL_CHECK 0x0008 88#define SYMBOL_CHECK 0x0008
86#define SYMBOL_CHOICE 0x0010 89#define SYMBOL_CHOICE 0x0010
87#define SYMBOL_CHOICEVAL 0x0020 90#define SYMBOL_CHOICEVAL 0x0020
@@ -90,10 +93,14 @@ struct symbol {
90#define SYMBOL_OPTIONAL 0x0100 93#define SYMBOL_OPTIONAL 0x0100
91#define SYMBOL_WRITE 0x0200 94#define SYMBOL_WRITE 0x0200
92#define SYMBOL_CHANGED 0x0400 95#define SYMBOL_CHANGED 0x0400
93#define SYMBOL_NEW 0x0800
94#define SYMBOL_AUTO 0x1000 96#define SYMBOL_AUTO 0x1000
95#define SYMBOL_CHECKED 0x2000 97#define SYMBOL_CHECKED 0x2000
96#define SYMBOL_WARNED 0x8000 98#define SYMBOL_WARNED 0x8000
99#define SYMBOL_DEF 0x10000
100#define SYMBOL_DEF_USER 0x10000
101#define SYMBOL_DEF_AUTO 0x20000
102#define SYMBOL_DEF3 0x40000
103#define SYMBOL_DEF4 0x80000
97 104
98#define SYMBOL_MAXLENGTH 256 105#define SYMBOL_MAXLENGTH 256
99#define SYMBOL_HASHSIZE 257 106#define SYMBOL_HASHSIZE 257
@@ -149,6 +156,7 @@ struct file *lookup_file(const char *name);
149 156
150extern struct symbol symbol_yes, symbol_no, symbol_mod; 157extern struct symbol symbol_yes, symbol_no, symbol_mod;
151extern struct symbol *modules_sym; 158extern struct symbol *modules_sym;
159extern struct symbol *sym_defconfig_list;
152extern int cdebug; 160extern int cdebug;
153struct expr *expr_alloc_symbol(struct symbol *sym); 161struct expr *expr_alloc_symbol(struct symbol *sym);
154struct expr *expr_alloc_one(enum expr_type type, struct expr *ce); 162struct expr *expr_alloc_one(enum expr_type type, struct expr *ce);
diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c
index 665bd5300a..7b0d3a93d5 100644
--- a/scripts/kconfig/gconf.c
+++ b/scripts/kconfig/gconf.c
@@ -114,12 +114,6 @@ const char *dbg_print_flags(int val)
114 114
115 bzero(buf, 256); 115 bzero(buf, 256);
116 116
117 if (val & SYMBOL_YES)
118 strcat(buf, "yes/");
119 if (val & SYMBOL_MOD)
120 strcat(buf, "mod/");
121 if (val & SYMBOL_NO)
122 strcat(buf, "no/");
123 if (val & SYMBOL_CONST) 117 if (val & SYMBOL_CONST)
124 strcat(buf, "const/"); 118 strcat(buf, "const/");
125 if (val & SYMBOL_CHECK) 119 if (val & SYMBOL_CHECK)
@@ -138,8 +132,6 @@ const char *dbg_print_flags(int val)
138 strcat(buf, "write/"); 132 strcat(buf, "write/");
139 if (val & SYMBOL_CHANGED) 133 if (val & SYMBOL_CHANGED)
140 strcat(buf, "changed/"); 134 strcat(buf, "changed/");
141 if (val & SYMBOL_NEW)
142 strcat(buf, "new/");
143 if (val & SYMBOL_AUTO) 135 if (val & SYMBOL_AUTO)
144 strcat(buf, "auto/"); 136 strcat(buf, "auto/");
145 137
@@ -1192,9 +1184,7 @@ static gchar **fill_row(struct menu *menu)
1192 1184
1193 row[COL_OPTION] = 1185 row[COL_OPTION] =
1194 g_strdup_printf("%s %s", menu_get_prompt(menu), 1186 g_strdup_printf("%s %s", menu_get_prompt(menu),
1195 sym ? (sym-> 1187 sym && sym_has_value(sym) ? "(NEW)" : "");
1196 flags & SYMBOL_NEW ? "(NEW)" : "") :
1197 "");
1198 1188
1199 if (show_all && !menu_is_visible(menu)) 1189 if (show_all && !menu_is_visible(menu))
1200 row[COL_COLOR] = g_strdup("DarkGray"); 1190 row[COL_COLOR] = g_strdup("DarkGray");
diff --git a/scripts/kconfig/lex.zconf.c_shipped b/scripts/kconfig/lex.zconf.c_shipped
index 24e3c8cbb7..800f8c71c4 100644
--- a/scripts/kconfig/lex.zconf.c_shipped
+++ b/scripts/kconfig/lex.zconf.c_shipped
@@ -8,7 +8,7 @@
8#define FLEX_SCANNER 8#define FLEX_SCANNER
9#define YY_FLEX_MAJOR_VERSION 2 9#define YY_FLEX_MAJOR_VERSION 2
10#define YY_FLEX_MINOR_VERSION 5 10#define YY_FLEX_MINOR_VERSION 5
11#define YY_FLEX_SUBMINOR_VERSION 31 11#define YY_FLEX_SUBMINOR_VERSION 33
12#if YY_FLEX_SUBMINOR_VERSION > 0 12#if YY_FLEX_SUBMINOR_VERSION > 0
13#define FLEX_BETA 13#define FLEX_BETA
14#endif 14#endif
@@ -30,7 +30,15 @@
30 30
31/* C99 systems have <inttypes.h>. Non-C99 systems may or may not. */ 31/* C99 systems have <inttypes.h>. Non-C99 systems may or may not. */
32 32
33#if defined __STDC_VERSION__ && __STDC_VERSION__ >= 199901L 33#if __STDC_VERSION__ >= 199901L
34
35/* C99 says to define __STDC_LIMIT_MACROS before including stdint.h,
36 * if you want the limit (max/min) macros for int types.
37 */
38#ifndef __STDC_LIMIT_MACROS
39#define __STDC_LIMIT_MACROS 1
40#endif
41
34#include <inttypes.h> 42#include <inttypes.h>
35typedef int8_t flex_int8_t; 43typedef int8_t flex_int8_t;
36typedef uint8_t flex_uint8_t; 44typedef uint8_t flex_uint8_t;
@@ -134,6 +142,10 @@ typedef unsigned int flex_uint32_t;
134#define YY_BUF_SIZE 16384 142#define YY_BUF_SIZE 16384
135#endif 143#endif
136 144
145/* The state buf must be large enough to hold one state per character in the main buffer.
146 */
147#define YY_STATE_BUF_SIZE ((YY_BUF_SIZE + 2) * sizeof(yy_state_type))
148
137#ifndef YY_TYPEDEF_YY_BUFFER_STATE 149#ifndef YY_TYPEDEF_YY_BUFFER_STATE
138#define YY_TYPEDEF_YY_BUFFER_STATE 150#define YY_TYPEDEF_YY_BUFFER_STATE
139typedef struct yy_buffer_state *YY_BUFFER_STATE; 151typedef struct yy_buffer_state *YY_BUFFER_STATE;
@@ -267,7 +279,7 @@ int zconfleng;
267 279
268/* Points to current character in buffer. */ 280/* Points to current character in buffer. */
269static char *yy_c_buf_p = (char *) 0; 281static char *yy_c_buf_p = (char *) 0;
270static int yy_init = 1; /* whether we need to initialize */ 282static int yy_init = 0; /* whether we need to initialize */
271static int yy_start = 0; /* start state number */ 283static int yy_start = 0; /* start state number */
272 284
273/* Flag which is used to allow zconfwrap()'s to do buffer switches 285/* Flag which is used to allow zconfwrap()'s to do buffer switches
@@ -820,6 +832,8 @@ void alloc_string(const char *str, int size)
820#define YY_EXTRA_TYPE void * 832#define YY_EXTRA_TYPE void *
821#endif 833#endif
822 834
835static int yy_init_globals (void );
836
823/* Macros after this point can all be overridden by user definitions in 837/* Macros after this point can all be overridden by user definitions in
824 * section 1. 838 * section 1.
825 */ 839 */
@@ -942,9 +956,9 @@ YY_DECL
942 int str = 0; 956 int str = 0;
943 int ts, i; 957 int ts, i;
944 958
945 if ( (yy_init) ) 959 if ( !(yy_init) )
946 { 960 {
947 (yy_init) = 0; 961 (yy_init) = 1;
948 962
949#ifdef YY_USER_INIT 963#ifdef YY_USER_INIT
950 YY_USER_INIT; 964 YY_USER_INIT;
@@ -1452,7 +1466,7 @@ static int yy_get_next_buffer (void)
1452 1466
1453 else 1467 else
1454 { 1468 {
1455 size_t num_to_read = 1469 int num_to_read =
1456 YY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1; 1470 YY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1;
1457 1471
1458 while ( num_to_read <= 0 ) 1472 while ( num_to_read <= 0 )
@@ -1969,16 +1983,16 @@ YY_BUFFER_STATE zconf_scan_buffer (char * base, yy_size_t size )
1969 1983
1970/** Setup the input buffer state to scan a string. The next call to zconflex() will 1984/** Setup the input buffer state to scan a string. The next call to zconflex() will
1971 * scan from a @e copy of @a str. 1985 * scan from a @e copy of @a str.
1972 * @param yy_str a NUL-terminated string to scan 1986 * @param yystr a NUL-terminated string to scan
1973 * 1987 *
1974 * @return the newly allocated buffer state object. 1988 * @return the newly allocated buffer state object.
1975 * @note If you want to scan bytes that may contain NUL values, then use 1989 * @note If you want to scan bytes that may contain NUL values, then use
1976 * zconf_scan_bytes() instead. 1990 * zconf_scan_bytes() instead.
1977 */ 1991 */
1978YY_BUFFER_STATE zconf_scan_string (yyconst char * yy_str ) 1992YY_BUFFER_STATE zconf_scan_string (yyconst char * yystr )
1979{ 1993{
1980 1994
1981 return zconf_scan_bytes(yy_str,strlen(yy_str) ); 1995 return zconf_scan_bytes(yystr,strlen(yystr) );
1982} 1996}
1983 1997
1984/** Setup the input buffer state to scan the given bytes. The next call to zconflex() will 1998/** Setup the input buffer state to scan the given bytes. The next call to zconflex() will
@@ -1988,7 +2002,7 @@ YY_BUFFER_STATE zconf_scan_string (yyconst char * yy_str )
1988 * 2002 *
1989 * @return the newly allocated buffer state object. 2003 * @return the newly allocated buffer state object.
1990 */ 2004 */
1991YY_BUFFER_STATE zconf_scan_bytes (yyconst char * bytes, int len ) 2005YY_BUFFER_STATE zconf_scan_bytes (yyconst char * yybytes, int _yybytes_len )
1992{ 2006{
1993 YY_BUFFER_STATE b; 2007 YY_BUFFER_STATE b;
1994 char *buf; 2008 char *buf;
@@ -1996,15 +2010,15 @@ YY_BUFFER_STATE zconf_scan_bytes (yyconst char * bytes, int len )
1996 int i; 2010 int i;
1997 2011
1998 /* Get memory for full buffer, including space for trailing EOB's. */ 2012 /* Get memory for full buffer, including space for trailing EOB's. */
1999 n = len + 2; 2013 n = _yybytes_len + 2;
2000 buf = (char *) zconfalloc(n ); 2014 buf = (char *) zconfalloc(n );
2001 if ( ! buf ) 2015 if ( ! buf )
2002 YY_FATAL_ERROR( "out of dynamic memory in zconf_scan_bytes()" ); 2016 YY_FATAL_ERROR( "out of dynamic memory in zconf_scan_bytes()" );
2003 2017
2004 for ( i = 0; i < len; ++i ) 2018 for ( i = 0; i < _yybytes_len; ++i )
2005 buf[i] = bytes[i]; 2019 buf[i] = yybytes[i];
2006 2020
2007 buf[len] = buf[len+1] = YY_END_OF_BUFFER_CHAR; 2021 buf[_yybytes_len] = buf[_yybytes_len+1] = YY_END_OF_BUFFER_CHAR;
2008 2022
2009 b = zconf_scan_buffer(buf,n ); 2023 b = zconf_scan_buffer(buf,n );
2010 if ( ! b ) 2024 if ( ! b )
@@ -2125,6 +2139,34 @@ void zconfset_debug (int bdebug )
2125 zconf_flex_debug = bdebug ; 2139 zconf_flex_debug = bdebug ;
2126} 2140}
2127 2141
2142static int yy_init_globals (void)
2143{
2144 /* Initialization is the same as for the non-reentrant scanner.
2145 * This function is called from zconflex_destroy(), so don't allocate here.
2146 */
2147
2148 (yy_buffer_stack) = 0;
2149 (yy_buffer_stack_top) = 0;
2150 (yy_buffer_stack_max) = 0;
2151 (yy_c_buf_p) = (char *) 0;
2152 (yy_init) = 0;
2153 (yy_start) = 0;
2154
2155/* Defined in main.c */
2156#ifdef YY_STDINIT
2157 zconfin = stdin;
2158 zconfout = stdout;
2159#else
2160 zconfin = (FILE *) 0;
2161 zconfout = (FILE *) 0;
2162#endif
2163
2164 /* For future reference: Set errno on error, since we are called by
2165 * zconflex_init()
2166 */
2167 return 0;
2168}
2169
2128/* zconflex_destroy is for both reentrant and non-reentrant scanners. */ 2170/* zconflex_destroy is for both reentrant and non-reentrant scanners. */
2129int zconflex_destroy (void) 2171int zconflex_destroy (void)
2130{ 2172{
@@ -2140,6 +2182,10 @@ int zconflex_destroy (void)
2140 zconffree((yy_buffer_stack) ); 2182 zconffree((yy_buffer_stack) );
2141 (yy_buffer_stack) = NULL; 2183 (yy_buffer_stack) = NULL;
2142 2184
2185 /* Reset the globals. This is important in a non-reentrant scanner so the next time
2186 * zconflex() is called, initialization will occur. */
2187 yy_init_globals( );
2188
2143 return 0; 2189 return 0;
2144} 2190}
2145 2191
@@ -2151,7 +2197,7 @@ int zconflex_destroy (void)
2151static void yy_flex_strncpy (char* s1, yyconst char * s2, int n ) 2197static void yy_flex_strncpy (char* s1, yyconst char * s2, int n )
2152{ 2198{
2153 register int i; 2199 register int i;
2154 for ( i = 0; i < n; ++i ) 2200 for ( i = 0; i < n; ++i )
2155 s1[i] = s2[i]; 2201 s1[i] = s2[i];
2156} 2202}
2157#endif 2203#endif
@@ -2160,7 +2206,7 @@ static void yy_flex_strncpy (char* s1, yyconst char * s2, int n )
2160static int yy_flex_strlen (yyconst char * s ) 2206static int yy_flex_strlen (yyconst char * s )
2161{ 2207{
2162 register int n; 2208 register int n;
2163 for ( n = 0; s[n]; ++n ) 2209 for ( n = 0; s[n]; ++n )
2164 ; 2210 ;
2165 2211
2166 return n; 2212 return n;
@@ -2191,19 +2237,6 @@ void zconffree (void * ptr )
2191 2237
2192#define YYTABLES_NAME "yytables" 2238#define YYTABLES_NAME "yytables"
2193 2239
2194#undef YY_NEW_FILE
2195#undef YY_FLUSH_BUFFER
2196#undef yy_set_bol
2197#undef yy_new_buffer
2198#undef yy_set_interactive
2199#undef yytext_ptr
2200#undef YY_DO_BEFORE_ACTION
2201
2202#ifdef YY_DECL_IS_OURS
2203#undef YY_DECL_IS_OURS
2204#undef YY_DECL
2205#endif
2206
2207void zconf_starthelp(void) 2240void zconf_starthelp(void)
2208{ 2241{
2209 new_string(); 2242 new_string();
diff --git a/scripts/kconfig/lkc.h b/scripts/kconfig/lkc.h
index 527f60c99c..2628023a1f 100644
--- a/scripts/kconfig/lkc.h
+++ b/scripts/kconfig/lkc.h
@@ -40,6 +40,10 @@ extern "C" {
40 40
41#define TF_COMMAND 0x0001 41#define TF_COMMAND 0x0001
42#define TF_PARAM 0x0002 42#define TF_PARAM 0x0002
43#define TF_OPTION 0x0004
44
45#define T_OPT_MODULES 1
46#define T_OPT_DEFCONFIG_LIST 2
43 47
44struct kconf_id { 48struct kconf_id {
45 int name; 49 int name;
@@ -60,8 +64,6 @@ int zconf_lineno(void);
60char *zconf_curname(void); 64char *zconf_curname(void);
61 65
62/* confdata.c */ 66/* confdata.c */
63extern const char conf_def_filename[];
64
65char *conf_get_default_confname(void); 67char *conf_get_default_confname(void);
66 68
67/* kconfig_load.c */ 69/* kconfig_load.c */
@@ -78,6 +80,7 @@ struct property *menu_add_prop(enum prop_type type, char *prompt, struct expr *e
78struct property *menu_add_prompt(enum prop_type type, char *prompt, struct expr *dep); 80struct property *menu_add_prompt(enum prop_type type, char *prompt, struct expr *dep);
79void menu_add_expr(enum prop_type type, struct expr *expr, struct expr *dep); 81void menu_add_expr(enum prop_type type, struct expr *expr, struct expr *dep);
80void menu_add_symbol(enum prop_type type, struct symbol *sym, struct expr *dep); 82void menu_add_symbol(enum prop_type type, struct symbol *sym, struct expr *dep);
83void menu_add_option(int token, char *arg);
81void menu_finalize(struct menu *parent); 84void menu_finalize(struct menu *parent);
82void menu_set_type(int type); 85void menu_set_type(int type);
83 86
@@ -99,6 +102,7 @@ const char *str_get(struct gstr *gs);
99/* symbol.c */ 102/* symbol.c */
100void sym_init(void); 103void sym_init(void);
101void sym_clear_all_valid(void); 104void sym_clear_all_valid(void);
105void sym_set_all_changed(void);
102void sym_set_changed(struct symbol *sym); 106void sym_set_changed(struct symbol *sym);
103struct symbol *sym_check_deps(struct symbol *sym); 107struct symbol *sym_check_deps(struct symbol *sym);
104struct property *prop_alloc(enum prop_type type, struct symbol *sym); 108struct property *prop_alloc(enum prop_type type, struct symbol *sym);
@@ -137,7 +141,7 @@ static inline bool sym_is_optional(struct symbol *sym)
137 141
138static inline bool sym_has_value(struct symbol *sym) 142static inline bool sym_has_value(struct symbol *sym)
139{ 143{
140 return sym->flags & SYMBOL_NEW ? false : true; 144 return sym->flags & SYMBOL_DEF_USER ? true : false;
141} 145}
142 146
143#ifdef __cplusplus 147#ifdef __cplusplus
diff --git a/scripts/kconfig/lkc_proto.h b/scripts/kconfig/lkc_proto.h
index b6a389c5fc..a263746cfa 100644
--- a/scripts/kconfig/lkc_proto.h
+++ b/scripts/kconfig/lkc_proto.h
@@ -2,8 +2,9 @@
2/* confdata.c */ 2/* confdata.c */
3P(conf_parse,void,(const char *name)); 3P(conf_parse,void,(const char *name));
4P(conf_read,int,(const char *name)); 4P(conf_read,int,(const char *name));
5P(conf_read_simple,int,(const char *name)); 5P(conf_read_simple,int,(const char *name, int));
6P(conf_write,int,(const char *name)); 6P(conf_write,int,(const char *name));
7P(conf_write_autoconf,int,(void));
7 8
8/* menu.c */ 9/* menu.c */
9P(rootmenu,struct menu,); 10P(rootmenu,struct menu,);
@@ -38,4 +39,4 @@ P(prop_get_type_name,const char *,(enum prop_type type));
38 39
39/* expr.c */ 40/* expr.c */
40P(expr_compare_type,int,(enum expr_type t1, enum expr_type t2)); 41P(expr_compare_type,int,(enum expr_type t1, enum expr_type t2));
41P(expr_print,void,(struct expr *e, void (*fn)(void *, const char *), void *data, int prevtoken)); 42P(expr_print,void,(struct expr *e, void (*fn)(void *, struct symbol *, const char *), void *data, int prevtoken));
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index 0fce20cb7f..c86c27f2c7 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -114,7 +114,7 @@ void menu_set_type(int type)
114 sym->type = type; 114 sym->type = type;
115 return; 115 return;
116 } 116 }
117 menu_warn(current_entry, "type of '%s' redefined from '%s' to '%s'\n", 117 menu_warn(current_entry, "type of '%s' redefined from '%s' to '%s'",
118 sym->name ? sym->name : "<choice>", 118 sym->name ? sym->name : "<choice>",
119 sym_type_name(sym->type), sym_type_name(type)); 119 sym_type_name(sym->type), sym_type_name(type));
120} 120}
@@ -124,15 +124,20 @@ struct property *menu_add_prop(enum prop_type type, char *prompt, struct expr *e
124 struct property *prop = prop_alloc(type, current_entry->sym); 124 struct property *prop = prop_alloc(type, current_entry->sym);
125 125
126 prop->menu = current_entry; 126 prop->menu = current_entry;
127 prop->text = prompt;
128 prop->expr = expr; 127 prop->expr = expr;
129 prop->visible.expr = menu_check_dep(dep); 128 prop->visible.expr = menu_check_dep(dep);
130 129
131 if (prompt) { 130 if (prompt) {
131 if (isspace(*prompt)) {
132 prop_warn(prop, "leading whitespace ignored");
133 while (isspace(*prompt))
134 prompt++;
135 }
132 if (current_entry->prompt) 136 if (current_entry->prompt)
133 menu_warn(current_entry, "prompt redefined\n"); 137 prop_warn(prop, "prompt redefined");
134 current_entry->prompt = prop; 138 current_entry->prompt = prop;
135 } 139 }
140 prop->text = prompt;
136 141
137 return prop; 142 return prop;
138} 143}
@@ -152,6 +157,24 @@ void menu_add_symbol(enum prop_type type, struct symbol *sym, struct expr *dep)
152 menu_add_prop(type, NULL, expr_alloc_symbol(sym), dep); 157 menu_add_prop(type, NULL, expr_alloc_symbol(sym), dep);
153} 158}
154 159
160void menu_add_option(int token, char *arg)
161{
162 struct property *prop;
163
164 switch (token) {
165 case T_OPT_MODULES:
166 prop = prop_alloc(P_DEFAULT, modules_sym);
167 prop->expr = expr_alloc_symbol(current_entry->sym);
168 break;
169 case T_OPT_DEFCONFIG_LIST:
170 if (!sym_defconfig_list)
171 sym_defconfig_list = current_entry->sym;
172 else if (sym_defconfig_list != current_entry->sym)
173 zconf_error("trying to redefine defconfig symbol");
174 break;
175 }
176}
177
155static int menu_range_valid_sym(struct symbol *sym, struct symbol *sym2) 178static int menu_range_valid_sym(struct symbol *sym, struct symbol *sym2)
156{ 179{
157 return sym2->type == S_INT || sym2->type == S_HEX || 180 return sym2->type == S_INT || sym2->type == S_HEX ||
@@ -325,11 +348,10 @@ void menu_finalize(struct menu *parent)
325 348
326 if (sym && !(sym->flags & SYMBOL_WARNED)) { 349 if (sym && !(sym->flags & SYMBOL_WARNED)) {
327 if (sym->type == S_UNKNOWN) 350 if (sym->type == S_UNKNOWN)
328 menu_warn(parent, "config symbol defined " 351 menu_warn(parent, "config symbol defined without type");
329 "without type\n");
330 352
331 if (sym_is_choice(sym) && !parent->prompt) 353 if (sym_is_choice(sym) && !parent->prompt)
332 menu_warn(parent, "choice must have a prompt\n"); 354 menu_warn(parent, "choice must have a prompt");
333 355
334 /* Check properties connected to this symbol */ 356 /* Check properties connected to this symbol */
335 sym_check_prop(sym); 357 sym_check_prop(sym);
diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
index 4590cd3162..393f3749f3 100644
--- a/scripts/kconfig/qconf.cc
+++ b/scripts/kconfig/qconf.cc
@@ -6,16 +6,20 @@
6#include <qapplication.h> 6#include <qapplication.h>
7#include <qmainwindow.h> 7#include <qmainwindow.h>
8#include <qtoolbar.h> 8#include <qtoolbar.h>
9#include <qlayout.h>
9#include <qvbox.h> 10#include <qvbox.h>
10#include <qsplitter.h> 11#include <qsplitter.h>
11#include <qlistview.h> 12#include <qlistview.h>
12#include <qtextview.h> 13#include <qtextbrowser.h>
13#include <qlineedit.h> 14#include <qlineedit.h>
15#include <qlabel.h>
16#include <qpushbutton.h>
14#include <qmenubar.h> 17#include <qmenubar.h>
15#include <qmessagebox.h> 18#include <qmessagebox.h>
16#include <qaction.h> 19#include <qaction.h>
17#include <qheader.h> 20#include <qheader.h>
18#include <qfiledialog.h> 21#include <qfiledialog.h>
22#include <qdragobject.h>
19#include <qregexp.h> 23#include <qregexp.h>
20 24
21#include <stdlib.h> 25#include <stdlib.h>
@@ -32,32 +36,16 @@
32#endif 36#endif
33 37
34static QApplication *configApp; 38static QApplication *configApp;
39static ConfigSettings *configSettings;
35 40
36static inline QString qgettext(const char* str) 41static inline QString qgettext(const char* str)
37{ 42{
38 return QString::fromLocal8Bit(gettext(str)); 43 return QString::fromLocal8Bit(gettext(str));
39} 44}
40 45
41static inline QString qgettext(const QString& str) 46static inline QString qgettext(const QString& str)
42{ 47{
43 return QString::fromLocal8Bit(gettext(str.latin1())); 48 return QString::fromLocal8Bit(gettext(str.latin1()));
44}
45
46ConfigSettings::ConfigSettings()
47 : showAll(false), showName(false), showRange(false), showData(false)
48{
49}
50
51#if QT_VERSION >= 300
52/**
53 * Reads the list column settings from the application settings.
54 */
55void ConfigSettings::readListSettings()
56{
57 showAll = readBoolEntry("/kconfig/qconf/showAll", false);
58 showName = readBoolEntry("/kconfig/qconf/showName", false);
59 showRange = readBoolEntry("/kconfig/qconf/showRange", false);
60 showData = readBoolEntry("/kconfig/qconf/showData", false);
61} 49}
62 50
63/** 51/**
@@ -88,76 +76,7 @@ bool ConfigSettings::writeSizes(const QString& key, const QValueList<int>& value
88 stringList.push_back(QString::number(*it)); 76 stringList.push_back(QString::number(*it));
89 return writeEntry(key, stringList); 77 return writeEntry(key, stringList);
90} 78}
91#endif
92
93
94/*
95 * update all the children of a menu entry
96 * removes/adds the entries from the parent widget as necessary
97 *
98 * parent: either the menu list widget or a menu entry widget
99 * menu: entry to be updated
100 */
101template <class P>
102void ConfigList::updateMenuList(P* parent, struct menu* menu)
103{
104 struct menu* child;
105 ConfigItem* item;
106 ConfigItem* last;
107 bool visible;
108 enum prop_type type;
109 79
110 if (!menu) {
111 while ((item = parent->firstChild()))
112 delete item;
113 return;
114 }
115
116 last = parent->firstChild();
117 if (last && !last->goParent)
118 last = 0;
119 for (child = menu->list; child; child = child->next) {
120 item = last ? last->nextSibling() : parent->firstChild();
121 type = child->prompt ? child->prompt->type : P_UNKNOWN;
122
123 switch (mode) {
124 case menuMode:
125 if (!(child->flags & MENU_ROOT))
126 goto hide;
127 break;
128 case symbolMode:
129 if (child->flags & MENU_ROOT)
130 goto hide;
131 break;
132 default:
133 break;
134 }
135
136 visible = menu_is_visible(child);
137 if (showAll || visible) {
138 if (!item || item->menu != child)
139 item = new ConfigItem(parent, last, child, visible);
140 else
141 item->testUpdateMenu(visible);
142
143 if (mode == fullMode || mode == menuMode || type != P_MENU)
144 updateMenuList(item, child);
145 else
146 updateMenuList(item, 0);
147 last = item;
148 continue;
149 }
150 hide:
151 if (item && item->menu == child) {
152 last = parent->firstChild();
153 if (last == item)
154 last = 0;
155 else while (last->nextSibling() != item)
156 last = last->nextSibling();
157 delete item;
158 }
159 }
160}
161 80
162#if QT_VERSION >= 300 81#if QT_VERSION >= 300
163/* 82/*
@@ -355,6 +274,12 @@ ConfigItem::~ConfigItem(void)
355 } 274 }
356} 275}
357 276
277ConfigLineEdit::ConfigLineEdit(ConfigView* parent)
278 : Parent(parent)
279{
280 connect(this, SIGNAL(lostFocus()), SLOT(hide()));
281}
282
358void ConfigLineEdit::show(ConfigItem* i) 283void ConfigLineEdit::show(ConfigItem* i)
359{ 284{
360 item = i; 285 item = i;
@@ -385,14 +310,14 @@ void ConfigLineEdit::keyPressEvent(QKeyEvent* e)
385 hide(); 310 hide();
386} 311}
387 312
388ConfigList::ConfigList(ConfigView* p, ConfigMainWindow* cv, ConfigSettings* configSettings) 313ConfigList::ConfigList(ConfigView* p, const char *name)
389 : Parent(p), cview(cv), 314 : Parent(p, name),
390 updateAll(false), 315 updateAll(false),
391 symbolYesPix(xpm_symbol_yes), symbolModPix(xpm_symbol_mod), symbolNoPix(xpm_symbol_no), 316 symbolYesPix(xpm_symbol_yes), symbolModPix(xpm_symbol_mod), symbolNoPix(xpm_symbol_no),
392 choiceYesPix(xpm_choice_yes), choiceNoPix(xpm_choice_no), 317 choiceYesPix(xpm_choice_yes), choiceNoPix(xpm_choice_no),
393 menuPix(xpm_menu), menuInvPix(xpm_menu_inv), menuBackPix(xpm_menuback), voidPix(xpm_void), 318 menuPix(xpm_menu), menuInvPix(xpm_menu_inv), menuBackPix(xpm_menuback), voidPix(xpm_void),
394 showAll(false), showName(false), showRange(false), showData(false), 319 showAll(false), showName(false), showRange(false), showData(false),
395 rootEntry(0) 320 rootEntry(0), headerPopup(0)
396{ 321{
397 int i; 322 int i;
398 323
@@ -406,11 +331,14 @@ ConfigList::ConfigList(ConfigView* p, ConfigMainWindow* cv, ConfigSettings* conf
406 connect(this, SIGNAL(selectionChanged(void)), 331 connect(this, SIGNAL(selectionChanged(void)),
407 SLOT(updateSelection(void))); 332 SLOT(updateSelection(void)));
408 333
409 if (configSettings) { 334 if (name) {
410 showAll = configSettings->showAll; 335 configSettings->beginGroup(name);
411 showName = configSettings->showName; 336 showAll = configSettings->readBoolEntry("/showAll", false);
412 showRange = configSettings->showRange; 337 showName = configSettings->readBoolEntry("/showName", false);
413 showData = configSettings->showData; 338 showRange = configSettings->readBoolEntry("/showRange", false);
339 showData = configSettings->readBoolEntry("/showData", false);
340 configSettings->endGroup();
341 connect(configApp, SIGNAL(aboutToQuit()), SLOT(saveSettings()));
414 } 342 }
415 343
416 for (i = 0; i < colNr; i++) 344 for (i = 0; i < colNr; i++)
@@ -441,6 +369,30 @@ void ConfigList::reinit(void)
441 updateListAll(); 369 updateListAll();
442} 370}
443 371
372void ConfigList::saveSettings(void)
373{
374 if (name()) {
375 configSettings->beginGroup(name());
376 configSettings->writeEntry("/showName", showName);
377 configSettings->writeEntry("/showRange", showRange);
378 configSettings->writeEntry("/showData", showData);
379 configSettings->writeEntry("/showAll", showAll);
380 configSettings->endGroup();
381 }
382}
383
384ConfigItem* ConfigList::findConfigItem(struct menu *menu)
385{
386 ConfigItem* item = (ConfigItem*)menu->data;
387
388 for (; item; item = item->nextItem) {
389 if (this == item->listView())
390 break;
391 }
392
393 return item;
394}
395
444void ConfigList::updateSelection(void) 396void ConfigList::updateSelection(void)
445{ 397{
446 struct menu *menu; 398 struct menu *menu;
@@ -450,9 +402,8 @@ void ConfigList::updateSelection(void)
450 if (!item) 402 if (!item)
451 return; 403 return;
452 404
453 cview->setHelp(item);
454
455 menu = item->menu; 405 menu = item->menu;
406 emit menuChanged(menu);
456 if (!menu) 407 if (!menu)
457 return; 408 return;
458 type = menu->prompt ? menu->prompt->type : P_UNKNOWN; 409 type = menu->prompt ? menu->prompt->type : P_UNKNOWN;
@@ -464,8 +415,20 @@ void ConfigList::updateList(ConfigItem* item)
464{ 415{
465 ConfigItem* last = 0; 416 ConfigItem* last = 0;
466 417
467 if (!rootEntry) 418 if (!rootEntry) {
468 goto update; 419 if (mode != listMode)
420 goto update;
421 QListViewItemIterator it(this);
422 ConfigItem* item;
423
424 for (; it.current(); ++it) {
425 item = (ConfigItem*)it.current();
426 if (!item->menu)
427 continue;
428 item->testUpdateMenu(menu_is_visible(item->menu));
429 }
430 return;
431 }
469 432
470 if (rootEntry != &rootmenu && (mode == singleMode || 433 if (rootEntry != &rootmenu && (mode == singleMode ||
471 (mode == symbolMode && rootEntry->parent != &rootmenu))) { 434 (mode == symbolMode && rootEntry->parent != &rootmenu))) {
@@ -491,14 +454,6 @@ update:
491 triggerUpdate(); 454 triggerUpdate();
492} 455}
493 456
494void ConfigList::setAllOpen(bool open)
495{
496 QListViewItemIterator it(this);
497
498 for (; it.current(); it++)
499 it.current()->setOpen(open);
500}
501
502void ConfigList::setValue(ConfigItem* item, tristate val) 457void ConfigList::setValue(ConfigItem* item, tristate val)
503{ 458{
504 struct symbol* sym; 459 struct symbol* sym;
@@ -581,6 +536,7 @@ void ConfigList::setRootMenu(struct menu *menu)
581 rootEntry = menu; 536 rootEntry = menu;
582 updateListAll(); 537 updateListAll();
583 setSelected(currentItem(), hasFocus()); 538 setSelected(currentItem(), hasFocus());
539 ensureItemVisible(currentItem());
584} 540}
585 541
586void ConfigList::setParentMenu(void) 542void ConfigList::setParentMenu(void)
@@ -603,6 +559,74 @@ void ConfigList::setParentMenu(void)
603 } 559 }
604} 560}
605 561
562/*
563 * update all the children of a menu entry
564 * removes/adds the entries from the parent widget as necessary
565 *
566 * parent: either the menu list widget or a menu entry widget
567 * menu: entry to be updated
568 */
569template <class P>
570void ConfigList::updateMenuList(P* parent, struct menu* menu)
571{
572 struct menu* child;
573 ConfigItem* item;
574 ConfigItem* last;
575 bool visible;
576 enum prop_type type;
577
578 if (!menu) {
579 while ((item = parent->firstChild()))
580 delete item;
581 return;
582 }
583
584 last = parent->firstChild();
585 if (last && !last->goParent)
586 last = 0;
587 for (child = menu->list; child; child = child->next) {
588 item = last ? last->nextSibling() : parent->firstChild();
589 type = child->prompt ? child->prompt->type : P_UNKNOWN;
590
591 switch (mode) {
592 case menuMode:
593 if (!(child->flags & MENU_ROOT))
594 goto hide;
595 break;
596 case symbolMode:
597 if (child->flags & MENU_ROOT)
598 goto hide;
599 break;
600 default:
601 break;
602 }
603
604 visible = menu_is_visible(child);
605 if (showAll || visible) {
606 if (!item || item->menu != child)
607 item = new ConfigItem(parent, last, child, visible);
608 else
609 item->testUpdateMenu(visible);
610
611 if (mode == fullMode || mode == menuMode || type != P_MENU)
612 updateMenuList(item, child);
613 else
614 updateMenuList(item, 0);
615 last = item;
616 continue;
617 }
618 hide:
619 if (item && item->menu == child) {
620 last = parent->firstChild();
621 if (last == item)
622 last = 0;
623 else while (last->nextSibling() != item)
624 last = last->nextSibling();
625 delete item;
626 }
627 }
628}
629
606void ConfigList::keyPressEvent(QKeyEvent* ev) 630void ConfigList::keyPressEvent(QKeyEvent* ev)
607{ 631{
608 QListViewItem* i = currentItem(); 632 QListViewItem* i = currentItem();
@@ -610,7 +634,7 @@ void ConfigList::keyPressEvent(QKeyEvent* ev)
610 struct menu *menu; 634 struct menu *menu;
611 enum prop_type type; 635 enum prop_type type;
612 636
613 if (ev->key() == Key_Escape && mode != fullMode) { 637 if (ev->key() == Key_Escape && mode != fullMode && mode != listMode) {
614 emit parentSelected(); 638 emit parentSelected();
615 ev->accept(); 639 ev->accept();
616 return; 640 return;
@@ -755,23 +779,62 @@ skip:
755 779
756void ConfigList::focusInEvent(QFocusEvent *e) 780void ConfigList::focusInEvent(QFocusEvent *e)
757{ 781{
782 struct menu *menu = NULL;
783
758 Parent::focusInEvent(e); 784 Parent::focusInEvent(e);
759 785
760 QListViewItem* item = currentItem(); 786 ConfigItem* item = (ConfigItem *)currentItem();
761 if (!item) 787 if (item) {
762 return; 788 setSelected(item, TRUE);
789 menu = item->menu;
790 }
791 emit gotFocus(menu);
792}
763 793
764 setSelected(item, TRUE); 794void ConfigList::contextMenuEvent(QContextMenuEvent *e)
765 emit gotFocus(); 795{
796 if (e->y() <= header()->geometry().bottom()) {
797 if (!headerPopup) {
798 QAction *action;
799
800 headerPopup = new QPopupMenu(this);
801 action = new QAction("Show Name", 0, this);
802 action->setToggleAction(TRUE);
803 connect(action, SIGNAL(toggled(bool)),
804 parent(), SLOT(setShowName(bool)));
805 connect(parent(), SIGNAL(showNameChanged(bool)),
806 action, SLOT(setOn(bool)));
807 action->setOn(showName);
808 action->addTo(headerPopup);
809 action = new QAction("Show Range", 0, this);
810 action->setToggleAction(TRUE);
811 connect(action, SIGNAL(toggled(bool)),
812 parent(), SLOT(setShowRange(bool)));
813 connect(parent(), SIGNAL(showRangeChanged(bool)),
814 action, SLOT(setOn(bool)));
815 action->setOn(showRange);
816 action->addTo(headerPopup);
817 action = new QAction("Show Data", 0, this);
818 action->setToggleAction(TRUE);
819 connect(action, SIGNAL(toggled(bool)),
820 parent(), SLOT(setShowData(bool)));
821 connect(parent(), SIGNAL(showDataChanged(bool)),
822 action, SLOT(setOn(bool)));
823 action->setOn(showData);
824 action->addTo(headerPopup);
825 }
826 headerPopup->exec(e->globalPos());
827 e->accept();
828 } else
829 e->ignore();
766} 830}
767 831
768ConfigView* ConfigView::viewList; 832ConfigView* ConfigView::viewList;
769 833
770ConfigView::ConfigView(QWidget* parent, ConfigMainWindow* cview, 834ConfigView::ConfigView(QWidget* parent, const char *name)
771 ConfigSettings *configSettings) 835 : Parent(parent, name)
772 : Parent(parent)
773{ 836{
774 list = new ConfigList(this, cview, configSettings); 837 list = new ConfigList(this, name);
775 lineEdit = new ConfigLineEdit(this); 838 lineEdit = new ConfigLineEdit(this);
776 lineEdit->hide(); 839 lineEdit->hide();
777 840
@@ -791,6 +854,50 @@ ConfigView::~ConfigView(void)
791 } 854 }
792} 855}
793 856
857void ConfigView::setShowAll(bool b)
858{
859 if (list->showAll != b) {
860 list->showAll = b;
861 list->updateListAll();
862 emit showAllChanged(b);
863 }
864}
865
866void ConfigView::setShowName(bool b)
867{
868 if (list->showName != b) {
869 list->showName = b;
870 list->reinit();
871 emit showNameChanged(b);
872 }
873}
874
875void ConfigView::setShowRange(bool b)
876{
877 if (list->showRange != b) {
878 list->showRange = b;
879 list->reinit();
880 emit showRangeChanged(b);
881 }
882}
883
884void ConfigView::setShowData(bool b)
885{
886 if (list->showData != b) {
887 list->showData = b;
888 list->reinit();
889 emit showDataChanged(b);
890 }
891}
892
893void ConfigList::setAllOpen(bool open)
894{
895 QListViewItemIterator it(this);
896
897 for (; it.current(); it++)
898 it.current()->setOpen(open);
899}
900
794void ConfigView::updateList(ConfigItem* item) 901void ConfigView::updateList(ConfigItem* item)
795{ 902{
796 ConfigView* v; 903 ConfigView* v;
@@ -807,6 +914,347 @@ void ConfigView::updateListAll(void)
807 v->list->updateListAll(); 914 v->list->updateListAll();
808} 915}
809 916
917ConfigInfoView::ConfigInfoView(QWidget* parent, const char *name)
918 : Parent(parent, name), menu(0)
919{
920 if (name) {
921 configSettings->beginGroup(name);
922 _showDebug = configSettings->readBoolEntry("/showDebug", false);
923 configSettings->endGroup();
924 connect(configApp, SIGNAL(aboutToQuit()), SLOT(saveSettings()));
925 }
926}
927
928void ConfigInfoView::saveSettings(void)
929{
930 if (name()) {
931 configSettings->beginGroup(name());
932 configSettings->writeEntry("/showDebug", showDebug());
933 configSettings->endGroup();
934 }
935}
936
937void ConfigInfoView::setShowDebug(bool b)
938{
939 if (_showDebug != b) {
940 _showDebug = b;
941 if (menu)
942 menuInfo();
943 else if (sym)
944 symbolInfo();
945 emit showDebugChanged(b);
946 }
947}
948
949void ConfigInfoView::setInfo(struct menu *m)
950{
951 if (menu == m)
952 return;
953 menu = m;
954 if (!menu)
955 clear();
956 else
957 menuInfo();
958}
959
960void ConfigInfoView::setSource(const QString& name)
961{
962 const char *p = name.latin1();
963
964 menu = NULL;
965 sym = NULL;
966
967 switch (p[0]) {
968 case 'm':
969 struct menu *m;
970
971 if (sscanf(p, "m%p", &m) == 1 && menu != m) {
972 menu = m;
973 menuInfo();
974 emit menuSelected(menu);
975 }
976 break;
977 case 's':
978 struct symbol *s;
979
980 if (sscanf(p, "s%p", &s) == 1 && sym != s) {
981 sym = s;
982 symbolInfo();
983 }
984 break;
985 }
986}
987
988void ConfigInfoView::symbolInfo(void)
989{
990 QString str;
991
992 str += "<big>Symbol: <b>";
993 str += print_filter(sym->name);
994 str += "</b></big><br><br>value: ";
995 str += print_filter(sym_get_string_value(sym));
996 str += "<br>visibility: ";
997 str += sym->visible == yes ? "y" : sym->visible == mod ? "m" : "n";
998 str += "<br>";
999 str += debug_info(sym);
1000
1001 setText(str);
1002}
1003
1004void ConfigInfoView::menuInfo(void)
1005{
1006 struct symbol* sym;
1007 QString head, debug, help;
1008
1009 sym = menu->sym;
1010 if (sym) {
1011 if (menu->prompt) {
1012 head += "<big><b>";
1013 head += print_filter(_(menu->prompt->text));
1014 head += "</b></big>";
1015 if (sym->name) {
1016 head += " (";
1017 if (showDebug())
1018 head += QString().sprintf("<a href=\"s%p\">", sym);
1019 head += print_filter(sym->name);
1020 if (showDebug())
1021 head += "</a>";
1022 head += ")";
1023 }
1024 } else if (sym->name) {
1025 head += "<big><b>";
1026 if (showDebug())
1027 head += QString().sprintf("<a href=\"s%p\">", sym);
1028 head += print_filter(sym->name);
1029 if (showDebug())
1030 head += "</a>";
1031 head += "</b></big>";
1032 }
1033 head += "<br><br>";
1034
1035 if (showDebug())
1036 debug = debug_info(sym);
1037
1038 help = print_filter(_(sym->help));
1039 } else if (menu->prompt) {
1040 head += "<big><b>";
1041 head += print_filter(_(menu->prompt->text));
1042 head += "</b></big><br><br>";
1043 if (showDebug()) {
1044 if (menu->prompt->visible.expr) {
1045 debug += "&nbsp;&nbsp;dep: ";
1046 expr_print(menu->prompt->visible.expr, expr_print_help, &debug, E_NONE);
1047 debug += "<br><br>";
1048 }
1049 }
1050 }
1051 if (showDebug())
1052 debug += QString().sprintf("defined at %s:%d<br><br>", menu->file->name, menu->lineno);
1053
1054 setText(head + debug + help);
1055}
1056
1057QString ConfigInfoView::debug_info(struct symbol *sym)
1058{
1059 QString debug;
1060
1061 debug += "type: ";
1062 debug += print_filter(sym_type_name(sym->type));
1063 if (sym_is_choice(sym))
1064 debug += " (choice)";
1065 debug += "<br>";
1066 if (sym->rev_dep.expr) {
1067 debug += "reverse dep: ";
1068 expr_print(sym->rev_dep.expr, expr_print_help, &debug, E_NONE);
1069 debug += "<br>";
1070 }
1071 for (struct property *prop = sym->prop; prop; prop = prop->next) {
1072 switch (prop->type) {
1073 case P_PROMPT:
1074 case P_MENU:
1075 debug += QString().sprintf("prompt: <a href=\"m%p\">", prop->menu);
1076 debug += print_filter(_(prop->text));
1077 debug += "</a><br>";
1078 break;
1079 case P_DEFAULT:
1080 debug += "default: ";
1081 expr_print(prop->expr, expr_print_help, &debug, E_NONE);
1082 debug += "<br>";
1083 break;
1084 case P_CHOICE:
1085 if (sym_is_choice(sym)) {
1086 debug += "choice: ";
1087 expr_print(prop->expr, expr_print_help, &debug, E_NONE);
1088 debug += "<br>";
1089 }
1090 break;
1091 case P_SELECT:
1092 debug += "select: ";
1093 expr_print(prop->expr, expr_print_help, &debug, E_NONE);
1094 debug += "<br>";
1095 break;
1096 case P_RANGE:
1097 debug += "range: ";
1098 expr_print(prop->expr, expr_print_help, &debug, E_NONE);
1099 debug += "<br>";
1100 break;
1101 default:
1102 debug += "unknown property: ";
1103 debug += prop_get_type_name(prop->type);
1104 debug += "<br>";
1105 }
1106 if (prop->visible.expr) {
1107 debug += "&nbsp;&nbsp;&nbsp;&nbsp;dep: ";
1108 expr_print(prop->visible.expr, expr_print_help, &debug, E_NONE);
1109 debug += "<br>";
1110 }
1111 }
1112 debug += "<br>";
1113
1114 return debug;
1115}
1116
1117QString ConfigInfoView::print_filter(const QString &str)
1118{
1119 QRegExp re("[<>&\"\\n]");
1120 QString res = str;
1121 for (int i = 0; (i = res.find(re, i)) >= 0;) {
1122 switch (res[i].latin1()) {
1123 case '<':
1124 res.replace(i, 1, "&lt;");
1125 i += 4;
1126 break;
1127 case '>':
1128 res.replace(i, 1, "&gt;");
1129 i += 4;
1130 break;
1131 case '&':
1132 res.replace(i, 1, "&amp;");
1133 i += 5;
1134 break;
1135 case '"':
1136 res.replace(i, 1, "&quot;");
1137 i += 6;
1138 break;
1139 case '\n':
1140 res.replace(i, 1, "<br>");
1141 i += 4;
1142 break;
1143 }
1144 }
1145 return res;
1146}
1147
1148void ConfigInfoView::expr_print_help(void *data, struct symbol *sym, const char *str)
1149{
1150 QString* text = reinterpret_cast<QString*>(data);
1151 QString str2 = print_filter(str);
1152
1153 if (sym && sym->name && !(sym->flags & SYMBOL_CONST)) {
1154 *text += QString().sprintf("<a href=\"s%p\">", sym);
1155 *text += str2;
1156 *text += "</a>";
1157 } else
1158 *text += str2;
1159}
1160
1161QPopupMenu* ConfigInfoView::createPopupMenu(const QPoint& pos)
1162{
1163 QPopupMenu* popup = Parent::createPopupMenu(pos);
1164 QAction* action = new QAction("Show Debug Info", 0, popup);
1165 action->setToggleAction(TRUE);
1166 connect(action, SIGNAL(toggled(bool)), SLOT(setShowDebug(bool)));
1167 connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setOn(bool)));
1168 action->setOn(showDebug());
1169 popup->insertSeparator();
1170 action->addTo(popup);
1171 return popup;
1172}
1173
1174void ConfigInfoView::contentsContextMenuEvent(QContextMenuEvent *e)
1175{
1176 Parent::contentsContextMenuEvent(e);
1177}
1178
1179ConfigSearchWindow::ConfigSearchWindow(QWidget* parent, const char *name)
1180 : Parent(parent, name), result(NULL)
1181{
1182 setCaption("Search Config");
1183
1184 QVBoxLayout* layout1 = new QVBoxLayout(this, 11, 6);
1185 QHBoxLayout* layout2 = new QHBoxLayout(0, 0, 6);
1186 layout2->addWidget(new QLabel("Find:", this));
1187 editField = new QLineEdit(this);
1188 connect(editField, SIGNAL(returnPressed()), SLOT(search()));
1189 layout2->addWidget(editField);
1190 searchButton = new QPushButton("Search", this);
1191 searchButton->setAutoDefault(FALSE);
1192 connect(searchButton, SIGNAL(clicked()), SLOT(search()));
1193 layout2->addWidget(searchButton);
1194 layout1->addLayout(layout2);
1195
1196 split = new QSplitter(this);
1197 split->setOrientation(QSplitter::Vertical);
1198 list = new ConfigView(split, name);
1199 list->list->mode = listMode;
1200 info = new ConfigInfoView(split, name);
1201 connect(list->list, SIGNAL(menuChanged(struct menu *)),
1202 info, SLOT(setInfo(struct menu *)));
1203 layout1->addWidget(split);
1204
1205 if (name) {
1206 int x, y, width, height;
1207 bool ok;
1208
1209 configSettings->beginGroup(name);
1210 width = configSettings->readNumEntry("/window width", parent->width() / 2);
1211 height = configSettings->readNumEntry("/window height", parent->height() / 2);
1212 resize(width, height);
1213 x = configSettings->readNumEntry("/window x", 0, &ok);
1214 if (ok)
1215 y = configSettings->readNumEntry("/window y", 0, &ok);
1216 if (ok)
1217 move(x, y);
1218 QValueList<int> sizes = configSettings->readSizes("/split", &ok);
1219 if (ok)
1220 split->setSizes(sizes);
1221 configSettings->endGroup();
1222 connect(configApp, SIGNAL(aboutToQuit()), SLOT(saveSettings()));
1223 }
1224}
1225
1226void ConfigSearchWindow::saveSettings(void)
1227{
1228 if (name()) {
1229 configSettings->beginGroup(name());
1230 configSettings->writeEntry("/window x", pos().x());
1231 configSettings->writeEntry("/window y", pos().y());
1232 configSettings->writeEntry("/window width", size().width());
1233 configSettings->writeEntry("/window height", size().height());
1234 configSettings->writeSizes("/split", split->sizes());
1235 configSettings->endGroup();
1236 }
1237}
1238
1239void ConfigSearchWindow::search(void)
1240{
1241 struct symbol **p;
1242 struct property *prop;
1243 ConfigItem *lastItem = NULL;
1244
1245 free(result);
1246 list->list->clear();
1247
1248 result = sym_re_search(editField->text().latin1());
1249 if (!result)
1250 return;
1251 for (p = result; *p; p++) {
1252 for_all_prompts((*p), prop)
1253 lastItem = new ConfigItem(list->list, lastItem, prop->menu,
1254 menu_is_visible(prop->menu));
1255 }
1256}
1257
810/* 1258/*
811 * Construct the complete config widget 1259 * Construct the complete config widget
812 */ 1260 */
@@ -818,42 +1266,30 @@ ConfigMainWindow::ConfigMainWindow(void)
818 1266
819 QWidget *d = configApp->desktop(); 1267 QWidget *d = configApp->desktop();
820 1268
821 ConfigSettings* configSettings = new ConfigSettings(); 1269 width = configSettings->readNumEntry("/window width", d->width() - 64);
822#if QT_VERSION >= 300 1270 height = configSettings->readNumEntry("/window height", d->height() - 64);
823 width = configSettings->readNumEntry("/kconfig/qconf/window width", d->width() - 64);
824 height = configSettings->readNumEntry("/kconfig/qconf/window height", d->height() - 64);
825 resize(width, height); 1271 resize(width, height);
826 x = configSettings->readNumEntry("/kconfig/qconf/window x", 0, &ok); 1272 x = configSettings->readNumEntry("/window x", 0, &ok);
827 if (ok) 1273 if (ok)
828 y = configSettings->readNumEntry("/kconfig/qconf/window y", 0, &ok); 1274 y = configSettings->readNumEntry("/window y", 0, &ok);
829 if (ok) 1275 if (ok)
830 move(x, y); 1276 move(x, y);
831 showDebug = configSettings->readBoolEntry("/kconfig/qconf/showDebug", false);
832
833 // read list settings into configSettings, will be used later for ConfigList setup
834 configSettings->readListSettings();
835#else
836 width = d->width() - 64;
837 height = d->height() - 64;
838 resize(width, height);
839 showDebug = false;
840#endif
841 1277
842 split1 = new QSplitter(this); 1278 split1 = new QSplitter(this);
843 split1->setOrientation(QSplitter::Horizontal); 1279 split1->setOrientation(QSplitter::Horizontal);
844 setCentralWidget(split1); 1280 setCentralWidget(split1);
845 1281
846 menuView = new ConfigView(split1, this, configSettings); 1282 menuView = new ConfigView(split1, "menu");
847 menuList = menuView->list; 1283 menuList = menuView->list;
848 1284
849 split2 = new QSplitter(split1); 1285 split2 = new QSplitter(split1);
850 split2->setOrientation(QSplitter::Vertical); 1286 split2->setOrientation(QSplitter::Vertical);
851 1287
852 // create config tree 1288 // create config tree
853 configView = new ConfigView(split2, this, configSettings); 1289 configView = new ConfigView(split2, "config");
854 configList = configView->list; 1290 configList = configView->list;
855 1291
856 helpText = new QTextView(split2); 1292 helpText = new ConfigInfoView(split2, "help");
857 helpText->setTextFormat(Qt::RichText); 1293 helpText->setTextFormat(Qt::RichText);
858 1294
859 setTabOrder(configList, helpText); 1295 setTabOrder(configList, helpText);
@@ -873,6 +1309,8 @@ ConfigMainWindow::ConfigMainWindow(void)
873 connect(saveAction, SIGNAL(activated()), SLOT(saveConfig())); 1309 connect(saveAction, SIGNAL(activated()), SLOT(saveConfig()));
874 QAction *saveAsAction = new QAction("Save As...", "Save &As...", 0, this); 1310 QAction *saveAsAction = new QAction("Save As...", "Save &As...", 0, this);
875 connect(saveAsAction, SIGNAL(activated()), SLOT(saveConfigAs())); 1311 connect(saveAsAction, SIGNAL(activated()), SLOT(saveConfigAs()));
1312 QAction *searchAction = new QAction("Search", "&Search", CTRL+Key_F, this);
1313 connect(searchAction, SIGNAL(activated()), SLOT(searchConfig()));
876 QAction *singleViewAction = new QAction("Single View", QPixmap(xpm_single_view), "Split View", 0, this); 1314 QAction *singleViewAction = new QAction("Single View", QPixmap(xpm_single_view), "Split View", 0, this);
877 connect(singleViewAction, SIGNAL(activated()), SLOT(showSingleView())); 1315 connect(singleViewAction, SIGNAL(activated()), SLOT(showSingleView()));
878 QAction *splitViewAction = new QAction("Split View", QPixmap(xpm_split_view), "Split View", 0, this); 1316 QAction *splitViewAction = new QAction("Split View", QPixmap(xpm_split_view), "Split View", 0, this);
@@ -882,24 +1320,29 @@ ConfigMainWindow::ConfigMainWindow(void)
882 1320
883 QAction *showNameAction = new QAction(NULL, "Show Name", 0, this); 1321 QAction *showNameAction = new QAction(NULL, "Show Name", 0, this);
884 showNameAction->setToggleAction(TRUE); 1322 showNameAction->setToggleAction(TRUE);
885 showNameAction->setOn(configList->showName); 1323 connect(showNameAction, SIGNAL(toggled(bool)), configView, SLOT(setShowName(bool)));
886 connect(showNameAction, SIGNAL(toggled(bool)), SLOT(setShowName(bool))); 1324 connect(configView, SIGNAL(showNameChanged(bool)), showNameAction, SLOT(setOn(bool)));
1325 showNameAction->setOn(configView->showName());
887 QAction *showRangeAction = new QAction(NULL, "Show Range", 0, this); 1326 QAction *showRangeAction = new QAction(NULL, "Show Range", 0, this);
888 showRangeAction->setToggleAction(TRUE); 1327 showRangeAction->setToggleAction(TRUE);
1328 connect(showRangeAction, SIGNAL(toggled(bool)), configView, SLOT(setShowRange(bool)));
1329 connect(configView, SIGNAL(showRangeChanged(bool)), showRangeAction, SLOT(setOn(bool)));
889 showRangeAction->setOn(configList->showRange); 1330 showRangeAction->setOn(configList->showRange);
890 connect(showRangeAction, SIGNAL(toggled(bool)), SLOT(setShowRange(bool)));
891 QAction *showDataAction = new QAction(NULL, "Show Data", 0, this); 1331 QAction *showDataAction = new QAction(NULL, "Show Data", 0, this);
892 showDataAction->setToggleAction(TRUE); 1332 showDataAction->setToggleAction(TRUE);
1333 connect(showDataAction, SIGNAL(toggled(bool)), configView, SLOT(setShowData(bool)));
1334 connect(configView, SIGNAL(showDataChanged(bool)), showDataAction, SLOT(setOn(bool)));
893 showDataAction->setOn(configList->showData); 1335 showDataAction->setOn(configList->showData);
894 connect(showDataAction, SIGNAL(toggled(bool)), SLOT(setShowData(bool)));
895 QAction *showAllAction = new QAction(NULL, "Show All Options", 0, this); 1336 QAction *showAllAction = new QAction(NULL, "Show All Options", 0, this);
896 showAllAction->setToggleAction(TRUE); 1337 showAllAction->setToggleAction(TRUE);
1338 connect(showAllAction, SIGNAL(toggled(bool)), configView, SLOT(setShowAll(bool)));
1339 connect(showAllAction, SIGNAL(toggled(bool)), menuView, SLOT(setShowAll(bool)));
897 showAllAction->setOn(configList->showAll); 1340 showAllAction->setOn(configList->showAll);
898 connect(showAllAction, SIGNAL(toggled(bool)), SLOT(setShowAll(bool)));
899 QAction *showDebugAction = new QAction(NULL, "Show Debug Info", 0, this); 1341 QAction *showDebugAction = new QAction(NULL, "Show Debug Info", 0, this);
900 showDebugAction->setToggleAction(TRUE); 1342 showDebugAction->setToggleAction(TRUE);
901 showDebugAction->setOn(showDebug); 1343 connect(showDebugAction, SIGNAL(toggled(bool)), helpText, SLOT(setShowDebug(bool)));
902 connect(showDebugAction, SIGNAL(toggled(bool)), SLOT(setShowDebug(bool))); 1344 connect(helpText, SIGNAL(showDebugChanged(bool)), showDebugAction, SLOT(setOn(bool)));
1345 showDebugAction->setOn(helpText->showDebug());
903 1346
904 QAction *showIntroAction = new QAction(NULL, "Introduction", 0, this); 1347 QAction *showIntroAction = new QAction(NULL, "Introduction", 0, this);
905 connect(showIntroAction, SIGNAL(activated()), SLOT(showIntro())); 1348 connect(showIntroAction, SIGNAL(activated()), SLOT(showIntro()));
@@ -923,6 +1366,8 @@ ConfigMainWindow::ConfigMainWindow(void)
923 saveAction->addTo(config); 1366 saveAction->addTo(config);
924 saveAsAction->addTo(config); 1367 saveAsAction->addTo(config);
925 config->insertSeparator(); 1368 config->insertSeparator();
1369 searchAction->addTo(config);
1370 config->insertSeparator();
926 quitAction->addTo(config); 1371 quitAction->addTo(config);
927 1372
928 // create options menu 1373 // create options menu
@@ -942,20 +1387,27 @@ ConfigMainWindow::ConfigMainWindow(void)
942 showIntroAction->addTo(helpMenu); 1387 showIntroAction->addTo(helpMenu);
943 showAboutAction->addTo(helpMenu); 1388 showAboutAction->addTo(helpMenu);
944 1389
1390 connect(configList, SIGNAL(menuChanged(struct menu *)),
1391 helpText, SLOT(setInfo(struct menu *)));
945 connect(configList, SIGNAL(menuSelected(struct menu *)), 1392 connect(configList, SIGNAL(menuSelected(struct menu *)),
946 SLOT(changeMenu(struct menu *))); 1393 SLOT(changeMenu(struct menu *)));
947 connect(configList, SIGNAL(parentSelected()), 1394 connect(configList, SIGNAL(parentSelected()),
948 SLOT(goBack())); 1395 SLOT(goBack()));
1396 connect(menuList, SIGNAL(menuChanged(struct menu *)),
1397 helpText, SLOT(setInfo(struct menu *)));
949 connect(menuList, SIGNAL(menuSelected(struct menu *)), 1398 connect(menuList, SIGNAL(menuSelected(struct menu *)),
950 SLOT(changeMenu(struct menu *))); 1399 SLOT(changeMenu(struct menu *)));
951 1400
952 connect(configList, SIGNAL(gotFocus(void)), 1401 connect(configList, SIGNAL(gotFocus(struct menu *)),
953 SLOT(listFocusChanged(void))); 1402 helpText, SLOT(setInfo(struct menu *)));
954 connect(menuList, SIGNAL(gotFocus(void)), 1403 connect(menuList, SIGNAL(gotFocus(struct menu *)),
1404 helpText, SLOT(setInfo(struct menu *)));
1405 connect(menuList, SIGNAL(gotFocus(struct menu *)),
955 SLOT(listFocusChanged(void))); 1406 SLOT(listFocusChanged(void)));
1407 connect(helpText, SIGNAL(menuSelected(struct menu *)),
1408 SLOT(setMenuLink(struct menu *)));
956 1409
957#if QT_VERSION >= 300 1410 QString listMode = configSettings->readEntry("/listMode", "symbol");
958 QString listMode = configSettings->readEntry("/kconfig/qconf/listMode", "symbol");
959 if (listMode == "single") 1411 if (listMode == "single")
960 showSingleView(); 1412 showSingleView();
961 else if (listMode == "full") 1413 else if (listMode == "full")
@@ -964,162 +1416,13 @@ ConfigMainWindow::ConfigMainWindow(void)
964 showSplitView(); 1416 showSplitView();
965 1417
966 // UI setup done, restore splitter positions 1418 // UI setup done, restore splitter positions
967 QValueList<int> sizes = configSettings->readSizes("/kconfig/qconf/split1", &ok); 1419 QValueList<int> sizes = configSettings->readSizes("/split1", &ok);
968 if (ok) 1420 if (ok)
969 split1->setSizes(sizes); 1421 split1->setSizes(sizes);
970 1422
971 sizes = configSettings->readSizes("/kconfig/qconf/split2", &ok); 1423 sizes = configSettings->readSizes("/split2", &ok);
972 if (ok) 1424 if (ok)
973 split2->setSizes(sizes); 1425 split2->setSizes(sizes);
974#else
975 showSplitView();
976#endif
977 delete configSettings;
978}
979
980static QString print_filter(const QString &str)
981{
982 QRegExp re("[<>&\"\\n]");
983 QString res = str;
984 for (int i = 0; (i = res.find(re, i)) >= 0;) {
985 switch (res[i].latin1()) {
986 case '<':
987 res.replace(i, 1, "&lt;");
988 i += 4;
989 break;
990 case '>':
991 res.replace(i, 1, "&gt;");
992 i += 4;
993 break;
994 case '&':
995 res.replace(i, 1, "&amp;");
996 i += 5;
997 break;
998 case '"':
999 res.replace(i, 1, "&quot;");
1000 i += 6;
1001 break;
1002 case '\n':
1003 res.replace(i, 1, "<br>");
1004 i += 4;
1005 break;
1006 }
1007 }
1008 return res;
1009}
1010
1011static void expr_print_help(void *data, const char *str)
1012{
1013 reinterpret_cast<QString*>(data)->append(print_filter(str));
1014}
1015
1016/*
1017 * display a new help entry as soon as a new menu entry is selected
1018 */
1019void ConfigMainWindow::setHelp(QListViewItem* item)
1020{
1021 struct symbol* sym;
1022 struct menu* menu = 0;
1023
1024 configList->parent()->lineEdit->hide();
1025 if (item)
1026 menu = ((ConfigItem*)item)->menu;
1027 if (!menu) {
1028 helpText->setText(QString::null);
1029 return;
1030 }
1031
1032 QString head, debug, help;
1033 menu = ((ConfigItem*)item)->menu;
1034 sym = menu->sym;
1035 if (sym) {
1036 if (menu->prompt) {
1037 head += "<big><b>";
1038 head += print_filter(_(menu->prompt->text));
1039 head += "</b></big>";
1040 if (sym->name) {
1041 head += " (";
1042 head += print_filter(_(sym->name));
1043 head += ")";
1044 }
1045 } else if (sym->name) {
1046 head += "<big><b>";
1047 head += print_filter(_(sym->name));
1048 head += "</b></big>";
1049 }
1050 head += "<br><br>";
1051
1052 if (showDebug) {
1053 debug += "type: ";
1054 debug += print_filter(sym_type_name(sym->type));
1055 if (sym_is_choice(sym))
1056 debug += " (choice)";
1057 debug += "<br>";
1058 if (sym->rev_dep.expr) {
1059 debug += "reverse dep: ";
1060 expr_print(sym->rev_dep.expr, expr_print_help, &debug, E_NONE);
1061 debug += "<br>";
1062 }
1063 for (struct property *prop = sym->prop; prop; prop = prop->next) {
1064 switch (prop->type) {
1065 case P_PROMPT:
1066 case P_MENU:
1067 debug += "prompt: ";
1068 debug += print_filter(_(prop->text));
1069 debug += "<br>";
1070 break;
1071 case P_DEFAULT:
1072 debug += "default: ";
1073 expr_print(prop->expr, expr_print_help, &debug, E_NONE);
1074 debug += "<br>";
1075 break;
1076 case P_CHOICE:
1077 if (sym_is_choice(sym)) {
1078 debug += "choice: ";
1079 expr_print(prop->expr, expr_print_help, &debug, E_NONE);
1080 debug += "<br>";
1081 }
1082 break;
1083 case P_SELECT:
1084 debug += "select: ";
1085 expr_print(prop->expr, expr_print_help, &debug, E_NONE);
1086 debug += "<br>";
1087 break;
1088 case P_RANGE:
1089 debug += "range: ";
1090 expr_print(prop->expr, expr_print_help, &debug, E_NONE);
1091 debug += "<br>";
1092 break;
1093 default:
1094 debug += "unknown property: ";
1095 debug += prop_get_type_name(prop->type);
1096 debug += "<br>";
1097 }
1098 if (prop->visible.expr) {
1099 debug += "&nbsp;&nbsp;&nbsp;&nbsp;dep: ";
1100 expr_print(prop->visible.expr, expr_print_help, &debug, E_NONE);
1101 debug += "<br>";
1102 }
1103 }
1104 debug += "<br>";
1105 }
1106
1107 help = print_filter(_(sym->help));
1108 } else if (menu->prompt) {
1109 head += "<big><b>";
1110 head += print_filter(_(menu->prompt->text));
1111 head += "</b></big><br><br>";
1112 if (showDebug) {
1113 if (menu->prompt->visible.expr) {
1114 debug += "&nbsp;&nbsp;dep: ";
1115 expr_print(menu->prompt->visible.expr, expr_print_help, &debug, E_NONE);
1116 debug += "<br><br>";
1117 }
1118 }
1119 }
1120 if (showDebug)
1121 debug += QString().sprintf("defined at %s:%d<br><br>", menu->file->name, menu->lineno);
1122 helpText->setText(head + debug + help);
1123} 1426}
1124 1427
1125void ConfigMainWindow::loadConfig(void) 1428void ConfigMainWindow::loadConfig(void)
@@ -1147,21 +1450,73 @@ void ConfigMainWindow::saveConfigAs(void)
1147 QMessageBox::information(this, "qconf", "Unable to save configuration!"); 1450 QMessageBox::information(this, "qconf", "Unable to save configuration!");
1148} 1451}
1149 1452
1453void ConfigMainWindow::searchConfig(void)
1454{
1455 if (!searchWindow)
1456 searchWindow = new ConfigSearchWindow(this, "search");
1457 searchWindow->show();
1458}
1459
1150void ConfigMainWindow::changeMenu(struct menu *menu) 1460void ConfigMainWindow::changeMenu(struct menu *menu)
1151{ 1461{
1152 configList->setRootMenu(menu); 1462 configList->setRootMenu(menu);
1153 backAction->setEnabled(TRUE); 1463 backAction->setEnabled(TRUE);
1154} 1464}
1155 1465
1156void ConfigMainWindow::listFocusChanged(void) 1466void ConfigMainWindow::setMenuLink(struct menu *menu)
1157{ 1467{
1158 if (menuList->hasFocus()) { 1468 struct menu *parent;
1159 if (menuList->mode == menuMode) 1469 ConfigList* list = NULL;
1470 ConfigItem* item;
1471
1472 if (!menu_is_visible(menu) && !configView->showAll())
1473 return;
1474
1475 switch (configList->mode) {
1476 case singleMode:
1477 list = configList;
1478 parent = menu_get_parent_menu(menu);
1479 if (!parent)
1480 return;
1481 list->setRootMenu(parent);
1482 break;
1483 case symbolMode:
1484 if (menu->flags & MENU_ROOT) {
1485 configList->setRootMenu(menu);
1160 configList->clearSelection(); 1486 configList->clearSelection();
1161 setHelp(menuList->selectedItem()); 1487 list = menuList;
1162 } else if (configList->hasFocus()) { 1488 } else {
1163 setHelp(configList->selectedItem()); 1489 list = configList;
1490 parent = menu_get_parent_menu(menu->parent);
1491 if (!parent)
1492 return;
1493 item = menuList->findConfigItem(parent);
1494 if (item) {
1495 menuList->setSelected(item, TRUE);
1496 menuList->ensureItemVisible(item);
1497 }
1498 list->setRootMenu(parent);
1499 }
1500 break;
1501 case fullMode:
1502 list = configList;
1503 break;
1164 } 1504 }
1505
1506 if (list) {
1507 item = list->findConfigItem(menu);
1508 if (item) {
1509 list->setSelected(item, TRUE);
1510 list->ensureItemVisible(item);
1511 list->setFocus();
1512 }
1513 }
1514}
1515
1516void ConfigMainWindow::listFocusChanged(void)
1517{
1518 if (menuList->mode == menuMode)
1519 configList->clearSelection();
1165} 1520}
1166 1521
1167void ConfigMainWindow::goBack(void) 1522void ConfigMainWindow::goBack(void)
@@ -1223,53 +1578,6 @@ void ConfigMainWindow::showFullView(void)
1223 configList->setFocus(); 1578 configList->setFocus();
1224} 1579}
1225 1580
1226void ConfigMainWindow::setShowAll(bool b)
1227{
1228 if (configList->showAll == b)
1229 return;
1230 configList->showAll = b;
1231 configList->updateListAll();
1232 menuList->showAll = b;
1233 menuList->updateListAll();
1234}
1235
1236void ConfigMainWindow::setShowDebug(bool b)
1237{
1238 if (showDebug == b)
1239 return;
1240 showDebug = b;
1241}
1242
1243void ConfigMainWindow::setShowName(bool b)
1244{
1245 if (configList->showName == b)
1246 return;
1247 configList->showName = b;
1248 configList->reinit();
1249 menuList->showName = b;
1250 menuList->reinit();
1251}
1252
1253void ConfigMainWindow::setShowRange(bool b)
1254{
1255 if (configList->showRange == b)
1256 return;
1257 configList->showRange = b;
1258 configList->reinit();
1259 menuList->showRange = b;
1260 menuList->reinit();
1261}
1262
1263void ConfigMainWindow::setShowData(bool b)
1264{
1265 if (configList->showData == b)
1266 return;
1267 configList->showData = b;
1268 configList->reinit();
1269 menuList->showData = b;
1270 menuList->reinit();
1271}
1272
1273/* 1581/*
1274 * ask for saving configuration before quitting 1582 * ask for saving configuration before quitting
1275 * TODO ask only when something changed 1583 * TODO ask only when something changed
@@ -1324,17 +1632,10 @@ void ConfigMainWindow::showAbout(void)
1324 1632
1325void ConfigMainWindow::saveSettings(void) 1633void ConfigMainWindow::saveSettings(void)
1326{ 1634{
1327#if QT_VERSION >= 300 1635 configSettings->writeEntry("/window x", pos().x());
1328 ConfigSettings *configSettings = new ConfigSettings; 1636 configSettings->writeEntry("/window y", pos().y());
1329 configSettings->writeEntry("/kconfig/qconf/window x", pos().x()); 1637 configSettings->writeEntry("/window width", size().width());
1330 configSettings->writeEntry("/kconfig/qconf/window y", pos().y()); 1638 configSettings->writeEntry("/window height", size().height());
1331 configSettings->writeEntry("/kconfig/qconf/window width", size().width());
1332 configSettings->writeEntry("/kconfig/qconf/window height", size().height());
1333 configSettings->writeEntry("/kconfig/qconf/showName", configList->showName);
1334 configSettings->writeEntry("/kconfig/qconf/showRange", configList->showRange);
1335 configSettings->writeEntry("/kconfig/qconf/showData", configList->showData);
1336 configSettings->writeEntry("/kconfig/qconf/showAll", configList->showAll);
1337 configSettings->writeEntry("/kconfig/qconf/showDebug", showDebug);
1338 1639
1339 QString entry; 1640 QString entry;
1340 switch(configList->mode) { 1641 switch(configList->mode) {
@@ -1350,13 +1651,10 @@ void ConfigMainWindow::saveSettings(void)
1350 entry = "full"; 1651 entry = "full";
1351 break; 1652 break;
1352 } 1653 }
1353 configSettings->writeEntry("/kconfig/qconf/listMode", entry); 1654 configSettings->writeEntry("/listMode", entry);
1354 1655
1355 configSettings->writeSizes("/kconfig/qconf/split1", split1->sizes()); 1656 configSettings->writeSizes("/split1", split1->sizes());
1356 configSettings->writeSizes("/kconfig/qconf/split2", split2->sizes()); 1657 configSettings->writeSizes("/split2", split2->sizes());
1357
1358 delete configSettings;
1359#endif
1360} 1658}
1361 1659
1362void fixup_rootmenu(struct menu *menu) 1660void fixup_rootmenu(struct menu *menu)
@@ -1414,13 +1712,19 @@ int main(int ac, char** av)
1414 conf_read(NULL); 1712 conf_read(NULL);
1415 //zconfdump(stdout); 1713 //zconfdump(stdout);
1416 1714
1715 configSettings = new ConfigSettings();
1716 configSettings->beginGroup("/kconfig/qconf");
1417 v = new ConfigMainWindow(); 1717 v = new ConfigMainWindow();
1418 1718
1419 //zconfdump(stdout); 1719 //zconfdump(stdout);
1420 v->show(); 1720 configApp->setMainWidget(v);
1421 configApp->connect(configApp, SIGNAL(lastWindowClosed()), SLOT(quit())); 1721 configApp->connect(configApp, SIGNAL(lastWindowClosed()), SLOT(quit()));
1422 configApp->connect(configApp, SIGNAL(aboutToQuit()), v, SLOT(saveSettings())); 1722 configApp->connect(configApp, SIGNAL(aboutToQuit()), v, SLOT(saveSettings()));
1723 v->show();
1423 configApp->exec(); 1724 configApp->exec();
1424 1725
1726 configSettings->endGroup();
1727 delete configSettings;
1728
1425 return 0; 1729 return 0;
1426} 1730}
diff --git a/scripts/kconfig/qconf.h b/scripts/kconfig/qconf.h
index e52f3e90bf..6a9e3b14c2 100644
--- a/scripts/kconfig/qconf.h
+++ b/scripts/kconfig/qconf.h
@@ -7,9 +7,25 @@
7#if QT_VERSION >= 300 7#if QT_VERSION >= 300
8#include <qsettings.h> 8#include <qsettings.h>
9#else 9#else
10class QSettings { }; 10class QSettings {
11public:
12 void beginGroup(const QString& group) { }
13 void endGroup(void) { }
14 bool readBoolEntry(const QString& key, bool def = FALSE, bool* ok = 0) const
15 { if (ok) *ok = FALSE; return def; }
16 int readNumEntry(const QString& key, int def = 0, bool* ok = 0) const
17 { if (ok) *ok = FALSE; return def; }
18 QString readEntry(const QString& key, const QString& def = QString::null, bool* ok = 0) const
19 { if (ok) *ok = FALSE; return def; }
20 QStringList readListEntry(const QString& key, bool* ok = 0) const
21 { if (ok) *ok = FALSE; return QStringList(); }
22 template <class t>
23 bool writeEntry(const QString& key, t value)
24 { return TRUE; }
25};
11#endif 26#endif
12 27
28class ConfigView;
13class ConfigList; 29class ConfigList;
14class ConfigItem; 30class ConfigItem;
15class ConfigLineEdit; 31class ConfigLineEdit;
@@ -18,64 +34,38 @@ class ConfigMainWindow;
18 34
19class ConfigSettings : public QSettings { 35class ConfigSettings : public QSettings {
20public: 36public:
21 ConfigSettings();
22
23#if QT_VERSION >= 300
24 void readListSettings();
25 QValueList<int> readSizes(const QString& key, bool *ok); 37 QValueList<int> readSizes(const QString& key, bool *ok);
26 bool writeSizes(const QString& key, const QValueList<int>& value); 38 bool writeSizes(const QString& key, const QValueList<int>& value);
27#endif
28
29 bool showAll;
30 bool showName;
31 bool showRange;
32 bool showData;
33};
34
35class ConfigView : public QVBox {
36 Q_OBJECT
37 typedef class QVBox Parent;
38public:
39 ConfigView(QWidget* parent, ConfigMainWindow* cview, ConfigSettings* configSettings);
40 ~ConfigView(void);
41 static void updateList(ConfigItem* item);
42 static void updateListAll(void);
43
44public:
45 ConfigList* list;
46 ConfigLineEdit* lineEdit;
47
48 static ConfigView* viewList;
49 ConfigView* nextView;
50}; 39};
51 40
52enum colIdx { 41enum colIdx {
53 promptColIdx, nameColIdx, noColIdx, modColIdx, yesColIdx, dataColIdx, colNr 42 promptColIdx, nameColIdx, noColIdx, modColIdx, yesColIdx, dataColIdx, colNr
54}; 43};
55enum listMode { 44enum listMode {
56 singleMode, menuMode, symbolMode, fullMode 45 singleMode, menuMode, symbolMode, fullMode, listMode
57}; 46};
58 47
59class ConfigList : public QListView { 48class ConfigList : public QListView {
60 Q_OBJECT 49 Q_OBJECT
61 typedef class QListView Parent; 50 typedef class QListView Parent;
62public: 51public:
63 ConfigList(ConfigView* p, ConfigMainWindow* cview, ConfigSettings *configSettings); 52 ConfigList(ConfigView* p, const char *name = 0);
64 void reinit(void); 53 void reinit(void);
65 ConfigView* parent(void) const 54 ConfigView* parent(void) const
66 { 55 {
67 return (ConfigView*)Parent::parent(); 56 return (ConfigView*)Parent::parent();
68 } 57 }
58 ConfigItem* findConfigItem(struct menu *);
69 59
70protected: 60protected:
71 ConfigMainWindow* cview;
72
73 void keyPressEvent(QKeyEvent *e); 61 void keyPressEvent(QKeyEvent *e);
74 void contentsMousePressEvent(QMouseEvent *e); 62 void contentsMousePressEvent(QMouseEvent *e);
75 void contentsMouseReleaseEvent(QMouseEvent *e); 63 void contentsMouseReleaseEvent(QMouseEvent *e);
76 void contentsMouseMoveEvent(QMouseEvent *e); 64 void contentsMouseMoveEvent(QMouseEvent *e);
77 void contentsMouseDoubleClickEvent(QMouseEvent *e); 65 void contentsMouseDoubleClickEvent(QMouseEvent *e);
78 void focusInEvent(QFocusEvent *e); 66 void focusInEvent(QFocusEvent *e);
67 void contextMenuEvent(QContextMenuEvent *e);
68
79public slots: 69public slots:
80 void setRootMenu(struct menu *menu); 70 void setRootMenu(struct menu *menu);
81 71
@@ -83,10 +73,12 @@ public slots:
83 void setValue(ConfigItem* item, tristate val); 73 void setValue(ConfigItem* item, tristate val);
84 void changeValue(ConfigItem* item); 74 void changeValue(ConfigItem* item);
85 void updateSelection(void); 75 void updateSelection(void);
76 void saveSettings(void);
86signals: 77signals:
78 void menuChanged(struct menu *menu);
87 void menuSelected(struct menu *menu); 79 void menuSelected(struct menu *menu);
88 void parentSelected(void); 80 void parentSelected(void);
89 void gotFocus(void); 81 void gotFocus(struct menu *);
90 82
91public: 83public:
92 void updateListAll(void) 84 void updateListAll(void)
@@ -137,6 +129,7 @@ public:
137 struct menu *rootEntry; 129 struct menu *rootEntry;
138 QColorGroup disabledColorGroup; 130 QColorGroup disabledColorGroup;
139 QColorGroup inactivedColorGroup; 131 QColorGroup inactivedColorGroup;
132 QPopupMenu* headerPopup;
140 133
141private: 134private:
142 int colMap[colNr]; 135 int colMap[colNr];
@@ -208,9 +201,7 @@ class ConfigLineEdit : public QLineEdit {
208 Q_OBJECT 201 Q_OBJECT
209 typedef class QLineEdit Parent; 202 typedef class QLineEdit Parent;
210public: 203public:
211 ConfigLineEdit(ConfigView* parent) 204 ConfigLineEdit(ConfigView* parent);
212 : Parent(parent)
213 { }
214 ConfigView* parent(void) const 205 ConfigView* parent(void) const
215 { 206 {
216 return (ConfigView*)Parent::parent(); 207 return (ConfigView*)Parent::parent();
@@ -222,26 +213,104 @@ public:
222 ConfigItem *item; 213 ConfigItem *item;
223}; 214};
224 215
216class ConfigView : public QVBox {
217 Q_OBJECT
218 typedef class QVBox Parent;
219public:
220 ConfigView(QWidget* parent, const char *name = 0);
221 ~ConfigView(void);
222 static void updateList(ConfigItem* item);
223 static void updateListAll(void);
224
225 bool showAll(void) const { return list->showAll; }
226 bool showName(void) const { return list->showName; }
227 bool showRange(void) const { return list->showRange; }
228 bool showData(void) const { return list->showData; }
229public slots:
230 void setShowAll(bool);
231 void setShowName(bool);
232 void setShowRange(bool);
233 void setShowData(bool);
234signals:
235 void showAllChanged(bool);
236 void showNameChanged(bool);
237 void showRangeChanged(bool);
238 void showDataChanged(bool);
239public:
240 ConfigList* list;
241 ConfigLineEdit* lineEdit;
242
243 static ConfigView* viewList;
244 ConfigView* nextView;
245};
246
247class ConfigInfoView : public QTextBrowser {
248 Q_OBJECT
249 typedef class QTextBrowser Parent;
250public:
251 ConfigInfoView(QWidget* parent, const char *name = 0);
252 bool showDebug(void) const { return _showDebug; }
253
254public slots:
255 void setInfo(struct menu *menu);
256 void saveSettings(void);
257 void setSource(const QString& name);
258 void setShowDebug(bool);
259
260signals:
261 void showDebugChanged(bool);
262 void menuSelected(struct menu *);
263
264protected:
265 void symbolInfo(void);
266 void menuInfo(void);
267 QString debug_info(struct symbol *sym);
268 static QString print_filter(const QString &str);
269 static void expr_print_help(void *data, struct symbol *sym, const char *str);
270 QPopupMenu* createPopupMenu(const QPoint& pos);
271 void contentsContextMenuEvent(QContextMenuEvent *e);
272
273 struct symbol *sym;
274 struct menu *menu;
275 bool _showDebug;
276};
277
278class ConfigSearchWindow : public QDialog {
279 Q_OBJECT
280 typedef class QDialog Parent;
281public:
282 ConfigSearchWindow(QWidget* parent, const char *name = 0);
283
284public slots:
285 void saveSettings(void);
286 void search(void);
287
288protected:
289 QLineEdit* editField;
290 QPushButton* searchButton;
291 QSplitter* split;
292 ConfigView* list;
293 ConfigInfoView* info;
294
295 struct symbol **result;
296};
297
225class ConfigMainWindow : public QMainWindow { 298class ConfigMainWindow : public QMainWindow {
226 Q_OBJECT 299 Q_OBJECT
227public: 300public:
228 ConfigMainWindow(void); 301 ConfigMainWindow(void);
229public slots: 302public slots:
230 void setHelp(QListViewItem* item);
231 void changeMenu(struct menu *); 303 void changeMenu(struct menu *);
304 void setMenuLink(struct menu *);
232 void listFocusChanged(void); 305 void listFocusChanged(void);
233 void goBack(void); 306 void goBack(void);
234 void loadConfig(void); 307 void loadConfig(void);
235 void saveConfig(void); 308 void saveConfig(void);
236 void saveConfigAs(void); 309 void saveConfigAs(void);
310 void searchConfig(void);
237 void showSingleView(void); 311 void showSingleView(void);
238 void showSplitView(void); 312 void showSplitView(void);
239 void showFullView(void); 313 void showFullView(void);
240 void setShowAll(bool);
241 void setShowDebug(bool);
242 void setShowRange(bool);
243 void setShowName(bool);
244 void setShowData(bool);
245 void showIntro(void); 314 void showIntro(void);
246 void showAbout(void); 315 void showAbout(void);
247 void saveSettings(void); 316 void saveSettings(void);
@@ -249,15 +318,14 @@ public slots:
249protected: 318protected:
250 void closeEvent(QCloseEvent *e); 319 void closeEvent(QCloseEvent *e);
251 320
321 ConfigSearchWindow *searchWindow;
252 ConfigView *menuView; 322 ConfigView *menuView;
253 ConfigList *menuList; 323 ConfigList *menuList;
254 ConfigView *configView; 324 ConfigView *configView;
255 ConfigList *configList; 325 ConfigList *configList;
256 QTextView *helpText; 326 ConfigInfoView *helpText;
257 QToolBar *toolBar; 327 QToolBar *toolBar;
258 QAction *backAction; 328 QAction *backAction;
259 QSplitter* split1; 329 QSplitter* split1;
260 QSplitter* split2; 330 QSplitter* split2;
261
262 bool showDebug;
263}; 331};
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index 3d7877afcc..ee225ced2c 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -15,15 +15,15 @@
15struct symbol symbol_yes = { 15struct symbol symbol_yes = {
16 .name = "y", 16 .name = "y",
17 .curr = { "y", yes }, 17 .curr = { "y", yes },
18 .flags = SYMBOL_YES|SYMBOL_VALID, 18 .flags = SYMBOL_CONST|SYMBOL_VALID,
19}, symbol_mod = { 19}, symbol_mod = {
20 .name = "m", 20 .name = "m",
21 .curr = { "m", mod }, 21 .curr = { "m", mod },
22 .flags = SYMBOL_MOD|SYMBOL_VALID, 22 .flags = SYMBOL_CONST|SYMBOL_VALID,
23}, symbol_no = { 23}, symbol_no = {
24 .name = "n", 24 .name = "n",
25 .curr = { "n", no }, 25 .curr = { "n", no },
26 .flags = SYMBOL_NO|SYMBOL_VALID, 26 .flags = SYMBOL_CONST|SYMBOL_VALID,
27}, symbol_empty = { 27}, symbol_empty = {
28 .name = "", 28 .name = "",
29 .curr = { "", no }, 29 .curr = { "", no },
@@ -31,6 +31,7 @@ struct symbol symbol_yes = {
31}; 31};
32 32
33int sym_change_count; 33int sym_change_count;
34struct symbol *sym_defconfig_list;
34struct symbol *modules_sym; 35struct symbol *modules_sym;
35tristate modules_val; 36tristate modules_val;
36 37
@@ -227,7 +228,7 @@ static struct symbol *sym_calc_choice(struct symbol *sym)
227 struct expr *e; 228 struct expr *e;
228 229
229 /* is the user choice visible? */ 230 /* is the user choice visible? */
230 def_sym = sym->user.val; 231 def_sym = sym->def[S_DEF_USER].val;
231 if (def_sym) { 232 if (def_sym) {
232 sym_calc_visibility(def_sym); 233 sym_calc_visibility(def_sym);
233 if (def_sym->visible != no) 234 if (def_sym->visible != no)
@@ -306,7 +307,7 @@ void sym_calc_value(struct symbol *sym)
306 } else if (E_OR(sym->visible, sym->rev_dep.tri) != no) { 307 } else if (E_OR(sym->visible, sym->rev_dep.tri) != no) {
307 sym->flags |= SYMBOL_WRITE; 308 sym->flags |= SYMBOL_WRITE;
308 if (sym_has_value(sym)) 309 if (sym_has_value(sym))
309 newval.tri = sym->user.tri; 310 newval.tri = sym->def[S_DEF_USER].tri;
310 else if (!sym_is_choice(sym)) { 311 else if (!sym_is_choice(sym)) {
311 prop = sym_get_default_prop(sym); 312 prop = sym_get_default_prop(sym);
312 if (prop) 313 if (prop)
@@ -329,7 +330,7 @@ void sym_calc_value(struct symbol *sym)
329 if (sym->visible != no) { 330 if (sym->visible != no) {
330 sym->flags |= SYMBOL_WRITE; 331 sym->flags |= SYMBOL_WRITE;
331 if (sym_has_value(sym)) { 332 if (sym_has_value(sym)) {
332 newval.val = sym->user.val; 333 newval.val = sym->def[S_DEF_USER].val;
333 break; 334 break;
334 } 335 }
335 } 336 }
@@ -352,10 +353,13 @@ void sym_calc_value(struct symbol *sym)
352 sym->curr.val = sym_calc_choice(sym); 353 sym->curr.val = sym_calc_choice(sym);
353 sym_validate_range(sym); 354 sym_validate_range(sym);
354 355
355 if (memcmp(&oldval, &sym->curr, sizeof(oldval))) 356 if (memcmp(&oldval, &sym->curr, sizeof(oldval))) {
356 sym_set_changed(sym); 357 sym_set_changed(sym);
357 if (modules_sym == sym) 358 if (modules_sym == sym) {
358 modules_val = modules_sym->curr.tri; 359 sym_set_all_changed();
360 modules_val = modules_sym->curr.tri;
361 }
362 }
359 363
360 if (sym_is_choice(sym)) { 364 if (sym_is_choice(sym)) {
361 int flags = sym->flags & (SYMBOL_CHANGED | SYMBOL_WRITE); 365 int flags = sym->flags & (SYMBOL_CHANGED | SYMBOL_WRITE);
@@ -426,8 +430,8 @@ bool sym_set_tristate_value(struct symbol *sym, tristate val)
426 if (oldval != val && !sym_tristate_within_range(sym, val)) 430 if (oldval != val && !sym_tristate_within_range(sym, val))
427 return false; 431 return false;
428 432
429 if (sym->flags & SYMBOL_NEW) { 433 if (!(sym->flags & SYMBOL_DEF_USER)) {
430 sym->flags &= ~SYMBOL_NEW; 434 sym->flags |= SYMBOL_DEF_USER;
431 sym_set_changed(sym); 435 sym_set_changed(sym);
432 } 436 }
433 /* 437 /*
@@ -439,21 +443,18 @@ bool sym_set_tristate_value(struct symbol *sym, tristate val)
439 struct property *prop; 443 struct property *prop;
440 struct expr *e; 444 struct expr *e;
441 445
442 cs->user.val = sym; 446 cs->def[S_DEF_USER].val = sym;
443 cs->flags &= ~SYMBOL_NEW; 447 cs->flags |= SYMBOL_DEF_USER;
444 prop = sym_get_choice_prop(cs); 448 prop = sym_get_choice_prop(cs);
445 for (e = prop->expr; e; e = e->left.expr) { 449 for (e = prop->expr; e; e = e->left.expr) {
446 if (e->right.sym->visible != no) 450 if (e->right.sym->visible != no)
447 e->right.sym->flags &= ~SYMBOL_NEW; 451 e->right.sym->flags |= SYMBOL_DEF_USER;
448 } 452 }
449 } 453 }
450 454
451 sym->user.tri = val; 455 sym->def[S_DEF_USER].tri = val;
452 if (oldval != val) { 456 if (oldval != val)
453 sym_clear_all_valid(); 457 sym_clear_all_valid();
454 if (sym == modules_sym)
455 sym_set_all_changed();
456 }
457 458
458 return true; 459 return true;
459} 460}
@@ -591,20 +592,20 @@ bool sym_set_string_value(struct symbol *sym, const char *newval)
591 if (!sym_string_within_range(sym, newval)) 592 if (!sym_string_within_range(sym, newval))
592 return false; 593 return false;
593 594
594 if (sym->flags & SYMBOL_NEW) { 595 if (!(sym->flags & SYMBOL_DEF_USER)) {
595 sym->flags &= ~SYMBOL_NEW; 596 sym->flags |= SYMBOL_DEF_USER;
596 sym_set_changed(sym); 597 sym_set_changed(sym);
597 } 598 }
598 599
599 oldval = sym->user.val; 600 oldval = sym->def[S_DEF_USER].val;
600 size = strlen(newval) + 1; 601 size = strlen(newval) + 1;
601 if (sym->type == S_HEX && (newval[0] != '0' || (newval[1] != 'x' && newval[1] != 'X'))) { 602 if (sym->type == S_HEX && (newval[0] != '0' || (newval[1] != 'x' && newval[1] != 'X'))) {
602 size += 2; 603 size += 2;
603 sym->user.val = val = malloc(size); 604 sym->def[S_DEF_USER].val = val = malloc(size);
604 *val++ = '0'; 605 *val++ = '0';
605 *val++ = 'x'; 606 *val++ = 'x';
606 } else if (!oldval || strcmp(oldval, newval)) 607 } else if (!oldval || strcmp(oldval, newval))
607 sym->user.val = val = malloc(size); 608 sym->def[S_DEF_USER].val = val = malloc(size);
608 else 609 else
609 return true; 610 return true;
610 611
@@ -679,7 +680,6 @@ struct symbol *sym_lookup(const char *name, int isconst)
679 memset(symbol, 0, sizeof(*symbol)); 680 memset(symbol, 0, sizeof(*symbol));
680 symbol->name = new_name; 681 symbol->name = new_name;
681 symbol->type = S_UNKNOWN; 682 symbol->type = S_UNKNOWN;
682 symbol->flags = SYMBOL_NEW;
683 if (isconst) 683 if (isconst)
684 symbol->flags |= SYMBOL_CONST; 684 symbol->flags |= SYMBOL_CONST;
685 685
diff --git a/scripts/kconfig/util.c b/scripts/kconfig/util.c
index 656d2c87d6..e3f28b9d59 100644
--- a/scripts/kconfig/util.c
+++ b/scripts/kconfig/util.c
@@ -44,7 +44,9 @@ int file_write_dep(const char *name)
44 else 44 else
45 fprintf(out, "\t%s\n", file->name); 45 fprintf(out, "\t%s\n", file->name);
46 } 46 }
47 fprintf(out, "\n.config include/linux/autoconf.h: $(deps_config)\n\n$(deps_config):\n"); 47 fprintf(out, "\ninclude/config/auto.conf: \\\n"
48 "\t$(deps_config)\n\n"
49 "$(deps_config): ;\n");
48 fclose(out); 50 fclose(out);
49 rename("..config.tmp", name); 51 rename("..config.tmp", name);
50 return 0; 52 return 0;
diff --git a/scripts/kconfig/zconf.gperf b/scripts/kconfig/zconf.gperf
index b03220600b..9b44c80dd8 100644
--- a/scripts/kconfig/zconf.gperf
+++ b/scripts/kconfig/zconf.gperf
@@ -39,5 +39,8 @@ string, T_TYPE, TF_COMMAND, S_STRING
39select, T_SELECT, TF_COMMAND 39select, T_SELECT, TF_COMMAND
40enable, T_SELECT, TF_COMMAND 40enable, T_SELECT, TF_COMMAND
41range, T_RANGE, TF_COMMAND 41range, T_RANGE, TF_COMMAND
42option, T_OPTION, TF_COMMAND
42on, T_ON, TF_PARAM 43on, T_ON, TF_PARAM
44modules, T_OPT_MODULES, TF_OPTION
45defconfig_list, T_OPT_DEFCONFIG_LIST,TF_OPTION
43%% 46%%
diff --git a/scripts/kconfig/zconf.hash.c_shipped b/scripts/kconfig/zconf.hash.c_shipped
index 345f0fc07c..47c8b5babf 100644
--- a/scripts/kconfig/zconf.hash.c_shipped
+++ b/scripts/kconfig/zconf.hash.c_shipped
@@ -53,10 +53,10 @@ kconf_id_hash (register const char *str, register unsigned int len)
53 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 53 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
54 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 54 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
55 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 55 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
56 47, 47, 47, 47, 47, 47, 47, 25, 10, 15, 56 47, 47, 47, 47, 47, 47, 47, 25, 30, 15,
57 0, 0, 5, 47, 0, 0, 47, 47, 0, 10, 57 0, 15, 0, 47, 5, 15, 47, 47, 30, 20,
58 0, 20, 20, 20, 5, 0, 0, 20, 47, 47, 58 5, 0, 25, 15, 0, 0, 10, 35, 47, 47,
59 20, 47, 47, 47, 47, 47, 47, 47, 47, 47, 59 5, 47, 47, 47, 47, 47, 47, 47, 47, 47,
60 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 60 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
61 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 61 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
62 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 62 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
@@ -88,69 +88,75 @@ kconf_id_hash (register const char *str, register unsigned int len)
88 88
89struct kconf_id_strings_t 89struct kconf_id_strings_t
90 { 90 {
91 char kconf_id_strings_str2[sizeof("if")]; 91 char kconf_id_strings_str2[sizeof("on")];
92 char kconf_id_strings_str3[sizeof("int")]; 92 char kconf_id_strings_str6[sizeof("string")];
93 char kconf_id_strings_str4[sizeof("help")]; 93 char kconf_id_strings_str7[sizeof("default")];
94 char kconf_id_strings_str5[sizeof("endif")]; 94 char kconf_id_strings_str8[sizeof("def_bool")];
95 char kconf_id_strings_str6[sizeof("select")];
96 char kconf_id_strings_str7[sizeof("endmenu")];
97 char kconf_id_strings_str8[sizeof("tristate")];
98 char kconf_id_strings_str9[sizeof("endchoice")];
99 char kconf_id_strings_str10[sizeof("range")]; 95 char kconf_id_strings_str10[sizeof("range")];
100 char kconf_id_strings_str11[sizeof("string")]; 96 char kconf_id_strings_str11[sizeof("def_boolean")];
101 char kconf_id_strings_str12[sizeof("default")]; 97 char kconf_id_strings_str12[sizeof("def_tristate")];
102 char kconf_id_strings_str13[sizeof("def_bool")]; 98 char kconf_id_strings_str13[sizeof("hex")];
103 char kconf_id_strings_str14[sizeof("menu")]; 99 char kconf_id_strings_str14[sizeof("defconfig_list")];
104 char kconf_id_strings_str16[sizeof("def_boolean")]; 100 char kconf_id_strings_str16[sizeof("option")];
105 char kconf_id_strings_str17[sizeof("def_tristate")]; 101 char kconf_id_strings_str17[sizeof("if")];
106 char kconf_id_strings_str18[sizeof("mainmenu")]; 102 char kconf_id_strings_str18[sizeof("optional")];
107 char kconf_id_strings_str20[sizeof("menuconfig")]; 103 char kconf_id_strings_str20[sizeof("endif")];
108 char kconf_id_strings_str21[sizeof("config")]; 104 char kconf_id_strings_str21[sizeof("choice")];
109 char kconf_id_strings_str22[sizeof("on")]; 105 char kconf_id_strings_str22[sizeof("endmenu")];
110 char kconf_id_strings_str23[sizeof("hex")]; 106 char kconf_id_strings_str23[sizeof("requires")];
111 char kconf_id_strings_str26[sizeof("source")]; 107 char kconf_id_strings_str24[sizeof("endchoice")];
112 char kconf_id_strings_str27[sizeof("depends")]; 108 char kconf_id_strings_str26[sizeof("config")];
113 char kconf_id_strings_str28[sizeof("optional")]; 109 char kconf_id_strings_str27[sizeof("modules")];
114 char kconf_id_strings_str31[sizeof("enable")]; 110 char kconf_id_strings_str28[sizeof("int")];
115 char kconf_id_strings_str32[sizeof("comment")]; 111 char kconf_id_strings_str29[sizeof("menu")];
116 char kconf_id_strings_str33[sizeof("requires")]; 112 char kconf_id_strings_str31[sizeof("prompt")];
113 char kconf_id_strings_str32[sizeof("depends")];
114 char kconf_id_strings_str33[sizeof("tristate")];
117 char kconf_id_strings_str34[sizeof("bool")]; 115 char kconf_id_strings_str34[sizeof("bool")];
116 char kconf_id_strings_str35[sizeof("menuconfig")];
117 char kconf_id_strings_str36[sizeof("select")];
118 char kconf_id_strings_str37[sizeof("boolean")]; 118 char kconf_id_strings_str37[sizeof("boolean")];
119 char kconf_id_strings_str41[sizeof("choice")]; 119 char kconf_id_strings_str39[sizeof("help")];
120 char kconf_id_strings_str46[sizeof("prompt")]; 120 char kconf_id_strings_str41[sizeof("source")];
121 char kconf_id_strings_str42[sizeof("comment")];
122 char kconf_id_strings_str43[sizeof("mainmenu")];
123 char kconf_id_strings_str46[sizeof("enable")];
121 }; 124 };
122static struct kconf_id_strings_t kconf_id_strings_contents = 125static struct kconf_id_strings_t kconf_id_strings_contents =
123 { 126 {
124 "if", 127 "on",
125 "int",
126 "help",
127 "endif",
128 "select",
129 "endmenu",
130 "tristate",
131 "endchoice",
132 "range",
133 "string", 128 "string",
134 "default", 129 "default",
135 "def_bool", 130 "def_bool",
136 "menu", 131 "range",
137 "def_boolean", 132 "def_boolean",
138 "def_tristate", 133 "def_tristate",
139 "mainmenu",
140 "menuconfig",
141 "config",
142 "on",
143 "hex", 134 "hex",
144 "source", 135 "defconfig_list",
145 "depends", 136 "option",
137 "if",
146 "optional", 138 "optional",
147 "enable", 139 "endif",
148 "comment", 140 "choice",
141 "endmenu",
149 "requires", 142 "requires",
143 "endchoice",
144 "config",
145 "modules",
146 "int",
147 "menu",
148 "prompt",
149 "depends",
150 "tristate",
150 "bool", 151 "bool",
152 "menuconfig",
153 "select",
151 "boolean", 154 "boolean",
152 "choice", 155 "help",
153 "prompt" 156 "source",
157 "comment",
158 "mainmenu",
159 "enable"
154 }; 160 };
155#define kconf_id_strings ((const char *) &kconf_id_strings_contents) 161#define kconf_id_strings ((const char *) &kconf_id_strings_contents)
156#ifdef __GNUC__ 162#ifdef __GNUC__
@@ -161,9 +167,9 @@ kconf_id_lookup (register const char *str, register unsigned int len)
161{ 167{
162 enum 168 enum
163 { 169 {
164 TOTAL_KEYWORDS = 30, 170 TOTAL_KEYWORDS = 33,
165 MIN_WORD_LENGTH = 2, 171 MIN_WORD_LENGTH = 2,
166 MAX_WORD_LENGTH = 12, 172 MAX_WORD_LENGTH = 14,
167 MIN_HASH_VALUE = 2, 173 MIN_HASH_VALUE = 2,
168 MAX_HASH_VALUE = 46 174 MAX_HASH_VALUE = 46
169 }; 175 };
@@ -171,43 +177,48 @@ kconf_id_lookup (register const char *str, register unsigned int len)
171 static struct kconf_id wordlist[] = 177 static struct kconf_id wordlist[] =
172 { 178 {
173 {-1}, {-1}, 179 {-1}, {-1},
174 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str2, T_IF, TF_COMMAND|TF_PARAM}, 180 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str2, T_ON, TF_PARAM},
175 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str3, T_TYPE, TF_COMMAND, S_INT}, 181 {-1}, {-1}, {-1},
176 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str4, T_HELP, TF_COMMAND}, 182 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str6, T_TYPE, TF_COMMAND, S_STRING},
177 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str5, T_ENDIF, TF_COMMAND}, 183 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str7, T_DEFAULT, TF_COMMAND, S_UNKNOWN},
178 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str6, T_SELECT, TF_COMMAND}, 184 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str8, T_DEFAULT, TF_COMMAND, S_BOOLEAN},
179 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str7, T_ENDMENU, TF_COMMAND}, 185 {-1},
180 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str8, T_TYPE, TF_COMMAND, S_TRISTATE},
181 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str9, T_ENDCHOICE, TF_COMMAND},
182 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str10, T_RANGE, TF_COMMAND}, 186 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str10, T_RANGE, TF_COMMAND},
183 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str11, T_TYPE, TF_COMMAND, S_STRING}, 187 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str11, T_DEFAULT, TF_COMMAND, S_BOOLEAN},
184 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str12, T_DEFAULT, TF_COMMAND, S_UNKNOWN}, 188 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str12, T_DEFAULT, TF_COMMAND, S_TRISTATE},
185 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str13, T_DEFAULT, TF_COMMAND, S_BOOLEAN}, 189 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str13, T_TYPE, TF_COMMAND, S_HEX},
186 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str14, T_MENU, TF_COMMAND}, 190 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str14, T_OPT_DEFCONFIG_LIST,TF_OPTION},
187 {-1}, 191 {-1},
188 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str16, T_DEFAULT, TF_COMMAND, S_BOOLEAN}, 192 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str16, T_OPTION, TF_COMMAND},
189 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str17, T_DEFAULT, TF_COMMAND, S_TRISTATE}, 193 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str17, T_IF, TF_COMMAND|TF_PARAM},
190 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str18, T_MAINMENU, TF_COMMAND}, 194 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str18, T_OPTIONAL, TF_COMMAND},
191 {-1}, 195 {-1},
192 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str20, T_MENUCONFIG, TF_COMMAND}, 196 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str20, T_ENDIF, TF_COMMAND},
193 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str21, T_CONFIG, TF_COMMAND}, 197 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str21, T_CHOICE, TF_COMMAND},
194 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str22, T_ON, TF_PARAM}, 198 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str22, T_ENDMENU, TF_COMMAND},
195 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str23, T_TYPE, TF_COMMAND, S_HEX}, 199 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str23, T_REQUIRES, TF_COMMAND},
196 {-1}, {-1}, 200 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str24, T_ENDCHOICE, TF_COMMAND},
197 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str26, T_SOURCE, TF_COMMAND}, 201 {-1},
198 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str27, T_DEPENDS, TF_COMMAND}, 202 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str26, T_CONFIG, TF_COMMAND},
199 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str28, T_OPTIONAL, TF_COMMAND}, 203 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str27, T_OPT_MODULES, TF_OPTION},
200 {-1}, {-1}, 204 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str28, T_TYPE, TF_COMMAND, S_INT},
201 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str31, T_SELECT, TF_COMMAND}, 205 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str29, T_MENU, TF_COMMAND},
202 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str32, T_COMMENT, TF_COMMAND}, 206 {-1},
203 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str33, T_REQUIRES, TF_COMMAND}, 207 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str31, T_PROMPT, TF_COMMAND},
208 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str32, T_DEPENDS, TF_COMMAND},
209 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str33, T_TYPE, TF_COMMAND, S_TRISTATE},
204 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str34, T_TYPE, TF_COMMAND, S_BOOLEAN}, 210 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str34, T_TYPE, TF_COMMAND, S_BOOLEAN},
205 {-1}, {-1}, 211 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str35, T_MENUCONFIG, TF_COMMAND},
212 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str36, T_SELECT, TF_COMMAND},
206 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str37, T_TYPE, TF_COMMAND, S_BOOLEAN}, 213 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str37, T_TYPE, TF_COMMAND, S_BOOLEAN},
207 {-1}, {-1}, {-1}, 214 {-1},
208 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str41, T_CHOICE, TF_COMMAND}, 215 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str39, T_HELP, TF_COMMAND},
209 {-1}, {-1}, {-1}, {-1}, 216 {-1},
210 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str46, T_PROMPT, TF_COMMAND} 217 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str41, T_SOURCE, TF_COMMAND},
218 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str42, T_COMMENT, TF_COMMAND},
219 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str43, T_MAINMENU, TF_COMMAND},
220 {-1}, {-1},
221 {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str46, T_SELECT, TF_COMMAND}
211 }; 222 };
212 223
213 if (len <= MAX_WORD_LENGTH && len >= MIN_WORD_LENGTH) 224 if (len <= MAX_WORD_LENGTH && len >= MIN_WORD_LENGTH)
diff --git a/scripts/kconfig/zconf.tab.c_shipped b/scripts/kconfig/zconf.tab.c_shipped
index ea7755da82..2fb0a4fc61 100644
--- a/scripts/kconfig/zconf.tab.c_shipped
+++ b/scripts/kconfig/zconf.tab.c_shipped
@@ -1,7 +1,7 @@
1/* A Bison parser, made by GNU Bison 2.0. */ 1/* A Bison parser, made by GNU Bison 2.1. */
2 2
3/* Skeleton parser for Yacc-like parsing with Bison, 3/* Skeleton parser for Yacc-like parsing with Bison,
4 Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. 4 Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
5 5
6 This program is free software; you can redistribute it and/or modify 6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by 7 it under the terms of the GNU General Public License as published by
@@ -15,8 +15,8 @@
15 15
16 You should have received a copy of the GNU General Public License 16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software 17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place - Suite 330, 18 Foundation, Inc., 51 Franklin Street, Fifth Floor,
19 Boston, MA 02111-1307, USA. */ 19 Boston, MA 02110-1301, USA. */
20 20
21/* As a special exception, when this file is copied by Bison into a 21/* As a special exception, when this file is copied by Bison into a
22 Bison output file, you may use that output file without restriction. 22 Bison output file, you may use that output file without restriction.
@@ -36,6 +36,9 @@
36/* Identify Bison output. */ 36/* Identify Bison output. */
37#define YYBISON 1 37#define YYBISON 1
38 38
39/* Bison version. */
40#define YYBISON_VERSION "2.1"
41
39/* Skeleton name. */ 42/* Skeleton name. */
40#define YYSKELETON_NAME "yacc.c" 43#define YYSKELETON_NAME "yacc.c"
41 44
@@ -82,19 +85,21 @@
82 T_DEFAULT = 276, 85 T_DEFAULT = 276,
83 T_SELECT = 277, 86 T_SELECT = 277,
84 T_RANGE = 278, 87 T_RANGE = 278,
85 T_ON = 279, 88 T_OPTION = 279,
86 T_WORD = 280, 89 T_ON = 280,
87 T_WORD_QUOTE = 281, 90 T_WORD = 281,
88 T_UNEQUAL = 282, 91 T_WORD_QUOTE = 282,
89 T_CLOSE_PAREN = 283, 92 T_UNEQUAL = 283,
90 T_OPEN_PAREN = 284, 93 T_CLOSE_PAREN = 284,
91 T_EOL = 285, 94 T_OPEN_PAREN = 285,
92 T_OR = 286, 95 T_EOL = 286,
93 T_AND = 287, 96 T_OR = 287,
94 T_EQUAL = 288, 97 T_AND = 288,
95 T_NOT = 289 98 T_EQUAL = 289,
99 T_NOT = 290
96 }; 100 };
97#endif 101#endif
102/* Tokens. */
98#define T_MAINMENU 258 103#define T_MAINMENU 258
99#define T_MENU 259 104#define T_MENU 259
100#define T_ENDMENU 260 105#define T_ENDMENU 260
@@ -116,17 +121,18 @@
116#define T_DEFAULT 276 121#define T_DEFAULT 276
117#define T_SELECT 277 122#define T_SELECT 277
118#define T_RANGE 278 123#define T_RANGE 278
119#define T_ON 279 124#define T_OPTION 279
120#define T_WORD 280 125#define T_ON 280
121#define T_WORD_QUOTE 281 126#define T_WORD 281
122#define T_UNEQUAL 282 127#define T_WORD_QUOTE 282
123#define T_CLOSE_PAREN 283 128#define T_UNEQUAL 283
124#define T_OPEN_PAREN 284 129#define T_CLOSE_PAREN 284
125#define T_EOL 285 130#define T_OPEN_PAREN 285
126#define T_OR 286 131#define T_EOL 286
127#define T_AND 287 132#define T_OR 287
128#define T_EQUAL 288 133#define T_AND 288
129#define T_NOT 289 134#define T_EQUAL 289
135#define T_NOT 290
130 136
131 137
132 138
@@ -187,6 +193,11 @@ static struct menu *current_menu, *current_entry;
187# define YYERROR_VERBOSE 0 193# define YYERROR_VERBOSE 0
188#endif 194#endif
189 195
196/* Enabling the token table. */
197#ifndef YYTOKEN_TABLE
198# define YYTOKEN_TABLE 0
199#endif
200
190#if ! defined (YYSTYPE) && ! defined (YYSTYPE_IS_DECLARED) 201#if ! defined (YYSTYPE) && ! defined (YYSTYPE_IS_DECLARED)
191 202
192typedef union YYSTYPE { 203typedef union YYSTYPE {
@@ -197,7 +208,7 @@ typedef union YYSTYPE {
197 struct menu *menu; 208 struct menu *menu;
198 struct kconf_id *id; 209 struct kconf_id *id;
199} YYSTYPE; 210} YYSTYPE;
200/* Line 190 of yacc.c. */ 211/* Line 196 of yacc.c. */
201 212
202# define yystype YYSTYPE /* obsolescent; will be withdrawn */ 213# define yystype YYSTYPE /* obsolescent; will be withdrawn */
203# define YYSTYPE_IS_DECLARED 1 214# define YYSTYPE_IS_DECLARED 1
@@ -209,17 +220,36 @@ typedef union YYSTYPE {
209/* Copy the second part of user declarations. */ 220/* Copy the second part of user declarations. */
210 221
211 222
212/* Line 213 of yacc.c. */ 223/* Line 219 of yacc.c. */
213 224
214 225
215#if ! defined (yyoverflow) || YYERROR_VERBOSE 226#if ! defined (YYSIZE_T) && defined (__SIZE_TYPE__)
227# define YYSIZE_T __SIZE_TYPE__
228#endif
229#if ! defined (YYSIZE_T) && defined (size_t)
230# define YYSIZE_T size_t
231#endif
232#if ! defined (YYSIZE_T) && (defined (__STDC__) || defined (__cplusplus))
233# include <stddef.h> /* INFRINGES ON USER NAME SPACE */
234# define YYSIZE_T size_t
235#endif
236#if ! defined (YYSIZE_T)
237# define YYSIZE_T unsigned int
238#endif
216 239
217# ifndef YYFREE 240#ifndef YY_
218# define YYFREE free 241# if YYENABLE_NLS
242# if ENABLE_NLS
243# include <libintl.h> /* INFRINGES ON USER NAME SPACE */
244# define YY_(msgid) dgettext ("bison-runtime", msgid)
245# endif
219# endif 246# endif
220# ifndef YYMALLOC 247# ifndef YY_
221# define YYMALLOC malloc 248# define YY_(msgid) msgid
222# endif 249# endif
250#endif
251
252#if ! defined (yyoverflow) || YYERROR_VERBOSE
223 253
224/* The parser invokes alloca or malloc; define the necessary symbols. */ 254/* The parser invokes alloca or malloc; define the necessary symbols. */
225 255
@@ -229,6 +259,10 @@ typedef union YYSTYPE {
229# define YYSTACK_ALLOC __builtin_alloca 259# define YYSTACK_ALLOC __builtin_alloca
230# else 260# else
231# define YYSTACK_ALLOC alloca 261# define YYSTACK_ALLOC alloca
262# if defined (__STDC__) || defined (__cplusplus)
263# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
264# define YYINCLUDED_STDLIB_H
265# endif
232# endif 266# endif
233# endif 267# endif
234# endif 268# endif
@@ -236,13 +270,39 @@ typedef union YYSTYPE {
236# ifdef YYSTACK_ALLOC 270# ifdef YYSTACK_ALLOC
237 /* Pacify GCC's `empty if-body' warning. */ 271 /* Pacify GCC's `empty if-body' warning. */
238# define YYSTACK_FREE(Ptr) do { /* empty */; } while (0) 272# define YYSTACK_FREE(Ptr) do { /* empty */; } while (0)
239# else 273# ifndef YYSTACK_ALLOC_MAXIMUM
240# if defined (__STDC__) || defined (__cplusplus) 274 /* The OS might guarantee only one guard page at the bottom of the stack,
241# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */ 275 and a page size can be as small as 4096 bytes. So we cannot safely
242# define YYSIZE_T size_t 276 invoke alloca (N) if N exceeds 4096. Use a slightly smaller number
277 to allow for a few compiler-allocated temporary stack slots. */
278# define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2005 */
243# endif 279# endif
280# else
244# define YYSTACK_ALLOC YYMALLOC 281# define YYSTACK_ALLOC YYMALLOC
245# define YYSTACK_FREE YYFREE 282# define YYSTACK_FREE YYFREE
283# ifndef YYSTACK_ALLOC_MAXIMUM
284# define YYSTACK_ALLOC_MAXIMUM ((YYSIZE_T) -1)
285# endif
286# ifdef __cplusplus
287extern "C" {
288# endif
289# ifndef YYMALLOC
290# define YYMALLOC malloc
291# if (! defined (malloc) && ! defined (YYINCLUDED_STDLIB_H) \
292 && (defined (__STDC__) || defined (__cplusplus)))
293void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
294# endif
295# endif
296# ifndef YYFREE
297# define YYFREE free
298# if (! defined (free) && ! defined (YYINCLUDED_STDLIB_H) \
299 && (defined (__STDC__) || defined (__cplusplus)))
300void free (void *); /* INFRINGES ON USER NAME SPACE */
301# endif
302# endif
303# ifdef __cplusplus
304}
305# endif
246# endif 306# endif
247#endif /* ! defined (yyoverflow) || YYERROR_VERBOSE */ 307#endif /* ! defined (yyoverflow) || YYERROR_VERBOSE */
248 308
@@ -277,7 +337,7 @@ union yyalloc
277# define YYCOPY(To, From, Count) \ 337# define YYCOPY(To, From, Count) \
278 do \ 338 do \
279 { \ 339 { \
280 register YYSIZE_T yyi; \ 340 YYSIZE_T yyi; \
281 for (yyi = 0; yyi < (Count); yyi++) \ 341 for (yyi = 0; yyi < (Count); yyi++) \
282 (To)[yyi] = (From)[yyi]; \ 342 (To)[yyi] = (From)[yyi]; \
283 } \ 343 } \
@@ -312,22 +372,22 @@ union yyalloc
312/* YYFINAL -- State number of the termination state. */ 372/* YYFINAL -- State number of the termination state. */
313#define YYFINAL 3 373#define YYFINAL 3
314/* YYLAST -- Last index in YYTABLE. */ 374/* YYLAST -- Last index in YYTABLE. */
315#define YYLAST 264 375#define YYLAST 275
316 376
317/* YYNTOKENS -- Number of terminals. */ 377/* YYNTOKENS -- Number of terminals. */
318#define YYNTOKENS 35 378#define YYNTOKENS 36
319/* YYNNTS -- Number of nonterminals. */ 379/* YYNNTS -- Number of nonterminals. */
320#define YYNNTS 42 380#define YYNNTS 45
321/* YYNRULES -- Number of rules. */ 381/* YYNRULES -- Number of rules. */
322#define YYNRULES 104 382#define YYNRULES 110
323/* YYNRULES -- Number of states. */ 383/* YYNRULES -- Number of states. */
324#define YYNSTATES 175 384#define YYNSTATES 183
325 385
326/* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX. */ 386/* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX. */
327#define YYUNDEFTOK 2 387#define YYUNDEFTOK 2
328#define YYMAXUTOK 289 388#define YYMAXUTOK 290
329 389
330#define YYTRANSLATE(YYX) \ 390#define YYTRANSLATE(YYX) \
331 ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK) 391 ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
332 392
333/* YYTRANSLATE[YYLEX] -- Bison symbol number corresponding to YYLEX. */ 393/* YYTRANSLATE[YYLEX] -- Bison symbol number corresponding to YYLEX. */
@@ -361,7 +421,8 @@ static const unsigned char yytranslate[] =
361 2, 2, 2, 2, 2, 2, 1, 2, 3, 4, 421 2, 2, 2, 2, 2, 2, 1, 2, 3, 4,
362 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 422 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
363 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 423 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
364 25, 26, 27, 28, 29, 30, 31, 32, 33, 34 424 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
425 35
365}; 426};
366 427
367#if YYDEBUG 428#if YYDEBUG
@@ -372,72 +433,75 @@ static const unsigned short int yyprhs[] =
372 0, 0, 3, 5, 6, 9, 12, 15, 20, 23, 433 0, 0, 3, 5, 6, 9, 12, 15, 20, 23,
373 28, 33, 37, 39, 41, 43, 45, 47, 49, 51, 434 28, 33, 37, 39, 41, 43, 45, 47, 49, 51,
374 53, 55, 57, 59, 61, 63, 67, 70, 74, 77, 435 53, 55, 57, 59, 61, 63, 67, 70, 74, 77,
375 81, 84, 85, 88, 91, 94, 97, 100, 104, 109, 436 81, 84, 85, 88, 91, 94, 97, 100, 103, 107,
376 114, 119, 125, 128, 131, 133, 137, 138, 141, 144, 437 112, 117, 122, 128, 132, 133, 137, 138, 141, 144,
377 147, 150, 153, 158, 162, 165, 170, 171, 174, 178, 438 147, 149, 153, 154, 157, 160, 163, 166, 169, 174,
378 180, 184, 185, 188, 191, 194, 198, 201, 203, 207, 439 178, 181, 186, 187, 190, 194, 196, 200, 201, 204,
379 208, 211, 214, 217, 221, 225, 228, 231, 234, 235, 440 207, 210, 214, 217, 219, 223, 224, 227, 230, 233,
380 238, 241, 244, 249, 253, 257, 258, 261, 263, 265, 441 237, 241, 244, 247, 250, 251, 254, 257, 260, 265,
381 268, 271, 274, 276, 279, 280, 283, 285, 289, 293, 442 269, 273, 274, 277, 279, 281, 284, 287, 290, 292,
382 297, 300, 304, 308, 310 443 295, 296, 299, 301, 305, 309, 313, 316, 320, 324,
444 326
383}; 445};
384 446
385/* YYRHS -- A `-1'-separated list of the rules' RHS. */ 447/* YYRHS -- A `-1'-separated list of the rules' RHS. */
386static const yysigned_char yyrhs[] = 448static const yysigned_char yyrhs[] =
387{ 449{
388 36, 0, -1, 37, -1, -1, 37, 39, -1, 37, 450 37, 0, -1, 38, -1, -1, 38, 40, -1, 38,
389 50, -1, 37, 61, -1, 37, 3, 71, 73, -1, 451 54, -1, 38, 65, -1, 38, 3, 75, 77, -1,
390 37, 72, -1, 37, 25, 1, 30, -1, 37, 38, 452 38, 76, -1, 38, 26, 1, 31, -1, 38, 39,
391 1, 30, -1, 37, 1, 30, -1, 16, -1, 19, 453 1, 31, -1, 38, 1, 31, -1, 16, -1, 19,
392 -1, 20, -1, 22, -1, 18, -1, 23, -1, 21, 454 -1, 20, -1, 22, -1, 18, -1, 23, -1, 21,
393 -1, 30, -1, 56, -1, 65, -1, 42, -1, 44, 455 -1, 31, -1, 60, -1, 69, -1, 43, -1, 45,
394 -1, 63, -1, 25, 1, 30, -1, 1, 30, -1, 456 -1, 67, -1, 26, 1, 31, -1, 1, 31, -1,
395 10, 25, 30, -1, 41, 45, -1, 11, 25, 30, 457 10, 26, 31, -1, 42, 46, -1, 11, 26, 31,
396 -1, 43, 45, -1, -1, 45, 46, -1, 45, 69, 458 -1, 44, 46, -1, -1, 46, 47, -1, 46, 48,
397 -1, 45, 67, -1, 45, 40, -1, 45, 30, -1, 459 -1, 46, 73, -1, 46, 71, -1, 46, 41, -1,
398 20, 70, 30, -1, 19, 71, 74, 30, -1, 21, 460 46, 31, -1, 20, 74, 31, -1, 19, 75, 78,
399 75, 74, 30, -1, 22, 25, 74, 30, -1, 23, 461 31, -1, 21, 79, 78, 31, -1, 22, 26, 78,
400 76, 76, 74, 30, -1, 7, 30, -1, 47, 51, 462 31, -1, 23, 80, 80, 78, 31, -1, 24, 49,
401 -1, 72, -1, 48, 53, 49, -1, -1, 51, 52, 463 31, -1, -1, 49, 26, 50, -1, -1, 34, 75,
402 -1, 51, 69, -1, 51, 67, -1, 51, 30, -1, 464 -1, 7, 31, -1, 51, 55, -1, 76, -1, 52,
403 51, 40, -1, 19, 71, 74, 30, -1, 20, 70, 465 57, 53, -1, -1, 55, 56, -1, 55, 73, -1,
404 30, -1, 18, 30, -1, 21, 25, 74, 30, -1, 466 55, 71, -1, 55, 31, -1, 55, 41, -1, 19,
405 -1, 53, 39, -1, 14, 75, 73, -1, 72, -1, 467 75, 78, 31, -1, 20, 74, 31, -1, 18, 31,
406 54, 57, 55, -1, -1, 57, 39, -1, 57, 61, 468 -1, 21, 26, 78, 31, -1, -1, 57, 40, -1,
407 -1, 57, 50, -1, 4, 71, 30, -1, 58, 68, 469 14, 79, 77, -1, 76, -1, 58, 61, 59, -1,
408 -1, 72, -1, 59, 62, 60, -1, -1, 62, 39, 470 -1, 61, 40, -1, 61, 65, -1, 61, 54, -1,
409 -1, 62, 61, -1, 62, 50, -1, 6, 71, 30, 471 4, 75, 31, -1, 62, 72, -1, 76, -1, 63,
410 -1, 9, 71, 30, -1, 64, 68, -1, 12, 30, 472 66, 64, -1, -1, 66, 40, -1, 66, 65, -1,
411 -1, 66, 13, -1, -1, 68, 69, -1, 68, 30, 473 66, 54, -1, 6, 75, 31, -1, 9, 75, 31,
412 -1, 68, 40, -1, 16, 24, 75, 30, -1, 16, 474 -1, 68, 72, -1, 12, 31, -1, 70, 13, -1,
413 75, 30, -1, 17, 75, 30, -1, -1, 71, 74, 475 -1, 72, 73, -1, 72, 31, -1, 72, 41, -1,
414 -1, 25, -1, 26, -1, 5, 30, -1, 8, 30, 476 16, 25, 79, 31, -1, 16, 79, 31, -1, 17,
415 -1, 15, 30, -1, 30, -1, 73, 30, -1, -1, 477 79, 31, -1, -1, 75, 78, -1, 26, -1, 27,
416 14, 75, -1, 76, -1, 76, 33, 76, -1, 76, 478 -1, 5, 31, -1, 8, 31, -1, 15, 31, -1,
417 27, 76, -1, 29, 75, 28, -1, 34, 75, -1, 479 31, -1, 77, 31, -1, -1, 14, 79, -1, 80,
418 75, 31, 75, -1, 75, 32, 75, -1, 25, -1, 480 -1, 80, 34, 80, -1, 80, 28, 80, -1, 30,
419 26, -1 481 79, 29, -1, 35, 79, -1, 79, 32, 79, -1,
482 79, 33, 79, -1, 26, -1, 27, -1
420}; 483};
421 484
422/* YYRLINE[YYN] -- source line where rule number YYN was defined. */ 485/* YYRLINE[YYN] -- source line where rule number YYN was defined. */
423static const unsigned short int yyrline[] = 486static const unsigned short int yyrline[] =
424{ 487{
425 0, 103, 103, 105, 107, 108, 109, 110, 111, 112, 488 0, 105, 105, 107, 109, 110, 111, 112, 113, 114,
426 113, 117, 121, 121, 121, 121, 121, 121, 121, 125, 489 115, 119, 123, 123, 123, 123, 123, 123, 123, 127,
427 126, 127, 128, 129, 130, 134, 135, 141, 149, 155, 490 128, 129, 130, 131, 132, 136, 137, 143, 151, 157,
428 163, 173, 175, 176, 177, 178, 179, 182, 190, 196, 491 165, 175, 177, 178, 179, 180, 181, 182, 185, 193,
429 206, 212, 220, 229, 234, 242, 245, 247, 248, 249, 492 199, 209, 215, 221, 224, 226, 237, 238, 243, 252,
430 250, 251, 254, 260, 271, 277, 287, 289, 294, 302, 493 257, 265, 268, 270, 271, 272, 273, 274, 277, 283,
431 310, 313, 315, 316, 317, 322, 329, 334, 342, 345, 494 294, 300, 310, 312, 317, 325, 333, 336, 338, 339,
432 347, 348, 349, 352, 360, 367, 374, 380, 387, 389, 495 340, 345, 352, 357, 365, 368, 370, 371, 372, 375,
433 390, 391, 394, 399, 404, 412, 414, 419, 420, 423, 496 383, 390, 397, 403, 410, 412, 413, 414, 417, 422,
434 424, 425, 429, 430, 433, 434, 437, 438, 439, 440, 497 427, 435, 437, 442, 443, 446, 447, 448, 452, 453,
435 441, 442, 443, 446, 447 498 456, 457, 460, 461, 462, 463, 464, 465, 466, 469,
499 470
436}; 500};
437#endif 501#endif
438 502
439#if YYDEBUG || YYERROR_VERBOSE 503#if YYDEBUG || YYERROR_VERBOSE || YYTOKEN_TABLE
440/* YYTNME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM. 504/* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM.
441 First, the terminals, then, starting at YYNTOKENS, nonterminals. */ 505 First, the terminals, then, starting at YYNTOKENS, nonterminals. */
442static const char *const yytname[] = 506static const char *const yytname[] =
443{ 507{
@@ -445,17 +509,18 @@ static const char *const yytname[] =
445 "T_SOURCE", "T_CHOICE", "T_ENDCHOICE", "T_COMMENT", "T_CONFIG", 509 "T_SOURCE", "T_CHOICE", "T_ENDCHOICE", "T_COMMENT", "T_CONFIG",
446 "T_MENUCONFIG", "T_HELP", "T_HELPTEXT", "T_IF", "T_ENDIF", "T_DEPENDS", 510 "T_MENUCONFIG", "T_HELP", "T_HELPTEXT", "T_IF", "T_ENDIF", "T_DEPENDS",
447 "T_REQUIRES", "T_OPTIONAL", "T_PROMPT", "T_TYPE", "T_DEFAULT", 511 "T_REQUIRES", "T_OPTIONAL", "T_PROMPT", "T_TYPE", "T_DEFAULT",
448 "T_SELECT", "T_RANGE", "T_ON", "T_WORD", "T_WORD_QUOTE", "T_UNEQUAL", 512 "T_SELECT", "T_RANGE", "T_OPTION", "T_ON", "T_WORD", "T_WORD_QUOTE",
449 "T_CLOSE_PAREN", "T_OPEN_PAREN", "T_EOL", "T_OR", "T_AND", "T_EQUAL", 513 "T_UNEQUAL", "T_CLOSE_PAREN", "T_OPEN_PAREN", "T_EOL", "T_OR", "T_AND",
450 "T_NOT", "$accept", "input", "stmt_list", "option_name", "common_stmt", 514 "T_EQUAL", "T_NOT", "$accept", "input", "stmt_list", "option_name",
451 "option_error", "config_entry_start", "config_stmt", 515 "common_stmt", "option_error", "config_entry_start", "config_stmt",
452 "menuconfig_entry_start", "menuconfig_stmt", "config_option_list", 516 "menuconfig_entry_start", "menuconfig_stmt", "config_option_list",
453 "config_option", "choice", "choice_entry", "choice_end", "choice_stmt", 517 "config_option", "symbol_option", "symbol_option_list",
454 "choice_option_list", "choice_option", "choice_block", "if_entry", 518 "symbol_option_arg", "choice", "choice_entry", "choice_end",
455 "if_end", "if_stmt", "if_block", "menu", "menu_entry", "menu_end", 519 "choice_stmt", "choice_option_list", "choice_option", "choice_block",
456 "menu_stmt", "menu_block", "source_stmt", "comment", "comment_stmt", 520 "if_entry", "if_end", "if_stmt", "if_block", "menu", "menu_entry",
457 "help_start", "help", "depends_list", "depends", "prompt_stmt_opt", 521 "menu_end", "menu_stmt", "menu_block", "source_stmt", "comment",
458 "prompt", "end", "nl", "if_expr", "expr", "symbol", 0 522 "comment_stmt", "help_start", "help", "depends_list", "depends",
523 "prompt_stmt_opt", "prompt", "end", "nl", "if_expr", "expr", "symbol", 0
459}; 524};
460#endif 525#endif
461 526
@@ -467,24 +532,25 @@ static const unsigned short int yytoknum[] =
467 0, 256, 257, 258, 259, 260, 261, 262, 263, 264, 532 0, 256, 257, 258, 259, 260, 261, 262, 263, 264,
468 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 533 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
469 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 534 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
470 285, 286, 287, 288, 289 535 285, 286, 287, 288, 289, 290
471}; 536};
472# endif 537# endif
473 538
474/* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */ 539/* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */
475static const unsigned char yyr1[] = 540static const unsigned char yyr1[] =
476{ 541{
477 0, 35, 36, 37, 37, 37, 37, 37, 37, 37, 542 0, 36, 37, 38, 38, 38, 38, 38, 38, 38,
478 37, 37, 38, 38, 38, 38, 38, 38, 38, 39, 543 38, 38, 39, 39, 39, 39, 39, 39, 39, 40,
479 39, 39, 39, 39, 39, 40, 40, 41, 42, 43, 544 40, 40, 40, 40, 40, 41, 41, 42, 43, 44,
480 44, 45, 45, 45, 45, 45, 45, 46, 46, 46, 545 45, 46, 46, 46, 46, 46, 46, 46, 47, 47,
481 46, 46, 47, 48, 49, 50, 51, 51, 51, 51, 546 47, 47, 47, 48, 49, 49, 50, 50, 51, 52,
482 51, 51, 52, 52, 52, 52, 53, 53, 54, 55, 547 53, 54, 55, 55, 55, 55, 55, 55, 56, 56,
483 56, 57, 57, 57, 57, 58, 59, 60, 61, 62, 548 56, 56, 57, 57, 58, 59, 60, 61, 61, 61,
484 62, 62, 62, 63, 64, 65, 66, 67, 68, 68, 549 61, 62, 63, 64, 65, 66, 66, 66, 66, 67,
485 68, 68, 69, 69, 69, 70, 70, 71, 71, 72, 550 68, 69, 70, 71, 72, 72, 72, 72, 73, 73,
486 72, 72, 73, 73, 74, 74, 75, 75, 75, 75, 551 73, 74, 74, 75, 75, 76, 76, 76, 77, 77,
487 75, 75, 75, 76, 76 552 78, 78, 79, 79, 79, 79, 79, 79, 79, 80,
553 80
488}; 554};
489 555
490/* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN. */ 556/* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN. */
@@ -493,14 +559,15 @@ static const unsigned char yyr2[] =
493 0, 2, 1, 0, 2, 2, 2, 4, 2, 4, 559 0, 2, 1, 0, 2, 2, 2, 4, 2, 4,
494 4, 3, 1, 1, 1, 1, 1, 1, 1, 1, 560 4, 3, 1, 1, 1, 1, 1, 1, 1, 1,
495 1, 1, 1, 1, 1, 3, 2, 3, 2, 3, 561 1, 1, 1, 1, 1, 3, 2, 3, 2, 3,
496 2, 0, 2, 2, 2, 2, 2, 3, 4, 4, 562 2, 0, 2, 2, 2, 2, 2, 2, 3, 4,
497 4, 5, 2, 2, 1, 3, 0, 2, 2, 2, 563 4, 4, 5, 3, 0, 3, 0, 2, 2, 2,
498 2, 2, 4, 3, 2, 4, 0, 2, 3, 1, 564 1, 3, 0, 2, 2, 2, 2, 2, 4, 3,
499 3, 0, 2, 2, 2, 3, 2, 1, 3, 0, 565 2, 4, 0, 2, 3, 1, 3, 0, 2, 2,
500 2, 2, 2, 3, 3, 2, 2, 2, 0, 2, 566 2, 3, 2, 1, 3, 0, 2, 2, 2, 3,
501 2, 2, 4, 3, 3, 0, 2, 1, 1, 2, 567 3, 2, 2, 2, 0, 2, 2, 2, 4, 3,
502 2, 2, 1, 2, 0, 2, 1, 3, 3, 3, 568 3, 0, 2, 1, 1, 2, 2, 2, 1, 2,
503 2, 3, 3, 1, 1 569 0, 2, 1, 3, 3, 3, 2, 3, 3, 1,
570 1
504}; 571};
505 572
506/* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state 573/* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state
@@ -511,175 +578,164 @@ static const unsigned char yydefact[] =
511 3, 0, 0, 1, 0, 0, 0, 0, 0, 0, 578 3, 0, 0, 1, 0, 0, 0, 0, 0, 0,
512 0, 0, 0, 0, 0, 0, 12, 16, 13, 14, 579 0, 0, 0, 0, 0, 0, 12, 16, 13, 14,
513 18, 15, 17, 0, 19, 0, 4, 31, 22, 31, 580 18, 15, 17, 0, 19, 0, 4, 31, 22, 31,
514 23, 46, 56, 5, 61, 20, 78, 69, 6, 24, 581 23, 52, 62, 5, 67, 20, 84, 75, 6, 24,
515 78, 21, 8, 11, 87, 88, 0, 0, 89, 0, 582 84, 21, 8, 11, 93, 94, 0, 0, 95, 0,
516 42, 90, 0, 0, 0, 103, 104, 0, 0, 0, 583 48, 96, 0, 0, 0, 109, 110, 0, 0, 0,
517 96, 91, 0, 0, 0, 0, 0, 0, 0, 0, 584 102, 97, 0, 0, 0, 0, 0, 0, 0, 0,
518 0, 0, 92, 7, 65, 73, 74, 27, 29, 0, 585 0, 0, 98, 7, 71, 79, 80, 27, 29, 0,
519 100, 0, 0, 58, 0, 0, 9, 10, 0, 0, 586 106, 0, 0, 64, 0, 0, 9, 10, 0, 0,
520 0, 0, 0, 85, 0, 0, 0, 0, 36, 35, 587 0, 0, 0, 91, 0, 0, 0, 44, 0, 37,
521 32, 0, 34, 33, 0, 0, 85, 0, 50, 51, 588 36, 32, 33, 0, 35, 34, 0, 0, 91, 0,
522 47, 49, 48, 57, 45, 44, 62, 64, 60, 63, 589 56, 57, 53, 55, 54, 63, 51, 50, 68, 70,
523 59, 80, 81, 79, 70, 72, 68, 71, 67, 93, 590 66, 69, 65, 86, 87, 85, 76, 78, 74, 77,
524 99, 101, 102, 98, 97, 26, 76, 0, 0, 0, 591 73, 99, 105, 107, 108, 104, 103, 26, 82, 0,
525 94, 0, 94, 94, 94, 0, 0, 77, 54, 94, 592 0, 0, 100, 0, 100, 100, 100, 0, 0, 0,
526 0, 94, 0, 83, 84, 0, 0, 37, 86, 0, 593 83, 60, 100, 0, 100, 0, 89, 90, 0, 0,
527 0, 94, 25, 0, 53, 0, 82, 95, 38, 39, 594 38, 92, 0, 0, 100, 46, 43, 25, 0, 59,
528 40, 0, 52, 55, 41 595 0, 88, 101, 39, 40, 41, 0, 0, 45, 58,
596 61, 42, 47
529}; 597};
530 598
531/* YYDEFGOTO[NTERM-NUM]. */ 599/* YYDEFGOTO[NTERM-NUM]. */
532static const short int yydefgoto[] = 600static const short int yydefgoto[] =
533{ 601{
534 -1, 1, 2, 25, 26, 99, 27, 28, 29, 30, 602 -1, 1, 2, 25, 26, 100, 27, 28, 29, 30,
535 64, 100, 31, 32, 114, 33, 66, 110, 67, 34, 603 64, 101, 102, 148, 178, 31, 32, 116, 33, 66,
536 118, 35, 68, 36, 37, 126, 38, 70, 39, 40, 604 112, 67, 34, 120, 35, 68, 36, 37, 128, 38,
537 41, 101, 102, 69, 103, 141, 142, 42, 73, 156, 605 70, 39, 40, 41, 103, 104, 69, 105, 143, 144,
538 59, 60 606 42, 73, 159, 59, 60
539}; 607};
540 608
541/* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing 609/* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
542 STATE-NUM. */ 610 STATE-NUM. */
543#define YYPACT_NINF -78 611#define YYPACT_NINF -135
544static const short int yypact[] = 612static const short int yypact[] =
545{ 613{
546 -78, 2, 159, -78, -21, 0, 0, -12, 0, 1, 614 -135, 2, 170, -135, -14, 56, 56, -8, 56, 24,
547 4, 0, 27, 38, 60, 58, -78, -78, -78, -78, 615 67, 56, 7, 14, 62, 97, -135, -135, -135, -135,
548 -78, -78, -78, 100, -78, 104, -78, -78, -78, -78, 616 -135, -135, -135, 156, -135, 166, -135, -135, -135, -135,
549 -78, -78, -78, -78, -78, -78, -78, -78, -78, -78, 617 -135, -135, -135, -135, -135, -135, -135, -135, -135, -135,
550 -78, -78, -78, -78, -78, -78, 86, 113, -78, 114, 618 -135, -135, -135, -135, -135, -135, 138, 151, -135, 152,
551 -78, -78, 125, 127, 128, -78, -78, 60, 60, 210, 619 -135, -135, 163, 167, 176, -135, -135, 62, 62, 185,
552 65, -78, 141, 142, 39, 103, 182, 200, 6, 66, 620 -19, -135, 188, 190, 42, 103, 194, 85, 70, 222,
553 6, 131, -78, 146, -78, -78, -78, -78, -78, 196, 621 70, 132, -135, 191, -135, -135, -135, -135, -135, 127,
554 -78, 60, 60, 146, 40, 40, -78, -78, 155, 156, 622 -135, 62, 62, 191, 104, 104, -135, -135, 193, 203,
555 -2, 60, 0, 0, 60, 105, 40, 194, -78, -78, 623 9, 62, 56, 56, 62, 161, 104, -135, 196, -135,
556 -78, 206, -78, -78, 183, 0, 0, 195, -78, -78, 624 -135, -135, -135, 233, -135, -135, 204, 56, 56, 221,
557 -78, -78, -78, -78, -78, -78, -78, -78, -78, -78, 625 -135, -135, -135, -135, -135, -135, -135, -135, -135, -135,
558 -78, -78, -78, -78, -78, -78, -78, -78, -78, -78, 626 -135, -135, -135, -135, -135, -135, -135, -135, -135, -135,
559 -78, 197, -78, -78, -78, -78, -78, 60, 213, 216, 627 -135, -135, -135, 219, -135, -135, -135, -135, -135, 62,
560 212, 203, 212, 190, 212, 40, 208, -78, -78, 212, 628 209, 212, 240, 224, 240, -1, 240, 104, 41, 225,
561 222, 212, 219, -78, -78, 60, 223, -78, -78, 224, 629 -135, -135, 240, 226, 240, 218, -135, -135, 62, 227,
562 225, 212, -78, 226, -78, 227, -78, 47, -78, -78, 630 -135, -135, 228, 229, 240, 230, -135, -135, 231, -135,
563 -78, 228, -78, -78, -78 631 232, -135, 112, -135, -135, -135, 234, 56, -135, -135,
632 -135, -135, -135
564}; 633};
565 634
566/* YYPGOTO[NTERM-NUM]. */ 635/* YYPGOTO[NTERM-NUM]. */
567static const short int yypgoto[] = 636static const short int yypgoto[] =
568{ 637{
569 -78, -78, -78, -78, 164, -36, -78, -78, -78, -78, 638 -135, -135, -135, -135, 94, -45, -135, -135, -135, -135,
570 230, -78, -78, -78, -78, 29, -78, -78, -78, -78, 639 237, -135, -135, -135, -135, -135, -135, -135, -54, -135,
571 -78, -78, -78, -78, -78, -78, 59, -78, -78, -78, 640 -135, -135, -135, -135, -135, -135, -135, -135, -135, 1,
572 -78, -78, 198, 220, 24, 157, -5, 169, 202, 74, 641 -135, -135, -135, -135, -135, 195, 235, -44, 159, -5,
573 -53, -77 642 98, 210, -134, -53, -77
574}; 643};
575 644
576/* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If 645/* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If
577 positive, shift that token. If negative, reduce the rule which 646 positive, shift that token. If negative, reduce the rule which
578 number is the opposite. If zero, do what YYDEFACT says. 647 number is the opposite. If zero, do what YYDEFACT says.
579 If YYTABLE_NINF, syntax error. */ 648 If YYTABLE_NINF, syntax error. */
580#define YYTABLE_NINF -76 649#define YYTABLE_NINF -82
581static const short int yytable[] = 650static const short int yytable[] =
582{ 651{
583 46, 47, 3, 49, 79, 80, 52, 133, 134, 43, 652 46, 47, 3, 49, 79, 80, 52, 135, 136, 84,
584 6, 7, 8, 9, 10, 11, 12, 13, 48, 145, 653 161, 162, 163, 158, 119, 85, 127, 43, 168, 147,
585 14, 15, 137, 55, 56, 44, 45, 57, 131, 132, 654 170, 111, 114, 48, 124, 125, 124, 125, 133, 134,
586 109, 50, 58, 122, 51, 122, 24, 138, 139, -28, 655 176, 81, 82, 53, 139, 55, 56, 140, 141, 57,
587 88, 143, -28, -28, -28, -28, -28, -28, -28, -28, 656 54, 145, -28, 88, 58, -28, -28, -28, -28, -28,
588 -28, 89, 53, -28, -28, 90, 91, -28, 92, 93, 657 -28, -28, -28, -28, 89, 50, -28, -28, 90, 91,
589 94, 95, 96, 54, 97, 55, 56, 88, 161, 98, 658 -28, 92, 93, 94, 95, 96, 97, 165, 98, 121,
590 -66, -66, -66, -66, -66, -66, -66, -66, 81, 82, 659 164, 129, 166, 99, 6, 7, 8, 9, 10, 11,
591 -66, -66, 90, 91, 152, 55, 56, 140, 61, 57, 660 12, 13, 44, 45, 14, 15, 155, 142, 55, 56,
592 112, 97, 84, 123, 58, 123, 121, 117, 85, 125, 661 7, 8, 57, 10, 11, 12, 13, 58, 51, 14,
593 149, 62, 167, -30, 88, 63, -30, -30, -30, -30, 662 15, 24, 152, -30, 88, 172, -30, -30, -30, -30,
594 -30, -30, -30, -30, -30, 89, 72, -30, -30, 90, 663 -30, -30, -30, -30, -30, 89, 24, -30, -30, 90,
595 91, -30, 92, 93, 94, 95, 96, 119, 97, 127, 664 91, -30, 92, 93, 94, 95, 96, 97, 61, 98,
596 144, -75, 88, 98, -75, -75, -75, -75, -75, -75, 665 55, 56, -81, 88, 99, -81, -81, -81, -81, -81,
597 -75, -75, -75, 74, 75, -75, -75, 90, 91, -75, 666 -81, -81, -81, -81, 81, 82, -81, -81, 90, 91,
598 -75, -75, -75, -75, -75, 76, 97, 77, 78, -2, 667 -81, -81, -81, -81, -81, -81, 132, 62, 98, 81,
599 4, 121, 5, 6, 7, 8, 9, 10, 11, 12, 668 82, 115, 118, 123, 126, 117, 122, 63, 130, 72,
600 13, 86, 87, 14, 15, 16, 129, 17, 18, 19, 669 -2, 4, 182, 5, 6, 7, 8, 9, 10, 11,
601 20, 21, 22, 88, 23, 135, 136, -43, -43, 24, 670 12, 13, 74, 75, 14, 15, 16, 146, 17, 18,
602 -43, -43, -43, -43, 89, 146, -43, -43, 90, 91, 671 19, 20, 21, 22, 76, 88, 23, 149, 77, -49,
603 104, 105, 106, 107, 155, 7, 8, 97, 10, 11, 672 -49, 24, -49, -49, -49, -49, 89, 78, -49, -49,
604 12, 13, 108, 148, 14, 15, 158, 159, 160, 147, 673 90, 91, 106, 107, 108, 109, 72, 81, 82, 86,
605 151, 81, 82, 163, 130, 165, 155, 81, 82, 82, 674 98, 87, 131, 88, 137, 110, -72, -72, -72, -72,
606 24, 113, 116, 157, 124, 171, 115, 120, 162, 128, 675 -72, -72, -72, -72, 138, 151, -72, -72, 90, 91,
607 72, 81, 82, 153, 81, 82, 154, 81, 82, 166, 676 156, 81, 82, 157, 81, 82, 150, 154, 98, 171,
608 81, 82, 164, 168, 169, 170, 172, 173, 174, 65, 677 81, 82, 82, 123, 158, 160, 167, 169, 173, 174,
609 71, 83, 0, 150, 111 678 175, 113, 179, 180, 177, 181, 65, 153, 0, 83,
679 0, 0, 0, 0, 0, 71
610}; 680};
611 681
612static const short int yycheck[] = 682static const short int yycheck[] =
613{ 683{
614 5, 6, 0, 8, 57, 58, 11, 84, 85, 30, 684 5, 6, 0, 8, 57, 58, 11, 84, 85, 28,
615 4, 5, 6, 7, 8, 9, 10, 11, 30, 96, 685 144, 145, 146, 14, 68, 34, 70, 31, 152, 96,
616 14, 15, 24, 25, 26, 25, 26, 29, 81, 82, 686 154, 66, 66, 31, 69, 69, 71, 71, 81, 82,
617 66, 30, 34, 69, 30, 71, 30, 90, 91, 0, 687 164, 32, 33, 26, 25, 26, 27, 90, 91, 30,
618 1, 94, 3, 4, 5, 6, 7, 8, 9, 10, 688 26, 94, 0, 1, 35, 3, 4, 5, 6, 7,
619 11, 12, 25, 14, 15, 16, 17, 18, 19, 20, 689 8, 9, 10, 11, 12, 31, 14, 15, 16, 17,
620 21, 22, 23, 25, 25, 25, 26, 1, 145, 30, 690 18, 19, 20, 21, 22, 23, 24, 26, 26, 68,
621 4, 5, 6, 7, 8, 9, 10, 11, 31, 32, 691 147, 70, 31, 31, 4, 5, 6, 7, 8, 9,
622 14, 15, 16, 17, 137, 25, 26, 92, 30, 29, 692 10, 11, 26, 27, 14, 15, 139, 92, 26, 27,
623 66, 25, 27, 69, 34, 71, 30, 68, 33, 70, 693 5, 6, 30, 8, 9, 10, 11, 35, 31, 14,
624 105, 1, 155, 0, 1, 1, 3, 4, 5, 6, 694 15, 31, 107, 0, 1, 158, 3, 4, 5, 6,
625 7, 8, 9, 10, 11, 12, 30, 14, 15, 16, 695 7, 8, 9, 10, 11, 12, 31, 14, 15, 16,
626 17, 18, 19, 20, 21, 22, 23, 68, 25, 70, 696 17, 18, 19, 20, 21, 22, 23, 24, 31, 26,
627 25, 0, 1, 30, 3, 4, 5, 6, 7, 8, 697 26, 27, 0, 1, 31, 3, 4, 5, 6, 7,
628 9, 10, 11, 30, 30, 14, 15, 16, 17, 18, 698 8, 9, 10, 11, 32, 33, 14, 15, 16, 17,
629 19, 20, 21, 22, 23, 30, 25, 30, 30, 0, 699 18, 19, 20, 21, 22, 23, 29, 1, 26, 32,
630 1, 30, 3, 4, 5, 6, 7, 8, 9, 10, 700 33, 67, 68, 31, 70, 67, 68, 1, 70, 31,
631 11, 30, 30, 14, 15, 16, 30, 18, 19, 20, 701 0, 1, 177, 3, 4, 5, 6, 7, 8, 9,
632 21, 22, 23, 1, 25, 30, 30, 5, 6, 30, 702 10, 11, 31, 31, 14, 15, 16, 26, 18, 19,
633 8, 9, 10, 11, 12, 1, 14, 15, 16, 17, 703 20, 21, 22, 23, 31, 1, 26, 1, 31, 5,
634 18, 19, 20, 21, 14, 5, 6, 25, 8, 9, 704 6, 31, 8, 9, 10, 11, 12, 31, 14, 15,
635 10, 11, 30, 30, 14, 15, 142, 143, 144, 13, 705 16, 17, 18, 19, 20, 21, 31, 32, 33, 31,
636 25, 31, 32, 149, 28, 151, 14, 31, 32, 32, 706 26, 31, 31, 1, 31, 31, 4, 5, 6, 7,
637 30, 67, 68, 30, 70, 161, 67, 68, 30, 70, 707 8, 9, 10, 11, 31, 31, 14, 15, 16, 17,
638 30, 31, 32, 30, 31, 32, 30, 31, 32, 30, 708 31, 32, 33, 31, 32, 33, 13, 26, 26, 31,
639 31, 32, 30, 30, 30, 30, 30, 30, 30, 29, 709 32, 33, 33, 31, 14, 31, 31, 31, 31, 31,
640 40, 59, -1, 106, 66 710 31, 66, 31, 31, 34, 31, 29, 108, -1, 59,
711 -1, -1, -1, -1, -1, 40
641}; 712};
642 713
643/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing 714/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
644 symbol of state STATE-NUM. */ 715 symbol of state STATE-NUM. */
645static const unsigned char yystos[] = 716static const unsigned char yystos[] =
646{ 717{
647 0, 36, 37, 0, 1, 3, 4, 5, 6, 7, 718 0, 37, 38, 0, 1, 3, 4, 5, 6, 7,
648 8, 9, 10, 11, 14, 15, 16, 18, 19, 20, 719 8, 9, 10, 11, 14, 15, 16, 18, 19, 20,
649 21, 22, 23, 25, 30, 38, 39, 41, 42, 43, 720 21, 22, 23, 26, 31, 39, 40, 42, 43, 44,
650 44, 47, 48, 50, 54, 56, 58, 59, 61, 63, 721 45, 51, 52, 54, 58, 60, 62, 63, 65, 67,
651 64, 65, 72, 30, 25, 26, 71, 71, 30, 71, 722 68, 69, 76, 31, 26, 27, 75, 75, 31, 75,
652 30, 30, 71, 25, 25, 25, 26, 29, 34, 75, 723 31, 31, 75, 26, 26, 26, 27, 30, 35, 79,
653 76, 30, 1, 1, 45, 45, 51, 53, 57, 68, 724 80, 31, 1, 1, 46, 46, 55, 57, 61, 72,
654 62, 68, 30, 73, 30, 30, 30, 30, 30, 75, 725 66, 72, 31, 77, 31, 31, 31, 31, 31, 79,
655 75, 31, 32, 73, 27, 33, 30, 30, 1, 12, 726 79, 32, 33, 77, 28, 34, 31, 31, 1, 12,
656 16, 17, 19, 20, 21, 22, 23, 25, 30, 40, 727 16, 17, 19, 20, 21, 22, 23, 24, 26, 31,
657 46, 66, 67, 69, 18, 19, 20, 21, 30, 40, 728 41, 47, 48, 70, 71, 73, 18, 19, 20, 21,
658 52, 67, 69, 39, 49, 72, 39, 50, 55, 61, 729 31, 41, 56, 71, 73, 40, 53, 76, 40, 54,
659 72, 30, 40, 69, 39, 50, 60, 61, 72, 30, 730 59, 65, 76, 31, 41, 73, 40, 54, 64, 65,
660 28, 75, 75, 76, 76, 30, 30, 24, 75, 75, 731 76, 31, 29, 79, 79, 80, 80, 31, 31, 25,
661 71, 70, 71, 75, 25, 76, 1, 13, 30, 71, 732 79, 79, 75, 74, 75, 79, 26, 80, 49, 1,
662 70, 25, 75, 30, 30, 14, 74, 30, 74, 74, 733 13, 31, 75, 74, 26, 79, 31, 31, 14, 78,
663 74, 76, 30, 74, 30, 74, 30, 75, 30, 30, 734 31, 78, 78, 78, 80, 26, 31, 31, 78, 31,
664 30, 74, 30, 30, 30 735 78, 31, 79, 31, 31, 31, 78, 34, 50, 31,
736 31, 31, 75
665}; 737};
666 738
667#if ! defined (YYSIZE_T) && defined (__SIZE_TYPE__)
668# define YYSIZE_T __SIZE_TYPE__
669#endif
670#if ! defined (YYSIZE_T) && defined (size_t)
671# define YYSIZE_T size_t
672#endif
673#if ! defined (YYSIZE_T)
674# if defined (__STDC__) || defined (__cplusplus)
675# include <stddef.h> /* INFRINGES ON USER NAME SPACE */
676# define YYSIZE_T size_t
677# endif
678#endif
679#if ! defined (YYSIZE_T)
680# define YYSIZE_T unsigned int
681#endif
682
683#define yyerrok (yyerrstatus = 0) 739#define yyerrok (yyerrstatus = 0)
684#define yyclearin (yychar = YYEMPTY) 740#define yyclearin (yychar = YYEMPTY)
685#define YYEMPTY (-2) 741#define YYEMPTY (-2)
@@ -709,8 +765,8 @@ do \
709 goto yybackup; \ 765 goto yybackup; \
710 } \ 766 } \
711 else \ 767 else \
712 { \ 768 { \
713 yyerror ("syntax error: cannot back up");\ 769 yyerror (YY_("syntax error: cannot back up")); \
714 YYERROR; \ 770 YYERROR; \
715 } \ 771 } \
716while (0) 772while (0)
@@ -789,7 +845,7 @@ do { \
789 if (yydebug) \ 845 if (yydebug) \
790 { \ 846 { \
791 YYFPRINTF (stderr, "%s ", Title); \ 847 YYFPRINTF (stderr, "%s ", Title); \
792 yysymprint (stderr, \ 848 yysymprint (stderr, \
793 Type, Value); \ 849 Type, Value); \
794 YYFPRINTF (stderr, "\n"); \ 850 YYFPRINTF (stderr, "\n"); \
795 } \ 851 } \
@@ -837,13 +893,13 @@ yy_reduce_print (yyrule)
837#endif 893#endif
838{ 894{
839 int yyi; 895 int yyi;
840 unsigned int yylno = yyrline[yyrule]; 896 unsigned long int yylno = yyrline[yyrule];
841 YYFPRINTF (stderr, "Reducing stack by rule %d (line %u), ", 897 YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu), ",
842 yyrule - 1, yylno); 898 yyrule - 1, yylno);
843 /* Print the symbols being reduced, and their result. */ 899 /* Print the symbols being reduced, and their result. */
844 for (yyi = yyprhs[yyrule]; 0 <= yyrhs[yyi]; yyi++) 900 for (yyi = yyprhs[yyrule]; 0 <= yyrhs[yyi]; yyi++)
845 YYFPRINTF (stderr, "%s ", yytname [yyrhs[yyi]]); 901 YYFPRINTF (stderr, "%s ", yytname[yyrhs[yyi]]);
846 YYFPRINTF (stderr, "-> %s\n", yytname [yyr1[yyrule]]); 902 YYFPRINTF (stderr, "-> %s\n", yytname[yyr1[yyrule]]);
847} 903}
848 904
849# define YY_REDUCE_PRINT(Rule) \ 905# define YY_REDUCE_PRINT(Rule) \
@@ -872,7 +928,7 @@ int yydebug;
872 if the built-in stack extension method is used). 928 if the built-in stack extension method is used).
873 929
874 Do not make this value too large; the results are undefined if 930 Do not make this value too large; the results are undefined if
875 SIZE_MAX < YYSTACK_BYTES (YYMAXDEPTH) 931 YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH)
876 evaluated with infinite-precision integer arithmetic. */ 932 evaluated with infinite-precision integer arithmetic. */
877 933
878#ifndef YYMAXDEPTH 934#ifndef YYMAXDEPTH
@@ -896,7 +952,7 @@ yystrlen (yystr)
896 const char *yystr; 952 const char *yystr;
897# endif 953# endif
898{ 954{
899 register const char *yys = yystr; 955 const char *yys = yystr;
900 956
901 while (*yys++ != '\0') 957 while (*yys++ != '\0')
902 continue; 958 continue;
@@ -921,8 +977,8 @@ yystpcpy (yydest, yysrc)
921 const char *yysrc; 977 const char *yysrc;
922# endif 978# endif
923{ 979{
924 register char *yyd = yydest; 980 char *yyd = yydest;
925 register const char *yys = yysrc; 981 const char *yys = yysrc;
926 982
927 while ((*yyd++ = *yys++) != '\0') 983 while ((*yyd++ = *yys++) != '\0')
928 continue; 984 continue;
@@ -932,7 +988,55 @@ yystpcpy (yydest, yysrc)
932# endif 988# endif
933# endif 989# endif
934 990
935#endif /* !YYERROR_VERBOSE */ 991# ifndef yytnamerr
992/* Copy to YYRES the contents of YYSTR after stripping away unnecessary
993 quotes and backslashes, so that it's suitable for yyerror. The
994 heuristic is that double-quoting is unnecessary unless the string
995 contains an apostrophe, a comma, or backslash (other than
996 backslash-backslash). YYSTR is taken from yytname. If YYRES is
997 null, do not copy; instead, return the length of what the result
998 would have been. */
999static YYSIZE_T
1000yytnamerr (char *yyres, const char *yystr)
1001{
1002 if (*yystr == '"')
1003 {
1004 size_t yyn = 0;
1005 char const *yyp = yystr;
1006
1007 for (;;)
1008 switch (*++yyp)
1009 {
1010 case '\'':
1011 case ',':
1012 goto do_not_strip_quotes;
1013
1014 case '\\':
1015 if (*++yyp != '\\')
1016 goto do_not_strip_quotes;
1017 /* Fall through. */
1018 default:
1019 if (yyres)
1020 yyres[yyn] = *yyp;
1021 yyn++;
1022 break;
1023
1024 case '"':
1025 if (yyres)
1026 yyres[yyn] = '\0';
1027 return yyn;
1028 }
1029 do_not_strip_quotes: ;
1030 }
1031
1032 if (! yyres)
1033 return yystrlen (yystr);
1034
1035 return yystpcpy (yyres, yystr) - yyres;
1036}
1037# endif
1038
1039#endif /* YYERROR_VERBOSE */
936 1040
937 1041
938 1042
@@ -998,7 +1102,7 @@ yydestruct (yymsg, yytype, yyvaluep)
998 1102
999 switch (yytype) 1103 switch (yytype)
1000 { 1104 {
1001 case 48: /* choice_entry */ 1105 case 52: /* "choice_entry" */
1002 1106
1003 { 1107 {
1004 fprintf(stderr, "%s:%d: missing end statement for this entry\n", 1108 fprintf(stderr, "%s:%d: missing end statement for this entry\n",
@@ -1008,7 +1112,7 @@ yydestruct (yymsg, yytype, yyvaluep)
1008}; 1112};
1009 1113
1010 break; 1114 break;
1011 case 54: /* if_entry */ 1115 case 58: /* "if_entry" */
1012 1116
1013 { 1117 {
1014 fprintf(stderr, "%s:%d: missing end statement for this entry\n", 1118 fprintf(stderr, "%s:%d: missing end statement for this entry\n",
@@ -1018,7 +1122,7 @@ yydestruct (yymsg, yytype, yyvaluep)
1018}; 1122};
1019 1123
1020 break; 1124 break;
1021 case 59: /* menu_entry */ 1125 case 63: /* "menu_entry" */
1022 1126
1023 { 1127 {
1024 fprintf(stderr, "%s:%d: missing end statement for this entry\n", 1128 fprintf(stderr, "%s:%d: missing end statement for this entry\n",
@@ -1082,13 +1186,13 @@ yyparse (void)
1082#else 1186#else
1083int 1187int
1084yyparse () 1188yyparse ()
1085 1189 ;
1086#endif 1190#endif
1087#endif 1191#endif
1088{ 1192{
1089 1193
1090 register int yystate; 1194 int yystate;
1091 register int yyn; 1195 int yyn;
1092 int yyresult; 1196 int yyresult;
1093 /* Number of tokens to shift before error messages enabled. */ 1197 /* Number of tokens to shift before error messages enabled. */
1094 int yyerrstatus; 1198 int yyerrstatus;
@@ -1106,12 +1210,12 @@ yyparse ()
1106 /* The state stack. */ 1210 /* The state stack. */
1107 short int yyssa[YYINITDEPTH]; 1211 short int yyssa[YYINITDEPTH];
1108 short int *yyss = yyssa; 1212 short int *yyss = yyssa;
1109 register short int *yyssp; 1213 short int *yyssp;
1110 1214
1111 /* The semantic value stack. */ 1215 /* The semantic value stack. */
1112 YYSTYPE yyvsa[YYINITDEPTH]; 1216 YYSTYPE yyvsa[YYINITDEPTH];
1113 YYSTYPE *yyvs = yyvsa; 1217 YYSTYPE *yyvs = yyvsa;
1114 register YYSTYPE *yyvsp; 1218 YYSTYPE *yyvsp;
1115 1219
1116 1220
1117 1221
@@ -1143,9 +1247,6 @@ yyparse ()
1143 yyssp = yyss; 1247 yyssp = yyss;
1144 yyvsp = yyvs; 1248 yyvsp = yyvs;
1145 1249
1146
1147 yyvsp[0] = yylval;
1148
1149 goto yysetstate; 1250 goto yysetstate;
1150 1251
1151/*------------------------------------------------------------. 1252/*------------------------------------------------------------.
@@ -1178,7 +1279,7 @@ yyparse ()
1178 data in use in that stack, in bytes. This used to be a 1279 data in use in that stack, in bytes. This used to be a
1179 conditional around just the two extra args, but that might 1280 conditional around just the two extra args, but that might
1180 be undefined if yyoverflow is a macro. */ 1281 be undefined if yyoverflow is a macro. */
1181 yyoverflow ("parser stack overflow", 1282 yyoverflow (YY_("memory exhausted"),
1182 &yyss1, yysize * sizeof (*yyssp), 1283 &yyss1, yysize * sizeof (*yyssp),
1183 &yyvs1, yysize * sizeof (*yyvsp), 1284 &yyvs1, yysize * sizeof (*yyvsp),
1184 1285
@@ -1189,11 +1290,11 @@ yyparse ()
1189 } 1290 }
1190#else /* no yyoverflow */ 1291#else /* no yyoverflow */
1191# ifndef YYSTACK_RELOCATE 1292# ifndef YYSTACK_RELOCATE
1192 goto yyoverflowlab; 1293 goto yyexhaustedlab;
1193# else 1294# else
1194 /* Extend the stack our own way. */ 1295 /* Extend the stack our own way. */
1195 if (YYMAXDEPTH <= yystacksize) 1296 if (YYMAXDEPTH <= yystacksize)
1196 goto yyoverflowlab; 1297 goto yyexhaustedlab;
1197 yystacksize *= 2; 1298 yystacksize *= 2;
1198 if (YYMAXDEPTH < yystacksize) 1299 if (YYMAXDEPTH < yystacksize)
1199 yystacksize = YYMAXDEPTH; 1300 yystacksize = YYMAXDEPTH;
@@ -1203,7 +1304,7 @@ yyparse ()
1203 union yyalloc *yyptr = 1304 union yyalloc *yyptr =
1204 (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize)); 1305 (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
1205 if (! yyptr) 1306 if (! yyptr)
1206 goto yyoverflowlab; 1307 goto yyexhaustedlab;
1207 YYSTACK_RELOCATE (yyss); 1308 YYSTACK_RELOCATE (yyss);
1208 YYSTACK_RELOCATE (yyvs); 1309 YYSTACK_RELOCATE (yyvs);
1209 1310
@@ -1403,7 +1504,7 @@ yyreduce:
1403;} 1504;}
1404 break; 1505 break;
1405 1506
1406 case 37: 1507 case 38:
1407 1508
1408 { 1509 {
1409 menu_set_type((yyvsp[-2].id)->stype); 1510 menu_set_type((yyvsp[-2].id)->stype);
@@ -1413,7 +1514,7 @@ yyreduce:
1413;} 1514;}
1414 break; 1515 break;
1415 1516
1416 case 38: 1517 case 39:
1417 1518
1418 { 1519 {
1419 menu_add_prompt(P_PROMPT, (yyvsp[-2].string), (yyvsp[-1].expr)); 1520 menu_add_prompt(P_PROMPT, (yyvsp[-2].string), (yyvsp[-1].expr));
@@ -1421,7 +1522,7 @@ yyreduce:
1421;} 1522;}
1422 break; 1523 break;
1423 1524
1424 case 39: 1525 case 40:
1425 1526
1426 { 1527 {
1427 menu_add_expr(P_DEFAULT, (yyvsp[-2].expr), (yyvsp[-1].expr)); 1528 menu_add_expr(P_DEFAULT, (yyvsp[-2].expr), (yyvsp[-1].expr));
@@ -1433,7 +1534,7 @@ yyreduce:
1433;} 1534;}
1434 break; 1535 break;
1435 1536
1436 case 40: 1537 case 41:
1437 1538
1438 { 1539 {
1439 menu_add_symbol(P_SELECT, sym_lookup((yyvsp[-2].string), 0), (yyvsp[-1].expr)); 1540 menu_add_symbol(P_SELECT, sym_lookup((yyvsp[-2].string), 0), (yyvsp[-1].expr));
@@ -1441,7 +1542,7 @@ yyreduce:
1441;} 1542;}
1442 break; 1543 break;
1443 1544
1444 case 41: 1545 case 42:
1445 1546
1446 { 1547 {
1447 menu_add_expr(P_RANGE, expr_alloc_comp(E_RANGE,(yyvsp[-3].symbol), (yyvsp[-2].symbol)), (yyvsp[-1].expr)); 1548 menu_add_expr(P_RANGE, expr_alloc_comp(E_RANGE,(yyvsp[-3].symbol), (yyvsp[-2].symbol)), (yyvsp[-1].expr));
@@ -1449,7 +1550,29 @@ yyreduce:
1449;} 1550;}
1450 break; 1551 break;
1451 1552
1452 case 42: 1553 case 45:
1554
1555 {
1556 struct kconf_id *id = kconf_id_lookup((yyvsp[-1].string), strlen((yyvsp[-1].string)));
1557 if (id && id->flags & TF_OPTION)
1558 menu_add_option(id->token, (yyvsp[0].string));
1559 else
1560 zconfprint("warning: ignoring unknown option %s", (yyvsp[-1].string));
1561 free((yyvsp[-1].string));
1562;}
1563 break;
1564
1565 case 46:
1566
1567 { (yyval.string) = NULL; ;}
1568 break;
1569
1570 case 47:
1571
1572 { (yyval.string) = (yyvsp[0].string); ;}
1573 break;
1574
1575 case 48:
1453 1576
1454 { 1577 {
1455 struct symbol *sym = sym_lookup(NULL, 0); 1578 struct symbol *sym = sym_lookup(NULL, 0);
@@ -1460,14 +1583,14 @@ yyreduce:
1460;} 1583;}
1461 break; 1584 break;
1462 1585
1463 case 43: 1586 case 49:
1464 1587
1465 { 1588 {
1466 (yyval.menu) = menu_add_menu(); 1589 (yyval.menu) = menu_add_menu();
1467;} 1590;}
1468 break; 1591 break;
1469 1592
1470 case 44: 1593 case 50:
1471 1594
1472 { 1595 {
1473 if (zconf_endtoken((yyvsp[0].id), T_CHOICE, T_ENDCHOICE)) { 1596 if (zconf_endtoken((yyvsp[0].id), T_CHOICE, T_ENDCHOICE)) {
@@ -1477,7 +1600,7 @@ yyreduce:
1477;} 1600;}
1478 break; 1601 break;
1479 1602
1480 case 52: 1603 case 58:
1481 1604
1482 { 1605 {
1483 menu_add_prompt(P_PROMPT, (yyvsp[-2].string), (yyvsp[-1].expr)); 1606 menu_add_prompt(P_PROMPT, (yyvsp[-2].string), (yyvsp[-1].expr));
@@ -1485,7 +1608,7 @@ yyreduce:
1485;} 1608;}
1486 break; 1609 break;
1487 1610
1488 case 53: 1611 case 59:
1489 1612
1490 { 1613 {
1491 if ((yyvsp[-2].id)->stype == S_BOOLEAN || (yyvsp[-2].id)->stype == S_TRISTATE) { 1614 if ((yyvsp[-2].id)->stype == S_BOOLEAN || (yyvsp[-2].id)->stype == S_TRISTATE) {
@@ -1498,7 +1621,7 @@ yyreduce:
1498;} 1621;}
1499 break; 1622 break;
1500 1623
1501 case 54: 1624 case 60:
1502 1625
1503 { 1626 {
1504 current_entry->sym->flags |= SYMBOL_OPTIONAL; 1627 current_entry->sym->flags |= SYMBOL_OPTIONAL;
@@ -1506,7 +1629,7 @@ yyreduce:
1506;} 1629;}
1507 break; 1630 break;
1508 1631
1509 case 55: 1632 case 61:
1510 1633
1511 { 1634 {
1512 if ((yyvsp[-3].id)->stype == S_UNKNOWN) { 1635 if ((yyvsp[-3].id)->stype == S_UNKNOWN) {
@@ -1518,7 +1641,7 @@ yyreduce:
1518;} 1641;}
1519 break; 1642 break;
1520 1643
1521 case 58: 1644 case 64:
1522 1645
1523 { 1646 {
1524 printd(DEBUG_PARSE, "%s:%d:if\n", zconf_curname(), zconf_lineno()); 1647 printd(DEBUG_PARSE, "%s:%d:if\n", zconf_curname(), zconf_lineno());
@@ -1528,7 +1651,7 @@ yyreduce:
1528;} 1651;}
1529 break; 1652 break;
1530 1653
1531 case 59: 1654 case 65:
1532 1655
1533 { 1656 {
1534 if (zconf_endtoken((yyvsp[0].id), T_IF, T_ENDIF)) { 1657 if (zconf_endtoken((yyvsp[0].id), T_IF, T_ENDIF)) {
@@ -1538,7 +1661,7 @@ yyreduce:
1538;} 1661;}
1539 break; 1662 break;
1540 1663
1541 case 65: 1664 case 71:
1542 1665
1543 { 1666 {
1544 menu_add_entry(NULL); 1667 menu_add_entry(NULL);
@@ -1547,14 +1670,14 @@ yyreduce:
1547;} 1670;}
1548 break; 1671 break;
1549 1672
1550 case 66: 1673 case 72:
1551 1674
1552 { 1675 {
1553 (yyval.menu) = menu_add_menu(); 1676 (yyval.menu) = menu_add_menu();
1554;} 1677;}
1555 break; 1678 break;
1556 1679
1557 case 67: 1680 case 73:
1558 1681
1559 { 1682 {
1560 if (zconf_endtoken((yyvsp[0].id), T_MENU, T_ENDMENU)) { 1683 if (zconf_endtoken((yyvsp[0].id), T_MENU, T_ENDMENU)) {
@@ -1564,7 +1687,7 @@ yyreduce:
1564;} 1687;}
1565 break; 1688 break;
1566 1689
1567 case 73: 1690 case 79:
1568 1691
1569 { 1692 {
1570 printd(DEBUG_PARSE, "%s:%d:source %s\n", zconf_curname(), zconf_lineno(), (yyvsp[-1].string)); 1693 printd(DEBUG_PARSE, "%s:%d:source %s\n", zconf_curname(), zconf_lineno(), (yyvsp[-1].string));
@@ -1572,7 +1695,7 @@ yyreduce:
1572;} 1695;}
1573 break; 1696 break;
1574 1697
1575 case 74: 1698 case 80:
1576 1699
1577 { 1700 {
1578 menu_add_entry(NULL); 1701 menu_add_entry(NULL);
@@ -1581,14 +1704,14 @@ yyreduce:
1581;} 1704;}
1582 break; 1705 break;
1583 1706
1584 case 75: 1707 case 81:
1585 1708
1586 { 1709 {
1587 menu_end_entry(); 1710 menu_end_entry();
1588;} 1711;}
1589 break; 1712 break;
1590 1713
1591 case 76: 1714 case 82:
1592 1715
1593 { 1716 {
1594 printd(DEBUG_PARSE, "%s:%d:help\n", zconf_curname(), zconf_lineno()); 1717 printd(DEBUG_PARSE, "%s:%d:help\n", zconf_curname(), zconf_lineno());
@@ -1596,14 +1719,14 @@ yyreduce:
1596;} 1719;}
1597 break; 1720 break;
1598 1721
1599 case 77: 1722 case 83:
1600 1723
1601 { 1724 {
1602 current_entry->sym->help = (yyvsp[0].string); 1725 current_entry->sym->help = (yyvsp[0].string);
1603;} 1726;}
1604 break; 1727 break;
1605 1728
1606 case 82: 1729 case 88:
1607 1730
1608 { 1731 {
1609 menu_add_dep((yyvsp[-1].expr)); 1732 menu_add_dep((yyvsp[-1].expr));
@@ -1611,7 +1734,7 @@ yyreduce:
1611;} 1734;}
1612 break; 1735 break;
1613 1736
1614 case 83: 1737 case 89:
1615 1738
1616 { 1739 {
1617 menu_add_dep((yyvsp[-1].expr)); 1740 menu_add_dep((yyvsp[-1].expr));
@@ -1619,7 +1742,7 @@ yyreduce:
1619;} 1742;}
1620 break; 1743 break;
1621 1744
1622 case 84: 1745 case 90:
1623 1746
1624 { 1747 {
1625 menu_add_dep((yyvsp[-1].expr)); 1748 menu_add_dep((yyvsp[-1].expr));
@@ -1627,87 +1750,88 @@ yyreduce:
1627;} 1750;}
1628 break; 1751 break;
1629 1752
1630 case 86: 1753 case 92:
1631 1754
1632 { 1755 {
1633 menu_add_prompt(P_PROMPT, (yyvsp[-1].string), (yyvsp[0].expr)); 1756 menu_add_prompt(P_PROMPT, (yyvsp[-1].string), (yyvsp[0].expr));
1634;} 1757;}
1635 break; 1758 break;
1636 1759
1637 case 89: 1760 case 95:
1638 1761
1639 { (yyval.id) = (yyvsp[-1].id); ;} 1762 { (yyval.id) = (yyvsp[-1].id); ;}
1640 break; 1763 break;
1641 1764
1642 case 90: 1765 case 96:
1643 1766
1644 { (yyval.id) = (yyvsp[-1].id); ;} 1767 { (yyval.id) = (yyvsp[-1].id); ;}
1645 break; 1768 break;
1646 1769
1647 case 91: 1770 case 97:
1648 1771
1649 { (yyval.id) = (yyvsp[-1].id); ;} 1772 { (yyval.id) = (yyvsp[-1].id); ;}
1650 break; 1773 break;
1651 1774
1652 case 94: 1775 case 100:
1653 1776
1654 { (yyval.expr) = NULL; ;} 1777 { (yyval.expr) = NULL; ;}
1655 break; 1778 break;
1656 1779
1657 case 95: 1780 case 101:
1658 1781
1659 { (yyval.expr) = (yyvsp[0].expr); ;} 1782 { (yyval.expr) = (yyvsp[0].expr); ;}
1660 break; 1783 break;
1661 1784
1662 case 96: 1785 case 102:
1663 1786
1664 { (yyval.expr) = expr_alloc_symbol((yyvsp[0].symbol)); ;} 1787 { (yyval.expr) = expr_alloc_symbol((yyvsp[0].symbol)); ;}
1665 break; 1788 break;
1666 1789
1667 case 97: 1790 case 103:
1668 1791
1669 { (yyval.expr) = expr_alloc_comp(E_EQUAL, (yyvsp[-2].symbol), (yyvsp[0].symbol)); ;} 1792 { (yyval.expr) = expr_alloc_comp(E_EQUAL, (yyvsp[-2].symbol), (yyvsp[0].symbol)); ;}
1670 break; 1793 break;
1671 1794
1672 case 98: 1795 case 104:
1673 1796
1674 { (yyval.expr) = expr_alloc_comp(E_UNEQUAL, (yyvsp[-2].symbol), (yyvsp[0].symbol)); ;} 1797 { (yyval.expr) = expr_alloc_comp(E_UNEQUAL, (yyvsp[-2].symbol), (yyvsp[0].symbol)); ;}
1675 break; 1798 break;
1676 1799
1677 case 99: 1800 case 105:
1678 1801
1679 { (yyval.expr) = (yyvsp[-1].expr); ;} 1802 { (yyval.expr) = (yyvsp[-1].expr); ;}
1680 break; 1803 break;
1681 1804
1682 case 100: 1805 case 106:
1683 1806
1684 { (yyval.expr) = expr_alloc_one(E_NOT, (yyvsp[0].expr)); ;} 1807 { (yyval.expr) = expr_alloc_one(E_NOT, (yyvsp[0].expr)); ;}
1685 break; 1808 break;
1686 1809
1687 case 101: 1810 case 107:
1688 1811
1689 { (yyval.expr) = expr_alloc_two(E_OR, (yyvsp[-2].expr), (yyvsp[0].expr)); ;} 1812 { (yyval.expr) = expr_alloc_two(E_OR, (yyvsp[-2].expr), (yyvsp[0].expr)); ;}
1690 break; 1813 break;
1691 1814
1692 case 102: 1815 case 108:
1693 1816
1694 { (yyval.expr) = expr_alloc_two(E_AND, (yyvsp[-2].expr), (yyvsp[0].expr)); ;} 1817 { (yyval.expr) = expr_alloc_two(E_AND, (yyvsp[-2].expr), (yyvsp[0].expr)); ;}
1695 break; 1818 break;
1696 1819
1697 case 103: 1820 case 109:
1698 1821
1699 { (yyval.symbol) = sym_lookup((yyvsp[0].string), 0); free((yyvsp[0].string)); ;} 1822 { (yyval.symbol) = sym_lookup((yyvsp[0].string), 0); free((yyvsp[0].string)); ;}
1700 break; 1823 break;
1701 1824
1702 case 104: 1825 case 110:
1703 1826
1704 { (yyval.symbol) = sym_lookup((yyvsp[0].string), 1); free((yyvsp[0].string)); ;} 1827 { (yyval.symbol) = sym_lookup((yyvsp[0].string), 1); free((yyvsp[0].string)); ;}
1705 break; 1828 break;
1706 1829
1707 1830
1831 default: break;
1708 } 1832 }
1709 1833
1710/* Line 1037 of yacc.c. */ 1834/* Line 1126 of yacc.c. */
1711 1835
1712 1836
1713 yyvsp -= yylen; 1837 yyvsp -= yylen;
@@ -1747,12 +1871,36 @@ yyerrlab:
1747 1871
1748 if (YYPACT_NINF < yyn && yyn < YYLAST) 1872 if (YYPACT_NINF < yyn && yyn < YYLAST)
1749 { 1873 {
1750 YYSIZE_T yysize = 0;
1751 int yytype = YYTRANSLATE (yychar); 1874 int yytype = YYTRANSLATE (yychar);
1752 const char* yyprefix; 1875 YYSIZE_T yysize0 = yytnamerr (0, yytname[yytype]);
1753 char *yymsg; 1876 YYSIZE_T yysize = yysize0;
1877 YYSIZE_T yysize1;
1878 int yysize_overflow = 0;
1879 char *yymsg = 0;
1880# define YYERROR_VERBOSE_ARGS_MAXIMUM 5
1881 char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
1754 int yyx; 1882 int yyx;
1755 1883
1884#if 0
1885 /* This is so xgettext sees the translatable formats that are
1886 constructed on the fly. */
1887 YY_("syntax error, unexpected %s");
1888 YY_("syntax error, unexpected %s, expecting %s");
1889 YY_("syntax error, unexpected %s, expecting %s or %s");
1890 YY_("syntax error, unexpected %s, expecting %s or %s or %s");
1891 YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s");
1892#endif
1893 char *yyfmt;
1894 char const *yyf;
1895 static char const yyunexpected[] = "syntax error, unexpected %s";
1896 static char const yyexpecting[] = ", expecting %s";
1897 static char const yyor[] = " or %s";
1898 char yyformat[sizeof yyunexpected
1899 + sizeof yyexpecting - 1
1900 + ((YYERROR_VERBOSE_ARGS_MAXIMUM - 2)
1901 * (sizeof yyor - 1))];
1902 char const *yyprefix = yyexpecting;
1903
1756 /* Start YYX at -YYN if negative to avoid negative indexes in 1904 /* Start YYX at -YYN if negative to avoid negative indexes in
1757 YYCHECK. */ 1905 YYCHECK. */
1758 int yyxbegin = yyn < 0 ? -yyn : 0; 1906 int yyxbegin = yyn < 0 ? -yyn : 0;
@@ -1760,48 +1908,68 @@ yyerrlab:
1760 /* Stay within bounds of both yycheck and yytname. */ 1908 /* Stay within bounds of both yycheck and yytname. */
1761 int yychecklim = YYLAST - yyn; 1909 int yychecklim = YYLAST - yyn;
1762 int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS; 1910 int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
1763 int yycount = 0; 1911 int yycount = 1;
1912
1913 yyarg[0] = yytname[yytype];
1914 yyfmt = yystpcpy (yyformat, yyunexpected);
1764 1915
1765 yyprefix = ", expecting ";
1766 for (yyx = yyxbegin; yyx < yyxend; ++yyx) 1916 for (yyx = yyxbegin; yyx < yyxend; ++yyx)
1767 if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR) 1917 if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR)
1768 { 1918 {
1769 yysize += yystrlen (yyprefix) + yystrlen (yytname [yyx]); 1919 if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
1770 yycount += 1;
1771 if (yycount == 5)
1772 { 1920 {
1773 yysize = 0; 1921 yycount = 1;
1922 yysize = yysize0;
1923 yyformat[sizeof yyunexpected - 1] = '\0';
1774 break; 1924 break;
1775 } 1925 }
1926 yyarg[yycount++] = yytname[yyx];
1927 yysize1 = yysize + yytnamerr (0, yytname[yyx]);
1928 yysize_overflow |= yysize1 < yysize;
1929 yysize = yysize1;
1930 yyfmt = yystpcpy (yyfmt, yyprefix);
1931 yyprefix = yyor;
1776 } 1932 }
1777 yysize += (sizeof ("syntax error, unexpected ")
1778 + yystrlen (yytname[yytype]));
1779 yymsg = (char *) YYSTACK_ALLOC (yysize);
1780 if (yymsg != 0)
1781 {
1782 char *yyp = yystpcpy (yymsg, "syntax error, unexpected ");
1783 yyp = yystpcpy (yyp, yytname[yytype]);
1784 1933
1785 if (yycount < 5) 1934 yyf = YY_(yyformat);
1935 yysize1 = yysize + yystrlen (yyf);
1936 yysize_overflow |= yysize1 < yysize;
1937 yysize = yysize1;
1938
1939 if (!yysize_overflow && yysize <= YYSTACK_ALLOC_MAXIMUM)
1940 yymsg = (char *) YYSTACK_ALLOC (yysize);
1941 if (yymsg)
1942 {
1943 /* Avoid sprintf, as that infringes on the user's name space.
1944 Don't have undefined behavior even if the translation
1945 produced a string with the wrong number of "%s"s. */
1946 char *yyp = yymsg;
1947 int yyi = 0;
1948 while ((*yyp = *yyf))
1786 { 1949 {
1787 yyprefix = ", expecting "; 1950 if (*yyp == '%' && yyf[1] == 's' && yyi < yycount)
1788 for (yyx = yyxbegin; yyx < yyxend; ++yyx) 1951 {
1789 if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR) 1952 yyp += yytnamerr (yyp, yyarg[yyi++]);
1790 { 1953 yyf += 2;
1791 yyp = yystpcpy (yyp, yyprefix); 1954 }
1792 yyp = yystpcpy (yyp, yytname[yyx]); 1955 else
1793 yyprefix = " or "; 1956 {
1794 } 1957 yyp++;
1958 yyf++;
1959 }
1795 } 1960 }
1796 yyerror (yymsg); 1961 yyerror (yymsg);
1797 YYSTACK_FREE (yymsg); 1962 YYSTACK_FREE (yymsg);
1798 } 1963 }
1799 else 1964 else
1800 yyerror ("syntax error; also virtual memory exhausted"); 1965 {
1966 yyerror (YY_("syntax error"));
1967 goto yyexhaustedlab;
1968 }
1801 } 1969 }
1802 else 1970 else
1803#endif /* YYERROR_VERBOSE */ 1971#endif /* YYERROR_VERBOSE */
1804 yyerror ("syntax error"); 1972 yyerror (YY_("syntax error"));
1805 } 1973 }
1806 1974
1807 1975
@@ -1813,18 +1981,9 @@ yyerrlab:
1813 1981
1814 if (yychar <= YYEOF) 1982 if (yychar <= YYEOF)
1815 { 1983 {
1816 /* If at end of input, pop the error token, 1984 /* Return failure if at end of input. */
1817 then the rest of the stack, then return failure. */
1818 if (yychar == YYEOF) 1985 if (yychar == YYEOF)
1819 for (;;) 1986 YYABORT;
1820 {
1821
1822 YYPOPSTACK;
1823 if (yyssp == yyss)
1824 YYABORT;
1825 yydestruct ("Error: popping",
1826 yystos[*yyssp], yyvsp);
1827 }
1828 } 1987 }
1829 else 1988 else
1830 { 1989 {
@@ -1843,12 +2002,11 @@ yyerrlab:
1843`---------------------------------------------------*/ 2002`---------------------------------------------------*/
1844yyerrorlab: 2003yyerrorlab:
1845 2004
1846#ifdef __GNUC__ 2005 /* Pacify compilers like GCC when the user code never invokes
1847 /* Pacify GCC when the user code never invokes YYERROR and the label 2006 YYERROR and the label yyerrorlab therefore never appears in user
1848 yyerrorlab therefore never appears in user code. */ 2007 code. */
1849 if (0) 2008 if (0)
1850 goto yyerrorlab; 2009 goto yyerrorlab;
1851#endif
1852 2010
1853yyvsp -= yylen; 2011yyvsp -= yylen;
1854 yyssp -= yylen; 2012 yyssp -= yylen;
@@ -1911,23 +2069,29 @@ yyacceptlab:
1911| yyabortlab -- YYABORT comes here. | 2069| yyabortlab -- YYABORT comes here. |
1912`-----------------------------------*/ 2070`-----------------------------------*/
1913yyabortlab: 2071yyabortlab:
1914 yydestruct ("Error: discarding lookahead",
1915 yytoken, &yylval);
1916 yychar = YYEMPTY;
1917 yyresult = 1; 2072 yyresult = 1;
1918 goto yyreturn; 2073 goto yyreturn;
1919 2074
1920#ifndef yyoverflow 2075#ifndef yyoverflow
1921/*----------------------------------------------. 2076/*-------------------------------------------------.
1922| yyoverflowlab -- parser overflow comes here. | 2077| yyexhaustedlab -- memory exhaustion comes here. |
1923`----------------------------------------------*/ 2078`-------------------------------------------------*/
1924yyoverflowlab: 2079yyexhaustedlab:
1925 yyerror ("parser stack overflow"); 2080 yyerror (YY_("memory exhausted"));
1926 yyresult = 2; 2081 yyresult = 2;
1927 /* Fall through. */ 2082 /* Fall through. */
1928#endif 2083#endif
1929 2084
1930yyreturn: 2085yyreturn:
2086 if (yychar != YYEOF && yychar != YYEMPTY)
2087 yydestruct ("Cleanup: discarding lookahead",
2088 yytoken, &yylval);
2089 while (yyssp != yyss)
2090 {
2091 yydestruct ("Cleanup: popping",
2092 yystos[*yyssp], yyvsp);
2093 YYPOPSTACK;
2094 }
1931#ifndef yyoverflow 2095#ifndef yyoverflow
1932 if (yyss != yyssa) 2096 if (yyss != yyssa)
1933 YYSTACK_FREE (yyss); 2097 YYSTACK_FREE (yyss);
@@ -1948,7 +2112,9 @@ void conf_parse(const char *name)
1948 2112
1949 sym_init(); 2113 sym_init();
1950 menu_init(); 2114 menu_init();
1951 modules_sym = sym_lookup("MODULES", 0); 2115 modules_sym = sym_lookup(NULL, 0);
2116 modules_sym->type = S_BOOLEAN;
2117 modules_sym->flags |= SYMBOL_AUTO;
1952 rootmenu.prompt = menu_add_prompt(P_MENU, "Linux Kernel Configuration", NULL); 2118 rootmenu.prompt = menu_add_prompt(P_MENU, "Linux Kernel Configuration", NULL);
1953 2119
1954#if YYDEBUG 2120#if YYDEBUG
@@ -1958,6 +2124,12 @@ void conf_parse(const char *name)
1958 zconfparse(); 2124 zconfparse();
1959 if (zconfnerrs) 2125 if (zconfnerrs)
1960 exit(1); 2126 exit(1);
2127 if (!modules_sym->prop) {
2128 struct property *prop;
2129
2130 prop = prop_alloc(P_DEFAULT, modules_sym);
2131 prop->expr = expr_alloc_symbol(sym_lookup("MODULES", 0));
2132 }
1961 menu_finalize(&rootmenu); 2133 menu_finalize(&rootmenu);
1962 for_all_symbols(i, sym) { 2134 for_all_symbols(i, sym) {
1963 sym_check_deps(sym); 2135 sym_check_deps(sym);
diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y
index 1f61fba6aa..ab44feb3c6 100644
--- a/scripts/kconfig/zconf.y
+++ b/scripts/kconfig/zconf.y
@@ -71,6 +71,7 @@ static struct menu *current_menu, *current_entry;
71%token <id>T_DEFAULT 71%token <id>T_DEFAULT
72%token <id>T_SELECT 72%token <id>T_SELECT
73%token <id>T_RANGE 73%token <id>T_RANGE
74%token <id>T_OPTION
74%token <id>T_ON 75%token <id>T_ON
75%token <string> T_WORD 76%token <string> T_WORD
76%token <string> T_WORD_QUOTE 77%token <string> T_WORD_QUOTE
@@ -91,6 +92,7 @@ static struct menu *current_menu, *current_entry;
91%type <id> end 92%type <id> end
92%type <id> option_name 93%type <id> option_name
93%type <menu> if_entry menu_entry choice_entry 94%type <menu> if_entry menu_entry choice_entry
95%type <string> symbol_option_arg
94 96
95%destructor { 97%destructor {
96 fprintf(stderr, "%s:%d: missing end statement for this entry\n", 98 fprintf(stderr, "%s:%d: missing end statement for this entry\n",
@@ -173,6 +175,7 @@ menuconfig_stmt: menuconfig_entry_start config_option_list
173config_option_list: 175config_option_list:
174 /* empty */ 176 /* empty */
175 | config_option_list config_option 177 | config_option_list config_option
178 | config_option_list symbol_option
176 | config_option_list depends 179 | config_option_list depends
177 | config_option_list help 180 | config_option_list help
178 | config_option_list option_error 181 | config_option_list option_error
@@ -215,6 +218,26 @@ config_option: T_RANGE symbol symbol if_expr T_EOL
215 printd(DEBUG_PARSE, "%s:%d:range\n", zconf_curname(), zconf_lineno()); 218 printd(DEBUG_PARSE, "%s:%d:range\n", zconf_curname(), zconf_lineno());
216}; 219};
217 220
221symbol_option: T_OPTION symbol_option_list T_EOL
222;
223
224symbol_option_list:
225 /* empty */
226 | symbol_option_list T_WORD symbol_option_arg
227{
228 struct kconf_id *id = kconf_id_lookup($2, strlen($2));
229 if (id && id->flags & TF_OPTION)
230 menu_add_option(id->token, $3);
231 else
232 zconfprint("warning: ignoring unknown option %s", $2);
233 free($2);
234};
235
236symbol_option_arg:
237 /* empty */ { $$ = NULL; }
238 | T_EQUAL prompt { $$ = $2; }
239;
240
218/* choice entry */ 241/* choice entry */
219 242
220choice: T_CHOICE T_EOL 243choice: T_CHOICE T_EOL
@@ -458,7 +481,9 @@ void conf_parse(const char *name)
458 481
459 sym_init(); 482 sym_init();
460 menu_init(); 483 menu_init();
461 modules_sym = sym_lookup("MODULES", 0); 484 modules_sym = sym_lookup(NULL, 0);
485 modules_sym->type = S_BOOLEAN;
486 modules_sym->flags |= SYMBOL_AUTO;
462 rootmenu.prompt = menu_add_prompt(P_MENU, "Linux Kernel Configuration", NULL); 487 rootmenu.prompt = menu_add_prompt(P_MENU, "Linux Kernel Configuration", NULL);
463 488
464#if YYDEBUG 489#if YYDEBUG
@@ -468,6 +493,12 @@ void conf_parse(const char *name)
468 zconfparse(); 493 zconfparse();
469 if (zconfnerrs) 494 if (zconfnerrs)
470 exit(1); 495 exit(1);
496 if (!modules_sym->prop) {
497 struct property *prop;
498
499 prop = prop_alloc(P_DEFAULT, modules_sym);
500 prop->expr = expr_alloc_symbol(sym_lookup("MODULES", 0));
501 }
471 menu_finalize(&rootmenu); 502 menu_finalize(&rootmenu);
472 for_all_symbols(i, sym) { 503 for_all_symbols(i, sym) {
473 sym_check_deps(sym); 504 sym_check_deps(sym);
diff --git a/scripts/mod/mk_elfconfig.c b/scripts/mod/mk_elfconfig.c
index 3c92c83733..725d61c0fb 100644
--- a/scripts/mod/mk_elfconfig.c
+++ b/scripts/mod/mk_elfconfig.c
@@ -28,7 +28,7 @@ main(int argc, char **argv)
28 printf("#define KERNEL_ELFCLASS ELFCLASS64\n"); 28 printf("#define KERNEL_ELFCLASS ELFCLASS64\n");
29 break; 29 break;
30 default: 30 default:
31 abort(); 31 exit(1);
32 } 32 }
33 switch (ei[EI_DATA]) { 33 switch (ei[EI_DATA]) {
34 case ELFDATA2LSB: 34 case ELFDATA2LSB:
@@ -38,7 +38,7 @@ main(int argc, char **argv)
38 printf("#define KERNEL_ELFDATA ELFDATA2MSB\n"); 38 printf("#define KERNEL_ELFDATA ELFDATA2MSB\n");
39 break; 39 break;
40 default: 40 default:
41 abort(); 41 exit(1);
42 } 42 }
43 43
44 if (sizeof(unsigned long) == 4) { 44 if (sizeof(unsigned long) == 4) {
@@ -53,7 +53,7 @@ main(int argc, char **argv)
53 else if (memcmp(endian_test.c, "\x02\x01", 2) == 0) 53 else if (memcmp(endian_test.c, "\x02\x01", 2) == 0)
54 printf("#define HOST_ELFDATA ELFDATA2LSB\n"); 54 printf("#define HOST_ELFDATA ELFDATA2LSB\n");
55 else 55 else
56 abort(); 56 exit(1);
57 57
58 if ((strcmp(argv[1], "v850") == 0) || (strcmp(argv[1], "h8300") == 0)) 58 if ((strcmp(argv[1], "v850") == 0) || (strcmp(argv[1], "h8300") == 0))
59 printf("#define MODULE_SYMBOL_PREFIX \"_\"\n"); 59 printf("#define MODULE_SYMBOL_PREFIX \"_\"\n");
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index d0f86ed43f..0dd1617764 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -13,6 +13,7 @@
13 13
14#include <ctype.h> 14#include <ctype.h>
15#include "modpost.h" 15#include "modpost.h"
16#include "../../include/linux/license.h"
16 17
17/* Are we using CONFIG_MODVERSIONS? */ 18/* Are we using CONFIG_MODVERSIONS? */
18int modversions = 0; 19int modversions = 0;
@@ -22,6 +23,8 @@ int have_vmlinux = 0;
22static int all_versions = 0; 23static int all_versions = 0;
23/* If we are modposting external module set to 1 */ 24/* If we are modposting external module set to 1 */
24static int external_module = 0; 25static int external_module = 0;
26/* How a symbol is exported */
27enum export {export_plain, export_gpl, export_gpl_future, export_unknown};
25 28
26void fatal(const char *fmt, ...) 29void fatal(const char *fmt, ...)
27{ 30{
@@ -97,6 +100,7 @@ static struct module *new_module(char *modname)
97 100
98 /* add to list */ 101 /* add to list */
99 mod->name = p; 102 mod->name = p;
103 mod->gpl_compatible = -1;
100 mod->next = modules; 104 mod->next = modules;
101 modules = mod; 105 modules = mod;
102 106
@@ -118,6 +122,7 @@ struct symbol {
118 unsigned int kernel:1; /* 1 if symbol is from kernel 122 unsigned int kernel:1; /* 1 if symbol is from kernel
119 * (only for external modules) **/ 123 * (only for external modules) **/
120 unsigned int preloaded:1; /* 1 if symbol from Module.symvers */ 124 unsigned int preloaded:1; /* 1 if symbol from Module.symvers */
125 enum export export; /* Type of export */
121 char name[0]; 126 char name[0];
122}; 127};
123 128
@@ -153,7 +158,8 @@ static struct symbol *alloc_symbol(const char *name, unsigned int weak,
153} 158}
154 159
155/* For the hash of exported symbols */ 160/* For the hash of exported symbols */
156static struct symbol *new_symbol(const char *name, struct module *module) 161static struct symbol *new_symbol(const char *name, struct module *module,
162 enum export export)
157{ 163{
158 unsigned int hash; 164 unsigned int hash;
159 struct symbol *new; 165 struct symbol *new;
@@ -161,6 +167,7 @@ static struct symbol *new_symbol(const char *name, struct module *module)
161 hash = tdb_hash(name) % SYMBOL_HASH_SIZE; 167 hash = tdb_hash(name) % SYMBOL_HASH_SIZE;
162 new = symbolhash[hash] = alloc_symbol(name, 0, symbolhash[hash]); 168 new = symbolhash[hash] = alloc_symbol(name, 0, symbolhash[hash]);
163 new->module = module; 169 new->module = module;
170 new->export = export;
164 return new; 171 return new;
165} 172}
166 173
@@ -179,16 +186,55 @@ static struct symbol *find_symbol(const char *name)
179 return NULL; 186 return NULL;
180} 187}
181 188
189static struct {
190 const char *str;
191 enum export export;
192} export_list[] = {
193 { .str = "EXPORT_SYMBOL", .export = export_plain },
194 { .str = "EXPORT_SYMBOL_GPL", .export = export_gpl },
195 { .str = "EXPORT_SYMBOL_GPL_FUTURE", .export = export_gpl_future },
196 { .str = "(unknown)", .export = export_unknown },
197};
198
199
200static const char *export_str(enum export ex)
201{
202 return export_list[ex].str;
203}
204
205static enum export export_no(const char * s)
206{
207 int i;
208 for (i = 0; export_list[i].export != export_unknown; i++) {
209 if (strcmp(export_list[i].str, s) == 0)
210 return export_list[i].export;
211 }
212 return export_unknown;
213}
214
215static enum export export_from_sec(struct elf_info *elf, Elf_Section sec)
216{
217 if (sec == elf->export_sec)
218 return export_plain;
219 else if (sec == elf->export_gpl_sec)
220 return export_gpl;
221 else if (sec == elf->export_gpl_future_sec)
222 return export_gpl_future;
223 else
224 return export_unknown;
225}
226
182/** 227/**
183 * Add an exported symbol - it may have already been added without a 228 * Add an exported symbol - it may have already been added without a
184 * CRC, in this case just update the CRC 229 * CRC, in this case just update the CRC
185 **/ 230 **/
186static struct symbol *sym_add_exported(const char *name, struct module *mod) 231static struct symbol *sym_add_exported(const char *name, struct module *mod,
232 enum export export)
187{ 233{
188 struct symbol *s = find_symbol(name); 234 struct symbol *s = find_symbol(name);
189 235
190 if (!s) { 236 if (!s) {
191 s = new_symbol(name, mod); 237 s = new_symbol(name, mod, export);
192 } else { 238 } else {
193 if (!s->preloaded) { 239 if (!s->preloaded) {
194 warn("%s: '%s' exported twice. Previous export " 240 warn("%s: '%s' exported twice. Previous export "
@@ -200,16 +246,17 @@ static struct symbol *sym_add_exported(const char *name, struct module *mod)
200 s->preloaded = 0; 246 s->preloaded = 0;
201 s->vmlinux = is_vmlinux(mod->name); 247 s->vmlinux = is_vmlinux(mod->name);
202 s->kernel = 0; 248 s->kernel = 0;
249 s->export = export;
203 return s; 250 return s;
204} 251}
205 252
206static void sym_update_crc(const char *name, struct module *mod, 253static void sym_update_crc(const char *name, struct module *mod,
207 unsigned int crc) 254 unsigned int crc, enum export export)
208{ 255{
209 struct symbol *s = find_symbol(name); 256 struct symbol *s = find_symbol(name);
210 257
211 if (!s) 258 if (!s)
212 s = new_symbol(name, mod); 259 s = new_symbol(name, mod, export);
213 s->crc = crc; 260 s->crc = crc;
214 s->crc_valid = 1; 261 s->crc_valid = 1;
215} 262}
@@ -283,7 +330,7 @@ static void parse_elf(struct elf_info *info, const char *filename)
283 hdr = grab_file(filename, &info->size); 330 hdr = grab_file(filename, &info->size);
284 if (!hdr) { 331 if (!hdr) {
285 perror(filename); 332 perror(filename);
286 abort(); 333 exit(1);
287 } 334 }
288 info->hdr = hdr; 335 info->hdr = hdr;
289 if (info->size < sizeof(*hdr)) 336 if (info->size < sizeof(*hdr))
@@ -309,13 +356,21 @@ static void parse_elf(struct elf_info *info, const char *filename)
309 for (i = 1; i < hdr->e_shnum; i++) { 356 for (i = 1; i < hdr->e_shnum; i++) {
310 const char *secstrings 357 const char *secstrings
311 = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; 358 = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
359 const char *secname;
312 360
313 if (sechdrs[i].sh_offset > info->size) 361 if (sechdrs[i].sh_offset > info->size)
314 goto truncated; 362 goto truncated;
315 if (strcmp(secstrings+sechdrs[i].sh_name, ".modinfo") == 0) { 363 secname = secstrings + sechdrs[i].sh_name;
364 if (strcmp(secname, ".modinfo") == 0) {
316 info->modinfo = (void *)hdr + sechdrs[i].sh_offset; 365 info->modinfo = (void *)hdr + sechdrs[i].sh_offset;
317 info->modinfo_len = sechdrs[i].sh_size; 366 info->modinfo_len = sechdrs[i].sh_size;
318 } 367 } else if (strcmp(secname, "__ksymtab") == 0)
368 info->export_sec = i;
369 else if (strcmp(secname, "__ksymtab_gpl") == 0)
370 info->export_gpl_sec = i;
371 else if (strcmp(secname, "__ksymtab_gpl_future") == 0)
372 info->export_gpl_future_sec = i;
373
319 if (sechdrs[i].sh_type != SHT_SYMTAB) 374 if (sechdrs[i].sh_type != SHT_SYMTAB)
320 continue; 375 continue;
321 376
@@ -353,6 +408,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
353 Elf_Sym *sym, const char *symname) 408 Elf_Sym *sym, const char *symname)
354{ 409{
355 unsigned int crc; 410 unsigned int crc;
411 enum export export = export_from_sec(info, sym->st_shndx);
356 412
357 switch (sym->st_shndx) { 413 switch (sym->st_shndx) {
358 case SHN_COMMON: 414 case SHN_COMMON:
@@ -362,7 +418,8 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
362 /* CRC'd symbol */ 418 /* CRC'd symbol */
363 if (memcmp(symname, CRC_PFX, strlen(CRC_PFX)) == 0) { 419 if (memcmp(symname, CRC_PFX, strlen(CRC_PFX)) == 0) {
364 crc = (unsigned int) sym->st_value; 420 crc = (unsigned int) sym->st_value;
365 sym_update_crc(symname + strlen(CRC_PFX), mod, crc); 421 sym_update_crc(symname + strlen(CRC_PFX), mod, crc,
422 export);
366 } 423 }
367 break; 424 break;
368 case SHN_UNDEF: 425 case SHN_UNDEF:
@@ -406,7 +463,8 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
406 default: 463 default:
407 /* All exported symbols */ 464 /* All exported symbols */
408 if (memcmp(symname, KSYMTAB_PFX, strlen(KSYMTAB_PFX)) == 0) { 465 if (memcmp(symname, KSYMTAB_PFX, strlen(KSYMTAB_PFX)) == 0) {
409 sym_add_exported(symname + strlen(KSYMTAB_PFX), mod); 466 sym_add_exported(symname + strlen(KSYMTAB_PFX), mod,
467 export);
410 } 468 }
411 if (strcmp(symname, MODULE_SYMBOL_PREFIX "init_module") == 0) 469 if (strcmp(symname, MODULE_SYMBOL_PREFIX "init_module") == 0)
412 mod->has_init = 1; 470 mod->has_init = 1;
@@ -437,13 +495,18 @@ static char *next_string(char *string, unsigned long *secsize)
437 return string; 495 return string;
438} 496}
439 497
440static char *get_modinfo(void *modinfo, unsigned long modinfo_len, 498static char *get_next_modinfo(void *modinfo, unsigned long modinfo_len,
441 const char *tag) 499 const char *tag, char *info)
442{ 500{
443 char *p; 501 char *p;
444 unsigned int taglen = strlen(tag); 502 unsigned int taglen = strlen(tag);
445 unsigned long size = modinfo_len; 503 unsigned long size = modinfo_len;
446 504
505 if (info) {
506 size -= info - (char *)modinfo;
507 modinfo = next_string(info, &size);
508 }
509
447 for (p = modinfo; p; p = next_string(p, &size)) { 510 for (p = modinfo; p; p = next_string(p, &size)) {
448 if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=') 511 if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
449 return p + taglen + 1; 512 return p + taglen + 1;
@@ -451,6 +514,13 @@ static char *get_modinfo(void *modinfo, unsigned long modinfo_len,
451 return NULL; 514 return NULL;
452} 515}
453 516
517static char *get_modinfo(void *modinfo, unsigned long modinfo_len,
518 const char *tag)
519
520{
521 return get_next_modinfo(modinfo, modinfo_len, tag, NULL);
522}
523
454/** 524/**
455 * Test if string s ends in string sub 525 * Test if string s ends in string sub
456 * return 0 if match 526 * return 0 if match
@@ -821,6 +891,10 @@ static int init_section_ref_ok(const char *name)
821 ".pci_fixup_final", 891 ".pci_fixup_final",
822 ".pdr", 892 ".pdr",
823 "__param", 893 "__param",
894 "__ex_table",
895 ".fixup",
896 ".smp_locks",
897 ".plt", /* seen on ARCH=um build on x86_64. Harmless */
824 NULL 898 NULL
825 }; 899 };
826 /* Start of section names */ 900 /* Start of section names */
@@ -846,6 +920,8 @@ static int init_section_ref_ok(const char *name)
846 for (s = namelist3; *s; s++) 920 for (s = namelist3; *s; s++)
847 if (strstr(name, *s) != NULL) 921 if (strstr(name, *s) != NULL)
848 return 1; 922 return 1;
923 if (strrcmp(name, ".init") == 0)
924 return 1;
849 return 0; 925 return 0;
850} 926}
851 927
@@ -892,6 +968,10 @@ static int exit_section_ref_ok(const char *name)
892 ".exitcall.exit", 968 ".exitcall.exit",
893 ".eh_frame", 969 ".eh_frame",
894 ".stab", 970 ".stab",
971 "__ex_table",
972 ".fixup",
973 ".smp_locks",
974 ".plt", /* seen on ARCH=um build on x86_64. Harmless */
895 NULL 975 NULL
896 }; 976 };
897 /* Start of section names */ 977 /* Start of section names */
@@ -921,6 +1001,7 @@ static void read_symbols(char *modname)
921{ 1001{
922 const char *symname; 1002 const char *symname;
923 char *version; 1003 char *version;
1004 char *license;
924 struct module *mod; 1005 struct module *mod;
925 struct elf_info info = { }; 1006 struct elf_info info = { };
926 Elf_Sym *sym; 1007 Elf_Sym *sym;
@@ -936,6 +1017,18 @@ static void read_symbols(char *modname)
936 mod->skip = 1; 1017 mod->skip = 1;
937 } 1018 }
938 1019
1020 license = get_modinfo(info.modinfo, info.modinfo_len, "license");
1021 while (license) {
1022 if (license_is_gpl_compatible(license))
1023 mod->gpl_compatible = 1;
1024 else {
1025 mod->gpl_compatible = 0;
1026 break;
1027 }
1028 license = get_next_modinfo(info.modinfo, info.modinfo_len,
1029 "license", license);
1030 }
1031
939 for (sym = info.symtab_start; sym < info.symtab_stop; sym++) { 1032 for (sym = info.symtab_start; sym < info.symtab_stop; sym++) {
940 symname = info.strtab + sym->st_name; 1033 symname = info.strtab + sym->st_name;
941 1034
@@ -992,6 +1085,41 @@ void buf_write(struct buffer *buf, const char *s, int len)
992 buf->pos += len; 1085 buf->pos += len;
993} 1086}
994 1087
1088void check_license(struct module *mod)
1089{
1090 struct symbol *s, *exp;
1091
1092 for (s = mod->unres; s; s = s->next) {
1093 const char *basename;
1094 if (mod->gpl_compatible == 1) {
1095 /* GPL-compatible modules may use all symbols */
1096 continue;
1097 }
1098 exp = find_symbol(s->name);
1099 if (!exp || exp->module == mod)
1100 continue;
1101 basename = strrchr(mod->name, '/');
1102 if (basename)
1103 basename++;
1104 switch (exp->export) {
1105 case export_gpl:
1106 fatal("modpost: GPL-incompatible module %s "
1107 "uses GPL-only symbol '%s'\n",
1108 basename ? basename : mod->name,
1109 exp->name);
1110 break;
1111 case export_gpl_future:
1112 warn("modpost: GPL-incompatible module %s "
1113 "uses future GPL-only symbol '%s'\n",
1114 basename ? basename : mod->name,
1115 exp->name);
1116 break;
1117 case export_plain: /* ignore */ break;
1118 case export_unknown: /* ignore */ break;
1119 }
1120 }
1121}
1122
995/** 1123/**
996 * Header for the generated file 1124 * Header for the generated file
997 **/ 1125 **/
@@ -1142,6 +1270,9 @@ static void write_if_changed(struct buffer *b, const char *fname)
1142 fclose(file); 1270 fclose(file);
1143} 1271}
1144 1272
1273/* parse Module.symvers file. line format:
1274 * 0x12345678<tab>symbol<tab>module[<tab>export]
1275 **/
1145static void read_dump(const char *fname, unsigned int kernel) 1276static void read_dump(const char *fname, unsigned int kernel)
1146{ 1277{
1147 unsigned long size, pos = 0; 1278 unsigned long size, pos = 0;
@@ -1153,7 +1284,7 @@ static void read_dump(const char *fname, unsigned int kernel)
1153 return; 1284 return;
1154 1285
1155 while ((line = get_next_line(&pos, file, size))) { 1286 while ((line = get_next_line(&pos, file, size))) {
1156 char *symname, *modname, *d; 1287 char *symname, *modname, *d, *export;
1157 unsigned int crc; 1288 unsigned int crc;
1158 struct module *mod; 1289 struct module *mod;
1159 struct symbol *s; 1290 struct symbol *s;
@@ -1164,8 +1295,9 @@ static void read_dump(const char *fname, unsigned int kernel)
1164 if (!(modname = strchr(symname, '\t'))) 1295 if (!(modname = strchr(symname, '\t')))
1165 goto fail; 1296 goto fail;
1166 *modname++ = '\0'; 1297 *modname++ = '\0';
1167 if (strchr(modname, '\t')) 1298 if ((export = strchr(modname, '\t')) != NULL)
1168 goto fail; 1299 *export++ = '\0';
1300
1169 crc = strtoul(line, &d, 16); 1301 crc = strtoul(line, &d, 16);
1170 if (*symname == '\0' || *modname == '\0' || *d != '\0') 1302 if (*symname == '\0' || *modname == '\0' || *d != '\0')
1171 goto fail; 1303 goto fail;
@@ -1177,10 +1309,10 @@ static void read_dump(const char *fname, unsigned int kernel)
1177 mod = new_module(NOFAIL(strdup(modname))); 1309 mod = new_module(NOFAIL(strdup(modname)));
1178 mod->skip = 1; 1310 mod->skip = 1;
1179 } 1311 }
1180 s = sym_add_exported(symname, mod); 1312 s = sym_add_exported(symname, mod, export_no(export));
1181 s->kernel = kernel; 1313 s->kernel = kernel;
1182 s->preloaded = 1; 1314 s->preloaded = 1;
1183 sym_update_crc(symname, mod, crc); 1315 sym_update_crc(symname, mod, crc, export_no(export));
1184 } 1316 }
1185 return; 1317 return;
1186fail: 1318fail:
@@ -1210,9 +1342,10 @@ static void write_dump(const char *fname)
1210 symbol = symbolhash[n]; 1342 symbol = symbolhash[n];
1211 while (symbol) { 1343 while (symbol) {
1212 if (dump_sym(symbol)) 1344 if (dump_sym(symbol))
1213 buf_printf(&buf, "0x%08x\t%s\t%s\n", 1345 buf_printf(&buf, "0x%08x\t%s\t%s\t%s\n",
1214 symbol->crc, symbol->name, 1346 symbol->crc, symbol->name,
1215 symbol->module->name); 1347 symbol->module->name,
1348 export_str(symbol->export));
1216 symbol = symbol->next; 1349 symbol = symbol->next;
1217 } 1350 }
1218 } 1351 }
@@ -1263,6 +1396,12 @@ int main(int argc, char **argv)
1263 for (mod = modules; mod; mod = mod->next) { 1396 for (mod = modules; mod; mod = mod->next) {
1264 if (mod->skip) 1397 if (mod->skip)
1265 continue; 1398 continue;
1399 check_license(mod);
1400 }
1401
1402 for (mod = modules; mod; mod = mod->next) {
1403 if (mod->skip)
1404 continue;
1266 1405
1267 buf.pos = 0; 1406 buf.pos = 0;
1268 1407
diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
index 861d866fcd..2b00c60628 100644
--- a/scripts/mod/modpost.h
+++ b/scripts/mod/modpost.h
@@ -100,6 +100,7 @@ buf_write(struct buffer *buf, const char *s, int len);
100struct module { 100struct module {
101 struct module *next; 101 struct module *next;
102 const char *name; 102 const char *name;
103 int gpl_compatible;
103 struct symbol *unres; 104 struct symbol *unres;
104 int seen; 105 int seen;
105 int skip; 106 int skip;
@@ -115,6 +116,9 @@ struct elf_info {
115 Elf_Shdr *sechdrs; 116 Elf_Shdr *sechdrs;
116 Elf_Sym *symtab_start; 117 Elf_Sym *symtab_start;
117 Elf_Sym *symtab_stop; 118 Elf_Sym *symtab_stop;
119 Elf_Section export_sec;
120 Elf_Section export_gpl_sec;
121 Elf_Section export_gpl_future_sec;
118 const char *strtab; 122 const char *strtab;
119 char *modinfo; 123 char *modinfo;
120 unsigned int modinfo_len; 124 unsigned int modinfo_len;
diff --git a/scripts/package/mkspec b/scripts/package/mkspec
index 0b10387375..df892841b1 100755
--- a/scripts/package/mkspec
+++ b/scripts/package/mkspec
@@ -73,8 +73,13 @@ echo "%ifarch ia64"
73echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/efi/vmlinuz-$KERNELRELEASE" 73echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/efi/vmlinuz-$KERNELRELEASE"
74echo 'ln -s '"efi/vmlinuz-$KERNELRELEASE" '$RPM_BUILD_ROOT'"/boot/" 74echo 'ln -s '"efi/vmlinuz-$KERNELRELEASE" '$RPM_BUILD_ROOT'"/boot/"
75echo "%else" 75echo "%else"
76echo "%ifarch ppc64"
77echo "cp vmlinux arch/powerpc/boot"
78echo "cp arch/powerpc/boot/"'$KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/vmlinuz-$KERNELRELEASE"
79echo "%else"
76echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/vmlinuz-$KERNELRELEASE" 80echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/vmlinuz-$KERNELRELEASE"
77echo "%endif" 81echo "%endif"
82echo "%endif"
78 83
79echo 'cp System.map $RPM_BUILD_ROOT'"/boot/System.map-$KERNELRELEASE" 84echo 'cp System.map $RPM_BUILD_ROOT'"/boot/System.map-$KERNELRELEASE"
80 85
diff --git a/scripts/rt-tester/check-all.sh b/scripts/rt-tester/check-all.sh
new file mode 100644
index 0000000000..43098afe74
--- /dev/null
+++ b/scripts/rt-tester/check-all.sh
@@ -0,0 +1,22 @@
1
2
3function testit ()
4{
5 printf "%-30s: " $1
6 ./rt-tester.py $1 | grep Pass
7}
8
9testit t2-l1-2rt-sameprio.tst
10testit t2-l1-pi.tst
11testit t2-l1-signal.tst
12#testit t2-l2-2rt-deadlock.tst
13testit t3-l1-pi-1rt.tst
14testit t3-l1-pi-2rt.tst
15testit t3-l1-pi-3rt.tst
16testit t3-l1-pi-signal.tst
17testit t3-l1-pi-steal.tst
18testit t3-l2-pi.tst
19testit t4-l2-pi-deboost.tst
20testit t5-l4-pi-boost-deboost.tst
21testit t5-l4-pi-boost-deboost-setsched.tst
22
diff --git a/scripts/rt-tester/rt-tester.py b/scripts/rt-tester/rt-tester.py
new file mode 100644
index 0000000000..4c79660793
--- /dev/null
+++ b/scripts/rt-tester/rt-tester.py
@@ -0,0 +1,222 @@
1#!/usr/bin/env python
2#
3# rt-mutex tester
4#
5# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
6#
7# This program is free software; you can redistribute it and/or modify
8# it under the terms of the GNU General Public License version 2 as
9# published by the Free Software Foundation.
10#
11import os
12import sys
13import getopt
14import shutil
15import string
16
17# Globals
18quiet = 0
19test = 0
20comments = 0
21
22sysfsprefix = "/sys/devices/system/rttest/rttest"
23statusfile = "/status"
24commandfile = "/command"
25
26# Command opcodes
27cmd_opcodes = {
28 "schedother" : "1",
29 "schedfifo" : "2",
30 "lock" : "3",
31 "locknowait" : "4",
32 "lockint" : "5",
33 "lockintnowait" : "6",
34 "lockcont" : "7",
35 "unlock" : "8",
36 "lockbkl" : "9",
37 "unlockbkl" : "10",
38 "signal" : "11",
39 "resetevent" : "98",
40 "reset" : "99",
41 }
42
43test_opcodes = {
44 "prioeq" : ["P" , "eq" , None],
45 "priolt" : ["P" , "lt" , None],
46 "priogt" : ["P" , "gt" , None],
47 "nprioeq" : ["N" , "eq" , None],
48 "npriolt" : ["N" , "lt" , None],
49 "npriogt" : ["N" , "gt" , None],
50 "unlocked" : ["M" , "eq" , 0],
51 "trylock" : ["M" , "eq" , 1],
52 "blocked" : ["M" , "eq" , 2],
53 "blockedwake" : ["M" , "eq" , 3],
54 "locked" : ["M" , "eq" , 4],
55 "opcodeeq" : ["O" , "eq" , None],
56 "opcodelt" : ["O" , "lt" , None],
57 "opcodegt" : ["O" , "gt" , None],
58 "eventeq" : ["E" , "eq" , None],
59 "eventlt" : ["E" , "lt" , None],
60 "eventgt" : ["E" , "gt" , None],
61 }
62
63# Print usage information
64def usage():
65 print "rt-tester.py <-c -h -q -t> <testfile>"
66 print " -c display comments after first command"
67 print " -h help"
68 print " -q quiet mode"
69 print " -t test mode (syntax check)"
70 print " testfile: read test specification from testfile"
71 print " otherwise from stdin"
72 return
73
74# Print progress when not in quiet mode
75def progress(str):
76 if not quiet:
77 print str
78
79# Analyse a status value
80def analyse(val, top, arg):
81
82 intval = int(val)
83
84 if top[0] == "M":
85 intval = intval / (10 ** int(arg))
86 intval = intval % 10
87 argval = top[2]
88 elif top[0] == "O":
89 argval = int(cmd_opcodes.get(arg, arg))
90 else:
91 argval = int(arg)
92
93 # progress("%d %s %d" %(intval, top[1], argval))
94
95 if top[1] == "eq" and intval == argval:
96 return 1
97 if top[1] == "lt" and intval < argval:
98 return 1
99 if top[1] == "gt" and intval > argval:
100 return 1
101 return 0
102
103# Parse the commandline
104try:
105 (options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
106except getopt.GetoptError, ex:
107 usage()
108 sys.exit(1)
109
110# Parse commandline options
111for option, value in options:
112 if option == "-c":
113 comments = 1
114 elif option == "-q":
115 quiet = 1
116 elif option == "-t":
117 test = 1
118 elif option == '-h':
119 usage()
120 sys.exit(0)
121
122# Select the input source
123if arguments:
124 try:
125 fd = open(arguments[0])
126 except Exception,ex:
127 sys.stderr.write("File not found %s\n" %(arguments[0]))
128 sys.exit(1)
129else:
130 fd = sys.stdin
131
132linenr = 0
133
134# Read the test patterns
135while 1:
136
137 linenr = linenr + 1
138 line = fd.readline()
139 if not len(line):
140 break
141
142 line = line.strip()
143 parts = line.split(":")
144
145 if not parts or len(parts) < 1:
146 continue
147
148 if len(parts[0]) == 0:
149 continue
150
151 if parts[0].startswith("#"):
152 if comments > 1:
153 progress(line)
154 continue
155
156 if comments == 1:
157 comments = 2
158
159 progress(line)
160
161 cmd = parts[0].strip().lower()
162 opc = parts[1].strip().lower()
163 tid = parts[2].strip()
164 dat = parts[3].strip()
165
166 try:
167 # Test or wait for a status value
168 if cmd == "t" or cmd == "w":
169 testop = test_opcodes[opc]
170
171 fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
172 if test:
173 print fname
174 continue
175
176 while 1:
177 query = 1
178 fsta = open(fname, 'r')
179 status = fsta.readline().strip()
180 fsta.close()
181 stat = status.split(",")
182 for s in stat:
183 s = s.strip()
184 if s.startswith(testop[0]):
185 # Seperate status value
186 val = s[2:].strip()
187 query = analyse(val, testop, dat)
188 break
189 if query or cmd == "t":
190 break
191
192 progress(" " + status)
193
194 if not query:
195 sys.stderr.write("Test failed in line %d\n" %(linenr))
196 sys.exit(1)
197
198 # Issue a command to the tester
199 elif cmd == "c":
200 cmdnr = cmd_opcodes[opc]
201 # Build command string and sys filename
202 cmdstr = "%s:%s" %(cmdnr, dat)
203 fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
204 if test:
205 print fname
206 continue
207 fcmd = open(fname, 'w')
208 fcmd.write(cmdstr)
209 fcmd.close()
210
211 except Exception,ex:
212 sys.stderr.write(str(ex))
213 sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
214 if not test:
215 fd.close()
216 sys.exit(1)
217
218# Normal exit pass
219print "Pass"
220sys.exit(0)
221
222
diff --git a/scripts/rt-tester/t2-l1-2rt-sameprio.tst b/scripts/rt-tester/t2-l1-2rt-sameprio.tst
new file mode 100644
index 0000000000..8821f27cc8
--- /dev/null
+++ b/scripts/rt-tester/t2-l1-2rt-sameprio.tst
@@ -0,0 +1,99 @@
1#
2# RT-Mutex test
3#
4# Op: C(ommand)/T(est)/W(ait)
5# | opcode
6# | | threadid: 0-7
7# | | | opcode argument
8# | | | |
9# C: lock: 0: 0
10#
11# Commands
12#
13# opcode opcode argument
14# schedother nice value
15# schedfifo priority
16# lock lock nr (0-7)
17# locknowait lock nr (0-7)
18# lockint lock nr (0-7)
19# lockintnowait lock nr (0-7)
20# lockcont lock nr (0-7)
21# unlock lock nr (0-7)
22# lockbkl lock nr (0-7)
23# unlockbkl lock nr (0-7)
24# signal 0
25# reset 0
26# resetevent 0
27#
28# Tests / Wait
29#
30# opcode opcode argument
31#
32# prioeq priority
33# priolt priority
34# priogt priority
35# nprioeq normal priority
36# npriolt normal priority
37# npriogt normal priority
38# locked lock nr (0-7)
39# blocked lock nr (0-7)
40# blockedwake lock nr (0-7)
41# unlocked lock nr (0-7)
42# lockedbkl dont care
43# blockedbkl dont care
44# unlockedbkl dont care
45# opcodeeq command opcode or number
46# opcodelt number
47# opcodegt number
48# eventeq number
49# eventgt number
50# eventlt number
51
52#
53# 2 threads 1 lock
54#
55C: resetevent: 0: 0
56W: opcodeeq: 0: 0
57
58# Set schedulers
59C: schedfifo: 0: 80
60C: schedfifo: 1: 80
61
62# T0 lock L0
63C: locknowait: 0: 0
64C: locknowait: 1: 0
65W: locked: 0: 0
66W: blocked: 1: 0
67T: prioeq: 0: 80
68
69# T0 unlock L0
70C: unlock: 0: 0
71W: locked: 1: 0
72
73# Verify T0
74W: unlocked: 0: 0
75T: prioeq: 0: 80
76
77# Unlock
78C: unlock: 1: 0
79W: unlocked: 1: 0
80
81# T1,T0 lock L0
82C: locknowait: 1: 0
83C: locknowait: 0: 0
84W: locked: 1: 0
85W: blocked: 0: 0
86T: prioeq: 1: 80
87
88# T1 unlock L0
89C: unlock: 1: 0
90W: locked: 0: 0
91
92# Verify T1
93W: unlocked: 1: 0
94T: prioeq: 1: 80
95
96# Unlock and exit
97C: unlock: 0: 0
98W: unlocked: 0: 0
99
diff --git a/scripts/rt-tester/t2-l1-pi.tst b/scripts/rt-tester/t2-l1-pi.tst
new file mode 100644
index 0000000000..cde1f189a0
--- /dev/null
+++ b/scripts/rt-tester/t2-l1-pi.tst
@@ -0,0 +1,82 @@
1#
2# RT-Mutex test
3#
4# Op: C(ommand)/T(est)/W(ait)
5# | opcode
6# | | threadid: 0-7
7# | | | opcode argument
8# | | | |
9# C: lock: 0: 0
10#
11# Commands
12#
13# opcode opcode argument
14# schedother nice value
15# schedfifo priority
16# lock lock nr (0-7)
17# locknowait lock nr (0-7)
18# lockint lock nr (0-7)
19# lockintnowait lock nr (0-7)
20# lockcont lock nr (0-7)
21# unlock lock nr (0-7)
22# lockbkl lock nr (0-7)
23# unlockbkl lock nr (0-7)
24# signal 0
25# reset 0
26# resetevent 0
27#
28# Tests / Wait
29#
30# opcode opcode argument
31#
32# prioeq priority
33# priolt priority
34# priogt priority
35# nprioeq normal priority
36# npriolt normal priority
37# npriogt normal priority
38# locked lock nr (0-7)
39# blocked lock nr (0-7)
40# blockedwake lock nr (0-7)
41# unlocked lock nr (0-7)
42# lockedbkl dont care
43# blockedbkl dont care
44# unlockedbkl dont care
45# opcodeeq command opcode or number
46# opcodelt number
47# opcodegt number
48# eventeq number
49# eventgt number
50# eventlt number
51
52#
53# 2 threads 1 lock with priority inversion
54#
55C: resetevent: 0: 0
56W: opcodeeq: 0: 0
57
58# Set schedulers
59C: schedother: 0: 0
60C: schedfifo: 1: 80
61
62# T0 lock L0
63C: locknowait: 0: 0
64W: locked: 0: 0
65
66# T1 lock L0
67C: locknowait: 1: 0
68W: blocked: 1: 0
69T: prioeq: 0: 80
70
71# T0 unlock L0
72C: unlock: 0: 0
73W: locked: 1: 0
74
75# Verify T1
76W: unlocked: 0: 0
77T: priolt: 0: 1
78
79# Unlock and exit
80C: unlock: 1: 0
81W: unlocked: 1: 0
82
diff --git a/scripts/rt-tester/t2-l1-signal.tst b/scripts/rt-tester/t2-l1-signal.tst
new file mode 100644
index 0000000000..3ab0bfc499
--- /dev/null
+++ b/scripts/rt-tester/t2-l1-signal.tst
@@ -0,0 +1,77 @@
1#
2# RT-Mutex test
3#
4# Op: C(ommand)/T(est)/W(ait)
5# | opcode
6# | | threadid: 0-7
7# | | | opcode argument
8# | | | |
9# C: lock: 0: 0
10#
11# Commands
12#
13# opcode opcode argument
14# schedother nice value
15# schedfifo priority
16# lock lock nr (0-7)
17# locknowait lock nr (0-7)
18# lockint lock nr (0-7)
19# lockintnowait lock nr (0-7)
20# lockcont lock nr (0-7)
21# unlock lock nr (0-7)
22# lockbkl lock nr (0-7)
23# unlockbkl lock nr (0-7)
24# signal 0
25# reset 0
26# resetevent 0
27#
28# Tests / Wait
29#
30# opcode opcode argument
31#
32# prioeq priority
33# priolt priority
34# priogt priority
35# nprioeq normal priority
36# npriolt normal priority
37# npriogt normal priority
38# locked lock nr (0-7)
39# blocked lock nr (0-7)
40# blockedwake lock nr (0-7)
41# unlocked lock nr (0-7)
42# lockedbkl dont care
43# blockedbkl dont care
44# unlockedbkl dont care
45# opcodeeq command opcode or number
46# opcodelt number
47# opcodegt number
48# eventeq number
49# eventgt number
50# eventlt number
51
52#
53# 2 threads 1 lock with priority inversion
54#
55C: resetevent: 0: 0
56W: opcodeeq: 0: 0
57
58# Set schedulers
59C: schedother: 0: 0
60C: schedother: 1: 0
61
62# T0 lock L0
63C: locknowait: 0: 0
64W: locked: 0: 0
65
66# T1 lock L0
67C: lockintnowait: 1: 0
68W: blocked: 1: 0
69
70# Interrupt T1
71C: signal: 1: 0
72W: unlocked: 1: 0
73T: opcodeeq: 1: -4
74
75# Unlock and exit
76C: unlock: 0: 0
77W: unlocked: 0: 0
diff --git a/scripts/rt-tester/t2-l2-2rt-deadlock.tst b/scripts/rt-tester/t2-l2-2rt-deadlock.tst
new file mode 100644
index 0000000000..f4b5d5d621
--- /dev/null
+++ b/scripts/rt-tester/t2-l2-2rt-deadlock.tst
@@ -0,0 +1,89 @@
1#
2# RT-Mutex test
3#
4# Op: C(ommand)/T(est)/W(ait)
5# | opcode
6# | | threadid: 0-7
7# | | | opcode argument
8# | | | |
9# C: lock: 0: 0
10#
11# Commands
12#
13# opcode opcode argument
14# schedother nice value
15# schedfifo priority
16# lock lock nr (0-7)
17# locknowait lock nr (0-7)
18# lockint lock nr (0-7)
19# lockintnowait lock nr (0-7)
20# lockcont lock nr (0-7)
21# unlock lock nr (0-7)
22# lockbkl lock nr (0-7)
23# unlockbkl lock nr (0-7)
24# signal 0
25# reset 0
26# resetevent 0
27#
28# Tests / Wait
29#
30# opcode opcode argument
31#
32# prioeq priority
33# priolt priority
34# priogt priority
35# nprioeq normal priority
36# npriolt normal priority
37# npriogt normal priority
38# locked lock nr (0-7)
39# blocked lock nr (0-7)
40# blockedwake lock nr (0-7)
41# unlocked lock nr (0-7)
42# lockedbkl dont care
43# blockedbkl dont care
44# unlockedbkl dont care
45# opcodeeq command opcode or number
46# opcodelt number
47# opcodegt number
48# eventeq number
49# eventgt number
50# eventlt number
51
52#
53# 2 threads 2 lock
54#
55C: resetevent: 0: 0
56W: opcodeeq: 0: 0
57
58# Set schedulers
59C: schedfifo: 0: 80
60C: schedfifo: 1: 80
61
62# T0 lock L0
63C: locknowait: 0: 0
64W: locked: 0: 0
65
66# T1 lock L1
67C: locknowait: 1: 1
68W: locked: 1: 1
69
70# T0 lock L1
71C: lockintnowait: 0: 1
72W: blocked: 0: 1
73
74# T1 lock L0
75C: lockintnowait: 1: 0
76W: blocked: 1: 0
77
78# Make deadlock go away
79C: signal: 1: 0
80W: unlocked: 1: 0
81C: signal: 0: 0
82W: unlocked: 0: 1
83
84# Unlock and exit
85C: unlock: 0: 0
86W: unlocked: 0: 0
87C: unlock: 1: 1
88W: unlocked: 1: 1
89
diff --git a/scripts/rt-tester/t3-l1-pi-1rt.tst b/scripts/rt-tester/t3-l1-pi-1rt.tst
new file mode 100644
index 0000000000..63440ca2cc
--- /dev/null
+++ b/scripts/rt-tester/t3-l1-pi-1rt.tst
@@ -0,0 +1,92 @@
1#
2# rt-mutex test
3#
4# Op: C(ommand)/T(est)/W(ait)
5# | opcode
6# | | threadid: 0-7
7# | | | opcode argument
8# | | | |
9# C: lock: 0: 0
10#
11# Commands
12#
13# opcode opcode argument
14# schedother nice value
15# schedfifo priority
16# lock lock nr (0-7)
17# locknowait lock nr (0-7)
18# lockint lock nr (0-7)
19# lockintnowait lock nr (0-7)
20# lockcont lock nr (0-7)
21# unlock lock nr (0-7)
22# lockbkl lock nr (0-7)
23# unlockbkl lock nr (0-7)
24# signal thread to signal (0-7)
25# reset 0
26# resetevent 0
27#
28# Tests / Wait
29#
30# opcode opcode argument
31#
32# prioeq priority
33# priolt priority
34# priogt priority
35# nprioeq normal priority
36# npriolt normal priority
37# npriogt normal priority
38# locked lock nr (0-7)
39# blocked lock nr (0-7)
40# blockedwake lock nr (0-7)
41# unlocked lock nr (0-7)
42# lockedbkl dont care
43# blockedbkl dont care
44# unlockedbkl dont care
45# opcodeeq command opcode or number
46# opcodelt number
47# opcodegt number
48# eventeq number
49# eventgt number
50# eventlt number
51
52#
53# 3 threads 1 lock PI
54#
55C: resetevent: 0: 0
56W: opcodeeq: 0: 0
57
58# Set schedulers
59C: schedother: 0: 0
60C: schedother: 1: 0
61C: schedfifo: 2: 82
62
63# T0 lock L0
64C: locknowait: 0: 0
65W: locked: 0: 0
66
67# T1 lock L0
68C: locknowait: 1: 0
69W: blocked: 1: 0
70T: priolt: 0: 1
71
72# T2 lock L0
73C: locknowait: 2: 0
74W: blocked: 2: 0
75T: prioeq: 0: 82
76
77# T0 unlock L0
78C: unlock: 0: 0
79
80# Wait until T2 got the lock
81W: locked: 2: 0
82W: unlocked: 0: 0
83T: priolt: 0: 1
84
85# T2 unlock L0
86C: unlock: 2: 0
87
88W: unlocked: 2: 0
89W: locked: 1: 0
90
91C: unlock: 1: 0
92W: unlocked: 1: 0
diff --git a/scripts/rt-tester/t3-l1-pi-2rt.tst b/scripts/rt-tester/t3-l1-pi-2rt.tst
new file mode 100644
index 0000000000..e5816fe67d
--- /dev/null
+++ b/scripts/rt-tester/t3-l1-pi-2rt.tst
@@ -0,0 +1,93 @@
1#
2# rt-mutex test
3#
4# Op: C(ommand)/T(est)/W(ait)
5# | opcode
6# | | threadid: 0-7
7# | | | opcode argument
8# | | | |
9# C: lock: 0: 0
10#
11# Commands
12#
13# opcode opcode argument
14# schedother nice value
15# schedfifo priority
16# lock lock nr (0-7)
17# locknowait lock nr (0-7)
18# lockint lock nr (0-7)
19# lockintnowait lock nr (0-7)
20# lockcont lock nr (0-7)
21# unlock lock nr (0-7)
22# lockbkl lock nr (0-7)
23# unlockbkl lock nr (0-7)
24# signal thread to signal (0-7)
25# reset 0
26# resetevent 0
27#
28# Tests / Wait
29#
30# opcode opcode argument
31#
32# prioeq priority
33# priolt priority
34# priogt priority
35# nprioeq normal priority
36# npriolt normal priority
37# npriogt normal priority
38# locked lock nr (0-7)
39# blocked lock nr (0-7)
40# blockedwake lock nr (0-7)
41# unlocked lock nr (0-7)
42# lockedbkl dont care
43# blockedbkl dont care
44# unlockedbkl dont care
45# opcodeeq command opcode or number
46# opcodelt number
47# opcodegt number
48# eventeq number
49# eventgt number
50# eventlt number
51
52#
53# 3 threads 1 lock PI
54#
55C: resetevent: 0: 0
56W: opcodeeq: 0: 0
57
58# Set schedulers
59C: schedother: 0: 0
60C: schedfifo: 1: 81
61C: schedfifo: 2: 82
62
63# T0 lock L0
64C: locknowait: 0: 0
65W: locked: 0: 0
66
67# T1 lock L0
68C: locknowait: 1: 0
69W: blocked: 1: 0
70T: prioeq: 0: 81
71
72# T2 lock L0
73C: locknowait: 2: 0
74W: blocked: 2: 0
75T: prioeq: 0: 82
76T: prioeq: 1: 81
77
78# T0 unlock L0
79C: unlock: 0: 0
80
81# Wait until T2 got the lock
82W: locked: 2: 0
83W: unlocked: 0: 0
84T: priolt: 0: 1
85
86# T2 unlock L0
87C: unlock: 2: 0
88
89W: unlocked: 2: 0
90W: locked: 1: 0
91
92C: unlock: 1: 0
93W: unlocked: 1: 0
diff --git a/scripts/rt-tester/t3-l1-pi-3rt.tst b/scripts/rt-tester/t3-l1-pi-3rt.tst
new file mode 100644
index 0000000000..718b82b5d3
--- /dev/null
+++ b/scripts/rt-tester/t3-l1-pi-3rt.tst
@@ -0,0 +1,92 @@
1#
2# rt-mutex test
3#
4# Op: C(ommand)/T(est)/W(ait)
5# | opcode
6# | | threadid: 0-7
7# | | | opcode argument
8# | | | |
9# C: lock: 0: 0
10#
11# Commands
12#
13# opcode opcode argument
14# schedother nice value
15# schedfifo priority
16# lock lock nr (0-7)
17# locknowait lock nr (0-7)
18# lockint lock nr (0-7)
19# lockintnowait lock nr (0-7)
20# lockcont lock nr (0-7)
21# unlock lock nr (0-7)
22# lockbkl lock nr (0-7)
23# unlockbkl lock nr (0-7)
24# signal thread to signal (0-7)
25# reset 0
26# resetevent 0
27#
28# Tests / Wait
29#
30# opcode opcode argument
31#
32# prioeq priority
33# priolt priority
34# priogt priority
35# nprioeq normal priority
36# npriolt normal priority
37# npriogt normal priority
38# locked lock nr (0-7)
39# blocked lock nr (0-7)
40# blockedwake lock nr (0-7)
41# unlocked lock nr (0-7)
42# lockedbkl dont care
43# blockedbkl dont care
44# unlockedbkl dont care
45# opcodeeq command opcode or number
46# opcodelt number
47# opcodegt number
48# eventeq number
49# eventgt number
50# eventlt number
51
52#
53# 3 threads 1 lock PI
54#
55C: resetevent: 0: 0
56W: opcodeeq: 0: 0
57
58# Set schedulers
59C: schedfifo: 0: 80
60C: schedfifo: 1: 81
61C: schedfifo: 2: 82
62
63# T0 lock L0
64C: locknowait: 0: 0
65W: locked: 0: 0
66
67# T1 lock L0
68C: locknowait: 1: 0
69W: blocked: 1: 0
70T: prioeq: 0: 81
71
72# T2 lock L0
73C: locknowait: 2: 0
74W: blocked: 2: 0
75T: prioeq: 0: 82
76
77# T0 unlock L0
78C: unlock: 0: 0
79
80# Wait until T2 got the lock
81W: locked: 2: 0
82W: unlocked: 0: 0
83T: prioeq: 0: 80
84
85# T2 unlock L0
86C: unlock: 2: 0
87
88W: locked: 1: 0
89W: unlocked: 2: 0
90
91C: unlock: 1: 0
92W: unlocked: 1: 0
diff --git a/scripts/rt-tester/t3-l1-pi-signal.tst b/scripts/rt-tester/t3-l1-pi-signal.tst
new file mode 100644
index 0000000000..c6e2135634
--- /dev/null
+++ b/scripts/rt-tester/t3-l1-pi-signal.tst
@@ -0,0 +1,98 @@
1#
2# rt-mutex test
3#
4# Op: C(ommand)/T(est)/W(ait)
5# | opcode
6# | | threadid: 0-7
7# | | | opcode argument
8# | | | |
9# C: lock: 0: 0
10#
11# Commands
12#
13# opcode opcode argument
14# schedother nice value
15# schedfifo priority
16# lock lock nr (0-7)
17# locknowait lock nr (0-7)
18# lockint lock nr (0-7)
19# lockintnowait lock nr (0-7)
20# lockcont lock nr (0-7)
21# unlock lock nr (0-7)
22# lockbkl lock nr (0-7)
23# unlockbkl lock nr (0-7)
24# signal thread to signal (0-7)
25# reset 0
26# resetevent 0
27#
28# Tests / Wait
29#
30# opcode opcode argument
31#
32# prioeq priority
33# priolt priority
34# priogt priority
35# nprioeq normal priority
36# npriolt normal priority
37# npriogt normal priority
38# locked lock nr (0-7)
39# blocked lock nr (0-7)
40# blockedwake lock nr (0-7)
41# unlocked lock nr (0-7)
42# lockedbkl dont care
43# blockedbkl dont care
44# unlockedbkl dont care
45# opcodeeq command opcode or number
46# opcodelt number
47# opcodegt number
48# eventeq number
49# eventgt number
50# eventlt number
51
52# Reset event counter
53C: resetevent: 0: 0
54W: opcodeeq: 0: 0
55
56# Set priorities
57C: schedother: 0: 0
58C: schedfifo: 1: 80
59C: schedfifo: 2: 81
60
61# T0 lock L0
62C: lock: 0: 0
63W: locked: 0: 0
64
65# T1 lock L0, no wait in the wakeup path
66C: locknowait: 1: 0
67W: blocked: 1: 0
68T: prioeq: 0: 80
69T: prioeq: 1: 80
70
71# T2 lock L0 interruptible, no wait in the wakeup path
72C: lockintnowait: 2: 0
73W: blocked: 2: 0
74T: prioeq: 0: 81
75T: prioeq: 1: 80
76
77# Interrupt T2
78C: signal: 2: 2
79W: unlocked: 2: 0
80T: prioeq: 1: 80
81T: prioeq: 0: 80
82
83T: locked: 0: 0
84T: blocked: 1: 0
85
86# T0 unlock L0
87C: unlock: 0: 0
88
89# Wait until T1 has locked L0 and exit
90W: locked: 1: 0
91W: unlocked: 0: 0
92T: priolt: 0: 1
93
94C: unlock: 1: 0
95W: unlocked: 1: 0
96
97
98
diff --git a/scripts/rt-tester/t3-l1-pi-steal.tst b/scripts/rt-tester/t3-l1-pi-steal.tst
new file mode 100644
index 0000000000..f53749d59d
--- /dev/null
+++ b/scripts/rt-tester/t3-l1-pi-steal.tst
@@ -0,0 +1,96 @@
1#
2# rt-mutex test
3#
4# Op: C(ommand)/T(est)/W(ait)
5# | opcode
6# | | threadid: 0-7
7# | | | opcode argument
8# | | | |
9# C: lock: 0: 0
10#
11# Commands
12#
13# opcode opcode argument
14# schedother nice value
15# schedfifo priority
16# lock lock nr (0-7)
17# locknowait lock nr (0-7)
18# lockint lock nr (0-7)
19# lockintnowait lock nr (0-7)
20# lockcont lock nr (0-7)
21# unlock lock nr (0-7)
22# lockbkl lock nr (0-7)
23# unlockbkl lock nr (0-7)
24# signal thread to signal (0-7)
25# reset 0
26# resetevent 0
27#
28# Tests / Wait
29#
30# opcode opcode argument
31#
32# prioeq priority
33# priolt priority
34# priogt priority
35# nprioeq normal priority
36# npriolt normal priority
37# npriogt normal priority
38# locked lock nr (0-7)
39# blocked lock nr (0-7)
40# blockedwake lock nr (0-7)
41# unlocked lock nr (0-7)
42# lockedbkl dont care
43# blockedbkl dont care
44# unlockedbkl dont care
45# opcodeeq command opcode or number
46# opcodelt number
47# opcodegt number
48# eventeq number
49# eventgt number
50# eventlt number
51
52#
53# 3 threads 1 lock PI steal pending ownership
54#
55C: resetevent: 0: 0
56W: opcodeeq: 0: 0
57
58# Set schedulers
59C: schedother: 0: 0
60C: schedfifo: 1: 80
61C: schedfifo: 2: 81
62
63# T0 lock L0
64C: lock: 0: 0
65W: locked: 0: 0
66
67# T1 lock L0
68C: lock: 1: 0
69W: blocked: 1: 0
70T: prioeq: 0: 80
71
72# T0 unlock L0
73C: unlock: 0: 0
74
75# Wait until T1 is in the wakeup loop
76W: blockedwake: 1: 0
77T: priolt: 0: 1
78
79# T2 lock L0
80C: lock: 2: 0
81# T1 leave wakeup loop
82C: lockcont: 1: 0
83
84# T2 must have the lock and T1 must be blocked
85W: locked: 2: 0
86W: blocked: 1: 0
87
88# T2 unlock L0
89C: unlock: 2: 0
90
91# Wait until T1 is in the wakeup loop and let it run
92W: blockedwake: 1: 0
93C: lockcont: 1: 0
94W: locked: 1: 0
95C: unlock: 1: 0
96W: unlocked: 1: 0
diff --git a/scripts/rt-tester/t3-l2-pi.tst b/scripts/rt-tester/t3-l2-pi.tst
new file mode 100644
index 0000000000..cdc3e4fd7b
--- /dev/null
+++ b/scripts/rt-tester/t3-l2-pi.tst
@@ -0,0 +1,92 @@
1#
2# rt-mutex test
3#
4# Op: C(ommand)/T(est)/W(ait)
5# | opcode
6# | | threadid: 0-7
7# | | | opcode argument
8# | | | |
9# C: lock: 0: 0
10#
11# Commands
12#
13# opcode opcode argument
14# schedother nice value
15# schedfifo priority
16# lock lock nr (0-7)
17# locknowait lock nr (0-7)
18# lockint lock nr (0-7)
19# lockintnowait lock nr (0-7)
20# lockcont lock nr (0-7)
21# unlock lock nr (0-7)
22# lockbkl lock nr (0-7)
23# unlockbkl lock nr (0-7)
24# signal thread to signal (0-7)
25# reset 0
26# resetevent 0
27#
28# Tests / Wait
29#
30# opcode opcode argument
31#
32# prioeq priority
33# priolt priority
34# priogt priority
35# nprioeq normal priority
36# npriolt normal priority
37# npriogt normal priority
38# locked lock nr (0-7)
39# blocked lock nr (0-7)
40# blockedwake lock nr (0-7)
41# unlocked lock nr (0-7)
42# lockedbkl dont care
43# blockedbkl dont care
44# unlockedbkl dont care
45# opcodeeq command opcode or number
46# opcodelt number
47# opcodegt number
48# eventeq number
49# eventgt number
50# eventlt number
51
52#
53# 3 threads 2 lock PI
54#
55C: resetevent: 0: 0
56W: opcodeeq: 0: 0
57
58# Set schedulers
59C: schedother: 0: 0
60C: schedother: 1: 0
61C: schedfifo: 2: 82
62
63# T0 lock L0
64C: locknowait: 0: 0
65W: locked: 0: 0
66
67# T1 lock L0
68C: locknowait: 1: 0
69W: blocked: 1: 0
70T: priolt: 0: 1
71
72# T2 lock L0
73C: locknowait: 2: 0
74W: blocked: 2: 0
75T: prioeq: 0: 82
76
77# T0 unlock L0
78C: unlock: 0: 0
79
80# Wait until T2 got the lock
81W: locked: 2: 0
82W: unlocked: 0: 0
83T: priolt: 0: 1
84
85# T2 unlock L0
86C: unlock: 2: 0
87
88W: unlocked: 2: 0
89W: locked: 1: 0
90
91C: unlock: 1: 0
92W: unlocked: 1: 0
diff --git a/scripts/rt-tester/t4-l2-pi-deboost.tst b/scripts/rt-tester/t4-l2-pi-deboost.tst
new file mode 100644
index 0000000000..baa14137f4
--- /dev/null
+++ b/scripts/rt-tester/t4-l2-pi-deboost.tst
@@ -0,0 +1,123 @@
1#
2# rt-mutex test
3#
4# Op: C(ommand)/T(est)/W(ait)
5# | opcode
6# | | threadid: 0-7
7# | | | opcode argument
8# | | | |
9# C: lock: 0: 0
10#
11# Commands
12#
13# opcode opcode argument
14# schedother nice value
15# schedfifo priority
16# lock lock nr (0-7)
17# locknowait lock nr (0-7)
18# lockint lock nr (0-7)
19# lockintnowait lock nr (0-7)
20# lockcont lock nr (0-7)
21# unlock lock nr (0-7)
22# lockbkl lock nr (0-7)
23# unlockbkl lock nr (0-7)
24# signal thread to signal (0-7)
25# reset 0
26# resetevent 0
27#
28# Tests / Wait
29#
30# opcode opcode argument
31#
32# prioeq priority
33# priolt priority
34# priogt priority
35# nprioeq normal priority
36# npriolt normal priority
37# npriogt normal priority
38# locked lock nr (0-7)
39# blocked lock nr (0-7)
40# blockedwake lock nr (0-7)
41# unlocked lock nr (0-7)
42# lockedbkl dont care
43# blockedbkl dont care
44# unlockedbkl dont care
45# opcodeeq command opcode or number
46# opcodelt number
47# opcodegt number
48# eventeq number
49# eventgt number
50# eventlt number
51
52#
53# 4 threads 2 lock PI
54#
55C: resetevent: 0: 0
56W: opcodeeq: 0: 0
57
58# Set schedulers
59C: schedother: 0: 0
60C: schedother: 1: 0
61C: schedfifo: 2: 82
62C: schedfifo: 3: 83
63
64# T0 lock L0
65C: locknowait: 0: 0
66W: locked: 0: 0
67
68# T1 lock L1
69C: locknowait: 1: 1
70W: locked: 1: 1
71
72# T3 lock L0
73C: lockintnowait: 3: 0
74W: blocked: 3: 0
75T: prioeq: 0: 83
76
77# T0 lock L1
78C: lock: 0: 1
79W: blocked: 0: 1
80T: prioeq: 1: 83
81
82# T1 unlock L1
83C: unlock: 1: 1
84
85# Wait until T0 is in the wakeup code
86W: blockedwake: 0: 1
87
88# Verify that T1 is unboosted
89W: unlocked: 1: 1
90T: priolt: 1: 1
91
92# T2 lock L1 (T0 is boosted and pending owner !)
93C: locknowait: 2: 1
94W: blocked: 2: 1
95T: prioeq: 0: 83
96
97# Interrupt T3 and wait until T3 returned
98C: signal: 3: 0
99W: unlocked: 3: 0
100
101# Verify prio of T0 (still pending owner,
102# but T2 is enqueued due to the previous boost by T3
103T: prioeq: 0: 82
104
105# Let T0 continue
106C: lockcont: 0: 1
107W: locked: 0: 1
108
109# Unlock L1 and let T2 get L1
110C: unlock: 0: 1
111W: locked: 2: 1
112
113# Verify that T0 is unboosted
114W: unlocked: 0: 1
115T: priolt: 0: 1
116
117# Unlock everything and exit
118C: unlock: 2: 1
119W: unlocked: 2: 1
120
121C: unlock: 0: 0
122W: unlocked: 0: 0
123
diff --git a/scripts/rt-tester/t5-l4-pi-boost-deboost-setsched.tst b/scripts/rt-tester/t5-l4-pi-boost-deboost-setsched.tst
new file mode 100644
index 0000000000..e6ec0c81b5
--- /dev/null
+++ b/scripts/rt-tester/t5-l4-pi-boost-deboost-setsched.tst
@@ -0,0 +1,183 @@
1#
2# rt-mutex test
3#
4# Op: C(ommand)/T(est)/W(ait)
5# | opcode
6# | | threadid: 0-7
7# | | | opcode argument
8# | | | |
9# C: lock: 0: 0
10#
11# Commands
12#
13# opcode opcode argument
14# schedother nice value
15# schedfifo priority
16# lock lock nr (0-7)
17# locknowait lock nr (0-7)
18# lockint lock nr (0-7)
19# lockintnowait lock nr (0-7)
20# lockcont lock nr (0-7)
21# unlock lock nr (0-7)
22# lockbkl lock nr (0-7)
23# unlockbkl lock nr (0-7)
24# signal thread to signal (0-7)
25# reset 0
26# resetevent 0
27#
28# Tests / Wait
29#
30# opcode opcode argument
31#
32# prioeq priority
33# priolt priority
34# priogt priority
35# nprioeq normal priority
36# npriolt normal priority
37# npriogt normal priority
38# locked lock nr (0-7)
39# blocked lock nr (0-7)
40# blockedwake lock nr (0-7)
41# unlocked lock nr (0-7)
42# lockedbkl dont care
43# blockedbkl dont care
44# unlockedbkl dont care
45# opcodeeq command opcode or number
46# opcodelt number
47# opcodegt number
48# eventeq number
49# eventgt number
50# eventlt number
51
52#
53# 5 threads 4 lock PI - modify priority of blocked threads
54#
55C: resetevent: 0: 0
56W: opcodeeq: 0: 0
57
58# Set schedulers
59C: schedother: 0: 0
60C: schedfifo: 1: 81
61C: schedfifo: 2: 82
62C: schedfifo: 3: 83
63C: schedfifo: 4: 84
64
65# T0 lock L0
66C: locknowait: 0: 0
67W: locked: 0: 0
68
69# T1 lock L1
70C: locknowait: 1: 1
71W: locked: 1: 1
72
73# T1 lock L0
74C: lockintnowait: 1: 0
75W: blocked: 1: 0
76T: prioeq: 0: 81
77
78# T2 lock L2
79C: locknowait: 2: 2
80W: locked: 2: 2
81
82# T2 lock L1
83C: lockintnowait: 2: 1
84W: blocked: 2: 1
85T: prioeq: 0: 82
86T: prioeq: 1: 82
87
88# T3 lock L3
89C: locknowait: 3: 3
90W: locked: 3: 3
91
92# T3 lock L2
93C: lockintnowait: 3: 2
94W: blocked: 3: 2
95T: prioeq: 0: 83
96T: prioeq: 1: 83
97T: prioeq: 2: 83
98
99# T4 lock L3
100C: lockintnowait: 4: 3
101W: blocked: 4: 3
102T: prioeq: 0: 84
103T: prioeq: 1: 84
104T: prioeq: 2: 84
105T: prioeq: 3: 84
106
107# Reduce prio of T4
108C: schedfifo: 4: 80
109T: prioeq: 0: 83
110T: prioeq: 1: 83
111T: prioeq: 2: 83
112T: prioeq: 3: 83
113T: prioeq: 4: 80
114
115# Increase prio of T4
116C: schedfifo: 4: 84
117T: prioeq: 0: 84
118T: prioeq: 1: 84
119T: prioeq: 2: 84
120T: prioeq: 3: 84
121T: prioeq: 4: 84
122
123# Reduce prio of T3
124C: schedfifo: 3: 80
125T: prioeq: 0: 84
126T: prioeq: 1: 84
127T: prioeq: 2: 84
128T: prioeq: 3: 84
129T: prioeq: 4: 84
130
131# Increase prio of T3
132C: schedfifo: 3: 85
133T: prioeq: 0: 85
134T: prioeq: 1: 85
135T: prioeq: 2: 85
136T: prioeq: 3: 85
137T: prioeq: 4: 84
138
139# Reduce prio of T3
140C: schedfifo: 3: 83
141T: prioeq: 0: 84
142T: prioeq: 1: 84
143T: prioeq: 2: 84
144T: prioeq: 3: 84
145T: prioeq: 4: 84
146
147# Signal T4
148C: signal: 4: 0
149W: unlocked: 4: 3
150T: prioeq: 0: 83
151T: prioeq: 1: 83
152T: prioeq: 2: 83
153T: prioeq: 3: 83
154
155# Signal T3
156C: signal: 3: 0
157W: unlocked: 3: 2
158T: prioeq: 0: 82
159T: prioeq: 1: 82
160T: prioeq: 2: 82
161
162# Signal T2
163C: signal: 2: 0
164W: unlocked: 2: 1
165T: prioeq: 0: 81
166T: prioeq: 1: 81
167
168# Signal T1
169C: signal: 1: 0
170W: unlocked: 1: 0
171T: priolt: 0: 1
172
173# Unlock and exit
174C: unlock: 3: 3
175C: unlock: 2: 2
176C: unlock: 1: 1
177C: unlock: 0: 0
178
179W: unlocked: 3: 3
180W: unlocked: 2: 2
181W: unlocked: 1: 1
182W: unlocked: 0: 0
183
diff --git a/scripts/rt-tester/t5-l4-pi-boost-deboost.tst b/scripts/rt-tester/t5-l4-pi-boost-deboost.tst
new file mode 100644
index 0000000000..ca64f8bbf4
--- /dev/null
+++ b/scripts/rt-tester/t5-l4-pi-boost-deboost.tst
@@ -0,0 +1,143 @@
1#
2# rt-mutex test
3#
4# Op: C(ommand)/T(est)/W(ait)
5# | opcode
6# | | threadid: 0-7
7# | | | opcode argument
8# | | | |
9# C: lock: 0: 0
10#
11# Commands
12#
13# opcode opcode argument
14# schedother nice value
15# schedfifo priority
16# lock lock nr (0-7)
17# locknowait lock nr (0-7)
18# lockint lock nr (0-7)
19# lockintnowait lock nr (0-7)
20# lockcont lock nr (0-7)
21# unlock lock nr (0-7)
22# lockbkl lock nr (0-7)
23# unlockbkl lock nr (0-7)
24# signal thread to signal (0-7)
25# reset 0
26# resetevent 0
27#
28# Tests / Wait
29#
30# opcode opcode argument
31#
32# prioeq priority
33# priolt priority
34# priogt priority
35# nprioeq normal priority
36# npriolt normal priority
37# npriogt normal priority
38# locked lock nr (0-7)
39# blocked lock nr (0-7)
40# blockedwake lock nr (0-7)
41# unlocked lock nr (0-7)
42# lockedbkl dont care
43# blockedbkl dont care
44# unlockedbkl dont care
45# opcodeeq command opcode or number
46# opcodelt number
47# opcodegt number
48# eventeq number
49# eventgt number
50# eventlt number
51
52#
53# 5 threads 4 lock PI
54#
55C: resetevent: 0: 0
56W: opcodeeq: 0: 0
57
58# Set schedulers
59C: schedother: 0: 0
60C: schedfifo: 1: 81
61C: schedfifo: 2: 82
62C: schedfifo: 3: 83
63C: schedfifo: 4: 84
64
65# T0 lock L0
66C: locknowait: 0: 0
67W: locked: 0: 0
68
69# T1 lock L1
70C: locknowait: 1: 1
71W: locked: 1: 1
72
73# T1 lock L0
74C: lockintnowait: 1: 0
75W: blocked: 1: 0
76T: prioeq: 0: 81
77
78# T2 lock L2
79C: locknowait: 2: 2
80W: locked: 2: 2
81
82# T2 lock L1
83C: lockintnowait: 2: 1
84W: blocked: 2: 1
85T: prioeq: 0: 82
86T: prioeq: 1: 82
87
88# T3 lock L3
89C: locknowait: 3: 3
90W: locked: 3: 3
91
92# T3 lock L2
93C: lockintnowait: 3: 2
94W: blocked: 3: 2
95T: prioeq: 0: 83
96T: prioeq: 1: 83
97T: prioeq: 2: 83
98
99# T4 lock L3
100C: lockintnowait: 4: 3
101W: blocked: 4: 3
102T: prioeq: 0: 84
103T: prioeq: 1: 84
104T: prioeq: 2: 84
105T: prioeq: 3: 84
106
107# Signal T4
108C: signal: 4: 0
109W: unlocked: 4: 3
110T: prioeq: 0: 83
111T: prioeq: 1: 83
112T: prioeq: 2: 83
113T: prioeq: 3: 83
114
115# Signal T3
116C: signal: 3: 0
117W: unlocked: 3: 2
118T: prioeq: 0: 82
119T: prioeq: 1: 82
120T: prioeq: 2: 82
121
122# Signal T2
123C: signal: 2: 0
124W: unlocked: 2: 1
125T: prioeq: 0: 81
126T: prioeq: 1: 81
127
128# Signal T1
129C: signal: 1: 0
130W: unlocked: 1: 0
131T: priolt: 0: 1
132
133# Unlock and exit
134C: unlock: 3: 3
135C: unlock: 2: 2
136C: unlock: 1: 1
137C: unlock: 0: 0
138
139W: unlocked: 3: 3
140W: unlocked: 2: 2
141W: unlocked: 1: 1
142W: unlocked: 0: 0
143
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index 9a23825218..82e4993f0a 100644
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -11,12 +11,12 @@ cd "${1:-.}" || usage
11# Check for git and a git repo. 11# Check for git and a git repo.
12if head=`git rev-parse --verify HEAD 2>/dev/null`; then 12if head=`git rev-parse --verify HEAD 2>/dev/null`; then
13 # Do we have an untagged version? 13 # Do we have an untagged version?
14 if [ "`git name-rev --tags HEAD`" = "HEAD undefined" ]; then 14 if git name-rev --tags HEAD | grep -E '^HEAD[[:space:]]+(.*~[0-9]*|undefined)$' > /dev/null; then
15 printf '%s%s' -g `echo "$head" | cut -c1-8` 15 printf '%s%s' -g `echo "$head" | cut -c1-8`
16 fi 16 fi
17 17
18 # Are there uncommitted changes? 18 # Are there uncommitted changes?
19 if git diff-files | read dummy; then 19 if git diff-index HEAD | read dummy; then
20 printf '%s' -dirty 20 printf '%s' -dirty
21 fi 21 fi
22fi 22fi
diff --git a/security/Kconfig b/security/Kconfig
index 34f593410d..67785df264 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -22,16 +22,22 @@ config KEYS
22 If you are unsure as to whether this is required, answer N. 22 If you are unsure as to whether this is required, answer N.
23 23
24config KEYS_DEBUG_PROC_KEYS 24config KEYS_DEBUG_PROC_KEYS
25 bool "Enable the /proc/keys file by which all keys may be viewed" 25 bool "Enable the /proc/keys file by which keys may be viewed"
26 depends on KEYS 26 depends on KEYS
27 help 27 help
28 This option turns on support for the /proc/keys file through which 28 This option turns on support for the /proc/keys file - through which
29 all the keys on the system can be listed. 29 can be listed all the keys on the system that are viewable by the
30 reading process.
30 31
31 This option is a slight security risk in that it makes it possible 32 The only keys included in the list are those that grant View
32 for anyone to see all the keys on the system. Normally the manager 33 permission to the reading process whether or not it possesses them.
33 pretends keys that are inaccessible to a process don't exist as far 34 Note that LSM security checks are still performed, and may further
34 as that process is concerned. 35 filter out keys that the current process is not authorised to view.
36
37 Only key attributes are listed here; key payloads are not included in
38 the resulting table.
39
40 If you are unsure as to whether this is required, answer N.
35 41
36config SECURITY 42config SECURITY
37 bool "Enable different security models" 43 bool "Enable different security models"
diff --git a/security/dummy.c b/security/dummy.c
index c3c5493581..310fcdf7b7 100644
--- a/security/dummy.c
+++ b/security/dummy.c
@@ -870,7 +870,8 @@ static int dummy_setprocattr(struct task_struct *p, char *name, void *value, siz
870} 870}
871 871
872#ifdef CONFIG_KEYS 872#ifdef CONFIG_KEYS
873static inline int dummy_key_alloc(struct key *key, struct task_struct *ctx) 873static inline int dummy_key_alloc(struct key *key, struct task_struct *ctx,
874 unsigned long flags)
874{ 875{
875 return 0; 876 return 0;
876} 877}
diff --git a/security/keys/internal.h b/security/keys/internal.h
index e066e60579..3c2877f066 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -99,7 +99,8 @@ extern int install_process_keyring(struct task_struct *tsk);
99extern struct key *request_key_and_link(struct key_type *type, 99extern struct key *request_key_and_link(struct key_type *type,
100 const char *description, 100 const char *description,
101 const char *callout_info, 101 const char *callout_info,
102 struct key *dest_keyring); 102 struct key *dest_keyring,
103 unsigned long flags);
103 104
104/* 105/*
105 * request_key authorisation 106 * request_key authorisation
diff --git a/security/keys/key.c b/security/keys/key.c
index 51f8515573..80de8c3e9c 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -11,15 +11,16 @@
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/poison.h>
14#include <linux/sched.h> 15#include <linux/sched.h>
15#include <linux/slab.h> 16#include <linux/slab.h>
16#include <linux/security.h> 17#include <linux/security.h>
17#include <linux/workqueue.h> 18#include <linux/workqueue.h>
19#include <linux/random.h>
18#include <linux/err.h> 20#include <linux/err.h>
19#include "internal.h" 21#include "internal.h"
20 22
21static kmem_cache_t *key_jar; 23static kmem_cache_t *key_jar;
22static key_serial_t key_serial_next = 3;
23struct rb_root key_serial_tree; /* tree of keys indexed by serial */ 24struct rb_root key_serial_tree; /* tree of keys indexed by serial */
24DEFINE_SPINLOCK(key_serial_lock); 25DEFINE_SPINLOCK(key_serial_lock);
25 26
@@ -169,22 +170,23 @@ static void __init __key_insert_serial(struct key *key)
169/*****************************************************************************/ 170/*****************************************************************************/
170/* 171/*
171 * assign a key the next unique serial number 172 * assign a key the next unique serial number
172 * - we work through all the serial numbers between 2 and 2^31-1 in turn and 173 * - these are assigned randomly to avoid security issues through covert
173 * then wrap 174 * channel problems
174 */ 175 */
175static inline void key_alloc_serial(struct key *key) 176static inline void key_alloc_serial(struct key *key)
176{ 177{
177 struct rb_node *parent, **p; 178 struct rb_node *parent, **p;
178 struct key *xkey; 179 struct key *xkey;
179 180
180 spin_lock(&key_serial_lock); 181 /* propose a random serial number and look for a hole for it in the
181
182 /* propose a likely serial number and look for a hole for it in the
183 * serial number tree */ 182 * serial number tree */
184 key->serial = key_serial_next; 183 do {
185 if (key->serial < 3) 184 get_random_bytes(&key->serial, sizeof(key->serial));
186 key->serial = 3; 185
187 key_serial_next = key->serial + 1; 186 key->serial >>= 1; /* negative numbers are not permitted */
187 } while (key->serial < 3);
188
189 spin_lock(&key_serial_lock);
188 190
189 parent = NULL; 191 parent = NULL;
190 p = &key_serial_tree.rb_node; 192 p = &key_serial_tree.rb_node;
@@ -204,12 +206,11 @@ static inline void key_alloc_serial(struct key *key)
204 206
205 /* we found a key with the proposed serial number - walk the tree from 207 /* we found a key with the proposed serial number - walk the tree from
206 * that point looking for the next unused serial number */ 208 * that point looking for the next unused serial number */
207 serial_exists: 209serial_exists:
208 for (;;) { 210 for (;;) {
209 key->serial = key_serial_next; 211 key->serial++;
210 if (key->serial < 2) 212 if (key->serial < 2)
211 key->serial = 2; 213 key->serial = 2;
212 key_serial_next = key->serial + 1;
213 214
214 if (!rb_parent(parent)) 215 if (!rb_parent(parent))
215 p = &key_serial_tree.rb_node; 216 p = &key_serial_tree.rb_node;
@@ -228,7 +229,7 @@ static inline void key_alloc_serial(struct key *key)
228 } 229 }
229 230
230 /* we've found a suitable hole - arrange for this key to occupy it */ 231 /* we've found a suitable hole - arrange for this key to occupy it */
231 insert_here: 232insert_here:
232 rb_link_node(&key->serial_node, parent, p); 233 rb_link_node(&key->serial_node, parent, p);
233 rb_insert_color(&key->serial_node, &key_serial_tree); 234 rb_insert_color(&key->serial_node, &key_serial_tree);
234 235
@@ -248,7 +249,7 @@ static inline void key_alloc_serial(struct key *key)
248 */ 249 */
249struct key *key_alloc(struct key_type *type, const char *desc, 250struct key *key_alloc(struct key_type *type, const char *desc,
250 uid_t uid, gid_t gid, struct task_struct *ctx, 251 uid_t uid, gid_t gid, struct task_struct *ctx,
251 key_perm_t perm, int not_in_quota) 252 key_perm_t perm, unsigned long flags)
252{ 253{
253 struct key_user *user = NULL; 254 struct key_user *user = NULL;
254 struct key *key; 255 struct key *key;
@@ -269,12 +270,14 @@ struct key *key_alloc(struct key_type *type, const char *desc,
269 270
270 /* check that the user's quota permits allocation of another key and 271 /* check that the user's quota permits allocation of another key and
271 * its description */ 272 * its description */
272 if (!not_in_quota) { 273 if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
273 spin_lock(&user->lock); 274 spin_lock(&user->lock);
274 if (user->qnkeys + 1 >= KEYQUOTA_MAX_KEYS || 275 if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
275 user->qnbytes + quotalen >= KEYQUOTA_MAX_BYTES 276 if (user->qnkeys + 1 >= KEYQUOTA_MAX_KEYS ||
276 ) 277 user->qnbytes + quotalen >= KEYQUOTA_MAX_BYTES
277 goto no_quota; 278 )
279 goto no_quota;
280 }
278 281
279 user->qnkeys++; 282 user->qnkeys++;
280 user->qnbytes += quotalen; 283 user->qnbytes += quotalen;
@@ -308,7 +311,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
308 key->payload.data = NULL; 311 key->payload.data = NULL;
309 key->security = NULL; 312 key->security = NULL;
310 313
311 if (!not_in_quota) 314 if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
312 key->flags |= 1 << KEY_FLAG_IN_QUOTA; 315 key->flags |= 1 << KEY_FLAG_IN_QUOTA;
313 316
314 memset(&key->type_data, 0, sizeof(key->type_data)); 317 memset(&key->type_data, 0, sizeof(key->type_data));
@@ -318,7 +321,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
318#endif 321#endif
319 322
320 /* let the security module know about the key */ 323 /* let the security module know about the key */
321 ret = security_key_alloc(key, ctx); 324 ret = security_key_alloc(key, ctx, flags);
322 if (ret < 0) 325 if (ret < 0)
323 goto security_error; 326 goto security_error;
324 327
@@ -332,7 +335,7 @@ error:
332security_error: 335security_error:
333 kfree(key->description); 336 kfree(key->description);
334 kmem_cache_free(key_jar, key); 337 kmem_cache_free(key_jar, key);
335 if (!not_in_quota) { 338 if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
336 spin_lock(&user->lock); 339 spin_lock(&user->lock);
337 user->qnkeys--; 340 user->qnkeys--;
338 user->qnbytes -= quotalen; 341 user->qnbytes -= quotalen;
@@ -345,7 +348,7 @@ security_error:
345no_memory_3: 348no_memory_3:
346 kmem_cache_free(key_jar, key); 349 kmem_cache_free(key_jar, key);
347no_memory_2: 350no_memory_2:
348 if (!not_in_quota) { 351 if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
349 spin_lock(&user->lock); 352 spin_lock(&user->lock);
350 user->qnkeys--; 353 user->qnkeys--;
351 user->qnbytes -= quotalen; 354 user->qnbytes -= quotalen;
@@ -761,7 +764,7 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
761 const char *description, 764 const char *description,
762 const void *payload, 765 const void *payload,
763 size_t plen, 766 size_t plen,
764 int not_in_quota) 767 unsigned long flags)
765{ 768{
766 struct key_type *ktype; 769 struct key_type *ktype;
767 struct key *keyring, *key = NULL; 770 struct key *keyring, *key = NULL;
@@ -822,7 +825,7 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
822 825
823 /* allocate a new key */ 826 /* allocate a new key */
824 key = key_alloc(ktype, description, current->fsuid, current->fsgid, 827 key = key_alloc(ktype, description, current->fsuid, current->fsgid,
825 current, perm, not_in_quota); 828 current, perm, flags);
826 if (IS_ERR(key)) { 829 if (IS_ERR(key)) {
827 key_ref = ERR_PTR(PTR_ERR(key)); 830 key_ref = ERR_PTR(PTR_ERR(key));
828 goto error_3; 831 goto error_3;
@@ -986,7 +989,7 @@ void unregister_key_type(struct key_type *ktype)
986 if (key->type == ktype) { 989 if (key->type == ktype) {
987 if (ktype->destroy) 990 if (ktype->destroy)
988 ktype->destroy(key); 991 ktype->destroy(key);
989 memset(&key->payload, 0xbd, sizeof(key->payload)); 992 memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
990 } 993 }
991 } 994 }
992 995
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index ed71d86d2c..329411cf87 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -102,7 +102,7 @@ asmlinkage long sys_add_key(const char __user *_type,
102 /* create or update the requested key and add it to the target 102 /* create or update the requested key and add it to the target
103 * keyring */ 103 * keyring */
104 key_ref = key_create_or_update(keyring_ref, type, description, 104 key_ref = key_create_or_update(keyring_ref, type, description,
105 payload, plen, 0); 105 payload, plen, KEY_ALLOC_IN_QUOTA);
106 if (!IS_ERR(key_ref)) { 106 if (!IS_ERR(key_ref)) {
107 ret = key_ref_to_ptr(key_ref)->serial; 107 ret = key_ref_to_ptr(key_ref)->serial;
108 key_ref_put(key_ref); 108 key_ref_put(key_ref);
@@ -184,7 +184,8 @@ asmlinkage long sys_request_key(const char __user *_type,
184 184
185 /* do the search */ 185 /* do the search */
186 key = request_key_and_link(ktype, description, callout_info, 186 key = request_key_and_link(ktype, description, callout_info,
187 key_ref_to_ptr(dest_ref)); 187 key_ref_to_ptr(dest_ref),
188 KEY_ALLOC_IN_QUOTA);
188 if (IS_ERR(key)) { 189 if (IS_ERR(key)) {
189 ret = PTR_ERR(key); 190 ret = PTR_ERR(key);
190 goto error5; 191 goto error5;
@@ -672,6 +673,7 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
672 */ 673 */
673long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid) 674long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid)
674{ 675{
676 struct key_user *newowner, *zapowner = NULL;
675 struct key *key; 677 struct key *key;
676 key_ref_t key_ref; 678 key_ref_t key_ref;
677 long ret; 679 long ret;
@@ -695,19 +697,50 @@ long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid)
695 if (!capable(CAP_SYS_ADMIN)) { 697 if (!capable(CAP_SYS_ADMIN)) {
696 /* only the sysadmin can chown a key to some other UID */ 698 /* only the sysadmin can chown a key to some other UID */
697 if (uid != (uid_t) -1 && key->uid != uid) 699 if (uid != (uid_t) -1 && key->uid != uid)
698 goto no_access; 700 goto error_put;
699 701
700 /* only the sysadmin can set the key's GID to a group other 702 /* only the sysadmin can set the key's GID to a group other
701 * than one of those that the current process subscribes to */ 703 * than one of those that the current process subscribes to */
702 if (gid != (gid_t) -1 && gid != key->gid && !in_group_p(gid)) 704 if (gid != (gid_t) -1 && gid != key->gid && !in_group_p(gid))
703 goto no_access; 705 goto error_put;
704 } 706 }
705 707
706 /* change the UID (have to update the quotas) */ 708 /* change the UID */
707 if (uid != (uid_t) -1 && uid != key->uid) { 709 if (uid != (uid_t) -1 && uid != key->uid) {
708 /* don't support UID changing yet */ 710 ret = -ENOMEM;
709 ret = -EOPNOTSUPP; 711 newowner = key_user_lookup(uid);
710 goto no_access; 712 if (!newowner)
713 goto error_put;
714
715 /* transfer the quota burden to the new user */
716 if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
717 spin_lock(&newowner->lock);
718 if (newowner->qnkeys + 1 >= KEYQUOTA_MAX_KEYS ||
719 newowner->qnbytes + key->quotalen >=
720 KEYQUOTA_MAX_BYTES)
721 goto quota_overrun;
722
723 newowner->qnkeys++;
724 newowner->qnbytes += key->quotalen;
725 spin_unlock(&newowner->lock);
726
727 spin_lock(&key->user->lock);
728 key->user->qnkeys--;
729 key->user->qnbytes -= key->quotalen;
730 spin_unlock(&key->user->lock);
731 }
732
733 atomic_dec(&key->user->nkeys);
734 atomic_inc(&newowner->nkeys);
735
736 if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
737 atomic_dec(&key->user->nikeys);
738 atomic_inc(&newowner->nikeys);
739 }
740
741 zapowner = key->user;
742 key->user = newowner;
743 key->uid = uid;
711 } 744 }
712 745
713 /* change the GID */ 746 /* change the GID */
@@ -716,12 +749,20 @@ long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid)
716 749
717 ret = 0; 750 ret = 0;
718 751
719 no_access: 752error_put:
720 up_write(&key->sem); 753 up_write(&key->sem);
721 key_put(key); 754 key_put(key);
722 error: 755 if (zapowner)
756 key_user_put(zapowner);
757error:
723 return ret; 758 return ret;
724 759
760quota_overrun:
761 spin_unlock(&newowner->lock);
762 zapowner = newowner;
763 ret = -EDQUOT;
764 goto error_put;
765
725} /* end keyctl_chown_key() */ 766} /* end keyctl_chown_key() */
726 767
727/*****************************************************************************/ 768/*****************************************************************************/
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 1357207fc9..e8d02acc51 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -49,6 +49,7 @@ static inline unsigned keyring_hash(const char *desc)
49static int keyring_instantiate(struct key *keyring, 49static int keyring_instantiate(struct key *keyring,
50 const void *data, size_t datalen); 50 const void *data, size_t datalen);
51static int keyring_match(const struct key *keyring, const void *criterion); 51static int keyring_match(const struct key *keyring, const void *criterion);
52static void keyring_revoke(struct key *keyring);
52static void keyring_destroy(struct key *keyring); 53static void keyring_destroy(struct key *keyring);
53static void keyring_describe(const struct key *keyring, struct seq_file *m); 54static void keyring_describe(const struct key *keyring, struct seq_file *m);
54static long keyring_read(const struct key *keyring, 55static long keyring_read(const struct key *keyring,
@@ -59,6 +60,7 @@ struct key_type key_type_keyring = {
59 .def_datalen = sizeof(struct keyring_list), 60 .def_datalen = sizeof(struct keyring_list),
60 .instantiate = keyring_instantiate, 61 .instantiate = keyring_instantiate,
61 .match = keyring_match, 62 .match = keyring_match,
63 .revoke = keyring_revoke,
62 .destroy = keyring_destroy, 64 .destroy = keyring_destroy,
63 .describe = keyring_describe, 65 .describe = keyring_describe,
64 .read = keyring_read, 66 .read = keyring_read,
@@ -240,7 +242,7 @@ static long keyring_read(const struct key *keyring,
240 * allocate a keyring and link into the destination keyring 242 * allocate a keyring and link into the destination keyring
241 */ 243 */
242struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid, 244struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
243 struct task_struct *ctx, int not_in_quota, 245 struct task_struct *ctx, unsigned long flags,
244 struct key *dest) 246 struct key *dest)
245{ 247{
246 struct key *keyring; 248 struct key *keyring;
@@ -249,7 +251,7 @@ struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
249 keyring = key_alloc(&key_type_keyring, description, 251 keyring = key_alloc(&key_type_keyring, description,
250 uid, gid, ctx, 252 uid, gid, ctx,
251 (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL, 253 (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL,
252 not_in_quota); 254 flags);
253 255
254 if (!IS_ERR(keyring)) { 256 if (!IS_ERR(keyring)) {
255 ret = key_instantiate_and_link(keyring, NULL, 0, dest, NULL); 257 ret = key_instantiate_and_link(keyring, NULL, 0, dest, NULL);
@@ -953,3 +955,22 @@ int keyring_clear(struct key *keyring)
953} /* end keyring_clear() */ 955} /* end keyring_clear() */
954 956
955EXPORT_SYMBOL(keyring_clear); 957EXPORT_SYMBOL(keyring_clear);
958
959/*****************************************************************************/
960/*
961 * dispose of the links from a revoked keyring
962 * - called with the key sem write-locked
963 */
964static void keyring_revoke(struct key *keyring)
965{
966 struct keyring_list *klist = keyring->payload.subscriptions;
967
968 /* adjust the quota */
969 key_payload_reserve(keyring, 0);
970
971 if (klist) {
972 rcu_assign_pointer(keyring->payload.subscriptions, NULL);
973 call_rcu(&klist->rcu, keyring_clear_rcu_disposal);
974 }
975
976} /* end keyring_revoke() */
diff --git a/security/keys/proc.c b/security/keys/proc.c
index 12b750e51f..686a9ee0c5 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -137,6 +137,13 @@ static int proc_keys_show(struct seq_file *m, void *v)
137 struct timespec now; 137 struct timespec now;
138 unsigned long timo; 138 unsigned long timo;
139 char xbuf[12]; 139 char xbuf[12];
140 int rc;
141
142 /* check whether the current task is allowed to view the key (assuming
143 * non-possession) */
144 rc = key_task_permission(make_key_ref(key, 0), current, KEY_VIEW);
145 if (rc < 0)
146 return 0;
140 147
141 now = current_kernel_time(); 148 now = current_kernel_time();
142 149
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 4d9825f996..32150cf7c3 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -77,7 +77,8 @@ int alloc_uid_keyring(struct user_struct *user,
77 /* concoct a default session keyring */ 77 /* concoct a default session keyring */
78 sprintf(buf, "_uid_ses.%u", user->uid); 78 sprintf(buf, "_uid_ses.%u", user->uid);
79 79
80 session_keyring = keyring_alloc(buf, user->uid, (gid_t) -1, ctx, 0, NULL); 80 session_keyring = keyring_alloc(buf, user->uid, (gid_t) -1, ctx,
81 KEY_ALLOC_IN_QUOTA, NULL);
81 if (IS_ERR(session_keyring)) { 82 if (IS_ERR(session_keyring)) {
82 ret = PTR_ERR(session_keyring); 83 ret = PTR_ERR(session_keyring);
83 goto error; 84 goto error;
@@ -87,8 +88,8 @@ int alloc_uid_keyring(struct user_struct *user,
87 * keyring */ 88 * keyring */
88 sprintf(buf, "_uid.%u", user->uid); 89 sprintf(buf, "_uid.%u", user->uid);
89 90
90 uid_keyring = keyring_alloc(buf, user->uid, (gid_t) -1, ctx, 0, 91 uid_keyring = keyring_alloc(buf, user->uid, (gid_t) -1, ctx,
91 session_keyring); 92 KEY_ALLOC_IN_QUOTA, session_keyring);
92 if (IS_ERR(uid_keyring)) { 93 if (IS_ERR(uid_keyring)) {
93 key_put(session_keyring); 94 key_put(session_keyring);
94 ret = PTR_ERR(uid_keyring); 95 ret = PTR_ERR(uid_keyring);
@@ -144,7 +145,8 @@ int install_thread_keyring(struct task_struct *tsk)
144 145
145 sprintf(buf, "_tid.%u", tsk->pid); 146 sprintf(buf, "_tid.%u", tsk->pid);
146 147
147 keyring = keyring_alloc(buf, tsk->uid, tsk->gid, tsk, 1, NULL); 148 keyring = keyring_alloc(buf, tsk->uid, tsk->gid, tsk,
149 KEY_ALLOC_QUOTA_OVERRUN, NULL);
148 if (IS_ERR(keyring)) { 150 if (IS_ERR(keyring)) {
149 ret = PTR_ERR(keyring); 151 ret = PTR_ERR(keyring);
150 goto error; 152 goto error;
@@ -178,7 +180,8 @@ int install_process_keyring(struct task_struct *tsk)
178 if (!tsk->signal->process_keyring) { 180 if (!tsk->signal->process_keyring) {
179 sprintf(buf, "_pid.%u", tsk->tgid); 181 sprintf(buf, "_pid.%u", tsk->tgid);
180 182
181 keyring = keyring_alloc(buf, tsk->uid, tsk->gid, tsk, 1, NULL); 183 keyring = keyring_alloc(buf, tsk->uid, tsk->gid, tsk,
184 KEY_ALLOC_QUOTA_OVERRUN, NULL);
182 if (IS_ERR(keyring)) { 185 if (IS_ERR(keyring)) {
183 ret = PTR_ERR(keyring); 186 ret = PTR_ERR(keyring);
184 goto error; 187 goto error;
@@ -209,6 +212,7 @@ error:
209static int install_session_keyring(struct task_struct *tsk, 212static int install_session_keyring(struct task_struct *tsk,
210 struct key *keyring) 213 struct key *keyring)
211{ 214{
215 unsigned long flags;
212 struct key *old; 216 struct key *old;
213 char buf[20]; 217 char buf[20];
214 218
@@ -218,7 +222,12 @@ static int install_session_keyring(struct task_struct *tsk,
218 if (!keyring) { 222 if (!keyring) {
219 sprintf(buf, "_ses.%u", tsk->tgid); 223 sprintf(buf, "_ses.%u", tsk->tgid);
220 224
221 keyring = keyring_alloc(buf, tsk->uid, tsk->gid, tsk, 1, NULL); 225 flags = KEY_ALLOC_QUOTA_OVERRUN;
226 if (tsk->signal->session_keyring)
227 flags = KEY_ALLOC_IN_QUOTA;
228
229 keyring = keyring_alloc(buf, tsk->uid, tsk->gid, tsk,
230 flags, NULL);
222 if (IS_ERR(keyring)) 231 if (IS_ERR(keyring))
223 return PTR_ERR(keyring); 232 return PTR_ERR(keyring);
224 } 233 }
@@ -728,7 +737,8 @@ long join_session_keyring(const char *name)
728 keyring = find_keyring_by_name(name, 0); 737 keyring = find_keyring_by_name(name, 0);
729 if (PTR_ERR(keyring) == -ENOKEY) { 738 if (PTR_ERR(keyring) == -ENOKEY) {
730 /* not found - try and create a new one */ 739 /* not found - try and create a new one */
731 keyring = keyring_alloc(name, tsk->uid, tsk->gid, tsk, 0, NULL); 740 keyring = keyring_alloc(name, tsk->uid, tsk->gid, tsk,
741 KEY_ALLOC_IN_QUOTA, NULL);
732 if (IS_ERR(keyring)) { 742 if (IS_ERR(keyring)) {
733 ret = PTR_ERR(keyring); 743 ret = PTR_ERR(keyring);
734 goto error2; 744 goto error2;
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index eab66a06ca..58d1efd4fc 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -48,8 +48,8 @@ static int call_sbin_request_key(struct key *key,
48 /* allocate a new session keyring */ 48 /* allocate a new session keyring */
49 sprintf(desc, "_req.%u", key->serial); 49 sprintf(desc, "_req.%u", key->serial);
50 50
51 keyring = keyring_alloc(desc, current->fsuid, current->fsgid, 51 keyring = keyring_alloc(desc, current->fsuid, current->fsgid, current,
52 current, 1, NULL); 52 KEY_ALLOC_QUOTA_OVERRUN, NULL);
53 if (IS_ERR(keyring)) { 53 if (IS_ERR(keyring)) {
54 ret = PTR_ERR(keyring); 54 ret = PTR_ERR(keyring);
55 goto error_alloc; 55 goto error_alloc;
@@ -126,7 +126,8 @@ error_alloc:
126 */ 126 */
127static struct key *__request_key_construction(struct key_type *type, 127static struct key *__request_key_construction(struct key_type *type,
128 const char *description, 128 const char *description,
129 const char *callout_info) 129 const char *callout_info,
130 unsigned long flags)
130{ 131{
131 request_key_actor_t actor; 132 request_key_actor_t actor;
132 struct key_construction cons; 133 struct key_construction cons;
@@ -134,12 +135,12 @@ static struct key *__request_key_construction(struct key_type *type,
134 struct key *key, *authkey; 135 struct key *key, *authkey;
135 int ret, negated; 136 int ret, negated;
136 137
137 kenter("%s,%s,%s", type->name, description, callout_info); 138 kenter("%s,%s,%s,%lx", type->name, description, callout_info, flags);
138 139
139 /* create a key and add it to the queue */ 140 /* create a key and add it to the queue */
140 key = key_alloc(type, description, 141 key = key_alloc(type, description,
141 current->fsuid, current->fsgid, 142 current->fsuid, current->fsgid, current, KEY_POS_ALL,
142 current, KEY_POS_ALL, 0); 143 flags);
143 if (IS_ERR(key)) 144 if (IS_ERR(key))
144 goto alloc_failed; 145 goto alloc_failed;
145 146
@@ -258,15 +259,16 @@ alloc_failed:
258static struct key *request_key_construction(struct key_type *type, 259static struct key *request_key_construction(struct key_type *type,
259 const char *description, 260 const char *description,
260 struct key_user *user, 261 struct key_user *user,
261 const char *callout_info) 262 const char *callout_info,
263 unsigned long flags)
262{ 264{
263 struct key_construction *pcons; 265 struct key_construction *pcons;
264 struct key *key, *ckey; 266 struct key *key, *ckey;
265 267
266 DECLARE_WAITQUEUE(myself, current); 268 DECLARE_WAITQUEUE(myself, current);
267 269
268 kenter("%s,%s,{%d},%s", 270 kenter("%s,%s,{%d},%s,%lx",
269 type->name, description, user->uid, callout_info); 271 type->name, description, user->uid, callout_info, flags);
270 272
271 /* see if there's such a key under construction already */ 273 /* see if there's such a key under construction already */
272 down_write(&key_construction_sem); 274 down_write(&key_construction_sem);
@@ -282,7 +284,8 @@ static struct key *request_key_construction(struct key_type *type,
282 } 284 }
283 285
284 /* see about getting userspace to construct the key */ 286 /* see about getting userspace to construct the key */
285 key = __request_key_construction(type, description, callout_info); 287 key = __request_key_construction(type, description, callout_info,
288 flags);
286 error: 289 error:
287 kleave(" = %p", key); 290 kleave(" = %p", key);
288 return key; 291 return key;
@@ -389,14 +392,15 @@ static void request_key_link(struct key *key, struct key *dest_keyring)
389struct key *request_key_and_link(struct key_type *type, 392struct key *request_key_and_link(struct key_type *type,
390 const char *description, 393 const char *description,
391 const char *callout_info, 394 const char *callout_info,
392 struct key *dest_keyring) 395 struct key *dest_keyring,
396 unsigned long flags)
393{ 397{
394 struct key_user *user; 398 struct key_user *user;
395 struct key *key; 399 struct key *key;
396 key_ref_t key_ref; 400 key_ref_t key_ref;
397 401
398 kenter("%s,%s,%s,%p", 402 kenter("%s,%s,%s,%p,%lx",
399 type->name, description, callout_info, dest_keyring); 403 type->name, description, callout_info, dest_keyring, flags);
400 404
401 /* search all the process keyrings for a key */ 405 /* search all the process keyrings for a key */
402 key_ref = search_process_keyrings(type, description, type->match, 406 key_ref = search_process_keyrings(type, description, type->match,
@@ -429,7 +433,8 @@ struct key *request_key_and_link(struct key_type *type,
429 /* ask userspace (returns NULL if it waited on a key 433 /* ask userspace (returns NULL if it waited on a key
430 * being constructed) */ 434 * being constructed) */
431 key = request_key_construction(type, description, 435 key = request_key_construction(type, description,
432 user, callout_info); 436 user, callout_info,
437 flags);
433 if (key) 438 if (key)
434 break; 439 break;
435 440
@@ -485,7 +490,8 @@ struct key *request_key(struct key_type *type,
485 const char *description, 490 const char *description,
486 const char *callout_info) 491 const char *callout_info)
487{ 492{
488 return request_key_and_link(type, description, callout_info, NULL); 493 return request_key_and_link(type, description, callout_info, NULL,
494 KEY_ALLOC_IN_QUOTA);
489 495
490} /* end request_key() */ 496} /* end request_key() */
491 497
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index cb9817ced3..cbf58a91b0 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -187,7 +187,7 @@ struct key *request_key_auth_new(struct key *target, const char *callout_info)
187 authkey = key_alloc(&key_type_request_key_auth, desc, 187 authkey = key_alloc(&key_type_request_key_auth, desc,
188 current->fsuid, current->fsgid, current, 188 current->fsuid, current->fsgid, current,
189 KEY_POS_VIEW | KEY_POS_READ | KEY_POS_SEARCH | 189 KEY_POS_VIEW | KEY_POS_READ | KEY_POS_SEARCH |
190 KEY_USR_VIEW, 1); 190 KEY_USR_VIEW, KEY_ALLOC_NOT_IN_QUOTA);
191 if (IS_ERR(authkey)) { 191 if (IS_ERR(authkey)) {
192 ret = PTR_ERR(authkey); 192 ret = PTR_ERR(authkey);
193 goto error_alloc; 193 goto error_alloc;
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index 8e71895b97..5bbfdebb7a 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -28,6 +28,7 @@ struct key_type key_type_user = {
28 .instantiate = user_instantiate, 28 .instantiate = user_instantiate,
29 .update = user_update, 29 .update = user_update,
30 .match = user_match, 30 .match = user_match,
31 .revoke = user_revoke,
31 .destroy = user_destroy, 32 .destroy = user_destroy,
32 .describe = user_describe, 33 .describe = user_describe,
33 .read = user_read, 34 .read = user_read,
@@ -67,6 +68,7 @@ error:
67 return ret; 68 return ret;
68 69
69} /* end user_instantiate() */ 70} /* end user_instantiate() */
71
70EXPORT_SYMBOL_GPL(user_instantiate); 72EXPORT_SYMBOL_GPL(user_instantiate);
71 73
72/*****************************************************************************/ 74/*****************************************************************************/
@@ -141,7 +143,28 @@ EXPORT_SYMBOL_GPL(user_match);
141 143
142/*****************************************************************************/ 144/*****************************************************************************/
143/* 145/*
144 * dispose of the data dangling from the corpse of a user 146 * dispose of the links from a revoked keyring
147 * - called with the key sem write-locked
148 */
149void user_revoke(struct key *key)
150{
151 struct user_key_payload *upayload = key->payload.data;
152
153 /* clear the quota */
154 key_payload_reserve(key, 0);
155
156 if (upayload) {
157 rcu_assign_pointer(key->payload.data, NULL);
158 call_rcu(&upayload->rcu, user_update_rcu_disposal);
159 }
160
161} /* end user_revoke() */
162
163EXPORT_SYMBOL(user_revoke);
164
165/*****************************************************************************/
166/*
167 * dispose of the data dangling from the corpse of a user key
145 */ 168 */
146void user_destroy(struct key *key) 169void user_destroy(struct key *key)
147{ 170{
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 79c16e31c8..28832e6898 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1099,6 +1099,17 @@ static int may_create(struct inode *dir,
1099 FILESYSTEM__ASSOCIATE, &ad); 1099 FILESYSTEM__ASSOCIATE, &ad);
1100} 1100}
1101 1101
1102/* Check whether a task can create a key. */
1103static int may_create_key(u32 ksid,
1104 struct task_struct *ctx)
1105{
1106 struct task_security_struct *tsec;
1107
1108 tsec = ctx->security;
1109
1110 return avc_has_perm(tsec->sid, ksid, SECCLASS_KEY, KEY__CREATE, NULL);
1111}
1112
1102#define MAY_LINK 0 1113#define MAY_LINK 0
1103#define MAY_UNLINK 1 1114#define MAY_UNLINK 1
1104#define MAY_RMDIR 2 1115#define MAY_RMDIR 2
@@ -1521,8 +1532,10 @@ static int selinux_bprm_set_security(struct linux_binprm *bprm)
1521 /* Default to the current task SID. */ 1532 /* Default to the current task SID. */
1522 bsec->sid = tsec->sid; 1533 bsec->sid = tsec->sid;
1523 1534
1524 /* Reset create SID on execve. */ 1535 /* Reset fs, key, and sock SIDs on execve. */
1525 tsec->create_sid = 0; 1536 tsec->create_sid = 0;
1537 tsec->keycreate_sid = 0;
1538 tsec->sockcreate_sid = 0;
1526 1539
1527 if (tsec->exec_sid) { 1540 if (tsec->exec_sid) {
1528 newsid = tsec->exec_sid; 1541 newsid = tsec->exec_sid;
@@ -2574,9 +2587,11 @@ static int selinux_task_alloc_security(struct task_struct *tsk)
2574 tsec2->osid = tsec1->osid; 2587 tsec2->osid = tsec1->osid;
2575 tsec2->sid = tsec1->sid; 2588 tsec2->sid = tsec1->sid;
2576 2589
2577 /* Retain the exec and create SIDs across fork */ 2590 /* Retain the exec, fs, key, and sock SIDs across fork */
2578 tsec2->exec_sid = tsec1->exec_sid; 2591 tsec2->exec_sid = tsec1->exec_sid;
2579 tsec2->create_sid = tsec1->create_sid; 2592 tsec2->create_sid = tsec1->create_sid;
2593 tsec2->keycreate_sid = tsec1->keycreate_sid;
2594 tsec2->sockcreate_sid = tsec1->sockcreate_sid;
2580 2595
2581 /* Retain ptracer SID across fork, if any. 2596 /* Retain ptracer SID across fork, if any.
2582 This will be reset by the ptrace hook upon any 2597 This will be reset by the ptrace hook upon any
@@ -2926,12 +2941,14 @@ static int selinux_socket_create(int family, int type,
2926{ 2941{
2927 int err = 0; 2942 int err = 0;
2928 struct task_security_struct *tsec; 2943 struct task_security_struct *tsec;
2944 u32 newsid;
2929 2945
2930 if (kern) 2946 if (kern)
2931 goto out; 2947 goto out;
2932 2948
2933 tsec = current->security; 2949 tsec = current->security;
2934 err = avc_has_perm(tsec->sid, tsec->sid, 2950 newsid = tsec->sockcreate_sid ? : tsec->sid;
2951 err = avc_has_perm(tsec->sid, newsid,
2935 socket_type_to_security_class(family, type, 2952 socket_type_to_security_class(family, type,
2936 protocol), SOCKET__CREATE, NULL); 2953 protocol), SOCKET__CREATE, NULL);
2937 2954
@@ -2944,12 +2961,14 @@ static void selinux_socket_post_create(struct socket *sock, int family,
2944{ 2961{
2945 struct inode_security_struct *isec; 2962 struct inode_security_struct *isec;
2946 struct task_security_struct *tsec; 2963 struct task_security_struct *tsec;
2964 u32 newsid;
2947 2965
2948 isec = SOCK_INODE(sock)->i_security; 2966 isec = SOCK_INODE(sock)->i_security;
2949 2967
2950 tsec = current->security; 2968 tsec = current->security;
2969 newsid = tsec->sockcreate_sid ? : tsec->sid;
2951 isec->sclass = socket_type_to_security_class(family, type, protocol); 2970 isec->sclass = socket_type_to_security_class(family, type, protocol);
2952 isec->sid = kern ? SECINITSID_KERNEL : tsec->sid; 2971 isec->sid = kern ? SECINITSID_KERNEL : newsid;
2953 isec->initialized = 1; 2972 isec->initialized = 1;
2954 2973
2955 return; 2974 return;
@@ -4150,6 +4169,10 @@ static int selinux_getprocattr(struct task_struct *p,
4150 sid = tsec->exec_sid; 4169 sid = tsec->exec_sid;
4151 else if (!strcmp(name, "fscreate")) 4170 else if (!strcmp(name, "fscreate"))
4152 sid = tsec->create_sid; 4171 sid = tsec->create_sid;
4172 else if (!strcmp(name, "keycreate"))
4173 sid = tsec->keycreate_sid;
4174 else if (!strcmp(name, "sockcreate"))
4175 sid = tsec->sockcreate_sid;
4153 else 4176 else
4154 return -EINVAL; 4177 return -EINVAL;
4155 4178
@@ -4182,6 +4205,10 @@ static int selinux_setprocattr(struct task_struct *p,
4182 error = task_has_perm(current, p, PROCESS__SETEXEC); 4205 error = task_has_perm(current, p, PROCESS__SETEXEC);
4183 else if (!strcmp(name, "fscreate")) 4206 else if (!strcmp(name, "fscreate"))
4184 error = task_has_perm(current, p, PROCESS__SETFSCREATE); 4207 error = task_has_perm(current, p, PROCESS__SETFSCREATE);
4208 else if (!strcmp(name, "keycreate"))
4209 error = task_has_perm(current, p, PROCESS__SETKEYCREATE);
4210 else if (!strcmp(name, "sockcreate"))
4211 error = task_has_perm(current, p, PROCESS__SETSOCKCREATE);
4185 else if (!strcmp(name, "current")) 4212 else if (!strcmp(name, "current"))
4186 error = task_has_perm(current, p, PROCESS__SETCURRENT); 4213 error = task_has_perm(current, p, PROCESS__SETCURRENT);
4187 else 4214 else
@@ -4211,6 +4238,13 @@ static int selinux_setprocattr(struct task_struct *p,
4211 tsec->exec_sid = sid; 4238 tsec->exec_sid = sid;
4212 else if (!strcmp(name, "fscreate")) 4239 else if (!strcmp(name, "fscreate"))
4213 tsec->create_sid = sid; 4240 tsec->create_sid = sid;
4241 else if (!strcmp(name, "keycreate")) {
4242 error = may_create_key(sid, p);
4243 if (error)
4244 return error;
4245 tsec->keycreate_sid = sid;
4246 } else if (!strcmp(name, "sockcreate"))
4247 tsec->sockcreate_sid = sid;
4214 else if (!strcmp(name, "current")) { 4248 else if (!strcmp(name, "current")) {
4215 struct av_decision avd; 4249 struct av_decision avd;
4216 4250
@@ -4264,7 +4298,8 @@ static int selinux_setprocattr(struct task_struct *p,
4264 4298
4265#ifdef CONFIG_KEYS 4299#ifdef CONFIG_KEYS
4266 4300
4267static int selinux_key_alloc(struct key *k, struct task_struct *tsk) 4301static int selinux_key_alloc(struct key *k, struct task_struct *tsk,
4302 unsigned long flags)
4268{ 4303{
4269 struct task_security_struct *tsec = tsk->security; 4304 struct task_security_struct *tsec = tsk->security;
4270 struct key_security_struct *ksec; 4305 struct key_security_struct *ksec;
@@ -4274,7 +4309,10 @@ static int selinux_key_alloc(struct key *k, struct task_struct *tsk)
4274 return -ENOMEM; 4309 return -ENOMEM;
4275 4310
4276 ksec->obj = k; 4311 ksec->obj = k;
4277 ksec->sid = tsec->sid; 4312 if (tsec->keycreate_sid)
4313 ksec->sid = tsec->keycreate_sid;
4314 else
4315 ksec->sid = tsec->sid;
4278 k->security = ksec; 4316 k->security = ksec;
4279 4317
4280 return 0; 4318 return 0;
@@ -4513,8 +4551,10 @@ static __init int selinux_init(void)
4513 4551
4514#ifdef CONFIG_KEYS 4552#ifdef CONFIG_KEYS
4515 /* Add security information to initial keyrings */ 4553 /* Add security information to initial keyrings */
4516 security_key_alloc(&root_user_keyring, current); 4554 selinux_key_alloc(&root_user_keyring, current,
4517 security_key_alloc(&root_session_keyring, current); 4555 KEY_ALLOC_NOT_IN_QUOTA);
4556 selinux_key_alloc(&root_session_keyring, current,
4557 KEY_ALLOC_NOT_IN_QUOTA);
4518#endif 4558#endif
4519 4559
4520 return 0; 4560 return 0;
diff --git a/security/selinux/include/av_perm_to_string.h b/security/selinux/include/av_perm_to_string.h
index bc020bde6c..7c9b583808 100644
--- a/security/selinux/include/av_perm_to_string.h
+++ b/security/selinux/include/av_perm_to_string.h
@@ -72,6 +72,8 @@
72 S_(SECCLASS_PROCESS, PROCESS__EXECMEM, "execmem") 72 S_(SECCLASS_PROCESS, PROCESS__EXECMEM, "execmem")
73 S_(SECCLASS_PROCESS, PROCESS__EXECSTACK, "execstack") 73 S_(SECCLASS_PROCESS, PROCESS__EXECSTACK, "execstack")
74 S_(SECCLASS_PROCESS, PROCESS__EXECHEAP, "execheap") 74 S_(SECCLASS_PROCESS, PROCESS__EXECHEAP, "execheap")
75 S_(SECCLASS_PROCESS, PROCESS__SETKEYCREATE, "setkeycreate")
76 S_(SECCLASS_PROCESS, PROCESS__SETSOCKCREATE, "setsockcreate")
75 S_(SECCLASS_MSGQ, MSGQ__ENQUEUE, "enqueue") 77 S_(SECCLASS_MSGQ, MSGQ__ENQUEUE, "enqueue")
76 S_(SECCLASS_MSG, MSG__SEND, "send") 78 S_(SECCLASS_MSG, MSG__SEND, "send")
77 S_(SECCLASS_MSG, MSG__RECEIVE, "receive") 79 S_(SECCLASS_MSG, MSG__RECEIVE, "receive")
@@ -248,3 +250,4 @@
248 S_(SECCLASS_KEY, KEY__SEARCH, "search") 250 S_(SECCLASS_KEY, KEY__SEARCH, "search")
249 S_(SECCLASS_KEY, KEY__LINK, "link") 251 S_(SECCLASS_KEY, KEY__LINK, "link")
250 S_(SECCLASS_KEY, KEY__SETATTR, "setattr") 252 S_(SECCLASS_KEY, KEY__SETATTR, "setattr")
253 S_(SECCLASS_KEY, KEY__CREATE, "create")
diff --git a/security/selinux/include/av_permissions.h b/security/selinux/include/av_permissions.h
index 1205227a3a..69fd4b4820 100644
--- a/security/selinux/include/av_permissions.h
+++ b/security/selinux/include/av_permissions.h
@@ -467,6 +467,8 @@
467#define PROCESS__EXECMEM 0x02000000UL 467#define PROCESS__EXECMEM 0x02000000UL
468#define PROCESS__EXECSTACK 0x04000000UL 468#define PROCESS__EXECSTACK 0x04000000UL
469#define PROCESS__EXECHEAP 0x08000000UL 469#define PROCESS__EXECHEAP 0x08000000UL
470#define PROCESS__SETKEYCREATE 0x10000000UL
471#define PROCESS__SETSOCKCREATE 0x20000000UL
470 472
471#define IPC__CREATE 0x00000001UL 473#define IPC__CREATE 0x00000001UL
472#define IPC__DESTROY 0x00000002UL 474#define IPC__DESTROY 0x00000002UL
@@ -966,4 +968,4 @@
966#define KEY__SEARCH 0x00000008UL 968#define KEY__SEARCH 0x00000008UL
967#define KEY__LINK 0x00000010UL 969#define KEY__LINK 0x00000010UL
968#define KEY__SETATTR 0x00000020UL 970#define KEY__SETATTR 0x00000020UL
969 971#define KEY__CREATE 0x00000040UL
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index 8f5547ad18..cf54a30416 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -32,6 +32,8 @@ struct task_security_struct {
32 u32 sid; /* current SID */ 32 u32 sid; /* current SID */
33 u32 exec_sid; /* exec SID */ 33 u32 exec_sid; /* exec SID */
34 u32 create_sid; /* fscreate SID */ 34 u32 create_sid; /* fscreate SID */
35 u32 keycreate_sid; /* keycreate SID */
36 u32 sockcreate_sid; /* fscreate SID */
35 u32 ptrace_sid; /* SID of ptrace parent */ 37 u32 ptrace_sid; /* SID of ptrace parent */
36}; 38};
37 39
diff --git a/sound/core/seq/seq_memory.h b/sound/core/seq/seq_memory.h
index 39c60d9e1e..63e91431a2 100644
--- a/sound/core/seq/seq_memory.h
+++ b/sound/core/seq/seq_memory.h
@@ -31,7 +31,7 @@ struct snd_seq_event_cell {
31 struct snd_seq_event_cell *next; /* next cell */ 31 struct snd_seq_event_cell *next; /* next cell */
32}; 32};
33 33
34/* design note: the pool is a contigious block of memory, if we dynamicly 34/* design note: the pool is a contiguous block of memory, if we dynamicly
35 want to add additional cells to the pool be better store this in another 35 want to add additional cells to the pool be better store this in another
36 pool as we need to know the base address of the pool when releasing 36 pool as we need to know the base address of the pool when releasing
37 memory. */ 37 memory. */
diff --git a/sound/oss/Kconfig b/sound/oss/Kconfig
index 95754e2f71..3b8cdbca26 100644
--- a/sound/oss/Kconfig
+++ b/sound/oss/Kconfig
@@ -839,6 +839,6 @@ config SOUND_SH_DAC_AUDIO
839 depends on SOUND_PRIME && CPU_SH3 839 depends on SOUND_PRIME && CPU_SH3
840 840
841config SOUND_SH_DAC_AUDIO_CHANNEL 841config SOUND_SH_DAC_AUDIO_CHANNEL
842 int " DAC channel" 842 int "DAC channel"
843 default "1" 843 default "1"
844 depends on SOUND_SH_DAC_AUDIO 844 depends on SOUND_SH_DAC_AUDIO
diff --git a/sound/oss/sb_ess.c b/sound/oss/sb_ess.c
index fae05fe3de..180e95c87e 100644
--- a/sound/oss/sb_ess.c
+++ b/sound/oss/sb_ess.c
@@ -97,19 +97,19 @@
97 * 97 *
98 * The documentation is an adventure: it's close but not fully accurate. I 98 * The documentation is an adventure: it's close but not fully accurate. I
99 * found out that after a reset some registers are *NOT* reset, though the 99 * found out that after a reset some registers are *NOT* reset, though the
100 * docs say the would be. Interresting ones are 0x7f, 0x7d and 0x7a. They are 100 * docs say the would be. Interesting ones are 0x7f, 0x7d and 0x7a. They are
101 * related to the Audio 2 channel. I also was suprised about the consequenses 101 * related to the Audio 2 channel. I also was surprised about the consequences
102 * of writing 0x00 to 0x7f (which should be done by reset): The ES1887 moves 102 * of writing 0x00 to 0x7f (which should be done by reset): The ES1887 moves
103 * into ES1888 mode. This means that it claims IRQ 11, which happens to be my 103 * into ES1888 mode. This means that it claims IRQ 11, which happens to be my
104 * ISDN adapter. Needless to say it no longer worked. I now understand why 104 * ISDN adapter. Needless to say it no longer worked. I now understand why
105 * after rebooting 0x7f already was 0x05, the value of my choice: the BIOS 105 * after rebooting 0x7f already was 0x05, the value of my choice: the BIOS
106 * did it. 106 * did it.
107 * 107 *
108 * Oh, and this is another trap: in ES1887 docs mixer register 0x70 is decribed 108 * Oh, and this is another trap: in ES1887 docs mixer register 0x70 is
109 * as if it's exactly the same as register 0xa1. This is *NOT* true. The 109 * described as if it's exactly the same as register 0xa1. This is *NOT* true.
110 * description of 0x70 in ES1869 docs is accurate however. 110 * The description of 0x70 in ES1869 docs is accurate however.
111 * Well, the assumption about ES1869 was wrong: register 0x70 is very much 111 * Well, the assumption about ES1869 was wrong: register 0x70 is very much
112 * like register 0xa1, except that bit 7 is allways 1, whatever you want 112 * like register 0xa1, except that bit 7 is always 1, whatever you want
113 * it to be. 113 * it to be.
114 * 114 *
115 * When using audio 2 mixer register 0x72 seems te be meaningless. Only 0xa2 115 * When using audio 2 mixer register 0x72 seems te be meaningless. Only 0xa2
@@ -117,10 +117,10 @@
117 * 117 *
118 * Software reset not being able to reset all registers is great! Especially 118 * Software reset not being able to reset all registers is great! Especially
119 * the fact that register 0x78 isn't reset is great when you wanna change back 119 * the fact that register 0x78 isn't reset is great when you wanna change back
120 * to single dma operation (simplex): audio 2 is still operation, and uses the 120 * to single dma operation (simplex): audio 2 is still operational, and uses
121 * same dma as audio 1: your ess changes into a funny echo machine. 121 * the same dma as audio 1: your ess changes into a funny echo machine.
122 * 122 *
123 * Received the new that ES1688 is detected as a ES1788. Did some thinking: 123 * Received the news that ES1688 is detected as a ES1788. Did some thinking:
124 * the ES1887 detection scheme suggests in step 2 to try if bit 3 of register 124 * the ES1887 detection scheme suggests in step 2 to try if bit 3 of register
125 * 0x64 can be changed. This is inaccurate, first I inverted the * check: "If 125 * 0x64 can be changed. This is inaccurate, first I inverted the * check: "If
126 * can be modified, it's a 1688", which lead to a correct detection 126 * can be modified, it's a 1688", which lead to a correct detection
@@ -135,7 +135,7 @@
135 * About recognition of ESS chips 135 * About recognition of ESS chips
136 * 136 *
137 * The distinction of ES688, ES1688, ES1788, ES1887 and ES1888 is described in 137 * The distinction of ES688, ES1688, ES1788, ES1887 and ES1888 is described in
138 * a (preliminary ??) datasheet on ES1887. It's aim is to identify ES1887, but 138 * a (preliminary ??) datasheet on ES1887. Its aim is to identify ES1887, but
139 * during detection the text claims that "this chip may be ..." when a step 139 * during detection the text claims that "this chip may be ..." when a step
140 * fails. This scheme is used to distinct between the above chips. 140 * fails. This scheme is used to distinct between the above chips.
141 * It appears however that some PnP chips like ES1868 are recognized as ES1788 141 * It appears however that some PnP chips like ES1868 are recognized as ES1788
@@ -156,9 +156,9 @@
156 * 156 *
157 * The existing ES1688 support didn't take care of the ES1688+ recording 157 * The existing ES1688 support didn't take care of the ES1688+ recording
158 * levels very well. Whenever a device was selected (recmask) for recording 158 * levels very well. Whenever a device was selected (recmask) for recording
159 * it's recording level was loud, and it couldn't be changed. The fact that 159 * its recording level was loud, and it couldn't be changed. The fact that
160 * internal register 0xb4 could take care of RECLEV, didn't work meaning until 160 * internal register 0xb4 could take care of RECLEV, didn't work meaning until
161 * it's value was restored every time the chip was reset; this reset the 161 * its value was restored every time the chip was reset; this reset the
162 * value of 0xb4 too. I guess that's what 4front also had (have?) trouble with. 162 * value of 0xb4 too. I guess that's what 4front also had (have?) trouble with.
163 * 163 *
164 * About ES1887 support: 164 * About ES1887 support:
@@ -169,9 +169,9 @@
169 * the latter case the recording volumes are 0. 169 * the latter case the recording volumes are 0.
170 * Now recording levels of inputs can be controlled, by changing the playback 170 * Now recording levels of inputs can be controlled, by changing the playback
171 * levels. Futhermore several devices can be recorded together (which is not 171 * levels. Futhermore several devices can be recorded together (which is not
172 * possible with the ES1688. 172 * possible with the ES1688).
173 * Besides the separate recording level control for each input, the common 173 * Besides the separate recording level control for each input, the common
174 * recordig level can also be controlled by RECLEV as described above. 174 * recording level can also be controlled by RECLEV as described above.
175 * 175 *
176 * Not only ES1887 have this recording mixer. I know the following from the 176 * Not only ES1887 have this recording mixer. I know the following from the
177 * documentation: 177 * documentation:
diff --git a/sound/oss/via82cxxx_audio.c b/sound/oss/via82cxxx_audio.c
index 1a921ee71a..2343dedd44 100644
--- a/sound/oss/via82cxxx_audio.c
+++ b/sound/oss/via82cxxx_audio.c
@@ -24,6 +24,7 @@
24#include <linux/fs.h> 24#include <linux/fs.h>
25#include <linux/mm.h> 25#include <linux/mm.h>
26#include <linux/pci.h> 26#include <linux/pci.h>
27#include <linux/poison.h>
27#include <linux/init.h> 28#include <linux/init.h>
28#include <linux/interrupt.h> 29#include <linux/interrupt.h>
29#include <linux/proc_fs.h> 30#include <linux/proc_fs.h>
@@ -3522,7 +3523,7 @@ err_out_have_mixer:
3522 3523
3523err_out_kfree: 3524err_out_kfree:
3524#ifndef VIA_NDEBUG 3525#ifndef VIA_NDEBUG
3525 memset (card, 0xAB, sizeof (*card)); /* poison memory */ 3526 memset (card, OSS_POISON_FREE, sizeof (*card)); /* poison memory */
3526#endif 3527#endif
3527 kfree (card); 3528 kfree (card);
3528 3529
@@ -3559,7 +3560,7 @@ static void __devexit via_remove_one (struct pci_dev *pdev)
3559 via_ac97_cleanup (card); 3560 via_ac97_cleanup (card);
3560 3561
3561#ifndef VIA_NDEBUG 3562#ifndef VIA_NDEBUG
3562 memset (card, 0xAB, sizeof (*card)); /* poison memory */ 3563 memset (card, OSS_POISON_FREE, sizeof (*card)); /* poison memory */
3563#endif 3564#endif
3564 kfree (card); 3565 kfree (card);
3565 3566
diff --git a/sound/pci/cs5535audio/cs5535audio_pcm.c b/sound/pci/cs5535audio/cs5535audio_pcm.c
index f0a48693d6..5450a9e8f1 100644
--- a/sound/pci/cs5535audio/cs5535audio_pcm.c
+++ b/sound/pci/cs5535audio/cs5535audio_pcm.c
@@ -143,7 +143,7 @@ static int cs5535audio_build_dma_packets(struct cs5535audio *cs5535au,
143 if (dma->periods == periods && dma->period_bytes == period_bytes) 143 if (dma->periods == periods && dma->period_bytes == period_bytes)
144 return 0; 144 return 0;
145 145
146 /* the u32 cast is okay because in snd*create we succesfully told 146 /* the u32 cast is okay because in snd*create we successfully told
147 pci alloc that we're only 32 bit capable so the uppper will be 0 */ 147 pci alloc that we're only 32 bit capable so the uppper will be 0 */
148 addr = (u32) substream->runtime->dma_addr; 148 addr = (u32) substream->runtime->dma_addr;
149 desc_addr = (u32) dma->desc_buf.addr; 149 desc_addr = (u32) dma->desc_buf.addr;
diff --git a/sound/ppc/toonie.c b/sound/ppc/toonie.c
deleted file mode 100644
index e69de29bb2..0000000000
--- a/sound/ppc/toonie.c
+++ /dev/null
diff --git a/usr/Makefile b/usr/Makefile
index 19d74e6f26..e93824269d 100644
--- a/usr/Makefile
+++ b/usr/Makefile
@@ -21,8 +21,7 @@ ramfs-input := $(if $(filter-out "",$(CONFIG_INITRAMFS_SOURCE)), \
21 $(CONFIG_INITRAMFS_SOURCE),-d) 21 $(CONFIG_INITRAMFS_SOURCE),-d)
22ramfs-args := \ 22ramfs-args := \
23 $(if $(CONFIG_INITRAMFS_ROOT_UID), -u $(CONFIG_INITRAMFS_ROOT_UID)) \ 23 $(if $(CONFIG_INITRAMFS_ROOT_UID), -u $(CONFIG_INITRAMFS_ROOT_UID)) \
24 $(if $(CONFIG_INITRAMFS_ROOT_GID), -g $(CONFIG_INITRAMFS_ROOT_GID)) \ 24 $(if $(CONFIG_INITRAMFS_ROOT_GID), -g $(CONFIG_INITRAMFS_ROOT_GID))
25 $(ramfs-input)
26 25
27# .initramfs_data.cpio.gz.d is used to identify all files included 26# .initramfs_data.cpio.gz.d is used to identify all files included
28# in initramfs and to detect if any files are added/removed. 27# in initramfs and to detect if any files are added/removed.